input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
import os
import numpy as np
import random
class Reader(object):
    """Base class for listfile-backed dataset readers.

    Parameters
    ----------
    dataset_dir : str
        Directory where timeseries files are stored.
    listfile : str
        Path to a listfile. If this parameter is left `None` then
        `dataset_dir/listfile.csv` will be used.
    """

    def __init__(self, dataset_dir, listfile=None):
        self._dataset_dir = dataset_dir
        # Cursor for read_next(); previously created lazily via hasattr(),
        # which hid the attribute from readers of __init__.
        self._current_index = 0
        if listfile is None:
            listfile_path = os.path.join(dataset_dir, "listfile.csv")
        else:
            listfile_path = listfile
        with open(listfile_path, "r") as lfile:
            self._data = lfile.readlines()

    def get_number_of_examples(self):
        """Return the number of examples (lines) read from the listfile."""
        return len(self._data)

    def random_shuffle(self, seed=None):
        """Shuffle the examples in place; pass `seed` for reproducibility."""
        if seed is not None:
            random.seed(seed)
        random.shuffle(self._data)

    def read_example(self, index):
        """Read the example with the given index; implemented by subclasses."""
        raise NotImplementedError()

    def read_next(self):
        """Read examples sequentially, wrapping back to 0 after the last one."""
        to_read_index = self._current_index
        self._current_index = (self._current_index + 1) % self.get_number_of_examples()
        return self.read_example(to_read_index)
class DecompensationReader(Reader):
    r"""
    Reader for decompensation prediction task.

    Parameters
    ----------
    dataset_dir : str
        Directory where timeseries files are stored.
    listfile : str
        Path to a listfile. If this parameter is left `None` then
        `dataset_dir/listfile.csv` will be used.
    """

    def __init__(self, dataset_dir, listfile=None):
        Reader.__init__(self, dataset_dir, listfile)
        # Each listfile line is "<filename>,<time>,<label>".
        parsed = [line.split(',') for line in self._data]
        self._data = [(fname, float(t), int(label)) for (fname, t, label) in parsed]

    def _read_timeseries(self, ts_filename, time_bound):
        events = []
        path = os.path.join(self._dataset_dir, ts_filename)
        with open(path, "r") as tsfile:
            header = tsfile.readline().strip().split(',')
            assert header[0] == "Hours"
            for line in tsfile:
                values = line.strip().split(',')
                # Stop once events fall beyond the prediction time
                # (small epsilon guards against float rounding).
                if float(values[0]) > time_bound + 1e-6:
                    break
                events.append(np.array(values))
        return (np.stack(events), header)

    def read_example(self, index):
        r"""
        Reads the example with given index.

        Parameters
        ----------
        index : int
            Index of the line of the listfile to read (counting starts from 0).

        Returns (X, t, y, header)
        -------
        X : np.array
            2D array containing all events. Each row corresponds to a moment.
            First column is the time and other columns correspond to different
            variables.
        t : float
            Length of the data in hours. Note, in general, it is not equal to
            the timestamp of the last event.
        y : int (0 or 1)
            Mortality within next 24 hours.
        header : array of strings
            Names of the columns. The ordering of the columns is always the same.
        """
        if index < 0 or index >= len(self._data):
            raise ValueError("Index must be from 0 (inclusive) to number of examples (exclusive).")
        (name, t, y) = self._data[index]
        (X, header) = self._read_timeseries(name, t)
        return (X, t, y, header)
class InHospitalMortalityReader(Reader):
    r"""
    Reader for in-hospital mortality prediction task.

    Parameters
    ----------
    dataset_dir : str
        Directory where timeseries files are stored.
    listfile : str
        Path to a listfile. If this parameter is left `None` then
        `dataset_dir/listfile.csv` will be used.
    period_length : float
        Length of the period (in hours) from which the prediction is done.
    """

    def __init__(self, dataset_dir, listfile=None, period_length=48.0):
        Reader.__init__(self, dataset_dir, listfile)
        # Each listfile line is "<filename>,<label>".
        parsed = [line.split(',') for line in self._data]
        self._data = [(fname, int(label)) for (fname, label) in parsed]
        self._period_length = period_length

    def _read_timeseries(self, ts_filename):
        events = []
        path = os.path.join(self._dataset_dir, ts_filename)
        with open(path, "r") as tsfile:
            header = tsfile.readline().strip().split(',')
            assert header[0] == "Hours"
            for line in tsfile:
                events.append(np.array(line.strip().split(',')))
        return (np.stack(events), header)

    def read_example(self, index):
        r"""
        Reads the example with given index.

        Parameters
        ----------
        index : int
            Index of the line of the listfile to read (counting starts from 0).

        Returns (X, t, y, header)
        -------
        X : np.array
            2D array containing all events. Each row corresponds to a moment.
            First column is the time and other columns correspond to different
            variables.
        t : float
            Length of the data in hours. Note, in general, it is not equal to
            the timestamp of the last event.
        y : int (0 or 1)
            In-hospital mortality.
        header : array of strings
            Names of the columns. The ordering of the columns is always the same.
        """
        if index < 0 or index >= len(self._data):
            raise ValueError("Index must be from 0 (inclusive) to number of lines (exclusive).")
        (name, y) = self._data[index]
        (X, header) = self._read_timeseries(name)
        return (X, self._period_length, y, header)
class LengthOfStayReader(Reader):
    r"""
    Reader for length of stay prediction task.

    Parameters
    ----------
    dataset_dir : str
        Directory where timeseries files are stored.
    listfile : str
        Path to a listfile. If this parameter is left `None` then
        `dataset_dir/listfile.csv` will be used.
    """

    def __init__(self, dataset_dir, listfile=None):
        Reader.__init__(self, dataset_dir, listfile)
        # Each listfile line is "<filename>,<time>,<remaining LOS>".
        parsed = [line.split(',') for line in self._data]
        self._data = [(fname, float(t), float(los)) for (fname, t, los) in parsed]

    def _read_timeseries(self, ts_filename, time_bound):
        events = []
        path = os.path.join(self._dataset_dir, ts_filename)
        with open(path, "r") as tsfile:
            header = tsfile.readline().strip().split(',')
            assert header[0] == "Hours"
            for line in tsfile:
                values = line.strip().split(',')
                # Stop once events fall beyond the prediction time
                # (small epsilon guards against float rounding).
                if float(values[0]) > time_bound + 1e-6:
                    break
                events.append(np.array(values))
        return (np.stack(events), header)

    def read_example(self, index):
        r"""
        Reads the example with given index.

        Parameters
        ----------
        index : int
            Index of the line of the listfile to read (counting starts from 0).

        Returns (X, t, y, header)
        -------
        X : np.array
            2D array containing all events. Each row corresponds to a moment.
            First column is the time and other columns correspond to different
            variables.
        t : float
            Length of the data in hours. Note, in general, it is not equal to
            the timestamp of the last event.
        y : float
            Remaining time in ICU.
        header : array of strings
            Names of the columns. The ordering of the columns is always the same.
        """
        if index < 0 or index >= len(self._data):
            raise ValueError("Index must be from 0 (inclusive) to number of lines (exclusive).")
        (name, t, y) = self._data[index]
        (X, header) = self._read_timeseries(name, t)
        return (X, t, y, header)
class PhenotypingReader(Reader):
    r"""
    Reader for phenotype classification task.

    Parameters
    ----------
    dataset_dir : str
        Directory where timeseries files are stored.
    listfile : str
        Path to a listfile. If this parameter is left `None` then
        `dataset_dir/listfile.csv` will be used.
    """

    def __init__(self, dataset_dir, listfile=None):
        Reader.__init__(self, dataset_dir, listfile)
        # The first line of this listfile is a header row; keep it aside.
        self._listfile_header = self._data[0]
        parsed = [line.split(',') for line in self._data[1:]]
        # Row layout: filename, time, then one label column per phenotype.
        self._data = [(mas[0], float(mas[1]), map(int, mas[2:])) for mas in parsed]

    def _read_timeseries(self, ts_filename):
        events = []
        path = os.path.join(self._dataset_dir, ts_filename)
        with open(path, "r") as tsfile:
            header = tsfile.readline().strip().split(',')
            assert header[0] == "Hours"
            for line in tsfile:
                events.append(np.array(line.strip().split(',')))
        return (np.stack(events), header)

    def read_example(self, index):
        r"""
        Reads the example with given index.

        Parameters
        ----------
        index : int
            Index of the line of the listfile to read (counting starts from 0).

        Returns (X, t, y, header)
        -------
        X : np.array
            2D array containing all events. Each row corresponds to a moment.
            First column is the time and other columns correspond to different
            variables.
        t : float
            Length of the data in hours. Note, in general, it is not equal to
            the timestamp of the last event.
        y : array of ints
            Phenotype labels.
        header : array of strings
            Names of the columns. The ordering of the columns is always the same.
        """
        if index < 0 or index >= len(self._data):
            raise ValueError("Index must be from 0 (inclusive) to number of lines (exclusive).")
        (name, t, y) = self._data[index]
        (X, header) = self._read_timeseries(name)
        return (X, t, y, header)
class MultitaskReader(Reader):
r"""
Reader for multitask.
Parameters
----------
dataset_dir : str
Directory where timeseries files are stored.
listilfe : str
Path to a listfile. If this parameter is left `None` then
`dataset_dir/listfile.csv` will be used.
"""
    def __init__(self, dataset_dir, listfile=None):
        """Parse the multitask listfile into per-task label structures.

        Each listfile row is: filename, t, ihm, los, ph, decomp, where each
        per-task field packs its values as a ';'-separated string.
        NOTE(review): this code relies on Python 2 semantics — `map(...)`
        returning a list and `/` performing integer division on ints; it
        will not behave the same on Python 3.
        """
        Reader.__init__(self, dataset_dir, listfile)
        # The first line of this listfile is a header row; keep it aside.
        self._listfile_header = self._data[0]
        self._data = self._data[1:]
        self._data = [line.split(',') for line in self._data]
        # ihm field: ';'-separated ints.
        def process_ihm(ihm):
            return map(int, ihm.split(';'))
        # los field: first half parsed as ints, second half as floats.
        def process_los(los):
            los = los.split(';')
            return (map(int, los[:len(los)/2]), map(float, los[len(los)/2:]))
        # ph field: ';'-separated ints (phenotype labels).
        def process_ph(ph):
            return map(int, ph.split(';'))
        # decomp field: both halves parsed as ints.
        def process_decomp(decomp):
            decomp = decomp.split(';')
            return (map(int, decomp[:len(decomp)/2]), map(int, decomp[len(decomp)/2:]))
        self._data = [(fname, float(t), process_ihm(ihm), process_los(los),
                       process_ph(ph), process_decomp(decomp))
                      for fname, t, ihm, los, ph, decomp in self._data]
def _read_timeseries(self, ts_filename):
ret = []
with open(os.path.join(self._dataset_dir, ts_filename), "r") as tsfile:
header = tsfile.readline().strip().split(',')
assert header[0] == "Hours"
for line in tsfile:
mas = line.strip().split(',')
ret.append(np.array(mas))
return (np.stack(ret), header)
def read_example(self, index):
r"""
Reads the example with given index.
Parameters
----------
index : int
Index of the line of the listfile to read (counting starts from 0).
Returns (X, t, ihm, los, ph, decomp, header)
-------
X : np.array
2D array containing all events. Each row corresponds to a moment.
First coloumn is the time and other columns correspond to different
variables.
t : float
Lenght of the data in | |
bollo_presente = False
bollo = 0
for k,v in lista_codici_iva.iteritems():
codice_iva = k
importo_netto = v
# print "LISTA CODICI : ",codice_iva,importo_netto
dettaglio_iva = db(db.anagrafica_codici_iva.codice_iva == codice_iva).select().first()
percentuale_iva = dettaglio_iva.percentuale_iva
descrizione_iva = dettaglio_iva.descrizione_codice_iva
imposta_iva = return_imposta(v,percentuale_iva)
if dettaglio_iva.bollo_su_importi_esenti is True:
if not bollo_presente:
bollo = db(db.bolli.descrizione=="Fattura").select().first()["valore"]
bollo_presente = True
fattura.footer_2(codice_iva,"",return_currency(importo_netto),descrizione_iva,return_currency(imposta_iva),return_currency(bollo))
bollo = 0
if bollo_presente:
bollo = db(db.bolli.descrizione=="Fattura").select().first()["valore"]
importo_totale += float(bollo)
importo_totale_da_salvare = importo_totale +imposta_iva
# print "Importo totale "+str(importo_totale_da_salvare)
importo_totale = Money(str(importo_totale),"EUR")
importo_totale = importo_totale.format("it_IT").encode('ascii', 'ignore').decode('ascii')
fattura.footer(str(importo_totale)," "," "," "," ",str(importo_totale),str(return_currency(imposta_totale)))
fattura.totale(str(importo_totale_da_salvare))
# db.fatture_salvate.insert(scadenza=scadenza_salvata,nome_cliente=nome_cliente,data_fattura = datetime.datetime.now().strftime("%d/%m/%Y"),numero_fattura = numero_fattura_da_salvare,id_cliente=id_cliente,id_ddt = lista_ddt,totale = importo_totale_da_salvare)
# print "SCADENZA {0}".format(scadenza)
"""
fattura.foote,Field('nome_cliente')sr("Totale merce","Sconto","Netto merce","spese varie","spese_trasporto","totale_imponibile","Totale imposta")
fattura.footer_2("CodIva","Spese accessorie","Imponibile","Iva","Imposta","Bolli")
fattura.footer_2("CodIva2","Spese accessorie2","Imponibile2","Iva2","Imposta2","Bolli2")
fattura.totale("14567645")
"""
fattura.add_row("","","","","","","","","")
fattura.add_row("",annotazioni,"","","","","","","")
fattura.insert_rows()
fattura.create_pdf()
# db(db.fattura).delete()
# db.fattura.insert(numero_fattura = numero_fattura_da_salvare)
@service.jsonrpc
@service.jsonrpc2
def crea_fattura_istantanea(args):
    """Build and render an immediate invoice ("fattura immediata") PDF.

    args -- JSON-RPC positional arguments; args['0'] is the customer id.

    Reads invoice rows from db.righe_in_fattura_istantanea, computes the
    payment due date from the customer's payment terms, accumulates totals
    and VAT per VAT code, renders the PDF via FATTURA, records the invoice
    in db.fatture_salvate and advances the running invoice number stored in
    db.fattura. On any validation problem it sets response.flash with a
    message and returns locals() (web2py controller convention).
    """
    id_cliente=args['0']
    # print "ID CLIENTE : ",id_cliente
    # Running invoice number is stored as "<number>/<year>"; only the number
    # part is incremented here.
    numero_corrente_fattura = db(db.fattura).select().first()["numero_fattura"]
    numero = int(numero_corrente_fattura.split("/")[0])
    anno = int(numero_corrente_fattura.split("/")[1])
    numero +=1
    numero_fattura_da_salvare = str(numero)+"/"+str(anno)
    """
    Dati cliente
    """
    # Customer master data (name, address, tax codes, bank, notes).
    dati_cliente = db(db.clienti.id == id_cliente).select().first()
    nome_cliente=dati_cliente.nome
    citta_cliente = dati_cliente.citta
    indirizzo_cliente = dati_cliente.indirizzo
    cap_cliente = dati_cliente.cap
    provincia_cliente = dati_cliente.provincia
    cf_cliente = dati_cliente.codice_fiscale
    pi_cliente = dati_cliente.partita_iva
    nazione_cliente = dati_cliente.nazione
    codice_banca = dati_cliente.codice_banca
    dettagli_banca = db(db.anagrafica_banche.descrizione == codice_banca).select().first()
    annotazioni=dati_cliente.annotazioni
    bollo= dati_cliente.bollo
    # Customers flagged for stamp duty get a fixed "BOLLO" row (art. 15
    # DPR 633/72, VAT-exempt); any previous BOLLO row is deleted first so
    # it is never duplicated across invocations.
    if bollo:
        db(db.righe_in_fattura_istantanea.codice_articolo=="BOLLO").delete()
        db.righe_in_fattura_istantanea.insert(
            codice_articolo="BOLLO",
            descrizione="art. 15 DPR 633/72",
            riferimento_ordine="",
            qta="1",
            prezzo="2",
            sconti="",
            codice_iva="Esenzione Iva",
            commento=""
        )
    # Set when any row uses an exemption VAT code; triggers the exemption
    # wording block after the rows.
    scritta_esenzione = False
    # print "1"
    # print dettagli_banca
    # print "2"
    start_date = datetime.datetime.now()
    fattura = FATTURA("FATTURA IMMEDIATA",datetime.datetime.now().date().strftime("%d/%m/%Y"),numero_fattura_da_salvare)
    fattura.intestazione(nome_cliente,citta_cliente,indirizzo_cliente,cap_cliente,provincia_cliente,nazione_cliente,cf_cliente,pi_cliente)
    # First dettaglio() call uses placeholder payment/due-date strings; it is
    # repeated with real values inside the row loop below.
    try:
        fattura.dettaglio(str(id_cliente),dettagli_banca.descrizione,str(dettagli_banca.iban),"PAGAMENTO","SCADENZA")
    except Exception,e:
        # print e
        response.flash="Controllare il tipo di pagamento in anagrafica cliente"
        return locals()
    fattura.rows=[]
    lista_codici_iva = {}
    importo_totale = 0
    imposta_totale = 0
    imposta_iva = 0
    lista_ddt = []
    # NOTE(review): vestigial guard — `if True:` always runs; likely a
    # leftover from a removed condition.
    if True:
        rows = db(db.righe_in_fattura_istantanea).select()
        for row in rows:
            # NOTE(review): the payment terms / due date are recomputed for
            # every row although they do not depend on the row.
            try:
                pagamento = db(db.clienti.id == id_cliente).select().first()["pagamento"]
                # "F.M." in the payment description means end-of-month terms.
                if "F.M." in pagamento:
                    fine_mese = True
                else:
                    fine_mese = False
                if not fine_mese:
                    # Plain terms: due date = today + configured days.
                    try:
                        giorni_da_aggiungere = db(db.codici_pagamenti.descrizione_codice_pagamento == pagamento).select().first()["giorni"]
                        scadenza = datetime.datetime.now().date() + datetime.timedelta(days = int(giorni_da_aggiungere))
                        scadenza_salvata = scadenza
                        scadenza = scadenza.strftime("%d/%m/%Y")
                    except:
                        response.flash="Tipo di pagamento '{0}' non esistente in anagraficaca pagamenti".format(pagamento)
                        return locals()
                else:
                    # NOTE(review): `("M.S." or "ms") in pagamento` evaluates
                    # as `"M.S." in pagamento` — the "ms" alternative is never
                    # tested. Probably intended:
                    # ("M.S." in pagamento) or ("ms" in pagamento).
                    if ("M.S." or "ms") in pagamento:
                        # End-of-month plus an extra offset into the next month.
                        giorni_da_aggiungere = db(db.codici_pagamenti.descrizione_codice_pagamento == pagamento).select().first()["giorni"]
                        giorni_mese_successivo = db(db.codici_pagamenti.descrizione_codice_pagamento == pagamento).select().first()["giorni_mese_successivo"]
                        scadenza = start_date.date() + datetime.timedelta(days = int(giorni_da_aggiungere))
                        day_start,day_end = monthrange(scadenza.year, scadenza.month)
                        # Snap to the last day of the month, then add the
                        # next-month offset.
                        scadenza = str(day_end)+"/"+str(scadenza.month)+"/"+str(scadenza.year)
                        scadenza = datetime.datetime.strptime(scadenza,"%d/%m/%Y")
                        scadenza = scadenza.date() + datetime.timedelta(days = int(giorni_mese_successivo))
                        scadenza = scadenza.strftime("%d/%m/%Y")
                    else:
                        # End-of-month terms without the next-month ("M.S.") offset.
                        giorni_da_aggiungere = db(db.codici_pagamenti.descrizione_codice_pagamento == pagamento).select().first()["giorni"]
                        scadenza = start_date.date() + datetime.timedelta(days = int(giorni_da_aggiungere))
                        day_start,day_end = monthrange(scadenza.year, scadenza.month)
                        scadenza = str(day_end)+"/"+str(scadenza.month)+"/"+str(scadenza.year)
                        pass
                fattura.dettaglio(str(id_cliente),dettagli_banca.descrizione,str(dettagli_banca.iban),pagamento,str(scadenza))
            except Exception,e:
                # print e
                response.flash="Controllare il tipo di pagamento in anagrafica"
                return locals()
            sconti = row.sconti
            if row.sconti is None:
                sconti=""
            # Real article rows carry a non-empty code without 'commento';
            # everything else is rendered as a free-text comment row.
            if len(row.codice_articolo) > 0 and 'commento' not in row.codice_articolo:
                try:
                    # A literal "0" price is treated as missing: blanking it
                    # makes float() fail and reports the row to the user.
                    if row.prezzo == "0":
                        row.prezzo = ""
                    f = float(row.prezzo)
                    # print "SONO QUI : PREZZO = ".format(f)
                except:
                    msg = "Prezzo non presente Cod.Art : " + row.codice_articolo
                    response.flash=msg
                    return locals()
                try:
                    f=float(row.qta)
                except:
                    msg = "Quantità non valida Cod.Art : " + row.codice_articolo
                    response.flash=msg
                    # print "!QWUEIQWEUQWUE"
                    return locals()
                    pass
                # Keep the numeric amount for totals and a localized string
                # for the printed row.
                importo = saved_importo = float(row.qta) * float(row.prezzo)
                importo = Money(str(importo),"EUR")
                importo = importo.format("it_IT").encode('ascii', 'ignore').decode('ascii')
                prezzo = str(row.prezzo).replace(".",",")
                codice_iva = db(db.anagrafica_codici_iva.descrizione_codice_iva == row.codice_iva).select().first()["codice_iva"]
                percentuale_iva = db(db.anagrafica_codici_iva.descrizione_codice_iva == row.codice_iva).select().first()["percentuale_iva"]
                descrizione_codice_iva = db(db.anagrafica_codici_iva.descrizione_codice_iva == row.codice_iva).select().first()["descrizione_codice_iva"]
                if "Esenzione" in descrizione_codice_iva:
                    scritta_esenzione = True
                importo_totale +=saved_importo
                imposta_totale += return_imposta(saved_importo,int(percentuale_iva))
                # Accumulate net amounts per VAT code for the footer summary.
                if not codice_iva in lista_codici_iva:
                    lista_codici_iva[codice_iva] = saved_importo
                else:
                    lista_codici_iva[codice_iva] += saved_importo
            else:
                # Comment row: blank every column and show the comment text.
                row.u_m,row.codice_articolo,prezzo,sconti,importo,codice_iva,row.riferimento_ordine,row.qta = "","","","","","","",""
                row.descrizione=row.commento
            fattura.add_row(row.codice_articolo,row.descrizione,row.riferimento_ordine,row.u_m,row.qta,prezzo,sconti,importo,codice_iva)
    # Append the customer's VAT-exemption wording, if any exempt row was seen.
    if scritta_esenzione:
        scritta_esenzione_cliente = dati_cliente.descrizione_esenzione_iva
        fattura.add_row("","","","","","","","","")
        fattura.add_row("","","","","","","","","")
        scritte = scritta_esenzione_cliente.split(",")
        for scritta in scritte:
            fattura.add_row("",scritta,"","","","","","","")
    # print lista_codici_iva
    # Footer: one line per VAT code; the stamp-duty value is shown only on
    # the first code flagged bollo_su_importi_esenti.
    bollo_presente = False
    bollo = 0
    for k,v in lista_codici_iva.iteritems():
        codice_iva = k
        importo_netto = v
        # print "LISTA CODICI : ",codice_iva,importo_netto
        dettaglio_iva = db(db.anagrafica_codici_iva.codice_iva == codice_iva).select().first()
        percentuale_iva = dettaglio_iva.percentuale_iva
        descrizione_iva = dettaglio_iva.descrizione_codice_iva
        imposta_iva = return_imposta(v,percentuale_iva)
        if dettaglio_iva.bollo_su_importi_esenti is True:
            if not bollo_presente:
                bollo = db(db.bolli.descrizione=="Fattura").select().first()["valore"]
                bollo_presente = True
        fattura.footer_2(codice_iva,"",return_currency(importo_netto),descrizione_iva,return_currency(imposta_iva),return_currency(bollo))
        bollo = 0
    """
    if bollo_presente:
        bollo = db(db.bolli.descrizione=="Fattura").select().first()["valore"]
        importo_totale += float(bollo)
    """
    # NOTE(review): the saved grand total adds only `imposta_iva` (the tax of
    # the LAST VAT code iterated above), not `imposta_totale` — verify this
    # is intended when more than one VAT code is present.
    importo_totale_da_salvare = importo_totale +imposta_iva
    importo_totale = Money(str(importo_totale),"EUR")
    importo_totale = importo_totale.format("it_IT").encode('ascii', 'ignore').decode('ascii')
    fattura.footer(str(importo_totale)," "," "," "," ",str(importo_totale),str(return_currency(imposta_totale)))
    fattura.totale(str(importo_totale_da_salvare))
    lista_ddt=[] #Fattura senza ddt = istantanea
    # NOTE(review): `scadenza` here is the formatted string (or possibly
    # unset when there are no rows); the sibling code saves scadenza_salvata
    # instead — confirm which one the table expects.
    db.fatture_salvate.insert(scadenza=scadenza,nome_cliente=nome_cliente,data_fattura = datetime.datetime.now().strftime("%d/%m/%Y"),numero_fattura = numero_fattura_da_salvare,id_cliente=id_cliente,id_ddt = lista_ddt,totale = importo_totale_da_salvare)
    # print "SCADENZA {0}".format(scadenza)
    """
    fattura.foote,Field('nome_cliente')sr("Totale merce","Sconto","Netto merce","spese varie","spese_trasporto","totale_imponibile","Totale imposta")
    fattura.footer_2("CodIva","Spese accessorie","Imponibile","Iva","Imposta","Bolli")
    fattura.footer_2("CodIva2","Spese accessorie2","Imponibile2","Iva2","Imposta2","Bolli2")
    fattura.totale("14567645")
    """
    fattura.add_row("","","","","","","","","")
    fattura.add_row("",annotazioni,"","","","","","","")
    fattura.insert_rows()
    fattura.create_pdf()
    # Advance the running invoice number.
    db(db.fattura).delete()
    db.fattura.insert(numero_fattura = numero_fattura_da_salvare)
@service.jsonrpc
@service.jsonrpc2
def crea_fattura_istantanea_accredito(args):
id_cliente=args['0']
# print "ID CLIENTE : ",id_cliente
numero_corrente_fattura = db(db.fattura).select().first()["numero_fattura"]
numero = int(numero_corrente_fattura.split("/")[0])
anno = int(numero_corrente_fattura.split("/")[1])
numero +=1
numero_fattura_da_salvare = str(numero)+"/"+str(anno)
"""
Dati cliente
"""
dati_cliente = db(db.clienti.id == id_cliente).select().first()
nome_cliente=dati_cliente.nome
citta_cliente = dati_cliente.citta
indirizzo_cliente = dati_cliente.indirizzo
cap_cliente = dati_cliente.cap
provincia_cliente = dati_cliente.provincia
cf_cliente = dati_cliente.codice_fiscale
pi_cliente = dati_cliente.partita_iva
nazione_cliente = dati_cliente.nazione
codice_banca = dati_cliente.codice_banca
dettagli_banca = db(db.anagrafica_banche.descrizione == codice_banca).select().first()
annotazioni=dati_cliente.annotazioni
# print "1"
# print dettagli_banca
# print "2"
start_date = datetime.datetime.now()
fattura = FATTURA("NOTA DI ACCREDITO",datetime.datetime.now().date().strftime("%d/%m/%Y"),numero_fattura_da_salvare)
fattura.intestazione(nome_cliente,citta_cliente,indirizzo_cliente,cap_cliente,provincia_cliente,nazione_cliente,cf_cliente,pi_cliente)
try:
fattura.dettaglio(str(id_cliente),dettagli_banca.descrizione,str(dettagli_banca.iban),"PAGAMENTO","SCADENZA")
except Exception,e:
# print e
response.flash="Controllare il tipo di pagamento in anagrafica cliente"
return locals()
fattura.rows=[]
lista_codici_iva = {}
importo_totale = 0
imposta_totale = 0
imposta_iva = 0
lista_ddt = []
if True:
rows = db(db.righe_in_fattura_istantanea).select()
for row in rows:
try:
pagamento = db(db.clienti.id == id_cliente).select().first()["pagamento"]
if "F.M." in pagamento:
fine_mese = True
else:
fine_mese = False
if not fine_mese:
try:
giorni_da_aggiungere = db(db.codici_pagamenti.descrizione_codice_pagamento == pagamento).select().first()["giorni"]
scadenza = datetime.datetime.now().date() + datetime.timedelta(days = int(giorni_da_aggiungere))
scadenza_salvata = scadenza
scadenza = scadenza.strftime("%d/%m/%Y")
except:
response.flash="Tipo di pagamento '{0}' non esistente in anagraficaca pagamenti".format(pagamento)
return locals()
else:
if ("M.S." or "ms") in pagamento:
giorni_da_aggiungere = db(db.codici_pagamenti.descrizione_codice_pagamento == pagamento).select().first()["giorni"]
giorni_mese_successivo = db(db.codici_pagamenti.descrizione_codice_pagamento == pagamento).select().first()["giorni_mese_successivo"]
scadenza = start_date.date() + datetime.timedelta(days = int(giorni_da_aggiungere))
day_start,day_end = monthrange(scadenza.year, scadenza.month)
scadenza = str(day_end)+"/"+str(scadenza.month)+"/"+str(scadenza.year)
scadenza = datetime.datetime.strptime(scadenza,"%d/%m/%Y")
scadenza = scadenza.date() + datetime.timedelta(days = int(giorni_mese_successivo))
scadenza = scadenza.strftime("%d/%m/%Y")
else:
# Fine mese senza M.S.
giorni_da_aggiungere = db(db.codici_pagamenti.descrizione_codice_pagamento == pagamento).select().first()["giorni"]
scadenza = start_date.date() + datetime.timedelta(days = int(giorni_da_aggiungere))
day_start,day_end = monthrange(scadenza.year, scadenza.month)
scadenza = str(day_end)+"/"+str(scadenza.month)+"/"+str(scadenza.year)
pass
fattura.dettaglio(str(id_cliente),dettagli_banca.descrizione,str(dettagli_banca.iban),pagamento,str(scadenza))
except Exception,e:
# print e
response.flash="Controllare il tipo di pagamento in anagrafica"
return locals()
sconti = row.sconti
if row.sconti is None:
sconti=""
if len(row.codice_articolo) > 0 and not 'commento' in row.codice_articolo:
try:
if row.prezzo == "0":
row.prezzo = ""
f = float(row.prezzo)
# print "SONO QUI : PREZZO = ".format(f)
except:
msg = "Prezzo non presente Cod.Art : " + row.codice_articolo
response.flash=msg
return locals()
try:
f=float(row.qta)
except:
msg = "Quantità non valida Cod.Art : " + row.codice_articolo
response.flash=msg
return locals()
pass
importo = saved_importo = float(row.qta) * float(row.prezzo)
importo = Money(str(importo),"EUR")
importo = importo.format("it_IT").encode('ascii', 'ignore').decode('ascii')
prezzo = str(row.prezzo).replace(".",",")
codice_iva = db(db.anagrafica_codici_iva.descrizione_codice_iva == row.codice_iva).select().first()["codice_iva"]
percentuale_iva = db(db.anagrafica_codici_iva.descrizione_codice_iva == row.codice_iva).select().first()["percentuale_iva"]
importo_totale +=saved_importo
imposta_totale += return_imposta(saved_importo,int(percentuale_iva))
if not codice_iva in lista_codici_iva:
lista_codici_iva[codice_iva] = saved_importo
else:
lista_codici_iva[codice_iva] += saved_importo
else:
row.codice_articolo,prezzo,sconti,importo,codice_iva,row.riferimento_ordine,row.qta = "","","","","","",""
row.descrizione=row.commento
row.u_m=""
fattura.add_row(row.codice_articolo,row.descrizione,row.riferimento_ordine,row.u_m,row.qta,prezzo,sconti,importo,codice_iva)
# print lista_codici_iva
bollo_presente = False
bollo = 0
for k,v in lista_codici_iva.iteritems():
codice_iva = k
importo_netto = v
# print "LISTA CODICI : ",codice_iva,importo_netto
dettaglio_iva = db(db.anagrafica_codici_iva.codice_iva == codice_iva).select().first()
percentuale_iva = dettaglio_iva.percentuale_iva
descrizione_iva = dettaglio_iva.descrizione_codice_iva
imposta_iva = return_imposta(v,percentuale_iva)
if dettaglio_iva.bollo_su_importi_esenti is True:
if not bollo_presente:
bollo = db(db.bolli.descrizione=="Fattura").select().first()["valore"]
bollo_presente = True
fattura.footer_2(codice_iva,"",return_currency(importo_netto),descrizione_iva,return_currency(imposta_iva),return_currency(bollo))
bollo = 0
if bollo_presente:
bollo = db(db.bolli.descrizione=="Fattura").select().first()["valore"]
importo_totale += float(bollo)
importo_totale_da_salvare = importo_totale +imposta_iva
| |
#!/usr/bin/env python
################################################################################
# $Id$
# $Revision$
# $Date$
################################################################################
#
# Written by <NAME>
# see LICENSE.txt for license information
#
################################################################################
#
# tftornado.py - use BitTornado with torrentflux-b4rt
# http://tf-b4rt.berlios.de/
#
################################################################################
from BitTornado import PSYCO
# Optionally enable the psyco JIT when the BitTornado build asks for it;
# silently fall back to plain interpretation if psyco is missing or too old.
if PSYCO.psyco:
    try:
        import psyco
        assert psyco.__version__ >= 0x010100f0
        psyco.full()
    except:
        pass
from BitTornado.download_bt1 import BT1Download, defaults, parse_params, get_usage, get_response
from BitTornado.RawServer import RawServer, UPnP_ERROR
from random import seed
from socket import error as socketerror
from BitTornado.bencode import bencode
from BitTornado.natpunch import UPnP_test
from threading import Event
from os.path import abspath, isfile
from os import getpid, remove
from sys import argv, stdout
import sys
from hashlib import sha1
from time import strftime
from BitTornado.clock import clock
from BitTornado import createPeerID, version
# Legacy interpreter guard carried over from BitTornado.
assert sys.version >= '2', "Install Python 2.0 or greater"
# Compatibility shim for pre-2.3 interpreters lacking the True/False
# builtins. NOTE(review): assigning to True/False is a syntax error on
# Python 3 — this file is Python 2 only.
try:
    True
except:
    True = 1
    False = 0
PROFILER = False
# Debug trace log, one file per process: "<argv[3]>.<pid>".
if __debug__: LOGFILE=open(argv[3]+"."+str(getpid()),"w")
def traceMsg(msg):
    """Append *msg* to the debug LOGFILE; any failure is silently ignored."""
    try:
        if __debug__:
            LOGFILE.write("%s\n" % msg)
            LOGFILE.flush()
    except:
        return
#------------------------------------------------------------------------------#
# tfb static methods #
#------------------------------------------------------------------------------#
def fmttime(n):
    """Format *n* seconds of ETA using the short format."""
    # The long format (fmttimelong) is the dormant alternative.
    return fmttimeshort(n)
def fmttimeshort(n):
    """Render *n* seconds as 'HH:MM:SS' (or 'Nd HH:MM:SS' under a week).

    Returns 'complete!' for 0, '-' for a week or more, and '<unknown>'
    for anything that is not a sane number of seconds (< 60 days).
    """
    if n == 0:
        return 'complete!'
    try:
        n = int(n)
        assert n >= 0 and n < 5184000  # 60 days
    except:
        return '<unknown>'
    minutes, seconds = divmod(n, 60)
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)
    if days >= 7:
        return '-'
    if days > 0:
        return '%dd %02d:%02d:%02d' % (days, hours, minutes, seconds)
    return '%02d:%02d:%02d' % (hours, minutes, seconds)
def fmttimelong(n):
    """Render *n* seconds with day/year/decade/century components.

    Returns 'complete!' for 0 and '<unknown>' for values that are not a
    sane number of seconds (< 60 days). Because of that 60-day cap, the
    year/decade/century branches below can never actually fire; they are
    kept for parity with the original formatter.
    """
    if n == 0:
        return 'complete!'
    try:
        n = int(n)
        assert n >= 0 and n < 5184000  # 60 days
    except:
        return '<unknown>'
    minutes, seconds = divmod(n, 60)
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)
    years, days = divmod(days, 365)
    decades, years = divmod(years, 10)
    centuries, decades = divmod(decades, 10)
    if centuries > 0:
        return '%dcent %ddec %dy %dd %02d:%02d:%02d' % (centuries, decades, years, days, hours, minutes, seconds)
    if decades > 0:
        return '%ddec %dy %dd %02d:%02d:%02d' % (decades, years, days, hours, minutes, seconds)
    if years > 0:
        return '%dy %dd %02d:%02d:%02d' % (years, days, hours, minutes, seconds)
    if days > 0:
        return '%dd %02d:%02d:%02d' % (days, hours, minutes, seconds)
    return '%02d:%02d:%02d' % (hours, minutes, seconds)
def transferLog(message, ts):
    """Append *message* to the transfer log file.

    ts -- when true, prefix the message with a '[Y/m/d - H:M:S] ' timestamp.
    Failures are reported on stderr instead of raising.
    """
    try:
        log = open(transferLogFile, "a+")
        if ts:
            log.write(strftime('[%Y/%m/%d - %H:%M:%S]') + " " + message)
        else:
            log.write(message)
        log.flush()
        log.close()
    except Exception:
        sys.stderr.write("Failed to write log-file : " + transferLogFile + "\n")
#------------------------------------------------------------------------------#
# HeadlessDisplayer #
#------------------------------------------------------------------------------#
class HeadlessDisplayer:
def __init__(self):
self.done = False
self.file = ''
self.running = '1'
self.percentDone = ''
self.timeEst = 'Connexion'
self.downloadTo = ''
self.downRate = ''
self.upRate = ''
self.shareRating = ''
self.percentShare = ''
self.upTotal = 0
self.downTotal = 0
self.seedStatus = ''
self.peerStatus = ''
self.seeds = ''
self.peers = ''
self.errors = []
self.last_update_time = -1
self.autoShutdown = 'False'
self.user = 'unknown'
self.size = 0
self.shareKill = '100'
self.distcopy = ''
self.stoppedAt = ''
self.dow = None
self.displayCounter = 0
    def finished(self):
        """Mark the download complete and refresh the display.

        When autoShutdown is the string 'True', persists the status (unless
        a stop time was already recorded) and raises KeyboardInterrupt so
        the main loop unwinds and the client shuts down.
        NOTE(review): writeStatus() is defined outside this view — confirm
        its side effects before changing this flow.
        """
        if __debug__: traceMsg('finished - begin')
        self.done = True
        self.percentDone = '100'
        self.timeEst = 'Telechargement Fini'
        self.downRate = ''
        self.display()
        if self.autoShutdown == 'True':
            self.upRate = ''
            if self.stoppedAt == '':
                self.writeStatus()
            if __debug__: traceMsg('finished - end - raising ki')
            raise KeyboardInterrupt
        if __debug__: traceMsg('finished - end')
    def failed(self):
        """Mark the download failed and refresh the display.

        Mirrors finished(): when autoShutdown is the string 'True', persists
        the status (unless a stop time was already recorded) and raises
        KeyboardInterrupt to unwind the main loop.
        """
        if __debug__: traceMsg('failed - begin')
        self.done = True
        self.percentDone = '0'
        self.timeEst = 'Download Failed!'
        self.downRate = ''
        self.display()
        if self.autoShutdown == 'True':
            self.upRate = ''
            if self.stoppedAt == '':
                self.writeStatus()
            if __debug__:traceMsg('failed - end - raising ki')
            raise KeyboardInterrupt
        if __debug__: traceMsg('failed - end')
def error(self, errormsg):
self.errors.append(errormsg)
# log error
transferLog("error: " + errormsg + "\n", True)
def chooseFile(self, default, size, saveas, dir):
self.file = '%s (%.1f MB)' % (default, float(size) / (1 << 20))
self.size = size
if saveas != '':
default = saveas
self.downloadTo = abspath(default)
return default
    def newpath(self, path):
        # Callback from the downloader when the final save path changes.
        self.downloadTo = path
    def scrub_errs(self):
        """Collapse consecutive duplicate error messages.

        Returns (and installs as self.errors) a list where each run of
        identical consecutive messages becomes a single entry, suffixed
        with ' (xN)' when the run had more than one occurrence. Every step
        is wrapped in bare excepts so a malformed error entry can never
        break the display update path.
        """
        new_errors = []
        try:
            if self.errors:
                last_errMsg = ''
                errCount = 0
                for err in self.errors:
                    try:
                        if last_errMsg == '':
                            # First message starts the first run.
                            last_errMsg = err
                        elif last_errMsg == err:
                            errCount += 1
                        elif last_errMsg != err:
                            # Run ended: flush it, then start a new run.
                            if errCount > 0:
                                new_errors.append(last_errMsg + ' (x' + str(errCount+1) + ')')
                            else:
                                new_errors.append(last_errMsg)
                            errCount = 0
                            last_errMsg = err
                    except:
                        if __debug__: traceMsg('scrub_errs - Failed scrub')
                        pass
                try:
                    # Flush the final run, avoiding a duplicate of the entry
                    # already at the tail of new_errors.
                    if len(new_errors) > 0:
                        if last_errMsg != new_errors[len(new_errors)-1]:
                            if errCount > 0:
                                new_errors.append(last_errMsg + ' (x' + str(errCount+1) + ')')
                            else:
                                new_errors.append(last_errMsg)
                    else:
                        if errCount > 0:
                            new_errors.append(last_errMsg + ' (x' + str(errCount+1) + ')')
                        else:
                            new_errors.append(last_errMsg)
                except:
                    if __debug__: traceMsg('scrub_errs - Failed during scrub last Msg ')
                    pass
                # NOTE(review): caps the raw list at 100 by repeatedly deleting
                # the first 99 entries; self.errors is then overwritten with
                # new_errors below anyway — verify this cap is still intended.
                if len(self.errors) > 100:
                    while len(self.errors) > 100 :
                        del self.errors[0:99]
                self.errors = new_errors
        except:
            if __debug__: traceMsg('scrub_errs - Failed during scrub Errors')
            pass
        return new_errors
def display(self, dpflag = Event(), fractionDone = None, timeEst = None,
downRate = None, upRate = None, activity = None,
statistics = None, **kws):
if self.last_update_time + 0.1 > clock() and fractionDone not in (0.0, 1.0) and activity is not None:
return
self.last_update_time = clock()
if fractionDone is not None:
self.percentDone = str(float(int(fractionDone * 1000)) / 10)
if timeEst is not None:
self.timeEst = fmttime(timeEst)
if activity is not None and not self.done:
self.timeEst = activity
if downRate is not None:
self.downRate = '%.1f kB/s' % (float(downRate) / (1 << 10))
if upRate is not None:
self.upRate = '%.1f kB/s' % (float(upRate) / (1 << 10))
if statistics is not None:
if (statistics.shareRating < 0) or (statistics.shareRating > 100):
self.shareRating = 'oo (%.1f MB up / %.1f MB down)' % (float(statistics.upTotal) / (1<<20), float(statistics.downTotal) / (1<<20))
self.downTotal = statistics.downTotal
self.upTotal = statistics.upTotal
else:
self.shareRating = '%.3f (%.1f MB up / %.1f MB down)' % (statistics.shareRating, float(statistics.upTotal) / (1<<20), float(statistics.downTotal) / (1<<20))
self.downTotal = statistics.downTotal
self.upTotal = statistics.upTotal
if not self.done:
self.seedStatus = '%d seen now, plus %.3f distributed copies' % (statistics.numSeeds,0.001*int(1000*statistics.numCopies))
self.seeds = (str(statistics.numSeeds))
else:
self.seedStatus = '%d seen recently, plus %.3f distributed copies' % (statistics.numOldSeeds,0.001*int(1000*statistics.numCopies))
self.seeds = (str(statistics.numOldSeeds))
self.peers = '%d' % (statistics.numPeers)
self.distcopy = '%.3f' % (0.001*int(1000*statistics.numCopies))
self.peerStatus = '%d seen now, %.1f%% done at %.1f kB/s' % (statistics.numPeers,statistics.percentDone,float(statistics.torrentRate) / (1 << 10))
dpflag.set()
# process command-stack
die = self.processCommandStack()
# shutdown if requested
if die:
self.execShutdown()
return;
# ratio- + limit- checks / shutdown / stat
if self.stoppedAt == '':
die = False
downRate = self.downTotal
die = False
if downRate == 0 and self.upTotal > 0:
downRate = self.size
if self.done:
self.percentDone = '100'
downRate = self.size
if self.autoShutdown == 'True':
transferLog("die-when-done set, setting shutdown-flag...\n", True)
die = True
if self.upTotal > 0:
self.percentShare = '%.1f' % ((float(self.upTotal)/float(downRate))*100)
else:
self.percentShare = '0.0'
if self.done and self.percentShare is not '' and self.autoShutdown == 'False':
if (float(self.percentShare) >= float(self.shareKill)) and (self.shareKill != '0'):
transferLog("seed-limit "+str(self.shareKill)+" reached, setting shutdown-flag...\n", True)
die = True
self.upRate = ''
elif (not self.done) and (self.timeEst == 'complete!') and (self.percentDone == '100.0'):
if (float(self.percentShare) >= float(self.shareKill)) and (self.shareKill != '0'):
transferLog("seed-limit "+str(self.shareKill)+" reached, setting shutdown-flag...\n", True)
die = True
self.upRate = ''
# shutdown / write stat-file
if die:
self.execShutdown()
else:
# write every 5 secs
if self.displayCounter < 5:
self.displayCounter += 1
else:
self.displayCounter = 0
# write stat-file
self.writeStatus()
def processCommandStack(self):
""" processCommandStack """
if isfile(transferCommandFile):
# process file
transferLog("Processing command-file " + transferCommandFile + "...\n", True)
try:
# read file to mem
f = open(transferCommandFile, 'r')
commands = f.readlines()
f.close
# remove file
try:
remove(transferCommandFile)
except:
transferLog("Failed to remove command-file : " + transferCommandFile + "\n", True)
pass
# exec commands
if len(commands) > 0:
for command in commands:
command = command.replace("\n", "")
if len(command) > 0:
# exec, early out when reading a quit-command
if self.execCommand(command):
return True
else:
transferLog("No commands found.\n", True)
except:
transferLog("Failed to read command-file : " + transferCommandFile + "\n", True)
pass
return False
def execCommand(self, command):
""" execCommand """
opCode = command[0]
# q
if opCode == 'q':
transferLog("command: stop-request, setting shutdown-flag...\n", True)
return True
# u
elif opCode == 'u':
if len(command) < 2:
transferLog("invalid rate.\n", True)
return False
rateNew = command[1:]
transferLog("command: setting upload-rate to | |
self.z[t, l*self.r_part:(l+1)*self.r_part]
Y[idx] = self.y[t, l]
idx += 1
idx_list.append(idx)
if self.k == 1:
pars[:, 0], u_hat_temp[:, 0] = self.multivariate_OLS(Y, X)
elif self.k == 21:
pars[:, l], u_hat_temp[:, l] = self.multivariate_OLS(Y, X)
zeros = np.zeros((max_delay_AR+288, self.k), dtype=np.float32)
u_hat = np.concatenate((np.zeros((max_delay_AR, self.k)), u_hat_temp[idx_list[0]:idx_list[1], :]), axis=0)
u_hat = np.concatenate((u_hat, zeros, u_hat_temp[idx_list[1]:idx_list[2], :]), axis=0)
u_hat = np.concatenate((u_hat, zeros, u_hat_temp[idx_list[2]:idx_list[3], :]), axis=0)
Phi = [pars[j*self.k:(j+1)*self.k, :] for j in range(p_tot)]
Xi = np.zeros((self.k, self.r), dtype=np.float32)
if self.k == 1:
Xi[0, :] = pars[p_tot*self.k:, 0]
elif self.k == 21:
for l in range(self.k):
Xi[l, l*self.r_part:(l+1)*self.r_part] = pars[p_tot*self.k:, l]
Sigma_u = np.sum(np.array([np.outer(u_hat[t, :], u_hat[t, :]) for t in range(self.n-2*288-(max_delay_AR*self.nr_missing_t))]), axis=0)/(self.n-2*288-(max_delay_AR*self.nr_missing_t)-1)
return Xi, Phi, Sigma_u, u_hat
    def sVARMAX_fit(self, m, m_s):
        """
        Fit s-VARMAX using OLS.

        1) Fit a s-VARX(m) x (m_s)_s for m >> p and m_s >> p_s model to y
           using OLS. Compute the residuals u_hat for the resulting model.
        2) Using u_hat do OLS to estimate the s-VARMAX(p, q) x (p_s, q_s)_s
           parameters.

        Parameters
        ----------
        m : int
            Autoregressive order for the s-VARX(m) x (m_s)_s in step 1).
        m_s : int
            Seasonal autoregressive order for the s-VARX(m) x (m_s)_s in step 1).

        Returns
        -------
        Xi : ndarray, size=(k, r)
            Exogenous variable parameter matrix.
        Phi : list, len=(p_tot)
            List of autoregressive parameter matrices given as
            ndarrays of size=(k, k).
        Psi : list, len=(q_tot)
            List of moving average parameter matrices given as
            ndarrays of size=(k, k).
        Sigma_u : ndarray, size=(k, k)
            Covariance of white noise process.
        """
        # The long-AR approximation must fit inside one season.
        if self.p_s != 0: assert self.s > m
        # Step 1): long s-VARX fit; only the residuals u_hat are kept and
        # used as a proxy for the unobserved noise in step 2).
        _, _, _, u_hat = self.sVARX_fit(m, m_s, self.s)
        if self.p_s == 0 and self.q_s == 0:
            if self.l == "all":
                print(f"Fit a VARIMAX({self.p}, {self.d}, {self.q}) model.")
            elif self.l != "all":
                print(f"Fit a ARIMAX({self.p}, {self.d}, {self.q}) model.")
        else:
            if self.l == "all":
                print(f"Fit a s-VARIMAX({self.p}, {self.d}, {self.q}) x ({self.p_s}, {0}, {self.q_s})_{self.s} model.")
            elif self.l != "all":
                print(f"Fit a s-ARIMAX({self.p}, {self.d}, {self.q}) x ({self.p_s}, {0}, {self.q_s})_{self.s} model.")
        # Step 2): combined (seasonal x non-seasonal) lag offsets; drop the
        # leading zero lag.
        delay_list_AR = [j_s*self.s+j for j_s in range(self.p_s+1) for j in range(self.p+1)][1:]
        delay_list_MA = [i_s*self.s+i for i_s in range(self.q_s+1) for i in range(self.q+1)][1:]
        # Stacked parameter matrix: AR block, MA block, then exogenous block.
        pars = np.zeros(((self.p_tot+self.q_tot)*self.k + self.r_part, self.k), dtype=np.float32)
        u_hat_new = np.zeros((self.n-2*288-(self.max_delay_AR*self.nr_missing_t), self.k), dtype=np.float32)
        # self.l == "all" fits every output dimension; otherwise only the
        # single dimension self.l is fitted.
        if self.l == "all":
            iter_l = 0
        else:
            iter_l = self.l
        for l in range(iter_l, iter_l+self.k):
            idx = 0
            # NOTE(review): the 2*288 / 288 offsets look like one-day margins
            # in 5-minute steps around missing-data gaps -- confirm.
            Y = np.zeros(self.n-2*288-(self.max_delay_AR*self.nr_missing_t), dtype=np.float32)
            X = np.zeros((self.n-2*288-(self.max_delay_AR*self.nr_missing_t), (self.p_tot+self.q_tot)*self.k + self.r_part), dtype=np.float32)
            # Build the regression over each contiguous segment between gaps.
            for missing_t_idx in range(self.nr_missing_t):
                a = self.missing_t[missing_t_idx]+self.max_delay_AR
                if missing_t_idx < self.nr_missing_t-1:
                    b = self.missing_t[missing_t_idx+1]-288
                else:
                    b = self.missing_t[missing_t_idx+1]
                for t in range(a, b):
                    # Lagged endogenous values and (negated) lagged residuals
                    # form the regressor row for time t.
                    X_t_AR = np.zeros((self.p_tot, self.k), dtype=np.float32)
                    X_t_MA = np.zeros((self.q_tot, self.k), dtype=np.float32)
                    for counter, delay_AR in enumerate(delay_list_AR):
                        X_t_AR[counter, :] = self.y[t-delay_AR, :]
                    for counter, delay_MA in enumerate(delay_list_MA):
                        X_t_MA[counter, :] = -u_hat[t-delay_MA, :]
                    X[idx, :(self.p_tot+self.q_tot)*self.k] = np.vstack((X_t_AR, X_t_MA)).flatten()
                    # k == 1: single-output model; k == 21: per-output slice
                    # of the exogenous regressors is used.
                    if self.k == 1:
                        X[idx, (self.p_tot+self.q_tot)*self.k:] = self.z[t, :]
                        Y[idx] = self.y[t, 0]
                    elif self.k == 21:
                        X[idx, (self.p_tot+self.q_tot)*self.k:] = self.z[t, l*self.r_part:(l+1)*self.r_part]
                        Y[idx] = self.y[t, l]
                    idx += 1
            if self.k == 1:
                pars[:, 0], u_hat_new[:, 0] = self.multivariate_OLS(Y, X)
            elif self.k == 21:
                pars[:, l], u_hat_new[:, l] = self.multivariate_OLS(Y, X)
        # Unstack the fitted parameter matrix into AR, MA and exogenous parts.
        Phi = [pars[j*self.k:(j+1)*self.k, :] for j in range(self.p_tot)]
        Psi = [pars[self.p_tot*self.k+i*self.k:self.p_tot*self.k+(i+1)*self.k, :] for i in range(self.q_tot)]
        Xi = np.zeros((self.k, self.r), dtype=np.float32)
        if self.k == 1:
            Xi[0, :] = pars[(self.p_tot+self.q_tot)*self.k:, 0]
        elif self.k == 21:
            for l in range(self.k):
                Xi[l, l*self.r_part:(l+1)*self.r_part] = pars[(self.p_tot+self.q_tot)*self.k:, l]
        # Sample covariance of the new residuals (outer-product average).
        Sigma_u = np.sum(np.array([np.outer(u_hat_new[t, :], u_hat_new[t, :]) for t in range(self.n-2*288-(self.max_delay_AR*self.nr_missing_t))]), axis=0)/(self.n-2*288-(self.max_delay_AR*self.nr_missing_t)-1)
        return Xi, Phi, Psi, Sigma_u
def multivariate_OLS(self, Y, X):
"""
Compute OLS for a multivariate regression problem.
Parameters
----------
Y : ndarray, size=(n, k)
Target.
X : ndarray, size=(n, r)
Design matrix.
Returns
-------
B : ndarray, size=(r, k)
Parameter matrix.
eps : ndarray, size=(n, k)
Residuals.
"""
t1 = time()
B = np.linalg.inv(X.T @ X) @ X.T @ Y
t2 = time()
print("Parameter fit time: {}".format(t2-t1))
eps = Y - X @ B
return B, eps
    def estimate_noise(self, tau_ahead, y_test, z_NWP_test, z_reg_test, test_missing_t):
        """
        Reconstruct the white-noise (residual) process on a test set by
        inverting the fitted s-VARMAX recursion.

        Parameters
        ----------
        tau_ahead : int
            Compute and test on up to tau-ahead forecasts.
        y_test : ndarray, size=(n_test, k)
            Endogenous variable.
        z_NWP_test : ndarray, size=(tau_ahead, nwp_n_test, 11*k)
            Numerical weather predictions with the given transformations.
            The first axis is the tau_ahead axis while the second axis gives
            a new set of NWP. The last axis is as follows:
            (T, sin(WD10m), cos(WD10m), sin(WD100m), cos(WD100m),
            WS10m, WS10m^2, WS10m^3, WS100m, WS100m^2, WS100m^3).
        z_reg_test : ndarray, size=(n_test, 2)
            Regulation data for DK1 in the first column and DK2 in the second
            column.
        test_missing_t : list
            List of time indices where a discontinuity in time is present due
            to missing power history data. The first entry in the list is
            zero and the last entry in the list is n.

        Returns
        -------
        u_hat : ndarray, size=(n_test, k)
            Noise process (zero outside the computed segments).
        """
        # Combined (seasonal x non-seasonal) lag offsets; drop the zero lag.
        array_AR = np.array([j_s*self.s+j for j_s in range(self.p_s+1) for j in range(self.p+1)])[1:]
        array_MA = np.array([i_s*self.s+i for i_s in range(self.q_s+1) for i in range(self.q+1)])[1:]
        test_nr_missing_t = len(test_missing_t)-1
        n_test, _ = y_test.shape
        u_hat = np.zeros((n_test, self.k))
        # Process each contiguous segment between missing-data gaps; the
        # recursion needs max_delay past values, hence the offset on 'a'.
        for missing_t_idx in range(test_nr_missing_t):
            a = test_missing_t[missing_t_idx]+self.max_delay
            if missing_t_idx < test_nr_missing_t-1:
                # NOTE(review): 288 looks like a one-day margin in 5-minute
                # steps before the next gap -- confirm.
                b = test_missing_t[missing_t_idx+1]-tau_ahead-288
            else:
                b = test_missing_t[missing_t_idx+1]-tau_ahead
            for t in range(a, b):
                # u_t = y_t - sum_j Phi_j y_{t-j} + sum_i Psi_i u_{t-i} - Xi z_t
                # (earlier u_hat entries are reused, so order of t matters).
                u_hat[t, :] += y_test[t, :]
                for j, idx in enumerate(array_AR):
                    u_hat[t, :] -= np.dot(self.Phi[j], y_test[t-idx, :])
                for i, idx in enumerate(array_MA):
                    u_hat[t, :] += np.dot(self.Psi[i], u_hat[t-idx, :])
                z_data = self.make_z(0, t, z_reg_test, z_NWP_test)
                u_hat[t, :] -= np.dot(self.Xi, z_data)
        return u_hat
def forecast(self, tau_ahead, y_test, z_reg_test, z_NWP_test,
test_missing_t, P_test):
"""
Compute the tau-ahead forecast using the truncated forecasting as
defined in property 3.7 of Shumway2017.
Parameters
----------
tau_ahead : int
Compute tau-ahead forecast t+tau given time t-1.
y_test : ndarray, size=(n_test, k)
Endogenous variable.
z_reg_test : ndarray, size=(n_test, 2)
Regulation data for DK1 in the first column and DK2 in the second
column.
z_NWP_test : ndarray, size=(tau_ahead, nwp_n_test, 11*k)
Numerical weather predictions with the given transformations.
The first axis is the tau_ahead axis while the second axis gives
a new set of NWP. The last axis is as follows:
(T, sin(WD10m), cos(WD10m), sin(WD100m), cos(WD100m),
WS10m, WS10m^2, WS10m^3, WS100m, WS100m^2, WS100m^3).
test_missing_t : list
List of time indices where a discontinuity in time is present due
to missing power history data. The first entry in the list is
zero and the last entry in the list is n.
P_test : ndarray, size=(n_test+1, k), optional
Wind power at time t-2. Used when first order differencing is used.
Returns
-------
P_bar : ndarray, size=(t_end-t_start, k)
Wind power forecast.
idx_list : list
List containing the indices for which forecasts are made. This
is needed due to the missing data.
"""
array_AR = np.array([j_s*self.s+j for j_s in range(self.p_s+1) for j in range(self.p+1)])[1:]
array_MA = np.array([i_s*self.s+i for i_s in range(self.q_s+1) for i in range(self.q+1)])[1:]
test_nr_missing_t = len(test_missing_t)-1
n_test, _ = y_test.shape
y_bar = np.zeros((tau_ahead, n_test, self.k))
if self.d == 1:
P_bar = np.zeros((tau_ahead, n_test, self.k))
phi_mat = np.hstack(self.Phi)
if self.q != 0 or self.q_s != 0:
psi_mat = np.hstack(self.Psi)
beta = np.concatenate((phi_mat, -psi_mat, self.Xi), axis=1)
else:
beta = np.concatenate((phi_mat, self.Xi), axis=1)
u_hat = self.estimate_noise(tau_ahead, y_test, z_NWP_test,
z_reg_test, test_missing_t)
idx_list = []
for tau_i in range(tau_ahead):
if tau_i % 20 == 0:
print("Tau ahead: {}".format(tau_i))
for missing_t_idx in range(test_nr_missing_t):
a = test_missing_t[missing_t_idx]+self.max_delay
if missing_t_idx < test_nr_missing_t-1:
b = test_missing_t[missing_t_idx+1]-tau_ahead-288
else:
b = test_missing_t[missing_t_idx+1]-tau_ahead
for t in range(a, b):
if tau_i == 0:
idx_list.append(t)
z_data = self.make_z(0, t, z_reg_test, z_NWP_test)
y_vec = y_test[t-array_AR, :].flatten()
u_vec = u_hat[t-array_MA, :].flatten()
data_vec = np.hstack((y_vec, u_vec, z_data))
y_bar[0, t, :] = np.dot(beta, data_vec)
else:
bar_AR = array_AR[tau_i-array_AR >= 0]
test_AR = array_AR[tau_i-array_AR < 0]
hat_MA = array_MA[tau_i-array_MA < 0]
if len(bar_AR) != 0:
y_vec_bar = y_bar[tau_i-bar_AR, t, :].flatten()
else:
y_vec_bar = np.array([])
if len(test_AR) != 0:
y_vec_test = y_test[t+tau_i-test_AR, :].flatten()
else:
y_vec_test = np.array([])
if len(hat_MA) != 0:
u_vec = u_hat[t+tau_i-hat_MA, :].flatten()
else:
u_vec = np.array([])
y_bar[tau_i, t, :] += np.dot(phi_mat, np.hstack((y_vec_bar, y_vec_test)))
if self.q != 0 or self.q_s != 0:
y_bar[tau_i, t, :] -= np.dot(psi_mat[:, (len(array_MA)-len(hat_MA))*self.k:], u_vec)
z_data = self.make_z(tau_i, t, | |
Got %d, expected %d' \
% (i, curSessionCount, curRes.cNumSessions));
fRc = False;
break;
if curGuestSession is not None \
and curGuestSession.name != curGuestSessionName:
reporter.error('Test #%d failed: Session name does not match: Got "%s", expected "%s"' \
% (i, curGuestSession.name, curGuestSessionName));
fRc = False;
break;
fRc2 = curTest.closeSession();
if fRc2 is False:
reporter.error('Test #%d failed: Session could not be closed' % (i,));
fRc = False;
break;
if fRc is False:
return (False, oTxsSession);
# Multiple sessions.
iMaxGuestSessions = 31; # Maximum number of concurrent guest session allowed.
# Actually, this is 32, but we don't test session 0.
multiSession = {};
reporter.log2('Opening multiple guest tsessions at once ...');
for i in range(iMaxGuestSessions + 1):
multiSession[i] = tdTestSession(sUser = sUser, sPassword = <PASSWORD>, sSessionName = 'MultiSession #%d' % (i,));
multiSession[i].setEnvironment(oSession, oTxsSession, oTestVm);
curSessionCount = multiSession[i].getSessionCount(self.oTstDrv.oVBoxMgr);
reporter.log2('MultiSession test #%d count is %d' % (i, curSessionCount));
if curSessionCount is not i:
reporter.error('MultiSession count #%d must be %d, got %d' % (i, i, curSessionCount));
fRc = False;
break;
fRc2, _ = multiSession[i].createSession('MultiSession #%d' % (i,));
if fRc2 is not True:
if i < iMaxGuestSessions:
reporter.error('MultiSession #%d test failed' % (i,));
fRc = False;
else:
reporter.log('MultiSession #%d exceeded concurrent guest session count, good' % (i,));
break;
curSessionCount = multiSession[i].getSessionCount(self.oTstDrv.oVBoxMgr);
if curSessionCount is not iMaxGuestSessions:
reporter.error('Final MultiSession count must be %d, got %d'
% (iMaxGuestSessions, curSessionCount));
return (False, oTxsSession);
reporter.log2('Closing MultiSessions ...');
iLastSession = iMaxGuestSessions - 1;
for i in range(iLastSession): # Close all but the last opened session.
fRc2 = multiSession[i].closeSession();
reporter.log2('MultiSession #%d count is %d' % (i, multiSession[i].getSessionCount(self.oTstDrv.oVBoxMgr),));
if fRc2 is False:
reporter.error('Closing MultiSession #%d failed' % (i,));
fRc = False;
break;
curSessionCount = multiSession[i].getSessionCount(self.oTstDrv.oVBoxMgr);
if curSessionCount is not 1:
reporter.error('Final MultiSession count #2 must be 1, got %d' % (curSessionCount,));
fRc = False;
try:
# r=bird: multiSession[0].oGuestSession is None! Why don't you just use 'assert' or 'if' to check
# the functioning of the __testcase__?
# Make sure that accessing the first opened guest session does not work anymore because we just removed (closed) it.
curSessionName = multiSession[0].oGuestSession.name;
reporter.error('Accessing first removed MultiSession should not be possible, got name="%s"' % (curSessionName,));
fRc = False;
except:
reporter.logXcpt('Could not access first removed MultiSession object, good:');
try:
# Try Accessing last opened session which did not get removed yet.
curSessionName = multiSession[iLastSession].oGuestSession.name;
reporter.log('Accessing last standing MultiSession worked, got name="%s"' % (curSessionName,));
multiSession[iLastSession].closeSession();
curSessionCount = multiSession[i].getSessionCount(self.oTstDrv.oVBoxMgr);
if curSessionCount is not 0:
reporter.error('Final MultiSession count #3 must be 0, got %d' % (curSessionCount,));
fRc = False;
except:
reporter.logXcpt('Could not access last standing MultiSession object:');
fRc = False;
## @todo Test session timeouts.
return (fRc, oTxsSession);
    def testGuestCtrlSessionFileRefs(self, oSession, oTxsSession, oTestVm): # pylint: disable=R0914
        """
        Tests the guest session file reference handling.

        Opens cStaleFiles guest files whose references are intentionally
        dropped ("stale"), then the same number of referenced files which are
        closed again, verifying the session's file count and the file status
        transitions at each stage.

        Returns a (fRc, oTxsSession) tuple like the other sub-tests.
        """
        # NOTE(review): sUser/sPassword/sDomain/sFile are only assigned for
        # Windows guests; on any other guest OS the createSession() call below
        # raises NameError, which the broad except turns into a test failure.
        # Confirm this sub-test is only scheduled for Windows VMs.
        if oTestVm.isWindows():
            sUser = "Administrator";
            sPassword = "password";
            sDomain = "";
            sFile = "C:\\windows\\system32\\kernel32.dll";
        # Number of stale guest files to create.
        cStaleFiles = 10;
        fRc = True;
        try:
            oGuest = oSession.o.console.guest;
            oGuestSession = oGuest.createSession(sUser, sPassword, sDomain, \
                "testGuestCtrlSessionFileRefs");
            fWaitFor = [ vboxcon.GuestSessionWaitForFlag_Start ];
            waitResult = oGuestSession.waitForArray(fWaitFor, 30 * 1000);
            #
            # Be nice to Guest Additions < 4.3: They don't support session handling and
            # therefore return WaitFlagNotSupported.
            #
            if waitResult != vboxcon.GuestSessionWaitResult_Start \
                and waitResult != vboxcon.GuestSessionWaitResult_WaitFlagNotSupported:
                # Just log, don't assume an error here (will be done in the main loop then).
                reporter.log('Session did not start successfully, returned wait result: %d' \
                    % (waitResult));
                return (False, oTxsSession);
            reporter.log('Session successfully started');
            #
            # Open guest files and "forget" them (stale entries).
            # For them we don't have any references anymore intentionally.
            #
            reporter.log2('Opening stale files');
            for i in range(0, cStaleFiles):
                try:
                    if self.oTstDrv.fpApiVer >= 5.0:
                        oGuestSession.fileOpen(sFile, vboxcon.FileAccessMode_ReadOnly, vboxcon.FileOpenAction_OpenExisting, 0);
                    else:
                        oGuestSession.fileOpen(sFile, "r", "oe", 0);
                    # Note: Use a timeout in the call above for not letting the stale processes
                    # hanging around forever. This can happen if the installed Guest Additions
                    # do not support terminating guest processes.
                except:
                    reporter.errorXcpt('Opening stale file #%d failed:' % (i,));
                    fRc = False;
                    break;
            # The session must still track exactly the stale files.
            if fRc:
                cFiles = len(self.oTstDrv.oVBoxMgr.getArray(oGuestSession, 'files'));
                if cFiles != cStaleFiles:
                    reporter.error('Test failed: Got %d stale files, expected %d' % (cFiles, cStaleFiles));
                    fRc = False;
            if fRc:
                #
                # Open non-stale files and close them again.
                #
                reporter.log2('Opening non-stale files');
                aaFiles = [];
                for i in range(0, cStaleFiles):
                    try:
                        if self.oTstDrv.fpApiVer >= 5.0:
                            oCurFile = oGuestSession.fileOpen(sFile, vboxcon.FileAccessMode_ReadOnly,
                                vboxcon.FileOpenAction_OpenExisting, 0);
                        else:
                            oCurFile = oGuestSession.fileOpen(sFile, "r", "oe", 0);
                        aaFiles.append(oCurFile);
                    except:
                        reporter.errorXcpt('Opening non-stale file #%d failed:' % (i,));
                        fRc = False;
                        break;
            # Now both stale and referenced files must be tracked.
            if fRc:
                cFiles = len(self.oTstDrv.oVBoxMgr.getArray(oGuestSession, 'files'));
                if cFiles != cStaleFiles * 2:
                    reporter.error('Test failed: Got %d total files, expected %d' % (cFiles, cStaleFiles * 2));
                    fRc = False;
            if fRc:
                reporter.log2('Closing all non-stale files again ...');
                for i in range(0, cStaleFiles):
                    try:
                        aaFiles[i].close();
                    except:
                        reporter.errorXcpt('Waiting for non-stale file #%d failed:' % (i,));
                        fRc = False;
                        break;
                cFiles = len(self.oTstDrv.oVBoxMgr.getArray(oGuestSession, 'files'));
                # Here we count the stale files (that is, files we don't have a reference
                # anymore for) and the opened and then closed non-stale files (that we still keep
                # a reference in aaFiles[] for).
                if cFiles != cStaleFiles:
                    reporter.error('Test failed: Got %d total files, expected %d' \
                        % (cFiles, cStaleFiles));
                    fRc = False;
                if fRc:
                    #
                    # Check if all (referenced) non-stale files now are in "closed" state.
                    #
                    reporter.log2('Checking statuses of all non-stale files ...');
                    for i in range(0, cStaleFiles):
                        try:
                            curFilesStatus = aaFiles[i].status;
                            if curFilesStatus != vboxcon.FileStatus_Closed:
                                reporter.error('Test failed: Non-stale file #%d has status %d, expected %d' \
                                    % (i, curFilesStatus, vboxcon.FileStatus_Closed));
                                fRc = False;
                        except:
                            reporter.errorXcpt('Checking status of file #%d failed:' % (i,));
                            fRc = False;
                            break;
                    if fRc:
                        reporter.log2('All non-stale files closed');
                    cFiles = len(self.oTstDrv.oVBoxMgr.getArray(oGuestSession, 'files'));
                    reporter.log2('Final guest session file count: %d' % (cFiles,));
                    # Now try to close the session and see what happens.
                    reporter.log2('Closing guest session ...');
                    oGuestSession.close();
        except:
            reporter.errorXcpt('Testing for stale processes failed:');
            fRc = False;
        return (fRc, oTxsSession);
#def testGuestCtrlSessionDirRefs(self, oSession, oTxsSession, oTestVm):
# """
# Tests the guest session directory reference handling.
# """
# fRc = True;
# return (fRc, oTxsSession);
def testGuestCtrlSessionProcRefs(self, oSession, oTxsSession, oTestVm): # pylint: disable=R0914
"""
Tests the guest session process reference handling.
"""
if oTestVm.isWindows():
sUser = "Administrator";
sPassword = "password";
sDomain = "";
sCmd = "C:\\windows\\system32\\cmd.exe";
aArgs = [sCmd,];
# Number of stale guest processes to create.
cStaleProcs = 10;
fRc = True;
try:
oGuest = oSession.o.console.guest;
oGuestSession = oGuest.createSession(sUser, sPassword, sDomain, \
"testGuestCtrlSessionProcRefs");
fWaitFor = [ vboxcon.GuestSessionWaitForFlag_Start ];
waitResult = oGuestSession.waitForArray(fWaitFor, 30 * 1000);
#
# Be nice to Guest Additions < 4.3: They don't support session handling and
# therefore return WaitFlagNotSupported.
#
if waitResult != vboxcon.GuestSessionWaitResult_Start \
and waitResult != vboxcon.GuestSessionWaitResult_WaitFlagNotSupported:
# Just log, don't assume an error here (will be done in the main loop then).
reporter.log('Session did not start successfully, returned wait result: %d' \
% (waitResult));
return (False, oTxsSession);
reporter.log('Session successfully started');
#
# Fire off forever-running processes and "forget" them (stale entries).
# For them we don't have any references anymore intentionally.
#
reporter.log2('Starting stale processes');
for i in range(0, cStaleProcs):
try:
oGuestSession.processCreate(sCmd,
aArgs if self.oTstDrv.fpApiVer >= 5.0 else aArgs[1:], [],
[ vboxcon.ProcessCreateFlag_WaitForStdOut ], \
30 * 1000);
# Note: Use a timeout in the call above for not letting the stale processes
# hanging around forever. This can happen if the installed Guest Additions
# do not support terminating guest processes.
except:
reporter.logXcpt('Creating stale process #%d failed:' % (i,));
fRc = False;
break;
if fRc:
cProcs = len(self.oTstDrv.oVBoxMgr.getArray(oGuestSession, 'processes'));
if cProcs != cStaleProcs:
reporter.error('Test failed: Got %d stale processes, expected %d' % (cProcs, cStaleProcs));
fRc = False;
if fRc:
#
# Fire off non-stale processes and wait for termination.
#
if oTestVm.isWindows():
aArgs = [ sCmd, '/C', 'dir', '/S', 'C:\\Windows\\system'];
reporter.log2('Starting non-stale processes');
aaProcs = [];
for i in range(0, cStaleProcs):
try:
oCurProc = oGuestSession.processCreate(sCmd, aArgs if self.oTstDrv.fpApiVer >= 5.0 else aArgs[1:],
[], [], 0); # Infinite timeout.
aaProcs.append(oCurProc);
except:
reporter.logXcpt('Creating non-stale | |
5*np.log(2) +
5*np.log(1 - mckin/mbkin)))/(45927*mbkin**9) -
(321536*mckin**10*(np.log(2) + np.log(1 - mckin/mbkin))*
(1 + 5*np.log(2) + 5*np.log(1 - mckin/mbkin)))/(229635*mbkin**10) -
(4115968*(np.log(2) + np.log(1 - mckin/mbkin))*(1 + 6*np.log(2) +
6*np.log(1 - mckin/mbkin)))/2525985 +
(16463872*mckin*(np.log(2) + np.log(1 - mckin/mbkin))*(1 + 6*np.log(2) +
6*np.log(1 - mckin/mbkin)))/(841995*mbkin) -
(8231936*mckin**2*(np.log(2) + np.log(1 - mckin/mbkin))*
(1 + 6*np.log(2) + 6*np.log(1 - mckin/mbkin)))/(76545*mbkin**2) +
(16463872*mckin**3*(np.log(2) + np.log(1 - mckin/mbkin))*
(1 + 6*np.log(2) + 6*np.log(1 - mckin/mbkin)))/(45927*mbkin**3) -
(4115968*mckin**4*(np.log(2) + np.log(1 - mckin/mbkin))*
(1 + 6*np.log(2) + 6*np.log(1 - mckin/mbkin)))/(5103*mbkin**4) +
(32927744*mckin**5*(np.log(2) + np.log(1 - mckin/mbkin))*
(1 + 6*np.log(2) + 6*np.log(1 - mckin/mbkin)))/(25515*mbkin**5) -
(16463872*mckin**6*(np.log(2) + np.log(1 - mckin/mbkin))*
(1 + 6*np.log(2) + 6*np.log(1 - mckin/mbkin)))/(10935*mbkin**6) +
(32927744*mckin**7*(np.log(2) + np.log(1 - mckin/mbkin))*
(1 + 6*np.log(2) + 6*np.log(1 - mckin/mbkin)))/(25515*mbkin**7) -
(4115968*mckin**8*(np.log(2) + np.log(1 - mckin/mbkin))*
(1 + 6*np.log(2) + 6*np.log(1 - mckin/mbkin)))/(5103*mbkin**8) +
(16463872*mckin**9*(np.log(2) + np.log(1 - mckin/mbkin))*
(1 + 6*np.log(2) + 6*np.log(1 - mckin/mbkin)))/(45927*mbkin**9) -
(8231936*mckin**10*(np.log(2) + np.log(1 - mckin/mbkin))*
(1 + 6*np.log(2) + 6*np.log(1 - mckin/mbkin)))/(76545*mbkin**10) +
(16463872*mckin**11*(np.log(2) + np.log(1 - mckin/mbkin))*
(1 + 6*np.log(2) + 6*np.log(1 - mckin/mbkin)))/(841995*mbkin**11) -
(4115968*mckin**12*(np.log(2) + np.log(1 - mckin/mbkin))*
(1 + 6*np.log(2) + 6*np.log(1 - mckin/mbkin)))/(2525985*mbkin**12) -
(68816896*(1 + 7*np.log(2) + 7*np.log(1 - mckin/mbkin)))/297675 +
(68816896*mckin*(1 + 7*np.log(2) + 7*np.log(1 - mckin/mbkin)))/
(42525*mbkin) - (68816896*mckin**2*(1 + 7*np.log(2) +
7*np.log(1 - mckin/mbkin)))/(14175*mbkin**2) +
(68816896*mckin**3*(1 + 7*np.log(2) + 7*np.log(1 - mckin/mbkin)))/
(8505*mbkin**3) - (68816896*mckin**4*(1 + 7*np.log(2) +
7*np.log(1 - mckin/mbkin)))/(8505*mbkin**4) +
(68816896*mckin**5*(1 + 7*np.log(2) + 7*np.log(1 - mckin/mbkin)))/
(14175*mbkin**5) - (68816896*mckin**6*(1 + 7*np.log(2) +
7*np.log(1 - mckin/mbkin)))/(42525*mbkin**6) +
(68816896*mckin**7*(1 + 7*np.log(2) + 7*np.log(1 - mckin/mbkin)))/
(297675*mbkin**7) + (16384*np.pi**2*(1 + 7*np.log(2) +
7*np.log(1 - mckin/mbkin)))/2835 - (16384*mckin*np.pi**2*
(1 + 7*np.log(2) + 7*np.log(1 - mckin/mbkin)))/(405*mbkin) +
(16384*mckin**2*np.pi**2*(1 + 7*np.log(2) + 7*np.log(1 - mckin/mbkin)))/
(135*mbkin**2) - (16384*mckin**3*np.pi**2*(1 + 7*np.log(2) +
7*np.log(1 - mckin/mbkin)))/(81*mbkin**3) +
(16384*mckin**4*np.pi**2*(1 + 7*np.log(2) + 7*np.log(1 - mckin/mbkin)))/
(81*mbkin**4) - (16384*mckin**5*np.pi**2*(1 + 7*np.log(2) +
7*np.log(1 - mckin/mbkin)))/(135*mbkin**5) +
(16384*mckin**6*np.pi**2*(1 + 7*np.log(2) + 7*np.log(1 - mckin/mbkin)))/
(405*mbkin**6) - (16384*mckin**7*np.pi**2*(1 + 7*np.log(2) +
7*np.log(1 - mckin/mbkin)))/(2835*mbkin**7) -
(3696256*(np.log(2) + np.log(1 - mckin/mbkin))*(1 + 7*np.log(2) +
7*np.log(1 - mckin/mbkin)))/2189187 +
(7392512*mckin*(np.log(2) + np.log(1 - mckin/mbkin))*(1 + 7*np.log(2) +
7*np.log(1 - mckin/mbkin)))/(312741*mbkin) -
(3696256*mckin**2*(np.log(2) + np.log(1 - mckin/mbkin))*
(1 + 7*np.log(2) + 7*np.log(1 - mckin/mbkin)))/(24057*mbkin**2) +
(14785024*mckin**3*(np.log(2) + np.log(1 - mckin/mbkin))*
(1 + 7*np.log(2) + 7*np.log(1 - mckin/mbkin)))/(24057*mbkin**3) -
(3696256*mckin**4*(np.log(2) + np.log(1 - mckin/mbkin))*
(1 + 7*np.log(2) + 7*np.log(1 - mckin/mbkin)))/(2187*mbkin**4) +
(7392512*mckin**5*(np.log(2) + np.log(1 - mckin/mbkin))*
(1 + 7*np.log(2) + 7*np.log(1 - mckin/mbkin)))/(2187*mbkin**5) -
(3696256*mckin**6*(np.log(2) + np.log(1 - mckin/mbkin))*
(1 + 7*np.log(2) + 7*np.log(1 - mckin/mbkin)))/(729*mbkin**6) +
(29570048*mckin**7*(np.log(2) + np.log(1 - mckin/mbkin))*
(1 + 7*np.log(2) + 7*np.log(1 - mckin/mbkin)))/(5103*mbkin**7) -
(3696256*mckin**8*(np.log(2) + np.log(1 - mckin/mbkin))*
(1 + 7*np.log(2) + 7*np.log(1 - mckin/mbkin)))/(729*mbkin**8) +
(7392512*mckin**9*(np.log(2) + np.log(1 - mckin/mbkin))*
(1 + 7*np.log(2) + 7*np.log(1 - mckin/mbkin)))/(2187*mbkin**9) -
(3696256*mckin**10*(np.log(2) + np.log(1 - mckin/mbkin))*
(1 + 7*np.log(2) + 7*np.log(1 - mckin/mbkin)))/(2187*mbkin**10) +
(14785024*mckin**11*(np.log(2) + np.log(1 - mckin/mbkin))*
(1 + 7*np.log(2) + 7*np.log(1 - mckin/mbkin)))/(24057*mbkin**11) -
(3696256*mckin**12*(np.log(2) + np.log(1 - mckin/mbkin))*
(1 + 7*np.log(2) + 7*np.log(1 - mckin/mbkin)))/(24057*mbkin**12) +
(7392512*mckin**13*(np.log(2) + np.log(1 - mckin/mbkin))*
(1 + 7*np.log(2) + 7*np.log(1 - mckin/mbkin)))/(312741*mbkin**13) -
(3696256*mckin**14*(np.log(2) + np.log(1 - mckin/mbkin))*
(1 + 7*np.log(2) + 7*np.log(1 - mckin/mbkin)))/(2189187*mbkin**14) +
(8192*(np.log(2) + np.log(1 - mckin/mbkin))*(2 + 7*np.log(2) +
7*np.log(1 - mckin/mbkin)))/315 -
(8192*mckin*(np.log(2) + np.log(1 - mckin/mbkin))*(2 + 7*np.log(2) +
7*np.log(1 - mckin/mbkin)))/(45*mbkin) +
(8192*mckin**2*(np.log(2) + np.log(1 - mckin/mbkin))*(2 + 7*np.log(2) +
7*np.log(1 - mckin/mbkin)))/(15*mbkin**2) -
(8192*mckin**3*(np.log(2) + np.log(1 - mckin/mbkin))*(2 + 7*np.log(2) +
7*np.log(1 - mckin/mbkin)))/(9*mbkin**3) +
(8192*mckin**4*(np.log(2) + np.log(1 - mckin/mbkin))*(2 + 7*np.log(2) +
7*np.log(1 - mckin/mbkin)))/(9*mbkin**4) -
(8192*mckin**5*(np.log(2) + np.log(1 - mckin/mbkin))*(2 + 7*np.log(2) +
7*np.log(1 - mckin/mbkin)))/(15*mbkin**5) +
(8192*mckin**6*(np.log(2) + np.log(1 - mckin/mbkin))*(2 + 7*np.log(2) +
7*np.log(1 - mckin/mbkin)))/(45*mbkin**6) -
(8192*mckin**7*(np.log(2) + np.log(1 - mckin/mbkin))*(2 + 7*np.log(2) +
7*np.log(1 - mckin/mbkin)))/(315*mbkin**7) +
(1316864*(1 + 8*np.log(2) + 8*np.log(1 - mckin/mbkin)))/11025 -
(10534912*mckin*(1 + 8*np.log(2) + 8*np.log(1 - mckin/mbkin)))/
(11025*mbkin) + (5267456*mckin**2*(1 + 8*np.log(2) +
8*np.log(1 - mckin/mbkin)))/(1575*mbkin**2) -
(10534912*mckin**3*(1 + 8*np.log(2) + 8*np.log(1 - mckin/mbkin)))/
(1575*mbkin**3) + (2633728*mckin**4*(1 + 8*np.log(2) +
8*np.log(1 - mckin/mbkin)))/(315*mbkin**4) -
(10534912*mckin**5*(1 + 8*np.log(2) + 8*np.log(1 - mckin/mbkin)))/
(1575*mbkin**5) + (5267456*mckin**6*(1 + 8*np.log(2) +
8*np.log(1 - mckin/mbkin)))/(1575*mbkin**6) -
(10534912*mckin**7*(1 + 8*np.log(2) + 8*np.log(1 - mckin/mbkin)))/
(11025*mbkin**7) + (1316864*mckin**8*(1 + 8*np.log(2) +
8*np.log(1 - mckin/mbkin)))/(11025*mbkin**8) -
(8192*np.pi**2*(1 + 8*np.log(2) + 8*np.log(1 - mckin/mbkin)))/2835 +
(65536*mckin*np.pi**2*(1 + 8*np.log(2) + 8*np.log(1 - mckin/mbkin)))/
(2835*mbkin) - (32768*mckin**2*np.pi**2*(1 + 8*np.log(2) +
8*np.log(1 - mckin/mbkin)))/(405*mbkin**2) +
(65536*mckin**3*np.pi**2*(1 + 8*np.log(2) + 8*np.log(1 - mckin/mbkin)))/
(405*mbkin**3) - (16384*mckin**4*np.pi**2*(1 + 8*np.log(2) +
8*np.log(1 - mckin/mbkin)))/(81*mbkin**4) +
(65536*mckin**5*np.pi**2*(1 + 8*np.log(2) + 8*np.log(1 - mckin/mbkin)))/
(405*mbkin**5) - (32768*mckin**6*np.pi**2*(1 + 8*np.log(2) +
8*np.log(1 - mckin/mbkin)))/(405*mbkin**6) +
(65536*mckin**7*np.pi**2*(1 + 8*np.log(2) + 8*np.log(1 - mckin/mbkin)))/
(2835*mbkin**7) - (8192*mckin**8*np.pi**2*(1 + 8*np.log(2) +
8*np.log(1 - mckin/mbkin)))/(2835*mbkin**8) -
(563052544*(1 + 9*np.log(2) + 9*np.log(1 - mckin/mbkin)))/72335025 +
(563052544*mckin*(1 + 9*np.log(2) + 9*np.log(1 - mckin/mbkin)))/
(8037225*mbkin) - (2252210176*mckin**2*(1 + 9*np.log(2) +
9*np.log(1 - mckin/mbkin)))/(8037225*mbkin**2) +
(2252210176*mckin**3*(1 + 9*np.log(2) + 9*np.log(1 - mckin/mbkin)))/
(3444525*mbkin**3) - (1126105088*mckin**4*(1 + 9*np.log(2) +
9*np.log(1 - mckin/mbkin)))/(1148175*mbkin**4) +
(1126105088*mckin**5*(1 + 9*np.log(2) + 9*np.log(1 - mckin/mbkin)))/
(1148175*mbkin**5) - (2252210176*mckin**6*(1 + 9*np.log(2) +
9*np.log(1 - mckin/mbkin)))/(3444525*mbkin**6) +
(2252210176*mckin**7*(1 + 9*np.log(2) + 9*np.log(1 - mckin/mbkin)))/
(8037225*mbkin**7) - (563052544*mckin**8*(1 + 9*np.log(2) +
9*np.log(1 - mckin/mbkin)))/(8037225*mbkin**8) +
(563052544*mckin**9*(1 + 9*np.log(2) + 9*np.log(1 - mckin/mbkin)))/
(72335025*mbkin**9) + (4096*np.pi**2*(1 + 9*np.log(2) +
9*np.log(1 - mckin/mbkin)))/8505 -
(4096*mckin*np.pi**2*(1 + 9*np.log(2) + 9*np.log(1 - mckin/mbkin)))/
(945*mbkin) + (16384*mckin**2*np.pi**2*(1 + 9*np.log(2) +
9*np.log(1 - mckin/mbkin)))/(945*mbkin**2) -
(16384*mckin**3*np.pi**2*(1 + 9*np.log(2) + 9*np.log(1 - mckin/mbkin)))/
(405*mbkin**3) + (8192*mckin**4*np.pi**2*(1 + 9*np.log(2) +
9*np.log(1 - mckin/mbkin)))/(135*mbkin**4) -
(8192*mckin**5*np.pi**2*(1 + 9*np.log(2) + 9*np.log(1 - mckin/mbkin)))/
(135*mbkin**5) + (16384*mckin**6*np.pi**2*(1 + 9*np.log(2) +
9*np.log(1 - mckin/mbkin)))/(405*mbkin**6) -
(16384*mckin**7*np.pi**2*(1 + 9*np.log(2) + 9*np.log(1 - mckin/mbkin)))/
(945*mbkin**7) + (4096*mckin**8*np.pi**2*(1 + 9*np.log(2) +
9*np.log(1 - mckin/mbkin)))/(945*mbkin**8) -
(4096*mckin**9*np.pi**2*(1 + 9*np.log(2) + 9*np.log(1 - mckin/mbkin)))/
(8505*mbkin**9) - (321536*(np.log(2) + np.log(1 - mckin/mbkin))*
(2 + 9*np.log(2) + 9*np.log(1 - mckin/mbkin)))/229635 +
(321536*mckin*(np.log(2) + np.log(1 - mckin/mbkin))*(2 + 9*np.log(2) +
9*np.log(1 - mckin/mbkin)))/(25515*mbkin) -
(1286144*mckin**2*(np.log(2) + np.log(1 - mckin/mbkin))*
(2 + 9*np.log(2) + 9*np.log(1 - mckin/mbkin)))/(25515*mbkin**2) +
(1286144*mckin**3*(np.log(2) + np.log(1 - mckin/mbkin))*
(2 + 9*np.log(2) + 9*np.log(1 - mckin/mbkin)))/(10935*mbkin**3) -
(643072*mckin**4*(np.log(2) + np.log(1 - mckin/mbkin))*(2 + 9*np.log(2) +
9*np.log(1 - mckin/mbkin)))/(3645*mbkin**4) +
(643072*mckin**5*(np.log(2) + np.log(1 - mckin/mbkin))*(2 + 9*np.log(2) +
9*np.log(1 - mckin/mbkin)))/(3645*mbkin**5) -
(1286144*mckin**6*(np.log(2) + np.log(1 - mckin/mbkin))*
(2 + 9*np.log(2) + 9*np.log(1 - mckin/mbkin)))/(10935*mbkin**6) +
(1286144*mckin**7*(np.log(2) + np.log(1 - mckin/mbkin))*
(2 + 9*np.log(2) + 9*np.log(1 - mckin/mbkin)))/(25515*mbkin**7) -
(321536*mckin**8*(np.log(2) + np.log(1 - mckin/mbkin))*(2 + 9*np.log(2) +
9*np.log(1 - mckin/mbkin)))/(25515*mbkin**8) +
(321536*mckin**9*(np.log(2) + np.log(1 - mckin/mbkin))*(2 + 9*np.log(2) +
9*np.log(1 - mckin/mbkin)))/(229635*mbkin**9) -
(493125632*(1 + 10*np.log(2) + 10*np.log(1 - mckin/mbkin)))/72335025 +
(986251264*mckin*(1 + 10*np.log(2) + 10*np.log(1 - mckin/mbkin)))/
(14467005*mbkin) - (493125632*mckin**2*(1 + 10*np.log(2) +
10*np.log(1 - mckin/mbkin)))/(1607445*mbkin**2) +
(3945005056*mckin**3*(1 + 10*np.log(2) + 10*np.log(1 - mckin/mbkin)))/
(4822335*mbkin**3) - (986251264*mckin**4*(1 + 10*np.log(2) +
10*np.log(1 - mckin/mbkin)))/(688905*mbkin**4) +
(1972502528*mckin**5*(1 + 10*np.log(2) + 10*np.log(1 - mckin/mbkin)))/
(1148175*mbkin**5) - (986251264*mckin**6*(1 + 10*np.log(2) +
10*np.log(1 - mckin/mbkin)))/(688905*mbkin**6) +
(3945005056*mckin**7*(1 + 10*np.log(2) + 10*np.log(1 - mckin/mbkin)))/
(4822335*mbkin**7) - (493125632*mckin**8*(1 + 10*np.log(2) +
10*np.log(1 - mckin/mbkin)))/(1607445*mbkin**8) +
(986251264*mckin**9*(1 + 10*np.log(2) + 10*np.log(1 - mckin/mbkin)))/
(14467005*mbkin**9) - (493125632*mckin**10*(1 + 10*np.log(2) +
10*np.log(1 - mckin/mbkin)))/(72335025*mbkin**10) +
(2048*np.pi**2*(1 + 10*np.log(2) + 10*np.log(1 - mckin/mbkin)))/8505 -
(4096*mckin*np.pi**2*(1 + 10*np.log(2) + 10*np.log(1 - mckin/mbkin)))/
(1701*mbkin) + (2048*mckin**2*np.pi**2*(1 + 10*np.log(2) +
10*np.log(1 - mckin/mbkin)))/(189*mbkin**2) -
(16384*mckin**3*np.pi**2*(1 + 10*np.log(2) + 10*np.log(1 - mckin/mbkin)))/
(567*mbkin**3) + (4096*mckin**4*np.pi**2*(1 + 10*np.log(2) +
10*np.log(1 - mckin/mbkin)))/(81*mbkin**4) -
(8192*mckin**5*np.pi**2*(1 + 10*np.log(2) + 10*np.log(1 - mckin/mbkin)))/
(135*mbkin**5) + (4096*mckin**6*np.pi**2*(1 + 10*np.log(2) +
10*np.log(1 - mckin/mbkin)))/(81*mbkin**6) -
(16384*mckin**7*np.pi**2*(1 + 10*np.log(2) + 10*np.log(1 - mckin/mbkin)))/
(567*mbkin**7) + (2048*mckin**8*np.pi**2*(1 + 10*np.log(2) +
10*np.log(1 - mckin/mbkin)))/(189*mbkin**8) -
(4096*mckin**9*np.pi**2*(1 + 10*np.log(2) + 10*np.log(1 - mckin/mbkin)))/
(1701*mbkin**9) + (2048*mckin**10*np.pi**2*(1 + 10*np.log(2) +
10*np.log(1 - mckin/mbkin)))/(8505*mbkin**10) -
(15503317504*(1 + 11*np.log(2) + 11*np.log(1 - mckin/mbkin)))/2917512675 +
(15503317504*mckin*(1 + 11*np.log(2) + 11*np.log(1 - mckin/mbkin)))/
(265228425*mbkin) - (15503317504*mckin**2*(1 + 11*np.log(2) +
11*np.log(1 - mckin/mbkin)))/(53045685*mbkin**2) +
(15503317504*mckin**3*(1 + 11*np.log(2) + 11*np.log(1 - mckin/mbkin)))/
(17681895*mbkin**3) - (31006635008*mckin**4*(1 + | |
# easyrequest_app/views.py -- from Brown-University-Library/easyrequest_project
# -*- coding: utf-8 -*-
import datetime, json, logging, os, pprint
from django.conf import settings as project_settings
from django.contrib.auth import logout
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseNotFound, HttpResponseRedirect, HttpResponseServerError
from django.shortcuts import render
from django.utils.http import urlquote
from django.views.decorators.csrf import csrf_exempt
from easyrequest_app import models
from easyrequest_app.lib import common, version_helper
from easyrequest_app.lib.mail import Emailer
log = logging.getLogger(__name__)
# Module-level helper singletons shared by all the view functions below.
emailer = Emailer()
login_helper = models.LoginHelper()
shib_view_helper = models.ShibViewHelper()
processor_helper = models.Processor()
shib_logout_helper = models.ShibLogoutHelper()
barcode_handler_helper = models.BarcodeHandlerHelper()
pic_loc_helper = models.PickupLocation()
summary_helper = models.SummaryHelper()
stats_builder = models.StatsBuilder()
def info( request ):
    """ Returns basic info about the easyrequest_hay webapp.
        Renders JSON when `?format=json` is passed, otherwise an HTML page.
        Triggered by root easyrequest_hay url. """
    # (fix: removed `start = datetime.datetime.now()` -- it was assigned and never used)
    log.debug( '\n\nstarting info(); request.__dict__, ```%s```' % request.__dict__ )
    context = {
        'pattern_header': common.grab_pattern_header(),
        'pattern_header_active': json.loads( os.environ['EZRQST__PATTERN_HEADER_ACTIVE_JSON'] )
    }
    if request.GET.get('format', '') == 'json':
        context_json = json.dumps(context, sort_keys=True, indent=2)
        resp = HttpResponse( context_json, content_type='application/javascript; charset=utf-8' )
    else:
        # choose the newer "pattern header" template only when the env-flag enables it
        if context['pattern_header_active'] == True:
            template = 'easyrequest_app_templates/info_02.html'
        else:
            template = 'easyrequest_app_templates/info.html'
        resp = render( request, template, context )
    return resp
def _empty_itemnum_message( request ):
    """ Returns the known bib-without-item problem message when that special case
        applies, otherwise None.
        Special case: the request arrived with a populated `bibnum` but an empty
        `itemnum` (happens when requesting from the search-results view); the
        message points the user at the record-view url instead.
        Called by login(). """
    # NOTE(review): assumes the `itemnum` and `bibnum` GET keys exist whenever
    # 'empty item-number submitted' was recorded -- confirm against validate_params().
    if 'empty item-number submitted' in login_helper.problems and len( request.GET.get('itemnum') ) == 0 and len( request.GET.get('bibnum') ) > 0:
        bib_url = f'https://search.library.brown.edu/catalog/{request.GET["bibnum"]}'
        problem_text = ', '.join( login_helper.problems )
        log.debug( f'bib_url, ``{bib_url}``' )
        return f"""Requesting problem (we're working on this): ``{problem_text}``. Please try requesting from the record-view directly (as opposed to the search-results-view), which should be at the url <a href="{bib_url}">{bib_url}</a>."""
    return None


def login( request ):
    """ Stores referring url, bib, and item-id in session.
        Presents shib (and in non-COVID times manual) login.
        On validation failure renders a problem page (pattern-header mode) or a 400. """
    log.info( '\n\nstarting login()' )
    log.debug( 'request.GET, ``%s``' % request.GET )  # fix: was logged as 'rquest.GET'
    context = {
        'pattern_header': common.grab_pattern_header(),
        'pattern_header_active': json.loads( os.environ['EZRQST__PATTERN_HEADER_ACTIVE_JSON'] )
    }
    if not login_helper.validate_source(request):
        if context['pattern_header_active'] == True:
            context['message'] = """You seem to have attempted to get to this login page without having started from Josiah, the Library's search web-application at <a href="https://search.library.brown.edu/">https://search.library.brown.edu/</a>. Please start there and try again. If you need help, please contact Library staff at the "Feedback" or "Help" link above, and they'll assist you. """
            template = 'easyrequest_app_templates/problem_02.html'
            resp = render( request, template, context )
        else:
            message = """You seem to have attempted to get to this login page without having started from Josiah, the Library's search web-application at ``https://search.library.brown.edu/``. Please start there and try again. If you need help, please contact Library staff at ``%s``, and they'll assist you. """ % (
                login_helper.EMAIL_AUTH_HELP,
            )
            resp = HttpResponseBadRequest( message )
        return resp
    if not login_helper.validate_params( request.GET ):
        log.debug( f'login_helper.problems, ``{login_helper.problems}``' )  # (replaces leftover `here01` debug)
        special_message = _empty_itemnum_message( request )  # deduplicates the special-case text that was repeated in both branches
        if context['pattern_header_active'] == True:
            if special_message:
                context['message'] = special_message
            else:
                context['message'] = """This request could not be submitted for the following reason%s: ``%s``. Please contact Library staff at the "Feedback" or "Help" link above, and they'll assist you.""" % (
                    '' if len(login_helper.problems) < 2 else 's',
                    ', '.join( login_helper.problems ),
                )
            template = 'easyrequest_app_templates/problem_02.html'
            resp = render( request, template, context )
        else:
            if special_message:
                message = special_message
            else:
                message = """This request could not be submitted for the following reason%s: ``%s``. Please contact Library staff at ``%s``, and they'll assist you. """ % (
                    '' if len(login_helper.problems) < 2 else 's',
                    ', '.join( login_helper.problems ),
                    login_helper.EMAIL_AUTH_HELP,
                )
            resp = HttpResponseBadRequest( message )
        return resp
    # source and params are valid; stash item info and show the login page
    login_helper.initialize_session( request )
    ( title, callnumber, item_id ) = login_helper.get_item_info( request.GET['bibnum'], request.GET['itemnum'] )
    login_helper.update_session( request, title, callnumber, item_id )
    context = login_helper.prepare_context( request )
    if request.GET.get('format', '') == 'json':
        context_json = json.dumps(context, sort_keys=True, indent=2)
        resp = HttpResponse( context_json, content_type='application/javascript; charset=utf-8' )
    else:
        if context['pattern_header_active'] == True:
            template = 'easyrequest_app_templates/login_02.html'
        else:
            template = 'easyrequest_app_templates/login.html'
        resp = render( request, template, context )
    return resp
# def login( request ):
# """ Stores referring url, bib, and item-id in session.
# Presents shib (and in non-COVID times manual) login. """
# log.info( 'starting login()' )
# log.debug( 'rquest.GET, ``%s``' % request.GET )
# context = {
# 'pattern_header': common.grab_pattern_header(),
# 'pattern_header_active': json.loads( os.environ['EZRQST__PATTERN_HEADER_ACTIVE_JSON'] )
# }
# if not login_helper.validate_source(request):
# if context['pattern_header_active'] == True:
# context['message'] = """You seem to have attempted to get to this login page without having started from Josiah, the Library's search web-application at <a href="https://search.library.brown.edu/">https://search.library.brown.edu/</a>. Please start there and try again. If you need help, please contact Library staff at the "Feedback" or "Help" link above, and they'll assist you. """
# template = 'easyrequest_app_templates/problem_02.html'
# resp = render( request, template, context )
# else:
# message = """You seem to have attempted to get to this login page without having started from Josiah, the Library's search web-application at ``https://search.library.brown.edu/``. Please start there and try again. If you need help, please contact Library staff at ``%s``, and they'll assist you. """ % (
# login_helper.EMAIL_AUTH_HELP,
# )
# resp = HttpResponseBadRequest( message )
# return resp
# if not login_helper.validate_params( request.GET ):
# if context['pattern_header_active'] == True:
# context['message'] = """This request could not be submitted for the following reason%s: ``%s``. Please contact Library staff at the "Feedback" or "Help" link above, and they'll assist you.""" % (
# '' if len(login_helper.problems) < 2 else 's',
# ', '.join( login_helper.problems ),
# )
# template = 'easyrequest_app_templates/problem_02.html'
# resp = render( request, template, context )
# else:
# message = """This request could not be submitted for the following reason%s: ``%s``. Please contact Library staff at ``%s``, and they'll assist you. """ % (
# '' if len(login_helper.problems) < 2 else 's',
# ', '.join( login_helper.problems ),
# login_helper.EMAIL_AUTH_HELP,
# )
# resp = HttpResponseBadRequest( message )
# return resp
# login_helper.initialize_session( request )
# # ( title, callnumber, item_id ) = login_helper.get_item_info( request.GET['bibnum'], request.GET['barcode'] )
# ( title, callnumber, item_id ) = login_helper.get_item_info( request.GET['bibnum'], request.GET['itemnum'] )
# login_helper.update_session( request, title, callnumber, item_id )
# context = login_helper.prepare_context( request )
# # return render( request, 'easyrequest_app_templates/login.html', context )
# if request.GET.get('format', '') == 'json':
# context_json = json.dumps(context, sort_keys=True, indent=2)
# resp = HttpResponse( context_json, content_type='application/javascript; charset=utf-8' )
# else:
# if context['pattern_header_active'] == True:
# template = 'easyrequest_app_templates/login_02.html'
# else:
# template = 'easyrequest_app_templates/login.html'
# resp = render( request, template, context )
# return resp
@csrf_exempt  # temp for migration
def barcode_handler( request ):
    """ Handles barcode login.
        Flow: validate params -> authenticate -> authorize -> update session;
        any failure redirects back to views.login() with error info in the session.
        On auth success, redirects user to non-seen views.processor()
        On auth failure, redirects back to views.login() """
    log.debug( 'starting barcode_login_handler()' )
    if barcode_handler_helper.validate_params(request) is not True:  # puts param values in session
        return barcode_handler_helper.prep_login_redirect( request )
    if barcode_handler_helper.authenticate( request.session['barcode_login_name'], request.session['barcode_login_barcode'] ) is False:  # if login fails, redirect user back to login page with error messages that will display
        return barcode_handler_helper.prep_login_redirect( request )
    # authorize() returns patron info on success, False on failure
    patron_info_dct = barcode_handler_helper.authorize( request.session['barcode_login_barcode'] )
    if patron_info_dct is False:
        return barcode_handler_helper.prep_login_redirect( request )
    barcode_handler_helper.update_session( request, patron_info_dct )  # TODO: like easyrequest-hay, grab the user's sierra-patron-id and store it to the session
    return barcode_handler_helper.prep_processor_redirect( request )
@csrf_exempt  # temp for migration
def shib_handler( request ):
    """ Stores pickup location to session and redirects to shib_login().
        Non-POST access returns a 400. """
    log.debug( '\n\nstarting shib_handler()' )
    if request.method != 'POST':
        # guard clause: only the login.html form POST is valid here
        log.info( 'non-post detected, returning 400/bad-request' )
        return HttpResponseBadRequest( "This web-application supports Josiah, the Library's search web-application. If you think you should be able to access this url, please contact '%s'." % login_helper.EMAIL_AUTH_HELP )
    log.debug( 'post detected' )
    request.session['pickup_location'] = request.POST['pickup_location']
    log.debug( 'redirect url will be, `%s`' % reverse('shib_login_url') )
    return HttpResponseRedirect( reverse('shib_login_url') )
@csrf_exempt  # temp for migration
def shib_login( request ):
    """ Examines shib headers, sets session-auth.
        Redirects user to non-seen processor() view on success,
        back to login on invalid headers. """
    log.debug( '\n\nstarting shib_login()' )
    validity, shib_dict = shib_view_helper.check_shib_headers( request )
    if validity is not False:
        request.session['sierra_patron_id'] = shib_view_helper.sierra_patron_id
        resp = shib_view_helper.build_response( request, shib_dict )
    else:
        resp = shib_view_helper.prep_login_redirect( request )
    log.debug( 'about to return shib response' )
    return resp
@csrf_exempt # temp for migration
def processor( request ):
""" Handles item request:,
- Ensures user is authenticated.
- Saves request.
- Places hold.
- Emails patron.
- Triggers shib_logout() view. """
if processor_helper.check_request( request ) == False:
return HttpResponseRedirect( reverse('info_url') )
try:
itmrqst = processor_helper.save_data( request )
# processor_helper.place_request( itmrqst, request.session['josiah_api_name'], request.session['pickup_location'] )
| |
"""
Manages finding, running and recoding benchmark results.
This module has shamelessly borrows from
the `airspeed velocity (asv) <http://asv.readthedocs.io/en/latest>`_
file `benchmark.py <https://github.com/spacetelescope/asv/blob/master/asv/benchmark.py>`_.
See the `airspeed velocity (asv) <http://asv.readthedocs.io/en/latest>`_
`LICENSE <https://github.com/spacetelescope/asv/blob/master/LICENSE.rst>`_.
"""
from __future__ import absolute_import
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import mpi4py.MPI as mpi
import sys
import datetime
try:
import cProfile as profile
import pstats
except BaseException:
profile = None
from hashlib import sha256
import inspect
import itertools
import json
import os
import re
import textwrap
import timeit
import gc
from importlib import import_module
from .. import logging as _logging
from .utils.misc import get_process_time_timer
__license__ = "https://github.com/spacetelescope/asv/blob/master/LICENSE.rst"
def _get_attr(source, name, ignore_case=False):
    """
    Return the attribute :samp:`{name}` of :samp:`{source}`, or :samp:`None`.

    When :samp:`{ignore_case}` is true the lookup is case-insensitive, and a
    :obj:`ValueError` is raised if more than one attribute matches.
    """
    if not ignore_case:
        return getattr(source, name, None)
    lowered = name.lower()
    matches = [getattr(source, attr_name) for attr_name in dir(source)
               if attr_name.lower() == lowered]
    if len(matches) > 1:
        raise ValueError(
            "{0} contains multiple {1} functions.".format(
                source.__name__, name))
    return matches[0] if matches else None
def _get_all_attrs(sources, name, ignore_case=False):
    """Yield each non-None ``name`` attribute found across ``sources``, in order."""
    for candidate in sources:
        found = _get_attr(candidate, name, ignore_case=ignore_case)
        if found is not None:
            yield found
def _get_first_attr(sources, name, default, ignore_case=False):
    """Return the first ``name`` attribute found across ``sources``, else ``default``."""
    return next(_get_all_attrs(sources, name, ignore_case=ignore_case), default)
def get_setup_cache_key(func):
    """
    Return a ``"<sourcefile>:<lineno>"`` key identifying where *func* is
    defined, or ``None`` when *func* is ``None``.
    """
    if func is None:
        return None
    source_file = inspect.getsourcefile(func)
    first_line = inspect.getsourcelines(func)[1]
    return '{0}:{1}'.format(source_file, first_line)
def get_source_code(items):
    """
    Extract source code of given items, and concatenate and dedent it.

    Bound methods are prefixed with a ``class <name>:`` header the first time
    their class appears; consecutive methods of the same class are indented
    without repeating the header.  Items whose source cannot be retrieved
    (e.g. builtins) are silently skipped.
    """
    chunks = []
    last_class = None
    for item in items:
        try:
            src_lines, _lineno = inspect.getsourcelines(item)
        except TypeError:
            continue
        if not src_lines:
            continue
        block = textwrap.dedent("\n".join(ln.rstrip() for ln in src_lines))
        cls = None
        if inspect.ismethod(item):
            # Recover the owning class name (py2 `im_class` or py3 `__qualname__`).
            if hasattr(item, 'im_class'):
                cls = item.im_class.__name__
            elif hasattr(item, '__qualname__'):
                qual_parts = item.__qualname__.split('.')
                if len(qual_parts) > 1:
                    cls = qual_parts[-2]
        if cls:
            indented = block.replace("\n", "\n    ")
            if last_class != cls:
                block = "class {0}:\n    {1}".format(cls, indented)
            else:
                block = "    {0}".format(indented)
        last_class = cls
        chunks.append(block)
    return "\n\n".join(chunks).rstrip()
class Benchmark(object):
    """
    Represents a single benchmark.

    Attributes (setups, teardowns, params, timeout, ...) are resolved lazily
    from the ``attr_sources`` by :meth:`initialise`.  An MPI communicator must
    be assigned via :attr:`comm` before the barrier/broadcast helpers are used.
    """
    # The regex of the name of function or method to be considered as
    # this type of benchmark. The default in the base class, will
    # match nothing.
    name_regex = re.compile('^$')

    def __init__(self, name, func, attr_sources):
        self.name = name
        self.func = func
        self.pretty_name = getattr(func, "pretty_name", name)
        self._attr_sources = list(attr_sources)
        # The following are populated lazily by initialise().
        self._setups = None
        self._teardowns = None
        self._setup_cache = None
        self.setup_cache_key = None
        self.setup_cache_timeout = None
        self.timeout = None
        self.code = None
        self.version = None
        self.type = None
        self.unit = None
        self._redo_setup_next = False
        self._params = None
        self.param_names = None
        self._current_params = None
        self._comm = None  # mpi4py communicator, assigned through the `comm` property
        self._setup_error = None
        self._profiler = None

    def initialise(self):
        """
        Lazily resolve setups, teardowns, params etc. from the attr-sources.

        Idempotent: only the first call (when ``self._setups is None``) does
        any work.
        """
        if self._setups is None:
            # The last attr-source may be given as a module *name* string; import it.
            if isinstance(self._attr_sources[-1], str):
                self._attr_sources[-1] = import_module(self._attr_sources[-1])
            # NOTE(review): setups are reversed while teardowns keep source
            # order -- presumably so setups run outermost-source first and
            # teardowns unwind in the opposite order; confirm.
            self._setups = list(_get_all_attrs(self._attr_sources, 'setup', True))[::-1]
            self._teardowns = list(_get_all_attrs(self._attr_sources, 'teardown', True))
            self._setup_cache = _get_first_attr(self._attr_sources, 'setup_cache', None)
            self.setup_cache_key = get_setup_cache_key(self._setup_cache)
            self.setup_cache_timeout = _get_first_attr([self._setup_cache], "timeout", None)
            self.timeout = _get_first_attr(self._attr_sources, "timeout", 60.0)
            self.code = get_source_code([self.func] + self._setups + [self._setup_cache])
            if sys.version_info[0] >= 3:
                code_text = self.code.encode('utf-8')
            else:
                code_text = self.code
            # Version defaults to a hash of the benchmark+setup source, so any
            # source change produces a new version string.
            code_hash = sha256(code_text).hexdigest()
            self.version = str(_get_first_attr(self._attr_sources, "version", code_hash))
            self.type = "base"
            self.unit = "unit"
            self._redo_setup_next = False
            self._params = _get_first_attr(self._attr_sources, "params", [])
            self.param_names = _get_first_attr(self._attr_sources, "param_names", [])
            self._current_params = ()
            # Enforce params format
            try:
                self.param_names = [str(x) for x in list(self.param_names)]
            except ValueError:
                raise ValueError("%s.param_names is not a list of strings" % (self.name,))
            try:
                self._params = list(self._params)
            except ValueError:
                raise ValueError("%s.params is not a list" % (self.name,))
            if self._params and not isinstance(self._params[0], (tuple, list)):
                # Accept a single list for one parameter only
                self._params = [self._params]
            else:
                self._params = [[item for item in entry] for entry in self._params]
            if len(self.param_names) != len(self._params):
                # Trim extra names, then pad with generated 'paramN' names so
                # the two lists always have equal length.
                self.param_names = self.param_names[:len(self._params)]
                self.param_names += ['param%d' % (k + 1,) for k in range(len(self.param_names),
                                                                         len(self._params))]
            # Exported parameter representations
            self.params_repr = [[repr(item) for item in entry] for entry in self._params]

    @property
    def root_rank(self):
        """
        An :samp:`int` indicating the *root* rank process of :attr:`comm`.
        """
        return 0

    @property
    def comm(self):
        """
        The :obj:`mpi4pi.MPI.Comm` used for synchronization.
        """
        return self._comm

    @comm.setter
    def comm(self, comm):
        self._comm = comm

    @property
    def setup_error(self):
        """
        The error which occured during :meth:`do_setup`, :samp:`None` if no error occurred.
        """
        return self._setup_error

    def barrier(self):
        """
        Barrier over all ranks of :attr:`comm`.
        """
        self.comm.barrier()

    def bcast(self, value):
        """
        Broadcast value from :attr:`root_rank` to all ranks of :attr:`comm`.

        :rtype: :obj:`object`
        :return: value on rank :attr:`root_rank` rank process.
        """
        return self.comm.bcast(value, self.root_rank)

    @property
    def params(self):
        """
        The list of benchmark parameters.
        """
        self.initialise()
        return self._params

    @property
    def current_params(self):
        """
        The current set of parameters, set via :meth:`set_param_idx`.
        """
        return self._current_params

    def set_param_idx(self, param_idx):
        """
        Set the parameter combo via the index :samp:`{param_idx}`.

        :raises ValueError: if :samp:`param_idx` is out of range.
        """
        self.initialise()
        try:
            # param_idx indexes into the cartesian product of all param axes.
            self._current_params, = itertools.islice(
                itertools.product(*self._params),
                param_idx, param_idx + 1)
        except ValueError:
            raise ValueError(
                "Invalid benchmark parameter permutation index: %r" % (param_idx,))

    def insert_param(self, param):
        """
        Insert a parameter at the front of the parameter list.
        """
        self.initialise()
        self._current_params = tuple([param] + list(self._current_params))

    def __repr__(self):
        return '<{0} {1}>'.format(self.__class__.__name__, self.name)

    def do_setup(self):
        """
        Run all setup functions; returns True when the benchmark is skipped
        (a setup raised NotImplementedError), False otherwise.
        """
        self.initialise()
        self._setup_error = None
        try:
            for setup in self._setups:
                setup(*self._current_params)
        except NotImplementedError as e:
            # allow skipping test
            self._setup_error = e
            return True
        return False

    def redo_setup(self):
        """
        Teardown then setup again; the first call after do_setup() is a no-op.
        """
        self.initialise()
        if not self._redo_setup_next:
            self._redo_setup_next = True
            return
        self.do_teardown()
        self.do_setup()

    def do_teardown(self):
        # Run all teardown functions with the current parameter combo.
        for teardown in self._teardowns:
            teardown(*self._current_params)

    def do_setup_cache(self):
        # Returns the setup_cache() result, or None when there is no setup_cache.
        if self._setup_cache is not None:
            return self._setup_cache()

    def do_run(self):
        # Run the benchmark (subclass-provided `run`) with the current params.
        return self.run(*self._current_params)

    def do_profile_run(self):
        """
        Run the benchmark once with a cProfile profiler instantiated on the
        root rank only; returns the profiler there, None on other ranks.
        """
        if profile is None:
            raise RuntimeError("cProfile could not be imported")
        self._profiler = None
        self.redo_setup()
        if self.comm.rank == self.root_rank:
            # NOTE(review): the profiler is created disabled here; presumably
            # it gets enabled inside the benchmark machinery during run() -- confirm.
            self._profiler = profile.Profile()
            self._profiler.disable()
        self.run(*self._current_params)
        profiler = self._profiler
        self._profiler = None
        return profiler
class TimeBenchmark(Benchmark):
"""
Represents a single benchmark for timing.
"""
name_regex = re.compile(
'^(Time[A-Z_].+)|(time_.+)$')
def __init__(self, name, func, attr_sources):
    Benchmark.__init__(self, name, func, attr_sources)
    self.type = "time"
    self.unit = "seconds"
    self._attr_sources = attr_sources
    # Timing configuration; None means "not yet loaded" -- see _load_vars().
    self._repeat = None
    self._number = None
    self._goal_time = None
    self._warmup_time = None
    self._default_timer = None
    self._timer = None
    self._wall_timer = None
def _load_vars(self):
    """Load timing settings from the attr sources, falling back to defaults."""
    self._repeat = _get_first_attr(self._attr_sources, 'repeat', 0)  # 0 => default of 10 (see run())
    self._number = int(_get_first_attr(self._attr_sources, 'number', 0))  # 0 => auto-calibrate
    self._goal_time = _get_first_attr(self._attr_sources, 'goal_time', 0.1)
    self._warmup_time = _get_first_attr(self._attr_sources, 'warmup_time', -1)  # negative => auto (see run())
    self._timer = _get_first_attr(self._attr_sources, 'timer', self.default_timer)
    self._wall_timer = _get_first_attr(self._attr_sources, 'wall_timer', mpi.Wtime)
@property
def default_timer(self):
    """
    An :obj:`callable` used to measure benchmark duration, e.g. :func:`time.process_time`.
    """
    # Created lazily on first access.
    if self._default_timer is None:
        self._default_timer = get_process_time_timer()
    return self._default_timer

@default_timer.setter
def default_timer(self, timer):
    # Allows callers to override the lazily-created timer.
    self._default_timer = timer
@property
def repeat(self):
    """Number of timed repeats; 0 means "use the default" (10, see run())."""
    if self._repeat is None:
        self._load_vars()
    return self._repeat
@property
def number(self):
    """Calls per timing sample; 0 means auto-calibrate (see benchmark_timing())."""
    if self._number is None:
        self._load_vars()
    return self._number
@property
def goal_time(self):
    """Target duration (seconds) of a single timing sample."""
    if self._goal_time is None:
        self._load_vars()
    return self._goal_time
@property
def warmup_time(self):
    """Seconds of warm-up before sampling; negative means auto-select (see run())."""
    if self._warmup_time is None:
        self._load_vars()
    return self._warmup_time
@property
def timer(self):
    """The callable used to time benchmark execution (defaults to default_timer)."""
    if self._timer is None:
        self._load_vars()
    return self._timer
@property
def wall_timer(self):
    """The callable returning wall-clock time (defaults to mpi.Wtime)."""
    if self._wall_timer is None:
        self._load_vars()
    return self._wall_timer
def wall_time(self):
    """
    Return *current* wall-clock time in seconds, as given by :attr:`wall_timer`.
    """
    return self.wall_timer()
def do_setup(self):
    """Run setups, then re-read timing config (setup may have changed it)."""
    result = Benchmark.do_setup(self)
    # For parameterized tests, setup() is allowed to change these
    self._load_vars()
    return result
def run(self, *param):
    """
    Time ``self.func(*param)`` and return a dict with per-call timings:
    ``samples`` (timer samples), ``number`` (calls per sample), and the
    wall-clock samples taken before/after the MPI barrier.
    """
    number = self.number
    repeat = self.repeat
    if repeat == 0:
        repeat = 10  # default repeat count when not configured
    warmup_time = self.warmup_time
    if warmup_time < 0:
        # auto-select a warm-up duration
        if '__pypy__' in sys.modules:
            warmup_time = 1.0
        else:
            # Transient effects exist also on CPython, e.g. from
            # OS scheduling
            warmup_time = 0.1
    if param:
        # bind the current parameter combo into a zero-arg callable for timeit
        def func():
            return self.func(*param)
    else:
        func = self.func
    timer = \
        timeit.Timer(
            stmt=func,
            setup=self.redo_setup,
            timer=self.timer
        )
    samples, number, samples_pre_barrier, samples_post_barrier = \
        self.benchmark_timing(timer, repeat, warmup_time, number=number)
    samples_list = [samples, samples_pre_barrier, samples_post_barrier]
    # Convert per-loop totals into per-call timings.
    for i in range(len(samples_list)):
        samples_list[i] = [s / number for s in samples_list[i]]
    return \
        {
            'samples': samples_list[0],
            'number': number,
            'wall_samples_pre_barrier': samples_list[1],
            'wall_samples_post_barrier': samples_list[2]
        }
def benchmark_timing(self, timer, repeat, warmup_time, number=0):
goal_time = self.goal_time
start_time = self.bcast(self.wall_time())
max_time = start_time + min(warmup_time + 1.3 * repeat * goal_time,
self.timeout - 1.3 * goal_time)
def too_slow():
# too slow, don't take more samples
return self.bcast(self.wall_time()) > max_time
if number == 0:
# Select number & warmup.
#
# This needs to be done at the same time, because the
# benchmark timings at the beginning can be larger, and
# lead to too small number being selected.
number = 1
while True:
self._redo_setup_next = False
self.barrier()
gc.disable()
start = self.wall_time()
timing = timer.timeit(number)
self.barrier()
gc.enable()
end = self.wall_time()
wall_time, timing = self.bcast((end - start, timing))
actual_timing = max(wall_time, timing)
if actual_timing >= goal_time:
if self.bcast(self.wall_time()) > start_time + warmup_time:
break
else:
try:
p = | |
import numpy as np
import pytest
from pandas.errors import UnsupportedFunctionCall
from pandas import (
DataFrame,
DatetimeIndex,
Series,
date_range,
)
import pandas._testing as tm
from pandas.core.window import ExponentialMovingWindow
def test_doc_string():
    """Smoke test: the ewm() doc-string example runs without error."""
    frame = DataFrame({"B": [0, 1, 2, np.nan, 4]})
    frame
    frame.ewm(com=0.5).mean()
def test_constructor(frame_or_series):
    """ewm() accepts each decay argument alone; rejects mixes and bad values."""
    c = frame_or_series(range(5)).ewm

    # valid single-argument and explicit-None combinations
    c(com=0.5)
    c(span=1.5)
    c(alpha=0.5)
    c(halflife=0.75)
    c(com=0.5, span=None)
    c(alpha=0.5, com=None)
    c(halflife=0.75, alpha=None)

    # not valid: mutually exclusive decay arguments
    msg = "comass, span, halflife, and alpha are mutually exclusive"
    for kwargs in (
        {"com": 0.5, "alpha": 0.5},
        {"span": 1.5, "halflife": 0.75},
        {"alpha": 0.5, "span": 1.5},
    ):
        with pytest.raises(ValueError, match=msg):
            c(**kwargs)

    # not valid: out-of-range values
    with pytest.raises(ValueError, match="comass must satisfy: comass >= 0"):
        c(com=-0.5)
    with pytest.raises(ValueError, match="span must satisfy: span >= 1"):
        c(span=0.5)
    with pytest.raises(ValueError, match="halflife must satisfy: halflife > 0"):
        c(halflife=0)
    for alpha in (-0.5, 1.5):
        with pytest.raises(ValueError, match="alpha must satisfy: 0 < alpha <= 1"):
            c(alpha=alpha)
@pytest.mark.parametrize("method", ["std", "mean", "var"])
def test_numpy_compat(method):
    # see gh-12811: numpy-style args/kwargs are rejected on window methods
    ewm_obj = ExponentialMovingWindow(Series([2, 4, 6]), alpha=0.5)
    err = "numpy operations are not valid with window objects"
    bound_method = getattr(ewm_obj, method)
    with pytest.raises(UnsupportedFunctionCall, match=err):
        bound_method(1, 2, 3)
    with pytest.raises(UnsupportedFunctionCall, match=err):
        bound_method(dtype=np.float64)
def test_ewma_times_not_datetime_type():
    # ewm(times=...) rejects arrays that are not datetime64[ns]
    expected_msg = r"times must be datetime64\[ns\] dtype."
    with pytest.raises(ValueError, match=expected_msg):
        Series(range(5)).ewm(times=np.arange(5))
def test_ewma_times_not_same_length():
    # a times array shorter than the object raises
    bad_times = np.arange(4).astype("datetime64[ns]")
    with pytest.raises(ValueError, match="times must be the same length as the object."):
        Series(range(5)).ewm(times=bad_times)
def test_ewma_halflife_not_correct_type():
    # when times is given, a numeric halflife is rejected
    valid_times = np.arange(5).astype("datetime64[ns]")
    with pytest.raises(ValueError, match="halflife must be a timedelta convertible object"):
        Series(range(5)).ewm(halflife=1, times=valid_times)
def test_ewma_halflife_without_times(halflife_with_times):
    # a timedelta-style halflife requires times to be provided
    expected = "halflife can only be a timedelta convertible argument if times is not None."
    with pytest.raises(ValueError, match=expected):
        Series(range(5)).ewm(halflife=halflife_with_times)
@pytest.mark.parametrize(
    "times",
    [
        np.arange(10).astype("datetime64[D]").astype("datetime64[ns]"),
        date_range("2000", freq="D", periods=10),
        date_range("2000", freq="D", periods=10).tz_localize("UTC"),
    ],
)
@pytest.mark.parametrize("min_periods", [0, 2])
def test_ewma_with_times_equal_spacing(halflife_with_times, times, min_periods):
    """Equally spaced ``times`` must reproduce the plain halflife=1.0 result."""
    values = np.arange(10.0)
    values[::2] = np.nan
    df = DataFrame({"A": values, "time_col": date_range("2000", freq="D", periods=10)})
    with tm.assert_produces_warning(FutureWarning, match="nuisance columns"):
        # GH#42738
        result = df.ewm(
            halflife=halflife_with_times, min_periods=min_periods, times=times
        ).mean()
    expected = df.ewm(halflife=1.0, min_periods=min_periods).mean()
    tm.assert_frame_equal(result, expected)
def test_ewma_with_times_variable_spacing(tz_aware_fixture):
tz = tz_aware_fixture
halflife = "23 days"
times = DatetimeIndex(
["2020-01-01", "2020-01-10T00:04:05", "2020-02-23T05:00:23"]
).tz_localize(tz)
data = np.arange(3)
df = DataFrame(data)
result = df.ewm(halflife=halflife, times=times).mean()
expected = DataFrame([0.0, 0.5674161888241773, 1.545239952073459])
tm.assert_frame_equal(result, expected)
def test_ewm_with_nat_raises(halflife_with_times):
    # GH#38535: NaT entries in ``times`` cannot be converted and must raise.
    with pytest.raises(ValueError, match="Cannot convert NaT values to integer"):
        Series(range(1)).ewm(
            com=0.1, halflife=halflife_with_times, times=DatetimeIndex(["NaT"])
        )
def test_ewm_with_times_getitem(halflife_with_times):
    # GH 40164: column selection on an ewm constructed with ``times`` keeps them.
    values = np.arange(10.0)
    values[::2] = np.nan
    frame = DataFrame({"A": values, "B": values})
    times = date_range("2000", freq="D", periods=10)
    result = frame.ewm(halflife=halflife_with_times, times=times)["A"].mean()
    expected = frame.ewm(halflife=1.0)["A"].mean()
    tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("arg", ["com", "halflife", "span", "alpha"])
def test_ewm_getitem_attributes_retained(arg, adjust, ignore_na):
# GH 40164
kwargs = {arg: 1, "adjust": adjust, "ignore_na": ignore_na}
ewm = DataFrame({"A": range(1), "B": range(1)}).ewm(**kwargs)
expected = {attr: getattr(ewm, attr) for attr in ewm._attributes}
ewm_slice = ewm["A"]
result = {attr: getattr(ewm, attr) for attr in ewm_slice._attributes}
assert result == expected
def test_ewm_vol_deprecated():
    """``vol`` is a deprecated alias of ``std`` and must emit FutureWarning."""
    ser = Series(range(1))
    with tm.assert_produces_warning(FutureWarning):
        result = ser.ewm(com=0.1).vol()
    tm.assert_series_equal(result, ser.ewm(com=0.1).std())
def test_ewma_times_adjust_false_raises():
    # GH 40098: ``times`` together with adjust=False is not implemented.
    msg = "times is not supported with adjust=False."
    with pytest.raises(NotImplementedError, match=msg):
        Series(range(1)).ewm(
            0.1, adjust=False, times=date_range("2000", freq="D", periods=1)
        )
@pytest.mark.parametrize(
    "func, expected",
    [
        [
            "mean",
            DataFrame(
                {
                    0: range(5),
                    1: range(4, 9),
                    2: [7.428571, 9, 10.571429, 12.142857, 13.714286],
                },
                dtype=float,
            ),
        ],
        [
            "std",
            DataFrame(
                {
                    0: [np.nan] * 5,
                    1: [4.242641] * 5,
                    2: [4.6291, 5.196152, 5.781745, 6.380775, 6.989788],
                }
            ),
        ],
        [
            "var",
            DataFrame(
                {
                    0: [np.nan] * 5,
                    1: [18.0] * 5,
                    2: [21.428571, 27, 33.428571, 40.714286, 48.857143],
                }
            ),
        ],
    ],
)
def test_float_dtype_ewma(func, expected, float_numpy_dtype):
    # GH#42452: row-wise (axis=1) ewm must work for every float dtype.
    df = DataFrame(
        {0: range(5), 1: range(6, 11), 2: range(10, 20, 2)}, dtype=float_numpy_dtype
    )
    result = getattr(df.ewm(alpha=0.5, axis=1), func)()
    tm.assert_frame_equal(result, expected)
def test_times_string_col_deprecated():
    # GH 43265: passing a column name for ``times`` is deprecated.
    values = np.arange(10.0)
    values[::2] = np.nan
    df = DataFrame({"A": values, "time_col": date_range("2000", freq="D", periods=10)})
    with tm.assert_produces_warning(FutureWarning, match="Specifying times"):
        result = df.ewm(halflife="1 day", min_periods=0, times="time_col").mean()
    tm.assert_frame_equal(result, df.ewm(halflife=1.0, min_periods=0).mean())
def test_ewm_sum_adjust_false_notimplemented():
    """``sum`` with adjust=False is not implemented and must raise."""
    ewm_obj = Series(range(1)).ewm(com=1, adjust=False)
    with pytest.raises(NotImplementedError, match="sum is not"):
        ewm_obj.sum()
@pytest.mark.parametrize(
    "expected_data, ignore",
    [[[10.0, 5.0, 2.5, 11.25], False], [[10.0, 5.0, 5.0, 12.5], True]],
)
def test_ewm_sum(expected_data, ignore):
    # xref from Numbagg tests
    # https://github.com/numbagg/numbagg/blob/v0.2.1/numbagg/test/test_moving.py#L50
    ser = Series([10, 0, np.nan, 10])
    result = ser.ewm(alpha=0.5, ignore_na=ignore).sum()
    tm.assert_series_equal(result, Series(expected_data))
def test_ewma_adjust():
    """With adjust=False the weights of a single unit spike sum to ~1."""
    spike = Series(np.zeros(1000))
    spike[5] = 1
    total = spike.ewm(span=100, adjust=False).mean().sum()
    assert np.abs(total - 1) < 1e-2
def test_ewma_cases(adjust, ignore_na):
# try adjust/ignore_na args matrix
s = Series([1.0, 2.0, 4.0, 8.0])
if adjust:
expected = Series([1.0, 1.6, 2.736842, 4.923077])
else:
expected = Series([1.0, 1.333333, 2.222222, 4.148148])
result = s.ewm(com=2.0, adjust=adjust, ignore_na=ignore_na).mean()
tm.assert_series_equal(result, expected)
def test_ewma_nan_handling():
    """Interior and leading NaNs must not disturb a constant series."""
    ser = Series([1.0] + [np.nan] * 5 + [1.0])
    tm.assert_series_equal(ser.ewm(com=5).mean(), Series([1.0] * len(ser)))
    ser = Series([np.nan] * 2 + [1.0] + [np.nan] * 2 + [1.0])
    tm.assert_series_equal(ser.ewm(com=5).mean(), Series([np.nan] * 2 + [1.0] * 4))
@pytest.mark.parametrize(
    "s, adjust, ignore_na, w",
    [
        (
            Series([np.nan, 1.0, 101.0]),
            True,
            False,
            [np.nan, (1.0 - (1.0 / (1.0 + 2.0))), 1.0],
        ),
        (
            Series([np.nan, 1.0, 101.0]),
            True,
            True,
            [np.nan, (1.0 - (1.0 / (1.0 + 2.0))), 1.0],
        ),
        (
            Series([np.nan, 1.0, 101.0]),
            False,
            False,
            [np.nan, (1.0 - (1.0 / (1.0 + 2.0))), (1.0 / (1.0 + 2.0))],
        ),
        (
            Series([np.nan, 1.0, 101.0]),
            False,
            True,
            [np.nan, (1.0 - (1.0 / (1.0 + 2.0))), (1.0 / (1.0 + 2.0))],
        ),
        (
            Series([1.0, np.nan, 101.0]),
            True,
            False,
            [(1.0 - (1.0 / (1.0 + 2.0))) ** 2, np.nan, 1.0],
        ),
        (
            Series([1.0, np.nan, 101.0]),
            True,
            True,
            [(1.0 - (1.0 / (1.0 + 2.0))), np.nan, 1.0],
        ),
        (
            Series([1.0, np.nan, 101.0]),
            False,
            False,
            [(1.0 - (1.0 / (1.0 + 2.0))) ** 2, np.nan, (1.0 / (1.0 + 2.0))],
        ),
        (
            Series([1.0, np.nan, 101.0]),
            False,
            True,
            [(1.0 - (1.0 / (1.0 + 2.0))), np.nan, (1.0 / (1.0 + 2.0))],
        ),
        (
            Series([np.nan, 1.0, np.nan, np.nan, 101.0, np.nan]),
            True,
            False,
            [np.nan, (1.0 - (1.0 / (1.0 + 2.0))) ** 3, np.nan, np.nan, 1.0, np.nan],
        ),
        (
            Series([np.nan, 1.0, np.nan, np.nan, 101.0, np.nan]),
            True,
            True,
            [np.nan, (1.0 - (1.0 / (1.0 + 2.0))), np.nan, np.nan, 1.0, np.nan],
        ),
        (
            Series([np.nan, 1.0, np.nan, np.nan, 101.0, np.nan]),
            False,
            False,
            [
                np.nan,
                (1.0 - (1.0 / (1.0 + 2.0))) ** 3,
                np.nan,
                np.nan,
                (1.0 / (1.0 + 2.0)),
                np.nan,
            ],
        ),
        (
            Series([np.nan, 1.0, np.nan, np.nan, 101.0, np.nan]),
            False,
            True,
            [
                np.nan,
                (1.0 - (1.0 / (1.0 + 2.0))),
                np.nan,
                np.nan,
                (1.0 / (1.0 + 2.0)),
                np.nan,
            ],
        ),
        (
            Series([1.0, np.nan, 101.0, 50.0]),
            True,
            False,
            [
                (1.0 - (1.0 / (1.0 + 2.0))) ** 3,
                np.nan,
                (1.0 - (1.0 / (1.0 + 2.0))),
                1.0,
            ],
        ),
        (
            Series([1.0, np.nan, 101.0, 50.0]),
            True,
            True,
            [
                (1.0 - (1.0 / (1.0 + 2.0))) ** 2,
                np.nan,
                (1.0 - (1.0 / (1.0 + 2.0))),
                1.0,
            ],
        ),
        (
            Series([1.0, np.nan, 101.0, 50.0]),
            False,
            False,
            [
                (1.0 - (1.0 / (1.0 + 2.0))) ** 3,
                np.nan,
                (1.0 - (1.0 / (1.0 + 2.0))) * (1.0 / (1.0 + 2.0)),
                (1.0 / (1.0 + 2.0))
                * ((1.0 - (1.0 / (1.0 + 2.0))) ** 2 + (1.0 / (1.0 + 2.0))),
            ],
        ),
        (
            Series([1.0, np.nan, 101.0, 50.0]),
            False,
            True,
            [
                (1.0 - (1.0 / (1.0 + 2.0))) ** 2,
                np.nan,
                (1.0 - (1.0 / (1.0 + 2.0))) * (1.0 / (1.0 + 2.0)),
                (1.0 / (1.0 + 2.0)),
            ],
        ),
    ],
)
def test_ewma_nan_handling_cases(s, adjust, ignore_na, w):
    """EWMA over data with NaNs must equal the explicit weighted average.

    ``w`` holds the hand-computed weight of each observation for com=2.0
    (i.e. alpha = 1/(1+2) = 1/3) under the given adjust/ignore_na
    combination; the expected series is the weighted cumulative mean,
    forward-filled across NaN positions.
    """
    # GH 7603
    expected = (s.multiply(w).cumsum() / Series(w).cumsum()).fillna(method="ffill")
    result = s.ewm(com=2.0, adjust=adjust, ignore_na=ignore_na).mean()
    tm.assert_series_equal(result, expected)
    if ignore_na is False:
        # check that ignore_na defaults to False
        result = s.ewm(com=2.0, adjust=adjust).mean()
        tm.assert_series_equal(result, expected)
def test_ewm_alpha():
    """GH 10789: alpha, com, span and halflife parameterisations must agree.

    Uses ``np.nan`` instead of the ``np.NaN`` alias, which was removed in
    NumPy 2.0 (identical value on NumPy 1.x).
    """
    arr = np.random.randn(100)
    arr[np.arange(20, 40)] = np.nan
    s = Series(arr)
    # All four parameterisations below encode the same decay factor.
    a = s.ewm(alpha=0.61722699889169674).mean()
    b = s.ewm(com=0.62014947789973052).mean()
    c = s.ewm(span=2.240298955799461).mean()
    d = s.ewm(halflife=0.721792864318).mean()
    tm.assert_series_equal(a, b)
    tm.assert_series_equal(a, c)
    tm.assert_series_equal(a, d)
def test_ewm_domain_checks():
# GH 12492
arr = np.random.randn(100)
locs = np.arange(20, 40)
arr[locs] = np.NaN
s = Series(arr)
msg = "comass must satisfy: comass >= 0"
with pytest.raises(ValueError, match=msg):
s.ewm(com=-0.1)
s.ewm(com=0.0)
s.ewm(com=0.1)
msg = "span must satisfy: span >= 1"
with pytest.raises(ValueError, match=msg):
s.ewm(span=-0.1)
with pytest.raises(ValueError, match=msg):
s.ewm(span=0.0)
with pytest.raises(ValueError, match=msg):
s.ewm(span=0.9)
s.ewm(span=1.0)
s.ewm(span=1.1)
msg = "halflife must satisfy: | |
of example IDs for my_vars.TP/TN/FP/FN
Returns
-------
dict.
Updated confusion matrix.
"""
# print("neighbors:\n{}".format(neighbor))
predicted = rule[class_col_name]
true = example[class_col_name]
# print("example label: {} vs. rule label: {}".format(predicted, true))
predicted_id = example.name
# Potentially remove example from confusion matrix
conf_matrix[my_vars.TP].discard(predicted_id)
conf_matrix[my_vars.TN].discard(predicted_id)
conf_matrix[my_vars.FP].discard(predicted_id)
conf_matrix[my_vars.FN].discard(predicted_id)
# Add updated value
if true == positive_class:
if predicted == true:
conf_matrix[my_vars.TP].add(predicted_id)
# print("pred: {} <-> true: {} -> tp".format(predicted, true))
else:
conf_matrix[my_vars.FN].add(predicted_id)
# print("pred: {} <-> true: {} -> fn".format(predicted, true))
else:
if predicted == true:
conf_matrix[my_vars.TN].add(predicted_id)
# print("pred: {} <-> true: {} -> tn".format(predicted, true))
else:
conf_matrix[my_vars.FP].add(predicted_id)
# print("pred: {} <-> true: {} -> fp".format(predicted, true))
return conf_matrix
def f1(conf_matrix):
    """
    Computes the F1 score: F1 = 2 * (precision * recall) / (precision + recall)

    Parameters
    ----------
    conf_matrix: dict - confusion matrix holding a set of example IDs for my_vars.TP/TN/FP/FN

    Returns
    -------
    float.
    F1-score. 0.0 if <conf_matrix> is None or precision + recall is 0.

    """
    if conf_matrix is None:
        return 0.0
    tp = len(conf_matrix[my_vars.TP])
    fp = len(conf_matrix[my_vars.FP])
    fn = len(conf_matrix[my_vars.FN])
    # Guard against empty denominators (no positive predictions / examples)
    precision = tp / (tp + fp) if (tp + fp) > 0 else 0
    recall = tp / (tp + fn) if (tp + fn) > 0 else 0
    denom = precision + recall
    if denom > 0:
        return 2 * precision * recall / denom
    return 0.0
def is_duplicate(new_rule, existing_rule_ids):
    """
    Checks if a rule is a duplicate of existing rules.

    Parameters
    ----------
    new_rule: pd.Series - new rule which is a potential duplicate rule
    existing_rule_ids: list of int - rule IDs that have the same hash as <new_rule> and are thus potential duplicates

    Returns
    -------
    int.
    ID of the duplicate rule or my_vars.UNIQUE_RULE otherwise.

    """
    # If the rule's own ID is among the candidates, it's just itself, not a
    # duplicate - hash collisions mean same-hash rules may still differ.
    if new_rule.name in existing_rule_ids:
        return my_vars.UNIQUE_RULE
    # Compare value by value against every candidate; first match wins.
    for rule_id in existing_rule_ids:
        candidate = my_vars.all_rules[rule_id]
        if _are_duplicates(new_rule, candidate):
            return candidate.name
    return my_vars.UNIQUE_RULE
def _are_duplicates(rule_i, rule_j):
    """Returns True if two rules are duplicates (= all values of the rules are identical) of each other and
    False otherwise.

    Values are compared per feature: strings exactly, Bounds tuples and
    numbers up to my_vars.PRECISION.
    """
    # Different number of features -> cannot be duplicates
    if len(rule_i) != len(rule_j):
        return False
    for (idx_i, val_i), (idx_j, val_j) in zip(rule_i.iteritems(), rule_j.iteritems()):
        # Different feature names -> not duplicates
        if idx_i != idx_j:
            return False
        # Strings
        if isinstance(val_i, str):
            if val_i != val_j:
                return False
        # Tuples (numeric intervals)
        elif isinstance(val_i, Bounds):
            lower_i, upper_i = val_i
            lower_j, upper_j = val_j
            # BUG FIX: the upper-bound check previously read
            # abs(upper_i - upper_j > PRECISION), i.e. abs() of a boolean,
            # so differing upper bounds were compared incorrectly.
            if abs(lower_i - lower_j) > my_vars.PRECISION or \
                    abs(upper_i - upper_j) > my_vars.PRECISION:
                return False
        # Numbers
        else:
            if abs(val_i - val_j) > my_vars.PRECISION:
                return False
    return True
def find_duplicate_rule_id(generalized_rule, rule_hash):
    """
    Checks if a new rule is unique and otherwise returns the ID of the rule that has the same hash as the new rule.

    Parameters
    ----------
    generalized_rule: pd.Series - new rule that was generalized
    rule_hash: int - hash value of the rule

    Returns
    -------
    int.
    The ID of the duplicate rule or my_vars.UNIQUE_RULE if the new rule is unique.

    """
    # Hash collisions might occur, so there could be multiple rules with the same hash value
    if rule_hash not in my_vars.unique_rules:
        return my_vars.UNIQUE_RULE
    existing_rule_ids = my_vars.unique_rules[rule_hash]
    print("existing rule ids", existing_rule_ids)
    for rid in existing_rule_ids:
        print(rid)
        print("possible duplicate:", my_vars.all_rules[rid])
    return is_duplicate(generalized_rule, existing_rule_ids)
def _delete_old_rule_hash(rule):
    """
    Deletes the hash of the old rule.

    Parameters
    ----------
    rule: pd.Series - rule that was deleted

    """
    rule_hash = compute_hashable_key(rule)
    print("delete old hash of {}: {}".format(rule.name, rule_hash))
    print("before update:", my_vars.unique_rules)
    rules_sharing_hash = my_vars.unique_rules.get(rule_hash, set())
    if len(rules_sharing_hash) > 1:
        # Other rules share this hash - only drop this rule's ID.
        my_vars.unique_rules[rule_hash].discard(rule.name)
    elif rule_hash in my_vars.unique_rules:
        # If a rule was extended, it wasn't added to my_vars.unique_rules, so
        # the membership check above is necessary before deleting the entry.
        del my_vars.unique_rules[rule_hash]
    print("after update:", my_vars.unique_rules)
def merge_rule_statistics_of_duplicate(existing_rule, duplicate_rule):
    """
    Merges the statistics of a rule, that was just generalized and became a duplicate of an existing rule, with the
    statistics of the existing rule, s.t. the generalized rule is deleted and all statistics are updated for the
    existing rule.
    IMPORTANT: <duplicate_rule> is the rule that was generalized and thereby became a duplicate of <existing_rule>
    (NOTE(review): the original sentence repeated <duplicate_rule> on both sides - intent reconstructed, TODO confirm)

    Parameters
    ----------
    existing_rule: pd.Series - existing rule whose statistics will be updated
    duplicate_rule: pd.Series - base rule that was generalized and became a duplicate, thus it's statistics will be
    deleted once they were added to <existing_rule>

    """
    print("existing rule", existing_rule.name)
    print("duplicate rule", duplicate_rule.name)
    # 1. Update existing rule
    duplicate_seed_example_id = my_vars.seed_rule_example[duplicate_rule.name]
    # existing_seed_example_id = my_vars.seed_rule_example[existing_rule.name]
    print("seed example per rule:", my_vars.seed_rule_example)
    # my_vars.seed_rule_example[existing_rule.name] = duplicate_seed_example_id
    print("rules for which the examples are seeds:", my_vars.seed_example_rule)
    # my_vars.seed_example_rule[existing_seed_example_id].add(duplicate_rule.name)
    print("updating which rule covers which examples:", my_vars.examples_covered_by_rule)
    # Transfer the duplicate's covered examples onto the existing rule
    covered = my_vars.examples_covered_by_rule.get(duplicate_rule.name, set())
    if len(covered) > 0:
        my_vars.examples_covered_by_rule[existing_rule.name] = \
            my_vars.examples_covered_by_rule.get(existing_rule.name, set()).union(covered)
    print("after merging:", my_vars.examples_covered_by_rule)
    # Examples whose closest rule was the duplicate now point to the existing rule
    affected_examples = my_vars.closest_examples_per_rule.get(duplicate_rule.name, set())
    print("closest rule per example", my_vars.closest_rule_per_example)
    for example_id in affected_examples:
        # Keep the stored distance; only the rule ID changes
        _, dist = my_vars.closest_rule_per_example[example_id]
        my_vars.closest_rule_per_example[example_id] = Data(rule_id=existing_rule.name, dist=dist)
    print("after update:", my_vars.closest_rule_per_example)
    print("closest examples per rule:", my_vars.closest_examples_per_rule)
    my_vars.closest_examples_per_rule[existing_rule.name] = \
        my_vars.closest_examples_per_rule.get(existing_rule.name, set()).union(affected_examples)
    # 2. Delete statistics of duplicate rule
    del my_vars.seed_rule_example[duplicate_rule.name]
    # Drop the reverse mapping; delete the whole entry if this was its only rule
    if len(my_vars.seed_example_rule[duplicate_seed_example_id]) > 1:
        my_vars.seed_example_rule[duplicate_seed_example_id].discard(duplicate_rule.name)
    else:
        del my_vars.seed_example_rule[duplicate_seed_example_id]
    print("seed example rule updated", my_vars.seed_example_rule)
    print("seed rule example updated", my_vars.seed_rule_example)
    if duplicate_rule.name in my_vars.examples_covered_by_rule:
        del my_vars.examples_covered_by_rule[duplicate_rule.name]
    if duplicate_rule.name in my_vars.closest_examples_per_rule:
        del my_vars.closest_examples_per_rule[duplicate_rule.name]
    print("closest examples per rule after merging:", my_vars.closest_examples_per_rule)
    del my_vars.all_rules[duplicate_rule.name]
    # Finally remove the duplicate's hash entry so it can't be matched again
    _delete_old_rule_hash(duplicate_rule)
def add_one_best_rule(df, neighbors, rule, rules, f1, class_col_name, counts, min_max, classes):
"""
Implements AddOneBestRule() from the paper, i.e. Algorithm 3.
Parameters
----------
neighbors: pd.DataFrame - nearest examples for <rule>
rule: pd.Series - rule whose effect on the F1 score should be evaluated
rules: list of pd.Series - list of all rules in the rule set RS and <rule> is at the end in that list
class_col_name: str - name of the column in the series holding the class label
counts: dict of Counters - contains for nominal classes how often the value of an co-occurs with each class label
min_max: pd.DataFrame - min and max value per numeric feature.
classes: list of str - class labels in the dataset.
Returns
-------
bool, list of pd.Series, float or bool, None, float.
True if a generalized version of the rule improves the F1 score, False otherwise. Returns the updated list of
rules - all rules in that list are unique, i.e. if the best found rule becomes identical with any existing one
(that isn't updated), it'll be ignored. The new F1 score using the generalized rule.
Returns False, <rules>, <f1> if <neighbors> is None
"""
# Without deep copy, a shallow copy of <rules> is used, hence changing the returned rules would change the original
# rules
rules = copy.deepcopy(rules)
best_f1 = f1
best_generalization = rule
improved = False
# best_example_id = None
best_closest_rule_dist = None
best_conf_matrix = None
best_closest_examples_per_rule = None
best_covered = None
best_hash = None
print("rule:\n{}".format(rule))
print("best f1:", best_f1)
# No neighbors
if neighbors is None:
return False, rules, best_f1
dtypes = neighbors.dtypes
for example_id, example in neighbors.iterrows():
print("add_1 generalize rule for example {}".format(example.name))
generalized_rule = most_specific_generalization(example, rule, class_col_name, dtypes)
# print("generalized rule:\n{}".format(generalized_rule))
current_f1, current_conf_matrix, current_closest_rule, current_closest_examples_per_rule, current_covered, _\
= evaluate_f1_temporarily(df, generalized_rule, generalized_rule.name, class_col_name, counts, min_max,
classes)
print(current_f1, best_f1)
if current_f1 >= best_f1:
print("{} >= {}".format(current_f1, f1))
best_f1 = current_f1
best_generalization = generalized_rule
best_closest_examples_per_rule = current_closest_examples_per_rule
best_covered = current_covered
improved = True
best_conf_matrix = current_conf_matrix
best_closest_rule_dist = current_closest_rule
best_hash = compute_hashable_key(generalized_rule)
if improved:
print("improvement!")
# Replace old rule with new one. Note that <rule> (see parameters) is the last rule in <rules>
idx = -1
# replace_rule = False
# Only update existing if its generalization isn't a duplicate - if it is, just delete the existing rule
duplicate_rule_id = find_duplicate_rule_id(best_generalization, best_hash)
# Generalized rule isn't a duplicate
if duplicate_rule_id == my_vars.UNIQUE_RULE:
# Delete old hash entry | |
# ~~~
# This file is part of the paper:
#
# "A NON-CONFORMING DUAL APPROACH FOR ADAPTIVE TRUST-REGION REDUCED BASIS
# APPROXIMATION OF PDE-CONSTRAINED OPTIMIZATION"
#
# https://github.com/TiKeil/NCD-corrected-TR-RB-approach-for-pde-opt
#
# Copyright 2019-2020 all developers. All rights reserved.
# License: Licensed as BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
# Authors:
# <NAME> (2019 - 2020)
# <NAME> (2019 - 2020)
# ~~~
import numpy as np
import time
from copy import deepcopy
from pdeopt.tools import truncated_conj_grad as TruncCG
def projection_onto_range(parameter_space, mu):
    """Clamp every component of ``mu`` onto the box given by ``parameter_space.ranges``.

    Scalar parameters (shape () or (1,)) are clamped directly; vector
    parameters are clamped component-wise. ``mu`` is modified in place and
    also returned.
    """
    ranges = parameter_space.ranges
    for key, shape in parameter_space.parameter_type.items():
        low, high = ranges[key][0], ranges[key][1]
        if sum(shape) < 2:  # the scalar-like cases () and (1,)
            if mu[key] < low:
                mu[key] = low if shape == () else [low]
            if mu[key] > high:
                mu[key] = high if shape == () else [high]
        else:
            for j in range(shape[0]):
                if mu[key][j] < low:
                    mu[key][j] = low
                if mu[key][j] > high:
                    mu[key][j] = high
    return mu
def active_and_inactive_sets(parameter_space, mu, epsilon):
    """Return indicator arrays (Act, Inact) for the box constraints.

    A component is *active* (1.0) when it lies within ``epsilon`` of either
    bound of its range, otherwise *inactive* (0.0). Inact is the complement
    of Act.
    """
    ranges = parameter_space.ranges
    flags = []
    for key, shape in parameter_space.parameter_type.items():
        low, high = ranges[key][0], ranges[key][1]
        if sum(shape) < 2:  # scalar-like parameter
            components = [mu[key]]
        else:
            components = [mu[key][j] for j in range(shape[0])]
        for value in components:
            near_bound = (value - low <= epsilon) or (high - value <= epsilon)
            flags.append(1.0 if near_bound else 0.0)
    Act = np.array(flags)
    Inact = np.ones(Act.shape) - Act
    return Act, Inact
def armijo_rule(opt_model, parameter_space, TR_parameters, mu_i, Ji, direction):
    """Backtracking (Armijo) line search along ``direction`` from ``mu_i``.

    Accepts the first step length initial_step_armijo**j that achieves a
    sufficient decrease of the reduced functional while the relative error
    estimate stays inside the TR radius (ROM case only; for the FOM the
    estimate is 0).  Returns the new parameter (array and dict form), the new
    functional value and abs(est / Jip1), the TR boundary criterium.
    """
    j = 0
    condition = True
    while condition and j < TR_parameters['max_iterations_armijo']:
        # Candidate step: geometric backtracking, projected onto the parameter box.
        mu_ip1 = mu_i + (TR_parameters['initial_step_armijo'] ** j) * direction
        mu_ip1_dict = opt_model.primal_model.parse_parameter(opt_model.pre_parse_parameter(mu_ip1))
        mu_ip1_dict = projection_onto_range(parameter_space,mu_ip1_dict)
        mu_ip1 = opt_model.parse_parameter_inverse(mu_ip1_dict)
        Jip1 = opt_model.output_functional_hat(mu_ip1_dict)
        if not TR_parameters['full_order_model']:
            # ROM case: estimate the output error to enforce the TR constraint.
            u_cp = opt_model.solve(mu_ip1_dict)
            p_cp = opt_model.solve_dual(mu_ip1_dict)
            est = opt_model.estimate_output_functional_hat(u_cp, p_cp, mu_ip1_dict)
        else:
            est = 0.0
        # Sufficient-decrease (Armijo) condition AND trust-region condition.
        if Jip1 <= Ji - (TR_parameters['armijo_alpha'] / ((TR_parameters['initial_step_armijo'] ** j))) * (np.linalg.norm(mu_ip1-mu_i)**2) and abs(est / Jip1) <= TR_parameters['radius']:
            condition = False
        j = j + 1
    if condition: # This means that we exit the loop because of maximum iteration reached
        print("Maximum iteration for Armijo rule reached")
        # Fall back to the current iterate and inflate the estimate so the
        # Qian-Grepl variant terminates as well.
        mu_ip1 = mu_i
        mu_ip1_dict = opt_model.primal_model.parse_parameter(opt_model.pre_parse_parameter(mu_ip1))
        Jip1 = Ji
        est = TR_parameters['radius']*Ji # so that the Qian-Grepl method stops as well
    return mu_ip1, mu_ip1_dict, Jip1, abs(est / Jip1) #the last is needed for the boundary criterium
def compute_new_hessian_approximation(new_mu,old_mu,new_gradient,old_gradient,old_B):
    """BFGS-style update of the matrix approximation ``old_B``.

    Falls back to the identity (i.e. a plain gradient direction) when the
    curvature condition (g_k . p_k) > 0 fails.
    """
    gk = new_gradient - old_gradient
    pk = new_mu - old_mu
    curvature = gk.dot(pk)
    if curvature <= 0.0:
        print("Curvature condition: {}".format(curvature))
        print("Reset direction to - gradient")
        return np.eye(old_gradient.size)
    Hkgk = old_B.dot(gk)
    coeff = gk.dot(Hkgk)
    rank_one = ((curvature + coeff) / (curvature * curvature)) * np.outer(pk, pk)
    mixed = (np.outer(Hkgk, pk) + np.outer(pk, Hkgk)) / curvature
    return old_B + rank_one - mixed
def compute_modified_hessian_action_matrix_version(H,Active,Inactive,eta):
    """Apply the Hessian matrix ``H`` restricted to the inactive set.

    Active components of ``eta`` pass through unchanged; inactive components
    are mapped through ``H`` and then masked back onto the inactive set.
    """
    inactive_part = Inactive * eta
    return Active * eta + Inactive * H.dot(inactive_part)
def solve_optimization_subproblem_BFGS(opt_model, parameter_space, mu_k_dict, TR_parameters, timing=False):
    """Projected BFGS solver for the TR sub-problem (or the plain FOM problem).

    Starting from ``mu_k_dict``, repeats Armijo line searches along a BFGS
    quasi-Newton direction restricted to the inactive set of the box
    constraints.  In the ROM case the loop additionally terminates when the
    TR boundary criterium abs(est/J) >= beta * radius is reached.

    Returns (mu_dict, Jcp, #iterations, J(mu), FOCs); with ``timing=True``
    also the per-iteration times, iterates and functional values.  ``Jcp``
    is the approximate Cauchy-point value (None for the full-order model).
    """
    if not TR_parameters['full_order_model']:
        print('___ starting subproblem')
        if 'beta' not in TR_parameters:
            print('Setting beta to the default 0.95')
            TR_parameters['beta'] = 0.95
    else:
        print("Starting parameter {}".format(mu_k_dict))
    tic_ = time.time()
    times = []
    mus = []
    Js = []
    FOCs = []
    mu_diff = 1e6
    J_diff = 1e6
    Ji = opt_model.output_functional_hat(mu_k_dict)
    gradient = opt_model.output_functional_hat_gradient(mu_k_dict)
    normgrad = np.linalg.norm(gradient)
    mu_i = opt_model.parse_parameter_inverse(mu_k_dict)
    mu_i_dict = opt_model.primal_model.parse_parameter(opt_model.pre_parse_parameter(mu_i))
    # Projected gradient step, used to derive the active-set tolerance epsilon_i.
    mu_i_1 = mu_i - gradient
    mu_i_1_dict = projection_onto_range(opt_model.parameter_space, opt_model.parse_parameter(opt_model.pre_parse_parameter(mu_i_1)))
    mu_i_1 = opt_model.parse_parameter_inverse(mu_i_1_dict)
    epsilon_i = TR_parameters['epsilon_i']
    if not isinstance(epsilon_i,float):
        epsilon_i = np.linalg.norm(mu_i_1 - mu_i)#/(np.linalg.norm(mu_i)+1e-8)
    B = np.eye(mu_i.size)
    Active_i, Inactive_i = active_and_inactive_sets(opt_model.parameter_space, mu_i_dict, epsilon_i)
    i = 0
    while i < TR_parameters['max_iterations_subproblem']:
        if i>0:
            if not TR_parameters['full_order_model']:
                # ROM: stop at the TR boundary or on convergence/stagnation.
                if boundary_TR_criterium >= TR_parameters['beta']*TR_parameters['radius']:
                    print('boundary criterium of the TR satisfied, so stopping the sub-problem solver')
                    return mu_ip1_dict, Jcp, i, Jip1, FOCs
                if normgrad < TR_parameters['sub_tolerance'] or J_diff < TR_parameters['safety_tolerance'] or mu_diff< TR_parameters['safety_tolerance']:
                    print("Subproblem converged: FOC = {}, mu_diff = {}, J_diff = {} ".format(normgrad,mu_diff,J_diff))
                    break
            else:
                if normgrad < TR_parameters['sub_tolerance']:
                    print("Converged: FOC = {}".format(normgrad))
                    break
        if i == 0 and not TR_parameters['full_order_model']:
            print("Computing the approximate Cauchy point and then start the BFGS method")
            direction = -gradient
        else:
            if Inactive_i.sum() == 0.0:
                # Every component is active (on the boundary): fall back to -gradient.
                if TR_parameters["full_order_model"]:
                    print("All indexes are active, I am using -gradient as direction")
                direction = -gradient
            else:
                direction = compute_modified_hessian_action_matrix_version(B, Active_i, Inactive_i, -gradient)
                if np.dot(direction,gradient) > 0:
                    print('Not a descendent direction ... taking -gradient as direction')
                    direction = -gradient
        if TR_parameters["full_order_model"]:
            mu_ip1, mu_ip1_dict, Jip1, _ = armijo_rule(opt_model, parameter_space, TR_parameters, mu_i, Ji, direction)
        else:
            mu_ip1, mu_ip1_dict, Jip1, boundary_TR_criterium = armijo_rule(opt_model, parameter_space, TR_parameters, mu_i, Ji, direction)
        if i == 0:
            # First accepted step defines the approximate Cauchy-point value.
            if not TR_parameters['full_order_model']:
                Jcp = Jip1
            else:
                Jcp = None
        mu_diff = np.linalg.norm(mu_i - mu_ip1) / np.linalg.norm(mu_i)
        J_diff = abs(Ji - Jip1) / abs(Ji)
        old_mu = deepcopy(mu_i)
        mu_i_dict = mu_ip1_dict
        Ji = Jip1
        old_gradient = deepcopy(gradient)
        gradient = opt_model.output_functional_hat_gradient(mu_i_dict)
        # First-order criticality: distance to the projected gradient step.
        mu_box = opt_model.parse_parameter(opt_model.pre_parse_parameter(opt_model.parse_parameter_inverse(mu_i_dict)-gradient))
        first_order_criticity = opt_model.parse_parameter_inverse(mu_i_dict)-opt_model.parse_parameter_inverse(projection_onto_range(parameter_space, mu_box))
        normgrad = np.linalg.norm(first_order_criticity)
        mu_i = opt_model.parse_parameter_inverse(mu_i_dict)
        mu_i_dict = opt_model.primal_model.parse_parameter(opt_model.pre_parse_parameter(mu_i))
        mu_i_1 = mu_i - gradient
        mu_i_1_dict = projection_onto_range(opt_model.parameter_space,opt_model.parse_parameter(opt_model.pre_parse_parameter(mu_i_1)))
        mu_i_1 = opt_model.parse_parameter_inverse(mu_i_1_dict)
        if not isinstance(epsilon_i,float):
            epsilon_i = np.linalg.norm(mu_i_1 - mu_i)
        Active_i, Inactive_i = active_and_inactive_sets(opt_model.parameter_space, mu_i_dict, epsilon_i)
        # BFGS update of the Hessian approximation.
        B = compute_new_hessian_approximation(mu_i, old_mu, gradient, old_gradient, B)
        if TR_parameters["full_order_model"]:
            print("Step {}, functional {} , FOC condition {}".format(mu_ip1, Ji, np.linalg.norm(first_order_criticity)))
        times.append(time.time() -tic_)
        mus.append(mu_ip1)
        Js.append(Ji)
        FOCs.append(normgrad)
        i = i + 1
    print("relative differences mu {} and J {}".format(mu_diff, J_diff))
    if timing:
        return mu_ip1_dict, Jcp, i, Jip1, times, mus, Js, FOCs
    else:
        return mu_ip1_dict, Jcp, i, Jip1, FOCs
def modified_hessian_action(mu,Active,Inactive,opt_model,eta):
    """Matrix-free modified Hessian action (used only by the projected Newton
    method): active components of ``eta`` pass through unchanged, inactive
    ones are mapped through the model's Hessian operator and masked back."""
    inactive_part = Inactive * eta
    hessian_on_inactive = opt_model.output_functional_hessian_operator(mu, inactive_part, False)
    return Active * eta + Inactive * hessian_on_inactive
def solve_optimization_NewtonMethod(opt_model, parameter_space, mu_k_dict, TR_parameters, timing=False):
    """Projected (truncated-CG) Newton method.

    This method is used to compute an accurate approximation of the optimal
    parameter mu_bar with the FOM (eventually also in the global Greedy).
    It is not used in the TR algorithm in this paper.

    Returns (mu_dict, Jcp, #iterations, J(mu), FOCs, 0); with ``timing=True``
    also per-iteration times, iterates and functional values.
    """
    print("Starting parameter {}".format(mu_k_dict))
    if 'global_RB' not in TR_parameters:
        TR_parameters['global_RB']=False
    tic_toc = time.time()
    times = []
    mus = []
    Js = []
    FOCs = []
    Jcp = None
    mu_diff = 1e6
    J_diff = 1e6
    Ji = opt_model.output_functional_hat(mu_k_dict)
    gradient = opt_model.output_functional_hat_gradient(mu_k_dict)
    normgrad = np.linalg.norm(gradient)
    mu_i = opt_model.parse_parameter_inverse(mu_k_dict)
    mu_i_dict = opt_model.primal_model.parse_parameter(opt_model.pre_parse_parameter(mu_i))
    # Projected gradient step, used to derive the active-set tolerance epsilon_i.
    mu_i_1 = mu_i - gradient
    mu_i_1_dict = projection_onto_range(opt_model.parameter_space, opt_model.parse_parameter(opt_model.pre_parse_parameter(mu_i_1)))
    mu_i_1 = opt_model.parse_parameter_inverse(mu_i_1_dict)
    epsilon_i = TR_parameters['epsilon_i']
    if not isinstance(epsilon_i,float):
        epsilon_i = np.linalg.norm(mu_i_1 - mu_i)
    i = 0
    while i < TR_parameters['max_iterations']:
        if i>0:
            if TR_parameters['full_order_model'] or TR_parameters['global_RB']:
                if normgrad < TR_parameters['sub_tolerance']:
                    print("Converged: FOC = {}".format(normgrad))
                    break
        Active_i, Inactive_i = active_and_inactive_sets(opt_model.parameter_space, mu_i_dict, epsilon_i)
        if Inactive_i.sum() == 0.0:
            # All components active: the Newton system degenerates, use the gradient.
            deltamu = gradient
            if TR_parameters["full_order_model"] or TR_parameters['global_RB']:
                print("I am using projected gradient instead of Newton")
        else:
            print("Using truncated CG for the linear system")
            deltamu, itcg,rescg, infocg = TruncCG(A_func=lambda v: modified_hessian_action(mu=mu_i_dict, Active= Active_i, Inactive= Inactive_i, opt_model=opt_model, eta=v), b= gradient, tol = 1.e-10)
            if infocg > 0:
                # CG failure: fall back to the gradient direction.
                print("Choosing the gradient as direction")
                deltamu = gradient
            if np.dot(-deltamu,gradient) >= -1.e-14:
                print('Not a descendent direction ... taking gradient as direction')
                deltamu = gradient
        mu_ip1, mu_ip1_dict, Jip1, _, = armijo_rule(opt_model, parameter_space, TR_parameters, mu_i, Ji, -deltamu)
        mu_diff = np.linalg.norm(mu_i - mu_ip1) / np.linalg.norm(mu_i)
        J_diff = abs(Ji - Jip1) / abs(Ji)
        mu_i_dict = mu_ip1_dict
        Ji = Jip1
        gradient = opt_model.output_functional_hat_gradient(mu_i_dict)
        # First-order criticality: distance to the projected gradient step.
        mu_box = opt_model.parse_parameter(opt_model.pre_parse_parameter(opt_model.parse_parameter_inverse(mu_i_dict)-gradient))
        first_order_criticity = opt_model.parse_parameter_inverse(mu_i_dict)-opt_model.parse_parameter_inverse(projection_onto_range(parameter_space, mu_box))
        normgrad = np.linalg.norm(first_order_criticity)
        mu_i = opt_model.parse_parameter_inverse(mu_i_dict)
        mu_i_dict = opt_model.primal_model.parse_parameter(opt_model.pre_parse_parameter(mu_i))
        mu_i_1 = mu_i - gradient
        mu_i_1_dict = projection_onto_range(opt_model.parameter_space,opt_model.parse_parameter(opt_model.pre_parse_parameter(mu_i_1)))
        mu_i_1 = opt_model.parse_parameter_inverse(mu_i_1_dict)
        if not isinstance(epsilon_i,float):
            epsilon_i = np.linalg.norm(mu_i_1 - mu_i)
        print("Step {}, functional {} , FOC condition {}".format(mu_ip1, Ji, np.linalg.norm(first_order_criticity)))
        times.append(time.time() -tic_toc)
        mus.append(mu_ip1)
        Js.append(Ji)
        FOCs.append(normgrad)
        i = i + 1
    print("relative differences mu {} and J {}".format(mu_diff, J_diff))
    if timing:
        return mu_ip1_dict, Jcp, i, Jip1, times, mus, Js, FOCs
    else:
        return mu_ip1_dict, Jcp, i, Jip1, FOCs, 0
def enrichment_step(mu, reductor, opt_fom=None):
    """Extend the reduced bases of a copy of ``reductor`` at parameter ``mu``.

    The incoming reductor is never mutated: a deep copy is extended and
    reduced instead, so the caller can discard the enrichment if desired.

    :param mu: parameter at which new snapshots are computed.
    :param reductor: reductor whose bases are to be enriched.
    :param opt_fom: unused here; kept for signature compatibility with callers.
    :return: tuple ``(opt_rom, new_reductor, u, p)`` with the freshly reduced
        model, the extended reductor copy and the primal/dual snapshots.
    """
    extended_reductor = deepcopy(reductor)
    primal_snapshot, dual_snapshot = extended_reductor.extend_bases(mu)
    reduced_model = extended_reductor.reduce()
    return reduced_model, extended_reductor, primal_snapshot, dual_snapshot
def TR_algorithm(opt_rom, reductor, TR_parameters=None, extension_params=None, opt_fom=None, return_opt_rom=False):
if TR_parameters is None:
mu_k = opt_rom.parameter_space.sample_randomly(1)[0]
TR_parameters = {'radius': 0.1, 'sub_tolerance': 1e-8, 'max_iterations': 30, 'max_iterations_subproblem':400,
'starting_parameter': mu_k, 'max_iterations_armijo': 50, 'initial_step_armijo': 0.5,
'armijo_alpha': 1e-4, 'full_order_model': False,
'epsilon_i': 1e-8, 'Qian-Grepl': False, 'safety_tolerance': 1e-16, 'beta': 0.95}
else:
if 'radius' not in TR_parameters:
TR_parameters['radius'] = 0.1
if 'sub_tolerance' not in TR_parameters:
TR_parameters['sub_tolerance'] = 1e-8
if 'max_iterations' not in TR_parameters:
TR_parameters['max_iterations'] = 30
if 'max_iterations_subproblem' not in TR_parameters:
TR_parameters['max_iterations_subproblem'] = 400
if 'starting_parameter' not in TR_parameters:
TR_parameters['starting_parameter'] = opt_rom.parameter_space.sample_randomly(1)[0]
| |
to networks and information systems relevant to essential functions are identified, analysed, prioritised, and managed.", # noqa: E501
"score": 2
}, {
"answer": "Your approach to risk is focused on the possibility of adverse impact to your essential function, leading to a detailed understanding of how such impact might arise as a consequence of possible attacker actions and the security properties of your networks and information systems.", # noqa: E501
"score": 2
}, {
"answer": "Your risk assessments are based on a clearly understood set of threat assumptions, informed by an up-to-date understanding of security threats to your essential function and your sector.", # noqa: E501
"score": 2
}, {
"answer": "Your risk assessments are informed by an understanding of the vulnerabilities in the networks and information systems supporting your essential function.", # noqa: E501
"score": 2
}, {
"answer": "The output from your risk management process is a clear set of security requirements that will address the risks in line with your organisational approach to security.", # noqa: E501
"score": 2
}, {
"answer": "Significant conclusions reached in the course of your risk management process are communicated to key security decision-makers and accountable individuals.", # noqa: E501
"score": 2
}, {
"answer": "You conduct risk assessments when significant events potentially affect the essential function, such as replacing a system or a change in the cyber security threat.", # noqa: E501
"score": 2
}, {
"answer": "Your risk assessments are dynamic and updated in the light of relevant changes which may include technical changes to networks and information systems, change of use and new threat information.", # noqa: E501
"score": 2
}, {
"answer": "The effectiveness of your risk management process is reviewed periodically, and improvements made as required.", # noqa: E501
"score": 2
}, {
"answer": "You perform detailed threat analysis and understand how this applies to your organisation in the context of the threat to your sector and the wider CNI.", # noqa: E501
"score": 2
}]
}, {
"name": "<NAME>", # noqa: E501
"question": "You have gained confidence in the effectiveness of the security of your technology, people, and processes relevant to essential functions.", # noqa: E501
"answers": [{
"answer": "A particular product or service is seen as a \"silver bullet\" and vendor claims are taken at face value.", # noqa: E501
"score": 0
}, {
"answer": "Assurance methods are applied without appreciation of their strengths and limitations, such as the risks of penetration testing in operational environments.", # noqa: E501
"score": 0
}, {
"answer": "Assurance is assumed because there have been no known problems to date.", # noqa: E501
"score": 0
}, {
"answer": "You validate that the security measures in place to protect the networks and information systems are effective and remain effective for the lifetime over which they are needed.", # noqa: E501
"score": 2
}, {
"answer": "You understand the assurance methods available to you and choose appropriate methods to gain confidence in the security of essential functions.", # noqa: E501
"score": 2
}, {
"answer": "Your confidence in the security as it relates to your technology, people, and processes can be justified to, and verified by, a third party.", # noqa: E501
"score": 2
}, {
"answer": "Security deficiencies uncovered by assurance activities are assessed, prioritised and remedied when necessary in a timely and effective way.", # noqa: E501
"score": 2
}, {
"answer": "The methods used for assurance are reviewed to ensure they are working as intended and remain the most appropriate method to use.", # noqa: E501
"score": 2
}]
}, {
"name": "A3a Asset Management", # noqa: E501
"question": "Everything required to deliver, maintain or support networks and information systems necessary for the operation of essential functions is determined and understood. This includes data, people and systems, as well as any supporting infrastructure (such as power or cooling).", # noqa: E501
"answers": [{
"answer": "Inventories of assets relevant to the essential function are incomplete, non-existent, or inadequately detailed.", # noqa: E501
"score": 0
}, {
"answer": "Only certain domains or types of asset are documented and understood. Dependencies between assets are not understood (such as the dependencies between IT and OT).", # noqa: E501
"score": 0
}, {
"answer": "Information assets, which could include personally identifiable information or other sensitive information, are stored for long periods of time with no clear business need or retention policy.", # noqa: E501
"score": 0
}, {
"answer": "Knowledge critical to the management, operation, or recovery of essential functions is held by one or two key individuals with no succession plan.", # noqa: E501
"score": 0
}, {
"answer": "Asset inventories are neglected and out of date.", # noqa: E501
"score": 0
}, {
"answer": "All assets relevant to the secure operation of essential functions are identified and inventoried (at a suitable level of detail). The inventory is kept up-to-date.", # noqa: E501
"score": 2
}, {
"answer": "Dependencies on supporting infrastructure (e.g. power, cooling etc) are recognised and recorded.", # noqa: E501
"score": 2
}, {
"answer": "You have prioritised your assets according to their importance to the operation of the essential function.", # noqa: E501
"score": 2
}, {
"answer": "You have assigned responsibility for managing physical assets.", # noqa: E501
"score": 2
}, {
"answer": "Assets relevant to essential functions are managed with cyber security in mind throughout their lifecycle, from creation through to eventual decommissioning or disposal.", # noqa: E501
"score": 2
}]
}, {
"name": "A4a Supply Chain", # noqa: E501
"question": "The organisation understands and manages security risks to networks and information systems supporting the operation of essential functions that arise as a result of dependencies on external suppliers. This includes ensuring that appropriate measures are employed where third party services are used.", # noqa: E501
"answers": [{
"answer": "You do not know what data belonging to you is held by suppliers, or how it is managed.", # noqa: E501
"score": 0
}, {
"answer": "Elements of the supply chain for essential functions are subcontracted and you have little or no visibility of the sub-contractors.", # noqa: E501
"score": 0
}, {
"answer": "Relevant contracts do not have security requirements.", # noqa: E501
"score": 0
}, {
"answer": "Suppliers have access to systems that provide your essential function that is unrestricted, not monitored or bypasses your own security controls.", # noqa: E501
"score": 0
}, {
"answer": "You understand the general risks suppliers may pose to your essential functions.", # noqa: E501
"score": 1
}, {
"answer": "You know the extent of your supply chain for essential functions, including sub-contractors.", # noqa: E501
"score": 1
}, {
"answer": "You engage with suppliers about security, and you set and communicate security requirements in contracts.", # noqa: E501
"score": 1
}, {
"answer": "You are aware of all third-party connections and have assurance that they meet your organisation's security requirements.", # noqa: E501
"score": 1
}, {
"answer": "Your approach to security incident management considers incidents that might arise in your supply chain.", # noqa: E501
"score": 1
}, {
"answer": "You have confidence that information shared with suppliers that is necessary for the operation of your essential function is appropriately protected from well-known attacks and known vulnerabilities.", # noqa: E501
"score": 1
}, {
"answer": "You have a deep understanding of your supply chain, including sub-contractors and the wider risks it faces. You consider factors such as supplier's partnerships, competitors, nationality and other organisations with which they sub-contract. This informs your risk assessment and procurement processes.", # noqa: E501
"score": 2
}, {
"answer": "Your approach to supply chain risk management considers the risks to your essential functions arising from supply chain subversion by capable and well-resourced | |
# Source repository: kursawe/hesdynamics
# import PyDDE
import numpy as np
import scipy.signal
import scipy.optimize
import scipy.interpolate
import multiprocessing as mp
from numba import jit
from numpy import ndarray, number
import os
import matplotlib as mpl
import matplotlib.pyplot as plt
# import seaborn.apionly as sns
import pandas as pd
import socket
import jitcdde
import warnings
import seaborn as sns
import logging
logging.getLogger("tensorflow").setLevel(logging.WARNING)
# try:
# import gpflow
# except ImportError:
# print('Could not import gpflow. Gpflow will not be available for GP regression. This will not affect any functions used in our publications.')
import sklearn.gaussian_process as gp
import GPy
try:
import george
except ImportError:
print('Could not import george. George will not be available for GP regression. This will not affect any functions used in our publications.')
# Pick the degree of parallelism from the machine we are running on:
# known hosts get a hand-tuned core count, everything else uses all cores.
domain_name = socket.getfqdn()
if domain_name == 'jochen-ThinkPad-S1-Yoga-12':
    # Developer laptop: keep two cores free for interactive work.
    number_of_available_cores = 2
elif domain_name.endswith('csf3.alces.network'):
    # CSF3 HPC cluster node.
    number_of_available_cores = 24
else:
    # number_of_available_cores = 1
    number_of_available_cores = mp.cpu_count()
def generate_deterministic_trajectory( duration = 720,
                                       repression_threshold = 10000,
                                       hill_coefficient = 5,
                                       mRNA_degradation_rate = np.log(2)/30,
                                       protein_degradation_rate = np.log(2)/90,
                                       basal_transcription_rate = 1,
                                       translation_rate = 1,
                                       transcription_delay = 29,
                                       initial_mRNA = 0,
                                       initial_protein = 0,
                                       for_negative_times = 'initial',
                                       integrator = 'agnostic'):
    '''Generate one trace of the Hes5 model. This function implements the deterministic model in
    Monk, Current Biology (2003).

    Parameters
    ----------
    duration : float
        duration of the trace in minutes

    repression_threshold : float
        repression threshold, Hes autorepresses itself if its copynumber is larger
        than this repression threshold. Corresponds to P0 in the Monk paper

    hill_coefficient : float
        exponent in the hill function regulating the Hes autorepression. Small values
        make the response more shallow, whereas large values will lead to a switch-like
        response if the protein concentration exceeds the repression threshold

    mRNA_degradation_rate : float
        Rate at which mRNA is degraded, in copynumber per minute

    protein_degradation_rate : float
        Rate at which Hes protein is degraded, in copynumber per minute

    basal_transcription_rate : float
        Rate at which mRNA is described, in copynumber per minute, if there is no Hes
        autorepression. If the protein copy number is close to or exceeds the repression threshold
        the actual transcription rate will be lower

    translation_rate : float
        rate at protein translation, in Hes copy number per mRNA copy number and minute

    transcription_delay : float
        delay of the repression response to Hes protein in minutes. The rate of mRNA transcription depends
        on the protein copy number at this amount of time in the past.

    for_negative_times : string
        decides what protein and mRNA values are assumed for negative times. This
        is necessary since function values for t-tau are required for all t>0.
        The values 'initial', 'zero' and 'no_negative' are supported. The default 'initial' will assume that protein and
        mRNA numbers were constant at the values of the initial condition for all negative times.
        If 'zero' is chosen, then the protein and mRNA numbers are assumed to be 0 at negative times.
        If 'no_negative' is chosen, no assumptions are made for negative times, and transcription
        is blocked until transcription_delay has passed.

    integrator : string
        'agnostic' or 'PyDDE' are allowed integrators. If 'agnostic' is used, the langevin equation
        with noise_strength zero will be employed. In this case the argument for 'for_negative_times'
        will be ignored

    Returns
    -------
    trace : ndarray
        2 dimensional array, first column is time, second column mRNA number,
        third column is Hes5 protein copy number

    Raises
    ------
    ValueError
        if `for_negative_times` or `integrator` is not one of the supported strings.
    '''
    if integrator == 'agnostic':
        # Deterministic limit of the Langevin model: run it with zero noise.
        # 'for_negative_times' is ignored on this code path.
        trace = generate_agnostic_noise_trajectory(duration,
                                                   repression_threshold,
                                                   hill_coefficient,
                                                   mRNA_degradation_rate,
                                                   protein_degradation_rate,
                                                   basal_transcription_rate,
                                                   translation_rate,
                                                   transcription_delay,
                                                   mRNA_noise_strength = 0,
                                                   protein_noise_strength = 0,
                                                   initial_mRNA = initial_mRNA,
                                                   initial_protein = initial_protein,
                                                   equilibration_time = 0,
                                                   time_step = 0.01,
                                                   sampling_frequency = 1)
        return trace
    elif integrator == 'PyDDE':
        hes5_dde = PyDDE.dde()
        initial_condition = np.array([initial_mRNA, initial_protein])
        # Encode the string option as a float flag, since hes5_ddegrad only
        # receives a numeric parameter vector from the PyDDE backend.
        if for_negative_times == 'initial':
            negative_times_indicator = 0.0
        elif for_negative_times == 'zero':
            negative_times_indicator = 1.0
        elif for_negative_times == 'no_negative':
            negative_times_indicator = 2.0
        else:
            # BUGFIX: the exception was previously constructed but never raised,
            # which let the code fall through with an undefined indicator.
            raise ValueError("The parameter set for for_negative_times could not be interpreted.")
        parameters = np.array([repression_threshold,
                               hill_coefficient,
                               mRNA_degradation_rate,
                               protein_degradation_rate,
                               basal_transcription_rate,
                               translation_rate,
                               transcription_delay,
                               negative_times_indicator])
        hes5_dde.dde(y=initial_condition, times=np.arange(0.0, duration, 1.0),
                     func=hes5_ddegrad, parms=parameters,
                     tol=0.000005, dt=0.01, hbsize=10000, nlag=1, ssc=[0.0, 0.0])
        # hbsize is the history buffer size, i.e. how many past values are stored
        # nlag is the number of delay variables (tau_1, tau_2, ... tau_nlag)
        # ssc means "statescale" and only matters for values close to 0
        return hes5_dde.data
    else:
        # BUGFIX: previously an unknown integrator silently returned None.
        raise ValueError("integrator must be 'agnostic' or 'PyDDE'")
def hes5_ddegrad(y, parameters, time):
    '''Gradient of the Hes5 delay differential equation for
    deterministic runs of the model.
    It evaluates the right hand side of DDE 1 in Monk(2003).

    Parameters
    ----------
    y : ndarray
        vector of the form [mRNA, protein] contain the concentration of these species at time t

    parameters : ndarray
        vector of the form [repression_threshold, hill_coefficient, mRNA_degradation_rate,
        protein_degradation_rate, basal_transcription_rate, translation_rate,
        transcription_delay, negative_times_indicator]
        containing the value of these parameters.
        The value of negative_times_indicator corresponds to for_negative_times in generate_deterministic_trajectory().
        The value 0.0 corresponds to the option 'initial', whereas 1.0 corresponds to 'zero',
        and 2.0 corresponds to 'no_negative'.

    time : float
        time at which the gradient is calculated

    Returns
    -------
    gradient : ndarray
        vector of the form [dmRNA, dProtein] containing the evaluated right hand side of the
        delay differential equation for the species concentrations provided in y, the given
        parameters, and at time t.

    Raises
    ------
    ValueError
        if parameters[7] is not one of the encoded options 0.0, 1.0 or 2.0.
    '''
    repression_threshold = float(parameters[0])   # P0
    hill_coefficient = parameters[1]              # NP
    mRNA_degradation_rate = parameters[2]         # MuM
    protein_degradation_rate = parameters[3]      # Mup
    basal_transcription_rate = parameters[4]      # alpha_m
    translation_rate = parameters[5]              # alpha_p
    time_delay = parameters[6]                    # tau
    negative_times_indicator = parameters[7]      # float-encoded for_negative_times

    if negative_times_indicator == 0.0:
        for_negative_times = 'initial'
    elif negative_times_indicator == 1.0:
        for_negative_times = 'zero'
    elif negative_times_indicator == 2.0:
        for_negative_times = 'no_negative'
    else:
        # BUGFIX: the exception was previously constructed but never raised,
        # leaving 'for_negative_times' undefined further down.
        raise ValueError("Could not interpret the value of for_negative_times")

    mRNA = float(y[0])
    protein = float(y[1])

    # Delayed protein value: only consult the integrator's history buffer when
    # the delayed time t - tau lies inside the integration interval.
    if (time > time_delay):
        past_protein = PyDDE.pastvalue(1, time - time_delay, 0)
    elif time > 0.0:
        # t - tau < 0: fall back to the chosen negative-time convention.
        # For 'no_negative' the delayed value is not needed (see below).
        if for_negative_times == 'initial':
            past_protein = PyDDE.pastvalue(1, 0.0, 0)
        elif for_negative_times == 'zero':
            past_protein = 0.0
    else:
        past_protein = protein

    dprotein = translation_rate*mRNA - protein_degradation_rate*protein
    if for_negative_times != 'no_negative':
        hill_function_value = 1.0/(1.0 + pow(past_protein/repression_threshold, hill_coefficient))
        dmRNA = basal_transcription_rate*hill_function_value - mRNA_degradation_rate*mRNA
    else:
        # 'no_negative': transcription is blocked until the delay has passed.
        if time < time_delay:
            dmRNA = -mRNA_degradation_rate*mRNA
        else:
            hill_function_value = 1.0/(1.0 + pow(past_protein/repression_threshold, hill_coefficient))
            dmRNA = basal_transcription_rate*hill_function_value - mRNA_degradation_rate*mRNA

    return np.array([dmRNA, dprotein])
def generate_deterministic_goodfellow_trajectory( duration = 7200,
basal_mRNA_transcription_rate = 1.0,
basal_miRNA_transcription_rate = 1.0,
translation_rate = 10,
repression_threshold_protein_on_mRNA = 100,
repression_threshold_protein_on_miRNA = 100,
repression_threshold_miRNA_on_mRNA = 100,
repression_threshold_miRNA_on_protein = 100,
hill_coefficient_protein_on_mRNA = 5,
hill_coefficient_protein_on_miRNA = 5,
hill_coefficient_miRNA_on_mRNA = 5,
hill_coefficient_miRNA_on_protein = 100,
transcription_delay = 19,
upper_mRNA_degradation_rate = 0.03,
lower_mRNA_degradation_rate = 0.03,
protein_degradation_rate = 0.03,
miRNA_degradation_rate = 0.00001,
initial_mRNA = 3,
initial_protein = 100,
initial_miRNA = 1,
for_negative_times='initial'):
'''Generate one trace of the Goodfellow model. This function implements the deterministic model in
Goodfellow, Nature Communications (2014).
Parameters
----------
duration : float
duration of the trace in minutes
repression_threshold_protein_on_mRNA : float
repression threshold, Hes autorepresses its own transcription if its copynumber is larger
than this repression threshold. Corresponds to P0 in the Goodfellow paper
repression_threshold_protein_on_miRNA : float
repression threshold, Hes represses production of micro RNA if the Hes copynumber is larger
than this repression threshold. Corresponds to P1 in the Goodfellow paper.
repression_threshold_miRNA_on_mRNA : float
repression threshold, the micro RNA represses Hes transcription if the micro RNA copynumber is larger
than this repression threshold. Corresponds to r0 in the Goodfellow paper
hill_coefficient_protein_on_mRNA : float
exponent in the hill function regulating the Hes autorepression of its own transcription.
Small values make the response more shallow, whereas large values will lead to a switch-like
response if the protein concentration exceeds the repression threshold, corresponds to n0 in the
Goodfellow paper.
hill_coefficient_miRNA_on_mRNA : float
exponent in the hill function regulating the impact of the micro RNA on mRNA translation. Small values
make the response more shallow, whereas large values will lead to a switch-like response if the miRNA
concentration exceeds the repression threshold. Corresponds to m0 in the Goodfellow paper.
hill_coefficient_protein_on_miRNA : float
exponent in the hill function regulating the repression of miRNA transcription by Hes. Small values
make the response more shallow, whereas large values will lead to a switch-like response if the protein
concentration exceeds the repression threshold. Corresponds to n1 in the Goodfellow paper.
upper_mRNA_degradation_rate : float
upper bound for the rate at which mRNA is degraded, in copynumber per minute. Corresponds to b_l in the
Goodfellow paper.
lower_mRNA_degradation_rate : float
lower bound for the rate at which | |
+ 1)
plt.plot(band / 1e6, np.abs(response), 'b.-')
plt.plot(band / 1e6, np.abs(response_guess), 'g')
plt.plot(band / 1e6, np.abs(response_fit), 'r')
plt.xlabel('Frequency (MHz)')
plt.ylabel('Amplitude (nm)')
plt.tight_layout(pad=0.0, w_pad=0.0, h_pad=0.0)
plt.subplot(resonances, 2, (n + 1) + 2)
plt.plot(band / 1e6, np.angle(response), '.-')
plt.plot(band / 1e6, np.angle(response_guess), 'g')
plt.plot(band / 1e6, np.angle(response_fit), 'r')
plt.xlabel('Frequency (MHz)')
plt.ylabel('Phase (Rad)')
plt.tight_layout(pad=0.0, w_pad=0.0, h_pad=0.0)
# Normalize to first resonance Q
self.TF_norm = self.coef_mat[0, 2] * (TF - np.min(np.abs(TF))) / \
(np.max(np.abs(TF)) - np.min(np.abs(TF)))
return
def generate_tf(self, can_params_dict={}, plot=False):
    """
    Uses the cantilever simulation to generate a tune as the transfer function

    :param can_params_dict: use ffta.pixel_utils.load.cantilever_params(), or a
        path to a parameters file (loaded via cantilever_params)
    :type can_params_dict: Dict or str
    :param plot: Plots the time-dependent tune
    :type plot: bool

    Side effects: sets self.tf, self.TF, self.TF_norm, self.tf_f_ax,
    self.tf_exc and self.TF_EXC.
    """
    if isinstance(can_params_dict, str):
        can_params_dict = cantilever_params(can_params_dict)
        can_params_dict = can_params_dict['Initial']

    # Default simulation parameters; selectively overridden below.
    can_params = {'amp_invols': 7.5e-08,
                  'def_invols': 6.88e-08,
                  'soft_amp': 0.3,
                  'drive_freq': 309412.0,
                  'res_freq': 309412.0,
                  'k': 43.1,
                  'q_factor': 340.0}

    force_params = {'es_force': 1e-10,
                    'ac_force': 6e-07,
                    'dc_force': 3e-09,
                    'delta_freq': -170.0,
                    'tau': 0.001,
                    'v_dc': 3.0,
                    'v_ac': 2.0,
                    'v_cpd': 1.0,
                    'dCdz': 1e-10,
                    'v_step': 1.0}

    sim_params = {'trigger': 0.02,
                  'total_time': 0.05
                  }

    # BUGFIX: was can_params.update(k=v), which stored everything under the
    # literal key 'k' instead of the iterated key.
    for k, v in can_params_dict.items():
        can_params[k] = v

    # Update from GKPixel class
    sim_params['trigger'] = self.trigger
    sim_params['sampling_rate'] = self.sampling_rate
    sim_params['total_time'] = self.total_time
    can_params['drive_freq'] = self.drive_freq
    can_params['res_freq'] = self.drive_freq

    if 'Force' in can_params_dict:
        force_params.update(es_force=can_params_dict['Force'])
    elif 'es_force' in can_params_dict:
        force_params.update(es_force=can_params_dict['es_force'])

    # Map alternate spellings in the supplied dict onto the canonical keys.
    # BUGFIX: was can_params.update(l=...), which stored everything under the
    # literal key 'l' and never applied the alias values.
    can_aliases = {'amp_invols': ['amp_invols', 'AMPINVOLS'],
                   'q_factor': ['q_factor', 'Q'],
                   'k': ['SpringConstant', 'k']}
    for target, aliases in can_aliases.items():
        for alias in aliases:
            if alias in can_params_dict:
                can_params[target] = can_params_dict[alias]

    if can_params['k'] < 1e-3:
        can_params['k'] *= 1e9  # old code had this off by 1e9

    cant = Cantilever(can_params, force_params, sim_params)
    cant.trigger = cant.total_time  # don't want a trigger
    Z, _ = cant.simulate()
    Z = Z.flatten()

    if plot:
        plt.figure()
        plt.plot(Z)
        plt.title('Tip response)')

    # Normalize the spectrum so the drive-frequency bin has amplitude Q.
    TF = np.fft.fftshift(np.fft.fft(Z))
    Q = can_params['q_factor']
    mid = int(len(self.f_ax) / 2)
    drive_bin = np.searchsorted(self.f_ax[mid:], self.drive_freq) + mid
    TFmax = np.abs(TF[drive_bin])
    TF_norm = Q * (TF - np.min(np.abs(TF))) / (TFmax - np.min(np.abs(TF)))

    self.tf = Z
    self.TF = TF
    self.TF_norm = TF_norm
    self.tf_f_ax = np.linspace(-self.sampling_rate / 2, self.sampling_rate / 2, num=self.tf.shape[0])
    # BUGFIX: was self.tf.shape / self.sampling_rate, which divides a tuple
    # and raises TypeError; the chirp length is n_samples / sampling_rate.
    self.tf_exc = gen_chirp(sampling_rate=self.sampling_rate,
                            length=self.tf.shape[0] / self.sampling_rate)
    self.TF_EXC = np.fft.fftshift(np.fft.fft(self.tf_exc))

    return
def force_out(self, plot=False, noise_tolerance=1e-6, phase_shift=0):
    """
    Reconstructs force by dividing by transfer function

    Divides the (optionally phase-shifted) signal spectrum self.SIG by
    self.TF_norm, keeping only bins above the noise floor, and stores the
    result in self.FORCE (spectrum) and self.force (time domain).

    :param plot: Generates plot of reconstructed force. The default is False.
    :type plot: bool, optional

    :param noise_tolerance: Use to determine noise_floor, The default is 1e-6
    :type noise_tolerance: float, optional

    :param phase_shift: Desired phase shift in radians
    :type phase_shift: float, optional
    """
    if not any(self.TF_norm):
        raise AttributeError('Supply Transfer Function or use generate_tf()')
    center = int(len(self.SIG) / 2)  # zero-frequency bin of the shifted spectrum
    drive_bin = int(self.drive_freq / (self.sampling_rate / len(self.SIG)))
    SIG = np.copy(self.SIG)
    # A nonzero argument overrides any previously stored phase shift.
    if phase_shift != 0:
        self.phase_shift = phase_shift
    if self.phase_shift != 0:
        # DFT shift theorem
        period = self.sampling_rate / self.drive_freq
        ph = self.phase_shift * period / (2 * pi)
        SIG = self.SIG * np.exp(-1j * ph * self.f_ax / (0.5 * len(self.f_ax)))
    self.FORCE = np.zeros(len(SIG), dtype=complex)
    noise_limit = np.ceil(get_noise_floor(SIG, noise_tolerance))
    # Only save bins above the noise_limit
    signal_pass = np.where(np.abs(SIG) > noise_limit)[0]
    # The second harmonic of the drive carries the capacitance information;
    # warn if the noise threshold removed it.
    if 2 * drive_bin + center not in signal_pass:
        warnings.warn('Second resonance not in passband; increase noise_tolerance')
    self.FORCE[signal_pass] = SIG[signal_pass]
    self.FORCE = self.FORCE / self.TF_norm
    self.force = np.real(np.fft.ifft(np.fft.ifftshift(self.FORCE)))
    del SIG
    if plot:
        # Show the reconstructed force around the trigger point.
        start = int(0.5 * self.trigger * self.sampling_rate)
        stop = int(1.5 * self.trigger * self.sampling_rate)
        plt.figure()
        plt.plot(self.t_ax[start:stop], self.force[start:stop])
        plt.title('Force (output/TF_norm) vs time, near trigger')
        plt.xlabel('Time (s)')
        plt.ylabel('Force (N)')
        self.plot_response()
    return
def noise_filter(self, bw=1e3, plot=True, noise_tolerance=1e-6):
    """
    Denoising filter for 50 kHz harmonics (electrical noise in the system)

    Applies notch filters at 2 kHz and at the 50/100/150/200 kHz harmonics,
    then overwrites self.force and self.FORCE with the filtered result.

    :param bw: Bandwidth for the notch filters
    :type bw: float, optional

    :param plot: Generates plot of reconstructed force. The default is False.
    :type plot: bool, optional

    :param noise_tolerance: Use to determine noise_floor, The default is 1e-6
    :type noise_tolerance: float, optional
    """
    # Notch centers [2, 50, 100, 150, 200] kHz; the 2 kHz notch is 4 kHz wide,
    # the harmonics use the caller-supplied bandwidth.
    nbf = px.processing.fft.NoiseBandFilter(len(self.force), self.sampling_rate,
                                            [2E3, 50E3, 100E3, 150E3, 200E3],
                                            [4E3, bw, bw, bw, bw])
    filt_line, _, _ = px.processing.gmode_utils.test_filter(self.force,
                                                            frequency_filters=nbf,
                                                            noise_threshold=noise_tolerance,
                                                            show_plots=plot)
    self.force = np.real(filt_line)
    # Keep the frequency-domain copy consistent with the filtered time trace.
    self.FORCE = np.fft.fftshift(np.fft.fft(self.force))
    return
def plot_response(self):
    """
    Plots the transfer function and calculated force in frequency space

    Draws |SIG|, |TF_norm| and |FORCE| on a shared log-magnitude axis,
    limited to 2.5x the drive frequency.
    """
    spectra = ((self.SIG, 'b'),
               (self.TF_norm, 'g'),
               (self.FORCE, 'k'))
    plt.figure()
    for spectrum, color in spectra:
        plt.semilogy(self.f_ax, np.abs(spectrum), color)
    plt.xlim((0, 2.5 * self.drive_freq))
    plt.legend(labels=['Signal', 'TF-normalized', 'Force_out'])
    plt.title('Frequency Response of the Data')
    return
def _calc_cpd_params(self, periods=2, return_dict=False):
"""
Calculates the parameters needed to calculate the CPD
:param periods: Number of cantilever cycles to average over. The default is 2.
:type periods: int, optional
:param return_dict: Dictionary of these parameters for debugging purposes
:type return_dict: bool, optional
:returns: if return_dict == True, return _cpdd
:rtype:
"""
self.periods = periods
self.pxl_time = self.n_points / self.sampling_rate # how long each pixel is in time (8.192 ms)
self.time_per_osc = (1 / self.drive_freq) # period of drive frequency
self.pnts_per_period = self.sampling_rate * self.time_per_osc # points in a cycle
self.num_periods = int(self.pxl_time / self.time_per_osc) # number of periods in each pixel
self.num_CPD = int(np.floor(
self.num_periods / self.periods)) # length of CPD array since each CPD takes some number of periods
self.pnts_per_CPD = int(np.floor(self.pnts_per_period * self.periods)) # points used to calculate CPD
self.remainder = int(self.n_points % self.pnts_per_CPD)
if return_dict:
_cpdd = {'pxl_time': self.pxl_time,
'time_per_osc': self.time_per_osc,
'pnts_per_period': self.pnts_per_period,
'num_periods': self.num_periods,
'num_CPD': self.num_CPD,
'pnts_per_CPD': self.pnts_per_CPD,
'remainder': self.remainder}
return _cpdd
return
def analyze_cpd(self, verbose=False, deg=2, use_raw=False, periods=2,
                overlap=False):
    """
    Extracts CPD and capacitance gradient from data.

    Fits a polynomial of the response vs excitation voltage over windows of
    `periods` cantilever cycles; the parabola vertex -b/(2a) gives the CPD
    and the quadratic coefficient tracks the capacitance gradient.
    Results are stored in self.CPD and self.capacitance.

    :param verbose: currently unused in this routine
    :type verbose: bool

    :param deg: Degree of polynomial fit. Default is 2, which is a quadratic fit.
        Unless there's a good reason, quadratic is correct to use
    :type deg: int

    :param use_raw: Uses the signal_array instead of the reconstructed force
    :type use_raw: bool, optional

    :param periods: Numer of cantilever cycles to average over for CPD extraction
    :type periods: int, optional

    :param overlap: If False, each CPD is from a separate part of the signal.
        If True, shifts signal by 1 pixel and recalculates
    :type overlap: bool, optional
    """
    self._calc_cpd_params(periods)

    pnts = self.pnts_per_CPD
    step = pnts  # non-overlapping windows by default
    if overlap:
        self.t_ax_wH = np.copy(self.t_ax)
        step = 1  # slide the window one sample at a time

    cpd_px = np.arange(0, self.n_points, step)  # window start indices
    test_wH = np.zeros((len(cpd_px), deg + 1))  # fit coefficients per window

    for n, p in enumerate(cpd_px):
        if use_raw:
            resp_x = np.float32(self.signal_array[p:p + pnts])
        else:
            resp_x = np.float32(self.force[p:p + pnts])
        # Remove the window mean so the fit captures the voltage dependence only.
        resp_x -= np.mean(resp_x)
        V_per_osc = self.exc_wfm[p:p + pnts]
        # Coefficients in increasing order: [c0, c1, c2, ...]
        popt, _ = npPoly.polyfit(V_per_osc, resp_x, deg, full=True)
        test_wH[n] = popt.flatten()

    self.test_wH = test_wH
    # Vertex of the parabola c0 + c1*V + c2*V^2 is at V = -c1 / (2*c2).
    self.CPD = -0.5 * test_wH[:, 1] / test_wH[:, 2]
    self.capacitance = test_wH[:, 2]

    # The final (truncated) window can produce a degenerate fit; if any NaNs
    # appear, patch the last point with its neighbor.
    if any(np.argwhere(np.isnan(self.CPD))):
        self.CPD[-1] = self.CPD[-2]
        self.capacitance[-1] = self.capacitance[-2]
def plot_cpd(self, smooth=None):
    '''
    Plots the CPD response

    :param smooth: Boxcar smoothing kernel width in samples. Value of 3 is reasonable
    :type smooth: int, optional

    :returns: axis of the generated figure
    :rtype: matplotlib figure axis object
    '''
    fig, ax = plt.subplots(figsize=(5, 5), facecolor='white')
    tx = np.linspace(0, self.total_time, self.num_CPD)
    if smooth:
        # BUGFIX: the boxcar was normalized by a constant 3 regardless of the
        # kernel width, which rescaled the CPD for any smooth != 3. A boxcar
        # must be divided by its own width to preserve amplitude.
        kernel = np.ones(smooth) / smooth
        ax.plot(tx * 1e3, fftconvolve(self.CPD[:self.num_CPD], kernel, mode='same'), 'b')
    else:
        ax.plot(tx * 1e3, self.CPD[:self.num_CPD], 'b')
    ax.set_xlabel('Time (ms)')
    ax.set_ylabel('CPD (V)')
    ax.set_title('CPD response')
    return ax
def filter_cpd(self):
    """
    Filters the capacitance based on pixel parameter self.filter_bandwidth
    (typical is 10 kHz, which is somewhat large)

    NOTE(review): this routine currently only computes the bin width and
    applies no filter before returning -- it appears to be an unfinished
    stub; confirm intended behavior before use.
    """
    center = int(len(self.CPD) / 2)              # zero-frequency bin index
    df = self.sampling_rate / len(self.CPD)      # frequency resolution per bin
    bin_width = int(self.filter_bandwidth / df)  # passband width in bins
    return
def min_phase(self, phases_to_test=[2.0708, 2.1208, 2.1708],
              noise_tolerance=1e-6, verbose=True):
    """
    Determine the optimal phase shift due to cable lag

    Reconstructs the force at each candidate phase and plots force vs
    excitation; the user inspects the plots and sets self.phase_shift.
    Note: self.phase_shift is left at the LAST tested value on return.

    :param phases_to_test: Which phases to shift the signal with. The default is [2.0708, 2.1208, 2.1708],
        which is 0.5, 0.55, 0.5 + pi/2
    :type phases_to_test: list, optional

    :param noise_tolerance: Use to determine noise_floor, The default is 1e-6
    :type noise_tolerance : float, optional

    :param verbose: print a reminder to store the chosen phase
    :type verbose: bool, optional
    """
    # have to iterate this cell many times to find the right phase
    phases_to_test = np.array(phases_to_test)
    numplots = len(phases_to_test)
    fig, ax = plt.subplots(nrows=numplots, figsize=(6, int(4 * numplots)),
                           facecolor='white')
    for x, ph in enumerate(phases_to_test):
        # force_out() reads self.phase_shift when reconstructing the force.
        self.phase_shift = ph
        self.force_out(plot=False, noise_tolerance=noise_tolerance)
        # plt.subplots returns a bare Axes (not an array) when nrows == 1.
        if len(phases_to_test) > 1:
            usid.plot_utils.rainbow_plot(ax[x], self.exc_wfm, self.force)
            ax[x].set_title('Phase=' + str(ph))
        else:
            usid.plot_utils.rainbow_plot(ax, self.exc_wfm, self.force)
            ax.set_title('Phase=' + str(ph))
    if verbose:
        print('Set self.phase_shift to match desired phase offset (radians)')
    return
def min_phase_fft(self, signal):
"""
:param signal:
:type signal:
:returns:
:rtype:
"""
fits = []
xpts = np.arange(-2 * pi, 2 * pi, | |
Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 4.56169,
'Renaming Unit/Runtime Dynamic': 0.451536,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 3.91153,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0189193,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.217548,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.178851,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.0644642,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.103978,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.0524848,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.220927,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.0463072,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.18449,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0337888,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00270392,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0235179,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0199971,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0573066,
'Execution Unit/Register Files/Runtime Dynamic': 0.0227011,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.0542764,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.161191,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.02775,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000146501,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000146501,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000126765,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 4.8615e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00028726,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.000707028,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00143455,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0192238,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 1.2228,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.0471822,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.0652925,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 3.50066,
'Instruction Fetch Unit/Runtime Dynamic': 0.13384,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0608528,
'L2/Runtime Dynamic': 0.0184433,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 1.69281,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.254358,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0147426,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0147425,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 1.76243,
'Load Store Unit/Runtime Dynamic': 0.341805,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.0363527,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.072705,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0129017,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0138148,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.076029,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.00773728,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.254301,
'Memory Management Unit/Runtime Dynamic': 0.0215521,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 13.3522,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0888827,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00399013,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.031119,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': | |
'P*7d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.l>0:
moves = 'L*7d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.n>0:
moves = 'N*7d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.s>0:
moves = 'S*7d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.g>0:
moves = 'G*7d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.b>0:
moves = 'B*7d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.r>0:
moves = 'R*7d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if board.s7c =='':
if Wboard.p>0 and (Wboard.w7h !='p' and Wboard.w7g !='p' and Wboard.w7f !='p' and Wboard.w7e !='p' and Wboard.w7d !='p' and Wboard.w7c !='p' and Wboard.w7b !='p' and Wboard.w7a !='p'):
moves = 'P*7c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.l>0:
moves = 'L*7c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.n>0:
moves = 'N*7c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.s>0:
moves = 'S*7c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.g>0:
moves = 'G*7c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.b>0:
moves = 'B*7c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.r>0:
moves = 'R*7c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if board.s7b =='':
if Wboard.p>0 and (Wboard.w7h !='p' and Wboard.w7g !='p' and Wboard.w7f !='p' and Wboard.w7e !='p' and Wboard.w7d !='p' and Wboard.w7c !='p' and Wboard.w7b !='p' and Wboard.w7a !='p'):
moves = 'P*7b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.l>0:
moves = 'L*7b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.n>0:
moves = 'N*7b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.s>0:
moves = 'S*7b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.g>0:
moves = 'G*7b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.b>0:
moves = 'B*7b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.r>0:
moves = 'R*7b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if board.s7a =='':
if Wboard.p>0 and (Wboard.w7h !='p' and Wboard.w7g !='p' and Wboard.w7f !='p' and Wboard.w7e !='p' and Wboard.w7d !='p' and Wboard.w7c !='p' and Wboard.w7b !='p' and Wboard.w7a !='p'):
moves = 'P*7a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.l>0:
moves = 'L*7a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.n>0:
moves = 'N*7a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.s>0:
moves = 'S*7a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.g>0:
moves = 'G*7a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.b>0:
moves = 'B*7a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.r>0:
moves = 'R*7a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if board.s6i =='':
if Wboard.s>0:
moves = 'S*6i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.g>0:
moves = 'G*6i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.b>0:
moves = 'B*6i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.r>0:
moves = 'R*6i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if board.s6h =='':
if Wboard.p>0 and (Wboard.w6h !='p' and Wboard.w6g !='p' and Wboard.w6f !='p' and Wboard.w6e !='p' and Wboard.w6d !='p' and Wboard.w6c !='p' and Wboard.w6b !='p' and Wboard.w6a !='p'):
moves = 'P*6h'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.l>0:
moves = 'L*6h'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.s>0:
moves = 'S*6h'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.g>0:
moves = 'G*6h'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.b>0:
moves = 'B*6h'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.r>0:
moves = 'R*6h'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if board.s6g =='':
if Wboard.p>0 and (Wboard.w6h !='p' and Wboard.w6g !='p' and Wboard.w6f !='p' and Wboard.w6e !='p' and Wboard.w6d !='p' and Wboard.w6c !='p' and Wboard.w6b !='p' and Wboard.w6a !='p'):
moves = 'P*6g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.l>0:
moves = 'L*6g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.n>0:
moves = 'N*6g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.s>0:
moves = 'S*6g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.g>0:
moves = 'G*6g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.b>0:
moves = 'B*6g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.r>0:
moves = 'R*6g'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if board.s6f =='':
if Wboard.p>0 and (Wboard.w6h !='p' and Wboard.w6g !='p' and Wboard.w6f !='p' and Wboard.w6e !='p' and Wboard.w6d !='p' and Wboard.w6c !='p' and Wboard.w6b !='p' and Wboard.w6a !='p'):
moves = 'P*6f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.l>0:
moves = 'L*6f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.n>0:
moves = 'N*6f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.s>0:
moves = 'S*6f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.g>0:
moves = 'G*6f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.b>0:
moves = 'B*6f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.r>0:
moves = 'R*6f'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if board.s6e =='':
if Wboard.p>0 and (Wboard.w6h !='p' and Wboard.w6g !='p' and Wboard.w6f !='p' and Wboard.w6e !='p' and Wboard.w6d !='p' and Wboard.w6c !='p' and Wboard.w6b !='p' and Wboard.w6a !='p'):
moves = 'P*6e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.l>0:
moves = 'L*6e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.n>0:
moves = 'N*6e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.s>0:
moves = 'S*6e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.g>0:
moves = 'G*6e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.b>0:
moves = 'B*6e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.r>0:
moves = 'R*6e'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if board.s6d =='':
if Wboard.p>0 and (Wboard.w6h !='p' and Wboard.w6g !='p' and Wboard.w6f !='p' and Wboard.w6e !='p' and Wboard.w6d !='p' and Wboard.w6c !='p' and Wboard.w6b !='p' and Wboard.w6a !='p'):
moves = 'P*6d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.l>0:
moves = 'L*6d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.n>0:
moves = 'N*6d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.s>0:
moves = 'S*6d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.g>0:
moves = 'G*6d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.b>0:
moves = 'B*6d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.r>0:
moves = 'R*6d'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if board.s6c =='':
if Wboard.p>0 and (Wboard.w6h !='p' and Wboard.w6g !='p' and Wboard.w6f !='p' and Wboard.w6e !='p' and Wboard.w6d !='p' and Wboard.w6c !='p' and Wboard.w6b !='p' and Wboard.w6a !='p'):
moves = 'P*6c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.l>0:
moves = 'L*6c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.n>0:
moves = 'N*6c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.s>0:
moves = 'S*6c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.g>0:
moves = 'G*6c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.b>0:
moves = 'B*6c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.r>0:
moves = 'R*6c'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if board.s6b =='':
if Wboard.p>0 and (Wboard.w6h !='p' and Wboard.w6g !='p' and Wboard.w6f !='p' and Wboard.w6e !='p' and Wboard.w6d !='p' and Wboard.w6c !='p' and Wboard.w6b !='p' and Wboard.w6a !='p'):
moves = 'P*6b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.l>0:
moves = 'L*6b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.n>0:
moves = 'N*6b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.s>0:
moves = 'S*6b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.g>0:
moves = 'G*6b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.b>0:
moves = 'B*6b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.r>0:
moves = 'R*6b'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if board.s6a =='':
if Wboard.p>0 and (Wboard.w6h !='p' and Wboard.w6g !='p' and Wboard.w6f !='p' and Wboard.w6e !='p' and Wboard.w6d !='p' and Wboard.w6c !='p' and Wboard.w6b !='p' and Wboard.w6a !='p'):
moves = 'P*6a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.l>0:
moves = 'L*6a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.n>0:
moves = 'N*6a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.s>0:
moves = 'S*6a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.g>0:
moves = 'G*6a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.b>0:
moves = 'B*6a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.r>0:
moves = 'R*6a'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if board.s5i =='':
if Wboard.s>0:
moves = 'S*5i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.g>0:
moves = 'G*5i'
kaihimore(moves)
if oute.oute == 0:
depth1.append(moves)
if Wboard.b>0:
| |
# License is MIT: see LICENSE.md.
"""Nestle: nested sampling routines to evaluate Bayesian evidence."""
import sys
import warnings
import math
import numpy as np
try:
from scipy.cluster.vq import kmeans2
HAVE_KMEANS = True
except ImportError: # pragma: no cover
HAVE_KMEANS = False
__all__ = ["sample", "print_progress", "mean_and_cov", "resample_equal",
"Result"]
__version__ = "0.2.0"
SQRTEPS = math.sqrt(float(np.finfo(np.float64).eps))
# -----------------------------------------------------------------------------
# Helpers
def vol_prefactor(n):
    """Volume constant for an n-dimensional sphere.

    For n even: (2pi)^(n/2) / (2 * 4 * ... * n)
    For n odd : 2 * (2pi)^((n-1)/2) / (1 * 3 * ... * n)
    """
    # Even dimensions start the product at 2, odd ones at 3 with an extra
    # factor of 2 in front; either way each step multiplies by 2*pi/i.
    if n % 2 == 0:
        prefactor, start = 1.0, 2
    else:
        prefactor, start = 2.0, 3
    for i in range(start, n + 1, 2):
        prefactor *= 2.0 / i * math.pi
    return prefactor
def randsphere(n, rstate=np.random):
    """Draw a random point within an n-dimensional unit sphere"""
    # A standard-normal direction is isotropic; scaling the unit vector by
    # U^(1/n) makes the radius distribution uniform in volume.
    direction = rstate.randn(n)
    shrink = rstate.rand() ** (1. / n)
    return direction * shrink / np.sqrt(np.sum(direction ** 2))
def random_choice(a, p, rstate=np.random):
    """replacement for numpy.random.choice (only in numpy 1.7+)"""
    # Same tolerance as np.random.choice: sqrt of float64 machine epsilon
    # (inlined from the module-level SQRTEPS constant).
    tol = math.sqrt(float(np.finfo(np.float64).eps))
    if abs(np.sum(p) - 1.) > tol:
        raise ValueError("probabilities do not sum to 1")
    # Walk the cumulative distribution until it crosses the random threshold.
    threshold = rstate.rand()
    index = 0
    cumulative = p[index]
    while cumulative < threshold:
        index += 1
        cumulative += p[index]
    return index
def resample_equal(samples, weights, rstate=None):
    """Resample the samples so that the final samples all have equal weight.

    Each input sample appears in the output array either
    `floor(weights[i] * N)` or `ceil(weights[i] * N)` times, with
    `floor` or `ceil` randomly selected (weighted by proximity).

    Parameters
    ----------
    samples : `~numpy.ndarray`
        Unequally weight samples returned by the nested sampling algorithm.
        Shape is (N, ...), with N the number of samples.
    weights : `~numpy.ndarray`
        Weight of each sample. Shape is (N,).
    rstate : random state, optional
        Source of the single random offset; defaults to the global
        ``numpy.random`` module.

    Returns
    -------
    equal_weight_samples : `~numpy.ndarray`
        Samples with equal weights, same shape as input samples.

    Raises
    ------
    ValueError
        If the weights do not sum to 1 within sqrt(eps) tolerance.

    Notes
    -----
    Implements the systematic resampling method described in
    `this PDF <http://people.isy.liu.se/rt/schon/Publications/HolSG2006.pdf>`_.
    Another way to sample according to weights would be::

        N = len(weights)
        new_samples = samples[np.random.choice(N, size=N, p=weights)]

    However, the method used in this function is less "noisy".
    """
    # Same tolerance as in np.random.choice (module-level SQRTEPS, inlined).
    if abs(np.sum(weights) - 1.) > math.sqrt(float(np.finfo(np.float64).eps)):
        raise ValueError("weights do not sum to 1")
    if rstate is None:
        rstate = np.random

    N = len(weights)

    # make N subdivisions, and choose positions with a consistent random offset
    positions = (rstate.random() + np.arange(N)) / N

    # np.int was removed in NumPy 1.24; the builtin int is the correct dtype.
    idx = np.zeros(N, dtype=int)
    cumulative_sum = np.cumsum(weights)
    i, j = 0, 0
    while i < N:
        if positions[i] < cumulative_sum[j]:
            idx[i] = j
            i += 1
        else:
            j += 1
    return samples[idx]
class Result(dict):
    """Represents a sampling result.

    Since this class is essentially a subclass of dict with attribute
    accessors, one can see which attributes are available using the
    `keys()` method.
    """

    # Attribute writes/deletes go straight to the underlying dict.
    __setattr__ = dict.__setitem__
    __delattr__ = dict.__delitem__

    def __getattr__(self, name):
        # Missing keys must surface as AttributeError to honor the
        # attribute-access protocol (e.g. for hasattr()).
        try:
            return self[name]
        except KeyError:
            raise AttributeError(name)

    def __repr__(self):
        keys = list(self.keys())
        if not keys:
            return self.__class__.__name__ + "()"
        width = max(map(len, keys)) + 1
        rows = [key.rjust(width) + ': ' + repr(val)
                for key, val in self.items()]
        return '\n'.join(rows)

    def summary(self):
        """Return a nicely formatted string giving summary."""
        template = ("niter: {:d}\n"
                    "ncall: {:d}\n"
                    "nsamples: {:d}\n"
                    "logz: {:6.3f} +/- {:6.3f}\n"
                    "h: {:6.3f}")
        return template.format(self.niter, self.ncall, len(self.samples),
                               self.logz, self.logzerr, self.h)
def mean_and_cov(x, weights):
    """Compute weighted sample mean and covariance.

    Parameters
    ----------
    x : `~numpy.ndarray`
        2-D array containing data samples. Shape is (M, N) where N is the
        number of variables and M is the number of samples or observations.
        This ordering is equivalent to using ``rowvar=0`` in numpy.cov.
    weights : `~numpy.ndarray`
        1-D array of sample weights. Shape is (M,).

    Returns
    -------
    mean : `~numpy.ndarray`
        Weighted average of samples, with shape (N,).
    cov : `~numpy.ndarray`
        The covariance matrix of the variables with shape (N, N).

    Notes
    -----
    Implements the "weighted samples" formula from
    https://en.wikipedia.org/wiki/Sample_mean_and_sample_covariance
    """
    mean = np.average(x, weights=weights, axis=0)
    centered = x - mean

    # Unbiased normalization for weighted samples: wsum / (wsum^2 - sum(w^2)).
    w_total = np.sum(weights)
    w_sq_total = np.sum(weights ** 2)
    prefactor = w_total / (w_total ** 2 - w_sq_total)

    # einsum contracts over samples: cov_jk = sum_i w_i * dx_ij * dx_ik.
    cov = prefactor * np.einsum('i,ij,ik', weights, centered, centered)
    return mean, cov
def print_progress(info):
    """Callback function that prints a running total on a single line.

    Parameters
    ----------
    info : dict
        Dictionary containing keys ``'it'`` and ``'logz'``.
    """
    # \r returns the cursor to the line start and \033[K erases to the end
    # of the line, so each call overwrites the previous progress report.
    line = "\r\033[Kit={:6d} logz={:8f}".format(info['it'], info['logz'])
    print(line, end='')
    sys.stdout.flush()  # because flush keyword not in print() in py2.7
# -----------------------------------------------------------------------------
# Ellipsoid
class Ellipsoid(object):
    """An N-ellipsoid.

    Defined by::

        (x - v)^T A (x - v) = 1

    where the vector ``v`` is the center of the ellipse and ``A`` is an N x N
    matrix. Assumes that ``A`` is symmetric positive definite.

    Parameters
    ----------
    ctr : `~numpy.ndarray` with shape ``(N,)``
        Coordinates of ellipse center. Note that the array is *not* copied.
        This array is never modified internally.
    a : `~numpy.ndarray` with shape ``(N, N)``
        Matrix describing the axes. Watch out! This array is *not* copied.
        but may be modified internally!
    """

    def __init__(self, ctr, a):
        self.n = len(ctr)
        self.ctr = ctr    # center coordinates
        self.a = a        # ~ inverse of covariance of points contained
        self.vol = vol_prefactor(self.n) / np.sqrt(np.linalg.det(a))

        # eigenvalues (l) are a^-2, b^-2, ... (lengths of principle axes)
        # eigenvectors (v) are normalized principle axes
        l, v = np.linalg.eigh(a)
        self.axlens = 1. / np.sqrt(l)

        # Scaled eigenvectors are the axes: axes[:,i] is the i-th
        # axis. Multiplying this matrix by a vector will transform a
        # point in the unit n-sphere into a point in the ellipsoid.
        self.axes = np.dot(v, np.diag(self.axlens))

    def scale_to_vol(self, vol):
        """Scale ellipsoid to satisfy a target volume."""
        f = (vol / self.vol) ** (1.0 / self.n)  # linear factor
        # Volume scales as f^n; the quadratic form scales inversely (f^-2).
        self.a *= f**-2
        self.axlens *= f
        self.axes *= f
        self.vol = vol

    def major_axis_endpoints(self):
        """Return the endpoints of the major axis"""
        i = np.argmax(self.axlens)  # which is the major axis?
        v = self.axes[:, i]  # vector to end of major axis
        return self.ctr - v, self.ctr + v

    def contains(self, x):
        """Does the ellipse contain the point?"""
        d = x - self.ctr
        return np.dot(np.dot(d, self.a), d) <= 1.0

    def randoffset(self, rstate=np.random):
        """Return an offset from ellipsoid center, randomly distributed
        within ellipsoid."""
        return np.dot(self.axes, randsphere(self.n, rstate=rstate))

    def sample(self, rstate=np.random):
        """Choose a sample randomly distributed within the ellipsoid.

        Returns
        -------
        x : 1-d array
            A single point within the ellipsoid.
        """
        return self.ctr + self.randoffset(rstate=rstate)

    def samples(self, nsamples, rstate=np.random):
        """Choose samples randomly distributed within the ellipsoid.

        Returns
        -------
        x : (nsamples, ndim) array
            Coordinates within the ellipsoid.
        """
        # np.float was removed in NumPy 1.24; the builtin float is the
        # equivalent (float64) dtype.
        x = np.empty((nsamples, self.n), dtype=float)
        for i in range(nsamples):
            x[i, :] = self.sample(rstate=rstate)
        return x

    def __repr__(self):
        return "Ellipsoid(ctr={})".format(self.ctr)
# -----------------------------------------------------------------------------
# Functions for determining the ellipsoid or set of ellipsoids bounding a
# set of points.
def make_eigvals_positive(a, targetprod):
    """For the symmetric square matrix ``a``, increase any zero eigenvalues
    to fulfill the given target product of eigenvalues.

    Parameters
    ----------
    a : (n, n) ndarray
        Symmetric matrix (eigh is used, so only the symmetric part matters).
    targetprod : float
        Desired product of all eigenvalues after the adjustment.

    Returns a (possibly) new matrix; ``a`` is returned unchanged when every
    eigenvalue is already positive."""
    w, v = np.linalg.eigh(a)  # Use eigh because we assume a is symmetric.
    mask = w < 1.e-10
    if np.any(mask):
        # BUG FIX: np.product was deprecated and removed in NumPy 2.0;
        # np.prod is the canonical spelling.
        nzprod = np.prod(w[~mask])  # product of nonzero eigenvalues
        nzeros = mask.sum()  # number of zero eigenvalues
        w[mask] = (targetprod / nzprod) ** (1./nzeros)  # adjust zero eigvals
        a = np.dot(np.dot(v, np.diag(w)), np.linalg.inv(v))  # re-form cov
    return a
def bounding_ellipsoid(x, pointvol=0., minvol=False):
"""Calculate bounding ellipsoid containing a set of points x.
Parameters
----------
x : (npoints, ndim) ndarray
Coordinates of points.
pointvol : float, optional
Used to set a minimum bound on the ellipsoid volume when
minvol is True.
minvol : bool, optional
If True, ensure that ellipsoid volume is at least len(x) * pointvol.
Returns
-------
ellipsoid : Ellipsoid
"""
npoints, ndim = x.shape
# If there is only a single point, return an N-sphere with volume `pointvol`
# centered at the point.
if npoints == 1:
r = (pointvol / vol_prefactor(ndim))**(1./ndim)
return Ellipsoid(x[0], (1. / r**2) * np.identity(ndim))
# Calculate covariance of points
ctr = np.mean(x, axis=0)
delta | |
# <filename>amznas.py  (dataset artifact marker, commented out so the file stays parseable)
#!/usr/bin/env python
# Command line utility for Amazonian Nasality project
# TODO: check --lx param
# TODO: try to prevent lx recording when not requested
try:
import os
import re
import glob
import subprocess
import yaml
import numpy as np
from pathlib import Path
from datetime import datetime as dt
import scipy.io.wavfile
import wave
from eggdisp import egg_display
import click
from phonlab.utils import dir2df, get_timestamp_now
except:
print()
print('Could not import required modules.')
print('Try to load them with:')
print(' conda activate amznas')
print()
exit(0)
datadir = os.path.join(os.environ['USERPROFILE'], 'Desktop', 'amznas')
class AmzCfg(object):
    '''A config for the Amazon Nasality project.

    Loads default `lang` and `researcher` values (3-letter codes) from a
    YAML file in `datadir`, if present. Assigning to either property
    re-validates the code and offers to persist the change.
    '''
    def __init__(self, datadir=datadir, ymlname='amznas.yml'):
        super(AmzCfg, self).__init__()
        self.datadir = datadir
        self.cfgfile = os.path.join(datadir, ymlname)
        self._lang = None
        self._researcher = None
        try:
            with open(self.cfgfile, 'r') as fh:
                cfg = yaml.safe_load(fh)
            for fld in ('lang', 'researcher'):
                try:
                    assert(re.match('^[a-zA-Z]{3}$', cfg[fld]))
                    setattr(self, f'_{fld}', cfg[fld])
                except AssertionError:
                    msg = f'''
The '{fld}' value must be a 3-character code.
You must correct the value in {self.cfgfile} before continuing.
'''
                    raise RuntimeError(msg)
                except KeyError:
                    # Field absent from the config file; default stays None.
                    print(f'No config default for {fld}.')
        except FileNotFoundError:
            # No saved config yet; values must come from the command line.
            pass

    def prompt_for_save(self, fld, val):
        '''Ask whether to persist a changed config value; returns True/False.

        Re-prompts until the user answers 'y' or 'n'.
        '''
        msg = f'''
You have changed the configuration to:
lang: {val if fld == 'lang' else self.lang}
researcher: {val if fld == 'researcher' else self.researcher}
Save this configuration for next time? (y/n)
'''
        r = input(msg).strip().lower()
        if r == 'y':
            return True
        elif r == 'n':
            return False
        else:
            return self.prompt_for_save(fld, val)

    def save(self):
        '''Write the current lang/researcher values to the config file.'''
        with open(self.cfgfile, 'w') as fh:
            yaml.dump(
                {'lang': self.lang, 'researcher': self.researcher},
                fh,
                default_flow_style=False
            )

    @property
    def lang(self):
        # Current 3-letter ISO language code (or None if unset).
        return self._lang

    @lang.setter
    def lang(self, val):
        try:
            assert(re.match('^[a-zA-Z]{3}$', val))
        except AssertionError:
            msg = 'Lang identifier must be a 3-character ISO code.'
            raise RuntimeError(msg)
        if self._lang != val:
            do_save = self.prompt_for_save('lang', val)
            self._lang = val
            if do_save is True:
                self.save()
                print(f'Saved configuration in {self.cfgfile}.')
            else:
                print('Configuration change not saved.')

    @property
    def researcher(self):
        # Current 3-letter researcher code (or None if unset).
        return self._researcher

    @researcher.setter
    def researcher(self, val):
        try:
            assert(re.match('^[a-zA-Z]{3}$', val))
        except AssertionError:
            msg = 'Researcher identifier must be a 3-character code.'
            raise RuntimeError(msg)
        if self._researcher != val:
            # BUG FIX: this was a bare `prompt_for_save(...)`, which raises
            # NameError at runtime; it must be a method call on self (the
            # lang setter already does it correctly).
            do_save = self.prompt_for_save('researcher', val)
            self._researcher = val
            if do_save is True:
                self.save()
                print(f'Saved configuration in {self.cfgfile}.')
            else:
                print('Configuration change not saved.')
def validate_ident(ctx, param, value):
    # Click option callback: validate a 3-letter identifier and lowercase it.
    # For --researcher/--lang a missing value falls back to the config.
    # NOTE(review): `cfg` and `cfgfile` are not defined in this function and
    # are presumably module-level globals set elsewhere in the file — verify.
    # Also `cfg[param.name]` treats cfg as a mapping, while AmzCfg exposes
    # attributes; TODO confirm which object is intended here.
    if value is None and param.name in ('researcher', 'lang'):
        try:
            value = cfg[param.name]
        except KeyError:
            raise click.BadParameter(f'must be included as a command parameter or in the config file {cfgfile}.')
    try:
        # Exactly three ASCII letters, any case.
        assert(re.match('^[a-zA-Z]{3}$', value))
    except AssertionError:
        raise click.BadParameter(f'Identifier "{value}" must be exactly three characters')
    return value.lower()
def next_token(sessdir, lang, spkr, researcher, tstamp, item):
    '''Get the number of the next token for a .wav acquisition file, as a str.

    Scans `sessdir` for existing acquisitions of `item` on the same date and
    returns max(token)+1, or '0' when none exist.
    '''
    date = tstamp.split('T')[0]
    token = '0'
    # 1. Windows filesystems are case-insensitive. If the project's
    # transcription system distinguishes phone by case, e.g. s vs. S, then it
    # is not possible to distinguish items that differ only in case of one
    # or more characters. As a result we use re.IGNORECASE when matching
    # filenames, and the token count conflates these items.
    #
    # 2. Only the date portion of the timestamp is important
    # for determining the token number, and the time portion is ignored.
    #
    # BUG FIX: the pattern must be a raw f-string so `\d` is a regex escape;
    # in a non-raw string it is an invalid string escape (SyntaxWarning on
    # Python 3.12+, error in future versions).
    fnpat = re.compile(
        rf'^{lang}_{spkr}_{researcher}_{date}[^_]*_{item}_(?P<token>\d+)\.wav$',
        re.IGNORECASE
    )
    df = dir2df(sessdir, fnpat=fnpat)
    if len(df) > 0:
        token = df['token'].astype(int).max() + 1
    return str(token)
def get_fpath(sessdir, lang, spkr, researcher, tstamp, item, token=None):
    '''Construct and return filepath for acquisition .wav file.

    Parameters
    ----------
    token : int or None
        None selects the next free token number; a negative value counts
        back from it (-1 = last existing token); a non-negative value is
        used as-is.

    Returns (token, wavpath, inipath).
    '''
    # Idiom fix: compare to None with `is`, not `==` (PEP 8 E711).
    if token is None or token < 0:
        nexttok = next_token(sessdir, lang, spkr, researcher, tstamp, item)
        token = int(nexttok) if token is None else int(nexttok) + token
    fname = f'{lang}_{spkr}_{researcher}_{tstamp}_{item}_{token}'
    return (
        token,
        os.path.join(sessdir, f'{fname}.wav'),
        os.path.join(sessdir, f'{fname}.ini')
    )
def find_wav(sessdir, lang, spkr, researcher, date, item, token):
    '''Find existing acquisition .wav file(s).

    The time-of-day portion of the timestamp is wildcarded; returns a
    (possibly empty) list of matching paths.
    '''
    pattern = f'{lang}_{spkr}_{researcher}_{date}T??????_{item}_{token}.wav'
    return glob.glob(os.path.join(sessdir, pattern))
def get_ini(lx, spkr, item, token, utt, dev_version):
    '''Return string rep of ini file for the recorder.'''
    # Channel selection bitmask differs between hardware revisions.
    if dev_version == '1':
        chansel = '00001111'
    else:
        chansel = '00111001'
    lxstr = '011' if lx is True else '0'
    return f'''
[Device]
ChannelSelection = {chansel}
Lx = {lxstr}
SampleRate = 120000
MICGAIN = 4
LXGAIN = 2
NXGAIN = 4
NXPREAMP = 0
[Subject]
ID = {spkr}
Surname =
Firstname =
UtteranceID = {item}_{token}
Utterance = {utt}
'''
def run_acq(fpath, inifile, seconds):
    '''Run an acquisition.

    Launches the external Recorder.exe and blocks until it exits or the
    user presses Ctrl-C.
    '''
    args = [
        os.path.normpath('C:/bin/Recorder.exe'),
        '-ini', inifile,
        '-of', fpath
    ]
    msg = 'Acquiring. Press Ctrl-C to stop.'
    # BUG FIX: the caller's default for `seconds` is the empty string, so
    # testing `is not None` passed "-tm ''" to the recorder. Treat any
    # falsy value (None or '') as "no time limit".
    if seconds:
        args.extend(['-tm', seconds])
        msg = f'Acquiring for {seconds} seconds.'
    # NOTE(review): `msg` is built but never printed — possibly a missing
    # print(msg); confirm intended behavior before adding it.
    try:
        subprocess.run(args)
    except KeyboardInterrupt:
        # Manual stop is the normal way to end an untimed acquisition.
        pass
def stash_chanmeans(wav, chan, token, sessdir, lang, spkr, researcher, today):
    '''
    Store channel means in a yaml file in the session directory.

    Reads the acquisition at `wav`, computes per-channel means, and appends
    an entry for the special `_zero_` item to the session metadata YAML
    (creating the skeleton structure when the file does not exist yet).
    '''
    yamlfile = os.path.join(
        sessdir,
        f'{lang}_{spkr}_{today}_session.yaml'
    )
    try:
        with open(yamlfile, 'r') as fh:
            sessmd = yaml.safe_load(fh)
    except FileNotFoundError:
        # First acquisition of the session: start a fresh metadata skeleton.
        sessmd = {
            'session': {
                'spkr': spkr,
                'lang': lang,
            },
            'acq': []
        }
    (rate, data) = scipy.io.wavfile.read(wav)
    cmeans = data.mean(axis=0)  # one mean per channel (column)
    chanmeans = []
    for cidx, c in enumerate(chan):
        # Unused channel slots (None/'') get a placeholder label.
        label = 'no_label' if c is None or c == '' else c
        chanmeans.append({
            'idx': cidx,
            'type': label,
            # If we don't cast to float yaml.dump exports the value
            # as a numpy object instead of a simple float.
            'mean': float(cmeans[cidx]),
            'status': 'automean'
        })
    sessmd['acq'].append({
        'item': '_zero_',
        'token': token,
        'researcher': researcher,
        'fname': os.path.basename(wav),
        'channels': chanmeans
    })
    with open(yamlfile, 'w') as fh:
        yaml.dump(sessmd, fh, sort_keys=False)
def load_sess_yaml(sessdir, lang, spkr, today):
    '''
    Load session metadata from yaml file.

    Returns the parsed metadata dict, or a fresh skeleton when the session
    file does not exist yet.
    '''
    yamlfile = os.path.join(sessdir, f'{lang}_{spkr}_{today}_session.yaml')
    try:
        fh = open(yamlfile, 'r')
    except FileNotFoundError:
        # No session file yet: return an empty metadata skeleton.
        return {
            'session': {'spkr': spkr, 'lang': lang},
            'acq': []
        }
    with fh:
        return yaml.safe_load(fh)
def wav_display(wav, chan, cutoff, lporder, chanmeans):
    '''Load an acquisition .wav and show it in the egg_display viewer,
    removing per-channel DC offsets first when means are supplied.'''
    rate, data = scipy.io.wavfile.read(wav)
    # Only subtract offsets when a mean was stored for every channel.
    if len(chanmeans) == data.shape[1]:
        data -= np.array(chanmeans).astype(data.dtype)
    egg_display(
        data,
        rate,
        chan=chan,
        del_btn=None,
        title=wav,
        cutoff=cutoff,
        order=lporder,
        acqfile=wav
    )
@click.group()
def cli():
    # Root click command group; subcommands (acq, disp) attach to this.
    pass
@cli.command()
@click.option('--spkr', callback=validate_ident, help='Three-letter speaker identifier')
@click.option('--lang', callback=validate_ident, help='Three-letter language identifier (ISO 639-3)')
@click.option('--researcher', callback=validate_ident, help='Three-letter researcher (linguist) identifier')
@click.option('--item', help='Representation of the stimulus item')
@click.option('--utt', required=False, default='', help='Utterance metadata (optional)')
@click.option('--seconds', required=False, default='', help='Acquisition duration (optional)')
@click.option('--autozero', required=False, default='0', type=int, help='Remove mean from display using _zero_ token # (optional)')
@click.option('--lx', is_flag=True, help='Turn on LX (EGG) channel')
@click.option('--no-disp', is_flag=True, help='Skip display after acquisition')
@click.option('--cutoff', required=False, default=50, help='Lowpass filter cutoff in Hz (optional; default 50)')
@click.option('--lporder', required=False, default=3, help='Lowpass filter order (optional; default 3)')
@click.option('--dev-version', required=False, default='2', help='EGG-D800 device version (optional; default 2)')
def acq(spkr, lang, researcher, item, utt, seconds, autozero, lx, no_disp, cutoff, lporder, dev_version):
    '''
    Make a recording.
    '''
    # Session directory layout: datadir/<lang>/<spkr>/<YYYYMMDD>.
    today = dt.today()
    todaystamp = dt.strftime(today, '%Y%m%d')
    tstamp = dt.strftime(today, '%Y%m%dT%H%M%S')
    sessdir = os.path.join(datadir, lang, spkr, todaystamp)
    Path(sessdir).mkdir(parents=True, exist_ok=True)
    # token=None allocates the next free token number for this item/date.
    token, fpath, inifile = get_fpath(
        sessdir, lang, spkr, researcher, tstamp, item, token=None
    )
    # Write the recorder .ini, then launch the acquisition.
    ini = get_ini(lx, spkr, item, token, utt, dev_version)
    with open(inifile, 'w') as out:
        out.write(ini)
    run_acq(fpath, inifile, seconds)
    # Channel order in the recorded file; slot 2 carries LX (EGG) if enabled.
    chan = ['audio', 'orfl', None, 'nsfl']
    if lx is True:
        chan[2] = 'lx'
    if item == '_zero_':
        # A '_zero_' item records silence whose channel means are stored for
        # later DC-offset removal.
        stash_chanmeans(
            fpath,
            chan=chan,
            token=token,
            sessdir=sessdir,
            lang=lang,
            spkr=spkr,
            researcher=researcher,
            today=todaystamp
        )
    if no_disp is False:
        if autozero >= 0 and item != '_zero_':
            # Look up the stored channel means for the requested _zero_ token.
            sessmd = load_sess_yaml(
                sessdir, lang=lang, spkr=spkr, today=todaystamp
            )
            chanmeans = []
            for a in sessmd['acq']:
                # NOTE(review): assumes the stored token and `autozero` have
                # comparable types (both ints) — confirm after a YAML round trip.
                if a['item'] == '_zero_' and a['token'] == autozero:
                    chanmeans = np.zeros(len(a['channels']))
                    for c in a['channels']:
                        # Only airflow channels get their DC offset removed.
                        if c['type'] in ('orfl', 'nsfl'):
                            chanmeans[c['idx']] = c['mean']
                    break
            if len(chanmeans) == 0:
                print(f"Didn't find _zero_ token {autozero} for the current session!")
        else:
            chanmeans = []  # No adjustment
        wav_display(
            fpath,
            chan=chan,
            cutoff=cutoff,
            lporder=lporder,
            chanmeans=chanmeans
        )
@cli.command()
@click.option('--wavfile', required=False, default=None, help="Input .wav file")
@click.option('--spkr', callback=validate_ident, help='Three-letter speaker identifier')
@click.option('--lang', callback=validate_ident, help='Three-letter language identifier (ISO 639-3)')
@click.option('--researcher', callback=validate_ident, help='Three-letter researcher (linguist) identifier')
@click.option('--item', help='Representation of the stimulus item')
@click.option('--date', required=False, default='today', help="YYYYMMDD session date")
@click.option('--token', type=int, required=False, default=-1, help="Token identifier (optional; defaults to last token)")
@click.option('--autozero', required=False, default='0', type=int, help='Remove mean from display using _zero_ token (optional)')
@click.option('--cutoff', required=False, default=50, help='Lowpass filter cutoff in Hz (optional; default 50)')
@click.option('--lporder', required=False, default=3, help='Lowpass filter order (optional; default 3)')
def disp(wavfile, spkr, lang, researcher, item, date, token, autozero, cutoff,
lporder):
'''
Display an eggd800 wavfile recording. If given, the --wavfile parameter
identifies the .wav file to display. Otherwise, the name is constructed
from the other parameters in a way that matches the acq() parameters.
The --token parameter is used to specify the token identifier.
Use negative values to count tokens in reverse: -1 for last token,
-2 for second-to-last, and so on.
The --autozero parameter is used to specify which _zero_ token from the
acquisition | |
# This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) <NAME> <<EMAIL>>
# This program is published under a GPLv2 license
# flake8: noqa: E501
"""
Unit testing infrastructure for Scapy
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import getopt
import glob
import importlib
import hashlib
import copy
import bz2
import os.path
import time
import traceback
import warnings
import zlib
from scapy.consts import WINDOWS
import scapy.modules.six as six
from scapy.modules.six.moves import range
from scapy.compat import base64_bytes, bytes_hex, plain_str
# Util class #
class Bunch:
    """Simple attribute bag: ``Bunch(a=1).a == 1``."""

    # A real method (instead of a lambda assigned to __init__, flake8 E731)
    # keeps tracebacks and introspection sane; behavior is identical.
    def __init__(self, **kw):
        self.__dict__ = kw
def retry_test(func):
    """Retries the passed function 3 times before failing"""
    last_exc = Exception("Unknown")
    for _attempt in range(3):
        try:
            result = func()
        except Exception as err:
            # Back off briefly, remember the failure, and try again.
            time.sleep(1)
            last_exc = err
        else:
            return result
    raise last_exc
# Import tool #
def import_module(name):
    """Import a test module by name, trying the scapy package first."""
    # Accept a plain filename ("foo.py") as well as a module name.
    modname = name[:-3] if name.endswith(".py") else name
    try:
        return importlib.import_module(modname, package="scapy")
    except Exception:
        # Fall back to a plain absolute import.
        return importlib.import_module(modname)
# INTERNAL/EXTERNAL FILE EMBEDDING #
class File:
    """An external asset embedded as bz2+base64 data, with a fallback URL."""

    def __init__(self, name, URL, local):
        self.name = name
        self.local = local.encode("utf8")  # stored as bytes for decoding later
        self.URL = URL

    def get_local(self):
        """Return the decompressed local copy of the file contents."""
        return bz2.decompress(base64_bytes(self.local))

    def get_URL(self):
        """Return the remote URL for the file."""
        return self.URL

    def write(self, dir):
        """Write the local copy under directory ``dir`` (or CWD if empty)."""
        if dir:
            dir += "/"
        target = dir + self.name
        with open(target, "wb") as out:
            out.write(self.get_local())
# Embed a base64 encoded bziped version of js and css files
# to work if you can't reach Internet.
class External_Files:
    """Registry of embedded js/css assets used by the HTML report output."""

    UTscapy_js = File("UTscapy.js", "https://scapy.net/files/UTscapy/UTscapy.js",  # noqa: E501
                      """<KEY>""")
    UTscapy_css = File("UTscapy.css", "https://scapy.net/files/UTscapy/UTscapy.css",  # noqa: E501
                       """<KEY>PKjP60EqqlDUaST
/i7kinChIXSAmRgA==\n""")

    @classmethod
    def get_local_dict(cls):
        """Map attribute name -> local filename for every embedded File."""
        return {x: y.name for (x, y) in six.iteritems(cls.__dict__)
                if isinstance(y, File)}

    @classmethod
    def get_URL_dict(cls):
        """Map attribute name -> remote URL for every embedded File."""
        return {x: y.URL for (x, y) in six.iteritems(cls.__dict__)
                if isinstance(y, File)}
# HELPER CLASSES FOR PARAMETRING OUTPUT FORMAT #
class EnumClass:
    """Minimal enum helper: resolve an uppercase class attribute by name."""

    @classmethod
    def from_string(cls, x):
        return cls.__dict__[x.upper()]
class Format(EnumClass):
    # Supported report output formats; values are arbitrary unique ids.
    TEXT = 1
    ANSI = 2
    HTML = 3
    LATEX = 4
    XUNIT = 5
# TEST CLASSES #
class TestClass:
    """Base for campaign/set/test objects: provides mapping-style access
    (for ``"%(name)s" % obj`` formatting) and keyword management."""

    def __getitem__(self, item):
        # Lets instances act as the mapping argument of %-formatting.
        return getattr(self, item)

    def add_keywords(self, kws):
        """Add keywords (str or iterable); a '-' prefix removes instead."""
        if isinstance(kws, six.string_types):
            kws = [kws.lower()]
        for raw in kws:
            keyword = raw.lower()
            if keyword.startswith('-'):
                # discard() == try/remove/except KeyError of the original.
                self.keywords.discard(keyword[1:])
            else:
                self.keywords.add(keyword)
class TestCampaign(TestClass):
    """A whole campaign: an ordered list of test sets plus metadata."""

    def __init__(self, title):
        self.title = title
        self.filename = None
        self.headcomments = ""
        self.campaign = []        # ordered TestSet objects
        self.keywords = set()
        self.crc = None
        self.sha = None
        self.preexec = None
        self.preexec_output = None
        self.end_pos = 0
        self.interrupted = False

    def add_testset(self, testset):
        self.campaign.append(testset)
        # Campaign-level keywords propagate down to each set.
        testset.keywords.update(self.keywords)

    def trunc(self, index):
        self.campaign = self.campaign[:index]

    def startNum(self, beginpos):
        """Number every test sequentially starting at ``beginpos``."""
        for testset in self:
            for test in testset:
                test.num = beginpos
                beginpos += 1
        self.end_pos = beginpos

    def __iter__(self):
        return iter(self.campaign)

    def all_tests(self):
        """Yield every test in campaign order."""
        for testset in self:
            for test in testset:
                yield test
class TestSet(TestClass):
    """A named group of unit tests inside a campaign."""

    def __init__(self, name):
        self.name = name
        self.tests = []
        self.comments = ""
        self.keywords = set()
        self.crc = None
        self.expand = 1

    def add_test(self, test):
        self.tests.append(test)
        # Set-level keywords propagate down to the test.
        test.keywords.update(self.keywords)

    def trunc(self, index):
        self.tests = self.tests[:index]

    def __iter__(self):
        return iter(self.tests)
class UnitTest(TestClass):
    """A single test: its code, outcome and output; truthy iff it passed."""

    def __init__(self, name):
        self.name = name
        self.test = ""
        self.comments = ""
        # "passed" at init so a fresh instance is truthy (unlike None).
        self.result = "passed"
        self.output = ""
        self.num = -1
        self.keywords = set()
        self.crc = None
        self.expand = 1

    def decode(self):
        """Decode byte fields to text when running under Python 2."""
        if six.PY2:
            for attr in ("test", "output", "comments", "result"):
                setattr(self, attr, getattr(self, attr).decode("utf8", "ignore"))

    def __nonzero__(self):
        return self.result == "passed"
    __bool__ = __nonzero__
# Careful note: all data not included will be set by default.
# Use -c as first argument !!
def parse_config_file(config_path, verb=3):
    """Parse provided json to get configuration
    Empty default json:
    {
        "testfiles": [],
        "breakfailed": true,
        "onlyfailed": false,
        "verb": 3,
        "dump": 0,
        "crc": true,
        "scapy": "scapy",
        "preexec": {},
        "global_preexec": "",
        "outputfile": null,
        "local": true,
        "format": "ansi",
        "num": null,
        "modules": [],
        "kw_ok": [],
        "kw_ko": []
    }
    """
    import json
    with open(config_path) as config_file:
        # BUG FIX: json.load() lost its `encoding` parameter in Python 3.9;
        # passing it raises TypeError. JSON input is already text here.
        data = json.load(config_file)
    if verb > 2:
        print("### Loaded config file", config_path, file=sys.stderr)

    def get_if_exist(key, default):
        # dict lookup with a per-key fallback default
        return data[key] if key in data else default
    return Bunch(testfiles=get_if_exist("testfiles", []),
                 breakfailed=get_if_exist("breakfailed", True),
                 remove_testfiles=get_if_exist("remove_testfiles", []),
                 onlyfailed=get_if_exist("onlyfailed", False),
                 verb=get_if_exist("verb", 3),
                 dump=get_if_exist("dump", 0), crc=get_if_exist("crc", 1),
                 scapy=get_if_exist("scapy", "scapy"),
                 preexec=get_if_exist("preexec", {}),
                 global_preexec=get_if_exist("global_preexec", ""),
                 outfile=get_if_exist("outputfile", sys.stdout),
                 local=get_if_exist("local", False),
                 num=get_if_exist("num", None),
                 modules=get_if_exist("modules", []),
                 kw_ok=get_if_exist("kw_ok", []),
                 kw_ko=get_if_exist("kw_ko", []),
                 format=get_if_exist("format", "ansi"))
# PARSE CAMPAIGN #
def parse_campaign_file(campaign_file):
    """Parse a UTscapy campaign file into a TestCampaign tree.

    Line prefixes drive the parser state:
      '#' comment, '~' keywords for the innermost open object, '%' campaign
      title, '+' start a test set, '=' start a unit test, '*' comment
      attached to the current object; any other line is test code appended
      to the current test.
    """
    test_campaign = TestCampaign("Test campaign")
    test_campaign.filename = campaign_file.name
    testset = None
    test = None
    testnb = 0
    for l in campaign_file.readlines():
        if l[0] == '#':
            continue
        if l[0] == "~":
            # Keywords apply to the most specific object currently open.
            (test or testset or test_campaign).add_keywords(l[1:].split())
        elif l[0] == "%":
            test_campaign.title = l[1:].strip()
        elif l[0] == "+":
            testset = TestSet(l[1:].strip())
            test_campaign.add_testset(testset)
            test = None
        elif l[0] == "=":
            test = UnitTest(l[1:].strip())
            test.num = testnb
            testnb += 1
            if testset is None:
                # A test must live inside a test set.
                error_m = "Please create a test set (i.e. '+' section)."
                raise getopt.GetoptError(error_m)
            testset.add_test(test)
        elif l[0] == "*":
            # Attach comments to the most specific open object.
            if test is not None:
                test.comments += l[1:]
            elif testset is not None:
                testset.comments += l[1:]
            else:
                test_campaign.headcomments += l[1:]
        else:
            if test is None:
                if l.strip():
                    print("Unknown content [%s]" % l.strip(), file=sys.stderr)
            else:
                test.test += l
    return test_campaign
def dump_campaign(test_campaign):
    """Print a human-readable outline of the campaign structure."""
    banner = "#" * (len(test_campaign.title) + 6)
    print(banner)
    print("## %(title)s ##" % test_campaign)
    print(banner)
    if test_campaign.sha and test_campaign.crc:
        print("CRC=[%(crc)s] SHA=[%(sha)s]" % test_campaign)
    print("from file %(filename)s" % test_campaign)
    print()
    for testset in test_campaign:
        # Test-set header, padded to ~80 columns.
        if testset.crc:
            print("+--[%s]%s(%s)--" % (testset.name, "-" * max(2, 80 - len(testset.name) - 18), testset.crc))  # noqa: E501
        else:
            print("+--[%s]%s" % (testset.name, "-" * max(2, 80 - len(testset.name) - 6)))
        if testset.keywords:
            print("  kw=%s" % ",".join(testset.keywords))
        for t in testset:
            print("%(num)03i %(name)s" % t)
            crc_part = kw_part = ""
            if t.keywords:
                kw_part = "kw=%s" % ",".join(t.keywords)
            if t.crc:
                crc_part = "[%(crc)s] " % t
            if crc_part or kw_part:
                print("    %s%s" % (crc_part, kw_part))
# COMPUTE CAMPAIGN DIGESTS #
# Digest helpers: py2 takes byte strings directly; py3 encodes text first.
if six.PY2:
    def crc32(x):
        # Zero-padded uppercase hex CRC32; mask keeps the value unsigned.
        return "%08X" % (0xffffffff & zlib.crc32(x))

    def sha1(x):
        # Uppercase hex SHA-1 digest.
        return hashlib.sha1(x).hexdigest().upper()
else:
    def crc32(x):
        # Same as the py2 variant, but encode the text to bytes first.
        return "%08X" % (0xffffffff & zlib.crc32(bytearray(x, "utf8")))

    def sha1(x):
        # Uppercase hex SHA-1 digest of the utf8-encoded text.
        return hashlib.sha1(x.encode("utf8")).hexdigest().upper()
def compute_campaign_digests(test_campaign):
    """Compute CRC digests for each test/set/campaign and a file SHA-1."""
    campaign_blob = ""
    for testset in test_campaign:
        set_blob = ""
        for test in testset:
            body = test.test.strip()
            test.crc = crc32(body)
            set_blob += "\0" + body
        testset.crc = crc32(set_blob)
        campaign_blob += "\0\x01" + set_blob
    test_campaign.crc = crc32(campaign_blob)
    # SHA-1 covers the raw campaign file, not the parsed structure.
    with open(test_campaign.filename) as fdesc:
        test_campaign.sha = sha1(fdesc.read())
# FILTER CAMPAIGN #
def filter_tests_on_numbers(test_campaign, num):
    """Keep only tests whose number is in ``num``; drop emptied test sets."""
    if not num:
        return
    for testset in test_campaign:
        testset.tests = [t for t in testset.tests if t.num in num]
    test_campaign.campaign = [ts for ts in test_campaign.campaign
                              if ts.tests]
def _filter_tests_kw(test_campaign, kw, keep):
    """Filter tests by keyword: retain matches when ``keep``, else drop them."""
    if not kw:
        return
    kw = kw.lower()

    def matches(test):
        return any(k == kw for k in test.keywords)

    for testset in test_campaign:
        testset.tests = [t for t in testset.tests if matches(t) == keep]
def filter_tests_keep_on_keywords(test_campaign, kw):
    # Keep only tests tagged with keyword `kw` (no-op when kw is falsy).
    return _filter_tests_kw(test_campaign, kw, True)
def filter_tests_remove_on_keywords(test_campaign, kw):
    # Drop tests tagged with keyword `kw` (no-op when kw is falsy).
    return _filter_tests_kw(test_campaign, kw, False)
def remove_empty_testsets(test_campaign):
    """Drop test sets that no longer contain any tests."""
    test_campaign.campaign = list(filter(lambda ts: ts.tests,
                                         test_campaign.campaign))
#### RUN TEST #####
def run_test(test, get_interactive_session, verb=3, ignore_globals=None, my_globals=None):
    """An internal UTScapy function to run a single test"""
    # Execute the test body; `res` is the value of the last expression.
    test.output, res = get_interactive_session(test.test.strip(), ignore_globals=ignore_globals, verb=verb, my_globals=my_globals)
    test.result = "failed"
    try:
        # None (no final expression) counts as success, as does any truthy res.
        if res is None or res:
            test.result = "passed"
        if test.output.endswith('KeyboardInterrupt\n'):
            # Propagate a user interrupt out to the campaign loop.
            test.result = "interrupted"
            raise KeyboardInterrupt
    except Exception:
        test.output += "UTscapy: Error during result interpretation:\n"
        test.output += "".join(traceback.format_exception(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2],))
    finally:
        if test.result == "failed":
            from scapy.sendrecv import debug
            # Add optional debugging data to log
            if debug.crashed_on:
                cls, val = debug.crashed_on
                test.output += "\n\nPACKET DISSECTION FAILED ON:\n %s(hex_bytes('%s'))" % (cls.__name__, plain_str(bytes_hex(val)))
                debug.crashed_on = None
        test.decode()
        if verb > 1:
            print("%(result)6s %(crc)s %(name)s" % test, file=sys.stderr)
    return bool(test)
#### RUN CAMPAIGN #####
def import_UTscapy_tools(ses):
    """Adds UTScapy tools directly to a session"""
    # Expose helpers by name so campaign test code can use them directly.
    ses["retry_test"] = retry_test
    ses["Bunch"] = Bunch
def run_campaign(test_campaign, get_interactive_session, verb=3, ignore_globals=None):  # noqa: E501
    """Run every test of the campaign; returns the number of failures."""
    passed = failed = 0
    # One shared scapy namespace serves all tests in the campaign.
    scapy_ses = importlib.import_module(".all", "scapy").__dict__
    import_UTscapy_tools(scapy_ses)
    if test_campaign.preexec:
        test_campaign.preexec_output = get_interactive_session(test_campaign.preexec.strip(), ignore_globals=ignore_globals, my_globals=scapy_ses)[0]
    try:
        for i, testset in enumerate(test_campaign):
            for j, t in enumerate(testset):
                if run_test(t, get_interactive_session, verb, my_globals=scapy_ses):
                    passed += 1
                else:
                    failed += 1
    except KeyboardInterrupt:
        # Count the interrupted test as failed and truncate the campaign so
        # reports cover only what actually ran.
        failed += 1
        testset.trunc(j+1)
        test_campaign.trunc(i+1)
        test_campaign.interrupted = True
        if verb:
            print("Campaign interrupted!", file=sys.stderr)
    test_campaign.passed = passed
    test_campaign.failed = failed
    if verb:
        print("Campaign CRC=%(crc)s SHA=%(sha)s" % test_campaign, file=sys.stderr)  # noqa: E501
        print("PASSED=%i FAILED=%i" % (passed, failed), file=sys.stderr)
    return failed
# INFO LINES #
def info_line(test_campaign):
filename = test_campaign.filename
if filename is None:
return "Run %s by UTscapy" % time.ctime()
else:
return "Run | |
and self.default is not None:
_dict['Default'] = self.default
if hasattr(self, 'sw') and self.sw is not None:
_dict['SW'] = self.sw.to_dict()
if hasattr(self, 'pkc_s11') and self.pkc_s11 is not None:
_dict['PKCS11'] = self.pkc_s11.to_dict()
return _dict
    def _to_dict(self):
        """Return a json dictionary representing this model."""
        # Internal alias for to_dict(), kept for SDK compatibility.
        return self.to_dict()
    def __str__(self) -> str:
        """Return a `str` version of this Bccsp object."""
        # Pretty-printed JSON (2-space indent) of the serialized model.
        return json.dumps(self.to_dict(), indent=2)
    def __eq__(self, other: 'Bccsp') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        if not isinstance(other, self.__class__):
            # Different types never compare equal.
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other: 'Bccsp') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        # Explicit mirror of __eq__ (generated-SDK convention).
        return not self == other
    class DefaultEnum(str, Enum):
        """
        The name of the crypto library implementation to use for the BlockChain Crypto
        Service Provider (bccsp). Defaults to `SW`.
        """
        # Software-based crypto provider.
        SW = 'SW'
        # Hardware (HSM) crypto provider via PKCS#11.
        PKCS11 = 'PKCS11'
class BccspPKCS11():
    """
    Hardware-based blockchain crypto provider.
    :attr str label: Token Label.
    :attr str pin: The user PIN.
    :attr str hash: (optional) The hash family to use for the BlockChain Crypto
          Service Provider (bccsp).
    :attr float security: (optional) The length of hash to use for the BlockChain
          Crypto Service Provider (bccsp).
    """

    def __init__(self,
                 label: str,
                 pin: str,
                 *,
                 hash: str = None,
                 security: float = None) -> None:
        """
        Initialize a BccspPKCS11 object.
        :param str label: Token Label.
        :param str pin: The user PIN.
        :param str hash: (optional) The hash family to use for the BlockChain
               Crypto Service Provider (bccsp).
        :param float security: (optional) The length of hash to use for the
               BlockChain Crypto Service Provider (bccsp).
        """
        self.label = label
        self.pin = pin
        self.hash = hash
        self.security = security

    # Table of (JSON key, constructor argument, required?) pairs used by the
    # serialization helpers below.
    _FIELDS = (('Label', 'label', True),
               ('Pin', 'pin', True),
               ('Hash', 'hash', False),
               ('Security', 'security', False))

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'BccspPKCS11':
        """Initialize a BccspPKCS11 object from a json dictionary."""
        args = {}
        for json_key, arg_name, required in cls._FIELDS:
            if json_key in _dict:
                args[arg_name] = _dict.get(json_key)
            elif required:
                raise ValueError('Required property \'' + json_key + '\' not present in BccspPKCS11 JSON')
        return cls(**args)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a BccspPKCS11 object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        result = {}
        for json_key, arg_name, _required in self._FIELDS:
            value = getattr(self, arg_name, None)
            if value is not None:
                result[json_key] = value
        return result

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this BccspPKCS11 object."""
        return json.dumps(self.to_dict(), indent=2)

    def __eq__(self, other: 'BccspPKCS11') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        if isinstance(other, self.__class__):
            return self.__dict__ == other.__dict__
        return False

    def __ne__(self, other: 'BccspPKCS11') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class BccspSW():
    """
    Software based blockchain crypto provider.
    :attr str hash: The hash family to use for the BlockChain Crypto Service
          Provider (bccsp).
    :attr float security: The length of hash to use for the BlockChain Crypto
          Service Provider (bccsp).
    """

    def __init__(self,
                 hash: str,
                 security: float) -> None:
        """
        Initialize a BccspSW object.
        :param str hash: The hash family to use for the BlockChain Crypto Service
               Provider (bccsp).
        :param float security: The length of hash to use for the BlockChain Crypto
               Service Provider (bccsp).
        """
        self.hash = hash
        self.security = security

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'BccspSW':
        """Initialize a BccspSW object from a json dictionary."""
        args = {}
        # Both fields are required; check them in declaration order so the
        # first missing one is reported.
        for json_key, arg_name in (('Hash', 'hash'), ('Security', 'security')):
            if json_key not in _dict:
                raise ValueError('Required property \'' + json_key + '\' not present in BccspSW JSON')
            args[arg_name] = _dict.get(json_key)
        return cls(**args)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a BccspSW object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        result = {}
        for arg_name, json_key in (('hash', 'Hash'), ('security', 'Security')):
            value = getattr(self, arg_name, None)
            if value is not None:
                result[json_key] = value
        return result

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this BccspSW object."""
        return json.dumps(self.to_dict(), indent=2)

    def __eq__(self, other: 'BccspSW') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        if isinstance(other, self.__class__):
            return self.__dict__ == other.__dict__
        return False

    def __ne__(self, other: 'BccspSW') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other
class CaResponse():
"""
Contains the details of a CA.
:attr str id: (optional) The unique identifier of this component. Must start
with a letter, be lowercase and only contain letters and numbers. If `id` is not
provide a component id will be generated using the field `display_name` as the
base.
:attr str dep_component_id: (optional) The unique id for the component in
Kubernetes. Not available if component was imported.
:attr str display_name: (optional) A descriptive name for this CA. The IBP
console tile displays this name.
:attr str api_url: (optional) The gRPC URL for the peer. Typically, client
applications would send requests to this URL. Include the protocol, hostname/ip
and port.
:attr str operations_url: (optional) The operations URL for the CA. Include the
protocol, hostname/ip and port.
:attr object config_override: (optional) The **cached** configuration override
that was set for the Kubernetes deployment. Field does not exist if an override
was not set of if the component was imported.
:attr str location: (optional) Indicates where the component is running.
:attr MspCryptoField msp: (optional) The msp crypto data.
:attr CaResponseResources resources: (optional) The **cached** Kubernetes
resource attributes for this component. Not available if CA was imported.
:attr str scheme_version: (optional) The versioning of the IBP console format of
this JSON.
:attr CaResponseStorage storage: (optional) The **cached** Kubernetes storage
attributes for this component. Not available if CA was imported.
:attr List[str] tags: (optional)
:attr float timestamp: (optional) UTC UNIX timestamp of component onboarding to
the UI. In milliseconds.
:attr str version: (optional) The cached Hyperledger Fabric release version.
:attr str zone: (optional) Specify the Kubernetes zone for the deployment. The
deployment will use a k8s node in this zone. Find the list of possible zones by
retrieving your Kubernetes node labels: `kubectl get nodes --show-labels`. [More
information](https://kubernetes.io/docs/setup/best-practices/multiple-zones).
"""
def __init__(self,
*,
id: str = None,
dep_component_id: str = None,
display_name: str = None,
api_url: str = None,
operations_url: str = None,
config_override: object = None,
location: str = None,
msp: 'MspCryptoField' = None,
resources: 'CaResponseResources' = None,
scheme_version: str = None,
storage: 'CaResponseStorage' = None,
tags: List[str] = None,
timestamp: float = None,
version: str = None,
zone: str = None) -> None:
"""
Initialize a CaResponse object.
:param str id: (optional) The unique identifier of this component. Must
start with a letter, be lowercase and only contain letters and numbers. If
          `id` is not provided, a component id will be generated using the field
`display_name` as the base.
:param str dep_component_id: (optional) The unique id for the component in
Kubernetes. Not available if component was imported.
:param str display_name: (optional) A descriptive name for this CA. The IBP
console tile displays this name.
:param str api_url: (optional) The gRPC URL for the peer. Typically, client
applications would send requests to this URL. Include the protocol,
hostname/ip and port.
:param str operations_url: (optional) The operations URL for the CA.
Include the protocol, hostname/ip and port.
:param object config_override: (optional) The **cached** configuration
override that was set for the Kubernetes deployment. Field does not exist
          if an override was not set or if the component was imported.
:param str location: (optional) Indicates where the component is running.
:param MspCryptoField msp: (optional) The msp crypto | |
<gh_stars>100-1000
# Software License Agreement (BSD License)
#
# Copyright (c) 2008, <NAME>, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of <NAME>, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import genmsg.msgs
from genmsg.msgs import MsgSpec
from genmsg.msg_loader import MsgContext
def get_test_dir():
    """Return the absolute path of the 'files' directory next to this module."""
    here = os.path.dirname(__file__)
    return os.path.abspath(os.path.join(here, 'files'))
def test_is_special():
    """The three ROS builtin "special" types must be recognized as such."""
    import genpy.generator
    for special_type in ('time', 'duration', 'Header'):
        assert genpy.generator.is_special(special_type), special_type
def test_Simple():
    """Check import/constructor/post-deserialize metadata of the special types."""
    import genpy.generator
    get_special = genpy.generator.get_special
    # expected (import_str, constructor) per special type
    expected = {
        'time': ('import genpy', 'genpy.Time()'),
        'duration': ('import genpy', 'genpy.Duration()'),
        'Header': ('import std_msgs.msg', 'std_msgs.msg._Header.Header()'),
    }
    for name, (import_str, constructor) in expected.items():
        special = get_special(name)
        assert import_str == special.import_str, special.import_str
        assert constructor == special.constructor
    # time/duration values need canon() called after deserialization; Header does not
    assert 'self.foo.canon()' == get_special('time').get_post_deserialize('self.foo')
    assert 'bar.canon()' == get_special('time').get_post_deserialize('bar')
    assert 'self.foo.canon()' == get_special('duration').get_post_deserialize('self.foo')
    assert get_special('Header').get_post_deserialize('self.foo') is None
def test_compute_post_deserialize():
    """canon() must be injected after deserializing time/duration, nothing else."""
    import genpy.generator
    compute = genpy.generator.compute_post_deserialize
    for type_name in ('time', 'duration'):
        assert 'self.bar.canon()' == compute(type_name, 'self.bar')
    for type_name in ('Header', 'int8', 'string'):
        assert compute(type_name, 'self.bar') is None
def test_flatten():
    """flatten() must recursively expand embedded-message fields into dotted
    leaf names, leaving specs made only of builtin fields unchanged."""
    import genpy.generator
    from genpy.generator import flatten
    msg_context = MsgContext.create_default()
    # specs containing only builtin field types flatten to themselves
    simple = MsgSpec(['string'], ['data'], [], 'string data\n', 'simple/String')
    # NOTE(review): 'simpe/Data2' looks like a typo for 'simple/Data2', but the
    # name is only carried through unchanged, so the test is unaffected.
    simple2 = MsgSpec(['string', 'int32'], ['data', 'data2'], [], 'string data\nint32 data2\n', 'simpe/Data2')
    assert simple == flatten(msg_context, simple)
    assert simple2 == flatten(msg_context, simple2)
    # nested specs: Base2 embeds Base, Base3 embeds Base2 twice, Base4 embeds Base3 twice
    b1 = MsgSpec(['int8'], ['data'], [], 'X', 'f_msgs/Base')
    b2 = MsgSpec(['f_msgs/Base'], ['data'], [], 'X', 'f_msgs/Base2')
    b3 = MsgSpec(['f_msgs/Base2', 'f_msgs/Base2'], ['data3', 'data4'], [], 'X', 'f_msgs/Base3')
    b4 = MsgSpec(['f_msgs/Base3', 'f_msgs/Base3'], ['dataA', 'dataB'], [], 'X', 'f_msgs/Base4')
    msg_context.register('f_msgs/Base', b1)
    msg_context.register('f_msgs/Base2', b2)
    msg_context.register('f_msgs/Base3', b3)
    msg_context.register('f_msgs/Base4', b4)
    # each level of nesting adds one dotted segment per embedded field
    assert MsgSpec(['int8'], ['data.data'], [], 'X', 'f_msgs/Base2') == flatten(msg_context, b2)
    assert MsgSpec(['int8', 'int8'], ['data3.data.data', 'data4.data.data'], [], 'X', 'f_msgs/Base3') == flatten(msg_context, b3)
    assert MsgSpec(['int8', 'int8', 'int8', 'int8'],
                   ['dataA.data3.data.data', 'dataA.data4.data.data', 'dataB.data3.data.data', 'dataB.data4.data.data'],
                   [], 'X', 'f_msgs/Base4') == flatten(msg_context, b4)
def test_flatten_array_objects():
    """Arrays of message types must NOT be flattened into their fields."""
    from genpy.generator import flatten
    msg_context = MsgContext.create_default()
    base = MsgSpec(['int8'], ['data'], [], 'X', 'f_msgs/Base')
    array_holder = MsgSpec(['f_msgs/Base[]'], ['data'], [], 'X', 'f_msgs/Base5')
    msg_context.register('f_msgs/Base', base)
    msg_context.register('f_msgs/Base5', array_holder)
    assert flatten(msg_context, array_holder) == array_holder
def test_default_value():
    """Check the Python source literal generated to initialize each field type."""
    from genpy.generator import default_value
    msg_context = MsgContext.create_default()
    msg_context.register('fake_msgs/String', MsgSpec(['string'], ['data'], [], 'string data\n', 'fake_msgs/String'))
    msg_context.register('fake_msgs/ThreeNums', MsgSpec(['int32', 'int32', 'int32'], ['x', 'y', 'z'], [], 'int32 x\nint32 y\nint32 z\n', 'fake_msgs/ThreeNums'))
    # trip-wire: make sure all builtins have a default value
    for t in genmsg.msgs.BUILTIN_TYPES:
        assert type(default_value(msg_context, t, 'roslib')) == str
    # simple types first
    for t in ['uint8', 'int8', 'uint16', 'int16', 'uint32', 'int32', 'uint64', 'int64', 'byte', 'char']:
        assert '0' == default_value(msg_context, t, 'std_msgs')
        assert '0' == default_value(msg_context, t, 'roslib')
    for t in ['float32', 'float64']:
        assert '0.' == default_value(msg_context, t, 'std_msgs')
        assert '0.' == default_value(msg_context, t, 'roslib')
    assert "''" == default_value(msg_context, 'string', 'roslib')
    # builtin specials
    assert 'genpy.Time()' == default_value(msg_context, 'time', 'roslib')
    assert 'genpy.Duration()' == default_value(msg_context, 'duration', 'roslib')
    assert 'std_msgs.msg._Header.Header()' == default_value(msg_context, 'Header', 'roslib')
    assert 'genpy.Time()' == default_value(msg_context, 'time', 'std_msgs')
    assert 'genpy.Duration()' == default_value(msg_context, 'duration', 'std_msgs')
    assert 'std_msgs.msg._Header.Header()' == default_value(msg_context, 'Header', 'std_msgs')
    # generic instances
    # - unregistered type
    assert None == default_value(msg_context, "unknown_msgs/Foo", "unknown_msgs")
    # - wrong context (short name only resolves within its own package)
    assert None == default_value(msg_context, 'ThreeNums', 'std_msgs')
    # - registered types
    assert 'fake_msgs.msg.String()' == default_value(msg_context, 'fake_msgs/String', 'std_msgs')
    assert 'fake_msgs.msg.String()' == default_value(msg_context, 'fake_msgs/String', 'fake_msgs')
    assert 'fake_msgs.msg.String()' == default_value(msg_context, 'String', 'fake_msgs')
    assert 'fake_msgs.msg.ThreeNums()' == default_value(msg_context, 'fake_msgs/ThreeNums', 'roslib')
    assert 'fake_msgs.msg.ThreeNums()' == default_value(msg_context, 'fake_msgs/ThreeNums', 'fake_msgs')
    assert 'fake_msgs.msg.ThreeNums()' == default_value(msg_context, 'ThreeNums', 'fake_msgs')
    # var-length arrays always default to empty arrays... except for byte and uint8 which are strings
    for t in ['int8', 'uint16', 'int16', 'uint32', 'int32', 'uint64', 'int64', 'float32', 'float64']:
        val = default_value(msg_context, t+'[]', 'std_msgs')
        assert '[]' == val, "[%s]: %s"%(t, val)
        assert '[]' == default_value(msg_context, t+'[]', 'roslib')
    assert "''" == default_value(msg_context, 'uint8[]', 'roslib')
    # fixed-length arrays should be zero-filled... except for byte and uint8 which are strings
    for t in ['float32', 'float64']:
        assert '[0.,0.,0.]' == default_value(msg_context, t+'[3]', 'std_msgs')
        assert '[0.]' == default_value(msg_context, t+'[1]', 'std_msgs')
    for t in ['int8', 'uint16', 'int16', 'uint32', 'int32', 'uint64', 'int64']:
        assert '[0,0,0,0]' == default_value(msg_context, t+'[4]', 'std_msgs')
        assert '[0]' == default_value(msg_context, t+'[1]', 'roslib')
    assert "chr(0)*1" == default_value(msg_context, 'uint8[1]', 'roslib')
    assert "chr(0)*4" == default_value(msg_context, 'uint8[4]', 'roslib')
    assert '[]' == default_value(msg_context, 'fake_msgs/String[]', 'std_msgs')
    assert '[fake_msgs.msg.String(),fake_msgs.msg.String()]' == default_value(msg_context, 'fake_msgs/String[2]', 'std_msgs')
def test_make_python_safe():
    """make_python_safe must append an underscore to field and constant names
    that are Python keywords, leaving other names, types and text untouched."""
    from genpy.generator import make_python_safe
    from genmsg.msgs import Constant
    s = MsgSpec(['int32', 'int32', 'int32', 'int32', 'int32', 'int32'], ['ok', 'if', 'self', 'fine', 'self.x', 'self.while'],
                [Constant('int32', 'if', '1', '1'), Constant('int32', 'okgo', '1', '1')],
                'x', 'test_msgs/Foo')
    s2 = make_python_safe(s)
    assert s != s2
    # keywords 'if', 'self', 'while' get a trailing underscore; plain names survive
    assert ['ok', 'if_', 'self_', 'fine', 'self.x', 'self.while_'] == s2.names, s2.names
    assert s2.types == s.types
    # BUG FIX: the original wrote `assert [Constant(...) == Constant(...)], ...`,
    # i.e. a one-element list containing a comparison, which is always truthy —
    # the renamed constants were never actually checked.
    assert [Constant('int32', 'if_', '1', '1'), Constant('int32', 'okgo', '1', '1')] == s2.constants, s2.constants
    assert s2.text == s.text
def test_compute_pkg_type():
    """compute_pkg_type resolves a type name to (package, type) and rejects
    names with more than one '/' separator."""
    from genpy.generator import compute_pkg_type, MsgGenerationException
    try:
        compute_pkg_type('std_msgs', 'really/bad/std_msgs/String')
        # BUG FIX: without this sentinel the test silently passed even when no
        # exception was raised.
        assert False, "compute_pkg_type should have raised MsgGenerationException"
    except MsgGenerationException:
        pass
    assert ('std_msgs', 'String') == compute_pkg_type('std_msgs', 'std_msgs/String')
    assert ('std_msgs', 'String') == compute_pkg_type('foo', 'std_msgs/String')
    # a bare type name resolves within the default package
    assert ('std_msgs', 'String') == compute_pkg_type('std_msgs', 'String')
def test_compute_import():
    """compute_import must return the transitive list of 'import x.msg'
    statements needed to (de)serialize a message type."""
    import genpy.generator
    msg_context = MsgContext.create_default()
    # unknown / builtin types need no imports
    assert [] == genpy.generator.compute_import(msg_context, 'foo', 'bar')
    assert [] == genpy.generator.compute_import(msg_context, 'foo', 'int32')
    # chain: ci3/Base3 -> ci2/Base2 -> ci/Base; ci4/Base4 fans out over both
    msg_context.register('ci_msgs/Base', MsgSpec(['int8'], ['data'], [], 'int8 data\n', 'ci_msgs/Base'))
    msg_context.register('ci2_msgs/Base2', MsgSpec(['ci_msgs/Base'], ['data2'], [], 'ci_msgs/Base data2\n', 'ci2_msgs/Base2'))
    msg_context.register('ci3_msgs/Base3', MsgSpec(['ci2_msgs/Base2'], ['data3'], [], 'ci2_msgs/Base2 data3\n', 'ci3_msgs/Base3'))
    msg_context.register('ci4_msgs/Base', MsgSpec(['int8'], ['data'], [], 'int8 data\n', 'ci4_msgs/Base'))
    msg_context.register('ci4_msgs/Base4', MsgSpec(['ci2_msgs/Base2', 'ci3_msgs/Base3'],
                                                   ['data4a', 'data4b'],
                                                   [], 'ci2_msgs/Base2 data4a\nci3_msgs/Base3 data4b\n', 'ci4_msgs/Base4'))
    msg_context.register('ci5_msgs/Base', MsgSpec(['time'], ['data'], [], 'time data\n', 'ci5_msgs/Base'))
    assert ['import ci_msgs.msg'] == genpy.generator.compute_import(msg_context, 'foo', 'ci_msgs/Base')
    assert ['import ci_msgs.msg'] == genpy.generator.compute_import(msg_context, 'ci_msgs', 'ci_msgs/Base')
    assert ['import ci2_msgs.msg', 'import ci_msgs.msg'] == genpy.generator.compute_import(msg_context, 'ci2_msgs', 'ci2_msgs/Base2')
    assert ['import ci2_msgs.msg', 'import ci_msgs.msg'] == genpy.generator.compute_import(msg_context, 'foo', 'ci2_msgs/Base2')
    assert ['import ci3_msgs.msg', 'import ci2_msgs.msg', 'import ci_msgs.msg'] == genpy.generator.compute_import(msg_context, 'ci3_msgs', 'ci3_msgs/Base3')
    # order of the fan-out imports is unspecified, hence the set comparison
    assert set(['import ci4_msgs.msg', 'import ci3_msgs.msg', 'import ci2_msgs.msg', 'import ci_msgs.msg']) == set(genpy.generator.compute_import(msg_context, 'foo', 'ci4_msgs/Base4'))
    assert set(['import ci4_msgs.msg', 'import ci3_msgs.msg', 'import ci2_msgs.msg', 'import ci_msgs.msg']) == set(genpy.generator.compute_import(msg_context, 'ci4_msgs', 'ci4_msgs/Base4'))
    assert ['import ci4_msgs.msg'] == genpy.generator.compute_import(msg_context, 'foo', 'ci4_msgs/Base')
    assert ['import ci4_msgs.msg'] == genpy.generator.compute_import(msg_context, 'ci4_msgs', 'ci4_msgs/Base')
    assert ['import ci4_msgs.msg'] == genpy.generator.compute_import(msg_context, 'ci4_msgs', 'Base')
    # the builtin 'time' field drags in genpy
    assert ['import ci5_msgs.msg', 'import genpy'] == genpy.generator.compute_import(msg_context, 'foo', 'ci5_msgs/Base')
def test_get_registered_ex():
    """get_registered_ex returns a registered spec and raises for unknown types."""
    import genpy.generator
    msg_context = MsgContext.create_default()
    s = MsgSpec(['string'], ['data'], [], 'string data\n', 'tgr_msgs/String')
    msg_context.register('tgr_msgs/String', s)
    assert s == genpy.generator.get_registered_ex(msg_context, 'tgr_msgs/String')
    try:
        genpy.generator.get_registered_ex(msg_context, 'bad_msgs/String')
        # BUG FIX: without this sentinel the test silently passed even when no
        # exception was raised.
        assert False, "get_registered_ex should have raised MsgGenerationException"
    except genpy.generator.MsgGenerationException:
        pass
def test_compute_constructor():
    """compute_constructor must return the Python constructor expression for a
    type name, or None when the type cannot be resolved."""
    from genpy.generator import compute_constructor
    msg_context = MsgContext.create_default()
    msg_context.register('fake_msgs/String', MsgSpec(['string'], ['data'], [], 'string data\n', 'fake_msgs/String'))
    msg_context.register('fake_msgs/ThreeNums', MsgSpec(['int32', 'int32', 'int32'], ['x', 'y', 'z'], [], 'int32 x\nint32 y\nint32 z\n', 'fake_msgs/ThreeNums'))
    # builtin specials
    assert 'genpy.Time()' == compute_constructor(msg_context, 'roslib', 'time')
    assert 'genpy.Duration()' == compute_constructor(msg_context, 'roslib', 'duration')
    assert 'std_msgs.msg._Header.Header()' == compute_constructor(msg_context, 'std_msgs', 'Header')
    assert 'genpy.Time()' == compute_constructor(msg_context, 'std_msgs', 'time')
    assert 'genpy.Duration()' == compute_constructor(msg_context, 'std_msgs', 'duration')
    # generic instances
    # - unregistered type
    assert None == compute_constructor(msg_context, "unknown_msgs", "unknown_msgs/Foo")
    assert None == compute_constructor(msg_context, "unknown_msgs", "Foo")
    # - wrong context (short name only resolves within its own package)
    assert None == compute_constructor(msg_context, 'std_msgs', 'ThreeNums')
    # - registered types
    assert 'fake_msgs.msg.String()' == compute_constructor(msg_context, 'std_msgs', 'fake_msgs/String')
    assert 'fake_msgs.msg.String()' == compute_constructor(msg_context, 'fake_msgs', 'fake_msgs/String')
    assert 'fake_msgs.msg.String()' == compute_constructor(msg_context, 'fake_msgs', 'String')
    assert 'fake_msgs.msg.ThreeNums()' == compute_constructor(msg_context, 'fake_msgs', 'fake_msgs/ThreeNums')
    # NOTE(review): the line below duplicates the one above verbatim — one of
    # the two was probably meant to use a different package context (compare
    # the parallel assertions in test_default_value).
    assert 'fake_msgs.msg.ThreeNums()' == compute_constructor(msg_context, 'fake_msgs', 'fake_msgs/ThreeNums')
    assert 'fake_msgs.msg.ThreeNums()' == compute_constructor(msg_context, 'fake_msgs', 'ThreeNums')
def test_len_serializer_generator():
import genpy.generator
# generator tests are mainly tripwires/coverage tests
# Test Serializers
# string serializer simply initializes local var
g = genpy.generator.len_serializer_generator('foo', True, True)
assert 'length = len(foo)' == '\n'.join(g)
# array len serializer writes var
g = genpy.generator.len_serializer_generator('foo', | |
None ) : return ( [ None , None ] )
if 89 - 89: o0oOOo0O0Ooo % o0oOOo0O0Ooo
return ( [ packet , Oo000o0o0 ] )
if 8 - 8: Ii1I % oO0o - o0oOOo0O0Ooo
if 14 - 14: OOooOOo * IiII
def lcaf_decode_eid ( self , packet ) :
oOO0OOOoO0ooo = "BBB"
I1111ii1i = struct . calcsize ( oOO0OOOoO0ooo )
if ( len ( packet ) < I1111ii1i ) : return ( [ None , None ] )
if 15 - 15: o0oOOo0O0Ooo + OoooooooOO - OOooOOo - o0oOOo0O0Ooo . iIii1I11I1II1 / Ii1I
if 33 - 33: OoO0O00
if 91 - 91: I11i % I11i % iII111i
if 19 - 19: I11i / I11i + I1IiiI * OoO0O00 - iII111i . Oo0Ooo
if 76 - 76: iII111i % OOooOOo / OoooooooOO . I1IiiI % OoO0O00 % i1IIi
oOoo , Ooooo0OO , O00OO0oOOO = struct . unpack ( oOO0OOOoO0ooo ,
packet [ : I1111ii1i ] )
if 95 - 95: Oo0Ooo - O0 / I1ii11iIi11i . I1IiiI / o0oOOo0O0Ooo % OoOoOO00
if ( O00OO0oOOO == LISP_LCAF_INSTANCE_ID_TYPE ) :
return ( [ self . lcaf_decode_iid ( packet ) , None ] )
elif ( O00OO0oOOO == LISP_LCAF_MCAST_INFO_TYPE ) :
packet , Oo000o0o0 = self . lcaf_decode_sg ( packet )
return ( [ packet , Oo000o0o0 ] )
elif ( O00OO0oOOO == LISP_LCAF_GEO_COORD_TYPE ) :
oOO0OOOoO0ooo = "BBBBH"
I1111ii1i = struct . calcsize ( oOO0OOOoO0ooo )
if ( len ( packet ) < I1111ii1i ) : return ( None )
if 38 - 38: OoOoOO00 % OoooooooOO . oO0o - OoooooooOO + I11i
OOII1iI , Ooooo0OO , O00OO0oOOO , o0o0OO0OO , ii111 = struct . unpack ( oOO0OOOoO0ooo , packet [ : I1111ii1i ] )
if 18 - 18: OoooooooOO + ooOoO0o * OoOoOO00 - OoO0O00
if 42 - 42: oO0o % OoOoOO00 - oO0o + I11i / i11iIiiIii
if ( O00OO0oOOO != LISP_LCAF_GEO_COORD_TYPE ) : return ( None )
if 74 - 74: OoO0O00 - II111iiii - ooOoO0o % i1IIi
ii111 = socket . ntohs ( ii111 )
packet = packet [ I1111ii1i : : ]
if ( ii111 > len ( packet ) ) : return ( None )
if 42 - 42: i11iIiiIii / O0
oOo0o0oOoo0Oo = lisp_geo ( "" )
self . instance_id = 0
self . afi = LISP_AFI_GEO_COORD
self . address = oOo0o0oOoo0Oo
packet = oOo0o0oOoo0Oo . decode_geo ( packet , ii111 , o0o0OO0OO )
self . mask_len = self . host_mask_len ( )
if 8 - 8: I1Ii111
return ( [ packet , None ] )
if 51 - 51: i11iIiiIii
if 1 - 1: iIii1I11I1II1 . i1IIi . i11iIiiIii % I1ii11iIi11i
if 58 - 58: i11iIiiIii * i11iIiiIii - OoO0O00
if 8 - 8: i11iIiiIii * OoOoOO00 . o0oOOo0O0Ooo
if 27 - 27: I1ii11iIi11i + Ii1I % I1Ii111
if 20 - 20: Oo0Ooo
class lisp_elp_node():
    """One hop of an Explicit Locator Path (ELP)."""

    def __init__(self):
        self.address = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.probe = False        # rendered as "P"/"p" by lisp_elp.print_elp()
        self.strict = False       # rendered as "S"/"s" by lisp_elp.print_elp()
        self.eid = False          # rendered as "r"/"R" by lisp_elp.print_elp()
        self.we_are_last = False  # True when this node is the final hop

    def copy_elp_node(self):
        """Return a copy of this ELP node (address plus all flags)."""
        clone = lisp_elp_node()
        # NOTE(review): copy_address is not defined on this class in this file;
        # presumably provided elsewhere — verify before relying on it.
        clone.copy_address(self.address)
        clone.probe = self.probe
        clone.strict = self.strict
        clone.eid = self.eid
        clone.we_are_last = self.we_are_last
        return clone
class lisp_elp():
    """A named Explicit Locator Path: an ordered list of lisp_elp_node hops."""

    def __init__(self, name):
        self.elp_name = name
        self.elp_nodes = []
        self.use_elp_node = None   # hop chosen by select_elp_node()
        self.we_are_last = False

    def copy_elp(self):
        """Return a copy of this ELP with per-node copies of every hop."""
        new_elp = lisp_elp(self.elp_name)
        new_elp.use_elp_node = self.use_elp_node
        new_elp.we_are_last = self.we_are_last
        new_elp.elp_nodes = [node.copy_elp_node() for node in self.elp_nodes]
        return new_elp

    def print_elp(self, want_marker):
        """Render the hop list as text.

        With want_marker, '*' marks the selected hop and 'x' marks a node
        flagged as the final hop.  Flag letters: r/R eid, P/p probe, S/s strict.
        """
        output = ""
        for node in self.elp_nodes:
            marker = ""
            if want_marker:
                if node == self.use_elp_node:
                    marker = "*"
                elif node.we_are_last:
                    marker = "x"
            output += "{}{}({}{}{}), ".format(marker,
                node.address.print_address_no_iid(),
                "r" if node.eid else "R", "P" if node.probe else "p",
                "S" if node.strict else "s")
        return output[0:-2] if output != "" else ""

    def select_elp_node(self):
        """Pick the next hop to use based on which of our own RLOCs appears
        in the path (reads the module-level lisp_myrlocs)."""
        rloc_v4, rloc_v6, _ = lisp_myrlocs
        match_index = None
        for node in self.elp_nodes:
            if rloc_v4 and node.address.is_exact_match(rloc_v4):
                match_index = self.elp_nodes.index(node)
                break
            if rloc_v6 and node.address.is_exact_match(rloc_v6):
                match_index = self.elp_nodes.index(node)
                break
        # NOTE(review): the branches below use the leaked loop variable 'node'
        # (the last hop examined), mirroring the original logic exactly.
        if match_index is None:
            # none of our RLOCs is in the path: start at the first hop
            self.use_elp_node = self.elp_nodes[0]
            node.we_are_last = False
            return
        if self.elp_nodes[-1] == self.elp_nodes[match_index]:
            # we are the final hop: nothing further to forward to
            self.use_elp_node = None
            node.we_are_last = True
            return
        # otherwise forward to the hop right after our own
        self.use_elp_node = self.elp_nodes[match_index + 1]
        return
class lisp_geo ( ) :
def __init__ ( self , name ) :
self . geo_name = name
self . latitude = 0xffffffff
self . lat_mins = 0
self . lat_secs = 0
self . longitude = 0xffffffff
| |
* self._n_joints]
actions_speeds = actions_speeds.reshape(
(2, self._n_joints))
actions_accelerations = actions[:, 2 * self._n_joints:]
actions_accelerations = actions_accelerations.reshape(
(2, self._n_joints))
speeds = np.vstack([self._previous_hermite_speeds, actions_speeds])
accelerations = np.vstack([self._previous_hermite_accelerations, actions_accelerations])
speeds[-1] *= shape_factor
accelerations[-1] *= shape_factor
eval = np.linspace(0, 1, span)
poly = CubicHermiteSpline(x, speeds, accelerations)
velocities = poly(eval) * self._upper_velocity_limits[np.newaxis]
velocities = velocities[np.newaxis] # shape [1, span, 7]
self._previous_hermite_speeds = speeds[-1]
self._previous_hermite_accelerations = accelerations[-1]
elif mode == "full_raw":
velocities = actions * self._upper_velocity_limits[np.newaxis]
velocities = velocities[:, np.newaxis] # shape [span, 1, 7]
elif mode == "one_raw":
velocities = actions * self._upper_velocity_limits[np.newaxis]
velocities = velocities[np.newaxis] # shape [1, 1, 7]
else:
raise ValueError("Unrecognized movement mode ({})".format(mode))
return velocities
@communicate_return_value
def apply_movement(self, actions, mode='minimalist', span=10):
velocities = self.get_movement_velocities(actions, mode=mode, span=span) # shape [n_states_to_be_returned, mini_sequence_length, n_joints]
normalized_velocities = velocities / self._upper_velocity_limits[np.newaxis]
metabolic_costs = np.sum(normalized_velocities ** 2, axis=(1, 2)) # shape [n_states_to_be_returned]
states_sequence = []
stateful_objects_states_sequence = []
for mini_sequence in velocities:
state, stateful_objects_state = self.get_data()
states_sequence.append(np.copy(state))
stateful_objects_states_sequence.append(np.copy(stateful_objects_state))
for velocity in mini_sequence:
self.set_joint_target_velocities(velocity)
self.step_sim()
return np.vstack(states_sequence), np.vstack(stateful_objects_states_sequence), metabolic_costs
    @communicate_return_value
    def apply_movement_get_frames(self, actions, cam_id, mode='minimalist', span=10):
        """Same as apply_movement, but also grabs a camera frame (cam_id)
        before every simulation step and returns the frames as a 4th value."""
        velocities = self.get_movement_velocities(actions, mode=mode, span=span)
        normalized_velocities = velocities / self._upper_velocity_limits[np.newaxis]
        metabolic_costs = np.sum(normalized_velocities ** 2, axis=(1, 2))  # shape [n_states_to_be_returned]
        states_sequence = []
        stateful_objects_states_sequence = []
        frames = []
        for mini_sequence in velocities:
            # snapshot the state once per mini-sequence, before executing it
            state, stateful_objects_state = self.get_data()
            states_sequence.append(np.copy(state))
            stateful_objects_states_sequence.append(np.copy(stateful_objects_state))
            for velocity in mini_sequence:
                frames.append(self.get_frame(cam_id))
                self.set_joint_target_velocities(velocity)
                self.step_sim()
        return np.vstack(states_sequence), np.vstack(stateful_objects_states_sequence), metabolic_costs, np.array(frames)
    def set_control_loop_enabled(self, bool):
        # Enable/disable the joint control loop on every arm.
        # NOTE(review): the parameter shadows the builtin `bool`; renaming it
        # would break keyword callers, so it is kept as-is.
        for arm in self._arm_list:
            arm.set_control_loop_enabled(bool)
    def set_motor_locked_at_zero_velocity(self, bool):
        # Lock/unlock the motors at zero target velocity, on every arm.
        # NOTE(review): the parameter shadows the builtin `bool`; renaming it
        # would break keyword callers, so it is kept as-is.
        for arm in self._arm_list:
            arm.set_motor_locked_at_zero_velocity(bool)
@communicate_return_value
def get_joint_forces(self):
last = 0
next = 0
for arm, joint_count in zip(self._arm_list, self._arm_joints_count):
next += joint_count
self._arm_joints_torques_buffer[last:next] = \
arm.get_joint_forces()
last = next
return self._arm_joints_torques_buffer
def set_joint_forces(self, forces):
last = 0
next = 0
for arm, joint_count in zip(self._arm_list, self._arm_joints_count):
next += joint_count
arm.set_joint_forces(forces[last:next])
last = next
@communicate_return_value
def get_joint_upper_velocity_limits(self):
last = 0
next = 0
self._upper_velocity_limits = np.zeros(self._n_joints, dtype=np.float32)
for arm, joint_count in zip(self._arm_list, self._arm_joints_count):
next += joint_count
self._upper_velocity_limits[last:next] = \
arm.get_joint_upper_velocity_limits()
last = next
return self._upper_velocity_limits
@communicate_return_value
def get_joint_intervals(self):
last = 0
next = 0
self._intervals = np.zeros((self._n_joints, 2), dtype=np.float32)
for arm, joint_count in zip(self._arm_list, self._arm_joints_count):
next += joint_count
_, self._intervals[last:next] = \
arm.get_joint_intervals()
last = next
return self._intervals
    @communicate_return_value
    def get_n_joints(self):
        # Total number of controllable joints across all arms.
        return self._n_joints
def create_environment(self, type='one_arm_4_buttons'):
if type == 'one_arm_4_buttons':
self.add_arm()
distance = 0.65
self.add_button(position=( distance, 0, 0))
self.add_button(position=(-distance, 0, 0))
self.add_button(position=(0, distance, 0))
self.add_button(position=(0, -distance, 0))
elif type == 'one_arm_4_buttons_45':
self.add_arm()
distance = 0.65
sqrt2_2 = 0.7071
self.add_button(position=( distance * sqrt2_2, distance * sqrt2_2, 0))
self.add_button(position=(-distance * sqrt2_2, -distance * sqrt2_2, 0))
self.add_button(position=(-distance * sqrt2_2, distance * sqrt2_2, 0))
self.add_button(position=( distance * sqrt2_2, -distance * sqrt2_2, 0))
elif type == 'one_arm_4_buttons_near':
self.add_arm()
distance = 0.45
self.add_button(position=( distance, 0, 0))
self.add_button(position=(-distance, 0, 0))
self.add_button(position=(0, distance, 0))
self.add_button(position=(0, -distance, 0))
elif type == 'one_arm_8_buttons':
self.add_arm()
distance = 0.65
sqrt2_distance = distance / np.sqrt(2)
self.add_button(position=( distance, 0, 0))
self.add_button(position=(-distance, 0, 0))
self.add_button(position=(0, distance, 0))
self.add_button(position=(0, -distance, 0))
self.add_button(position=(sqrt2_distance, sqrt2_distance, 0))
self.add_button(position=(-sqrt2_distance, sqrt2_distance, 0))
self.add_button(position=(sqrt2_distance, -sqrt2_distance, 0))
self.add_button(position=(-sqrt2_distance, -sqrt2_distance, 0))
elif type == 'one_arm_4_buttons_4_taps':
self.add_arm()
distance = 0.65
sqrt2_distance = distance / np.sqrt(2)
self.add_button(position=( distance, 0, 0))
self.add_button(position=(-distance, 0, 0))
self.add_button(position=(0, distance, 0))
self.add_button(position=(0, -distance, 0))
self.add_tap(position=(sqrt2_distance, sqrt2_distance, 0))
self.add_tap(position=(-sqrt2_distance, sqrt2_distance, 0))
self.add_tap(position=(sqrt2_distance, -sqrt2_distance, 0))
self.add_tap(position=(-sqrt2_distance, -sqrt2_distance, 0))
elif type == 'one_arm_2_buttons_2_levers':
self.add_arm()
distance = 0.65
self.add_lever(position=( distance, 0, 0))
self.add_lever(position=(-distance, 0, 0))
self.add_button(position=(0, distance, 0))
self.add_button(position=(0, -distance, 0))
elif type == 'one_arm_2_buttons_1_levers_1_tap':
self.add_arm()
distance = 0.65
self.add_lever(position=( distance, 0, 0))
self.add_tap(position=(-distance, 0, 0))
self.add_button(position=(0, distance, 0))
self.add_button(position=(0, -distance, 0))
elif type == 'one_arm_4_buttons_2_taps_2_levers':
self.add_arm()
distance = 0.65
sqrt2_distance = distance / np.sqrt(2)
self.add_lever(position=( distance, 0, 0))
self.add_lever(position=(-distance, 0, 0))
self.add_tap(position=(0, distance, 0))
self.add_tap(position=(0, -distance, 0))
self.add_button(position=(sqrt2_distance, sqrt2_distance, 0))
self.add_button(position=(-sqrt2_distance, sqrt2_distance, 0))
self.add_button(position=(sqrt2_distance, -sqrt2_distance, 0))
self.add_button(position=(-sqrt2_distance, -sqrt2_distance, 0))
else:
raise ValueError("Unrecognized environment type ({})".format(type))
    def step_sim(self):
        # Advance the PyRep simulation by one timestep.
        self._pyrep.step()
    def start_sim(self):
        # Start the PyRep simulation.
        self._pyrep.start()
    def stop_sim(self):
        # Stop the PyRep simulation.
        self._pyrep.stop()
    @communicate_return_value
    def get_simulation_timestep(self):
        # Current simulation timestep, as reported by PyRep.
        return self._pyrep.get_simulation_timestep()
    def set_simulation_timestep(self, dt):
        # Set the simulation timestep to `dt`.
        self._pyrep.set_simulation_timestep(dt)
@consumer_to_producer_method_conversion
class SimulationProducer(object):
def __init__(self, scene="", gui=False):
self._process_io = {}
self._process_io["must_quit"] = mp.Event()
self._process_io["simulaton_ready"] = mp.Event()
self._process_io["command_pipe_empty"] = mp.Event()
self._process_io["slot_in_command_queue"] = mp.Semaphore(100)
pipe_out, pipe_in = mp.Pipe(duplex=False)
self._process_io["command_pipe_in"] = pipe_in
self._process_io["command_pipe_out"] = pipe_out
pipe_out, pipe_in = mp.Pipe(duplex=False)
self._process_io["return_value_pipe_in"] = pipe_in
self._process_io["return_value_pipe_out"] = pipe_out
pipe_out, pipe_in = mp.Pipe(duplex=False)
self._process_io["exception_pipe_in"] = pipe_in
self._process_io["exception_pipe_out"] = pipe_out
self._consumer = SimulationConsumer(self._process_io, scene, gui=gui)
self._consumer.start()
self._logger = logging.getLogger(f"Simulation({self._consumer._id: 2d})")
self._logger.info("consumer {} started".format(self._consumer._id))
self._closed = False
# atexit.register(self.close)
def _get_process_io(self):
return self._process_io
def _check_consumer_alive(self):
if not self._consumer.is_alive():
self._consumer.join()
self._logger.critical("### My friend died ;( raising its exception: ###\n")
self._consumer.join()
self._closed = True
exc, traceback = self._process_io["exception_pipe_out"].recv()
raise SimulationConsumerFailed(exc, traceback)
return True
def _send_command(self, function, *args, **kwargs):
self._process_io["command_pipe_in"].send((function, args, kwargs))
semaphore = self._process_io["slot_in_command_queue"]
while not semaphore.acquire(block=False, timeout=0.1):
self._check_consumer_alive()
def _wait_for_answer(self):
while not self._process_io["return_value_pipe_out"].poll(1):
self._check_consumer_alive()
answer = self._process_io["return_value_pipe_out"].recv()
return answer
def _wait_consumer_ready(self):
self._process_io["simulaton_ready"].wait()
def close(self):
if not self._closed:
self._logger.debug("Producer closing")
if self._consumer.is_alive():
self._wait_command_pipe_empty(timeout=10)
self._logger.debug("command pipe empty, setting must_quit flag")
self._process_io["must_quit"].set()
self._logger.debug("flushing command pipe")
self.good_bye()
self._closed = True
self._logger.debug("succesfully closed, needs to be joined")
else:
self._logger.debug("already closed, doing nothing")
def join(self, timeout=10):
    """Join the consumer process, escalating to SIGTERM if it hangs.

    :param timeout: seconds to wait for each join attempt
    """
    self._logger.debug(f"joining ({timeout}) ...")
    self._consumer.join(timeout=timeout)
    if self._consumer.exitcode is None:
        self._logger.warning(f"joining ({timeout}) ... failed")
        self._logger.warning("sending SIGTERM")
        self._consumer.terminate()
        # BUG FIX: the original joined once more *before* logging here and
        # then again after, doubling the worst-case wait to 2x timeout; a
        # single post-SIGTERM join is enough.
        self._logger.warning(f"joining ({timeout}) after SIGTERM ...")
        self._consumer.join(timeout=timeout)
        if self._consumer.exitcode is None:
            self._logger.warning(f"joining ({timeout}) after SIGTERM ... failed")
    else:
        try:
            self._logger.debug(f"joining ({timeout}) ... joined!")
            self._logger.info(f"Coppelia closed")
        except LookupError:
            # NOTE(review): presumably guards against logging machinery
            # failing during interpreter shutdown — confirm.
            pass
def _wait_command_pipe_empty(self, timeout=None):
    """Block until the consumer reports the command pipe is drained.

    :param timeout: max seconds to wait; on expiry we log and proceed
        without waiting for the remaining commands.
    """
    # Ask the consumer to set the "command_pipe_empty" event once it has
    # processed everything queued before this sentinel command.
    self._send_command(SimulationConsumer.signal_command_pipe_empty)
    if not self._process_io["command_pipe_empty"].wait(timeout=timeout):
        self._logger.info(f"Command pipe was not empty after a timeout of {timeout}sec. Exiting without completing all commands")
    else:
        # Reset the event so the next drain request starts clean.
        self._process_io["command_pipe_empty"].clear()
def __del__(self):
    # Best-effort cleanup on garbage collection; close() is idempotent
    # (it checks self._closed before doing any work).
    self.close()
@producer_to_pool_method_convertion
class SimulationPool:
    """Drive a pool of SimulationProducer instances as a single object.

    The decorator broadcasts producer method calls to every currently
    *active* producer (see :meth:`specific`).
    """

    def __init__(self, size, scene="", guis=None):
        """Spawn `size` producers; indices listed in `guis` get a GUI.

        :param size: number of simulations to start
        :param scene: scene path forwarded to every producer
        :param guis: iterable of producer indices that should open a GUI
            (BUG FIX: was a mutable default argument `guis=[]`)
        """
        guis = [] if guis is None else guis
        self.size = size
        self._producers = [
            SimulationProducer(scene, gui=i in guis) for i in range(size)
        ]
        self._active_producers_indices = list(range(size))
        self._distribute_args_mode = False
        self.wait_consumer_ready()

    @contextmanager
    def specific(self, list_or_int):
        """Temporarily restrict broadcast calls to the given index/indices."""
        previous_indices = self._active_producers_indices
        indices = list_or_int if isinstance(list_or_int, list) else [list_or_int]
        self._active_producers_indices = indices
        try:
            yield
        finally:
            # BUG FIX: restore even when the body raises, so the pool is
            # never left stuck on a subset of producers.
            self._active_producers_indices = previous_indices

    @contextmanager
    def distribute_args(self):
        """Distribute positional args across active producers (one set each)."""
        self._distribute_args_mode = True
        try:
            yield len(self._active_producers_indices)
        finally:
            self._distribute_args_mode = False

    def _get_active_producers(self):
        """Producers currently addressed by broadcast calls."""
        return [self._producers[i] for i in self._active_producers_indices]

    _active_producers = property(_get_active_producers)

    def close(self):
        """Ask every producer to shut down, then join them all."""
        for producer in self._producers:
            producer.close()
        for producer in self._producers:
            producer.join()

    def wait_consumer_ready(self):
        """Block until every consumer process reported ready."""
        for producer in self._producers:
            producer._wait_consumer_ready()

    def __del__(self):
        self.close()
if __name__ == '__main__':
def test_1():
    """Single-producer smoke test: objects, one-hot state sweep, long run."""
    scene = ""
    simulation = SimulationProducer(scene, gui=True)
    simulation.add_tap(position=(1, 1, 0), orientation=(0, 0, 1))
    simulation.add_tap(position=(2, 1, 0), orientation=(0, 0, 1))
    simulation.add_button(position=(0, 1, 0), orientation=(0, 0, 0))
    simulation.add_button(position=(0, 0, 0), orientation=(0, 0, 0))
    simulation.add_lever(position=(1, 0, 0), orientation=(0, 0, 0))
    simulation.add_lever(position=(2, 0, 0), orientation=(0, 0, 0))
    simulation.start_sim()
    n_objects = 6
    for _ in range(1):
        # Walk a one-hot activation across the six stateful objects,
        # stepping the simulation 100 times before each change.
        for active in range(n_objects):
            for _ in range(100):
                simulation.step_sim()
            simulation.set_stateful_objects_states(
                [1 if k == active else 0 for k in range(n_objects)])
        print(simulation.get_stateful_objects_states())
    for i in range(5000):
        simulation.step_sim()
        print(i, end='\r')
    simulation.stop_sim()
    simulation.close()
def test_2():
    """Pool smoke test: broadcast to 32 producers, then to one via specific()."""
    simulations = SimulationPool(32)
    simulations.add_tap(position=(1, 1, 0), orientation=(0, 0, 1))
    simulations.add_tap(position=(2, 1, 0), orientation=(0, 0, 1))
    simulations.add_button(position=(0, 1, 0), orientation=(0, 0, 0))
    simulations.add_button(position=(0, 0, 0), orientation=(0, 0, 0))
    simulations.add_lever(position=(1, 0, 0), orientation=(0, 0, 0))
    simulations.add_lever(position=(2, 0, 0), orientation=(0, 0, 0))
    simulations.start_sim()
    simulations.set_stateful_objects_states([0, 0, 0, 0, 1, 0])
    print(simulations.get_stateful_objects_states())
    # Only producer 0 receives this call; the others keep their state.
    with simulations.specific(0):
        simulations.set_stateful_objects_states([0, 0, 0, 0, 1, 1])
    print(simulations.get_stateful_objects_states())
    simulations.stop_sim()
    return simulations
def test_3():
    """Benchmark stepping a 32-simulation pool and print joint read-outs."""
    import time
    pool_size = 32
    simulations = SimulationPool(pool_size, guis=[])
    simulations.create_environment('one_arm_2_buttons_1_levers_1_tap')
    simulations.start_sim()
    simulations.step_sim()
    for probe in (simulations.get_joint_positions,
                  simulations.get_joint_velocities,
                  simulations.get_joint_forces,
                  simulations.get_joint_upper_velocity_limits):
        print(probe())
    n_steps = 1000
    start = time.time()
    for _ in range(n_steps):
        simulations.step_sim()
    elapsed = time.time() - start
    print("Pool size: {}, {} iteration in {:.3f} sec ({:.3f} it/sec)".format(
        pool_size,
        n_steps * pool_size,
        elapsed,
        pool_size * n_steps / elapsed
    ))
    simulations.stop_sim()
    simulations.close()
def test_4():
import time
pool_size = 1
simulations = SimulationPool(
pool_size,
scene=MODEL_PATH + '/custom_timestep.ttt',
guis=[0]
)
simulations.create_environment('one_arm_2_buttons_1_levers_1_tap')
dt = 0.05
simulations.set_simulation_timestep(dt)
simulations.set_control_loop_enabled(False)
simulations.start_sim()
with simulations.specific(0):
upper_limits = simulations.get_joint_upper_velocity_limits()[0]
n_joints = simulations.get_n_joints()[0]
N = 10000
periods_in_sec = np.random.randint(
low=2, high=10, size=n_joints)[np.newaxis]
periods = periods_in_sec / dt
x = np.arange(N)[:, np.newaxis]
velocities = np.sin(x / periods * 2 * np.pi) * upper_limits
states = []
t0 = time.time()
for i in range(N):
simulations.step_sim()
simulations.set_joint_target_velocities(velocities[i])
a = simulations.get_joint_forces()
states += simulations.get_state()
t1 | |
trimmedPatchesLR = patchesLR[booleanMask]
trimmedPathcesHR = patchesHR[booleanMask]
return (trimmedPatchesLR, trimmedPathcesHR)
def removeCorruptedTestPatchSets(patchesLR: np.ma.masked_array,
                                 clarityThreshold: float) -> np.ma.masked_array:
    '''
    Drop every patch set whose LR patches are all below the clarity threshold.
    Input:
        patchesLR: np.ma.masked_array[numImgSet, numPatches, numLowResImg, C, H, W]
        clarityThreshold: float
    Output:
        cleanPatchesLR: np.ma.masked_array[numImgSet, newNumPatches, numLowResImg, C, H, W]
                        where newNumPatches <= numPatches
    '''
    desc = '[ INFO ] Removing corrupted test sets     '
    # BUG FIX: iterate over patchesLR — `patchesHR` does not exist in this
    # function and raised a NameError on every call.
    booleanMask = np.array([isPatchSetNotCorrupted(patchSet, clarityThreshold)
                            for patchSet in tqdm(patchesLR, desc=desc)])
    trimmedPatchesLR = patchesLR[booleanMask]
    return trimmedPatchesLR
def isPatchSetNotCorrupted(patchSet: np.ma.masked_array, clarityThreshold: float) -> bool:
    '''
    Check whether at least one LR patch in the set is clear enough.
    Returns False only when ALL patches fall below the clarity threshold.
    Input:
        patchSet: np.ma.masked_array[numPatches, numLowResImg, C, H, W]
        clarityThreshold: float
    Output:
        boolean that answers the question: is this patch set NOT corrupted?
    '''
    maxMaskedFraction = 1 - clarityThreshold
    def isClearEnough(patch):
        # Fraction of masked (invalid) pixels relative to one H*W plane.
        maskedFraction = np.count_nonzero(patch.mask) / (patch.shape[-1] * patch.shape[-2])
        return maskedFraction < maxMaskedFraction
    return any(isClearEnough(patch) for patch in patchSet)
def generatePatches(imgSets: np.ma.masked_array, patchSize: int, stride: int) -> np.ma.masked_array:
    '''
    Cut every image of every image set into square patches.
    Input:
        images: np.ma.masked_array[numImgSet, numImgPerImgSet, channels, height, width]
        patchSize: int
        stride: int
    Output:
        np.ma.masked_array[numImgSet, numImgPerImgSet * numPatches, channels, patchSize, patchSize]
    '''
    desc = f'[ INFO ] Generating patches (k={patchSize}, s={stride})'
    if imgSets.dtype != 'float32':
        imgSets = imgSets.astype(np.float32)
    patchesPerSet = [generatePatchesPerImgSet(imgSet, patchSize, stride)
                     for imgSet in tqdm(imgSets, desc=desc)]
    return np.ma.array(patchesPerSet)
def generatePatchesPerImgSet(images: np.ma.masked_array, patchSize: int, stride: int) -> np.ma.masked_array:
    '''
    Generate patches of images systematically with torch.Tensor.unfold.
    Input:
        images: np.ma.masked_array[numImgPerImgSet, channels, height, width]
        patchSize: int
        stride: int
    Output:
        np.ma.masked_array[numImgPerImgSet * numPatches, channels, patchSize, patchSize]
    '''
    tensorImg = torch.tensor(images)
    tensorMsk = torch.tensor(images.mask)
    numMskPerImgSet, channels, height, width = images.shape
    patchesImg = tensorImg.unfold(0, numMskPerImgSet, numMskPerImgSet).unfold(
        1, channels, channels).unfold(2, patchSize, stride).unfold(3, patchSize, stride)
    patchesImg = patchesImg.reshape(-1, channels, patchSize, patchSize)  # [numImgPerImgSet * numPatches, C, H, W]
    patchesImg = patchesImg.numpy()
    # BUG FIX: unfold the mask along the channel dimension too, exactly like
    # the image path above.  Without it the flattened mask patches are laid
    # out in a different order than the image patches whenever channels > 1,
    # so masks no longer correspond to their pixels (harmless only for C=1).
    patchesMsk = tensorMsk.unfold(0, numMskPerImgSet, numMskPerImgSet).unfold(
        1, channels, channels).unfold(2, patchSize, stride).unfold(3, patchSize, stride)
    patchesMsk = patchesMsk.reshape(-1, channels, patchSize, patchSize)
    patchesMsk = patchesMsk.numpy()
    return np.ma.masked_array(patchesImg, mask=patchesMsk)
def registerImages(allImgLR: np.ndarray, allMskLR: np.ndarray) -> np.ma.masked_array:
    '''
    For each imgset, align all its imgs into one coordinate system.
    The reference image will be the clearest one (i.e. the one with the
    highest accumulative QM sum).
    Input:
        allImgLR: np.ndarray[numImgSet, numImgPerImgSet, channel, height, width]
        allMskLR: np.ndarray[numImgSet, numMskPerImgSet, channel, height, width]
    Output:
        output: np.ma.masked_array with the same dimension
    '''
    # '[ INFO ] Loading LR masks and dumping '
    desc = '[ INFO ] Registering LR images            '
    # Register every image set independently; stack the per-set results.
    return np.ma.array([registerImagesInSet(allImgLR[i], allMskLR[i])
                        for i in tqdm(range(allImgLR.shape[0]), desc=desc)])
def registerImagesInSet(imgLR: np.ndarray, mskLR: np.ndarray) -> np.ma.masked_array:
    '''
    Takes in an imgset LR masks and images.
    Sorts frames from clearest to dirtiest, registers every frame against
    the clearest one, and returns the frames as one masked stack.
    Input:
        imgLR: np.ndarray[numImgPerImgSet, channel, height, width]
        mskLR: np.ndarray[numMskPerImgSet, channel, height, width]
    Output
        regImgMskLR: np.ma.masked_array[numMskPerImgSet, channel, height, width]
        This array has a property mask where in if used, returns a boolean array
        with the same dimension as the data.
        https://docs.scipy.org/doc/numpy-1.15.0/reference/maskedarray.baseclass.html#numpy.ma.MaskedArray.data
    '''
    # Here msk nonzero == clear pixel, so descending count => clearest first.
    sortedIdx = np.argsort([-np.count_nonzero(msk) for msk in mskLR])
    clearestToDirtiestImg = imgLR[sortedIdx]
    clearestToDirtiestMsk = mskLR[sortedIdx]
    referImg = clearestToDirtiestImg[0]
    # NOTE(review): `~msk` assumes a boolean mask; for integer masks it is a
    # bitwise NOT, not a logical one — confirm the mask dtype upstream.
    frames = [np.ma.masked_array(clearestToDirtiestImg[0],
                                 mask=~clearestToDirtiestMsk[0])]
    for img, msk in zip(clearestToDirtiestImg[1:], clearestToDirtiestMsk[1:]):
        regImg, regMsk = registerFrame(img, msk.astype(bool), referImg)
        frames.append(np.ma.masked_array(regImg, mask=~(regMsk > 0)))
    # PERF FIX: stack once at the end; the original concatenated inside the
    # loop, rebuilding the whole array on every iteration (quadratic copies).
    return np.ma.stack(frames)
def registerFrame(img: np.ndarray, msk: np.ndarray, referenceImg: np.ndarray, tech='freq') -> Tuple[np.ndarray, np.ndarray]:
    '''
    Register (translate) one frame and its mask onto a reference image.
    Input:
        img: np.ndarray[channel, height, width]
        msk: np.ndarray[channel, height, width]
        referenceImg: np.ndarray[channel, height, width]
        tech: 'time' for a mask-aware spatial-domain shift,
              'freq' (default) for phase correlation + Fourier shift.
    Output:
        Tuple(regImg, regMsk)
        regImg: np.ndarray[channel, height, width]
        regMsk: np.ndarray[channel, height, width]

    NOTE(review): any other `tech` value leaves regImg/regMsk unbound and
    the final return raises UnboundLocalError.
    '''
    if tech == 'time':
        # Estimate the shift while ignoring masked pixels, then translate
        # image and mask in the spatial domain.
        shiftArray = masked_register_translation(referenceImg, img, msk)
        regImg = shift(img, shiftArray, mode='reflect')
        regMsk = shift(msk, shiftArray, mode='constant', cval=0)
    if tech == 'freq':
        # Phase-correlation shift applied in the Fourier domain; taking
        # .real drops the numerically tiny imaginary residue of ifftn.
        shiftArray, _, _ = register_translation(referenceImg, img)
        regImg = fourier_shift(np.fft.fftn(img), shiftArray)
        regImg = np.fft.ifftn(regImg)
        regImg = regImg.real
        regMsk = fourier_shift(np.fft.fftn(msk), shiftArray)
        regMsk = np.fft.ifftn(regMsk)
        regMsk = regMsk.real
    return (regImg, regMsk)
def convertToMaskedArray(imgSets: np.ndarray, mskSets: np.ndarray) -> np.ma.masked_array:
    '''
    Fuse an image array and its mask array into one masked array.
    Especially made for HR images (singleton second axis).
    Input:
        imgSets: np.ndarray[numImgSet, numImgPerImgSet, channel, height, width]
        mskSets: np.ndarray[numImgSet, numImgPerImgSet, channel, height, width]
    Output:
        imgMskSets: np.ma.masked_array[numImgSet, numImgPerImgSet, channel, height, width]
    '''
    squeezedImgs = np.squeeze(imgSets, axis=1)  # [numImgSet, channel, height, width]
    squeezedMsks = np.squeeze(mskSets, axis=1)
    # msk nonzero == valid pixel, hence the inversion for the mask property.
    fused = np.ma.array([np.ma.masked_array(img, mask=~msk)
                         for img, msk in zip(squeezedImgs, squeezedMsks)])
    return np.expand_dims(fused, axis=1)  # [numImgSet, 1, channel, height, width]
def removeCorruptedTrainImageSets(imgMskLR: np.ma.masked_array, imgMskHR: np.ma.masked_array,
                                  clarityThreshold: float) -> Tuple[np.ma.masked_array, np.ma.masked_array]:
    '''
    Drop an image set when ALL of its LR frames fall below the clarity threshold.
    Input:
        imgMskLR: np.ma.masked_array[numImgSet, numImgPerImgSet, channel, height, width]
        imgMskHR: np.ma.masked_array[numImgSet, 1, channel, height, width]
        clarityThreshold: float
    Output:
        trimmedImgMskLR: np.ma.masked_array[newNumImgSet, numImgPerImgSet, channel, height, width]
        trimmedImgMskHR: np.ma.masked_array[newNumImgSet, 1, channel, height, width]
        where newNumImgSet <= numImgSet
    '''
    desc = '[ INFO ] Removing corrupted ImageSets     '
    keep = np.array([isImageSetNotCorrupted(imgSet, clarityThreshold)
                     for imgSet in tqdm(imgMskLR, desc=desc)])
    # The same boolean mask is applied to both arrays to keep them aligned.
    return (imgMskLR[keep], imgMskHR[keep])
def removeCorruptedTestImageSets(imgMskLR: np.ma.masked_array,
                                 clarityThreshold: float) -> np.ma.masked_array:
    '''
    Drop an image set when ALL of its LR frames fall below the clarity threshold.
    Input:
        imgMskLR: np.ma.masked_array[numImgSet, numImgPerImgSet, channel, height, width]
        clarityThreshold: float
    Output:
        trimmedImgMskLR: np.ma.masked_array[newNumImgSet, numImgPerImgSet, channel, height, width]
        where newNumImgSet <= numImgSet
    '''
    desc = '[ INFO ] Removing corrupted ImageSets     '
    keep = np.array([isImageSetNotCorrupted(imgSet, clarityThreshold)
                     for imgSet in tqdm(imgMskLR, desc=desc)])
    return imgMskLR[keep]
def isImageSetNotCorrupted(imgSet: np.ma.masked_array, clarityThreshold: float) -> bool:
    '''
    Check whether at least one LR image in the set is clear enough.
    Returns False only when EVERY image's clarity is below the threshold.
    Input:
        imgSet: np.ma.masked_array[numImgPerImgSet, channel, height, width]
        clarityThreshold: float
    Output:
        boolean that answers the question: is this image set NOT corrupted?
    '''
    maxMaskedFraction = 1 - clarityThreshold
    for img in imgSet:
        # Masked (invalid) pixels relative to one height*width plane.
        maskedFraction = np.count_nonzero(img.mask) / (img.shape[1] * img.shape[2])
        if maskedFraction < maxMaskedFraction:
            return True
    return False
def pickClearLRImgsPerImgSet(imgMskLR: np.ma.masked_array,
                             numImgToPick: int, clarityThreshold: float) -> np.ma.masked_array:
    '''
    Pick clearest frames per ImgSet.
    Before picking, we remove all frames that don't satisfy the clarity threshold.
    After removing the said frames, in the event that the remaining LR frames is less than
    the number of img to pick, we randomly pick among the clear frames to satisfy number of frames.
    (This might be a form of regularization...)
    Input:
        imgMskLR: np.ma.masked_array[newNumImgSet, numImgPerImgSet, channel, height, width]
        numImgToPick: int
    Output:
        trimmedImgMskLR: np.ma.masked_array[newNumImgSet, numImgToPick, channel, height, width]
        where numImgToPick <= numImgPerImgSet
    '''
    desc = f'[ INFO ] Picking top {numImgToPick} clearest images  '
    picked = []
    for imgMsk in tqdm(imgMskLR, desc=desc):
        clearFrames = filterImgMskSet(imgMsk, clarityThreshold)
        picked.append(pickClearImg(clearFrames, numImgToPick=numImgToPick))
    return np.ma.array(picked)
def pickClearImg(imgMsk: np.ma.masked_array, numImgToPick: int) -> np.ma.masked_array:
    '''
    Pick the clearest low resolution images.
    Input:
        imgMsk: np.ma.masked_array[numImgPerImgSet, channel, height, width]
        numImgToPick: int
    Ouput:
        trimmedImgMsk: np.ma.masked_array[newNumImgPerImgSet, channel, height, width]
        where newNumImgPerImgSet <= numImgPerImgSet might not hold.
    '''
    # Fewest masked (invalid) pixels first == clearest first.
    # BUG FIX: the previous argsort on the *negated* mask counts ordered the
    # dirtiest frames first, contradicting this function's purpose.
    sortedIndices = np.argsort(np.sum(imgMsk.mask, axis=(1, 2, 3)))
    sortedImgMskArray = imgMsk[sortedIndices]
    if numImgToPick < len(imgMsk):
        trimmedImgMsk = sortedImgMskArray[:numImgToPick]
    else:
        # BUG FIX: np.copy() returns a plain ndarray and silently drops the
        # mask; MaskedArray.copy() keeps it.
        trimmedImgMsk = sortedImgMskArray.copy()
        while len(trimmedImgMsk) < numImgToPick:
            # Pad by re-sampling the available frames in random order.
            print('Short on data!')
            shuffledIndices = np.random.choice(sortedIndices, size=len(sortedIndices), replace=False)
            toAppend = imgMsk[shuffledIndices]
            trimmedImgMsk = np.ma.concatenate((trimmedImgMsk, toAppend))
        trimmedImgMsk = trimmedImgMsk[:numImgToPick]
    return trimmedImgMsk
def filterImgMskSet(imgSet: np.ma.masked_array, clarityThreshold: float) -> np.ma.masked_array:
    '''
    Same test as isImageSetNotCorrupted, but instead of a boolean it returns
    the subset of frames that satisfy the clarity threshold.
    Input:
        imgSet: np.ma.masked_array[numImgPerImgSet, channel, height, width]
        clarityThreshold: float
    Output:
        filteredImgSet: np.ma.masked_array[newNumImgPerImgSet, channel, height, width]
        where newNumImgPerImgSet <= numImgPerImgSet
    '''
    maxMaskedFraction = 1 - clarityThreshold
    keep = []
    for img in imgSet:
        maskedFraction = np.count_nonzero(img.mask) / (img.shape[1] * img.shape[2])
        keep.append(maskedFraction < maxMaskedFraction)
    return imgSet[np.array(keep)]
def loadData(arrayDir: str, band: str):
    '''
    Load the previously dumped .npy arrays for one spectral band.
    Input:
        arrayDir: str -> the path folder for which you saved .npy files
        band: str -> 'NIR' or 'RED'
    Output:
        List[Tuple(train data), Tuple(test data)]
    '''
    # Validate the input directory before touching any file.
    if not os.path.exists(arrayDir):
        raise Exception("[ ERROR ] Folder path does not exists...")
    if not os.listdir(arrayDir):
        raise Exception("[ ERROR ] No files in the provided directory...")
    names = ('TRAINimgLR', 'TRAINimgHR', 'TRAINmskLR', 'TRAINmskHR',
             'TESTimgLR', 'TESTmskLR')
    arrays = {name: np.load(os.path.join(arrayDir, f'{name}_{band}.npy'),
                            allow_pickle=True)
              for name in names}
    TRAIN = (arrays['TRAINimgLR'], arrays['TRAINmskLR'],
             arrays['TRAINimgHR'], arrays['TRAINmskHR'])
    TEST = (arrays['TESTimgLR'], arrays['TESTmskLR'])
    return TRAIN, TEST
def loadAndSaveRawData(rawDataDir: str, arrayDir: str, band: str, isGrayScale=True, isTrainData=True):
'''
This function loads every imageset and dumps it into one giant array.
We do this because of memory constraints...
If | |
import asyncio
import re
import warnings
from math import sqrt
from html import unescape
from base64 import b64decode
from urllib.parse import unquote, urlparse
import aiohttp
import async_timeout
from .errors import BadStatusError
from .utils import log, get_headers, IPPattern, IPPortPatternGlobal
class Provider:
    """Proxy provider.

    Provider - a website that publish free public proxy lists.

    :param str url: Url of page where to find proxies
    :param tuple proto:
        (optional) List of the types (protocols) that may be supported
        by proxies returned by the provider. Then used as :attr:`Proxy.types`
    :param int max_conn:
        (optional) The maximum number of concurrent connections on the provider
    :param int max_tries:
        (optional) The maximum number of attempts to receive response
    :param int timeout:
        (optional) Timeout of a request in seconds
    """

    _pattern = IPPortPatternGlobal

    def __init__(self, url=None, proto=(), max_conn=4,
                 max_tries=3, timeout=20, loop=None):
        if url:
            self.domain = urlparse(url).netloc
        self.url = url
        self.proto = proto
        self._max_tries = max_tries
        self._timeout = timeout
        self._session = None
        # BUG FIX: only install the default when no subclass declared its own
        # class-level ``_cookies`` (e.g. the Blogspot providers set
        # {'NCR': 1}); the old unconditional assignment shadowed those
        # class attributes, so their cookies were never sent.
        if not hasattr(self, '_cookies'):
            self._cookies = {}
        self._proxies = set()
        # concurrent connections on the current provider
        self._sem_provider = asyncio.Semaphore(max_conn)
        self._loop = loop or asyncio.get_event_loop()

    @property
    def proxies(self):
        """Return all found proxies.

        :return:
            Set of tuples with proxy hosts, ports and types (protocols)
            that may be supported (from :attr:`.proto`).

            For example:
                {('192.168.0.1', '80', ('HTTP', 'HTTPS'), ...)}

        :rtype: set
        """
        return self._proxies

    @proxies.setter
    def proxies(self, new):
        # Accumulates across pages; entries without a port are dropped.
        new = [(host, port, self.proto) for host, port in new if port]
        self._proxies.update(new)

    async def get_proxies(self):
        """Receive proxies from the provider and return them.

        :return: :attr:`.proxies`
        """
        log.debug('Try to get proxies from %s' % self.domain)

        async with aiohttp.ClientSession(headers=get_headers(),
                                         cookies=self._cookies,
                                         loop=self._loop) as self._session:
            await self._pipe()

        log.debug('%d proxies received from %s: %s' % (
            len(self.proxies), self.domain, self.proxies))
        return self.proxies

    async def _pipe(self):
        # Default crawl: a single page; subclasses override to follow links.
        await self._find_on_page(self.url)

    async def _find_on_pages(self, urls):
        if not urls:
            return
        tasks = []
        if not isinstance(urls[0], dict):
            urls = set(urls)  # dedupe plain string URLs
        for url in urls:
            if isinstance(url, dict):
                # Dict entries carry request parameters (data/method/...).
                tasks.append(self._find_on_page(**url))
            else:
                tasks.append(self._find_on_page(url))
        await asyncio.gather(*tasks)

    async def _find_on_page(self, url, data=None, headers=None, method='GET'):
        page = await self.get(url, data=data, headers=headers, method=method)
        oldcount = len(self.proxies)
        try:
            received = self.find_proxies(page)
        except Exception as e:
            received = []
            log.error('Error when executing find_proxies.'
                      'Domain: %s; Error: %r' % (self.domain, e))
        self.proxies = received
        added = len(self.proxies) - oldcount
        log.debug('%d(%d) proxies added(received) from %s' % (
            added, len(received), url))

    async def get(self, url, data=None, headers=None, method='GET'):
        """Fetch *url*, retrying up to ``_max_tries`` times; '' on failure."""
        page = ''  # ROBUSTNESS: keep `page` bound even if _max_tries < 1
        for _ in range(self._max_tries):
            page = await self._get(
                url, data=data, headers=headers, method=method)
            if page:
                break
        return page

    async def _get(self, url, data=None, headers=None, method='GET'):
        page = ''
        try:
            with (await self._sem_provider),\
                    async_timeout.timeout(self._timeout, loop=self._loop):
                async with self._session.request(
                        method, url, data=data, headers=headers) as resp:
                    page = await resp.text()
                    if resp.status != 200:
                        log.debug(
                            'url: %s\nheaders: %s\ncookies: %s\npage:\n%s' % (
                                url, resp.headers, resp.cookies, page))
                        raise BadStatusError('Status: %s' % resp.status)
        except (UnicodeDecodeError, BadStatusError, asyncio.TimeoutError,
                aiohttp.ClientOSError, aiohttp.ClientResponseError,
                aiohttp.ServerDisconnectedError) as e:
            # Network/protocol failures are expected here; callers retry.
            page = ''
            log.debug('%s is failed. Error: %r;' % (url, e))
        return page

    def find_proxies(self, page):
        return self._find_proxies(page)

    def _find_proxies(self, page):
        proxies = self._pattern.findall(page)
        return proxies
class Freeproxylists_com(Provider):
    """freeproxylists.com: scrape the socks/elite/anonymous listing pages."""
    domain = 'freeproxylists.com'

    async def _pipe(self):
        # Capture (type, unix-timestamp) pairs from listing hrefs, then load
        # each timestamped raw list.
        exp = r'''href\s*=\s*['"](?P<t>[^'"]*)/(?P<uts>\d{10})[^'"]*['"]'''
        urls = ['http://www.freeproxylists.com/socks.html',
                'http://www.freeproxylists.com/elite.html',
                'http://www.freeproxylists.com/anonymous.html']
        pages = await asyncio.gather(*[self.get(url) for url in urls])
        params = re.findall(exp, ''.join(pages))
        tpl = 'http://www.freeproxylists.com/load_{}_{}.html'
        # example: http://www.freeproxylists.com/load_socks_1448724717.html
        urls = [tpl.format(t, uts) for t, uts in params]
        await self._find_on_pages(urls)
class Blogspot_com_base(Provider):
    """Shared crawl logic for blogspot-hosted proxy blogs (see subclasses)."""
    # Presumably Google's "no country redirect" cookie — TODO confirm.
    _cookies = {'NCR': 1}

    async def _pipe(self):
        # Collect dated post links (/YYYY/MM/...) from every blog front page.
        exp = r'''<a href\s*=\s*['"]([^'"]*\.\w+/\d{4}/\d{2}/[^'"#]*)['"]>'''
        pages = await asyncio.gather(*[
            self.get('http://%s/' % d) for d in self.domains])
        urls = re.findall(exp, ''.join(pages))
        await self._find_on_pages(urls)
class Blogspot_com(Blogspot_com_base):
    """Blogspot proxy blogs publishing generic proxy lists."""
    domain = 'blogspot.com'
    domains = ['sslproxies24.blogspot.com', 'proxyserverlist-24.blogspot.com',
               'freeschoolproxy.blogspot.com', 'googleproxies24.blogspot.com']
class Blogspot_com_socks(Blogspot_com_base):
    """SOCKS-oriented blogspot provider (socks24.org)."""
    domain = 'blogspot.com^socks'
    domains = ['www.socks24.org', ]
class Webanetlabs_net(Provider):
    """webanetlabs.net: follow proxylist_at_* links from the publications page."""
    domain = 'webanetlabs.net'

    async def _pipe(self):
        href_re = r'''href\s*=\s*['"]([^'"]*proxylist_at_[^'"]*)['"]'''
        listing = await self.get('https://webanetlabs.net/publ/24')
        list_urls = ['https://webanetlabs.net%s' % path
                     for path in re.findall(href_re, listing)]
        await self._find_on_pages(list_urls)
class Checkerproxy_net(Provider):
    """checkerproxy.net: fetch dated archive dumps through the /api endpoint."""
    domain = 'checkerproxy.net'

    async def _pipe(self):
        archive_re = r'''href\s*=\s*['"](/archive/\d{4}-\d{2}-\d{2})['"]'''
        front_page = await self.get('https://checkerproxy.net/')
        archive_urls = ['https://checkerproxy.net/api%s' % path
                        for path in re.findall(archive_re, front_page)]
        await self._find_on_pages(archive_urls)
class Proxz_com(Provider):
    """proxz.com: pages are URL-quoted; unquote before the generic scan."""
    domain = 'proxz.com'

    def find_proxies(self, page):
        return self._find_proxies(unquote(page))

    async def _pipe(self):
        # Crawl every paginated high-anonymous list linked from page 0.
        exp = r'''href\s*=\s*['"]([^'"]?proxy_list_high_anonymous_[^'"]*)['"]'''  # noqa
        url = 'http://www.proxz.com/proxy_list_high_anonymous_0.html'
        page = await self.get(url)
        urls = ['http://www.proxz.com/%s' % path
                for path in re.findall(exp, page)]
        urls.append(url)
        await self._find_on_pages(urls)
class Proxy_list_org(Provider):
    """proxy-list.org: proxies appear as base64-encoded Proxy('...') calls."""
    domain = 'proxy-list.org'
    _pattern = re.compile(r'''Proxy\('([\w=]+)'\)''')

    def find_proxies(self, page):
        # Each base64 payload decodes to a "host:port" string.
        return [b64decode(hp).decode().split(':')
                for hp in self._find_proxies(page)]

    async def _pipe(self):
        exp = r'''href\s*=\s*['"]\./([^'"]?index\.php\?p=\d+[^'"]*)['"]'''
        url = 'http://proxy-list.org/english/index.php?p=1'
        page = await self.get(url)
        urls = ['http://proxy-list.org/english/%s' % path
                for path in re.findall(exp, page)]
        urls.append(url)
        await self._find_on_pages(urls)
class Aliveproxy_com(Provider):
    """aliveproxy.com: fixed set of per-category listing pages."""
    # more: http://www.aliveproxy.com/socks-list/socks5.aspx/United_States-us
    domain = 'aliveproxy.com'

    async def _pipe(self):
        paths = [
            'socks5-list', 'high-anonymity-proxy-list', 'anonymous-proxy-list',
            'fastest-proxies', 'us-proxy-list', 'gb-proxy-list',
            'fr-proxy-list', 'de-proxy-list', 'jp-proxy-list', 'ca-proxy-list',
            'ru-proxy-list', 'proxy-list-port-80', 'proxy-list-port-81',
            'proxy-list-port-3128', 'proxy-list-port-8000',
            'proxy-list-port-8080']
        urls = ['http://www.aliveproxy.com/%s/' % path for path in paths]
        await self._find_on_pages(urls)


# NOTE: redirects to who knows where (translated from the original Russian comment)
class Maxiproxies_com(Provider):
    """maxiproxies.com: follow 'example' post links from the proxy-lists category."""
    domain = 'maxiproxies.com'

    async def _pipe(self):
        exp = r'''<a href\s*=\s*['"]([^'"]*example[^'"#]*)['"]>'''
        page = await self.get('http://maxiproxies.com/category/proxy-lists/')
        urls = re.findall(exp, page)
        await self._find_on_pages(urls)
class _50kproxies_com(Provider):
    """50kproxies.com: follow *-proxy-list-* post links from the category page."""
    domain = '50kproxies.com'

    async def _pipe(self):
        exp = r'''<a href\s*=\s*['"]([^'"]*-proxy-list-[^'"#]*)['"]>'''
        page = await self.get('http://50kproxies.com/category/proxy-list/')
        urls = re.findall(exp, page)
        await self._find_on_pages(urls)
class Proxylist_me(Provider):
    """proxylist.me: paginate through every listing page."""
    domain = 'proxylist.me'

    async def _pipe(self):
        # Highest page number linked from the front page bounds the crawl.
        exp = r'''href\s*=\s*['"][^'"]*/?page=(\d+)['"]'''
        page = await self.get('https://proxylist.me/')
        lastId = max([int(n) for n in re.findall(exp, page)])
        # NOTE(review): range(lastId) requests pages 0..lastId-1; confirm the
        # site numbers its pages from 0, otherwise the last page is skipped.
        urls = ['https://proxylist.me/?page=%d' % n for n in range(lastId)]
        await self._find_on_pages(urls)
class Foxtools_ru(Provider):
    """foxtools.ru: plain-text API, pages 1 through 5."""
    domain = 'foxtools.ru'

    async def _pipe(self):
        base = 'http://api.foxtools.ru/v2/Proxy.txt?page=%d'
        await self._find_on_pages([base % page for page in range(1, 6)])
class Gatherproxy_com(Provider):
    """gatherproxy.com: POST paginated queries for each anonymity level."""
    domain = 'gatherproxy.com'
    # IP followed (non-greedily) by either another IP or a quoted hex port.
    _pattern_h = re.compile(
        r'''(?P<ip>(?:(?:25[0-5]|2[0-4]\d|[01]?\d\d?)\.){3}(?:25[0-5]|2[0-4]\d|[01]?\d\d?))'''  # noqa
        r'''(?=.*?(?:(?:(?:(?:25[0-5]|2[0-4]\d|[01]?\d\d?)\.){3}(?:25[0-5]|2[0-4]\d|[01]?\d\d?))|'(?P<port>[\d\w]+)'))''',  # noqa
        flags=re.DOTALL)

    def find_proxies(self, page):
        # Ports are published as hex strings; convert them to decimal.
        return [(host, str(int(port, 16)))
                for host, port in self._pattern_h.findall(page) if port]

    async def _pipe(self):
        url = 'http://www.gatherproxy.com/proxylist/anonymity/'
        expNumPages = r'href="#(\d+)"'
        method = 'POST'
        urls = []
        for t in ['anonymous', 'elite']:
            data = {'Type': t, 'PageIdx': 1}
            page = await self.get(url, data=data, method=method)
            if not page:
                continue
            lastPageId = max([int(n) for n in re.findall(expNumPages, page)])
            # BUG FIX: accumulate across anonymity levels — the previous
            # assignment overwrote the 'anonymous' URLs with the 'elite'
            # ones, so only the last level was ever crawled.
            urls += [{'url': url, 'data': {'Type': t, 'PageIdx': pid},
                      'method': method} for pid in range(1, lastPageId + 1)]
        await self._find_on_pages(urls)
class Gatherproxy_com_socks(Provider):
    """gatherproxy.com SOCKS list: a single POST endpoint."""
    domain = 'gatherproxy.com^socks'

    async def _pipe(self):
        urls = [{'url': 'http://www.gatherproxy.com/sockslist/',
                 'method': 'POST'}]
        await self._find_on_pages(urls)
class Tools_rosinstrument_com_base(Provider):
    """tools.rosinstrument.com: the proxy table is hidden behind a simple XOR cipher."""
    # more: http://tools.rosinstrument.com/cgi-bin/
    # sps.pl?pattern=month-1&max=50&nskip=0&file=proxlog.csv
    domain = 'tools.rosinstrument.com'
    sqrtPattern = re.compile(r'''sqrt\((\d+)\)''')
    bodyPattern = re.compile(r'''hideTxt\(\n*'(.*)'\);''')
    _pattern = re.compile(
        r'''(?:(?P<domainOrIP>(?:[a-z0-9\-.]+\.[a-z]{2,6})|'''
        r'''(?:(?:(?:25[0-5]|2[0-4]\d|[01]?\d\d?)\.){3}'''
        r'''(?:25[0-5]|2[0-4]\d|[01]?\d\d?))))(?=.*?(?:(?:'''
        r'''[a-z0-9\-.]+\.[a-z]{2,6})|(?:(?:(?:25[0-5]|2[0-4]\d|[01]?\d\d?)'''
        r'''\.){3}(?:25[0-5]|2[0-4]\d|[01]?\d\d?))|(?P<port>\d{2,5})))''',
        flags=re.DOTALL)

    def find_proxies(self, page):
        # The XOR key is published inside the page as sqrt(N).
        x = self.sqrtPattern.findall(page)
        if not x:
            return []
        x = round(sqrt(float(x[0])))
        # Undo the obfuscation layers in turn: URL-unquote the hideTxt()
        # payload, XOR every odd-indexed character with the key, then
        # HTML-unescape the result before the generic scan.
        hiddenBody = self.bodyPattern.findall(page)[0]
        hiddenBody = unquote(hiddenBody)
        toCharCodes = [ord(char) ^ (x if i % 2 else 0)
                       for i, char in enumerate(hiddenBody)]
        fromCharCodes = ''.join([chr(n) for n in toCharCodes])
        page = unescape(fromCharCodes)
        return self._find_proxies(page)
class Tools_rosinstrument_com(Tools_rosinstrument_com_base):
    """tools.rosinstrument.com tables t=1 and t=2, 51 pages each (presumably HTTP lists — confirm)."""
    domain = 'tools.rosinstrument.com'

    async def _pipe(self):
        tpl = 'http://tools.rosinstrument.com/raw_free_db.htm?%d&t=%d'
        urls = [tpl % (pid, t) for pid in range(51) for t in range(1, 3)]
        await self._find_on_pages(urls)
class Tools_rosinstrument_com_socks(Tools_rosinstrument_com_base):
    """tools.rosinstrument.com table t=3 (SOCKS, per the class name), 51 pages."""
    domain = 'tools.rosinstrument.com^socks'

    async def _pipe(self):
        tpl = 'http://tools.rosinstrument.com/raw_free_db.htm?%d&t=3'
        urls = [tpl % pid for pid in range(51)]
        await self._find_on_pages(urls)
class Xseo_in(Provider):
    """xseo.in: ports are rendered by JS as ("" + a+b+c) sums of letter variables."""
    domain = 'xseo.in'
    charEqNum = {}

    def char_js_port_to_num(self, matchobj):
        """re.sub callback: translate one scripted port into its digit string."""
        chars = matchobj.groups()[0]
        num = ''.join([self.charEqNum[ch] for ch in chars if ch != '+'])
        return num

    def find_proxies(self, page):
        # Build the letter->digit table from `a=5;` declarations, inline all
        # scripted ports, then run the generic IP:port scan.
        expPortOnJS = r'\(""\+(?P<chars>[a-z+]+)\)'
        expCharNum = r'\b(?P<char>[a-z])=(?P<num>\d);'
        self.charEqNum = {char: i for char, i in re.findall(expCharNum, page)}
        page = re.sub(expPortOnJS, self.char_js_port_to_num, page)
        return self._find_proxies(page)

    async def _pipe(self):
        await self._find_on_page(
            url='http://xseo.in/proxylist', data={'submit': 1}, method='POST')
class Nntime_com(Provider):
    """nntime.com: ports are rendered by JS as (":" + a+b+c) sums of letter variables."""
    domain = 'nntime.com'
    charEqNum = {}
    # IP followed (non-greedily) by either another IP or a port group.
    _pattern = re.compile(
        r'''\b(?P<ip>(?:(?:25[0-5]|2[0-4]\d|[01]?\d\d?)\.){3}'''
        r'''(?:25[0-5]|2[0-4]\d|[01]?\d\d?))(?=.*?(?:(?:(?:(?:25'''
        r'''[0-5]|2[0-4]\d|[01]?\d\d?)\.){3}(?:25[0-5]|2[0-4]\d|[01]?\d\d?)'''
        r''')|(?P<port>\d{2,5})))''',
        flags=re.DOTALL)

    def char_js_port_to_num(self, matchobj):
        """re.sub callback: translate one scripted port into its digit string."""
        chars = matchobj.groups()[0]
        num = ''.join([self.charEqNum[ch] for ch in chars if ch != '+'])
        return num

    def find_proxies(self, page):
        # Build the letter->digit table from `a=5;` declarations, inline all
        # scripted ports, then run the class pattern.
        expPortOnJS = r'\(":"\+(?P<chars>[a-z+]+)\)'
        expCharNum = r'\b(?P<char>[a-z])=(?P<num>\d);'
        self.charEqNum = {char: i for char, i in re.findall(expCharNum, page)}
        page = re.sub(expPortOnJS, self.char_js_port_to_num, page)
        return self._find_proxies(page)

    async def _pipe(self):
        # 30 daily pages: proxy-updated-01.htm ... proxy-updated-30.htm
        tpl = 'http://www.nntime.com/proxy-updated-{:02}.htm'
        urls = [tpl.format(n) for n in range(1, 31)]
        await self._find_on_pages(urls)
class Proxynova_com(Provider):
    """proxynova.com: one listing page per two-letter country code."""
    domain = 'proxynova.com'

    async def _pipe(self):
        # Country codes are embedded as quoted two-letter strings; 'en' is
        # excluded (it matches the pattern but is not a country page).
        expCountries = r'"([a-z]{2})"'
        page = await self.get('https://www.proxynova.com/proxy-server-list/')
        tpl = 'https://www.proxynova.com/proxy-server-list/country-%s/'
        urls = [tpl % isoCode for isoCode in re.findall(expCountries, page)
                if isoCode != 'en']
        await self._find_on_pages(urls)
class Spys_ru(Provider):
domain = 'spys.ru'
charEqNum = {}
def char_js_port_to_num(self, matchobj):
chars = matchobj.groups()[0].split('+')
# ex: '+(i9w3m3^k1y5)+(g7g7g7^v2e5)+(d4r8o5^i9u1)+(y5c3e5^t0z6)'
# => | |
route into Intake Chords Func, else an empty Set"""
chords_set = set()
if not self.intake_bypass:
chords_set = self.intake_chords_set
return chords_set
def init_unichars_func(self, unichars, optchords):
"""Let people type the From Chars in place of the To Chars"""
unichords = unichars.encode()
funcs = self.func_by_chords
suffixes_by_chords = self.suffixes_by_chords
optfunc = funcs[optchords]
if optchords in suffixes_by_chords.keys():
self._init_suffix_func(unichords, func=optfunc)
else:
self._init_func(unichords, func=optfunc)
def to_optchords(self, optchars):
"""Pick out the Keyboard Input Chords of a Key shifted by the Option Key"""
# pylint: disable=no-self-use
if optchars.startswith("⌥"):
optchords = b"\x1B" # ESC, ⌃[, 27
assert optchars[1] == optchars[1].upper()
optchords += optchars[1].lower().encode()
opttail = optchars[2:]
elif optchars.startswith("⇧⌥:"):
optchords = b":" + optchars[len("⇧⌥:") :].replace("⇧", "").encode()
return optchords # TODO: make this less ugly
else:
assert optchars.startswith("⇧⌥")
optchords = b"\x1B" # ESC, ⌃[, 27
assert optchars[2] == optchars[2].upper()
optchords += optchars[2].encode()
opttail = optchars[3:]
if opttail:
if (len(opttail) == 1) and (opttail in BASIC_LATIN_CHARS_SET):
optchords += opttail.encode().lower() # Em Py ⌥GG, Vi Py ⌥EE, etc
elif opttail == "Tab":
optchords += b"\x09" # TAB, ⌃I, 9 \t
elif (len(opttail) == 2) and (opttail[-1] in string.ascii_uppercase):
if opttail[0] == "⌥": # Em Py ⌥G⌥G, Vi Py ⌥E⌥E, etc
optchords += b"\x1B" + opttail[-1].encode().lower() # ESC, ⌃[, 27
else:
assert opttail[0] == "⇧"
optchords += opttail[-1].encode()
elif (len(opttail) == 3) and (opttail[-1] in string.ascii_uppercase):
assert opttail[:2] == "⇧⌥"
assert opttail[:2] == "⇧⌥", repr(opttail) # Vi Py ⇧⌥E⇧⌥E, etc
optchords += b"\x1B" + opttail[-1].encode() # ESC, ⌃[, 27
else:
assert not opttail, repr(opttail)
# TODO: loosen up this logic to accept more additions before they arrive
return optchords
class ViPyNameError(NameError):
    """Complain of trouble like a NameError, but point at the Vi Py keymap"""
class TerminalKeyboardVi(TerminalKeyboard):
"""Map Keyboard Inputs to Code, for when feeling like Vi"""
# pylint: disable=too-few-public-methods
# pylint: disable=too-many-instance-attributes
    def __init__(self, vi):
        """Bind this Keyboard to one TerminalVi and its TerminalEditor"""
        super().__init__()
        self.vi = vi
        self.editor = vi.editor
        # Delegate status formatting, cursor placement, and prefix-chord
        # collection/eval to the TerminalVi
        self.format_status_func = vi.format_vi_status
        self.place_cursor_func = vi.place_vi_cursor
        self.do_prefix_chord_func = vi.do_vi_prefix_chord
        self.eval_prefix_func = vi.eval_vi_prefix
        # Fill the Func-by-Chords tables created by super().__init__()
        self._init_by_vi_chords_()
        # Hooks run around each dispatched Func
        # -- presumably by the TerminalKeyboard base; confirm there
        self.enter_do_func = vi.enter_do_vi
        self.exit_do_func = vi.exit_do_vi
def _init_by_vi_chords_(self):
# pylint: disable=too-many-statements
editor = self.editor
funcs = self.func_by_chords
vi = self.vi
# Define the C0_CONTROL_STDINS
# funcs[b"\x00"] = vi.do_c0_control_nul # NUL, ⌃@, 0
# funcs[b"\x01"] = vi.do_c0_control_soh # SOH, ⌃A, 1
funcs[b"\x02"] = vi.do_scroll_behind_much # STX, ⌃B, 2
funcs[b"\x03"] = vi.do_vi_c0_control_etx # ETX, ⌃C, 3
# funcs[b"\x04"] = vi.do_scroll_ahead_some # EOT, ⌃D, 4
funcs[b"\x05"] = vi.do_scroll_ahead_one # ENQ, ⌃E, 5
funcs[b"\x06"] = vi.do_scroll_ahead_much # ACK, ⌃F, 6
funcs[b"\x07"] = vi.do_say_more # BEL, ⌃G, 7 \a
funcs[b"\x08"] = vi.do_slip_behind # BS, ⌃H, 8 \b
# funcs[b"\x09"] = vi.do_c0_control_tab # TAB, ⌃I, 9 \t
funcs[b"\x0A"] = vi.do_step_down_seek # LF, ⌃J, 10 \n
# funcs[b"\x0B"] = vi.do_c0_control_vt # VT, ⌃K, 11 \v
funcs[b"\x0C"] = editor.do_redraw # FF, ⌃L, 12 \f
funcs[b"\x0D"] = vi.do_step_down_dent # CR, ⌃M, 13 \r
funcs[b"\x0E"] = vi.do_step_down_seek # SO, ⌃N, 14
funcs[b"\x0F"] = vi.do_vi_c0_control_si # SI, ⌃O, 15
funcs[b"\x10"] = vi.do_step_up_seek # DLE, ⌃P, 16
# funcs[b"\x11"] = vi.do_c0_control_dc1 # DC1, XON, ⌃Q, 17
# funcs[b"\x12"] = vi.do_c0_control_dc2 # DC2, ⌃R, 18
# funcs[b"\x13"] = vi.do_c0_control_dc3 # DC3, XOFF, ⌃S, 19
# funcs[b"\x14"] = vi.do_c0_control_dc4 # DC4, ⌃T, 20
# funcs[b"\x15"] = vi.do_scroll_behind_some # NAK, ⌃U, 21
funcs[b"\x16"] = vi.do_vi_c0_control_syn # SYN, ⌃V, 22
# funcs[b"\x17"] = vi.do_c0_control_etb # ETB, ⌃W, 23
# funcs[b"\x18"] = vi.do_c0_control_can # CAN, ⌃X , 24
funcs[b"\x19"] = vi.do_scroll_behind_one # EM, ⌃Y, 25
funcs[b"\x1A"] = vi.do_vi_suspend_frame # SUB, ⌃Z, 26
funcs[b"\x1B"] = vi.do_vi_c0_control_esc # ESC, ⌃[, 27
funcs[b"\x1B[A"] = vi.do_step_up_seek # ↑ Up-Arrow
funcs[b"\x1B[B"] = vi.do_step_down_seek # ↓ Down-Arrow
funcs[b"\x1B[C"] = vi.do_slip_right # → Right-Arrow
funcs[b"\x1B[D"] = vi.do_slip_left # ← Left-Arrow
funcs[b"\x1Bb"] = vi.do_big_word_start_behind # ⌥← Option Left-Arrow, ⌥B
funcs[b"\x1Bf"] = vi.do_big_word_start_ahead # ⌥→ Option Right-Arrow, ⌥F
# funcs[b"\x1C"] = vi.do_eval_vi_line # FS, ⌃\, 28
# funcs[b"\x1D"] = vi.do_c0_control_gs # GS, ⌃], 29
# funcs[b"\x1E"] = vi.do_c0_control_rs # RS, ⌃^, 30
# funcs[b"\x1F"] = vi.do_c0_control_us # US, ⌃_, 31
funcs[b"\x7F"] = vi.do_slip_behind # DEL, ⌃?, 127
# Define the BASIC_LATIN_STDINS
funcs[b" "] = vi.do_slip_ahead
# funcs[b"!"] = vi.do_pipe
# funcs[b'"'] = vi.do_arg
funcs[b"#"] = vi.do_find_behind_vi_this
funcs[b"$"] = vi.do_slip_max_seek
# funcs[b"%"] # TODO: leap to match
# funcs[b"&"] # TODO: & and && for repeating substitution
# funcs[b"'"] # TODO: leap to pin
# funcs[b"("] # TODO: sentence behind
# funcs[b")"] # TODO: sentence ahead
funcs[b"*"] = vi.do_find_ahead_vi_this
funcs[b"+"] = vi.do_step_down_dent
funcs[b","] = vi.do_slip_undo
funcs[b"-"] = vi.do_step_up_dent
funcs[b"."] = vi.do_replay_cut
funcs[b"/"] = vi.do_find_ahead_vi_line
funcs[b"0"] = vi.do_slip_first
funcs[b"1"] = vi.do_vi_digit_argument
funcs[b"2"] = vi.do_vi_digit_argument
funcs[b"3"] = vi.do_vi_digit_argument
funcs[b"4"] = vi.do_vi_digit_argument
funcs[b"5"] = vi.do_vi_digit_argument
funcs[b"6"] = vi.do_vi_digit_argument
funcs[b"7"] = vi.do_vi_digit_argument
funcs[b"8"] = vi.do_vi_digit_argument
funcs[b"9"] = vi.do_vi_digit_argument
self._init_corrector(b":/", corrections=b"/")
self._init_corrector(b":?", corrections=b"?")
# FIXME: Solve Vi Py ⌥⇧:/ ⌥⇧:?
# self._init_func(b":em\r", func=em.do_resume_em)
# self._init_func(b":g?", func=vi.do_find_leading_vi_lines)
self._init_func(b":g/", func=vi.do_find_trailing_vi_lines)
self._init_func(b":n!\r", func=vi.do_next_vi_file)
self._init_func(b":n\r", func=vi.do_might_next_vi_file)
self._init_func(b":q!\r", func=vi.do_quit_vi)
self._init_func(b":q\r", func=vi.do_might_quit_vi)
self._init_func(b":vi\r", func=vi.do_resume_vi)
self._init_func(b":w!\r", func=vi.do_vi_save_buffer)
self._init_func(b":w\r", func=vi.do_might_vi_save_buffer)
self._init_func(b":wn!\r", func=vi.do_flush_next_vi)
self._init_func(b":wn\r", func=vi.do_might_flush_next_vi)
self._init_func(b":wq!\r", func=vi.do_flush_quit_vi)
self._init_func(b":wq\r", func=vi.do_might_flush_quit_vi)
# TODO: think deeper into Vim ⇧:
funcs[b";"] = vi.do_slip_redo
# funcs[b"<"] # TODO: dedent
# funcs[b"="] # TODO: dent after
# funcs[b">"] # TODO: indent
funcs[b"?"] = vi.do_find_behind_vi_line
# self._init_suffix_func(b"@", func=vi.do_replay_from_choice)
funcs[b"A"] = vi.do_slip_beyond_last_take_inserts
funcs[b"B"] = vi.do_big_word_start_behind
funcs[b"C"] = vi.do_chop_take_inserts
funcs[b"D"] = vi.do_chop
funcs[b"E"] = vi.do_big_word_end_ahead
self._init_suffix_func(b"F", func=vi.do_slip_rindex_choice)
funcs[b"G"] = vi.do_step_for_count
funcs[b"H"] = vi.do_step_max_high
funcs[b"I"] = vi.do_slip_dent_take_inserts
funcs[b"J"] = vi.do_slip_last_join_right
# funcs[b"K"] = vi.do_lookup
funcs[b"L"] = vi.do_step_max_low
funcs[b"M"] = vi.do_step_to_middle
funcs[b"N"] = vi.do_vi_find_earlier
funcs[b"O"] = vi.do_slip_first_split_take_inserts
# funcs[b"P"] = vi.do_paste_behind
self._init_func(b"Qvi\r", func=vi.do_continue_vi)
self._init_func(b"QZ", func=vi.do_talk_of_shift_z_shift_q)
self._init_func(b"Qz", func=vi.do_talk_of_shift_z_shift_q)
# TODO: think deeper into Vim Q
funcs[b"R"] = vi.do_take_replaces
funcs[b"S"] = vi.do_slip_first_chop_take_inserts
self._init_suffix_func(b"T", func=vi.do_slip_rindex_plus_choice)
# funcs[b"U"] = vi.do_row_undo
# funcs[b"V"] = vi.do_gloss_rows
funcs[b"W"] = vi.do_big_word_start_ahead
funcs[b"X"] = vi.do_cut_behind
# funcs[b"Y"] = vi.do_copy_row
self._init_func(b"ZQ", func=vi.do_quit_vi)
self._init_func(b"ZZ", func=vi.do_flush_quit_vi)
self._init_func(b"Zq", func=vi.do_talk_of_shift_z_shift_q)
self._init_func(b"Zz", func=vi.do_talk_of_shift_z_shift_z)
# funcs[b"["] # TODO: b"["
self._init_func(b"\\F", func=editor.do_set_invregex)
self._init_func(b"\\i", func=editor.do_set_invignorecase)
self._init_func(b"\\n", func=editor.do_set_invnumber)
# TODO: stop commandeering the personal \Esc \⇧F \I \N Chord Sequences
# funcs[b"]"] # TODO: b"]"
funcs[b"^"] = vi.do_slip_dent
funcs[b"_"] = vi.do_step_down_minus_dent
# funcs[b"`"] # TODO: close to b"'"
funcs[b"a"] = vi.do_slip_take_inserts
funcs[b"b"] = vi.do_lil_word_start_behind
funcs[b"c"] = vi.do_cut_back_after_take_inserts
funcs[b"d"] = vi.do_cut_back_after
funcs[b"e"] = vi.do_lil_word_end_ahead
self._init_suffix_func(b"f", func=vi.do_slip_index_choice)
self._init_corrector(b"g/", corrections=b":g/")
self._init_corrector(b"g?", corrections=b":g?")
# TODO: stop commandeering the personal g/ g? Chord Sequences
# FIXME: Solve Vi Py ⌥⇧:g/ ⌥⇧:g⇧?
# funcs[b"g"]
funcs[b"h"] = vi.do_slip_left
funcs[b"i"] = vi.do_take_inserts
funcs[b"j"] = vi.do_step_down_seek
funcs[b"k"] = vi.do_step_up_seek
funcs[b"l"] = vi.do_slip_right
# self._init_suffix_func(b"m", func=vi.do_drop_pin)
funcs[b"n"] = vi.do_vi_find_later
funcs[b"o"] = vi.do_slip_last_split_take_inserts
# funcs[b"p"] = vi.do_paste_ahead
self._init_suffix_func(b"q", func=vi.do_record_over_choice)
self._init_suffix_func(b"r", func=vi.do_replace_per_choice)
funcs[b"s"] = vi.do_cut_ahead_take_inserts
self._init_suffix_func(b"t", func=vi.do_slip_index_minus_choice)
# funcs[b"u"] = vi.do_undo
# funcs[b"v"] = vi.do_gloss_chars
funcs[b"w"] = vi.do_lil_word_start_ahead
funcs[b"x"] = vi.do_cut_ahead
# funcs[b"y"] = vi.do_copy_after
self._init_func(b"z.", func=vi.do_scroll_till_middle)
self._init_func(b"zb", func=vi.do_scroll_till_bottom)
self._init_func(b"zq", func=vi.do_talk_of_shift_z_shift_q)
self._init_func(b"zt", func=vi.do_scroll_till_top)
funcs[b"{"] = vi.do_paragraph_behind
funcs[b"|"] = vi.do_slip
funcs[b"}"] = vi.do_paragraph_ahead
# funcs[b"~"] = vi.do_flip_char_case
# Define Vi Py Esc Keyboard Input Chords, other than ⌥E ⌥I ⌥N ⌥U,
# found at Keyboard > Use Option as Meta Key = No
# inside macOS Terminal > Preferences > Profiles
vi_optchars_list = r"""
⇧⌥Z⇧⌥Q ⇧⌥Z⇧⌥Z ⇧⌥QVI⌃M
⇧⌥:g/ ⇧⌥:n⇧!⌃M ⇧⌥:n⌃M ⇧⌥:q⇧!⌃M ⇧⌥:q⌃M ⇧⌥:vi⌃M
⇧⌥:w⇧!⌃M ⇧⌥:w⌃M ⇧⌥:wn⇧!⌃M ⇧⌥:wn⌃M ⇧⌥:wq⇧!⌃M ⇧⌥:wq⌃M
⇧⌥$ ⇧⌥^ ⌥0 ⌥F ⇧⌥F ⌥T ⇧⌥T ⌥; ⌥, ⇧⌥| ⌥H ⌥L
⌥W ⌥EE ⌥B ⇧⌥W ⇧⌥E⇧⌥E ⇧⌥B ⇧⌥} ⇧⌥{
⇧⌥G ⇧⌥L ⇧⌥M ⇧⌥H ⇧⌥+ ⇧⌥_ ⌥- ⌥J ⌥K
⌥1 ⌥2 ⌥3 ⌥4 ⌥5 ⌥6 ⌥7 ⌥8 ⌥9
⌥ZT ⌥ZB ⌥Z.
⌥\I ⌥\N ⌥\⇧F
⌥/ ⇧⌥? ⇧⌥* ⇧⌥# ⌥NN ⇧⌥N⇧⌥N
⌥R ⌥A ⌥II ⌥O ⇧⌥R ⇧⌥A ⇧⌥I⇧⌥I ⇧⌥O
⌥X ⇧⌥X ⇧⌥D ⇧⌥J ⌥S ⇧⌥S ⇧⌥C ⌥D ⌥C
""".split()
# ⌥→ ⌥← not solved here
for optchars in vi_optchars_list:
kind0 = optchars.startswith("⌥")
kind1 = optchars.startswith("⇧⌥"), repr(optchars)
kind2 = optchars == "⇧⌥QVI⌃M"
assert kind0 or kind1 or kind2
for optchars in vi_optchars_list:
unichars = TerminalNudgeIn.UNICHARS_BY_OPTCHARS[optchars]
if optchars == "⇧⌥QVI⌃M":
alt_optchords = b"Qvi\r"
elif optchars.startswith("⇧⌥:"):
alt_optchars = optchars
alt_optchars = alt_optchars[len("⇧⌥") :] # keep only the ":"
alt_optchars = alt_optchars.replace("⇧", "")
alt_optchars = alt_optchars.replace("⌃M", "\r")
alt_optchords = alt_optchars.encode()
else:
optchords = self.to_optchords(optchars)
assert optchords[:1] == b"\x1B", repr(optchords) # ESC, ⌃[, 27
alt_optchords = optchords.replace(b"\x1B", b"")
if alt_optchords in (b"ee", b"EE", b"ii", b"II", b"nn", b"NN"):
alt_optchords = alt_optchords[-1:] # TODO: b"uu", b"UU"
self.init_unichars_func(unichars, | |
: {'none', 'deterministic', 'fft', 'fft_tiling', 'winograd', 'guess_once',
'guess_on_shape_change', 'time_once', 'time_on_shape_change'}
Default is the value of :attr:`config.dnn.conv.algo_bwd_data`.
"""
__props__ = ('algo', 'inplace',)
__input_name__ = ('kernel', 'grad', 'output', 'descriptor', 'alpha',
'beta')
    def __init__(self, inplace=False, workmem=None, algo=None):
        """Set up the bwd-data conv Op.

        `workmem` is a deprecated alias for `algo`; `inplace` lets the Op
        overwrite its `output` input (index 2).
        """
        COp.__init__(self, ["dnn_base.c", "dnn_conv_base.c", "dnn_gi.c"],
                     "APPLY_SPECIFIC(conv_gi)")
        if workmem is not None:
            warnings.warn(("GpuDnnConvGradI: parameter 'workmem' is "
                           "deprecated. Use 'algo' instead."), stacklevel=3)
            assert algo is None
            self.algo = workmem
        else:
            # Fall back to the configured default algorithm
            if algo is None:
                algo = config.dnn.conv.algo_bwd_data
            self.algo = algo
        self.inplace = inplace
        if self.inplace:
            # Output index 0 destroys (reuses) input index 2
            self.destroy_map = {0: [2]}
        # 'winograd' was introduced in cuDNN v5
        if version() < (5000, 5000):
            if self.algo == 'winograd':
                raise RuntimeError("cuDNN's winograd convolution requires "
                                   "cuDNN v5 or more recent")
        assert self.algo in ['none', 'deterministic', 'fft', 'fft_tiling',
                             'winograd', 'guess_once', 'guess_on_shape_change',
                             'time_once', 'time_on_shape_change']
def __setstate__(self, d):
self.__dict__.update(d)
if not hasattr(self, 'algo'):
if hasattr(self, 'workmem'):
self.algo = self.workmem
else:
self.algo = config.dnn.conv.algo_bwd_data
if not hasattr(self, 'inplace'):
self.inplace = False
self.load_c_code(["dnn_base.c", "dnn_conv_base.c", "dnn_gi.c"])
    def grad(self, inp, grads):
        """Gradients of the bwd-data conv w.r.t. its inputs.

        Uses the paired cuDNN Ops: bprop-weights for the kernel gradient and
        the forward conv for the topgrad gradient; desc is disconnected.
        """
        kerns, top, output, desc, alpha, beta = inp
        img, = grads
        img = gpu_contiguous(img)
        d_kerns = GpuDnnConvGradW()(img, top, gpu_alloc_empty(*kerns.shape),
                                    desc)
        d_top = GpuDnnConv()(img, kerns, gpu_alloc_empty(*top.shape), desc)
        # alpha/beta gradients are not implemented
        d_alpha = grad_not_implemented(self, 4, alpha)
        d_beta = grad_not_implemented(self, 5, beta)
        return (d_kerns * alpha, d_top * alpha, img * beta,
                DisconnectedType()(), d_alpha, d_beta)
def connection_pattern(self, node):
# not connected to desc
return [[1], [1], [1], [0], [1], [1]]
def get_op_params(self):
if self.inplace:
inplace_def = [('CONV_INPLACE', '1')]
else:
inplace_def = []
choose_alg = '0'
choose_alg_once = '0'
choose_alg_time = '0'
if version() == -1 or version() < (3000, 3000):
alg = "0"
else:
if self.algo == 'none':
alg = 'CUDNN_CONVOLUTION_BWD_DATA_ALGO_0'
elif self.algo == 'deterministic':
alg = 'CUDNN_CONVOLUTION_BWD_DATA_ALGO_1'
elif self.algo == 'fft':
# need v3, big workspace
alg = 'CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT'
elif self.algo == 'fft_tiling':
# need v4, big workspace, but less then fft
# need v5, for conv3d.
alg = 'CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT_TILING'
elif self.algo == 'winograd':
# need v5
alg = 'CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD'
elif self.algo in ['guess_once', 'guess_on_shape_change']:
# The convolution implementation should be chosen according
# to a heuristic
alg = 'CUDNN_CONVOLUTION_BWD_DATA_ALGO_0'
choose_alg = '1'
if self.algo == 'guess_once':
choose_alg_once = '1'
elif self.algo in ['time_once', 'time_on_shape_change']:
# The convolution implementation should be chosen according
# to timing
alg = 'CUDNN_CONVOLUTION_BWD_DATA_ALGO_0'
choose_alg = '1'
choose_alg_time = '1'
if self.algo == 'time_once':
choose_alg_once = '1'
alg_def = ('CONV_ALGO', alg)
alg_choose_def = ('CHOOSE_ALGO', choose_alg)
alg_choose_once_def = ('CHOOSE_ALGO_ONCE', choose_alg_once)
alg_choose_time_def = ('CHOOSE_ALGO_TIME', choose_alg_time)
return inplace_def + [alg_def, alg_choose_def, alg_choose_once_def,
alg_choose_time_def]
    def make_node(self, kern, topgrad, output, desc, alpha=None, beta=None):
        """Validate the inputs and build the Apply node.

        All tensor inputs must be 4D CUDA ndarrays; desc must be a
        cudnnConvolutionDescriptor_t; alpha/beta default to 1 and 0.
        """
        kern = as_cuda_ndarray_variable(kern)
        topgrad = as_cuda_ndarray_variable(topgrad)
        output = as_cuda_ndarray_variable(output)
        if kern.type.ndim != 4:
            raise TypeError('kern must be 4D tensor')
        if topgrad.type.ndim != 4:
            raise TypeError('topgrad must be 4D tensor')
        if output.type.ndim != 4:
            raise TypeError('output must be 4D tensor')
        if not isinstance(desc.type, CDataType) \
                or desc.type.ctype != 'cudnnConvolutionDescriptor_t':
            raise TypeError('desc must be cudnnConvolutionDescriptor_t')
        alpha = ensure_float(alpha, _one, 'alpha')
        beta = ensure_float(beta, _zero, 'beta')
        # Result has the same type as the 'output' input
        return Apply(self, [kern, topgrad, output, desc, alpha, beta],
                     [output.type()])
def infer_shape(self, node, shape):
return [shape[2]]
class GpuDnnConv3dGradI(GpuDnnConvGradI):
    """
    The 3d convolution gradient with respect to the inputs.

    Parameters
    ----------
    image
    kernel
    descr
        The convolution descriptor
    workmem
        *deprecated*, use parameter algo instead.
    algo : {'none', 'deterministic', 'fft_tiling', 'winograd', 'guess_once',
            'guess_on_shape_change', 'time_once', 'time_on_shape_change'}
        Default is the value of :attr:`config.dnn.conv.algo_bwd_data`.
    """

    __props__ = ('algo', 'inplace',)
    __input_name__ = ('kernel', 'grad', 'output', 'descriptor', 'alpha',
                      'beta')

    def __init__(self, inplace=False, workmem=None, algo=None):
        """Like GpuDnnConvGradI, restricted to the algorithms valid in 3d."""
        if workmem is not None:
            warnings.warn(("GpuDnnConv3dGradI: parameter 'workmem' is "
                           "deprecated. Use 'algo' instead."), stacklevel=3)
            assert algo is None
            algo = workmem
        good_algo = ['none', 'deterministic', 'fft_tiling', 'winograd',
                     'guess_once', 'guess_on_shape_change', 'time_once',
                     'time_on_shape_change']
        # Silently fall back to 'guess_once' when the requested or configured
        # algorithm is not valid for 3d convolutions
        if algo is None and config.dnn.conv.algo_bwd_data not in good_algo:
            algo = 'guess_once'
        elif algo is not None and algo not in good_algo:
            algo = 'guess_once'
        super(GpuDnnConv3dGradI, self).__init__(inplace=inplace,
                                                algo=algo)
        assert self.algo in good_algo
        # These two algorithms need cuDNN v5+ in the 3d case
        if version() < (5000, 5000):
            if self.algo == 'fft_tiling':
                raise RuntimeError("cuDNN 3d tiled-FFT convolution requires "
                                   "cuDNN v5 or more recent")
            elif self.algo == 'winograd':
                raise RuntimeError("cuDNN 3d winograd convolution requires "
                                   "cuDNN v5 or more recent")

    def grad(self, inp, grads):
        """Gradients via the paired 3d cuDNN Ops; desc is disconnected."""
        kerns, top, output, desc, alpha, beta = inp
        img, = grads
        img = gpu_contiguous(img)
        d_kerns = GpuDnnConv3dGradW()(img, top, gpu_alloc_empty(*kerns.shape),
                                      desc)
        d_top = GpuDnnConv3d()(img, kerns, gpu_alloc_empty(*top.shape), desc)
        d_alpha = grad_not_implemented(self, 4, alpha)
        d_beta = grad_not_implemented(self, 5, beta)
        return (d_kerns * alpha, d_top * alpha, img * beta,
                DisconnectedType()(), d_alpha, d_beta)

    def make_node(self, kern, topgrad, output, desc, alpha=None, beta=None):
        """Validate 5D CUDA ndarray inputs and build the Apply node."""
        kern = as_cuda_ndarray_variable(kern)
        topgrad = as_cuda_ndarray_variable(topgrad)
        output = as_cuda_ndarray_variable(output)
        if kern.type.ndim != 5:
            raise TypeError('kern must be 5D tensor')
        if topgrad.type.ndim != 5:
            raise TypeError('topgrad must be 5D tensor')
        if output.type.ndim != 5:
            raise TypeError('output must be 5D tensor')
        if not isinstance(desc.type, CDataType) \
                or desc.type.ctype != 'cudnnConvolutionDescriptor_t':
            raise TypeError('desc must be cudnnConvolutionDescriptor_t')
        alpha = ensure_float(alpha, _one, 'alpha')
        beta = ensure_float(beta, _zero, 'beta')
        return Apply(self, [kern, topgrad, output, desc, alpha, beta],
                     [output.type()])
def dnn_conv(img, kerns, border_mode='valid', subsample=(1, 1),
conv_mode='conv', direction_hint=None, workmem=None, algo=None,
precision=None):
"""
GPU convolution using cuDNN from NVIDIA.
The memory layout to use is 'bc01', that is 'batch', 'channel',
'first dim', 'second dim' in that order.
Parameters
----------
img
Images to do the convolution over.
kerns
Convolution filters.
border_mode
One of 'valid', 'full', 'half'; additionally, the padding size can be
directly specified by an integer or a pair of integers (as a tuple),
specifying the amount of zero padding added to _both_ the top and
bottom (first entry) and left and right (second entry) sides of
the image.
subsample
Perform subsampling of the output (default: (1, 1)).
conv_mode
Perform convolution (kernels flipped) or cross-correlation.
One of 'conv', 'cross' (default: 'conv').
direction_hint
Used by graph optimizers to change algorithm choice.
By default, GpuDnnConv will be used to carry out the convolution.
If border_mode is 'valid', subsample is (1,1) and direction_hint is
'bprop weights', it will use GpuDnnConvGradW.
If border_mode is 'full', subsample is (1,1) and direction_hint is
'bprop inputs', it will use GpuDnnConvGradI.
This parameter is used internally by graph optimizers and may be
removed at any time without a deprecation period. You have been warned.
workmem
*deprecated*, use parameter algo instead.
algo : {'none', 'small', 'large', 'fft', 'guess_once', 'guess_on_shape_change', 'time_once', 'time_on_shape_change'}
Convolution implementation to use. Some of its values may require certain
versions of cuDNN to be installed. Default is the value of
:attr:`config.dnn.conv.algo_fwd`.
precision : {'as_input_f32', 'as_input', 'float16', 'float32', 'float64'}
Description of the dtype in which the computation of the convolution
should be done. Possible values are 'as_input', 'float16', 'float32'
and 'float64'. Default is the value of
:attr:`config.dnn.conv.precision`.
"""
# For consistence, when using direction_hint too.
if border_mode == (0, 0):
border_mode = 'valid'
# Establish dtype in which to perform the computation of the convolution
if precision is None:
precision = theano.config.dnn.conv.precision
if precision == 'as_input' or precision == 'as_input_f32':
nprec = theano.scalar.upcast(img.dtype, kerns.dtype)
if nprec == 'float16' and precision == 'as_input_f32':
precision = 'float32'
else:
precision = nprec
# Check if deprecated param 'workmem' is used
if workmem is not None:
warnings.warn(("dnn_conv: parameter 'workmem' is deprecated. Use "
"'algo' instead."), stacklevel=3)
assert algo is None
algo = workmem
# Ensure the value of direction_hint is supported
assert direction_hint in [None, 'bprop weights', 'bprop inputs', 'forward']
fgraph = getattr(img, 'fgraph', None) or getattr(kerns, 'fgraph', None)
if (border_mode == 'valid' and subsample == (1, 1) and
direction_hint == 'bprop weights'):
# Special case: We are asked to use GpuDnnConvGradW. We need to set
# up a suitable 'fake' convolution to compute the gradient for.
img = gpu_contiguous(img.dimshuffle(1, 0, 2, 3))
if conv_mode == 'conv':
# We need to flip manually. These 'kerns' are not the kernels
# that would be flipped by conv_mode='conv' in GpuDnnConvGradW.
kerns = kerns[:, :, ::-1, ::-1]
kerns = gpu_contiguous(kerns.dimshuffle(1, 0, 2, 3))
out_shp = (shape_i(kerns, 1, fgraph),
shape_i(img, 1, fgraph),
shape_i(img, 2, fgraph) - shape_i(kerns, 2, fgraph) + 1,
shape_i(img, 3, fgraph) - shape_i(kerns, 3, fgraph) + 1)
out_shp = assert_conv_shape(out_shp)
out = gpu_alloc_empty(*out_shp)
desc = GpuDnnConvDesc(border_mode='valid', subsample=(1, 1),
conv_mode='cross', precision=precision)(img.shape,
out.shape)
conv = GpuDnnConvGradW()(img, kerns, out, desc)
return as_cuda_ndarray_variable(conv.dimshuffle(1, 0, 2, | |
import numpy as np
from .hetero_likelihoods import GaussianHeteroNoise, Gaussian
import gpflow
from gpflow.param import Param
from gpflow import transforms
from gpflow.model import Model
from gpflow.mean_functions import Zero
import tensorflow as tf
from gpflow.param import AutoFlow, DataHolder, ParamList
from gpflow._settings import settings
float_type = settings.dtypes.float_type
class GPModelAdaptiveNoiseLengthscaleMultDim(Model):
    """
    A base class for adaptive GP (non-stationary lengthscale and signal variance)
    regression models with heteroscedastic noise,
    wherein, noise is represented by a latent GP N(.)

    NOTE(review): subclasses must implement build_predict_l, build_predict_n,
    build_predict_f and build_pred_cov_f -- they are referenced but not
    defined here.
    """
    def __init__(self, X, Y, kern, nonstat, noisekern, name='adaptive_noise_lengthscale_gp_multdim'):
        # kern / nonstat / noisekern: kernels of the latent lengthscale GP,
        # the non-stationary data GP, and the latent noise GP respectively
        # -- presumably; confirm against concrete subclasses.
        Model.__init__(self, name)
        self.kern_type = kern
        self.nonstat = nonstat
        self.noisekern = noisekern
        self.likelihood = GaussianHeteroNoise()
        if isinstance(X, np.ndarray):
            #: X is a data matrix; each row represents one instance
            X = DataHolder(X)
        if isinstance(Y, np.ndarray):
            #: Y is a data matrix; rows correspond to the rows in X, columns are treated independently
            Y = DataHolder(Y)
        # Validate the targets against the likelihood before storing
        self.likelihood._check_targets(Y.value)
        self.X, self.Y = X, Y
        self._session = None

    @AutoFlow((float_type, [None, None]))
    def predict_l(self, Xnew):
        """
        Compute the mean and variance of the latent function(s)
        at the points `Xnew` (delegates to build_predict_l).
        """
        return self.build_predict_l(Xnew)

    @AutoFlow((float_type, [None, None]))
    def predict_n(self, Xnew):
        """
        Compute the mean and variance of the latent noise function(s)
        at the points `Xnew` (delegates to build_predict_n).
        """
        return self.build_predict_n(Xnew)

    @AutoFlow((float_type, [None, None]))
    def predict(self, Xnew):
        """
        Compute the mean and variance of the latent function(s)
        at the points `Xnew` (delegates to build_predict_f).
        """
        return self.build_predict_f(Xnew)

    @AutoFlow((float_type, [None, None]), (float_type, [None, None]))
    def pred_cov(self, X1, X2):
        """
        Compute the posterior covariance matrix b/w X1 and X2.
        """
        return self.build_pred_cov_f(X1, X2)

    @AutoFlow((float_type, [None, None]), (tf.int32, []))
    def posterior_samples_n(self, Xnew, num_samples):
        """
        Produce samples from the posterior latent function(s) at the points
        Xnew, with the predicted noise level added to the covariance diagonal.
        """
        mu, var = self.build_predict_f(Xnew, full_cov=True)
        # Jitter keeps the Cholesky factorisation numerically stable
        jitter = tf.eye(tf.shape(mu)[0], dtype=float_type) * settings.numerics.jitter_level
        mu_n, var_n = self.build_predict_n(Xnew)
        # NOTE(review): var_n is unused -- only the noise-GP mean feeds the
        # sample covariance; confirm this is intended.
        # exp then square: the noise GP presumably models log std dev
        # -- confirm against the likelihood definition.
        mu_n = tf.square(tf.exp(mu_n))
        A = var[:, :] + jitter
        B = tf.multiply(mu_n, tf.eye(tf.shape(mu_n)[0], dtype=float_type))
        L = tf.cholesky(A + B)
        shape = tf.stack([tf.shape(L)[0], num_samples])
        V = tf.random_normal(shape, dtype=settings.dtypes.float_type)
        samples = mu[:, ] + tf.matmul(L, V)
        return tf.transpose(samples)

    @AutoFlow((float_type, [None, None]), (tf.int32, []))
    def posterior_samples(self, Xnew, num_samples):
        """
        Produce samples from the posterior latent function(s) at the points
        Xnew.
        """
        mu, var = self.build_predict_f(Xnew, full_cov=True)
        # Jitter keeps the Cholesky factorisation numerically stable
        jitter = tf.eye(tf.shape(mu)[0], dtype=float_type) * settings.numerics.jitter_level
        L = tf.cholesky(var[:, :] + jitter)
        shape = tf.stack([tf.shape(L)[0], num_samples])
        V = tf.random_normal(shape, dtype=settings.dtypes.float_type)
        samples = mu[:, ] + tf.matmul(L, V)
        return tf.transpose(samples)
class GPModelAdaptiveLengthscaleMultDim(Model):
    """
    A base class for adaptive GP (non-stationary lengthscale and signal variance)
    regression models with a homoscedastic Gaussian likelihood.

    NOTE(review): the original docstring claimed heteroscedastic noise via a
    latent GP, but this class uses the plain Gaussian likelihood (see
    __init__); subclasses must implement build_predict_l, build_predict_f
    and build_pred_cov_f.
    """
    def __init__(self, X, Y, kern, nonstat, mean_func, name='adaptive_lengthscale_gp_multdim'):
        # kern: kernel of the latent lengthscale GP; nonstat: non-stationary
        # data kernel; mean_func: mean function of the lengthscale GP
        # -- presumably; confirm against concrete subclasses.
        Model.__init__(self, name)
        self.kern_type = kern
        self.nonstat = nonstat
        self.mean_func = mean_func
        self.likelihood = Gaussian()
        if isinstance(X, np.ndarray):
            #: X is a data matrix; each row represents one instance
            X = DataHolder(X)
        if isinstance(Y, np.ndarray):
            #: Y is a data matrix; rows correspond to the rows in X, columns are treated independently
            Y = DataHolder(Y)
        # Validate the targets against the likelihood before storing
        self.likelihood._check_targets(Y.value)
        self.X, self.Y = X, Y
        self._session = None

    @AutoFlow((float_type, [None, None]))
    def predict_l(self, Xnew):
        """
        Compute the mean and variance of the latent function(s)
        at the points `Xnew` (delegates to build_predict_l).
        """
        return self.build_predict_l(Xnew)

    @AutoFlow((float_type, [None, None]))
    def predict(self, Xnew):
        """
        Compute the mean and variance of the latent function(s)
        at the points `Xnew` (delegates to build_predict_f).
        """
        return self.build_predict_f(Xnew)

    @AutoFlow((float_type, [None, None]), (float_type, [None, None]))
    def pred_cov(self, X1, X2):
        """
        Compute the posterior covariance matrix b/w X1 and X2.
        """
        return self.build_pred_cov_f(X1, X2)

    @AutoFlow((float_type, [None, None]), (tf.int32, []))
    def posterior_samples(self, Xnew, num_samples):
        """
        Produce samples from the posterior latent function(s) at the points
        Xnew.
        """
        mu, var = self.build_predict_f(Xnew, full_cov=True)
        # Jitter keeps the Cholesky factorisation numerically stable
        jitter = tf.eye(tf.shape(mu)[0], dtype=float_type) * settings.numerics.jitter_level
        L = tf.cholesky(var[:, :] + jitter)
        shape = tf.stack([tf.shape(L)[0], num_samples])
        V = tf.random_normal(shape, dtype=settings.dtypes.float_type)
        samples = mu[:, ] + tf.matmul(L, V)
        return tf.transpose(samples)
# We need a ParamList to hold the parameters of the per-dimension lengthscale GPs.
# ParamList is a list of parameters: it stores parameters in a list while keeping
# them 'visible' to the gpflow machinery. Correct usage:
# >>> my_list = gpflow.param.ParamList([Param1, Param2])
class GPModelAdaptiveLengthscaleMultDimDev(Model):
    """
    A base class for adaptive GP (non-stationary lengthscale and signal variance)
    regression models, with one lengthscale-GP kernel and mean function per
    input dimension (held in ParamLists).

    NOTE(review): the original docstring claimed heteroscedastic noise via a
    latent GP, but this class uses the plain Gaussian likelihood (see
    __init__); subclasses must implement build_predict_l, build_predict_f
    and build_pred_cov_f.
    """
    def __init__(self, X, Y, kerns_list, nonstat, mean_funcs_list, name='adaptive_lengthscale_gp_multdim'):
        Model.__init__(self, name)
        # Introducing Paramlist to define kernels and mean functions for lengthscale gps associated with each dimensions
        self.kerns_list = ParamList(kerns_list)
        self.nonstat = nonstat
        self.mean_funcs_list = ParamList(mean_funcs_list)
        self.likelihood = Gaussian()
        # Dataset dimensions, taken before X/Y are wrapped in DataHolders
        self.num_data = X.shape[0]
        self.num_latent = Y.shape[1]
        self.num_feat = X.shape[1]
        if isinstance(X, np.ndarray):
            #: X is a data matrix; each row represents one instance
            X = DataHolder(X)
        if isinstance(Y, np.ndarray):
            #: Y is a data matrix; rows correspond to the rows in X, columns are treated independently
            Y = DataHolder(Y)
        # Sanity checks : making sure we have defined kernels and mean functions for each lengthscale gp
        if len(self.kerns_list) != self.num_feat:
            raise ValueError('kernels defined for each lengthscale gps != number of features')
        if len(self.mean_funcs_list) != self.num_feat:
            raise ValueError('mean functions defined for each lengthscale gps != number of features')
        self.likelihood._check_targets(Y.value)
        self.X, self.Y = X, Y
        self._session = None

    @AutoFlow((float_type, [None, None]))
    def predict_l(self, Xnew):
        """
        Compute the mean and variance of the latent function(s)
        at the points `Xnew` (delegates to build_predict_l).
        """
        return self.build_predict_l(Xnew)

    @AutoFlow((float_type, [None, None]))
    def predict(self, Xnew):
        """
        Compute the mean and variance of the latent function(s)
        at the points `Xnew` (delegates to build_predict_f).
        """
        return self.build_predict_f(Xnew)

    @AutoFlow((float_type, [None, None]), (float_type, [None, None]))
    def pred_cov(self, X1, X2):
        """
        Compute the posterior covariance matrix b/w X1 and X2.
        """
        return self.build_pred_cov_f(X1, X2)

    @AutoFlow((float_type, [None, None]), (tf.int32, []))
    def posterior_samples(self, Xnew, num_samples):
        """
        Produce samples from the posterior latent function(s) at the points
        Xnew.
        """
        mu, var = self.build_predict_f(Xnew, full_cov=True)
        # Jitter keeps the Cholesky factorisation numerically stable
        jitter = tf.eye(tf.shape(mu)[0], dtype=float_type) * settings.numerics.jitter_level
        L = tf.cholesky(var[:, :] + jitter)
        shape = tf.stack([tf.shape(L)[0], num_samples])
        V = tf.random_normal(shape, dtype=settings.dtypes.float_type)
        samples = mu[:, ] + tf.matmul(L, V)
        return tf.transpose(samples)
class GPModelAdaptiveLengthscaleMultDimEllSSDev(Model):
"""
A base class for adaptive GP (non-stationary lengthscale and signal variance)
regression models with input-dependent signal strength and lengthscale.
"""
    def __init__(self, X, Y, kerns_ell_list, kerns_ss_list, mean_funcs_ell_list, mean_funcs_ss_list, nonstat, name='adaptive_lengthscale_gp_multdim_ell_ss'):
        # One kernel and mean function per input dimension, for both the
        # lengthscale ('ell') GPs and the signal-strength ('ss') GPs.
        Model.__init__(self, name)
        # Introducing Paramlist to define kernels and mean functions for lengthscale gps associated with each dimensions
        self.kerns_ell_list = ParamList(kerns_ell_list)
        self.kerns_ss_list = ParamList(kerns_ss_list)
        self.nonstat = nonstat
        self.mean_funcs_ell_list = ParamList(mean_funcs_ell_list)
        self.mean_funcs_ss_list = ParamList(mean_funcs_ss_list)
        self.likelihood = Gaussian()
        # Dataset dimensions, taken before X/Y are wrapped in DataHolders
        self.num_data = X.shape[0]
        self.num_latent = Y.shape[1]
        self.num_feat = X.shape[1]
        if isinstance(X, np.ndarray):
            #: X is a data matrix; each row represents one instance
            X = DataHolder(X)
        if isinstance(Y, np.ndarray):
            #: Y is a data matrix; rows correspond to the rows in X, columns are treated independently
            Y = DataHolder(Y)
        # Sanity checks : making sure we have defined kernels and mean functions for each lengthscale gp
        if len(self.kerns_ell_list) != self.num_feat:
            raise ValueError('kernels defined for each lengthscale gps != number of features')
        if len(self.kerns_ss_list) != self.num_feat:
            raise ValueError('kernels defined for each signal-strength gps != number of features')
        if len(self.mean_funcs_ell_list) != self.num_feat:
            raise ValueError('mean functions defined for each lengthscale gps != number of features')
        if len(self.mean_funcs_ss_list) != self.num_feat:
            raise ValueError('mean functions defined for each signal-strength gps != number of features')
        self.likelihood._check_targets(Y.value)
        self.X, self.Y = X, Y
        self._session = None
    @AutoFlow((float_type, [None, None]))
    def predict_l(self, Xnew):
        """
        Compute the mean and variance of the latent function(s)
        at the points `Xnew` (delegates to build_predict_l; the 'l'
        presumably refers to the lengthscale GP -- confirm in subclass).
        """
        return self.build_predict_l(Xnew)
@AutoFlow((float_type, [None, None]))
def predict_s(self, Xnew):
    """Return the posterior mean and variance of the signal-strength
    latent process(es) evaluated at the points `Xnew`."""
    mean_and_var = self.build_predict_s(Xnew)
    return mean_and_var
@AutoFlow((float_type, [None, None]))
def predict(self, Xnew):
    """Return the posterior mean and variance of the latent function(s)
    evaluated at the points `Xnew`."""
    mean_and_var = self.build_predict_f(Xnew)
    return mean_and_var
@AutoFlow((float_type, [None, None]), (float_type, [None, None]))
def pred_cov(self, X1, X2):
    """Return the posterior covariance matrix between the point sets
    ```X1``` and ```X2```."""
    cov = self.build_pred_cov_f(X1, X2)
    return cov
@AutoFlow((float_type, [None, None]), (tf.int32, []))
def posterior_samples(self, Xnew, num_samples):
"""
Produce samples from the posterior latent function(s) at the points
```Xnew```.
"""
mu, var = self.build_predict_f(Xnew, full_cov=True)
jitter = tf.eye(tf.shape(mu)[0], dtype=float_type) * settings.numerics.jitter_level
L = tf.cholesky(var[:, :] + jitter)
shape = tf.stack([tf.shape(L)[0], num_samples])
V = | |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os, sys, subprocess, bs4,signal, urllib.request, urllib.error, urllib.parse, json,socket
import requests
import re
import json
import sys
import urllib3
import core
import http.client
import socket
from time import sleep
import os as sistema
import readline, rlcompleter
from urllib.parse import quote
from socket import timeout
from urllib.request import urlopen
from urllib.request import Request
from sys import argv
from subprocess import *
import _py_
from core import help
from terminaltables import DoubleTable
from tabulate import tabulate
import importlib
R = '\033[31m' # Red
N = '\033[1;37m' # White
G = '\033[32m' # Green
O = '\033[0;33m' # Orange
B = '\033[1;34m' #Blue
E = '\033[0m' # End
def clean():
    """Clear the terminal screen (POSIX `clear`)."""
    os.system("clear")
def enumiax():
    """Interactive menu for the `enumiax` IAX2 username enumerator.

    Reads commands in a loop: 'show options' prints help, 'set target'
    prompts for a host and wordlist and runs enumiax, 'back' returns to
    the scanner menu, 'clear' clears the screen, 'exit' quits.
    """
    import shlex  # quote user-supplied values before shelling out
    while True:
        # Fixed 2to3 artifact: eval(input(...)) executed arbitrary user
        # input and raised NameError on plain commands; use input().
        cs = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/enumiax "+N+"): ")
        if cs == 'show options':
            help.option()
            enumiax()
        elif cs == 'set target':
            ip = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/enumiax "+G+"(set target)"+G+"): ")
            # multi-arg print, not a tuple repr (2to3 double-paren artifact)
            print("target>>", ip)
            wor = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/enumiax "+G+"(set wordlist)"+G+"): ")
            runn = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/enumiax "+G+"): ")
            if runn == 'run':
                print('')
                # shlex.quote prevents shell injection via the prompts
                os.system('enumiax -d %s %s' % (shlex.quote(wor), shlex.quote(ip)))
                enumiax()
        elif cs == 'back':
            core.menu.scan()
        elif cs == 'exit':
            print()
            print(""+G+"Thanks for using PTF")
            print()
            exit()
        elif cs == 'clear':
            clean()
            enumiax()
        else:
            print("Wrong Command => ", cs)
            print(""+N+""+B+"["+R+"!"+B+"] "+N+"Please enter 'show options'")
            enumiax()
def load_balancing():
    """Interactive menu for `lbd` (load-balancing detection).

    Commands: 'show options', 'set target' (then 'run'), 'back',
    'clear', 'exit'.
    """
    import shlex  # quote user-supplied values before shelling out
    while True:
        # Fixed 2to3 artifact: eval(input(...)) -> input(...)
        cs = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/load_balancing "+N+"): ")
        if cs == 'show options':
            help.option()
            load_balancing()
        elif cs == 'set target':
            ip = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/load_balancing "+G+"(set target)"+G+"): ")
            print("target>>", ip)  # multi-arg print, not a tuple repr
            runn = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/load_balancing "+G+"): ")
            if runn == 'run':
                print('')
                # shlex.quote prevents shell injection via the prompt
                os.system('lbd %s' % (shlex.quote(ip)))
                load_balancing()
        elif cs == 'back':
            core.menu.scan()
        elif cs == 'exit':
            print()
            print(""+G+"Thanks for using PTF")
            print()
            exit()
        elif cs == 'clear':
            clean()
            load_balancing()
        else:
            print("Wrong Command => ", cs)
            print(""+N+""+B+"["+R+"!"+B+"] "+N+"Please enter 'show options'")
            load_balancing()
def port_check():
    """Interactive menu for a netcat TCP port sweep (ports 1-6000).

    Commands: 'show options', 'set target' (then 'run'), 'back',
    'clear', 'exit'.
    """
    import shlex  # quote user-supplied values before shelling out
    while True:
        # Fixed 2to3 artifact: eval(input(...)) -> input(...)
        cs = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/port_check "+N+"): ")
        if cs == 'show options':
            help.option()
            port_check()
        elif cs == 'set target':
            ip = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/port_check "+G+"(set ip)"+G+"): ")
            print("target>>", ip)  # multi-arg print, not a tuple repr
            runn = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/port_check "+G+"): ")
            if runn == 'run':
                print(""+G+"")
                # shlex.quote prevents shell injection via the prompt
                os.system('nc -vnzw1 %s 1-6000' % (shlex.quote(ip)))
                port_check()
        elif cs == 'back':
            core.menu.scan()
        elif cs == 'exit':
            print()
            print(""+G+"Thanks for using PTF")
            print()
            exit()
        elif cs == 'clear':
            clean()
            port_check()
        else:
            print("Wrong Command => ", cs)
            print(""+N+""+B+"["+R+"!"+B+"] "+N+"Please enter 'show options'")
            port_check()
def botnet_scanning():
    """Interactive menu for an nmap idle ("zombie") scan (-sI).

    Commands: 'show options', 'set target' (prompts for a zombie IP and
    a target, then 'run'), 'back', 'clear', 'exit'.
    """
    import shlex  # quote user-supplied values before shelling out
    while True:
        # Fixed 2to3 artifact: eval(input(...)) -> input(...)
        cs = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/botnet_scanning "+N+"): ")
        if cs == 'show options':
            help.option()
            botnet_scanning()
        elif cs == 'set target':
            ip = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/botnet_scanning "+G+"(set IP Zombie)"+G+"): ")
            print("Zombie", ip)  # multi-arg print, not a tuple repr
            tar = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/botnet_scanning "+G+"(set target)"+G+"): ")
            print("target", tar)
            runn = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/botnet_scanning "+G+"): ")
            if runn == 'run':
                print('')
                # shlex.quote prevents shell injection via the prompts
                os.system('nmap -sI %s %s' % (shlex.quote(ip), shlex.quote(tar)))
                botnet_scanning()
        elif cs == 'back':
            core.menu.scan()
        elif cs == 'exit':
            print()
            print(""+G+"Thanks for using PTF")
            print()
            exit()
        elif cs == 'clear':
            clean()
            botnet_scanning()
        else:
            print("Wrong Command => ", cs)
            print(""+N+""+B+"["+R+"!"+B+"] "+N+"Please enter 'show options'")
            botnet_scanning()
def ssl_check():
    """Interactive menu for `sslscan --show-certificate`.

    Commands: 'show options', 'set target' (then 'run'), 'back',
    'clear', 'exit'.
    """
    import shlex  # quote user-supplied values before shelling out
    while True:
        # Fixed 2to3 artifact: eval(input(...)) -> input(...)
        cs = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/check_ssl_certificate "+N+"): ")
        if cs == 'show options':
            help.option()
            # Fixed copy-paste bug: this menu looped back into sslscan()
            # (a different module with a different prompt); stay here.
            ssl_check()
        elif cs == 'set target':
            ip = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/check_ssl_certificate "+G+"(set target)"+G+"): ")
            runn = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/check_ssl_certificate "+G+"): ")
            if runn == 'run':
                print('')
                # shlex.quote prevents shell injection via the prompt
                os.system('sslscan --show-certificate %s' % (shlex.quote(ip)))
                ssl_check()
        elif cs == 'back':
            core.menu.scan()
        elif cs == 'exit':
            print()
            print(""+G+"Thanks for using PTF")
            print()
            exit()
        elif cs == 'clear':
            clean()
            ssl_check()
        else:
            print("Wrong Command => ", cs)
            print(""+N+""+B+"["+R+"!"+B+"] "+N+"Please enter 'show options'")
            ssl_check()
def ssl_cert():
    """Interactive menu for nmap's `ssl-cert` NSE script.

    Runs the scan, logs the output to log/ssl_log.txt, and prints it in
    a table. Commands: 'show options', 'set target' (then 'run'),
    'back', 'clear', 'exit'.
    """
    import shlex  # quote user-supplied values before shelling out
    while True:
        # Fixed 2to3 artifact: eval(input(...)) -> input(...)
        cs = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/ssl_cert "+N+"): ")
        if cs == 'show options':
            help.option()
            ssl_cert()
        elif cs == 'set target':
            ip = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/ssl_cert "+G+"(set target)"+G+"): ")
            runn = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/ssl_cert "+G+"): ")
            if runn == 'run':
                # shlex.quote prevents shell injection via the prompt
                scan = os.popen("nmap -sV --script ssl-cert " + shlex.quote(ip)).read()
                # with-block guarantees the log file is closed
                with open('log/ssl_log.txt', 'w') as save:
                    save.write(scan)
                # print the captured output (scan already holds it; no
                # need to shell out to `cat` again)
                table = [['scan result'], [scan]]
                print(tabulate(table, tablefmt="fancy_grid", headers="firstrow"))
                ssl_cert()
        elif cs == 'back':
            core.menu.scan()
        elif cs == 'exit':
            print()
            print(""+G+"Thanks for using PTF")
            print()
            exit()
        elif cs == 'clear':
            clean()
            ssl_cert()
        else:
            print("Wrong Command => ", cs)
            print(""+N+""+B+"["+R+"!"+B+"] "+N+"Please enter 'show options'")
            ssl_cert()
def sslscan():
    """Interactive menu for the `sslscan` TLS scanner.

    Commands: 'show options', 'set target' (then 'run'), 'back',
    'clear', 'exit'.
    """
    import shlex  # quote user-supplied values before shelling out
    while True:
        # Fixed 2to3 artifact: eval(input(...)) -> input(...)
        cs = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/sslscan "+N+"): ")
        if cs == 'show options':
            help.option()
            sslscan()
        elif cs == 'set target':
            ip = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/sslscan "+G+"(set target)"+G+"): ")
            runn = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/sslscan "+G+"): ")
            if runn == 'run':
                print('')
                # shlex.quote prevents shell injection via the prompt
                os.system('sslscan %s' % (shlex.quote(ip)))
                sslscan()
        elif cs == 'back':
            core.menu.scan()
        elif cs == 'exit':
            print()
            print(""+G+"Thanks for using PTF")
            print()
            exit()
        elif cs == 'clear':
            clean()
            sslscan()
        else:
            print("Wrong Command => ", cs)
            print(""+N+""+B+"["+R+"!"+B+"] "+N+"Please enter 'show options'")
            sslscan()
def zone_walking():
    """Interactive menu for `dnsrecon -t zonewalk` (DNSSEC zone walking).

    Commands: 'show options', 'set target' (then 'run'), 'back',
    'clear', 'exit'.
    """
    import shlex  # quote user-supplied values before shelling out
    while True:
        # Fixed 2to3 artifact: eval(input(...)) -> input(...)
        cs = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/zone_walking "+N+"): ")
        if cs == 'show options':
            help.option()
            zone_walking()
        elif cs == 'set target':
            ip = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/zone_walking "+G+"(set target)"+G+"): ")
            runn = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/zone_walking "+G+"): ")
            if runn == 'run':
                print('')
                # shlex.quote prevents shell injection via the prompt
                os.system('dnsrecon -d %s -t zonewalk' % (shlex.quote(ip)))
                zone_walking()
        elif cs == 'back':
            core.menu.scan()
        elif cs == 'exit':
            print()
            print(""+G+"Thanks for using PTF")
            print()
            exit()
        elif cs == 'clear':
            clean()
            zone_walking()
        else:
            print("Wrong Command => ", cs)
            print(""+N+""+B+"["+R+"!"+B+"] "+N+"Please enter 'show options'")
            zone_walking()
def dns_bruteforce():
    """Interactive menu for `dnsrecon -t brt` (subdomain brute force).

    Commands: 'show options', 'set target' (prompts for a domain and a
    wordlist, then 'run'), 'back', 'clear', 'exit'.
    """
    import shlex  # quote user-supplied values before shelling out
    while True:
        # Fixed 2to3 artifact: eval(input(...)) -> input(...)
        cs = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/dns_bruteforce "+N+"): ")
        if cs == 'show options':
            help.option()
            dns_bruteforce()
        elif cs == 'set target':
            ip = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/dns_bruteforce "+G+"(set target)"+G+"): ")
            wor = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/dns_bruteforce "+G+"(set wordlist)"+G+"): ")
            runn = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/dns_bruteforce "+G+"): ")
            if runn == 'run':
                print('')
                # shlex.quote prevents shell injection via the prompts
                os.system('dnsrecon -d %s -D %s -t brt' % (shlex.quote(ip), shlex.quote(wor)))
                dns_bruteforce()
        elif cs == 'back':
            core.menu.scan()
        elif cs == 'exit':
            print()
            print(""+G+"Thanks for using PTF")
            print()
            exit()
        elif cs == 'clear':
            clean()
            dns_bruteforce()
        else:
            print("Wrong Command => ", cs)
            print(""+N+""+B+"["+R+"!"+B+"] "+N+"Please enter 'show options'")
            dns_bruteforce()
def dns_zone_transfer():
    """Interactive menu for dnsrecon zone-transfer checks (-a / -t axfr).

    Commands: 'show options', 'set target' (then 'run'), 'back',
    'clear', 'exit'.
    """
    import shlex  # quote user-supplied values before shelling out
    while True:
        # Fixed 2to3 artifact: eval(input(...)) -> input(...)
        cs = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/dns_zone_transfer "+N+"): ")
        if cs == 'show options':
            help.option()
            dns_zone_transfer()
        elif cs == 'set target':
            ip = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/dns_zone_transfer "+G+"(set target)"+G+"): ")
            runn = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/dns_zone_transfer "+G+"): ")
            if runn == 'run':
                print('')
                # shlex.quote prevents shell injection via the prompt
                target = shlex.quote(ip)
                os.system('dnsrecon -d %s -a' % (target))
                os.system('dnsrecon -d %s -t axfr' % (target))
                dns_zone_transfer()
        elif cs == 'back':
            core.menu.scan()
        elif cs == 'exit':
            print()
            print(""+G+"Thanks for using PTF")
            print()
            exit()
        elif cs == 'clear':
            clean()
            dns_zone_transfer()
        else:
            print("Wrong Command => ", cs)
            print(""+N+""+B+"["+R+"!"+B+"] "+N+"Please enter 'show options'")
            dns_zone_transfer()
def dnsrecon():
    """Interactive menu for a default `dnsrecon -d` enumeration.

    Commands: 'show options', 'set target' (then 'run'), 'back',
    'clear', 'exit'.
    """
    import shlex  # quote user-supplied values before shelling out
    while True:
        # Fixed 2to3 artifact: eval(input(...)) -> input(...)
        cs = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/dnsrecon "+N+"): ")
        if cs == 'show options':
            help.option()
            dnsrecon()
        elif cs == 'set target':
            ip = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/dnsrecon "+G+"(set target)"+G+"): ")
            runn = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/dnsrecon "+G+"): ")
            if runn == 'run':
                print('')
                # shlex.quote prevents shell injection via the prompt
                os.system('dnsrecon -d %s' % (shlex.quote(ip)))
                dnsrecon()
        elif cs == 'back':
            core.menu.scan()
        elif cs == 'exit':
            print()
            print(""+G+"Thanks for using PTF")
            print()
            exit()
        elif cs == 'clear':
            clean()
            dnsrecon()
        else:
            print("Wrong Command => ", cs)
            print(""+N+""+B+"["+R+"!"+B+"] "+N+"Please enter 'show options'")
            dnsrecon()
def dns_reverse():
    """Interactive menu for reverse DNS lookup via `dig -x`.

    Runs immediately after 'set target' (no separate 'run' step, as in
    the original), logs output to log/log.txt and prints it in a table.
    Commands: 'show options', 'set target', 'back', 'clear', 'exit'.
    """
    import shlex  # quote user-supplied values before shelling out
    while True:
        # Fixed 2to3 artifact: eval(input(...)) -> input(...)
        cs = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/reverse_dns "+N+"): ")
        if cs == 'show options':
            help.option()
            dns_reverse()
        elif cs == 'set target':
            ip = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/reverse_dns "+G+"(set target)"+G+"): ")
            # shlex.quote prevents shell injection via the prompt
            scan = os.popen("dig -x " + shlex.quote(ip)).read()
            # with-block guarantees the log file is closed
            with open('log/log.txt', 'w') as save:
                save.write(scan)
            # print the captured output directly (no second shell-out)
            table = [['scan result'], [scan]]
            print(tabulate(table, tablefmt="fancy_grid", headers="firstrow"))
            dns_reverse()
        elif cs == 'back':
            core.menu.scan()
        elif cs == 'exit':
            print()
            print(""+G+"Thanks for using PTF")
            print()
            exit()
        elif cs == 'clear':
            clean()
            dns_reverse()
        else:
            print("Wrong Command => ", cs)
            print(""+N+""+B+"["+R+"!"+B+"] "+N+"Please enter 'show options'")
            dns_reverse()
def iploc():
    """Interactive menu for IP geolocation via the ip-api.com JSON API.

    Commands: 'show options', 'set target' (queries immediately),
    'back', 'clear', 'exit'.
    """
    while True:
        # Fixed 2to3 artifact: eval(input(...)) -> input(...)
        cs = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/ip_locator "+N+"): ")
        if cs == 'show options':
            help.option()
            iploc()
        elif cs == 'set target':
            ip = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/ip_locator "+G+"(set domain)"+N+"): ")
            url = "http://ip-api.com/json/"
            # percent-encode the user value so it cannot smuggle extra
            # URL path/query components (quote is imported at file top)
            reponse = urllib.request.urlopen(url + quote(ip))
            name = reponse.read()
            labs = json.loads(name)
            print("\033[92m" + "\n IP: " + labs['query'])
            print("\033[92m" + " Status: " + labs['status'])
            print("\033[92m" + " Region: " + labs['regionName'])
            print("\033[92m" + " Country: " + labs['country'])
            print("\033[92m" + " City: " + labs['city'])
            print("\033[92m" + " ISP: " + labs['isp'])
            print("\033[92m" + " Lat,Lon: " + str(labs['lat']) + "," + str(labs['lon']))
            print("\033[92m" + " ZIPCODE: " + labs['zip'])
            print("\033[92m" + " TimeZone: " + labs['timezone'])
            print("\033[92m" + " AS: " + labs['as'] + "\n")
            iploc()
        elif cs == 'back':
            core.menu.scan()
        elif cs == 'exit':
            print()
            print(""+G+"Thanks for using PTF")
            print()
            exit()
        elif cs == 'clear':
            clean()
            iploc()
        else:
            print("Wrong Command => ", cs)
            print(""+N+""+B+"["+R+"!"+B+"] "+N+"Please enter 'show options'")
            iploc()
def who():
    """Interactive menu for a `whois` lookup.

    Runs immediately after 'set target', logs output to log/log.txt and
    prints it in a table. Commands: 'show options', 'set target',
    'back', 'clear', 'exit'.
    """
    import shlex  # quote user-supplied values before shelling out
    while True:
        # Fixed 2to3 artifact: eval(input(...)) -> input(...)
        cs = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/whois "+N+"): ")
        if cs == 'show options':
            help.option()
            who()
        elif cs == 'set target':
            ip = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/whois "+G+"(set ip)"+G+"): ")
            # shlex.quote prevents shell injection via the prompt
            scan = os.popen("whois " + shlex.quote(ip)).read()
            # with-block guarantees the log file is closed
            with open('log/log.txt', 'w') as save:
                save.write(scan)
            # print the captured output directly (no second shell-out)
            table = [['scan result'], [scan]]
            print(tabulate(table, tablefmt="fancy_grid", headers="firstrow"))
            who()
        elif cs == 'back':
            core.menu.scan()
        elif cs == 'exit':
            print()
            print(""+G+"Thanks for using PTF")
            print()
            exit()
        elif cs == 'clear':
            clean()
            who()
        else:
            print("Wrong Command => ", cs)
            print(""+N+""+B+"["+R+"!"+B+"] "+N+"Please enter 'show options'")
            who()
def xss():
while True:
cs = eval(input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/xss_scaner "+N+"): "))
if cs == 'show options':
help.option()
xss()
elif cs == 'set target':
tops | |
then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V2alpha1CronJobList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.list_scheduled_job_for_all_namespaces_with_http_info(**kwargs)
else:
(data) = self.list_scheduled_job_for_all_namespaces_with_http_info(**kwargs)
return data
def list_scheduled_job_for_all_namespaces_with_http_info(self, **kwargs):
    """
    list or watch objects of kind ScheduledJob
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>> pprint(response)
    >>>
    >>> thread = api.list_scheduled_job_for_all_namespaces_with_http_info(callback=callback_function)
    :param callback function: The callback function
    for asynchronous request. (optional)
    :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
    :param bool include_uninitialized: If true, partially initialized resources are included in the response.
    :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
    :param str pretty: If 'true', then the output is pretty printed.
    :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
    :param int timeout_seconds: Timeout for the list/watch call.
    :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
    :return: V2alpha1CronJobList
    If the method is called asynchronously,
    returns the request thread.
    """
    # Endpoint query parameters, plus the transport-control kwargs that
    # every generated API method accepts.
    all_params = ['field_selector', 'include_uninitialized', 'label_selector', 'pretty', 'resource_version', 'timeout_seconds', 'watch']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    params = locals()
    # Reject any keyword argument not in the whitelist above.
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_scheduled_job_for_all_namespaces" % key
            )
        params[key] = val
    del params['kwargs']
    collection_formats = {}
    resource_path = '/apis/batch/v2alpha1/scheduledjobs'.replace('{format}', 'json')
    path_params = {}
    # Map snake_case Python kwargs onto the camelCase query parameters
    # the Kubernetes API expects.
    query_params = {}
    if 'field_selector' in params:
        query_params['fieldSelector'] = params['field_selector']
    if 'include_uninitialized' in params:
        query_params['includeUninitialized'] = params['include_uninitialized']
    if 'label_selector' in params:
        query_params['labelSelector'] = params['label_selector']
    if 'pretty' in params:
        query_params['pretty'] = params['pretty']
    if 'resource_version' in params:
        query_params['resourceVersion'] = params['resource_version']
    if 'timeout_seconds' in params:
        query_params['timeoutSeconds'] = params['timeout_seconds']
    if 'watch' in params:
        query_params['watch'] = params['watch']
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    # Authentication setting
    auth_settings = ['BearerToken']
    # Delegate the actual HTTP round trip (and response deserialization)
    # to the shared ApiClient.
    return self.api_client.call_api(resource_path, 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='V2alpha1CronJobList',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def patch_namespaced_cron_job(self, name, namespace, body, **kwargs):
    """
    partially update the specified CronJob

    Synchronous by default; pass a `callback` keyword to make the
    request asynchronous, in which case the request thread is returned
    instead of the response data.

    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.patch_namespaced_cron_job(name, namespace, body, callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str name: name of the CronJob (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param object body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :return: V2alpha1CronJob
    """
    # Callers of this convenience wrapper get just the deserialized data,
    # not the (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('callback'):
        return self.patch_namespaced_cron_job_with_http_info(name, namespace, body, **kwargs)
    data = self.patch_namespaced_cron_job_with_http_info(name, namespace, body, **kwargs)
    return data
def patch_namespaced_cron_job_with_http_info(self, name, namespace, body, **kwargs):
    """
    partially update the specified CronJob
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>> pprint(response)
    >>>
    >>> thread = api.patch_namespaced_cron_job_with_http_info(name, namespace, body, callback=callback_function)
    :param callback function: The callback function
    for asynchronous request. (optional)
    :param str name: name of the CronJob (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param object body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :return: V2alpha1CronJob
    If the method is called asynchronously,
    returns the request thread.
    """
    # Endpoint parameters, plus the transport-control kwargs that every
    # generated API method accepts.
    all_params = ['name', 'namespace', 'body', 'pretty']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    params = locals()
    # Reject any keyword argument not in the whitelist above.
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method patch_namespaced_cron_job" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'name' is set
    if ('name' not in params) or (params['name'] is None):
        raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_cron_job`")
    # verify the required parameter 'namespace' is set
    if ('namespace' not in params) or (params['namespace'] is None):
        raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_cron_job`")
    # verify the required parameter 'body' is set
    if ('body' not in params) or (params['body'] is None):
        raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_cron_job`")
    collection_formats = {}
    resource_path = '/apis/batch/v2alpha1/namespaces/{namespace}/cronjobs/{name}'.replace('{format}', 'json')
    # URL path substitutions for {name} and {namespace}.
    path_params = {}
    if 'name' in params:
        path_params['name'] = params['name']
    if 'namespace' in params:
        path_params['namespace'] = params['namespace']
    query_params = {}
    if 'pretty' in params:
        query_params['pretty'] = params['pretty']
    header_params = {}
    form_params = []
    local_var_files = {}
    # The patch document is sent as the request body.
    body_params = None
    if 'body' in params:
        body_params = params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
    # HTTP header `Content-Type` (the accepted patch media types)
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
    # Authentication setting
    auth_settings = ['BearerToken']
    # Delegate the actual HTTP round trip (and response deserialization)
    # to the shared ApiClient.
    return self.api_client.call_api(resource_path, 'PATCH',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='V2alpha1CronJob',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def patch_namespaced_cron_job_status(self, name, namespace, body, **kwargs):
    """
    partially update status of the specified CronJob

    Synchronous by default; pass a `callback` keyword to make the
    request asynchronous, in which case the request thread is returned
    instead of the response data.

    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.patch_namespaced_cron_job_status(name, namespace, body, callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str name: name of the CronJob (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param object body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :return: V2alpha1CronJob
    """
    # Callers of this convenience wrapper get just the deserialized data,
    # not the (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('callback'):
        return self.patch_namespaced_cron_job_status_with_http_info(name, namespace, body, **kwargs)
    data = self.patch_namespaced_cron_job_status_with_http_info(name, namespace, body, **kwargs)
    return data
def patch_namespaced_cron_job_status_with_http_info(self, name, namespace, body, **kwargs):
"""
partially update status of the specified CronJob
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.patch_namespaced_cron_job_status_with_http_info(name, namespace, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: name of the CronJob (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V2alpha1CronJob
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_cron_job_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_cron_job_status`")
# verify the required parameter 'namespace' is | |
info overloaded class method 2.
Args:
exp_stack: The expected call stack
capsys: Pytest fixture that captures output
"""
exp_caller_info = CallerInfo(mod_name='test_diag_msg.py',
cls_name='ClassGetCallerInfo2',
func_name='get_caller_info_c2bt',
line_num=7479)
exp_stack.append(exp_caller_info)
update_stack(exp_stack=exp_stack, line_num=7541, add=0)
for i, expected_caller_info in enumerate(list(reversed(exp_stack))):
try:
frame = _getframe(i)
caller_info = get_caller_info(frame)
finally:
del frame
assert caller_info == expected_caller_info
# test call sequence
update_stack(exp_stack=exp_stack, line_num=7548, add=0)
call_seq = get_formatted_call_sequence(depth=len(exp_stack))
assert call_seq == get_exp_seq(exp_stack=exp_stack)
if capsys: # if capsys, test diag_msg
update_stack(exp_stack=exp_stack, line_num=7555, add=0)
before_time = datetime.now()
diag_msg('message 1', 1, depth=len(exp_stack))
after_time = datetime.now()
diag_msg_args = TestDiagMsg.get_diag_msg_args(
depth_arg=len(exp_stack),
msg_arg=['message 1', 1])
verify_diag_msg(exp_stack=exp_stack,
before_time=before_time,
after_time=after_time,
capsys=capsys,
diag_msg_args=diag_msg_args)
# call module level function
update_stack(exp_stack=exp_stack, line_num=7570, add=0)
func_get_caller_info_3(exp_stack=exp_stack, capsys=capsys)
# call method
cls_get_caller_info3 = ClassGetCallerInfo3()
update_stack(exp_stack=exp_stack, line_num=7575, add=1)
cls_get_caller_info3.get_caller_info_m3(exp_stack=exp_stack,
capsys=capsys)
# call static method
update_stack(exp_stack=exp_stack, line_num=7580, add=1)
cls_get_caller_info3.get_caller_info_s3(exp_stack=exp_stack,
capsys=capsys)
# call class method
update_stack(exp_stack=exp_stack, line_num=7585, add=1)
ClassGetCallerInfo3.get_caller_info_c3(exp_stack=exp_stack,
capsys=capsys)
# call overloaded base class method
update_stack(exp_stack=exp_stack, line_num=7590, add=1)
cls_get_caller_info3.get_caller_info_m3bo(exp_stack=exp_stack,
capsys=capsys)
# call overloaded base class static method
update_stack(exp_stack=exp_stack, line_num=7595, add=1)
cls_get_caller_info3.get_caller_info_s3bo(exp_stack=exp_stack,
capsys=capsys)
# call overloaded base class class method
update_stack(exp_stack=exp_stack, line_num=7600, add=1)
ClassGetCallerInfo3.get_caller_info_c3bo(exp_stack=exp_stack,
capsys=capsys)
# call subclass method
cls_get_caller_info3s = ClassGetCallerInfo3S()
update_stack(exp_stack=exp_stack, line_num=7606, add=1)
cls_get_caller_info3s.get_caller_info_m3s(exp_stack=exp_stack,
capsys=capsys)
# call subclass static method
update_stack(exp_stack=exp_stack, line_num=7611, add=1)
cls_get_caller_info3s.get_caller_info_s3s(exp_stack=exp_stack,
capsys=capsys)
# call subclass class method
update_stack(exp_stack=exp_stack, line_num=7616, add=1)
ClassGetCallerInfo3S.get_caller_info_c3s(exp_stack=exp_stack,
capsys=capsys)
# call overloaded subclass method
update_stack(exp_stack=exp_stack, line_num=7621, add=1)
cls_get_caller_info3s.get_caller_info_m3bo(exp_stack=exp_stack,
capsys=capsys)
# call overloaded subclass static method
update_stack(exp_stack=exp_stack, line_num=7626, add=1)
cls_get_caller_info3s.get_caller_info_s3bo(exp_stack=exp_stack,
capsys=capsys)
# call overloaded subclass class method
update_stack(exp_stack=exp_stack, line_num=7631, add=1)
ClassGetCallerInfo3S.get_caller_info_c3bo(exp_stack=exp_stack,
capsys=capsys)
# call base method from subclass method
update_stack(exp_stack=exp_stack, line_num=7636, add=1)
cls_get_caller_info3s.get_caller_info_m3sb(exp_stack=exp_stack,
capsys=capsys)
# call base static method from subclass static method
update_stack(exp_stack=exp_stack, line_num=7641, add=1)
cls_get_caller_info3s.get_caller_info_s3sb(exp_stack=exp_stack,
capsys=capsys)
# call base class method from subclass class method
update_stack(exp_stack=exp_stack, line_num=7646, add=1)
ClassGetCallerInfo3S.get_caller_info_c3sb(exp_stack=exp_stack,
capsys=capsys)
exp_stack.pop()
###############################################################################
# Class 2S
###############################################################################
class ClassGetCallerInfo2S(ClassGetCallerInfo2):
"""Subclass to get caller info2."""
def __init__(self) -> None:
    """The initialization for subclass 2."""
    super().__init__()
    # Subclass-specific attribute; set alongside the base class state.
    # NOTE(review): not referenced in the visible methods — confirm use.
    self.var2 = 2
###########################################################################
# Class 2S Method 1
###########################################################################
    def get_caller_info_m2s(self,
                            exp_stack: Deque[CallerInfo],
                            capsys: Optional[Any]) -> None:
        """Get caller info method 2.

        Pushes this method's expected CallerInfo onto *exp_stack*, verifies
        the live frame stack, the formatted call sequence, and (when
        *capsys* is supplied) the diag_msg output against the expected
        stack, then fans out to the module-level level-3 function and every
        method variant of ClassGetCallerInfo3 / ClassGetCallerInfo3S so the
        same verification repeats one call level deeper. Pops its entry off
        *exp_stack* before returning.

        Args:
            exp_stack: The expected call stack
            capsys: Pytest fixture that captures output

        NOTE(review): the literal line_num values passed to update_stack
        below do not match this file's current line numbering -- presumably
        update_stack rewrites the expected line number; confirm.
        """
        self.var1 += 1
        exp_caller_info = CallerInfo(mod_name='test_diag_msg.py',
                                     cls_name='ClassGetCallerInfo2S',
                                     func_name='get_caller_info_m2s',
                                     line_num=7622)
        exp_stack.append(exp_caller_info)
        update_stack(exp_stack=exp_stack, line_num=7685, add=0)
        # Verify each live frame (innermost first) against the expected stack.
        for i, expected_caller_info in enumerate(list(reversed(exp_stack))):
            try:
                frame = _getframe(i)
                caller_info = get_caller_info(frame)
            finally:
                # Drop the frame reference promptly to avoid reference cycles.
                del frame
            assert caller_info == expected_caller_info
        # test call sequence
        update_stack(exp_stack=exp_stack, line_num=7692, add=0)
        call_seq = get_formatted_call_sequence(depth=len(exp_stack))
        assert call_seq == get_exp_seq(exp_stack=exp_stack)
        if capsys:  # if capsys, test diag_msg
            update_stack(exp_stack=exp_stack, line_num=7699, add=0)
            before_time = datetime.now()
            diag_msg('message 1', 1, depth=len(exp_stack))
            after_time = datetime.now()
            diag_msg_args = TestDiagMsg.get_diag_msg_args(
                depth_arg=len(exp_stack),
                msg_arg=['message 1', 1])
            verify_diag_msg(exp_stack=exp_stack,
                            before_time=before_time,
                            after_time=after_time,
                            capsys=capsys,
                            diag_msg_args=diag_msg_args)
        # call module level function
        update_stack(exp_stack=exp_stack, line_num=7714, add=0)
        func_get_caller_info_3(exp_stack=exp_stack, capsys=capsys)
        # call method
        cls_get_caller_info3 = ClassGetCallerInfo3()
        update_stack(exp_stack=exp_stack, line_num=7719, add=1)
        cls_get_caller_info3.get_caller_info_m3(exp_stack=exp_stack,
                                                capsys=capsys)
        # call static method
        update_stack(exp_stack=exp_stack, line_num=7724, add=1)
        cls_get_caller_info3.get_caller_info_s3(exp_stack=exp_stack,
                                                capsys=capsys)
        # call class method
        update_stack(exp_stack=exp_stack, line_num=7729, add=1)
        ClassGetCallerInfo3.get_caller_info_c3(exp_stack=exp_stack,
                                               capsys=capsys)
        # call overloaded base class method
        update_stack(exp_stack=exp_stack, line_num=7734, add=1)
        cls_get_caller_info3.get_caller_info_m3bo(exp_stack=exp_stack,
                                                  capsys=capsys)
        # call overloaded base class static method
        update_stack(exp_stack=exp_stack, line_num=7739, add=1)
        cls_get_caller_info3.get_caller_info_s3bo(exp_stack=exp_stack,
                                                  capsys=capsys)
        # call overloaded base class class method
        update_stack(exp_stack=exp_stack, line_num=7744, add=1)
        ClassGetCallerInfo3.get_caller_info_c3bo(exp_stack=exp_stack,
                                                 capsys=capsys)
        # call subclass method
        cls_get_caller_info3s = ClassGetCallerInfo3S()
        update_stack(exp_stack=exp_stack, line_num=7750, add=1)
        cls_get_caller_info3s.get_caller_info_m3s(exp_stack=exp_stack,
                                                  capsys=capsys)
        # call subclass static method
        update_stack(exp_stack=exp_stack, line_num=7755, add=1)
        cls_get_caller_info3s.get_caller_info_s3s(exp_stack=exp_stack,
                                                  capsys=capsys)
        # call subclass class method
        update_stack(exp_stack=exp_stack, line_num=7760, add=1)
        ClassGetCallerInfo3S.get_caller_info_c3s(exp_stack=exp_stack,
                                                 capsys=capsys)
        # call overloaded subclass method
        update_stack(exp_stack=exp_stack, line_num=7765, add=1)
        cls_get_caller_info3s.get_caller_info_m3bo(exp_stack=exp_stack,
                                                   capsys=capsys)
        # call overloaded subclass static method
        update_stack(exp_stack=exp_stack, line_num=7770, add=1)
        cls_get_caller_info3s.get_caller_info_s3bo(exp_stack=exp_stack,
                                                   capsys=capsys)
        # call overloaded subclass class method
        update_stack(exp_stack=exp_stack, line_num=7775, add=1)
        ClassGetCallerInfo3S.get_caller_info_c3bo(exp_stack=exp_stack,
                                                  capsys=capsys)
        # call base method from subclass method
        update_stack(exp_stack=exp_stack, line_num=7780, add=1)
        cls_get_caller_info3s.get_caller_info_m3sb(exp_stack=exp_stack,
                                                   capsys=capsys)
        # call base static method from subclass static method
        update_stack(exp_stack=exp_stack, line_num=7785, add=1)
        cls_get_caller_info3s.get_caller_info_s3sb(exp_stack=exp_stack,
                                                   capsys=capsys)
        # call base class method from subclass class method
        update_stack(exp_stack=exp_stack, line_num=7790, add=1)
        ClassGetCallerInfo3S.get_caller_info_c3sb(exp_stack=exp_stack,
                                                  capsys=capsys)
        exp_stack.pop()
###########################################################################
# Class 2S Method 2
###########################################################################
    @staticmethod
    def get_caller_info_s2s(exp_stack: Deque[CallerInfo],
                            capsys: Optional[Any]) -> None:
        """Get caller info static method 2.

        Static-method variant of the level-2 caller-info check: pushes this
        method's expected CallerInfo onto *exp_stack*, verifies the live
        frame stack, the formatted call sequence, and (when *capsys* is
        supplied) the diag_msg output, then drives every level-3 function
        and method variant so the verification repeats one level deeper.
        Pops its entry off *exp_stack* before returning.

        Args:
            exp_stack: The expected call stack
            capsys: Pytest fixture that captures output

        NOTE(review): the literal line_num values passed to update_stack
        below do not match this file's current line numbering -- presumably
        update_stack rewrites the expected line number; confirm.
        """
        exp_caller_info = CallerInfo(mod_name='test_diag_msg.py',
                                     cls_name='ClassGetCallerInfo2S',
                                     func_name='get_caller_info_s2s',
                                     line_num=7753)
        exp_stack.append(exp_caller_info)
        update_stack(exp_stack=exp_stack, line_num=7817, add=0)
        # Verify each live frame (innermost first) against the expected stack.
        for i, expected_caller_info in enumerate(list(reversed(exp_stack))):
            try:
                frame = _getframe(i)
                caller_info = get_caller_info(frame)
            finally:
                # Drop the frame reference promptly to avoid reference cycles.
                del frame
            assert caller_info == expected_caller_info
        # test call sequence
        update_stack(exp_stack=exp_stack, line_num=7824, add=0)
        call_seq = get_formatted_call_sequence(depth=len(exp_stack))
        assert call_seq == get_exp_seq(exp_stack=exp_stack)
        if capsys:  # if capsys, test diag_msg
            update_stack(exp_stack=exp_stack, line_num=7831, add=0)
            before_time = datetime.now()
            diag_msg('message 1', 1, depth=len(exp_stack))
            after_time = datetime.now()
            diag_msg_args = TestDiagMsg.get_diag_msg_args(
                depth_arg=len(exp_stack),
                msg_arg=['message 1', 1])
            verify_diag_msg(exp_stack=exp_stack,
                            before_time=before_time,
                            after_time=after_time,
                            capsys=capsys,
                            diag_msg_args=diag_msg_args)
        # call module level function
        update_stack(exp_stack=exp_stack, line_num=7846, add=0)
        func_get_caller_info_3(exp_stack=exp_stack, capsys=capsys)
        # call method
        cls_get_caller_info3 = ClassGetCallerInfo3()
        update_stack(exp_stack=exp_stack, line_num=7851, add=1)
        cls_get_caller_info3.get_caller_info_m3(exp_stack=exp_stack,
                                                capsys=capsys)
        # call static method
        update_stack(exp_stack=exp_stack, line_num=7856, add=1)
        cls_get_caller_info3.get_caller_info_s3(exp_stack=exp_stack,
                                                capsys=capsys)
        # call class method
        update_stack(exp_stack=exp_stack, line_num=7861, add=1)
        ClassGetCallerInfo3.get_caller_info_c3(exp_stack=exp_stack,
                                               capsys=capsys)
        # call overloaded base class method
        update_stack(exp_stack=exp_stack, line_num=7866, add=1)
        cls_get_caller_info3.get_caller_info_m3bo(exp_stack=exp_stack,
                                                  capsys=capsys)
        # call overloaded base class static method
        update_stack(exp_stack=exp_stack, line_num=7871, add=1)
        cls_get_caller_info3.get_caller_info_s3bo(exp_stack=exp_stack,
                                                  capsys=capsys)
        # call overloaded base class class method
        update_stack(exp_stack=exp_stack, line_num=7876, add=1)
        ClassGetCallerInfo3.get_caller_info_c3bo(exp_stack=exp_stack,
                                                 capsys=capsys)
        # call subclass method
        cls_get_caller_info3s = ClassGetCallerInfo3S()
        update_stack(exp_stack=exp_stack, line_num=7882, add=1)
        cls_get_caller_info3s.get_caller_info_m3s(exp_stack=exp_stack,
                                                  capsys=capsys)
        # call subclass static method
        update_stack(exp_stack=exp_stack, line_num=7887, add=1)
        cls_get_caller_info3s.get_caller_info_s3s(exp_stack=exp_stack,
                                                  capsys=capsys)
        # call subclass class method
        update_stack(exp_stack=exp_stack, line_num=7892, add=1)
        ClassGetCallerInfo3S.get_caller_info_c3s(exp_stack=exp_stack,
                                                 capsys=capsys)
        # call overloaded subclass method
        update_stack(exp_stack=exp_stack, line_num=7897, add=1)
        cls_get_caller_info3s.get_caller_info_m3bo(exp_stack=exp_stack,
                                                   capsys=capsys)
        # call overloaded subclass static method
        update_stack(exp_stack=exp_stack, line_num=7902, add=1)
        cls_get_caller_info3s.get_caller_info_s3bo(exp_stack=exp_stack,
                                                   capsys=capsys)
        # call overloaded subclass class method
        update_stack(exp_stack=exp_stack, line_num=7907, add=1)
        ClassGetCallerInfo3S.get_caller_info_c3bo(exp_stack=exp_stack,
                                                  capsys=capsys)
        # call base method from subclass method
        update_stack(exp_stack=exp_stack, line_num=7912, add=1)
        cls_get_caller_info3s.get_caller_info_m3sb(exp_stack=exp_stack,
                                                   capsys=capsys)
        # call base static method from subclass static method
        update_stack(exp_stack=exp_stack, line_num=7917, add=1)
        cls_get_caller_info3s.get_caller_info_s3sb(exp_stack=exp_stack,
                                                   capsys=capsys)
        # call base class method from subclass class method
        update_stack(exp_stack=exp_stack, line_num=7922, add=1)
        ClassGetCallerInfo3S.get_caller_info_c3sb(exp_stack=exp_stack,
                                                  capsys=capsys)
        exp_stack.pop()
###########################################################################
# Class 2S Method 3
###########################################################################
    @classmethod
    def get_caller_info_c2s(cls,
                            exp_stack: Deque[CallerInfo],
                            capsys: Optional[Any]) -> None:
        """Get caller info class method 2.

        Class-method variant of the level-2 caller-info check: pushes this
        method's expected CallerInfo onto *exp_stack*, verifies the live
        frame stack, the formatted call sequence, and (when *capsys* is
        supplied) the diag_msg output, then drives every level-3 function
        and method variant so the verification repeats one level deeper.
        Pops its entry off *exp_stack* before returning.

        Args:
            exp_stack: The expected call stack
            capsys: Pytest fixture that captures output

        NOTE(review): the literal line_num values passed to update_stack
        below do not match this file's current line numbering -- presumably
        update_stack rewrites the expected line number; confirm.
        """
        exp_caller_info = CallerInfo(mod_name='test_diag_msg.py',
                                     cls_name='ClassGetCallerInfo2S',
                                     func_name='get_caller_info_c2s',
                                     line_num=7885)
        exp_stack.append(exp_caller_info)
        update_stack(exp_stack=exp_stack, line_num=7950, add=0)
        # Verify each live frame (innermost first) against the expected stack.
        for i, expected_caller_info in enumerate(list(reversed(exp_stack))):
            try:
                frame = _getframe(i)
                caller_info = get_caller_info(frame)
            finally:
                # Drop the frame reference promptly to avoid reference cycles.
                del frame
            assert caller_info == expected_caller_info
        # test call sequence
        update_stack(exp_stack=exp_stack, line_num=7957, add=0)
        call_seq = get_formatted_call_sequence(depth=len(exp_stack))
        assert call_seq == get_exp_seq(exp_stack=exp_stack)
        if capsys:  # if capsys, test diag_msg
            update_stack(exp_stack=exp_stack, line_num=7964, add=0)
            before_time = datetime.now()
            diag_msg('message 1', 1, depth=len(exp_stack))
            after_time = datetime.now()
            diag_msg_args = TestDiagMsg.get_diag_msg_args(
                depth_arg=len(exp_stack),
                msg_arg=['message 1', 1])
            verify_diag_msg(exp_stack=exp_stack,
                            before_time=before_time,
                            after_time=after_time,
                            capsys=capsys,
                            diag_msg_args=diag_msg_args)
        # call module level function
        update_stack(exp_stack=exp_stack, line_num=7979, add=0)
        func_get_caller_info_3(exp_stack=exp_stack, capsys=capsys)
        # call method
        cls_get_caller_info3 = ClassGetCallerInfo3()
        update_stack(exp_stack=exp_stack, line_num=7984, add=1)
        cls_get_caller_info3.get_caller_info_m3(exp_stack=exp_stack,
                                                capsys=capsys)
        # call static method
        update_stack(exp_stack=exp_stack, line_num=7989, add=1)
        cls_get_caller_info3.get_caller_info_s3(exp_stack=exp_stack,
                                                capsys=capsys)
        # call class method
        update_stack(exp_stack=exp_stack, line_num=7994, add=1)
        ClassGetCallerInfo3.get_caller_info_c3(exp_stack=exp_stack,
                                               capsys=capsys)
        # call overloaded base class method
        update_stack(exp_stack=exp_stack, line_num=7999, add=1)
        cls_get_caller_info3.get_caller_info_m3bo(exp_stack=exp_stack,
                                                  capsys=capsys)
        # call overloaded base class static method
        update_stack(exp_stack=exp_stack, line_num=8004, add=1)
        cls_get_caller_info3.get_caller_info_s3bo(exp_stack=exp_stack,
                                                  capsys=capsys)
        # call overloaded base class class method
        update_stack(exp_stack=exp_stack, line_num=8009, add=1)
        ClassGetCallerInfo3.get_caller_info_c3bo(exp_stack=exp_stack,
                                                 capsys=capsys)
        # call subclass method
        cls_get_caller_info3s = ClassGetCallerInfo3S()
        update_stack(exp_stack=exp_stack, line_num=8015, add=1)
        cls_get_caller_info3s.get_caller_info_m3s(exp_stack=exp_stack,
                                                  capsys=capsys)
        # call subclass static method
        update_stack(exp_stack=exp_stack, line_num=8020, add=1)
        cls_get_caller_info3s.get_caller_info_s3s(exp_stack=exp_stack,
                                                  capsys=capsys)
        # call subclass class method
        update_stack(exp_stack=exp_stack, line_num=8025, add=1)
        ClassGetCallerInfo3S.get_caller_info_c3s(exp_stack=exp_stack,
                                                 capsys=capsys)
        # call overloaded subclass method
        update_stack(exp_stack=exp_stack, line_num=8030, add=1)
        cls_get_caller_info3s.get_caller_info_m3bo(exp_stack=exp_stack,
                                                   capsys=capsys)
        # call overloaded subclass static method
        update_stack(exp_stack=exp_stack, line_num=8035, add=1)
        cls_get_caller_info3s.get_caller_info_s3bo(exp_stack=exp_stack,
                                                   capsys=capsys)
        # call overloaded subclass class method
        update_stack(exp_stack=exp_stack, line_num=8040, add=1)
        ClassGetCallerInfo3S.get_caller_info_c3bo(exp_stack=exp_stack,
                                                  capsys=capsys)
        # call base method from subclass method
        update_stack(exp_stack=exp_stack, line_num=8045, add=1)
        cls_get_caller_info3s.get_caller_info_m3sb(exp_stack=exp_stack,
                                                   capsys=capsys)
        # call base static method from subclass static method
        update_stack(exp_stack=exp_stack, line_num=8050, add=1)
        cls_get_caller_info3s.get_caller_info_s3sb(exp_stack=exp_stack,
                                                   capsys=capsys)
        # call base class method from subclass class method
        update_stack(exp_stack=exp_stack, line_num=8055, add=1)
        ClassGetCallerInfo3S.get_caller_info_c3sb(exp_stack=exp_stack,
                                                  capsys=capsys)
        exp_stack.pop()
###########################################################################
# Class 2S Method 4
###########################################################################
def get_caller_info_m2bo(self,
exp_stack: Deque[CallerInfo],
capsys: Optional[Any]) -> None:
"""Get caller info overloaded method 2.
Args:
exp_stack: The expected call stack
capsys: Pytest fixture that captures output
"""
exp_caller_info = CallerInfo(mod_name='test_diag_msg.py',
cls_name='ClassGetCallerInfo2S',
func_name='get_caller_info_m2bo',
line_num=8016)
exp_stack.append(exp_caller_info)
update_stack(exp_stack=exp_stack, line_num=8082, add=0)
for i, expected_caller_info in enumerate(list(reversed(exp_stack))):
try:
frame = _getframe(i)
caller_info = get_caller_info(frame)
finally:
del frame
assert caller_info == expected_caller_info
# test call sequence
update_stack(exp_stack=exp_stack, line_num=8089, add=0)
call_seq = get_formatted_call_sequence(depth=len(exp_stack))
assert call_seq == get_exp_seq(exp_stack=exp_stack)
if capsys: # if capsys, test diag_msg
update_stack(exp_stack=exp_stack, line_num=8096, add=0)
before_time = datetime.now()
diag_msg('message 1', 1, depth=len(exp_stack))
after_time = datetime.now()
diag_msg_args = TestDiagMsg.get_diag_msg_args(
depth_arg=len(exp_stack),
msg_arg=['message 1', 1])
verify_diag_msg(exp_stack=exp_stack,
before_time=before_time,
after_time=after_time,
capsys=capsys,
diag_msg_args=diag_msg_args)
# call module level function
update_stack(exp_stack=exp_stack, line_num=8111, add=0)
func_get_caller_info_3(exp_stack=exp_stack, capsys=capsys)
# | |
from pycassa import NotFoundException
from pycassa.pool import ConnectionPool
from pycassa.columnfamily import ColumnFamily
from pycassa.util import OrderedDict, convert_uuid_to_time
from pycassa.system_manager import SystemManager
from pycassa.types import (LongType, IntegerType, TimeUUIDType, LexicalUUIDType,
AsciiType, UTF8Type, BytesType, CompositeType,
OldPycassaDateType, IntermediateDateType, DateType,
BooleanType, CassandraType, DecimalType,
FloatType, Int32Type, UUIDType, DoubleType, DynamicCompositeType)
from pycassa.index import create_index_expression, create_index_clause
import pycassa.marshal as marshal
from nose import SkipTest
from nose.tools import (assert_raises, assert_equal, assert_almost_equal,
assert_true)
from datetime import date, datetime
from uuid import uuid1
from decimal import Decimal
import uuid
import unittest
import time
from collections import namedtuple
# Fixed TimeUUIDs (increasing timestamps) so ordering-sensitive tests
# are reproducible across runs.
TIME1 = uuid.UUID(hex='ddc6118e-a003-11df-8abf-00234d21610a')
TIME2 = uuid.UUID(hex='40ad6d4c-a004-11df-8abf-00234d21610a')
TIME3 = uuid.UUID(hex='dc3d5234-a00b-11df-8abf-00234d21610a')

# Shared column values and row keys used by all test classes below.
VALS = ['val1', 'val2', 'val3']
KEYS = ['key1', 'key2', 'key3']

# Module-wide connection pool; created in setup_module(), disposed in
# teardown_module().
pool = None

# Keyspace every column family in this module is created in.
TEST_KS = 'PycassaTestKeyspace'
def setup_module():
    """Open the module-wide ConnectionPool used by every test class."""
    global pool
    login = {'username': 'jsmith', 'password': '<PASSWORD>'}
    pool = ConnectionPool(TEST_KS,
                          pool_size=10,
                          credentials=login,
                          timeout=1.0)
def teardown_module():
    """Release all connections held by the module-wide pool."""
    pool.dispose()
class TestCFs(unittest.TestCase):
    """Insert/get/remove round-trip tests for standard column families,
    one column family per supported comparator type."""

    @classmethod
    def setup_class(cls):
        """Create one column family per comparator type and open handles."""
        sys = SystemManager()
        sys.create_column_family(TEST_KS, 'StdLong', comparator_type=LongType())
        sys.create_column_family(TEST_KS, 'StdInteger', comparator_type=IntegerType())
        sys.create_column_family(TEST_KS, 'StdBigInteger', comparator_type=IntegerType())
        sys.create_column_family(TEST_KS, 'StdDecimal', comparator_type=DecimalType())
        sys.create_column_family(TEST_KS, 'StdTimeUUID', comparator_type=TimeUUIDType())
        sys.create_column_family(TEST_KS, 'StdLexicalUUID', comparator_type=LexicalUUIDType())
        sys.create_column_family(TEST_KS, 'StdAscii', comparator_type=AsciiType())
        sys.create_column_family(TEST_KS, 'StdUTF8', comparator_type=UTF8Type())
        sys.create_column_family(TEST_KS, 'StdBytes', comparator_type=BytesType())
        sys.create_column_family(TEST_KS, 'StdComposite',
                                 comparator_type=CompositeType(LongType(), BytesType()))
        sys.create_column_family(TEST_KS, 'StdDynamicComposite',
                                 comparator_type=DynamicCompositeType({'a': AsciiType(),
                                     'b': BytesType(), 'c': DecimalType(), 'd': DateType(),
                                     'f': FloatType(), 'i': IntegerType(), 'l': LongType(),
                                     'n': Int32Type(), 's': UTF8Type(), 't': TimeUUIDType(),
                                     'u': UUIDType(), 'w': DoubleType(), 'x': LexicalUUIDType(),
                                     'y': BooleanType()}))
        sys.close()

        cls.cf_long = ColumnFamily(pool, 'StdLong')
        cls.cf_int = ColumnFamily(pool, 'StdInteger')
        cls.cf_big_int = ColumnFamily(pool, 'StdBigInteger')
        cls.cf_decimal = ColumnFamily(pool, 'StdDecimal')
        cls.cf_time = ColumnFamily(pool, 'StdTimeUUID')
        cls.cf_lex = ColumnFamily(pool, 'StdLexicalUUID')
        cls.cf_ascii = ColumnFamily(pool, 'StdAscii')
        cls.cf_utf8 = ColumnFamily(pool, 'StdUTF8')
        cls.cf_bytes = ColumnFamily(pool, 'StdBytes')
        cls.cf_composite = ColumnFamily(pool, 'StdComposite')
        cls.cf_dynamic_composite = ColumnFamily(pool, 'StdDynamicComposite')

        # BUG FIX: cf_big_int and cf_decimal were missing from this list,
        # so tearDown never cleaned the StdBigInteger/StdDecimal rows the
        # tests insert, leaking state between tests.
        cls.cfs = [cls.cf_long, cls.cf_int, cls.cf_big_int, cls.cf_decimal,
                   cls.cf_time, cls.cf_lex, cls.cf_ascii, cls.cf_utf8,
                   cls.cf_bytes, cls.cf_composite, cls.cf_dynamic_composite]

    def tearDown(self):
        """Remove every row from every column family used by this class."""
        for cf in TestCFs.cfs:
            for key, _ in cf.get_range():
                cf.remove(key)

    def make_group(self, cf, cols):
        """Bundle a column family with its column names and an ordered
        column-name -> value mapping for the three test columns."""
        diction = OrderedDict([(cols[0], VALS[0]),
                               (cols[1], VALS[1]),
                               (cols[2], VALS[2])])
        return {'cf': cf, 'cols': cols, 'dict': diction}

    def test_standard_column_family(self):
        # For each data type, create a group that includes its column family,
        # a set of column names, and a dictionary that maps from the column
        # names to values.
        type_groups = []

        long_cols = [1111111111111111,
                     2222222222222222,
                     3333333333333333]
        type_groups.append(self.make_group(TestCFs.cf_long, long_cols))

        int_cols = [1, 2, 3]
        type_groups.append(self.make_group(TestCFs.cf_int, int_cols))

        big_int_cols = [1 + int(time.time() * 10 ** 6),
                        2 + int(time.time() * 10 ** 6),
                        3 + int(time.time() * 10 ** 6)]
        type_groups.append(self.make_group(TestCFs.cf_big_int, big_int_cols))

        decimal_cols = [Decimal('1.123456789123456789'),
                        Decimal('2.123456789123456789'),
                        Decimal('3.123456789123456789')]
        type_groups.append(self.make_group(TestCFs.cf_decimal, decimal_cols))

        time_cols = [TIME1, TIME2, TIME3]
        type_groups.append(self.make_group(TestCFs.cf_time, time_cols))

        # BUG FIX: uuid.UUID(bytes=...) requires a bytes object on
        # Python 3; the previous str literals raised TypeError.
        lex_cols = [uuid.UUID(bytes=b'aaa aaa aaa aaaa'),
                    uuid.UUID(bytes=b'bbb bbb bbb bbbb'),
                    uuid.UUID(bytes=b'ccc ccc ccc cccc')]
        type_groups.append(self.make_group(TestCFs.cf_lex, lex_cols))

        ascii_cols = ['aaaa', 'bbbb', 'cccc']
        type_groups.append(self.make_group(TestCFs.cf_ascii, ascii_cols))

        utf8_cols = ['a\u0020', 'b\u0020', 'c\u0020']
        type_groups.append(self.make_group(TestCFs.cf_utf8, utf8_cols))

        bytes_cols = ['aaaa', 'bbbb', 'cccc']
        type_groups.append(self.make_group(TestCFs.cf_bytes, bytes_cols))

        composite_cols = [(1, 'foo'), (2, 'bar'), (3, 'baz')]
        type_groups.append(self.make_group(TestCFs.cf_composite, composite_cols))

        dynamic_composite_cols = [(('LongType', 1), ('BytesType', 'foo')),
                                  (('LongType', 2), ('BytesType', 'bar')),
                                  (('LongType', 3), ('BytesType', 'baz'))]
        type_groups.append(self.make_group(TestCFs.cf_dynamic_composite, dynamic_composite_cols))

        dynamic_composite_alias_cols = [(('l', 1), ('b', 'foo')),
                                        (('l', 2), ('b', 'bar')),
                                        (('l', 3), ('b', 'baz'))]
        type_groups.append(self.make_group(TestCFs.cf_dynamic_composite, dynamic_composite_alias_cols))

        # Begin the actual inserting and getting
        for group in type_groups:
            cf = group.get('cf')
            gdict = group.get('dict')
            gcols = group.get('cols')

            cf.insert(KEYS[0], gdict)
            assert_equal(cf.get(KEYS[0]), gdict)

            # Check each column individually
            for i in range(3):
                assert_equal(cf.get(KEYS[0], columns=[gcols[i]]),
                             {gcols[i]: VALS[i]})

            # Check that if we list all columns, we get the full dict
            assert_equal(cf.get(KEYS[0], columns=gcols[:]), gdict)
            # The same thing with a start and end instead
            assert_equal(cf.get(KEYS[0], column_start=gcols[0], column_finish=gcols[2]),
                         gdict)
            # A start and end that are the same
            assert_equal(cf.get(KEYS[0], column_start=gcols[0], column_finish=gcols[0]),
                         {gcols[0]: VALS[0]})

            assert_equal(cf.get_count(KEYS[0]), 3)

            # Test xget paging
            assert_equal(list(cf.xget(KEYS[0], buffer_size=2)), list(gdict.items()))
            assert_equal(list(cf.xget(KEYS[0], column_reversed=True, buffer_size=2)),
                         list(reversed(list(gdict.items()))))
            assert_equal(list(cf.xget(KEYS[0], column_start=gcols[0], buffer_size=2)),
                         list(gdict.items()))
            assert_equal(list(cf.xget(KEYS[0], column_finish=gcols[2], buffer_size=2)),
                         list(gdict.items()))
            assert_equal(list(cf.xget(KEYS[0], column_start=gcols[2], column_finish=gcols[0],
                                      column_reversed=True, buffer_size=2)),
                         list(reversed(list(gdict.items()))))
            assert_equal(list(cf.xget(KEYS[0], column_start=gcols[1], column_finish=gcols[1],
                                      column_reversed=True, buffer_size=2)),
                         [(gcols[1], VALS[1])])

            # Test removing rows
            cf.remove(KEYS[0], columns=gcols[:1])
            assert_equal(cf.get_count(KEYS[0]), 2)

            cf.remove(KEYS[0], columns=gcols[1:])
            assert_equal(cf.get_count(KEYS[0]), 0)

            # Insert more than one row now
            cf.insert(KEYS[0], gdict)
            cf.insert(KEYS[1], gdict)
            cf.insert(KEYS[2], gdict)

            ### multiget() tests ###

            res = cf.multiget(KEYS[:])
            for i in range(3):
                assert_equal(res.get(KEYS[i]), gdict)

            res = cf.multiget(KEYS[2:])
            assert_equal(res.get(KEYS[2]), gdict)

            # Check each column individually
            for i in range(3):
                res = cf.multiget(KEYS[:], columns=[gcols[i]])
                for j in range(3):
                    assert_equal(res.get(KEYS[j]), {gcols[i]: VALS[i]})

            # Check that if we list all columns, we get the full dict
            res = cf.multiget(KEYS[:], columns=gcols[:])
            for j in range(3):
                assert_equal(res.get(KEYS[j]), gdict)

            # The same thing with a start and end instead
            res = cf.multiget(KEYS[:], column_start=gcols[0], column_finish=gcols[2])
            for j in range(3):
                assert_equal(res.get(KEYS[j]), gdict)

            # A start and end that are the same
            res = cf.multiget(KEYS[:], column_start=gcols[0], column_finish=gcols[0])
            for j in range(3):
                assert_equal(res.get(KEYS[j]), {gcols[0]: VALS[0]})

            ### get_range() tests ###

            res = cf.get_range(start=KEYS[0])
            for sub_res in res:
                assert_equal(sub_res[1], gdict)

            res = cf.get_range(start=KEYS[0], column_start=gcols[0], column_finish=gcols[2])
            for sub_res in res:
                assert_equal(sub_res[1], gdict)

            res = cf.get_range(start=KEYS[0], columns=gcols[:])
            for sub_res in res:
                assert_equal(sub_res[1], gdict)
class TestSuperCFs(unittest.TestCase):
    """Insert/get/remove round-trip tests for super column families, one
    super column family per supported comparator type."""

    @classmethod
    def setup_class(cls):
        """Create one super column family per comparator type and open
        handles."""
        sys = SystemManager()
        sys.create_column_family(TEST_KS, 'SuperLong', super=True, comparator_type=LongType())
        sys.create_column_family(TEST_KS, 'SuperInt', super=True, comparator_type=IntegerType())
        sys.create_column_family(TEST_KS, 'SuperBigInt', super=True, comparator_type=IntegerType())
        sys.create_column_family(TEST_KS, 'SuperTime', super=True, comparator_type=TimeUUIDType())
        sys.create_column_family(TEST_KS, 'SuperLex', super=True, comparator_type=LexicalUUIDType())
        sys.create_column_family(TEST_KS, 'SuperAscii', super=True, comparator_type=AsciiType())
        sys.create_column_family(TEST_KS, 'SuperUTF8', super=True, comparator_type=UTF8Type())
        sys.create_column_family(TEST_KS, 'SuperBytes', super=True, comparator_type=BytesType())
        sys.close()

        cls.cf_suplong = ColumnFamily(pool, 'SuperLong')
        cls.cf_supint = ColumnFamily(pool, 'SuperInt')
        cls.cf_supbigint = ColumnFamily(pool, 'SuperBigInt')
        cls.cf_suptime = ColumnFamily(pool, 'SuperTime')
        cls.cf_suplex = ColumnFamily(pool, 'SuperLex')
        cls.cf_supascii = ColumnFamily(pool, 'SuperAscii')
        cls.cf_suputf8 = ColumnFamily(pool, 'SuperUTF8')
        cls.cf_supbytes = ColumnFamily(pool, 'SuperBytes')

        # BUG FIX: cf_supbigint was missing from this list, so tearDown
        # never cleaned the SuperBigInt rows the tests insert, leaking
        # state between tests.
        cls.cfs = [cls.cf_suplong, cls.cf_supint, cls.cf_supbigint,
                   cls.cf_suptime, cls.cf_suplex, cls.cf_supascii,
                   cls.cf_suputf8, cls.cf_supbytes]

    def tearDown(self):
        """Remove every row from every super column family used by this
        class."""
        for cf in TestSuperCFs.cfs:
            for key, _ in cf.get_range():
                cf.remove(key)

    def make_super_group(self, cf, cols):
        """Bundle a super CF with its supercolumn names and an ordered
        supercolumn-name -> {subcolumn: value} mapping."""
        diction = OrderedDict([(cols[0], {'bytes': VALS[0]}),
                               (cols[1], {'bytes': VALS[1]}),
                               (cols[2], {'bytes': VALS[2]})])
        return {'cf': cf, 'cols': cols, 'dict': diction}

    def test_super_column_families(self):
        # For each data type, create a group that includes its column family,
        # a set of column names, and a dictionary that maps from the column
        # names to values.
        type_groups = []

        long_cols = [1111111111111111,
                     2222222222222222,
                     3333333333333333]
        type_groups.append(self.make_super_group(TestSuperCFs.cf_suplong, long_cols))

        int_cols = [1, 2, 3]
        type_groups.append(self.make_super_group(TestSuperCFs.cf_supint, int_cols))

        big_int_cols = [1 + int(time.time() * 10 ** 6),
                        2 + int(time.time() * 10 ** 6),
                        3 + int(time.time() * 10 ** 6)]
        type_groups.append(self.make_super_group(TestSuperCFs.cf_supbigint, big_int_cols))

        time_cols = [TIME1, TIME2, TIME3]
        type_groups.append(self.make_super_group(TestSuperCFs.cf_suptime, time_cols))

        # BUG FIX: uuid.UUID(bytes=...) requires a bytes object on
        # Python 3; the previous str literals raised TypeError.
        lex_cols = [uuid.UUID(bytes=b'aaa aaa aaa aaaa'),
                    uuid.UUID(bytes=b'bbb bbb bbb bbbb'),
                    uuid.UUID(bytes=b'ccc ccc ccc cccc')]
        type_groups.append(self.make_super_group(TestSuperCFs.cf_suplex, lex_cols))

        ascii_cols = ['aaaa', 'bbbb', 'cccc']
        type_groups.append(self.make_super_group(TestSuperCFs.cf_supascii, ascii_cols))

        utf8_cols = ['a\u0020', 'b\u0020', 'c\u0020']
        type_groups.append(self.make_super_group(TestSuperCFs.cf_suputf8, utf8_cols))

        bytes_cols = ['aaaa', 'bbbb', 'cccc']
        type_groups.append(self.make_super_group(TestSuperCFs.cf_supbytes, bytes_cols))

        # Begin the actual inserting and getting
        for group in type_groups:
            cf = group.get('cf')
            gdict = group.get('dict')
            gcols = group.get('cols')

            cf.insert(KEYS[0], gdict)
            assert_equal(cf.get(KEYS[0]), gdict)

            # Check each supercolumn individually
            for i in range(3):
                res = cf.get(KEYS[0], columns=[gcols[i]])
                assert_equal(res, {gcols[i]: {'bytes': VALS[i]}})

            # Check that if we list all columns, we get the full dict
            assert_equal(cf.get(KEYS[0], columns=gcols[:]), gdict)
            # The same thing with a start and end instead
            assert_equal(cf.get(KEYS[0], column_start=gcols[0], column_finish=gcols[2]), gdict)
            # A start and end that are the same
            assert_equal(cf.get(KEYS[0], column_start=gcols[0], column_finish=gcols[0]),
                         {gcols[0]: {'bytes': VALS[0]}})

            # test xget paging
            assert_equal(list(cf.xget(KEYS[0], buffer_size=2)), list(gdict.items()))

            assert_equal(cf.get_count(KEYS[0]), 3)

            # Test removing rows
            cf.remove(KEYS[0], columns=gcols[:1])
            assert_equal(cf.get_count(KEYS[0]), 2)

            cf.remove(KEYS[0], columns=gcols[1:])
            assert_equal(cf.get_count(KEYS[0]), 0)

            # Insert more than one row now
            cf.insert(KEYS[0], gdict)
            cf.insert(KEYS[1], gdict)
            cf.insert(KEYS[2], gdict)

            ### multiget() tests ###

            res = cf.multiget(KEYS[:])
            for i in range(3):
                assert_equal(res.get(KEYS[i]), gdict)

            res = cf.multiget(KEYS[2:])
            assert_equal(res.get(KEYS[2]), gdict)

            # Check each column individually
            for i in range(3):
                res = cf.multiget(KEYS[:], columns=[gcols[i]])
                for j in range(3):
                    assert_equal(res.get(KEYS[j]), {gcols[i]: {'bytes': VALS[i]}})

            # Check that if we list all columns, we get the full dict
            res = cf.multiget(KEYS[:], columns=gcols[:])
            for j in range(3):
                assert_equal(res.get(KEYS[j]), gdict)

            # The same thing with a start and end instead
            res = cf.multiget(KEYS[:], column_start=gcols[0], column_finish=gcols[2])
            for j in range(3):
                assert_equal(res.get(KEYS[j]), gdict)

            # A start and end that are the same
            res = cf.multiget(KEYS[:], column_start=gcols[0], column_finish=gcols[0])
            for j in range(3):
                assert_equal(res.get(KEYS[j]), {gcols[0]: {'bytes': VALS[0]}})

            ### get_range() tests ###

            res = cf.get_range(start=KEYS[0])
            for sub_res in res:
                assert_equal(sub_res[1], gdict)

            res = cf.get_range(start=KEYS[0], column_start=gcols[0], column_finish=gcols[2])
            for sub_res in res:
                assert_equal(sub_res[1], gdict)

            res = cf.get_range(start=KEYS[0], columns=gcols[:])
            for sub_res in res:
                assert_equal(sub_res[1], gdict)
class TestSuperSubCFs(unittest.TestCase):
@classmethod
def setup_class(cls):
sys = SystemManager()
sys.create_column_family(TEST_KS, 'SuperLongSubLong', super=True,
comparator_type=LongType(), subcomparator_type=LongType())
sys.create_column_family(TEST_KS, 'SuperLongSubInt', super=True,
comparator_type=LongType(), subcomparator_type=IntegerType())
sys.create_column_family(TEST_KS, 'SuperLongSubBigInt', super=True,
comparator_type=LongType(), subcomparator_type=IntegerType())
sys.create_column_family(TEST_KS, 'SuperLongSubTime', super=True,
comparator_type=LongType(), subcomparator_type=TimeUUIDType())
sys.create_column_family(TEST_KS, 'SuperLongSubLex', super=True,
comparator_type=LongType(), subcomparator_type=LexicalUUIDType())
sys.create_column_family(TEST_KS, 'SuperLongSubAscii', super=True,
comparator_type=LongType(), subcomparator_type=AsciiType())
sys.create_column_family(TEST_KS, 'SuperLongSubUTF8', super=True,
comparator_type=LongType(), subcomparator_type=UTF8Type())
sys.create_column_family(TEST_KS, 'SuperLongSubBytes', super=True,
comparator_type=LongType(), subcomparator_type=BytesType())
sys.close()
cls.cf_suplong_sublong = ColumnFamily(pool, 'SuperLongSubLong')
cls.cf_suplong_subint = ColumnFamily(pool, 'SuperLongSubInt')
cls.cf_suplong_subbigint = ColumnFamily(pool, 'SuperLongSubBigInt')
cls.cf_suplong_subtime = ColumnFamily(pool, 'SuperLongSubTime')
cls.cf_suplong_sublex = ColumnFamily(pool, 'SuperLongSubLex')
cls.cf_suplong_subascii = ColumnFamily(pool, 'SuperLongSubAscii')
cls.cf_suplong_subutf8 = ColumnFamily(pool, 'SuperLongSubUTF8')
cls.cf_suplong_subbytes | |
>>> bnd_verts = [k for k, _ in enumerate(msh.vertices)]
>>> msh.insert_boundary_vertices(0, bnd_verts)
>>> print(msh.vertices)
[[0. 0.]
[0. 1.]
[1. 1.]
[1. 0.]]
>>> print(msh.material_regions)
[]
>>> # add a material region to the mesh
>>> # this material region fills the bottom half of the mesh
>>> import vcfempy.materials
>>> msh.add_vertices([[0, 0.5], [1, 0.5]])
>>> print(msh.vertices)
[[0. 0. ]
[0. 1. ]
[1. 1. ]
[1. 0. ]
[0. 0.5]
[1. 0.5]]
>>> rock = vcfempy.materials.Material('rock')
>>> mr_rock_verts = [0, 4, 5, 3]
>>> mr_rock = vcfempy.meshgen.MaterialRegion2D(msh,
... mr_rock_verts,
... rock)
>>> print(msh.num_material_regions)
1
>>> for mr in msh.material_regions:
... print(mr.vertices)
[0, 4, 5, 3]
>>> # add another material region filling the top half of the mesh
>>> sand = vcfempy.materials.Material('sand')
>>> mr_sand_verts = [4, 1, 2, 5]
>>> mr_sand = vcfempy.meshgen.MaterialRegion2D(msh,
... mr_sand_verts,
... sand)
>>> print(msh.num_material_regions)
2
>>> for mr in msh.material_regions:
... print(mr.vertices)
[0, 4, 5, 3]
[4, 1, 2, 5]
>>> # remove a material region from the mesh
>>> msh.remove_material_region(mr_rock)
>>> print(msh.num_material_regions)
1
>>> for mr in msh.material_regions:
... print(mr.vertices)
[4, 1, 2, 5]
"""
return self._material_regions
def add_material_region(self, material_region):
"""Add a :c:`MaterialRegion2D` to the :c:`PolyMesh2D`.
Parameters
----------
material_region : :c:`MaterialRegion2D`
:c:`MaterialRegion2D` to add to the :c:`PolyMesh2D`.
Raises
------
TypeError
If **material_region** is not a :c:`MaterialRegion2D`.
ValueError
If **material_region** is already in :a:`material_regions` or
does not have this :c:`PolyMesh2D` as its parent.
Note
----
It is not normally necessary to call :m:`add_material_region` when
creating a new :c:`MaterialRegion2D` since it will add itself to the
parent :c:`PolyMesh2D` by default. This is only necessary if the
:c:`MaterialRegion2D` was created with **add_to_mesh** = ``False`` or
if the :c:`MaterialRegion2D` was previously removed from the
:c:`PolyMesh2D` using :m:`remove_material_region`.
Examples
--------
>>> # create a mesh and a material region, this adds the material
>>> # region by default
>>> import vcfempy.meshgen
>>> msh = vcfempy.meshgen.PolyMesh2D('test mesh')
>>> mr = vcfempy.meshgen.MaterialRegion2D(msh)
>>> print(mr.mesh.name)
test mesh
>>> print(mr in msh.material_regions)
True
>>> # create another material region, but do not add it to the mesh
>>> mr_new = vcfempy.meshgen.MaterialRegion2D(msh, add_to_mesh=False)
>>> print(mr_new.mesh.name)
test mesh
>>> print(mr_new in msh.material_regions)
False
>>> # add the new material region to its parent mesh
>>> msh.add_material_region(mr_new)
>>> print(mr_new in msh.material_regions)
True
>>> # try to add invalid material regions
>>> msh.add_material_region(1)
Traceback (most recent call last):
...
TypeError: material region not vcfempy.meshgen.MaterialRegion2D
>>> msh.add_material_region(mr)
Traceback (most recent call last):
...
ValueError: material region already in list
>>> new_msh = vcfempy.meshgen.PolyMesh2D()
>>> mr_new = vcfempy.meshgen.MaterialRegion2D(new_msh)
>>> msh.add_material_region(mr_new)
Traceback (most recent call last):
...
ValueError: material region does not have self as mesh
"""
if not isinstance(material_region, MaterialRegion2D):
raise TypeError('material region not '
+ 'vcfempy.meshgen.MaterialRegion2D')
if material_region in self.material_regions:
raise ValueError('material region already in list')
if material_region.mesh is not self:
raise ValueError('material region does not have self as mesh')
self.material_regions.append(material_region)
self.mesh_valid = False
def remove_material_region(self, material_region):
"""Remove a :c:`MaterialRegion2D` from the :c:`PolyMesh2D`.
Parameters
----------
material_region : :c:`MaterialRegion2D`
:c:`MaterialRegion2D` to remove from the :c:`PolyMesh2D`.
Raises
------
ValueError
If **material_region** is not in :a:`material_regions`.
Note
----
When removing a material region from the :c:`PolyMesh2D`, the
:a:`MaterialRegion2D.mesh` is not changed, and it can be added again
using :m:`add_material_region` if desired.
Examples
--------
>>> # create a mesh and a material region, then remove it
>>> import vcfempy.meshgen
>>> msh = vcfempy.meshgen.PolyMesh2D()
>>> mr = vcfempy.meshgen.MaterialRegion2D(msh)
>>> msh.remove_material_region(mr)
>>> print(msh.material_regions)
[]
>>> # try to remove a material region that is not in the mesh
>>> msh.remove_material_region(mr)
Traceback (most recent call last):
...
ValueError: list.remove(x): x not in list
"""
self.material_regions.remove(material_region)
self.mesh_valid = False
@property
def num_mesh_edges(self):
"""Number of non-boundary edges to be preserved in mesh generation
for the :c:`PolyMesh2D`.
Returns
-------
`int`
The number of :c:`MeshEdge2D` in the :c:`PolyMesh2D`.
Examples
--------
>>> # create a new mesh, no initial information provided
>>> import vcfempy.meshgen
>>> msh = vcfempy.meshgen.PolyMesh2D()
>>> print(msh.num_mesh_edges)
0
>>> # add some vertices, create a new mesh edge, and add it to
>>> # the mesh
>>> new_verts = [[0, 0], [0, 1], [1, 1], [1.5, 0.5], [1, 0]]
>>> bnd_verts = [k for k, _ in enumerate(new_verts)]
>>> msh.add_vertices(new_verts)
>>> msh.insert_boundary_vertices(0, bnd_verts)
>>> new_verts = [[0.1, 0.1], [0.2, 0.2]]
>>> msh.add_vertices(new_verts)
>>> me = vcfempy.meshgen.MeshEdge2D(msh, [5, 6])
>>> print(msh.num_mesh_edges)
1
"""
return len(self.mesh_edges)
@property
def mesh_edges(self):
"""List of :c:`MeshEdge2D` defining non-boundary edges to be
preserved in mesh generation for the :c:`PolyMesh2D`.
Returns
-------
`list` of :c:`MeshEdge2D`
The list of :c:`MeshEdge2D` in the :c:`PolyMesh2D`.
Note
----
The list of :a:`mesh_edges` is not intended to be directly mutable.
Instead modify it using the :m:`add_mesh_edge` and
:m:`remove_mesh_edge` methods. New :c:`MeshEdge2D` objects require a
parent mesh to be set, and by default will be added to that parent
mesh.
Examples
--------
>>> # initialize a mesh, no initial properties provided
>>> import vcfempy.meshgen
>>> msh = vcfempy.meshgen.PolyMesh2D()
>>> print(msh.mesh_edges)
[]
>>> # add some vertices to the mesh and add a mesh edge
>>> new_verts = [[0, 0], [0, 1], [1, 1], [1, 0]]
>>> msh.add_vertices(new_verts)
>>> bnd_verts = [k for k, _ in enumerate(msh.vertices)]
>>> msh.insert_boundary_vertices(0, bnd_verts)
>>> msh.add_vertices([[0.25, 0.25], [0.75, 0.75]])
>>> print(msh.vertices)
[[0. 0. ]
[0. 1. ]
[1. 1. ]
[1. 0. ]
[0.25 0.25]
[0.75 0.75]]
>>> me = vcfempy.meshgen.MeshEdge2D(msh, [4, 5])
>>> print(msh.num_mesh_edges)
1
>>> for me in msh.mesh_edges:
... print(me.vertices)
[4, 5]
>>> print(msh.vertices[msh.mesh_edges[0].vertices, :])
[[0.25 0.25]
[0.75 0.75]]
>>> # add two more mesh edges
>>> # mesh edges can overlap the boundaries
>>> msh.add_vertices([[0.5, 0.75], [1, 1.25],
... [0.5, 0.25], [0, -0.25]])
>>> print(msh.vertices)
[[ 0. 0. ]
[ 0. 1. ]
[ 1. 1. ]
[ 1. 0. ]
[ 0.25 0.25]
[ 0.75 0.75]
[ 0.5 0.75]
[ 1. 1.25]
[ 0.5 0.25]
[ 0. -0.25]]
>>> me_new = [vcfempy.meshgen.MeshEdge2D(msh, [k, k+1])
... for k in [6, 8]]
>>> print(msh.num_mesh_edges)
3
>>> for me in msh.mesh_edges:
... print(me.vertices)
[4, 5]
[6, 7]
[8, 9]
>>> for k, me in enumerate(msh.mesh_edges):
... print(f'Mesh edge {k}')
... print(msh.vertices[me.vertices, :])
... print()
Mesh edge 0
[[0.25 0.25]
[0.75 0.75]]
<BLANKLINE>
Mesh edge 1
[[0.5 0.75]
[1. 1.25]]
<BLANKLINE>
Mesh edge 2
[[ 0.5 0.25]
[ 0. -0.25]]
<BLANKLINE>
"""
return self._mesh_edges
def add_mesh_edge(self, mesh_edge):
"""Add a :c:`MeshEdge2D` to the :c:`PolyMesh2D`.
Parameters
----------
mesh_edge : :c:`MeshEdge2D`
:c:`MeshEdge2D` to add to the :c:`PolyMesh2D`.
Raises
------
TypeError
If **mesh_edge** is not a :c:`MeshEdge2D`.
ValueError
If **mesh_edge** is already in :a:`mesh_edges` or does not have
this :c:`PolyMesh2D` as its parent.
Note
----
It is not normally necessary to call :m:`add_mesh_edge` when
creating a new :c:`MeshEdge2D` since it will add itself to the
parent :c:`PolyMesh2D` by default. This is only necessary if the
:c:`MeshEdge2D` was created with **add_to_mesh** = ``False`` or
if the :c:`MeshEdge2D` was previously removed from the
:c:`PolyMesh2D` using :m:`remove_mesh_edge`.
Examples
--------
>>> # create a mesh and a mesh edge, this adds the mesh edge by default
>>> import vcfempy.meshgen
>>> msh = vcfempy.meshgen.PolyMesh2D('test mesh')
>>> me = vcfempy.meshgen.MeshEdge2D(msh)
>>> print(me.mesh.name)
test mesh
>>> print(me in msh.mesh_edges)
True
>>> # create another mesh edge, but do not add it to the mesh
>>> me_new = vcfempy.meshgen.MeshEdge2D(msh, add_to_mesh=False)
>>> print(me_new.mesh.name)
test mesh
>>> print(me_new in msh.mesh_edges)
False
>>> # add the new mesh edge to its parent mesh
>>> msh.add_mesh_edge(me_new)
>>> print(me_new in msh.mesh_edges)
True
>>> # try to add invalid mesh edges
>>> msh.add_mesh_edge(1)
Traceback (most recent call last):
...
TypeError: mesh edge not vcfempy.meshgen.MeshEdge2D
>>> msh.add_mesh_edge(me)
Traceback (most recent call last):
...
ValueError: mesh edge already in list
>>> new_msh = vcfempy.meshgen.PolyMesh2D()
>>> me_new = vcfempy.meshgen.MeshEdge2D(new_msh)
>>> msh.add_mesh_edge(me_new)
Traceback (most recent call last):
...
ValueError: mesh edge does not have self as mesh
"""
if not isinstance(mesh_edge, MeshEdge2D):
raise TypeError('mesh edge not vcfempy.meshgen.MeshEdge2D')
if mesh_edge in self.mesh_edges:
raise ValueError('mesh edge already in list')
if mesh_edge.mesh is not self:
raise ValueError('mesh edge does not | |
('renorm', (S, S, S), (1, 2, 3), 'norm_1'),
('renorm', (S, S, S), (inf, 2, 0.5), 'norm_inf'),
('repeat', (S,), (2,), 'single_number'),
('repeat', (), (2, 3), 'scalar'),
('repeat', (2, 2), (3, 2)),
('repeat', (2, 2), (1, 3, 1, 2), 'unsqueeze'),
('cumsum', (S, S, S), (0,), 'dim0', (), [0]),
('cumsum', (S, S, S), (1,), 'dim1', (), [0]),
('cumsum', (S, S, S), (1,), 'dim1_cast', (), [0], (), ident, {'dtype': torch.float64}),
('cumsum', (), (0,), 'dim0_scalar', (), [0]),
('cumprod', (S, S, S), (0,)),
('cumprod', (S, S, S), (1,), 'dim1', (), [0]),
('cumprod', (), (0,), 'scalar'),
('cumprod', (torch.tensor(0., requires_grad=True)), (0,), 'scalar_zeros'),
('cumprod', prod_zeros(S, [0, 1]), (1,), 'zeros_dim2', (), [0]),
('cumprod', prod_zeros(S, [0, 2]), (1,), 'zeros_dim1', (), [0]),
('cumprod', prod_zeros(S, [1, 2]), (1,), 'zeros_dim0', (), [0]),
('cumprod', prod_zeros(S, [1, 2]), (1,), 'zeros_dim0_cast', (), [0], (), ident, {'dtype': torch.float64}),
('log_softmax', (S, S, S), (1, torch.float64,), 'kwarg_dtype_would_break_jit_loader', (True,)),
('unfold', (), (0, 1, 1), 'scalar', (), [0]),
('unfold', (S, S, S, S), (1, 3, 1), '', (), [0]),
('unfold', (S, S, S), (2, 3, 2), 'lastdim', (), [0]),
('addmm', (S, M), ((S, S), (S, M)), '', (True, ['aten::add', 'aten::mm'])),
('addmm', (1,), ((S, S), (S, M)), 'broadcast_lhs', (True, ['aten::add', 'aten::mm'])),
('addmm', (S, M), ((S, S), (S, M)), 'coef', (True,), (), (), ident, {'beta': 0.2, 'alpha': 0.6}),
('addmm', (1,), ((S, S), (S, M)), 'broadcast_lhs_coef', (True,), (), (), ident, {'beta': 0.2, 'alpha': 0.6}),
('addmm', (), ((S, S), (S, M)), 'scalar_broadcast_lhs', (True, ['aten::add', 'aten::mm'])),
('addmm', (), ((S, S), (S, M)), 'scalar_broadcast_lhs_coef', (True,), (), (), ident, {'beta': 0.2, 'alpha': 0.6}),
('addbmm', (S, M), ((S, S, S), (S, S, M)),),
('addbmm', (1,), ((S, S, S), (S, S, M)), 'broadcast_lhs'),
('addbmm', (S, M), ((S, S, S), (S, S, M)), 'coef', (), (), (), ident, {'beta': 0.2, 'alpha': 0.6}),
('addbmm', (1,), ((S, S, S), (S, S, M)), 'broadcast_lhs_coef', (),
(), (), ident, {'beta': 0.2, 'alpha': 0.6}),
('addbmm', (), ((S, S, S), (S, S, M)), 'scalar_broadcast_lhs'),
('addbmm', (), ((S, S, S), (S, S, M)), 'scalar_broadcast_lhs_coef', (), (), (), ident,
{'beta': 0.2, 'alpha': 0.6}),
('baddbmm', (S, S, M), ((S, S, S), (S, S, M)),),
('baddbmm', (1,), ((S, S, S), (S, S, M)), 'broadcast_lhs'),
('baddbmm', (S, S, M), ((S, S, S), (S, S, M)), 'coef', (), (), (), ident, {'beta': 0.2, 'alpha': 0.6}),
('baddbmm', (1,), ((S, S, S), (S, S, M)), 'broadcast_lhs_coef', (),
(), (), ident, {'beta': 0.2, 'alpha': 0.6}),
('baddbmm', (), ((S, S, S), (S, S, M)), 'scalar_broadcast_lhs'),
('baddbmm', (), ((S, S, S), (S, S, M)), 'scalar_broadcast_lhs_coef', (), (), (), ident,
{'beta': 0.2, 'alpha': 0.6}),
('addmv', (S,), ((S, M), (M,)),),
('addmv', (1,), ((S, M), (M,)), 'broadcast_lhs'),
('addmv', (S,), ((S, M), (M,)), 'coef', (), (), (), ident, {'beta': 0.2, 'alpha': 0.6}),
('addmv', (1,), ((S, M), (M,)), 'broadcast_lhs_coef', (), (), (), ident, {'beta': 0.2, 'alpha': 0.6}),
('addmv', (), ((S, M), (M,)), 'scalar_broadcast_lhs'),
('addmv', (), ((S, M), (M,)), 'scalar_broadcast_lhs_coef', (), (), (), ident, {'beta': 0.2, 'alpha': 0.6}),
('addr', (S, M), ((S,), (M,)),),
('addr', (), ((S,), (M,)), 'broadcast_lhs'),
('addr', (S, M), ((S,), (M,)), 'coef', (), (), (), ident, {'beta': 0.2, 'alpha': 0.6}),
('addr', (), ((S,), (M,)), 'broadcast_lhs_coef', (), (), (), ident, {'beta': 0.2, 'alpha': 0.6}),
('dot', (L,), ((L,),), '', (True,)),
('mm', (S, M), ((M, S),), '', (True,)),
('bmm', (M, S, M), ((M, M, S),), '', (True,)),
('mv', (S, M), ((M,),), '', (True,)),
('ger', (S,), ((M,),)),
('matmul', (L,), ((L,),), '', (True,)),
('matmul', (S, M), ((M,),), "2d_1d", (True,)),
('matmul', (M,), ((M, S),), "1d_2d", (True,)),
('matmul', (S, M), ((M, S),), "2d_2d", (True,)),
('matmul', (S, S, M), ((M,),), "3d_1d", (True,)),
('matmul', (S, S, M), ((M, S),), "3d_2d", (True,)),
('matmul', (M,), ((S, M, S),), "1d_3d", (True,)),
('matmul', (S, M), ((S, M, S),), "2d_3d", (True,)),
('matmul', (S, S, M, M), ((S, S, M, S),), "4d_4d", (True,)),
('matmul', (S, S, M, M), ((M,),), "4d_1d", (True,)),
('matmul', (M,), ((S, S, M, S),), "1d_4d", (True,)),
('matrix_power', (S, S), [2], "n=2"),
('matrix_power', (S, S, S), [3], "n=3"),
('matrix_power', (S, S, S), [1], "n=1"),
('matrix_power', (S, S, S), [0], "n=0"),
('matrix_power', lambda: random_fullrank_matrix_distinct_singular_value(S), [-1], "n=-1", (),
NO_ARGS, [skipIfNoLapack]),
('matrix_power', lambda: random_fullrank_matrix_distinct_singular_value(S), [-3], "n=-3", (),
NO_ARGS, [skipIfNoLapack]),
('matrix_power', lambda: random_fullrank_matrix_distinct_singular_value(S, S), [-2], "n=-2", (),
NO_ARGS, [skipIfNoLapack]),
('mvlgamma', torch.empty(S,).uniform_(0.5, 1), [1], "p=1"),
('mvlgamma', torch.empty(S,).uniform_(1, 2), [2], "p=2"),
('mvlgamma', torch.empty(S, S).uniform_(1.5, 3), [3], "p=3"),
('mvlgamma', torch.empty(S, S).uniform_(2.5, 5), [5], "p=5"),
('addcmul', (S, S), ((S, S), (S, S)), '', (True,)),
('addcmul', (S, S), ((S, 1), (1, S)), 'broadcast_rhs', (True,)),
('addcmul', (1,), ((S, S, 1), (1, S)), 'broadcast_all', (True,)),
('addcmul', (S, S), ((S, S), (S, S)), 'scale', (True,), (), (), ident, {'value': 0.5}),
('addcmul', (S, S), ((S, 1), (1, S)), 'scale_broadcast_rhs', (True,), (), (), ident, {'value': 0.5}),
('addcmul', (1,), ((S, S, 1), (1, S)), 'scale_broadcast_all', (True,), (), (), ident, {'value': 0.5}),
('addcmul', (), ((), ()), 'scalar', (True,)),
('addcmul', (S, S), ((), ()), 'scalar_broadcast_rhs', (True,)),
('addcmul', (), ((S, S, 1), (1, S)), 'scalar_broadcast_lhs', (True,)),
('addcmul', (), ((), ()), 'scalar_scale', (True,), (), (), ident, {'value': 0.5}),
('addcmul', (S, S), ((), ()), 'scalar_scale_broadcast_rhs', (True,), (), (), ident, {'value': 0.5}),
('addcmul', (), ((S, S, 1), (1, S)), 'scalar_scale_broadcast_lhs', (True,), (), (), ident, {'value': 0.5}),
('addcdiv', (S, S), ((S, S), (S, S))),
('addcdiv', (S, S), ((S, 1), (1, S)), 'broadcast_rhs'),
('addcdiv', (1,), ((S, S, 1), (1, S)), 'broadcast_all'),
('addcdiv', (S, S), ((S, S), (S, S)), 'scale', (), (), (), ident, {'value': 0.5}),
('addcdiv', (S, S), ((S, 1), (1, S)), 'scale_broadcast_rhs', (), (), (), ident, {'value': 0.5}),
('addcdiv', (1,), ((S, S, 1), (1, S)), 'scale_broadcast_all', (), (), (), ident, {'value': 0.5}),
('addcdiv', (), ((), ()), 'scalar'),
('addcdiv', (S, S), ((), ()), 'scalar_broadcast_rhs'),
('addcdiv', (), ((S, S, 1), (1, S)), 'scalar_broadcast_lhs'),
('addcdiv', (), ((), ()), 'scalar_scale', (), (), (), ident, {'value': 0.5}),
('addcdiv', (S, S), ((), ()), 'scalar_scale_broadcast_rhs', (), (), (), ident, {'value': 0.5}),
('addcdiv', (), ((S, S, 1), (1, S)), 'scalar_scale_broadcast_lhs', (), (), (), ident, {'value': 0.5}),
('zero_', (S, S, S), NO_ARGS),
('zero_', (), NO_ARGS, 'scalar'),
('logsumexp', (S, S), (1,), '', (True,)),
('logsumexp', (), (0,), 'scalar', (True,)),
('norm', (S, S), (), 'default'),
('norm', (S, S), (2,), '2'),
('norm', (S, S), (0,), '0'),
('norm', (S, S), (0.5,), '0_5'),
('norm', (S, S), (1,), '1'),
('norm', (S, S), (3,), '3'),
('norm', (S, S), (inf,), 'inf'),
('norm', (S, S), (-inf,), '-inf'),
('norm', (S, S), ('fro',), 'fro_default'),
('norm', (S, S), ('fro', [0, 1],), 'fro'),
('norm', (S, S), ('nuc',), 'nuc', (), NO_ARGS, [skipIfNoLapack]),
('norm', (S, S, S), ('nuc', [1, 2]), 'nuc_batched', (), NO_ARGS, [skipIfNoLapack]),
('norm', (S, S), (-1,), 'neg_1'),
('norm', (S, S), (-2,), 'neg_2'),
('norm', (S, S), (-0.5,), 'neg_0_5'),
('norm', (S, S), (-1.5,), 'neg_1_5'),
('norm', (S, S), (-2, 1,), 'neg_2_2_dim', (), [1]),
('norm', (S, S), (-1, 1,), 'neg_1_2_dim', (), [1]),
('norm', (S, S), (0, 1,), '0_2_dim', (), [1]),
('norm', (S, S), (1, 1,), '1_2_dim', (), [1]),
('norm', (S, S), (2, 1,), '2_2_dim', (), [1]),
('norm', (S, S), (3, 1,), '3_2_dim', (), [1]),
('norm', (S, S), (inf, 1,), 'inf_2_dim'),
('norm', torch.rand(S, S, S) + 5e-2, (1.5,), '1_5_default'),
('norm', (S, S, S), (2, 1), '2_dim', (), [1]),
('norm', (S, S, S), (3, 1), '3_dim', (), [1]),
('norm', torch.rand(S, S, S) + 5e-2, (1.5, 1), '1_5_dim', (), [1]),
('norm', (S, S, S), (2, 1, True), 'keepdim_2_dim', (), [1]),
('norm', (S, S, S), (3, 1, True), 'keepdim_3_dim', (), [1]),
('norm', torch.rand(S, S, S) + 5e-2, (1.5, 1, True), 'keepdim_1_5_dim', (), [1]),
('norm', (), (2, 0), '2_dim_scalar', (), [1]),
('norm', (), (3, 0), '3_dim_scalar', (), [1]),
('norm', (), (2, 0, True), 'keepdim_2_dim_scalar', (), [1]),
('norm', (), (3, 0, True), 'keepdim_3_dim_scalar', (), [1]),
('clone', (S, M, S), NO_ARGS),
('clone', (), NO_ARGS, 'scalar'),
('contiguous', (S, S), NO_ARGS, '', (True,)),
('contiguous', torch.randn(S, S).transpose(0, 1), NO_ARGS, 'not_contiguous', (True,)),
('dist', (S, S, S), ((S, S, S),)),
('dist', (S, S, S), ((S,),), 'broadcast_rhs'),
('dist', (S,), ((S, S, S),), 'broadcast_lhs'),
('dist', (S, 1, S), ((S, S),), 'broadcast_all'),
('dist', (), ((),), 'scalar'),
('dist', (S, S, S), ((),), 'scalar_broadcast_rhs'),
('dist', (), ((S, S, S),), 'scalar_broadcast_lhs'),
('dist', (S, S, S), ((S, S, S), 4), '4'),
('dist', (S, S, S), ((S,), 4), '4_broadcast_rhs'),
('dist', (S,), ((S, S, S), 4), | |
finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance=False,
block_device_info=None, power_on=True):
"""Completes a resize, turning on the migrated instance."""
vm_ref = vm_util.get_vm_ref(self._session, instance)
flavor = instance.flavor
boot_from_volume = compute_utils.is_volume_backed_instance(context,
instance)
reattach_volumes = False
# 2. Relocate the VM if necessary
# If the dest_compute is different from the source_compute, it means we
# need to relocate the VM here since we are running on the dest_compute
if migration.source_compute != migration.dest_compute:
# Get the root disk vmdk object's adapter type
vmdk = vm_util.get_vmdk_info(self._session, vm_ref,
uuid=instance.uuid)
adapter_type = vmdk.adapter_type
self._detach_volumes(instance, block_device_info)
reattach_volumes = True
LOG.debug("Relocating VM for migration to %s",
migration.dest_compute, instance=instance)
try:
self._relocate_vm(vm_ref, context, instance, network_info,
image_meta)
LOG.debug("Relocated VM to %s", migration.dest_compute,
instance=instance)
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error("Relocating the VM failed with error: %s", e,
instance=instance)
self._attach_volumes(instance, block_device_info,
adapter_type)
self.update_cluster_placement(context, instance)
self.disable_drs_if_needed(instance)
self._update_instance_progress(context, instance,
step=2,
total_steps=RESIZE_TOTAL_STEPS)
# 3.Reconfigure the VM and disk
self._resize_vm(context, instance, vm_ref, flavor, image_meta)
if not boot_from_volume and resize_instance:
vmdk = vm_util.get_vmdk_info(self._session, vm_ref,
uuid=instance.uuid)
self._resize_disk(instance, vm_ref, vmdk, flavor)
self._update_instance_progress(context, instance,
step=3,
total_steps=RESIZE_TOTAL_STEPS)
# 4. Purge ephemeral and swap disks
self._remove_ephemerals_and_swap(vm_ref)
self._update_instance_progress(context, instance,
step=4,
total_steps=RESIZE_TOTAL_STEPS)
# 5. Update ephemerals
self._resize_create_ephemerals_and_swap(vm_ref, instance,
block_device_info)
self._update_instance_progress(context, instance,
step=5,
total_steps=RESIZE_TOTAL_STEPS)
# 6. Attach the volumes (if necessary)
if reattach_volumes:
self._attach_volumes(instance, block_device_info, adapter_type)
self._update_instance_progress(context, instance,
step=6,
total_steps=RESIZE_TOTAL_STEPS)
# 7. Start VM
if power_on:
vm_util.power_on_instance(self._session, instance, vm_ref=vm_ref)
self._update_instance_progress(context, instance,
step=7,
total_steps=RESIZE_TOTAL_STEPS)
    def _relocate_vm(self, vm_ref, context, instance, network_info,
                     image_meta=None):
        """Relocate the VM to a datastore/folder on this compute node.

        Builds a RelocateSpec targeting a datastore selected by storage
        policy and allowed datastore types, placing the VM under the
        project 'Instances' folder, and (if network_info is given)
        rewrites each network adapter's backing to match the VIFs.
        Raises exception.NotFound if a VIF's MAC has no matching device
        on the VM.
        """
        # Fall back to the instance's stored image metadata when the
        # caller does not supply one.
        image_meta = image_meta or instance.image_meta
        storage_policy = self._get_storage_policy(instance.flavor)
        allowed_ds_types = ds_util.get_allowed_datastore_types(
            image_meta.properties.hw_disk_type)
        # Pick a destination datastore compatible with the policy, the
        # configured datastore regex, and the allowed datastore types.
        datastore = ds_util.get_datastore(self._session, self._cluster,
                                          self._datastore_regex,
                                          storage_policy,
                                          allowed_ds_types)
        dc_info = self.get_datacenter_ref_and_name(datastore.ref)
        folder = self._get_project_folder(dc_info, instance.project_id,
                                          'Instances')
        client_factory = self._session.vim.client.factory
        spec = vm_util.relocate_vm_spec(client_factory,
                                        res_pool=self._root_resource_pool,
                                        folder=folder, datastore=datastore.ref)
        # Iterate over the network adapters and update the backing
        if network_info:
            spec.deviceChange = []
            vif_model = image_meta.properties.get('hw_vif_model',
                                                  constants.DEFAULT_VIF_MODEL)
            hardware_devices = vm_util.get_hardware_devices(self._session,
                                                            vm_ref)
            vif_infos = vmwarevif.get_vif_info(self._session,
                                               self._cluster,
                                               utils.is_neutron(),
                                               vif_model,
                                               network_info)
            for vif_info in vif_infos:
                # Match the VIF to an existing adapter by MAC address.
                device = vmwarevif.get_network_device(hardware_devices,
                                                      vif_info['mac_address'])
                if not device:
                    msg = _("No device with MAC address %s exists on the "
                            "VM") % vif_info['mac_address']
                    raise exception.NotFound(msg)
                # Update the network device backing
                config_spec = client_factory.create(
                    'ns0:VirtualDeviceConfigSpec')
                # NOTE(review): mutates `device` in place; the edited
                # device is then attached to the config spec below.
                vm_util.set_net_device_backing(
                    client_factory, device, vif_info)
                config_spec.operation = "edit"
                config_spec.device = device
                spec.deviceChange.append(config_spec)
        # Execute the relocation (blocking call through the session).
        vm_util.relocate_vm(self._session, vm_ref, spec=spec)
    def live_migration(self, instance, migrate_data, volume_mapping):
        """Live-migrate the instance using relocate defaults from the
        destination.

        Builds a RelocateSpec from ``migrate_data.relocate_defaults``
        (pool/host/folder/datastore morefs, optional remote-service
        locator for cross-vCenter migration), places each VirtualDisk on
        the target datastore (or its per-volume datastore/profile from
        ``volume_mapping``, keyed by device key), rewrites network device
        backings per ``migrate_data.vif_infos``, and issues the relocate.
        Raises exception.NotFound if a VIF's MAC has no matching device.
        """
        defaults = migrate_data.relocate_defaults
        relocate_spec_defaults = defaults["relocate_spec"]
        disk_move_type = relocate_spec_defaults.get("diskMoveType",
            "moveAllDiskBackingsAndDisallowSharing")
        def moref(item):
            # Rehydrate a managed-object reference from its serialized
            # {"value", "_type"} form in the relocate-spec defaults.
            v = relocate_spec_defaults[item]
            return vutil.get_moref(v["value"], v["_type"])
        client_factory = self._session.vim.client.factory
        datastore = moref("datastore")
        relocate_spec = vm_util.relocate_vm_spec(client_factory,
                                                 res_pool=moref("pool"),
                                                 datastore=datastore,
                                                 host=moref("host"),
                                                 disk_move_type=disk_move_type,
                                                 folder=moref("folder"))
        # Optional service locator: present when migrating to another
        # vCenter endpoint.
        service = defaults.get("service")
        if service:
            credentials_dict = service.pop("credentials")
            # The credentials dict carries its own SOAP type name.
            credentials = client_factory.create(
                "ns0:" + credentials_dict.pop("_type"))
            for k, v in credentials_dict.items():
                setattr(credentials, k, v)
            relocate_spec.service = vm_util.create_service_locator(
                client_factory,
                service["url"],
                service["instance_uuid"],
                credentials,
                service["ssl_thumbprint"],
            )
        vm_ref = vm_util.get_vm_ref(self._session, instance)
        device_config_spec = []
        relocate_spec.deviceChange = device_config_spec
        disks = []
        relocate_spec.disk = disks
        netdevices = []
        # Single pass over the hardware: collect network adapters and
        # build a disk locator for every VirtualDisk.
        for device in vm_util.get_hardware_devices(self._session, vm_ref):
            class_name = device.__class__.__name__
            if class_name in vm_util.ALL_SUPPORTED_NETWORK_DEVICES:
                netdevices.append(device)
            elif class_name == "VirtualDisk":
                locator = client_factory.create(
                    "ns0:VirtualMachineRelocateSpecDiskLocator")
                locator.diskId = device.key
                target = volume_mapping.get(device.key)
                if not target:  # Not a volume
                    locator.datastore = datastore
                else:
                    # Cinder volume: place on its own datastore and, if
                    # given, apply the storage profile.
                    locator.datastore = target["datastore_ref"]
                    profile_id = target.get("profile_id")
                    if profile_id:
                        profile_spec = client_factory.create(
                            "ns0:VirtualMachineDefinedProfileSpec")
                        profile_spec.profileId = profile_id
                        locator.profile = [profile_spec]
                disks.append(locator)
        for vif_info in migrate_data.vif_infos:
            device = vmwarevif.get_network_device(netdevices,
                                                  vif_info["mac_address"])
            if not device:
                msg = _("No device with MAC address %s exists on the "
                        "VM") % vif_info["mac_address"]
                raise exception.NotFound(msg)
            # Update the network device backing
            config_spec = client_factory.create("ns0:VirtualDeviceConfigSpec")
            vm_util.set_net_device_backing(client_factory, device, vif_info)
            config_spec.operation = "edit"
            config_spec.device = device
            device_config_spec.append(config_spec)
        vm_util.relocate_vm(self._session, vm_ref, spec=relocate_spec)
def _detach_volumes(self, instance, block_device_info):
block_devices = driver.block_device_info_get_mapping(block_device_info)
for disk in block_devices:
self._volumeops.detach_volume(disk['connection_info'], instance)
def _attach_volumes(self, instance, block_device_info, adapter_type):
disks = driver.block_device_info_get_mapping(block_device_info)
# make sure the disks are attached by the boot_index order (if any)
for disk in sorted(disks,
key=lambda d: d['boot_index']
if 'boot_index' in d and d['boot_index'] > -1
else len(disks)):
adapter_type = disk.get('disk_bus') or adapter_type
self._volumeops.attach_volume(disk['connection_info'], instance,
adapter_type)
def poll_rebooting_instances(self, timeout, instances):
"""Poll for rebooting instances."""
ctxt = nova_context.get_admin_context()
instances_info = dict(instance_count=len(instances),
timeout=timeout)
if instances_info["instance_count"] > 0:
LOG.info("Found %(instance_count)d hung reboots "
"older than %(timeout)d seconds", instances_info)
for instance in instances:
LOG.info("Automatically hard rebooting", instance=instance)
self.compute_api.reboot(ctxt, instance, "HARD")
    def get_info(self, instance):
        """Return data about the VM instance."""
        powerstate_property = 'runtime.powerState'
        # An empty value cache means the property collector has not run
        # yet (or caching is off); refresh it before the lookup below.
        if not vm_util._VM_VALUE_CACHE:
            self.update_cached_instances()
        # Decorator heals a stale vm_ref cache entry and retries once if
        # the lookup raises ManagedObjectNotFound.
        @vm_util.vm_ref_cache_heal_from_instance
        def _get_vm_props(session, instance):
            vm_ref = vm_util.get_vm_ref(self._session, instance)
            # Fast path: serve the power state from the value cache.
            vm_props = vm_util._VM_VALUE_CACHE.get(vm_ref.value, {})
            if vm_props and powerstate_property in vm_props:
                return vm_props
            if CONF.vmware.use_property_collector:
                # Cache miss is unexpected when the property collector is
                # keeping the cache warm, so note it.
                LOG.debug("VM instance data was not found on the cache.")
            # Slow path: query vSphere directly for the power state.
            return session._call_method(
                vutil, "get_object_properties_dict",
                vm_ref, [powerstate_property])
        try:
            vm_props = _get_vm_props(self._session, instance)
        except vexc.ManagedObjectNotFoundException:
            # The VM no longer exists on the backend.
            raise exception.InstanceNotFound(instance_id=instance.uuid)
        return hardware.InstanceInfo(
            state=constants.POWER_STATES[vm_props[powerstate_property]])
def _get_diagnostics(self, instance):
"""Return data about VM diagnostics."""
vm_ref = vm_util.get_vm_ref(self._session, instance)
lst_properties = ["summary.config",
"summary.quickStats",
"summary.runtime"]
vm_props = self._session._call_method(vutil,
"get_object_properties_dict",
vm_ref,
lst_properties)
data = {}
# All of values received are objects. Convert them to dictionaries
for value in vm_props.values():
prop_dict = vim_util.object_to_dict(value, list_depth=1)
data.update(prop_dict)
return data
def get_diagnostics(self, instance):
"""Return data about VM diagnostics."""
data = self._get_diagnostics(instance)
# Add a namespace to all of the diagnostsics
return {'vmware:' + k: v for k, v in data.items()}
def get_instance_diagnostics(self, instance):
"""Return data about VM diagnostics."""
data = self._get_diagnostics(instance)
state = data.get('powerState')
if state:
state = power_state.STATE_MAP[constants.POWER_STATES[state]]
uptime = data.get('uptimeSeconds', 0)
config_drive = configdrive.required_by(instance)
diags = objects.Diagnostics(state=state,
driver='vmwareapi',
config_drive=config_drive,
hypervisor_os='esxi',
uptime=uptime)
diags.memory_details = objects.MemoryDiagnostics(
maximum = data.get('memorySizeMB', 0),
used=data.get('guestMemoryUsage', 0))
# TODO(garyk): add in cpu, nic and disk stats
return diags
def _get_vnc_console_connection(self, instance):
"""Return connection info for a vnc console."""
vm_ref = vm_util.get_vm_ref(self._session, instance)
opt_value = self._session._call_method(vutil,
'get_object_property',
vm_ref,
vm_util.VNC_CONFIG_KEY)
if opt_value:
port = int(opt_value.value)
else:
raise exception.ConsoleTypeUnavailable(console_type='vnc')
return {'port': port,
'internal_access_path': None}
@staticmethod
def _get_machine_id_str(network_info):
machine_id_str = ''
for vif in network_info:
# TODO(vish): add support for dns2
# TODO(sateesh): add support for injection of ipv6 configuration
network = vif['network']
ip_v4 = netmask_v4 = gateway_v4 = broadcast_v4 = dns = None
subnets_v4 = [s for s in network['subnets'] if s['version'] == 4]
if len(subnets_v4) > 0:
if len(subnets_v4[0]['ips']) > 0:
ip_v4 = subnets_v4[0]['ips'][0]
if len(subnets_v4[0]['dns']) > 0:
dns = subnets_v4[0]['dns'][0]['address']
netmask_v4 = str(subnets_v4[0].as_netaddr().netmask)
gateway_v4 = subnets_v4[0]['gateway']['address']
broadcast_v4 = str(subnets_v4[0].as_netaddr().broadcast)
interface_str = ";".join([vif['address'],
ip_v4 and ip_v4['address'] or '',
netmask_v4 or '',
gateway_v4 or '',
broadcast_v4 or '',
dns or ''])
machine_id_str = machine_id_str + interface_str + '#'
return machine_id_str
def _set_machine_id(self, client_factory, instance, network_info,
vm_ref=None):
"""Set the machine id of the VM for guest tools to pick up
and reconfigure the network interfaces.
"""
if vm_ref is None:
vm_ref = vm_util.get_vm_ref(self._session, instance)
machine_id_change_spec = vm_util.get_machine_id_change_spec(
client_factory,
self._get_machine_id_str(network_info))
LOG.debug("Reconfiguring VM instance to set the machine id",
instance=instance)
vm_util.reconfigure_vm(self._session, vm_ref, machine_id_change_spec)
LOG.debug("Reconfigured VM instance to set the machine id",
instance=instance)
@utils.synchronized('vmware.get_and_set_vnc_port')
def _get_and_set_vnc_config(self, client_factory, instance, vm_ref):
"""Set the vnc configuration of the VM."""
port = vm_util.get_vnc_port(self._session)
vnc_config_spec = vm_util.get_vnc_config_spec(
client_factory, port)
LOG.debug("Reconfiguring VM instance to enable vnc on "
"port - %(port)s", {'port': port},
instance=instance)
vm_util.reconfigure_vm(self._session, vm_ref, vnc_config_spec)
LOG.debug("Reconfigured VM instance to enable vnc on "
"port - %(port)s", {'port': port},
instance=instance)
def _get_ds_browser(self, ds_ref):
ds_browser = self._datastore_browser_mapping.get(ds_ref.value)
if not ds_browser:
ds_browser = self._session._call_method(vutil,
"get_object_property",
ds_ref,
"browser")
self._datastore_browser_mapping[ds_ref.value] = ds_browser
return ds_browser
def _create_folder_if_missing(self, ds_name, ds_ref, folder):
"""Create a folder if it does not exist.
Currently there are two folder that are required on the datastore
- base folder - the folder to store cached images
- temp folder - the folder used for snapshot management and
image uploading
This method is aimed to be used for the management of those
folders to ensure that they are created if they are missing.
The ds_util method mkdir will be used to check if the folder
exists. If this throws and exception 'FileAlreadyExistsException'
then the folder already exists on the datastore.
"""
path = ds_obj.DatastorePath(ds_name, folder)
dc_info = self.get_datacenter_ref_and_name(ds_ref)
try:
ds_util.mkdir(self._session, path, dc_info.ref)
LOG.debug("Folder %s created.", path)
except vexc.FileAlreadyExistsException:
# NOTE(hartsocks): if the folder already exists, that
# just means the folder was prepped by another process.
pass
def check_cache_folder(self, ds_name, ds_ref):
"""Check that the cache folder exists."""
self._create_folder_if_missing(ds_name, ds_ref, self._base_folder)
def check_temp_folder(self, ds_name, ds_ref):
"""Check that the temp folder exists."""
self._create_folder_if_missing(ds_name, ds_ref, self._tmp_folder)
def inject_network_info(self, instance, network_info):
"""inject network info for specified instance."""
# Set the machine.id parameter of the instance to | |
# <gh_stars>1-10  (scraper artifact kept as a comment so the module parses)
import copy
from enum import Enum
from direct.gui.OnscreenText import CollisionTraverser, CollisionHandlerQueue, CollisionNode, \
CollisionRay, OnscreenText, TransparencyAttrib, CollisionSphere
from direct.task import Task
from panda3d.core import BitMask32, LPoint3
from direct.gui.DirectButton import DirectButton
from ChessAI.GameController.game_controller import MoveResult
from ChessBoard.chess_figure import Side
from ChessRender.RenderFsmCommon.Camera.camera3d import Camera3D, Camera2D
from ChessRender.RenderFsmCommon.Lights.lights import Lights
from ChessRender.RenderFsmCommon.button_fsm import ButtonFsm
from ChessRender.RenderFsmCommon.screen_states import ScreenState
from ChessRender.RenderFsmCommon.figure_manage import FigureMngr, figure_as_render_object
from Vector2d.Vector2d import Vector2d, Move
HIGHLIGHT = (0, 1, 1, 1)
class Dimension(Enum):
    """Rendering mode of the chess board view."""

    _2D = 1  # flat, top-down board
    _3D = 2  # perspective camera view
class TapMovementManager:
    """Click-to-move input handler.

    The first click selects a piece belonging to the side to move; the
    second click submits the move (including pawn promotion handling).

    FIX: the original compared ``getTag(...)`` results and row indices with
    ``is`` (``... is "p"``, ``hiSq // 8 is 7``). Identity comparison with
    str/int literals is implementation-dependent and usually false for
    strings returned by an API, so promotions could never trigger. Replaced
    with ``==``/``!=``. The duplicated black/white promotion branches were
    also folded into ``_try_promote``.
    """

    def __init__(self, render_fsm_ref, game_state):
        # Index (0..63) of the currently selected square, or None.
        self.cur_clicked = None
        # Board position (Vector2d) of the selected square, or None.
        self.cur_clicked_pos = None
        self.render_fsm_ref = render_fsm_ref
        self.game_state = game_state

    def click(self, hiSq, pos):
        """Process a click on square index *hiSq* at board position *pos*."""
        if self.cur_clicked is None or self.game_state.figures[self.cur_clicked] is None:
            # Nothing selected yet: try to select the clicked piece.
            if self.game_state.figures[hiSq] is None:
                return
            cur_side = self.game_state.get_cur_turn_side()
            if cur_side is None:
                return
            lat = self.game_state.figures[hiSq].getTag("figue_lat")
            # Only allow selecting a piece that belongs to the side to move
            # (uppercase letters are white pieces, lowercase are black).
            if (cur_side is Side.WHITE and lat.isupper()) or \
                    (cur_side is Side.BLACK and lat.islower()):
                self.cur_clicked = hiSq
                self.cur_clicked_pos = pos
            return
        # A piece is already selected: attempt the move onto hiSq.
        if self.render_fsm_ref.process_set_move_player is not None:
            move = Move(self.cur_clicked_pos, Vector2d(hiSq % 8, hiSq // 8))
            # Pawn promotion: black pawn reaching row 7, white pawn row 0.
            if self._try_promote(move, hiSq, "p", 7, Side.BLACK):
                return
            if self._try_promote(move, hiSq, "P", 0, Side.WHITE):
                return
            self.render_fsm_ref.process_set_move_player(
                Move(self.cur_clicked_pos, Vector2d(hiSq % 8, hiSq // 8)))
        self.cur_clicked = None

    def _try_promote(self, move, hiSq, pawn_tag, last_row, side):
        """Open the pawn-promotion panel when *move* promotes; True if handled."""
        if self.game_state.figures[self.cur_clicked].getTag("figue_lat") != pawn_tag \
                or hiSq // 8 != last_row:
            return False
        if self.game_state.get_cur_turn_side() is side and \
                self.game_state.check_move_func(move, side) != MoveResult.INCORRECT:
            self.game_state.swap_figures(self.cur_clicked, hiSq)
            if self.game_state.figures[self.cur_clicked] is not None:
                self.game_state.figures[self.cur_clicked].removeNode()
            self.cur_clicked = None
            self.game_state.fire_pawn_change_panel(side, copy.deepcopy(move))
            self.game_state.dragging = False
            return True
        return False
class FsmStateGameState(ScreenState):
def __init__(self, render_fsm, whiteside_pack_name, blackside_pack_name, side, exit_link, check_move_func, get_cur_turn_side, on_exit_func=None):
    """Screen state for an active chess game.

    Builds the board, figures, skybox, camera, lights, info panels and
    input bindings.

    :param render_fsm: owning render FSM (task manager, events, callbacks).
    :param whiteside_pack_name: asset pack name for the white side.
    :param blackside_pack_name: asset pack name for the black side.
    :param side: side of the local player (see NOTE below).
    :param exit_link: FSM link to follow on exit (see NOTE below).
    :param check_move_func: callable(move, side) used to validate
        pawn-promotion moves against MoveResult.INCORRECT.
    :param get_cur_turn_side: callable() returning the side whose turn it is.
    :param on_exit_func: optional callback invoked before teardown on exit.
    """
    ScreenState.__init__(self)
    # NOTE(review): the exit_link parameter is ignored and the link is
    # hard-coded to the main menu — confirm this is intentional.
    self.exit_link = "fsm:MainMenu"
    self.button_sizes = (-1.5, 1.5, -0.4, 0.8)
    self.render_fsm_ref = render_fsm
    # Stop the menu camera rotation task while the game screen is active.
    self.render_fsm_ref.taskMgr.remove('camRotTask')
    self.side = side
    self.skysphere = None
    self.objMngr = FigureMngr(blackside_pack_name, whiteside_pack_name)
    self.dimension = Dimension._3D
    # NOTE(review): this overwrites the `side` parameter stored above, so
    # the skybox/camera always use the white-side view — confirm.
    self.side = Side.WHITE
    self.init_sky_sphere()
    # Board geometry: 64 squares/cubes plus a 100-cell info border.
    self.squares = [None for i in range(64)]
    self.info_squares = [None for i in range(100)]
    self.cubes = [None for i in range(64)]
    self.info_cubes = [None for i in range(100)]
    self.init_nodes_to_chsess_board()
    self.init_nodes_to_board_info()
    self.init_info_panel()
    self.pawn_change_panel = None
    self.swaped_icons = None
    # Starting position, rank by rank: uppercase = white pieces,
    # lowercase = black pieces, '.' = empty square.
    self.str_board = "rnbqkbnr" \
                     "pppppppp" \
                     "........" \
                     "........" \
                     "........" \
                     "........" \
                     "PPPPPPPP" \
                     "RNBQKBNR"
    self.figures = [None for i in range(64)]
    self.init_nodes_to_figures()
    self._camera_set()
    #self.camera_p = Camera(base.camera, base.camLens)
    base.disableMouse()
    # camera debug god mode
    #base.oobe()
    self.is_in_past = False
    self.lights = Lights(base, self.render_fsm_ref.cur_window_width, self.render_fsm_ref.cur_window_height)
    self.screen_atributes.buttons["but:Give up"] = ButtonFsm("Give up", (-1, 0, 0.87), None, None, (-1.6, 1.6, -0.3, 0.9), (1.8, 0.8, 0.8), 0.1)
    self.screen_atributes.buttons["but:Exit"] = ButtonFsm("Exit", (-1, 0, 0.73), None, None, (-1.6, 1.6, -0.3, 0.9), (1.8, 0.8, 0.8), 0.1)
    self.screen_atributes.buttons["but:2D/3D"] = ButtonFsm("2D/3D", (1, 0, 0.8), None, None, None, (1.8, 0.8, 0.8), 0.2)
    self.initialize_button_links()
    self.init_ray()
    # Input bindings: history navigation, drag-and-drop, camera control.
    render_fsm.accept("a", self.go_to_past)  # step one move back in history
    render_fsm.accept("d", self.go_to_future)  # step one move forward
    render_fsm.accept("mouse1", self.grab_piece)  # left-click grabs a piece
    render_fsm.accept("mouse1-up", self.release_piece)  # releasing places it
    render_fsm.accept("mouse2", self.middle_click)
    self.need_camera_update = False
    render_fsm.accept("mouse3", self.right_click)
    render_fsm.accept("mouse3-up", self.right_release)
    render_fsm.accept("wheel_up", self.wheel_up)
    render_fsm.accept("wheel_down", self.wheel_down)
    self.dragging = False
    self.dragging_figure_position = None
    # Index of the highlighted square, or False when none is highlighted.
    self.hiSq = False
    self.text_info = {}
    self.scale = 0.07
    self.on_exit_func = on_exit_func
    self.check_move_func = check_move_func
    self.get_cur_turn_side = get_cur_turn_side
    self.tap_movement_manager = TapMovementManager(render_fsm, self)
def go_to_past(self):
    """Step one move back in the game history and redraw the board."""
    hist = self.render_fsm_ref.get_hist_movement_manager()
    board = hist.get_prev()
    if hist.up_to_date():
        # Nothing older to show.
        return
    self.is_in_past = True
    self.update_board(board, True)
def go_to_future(self):
    """Step one move forward in the game history and redraw the board."""
    hist = self.render_fsm_ref.get_hist_movement_manager()
    board = hist.get_next()
    if hist.up_to_date():
        # Back at the live position.
        self.is_in_past = False
    self.update_board(board, True)
def change_dimension(self):
    """Toggle between the 2D and 3D board views and rebuild the camera."""
    self.render_fsm_ref.taskMgr.remove('camPosTask')
    self.dimension = (Dimension._2D if self.dimension == Dimension._3D
                      else Dimension._3D)
    self.update_board(self.str_board)
    self._camera_set()
def _camera_set(self):
    """(Re)create the camera matching the current dimension and player side."""
    width = self.render_fsm_ref.cur_window_width
    height = self.render_fsm_ref.cur_window_height
    cam_cls = Camera3D if self.dimension == Dimension._3D else Camera2D
    angle = cam_cls.WHITE_ANGLE if self.side is Side.WHITE else cam_cls.BLACK_ANGLE
    self.camera_p = cam_cls(base.camera, base.camLens, width, height, angle)
def init_sky_sphere(self):
    """Load the side-specific skybox and attach it behind the scene."""
    load = (self.objMngr.load_skybox_white_side
            if self.side is Side.WHITE
            else self.objMngr.load_skybox_black_side)
    sphere = load()
    # Render first, without depth writes, so it always sits behind everything.
    sphere.setBin('background', 1)
    sphere.setDepthWrite(0)
    sphere.reparentTo(render)
    sphere.setPos(0, 0, 0)
    sphere.setScale(20)
    self.skysphere = sphere
def clear_state(self):
    """Tear down every node, light, task and event binding owned by this state."""
    # Flag so concurrent render tasks know teardown is in progress.
    self.render_fsm_ref.is_clearing = True
    for figure in self.figures:
        if figure is not None:
            figure.removeNode()
    for square in self.squares:
        square.removeNode()
    self.skysphere.removeNode()
    for square in self.info_squares:
        if square is not None:
            square.removeNode()
    for cube in self.cubes:
        cube.removeNode()
    for cube in self.info_cubes:
        if cube is not None:
            cube.removeNode()
    self.lights.unset()
    for key in self.text_info:
        self.text_info[key].destroy()
    self.panel.removeNode()
    if self.pawn_change_panel is not None:
        self.pawn_change_panel.removeNode()
    if self.swaped_icons is not None:
        for icon in self.swaped_icons:
            icon.removeNode()
    self.render_fsm_ref.is_clearing = False
    # Restore the menu camera rotation task that __init__ had removed.
    self.render_fsm_ref.taskMgr.remove('camRotTask')
    self.render_fsm_ref.taskMgr.add(self.render_fsm_ref.camera_m.update_on_task_rotate, 'camRotTask')
    render_fsm = self.render_fsm_ref
    # Unbind every input event registered in __init__.
    render_fsm.ignore("a")  # history: step back
    render_fsm.ignore("d")  # history: step forward
    render_fsm.ignore("mouse1")  # left-click grabs a piece
    render_fsm.ignore("mouse1-up")  # releasing places it
    render_fsm.ignore("mouse2")
    render_fsm.ignore("mouse3")
    render_fsm.ignore("mouse3-up")
    render_fsm.ignore("wheel_up")
    render_fsm.ignore("wheel_down")
def on_exit(self):
    """Run the user-supplied exit callback (if any), then tear down the scene."""
    callback = self.on_exit_func
    if callback is not None:
        callback()
    self.clear_state()
def initialize_button_links(self):
    """Wire the on-screen buttons to their commands and FSM links."""
    buttons = self.screen_atributes.buttons
    exit_button = buttons["but:Exit"]
    exit_button.add_command(self.on_exit)
    exit_button.add_link(self.exit_link)
    buttons["but:2D/3D"].add_command(self.change_dimension)
    buttons["but:Give up"].add_command(self.render_fsm_ref.on_press_giveup_button)
def wheel_up(self):
    """Zoom the camera in by one wheel step."""
    self.camera_p.update_on_mouse_wheel(+1)
def wheel_down(self):
    """Zoom the camera out by one wheel step."""
    self.camera_p.update_on_mouse_wheel(-1)
def middle_click(self, steps=60):
    """Animate the camera back to the current player's side over *steps* frames."""
    self.render_fsm_ref.taskMgr.remove('camPosTask')
    cam = self.camera_p
    if isinstance(cam, Camera3D):
        cam.prepare_task_goto_player_side_position(self.get_cur_turn_side(), steps)
        self.render_fsm_ref.taskMgr.add(cam.task_goto_player_side_position, 'camPosTask')
    elif isinstance(cam, Camera2D):
        # The 2D camera has no animated path; snap to its default placement.
        self._camera_set()
def right_click(self):
    """Begin a camera-rotation drag at the current mouse location."""
    self.render_fsm_ref.taskMgr.remove('camPosTask')
    watcher = base.mouseWatcherNode
    self.camera_p.start_rotating(watcher.getMouseX(), watcher.getMouseY())
    self.need_camera_update = True
def right_release(self):
    """Stop the camera-rotation drag."""
    self.render_fsm_ref.taskMgr.remove('camPosTask')
    self.need_camera_update = False
def grab_piece(self):
    """Pick up the piece under the cursor, or forward the click to tap-movement.

    Does nothing while the user is browsing move history.
    """
    if self.is_in_past:
        return
    # A square is highlighted and holds a piece: start dragging it.
    if self.hiSq is not False and self.figures[self.hiSq]:
        self.dragging = self.hiSq
        self.dragging_figure_position = Vector2d(self.hiSq % 8, self.hiSq // 8)
        if self.tap_movement_manager is not None:
            self.tap_movement_manager.click(self.hiSq, self.dragging_figure_position)
        self.hiSq = False
        return
    # FIX: was `self.hiSq is not None`, but hiSq is False (never None) when
    # no square is highlighted, so clicks on empty space were reported as
    # clicks on square 0 (False % 8 == 0).
    if self.hiSq is not False and self.tap_movement_manager is not None:
        self.tap_movement_manager.click(self.hiSq, Vector2d(self.hiSq % 8, self.hiSq // 8))
def release_piece(self):
    """Drop the dragged piece and submit the move to the game logic.

    FIX: the original compared tag strings and row indices with ``is``
    (``getTag(...) is "p"``, ``self.hiSq // 8 is 7``). Identity comparison
    with str/int literals is implementation-dependent and usually false for
    strings coming from an API, so pawn promotion could never trigger.
    Replaced with ``==``/``!=`` and folded the duplicated black/white
    promotion branches into ``_promote_on_release``. The identical 2D/3D
    setPos branches were also collapsed (both called the same helper).
    """
    # Make sure we really are dragging something.
    if self.dragging is False:
        return
    # Snap the piece back onto its source square; it is re-placed by the
    # board update if the move is accepted.
    self.figures[self.dragging].setPos(self.FigurePos(self.dragging))
    if self.render_fsm_ref.process_set_move_player is not None:
        move = Move(self.dragging_figure_position,
                    Vector2d(self.hiSq % 8, self.hiSq // 8))
        # Pawn promotion: black pawn reaching row 7, white pawn row 0.
        if self._promote_on_release(move, "p", 7, Side.BLACK):
            return
        if self._promote_on_release(move, "P", 0, Side.WHITE):
            return
        self.render_fsm_ref.process_set_move_player(
            Move(self.dragging_figure_position,
                 Vector2d(self.hiSq % 8, self.hiSq // 8)))
    # We are no longer dragging anything.
    self.dragging = False

def _promote_on_release(self, move, pawn_tag, last_row, side):
    """Open the pawn-promotion panel if the drop promotes; True when handled."""
    if self.figures[self.dragging].getTag("figue_lat") != pawn_tag or \
            self.hiSq // 8 != last_row:
        return False
    if self.get_cur_turn_side() is side and \
            self.check_move_func(move, side) != MoveResult.INCORRECT:
        self.swap_figures(self.dragging, self.hiSq)
        if self.figures[self.dragging] is not None:
            self.figures[self.dragging].removeNode()
        self.dragging = False
        self.fire_pawn_change_panel(side, move)
        return True
    return False
def fire_pawn_change_panel(self, side, move):
    """Suspend board input and show the pawn-promotion selection panel."""
    fsm = self.render_fsm_ref
    for event in ("mouse1", "mouse1-up", "mouse2", "mouse3",
                  "mouse3-up", "wheel_up", "wheel_down"):
        fsm.ignore(event)
    # Also disable the view toggle while the panel is up.
    self.screen_atributes.buttons["but:2D/3D"].command = None
    self.init_pawn_change_panel(side, move)
def swap_figures(self, fr, to):
    """Exchange the figure nodes on squares *fr* and *to*, repositioning both."""
    self.figures[fr], self.figures[to] = self.figures[to], self.figures[fr]
    for key in (fr, to):
        if self.figures[key]:
            self.figures[key].setPos(self.FigurePos(key))
def FigurePos(self, key):
    """Return the world position for square *key* in the active view."""
    placer = (self.FigurePos3D if self.dimension == Dimension._3D
              else self.FigurePos2D)
    return placer(key)
def mouse_task(self):
    """Per-frame task: rotate the camera while dragging with the right button,
    move a grabbed piece under the cursor, and pick the highlighted square
    via a collision ray. Always returns Task.cont to keep running.
    """
    mouse_watcher = base.mouseWatcherNode
    if mouse_watcher.hasMouse() and self.need_camera_update:
        # Right mouse button held: rotate the camera toward the cursor.
        self.camera_p.update_pos(mouse_watcher.getMouseX(), mouse_watcher.getMouseY())
    # First, clear the current highlight
    if self.hiSq is not False:
        #self.squares[self.hiSq].setColor(self.SquareColor(self.hiSq))
        self.hiSq = False
    if base.mouseWatcherNode.hasMouse():
        # get the mouse position
        mpos = base.mouseWatcherNode.getMouse()
        # Set the position of the ray based on the mouse position
        self.pickerRay.setFromLens(base.camNode, mpos.getX(), mpos.getY())
        # If we are dragging something, set the position of the object
        # to be at the appropriate point over the plane of the board
        if self.dragging is not False:
            # Gets the point described by pickerRay.getOrigin(), which is relative to
            # camera, relative instead to render
            # NOTE(review): mixes the implicit Panda3D global `camera` with
            # base.camera below — presumably the same node; confirm.
            nearPoint = base.render.getRelativePoint(
                camera, self.pickerRay.getOrigin())
            # Same thing with the direction of the ray
            nearVec = base.render.getRelativeVector(
                base.camera, self.pickerRay.getDirection())
            # Keep the dragged figure on the z=0.5 plane above the board.
            self.figures[self.dragging].setPos(
                self.PointAtZ(.5, nearPoint, nearVec))
        # Do the actual collision pass (Do it only on the squares for
        # efficiency purposes)
        self.myTraverser.traverse(self.squareRoot)
        if self.myHandler.getNumEntries() > 0:
            # if we have hit something, sort the hits so that the closest
            # is first, and highlight that node
            self.myHandler.sortEntries()
            i = int(self.myHandler.getEntry(0).getIntoNode().getTag('square'))
            # Set the highlight on the picked square
            #self.squares[i].setColor(HIGHLIGHT)
            self.hiSq = i
    return Task.cont
def init_nodes_to_figures(self, need_to_add_dragging_figure=True, dragging_pos=None):
"""
Creation of figues on the board (visual interpretation)
:param str_board: chess board in string format
:return: figues: array of objects.
"""
for key in range(64):
if self.str_board[key] != ".":
# skip adding dragging figure
if dragging_pos is not None and key == dragging_pos and need_to_add_dragging_figure is False:
key += 1
continue
if self.dimension is Dimension._3D:
self.figures[key] = self.objMngr.load_figure_model(self.str_board[key])
self.figures[key].setPos(self.FigurePos(key))
else:
self.figures[key] = self.objMngr.load_figure_model_2D(self.str_board[key])
if self.side | |
' { "key":"radar-vl", "type":"double", "default":0 },\n'
' { "key":"radar-f", "type":"double", "default":0 } ], '
'"url":"Vocoder", \n'
' "tip":"Released under terms of the GNU General Public License '
'version 2" },\n'
' { "id":"Vocoder", "name":"Vocoder", "params":\n'
' [ \n'
' { "key":"dst", "type":"double", "default":0 },\n'
' { "key":"mst", "type":"enum", "default":"BothChannels", '
'"enum":\n'
' [ "BothChannels", "RightOnly" ] },\n'
' { "key":"bands", "type":"int", "default":0 },\n'
' { "key":"track-vl", "type":"double", "default":0 },\n'
' { "key":"noise-vl", "type":"double", "default":0 },\n'
' { "key":"radar-vl", "type":"double", "default":0 },\n'
' { "key":"radar-f", "type":"double", "default":0 } ], '
'"url":"Vocoder", \n'
' "tip":"Released under terms of the GNU General Public License '
'version 2" },\n'
' { "id":"SoundFinder", "name":"Sound Finder", "params":\n'
' [ \n'
' { "key":"sil-lev", "type":"double", "default":0 },\n'
' { "key":"sil-dur", "type":"double", "default":0 },\n'
' { "key":"labelbeforedur", "type":"double", "default":0 },\n'
' { "key":"labelafterdur", "type":"double", "default":0 },\n'
' { "key":"finallabel", "type":"int", "default":0 } ], '
'"url":"Sound_Finder", \n'
' "tip":"Released under terms of the GNU General Public License '
'version 2" },\n'
' { "id":"SoundFinder", "name":"Sound Finder", "params":\n'
' [ \n'
' { "key":"sil-lev", "type":"double", "default":0 },\n'
' { "key":"sil-dur", "type":"double", "default":0 },\n'
' { "key":"labelbeforedur", "type":"double", "default":0 },\n'
' { "key":"labelafterdur", "type":"double", "default":0 },\n'
' { "key":"finallabel", "type":"int", "default":0 } ], '
'"url":"Sound_Finder", \n'
' "tip":"Released under terms of the GNU General Public License '
'version 2" },\n'
' { "id":"SpectralEditMultiTool", \n'
' "name":"Spectral edit multi tool", "params":\n'
' [ ], \n'
' "url":"Spectral_edit_multi_tool", \n'
' "tip":"Released under terms of the GNU General Public License '
'version 2" },\n'
' { "id":"SpectralEditMultiTool", \n'
' "name":"Spectral edit multi tool", "params":\n'
' [ ], \n'
' "url":"Spectral_edit_multi_tool", \n'
' "tip":"Released under terms of the GNU General Public License '
'version 2" },\n'
' { "id":"SpectralEditParametricEq", \n'
' "name":"Spectral edit parametric EQ", "params":\n'
' [ \n'
' { "key":"control-gain", "type":"double", "default":0 } ], \n'
' "url":"Spectral_edit_parametric_EQ", \n'
' "tip":"Released under terms of the GNU General Public License '
'version 2" },\n'
' { "id":"SpectralEditParametricEq", \n'
' "name":"Spectral edit parametric EQ", "params":\n'
' [ \n'
' { "key":"control-gain", "type":"double", "default":0 } ], \n'
' "url":"Spectral_edit_parametric_EQ", \n'
' "tip":"Released under terms of the GNU General Public License '
'version 2" },\n'
' { "id":"SpectralEditShelves", \n'
' "name":"Spectral edit shelves", "params":\n'
' [ \n'
' { "key":"control-gain", "type":"double", "default":0 } ], \n'
' "url":"Spectral_edit_shelves", \n'
' "tip":"Released under terms of the GNU General Public License '
'version 2" },\n'
' { "id":"SpectralEditShelves", \n'
' "name":"Spectral edit shelves", "params":\n'
' [ \n'
' { "key":"control-gain", "type":"double", "default":0 } ], \n'
' "url":"Spectral_edit_shelves", \n'
' "tip":"Released under terms of the GNU General Public License '
'version 2" },\n'
' { "id":"VocalReductionAndIsolation", \n'
' "name":"Vocal Reduction and Isolation", "params":\n'
' [ \n'
' { "key":"action", "type":"enum", "default":"Remove", "enum":\n'
' [ "Remove", "Isolate", "IsolateInvert", "RemoveCenter", '
'"IsolateCenter", \n'
' "IsolateCenterInvert", "RemoveCenter", "Analyze" ] },\n'
' { "key":"strength", "type":"double", "default":0 },\n'
' { "key":"low-transition", "type":"double", "default":0 },\n'
' { "key":"high-transition", "type":"double", "default":0 } ], \n'
' "url":"Vocal_Reduction_and_Isolation", \n'
' "tip":"Released under terms of the GNU General Public License '
'version 2" },\n'
' { "id":"VocalReductionAndIsolation", \n'
' "name":"Vocal Reduction and Isolation", "params":\n'
' [ \n'
' { "key":"action", "type":"enum", "default":"RemoveToMono", '
'"enum":\n'
' [ "RemoveToMono", "Remove", "Isolate", "IsolateInvert", \n'
' "RemoveCenterToMono", "RemoveCenter", "IsolateCenter", \n'
' "IsolateCenterInvert", "Analyze" ] },\n'
' { "key":"strength", "type":"double", "default":0 },\n'
' { "key":"low-transition", "type":"double", "default":0 },\n'
' { "key":"high-transition", "type":"double", "default":0 } ], \n'
' "url":"Vocal_Reduction_and_Isolation", \n'
' "tip":"Released under terms of the GNU General Public License '
'version 2" },\n'
' { "id":"NotchFilter", "name":"Notch Filter", "params":\n'
' [ \n'
' { "key":"frequency", "type":"double", "default":0 },\n'
' { "key":"q", "type":"double", "default":0 } ], '
'"url":"Notch_Filter", \n'
' "tip":"Released under terms of the GNU General Public License '
'version 2" },\n'
' { "id":"NotchFilter", "name":"Notch Filter", "params":\n'
' [ \n'
' { "key":"frequency", "type":"double", "default":0 },\n'
' { "key":"q", "type":"double", "default":0 } ], '
'"url":"Notch_Filter", \n'
' "tip":"Released under terms of the GNU General Public License '
'version 2" },\n'
' { "id":"AdjustableFade", \n'
' "name":"Adjustable Fade", "params":\n'
' [ \n'
' { "key":"type", "type":"enum", "default":"Up", "enum":\n'
' [ "Up", "Down", "SCurveUp", "SCurveDown" ] },\n'
' { "key":"curve", "type":"double", "default":0 },\n'
' { "key":"units", "type":"enum", "default":"Percent", "enum":\n'
' [ "Percent", "dB" ] },\n'
' { "key":"gain0", "type":"double", "default":0 },\n'
' { "key":"gain1", "type":"double", "default":0 },\n'
' { "key":"preset", "type":"enum", "default":"None", "enum":\n'
' [ "None", "LinearIn", "LinearOut", "ExponentialIn", '
'"ExponentialOut", "LogarithmicIn", "LogarithmicOut", "RoundedIn", '
'"RoundedOut", "CosineIn", "CosineOut", "SCurveIn", "SCurveOut" ] } ], \n'
' "url":"Adjustable_Fade", \n'
' "tip":"Released under terms of the GNU General Public License '
'version 2" },\n'
' { "id":"AdjustableFade", \n'
' "name":"Adjustable Fade", "params":\n'
' [ \n'
' { "key":"type", "type":"enum", "default":"Up", "enum":\n'
' [ "Up", "Down", "SCurveUp", "SCurveDown" ] },\n'
' { "key":"curve", "type":"double", "default":0 },\n'
' { "key":"units", "type":"enum", "default":"Percent", "enum":\n'
' [ "Percent", "dB" ] },\n'
' { "key":"gain0", "type":"double", "default":0 },\n'
' { "key":"gain1", "type":"double", "default":0 },\n'
' { "key":"preset", "type":"enum", "default":"None", "enum":\n'
' [ "None", "LinearIn", "LinearOut", "ExponentialIn", '
'"ExponentialOut", "LogarithmicIn", "LogarithmicOut", "RoundedIn", '
'"RoundedOut", "CosineIn", "CosineOut", "SCurveIn", "SCurveOut" ] } ], \n'
' "url":"Adjustable_Fade", \n'
' "tip":"Released under terms of the GNU General Public License '
'version 2" },\n'
' { "id":"CrossfadeClips", \n'
' "name":"Crossfade Clips", "params":\n'
' [ ], \n'
' "url":"Crossfade_Clips", \n'
' "tip":"Released under terms of the GNU General Public License '
'version 2" },\n'
' { "id":"CrossfadeClips", \n'
' "name":"Crossfade Clips", "params":\n'
' [ ], \n'
' "url":"Crossfade_Clips", \n'
' "tip":"Released under terms of the GNU General Public License '
'version 2" },\n'
' { "id":"CrossfadeTracks", \n'
' "name":"Crossfade Tracks", "params":\n'
' [ \n'
' { "key":"type", "type":"enum", "default":"ConstantGain", '
'"enum":\n'
' [ "ConstantGain", "ConstantPower1", "ConstantPower2", '
'"CustomCurve" ] },\n'
' { "key":"curve", "type":"double", "default":0 },\n'
' { "key":"direction", "type":"enum", "default":"Automatic", '
'"enum":\n'
' [ "Automatic", "OutIn", "InOut" ] } ], \n'
' "url":"Crossfade_Tracks", \n'
' "tip":"Released under terms of the GNU General Public License '
'version 2" },\n'
' { "id":"CrossfadeTracks", \n'
' "name":"Crossfade Tracks", "params":\n'
' [ \n'
' { "key":"type", "type":"enum", "default":"ConstantGain", '
'"enum":\n'
' [ "ConstantGain", "ConstantPower1", "ConstantPower2", '
'"CustomCurve" ] },\n'
' { "key":"curve", "type":"double", "default":0 },\n'
' { "key":"direction", "type":"enum", "default":"Automatic", '
'"enum":\n'
' [ "Automatic", "OutIn", "InOut" ] } ], \n'
' "url":"Crossfade_Tracks", \n'
' "tip":"Released under terms of the GNU General Public License '
'version 2" },\n'
' { "id":"Delay", "name":"Delay", "params":\n'
' [ \n'
' { "key":"delay-type", "type":"enum", "default":"Regular", '
'"enum":\n'
' [ "Regular", "BouncingBall", \n'
' "ReverseBouncingBall" ] },\n'
' { "key":"dgain", "type":"double", "default":0 },\n'
' { "key":"delay", "type":"double", "default":0 },\n'
' { "key":"pitch-type", "type":"enum", "default":"PitchTempo", '
'"enum":\n'
' [ "PitchTempo", "LQPitchShift" ] },\n'
' { "key":"shift", "type":"double", "default":0 },\n'
' { "key":"number", "type":"int", "default":0 },\n'
' { "key":"constrain", "type":"enum", "default":"Yes", "enum":\n'
' [ "Yes", "No" ] } ], "url":"Delay", \n'
' "tip":"Released under terms of the GNU General Public License '
'version 2" },\n'
' { "id":"Delay", "name":"Delay", "params":\n'
' [ \n'
' { "key":"delay-type", "type":"enum", "default":"Regular", '
'"enum":\n'
' [ "Regular", "BouncingBall", \n'
' "ReverseBouncingBall" ] },\n'
' { "key":"dgain", "type":"double", "default":0 },\n'
' { "key":"delay", "type":"double", "default":0 },\n'
' { "key":"pitch-type", "type":"enum", "default":"PitchTempo", '
'"enum":\n'
' [ "PitchTempo", "LQPitchShift" ] },\n'
' { "key":"shift", "type":"double", "default":0 },\n'
' { "key":"number", "type":"int", "default":0 },\n'
' { "key":"constrain", "type":"enum", "default":"Yes", "enum":\n'
' [ "Yes", "No" ] } ], "url":"Delay", \n'
' "tip":"Released under terms of the GNU General Public License '
'version 2" },\n'
' { "id":"Limiter", "name":"Limiter", "params":\n'
' [ \n'
' { "key":"type", "type":"enum", "default":"SoftLimit", "enum":\n'
' [ "SoftLimit", "HardLimit", "SoftClip", "HardClip" ] },\n'
' { "key":"gain-L", "type":"double", "default":0 },\n'
' { "key":"gain-R", "type":"double", "default":0 },\n'
' { "key":"thresh", "type":"double", "default":0 },\n'
' { "key":"hold", "type":"double", "default":0 },\n'
' { "key":"makeup", "type":"enum", "default":"No", "enum":\n'
' [ "No", "Yes" ] } | |
SHORT STRING | LONG STRING =========
"""
if self.peek_token(1, 3) == '""':
string = self.lex_string(char + self.eat_token('""'))
else:
string = self.lex_string(char)
tokens.append(Token(string, TokenKind.STRING, *self.get_line_info()))
elif char == ".":
"""
========= DELIMITER | FLOAT =========
"""
char = self.peek_char()
codepoint = ord(char) if char else -1
token = "."
token_kind = TokenKind.DELIMITER
# "." digits exponent?
if is_dec_digit(codepoint):
token_kind = TokenKind.DEC_FLOAT
token = "0." + self.lex_digit_part(
is_dec_digit, "floating point"
)
# Check for exponent section
peek_token = self.peek_slice(1, 3)
peek_e = peek_token[0:1]
peek_sign_or_digit = peek_token[1:2]
codepoint = ord(peek_sign_or_digit) if peek_sign_or_digit else -1
if peek_e == "e" and (
is_dec_digit(codepoint)
or peek_sign_or_digit == "+"
or peek_sign_or_digit == "-"
):
token += self.lex_exponent_part()
tokens.append(Token(token, token_kind, *self.get_line_info()))
elif char == "0":
"""
========= INTEGER | FLOAT =========
TODO: validate representable integer and float
"""
token = ""
token_kind = TokenKind.DEC_INTEGER
char = self.peek_char()
if char == "x":
# HEXADECIMAL
self.eat_char() # Consume the x
token = self.lex_digit_part(is_hex_digit)
token_kind = TokenKind.HEX_INTEGER
elif char == "b":
# BINARY
self.eat_char() # Consume the b
token = self.lex_digit_part(is_bin_digit)
token_kind = TokenKind.BIN_INTEGER
elif char == "o":
# OCTAL
self.eat_char() # Consume the o
token = self.lex_digit_part(is_oct_digit)
token_kind = TokenKind.OCT_INTEGER
else:
# DECIMAL
token = self.lex_digit_part(
is_dec_digit, raise_if_empty=False
)
token = "0" + token
# Check for potential floating point value
# digits '.' digits? exponent? | digits exponent
peek_token = self.peek_slice(1, 4)
peek0 = peek_token[0:1] # . || e
peek1 = peek_token[1:2] # digit or e || + or - or digit
peek2 = peek_token[2:3] # + or - or digit
codepoint0 = ord(peek1) if peek1 else -1
codepoint1 = ord(peek2) if peek2 else -1
if peek0 == "." and (
is_dec_digit(codepoint0)
or (
peek1 == "e"
and (
is_dec_digit(codepoint1)
or peek2 == "+"
or peek2 == "-"
)
)
):
token += self.lex_fraction_exponent_part()
token_kind = TokenKind.DEC_FLOAT
elif peek0 == "e" and (
is_dec_digit(codepoint0) or peek1 == "+" or peek1 == "-"
):
token += self.lex_exponent_part()
token_kind = TokenKind.DEC_FLOAT
tokens.append(Token(token, token_kind, *self.get_line_info()))
elif 47 < ord(char) < 58:
"""
========= INTEGER | FLOAT =========
TODO: Validate representable integer and float
"""
token = char + self.lex_digit_part(is_dec_digit, raise_if_empty=False)
token_kind = TokenKind.DEC_INTEGER
# Check for potential floating point value
# digits '.' digits? exponent? | digits exponent
peek_token = self.peek_slice(1, 4)
peek0 = peek_token[0:1] # | . | e
peek1 = peek_token[1:2] # | ε | digit | e | (+ | - | digit)
peek2 = peek_token[2:3] # | (+ | - | digit)
codepoint0 = ord(peek1) if peek1 else -1
codepoint1 = ord(peek2) if peek2 else -1
if peek0 == "." and (
is_dec_digit(codepoint0)
or (is_space(peek1) or peek1 == "")
or (
peek1 == "e"
and (
is_dec_digit(codepoint1)
or peek2 == "+"
or peek2 == "-"
)
)
):
token += self.lex_fraction_exponent_part()
token_kind = TokenKind.DEC_FLOAT
elif peek0 == "e" and (
is_dec_digit(codepoint0) or peek1 == "+" or peek1 == "-"
):
token += self.lex_exponent_part()
token_kind = TokenKind.DEC_FLOAT
tokens.append(Token(token, token_kind, *self.get_line_info()))
elif char == "!":
"""
========= OPERATOR =========
"""
token = char
peek_char = self.peek_char()
if peek_char != "=":
raise LexerError(
f"Encountered unexpected character: {repr(char)}",
*self.get_line_info(),
)
else:
token += self.eat_char()
tokens.append(Token(token, TokenKind.OPERATOR, *self.get_line_info()))
elif is_single_char_operator(char):
"""
========= OPERATOR | DELIMITER =========
"""
token = char
token_kind = TokenKind.OPERATOR
peek_char0 = self.peek_char()
peek_char1 = self.peek_char(2)
if (
peek_char0
and peek_char1
and (
(char == "/" and peek_char0 == "/" and peek_char1 == "=")
or (char == ">" and peek_char0 == ">" and peek_char1 == "=")
or (char == "<" and peek_char0 == "<" and peek_char1 == "=")
or (char == "|" and peek_char0 == "|" and peek_char1 == "=")
)
):
token += self.eat_char() + self.eat_char()
token_kind = TokenKind.DELIMITER
elif peek_char0 and (
(char == ">" and (peek_char0 == "=" or peek_char0 == ">"))
or (char == "<" and (peek_char0 == "=" or peek_char0 == "<"))
or (char == "/" and peek_char0 == "/")
or (char == "|" and peek_char0 == "|")
or (char == "*" and peek_char0 == "*")
):
token += self.eat_char()
elif peek_char0 and (
(char == "-" and peek_char0 == ">")
or (char == "+" and peek_char0 == "=")
or (char == "-" and peek_char0 == "=")
or (char == "*" and peek_char0 == "=")
or (char == "/" and peek_char0 == "=")
or (char == "%" and peek_char0 == "=")
or (char == "&" and peek_char0 == "=")
or (char == "|" and peek_char0 == "=")
or (char == "^" and peek_char0 == "=")
):
token += self.eat_char()
token_kind = TokenKind.DELIMITER
tokens.append(Token(token, token_kind, *self.get_line_info()))
elif is_single_char_delimiter(char):
"""
========= DELIMITER | OPERATOR | INDENTATION =========
"""
token = char
token_kind = TokenKind.DELIMITER
peek_char = self.peek_char()
nested_indentation_num = len(self.indentations)
indentation = self.indentations[-1]
# Check if there is an open bracket.
if char == "(" or char == "[" or char == "{":
self.indentations.append(
Indentation(
open_bracket=char,
start_indentation_count=indentation.indentation_count
)
)
self.is_in_brackets = True
# Check if there is an close bracket.
if char == indentation.close_bracket:
if nested_indentation_num == 2:
self.is_in_brackets = False
if nested_indentation_num > 1:
# If we are in a block and block hasn't been dedented
if indentation.block and (
indentation.indentation_count
> indentation.block.start_indentation_count
):
positive_indent_diff = abs(
indentation.indentation_count
- indentation.block.start_indentation_count
)
for i in range(positive_indent_diff // self.indent_factor):
tokens.append(Token('', TokenKind.DEDENT, *self.get_line_info()))
self.indentations.pop()
# Detecting a top-level block in brackets
if self.is_in_brackets and not indentation.block and char == ':':
offset = 0
is_block = False
# Skip all spaces until we find a newline
while True:
offset += 1
peek_char = self.peek_char(offset)
if is_horizontal_space(peek_char):
continue
elif peek_char == '\n' or peek_char == '\r':
is_block = True
break
else:
break
if is_block:
indentation.block = Block(indentation.indentation_count)
if char == "=" and peek_char == "=":
token += self.eat_char()
token_kind = TokenKind.OPERATOR
elif char == "@" and peek_char == "=":
token += self.eat_char()
tokens.append(Token(token, token_kind, *self.get_line_info()))
elif is_identifier_start(char):
"""
========= IDENTIFIER | OPERATOR | BYTE STRING | IMAGINARY =========
========= PREFIXED STRING | INDENTATION | KEYWORD =========
TODO: is_identifier_start must check for ASCII first
"""
line_info = self.get_line_info()
line_info_before_identifier_lexing = (line_info[0], line_info[1] - 1)
token = char
peek_token = self.peek_slice(0, 3)
two_letter_prefix = peek_token[:2]
two_letter_prefix_delim = peek_token[2:3]
one_letter_prefix = peek_token[:1]
one_letter_prefix_delim = peek_token[1:2]
if (two_letter_prefix == "rb" or two_letter_prefix == "rf") and (
two_letter_prefix_delim == '"' or two_letter_prefix_delim == "'"
):
# TWO LETTER STRING PREFIX
peek_triple_quote_delimiter = self.peek_token(2, 5)
token, token_kind = self.lex_prefixed_string(
two_letter_prefix,
peek_triple_quote_delimiter,
two_letter_prefix == "rb",
)
elif (
one_letter_prefix == "f"
or one_letter_prefix == "b"
or one_letter_prefix == "r"
or one_letter_prefix == "u"
) and (
one_letter_prefix_delim == "'" or one_letter_prefix_delim == '"'
):
# ONE LETTER STRING PREFIX
peek_triple_quote_delimiter = self.peek_token(1, 4)
token, token_kind = self.lex_prefixed_string(
one_letter_prefix,
peek_triple_quote_delimiter,
one_letter_prefix == "b",
)
else:
# IDENTIFIER
next_char = self.peek_char()
prev_char = self.peek_char(-1)
prev_codepoint = ord(prev_char) if prev_char else -1
while next_char and is_identifier_continuation(next_char):
token += self.eat_char()
# Peek at the next character in code.
next_char = self.peek_char()
token_kind = (
TokenKind.KEYWORD
if is_keyword(token)
else TokenKind.IDENTIFIER
)
# OPERATOR
# If this is a coefficient expression like 2_000fahr or (0b100)num,
# insert a `*` operator between the operands.
if (
prev_char
and not is_space(prev_char)
and (is_hex_digit(prev_codepoint) or prev_char == ")")
):
kind, prev_token = tokens[-1].kind, tokens[-1].data
# Bin, Oct and Hex integer literals are not allowed to be used in
# coefficient literal
if (
kind == TokenKind.BIN_INTEGER
or kind == TokenKind.OCT_INTEGER
or kind == TokenKind.HEX_INTEGER
):
raise LexerError(
f"Encountered invalid coefficient literal: "
f"{repr(self.get_numeric_prefix(kind)+ prev_token + token)}",
*self.get_line_info(),
)
if token == "im": # Mutate previous token
prev_token = tokens.pop()
token = prev_token.data
token_kind = (
TokenKind.DEC_INTEGER_IMAG
if prev_token.kind == TokenKind.DEC_INTEGER
else TokenKind.DEC_FLOAT_IMAG
)
else:
tokens.append(
Token(
"*",
TokenKind.OPERATOR,
*line_info_before_identifier_lexing,
)
)
tokens.append(Token(token, token_kind, *self.get_line_info()))
else:
raise LexerError(
f"Encountered unexpected character: {repr(char)}",
*self.get_line_info(),
)
# Consume the next character in code.
char = self.eat_char()
# Checking possible dedents at the end of code
prev_indent = self.indentations[-1].indentation_count
if prev_indent | |
# groupy/groupy.py
# This file is just to mess around with creating Groups in python
import re
class Gel:
    """
    A group element.

    A Gel pairs a display label with a permutation: a bijection from the set
    {1, ..., n} onto itself, stored as a tuple whose i-th entry is the image
    of i.  Since any element of a finite group can be realised as such a
    permutation (Cayley's theorem), every group element can be one of these
    tuples.  Trailing fixed points may be omitted, so (2, 1) and (2, 1, 3)
    denote the same element; the empty tuple is the identity.

    Parameters
    ----------
    g : object,
        The object you wish to assign a group meaning to (its str() is used
        as the display name).
    perm : tuple,
        The mathematical definition of the group element.  If it is of
        length n, it must contain the numbers 1 to n.

    Examples
    --------
    >>> g = Gel('g', (2,1,4,3))
    >>> identity = Gel('e', ())
    """

    def __init__(self, g, perm):
        def valid_tuple(tup):
            # A length-n permutation tuple must be exactly the set {1, ..., n}.
            return set(tup) == set(range(1, len(tup) + 1))

        self.g = g
        if valid_tuple(perm):
            self.perm = perm
        else:
            raise Exception("Bad permutation. A permutation tuple of length n must contain all numbers from 1 to n")

    @property
    def name(self):
        # Display name of the element; GroupLike keys its name lookups on this.
        return str(self.g)

    def __str__(self):
        return str(self.g)

    def __repr__(self):
        return self.__str__()

    def _canonical_perm(self):
        # Strip trailing fixed points so equal elements share one tuple,
        # e.g. (2, 1, 3) -> (2, 1) and (1, 2) -> ().
        n = len(self.perm)
        while n > 0 and self.perm[n - 1] == n:
            n -= 1
        return self.perm[:n]

    def __hash__(self):
        # BUGFIX: hash the canonical permutation, not the name, so that equal
        # elements (per __eq__, which ignores names) hash equally, as the
        # hash/eq contract requires.
        return hash(self._canonical_perm())

    def __eq__(self, other):
        """
        Boolean equality.

        A bijection on {1, ..., n} extends to a bijection on the naturals by
        fixing every m > n, so two Gels compare equal exactly when they have
        the same action, even if their tuples differ in length.  Names are
        ignored.

        :param other: another Gel object
        :return: True/False
        """
        if not isinstance(other, Gel):
            return False
        span = max(len(self.perm), len(other.perm))
        return all(self.gcycle(i) == other.gcycle(i) for i in range(1, span + 1))

    def __ne__(self, other):
        return not self == other

    def gcycle(self, n):
        """Return the image of n under this permutation (n is fixed if it lies beyond the tuple)."""
        if 1 <= n <= len(self.perm):
            return self.perm[n - 1]
        return n

    def gmul(self, other):
        """Return the permutation tuple of the composition self∘other, i.e. i -> self(other(i))."""
        span = max(len(self.perm), len(other.perm))
        return tuple(self.gcycle(other.gcycle(i)) for i in range(1, span + 1))

    def __mul__(self, other):
        # Gel*Gel composes permutations; anything else (e.g. a GroupLike
        # coset) is delegated to the right operand's __rmul__.
        if isinstance(other, Gel):
            return Gel(str(self) + str(other), self.gmul(other))
        return other.__rmul__(self)

    def inv(self):
        """Return the inverse element, named with a ⁻¹ superscript."""
        inverse = tuple(self.perm.index(i) + 1 for i in range(1, len(self.perm) + 1))
        if len(re.sub(r'[^a-zA-Z]', '', str(self))) == 1:
            # Single-letter name: just append the inverse symbol.
            return Gel(str(self) + u"\u207B\u00B9", inverse)
        # Compound name: bracket it first to avoid confusion.
        return Gel("(" + str(self) + u")\u207B\u00B9", inverse)

    def __pow__(self, n):
        """Return self**n for any integer n (n == 0 gives the identity 'e')."""
        if type(n) != int:
            raise Exception('You can only perform power operations with integers on Gel types.')
        if n == 1:
            return self
        if n == 0:
            return Gel('e', ())
        if n < 0:
            # BUGFIX: g**-k is (g⁻¹)**k.  The old code multiplied in one
            # extra inverse, so g**-1 actually evaluated to g**-2, etc.
            return Gel(f'{self}**{n}', (self.inv() ** (-n)).perm)
        return Gel(f'{self}**{n}', (self * self.__pow__(n - 1)).perm)

    def cycle(self):
        """Return the element in cycle notation, e.g. '(1 2)(3 4)'; 1-cycles are dropped and the identity prints as 'e'."""
        s = "(1 "
        seen = [1]
        i = 1
        while len(seen) < len(self.perm):
            i = self.gcycle(i)
            if i not in seen:
                s = s + "{} ".format(i)
                seen.append(i)
            else:
                # Current cycle closed: open a new one at the smallest
                # not-yet-visited point.
                s = s[0:-1] + ")("
                i = min(set(self.perm) - set(seen))
                s = s + "{} ".format(i)
                seen.append(i)
        # Drop trivial 1-cycles such as "(3)".
        s = re.sub(r'\(\d+\)', '', s[:-1] + ")")
        return "e" if s == "" else s

    def order(self):
        """Return the order of the element: the least k >= 1 with self**k == e."""
        identity = Gel('e', ())
        k = 1
        while self ** k != identity:
            k += 1
        return k

    def is_identity(self):
        """True iff this element fixes every point."""
        # BUGFIX: check the images of 1..n (the old range(len) loop checked
        # 0..n-1, i.e. the meaningless point 0 and never the point n).
        return all(self.gcycle(i) == i for i in range(1, len(self.perm) + 1))
class GroupLike:
    """
    A class that resembles a group but need not meet all of the group axioms.

    A GroupLike consists of a name and a list of group elements.  The
    elements need not abide by the axioms of a Group, so a GroupLike can be
    used as an arbitrary set of group elements (e.g. a coset).

    Parameters
    -----------
    name : str,
        The name of your GroupLike object.
    elements : list,
        List of group elements (Gel instances).

    Examples
    --------
    >>> G = GroupLike('V_4', [Gel('e', () ), Gel('V', (2,1) ), Gel('H', (1,2,4,3) ), Gel('R', (2,1,4,3) )])
    """

    def __init__(self, name, elements):
        for g in elements:
            if type(g) != Gel:
                raise Exception('A GroupLike type can only contain Gel type elements.')
        self.elements = elements
        if type(name) != str:
            raise Exception('The name of a GroupLike must be a string, ya idiot.')
        self.name = name
        # Fast lookup tables: permutation tuple -> Gel, display name -> Gel.
        self._gelnamedict = {}
        self._gelpermdict = {}
        for g in self.elements:
            self._gelpermdict[g.perm] = g
            # BUGFIX: Gel stores its label in .g (there is no .name attribute
            # in the original Gel); key by str(g) so this no longer raises
            # AttributeError for every element.
            self._gelnamedict[str(g)] = g

    def __str__(self):
        return self.name

    def __repr__(self):
        # Renders as "name = {g1, g2, ...}".  BUGFIX: the old slice-based
        # build produced malformed output ("G =}") for an empty GroupLike.
        return self.name + " = {" + ", ".join(str(g) for g in self) + "}"

    def __iter__(self):
        return iter(self.elements)

    def __getitem__(self, item):
        # Access by position (int) or by element display name (str).
        if type(item) == int:
            return self.elements[item]
        elif type(item) == str:
            return self._gelnamedict[item]

    def __len__(self):
        return len(self.elements)

    def __contains__(self, item):
        return item in self.elements

    def __eq__(self, other):
        """Set-style equality: mutual containment of all elements."""
        # isinstance covers Group (a GroupLike subclass), matching the
        # original "type is GroupLike or Group" intent.
        if not isinstance(other, GroupLike):
            return False
        return all(g in other for g in self) and all(h in self for h in other)

    def __ne__(self, other):
        return not self == other

    def append(self, g):
        """Append a Gel and register it in the lookup tables."""
        if type(g) == Gel:
            self.elements.append(g)
            self._gelpermdict[g.perm] = g
            # BUGFIX: key by str(g); Gel has no .name attribute.
            self._gelnamedict[str(g)] = g
        else:
            raise Exception(f"You may only append Gel class elements to a {type(self)} class.")

    def _hase(self):
        # Whether the identity element is present.
        return Gel('e', ()) in self

    def _isclosed(self):
        # Whether the set is closed under the group operation.
        return all(g * h in self for g in self for h in self)

    def _hasinv(self):
        # Whether every element's inverse is present.
        return all(g.inv() in self for g in self)

    def _has_duplicates(self):
        # Whether any element (by group equality) appears more than once.
        seen = []
        for g in self:
            if g in seen:
                return True
            seen.append(g)
        return False

    def _remove_duplicates(self):
        # BUGFIX: the old implementation removed items from self.elements
        # while iterating over it, which skips elements and can leave
        # duplicates behind.  Rebuild the list keeping first occurrences;
        # the slice assignment preserves list identity for aliases.
        unique = []
        for g in self:
            if g not in unique:
                unique.append(g)
        self.elements[:] = unique

    def isGroup(self):
        """True iff this set satisfies the group axioms and has no duplicates."""
        return self._hase() and self._isclosed() and self._hasinv() and (not self._has_duplicates())

    def closure(self):
        """
        Return the group generated by these elements.

        Works on a copy: inserts the identity, adds all powers and all
        pairwise products, then recurses until the result is a group.
        NOTE: appending to a list while iterating it is deliberate here --
        newly added elements are themselves visited, which is what makes
        the closure grow to a fixed point.
        """
        __G = GroupLike(self.name, self.elements.copy())  # Assignment causes weird errors here without copy
        __G._remove_duplicates()
        e = Gel('e', ())
        if len(__G) == 0:
            return Group(__G.name, [e])
        if e not in __G:
            __G.elements.insert(0, e)
        for g in __G:
            i = 2
            while g ** i != e:
                if g ** i not in __G:
                    __G.elements.append(g ** i)
                i += 1
        for g in __G:
            for h in __G:
                if g * h not in __G:
                    __G.elements.append(g * h)
        if __G.isGroup():
            return __G
        else:
            return __G.closure()

    def _lcoset(self, other):
        # Left coset other*self = {other*g : g in self}.
        if type(other) != Gel:
            raise TypeError('Left Cosets can only be defined with a Gel and GroupLike')
        left = [other * g for g in self]
        # BUGFIX: use str(other); Gel has no .name attribute.
        return GroupLike(str(other) + self.name, left)

    def _rcoset(self, other):
        # Right coset self*other = {g*other : g in self}.
        if type(other) != Gel:
            # BUGFIX: message used to say "Left Cosets" (copy-paste error).
            raise TypeError('Right Cosets can only be defined with a Gel and GroupLike')
        right = [g * other for g in self]
        # BUGFIX: use str(other); Gel has no .name attribute.
        return GroupLike(self.name + str(other), right)

    def __rmul__(self, other):
        return self._lcoset(other)

    def __mul__(self, other):
        return self._rcoset(other)

    def isAbelian(self):
        """True iff every pair of elements commutes."""
        return all(g * h == h * g for g in self for h in self)

    # TODO: Find centre of group
class Group(GroupLike):
"""
A class that resembles a mathematical group.
A Group class consists of a name list of group elements. The elements of a Group needs to abide by the axioms of a
Group and will force the closure of the group if force_group is True (which is the default). The main structure is
inherited from GroupLike but certain methods have been added/altered.
Parameters
-----------
name : str,
The name of your Group object.
elements : list,
List of group elements.
force_group : bool, default=True,
Boolean representing whether or not the group should be automatically completed so that it abides by the group
axioms. True will construct the closure of the list of elements, false will allow the user to attempt to submit
a group and raise an exception if it does not meet | |
= property(__class.value, __class.set, None, None)
_ElementMap.update({
__Reference.name() : __Reference,
__Para.name() : __Para,
__Include.name() : __Include,
__List.name() : __List,
__Table.name() : __Table,
__Term.name() : __Term
})
_AttributeMap.update({
__class.name() : __class
})
_module_typeBindings.CTD_ANON_29 = CTD_ANON_29
# Complex type [anonymous] with content type ELEMENT_ONLY
class CTD_ANON_30 (pyxb.binding.basis.complexTypeDefinition):
    """Complex type [anonymous] with content type ELEMENT_ONLY"""
    # pyxb-generated binding -- do not edit by hand; regenerate from ClaML.xsd.
    # Judging by the XSD location (line 318) and its children, this appears to
    # model the ClaML <Table> element: Caption/THead/TBody/TFoot children plus
    # a 'class' attribute -- confirm against ClaML.xsd.
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    _ExpandedName = None
    _XSDLocation = pyxb.utils.utility.Location('ClaML.xsd', 318, 8)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType

    # Element Caption uses Python identifier Caption
    __Caption = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Caption'), 'Caption', '__AbsentNamespace0_CTD_ANON_30_Caption', False, pyxb.utils.utility.Location('ClaML.xsd', 328, 4), )

    Caption = property(__Caption.value, __Caption.set, None, None)

    # Element THead uses Python identifier THead
    __THead = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'THead'), 'THead', '__AbsentNamespace0_CTD_ANON_30_THead', False, pyxb.utils.utility.Location('ClaML.xsd', 334, 4), )

    THead = property(__THead.value, __THead.set, None, None)

    # Element TBody uses Python identifier TBody
    __TBody = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'TBody'), 'TBody', '__AbsentNamespace0_CTD_ANON_30_TBody', False, pyxb.utils.utility.Location('ClaML.xsd', 342, 4), )

    TBody = property(__TBody.value, __TBody.set, None, None)

    # Element TFoot uses Python identifier TFoot
    __TFoot = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'TFoot'), 'TFoot', '__AbsentNamespace0_CTD_ANON_30_TFoot', False, pyxb.utils.utility.Location('ClaML.xsd', 350, 4), )

    TFoot = property(__TFoot.value, __TFoot.set, None, None)

    # Attribute class uses Python identifier class_
    __class = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'class'), 'class_', '__AbsentNamespace0_CTD_ANON_30_class', pyxb.binding.datatypes.anySimpleType)
    __class._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 325, 12)
    __class._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 325, 12)

    class_ = property(__class.value, __class.set, None, None)

    _ElementMap.update({
        __Caption.name() : __Caption,
        __THead.name() : __THead,
        __TBody.name() : __TBody,
        __TFoot.name() : __TFoot
    })
    _AttributeMap.update({
        __class.name() : __class
    })
_module_typeBindings.CTD_ANON_30 = CTD_ANON_30
# Complex type [anonymous] with content type MIXED
class CTD_ANON_31 (pyxb.binding.basis.complexTypeDefinition):
    """Complex type [anonymous] with content type MIXED"""
    # pyxb-generated binding -- do not edit by hand; regenerate from ClaML.xsd.
    # Appears to be the table <Caption> content model (XSD line 329): mixed
    # text with Reference/Term children and a 'class' attribute -- confirm
    # against ClaML.xsd.
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_MIXED
    _Abstract = False
    _ExpandedName = None
    _XSDLocation = pyxb.utils.utility.Location('ClaML.xsd', 329, 8)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType

    # Element Reference uses Python identifier Reference
    __Reference = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Reference'), 'Reference', '__AbsentNamespace0_CTD_ANON_31_Reference', True, pyxb.utils.utility.Location('ClaML.xsd', 254, 4), )

    Reference = property(__Reference.value, __Reference.set, None, None)

    # Element Term uses Python identifier Term
    __Term = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Term'), 'Term', '__AbsentNamespace0_CTD_ANON_31_Term', True, pyxb.utils.utility.Location('ClaML.xsd', 380, 4), )

    Term = property(__Term.value, __Term.set, None, None)

    # Attribute class uses Python identifier class_
    __class = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'class'), 'class_', '__AbsentNamespace0_CTD_ANON_31_class', pyxb.binding.datatypes.anySimpleType)
    __class._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 331, 12)
    __class._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 331, 12)

    class_ = property(__class.value, __class.set, None, None)

    _ElementMap.update({
        __Reference.name() : __Reference,
        __Term.name() : __Term
    })
    _AttributeMap.update({
        __class.name() : __class
    })
_module_typeBindings.CTD_ANON_31 = CTD_ANON_31
# Complex type [anonymous] with content type ELEMENT_ONLY
class CTD_ANON_32 (pyxb.binding.basis.complexTypeDefinition):
    """Complex type [anonymous] with content type ELEMENT_ONLY"""
    # pyxb-generated binding -- do not edit by hand; regenerate from ClaML.xsd.
    # Appears to be the table <THead> content model (XSD line 335): repeated
    # Row children plus a 'class' attribute -- confirm against ClaML.xsd.
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    _ExpandedName = None
    _XSDLocation = pyxb.utils.utility.Location('ClaML.xsd', 335, 8)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType

    # Element Row uses Python identifier Row
    __Row = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Row'), 'Row', '__AbsentNamespace0_CTD_ANON_32_Row', True, pyxb.utils.utility.Location('ClaML.xsd', 358, 4), )

    Row = property(__Row.value, __Row.set, None, None)

    # Attribute class uses Python identifier class_
    __class = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'class'), 'class_', '__AbsentNamespace0_CTD_ANON_32_class', pyxb.binding.datatypes.anySimpleType)
    __class._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 339, 12)
    __class._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 339, 12)

    class_ = property(__class.value, __class.set, None, None)

    _ElementMap.update({
        __Row.name() : __Row
    })
    _AttributeMap.update({
        __class.name() : __class
    })
_module_typeBindings.CTD_ANON_32 = CTD_ANON_32
# Complex type [anonymous] with content type ELEMENT_ONLY
class CTD_ANON_33 (pyxb.binding.basis.complexTypeDefinition):
    """Complex type [anonymous] with content type ELEMENT_ONLY"""
    # pyxb-generated binding -- do not edit by hand; regenerate from ClaML.xsd.
    # Appears to be the table <TBody> content model (XSD line 343): repeated
    # Row children plus a 'class' attribute -- confirm against ClaML.xsd.
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    _ExpandedName = None
    _XSDLocation = pyxb.utils.utility.Location('ClaML.xsd', 343, 8)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType

    # Element Row uses Python identifier Row
    __Row = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Row'), 'Row', '__AbsentNamespace0_CTD_ANON_33_Row', True, pyxb.utils.utility.Location('ClaML.xsd', 358, 4), )

    Row = property(__Row.value, __Row.set, None, None)

    # Attribute class uses Python identifier class_
    __class = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'class'), 'class_', '__AbsentNamespace0_CTD_ANON_33_class', pyxb.binding.datatypes.anySimpleType)
    __class._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 347, 12)
    __class._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 347, 12)

    class_ = property(__class.value, __class.set, None, None)

    _ElementMap.update({
        __Row.name() : __Row
    })
    _AttributeMap.update({
        __class.name() : __class
    })
_module_typeBindings.CTD_ANON_33 = CTD_ANON_33
# Complex type [anonymous] with content type ELEMENT_ONLY
class CTD_ANON_34 (pyxb.binding.basis.complexTypeDefinition):
    """Complex type [anonymous] with content type ELEMENT_ONLY"""
    # pyxb-generated binding -- do not edit by hand; regenerate from ClaML.xsd.
    # Appears to be the table <TFoot> content model (XSD line 351): repeated
    # Row children plus a 'class' attribute -- confirm against ClaML.xsd.
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    _ExpandedName = None
    _XSDLocation = pyxb.utils.utility.Location('ClaML.xsd', 351, 8)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType

    # Element Row uses Python identifier Row
    __Row = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Row'), 'Row', '__AbsentNamespace0_CTD_ANON_34_Row', True, pyxb.utils.utility.Location('ClaML.xsd', 358, 4), )

    Row = property(__Row.value, __Row.set, None, None)

    # Attribute class uses Python identifier class_
    __class = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'class'), 'class_', '__AbsentNamespace0_CTD_ANON_34_class', pyxb.binding.datatypes.anySimpleType)
    __class._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 355, 12)
    __class._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 355, 12)

    class_ = property(__class.value, __class.set, None, None)

    _ElementMap.update({
        __Row.name() : __Row
    })
    _AttributeMap.update({
        __class.name() : __class
    })
_module_typeBindings.CTD_ANON_34 = CTD_ANON_34
# Complex type [anonymous] with content type ELEMENT_ONLY
class CTD_ANON_35 (pyxb.binding.basis.complexTypeDefinition):
    """Complex type [anonymous] with content type ELEMENT_ONLY"""
    # pyxb-generated binding -- do not edit by hand; regenerate from ClaML.xsd.
    # Appears to be the table <Row> content model (XSD line 359): repeated
    # Cell children plus a 'class' attribute -- confirm against ClaML.xsd.
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    _ExpandedName = None
    _XSDLocation = pyxb.utils.utility.Location('ClaML.xsd', 359, 8)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType

    # Element Cell uses Python identifier Cell
    __Cell = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Cell'), 'Cell', '__AbsentNamespace0_CTD_ANON_35_Cell', True, pyxb.utils.utility.Location('ClaML.xsd', 366, 4), )

    Cell = property(__Cell.value, __Cell.set, None, None)

    # Attribute class uses Python identifier class_
    __class = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'class'), 'class_', '__AbsentNamespace0_CTD_ANON_35_class', pyxb.binding.datatypes.anySimpleType)
    __class._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 363, 12)
    __class._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 363, 12)

    class_ = property(__class.value, __class.set, None, None)

    _ElementMap.update({
        __Cell.name() : __Cell
    })
    _AttributeMap.update({
        __class.name() : __class
    })
_module_typeBindings.CTD_ANON_35 = CTD_ANON_35
# Complex type [anonymous] with content type MIXED
class CTD_ANON_36 (pyxb.binding.basis.complexTypeDefinition):
    """Complex type [anonymous] with content type MIXED"""
    # pyxb-generated binding -- do not edit by hand; regenerate from ClaML.xsd.
    # Appears to be the table <Cell> content model (XSD line 367): mixed text
    # with Reference/Para/Include/List/Table/Term children and
    # class/rowspan/colspan attributes -- confirm against ClaML.xsd.
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_MIXED
    _Abstract = False
    _ExpandedName = None
    _XSDLocation = pyxb.utils.utility.Location('ClaML.xsd', 367, 8)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType

    # Element Reference uses Python identifier Reference
    __Reference = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Reference'), 'Reference', '__AbsentNamespace0_CTD_ANON_36_Reference', True, pyxb.utils.utility.Location('ClaML.xsd', 254, 4), )

    Reference = property(__Reference.value, __Reference.set, None, None)

    # Element Para uses Python identifier Para
    __Para = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Para'), 'Para', '__AbsentNamespace0_CTD_ANON_36_Para', True, pyxb.utils.utility.Location('ClaML.xsd', 264, 4), )

    Para = property(__Para.value, __Para.set, None, None)

    # Element Include uses Python identifier Include
    __Include = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Include'), 'Include', '__AbsentNamespace0_CTD_ANON_36_Include', True, pyxb.utils.utility.Location('ClaML.xsd', 285, 4), )

    Include = property(__Include.value, __Include.set, None, None)

    # Element List uses Python identifier List
    __List = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'List'), 'List', '__AbsentNamespace0_CTD_ANON_36_List', True, pyxb.utils.utility.Location('ClaML.xsd', 297, 4), )

    List = property(__List.value, __List.set, None, None)

    # Element Table uses Python identifier Table
    __Table = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Table'), 'Table', '__AbsentNamespace0_CTD_ANON_36_Table', True, pyxb.utils.utility.Location('ClaML.xsd', 317, 4), )

    Table = property(__Table.value, __Table.set, None, None)

    # Element Term uses Python identifier Term
    __Term = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Term'), 'Term', '__AbsentNamespace0_CTD_ANON_36_Term', True, pyxb.utils.utility.Location('ClaML.xsd', 380, 4), )

    Term = property(__Term.value, __Term.set, None, None)

    # Attribute class uses Python identifier class_
    __class = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'class'), 'class_', '__AbsentNamespace0_CTD_ANON_36_class', pyxb.binding.datatypes.anySimpleType)
    __class._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 375, 12)
    __class._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 375, 12)

    class_ = property(__class.value, __class.set, None, None)

    # Attribute rowspan uses Python identifier rowspan
    __rowspan = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'rowspan'), 'rowspan', '__AbsentNamespace0_CTD_ANON_36_rowspan', pyxb.binding.datatypes.anySimpleType)
    __rowspan._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 376, 12)
    __rowspan._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 376, 12)

    rowspan = property(__rowspan.value, __rowspan.set, None, None)

    # Attribute colspan uses Python identifier colspan
    __colspan = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'colspan'), 'colspan', '__AbsentNamespace0_CTD_ANON_36_colspan', pyxb.binding.datatypes.anySimpleType)
    __colspan._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 377, 12)
    __colspan._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 377, 12)

    colspan = property(__colspan.value, __colspan.set, None, None)

    _ElementMap.update({
        __Reference.name() : __Reference,
        __Para.name() : __Para,
        __Include.name() : __Include,
        __List.name() : __List,
        __Table.name() : __Table,
        __Term.name() : __Term
    })
    _AttributeMap.update({
        __class.name() : __class,
        __rowspan.name() : __rowspan,
        __colspan.name() : __colspan
    })
_module_typeBindings.CTD_ANON_36 = CTD_ANON_36
# Complex type [anonymous] with content type MIXED
class CTD_ANON_37 (pyxb.binding.basis.complexTypeDefinition):
    """Complex type [anonymous] with content type MIXED"""
    # pyxb-generated binding -- do not edit by hand; regenerate from ClaML.xsd.
    # Appears to be the <Term> content model (XSD line 381): mixed text with
    # no child elements and a single 'class' attribute -- confirm against
    # ClaML.xsd.
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_MIXED
    _Abstract = False
    _ExpandedName = None
    _XSDLocation = pyxb.utils.utility.Location('ClaML.xsd', 381, 8)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType

    # Attribute class uses Python identifier class_
    __class = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'class'), 'class_', '__AbsentNamespace0_CTD_ANON_37_class', pyxb.binding.datatypes.anySimpleType)
    __class._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 382, 12)
    __class._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 382, 12)

    class_ = property(__class.value, __class.set, None, None)

    _ElementMap.update({
    })
    _AttributeMap.update({
        __class.name() : __class
    })
_module_typeBindings.CTD_ANON_37 = CTD_ANON_37
# Complex type [anonymous] with content type ELEMENT_ONLY
class CTD_ANON_38 (pyxb.binding.basis.complexTypeDefinition):
"""Complex type [anonymous] with content type ELEMENT_ONLY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
_Abstract = False
_ExpandedName = None
_XSDLocation = pyxb.utils.utility.Location('ClaML.xsd', 115, 8)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
# Element Display uses Python identifier Display
__Display = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Display'), 'Display', '__AbsentNamespace0_CTD_ANON_38_Display', True, pyxb.utils.utility.Location('ClaML.xsd', 136, 4), )
Display = property(__Display.value, __Display.set, None, None)
# Attribute name uses Python identifier name
__name = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'name'), 'name', '__AbsentNamespace0_CTD_ANON_38_name', pyxb.binding.datatypes.ID, required=True)
__name._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', | |
import ctypes
import platform
import time
from time import sleep
import numpy as np
import pybullet as p
from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings
from igibson.render.mesh_renderer.mesh_renderer_vr import MeshRendererVR, VrSettings
from igibson.render.viewer import ViewerVR
from igibson.simulator import Simulator
from igibson.utils.vr_utils import VR_CONTROLLERS, VR_DEVICES, VrData, calc_offset, calc_z_rot_from_right
class SimulatorVR(Simulator):
"""
Simulator class is a wrapper of physics simulator (pybullet) and MeshRenderer, it loads objects into
both pybullet and also MeshRenderer and syncs the pose of objects and robot parts.
"""
def __init__(
self,
gravity=9.8,
physics_timestep=1 / 120.0,
render_timestep=1 / 30.0,
solver_iterations=100,
mode="vr",
image_width=128,
image_height=128,
vertical_fov=90,
device_idx=0,
rendering_settings=MeshRendererSettings(),
vr_settings=VrSettings(),
use_pb_gui=False,
):
"""
:param gravity: gravity on z direction.
:param physics_timestep: timestep of physical simulation, p.stepSimulation()
:param render_timestep: timestep of rendering, and Simulator.step() function
:param solver_iterations: number of solver iterations to feed into pybullet, can be reduced to increase speed.
pybullet default value is 50.
:param use_variable_step_num: whether to use a fixed (1) or variable physics step number
:param mode: choose mode from headless, headless_tensor, gui_interactive, gui_non_interactive
:param image_width: width of the camera image
:param image_height: height of the camera image
:param vertical_fov: vertical field of view of the camera image in degrees
:param device_idx: GPU device index to run rendering on
:param rendering_settings: settings to use for mesh renderer
:param vr_settings: settings to use for VR in simulator and MeshRendererVR
:param use_pb_gui: concurrently display the interactive pybullet gui (for debugging)
"""
if platform.system() == "Windows":
# By default, windows does not provide ms level timing accuracy
winmm = ctypes.WinDLL("winmm") # type: ignore
winmm.timeBeginPeriod(1)
# Blend highlight for VR overlay
rendering_settings.blend_highlight = True
# Starting position for the VR (default set to None if no starting position is specified by the user)
self.vr_settings = vr_settings
self.vr_overlay_initialized = False
self.vr_start_pos = None
self.max_haptic_duration = 4000
# Duration of a vsync frame - assumes 90Hz refresh rate
self.vsync_frame_dur = 11.11e-3
# Timing variables for functions called outside of step() that also take up frame time
self.frame_end_time = None
# Variables for data saving and replay in VR
self.last_physics_timestep = -1
self.last_render_timestep = -1
self.last_physics_step_num = -1
self.last_frame_dur = -1
super().__init__(
gravity,
physics_timestep,
render_timestep,
solver_iterations,
mode,
image_width,
image_height,
vertical_fov,
device_idx,
rendering_settings,
use_pb_gui,
)
# Get expected number of vsync frames per iGibson frame Note: currently assumes a 90Hz VR system
self.vsync_frame_num = int(round(self.render_timestep / self.vsync_frame_dur))
# Total amount of time we want non-blocking actions to take each frame
# Leave a small amount of time before the last vsync, just in case we overrun
self.non_block_frame_time = (self.vsync_frame_num - 1) * self.vsync_frame_dur + (
5e-3 if self.vr_settings.curr_device == "OCULUS" else 10e-3
)
def initialize_renderer(self):
self.visual_objects = {}
self.renderer = MeshRendererVR(
rendering_settings=self.rendering_settings, vr_settings=self.vr_settings, simulator=self
)
self.viewer = ViewerVR(
self.vr_settings.use_companion_window,
frame_save_path=self.vr_settings.frame_save_path,
renderer=self.renderer,
)
def add_vr_overlay_text(
self,
text_data="PLACEHOLDER: PLEASE REPLACE!",
font_name="OpenSans",
font_style="Regular",
font_size=48,
color=[0, 0, 0],
pos=[20, 80],
size=[70, 80],
scale=1.0,
background_color=[1, 1, 1, 0.8],
):
"""
Creates Text for use in a VR overlay. Returns the text object to the caller,
so various settings can be changed - eg. text content, position, scale, etc.
:param text_data: starting text to display (can be changed at a later time by set_text)
:param font_name: name of font to render - same as font folder in iGibson assets
:param font_style: style of font - one of [regular, italic, bold]
:param font_size: size of font to render
:param color: [r, g, b] color
:param pos: [x, y] position of top-left corner of text box, in percentage across screen
:param size: [w, h] size of text box in percentage across screen-space axes
:param scale: scale factor for resizing text
:param background_color: color of the background in form [r, g, b, a] - default is semi-transparent white so text is easy to read in VR
"""
if not self.vr_overlay_initialized:
# This function automatically creates a VR text overlay the first time text is added
self.renderer.gen_vr_hud()
self.vr_overlay_initialized = True
# Note: For pos/size - (0,0) is bottom-left and (100, 100) is top-right
# Calculate pixel positions for text
pixel_pos = [int(pos[0] / 100.0 * self.renderer.width), int(pos[1] / 100.0 * self.renderer.height)]
pixel_size = [int(size[0] / 100.0 * self.renderer.width), int(size[1] / 100.0 * self.renderer.height)]
return self.renderer.add_text(
text_data=text_data,
font_name=font_name,
font_style=font_style,
font_size=font_size,
color=color,
pixel_pos=pixel_pos,
pixel_size=pixel_size,
scale=scale,
background_color=background_color,
render_to_tex=True,
)
def add_overlay_image(self, image_fpath, width=1, pos=[0, 0, -1]):
"""
Add an image with a given file path to the VR overlay. This image will be displayed
in addition to any text that the users wishes to display. This function returns a handle
to the VrStaticImageOverlay, so the user can display/hide it at will.
"""
return self.renderer.gen_static_overlay(image_fpath, width=width, pos=pos)
def set_hud_show_state(self, show_state):
"""
Shows/hides the main VR HUD.
:param show_state: whether to show HUD or not
"""
self.renderer.vr_hud.set_overlay_show_state(show_state)
def get_hud_show_state(self):
"""
Returns the show state of the main VR HUD.
"""
return self.renderer.vr_hud.get_overlay_show_state()
    def step_vr_system(self):
        """
        Run the once-per-frame VR pipeline (compositor sync, event polling,
        eye-tracking fix, start-position move, VR data and system update) and
        return how long it took, in seconds.

        NOTE(review): the call order below appears deliberate (compositor
        sync first, data update last) -- confirm before reordering.
        """
        vr_system_start = time.perf_counter()
        # First sync VR compositor - this is where Oculus blocks (as opposed to Vive, which blocks in update_vr_data)
        self.sync_vr_compositor()
        # Note: this should only be called once per frame - use get_vr_events to read the event data list in
        # subsequent read operations
        self.poll_vr_events()
        # This is necessary to fix the eye tracking value for the current frame, since it is multi-threaded
        self.fix_eye_tracking_value()
        # Move user to their starting location
        self.perform_vr_start_pos_move()
        # Update VR data and wait until 3ms before the next vsync
        self.renderer.update_vr_data()
        # Update VR system data - eg. offsets, haptics, etc.
        self.vr_system_update()
        vr_system_dur = time.perf_counter() - vr_system_start
        return vr_system_dur
    def step(self, print_stats=False):
        """
        Step the simulation when using VR.  Order of function calls:
        1) Simulate physics
        2) Render frame
        3) Submit rendered frame to VR compositor
        4) Update VR data for use in the next frame

        :param print_stats: whether to print per-frame timing statistics (in ms).
        """
        assert (
            self.scene is not None
        ), "A scene must be imported before running the simulator. Use EmptyScene for an empty scene."
        # Time spent outside of step() since the previous frame ended.
        outside_step_dur = 0
        if self.frame_end_time is not None:
            outside_step_dur = time.perf_counter() - self.frame_end_time
        # Simulate Physics in PyBullet
        physics_start_time = time.perf_counter()
        for _ in range(self.physics_timestep_num):
            p.stepSimulation()
        physics_dur = time.perf_counter() - physics_start_time
        non_physics_start_time = time.perf_counter()
        self._non_physics_step()
        non_physics_dur = time.perf_counter() - non_physics_start_time
        # Sync PyBullet bodies to renderer and then render to Viewer
        render_start_time = time.perf_counter()
        self.sync()
        render_dur = time.perf_counter() - render_start_time
        # Sleep until the last possible vsync so the compositor submit lands
        # just before the display refresh.
        pre_sleep_dur = outside_step_dur + physics_dur + non_physics_dur + render_dur
        sleep_start_time = time.perf_counter()
        if pre_sleep_dur < self.non_block_frame_time:
            sleep(self.non_block_frame_time - pre_sleep_dur)
        sleep_dur = time.perf_counter() - sleep_start_time
        vr_system_dur = self.step_vr_system()
        # Calculate final frame duration; clamp to >= 1 ms so the FPS
        # computation below never divides by zero.
        frame_dur = max(1e-3, pre_sleep_dur + sleep_dur + vr_system_dur)
        # Set variables for data saving and replay
        self.last_physics_timestep = physics_dur
        self.last_render_timestep = render_dur
        self.last_frame_dur = frame_dur
        if print_stats:
            print("Frame number {} statistics (ms)".format(self.frame_count))
            print("Total out-of-step duration: {}".format(outside_step_dur * 1000))
            print("Total physics duration: {}".format(physics_dur * 1000))
            print("Total non-physics duration: {}".format(non_physics_dur * 1000))
            print("Total render duration: {}".format(render_dur * 1000))
            print("Total sleep duration: {}".format(sleep_dur * 1000))
            print("Total VR system duration: {}".format(vr_system_dur * 1000))
            print("Total frame duration: {} and fps: {}".format(frame_dur * 1000, 1 / frame_dur))
            print(
                "Realtime factor: {}".format(round((self.physics_timestep_num * self.physics_timestep) / frame_dur, 3))
            )
            print("-------------------------")
        self.frame_count += 1
        # Record when we left step() so the next call can measure the time
        # spent outside of step.
        self.frame_end_time = time.perf_counter()
def vr_system_update(self):
"""
Updates the VR system for a single frame. This includes moving the vr offset,
adjusting the user's height based on button input, and triggering haptics.
"""
# Update VR offset using appropriate controller
if self.vr_settings.touchpad_movement:
vr_offset_device = "{}_controller".format(self.vr_settings.movement_controller)
is_valid, _, _ = self.get_data_for_vr_device(vr_offset_device)
if is_valid:
_, touch_x, touch_y = self.get_button_data_for_controller(vr_offset_device)
new_offset = calc_offset(
self, touch_x, touch_y, self.vr_settings.movement_speed, self.vr_settings.relative_movement_device
)
self.set_vr_offset(new_offset)
# Adjust user height based on y-axis (vertical direction) touchpad input
vr_height_device = "left_controller" if self.vr_settings.movement_controller == "right" else "right_controller"
is_height_valid, _, _ = self.get_data_for_vr_device(vr_height_device)
if is_height_valid:
curr_offset = self.get_vr_offset()
hmd_height = self.get_hmd_world_pos()[2]
_, _, height_y = self.get_button_data_for_controller(vr_height_device)
if height_y < -0.7:
vr_z_offset = -0.01
if hmd_height + curr_offset[2] + vr_z_offset >= self.vr_settings.height_bounds[0]:
self.set_vr_offset([curr_offset[0], curr_offset[1], curr_offset[2] + vr_z_offset])
elif | |
# Anton's Code
# Teacher Notes - OCR, Optical Character Recognition, numpy, imagine => matrix, stackoverflow.com/questions/52633697/selenium-python-how-to-capture-network-traffics-response#make sure you're not downloading at a high rate or risk getting blocked?
# Note* I know the offline section is fairly redundant but my computer handles it quickly so I'll probably leave it
# Make a list of top stocks not just one, also be able to pick bonds n shit
import collections
import csv
import time
from itertools import zip_longest
import matplotlib.pyplot as plt
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
def Convert(string):
    """Split a '/'-delimited string into a list of its segments.

    e.g. Convert("a/b/c") -> ["a", "b", "c"]; an empty string yields [""].
    """
    # str.split already returns a list; the previous list(...) wrapper was redundant.
    return string.split("/")
# --- Interactive configuration ----------------------------------------------
# Show the user an example of each expected input; an empty answer falls back
# to the default given after `or` below.
print("Note: senator data will only show up if they file paperless")
print('Example:')
print('Year = 2020')
print('Month = 09')
print('Big/Small = Small')
print('Corrupt_Senators = <NAME>/Loeffler, Kelly/Inhofe, <NAME>.')
print('Stocks/Bonds = Stock/Stock Option/Corporate Bond/Other Securities')
print('File_name = Stock Data.csv')
print('Stock Name = DuPont de Nemours, Inc.')
print('Pages = 7')
print(" ")
# Filing year; used below to filter the senate search results.
Year = str(input("Year = ") or "2020")
# NOTE(review): Month is collected but no use of it is visible in this chunk -- confirm.
Month = str(input("Month = ") or "01")
# "Big" later uses the upper bound of each reported dollar range, "Small" the lower bound.
Big_Small = str(input("Big/Small = ") or "Small")
# '/'-separated list of senator names of interest.
Corrupt_Senators = Convert(str(input("Corrupt Senators = ") or "<NAME>/Loeffler, Kelly/Inhofe, <NAME>."))
# '/'-separated list of asset types to include.
StocksorBonds = Convert(str(input("Stocks/Bonds = ") or "Stock/Stock Option/Corporate Bond/Other Securities"))
# Output CSV path.
File_name = str(input("File Name = ") or "Stock Data.csv")
Stock_Name = str(input("Stock Name = ") or "DuPont de Nemours, Inc.")
# Number of result pages to walk on the senate search site.
Pages = int(input("Pages = ") or "1")
# Pad the asset-type list to at least 3 entries by repeating the last one.
if len(StocksorBonds) < 3:
    for l in range(3 - len(StocksorBonds)):
        StocksorBonds.append(StocksorBonds[-1])
# START//START//START//START//START//START//START//START//START//START//START//START//START//START//START//START//START//START//
start_time = time.time()
# --- Online scrape: open efdsearch.senate.gov, collect Periodic Transaction
# --- Report links, pull each report's table and normalise it into test002.
while True:
    amendeddict1 = {}  # reports whose title contains "Amendment 1"
    amendeddict2 = {}  # reports whose title contains "Amendment 2"
    amendeddict3 = {}  # reports whose title contains "Amendment 3"
    dic1 = {}
    testlis = []
    link_list2 = {}
    noduplinks = []
    PDF_links = []  # reports filed as scanned PDFs (no html table to parse)
    link_list = []
    # Character whitelists used with filter() to strip fields down to digits / date chars.
    whitelist_nums = set('1234567890')
    whitelist_date = set('1234567890/')
    whitelist_letters = set('abcdefghijklmnopqrstuvwxyz ABCDEFGHIJKLMNOPQRSTUVWXYZ')
    driver = webdriver.Chrome()  # open automated chrome
    wait = WebDriverWait(driver, 5)
    driver.implicitly_wait(10)
    r = driver.get('https://efdsearch.senate.gov/search/')  # open site
    driver.find_element_by_id("agree_statement").click()  # Check ethics box
    driver.find_element_by_id("filerTypes").click()  # Click senators box
    driver.find_element_by_id("reportTypeLabelPtr").click()  # Click periodic transactions
    element = driver.find_element_by_id("reportTypeLabelPtr")  # look at the periodic transactions element
    element.send_keys(Keys.ENTER)  # Enter = search
    searchbox = driver.find_element_by_id("filedReports_filter").click()  # This clicks the search box
    driver.find_element_by_xpath('//*[@id="filedReports_filter"]/label/input').send_keys(
        Year)  # This filters data by the year Var
    # We have now navigated the web site and filtered results.
    # Walk up to `Pages` result pages, harvesting every report link on each.
    for i in range(Pages):
        time.sleep(1)
        elems = driver.find_elements_by_partial_link_text('Periodic Transaction Report for')
        for elem in elems:
            # time.sleep(.5)
            href = elem.get_attribute('href')
            if href is not None:
                link_list.append(href)
        # We now have every link we need on the current page
        element = wait.until(EC.element_to_be_clickable((By.XPATH, '//*[@id="filedReports_next"]'))).click()
        # We now click the next button Pages times
    # We now have everylink on the site stored in link_list
    # (with the year filter and assuming there aren't more than Pages(var) pages)
    print(len(link_list), 'links')
    # De-duplicate links while preserving first-seen order (dict keys keep insertion order).
    for k in range(len(link_list)):
        link_list2[link_list[k]] = k
    for i in link_list2.keys():
        noduplinks.append(i)
    print("Obtained link list")
    print(len(noduplinks), 'links after removing duplicates')
    time.sleep(1)
    # for links in link_list:
    # test002 maps a running integer key -> one flattened report:
    # [0] h1 title html, [1] report url, [2] senator name, then 9 cells per transaction.
    test002 = {}
    for links in noduplinks:
        r = driver.get(links)
        html = driver.page_source
        doc = BeautifulSoup(html)#, features="html5lib")
        test001 = doc.find_all("td")
        Header = doc.find("h1")
        Senator = doc.find("h2")
        Senator = str(Senator)
        # Senator name sits in parentheses inside the h2 text.
        start = Senator.find('(')
        end = Senator.find(')')
        if start != -1 and end != -1:
            Senator = Senator[start + 1:end]
        test001.insert(0, Senator)
        test001.insert(0, Header)
        test001.insert(1, links)
        for i in range(len(test001)):
            test001[i] = str(test001[i])
        # More than 3 entries means a real html table was found (PDF filings have none).
        if len(test001) > 3:
            dict_key = len(test002)
            test002[dict_key] = test001.copy()
            boost = 0
            # Each transaction occupies 9 consecutive cells; clean each field in place.
            for length in range(int((len(test001) - 3) / 9)):
                # [3] transaction number -> digits only
                test002[dict_key][3 + boost] = ''.join(
                    filter(whitelist_nums.__contains__, test002[dict_key][3 + boost]))
                # [4] transaction date -> digits and slashes, trailing char dropped
                test002[dict_key][4 + boost] = ''.join(
                    filter(whitelist_date.__contains__, test002[dict_key][4 + boost]))
                test002[dict_key][4 + boost] = test002[dict_key][4 + boost][:-1]# Delete this line to get a mm/dd/yyyy/ format instead of ##### in the csv
                # [5] strip the surrounding <td></td> tags
                test002[dict_key][5 + boost] = test002[dict_key][5 + boost][4:-5]
                # [6] '--' marks a cell with no hyperlink
                hyperlink = test002[dict_key][6 + boost].find('--')
                if hyperlink > 0:
                    test002[dict_key][6 + boost] = 'No Link'
                else:
                    test002[dict_key][6 + boost] = test002[dict_key][6 + boost][6:-7]
                # [7] asset description -> plain text (first 86 chars are boilerplate);
                # later used (first line only) as the asset key in the offline stage
                test002[dict_key][7 + boost] = BeautifulSoup(test002[dict_key][7 + boost]).text[86:]
                # [8] asset type and [9] transaction type -> strip <td></td>
                test002[dict_key][8 + boost] = test002[dict_key][8 + boost][4:-5]
                test002[dict_key][9 + boost] = test002[dict_key][9 + boost][4:-5]
                # [10] dollar range "low - high" -> [low_digits, high_digits]
                test002[dict_key][10 + boost] = test002[dict_key][10 + boost].split('-')
                # NOTE(review): the range below reads len(...[10]) without `boost`, so every
                # transaction reuses the FIRST transaction's part count -- presumably should
                # be [10 + boost]; harmless only if all ranges split into equal parts. Confirm.
                for n in range(len(test002[dict_key][10])):
                    test002[dict_key][10 + boost][n] = ''.join(
                        filter(whitelist_nums.__contains__, test002[dict_key][10 + boost][n]))
                # [11] '--' marks an empty comment cell
                comments = test002[dict_key][11 + boost].find('--')
                if comments > 0:
                    test002[dict_key][11 + boost] = 'No Comments'
                else:
                    test002[dict_key][11 + boost] = test002[dict_key][11 + boost][6:-7]
                boost += 9
            # Bucket amended reports by amendment number (found in the h1 title).
            Amended = test002[dict_key][0].find("Amendment 1")
            if Amended > 0:
                amendeddict1[dict_key] = test002[dict_key].copy()
            Amended2 = test002[dict_key][0].find("Amendment 2")
            if Amended2 > 0:
                amendeddict2[dict_key] = test002[dict_key].copy()
            Amended3 = test002[dict_key][0].find("Amendment 3")
            if Amended3 > 0:
                amendeddict3[dict_key] = test002[dict_key].copy()
        else:
            PDF_links.append(links)
    print(len(test002), 'useable links (non PDF)')
    print("Links to non-useable links in 'PDF_links'")
    # Pulling page info and formatting above
    print("Obtained and formatted page info")
    # Match each "Amendment 1" report to the report it supersedes: same senator
    # (index [2]) and matching date digits pulled from the titles; the superseded
    # report's key is recorded in dic1.
    dic1 = {}
    amend2 = 0
    for stupid in amendeddict1:
        for everything in test002:
            # NOTE(review): the two sides slice the digit run differently ([1:-3] vs
            # [1:-2]) -- presumably aligning amendment-title digits against the plain
            # title; verify against real titles.
            if amendeddict1[stupid][2] == test002[everything][2] and ''.join(
                    filter(whitelist_date.__contains__, test002[stupid][0]))[1:-3] == ''.join(
                    filter(whitelist_date.__contains__, test002[everything][0]))[1:-2]:
                dic1[everything] = stupid
    print(len(dic1), 'reports amended once')
    # Same idea one level up: "Amendment 2" supersedes "Amendment 1".
    for stupid in amendeddict2:
        for everything in amendeddict1:
            if ''.join(filter(whitelist_date.__contains__, amendeddict2[stupid][0]))[2:-3] == ''.join(
                    filter(whitelist_date.__contains__, amendeddict1[everything][0]))[2:-3] and amendeddict2[stupid][
                    2] == amendeddict1[everything][2]:
                amend2 += 1
                dic1[everything] = stupid
    print(amend2, 'reports amended twice')
    # Drop superseded reports so only the latest amendment of each remains.
    for screwups in dic1:
        test002.pop(screwups)
    # duplicate checker below
    for i in test002:
        testlis.append(test002[i][1])
    def checkIfDuplicates_1(testlis):
        ''' Check if given list contains any duplicates '''
        if len(testlis) == len(set(testlis)):
            return False
        else:
            return True
    result = checkIfDuplicates_1(testlis)
    if result:
        print('Dictionary contains duplicates')
    else:
        print('No duplicates found in list')
    # Write test002 to CSV: one column per report, rows transposed with zip_longest
    # so reports with fewer transactions are padded.
    input_data = test002
    transposed_data = list(zip_longest(*input_data.values()))
    with open(File_name, 'w', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(input_data.keys())
        for items in transposed_data:
            writer.writerow(items)
    # Saving the dictionary to csv above
    print("Downloaded page info")
    print(len(test002), 'links after removing senator screw ups')
    # A full results page holds 25 rows; an exact multiple suggests we hit the page cap.
    if (len(link_list) % 25 == 0):
        print('Maxed out the pages, try increasing Pages.var?')
    if len(amendeddict3) > 0:
        print("At least one report has been amended 3 times")
    # if amend2 >= 1 and len(dic1) >= 5 and len(test002) >= 68: # I belive there is now 72 dictionary entries for 2020
    # print('Probably a successful web scrape')
    print("Online script took %s seconds to execute" % (time.time() - start_time))
    break
# A partial sell means that you have asked your broker to buy or sell stock, but the broker can't buy or sell as much as you
# would like, and a portion of the order remains unfulfilled.
# Every time you trade stocks, you're charged a commission even if it's partially fulfilled.
# Not sure if the corresponding $ value is what got sold or what they tried to sell. No other numbers tho, so I'll prolly
# continue with the same procedure
start_time = time.time()
# --- Offline aggregation -----------------------------------------------------
# better maps (asset name first line, transaction date) ->
# [Investors, Removers, Amount]: number of purchase transactions, number of
# sale transactions, and the running net dollar amount (Big_Small picks the
# upper [1] or lower [0] bound of each reported range).
better = {}
Investors = 0
Removers = 0
Amount = 0
for i in test002:
    boost2 = 0
    # Walk each 9-cell transaction record of this report.
    for transactions in range(int((len(test002[i]) - 3) / 9)):
        # Only count stock / stock-option transactions ([8] is the asset type).
        if test002[i][8 + boost2] == "Stock" or test002[i][8 + boost2] == "Stock Option":
            # Skip transactions whose date digits contain the previous year.
            if str(int(Year) - 1) not in ''.join(filter(whitelist_nums.__contains__, test002[i][4 + boost2])):
                # Sales subtract from the running Amount.
                if test002[i][9 + boost2] == "Sale (Partial)" or test002[i][9 + boost2] == "Sale (Full)":
                    if Big_Small.upper() == "BIG":
                        Amount = -int(test002[i][10 + boost2][1])
                    if Big_Small.upper() == "SMALL":
                        Amount = -int(test002[i][10 + boost2][0])
                    # Existing (asset, date) key: merge into the stored triple.
                    if (test002[i][7 + boost2].splitlines()[0], test002[i][4 + boost2]) in better.keys():
                        Investors = better[(test002[i][7 + boost2].splitlines()[0], test002[i][4 + boost2])][0]
                        Removers = better[(test002[i][7 + boost2].splitlines()[0], test002[i][4 + boost2])][1]
                        Removers += 1
                        Amount = better[(test002[i][7 + boost2].splitlines()[0], test002[i][4 + boost2])][2]
                        if Big_Small.upper() == "BIG":
                            Amount -= int(test002[i][10 + boost2][1])
                        if Big_Small.upper() == "SMALL":
                            Amount -= int(test002[i][10 + boost2][0])
                        better[(test002[i][7 + boost2].splitlines()[0], test002[i][4 + boost2])] = [Investors, Removers,
                                                                                                   Amount]
                    else:
                        # First sale seen for this key.
                        Removers = 1
                        Investors = 0
                        better[(test002[i][7 + boost2].splitlines()[0], test002[i][4 + boost2])] = [Investors, Removers,
                                                                                                   Amount]
                    Removers = 0
                # Purchases add to the running Amount.
                if test002[i][9 + boost2] == "Purchase":
                    if Big_Small.upper() == "BIG":
                        Amount = int(test002[i][10 + boost2][1])
                    if Big_Small.upper() == "SMALL":
                        Amount = int(test002[i][10 + boost2][0])
                    if (test002[i][7 + boost2].splitlines()[0], test002[i][4 + boost2]) in better.keys():
                        Investors = better[(test002[i][7 + boost2].splitlines()[0], test002[i][4 + boost2])][0]
                        Removers = better[(test002[i][7 + boost2].splitlines()[0], test002[i][4 + boost2])][1]
                        Investors += 1
                        Amount = better[(test002[i][7 + boost2].splitlines()[0], test002[i][4 + boost2])][2]
                        if Big_Small.upper() == "BIG":
                            Amount += int(test002[i][10 + boost2][1])
                        if Big_Small.upper() == "SMALL":
                            Amount += int(test002[i][10 + boost2][0])
                        better[(test002[i][7 + boost2].splitlines()[0], test002[i][4 + boost2])] = [Investors, Removers,
                                                                                                   Amount]
                    else:
                        # First purchase seen for this key.
                        Investors = 1
                        Removers = 0
                        better[(test002[i][7 + boost2].splitlines()[0], test002[i][4 + boost2])] = [Investors, Removers,
                                                                                                   Amount]
                    Investors = 0
        boost2 += 9
# Second aggregation pass begins here; the loop that fills `best` continues
# beyond this chunk -- presumably re-keying `better` without the date. Confirm.
best = {}
Investors = 0
Removers = 0
Amount = 0
for | |
self._formula_description = ClientGUICommon.SaneMultilineTextCtrl( my_panel )
( width, height ) = ClientGUICommon.ConvertTextToPixels( self._formula_description, ( 90, 8 ) )
self._formula_description.SetInitialSize( ( width, height ) )
self._formula_description.Disable()
self._edit_formula = ClientGUICommon.BetterButton( my_panel, 'edit formula', self._EditFormula )
self._change_formula_type = ClientGUICommon.BetterButton( my_panel, 'change formula type', self._ChangeFormulaType )
#
self._UpdateControls()
#
button_hbox = wx.BoxSizer( wx.HORIZONTAL )
button_hbox.Add( self._edit_formula, CC.FLAGS_EXPAND_BOTH_WAYS )
button_hbox.Add( self._change_formula_type, CC.FLAGS_EXPAND_BOTH_WAYS )
my_panel.Add( self._formula_description, CC.FLAGS_EXPAND_BOTH_WAYS )
my_panel.Add( button_hbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
#
vbox = wx.BoxSizer( wx.VERTICAL )
vbox.Add( my_panel, CC.FLAGS_EXPAND_SIZER_BOTH_WAYS )
self.SetSizer( vbox )
def _ChangeFormulaType( self ):
    """Swap the current formula for a fresh formula of a user-chosen different type.

    Presents a selection dialog listing every formula type other than the
    current one; on OK the current formula is replaced with a blank formula
    of the chosen type and the panel controls are refreshed.
    """
    # If the current formula yields separated content, seed the HTML/JSON
    # replacements so they keep producing separated content too.
    if self._current_formula.ParsesSeparatedContent():
        new_html = ClientParsing.ParseFormulaHTML( content_to_fetch = ClientParsing.HTML_CONTENT_HTML )
        new_json = ClientParsing.ParseFormulaJSON( content_to_fetch = ClientParsing.JSON_CONTENT_JSON )
    else:
        new_html = ClientParsing.ParseFormulaHTML()
        new_json = ClientParsing.ParseFormulaJSON()
    new_compound = ClientParsing.ParseFormulaCompound()
    new_context_variable = ClientParsing.ParseFormulaContextVariable()
    # Offer every formula type except the one we already have.
    if isinstance( self._current_formula, ClientParsing.ParseFormulaHTML ):
        order = ( 'json', 'compound', 'context_variable' )
    elif isinstance( self._current_formula, ClientParsing.ParseFormulaJSON ):
        order = ( 'html', 'compound', 'context_variable' )
    elif isinstance( self._current_formula, ClientParsing.ParseFormulaCompound ):
        order = ( 'html', 'json', 'context_variable' )
    elif isinstance( self._current_formula, ClientParsing.ParseFormulaContextVariable ):
        # BUG FIX: this branch previously also offered 'context_variable',
        # letting the user "change" to the type they already have, unlike
        # every other branch.
        order = ( 'html', 'json', 'compound' )
    else:
        # Unknown formula type: offer everything rather than leaving `order`
        # unbound (which raised NameError below).
        order = ( 'html', 'json', 'compound', 'context_variable' )
    choice_tuples = []
    for formula_type in order:
        if formula_type == 'html':
            choice_tuples.append( ( 'change to a new HTML formula', new_html ) )
        elif formula_type == 'json':
            choice_tuples.append( ( 'change to a new JSON formula', new_json ) )
        elif formula_type == 'compound':
            choice_tuples.append( ( 'change to a new COMPOUND formula', new_compound ) )
        elif formula_type == 'context_variable':
            choice_tuples.append( ( 'change to a new CONTEXT VARIABLE formula', new_context_variable ) )
    with ClientGUIDialogs.DialogSelectFromList( self, 'select formula type', choice_tuples ) as dlg:
        if dlg.ShowModal() == wx.ID_OK:
            self._current_formula = dlg.GetChoice()
            self._UpdateControls()
def _EditFormula( self ):
    """Open a modal edit dialog for the current formula and apply the result on OK."""
    # Map each concrete formula class to its edit panel; first isinstance match wins.
    panel_lookup = (
        ( ClientParsing.ParseFormulaHTML, EditHTMLFormulaPanel ),
        ( ClientParsing.ParseFormulaJSON, EditJSONFormulaPanel ),
        ( ClientParsing.ParseFormulaCompound, EditCompoundFormulaPanel ),
        ( ClientParsing.ParseFormulaContextVariable, EditContextVariableFormulaPanel )
    )
    for ( formula_class, possible_panel_class ) in panel_lookup:
        if isinstance( self._current_formula, formula_class ):
            panel_class = possible_panel_class
            break
    test_context = self._test_context_callable()
    with ClientGUITopLevelWindows.DialogEdit( self, 'edit formula', frame_key = 'deeply_nested_dialog' ) as dlg:
        edit_panel = panel_class( dlg, self._current_formula, test_context )
        dlg.SetPanel( edit_panel )
        if dlg.ShowModal() == wx.ID_OK:
            self._current_formula = edit_panel.GetValue()
            self._UpdateControls()
def _UpdateControls( self ):
    """Sync the description text and button enabled-state with the current formula."""
    formula = self._current_formula
    have_formula = formula is not None
    description = formula.ToPrettyMultilineString() if have_formula else ''
    self._formula_description.SetValue( description )
    if have_formula:
        self._edit_formula.Enable()
        self._change_formula_type.Enable()
    else:
        self._edit_formula.Disable()
        self._change_formula_type.Disable()
def GetValue( self ):
    """Return the formula currently being edited (may be None if no formula is set)."""
    return self._current_formula
class EditHTMLTagRulePanel( ClientGUIScrolledPanels.EditPanel ):
    """Panel for editing a single HTML tag-search rule (ClientParsing.ParseRuleHTML).

    A rule either searches descendants (by tag name/attributes/index) or walks
    back up ancestors (by tag name/depth), optionally testing the tag string
    against a string match.
    """
    def __init__( self, parent, tag_rule ):
        """Build the panel's widgets and seed them from *tag_rule*."""
        ClientGUIScrolledPanels.EditPanel.__init__( self, parent )
        ( rule_type, tag_name, tag_attributes, tag_index, tag_depth, should_test_tag_string, tag_string_string_match ) = tag_rule.ToTuple()
        # Normalise optional tuple members to widget-friendly defaults.
        if tag_name is None:
            tag_name = ''
        if tag_attributes is None:
            tag_attributes = {}
        if tag_depth is None:
            tag_depth = 1
        # Live preview of the rule as text, kept current by _UpdateDescription.
        self._current_description = ClientGUICommon.BetterStaticText( self )
        self._rule_type = ClientGUICommon.BetterChoice( self )
        self._rule_type.Append( 'search descendents', ClientParsing.HTML_RULE_TYPE_DESCENDING )
        self._rule_type.Append( 'walk back up ancestors', ClientParsing.HTML_RULE_TYPE_ASCENDING )
        self._tag_name = wx.TextCtrl( self )
        self._tag_attributes = ClientGUIControls.EditStringToStringDictControl( self, tag_attributes )
        self._tag_index = ClientGUICommon.NoneableSpinCtrl( self, 'index to fetch', none_phrase = 'get all', min = 0, max = 255 )
        self._tag_depth = wx.SpinCtrl( self, min = 1, max = 255 )
        self._should_test_tag_string = wx.CheckBox( self )
        self._tag_string_string_match = StringMatchButton( self, tag_string_string_match )
        #
        # Seed widget values from the rule being edited.
        self._rule_type.SelectClientData( rule_type )
        self._tag_name.SetValue( tag_name )
        self._tag_index.SetValue( tag_index )
        self._tag_depth.SetValue( tag_depth )
        self._should_test_tag_string.SetValue( should_test_tag_string )
        # Enables/disables the type-specific widgets and refreshes the description.
        self._UpdateTypeControls()
        #
        # Layout: description on top, then name/type, attributes, index/depth,
        # and the tag-string test controls.
        vbox = wx.BoxSizer( wx.VERTICAL )
        rows = []
        rows.append( ( 'rule type: ', self._rule_type ) )
        rows.append( ( 'tag name: ', self._tag_name ) )
        gridbox_1 = ClientGUICommon.WrapInGrid( self, rows )
        rows = []
        rows.append( ( 'index to fetch: ', self._tag_index ) )
        rows.append( ( 'depth to climb: ', self._tag_depth ) )
        gridbox_2 = ClientGUICommon.WrapInGrid( self, rows )
        rows = []
        rows.append( ( 'should test tag string: ', self._should_test_tag_string ) )
        rows.append( ( 'tag string match: ', self._tag_string_string_match ) )
        gridbox_3 = ClientGUICommon.WrapInGrid( self, rows )
        vbox.Add( self._current_description, CC.FLAGS_EXPAND_PERPENDICULAR )
        vbox.Add( gridbox_1, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
        vbox.Add( self._tag_attributes, CC.FLAGS_EXPAND_BOTH_WAYS )
        vbox.Add( gridbox_2, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
        vbox.Add( gridbox_3, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
        self.SetSizer( vbox )
        self._UpdateShouldTest()
        #
        # Any edit refreshes the description; type/checkbox changes also toggle widgets.
        self._rule_type.Bind( wx.EVT_CHOICE, self.EventTypeChanged )
        self._tag_name.Bind( wx.EVT_TEXT, self.EventVariableChanged )
        self._tag_attributes.Bind( ClientGUIListCtrl.EVT_LIST_CTRL, self.EventVariableChanged)
        self._tag_index.Bind( wx.EVT_SPINCTRL, self.EventVariableChanged )
        self._tag_depth.Bind( wx.EVT_SPINCTRL, self.EventVariableChanged )
        self._should_test_tag_string.Bind( wx.EVT_CHECKBOX, self.EventShouldTestChanged )
    def _UpdateShouldTest( self ):
        """Enable the string-match button only when tag-string testing is ticked."""
        if self._should_test_tag_string.GetValue():
            self._tag_string_string_match.Enable()
        else:
            self._tag_string_string_match.Disable()
    def _UpdateTypeControls( self ):
        """Toggle attribute/index vs depth widgets to match the rule type, then refresh."""
        rule_type = self._rule_type.GetChoice()
        if rule_type == ClientParsing.HTML_RULE_TYPE_DESCENDING:
            # Descending rules use attributes and index; depth is irrelevant.
            self._tag_attributes.Enable()
            self._tag_index.Enable()
            self._tag_depth.Disable()
        else:
            # Ascending rules use depth only.
            self._tag_attributes.Disable()
            self._tag_index.Disable()
            self._tag_depth.Enable()
        self._UpdateDescription()
    def _UpdateDescription( self ):
        """Regenerate the human-readable preview from the current widget state."""
        tag_rule = self.GetValue()
        label = tag_rule.ToString()
        self._current_description.SetLabelText( label )
    def EventShouldTestChanged( self, event ):
        # wx.EVT_CHECKBOX handler for the should-test checkbox.
        self._UpdateShouldTest()
    def EventTypeChanged( self, event ):
        # wx.EVT_CHOICE handler for the rule-type dropdown.
        self._UpdateTypeControls()
        event.Skip()
    def EventVariableChanged( self, event ):
        # Generic change handler: refresh the description preview.
        self._UpdateDescription()
        event.Skip()
    def GetValue( self ):
        """Build and return a ClientParsing.ParseRuleHTML from the current widget state."""
        rule_type = self._rule_type.GetChoice()
        tag_name = self._tag_name.GetValue()
        # An empty name means "any tag".
        if tag_name == '':
            tag_name = None
        should_test_tag_string = self._should_test_tag_string.GetValue()
        tag_string_string_match = self._tag_string_string_match.GetValue()
        if rule_type == ClientParsing.HTML_RULE_TYPE_DESCENDING:
            tag_attributes = self._tag_attributes.GetValue()
            tag_index = self._tag_index.GetValue()
            tag_rule = ClientParsing.ParseRuleHTML( rule_type = rule_type, tag_name = tag_name, tag_attributes = tag_attributes, tag_index = tag_index, should_test_tag_string = should_test_tag_string, tag_string_string_match = tag_string_string_match )
        elif rule_type == ClientParsing.HTML_RULE_TYPE_ASCENDING:
            tag_depth = self._tag_depth.GetValue()
            tag_rule = ClientParsing.ParseRuleHTML( rule_type = rule_type, tag_name = tag_name, tag_depth = tag_depth, should_test_tag_string = should_test_tag_string, tag_string_string_match = tag_string_string_match )
        return tag_rule
class EditHTMLFormulaPanel( ClientGUIScrolledPanels.EditPanel ):
def __init__( self, parent, formula, test_context ):
ClientGUIScrolledPanels.EditPanel.__init__( self, parent )
#
menu_items = []
page_func = HydrusData.Call( ClientPaths.LaunchPathInWebBrowser, os.path.join( HC.HELP_DIR, 'downloader_parsers_formulae.html#html_formula' ) )
menu_items.append( ( 'normal', 'open the html formula help', 'Open the help page for html formulae in your web browesr.', page_func ) )
help_button = ClientGUICommon.MenuBitmapButton( self, CC.GlobalBMPs.help, menu_items )
help_hbox = ClientGUICommon.WrapInText( help_button, self, 'help for this panel -->', wx.Colour( 0, 0, 255 ) )
#
edit_panel = ClientGUICommon.StaticBox( self, 'edit' )
edit_panel.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_FRAMEBK ) )
self._tag_rules = wx.ListBox( edit_panel, style = wx.LB_SINGLE )
self._tag_rules.Bind( wx.EVT_LEFT_DCLICK, self.EventEdit )
self._add_rule = ClientGUICommon.BetterButton( edit_panel, 'add', self.Add )
self._edit_rule = ClientGUICommon.BetterButton( edit_panel, 'edit', self.Edit )
self._move_rule_up = ClientGUICommon.BetterButton( edit_panel, u'\u2191', self.MoveUp )
self._delete_rule = ClientGUICommon.BetterButton( edit_panel, 'X', self.Delete )
self._move_rule_down = ClientGUICommon.BetterButton( edit_panel, u'\u2193', self.MoveDown )
self._content_to_fetch = ClientGUICommon.BetterChoice( edit_panel )
self._content_to_fetch.Append( 'attribute', ClientParsing.HTML_CONTENT_ATTRIBUTE )
self._content_to_fetch.Append( 'string', ClientParsing.HTML_CONTENT_STRING )
self._content_to_fetch.Append( 'html', ClientParsing.HTML_CONTENT_HTML )
self._content_to_fetch.Bind( wx.EVT_CHOICE, self.EventContentChoice )
self._attribute_to_fetch = wx.TextCtrl( edit_panel )
( tag_rules, content_to_fetch, attribute_to_fetch, string_match, string_converter ) = formula.ToTuple()
self._string_match_button = StringMatchButton( edit_panel, string_match )
self._string_converter_button = StringConverterButton( edit_panel, string_converter )
#
test_panel = ClientGUICommon.StaticBox( self, 'test' )
test_panel.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_FRAMEBK ) )
self._test_panel = TestPanel( test_panel, self.GetValue, test_context = test_context )
#
for rule in tag_rules:
pretty_rule = rule.ToString()
self._tag_rules.Append( pretty_rule, rule )
self._content_to_fetch.SelectClientData( content_to_fetch )
self._attribute_to_fetch.SetValue( attribute_to_fetch )
self._UpdateControls()
#
udd_button_vbox = wx.BoxSizer( wx.VERTICAL )
udd_button_vbox.Add( ( 20, 20 ), CC.FLAGS_EXPAND_SIZER_BOTH_WAYS )
udd_button_vbox.Add( self._move_rule_up, CC.FLAGS_VCENTER )
udd_button_vbox.Add( self._delete_rule, CC.FLAGS_VCENTER )
udd_button_vbox.Add( self._move_rule_down, CC.FLAGS_VCENTER )
udd_button_vbox.Add( ( 20, 20 ), CC.FLAGS_EXPAND_SIZER_BOTH_WAYS )
tag_rules_hbox = wx.BoxSizer( wx.HORIZONTAL )
tag_rules_hbox.Add( self._tag_rules, CC.FLAGS_EXPAND_BOTH_WAYS )
tag_rules_hbox.Add( udd_button_vbox, CC.FLAGS_VCENTER )
ae_button_hbox = wx.BoxSizer( wx.HORIZONTAL )
ae_button_hbox.Add( self._add_rule, CC.FLAGS_VCENTER )
ae_button_hbox.Add( self._edit_rule, CC.FLAGS_VCENTER )
rows = []
rows.append( ( 'content to fetch:', self._content_to_fetch ) )
rows.append( ( 'attribute to fetch: ', self._attribute_to_fetch ) )
gridbox = ClientGUICommon.WrapInGrid( edit_panel, | |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Curve fitting functions for experiment analysis
"""
# pylint: disable = invalid-name
from typing import List, Dict, Tuple, Callable, Optional, Union
import numpy as np
import scipy.optimize as opt
from qiskit_experiments.exceptions import AnalysisError
from qiskit_experiments.curve_analysis.data_processing import filter_data
from qiskit_experiments.curve_analysis.curve_analysis_result_data import CurveAnalysisResultData
def curve_fit(
    func: Callable,
    xdata: np.ndarray,
    ydata: np.ndarray,
    p0: Union[Dict[str, float], np.ndarray],
    sigma: Optional[np.ndarray] = None,
    bounds: Optional[Union[Dict[str, Tuple[float, float]], Tuple[np.ndarray, np.ndarray]]] = None,
    **kwargs,
) -> CurveAnalysisResultData:
    r"""Perform a non-linear least squares to fit

    This solves the optimization problem

    .. math::
        \Theta_{\mbox{opt}} = \arg\min_\Theta \sum_i
            \sigma_i^{-2} (f(x_i, \Theta) - y_i)^2

    using :func:`scipy.optimize.curve_fit`.

    Args:
        func: a fit function `f(x, *params)`.
        xdata: a 1D float array of x-data.
        ydata: a 1D float array of y-data.
        p0: initial guess for optimization parameters.
        sigma: Optional, a 1D array of standard deviations in ydata
               in absolute units.
        bounds: Optional, lower and upper bounds for optimization
                parameters.
        kwargs: additional kwargs for :func:`scipy.optimize.curve_fit`.

    Returns:
        result containing ``popt`` the optimal fit parameters,
        ``popt_err`` the standard error estimates popt,
        ``pcov`` the covariance matrix for the fit,
        ``reduced_chisq`` the reduced chi-squared parameter of fit,
        ``dof`` the degrees of freedom of the fit,
        ``xrange`` the range of xdata values used for fit.

    Raises:
        AnalysisError: if the number of degrees of freedom of the fit is
                       less than 1, or the curve fitting fails.

    .. note::
        ``sigma`` is assumed to be specified in the same units as ``ydata``
        (absolute units). If sigma is instead specified in relative units
        the `absolute_sigma=False` kwarg of scipy
        :func:`~scipy.optimize.curve_fit` must be used. This affects the
        returned covariance ``pcov`` and error ``popt_err`` parameters via
        ``pcov(absolute_sigma=False) = pcov * reduced_chisq``
        ``popt_err(absolute_sigma=False) = popt_err * sqrt(reduced_chisq)``.
    """
    # Format p0 parameters if specified as dictionary
    if isinstance(p0, dict):
        param_keys = list(p0.keys())
        param_p0 = list(p0.values())

        # Convert dict bounds to the (lower, upper) tuple scipy expects,
        # ordered consistently with param_keys.
        if bounds:
            lower = [bounds[key][0] for key in param_keys]
            upper = [bounds[key][1] for key in param_keys]
            param_bounds = (lower, upper)
        else:
            param_bounds = ([-np.inf] * len(param_keys), [np.inf] * len(param_keys))

        # Wrap the keyword-argument fit function as a positional one.
        def fit_func(x, *params):
            return func(x, **dict(zip(param_keys, params)))

    else:
        param_keys = None
        param_p0 = p0
        if bounds:
            param_bounds = bounds
        else:
            param_bounds = ([-np.inf] * len(p0), [np.inf] * len(p0))
        fit_func = func

    # Check the degrees of freedom is greater than 0
    dof = len(ydata) - len(param_p0)
    if dof < 1:
        raise AnalysisError(
            "The number of degrees of freedom of the fit data and model "
            " (len(ydata) - len(p0)) is less than 1"
        )

    # Format non-number sigma values.
    # BUG FIX: np.isnan(sigma) was previously called unconditionally, which
    # raises TypeError when sigma is None -- its documented default.
    if sigma is not None:
        if np.all(np.isnan(sigma)):
            sigma = None
        else:
            sigma = np.nan_to_num(sigma)
            if np.count_nonzero(sigma) != len(sigma):
                # Sigma = 0 causes zero division error
                sigma = None

    # Override scipy.curve_fit default for absolute_sigma=True
    # if sigma is specified.
    if sigma is not None and "absolute_sigma" not in kwargs:
        kwargs["absolute_sigma"] = True

    # Run curve fit
    try:
        # pylint: disable = unbalanced-tuple-unpacking
        popt, pcov = opt.curve_fit(
            fit_func, xdata, ydata, sigma=sigma, p0=param_p0, bounds=param_bounds, **kwargs
        )
    except Exception as ex:
        raise AnalysisError(
            "scipy.optimize.curve_fit failed with error: {}".format(str(ex))
        ) from ex

    # Standard errors are the square roots of the covariance diagonal.
    popt_err = np.sqrt(np.diag(pcov))

    # Calculate the reduced chi-squared for fit
    yfits = fit_func(xdata, *popt)
    residues = (yfits - ydata) ** 2
    if sigma is not None:
        residues = residues / (sigma ** 2)
    reduced_chisq = np.sum(residues) / dof

    # Compute xdata range for fit
    xdata_range = [min(xdata), max(xdata)]

    result = {
        "popt": popt,
        "popt_keys": param_keys,
        "popt_err": popt_err,
        "pcov": pcov,
        "reduced_chisq": reduced_chisq,
        "dof": dof,
        "xrange": xdata_range,
    }
    return CurveAnalysisResultData(result)
def multi_curve_fit(
    funcs: List[Callable],
    series: np.ndarray,
    xdata: np.ndarray,
    ydata: np.ndarray,
    p0: np.ndarray,
    sigma: Optional[np.ndarray] = None,
    weights: Optional[np.ndarray] = None,
    bounds: Optional[Union[Dict[str, Tuple[float, float]], Tuple[np.ndarray, np.ndarray]]] = None,
    **kwargs,
) -> CurveAnalysisResultData:
    r"""Perform a linearized multi-objective non-linear least squares fit.

    This solves the optimization problem

    .. math::
        \Theta_{\mbox{opt}} = \arg\min_\Theta \sum_{k} w_k
            \sum_{i} \sigma_{k, i}^{-2}
            (f_k(x_{k, i}, \Theta) - y_{k, i})^2

    for multiple series of :math:`x_k, y_k, \sigma_k` data evaluated using
    a list of objective functions :math:`[f_k]` using
    :func:`scipy.optimize.curve_fit`.

    Args:
        funcs: a list of objective functions :math:`[f_0, f_1, ...]` where
               each function has signature :math`f_k(x, \Theta)`.
        series: a 1D int array that specifies the component objective
                function :math:`f_k` to evaluate corresponding x and y
                data with.
        xdata: a 1D float array of xdata.
        ydata: a 1D float array of ydata.
        p0: initial guess for optimization parameters.
        sigma: Optional, a 1D array of standard deviations in ydata
               in absolute units.
        weights: Optional, a 1D float list of weights :math:`w_k` for each
                 component function :math:`f_k`.
        bounds: Optional, lower and upper bounds for optimization
                parameters.
        kwargs: additional kwargs for :func:`scipy.optimize.curve_fit`.

    Returns:
        result containing ``popt`` the optimal fit parameters,
        ``popt_err`` the standard error estimates popt,
        ``pcov`` the covariance matrix for the fit,
        ``reduced_chisq`` the reduced chi-squared parameter of fit,
        ``dof`` the degrees of freedom of the fit,
        ``xrange`` the range of xdata values used for fit.

    Raises:
        AnalysisError: if the number of degrees of freedom of the fit is
                       less than 1, or the curve fitting fails.

    .. note::
        ``sigma`` is assumed to be specified in the same units as ``ydata``
        (absolute units). If sigma is instead specified in relative units
        the `absolute_sigma=False` kwarg of scipy
        :func:`~scipy.optimize.curve_fit` must be used. This affects the
        returned covariance ``pcov`` and error ``popt_err`` parameters via
        ``pcov(absolute_sigma=False) = pcov * reduced_chisq``
        ``popt_err(absolute_sigma=False) = popt_err * sqrt(reduced_chisq)``.
    """
    num_funcs = len(funcs)

    # Boolean masks selecting the data points belonging to each component series.
    series = np.asarray(series, dtype=int)
    masks = [series == k for k in range(num_funcs)]

    # Fold the per-series weights w_k into an effective sigma, since scipy's
    # curve_fit only weights via sigma: sigma_eff = sigma / sqrt(w_k).
    if weights is None:
        weighted_sigma = sigma
    else:
        weighted_sigma = np.zeros(ydata.size)
        for k, mask in enumerate(masks):
            scale = np.sqrt(weights[k])
            if sigma is None:
                weighted_sigma[mask] = 1 / scale
            else:
                weighted_sigma[mask] = sigma[mask] / scale

    # Stitch the component objectives into one function over the full x array.
    def combined_func(x, *args, **kwargs):
        out = np.zeros(x.size)
        for k, mask in enumerate(masks):
            out[mask] = funcs[k](x[mask], *args, **kwargs)
        return out

    # Delegate to the single-objective linearized fit.
    return curve_fit(combined_func, xdata, ydata, p0, sigma=weighted_sigma, bounds=bounds, **kwargs)
def process_curve_data(
    data: List[Dict[str, any]], data_processor: Callable, x_key: str = "xval", **filters
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
    """Return tuple of arrays (x, y, sigma) data for curve fitting.

    Args:
        data: list of circuit data dictionaries containing counts.
        data_processor: callable for processing data to y, sigma.
        x_key: key for extracting xdata value from metadata (Default: "xval").
        filters: additional kwargs to filter metadata on.

    Returns:
        tuple: ``(x, y, sigma)`` tuple of arrays of x-values,
        y-values, and standard deviations of y-values.
    """
    matching = filter_data(data, **filters)
    x_values = []
    y_means = []
    y_variances = []
    for datum in matching:
        x_values.append(datum["metadata"][x_key])
        mean, variance = data_processor(datum)
        y_means.append(mean)
        y_variances.append(variance)
    xdata = np.asarray(x_values, dtype=float)
    ydata = np.asarray(y_means, dtype=float)
    # standard deviation is the square root of the processed variance
    sigma = np.sqrt(np.asarray(y_variances, dtype=float))
    return xdata, ydata, sigma
def process_multi_curve_data(
data: List[Dict[str, any]],
data_processor: Callable,
x_key: str = "xval",
series_key: str = "series",
**filters,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""Return tuple of arrays (series, x, y, sigma) data for multi curve fitting.
Args
data: list of circuit data dictionaries.
data_processor: callable for processing data to y, sigma
x_key: key for extracting xdata value from metadata (Default: "xval").
series_key: key for extracting series value from metadata (Default: "series").
filters: additional kwargs to filter metadata on.
Returns:
tuple: ``(series, x, y, sigma)`` tuple of arrays of series values,
x-values, y-values, and standard deviations of y-values.
"""
filtered_data = filter_data(data, **filters)
size = len(filtered_data)
series = np.zeros(size, dtype=int)
xdata = np.zeros(size, dtype=float)
ydata = np.zeros(size, dtype=float)
ydata_var = np.zeros(size, dtype=float)
for i, datum in enumerate(filtered_data):
metadata = datum["metadata"]
series[i] = metadata[series_key]
xdata[i] = metadata[x_key]
y_mean, y_var = data_processor(datum)
ydata[i] = y_mean
ydata_var[i] | |
<filename>pter/searcher.py<gh_stars>0
import datetime
import string
from pytodotxt import Task
class Searcher:
def __init__(self, text=None, casesensitive=True, default_threshold=None, hide_sequential=True):
self.words = set()
self.not_words = set()
self.projects = set()
self.not_projects = set()
self.contexts = set()
self.not_contexts = set()
self.ids = set()
self.not_ids = set()
self.filenames = set()
self.not_filenames = set()
self.after = set()
self.refs = set()
self.done = None
self.show_hidden = False
self.priority = None
self.default_threshold = default_threshold
self.hide_sequential = hide_sequential
if default_threshold is None or len(default_threshold) == 0:
self.default_threshold = 'today'
self.threshold = self.default_threshold
self.due = None
self.created = None
self.completed = None
self.casesensitive = casesensitive
self.sources = None
self.task_by_id = None
self.text = text
if text is not None:
self.parse()
def reset(self):
    """Clear every parsed criterion back to its default value."""
    for attr in ('words', 'not_words', 'projects', 'not_projects',
                 'contexts', 'not_contexts', 'ids', 'not_ids',
                 'filenames', 'not_filenames', 'after', 'refs'):
        setattr(self, attr, set())
    self.done = None
    self.show_hidden = False
    self.priority = None
    # resolve a relative default ('today', '+3d', ...) into a date string
    self.threshold = get_relative_date(self.default_threshold, Task.DATE_FMT) or self.default_threshold
    self.due = None
    self.created = None
    self.completed = None
def update_sources(self, sources):
self.sources = sources
self.task_by_id = {}
self.parents = {}
with_sequence = set()
for source in self.sources:
for task in source.tasks:
if len(task.attr_id) == 0:
continue
if len(task.attr_after) > 0:
with_sequence.add(task)
for taskid in task.attr_id:
if taskid not in self.task_by_id:
self.task_by_id[taskid] = set()
self.task_by_id[taskid].add(task)
# for each task obtain a set of their direct and indirect parents
for task in with_sequence:
parents = self._parse_ids('after', task)
queue = parents.copy()
while len(queue) > 0:
otherid = queue.pop()
if otherid in self.parents:
parents |= self.parents[otherid]
break
parents.add(otherid)
if otherid not in self.task_by_id:
continue
for other in self.task_by_id[otherid]:
queue |= self._parse_ids('after', other).difference(parents)
if len(parents) > 0:
for taskid in task.attr_id:
if taskid not in self.parents:
self.parents[taskid] = set()
self.parents[taskid] |= parents
def _parse_ids(self, keyword, task):
these = set()
for value in task.attributes.get(keyword, []):
these |= {that for that in value.split(',')}
if self.casesensitive:
return {that.lower() for that in these}
else:
return these
def parse(self):
    """Parse ``self.text`` into the individual filter criteria.

    The expression is split on blanks. Each token is either a context
    (``@...``), a project (``+...``), one of the ``key:value`` filters,
    or a plain word searched for in the task description. A leading
    ``not:`` or ``-`` negates the token (only honoured by some filters).
    """
    self.reset()
    text = self.text
    if not self.casesensitive:
        text = text.lower()
    for part in text.split(' '):
        do_not = False
        # sorting directives are handled elsewhere, not part of filtering
        if part.startswith('sort:'):
            continue
        if part.startswith('not:'):
            do_not = True
            part = part[4:]
        elif part.startswith('-'):
            do_not = True
            part = part[1:]
        if len(part) == 0:
            continue
        if part.startswith('@') and len(part) > 1:
            if do_not:
                self.not_contexts.add(part[1:])
            else:
                self.contexts.add(part[1:])
        elif part.startswith('+') and len(part) > 1:
            if do_not:
                self.not_projects.add(part[1:])
            else:
                self.projects.add(part[1:])
        elif part.startswith('done:'):
            _, value = part.split(':', 1)
            if len(value) == 0:
                self.done = None
            else:
                # anything starting with 'y' counts as "yes"
                self.done = value.lower().startswith('y')
        elif part.startswith('hidden:') or part.startswith('h:'):
            _, value = part.split(':', 1)
            self.show_hidden = value.lower().startswith('y') or value == '1'
        elif part.startswith('pri:'):
            _, value = part.split(':', 1)
            # exact priority match: lower and upper bound coincide
            self.priority = (value, value)
        elif part.startswith('moreimportant:') or part.startswith('mi:'):
            _, value = part.split(':', 1)
            if self.priority is None:
                # 'ZZZZ' sorts behind any real priority, ' ' before any
                self.priority = ['ZZZZ', ' ']
            self.priority[0] = value.upper()
        elif part.startswith('lessimportant:') or part.startswith('li:'):
            _, value = part.split(':', 1)
            if self.priority is None:
                self.priority = ['ZZZZ', ' ']
            self.priority[1] = value.upper()
        elif part.startswith('due:'):
            _, value = part.split(':', 1)
            if value.lower().startswith('y'):
                # due:yes -> match any due date at all
                self.due = ['0000-00-00', '9999-99-99']
            else:
                self.due = [value, value]
        elif part.startswith('duebefore:') or part.startswith('db:'):
            _, value = part.split(':', 1)
            if self.due is None:
                self.due = ['0000-00-00', '9999-99-99']
            self.due[1] = value
        elif part.startswith('dueafter:') or part.startswith('da:'):
            _, value = part.split(':', 1)
            if self.due is None:
                self.due = ['0000-00-00', '9999-99-99']
            self.due[0] = value
        elif part.startswith('created:'):
            _, value = part.split(':', 1)
            self.created = [value, value]
        elif part.startswith('createdbefore:') or part.startswith('crb:'):
            _, value = part.split(':', 1)
            if self.created is None:
                self.created = ['0000-00-00', '9999-99-99']
            self.created[1] = value
        elif part.startswith('createdafter:') or part.startswith('cra:'):
            _, value = part.split(':', 1)
            if self.created is None:
                self.created = ['0000-00-00', '9999-99-99']
            self.created[0] = value
        elif part.startswith('completed:'):
            _, value = part.split(':', 1)
            self.completed = [value, value]
        elif part.startswith('completedbefore:') or part.startswith('cob:'):
            _, value = part.split(':', 1)
            if self.completed is None:
                self.completed = ['0000-00-00', '9999-99-99']
            self.completed[1] = value
        elif part.startswith('completedafter:') or part.startswith('coa:'):
            _, value = part.split(':', 1)
            if self.completed is None:
                self.completed = ['0000-00-00', '9999-99-99']
            self.completed[0] = value
        elif part.startswith('t:') or part.startswith('tickler:') or part.startswith('threshold:'):
            _, value = part.split(':', 1)
            if len(value) == 0:
                self.threshold = None
            else:
                # allow relative dates such as 'today' or '+3d'
                self.threshold = get_relative_date(value, Task.DATE_FMT) or value
        elif part.startswith('id:'):
            _, value = part.split(':', 1)
            if len(value) > 0:
                value = {that for that in value.split(',')}
                if do_not:
                    self.not_ids |= value
                else:
                    self.ids |= value
            else:
                # a bare 'id:' token is searched for as a literal word
                if do_not:
                    self.not_words.add('id:')
                else:
                    self.words.add('id:')
        elif part.startswith('after:') and self.after is not None:
            _, value = part.split(':', 1)
            if len(value) == 0:
                # empty after: disables the filter entirely
                self.after = None
            else:
                values = set(value.split(','))
                if do_not:
                    # negation is not supported for after:
                    pass
                else:
                    self.after |= values
        elif part.startswith('ref:') and self.refs is not None:
            _, value = part.split(':', 1)
            if len(value) > 0:
                values = set(value.split(','))
                if do_not:
                    # negation is not supported for ref:
                    pass
                else:
                    self.refs |= values
        elif part.startswith('file:'):
            _, value = part.split(':', 1)
            if len(value) > 0:
                if do_not:
                    self.not_filenames.add(value)
                else:
                    self.filenames.add(value)
        else:
            # anything else is a plain (sub)word of the description
            if do_not:
                self.not_words.add(part)
            else:
                self.words.add(part)
def match(self, task):
attrs = dict([(k if self.casesensitive else k.lower(), v)
for k, v in task.attributes.items()])
return all([self.match_words(task),
self.match_contexts(task),
self.match_projects(task),
self.match_done(task),
self.match_hidden(attrs),
self.match_priority(task),
self.match_ids(task),
self.match_filenames(task),
self.match_refs(task),
self.match_after(task, attrs),
self.match_due(attrs),
self.match_created(task),
self.match_completed(task),
self.match_threshold(attrs)])
def match_words(self, task):
if len(self.words) == 0 and len(self.not_words) == 0:
return True
description = task.description
if not self.casesensitive:
description = description.lower()
return all([word in description for word in self.words]) \
and not any([word in description for word in self.not_words])
def match_contexts(self, task):
if len(self.contexts) == 0 and len(self.not_contexts) == 0:
return True
contexts = task.contexts
if not self.casesensitive:
contexts = [context.lower() for context in contexts]
return all([context in contexts for context in self.contexts]) \
and not any([context in contexts for context in self.not_contexts])
def match_projects(self, task):
if len(self.projects) == 0 and len(self.not_projects) == 0:
return True
projects = task.projects
if not self.casesensitive:
projects = [project.lower() for project in projects]
return all([project in projects for project in self.projects]) \
and not any([project in projects for project in self.not_projects])
def match_hidden(self, attrs):
return 'h' not in attrs or (attrs['h'][0] == '1') == self.show_hidden
def match_done(self, task):
return self.done is None or task.is_completed == self.done
def match_priority(self, task):
if self.priority is None:
return True
pri = 'ZZZ'
if task.priority is not None:
pri = task.priority
if not self.casesensitive:
pri = pri.lower()
return (self.priority[0] == self.priority[1] and pri == self.priority[0]) or \
self.priority[0] > pri > self.priority[1]
def match_threshold(self, attrs):
if self.threshold is None:
return True
return 't' not in attrs or attrs['t'][0] <= self.threshold
def match_due(self, attrs):
if self.due is None:
return True
if 'due' not in attrs:
return False
due = [get_relative_date(self.due[0], Task.DATE_FMT) or self.due[0],
get_relative_date(self.due[1], Task.DATE_FMT) or self.due[1]]
if due[0] == due[1]:
return attrs['due'][0] == due[0]
return due[0] < attrs['due'][0] < due[1]
def match_created(self, task):
if self.created is None:
return True
if task.creation_date is None:
return False
created = [get_relative_date(self.created[0], Task.DATE_FMT) or self.created[0],
get_relative_date(self.created[1], Task.DATE_FMT) or self.created[1]]
task_created = task.creation_date.strftime(Task.DATE_FMT)
if created[0] == created[1]:
return created[0] == task_created
return created[0] < task_created < created[1]
def match_completed(self, task):
if self.completed is None:
return True
if task.completion_date is None:
return False
completed = [get_relative_date(self.completed[0], Task.DATE_FMT) or self.completed[0],
get_relative_date(self.completed[1], Task.DATE_FMT) or self.completed[1]]
task_completed = task.completion_date.strftime(Task.DATE_FMT)
if completed[0] == completed[1]:
return completed[0] == task_completed
return completed[0] < task_completed < completed[1]
def match_ids(self, task):
if len(self.not_ids) == 0 and len(self.ids) == 0:
return True
ids = self._parse_ids('id', task)
return (len(self.ids) == 0 or len(self.ids.intersection(ids)) > 0) and \
(len(self.not_ids) == 0 or len(self.not_ids.intersection(ids)) == 0)
def match_refs(self, task):
if len(self.refs) == 0:
return True
ids = self._parse_ids('after', task)
ids |= self._parse_ids('ref', task)
return len(ids) > 0 and \
len(self.refs.intersection(ids)) > 0
def match_filenames(self, task):
if len(self.filenames) + len(self.not_filenames) == 0:
return True
return hasattr(task, 'todotxt') is not None and \
task.todotxt is not None and \
all([pattern in str(task.todotxt.filename) for pattern in self.filenames]) and \
not any([pattern in str(task.todotxt.filename) for pattern in self.not_filenames])
def match_after(self, task, attrs):
if self.after is None:
return True
if len(self.after) == 0 and \
len(attrs.get('after', [])) == 0:
return True
if self.task_by_id is None:
return True
parents = self._parse_ids('after', task)
if len(self.after) == | |
i11iIiiIii . OOooOOo / Oo0Ooo * O0 % oO0o % iIii1I11I1II1
if 78 - 78: iIii1I11I1II1 - Ii1I * OoO0O00 + o0oOOo0O0Ooo + iII111i + iII111i
if 11 - 11: iII111i - OoO0O00 % ooOoO0o % iII111i / OoOoOO00 - OoO0O00
if 74 - 74: iII111i * O0
if 89 - 89: oO0o + Oo0Ooo
if 3 - 3: i1IIi / I1IiiI % I11i * i11iIiiIii / O0 * I11i
def III1ii1iII ( nonce , packet ) :
    # Validate and decode a LISP-Trace reply packet (Python 2 code,
    # machine-obfuscated names).
    #
    # Returns False when the packet is shorter than the fixed 12-byte
    # header, {} on any validation/decode failure, and the decoded JSON
    # payload on success.
    if ( len ( packet ) < 12 ) : return ( False )
    if 54 - 54: I1IiiI % II111iiii % II111iiii
    # First two 32-bit words: message type (must equal 0x90000000 in
    # network byte order) and a second field that is read but unused.
    iI1 = "II"
    i11Iiii = struct . calcsize ( iI1 )
    OoO000 , iI = struct . unpack ( iI1 , packet [ : i11Iiii ] )
    packet = packet [ i11Iiii : : ]
    if ( socket . ntohl ( OoO000 ) != 0x90000000 ) :
        print "Invalid LISP-Trace message"
        return ( { } )
    if 28 - 28: OOooOOo - IiII . IiII + OoOoOO00 - OoooooooOO + O0
    if 95 - 95: OoO0O00 % oO0o . O0
    # Next 64-bit word: the nonce echoed back by the traced system.
    iI1 = "Q"
    i11Iiii = struct . calcsize ( iI1 )
    I1i1I = struct . unpack ( iI1 , packet [ : i11Iiii ] ) [ 0 ]
    packet = packet [ i11Iiii : : ]
    if 80 - 80: OoOoOO00 - OoO0O00
    if 87 - 87: oO0o / I11i - i1IIi * OOooOOo / OoooooooOO . O0
    if 1 - 1: II111iiii - I11i / I11i
    if 46 - 46: Ii1I * OOooOOo - OoO0O00 * oO0o - I1Ii111
    # The reply must carry the same nonce that was sent out.
    if ( I1i1I != nonce ) :
        print "Invalid nonce, sent {}, received {}" . format ( nonce , I1i1I )
        return ( { } )
    if 83 - 83: OoooooooOO
    if 31 - 31: II111iiii - OOooOOo . I1Ii111 % OoOoOO00 - O0
    if ( len ( packet ) == 0 ) :
        print "No JSON data in payload"
        return ( { } )
    if 4 - 4: II111iiii / ooOoO0o . iII111i
    if 58 - 58: OOooOOo * i11iIiiIii / OoOoOO00 % I1Ii111 - I1ii11iIi11i / oO0o
    if 50 - 50: I1IiiI
    if 34 - 34: I1IiiI * II111iiii % iII111i * OoOoOO00 - I1IiiI
    if 33 - 33: o0oOOo0O0Ooo + OOooOOo * OoO0O00 - Oo0Ooo / oO0o % Ii1I
    # Remaining bytes are a JSON document describing the trace path.
    try :
        II1i1IiiIIi11 = json . loads ( packet )
    except :
        print "Invalid JSON data: '{}'" . format ( packet )
        return ( { } )
    if 47 - 47: iII111i
    return ( II1i1IiiIIi11 )
if 50 - 50: II111iiii - ooOoO0o * I1ii11iIi11i / I1Ii111 + o0oOOo0O0Ooo
if 88 - 88: Ii1I / I1Ii111 + iII111i - II111iiii / ooOoO0o - OoOoOO00
if 15 - 15: I1ii11iIi11i + OoOoOO00 - OoooooooOO / OOooOOo
if 58 - 58: i11iIiiIii % I11i
if 71 - 71: OOooOOo + ooOoO0o % i11iIiiIii + I1ii11iIi11i - IiII
if 88 - 88: OoOoOO00 - OoO0O00 % OOooOOo
if 16 - 16: I1IiiI * oO0o % IiII
def Oo000o ( jd ) :
    # Pretty-print the decoded LISP-Trace JSON `jd`: one block per
    # source/destination EID pair, one line per encap/decap hop.
    # Relies on helpers defined elsewhere in this file:
    # O0o0O00Oo0o0 (rewrites unresolved "?" RLOCs -- TODO confirm) and
    # O0ooo0O0oo0 (formats the hop's hostname for display -- TODO confirm).
    for I11IiI1I11i1i in jd :
        print "Path from {} to {}:" . format ( I11IiI1I11i1i [ "seid" ] , I11IiI1I11i1i [ "deid" ] )
        for iI1ii1Ii in I11IiI1I11i1i [ "paths" ] :
            # A hop record carries either an encap or a decap timestamp.
            if ( iI1ii1Ii . has_key ( "encap-timestamp" ) ) :
                oooo000 = iI1ii1Ii [ "encap-timestamp" ]
                iIIIi1 = "encap"
            if 20 - 20: i1IIi + I1ii11iIi11i - ooOoO0o
            if ( iI1ii1Ii . has_key ( "decap-timestamp" ) ) :
                oooo000 = iI1ii1Ii [ "decap-timestamp" ]
                iIIIi1 = "decap"
            if 30 - 30: II111iiii - OOooOOo - i11iIiiIii % OoOoOO00 - II111iiii * Ii1I
            oO00O0O0O = iI1ii1Ii [ "hostname" ]
            i1ii1iiI = iI1ii1Ii [ "drloc" ]
            # "?" marks a destination RLOC that needs rewriting.
            if ( i1ii1iiI . find ( "?" ) != - 1 ) : i1ii1iiI = O0o0O00Oo0o0 ( i1ii1iiI )
            if 87 - 87: ooOoO0o * Oo0Ooo % i11iIiiIii % OoOoOO00 - OOooOOo
            print " {} {}: {} -> {}, ts {}, node {}" . format ( iI1ii1Ii [ "node" ] , iIIIi1 , iI1ii1Ii [ "srloc" ] , i1ii1iiI , oooo000 , O0ooo0O0oo0 ( oO00O0O0O ) )
            if 91 - 91: iIii1I11I1II1 + I1Ii111
            if 31 - 31: IiII . OoOoOO00 . OOooOOo
            # Optionally show the recorded RTT/hop history for this hop.
            if ( iI1ii1Ii . has_key ( "recent-rtts" ) and iI1ii1Ii . has_key ( "recent-hops" ) ) :
                O0oOoOO = iI1ii1Ii [ "recent-rtts" ]
                oO00o0 = json . dumps ( iI1ii1Ii [ "recent-hops" ] )
                # strip unicode/quote markers for compact display
                oO00o0 = oO00o0 . replace ( "u" , "" )
                oO00o0 = oO00o0 . replace ( "'" , "" )
                oO00o0 = oO00o0 . replace ( '"' , "" )
                print " " ,
                print "recent-rtts {}, recent-hops {}" . format ( O0oOoOO , oO00o0 )
            if 55 - 55: Oo0Ooo + iIii1I11I1II1 / OoOoOO00 * oO0o - i11iIiiIii - Ii1I
            if 25 - 25: I1ii11iIi11i
        if 7 - 7: i1IIi / I1IiiI * I1Ii111 . IiII . iIii1I11I1II1
        print ""
if 13 - 13: OOooOOo / i11iIiiIii
if 2 - 2: I1IiiI / O0 / o0oOOo0O0Ooo % OoOoOO00 % Ii1I
if 52 - 52: o0oOOo0O0Ooo
if 95 - 95: Ii1I
if 87 - 87: ooOoO0o + OoOoOO00 . OOooOOo + OoOoOO00
if 91 - 91: O0
if 61 - 61: II111iiii
if 64 - 64: ooOoO0o / OoOoOO00 - O0 - I11i
def O0oOoOOOoOO ( eid ) :
    # Split an EID of the form "[<iid>]<address>" into instance-id and
    # address. Returns a tuple (iid-string, address, iid-defaulted) where
    # iid-defaulted is True when no "[...]" prefix was present and the
    # instance-id "0" was assumed.
    ii1ii11IIIiiI = True
    if 67 - 67: I11i * oO0o * I1ii11iIi11i + OOooOOo / i1IIi
    if 11 - 11: Ii1I + iII111i - ooOoO0o * oO0o % i11iIiiIii - I1Ii111
    if 83 - 83: I11i / I1IiiI
    if 34 - 34: IiII
    if 57 - 57: oO0o . I11i . i1IIi
    i11Iii = eid . find ( "]" )
    if ( i11Iii == - 1 ) :
        IiIIIi1iIi = "0"
    else :
        ii1ii11IIIiiI = False
        IiIIIi1iIi = eid [ 1 : i11Iii ]
        eid = eid [ i11Iii + 1 : : ]
    if 68 - 68: i11iIiiIii % I1ii11iIi11i + i11iIiiIii
    if 31 - 31: II111iiii . I1IiiI
    if 1 - 1: Oo0Ooo / o0oOOo0O0Ooo % iII111i * IiII . i11iIiiIii
    if 2 - 2: I1ii11iIi11i * I11i - iIii1I11I1II1 + I1IiiI . oO0o % iII111i
    if 92 - 92: iII111i
    # No ":" means the remainder is a DNS name or dotted IPv4 rather than
    # an IPv6 literal -- resolve it (may raise socket.gaierror on failure).
    if ( eid . find ( ":" ) == - 1 ) : eid = socket . gethostbyname ( eid )
    return ( IiIIIi1iIi , eid , ii1ii11IIIiiI )
if 25 - 25: Oo0Ooo - I1IiiI / OoooooooOO / o0oOOo0O0Ooo
if 12 - 12: I1IiiI * iII111i % i1IIi % iIii1I11I1II1
if 20 - 20: OOooOOo % Ii1I / Ii1I + Ii1I
if 45 - 45: oO0o - IiII - OoooooooOO - OoO0O00 . II111iiii / O0
if 51 - 51: O0 + iII111i
if 8 - 8: oO0o * OoOoOO00 - Ii1I - OoO0O00 * OOooOOo % I1IiiI
if 48 - 48: O0
if 11 - 11: I11i + OoooooooOO - OoO0O00 / o0oOOo0O0Ooo + Oo0Ooo . II111iiii
if 41 - 41: Ii1I - O0 - O0
if 68 - 68: OOooOOo % I1Ii111
def ooO00OO0 ( match_iid , match_eid , http , port , v4v6 ) :
i11111IIIII = ( "curl --silent --insecure -u root: {}://localhost:{}/lisp/" + "api/data/database-mapping" ) . format ( http , port )
if 19 - 19: OoOoOO00 * i1IIi
ii111iI1iIi1 = commands . getoutput ( i11111IIIII )
if 78 - 78: OoO0O00 . OOooOOo + OoO0O00 / I11i / OoO0O00
try :
oO0O00OoOO0 = json . loads ( ii111iI1iIi1 )
except :
return ( None , None , None , None )
if 82 - 82: II111iiii . IiII - iIii1I11I1II1 - IiII * II111iiii
if 77 - 77: iIii1I11I1II1 * OoO0O00
for oOooOo0 in oO0O00OoOO0 :
if ( oOooOo0 . has_key ( "eid-prefix" ) == False ) : continue
i1I1ii11i1Iii = oOooOo0 [ "eid-prefix" ]
i1I1ii11i1Iii = i1I1ii11i1Iii . split | |
<filename>src/pyrin/packaging/manager.py
# -*- coding: utf-8 -*-
"""
packaging manager module.
"""
import os
import inspect
from threading import Lock
from importlib import import_module
from time import time
import pyrin.application.services as application_services
import pyrin.configuration.services as config_services
import pyrin.utils.configuration as config_utils
import pyrin.utils.path as path_utils
import pyrin.utils.environment as env_utils
import pyrin.utils.misc as misc_utils
from pyrin.core.mixin import HookMixin
from pyrin.packaging import PackagingPackage
from pyrin.core.structs import DTO, Manager
from pyrin.packaging.base import Package
from pyrin.packaging.enumerations import PackageScopeEnum
from pyrin.packaging.hooks import PackagingHookBase
from pyrin.utils.custom_print import print_info, print_default
from pyrin.packaging.exceptions import InvalidPackageNameError, \
ComponentModuleNotFoundError, BothUnitAndIntegrationTestsCouldNotBeLoadedError, \
InvalidPackagingHookTypeError, CircularDependencyDetectedError, PackageNotExistedError, \
PackageIsIgnoredError, PackageIsDisabledError, SelfDependencyDetectedError, \
SubPackageDependencyDetectedError, PackageExternalDependencyError
class PackagingManager(Manager, HookMixin):
"""
packaging manager class.
"""
_lock = Lock()
hook_type = PackagingHookBase
invalid_hook_type_error = InvalidPackagingHookTypeError
package_class = PackagingPackage
REQUIRED_PACKAGES = ('application', 'packaging')
def __init__(self):
    """
    creates a new instance of PackagingManager.
    """
    super().__init__()
    # this flag indicates that application has been loaded.
    # it is required for environments in which server starts
    # multiple threads before application gets loaded.
    self._is_loaded = False
    # name of the root pyrin package (normally `pyrin`), resolved later.
    self._pyrin_package_name = None
    # full names of packages that are always loaded first (REQUIRED_PACKAGES).
    self._required_packages = []
    # holds the names of all application packages that should be loaded.
    self._all_packages = []
    # holds the name of loaded packages.
    self._loaded_packages = []
    # holds the name of disabled packages.
    self._disabled_packages = []
    # a dict containing each package name and all of its dependency package names.
    # in the form of:
    # {str package_name: list[str dependency_package_name]}
    self._dependency_map = DTO()
    # holds the full path of directories that are not a package (not having __init__.py)
    self._not_packages = []
    # configs will be filled from packaging config file.
    self._configs = DTO()
    # holds the root package names in which all test packages are resided.
    self._test_roots = []
    # holds the base roots for different test root packages.
    self._test_roots_bases = []
    # these will keep all loaded components for different
    # categories inside them. extended components in each
    # category are those that extending the exact component
    # of their parent.
    # in the form of: dict[str package_name: list[str] modules]
    self._pyrin_components = DTO()
    self._application_components = DTO()
    self._custom_components = DTO()
    self._test_components = DTO()
    self._unit_test_components = DTO()
    self._integration_test_components = DTO()
    self._extended_application_components = DTO()
    self._other_application_components = DTO()
    self._extended_unit_test_components = DTO()
    self._other_unit_test_components = DTO()
    self._extended_integration_test_components = DTO()
    self._other_integration_test_components = DTO()
def _create_config_file(self):
    """
    creates packaging config file in application settings path if not available.
    """
    store_name = self.package_class.CONFIG_STORE_NAMES[0]
    # never overwrite a config file the user already has.
    config_services.create_config_file(store_name, ignore_on_existed=True)
def _load_configs(self):
    """
    loads packaging configs from application's settings directory.
    """
    self._configs.clear()
    loaded = config_utils.load(self._get_config_file_path())
    # only the 'general' section of the store is relevant here.
    self._configs = loaded.get('general')
    self._extract_test_roots()
def _get_config_file_path(self):
    """
    gets packaging config file path.

    it looks for the file in top level application settings first
    and falls back to the one in pyrin's default settings.

    we have to re-implement this here, because configuration
    services is not present yet to be used.

    :rtype: str
    """
    file_name = '{store}.ini'.format(store=self.package_class.CONFIG_STORE_NAMES[0])
    return path_utils.get_first_available_file(
        application_services.get_settings_path(),
        application_services.get_default_settings_path(),
        file_name=file_name)
def _initialize(self):
    """
    initializes required data.
    """
    # reset every bookkeeping collection from any previous load.
    collections = (self._disabled_packages, self._not_packages,
                   self._dependency_map, self._all_packages,
                   self._loaded_packages, self._required_packages,
                   self._pyrin_components, self._application_components,
                   self._custom_components, self._test_components,
                   self._unit_test_components,
                   self._integration_test_components,
                   self._extended_application_components,
                   self._other_application_components,
                   self._extended_unit_test_components,
                   self._other_unit_test_components,
                   self._extended_integration_test_components,
                   self._other_integration_test_components)
    for collection in collections:
        collection.clear()
    self._pyrin_package_name = path_utils.get_pyrin_main_package_name()
    self._load_required_packages(self._pyrin_package_name)
    self._load_configs()
    self._resolve_python_path()
def _load_required_packages(self, pyrin_package):
    """
    loads all required package names.

    these packages are always loaded before any other package and
    they do not need to be handled by packaging package itself.

    :param str pyrin_package: the name of pyrin package.
                              it would always be `pyrin` in normal cases.
    """
    for short_name in self.REQUIRED_PACKAGES:
        qualified = '{pyrin}.{package}'.format(pyrin=pyrin_package,
                                               package=short_name)
        # required packages are considered loaded from the very start.
        self._required_packages.append(qualified)
        self._loaded_packages.append(qualified)
        self._all_packages.append(qualified)
def _extract_test_roots(self):
    """
    extracts the root package names in which all test packages are resided.
    """
    self._test_roots.clear()
    self._test_roots_bases.clear()
    # process unit tests first, then integration tests; duplicates
    # between the two are only added once.
    candidates = (self._configs.unit_test_package,
                  self._configs.integration_test_package)
    for package in candidates:
        if package in (None, '') or package.isspace():
            continue
        parts = package.split('.')
        if len(parts) > 1:
            # drop the leaf package to get the containing root.
            parts.pop()
        root = '.'.join(parts)
        if root not in self._test_roots:
            self._test_roots.append(root)
        if parts[0] not in self._test_roots_bases:
            self._test_roots_bases.append(parts[0])
def load_components(self, **options):
    """
    loads required packages and modules for application startup.

    :raises BothUnitAndIntegrationTestsCouldNotBeLoadedError: both unit and integration
                                                              tests could not be loaded
                                                              error.
    :raises PackageIsIgnoredError: package is ignored error.
    :raises PackageIsDisabledError: package is disabled error.
    :raises PackageNotExistedError: package not existed error.
    :raises SelfDependencyDetectedError: self dependency detected error.
    :raises SubPackageDependencyDetectedError: sub-package dependency detected error.
    :raises CircularDependencyDetectedError: circular dependency detected error.
    :raises PackageExternalDependencyError: package external dependency error.
    """
    # double-checked locking: cheap test first, then re-check under the
    # lock so concurrent threads cannot load the application twice.
    if self._is_loaded is True:
        return
    with self._lock:
        if self._is_loaded is True:
            return
        start_time = time()
        self._initialize()
        print_info('Loading application components...')
        self._find_pyrin_loadable_components()
        self._find_other_loadable_components()
        # load order matters: pyrin components first, then application
        # components, custom components and finally optional tests.
        self._load_components(self._pyrin_components, **options)
        self._load_components(self._extended_application_components, **options)
        self._load_components(self._other_application_components, **options)
        self._load_components(self._custom_components, **options)
        self._load_tests(**options)
        self._after_packages_loaded()
        print_info('Total of [{count}] packages loaded.'
                   .format(count=len(self._loaded_packages)))
        self._create_config_file()
        self._is_loaded = True
        end_time = time()
        # elapsed wall-clock time in milliseconds, one decimal place.
        duration = '{:0.1f}'.format((end_time - start_time) * 1000)
        print_info('Application loaded in [{duration}] milliseconds.'
                   .format(duration=duration))
        pyrin_version = application_services.get_pyrin_version()
        print_info('Pyrin version: [{version}].'.format(version=pyrin_version))
def _load_tests(self, **options):
    """
    loads test packages if needed.

    :raises BothUnitAndIntegrationTestsCouldNotBeLoadedError: both unit and integration
                                                              tests could not be loaded
                                                              error.
    :raises PackageIsIgnoredError: package is ignored error.
    :raises PackageIsDisabledError: package is disabled error.
    :raises PackageNotExistedError: package not existed error.
    :raises SelfDependencyDetectedError: self dependency detected error.
    :raises SubPackageDependencyDetectedError: sub-package dependency detected error.
    :raises CircularDependencyDetectedError: circular dependency detected error.
    :raises PackageExternalDependencyError: package external dependency error.
    """
    load_unit = self._configs.load_unit_test is True
    load_integration = self._configs.load_integration_test is True
    # unit and integration tests are mutually exclusive per run.
    if load_unit and load_integration:
        raise BothUnitAndIntegrationTestsCouldNotBeLoadedError('Both unit and '
                                                               'integration tests '
                                                               'could not be loaded '
                                                               'at the same time.')
    if load_unit or load_integration:
        # common test components are needed by either flavor.
        self._load_components(self._test_components, **options)
    if load_unit:
        self._load_components(self._extended_unit_test_components, **options)
        self._load_components(self._other_unit_test_components, **options)
    elif load_integration:
        self._load_components(self._extended_integration_test_components, **options)
        self._load_components(self._other_integration_test_components, **options)
def _after_packages_loaded(self):
    """
    this method will call `after_packages_loaded` method of all registered hooks.
    """
    for registered_hook in self._get_hooks():
        registered_hook.after_packages_loaded()
def _package_loaded(self, package_name, **options):
    """
    this method will call `package_loaded` method of all registered hooks.

    :param str package_name: name of the loaded package.
    """
    for registered_hook in self._get_hooks():
        registered_hook.package_loaded(package_name, **options)
def load(self, module_name, **options):
    """
    loads the specified module.

    :param str module_name: full module name.
                            example module_name = `pyrin.application.decorators`.

    :rtype: Module
    """
    # options are accepted for interface compatibility but unused here.
    return import_module(module_name)
def _load_component(self, package_name, module_names, component_name, **options):
    """
    loads the given component.

    :param str package_name: full package name to be loaded.
    :param list[str] module_names: full module names to be loaded.
    :param str component_name: component name of this package.

    :raises ComponentModuleNotFoundError: component module not found error.
    """
    self.load(package_name)
    # component module should be loaded first if available, in case of
    # any other module needed package services in top level objects.
    component_module = None
    if component_name is not None:
        component_module = self._merge_module_name(package_name, component_name)
    if component_module is not None and component_module in module_names:
        self.load(component_module, **options)
    elif component_module is not None and component_module not in module_names:
        raise ComponentModuleNotFoundError('Component module [{name}] not '
                                           'found in [{package}] package.'
                                           .format(name=component_module,
                                                   package=package_name))
    # load the remaining modules of the package (component module is
    # skipped because it has already been loaded above).
    for module in module_names:
        if module != component_module:
            self.load(module, **options)
    self._loaded_packages.append(package_name)
    self._package_loaded(package_name, **options)
    print_default('[{package}] package loaded. including [{module_count}] modules.'
                  .format(package=package_name,
                          module_count=len(module_names)))
    # required packages are loaded implicitly with the root pyrin
    # package, so report them here as well.
    if package_name == self._pyrin_package_name:
        for item in self._required_packages:
            print_default('[{package}] package loaded.'
                          .format(package=item))
def _load_components(self, components, **options):
"""
loads the given components considering their dependency on each other.
:param dict components: full package names and their
modules to be loaded.
:note components: dict[str package_name: list[str] modules]
:raises PackageIsIgnoredError: package is ignored error.
:raises PackageIsDisabledError: package is disabled error.
:raises PackageNotExistedError: package not existed error.
:raises SelfDependencyDetectedError: self dependency detected error.
:raises SubPackageDependencyDetectedError: sub-package dependency detected error.
:raises CircularDependencyDetectedError: circular dependency detected error.
:raises PackageExternalDependencyError: package external dependency error.
"""
# a dictionary containing all dependent package names and their respective modules.
# in the form of {str package_name: [str module]}.
dependent_components = DTO()
for package in components:
dependencies = []
package_class = self._get_package_class(package)
if package_class is not None:
dependencies = package_class.DEPENDS
self._validate_dependencies(package, dependencies)
# checking whether this package has any dependencies.
# if so, check those dependencies have been loaded or not.
# if not, then put this package into dependent_packages and
# load it later. otherwise load it now.
if (len(dependencies) <= 0 or
self._is_dependencies_loaded(dependencies) is True) and \
self._is_parent_loaded(package) is True:
instance = None
if package_class is not None:
instance = package_class()
instance.load_configs(config_services)
component_name = None
if instance is not None:
component_name = instance.COMPONENT_NAME
self._load_component(package, components[package], component_name, **options)
else:
dependent_components[package] = components[package]
# now, go | |
"""
Functions for explaining text classifiers.
"""
from functools import partial
import itertools
import json
import re
import numpy as np
import scipy as sp
import sklearn
from sklearn.utils import check_random_state
from . import explanation
from . import lime_base
class TextDomainMapper(explanation.DomainMapper):
    """Maps feature ids to words or word-positions in the original text."""
    def __init__(self, indexed_string):
        """Initializer.
        Args:
            indexed_string: lime_text.IndexedString, original string
        """
        self.indexed_string = indexed_string
    def map_exp_ids(self, exp, positions=False):
        """Maps ids to words or word-position strings.
        Args:
            exp: list of tuples [(id, weight), (id,weight)]
            positions: if True, also return word positions
        Returns:
            list of tuples (word, weight), or (word_positions, weight) if
            positions=True, where word_positions is the word joined by '_'
            with its character offsets (the offsets joined by '-').
            examples: ('bad', 1) or ('bad_3-6-12', 1)
        """
        if positions:
            exp = [('%s_%s' % (
                self.indexed_string.word(x[0]),
                '-'.join(
                    map(str,
                        self.indexed_string.string_position(x[0])))), x[1])
                   for x in exp]
        else:
            exp = [(self.indexed_string.word(x[0]), x[1]) for x in exp]
        return exp
    def visualize_instance_html(self, exp, label, div_name, exp_object_name,
                                text=True, opacity=True):
        """Adds text with highlighted words to visualization.
        Args:
            exp: list of tuples [(id, weight), (id,weight)]
            label: label id (integer)
            div_name: name of div object to be used for rendering(in js)
            exp_object_name: name of js explanation object
            text: if False, return empty
            opacity: if True, fade colors according to weight
        Returns:
            a javascript snippet calling show_raw_text on the js explanation
            object (empty string when text is False).
        """
        if not text:
            return u''
        # Round-trip through utf-8 with xmlcharrefreplace, then neutralize
        # markup characters so the raw text cannot inject HTML into the page.
        text = (self.indexed_string.raw_string()
                .encode('utf-8', 'xmlcharrefreplace').decode('utf-8'))
        text = re.sub(r'[<>&]', '|', text)
        # Expand each feature id into (word, positions-array, weight), then
        # into one (word, position, weight) triple per occurrence.
        exp = [(self.indexed_string.word(x[0]),
                self.indexed_string.string_position(x[0]),
                x[1]) for x in exp]
        all_occurrences = list(itertools.chain.from_iterable(
            [itertools.product([x[0]], x[1], [x[2]]) for x in exp]))
        # string_position returns numpy integers, which json.dumps rejects;
        # coerce each position to a plain int.
        all_occurrences = [(x[0], int(x[1]), x[2]) for x in all_occurrences]
        ret = '''
            %s.show_raw_text(%s, %d, %s, %s, %s);
            ''' % (exp_object_name, json.dumps(all_occurrences), label,
                   json.dumps(text), div_name, json.dumps(opacity))
        return ret
class IndexedString(object):
    """String with various indexes.

    Tokenizes the string while keeping the separators, so that the original
    text can be reconstructed and every token has a known character offset.
    """
    def __init__(self, raw_string, split_expression=r'\W+', bow=True,
                 mask_string=None):
        """Initializer.
        Args:
            raw_string: string with raw text in it
            split_expression: Regex string or callable. If regex string, will be used with re.split.
                If callable, the function should return a list of tokens.
            bow: if True, a word is the same everywhere in the text - i.e. we
                will index multiple occurrences of the same word. If False,
                order matters, so that the same word will have different ids
                according to position.
            mask_string: If not None, replace words with this if bow=False
                if None, default value is UNKWORDZ
        """
        self.raw = raw_string
        self.mask_string = 'UNKWORDZ' if mask_string is None else mask_string
        if callable(split_expression):
            tokens = split_expression(self.raw)
            # Interleave the tokenizer's tokens with the text between them so
            # that ''.join(as_list) reproduces the raw string.
            self.as_list = self._segment_with_tokens(self.raw, tokens)
            tokens = set(tokens)
            def non_word(string):
                # Anything the tokenizer did not emit is a separator.
                return string not in tokens
        else:
            # The capturing group makes re.split *keep* the separators in the
            # result, so as_list reconstructs the raw string; the trailing
            # '|$' alternative lets splitter.match double as a separator test
            # (it matches separator strings and the empty string only).
            splitter = re.compile(r'(%s)|$' % split_expression)
            self.as_list = [s for s in splitter.split(self.raw) if s]
            non_word = splitter.match
        self.as_np = np.array(self.as_list)
        # string_start[i] is the character offset of as_np[i] in the raw text.
        self.string_start = np.hstack(
            ([0], np.cumsum([len(x) for x in self.as_np[:-1]])))
        vocab = {}
        self.inverse_vocab = []
        self.positions = []
        self.bow = bow
        non_vocab = set()
        for i, word in enumerate(self.as_np):
            if word in non_vocab:
                continue
            if non_word(word):
                # Separators never get feature ids.
                non_vocab.add(word)
                continue
            if bow:
                # One id per distinct word; positions[id] lists every
                # occurrence index into as_list.
                if word not in vocab:
                    vocab[word] = len(vocab)
                    self.inverse_vocab.append(word)
                    self.positions.append([])
                idx_word = vocab[word]
                self.positions[idx_word].append(i)
            else:
                # One id per occurrence; positions[id] is a single index.
                self.inverse_vocab.append(word)
                self.positions.append(i)
        if not bow:
            self.positions = np.array(self.positions)
    def raw_string(self):
        """Returns the original raw string"""
        return self.raw
    def num_words(self):
        """Returns the number of tokens in the vocabulary for this document."""
        return len(self.inverse_vocab)
    def word(self, id_):
        """Returns the word that corresponds to id_ (int)"""
        return self.inverse_vocab[id_]
    def string_position(self, id_):
        """Returns a np array with indices to id_ (int) occurrences"""
        if self.bow:
            return self.string_start[self.positions[id_]]
        else:
            # positions[id_] is a scalar here; wrap it so the result is
            # still an array.
            return self.string_start[[self.positions[id_]]]
    def inverse_removing(self, words_to_remove):
        """Returns a string after removing the appropriate words.
        If self.bow is false, replaces word with UNKWORDZ instead of removing
        it.
        Args:
            words_to_remove: list of ids (ints) to remove
        Returns:
            original raw string with appropriate words removed.
        """
        mask = np.ones(self.as_np.shape[0], dtype='bool')
        mask[self.__get_idxs(words_to_remove)] = False
        if not self.bow:
            return ''.join(
                [self.as_list[i] if mask[i] else self.mask_string
                 for i in range(mask.shape[0])])
        return ''.join([self.as_list[v] for v in mask.nonzero()[0]])
    @staticmethod
    def _segment_with_tokens(text, tokens):
        """Segment a string around the tokens created by a passed-in tokenizer"""
        list_form = []
        text_ptr = 0
        for token in tokens:
            inter_token_string = []
            # Collect the characters between the previous token and this one.
            while not text[text_ptr:].startswith(token):
                inter_token_string.append(text[text_ptr])
                text_ptr += 1
                if text_ptr >= len(text):
                    raise ValueError("Tokenization produced tokens that do not belong in string!")
            text_ptr += len(token)
            if inter_token_string:
                list_form.append(''.join(inter_token_string))
            list_form.append(token)
        if text_ptr < len(text):
            # Trailing text after the final token.
            list_form.append(text[text_ptr:])
        return list_form
    def __get_idxs(self, words):
        """Returns indexes to appropriate words."""
        if self.bow:
            return list(itertools.chain.from_iterable(
                [self.positions[z] for z in words]))
        else:
            return self.positions[words]
class IndexedCharacters(object):
    """String indexed at the character level.

    Mirrors IndexedString, but every single character of the raw string is
    treated as a token.
    """

    def __init__(self, raw_string, bow=True, mask_string=None):
        """Initializer.

        Args:
            raw_string: string with raw text in it
            bow: if True, a char is the same everywhere in the text - i.e. we
                will index multiple occurrences of the same character. If
                False, order matters, so that the same character will have
                different ids according to position.
            mask_string: If not None, replace characters with this if
                bow=False; if None, default value is chr(0)
        """
        self.raw = raw_string
        self.as_list = list(self.raw)
        self.as_np = np.array(self.as_list)
        self.mask_string = chr(0) if mask_string is None else mask_string
        # Each character starts exactly at its own index.
        self.string_start = np.arange(len(self.raw))
        self.bow = bow
        self.inverse_vocab = []
        self.positions = []
        if bow:
            # One id per distinct character; positions[id] lists every
            # occurrence index.
            char_ids = {}
            for pos, ch in enumerate(self.as_np):
                if ch not in char_ids:
                    char_ids[ch] = len(char_ids)
                    self.inverse_vocab.append(ch)
                    self.positions.append([])
                self.positions[char_ids[ch]].append(pos)
        else:
            # One id per occurrence; id i simply maps to character i.
            for pos, ch in enumerate(self.as_np):
                self.inverse_vocab.append(ch)
                self.positions.append(pos)
            self.positions = np.array(self.positions)

    def raw_string(self):
        """Returns the original raw string"""
        return self.raw

    def num_words(self):
        """Returns the number of tokens in the vocabulary for this document."""
        return len(self.inverse_vocab)

    def word(self, id_):
        """Returns the character that corresponds to id_ (int)"""
        return self.inverse_vocab[id_]

    def string_position(self, id_):
        """Returns a np array with indices to id_ (int) occurrences"""
        if not self.bow:
            # positions[id_] is a scalar; wrap it to keep an array result.
            return self.string_start[[self.positions[id_]]]
        return self.string_start[self.positions[id_]]

    def inverse_removing(self, words_to_remove):
        """Returns a string after removing the appropriate characters.

        If self.bow is false, replaces each removed character with
        self.mask_string instead of removing it.

        Args:
            words_to_remove: list of ids (ints) to remove

        Returns:
            original raw string with appropriate characters removed.
        """
        keep = np.ones(self.as_np.shape[0], dtype=bool)
        keep[self.__get_idxs(words_to_remove)] = False
        if self.bow:
            return ''.join(ch for ch, k in zip(self.as_list, keep) if k)
        return ''.join(ch if k else self.mask_string
                       for ch, k in zip(self.as_list, keep))

    def __get_idxs(self, words):
        """Returns indexes to appropriate characters."""
        if not self.bow:
            return self.positions[words]
        return list(itertools.chain.from_iterable(
            self.positions[z] for z in words))
class LimeTextExplainer(object):
"""Explains text classifiers.
Currently, we are using an exponential kernel on cosine distance, and
restricting explanations to words that are present in documents."""
def __init__(self,
kernel_width=25,
kernel=None,
verbose=False,
class_names=None,
feature_selection='auto',
split_expression=r'\W+',
bow=True,
mask_string=None,
random_state=None,
char_level=False):
"""Init function.
Args:
kernel_width: kernel width for the exponential kernel.
kernel: similarity kernel that takes euclidean distances and kernel
width as input and outputs weights in (0,1). If None, defaults to
an exponential kernel.
verbose: if true, print local prediction values from linear model
class_names: list of class names, ordered according to whatever the
classifier is using. If not present, class names will be '0',
'1', ...
feature_selection: feature selection method. can be
'forward_selection', 'lasso_path', 'none' or 'auto'.
See function 'explain_instance_with_data' in lime_base.py for
details on what each of the options does.
split_expression: Regex string or callable. If regex string, will be used with re.split.
If callable, the function should return a list of tokens.
bow: if True (bag of words), will perturb input data by removing
all occurrences of individual words or characters.
Explanations will be in terms of these words. Otherwise, will
explain in terms of word-positions, so that a word may be
important the first time it appears and unimportant the second.
Only set to false if the classifier uses word order in some way
(bigrams, etc), or if you set char_level=True.
mask_string: String used to mask tokens or characters if bow=False
if None, will be 'UNKWORDZ' if char_level=False, chr(0)
otherwise.
random_state: an integer or numpy.RandomState that will be used to
generate random numbers. If None, the random state will be
initialized using the internal numpy seed.
char_level: an boolean identifying that we treat each character
as an independent occurence in the string
"""
if kernel is None:
def kernel(d, kernel_width):
return np.sqrt(np.exp(-(d ** 2) / kernel_width ** 2))
kernel_fn = partial(kernel, kernel_width=kernel_width)
self.random_state = check_random_state(random_state)
self.base = lime_base.LimeBase(kernel_fn, verbose,
| |
upper boundary
Returns:
Booleans: True/False.
Examples:
>>> model.set_boundary("reaction", 'HEX1', 0.001, 100.0)
See Also:
set_constrain
"""
if group == "reaction":
dic_temp = self.reactions
elif group == "metabolite":
dic_temp = self.metabolites
elif group == "reversible":
dic_temp = self.reversible
else:
if self.configuration['callbacklevel'] >= 1:
print('ERROR:', group, ' is wrong group')
return False
if id in dic_temp:
dic_temp[id]['lb'] = float(lb)
dic_temp[id]['ub'] = float(ub)
if self.configuration['callbacklevel'] >= 5:
print("lower and upper boundaries of", id, ' are set to be ',
lb, "and", ub)
return True
if self.configuration['callbacklevel'] >= 1:
print('ERROR:', id, ' not existed in the model')
return False
def set_constraints_from_state_dict(self, dict):
"""Reaction types, value, stdev, lb, and ub are set from the state dict data at once
Args:
dict (dict): Dictionary of flux. Data in 'value', 'stdev', 'lb', 'ub' and 'type' fields are used.
Examples:
>>> model.set_constraints_from_state_dict(flux_dict)
"""
#
# preparation of data
#
counter = 0
for i, (group, id) in enumerate(self.vector["ids"]):
if id in dict[group]:
type = dict[group][id]['type']
value = dict[group][id]['value']
stdev = dict[group][id]['stdev']
lb = dict[group][id]['lb']
ub = dict[group][id]['ub']
self.set_constrain(group, id, type, value=value, stdev=stdev)
self.set_boundary(group, id, lb, ub)
counter = counter + 1
if self.configuration['callbacklevel'] >= 5:
print(id, ' is set as ', type, group, ' with value at ',
value, ' and stdev at ', stdev, " and boundaries",
lb, ub, '.')
self.update()
if self.configuration['callbacklevel'] >= 3:
print("Batch setting of", counter, "constrains.")
return True
def generate_state_dict(self, tmp_r):
"""Generator of a state dict from a given vector.
Args:
tmp_r (array): tmp_r = numpy.dot(matrixinv, Rm_intial)
Returns:
dict : Dictionary of metabolic state inclucing metabolic flux and metabolite pool size levels.
Examples:
>>> state_dict = model.generate_state_dict(tmp_r)
"""
#
# preparation of data
#
flux_dict = {}
for i, id in enumerate(self.reaction_ids):
flux_dict[id] = {
'value': tmp_r[self.reactions[id]['position_in_tmp_r']],
'stdev': self.reactions[id]['stdev'],
'type': self.reactions[id]['type'],
'reversible': self.reactions[id]['reversible'],
'order': self.reactions[id]['order'],
'lb': self.reactions[id]['lb'],
'ub': self.reactions[id]['ub'],
}
conc_dict = {}
for i, id in enumerate(self.metabolites):
conc_dict[id] = {
'value': self.metabolites[id]['value'],
'stdev': self.metabolites[id]['stdev'],
'type': self.metabolites[id]['type'],
'order': self.metabolites[id]['order'],
'lb': self.metabolites[id]['lb'],
'ub': self.metabolites[id]['ub'],
}
if id in self.metabolite_ids:
conc_dict[id]['value'] = tmp_r[self.metabolites[id]
['position_in_tmp_r']]
reversible_dict = {}
for i, id in enumerate(self.reversible_ids):
reversible_dict[id] = {
'value': tmp_r[self.reversible[id]['position_in_tmp_r']],
'stdev': self.reversible[id]['stdev'],
'type': self.reversible[id]['type'],
'order': self.reversible[id]['order'],
'lb': self.reversible[id]['lb'],
'ub': self.reversible[id]['ub'],
}
return {
"reaction": flux_dict,
"metabolite": conc_dict,
"reversible": reversible_dict
}
    def generate_carbon_source_templete(self):
        """Deprecated alias of :meth:`generate_carbon_source_template`.

        Kept (with the original misspelling) for backward compatibility.
        """
        return self.generate_carbon_source_template()
def generate_carbon_source_template(self):
"""Generator of a templete of CarbonSourse instance. Labeling information of carbon sources will be added to the instance.
Carbon source compounds are derived from the metabolic model (//Metabolites)
Args:
Not required.
Examples:
>>> carbon_source_idv = model.generate_carbon_source_templete()
See Also:
CarbonSource.py
"""
#
#
cs = {}
#for each line in mass data
for compound in self.carbon_source:
cs[compound] = {
'IDV': self.carbon_source[compound]['IDV'][:],
'size': self.carbon_source[compound]['size']
}
# Initial data is full 12C without natural 13C
for compound in cs:
cs[compound]['IDV'][0] = 1.0
return carbonsource.CarbonSource(cs)
    def generate_mdv(self, flux, carbon_sources, timepoint=[], startidv=[]):
        """
        Generate a MdvData instance including MDV data calculated from the
        given flux and carbon sources.

        Args:
            flux (dict): Dictionary of metabolic state including metabolic flux and metabolite pool size levels.
            carbon_sources (instance): Instance of CarbonSource
            timepoint (array): For INST-13C MFA. An array of time points of measurement in MDV.
                When empty, a steady-state MDV is generated instead.
            startidv (array): array of idv as starting isotope distribution for INST
        Returns:
            instance : MdvData instance (or MdvTimeCourseData when timepoint is given)
        Examples:
            >>> mdv = model.generate_mdv(flux, mdv_carbon_sources)
        See Also:
            generate_carbonsource_MDV
        History:
            140912 calc_MDV_from_flux instead of calmdv is used.

        Note:
            The mutable defaults (timepoint=[], startidv=[]) are only read,
            never mutated, so the shared-default pitfall does not apply here.
        """
        # Flatten the state dict into the flux vector order expected by
        # calc_MDV_from_flux.
        tmp_r = [
            flux[group][id]['value'] for (group, id) in self.vector["ids"]
        ]
        mdv_carbon_sources = carbon_sources.generate_dict()
        #Call calmdv via calc_MDV_from_flux function in mfapy.optimize
        if len(timepoint) == 0:
            # Steady-state mode: one MDV per target fragment.
            mdv_exp, mdv_hash = optimize.calc_MDV_from_flux(
                tmp_r, self.target_fragments.keys(), mdv_carbon_sources,
                self.func)
            mdv_data = mdv.MdvData(self.target_fragments)
            for fragment, item in mdv_hash.items():
                for number in range(len(item)):
                    if mdv_data.has_data(fragment, number):
                        mdv_data.set_data(fragment, number,
                                          mdv_hash[fragment][number], 1.0,
                                          "use")
            return mdv_data
        else:
            # INST mode: only pass a starting IDV when its length matches.
            startidv_temp = []
            if len(startidv) == len(self.emu_order_in_y):
                startidv_temp = list(startidv)
            mdv_exp, mdv_hash = optimize.calc_MDV_from_flux(
                tmp_r,
                self.target_fragments.keys(),
                mdv_carbon_sources,
                self.func,
                timepoint=timepoint,
                y0temp=startidv_temp)
            #
            # Create the mdv_timecourse instance
            #
            mdv_timecourse = mdv.MdvTimeCourseData()
            for point in timepoint:
                mdv_timecourse.add_time_point(
                    point, mdv.MdvData(self.target_fragments))
            for fragment, item in mdv_hash.items():
                for i, ratio in enumerate(item):
                    for number in range(len(ratio)):
                        if mdv_timecourse.has_data(timepoint[i], fragment,
                                                   number):
                            mdv_timecourse.set_data(
                                timepoint[i], fragment, number,
                                mdv_hash[fragment][i][number], 1.0, "use")
            return mdv_timecourse
    def set_experiment(self, name, mdv, carbon_sources, startidv=[]):
        """
        Setter of an 'experiment' to metabolic model. Here an 'experiment' indicates
        a set of a carbon source labeling pattern and a measured MDV data.
        Parallel labeling experiments can be performed by setting multiple sets of 'experiment'.
        Args:
            name (str): name of the experiment (unique)
            mdv (instance): MDVdata instance including measured MDV data of target fragments.
            carbon_sources (instance): Instance of carbon source object
            startidv (array): array of idv as starting isotope distribution for INST.
                Only used when its length matches self.emu_order_in_y
                (mutable default is never mutated here).
        Returns:
            bool: always True.
        Examples:
            >>> model.set_experiment('ex1', mdv1, cs1)
            >>> model.set_experiment('ex2', mdv2, cs2, startidv)
        """
        ids, mdv_exp_original, mdv_std_original, mdv_use, target_emu_list, rawdata = mdv.generate_observed_mdv(
        )
        number_of_measurement = mdv.get_number_of_measurement()
        mdv_carbon_sources = carbon_sources.generate_dict()
        self.experiments[name] = {
            'mode': "ST",
            'mdv_exp_original': list(mdv_exp_original),
            'mdv_std_original': list(mdv_std_original),
            'mdv_use': list(mdv_use),
            'mdv_ids': list(ids),
            'target_emu_list': list(target_emu_list),
            'mdv_carbon_sources': mdv_carbon_sources,
            'number_of_measurement': number_of_measurement
        }
        if mdv.mode == "timecourse":
            self.experiments[name]["mode"] = "INST"
            self.experiments[name]["timepoint"] = mdv.get_timepoints()
        #
        # Set initial y0 in diffmdv
        #
        self.experiments[name]["y0"] = [0.0] * len(self.emu_order_in_y)
        if len(startidv) == len(self.emu_order_in_y):
            self.experiments[name]["y0"] = list(startidv)
        else:
            for position, emu in enumerate(self.emu_order_in_y):
                # Only seed the M+0 entry of each EMU; M+1/M+2 are derived
                # below from it.
                if emu[1] == 0:
                    # 200517 ver 056 modified
                    # Presumably the natural abundances of 12C (0.9893) and
                    # 13C (0.0107) -- TODO confirm against literature values.
                    H0ratio = 0.9893
                    H1ratio = 0.0107
                    (stem, pos) = emu[0].split("_")
                    pos = pos.replace(
                        ':', '')  #200517 ver 056 modified: carbon-transition notation changed
                    number_of_carbons = len(pos)
                    # Binomial natural-abundance distribution over the
                    # fragment's carbons: M+0, M+1 and (if >2 carbons) the
                    # remainder into M+2.
                    self.experiments[name]["y0"][
                        position] = H0ratio**number_of_carbons
                    self.experiments[name]["y0"][
                        position +
                        1] = (H0ratio**(number_of_carbons -
                                        1.0)) * H1ratio * number_of_carbons
                    if number_of_carbons > 2:
                        self.experiments[name]["y0"][
                            position +
                            2] = 1.0 - H0ratio**number_of_carbons - (
                                H0ratio**(number_of_carbons - 1.0
                                          )) * H1ratio * number_of_carbons
        if self.configuration['callbacklevel'] >= 3:
            print("Set experiment: ", name,
                  ' was added to the metabolic model.')
        return True
def calc_idv(self, flux, carbon_sources):
"""Calc IDV from given flux and carbon_sources.
Isotopomer distribution vector (IDV) was used to calc MDV at time = 0 in the INST mode.
Examples:
>>> idv = model.calc_idv(flux, carbon_sources)
Args:
flux (dict): Dictionary of metabolic state inclucing metabolic flux and metabolite pool size levels.
carbon_sources (instance): Instance of carbon source object
Returns:
array : array of idv
See Also:
set_experiment()
generate_mdv()
"""
calmdv = self.func["calmdv"]
matrixinv = self.matrixinv
Rm_initial = self.vector["Rm_initial"]
stoichiometric_num = self.numbers['independent_start']
reaction_num = self.numbers['total_number']
Rm_ind = [
flux[group][id]["value"]
for (group, id) in self.vector['independent_flux']
]
Rm = numpy.array(list(Rm_initial))
Rm[stoichiometric_num:reaction_num] = list(Rm_ind)
tmp_r = numpy.dot(matrixinv, Rm)
target_emu_list = list(self.target_fragments)
mdv_carbon_sources = carbon_sources.mdv_carbon_sources
mdv_original_temp, mdv_hash = calmdv(list(tmp_r), target_emu_list,
mdv_carbon_sources)
X = mdv_hash['X_list']
for i, x in enumerate(X):
if x <= 0:
X[i] = 0.0
if x >= 1.0:
X[i] = 1.0
X_dict = dict(zip(self.emu_order_in_X, X))
y0 = [0.0] * len(self.emu_order_in_y)
for position, emu in enumerate(self.emu_order_in_y):
y0[position] = X_dict[emu]
return y0
def clear_experiment(self):
"""Clear all 'experiment set(s)' in the metabolic model.
Args:
Not required
Examples:
>>> model.clear_experiment()
See Also:
model.set_experiment()
History:
Newly developed at 4/9/2014
"""
names = list(self.experiments.keys())
self.experiments = {}
if self.configuration['callbacklevel'] >= 3:
print("Clear experiment: ", names,
' are removed from the metabolic model.')
return True
    def generate_state(self, template=[]):
        """
        Generate a random metabolic flux distribution (initial state).

        Uses the 'initial_search_iteration_max' parameter from
        self.configuration.

        Args:
            template (dict): optional state dict used as a template for the
                random search; when empty, a fully random state is produced.
                The mutable default is only read, never mutated.

        Returns:
            * flux (dict): Dictionary of metabolic state including metabolic
              flux and metabolite pool size levels.
            * state: status of the generation returned by
              optimize.initializing_Rm_fitting.

        Examples:
            >>> flux, state = model.generate_state()
        """
        #
        # Set parameters
        #
        numbers = copy.deepcopy(self.numbers)
        vectors = copy.deepcopy(self.vector)
        configuration = copy.deepcopy(self.configuration)
        matrixinv = self.matrixinv
        initial_search_iteration_max = configuration[
            "initial_search_iteration_max"]
        # NOTE(review): ub/lb are computed here but never used below --
        # possibly left over from an older implementation.
        ub = [self.reactions[x]['ub'] for x in self.reaction_ids]
        lb = [self.reactions[x]['lb'] for x in self.reaction_ids]
        if len(template) > 0:
            # Flatten the template state dict into vector order.
            template = [
                template[type][id]["value"]
                for (type, id) in self.vector["ids"]
            ]
        tmp_r, Rm_temp, Rm_ind, state = optimize.initializing_Rm_fitting(
            numbers, vectors, matrixinv, template,
            initial_search_iteration_max)
        return self.generate_state_dict(tmp_r), state
def generate_initial_states(self,
iterations=100,
initial_states=1,
method='normal',
template=[]):
"""
Initial metabolic states are randomly generated for "iterations" times from which
better states with lower RSS (number_of_initial_states) were selected.
The metabolic flux distribution is a initial flux data for model fitting.
Parameters
----------
method:
'normal' : Sequencial | |
import numpy as np
import h5py
import scipy.io
from math import floor
from enum import Enum
from collections import namedtuple as tuple
# from keras.preprocessing.image import Iterator # For random batch sizes
class Dataset(Enum):
    """Position of each split inside (train, valid, test) tuples."""
    TRAIN=0
    VALID=1
    TEST=2
class DataFile(Enum):
    """Position of each field inside a (file name, x column, y column) tuple."""
    NAME=0
    X=1
    Y=2
class ModelData(object):
"""
Warning: Do not use generators with this or previous versions because
they do not generate random permutations of the data yet
"""
    def __init__(self,
                 shrink_size=(1.0, 1.0, 1.0),
                 batch_size=100,
                 train=('data/processed/train.mat', 'trainxdata', 'traindata'),
                 valid=('data/processed/valid.mat', 'validxdata', 'validdata'),
                 test=('data/processed/test.mat', 'testxdata', 'testdata')):
        """
        # Arguments
            shrink_size:
                tuple of (train, valid, test) ratios of the truncated
                datasets to their full datasets
            batch_size:
                number of samples per mini batch
            train, valid, test:
                (file name, x column, y column) tuples, indexed via DataFile
        """
        self._shrink_size_=shrink_size
        self._batch_size_=batch_size
        self._train_=train
        self._valid_=valid
        self._test_=test
def open_train_file(self, train=None):
"""
Opens the file based off of the column names specified in 'train'
# Arguments
train:
tuple (file name, x column, y column)
to pull the x and y data from
# Returns
dict containing the contents of a h5py loaded file
"""
if (train is not None):
self._train_=train
trainmat = h5py.File(self._train_[DataFile.NAME.value])
return trainmat
def open_valid_file(self, valid=None):
"""
Opens the file based off of the column names specified in 'valid'
# Arguments
valid:
tuple (file name, x column, y column)
to pull the x and y data from
# Returns
dict containing the contents of a scipy loaded file
"""
if (valid is not None):
self._valid_=valid
validmat = scipy.io.loadmat(self._valid_[DataFile.NAME.value])
return validmat
def open_test_file(self, test=None):
"""
Opens the file based off of the column names specified in 'test'
# Arguments
test:
tuple (file name, x column, y column)
to pull the x and y data from
# Returns
dict containing the contents of a scipy loaded file
"""
if (test is not None):
self._test_=test
testmat = scipy.io.loadmat(self._test_[DataFile.NAME.value])
return testmat
def set_shrink_size(self, train=1.0, valid=1.0, test=1.0):
"""
Sets the float ratio of each truncated dataset to their respective full datasets.
"""
self._shrink_size_=(train, valid, test)
def set_nb_samples(self, train=None, valid=None, test=None):
"""
Set the number of samples to use before moving to next epoch
"""
if train is not None:
self.nb_train_samples
def set_batch_size(self, batch_size=None):
"""
Sets the self._batch_size_
"""
self._batch_size_=batch_size
def _get_shrunk_array(self, f, ind, shrink_size=(1.0, 1.0, 1.0)):
"""
The shrink_size must be a tuple of the same size as the array's shape
For now, this is assumed to be 2D or 3D
# Returns
numpy array of reduced dataset size
"""
dim = f[ind].shape
if (f[ind].ndim == 2):
return np.array(f[ind][:int(round(shrink_size[0] * dim[0])),
:int(round(shrink_size[1] * dim[1]))])
elif (f[ind].ndim == 3):
return np.array(f[ind][:int(round(shrink_size[0] * dim[0])),
:int(round(shrink_size[1] * dim[1])),
:int(round(shrink_size[2] * dim[2]))])
def get_data_tuples(self, shrink_size=(1.0, 1.0, 1.0)):
"""
Returns truncated versions of test, validation, and train data.
The shrink_size is a tuple of the ratio of
the truncated datasets to the full datasets.
# Arguments
shrink_size:
tuple of the ratios (train, valid, test)
of the truncated dataset to the full dataset
# Returns
named tuples ((X_train, y_train),
(X_valid, y_valid),
(X_test, y_test))
"""
return (self.get_train_tuple(shrink_size=shrink_size[Dataset.TRAIN.value]),
self.get_valid_tuple(shrink_size=shrink_size[Dataset.VALID.value]),
self.get_test_tuple(shrink_size=shrink_size[Dataset.TEST.value]))
    def get_train_tuple(self, shrink_size=None):
        """
        Returns truncated version of the train data from a h5py file.
        The shrink_size is the ratio of
        the truncated dataset to the full dataset.
        Passing a shrink_size will not modify the self._shrink_size_
        # Arguments
            shrink_size:
                float ratio of the truncated dataset to the full dataset
        # Returns
            named tuple (X_train, y_train)
        """
        if shrink_size is None:
            shrink_size = self._shrink_size_[Dataset.TRAIN.value]
        trainmat = self.open_train_file()
        # Reduce number of samples: the ratio is applied to the last axis
        # (samples) only.
        # H5py file is in (columns, rows, samples) and (classes, samples),
        # so the transpose yields (samples, columns, rows) and the .T yields
        # (samples, classes).
        X_train = np.transpose(
            self._get_shrunk_array(trainmat, self._train_[DataFile.X.value], (1, 1, shrink_size)),
            axes = (2, 0, 1))
        y_train = self._get_shrunk_array(trainmat, self._train_[DataFile.Y.value], (1, shrink_size)).T
        # NOTE: 'tuple' is the namedtuple factory aliased at import time.
        return tuple('train_tuple', 'X_train y_train')(X_train, y_train)
    def get_valid_tuple(self, shrink_size=None):
        """
        Returns truncated version of the validation data from a scipy.io file
        The shrink_size is the ratio of
        the truncated dataset to the full dataset.
        Passing a shrink_size will not modify the self._shrink_size_
        # Arguments
            shrink_size:
                float ratio of the truncated dataset to the full dataset
        # Returns
            named tuple (X_valid, y_valid)
        """
        if shrink_size is None:
            shrink_size = self._shrink_size_[Dataset.VALID.value]
        validmat = self.open_valid_file()
        # Reduce number of samples: the ratio is applied to the first axis
        # (samples) only.
        # Scipy.io mat is in (samples, rows, columns) and (samples, classes),
        # so the transpose yields (samples, columns, rows).
        X_valid = np.transpose(
            self._get_shrunk_array(validmat,
                                   self._valid_[DataFile.X.value],
                                   (shrink_size, 1, 1)),
            axes = (0, 2, 1))
        y_valid = self._get_shrunk_array(validmat,
                                         self._valid_[DataFile.Y.value],
                                         (shrink_size, 1))
        # NOTE: 'tuple' is the namedtuple factory aliased at import time.
        return tuple('valid_tuple', 'X_valid y_valid')(X_valid, y_valid)
def get_test_tuple(self, shrink_size=None):
"""
Returns truncated version of the test data from a scipy.io file
The shrink_size is the ratio of
the truncated dataset to the full dataset.
Passing a shrink_size will not modify the self._shrink_size_
# Arguments
shrink_size:
float ratio of the truncated dataset to the full dataset
# Returns
named tuple (X_test, y_test)
"""
if shrink_size is None:
shrink_size = self._shrink_size_[Dataset.TEST.value]
testmat = self.open_test_file()
# Reduce number of samples
# Scipy.io mat is in (samples, rows, columns) and (samples, classes)
X_test = np.transpose(
self._get_shrunk_array(testmat,
self._test_[1],
(shrink_size, 1, 1)),
axes = (0, 2, 1))
y_test = self._get_shrunk_array(testmat,
self._test_[2],
(shrink_size, 1))
return tuple('test_tuple', 'X_test y_test')(X_test, y_test)
def get_data_tuples_generator(self,
shrink_size=None,
nb_samples=None,
batch_size=None):
"""
Returns three generator that yield truncated versions
of the training, validation, and test data.
This function will not modify the class' member variables
# Arguments
shrink_size:
tuple of the ratios (train, valid, test)
of the truncated dataset to the full dataset
nb_samples:
tuple of the number of samples (train, valid, test)
to use from each dataset
batch_size:
size of each mini batch during training
"""
if shrink_size is None:
shrink_size = self._shrink_size_
if nb_samples is None:
nb_samples = (None, None, None)
if batch_size is None:
batch_size = self._batch_size_
return (self.get_train_tuple_generator(shrink_size=shrink_size[Dataset.TRAIN.value],
nb_samples=nb_samples[Dataset.TRAIN.value],
batch_size=batch_size),
self.get_valid_tuple_generator(shrink_size=shrink_size[Dataset.VALID.value],
nb_samples=nb_samples[Dataset.VALID.value],
batch_size=batch_size),
self.get_test_tuple_generator(shrink_size=shrink_size[Dataset.TEST.value],
nb_samples=nb_samples[Dataset.TEST.value],
batch_size=batch_size))
def get_train_tuple_generator(self,
shrink_size=None,
nb_samples=None,
batch_size=None):
"""
Creates a generator that yields a truncated version
of the train data from a h5py file.
This function will not modify the class' member variables
# Arguments
shrink_size:
float ratio of the truncated dataset to the full dataset
nb_samples:
maximum number of samples; must be divisible by batch_size
batch_size:
size of each mini batch during training
# Yields
named tuple (X_train, y_train)
"""
if shrink_size is None:
shrink_size = self._shrink_size_[Dataset.TRAIN.value]
if batch_size is None:
batch_size = self._batch_size_
train_tuple = self.get_train_tuple(shrink_size=shrink_size)
max_batches = floor(train_tuple.X_train.shape[0] / batch_size)
# Set the number of batches to either
# the maximum or the greatest possible
if nb_samples is None or nb_samples >= (max_batches * batch_size):
nb_batches = max_batches
else:
if nb_samples % batch_size != 0:
sys.exit('ERROR: nb_samples is not divisible by batch_size')
nb_batches = int(nb_samples / batch_size)
# Yield the next batch
while 1:
for i in range (0, nb_batches):
yield tuple('train_tuple', 'X_train y_train') \
(train_tuple.X_train[i * batch_size : (i+1) * batch_size],
train_tuple.y_train[i * batch_size : (i+1) * batch_size])
def get_valid_tuple_generator(self,
shrink_size=None,
nb_samples=None,
batch_size=None):
"""
Creates a generator that yields a truncated version
of the validation data from a scipy.io file
This function will not modify the class' member variables
# Arguments
shrink_size:
float ratio of the truncated dataset to the full dataset
nb_samples:
maximum number of samples; must be divisible by batch_size
batch_size:
size of each mini batch during training
# Yields
named tuple (X_valid, y_valid)
"""
if shrink_size is None:
shrink_size = self._shrink_size_[Dataset.VALID.value]
if batch_size is None:
batch_size = self._batch_size_
valid_tuple = self.get_valid_tuple(shrink_size=shrink_size)
max_batches = floor(valid_tuple.X_valid.shape[0] / batch_size)
# Set the number of batches to either
# the maximum or the greatest possible
if nb_samples is None or nb_samples >= (max_batches * batch_size):
nb_batches = max_batches
else:
if nb_samples % batch_size != 0:
sys.exit('ERROR: nb_samples is not divisible by batch_size')
nb_batches = int(nb_samples / batch_size)
# Yield the next batch
while 1:
for i in range (0, nb_batches):
yield tuple('valid_tuple', 'X_valid y_valid') \
(valid_tuple.X_valid[i * batch_size : (i+1) * batch_size],
valid_tuple.y_valid[i * batch_size : (i+1) * batch_size])
def get_test_tuple_generator(self,
shrink_size=None,
nb_samples=None,
batch_size=None):
"""
Creates a generator that yields a truncated version
of the test data from a scipy.io file
This function will not modify the class' member variables
# Arguments
shrink_size:
float ratio of the truncated dataset to the full dataset
nb_samples:
maximum number of samples; must be | |
"""Control the sc2monitor."""
import asyncio
import logging
import math
import time
from datetime import datetime, timedelta
from operator import itemgetter
import aiohttp
import sc2monitor.model as model
from sc2monitor.handlers import SQLAlchemyHandler
from sc2monitor.sc2api import SC2API
logger = logging.getLogger(__name__)
sql_logger = logging.getLogger()
class Controller:
"""Control the sc2monitor."""
def __init__(self, **kwargs):
"""Init the sc2monitor."""
self.kwargs = kwargs
self.sc2api = None
self.db_session = None
self.current_season = {}
async def __aenter__(self):
"""Create a aiohttp and db session that will later be closed."""
headers = {'Accept-Encoding': 'gzip, deflate'}
self.http_session = aiohttp.ClientSession(headers=headers)
self.create_db_session()
return self
def create_db_session(self):
"""Create sqlalchemy database session."""
self.db_session = model.create_db_session(
db=self.kwargs.pop('db', ''),
encoding=self.kwargs.pop('encoding', ''))
self.handler = SQLAlchemyHandler(self.db_session)
self.handler.setLevel(logging.INFO)
sql_logger.setLevel(logging.INFO)
sql_logger.addHandler(self.handler)
if len(self.kwargs) > 0:
self.setup(**self.kwargs)
self.sc2api = SC2API(self)
self.cache_matches = self.get_config(
'cache_matches',
default_value=1000)
self.cache_logs = self.get_config(
'cache_logs',
default_value=500)
self.cache_runs = self.get_config(
'cache_runs',
default_value=500)
self.analyze_matches = self.get_config(
'analyze_matches',
default_value=100)
    async def __aexit__(self, exc_type, exc, tb):
        """Close the aiohttp session and the database session.

        Pending database changes are committed before closing.
        """
        await self.http_session.close()
        self.db_session.commit()
        self.db_session.close()
        self.db_session = None
def get_config(self, key, default_value=None,
raise_key_error=True,
return_object=False):
"""Read a config value from database."""
if default_value is not None:
raise_key_error = False
entry = self.db_session.query(
model.Config).filter(model.Config.key == key).scalar()
if not entry:
if raise_key_error:
raise ValueError(f'Unknown config key "{key}"')
else:
if return_object:
return None
else:
return '' if default_value is None else default_value
else:
if return_object:
return entry
else:
return entry.value
def set_config(self, key, value, commit=True):
"""Save a config value to the database."""
entry = self.db_session.query(
model.Config).filter(model.Config.key == key).scalar()
if not entry:
self.db_session.add(model.Config(key=key, value=value))
else:
entry.value = value
if commit:
self.db_session.commit()
def setup(self, **kwargs):
"""Set up the sc2monitor with api-key and api-secret."""
valid_keys = ['api_key', 'api_secret',
'cache_matches', 'analyze_matches']
for key, value in kwargs.items():
if key not in valid_keys:
raise ValueError(
f"Invalid configuration key '{key}'"
f" (valid keys: {', '.join(valid_keys)})")
self.set_config(key, value, commit=False)
self.db_session.commit()
if self.sc2api:
self.sc2api.read_config()
def add_player(self, url, race=model.Race['Random']):
"""Add a player by url to the sc2monitor."""
close_db = False
if self.db_session is None:
self.create_db_session()
close_db = True
server, realm, player_id = self.sc2api.parse_profile_url(url)
count = self.db_session.query(model.Player).filter(
model.Player.realm == realm,
model.Player.player_id == player_id,
model.Player.server == server).count()
if count == 0:
new_player = model.Player(
realm=realm,
player_id=player_id,
server=server,
race=race)
self.db_session.add(new_player)
self.db_session.commit()
if close_db:
self.db_session.close()
self.db_session = None
def remove_player(self, url):
"""Remove a player by url to the sc2monitor."""
close_db = False
if self.db_session is None:
self.create_db_session()
close_db = True
server, realm, player_id = self.sc2api.parse_profile_url(url)
for player in self.db_session.query(model.Player).filter(
model.Player.realm == realm,
model.Player.player_id == player_id,
model.Player.server == server).all():
self.db_session.delete(player)
self.db_session.commit()
if close_db:
self.db_session.close()
self.db_session = None
async def update_season(self, server: model.Server):
"""Update info about the current season in the database."""
current_season = await self.sc2api.get_season(server)
season = self.db_session.query(model.Season).\
filter(model.Season.server == server).\
order_by(model.Season.season_id.desc()).\
limit(1).scalar()
if not season or current_season.season_id != season.season_id:
self.db_session.add(current_season)
self.db_session.commit()
self.db_session.refresh(current_season)
logger.info(f'Found a new ladder season: {current_season}')
return current_season
else:
season.start = current_season.start
season.end = current_season.end
season.year = current_season.year
season.number = current_season.number
self.db_session.commit()
return season
async def update_seasons(self):
"""Update seasons info for all servers."""
servers = [server[0] for server in self.db_session.query(
model.Player.server).distinct()]
tasks = []
for server in servers:
tasks.append(asyncio.create_task(self.update_season(server)))
for season in await asyncio.gather(*tasks, return_exceptions=True):
try:
if isinstance(season, model.Season):
self.current_season[season.server.id()] = season
else:
raise season
except Exception:
logger.exception(
('The following exception was'
' raised while updating seasons:'))
    async def query_player(self, player: model.Player):
        """Collect api data of a player.

        Walks all ladders the player appears on and collects per-race
        entries that report more games than the database knows about.
        Entries with missing games are handed to process_player();
        otherwise the player's name is refreshed at most once a day.
        """
        complete_data = []
        for ladder in await self.sc2api.get_ladders(player):
            async for data in self.sc2api.get_ladder_data(player, ladder):
                current_player = await self.get_player_with_race(player, data)
                missing_games, new = self.count_missing_games(
                    current_player, data)
                if missing_games['Total'] > 0:
                    # Win/Loss count games assigned later from match history.
                    complete_data.append({'player': current_player,
                                          'new_data': data,
                                          'missing': missing_games,
                                          'Win': 0,
                                          'Loss': 0})
        # NOTE(review): `new` carries the value from the *last* loop
        # iteration — assumes at least one ladder entry exists whenever
        # complete_data is non-empty; confirm this invariant.
        if len(complete_data) > 0:
            await self.process_player(complete_data, new)
        elif (not player.name
                or not isinstance(player.refreshed, datetime)
                or player.refreshed <= datetime.now() - timedelta(days=1)):
            await self.update_player_name(player)
async def update_player_name(self, player: model.Player, name=''):
"""Update the name of a player from api data."""
if not name:
metadata = await self.sc2api.get_metadata(player)
name = metadata['name']
for tmp_player in self.db_session.query(model.Player).filter(
model.Player.player_id == player.player_id,
model.Player.realm == player.realm,
model.Player.server == player.server,
model.Player.name != name).all():
logger.info(f"{tmp_player.id}: Updating name to '{name}'")
tmp_player.name = name
self.db_session.commit()
async def check_match_history(self, complete_data):
"""Check matches in match history and assign them to races."""
match_history = await self.sc2api.get_match_history(
complete_data[0]['player'])
for match in match_history:
positive = []
for data_key, data in enumerate(complete_data):
needed = data['missing'].get(match['result'].describe(), 0) > 0
try:
datetime_check = (match['datetime']
- data['player'].last_played
> timedelta(seconds=0))
except TypeError:
datetime_check = True
if (needed and datetime_check):
positive.append(data_key)
if len(positive) == 0:
continue
elif len(positive) >= 1:
# Choose the race with most missing results.
max_missing = 0
for key in positive:
tmp_missing = complete_data[key][
'missing'][match['result'].describe()]
if tmp_missing > max_missing:
data_key = key
max_missing = tmp_missing
complete_data[data_key][
'missing'][match['result'].describe()] -= 1
complete_data[data_key][match['result'].describe()] += 1
try:
complete_data[data_key]['games'].insert(0, match)
except KeyError:
complete_data[data_key]['games'] = [match]
try:
last_played = match['datetime']
except Exception:
last_played = datetime.now()
return last_played, len(match_history)
    async def process_player(self, complete_data, new=False):
        """Process the api data of a player.

        Assigns history matches to races, guesses games absent from the
        match history (except for new players), estimates MMR changes,
        then updates the player rows and recalculates statistics.
        """
        last_played, len_history \
            = await self.check_match_history(complete_data)
        for race_player in complete_data:
            # Re-total what is still missing after history assignment.
            race_player['missing']['Total'] = race_player['missing']['Win'] + \
                race_player['missing']['Loss']
            if race_player['missing']['Total'] > 0:
                if new:
                    # A new player's full career predates monitoring, so
                    # missing games are expected and not reconstructed.
                    logger.info(
                        f"{race_player['player'].id}: Ignoring "
                        f"{race_player['missing']['Total']} games missing in"
                        f" match history ({len_history}) "
                        "of new player.")
                else:
                    self.guess_games(race_player, last_played)
            self.guess_mmr_changes(race_player)
            await self.update_player(race_player)
            self.calc_statistics(race_player['player'])
async def update_player(self, complete_data):
"""Update database with new data of a player."""
player = complete_data['player']
new_data = complete_data['new_data']
player.mmr = new_data['mmr']
player.ladder_id = new_data['ladder_id']
player.league = new_data['league']
player.ladder_joined = new_data['joined']
player.wins = new_data['wins']
player.losses = new_data['losses']
player.last_active_season = self.get_season_id(player.server)
if player.name != new_data['name']:
await self.update_player_name(
player,
new_data['name'])
if (not player.last_played
or player.ladder_joined
> player.last_played):
player.last_played = player.ladder_joined
self.db_session.commit()
def calc_statistics(self, player: model.Player):
"""Recalculate player statistics."""
self.db_session.refresh(player)
if not player.statistics:
stats = model.Statistics(player=player)
self.db_session.add(stats)
self.db_session.commit()
self.db_session.refresh(stats)
else:
stats = player.statistics
matches = self.db_session.query(model.Match).filter(
model.Match.player_id == player.id).order_by(
model.Match.datetime.desc()).limit(self.analyze_matches).all()
stats.games_available = len(matches)
wma_mmr_denominator = stats.games_available * \
(stats.games_available + 1.0) / 2.0
stats.max_mmr = player.mmr
stats.min_mmr = player.mmr
stats.current_mmr = player.mmr
wma_mmr = 0.0
expected_mmr_value = 0.0
expected_mmr_value2 = 0.0
current_wining_streak = 0
current_losing_streak = 0
for idx, match in enumerate(matches):
if match.result == model.Result.Win:
stats.wins += 1
current_wining_streak += 1
current_losing_streak = 0
if current_wining_streak > stats.longest_wining_streak:
stats.longest_wining_streak = current_wining_streak
elif match.result == model.Result.Loss:
stats.losses += 1
current_losing_streak += 1
current_wining_streak = 0
if current_losing_streak > stats.longest_losing_streak:
stats.longest_losing_streak = current_losing_streak
if match.max_length <= 120:
stats.instant_left_games += 1
if match.guess:
stats.guessed_games += 1
mmr = match.mmr
wma_mmr += mmr * \
(stats.games_available - idx) / wma_mmr_denominator
if stats.max_mmr < mmr:
stats.max_mmr = mmr
if stats.min_mmr > mmr:
stats.min_mmr = mmr
expected_mmr_value += mmr / stats.games_available
expected_mmr_value2 += mmr * (mmr / stats.games_available)
if stats.games_available <= 1:
stats.lr_mmr_slope = 0.0
stats.lr_mmr_intercept = expected_mmr_value
else:
ybar = expected_mmr_value
xbar = -0.5 * (stats.games_available - 1)
numerator = 0
denominator = 0
for x, match in enumerate(matches):
x = -x
y = match.mmr
numerator += (x - xbar) * (y - ybar)
denominator += (x - xbar) * (x - xbar)
stats.lr_mmr_slope = numerator / denominator
stats.lr_mmr_intercept = ybar - stats.lr_mmr_slope * xbar
stats.sd_mmr = round(
math.sqrt(expected_mmr_value2
- expected_mmr_value
* expected_mmr_value))
# critical_idx = min(self.controller.config['no_critical_games'],
# stats.games_available) - 1
# stats.critical_game_played = matches[critical_idx]["played"]
stats.avg_mmr = expected_mmr_value
stats.wma_mmr = wma_mmr
self.db_session.commit()
@classmethod
def guess_games(cls, complete_data, last_played):
"""Guess games of a player if missing in match history."""
# If a player isn't new in the database and has played more
# than 25 games since the last refresh or the match
# history is not available for this player, there are
# missing games in the match history. These are guessed to be very
# close to the last game of the match history and in alternating
# order.
player = complete_data['player']
if 'games' not in complete_data:
complete_data['games'] = []
logger.info((
"{}: {} missing games in match "
+ "history - more guessing!").format(
player.id, complete_data['missing']['Total']))
try:
delta = (last_played - player.last_played) / \
complete_data['missing']['Total']
except Exception:
delta = timedelta(minutes=3)
if delta > timedelta(minutes=3):
delta = timedelta(minutes=3)
if delta.total_seconds() <= 0:
last_played = datetime.now()
delta = timedelta(minutes=3)
while (complete_data['missing']['Win'] > 0
or complete_data['missing']['Loss'] > 0):
if complete_data['missing']['Win'] > 0:
last_played = last_played - delta
complete_data['games'].append(
{'datetime': last_played, 'result': model.Result.Win})
complete_data['missing']['Win'] -= 1
complete_data['Win'] += 1
if (complete_data['missing']['Win'] > 0
and complete_data['missing']['Win']
> complete_data['missing']['Loss']):
# If there | |
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.0437228,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 1.55628,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.00173162,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.204049,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.00847309,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.0406513,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.065569,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.033097,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.139317,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.0451948,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 3.92655,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.00160075,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0017051,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0130141,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0126102,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0146149,
'Execution Unit/Register Files/Runtime Dynamic': 0.0143153,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.0278501,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.0690524,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 0.832111,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00047592,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00047592,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000419238,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000164871,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000181147,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00155222,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00439473,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0121225,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 0.7711,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.0393737,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.0411736,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 3.02704,
'Instruction Fetch Unit/Runtime Dynamic': 0.0986169,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0257519,
'L2/Runtime Dynamic': 0.00772389,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 1.52998,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.15243,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.00947476,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.00947473,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 1.57472,
'Load Store Unit/Runtime Dynamic': 0.208631,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.0233631,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.0467261,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.00829166,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.00867769,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.0479442,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.00645695,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.218297,
'Memory Management Unit/Runtime Dynamic': 0.0151346,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 12.3618,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.00421139,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00188533,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0206287,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming | |
# Repository: hilbix/fusetree
import fuse
from fuse import fuse_file_info
from typing import Dict, Iterator, Iterable, Sequence, Tuple, Optional, Any, NamedTuple, Union, List
import logging
import errno
import time
import threading
import traceback
from . import util
from .types import *
class Node:
"""
A node is the superclass of every entry in your filesystem.
When working with the FUSE api, you have only one callback for each function,
and must decide what to do based on the path.
    In contrast, FuseTree will find the node represented by the path and
call the matching function on that object.
e.g., `getattr('/foo/bar')` becomes `root['foo']['bar'].getattr()`
Since node-creating operations, like `create`, `mkdir`, etc, receive paths that don't yet exist,
    they are called on their parent directories instead.
e.g., `mkdir('/bar/foo/newdir')` becomes `root['bar']['foo'].mkdir('newdir')`
Common implementations to several common node types are provided on `nodetypes`.
"""
    @property
    def attr_timeout(self):
        """Seconds the kernel may cache this node's attributes (default 1)."""
        return 1
    @property
    def entry_timeout(self):
        """Seconds the kernel may cache this node's directory entry (default 1)."""
        return 1
    async def remember(self) -> None:
        """
        Hint that this node has been added to the kernel cache.
        On the root node it will be called once, when the FS is mounted --
        therefore it acts as fuse_init(). Default: no-op.
        """
        pass
    async def forget(self) -> None:
        """
        Hint that this node has been removed from the kernel cache.
        On the root node it will be called once, when the FS is unmounted --
        therefore it acts as fuse_destroy(). Default: no-op.
        """
        pass
    async def lookup(self, name: str) -> Node_Like:
        """
        Get one of this directory's child nodes by name.
        Returns None if no child with that name exists (the default).
        """
        return None
    async def getattr(self) -> Stat_Like:
        """
        Get file attributes.
        Similar to stat(). The 'st_dev' and 'st_blksize' fields are
        ignored. The 'st_ino' field is ignored except if the 'use_ino'
        mount option is given. In that case it is passed to userspace,
        but libfuse and the kernel will still assign a different
        inode for internal use (called the "nodeid").
        Default implementation: raises ENOSYS (not supported).
        """
        raise fuse.FuseOSError(errno.ENOSYS)
async def setattr(self, new_attr: Stat, to_set: List[str]) -> Stat_Like:
cur_attr = await self.getattr()
if 'st_mode' in to_set:
await self.chmod(new_attr.st_mode)
if 'st_uid' in to_set or 'st_gid' in to_set:
await self.chown(
new_attr.st_uid if 'st_uid' in to_set else cur_attr.st_uid,
new_attr.st_gid if 'st_gid' in to_set else cur_attr.st_gid)
if 'st_size' in to_set:
await self.truncate(new_attr.st_size)
if 'st_atime' in to_set or 'st_mtime' in to_set or 'st_ctime' in to_set:
await self.utimens(
new_attr.st_atime if 'st_atime' in to_set else cur_attr.st_atime,
new_attr.st_mtime if 'st_mtime' in to_set else cur_attr.st_mtime)
return await self.getattr()
    async def chmod(self, amode: int) -> None:
        """
        Change the permission bits of a file.
        Default implementation: raises ENOSYS (not supported).
        """
        raise fuse.FuseOSError(errno.ENOSYS)
    async def chown(self, uid: int, gid: int) -> None:
        """
        Change the owner and group of a file.
        Default implementation: raises ENOSYS (not supported).
        """
        raise fuse.FuseOSError(errno.ENOSYS)
    async def truncate(self, length: int) -> None:
        """
        Change the size of a file to *length* bytes.
        Default implementation: raises ENOSYS (not supported).
        """
        raise fuse.FuseOSError(errno.ENOSYS)
    async def utimens(self, atime: float, mtime: float) -> None:
        """
        Change the access and modification times of a file with
        nanosecond resolution.
        Default implementation: raises ENOSYS (not supported).
        """
        raise fuse.FuseOSError(errno.ENOSYS)
    async def readlink(self) -> str:
        """
        Read the target of a symbolic link.
        Default implementation: raises ENOSYS (not supported).
        """
        raise fuse.FuseOSError(errno.ENOSYS)
    async def mknod(self, name: str, mode: int, dev: int) -> Node_Like:
        """
        Create a file node named *name* in this directory.
        This is called for creation of all non-directory, non-symlink
        nodes. If the filesystem defines a create() method, then for
        regular files that will be called instead.
        Default implementation: raises ENOSYS (not supported).
        """
        raise fuse.FuseOSError(errno.ENOSYS)
    async def mkdir(self, name: str, mode: int) -> Node_Like:
        """
        Create a directory named *name* in this directory.
        Note that the mode argument may not have the type specification
        bits set, i.e. S_ISDIR(mode) can be false. To obtain the
        correct directory type bits use mode|S_IFDIR.
        Default implementation: raises ENOSYS (not supported).
        """
        raise fuse.FuseOSError(errno.ENOSYS)
    async def unlink(self, name: str) -> None:
        """
        Remove the file named *name* from this directory.
        Default implementation: raises ENOSYS (not supported).
        """
        raise fuse.FuseOSError(errno.ENOSYS)
    async def rmdir(self, name: str) -> None:
        """
        Remove the subdirectory named *name* from this directory.
        Default implementation: raises ENOSYS (not supported).
        """
        raise fuse.FuseOSError(errno.ENOSYS)
    async def symlink(self, name: str, target: str) -> Node_Like:
        """
        Create a symbolic link named *name* pointing at *target*.
        Default implementation: raises ENOSYS (not supported).
        """
        raise fuse.FuseOSError(errno.ENOSYS)
    async def rename(self, old_name: str, new_parent: 'Node', new_name: str) -> None:
        """
        Move the child *old_name* to *new_parent* under *new_name*.
        Default implementation: raises ENOSYS (not supported).
        """
        raise fuse.FuseOSError(errno.ENOSYS)
async def link(self, name: str, node: 'Node') -> Node_Like:
"""
Create a hard link to a file
"""
raise fuse.FuseOSError(errno.ENOSYS)
async def open(self, mode: int) -> 'FileHandle':
"""
File open operation
No creation (O_CREAT, O_EXCL) and by default also no
truncation (O_TRUNC) flags will be passed to open(). If an
application specifies O_TRUNC, fuse first calls truncate()
and then open(). Only if 'atomic_o_trunc' has been
specified and kernel version is 2.6.24 or later, O_TRUNC is
passed on to open.
Unless the 'default_permissions' mount option is given,
open should check if the operation is permitted for the
given flags. Optionally open may also return an arbitrary
filehandle in the fuse_file_info structure, which will be
passed to all file operations.
"""
raise fuse.FuseOSError(errno.ENOSYS)
async def setxattr(self, name: str, value: bytes, flags: int) -> None:
"""
Set extended attributes
"""
raise fuse.FuseOSError(errno.ENOSYS)
async def getxattr(self, name: str) -> bytes:
"""
Get extended attributes
"""
raise fuse.FuseOSError(errno.ENOSYS)
async def listxattr(self) -> Iterable[str]:
"""
List extended attributes
"""
raise fuse.FuseOSError(errno.ENOSYS)
async def removexattr(self, name: str) -> None:
"""
Remove extended attributes
"""
raise fuse.FuseOSError(errno.ENOSYS)
async def opendir(self) -> DirHandle_Like:
"""
Open directory
Unless the 'default_permissions' mount option is given,
this method should check if opendir is permitted for this
directory. Optionally opendir may also return an arbitrary
filehandle in the fuse_file_info structure, which will be
passed to readdir, closedir and fsyncdir.
"""
raise fuse.FuseOSError(errno.ENOSYS)
async def statfs(self) -> StatVFS:
"""
Get file system statistics
The 'f_favail', 'f_fsid' and 'f_flag' fields are ignored
"""
raise fuse.FuseOSError(errno.ENOSYS)
async def access(self, amode: int) -> None:
"""
Check file access permissions
This will be called for the access() system call. If the
'default_permissions' mount option is given, this method is not
called.
"""
pass
    async def create(self, name: str, mode: int) -> 'FileHandle':
        """
        Create the file with the given mode and open it
        Like open, but called on O_CREAT. Never called on Linux before 2.6.15.
        See also: mknod() and open()
        """
        raise fuse.FuseOSError(errno.ENOSYS)
class DirHandle:
    """Result of an `opendir()` call.

    Supports listing the directory contents (the important part) and,
    optionally, syncing it.  You usually never touch this class
    directly: returning a collection of file names from `opendir`
    creates a DirHandle automatically.
    """

    def __init__(self, node: Node = None) -> None:
        self.node = node

    async def readdir(self) -> Iterable[DirEntry]:
        """Return the entries of this directory.

        Default implementation: operation not supported (ENOSYS).
        """
        raise fuse.FuseOSError(errno.ENOSYS)

    async def fsyncdir(self, datasync: int) -> None:
        """Flush directory contents to stable storage.

        A non-zero *datasync* means only the user data, not the meta
        data, needs to be flushed.  Default: not supported (ENOSYS).
        """
        raise fuse.FuseOSError(errno.ENOSYS)

    async def releasedir(self) -> None:
        """Release the directory handle (no-op by default)."""
        return None
class FileHandle:
"""
A FileHandle is what you get with a call to `open()` or `create()`.
Most importantly, you should implement `read` and/or `write`.
You probably don't need to deal with this class directly: Just return a blob from `read()`
    and a FileHandle will be created automatically -- otherwise, check the many common implementations in `nodetypes`
"""
    def __init__(self, node: Node = None, direct_io: bool = False, nonseekable: bool = False) -> None:
        # The Node this handle was opened on (None when not tracked).
        self.node = node
        # These two flags are surfaced to FUSE via the file-info structure:
        # direct_io bypasses the page cache; nonseekable forbids lseek().
        self.direct_io = direct_io
        self.nonseekable = nonseekable
async def getattr(self) -> Stat_Like:
"""
Get file attributes of an open file.
Similar to stat(). The 'st_dev' and 'st_blksize' fields are
ignored. The 'st_ino' field is ignored except if the 'use_ino'
mount option is given. In that case it is passed to userspace,
but libfuse and the kernel will still assign a different
inode for internal use (called the "nodeid").
"""
raise fuse.FuseOSError(errno.ENOSYS)
async def setattr(self, new_attr: Stat, to_set: List[str]) -> Stat_Like:
cur_attr = await self.getattr()
if 'st_mode' in to_set:
await self.chmod(new_attr.st_mode)
if 'st_uid' in to_set or 'st_gid' in to_set:
await self.chown(
new_attr.st_uid if 'st_uid' in to_set else cur_attr.st_uid,
new_attr.st_gid if 'st_gid' in to_set else cur_attr.st_gid)
if 'st_size' in to_set:
await self.truncate(new_attr.st_size)
if 'st_atime' in to_set or 'st_mtime' in to_set or 'st_ctime' in to_set:
| |
PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceBinSession.get_resource_ids_by_bin
id_list = []
for authorization in self.get_authorizations_by_vault(vault_ids):
id_list.append(authorization.get_id())
return IdList(id_list)
@utilities.arguments_not_none
def get_authorizations_by_vault(self, vault_ids):
"""Gets the list of ``Authorizations`` corresponding to a list of ``Vault``.
arg: vault_ids (osid.id.IdList): list of vault ``Ids``
return: (osid.authorization.AuthorizationList) - list of
authorizations
raise: NullArgument - ``vault_ids`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceBinSession.get_resources_by_bin
mgr = self._get_provider_manager('AUTHORIZATION', local=True)
lookup_session = mgr.get_authorization_lookup_session_for_vault(vault_ids, proxy=self._proxy)
lookup_session.use_isolated_vault_view()
return lookup_session.get_authorizations()
@utilities.arguments_not_none
def get_vault_ids_by_authorization(self, authorization_id):
"""Gets the list of ``Vault`` ``Ids`` mapped to an ``Authorization``.
arg: authorization_id (osid.id.Id): ``Id`` of an
``Authorization``
return: (osid.id.IdList) - list of vault ``Ids``
raise: NotFound - ``authorization_id`` is not found
raise: NullArgument - ``authorization_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceBinSession.get_bin_ids_by_resource
mgr = self._get_provider_manager('AUTHORIZATION', local=True)
lookup_session = mgr.get_authorization_lookup_session(proxy=self._proxy)
lookup_session.use_federated_vault_view()
authorization = lookup_session.get_authorization(authorization_id)
id_list = []
for idstr in authorization._my_map['assignedVaultIds']:
id_list.append(Id(idstr))
return IdList(id_list)
    @utilities.arguments_not_none
    def get_vault_by_authorization(self, authorization_id):
        """Gets the list of ``Vault`` objects mapped to an ``Authorization``.
        arg:    authorization_id (osid.id.Id): ``Id`` of an
                ``Authorization``
        return: (osid.authorization.VaultList) - list of vault
        raise:  NotFound - ``authorization_id`` is not found
        raise:  NullArgument - ``authorization_id`` is ``null``
        raise:  OperationFailed - unable to complete request
        raise:  PermissionDenied - authorization failure
        *compliance: mandatory -- This method must be implemented.*
        """
        # Not implemented by this backend; present only for OSID interface
        # compliance (callers can use get_vault_ids_by_authorization plus a
        # vault lookup session instead).
        raise errors.Unimplemented()
class AuthorizationVaultAssignmentSession(abc_authorization_sessions.AuthorizationVaultAssignmentSession, osid_sessions.OsidSession):
    """This session provides methods to re-assign ``Authorizations`` to ``Vault``.

    An ``Authorization`` may map to multiple ``Vault`` objects and
    removing the last reference to a ``Authorization`` is the equivalent
    of deleting it. Each ``Vault`` may have its own authorizations
    governing who is allowed to operate on it.

    Moving or adding a reference of a ``Authorization`` to another
    ``Vault`` is not a copy operation (eg: does not change its ``Id`` ).

    """
    _session_namespace = 'authorization.AuthorizationVaultAssignmentSession'

    def __init__(self, proxy=None, runtime=None, **kwargs):
        # Catalog-level session: no specific vault is bound at construction.
        OsidSession._init_catalog(self, proxy, runtime)
        self._catalog_name = 'Vault'
        self._forms = dict()
        self._kwargs = kwargs

    def can_assign_authorizations(self):
        """Tests if this user can alter authorization/vault mappings.

        A return of true does not guarantee successful authorization. A
        return of false indicates that it is known mapping methods in
        this session will result in a ``PermissionDenied``. This is
        intended as a hint to an application that may opt not to offer
        assignment operations to unauthorized users.

        return: (boolean) - ``false`` if mapping is not authorized,
                ``true`` otherwise
        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for
        # osid.resource.ResourceBinAssignmentSession.can_assign_resources
        # NOTE: It is expected that real authentication hints will be
        # handled in a service adapter above the pay grade of this impl.
        return True

    @utilities.arguments_not_none
    def can_assign_authorizations_to_vault(self, vault_id):
        """Tests if this user can alter authorization/vault mappings.

        A return of true does not guarantee successful authorization. A
        return of false indicates that it is known mapping methods in
        this session will result in a ``PermissionDenied``. This is
        intended as a hint to an application that may opt not to offer
        assignment operations to unauthorized users.

        arg:    vault_id (osid.id.Id): the ``Id`` of the ``Vault``
        return: (boolean) - ``false`` if mapping is not authorized,
                ``true`` otherwise
        raise:  NullArgument - ``vault_id`` is ``null``
        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for
        # osid.resource.ResourceBinAssignmentSession.can_assign_resources_to_bin
        # NOTE: It is expected that real authentication hints will be
        # handled in a service adapter above the pay grade of this impl.
        # The all-zero identifier denotes the phantom root catalog, which
        # never accepts assignments.
        if vault_id.get_identifier() == '000000000000000000000000':
            return False
        return True

    @utilities.arguments_not_none
    def get_assignable_vault_ids(self, vault_id):
        """Gets a list of vault including and under the given vault node in which any authorization can be assigned.

        arg:    vault_id (osid.id.Id): the ``Id`` of the ``Vault``
        return: (osid.id.IdList) - list of assignable vault ``Ids``
        raise:  NullArgument - ``vault_id`` is ``null``
        raise:  OperationFailed - unable to complete request
        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for
        # osid.resource.ResourceBinAssignmentSession.get_assignable_bin_ids
        # This will likely be overridden by an authorization adapter
        mgr = self._get_provider_manager('AUTHORIZATION', local=True)
        lookup_session = mgr.get_vault_lookup_session(proxy=self._proxy)
        return IdList([vault.get_id() for vault in lookup_session.get_vaults()])

    @utilities.arguments_not_none
    def get_assignable_vault_ids_for_authorization(self, vault_id, authorization_id):
        """Gets a list of vault including and under the given vault node in which a specific authorization can be assigned.

        arg:    vault_id (osid.id.Id): the ``Id`` of the ``Vault``
        arg:    authorization_id (osid.id.Id): the ``Id`` of the
                ``Authorization``
        return: (osid.id.IdList) - list of assignable vault ``Ids``
        raise:  NullArgument - ``vault_id`` or ``authorization_id`` is
                ``null``
        raise:  OperationFailed - unable to complete request
        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for
        # osid.resource.ResourceBinAssignmentSession.get_assignable_bin_ids_for_resource
        # This will likely be overridden by an authorization adapter
        return self.get_assignable_vault_ids(vault_id)

    @utilities.arguments_not_none
    def assign_authorization_to_vault(self, authorization_id, vault_id):
        """Adds an existing ``Authorization`` to a ``Vault``.

        arg:    authorization_id (osid.id.Id): the ``Id`` of the
                ``Authorization``
        arg:    vault_id (osid.id.Id): the ``Id`` of the ``Vault``
        raise:  AlreadyExists - ``authorization_id`` is already assigned
                to ``vault_id``
        raise:  NotFound - ``authorization_id`` or ``vault_id`` not
                found
        raise:  NullArgument - ``authorization_id`` or ``vault_id`` is
                ``null``
        raise:  OperationFailed - unable to complete request
        raise:  PermissionDenied - authorization failure
        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for
        # osid.resource.ResourceBinAssignmentSession.assign_resource_to_bin
        mgr = self._get_provider_manager('AUTHORIZATION', local=True)
        lookup_session = mgr.get_vault_lookup_session(proxy=self._proxy)
        lookup_session.get_vault(vault_id)  # to raise NotFound
        self._assign_object_to_catalog(authorization_id, vault_id)

    @utilities.arguments_not_none
    def unassign_authorization_from_vault(self, authorization_id, vault_id):
        """Removes an ``Authorization`` from a ``Vault``.

        arg:    authorization_id (osid.id.Id): the ``Id`` of the
                ``Authorization``
        arg:    vault_id (osid.id.Id): the ``Id`` of the ``Vault``
        raise:  NotFound - ``authorization_id`` or ``vault_id`` not
                found or ``authorization_id`` not assigned to
                ``vault_id``
        raise:  NullArgument - ``authorization_id`` or ``vault_id`` is
                ``null``
        raise:  OperationFailed - unable to complete request
        raise:  PermissionDenied - authorization failure
        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for
        # osid.resource.ResourceBinAssignmentSession.unassign_resource_from_bin
        mgr = self._get_provider_manager('AUTHORIZATION', local=True)
        lookup_session = mgr.get_vault_lookup_session(proxy=self._proxy)
        lookup_session.get_vault(vault_id)  # to raise NotFound
        self._unassign_object_from_catalog(authorization_id, vault_id)

    @utilities.arguments_not_none
    def reassign_authorization_to_vault(self, authorization_id, from_vault_id, to_vault_id):
        """Moves an ``Authorization`` from one ``Vault`` to another.

        Mappings to other ``Vaults`` are unaffected.

        arg:    authorization_id (osid.id.Id): the ``Id`` of the
                ``Authorization``
        arg:    from_vault_id (osid.id.Id): the ``Id`` of the current
                ``Vault``
        arg:    to_vault_id (osid.id.Id): the ``Id`` of the destination
                ``Vault``
        raise:  NotFound - ``authorization_id, from_vault_id,`` or
                ``to_vault_id`` not found or ``authorization_id`` not
                mapped to ``from_vault_id``
        raise:  NullArgument - ``authorization_id, from_vault_id,`` or
                ``to_vault_id`` is ``null``
        raise:  OperationFailed - unable to complete request
        raise:  PermissionDenied - authorization failure
        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for
        # osid.resource.ResourceBinAssignmentSession.reassign_resource_to_bin
        self.assign_authorization_to_vault(authorization_id, to_vault_id)
        try:
            self.unassign_authorization_from_vault(authorization_id, from_vault_id)
        except:  # noqa: E722 -- deliberately bare: roll back on *any* failure, then re-raise
            self.unassign_authorization_from_vault(authorization_id, to_vault_id)
            raise
class VaultLookupSession(abc_authorization_sessions.VaultLookupSession, osid_sessions.OsidSession):
"""This session provides methods for retrieving ``Vault`` objects.
The ``Vault`` represents a collection of ``Functions`` and
``Authorizations``.
This session defines views that offer differing behaviors when
retrieving multiple objects.
* comparative view: elements may be silently omitted or re-ordered
* plenary view: provides a complete set or is an error condition
Generally, the comparative view should be used for most applications
as it permits operation even if there is data that cannot be
accessed. For example, a browsing application may only need to
examine the ``Vaults`` it can access, without breaking execution.
However, an administrative application may require all ``Vault``
elements to be available.
    Vaults may have additional records indicated by their respective
record types. The record may not be accessed through a cast of the
``Vault``.
"""
_session_namespace = 'authorization.VaultLookupSession'
    def __init__(self, proxy=None, runtime=None, **kwargs):
        # Initialize base session state, then the catalog machinery.
        OsidSession.__init__(self)
        OsidSession._init_catalog(self, proxy, runtime)
        # When a cataloging service is configured, delegate catalog
        # lookups to it instead of handling them locally.
        if self._cataloging_manager is not None:
            self._catalog_session = self._cataloging_manager.get_catalog_lookup_session()
            self._catalog_session.use_comparative_catalog_view()
        # Default to the forgiving comparative view.
        self._catalog_view = COMPARATIVE
        self._kwargs = kwargs
def can_lookup_vaults(self):
"""Tests if this user can perform ``Vault`` lookups.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known all methods in this
session will result in a ``PermissionDenied``. | |
= "sensors.NumericSensor_4_0_3.ReadingChangedEvent:1.0.0"
        def __init__(self, newReading, source):
            # newReading: the NumericSensor.Reading that triggered this event.
            super(raritan.rpc.sensors.NumericSensor.ReadingChangedEvent, self).__init__(source)
            typecheck.is_struct(newReading, raritan.rpc.sensors.NumericSensor.Reading, AssertionError)
            self.newReading = newReading
        def encode(self):
            """Serialize this event (including inherited Event fields) to a JSON-compatible dict."""
            json = super(raritan.rpc.sensors.NumericSensor.ReadingChangedEvent, self).encode()
            json['newReading'] = raritan.rpc.sensors.NumericSensor.Reading.encode(self.newReading)
            return json
        @classmethod
        def decode(cls, json, agent):
            """Build a ReadingChangedEvent from its decoded JSON representation."""
            obj = cls(
                newReading = raritan.rpc.sensors.NumericSensor.Reading.decode(json['newReading'], agent),
                # for idl.Event
                source = Interface.decode(json['source'], agent),
            )
            return obj
        def listElements(self):
            """Return this value object's element names plus those inherited from Event."""
            elements = ["newReading"]
            elements = elements + super(raritan.rpc.sensors.NumericSensor.ReadingChangedEvent, self).listElements()
            return elements
# value object
    class StateChangedEvent(raritan.rpc.idl.Event):
        """Generated value object: carries the old and new Reading across a sensor state change."""
        idlType = "sensors.NumericSensor_4_0_3.StateChangedEvent:1.0.0"
        def __init__(self, oldReading, newReading, source):
            super(raritan.rpc.sensors.NumericSensor.StateChangedEvent, self).__init__(source)
            typecheck.is_struct(oldReading, raritan.rpc.sensors.NumericSensor.Reading, AssertionError)
            typecheck.is_struct(newReading, raritan.rpc.sensors.NumericSensor.Reading, AssertionError)
            self.oldReading = oldReading
            self.newReading = newReading
        def encode(self):
            """Serialize this event (including inherited Event fields) to a JSON-compatible dict."""
            json = super(raritan.rpc.sensors.NumericSensor.StateChangedEvent, self).encode()
            json['oldReading'] = raritan.rpc.sensors.NumericSensor.Reading.encode(self.oldReading)
            json['newReading'] = raritan.rpc.sensors.NumericSensor.Reading.encode(self.newReading)
            return json
        @classmethod
        def decode(cls, json, agent):
            """Build a StateChangedEvent from its decoded JSON representation."""
            obj = cls(
                oldReading = raritan.rpc.sensors.NumericSensor.Reading.decode(json['oldReading'], agent),
                newReading = raritan.rpc.sensors.NumericSensor.Reading.decode(json['newReading'], agent),
                # for idl.Event
                source = Interface.decode(json['source'], agent),
            )
            return obj
        def listElements(self):
            """Return this value object's element names plus those inherited from Event."""
            elements = ["oldReading", "newReading"]
            elements = elements + super(raritan.rpc.sensors.NumericSensor.StateChangedEvent, self).listElements()
            return elements
# value object
    class MetaDataChangedEvent(raritan.rpc.idl.Event):
        """Generated value object: carries the old and new sensor MetaData."""
        idlType = "sensors.NumericSensor_4_0_3.MetaDataChangedEvent:1.0.0"
        def __init__(self, oldMetaData, newMetaData, source):
            super(raritan.rpc.sensors.NumericSensor.MetaDataChangedEvent, self).__init__(source)
            typecheck.is_struct(oldMetaData, raritan.rpc.sensors.NumericSensor.MetaData, AssertionError)
            typecheck.is_struct(newMetaData, raritan.rpc.sensors.NumericSensor.MetaData, AssertionError)
            self.oldMetaData = oldMetaData
            self.newMetaData = newMetaData
        def encode(self):
            """Serialize this event (including inherited Event fields) to a JSON-compatible dict."""
            json = super(raritan.rpc.sensors.NumericSensor.MetaDataChangedEvent, self).encode()
            json['oldMetaData'] = raritan.rpc.sensors.NumericSensor.MetaData.encode(self.oldMetaData)
            json['newMetaData'] = raritan.rpc.sensors.NumericSensor.MetaData.encode(self.newMetaData)
            return json
        @classmethod
        def decode(cls, json, agent):
            """Build a MetaDataChangedEvent from its decoded JSON representation."""
            obj = cls(
                oldMetaData = raritan.rpc.sensors.NumericSensor.MetaData.decode(json['oldMetaData'], agent),
                newMetaData = raritan.rpc.sensors.NumericSensor.MetaData.decode(json['newMetaData'], agent),
                # for idl.Event
                source = Interface.decode(json['source'], agent),
            )
            return obj
        def listElements(self):
            """Return this value object's element names plus those inherited from Event."""
            elements = ["oldMetaData", "newMetaData"]
            elements = elements + super(raritan.rpc.sensors.NumericSensor.MetaDataChangedEvent, self).listElements()
            return elements
# value object
    class ThresholdsChangedEvent(raritan.rpc.event.UserEvent):
        """Generated value object: old/new Thresholds plus the acting user's name and IP (from UserEvent)."""
        idlType = "sensors.NumericSensor_4_0_3.ThresholdsChangedEvent:1.0.0"
        def __init__(self, oldThresholds, newThresholds, actUserName, actIpAddr, source):
            super(raritan.rpc.sensors.NumericSensor.ThresholdsChangedEvent, self).__init__(actUserName, actIpAddr, source)
            typecheck.is_struct(oldThresholds, raritan.rpc.sensors.NumericSensor.Thresholds, AssertionError)
            typecheck.is_struct(newThresholds, raritan.rpc.sensors.NumericSensor.Thresholds, AssertionError)
            self.oldThresholds = oldThresholds
            self.newThresholds = newThresholds
        def encode(self):
            """Serialize this event (including inherited UserEvent fields) to a JSON-compatible dict."""
            json = super(raritan.rpc.sensors.NumericSensor.ThresholdsChangedEvent, self).encode()
            json['oldThresholds'] = raritan.rpc.sensors.NumericSensor.Thresholds.encode(self.oldThresholds)
            json['newThresholds'] = raritan.rpc.sensors.NumericSensor.Thresholds.encode(self.newThresholds)
            return json
        @classmethod
        def decode(cls, json, agent):
            """Build a ThresholdsChangedEvent from its decoded JSON representation."""
            obj = cls(
                oldThresholds = raritan.rpc.sensors.NumericSensor.Thresholds.decode(json['oldThresholds'], agent),
                newThresholds = raritan.rpc.sensors.NumericSensor.Thresholds.decode(json['newThresholds'], agent),
                # for event.UserEvent
                actUserName = json['actUserName'],
                actIpAddr = json['actIpAddr'],
                # for idl.Event
                source = Interface.decode(json['source'], agent),
            )
            return obj
        def listElements(self):
            """Return this value object's element names plus those inherited from UserEvent."""
            elements = ["oldThresholds", "newThresholds"]
            elements = elements + super(raritan.rpc.sensors.NumericSensor.ThresholdsChangedEvent, self).listElements()
            return elements
    class _getMetaData(Interface.Method):
        """Generated RPC stub 'getMetaData': no arguments; returns a MetaData struct."""
        name = 'getMetaData'
        @staticmethod
        def encode():
            args = {}
            return args
        @staticmethod
        def decode(rsp, agent):
            _ret_ = raritan.rpc.sensors.NumericSensor.MetaData.decode(rsp['_ret_'], agent)
            typecheck.is_struct(_ret_, raritan.rpc.sensors.NumericSensor.MetaData, DecodeException)
            return _ret_
    class _getDefaultThresholds(Interface.Method):
        """Generated RPC stub 'getDefaultThresholds': no arguments; returns a Thresholds struct."""
        name = 'getDefaultThresholds'
        @staticmethod
        def encode():
            args = {}
            return args
        @staticmethod
        def decode(rsp, agent):
            _ret_ = raritan.rpc.sensors.NumericSensor.Thresholds.decode(rsp['_ret_'], agent)
            typecheck.is_struct(_ret_, raritan.rpc.sensors.NumericSensor.Thresholds, DecodeException)
            return _ret_
    class _getThresholds(Interface.Method):
        """Generated RPC stub 'getThresholds': no arguments; returns a Thresholds struct."""
        name = 'getThresholds'
        @staticmethod
        def encode():
            args = {}
            return args
        @staticmethod
        def decode(rsp, agent):
            _ret_ = raritan.rpc.sensors.NumericSensor.Thresholds.decode(rsp['_ret_'], agent)
            typecheck.is_struct(_ret_, raritan.rpc.sensors.NumericSensor.Thresholds, DecodeException)
            return _ret_
    class _setThresholds(Interface.Method):
        """Generated RPC stub 'setThresholds': sends a Thresholds struct; the call returns an int code."""
        name = 'setThresholds'
        @staticmethod
        def encode(thresh):
            typecheck.is_struct(thresh, raritan.rpc.sensors.NumericSensor.Thresholds, AssertionError)
            args = {}
            args['thresh'] = raritan.rpc.sensors.NumericSensor.Thresholds.encode(thresh)
            return args
        @staticmethod
        def decode(rsp, agent):
            _ret_ = rsp['_ret_']
            typecheck.is_int(_ret_, DecodeException)
            return _ret_
    class _getReading(Interface.Method):
        """Generated RPC stub 'getReading': no arguments; returns a Reading struct."""
        name = 'getReading'
        @staticmethod
        def encode():
            args = {}
            return args
        @staticmethod
        def decode(rsp, agent):
            _ret_ = raritan.rpc.sensors.NumericSensor.Reading.decode(rsp['_ret_'], agent)
            typecheck.is_struct(_ret_, raritan.rpc.sensors.NumericSensor.Reading, DecodeException)
            return _ret_
    def __init__(self, target, agent):
        # Bind each generated RPC method stub to this proxy instance.
        super(NumericSensor, self).__init__(target, agent)
        self.getMetaData = NumericSensor._getMetaData(self)
        self.getDefaultThresholds = NumericSensor._getDefaultThresholds(self)
        self.getThresholds = NumericSensor._getThresholds(self)
        self.setThresholds = NumericSensor._setThresholds(self)
        self.getReading = NumericSensor._getReading(self)
#
# Section generated by IdlC from "StateSensor.idl"
#
import raritan.rpc
from raritan.rpc import Interface, Structure, ValueObject, Enumeration, typecheck, DecodeException
import raritan.rpc.idl
import raritan.rpc.sensors
# interface
class StateSensor(Sensor):
    """Generated proxy for the sensors.StateSensor IDL interface (discrete-state sensors)."""
    idlType = "sensors.StateSensor:4.0.3"

    # structure
    class State(Structure):
        """A state sample: timestamp, availability flag and integer state value."""
        idlType = "sensors.StateSensor_4_0_3.State:1.0.0"
        elements = ["timestamp", "available", "value"]
        def __init__(self, timestamp, available, value):
            typecheck.is_time(timestamp, AssertionError)
            typecheck.is_bool(available, AssertionError)
            typecheck.is_int(value, AssertionError)
            self.timestamp = timestamp
            self.available = available
            self.value = value
        @classmethod
        def decode(cls, json, agent):
            """Build a State from its decoded JSON representation."""
            obj = cls(
                timestamp = raritan.rpc.Time.decode(json['timestamp']),
                available = json['available'],
                value = json['value'],
            )
            return obj
        def encode(self):
            """Serialize this State to a JSON-compatible dict."""
            json = {}
            json['timestamp'] = raritan.rpc.Time.encode(self.timestamp)
            json['available'] = self.available
            json['value'] = self.value
            return json

    # value object
    class StateChangedEvent(raritan.rpc.idl.Event):
        """Generated value object: carries the old and new State of a StateSensor."""
        idlType = "sensors.StateSensor_4_0_3.StateChangedEvent:1.0.0"
        def __init__(self, oldState, newState, source):
            super(raritan.rpc.sensors.StateSensor.StateChangedEvent, self).__init__(source)
            typecheck.is_struct(oldState, raritan.rpc.sensors.StateSensor.State, AssertionError)
            typecheck.is_struct(newState, raritan.rpc.sensors.StateSensor.State, AssertionError)
            self.oldState = oldState
            self.newState = newState
        def encode(self):
            """Serialize this event (including inherited Event fields) to a JSON-compatible dict."""
            json = super(raritan.rpc.sensors.StateSensor.StateChangedEvent, self).encode()
            json['oldState'] = raritan.rpc.sensors.StateSensor.State.encode(self.oldState)
            json['newState'] = raritan.rpc.sensors.StateSensor.State.encode(self.newState)
            return json
        @classmethod
        def decode(cls, json, agent):
            """Build a StateChangedEvent from its decoded JSON representation."""
            obj = cls(
                oldState = raritan.rpc.sensors.StateSensor.State.decode(json['oldState'], agent),
                newState = raritan.rpc.sensors.StateSensor.State.decode(json['newState'], agent),
                # for idl.Event
                source = Interface.decode(json['source'], agent),
            )
            return obj
        def listElements(self):
            """Return this value object's element names plus those inherited from Event."""
            elements = ["oldState", "newState"]
            elements = elements + super(raritan.rpc.sensors.StateSensor.StateChangedEvent, self).listElements()
            return elements

    class _getState(Interface.Method):
        """Generated RPC stub 'getState': no arguments; returns the current State."""
        name = 'getState'
        @staticmethod
        def encode():
            args = {}
            return args
        @staticmethod
        def decode(rsp, agent):
            _ret_ = raritan.rpc.sensors.StateSensor.State.decode(rsp['_ret_'], agent)
            typecheck.is_struct(_ret_, raritan.rpc.sensors.StateSensor.State, DecodeException)
            return _ret_

    def __init__(self, target, agent):
        # Bind the generated RPC method stub to this proxy instance.
        super(StateSensor, self).__init__(target, agent)
        self.getState = StateSensor._getState(self)
#
# Section generated by IdlC from "SensorLogger.idl"
#
import raritan.rpc
from raritan.rpc import Interface, Structure, ValueObject, Enumeration, typecheck, DecodeException
import raritan.rpc.event
import raritan.rpc.peripheral
import raritan.rpc.sensors
# interface
class Logger(Interface):
idlType = "sensors.Logger:2.1.6"
# structure
    class Settings(Structure):
        """Generated structure: sensor-log configuration plus stored-record bookkeeping.

        isEnabled/samplePeriod/samplesPerRecord configure logging;
        oldestRecId/newestRecId/logCapacity report the stored record
        range and the log's capacity.
        """
        idlType = "sensors.Logger_2_1_6.Settings:1.0.0"
        elements = ["isEnabled", "samplePeriod", "samplesPerRecord", "oldestRecId", "newestRecId", "logCapacity"]
        def __init__(self, isEnabled, samplePeriod, samplesPerRecord, oldestRecId, newestRecId, logCapacity):
            typecheck.is_bool(isEnabled, AssertionError)
            typecheck.is_int(samplePeriod, AssertionError)
            typecheck.is_int(samplesPerRecord, AssertionError)
            typecheck.is_int(oldestRecId, AssertionError)
            typecheck.is_int(newestRecId, AssertionError)
            typecheck.is_int(logCapacity, AssertionError)
            self.isEnabled = isEnabled
            self.samplePeriod = samplePeriod
            self.samplesPerRecord = samplesPerRecord
            self.oldestRecId = oldestRecId
            self.newestRecId = newestRecId
            self.logCapacity = logCapacity
        @classmethod
        def decode(cls, json, agent):
            """Build a Settings from its decoded JSON representation."""
            obj = cls(
                isEnabled = json['isEnabled'],
                samplePeriod = json['samplePeriod'],
                samplesPerRecord = json['samplesPerRecord'],
                oldestRecId = json['oldestRecId'],
                newestRecId = json['newestRecId'],
                logCapacity = json['logCapacity'],
            )
            return obj
        def encode(self):
            """Serialize this Settings to a JSON-compatible dict."""
            json = {}
            json['isEnabled'] = self.isEnabled
            json['samplePeriod'] = self.samplePeriod
            json['samplesPerRecord'] = self.samplesPerRecord
            json['oldestRecId'] = self.oldestRecId
            json['newestRecId'] = self.newestRecId
            json['logCapacity'] = self.logCapacity
            return json
# structure
    class SensorSet(Structure):
        """Generated structure: references to Sensor interfaces and their peripheral DeviceSlots."""
        idlType = "sensors.Logger_2_1_6.SensorSet:1.0.0"
        elements = ["sensors", "slots"]
        def __init__(self, sensors, slots):
            for x0 in sensors:
                typecheck.is_interface(x0, raritan.rpc.sensors.Sensor, AssertionError)
            for x0 in slots:
                typecheck.is_interface(x0, raritan.rpc.peripheral.DeviceSlot, AssertionError)
            self.sensors = sensors
            self.slots = slots
        @classmethod
        def decode(cls, json, agent):
            """Build a SensorSet from its decoded JSON representation."""
            obj = cls(
                sensors = [Interface.decode(x0, agent) for x0 in json['sensors']],
                slots = [Interface.decode(x0, agent) for x0 in json['slots']],
            )
            return obj
        def encode(self):
            """Serialize this SensorSet to a JSON-compatible dict."""
            json = {}
            json['sensors'] = [Interface.encode(x0) for x0 in self.sensors]
            json['slots'] = [Interface.encode(x0) for x0 in self.slots]
            return json
# value object
    class SettingsChangedEvent(raritan.rpc.event.UserEvent):
        """Generated value object: old/new logger Settings plus the acting user's name and IP."""
        idlType = "sensors.Logger_2_1_6.SettingsChangedEvent:1.0.0"
        def __init__(self, oldSettings, newSettings, actUserName, actIpAddr, source):
            super(raritan.rpc.sensors.Logger.SettingsChangedEvent, self).__init__(actUserName, actIpAddr, source)
            typecheck.is_struct(oldSettings, raritan.rpc.sensors.Logger.Settings, AssertionError)
            typecheck.is_struct(newSettings, raritan.rpc.sensors.Logger.Settings, AssertionError)
            self.oldSettings = oldSettings
            self.newSettings = newSettings
        def encode(self):
            """Serialize this event (including inherited UserEvent fields) to a JSON-compatible dict."""
            json = super(raritan.rpc.sensors.Logger.SettingsChangedEvent, self).encode()
            json['oldSettings'] = raritan.rpc.sensors.Logger.Settings.encode(self.oldSettings)
            json['newSettings'] = raritan.rpc.sensors.Logger.Settings.encode(self.newSettings)
            return json
        @classmethod
        def decode(cls, json, agent):
            """Build a SettingsChangedEvent from its decoded JSON representation."""
            obj = cls(
                oldSettings = raritan.rpc.sensors.Logger.Settings.decode(json['oldSettings'], agent),
                newSettings = raritan.rpc.sensors.Logger.Settings.decode(json['newSettings'], agent),
                # for event.UserEvent
                actUserName = json['actUserName'],
                actIpAddr = json['actIpAddr'],
                # for idl.Event
                source = Interface.decode(json['source'], agent),
            )
            return obj
        def listElements(self):
            """Return this value object's element names plus those inherited from UserEvent."""
            elements = ["oldSettings", "newSettings"]
            elements = elements + super(raritan.rpc.sensors.Logger.SettingsChangedEvent, self).listElements()
            return elements
# value object
    class LoggedSensorsChangedEvent(raritan.rpc.event.UserEvent):
        """Generated value object: old/new SensorSet plus the acting user's name and IP."""
        idlType = "sensors.Logger_2_1_6.LoggedSensorsChangedEvent:1.0.0"
        def __init__(self, oldSensors, newSensors, actUserName, actIpAddr, source):
            super(raritan.rpc.sensors.Logger.LoggedSensorsChangedEvent, self).__init__(actUserName, actIpAddr, source)
            typecheck.is_struct(oldSensors, raritan.rpc.sensors.Logger.SensorSet, AssertionError)
            typecheck.is_struct(newSensors, raritan.rpc.sensors.Logger.SensorSet, AssertionError)
            self.oldSensors = oldSensors
            self.newSensors = newSensors
        def encode(self):
            """Serialize this event (including inherited UserEvent fields) to a JSON-compatible dict."""
            json = super(raritan.rpc.sensors.Logger.LoggedSensorsChangedEvent, self).encode()
            json['oldSensors'] = raritan.rpc.sensors.Logger.SensorSet.encode(self.oldSensors)
            json['newSensors'] = raritan.rpc.sensors.Logger.SensorSet.encode(self.newSensors)
            return json
        @classmethod
        def decode(cls, json, agent):
            """Build a LoggedSensorsChangedEvent from its decoded JSON representation."""
            obj = cls(
                oldSensors = raritan.rpc.sensors.Logger.SensorSet.decode(json['oldSensors'], agent),
                newSensors = raritan.rpc.sensors.Logger.SensorSet.decode(json['newSensors'], agent),
                # for event.UserEvent
                actUserName = json['actUserName'],
                actIpAddr = json['actIpAddr'],
                # for idl.Event
                source = Interface.decode(json['source'], agent),
            )
            return obj
        def listElements(self):
            """Return this value object's element names plus those inherited from UserEvent."""
            elements = ["oldSensors", "newSensors"]
            elements = elements + super(raritan.rpc.sensors.Logger.LoggedSensorsChangedEvent, self).listElements()
            return elements
    class _getSettings(Interface.Method):
        """Generated RPC stub 'getSettings': no arguments; returns a Settings struct."""
        name = 'getSettings'
        @staticmethod
        def encode():
            args = {}
            return args
        @staticmethod
        def decode(rsp, agent):
            _ret_ = raritan.rpc.sensors.Logger.Settings.decode(rsp['_ret_'], agent)
            typecheck.is_struct(_ret_, raritan.rpc.sensors.Logger.Settings, DecodeException)
            return _ret_
    class _setSettings(Interface.Method):
        """Generated RPC stub 'setSettings': sends isEnabled/samplesPerRecord; the call returns an int code."""
        name = 'setSettings'
        @staticmethod
        def encode(isEnabled, samplesPerRecord):
            typecheck.is_bool(isEnabled, AssertionError)
            typecheck.is_int(samplesPerRecord, AssertionError)
            args = {}
            args['isEnabled'] = isEnabled
            args['samplesPerRecord'] = samplesPerRecord
            return args
        @staticmethod
        def decode(rsp, agent):
            _ret_ = rsp['_ret_']
            typecheck.is_int(_ret_, DecodeException)
            return _ret_
STATE_UNAVAILABLE = 0
STATE_OPEN = 1
STATE_CLOSE = 2
STATE_BELOW_LOWER_CRITICAL = 3
STATE_BELOW_LOWER_WARNING = 4
STATE_NORMAL = 5
STATE_ABOVE_UPPER_WARNING = 6
STATE_ABOVE_UPPER_CRITICAL = 7
STATE_ON = 8
STATE_OFF = 9
STATE_ALARMED = 10
STATE_OK = 11
STATE_MARGINAL = 12
STATE_FAIL = 13
STATE_YES = 14
STATE_NO = 15
STATE_STANDBY = 16
STATE_ONE = 17
STATE_TWO = 18
STATE_IN_SYNC = 19
STATE_OUT_OF_SYNC = 20
STATE_FAULT = 21
STATE_SELF_TEST = 22
STATE_I1_OPEN_FAULT = 23
STATE_I1_SHORT_FAULT = 24
STATE_I2_OPEN_FAULT = 25
STATE_I2_SHORT_FAULT = 26
STATE_WARNING = 27
STATE_CRITICAL = 28
STATE_NON_REDUNDANT = 29
class _getTimeStamps(Interface.Method):
    # RPC method stub: fetches record timestamps starting at record id
    # 'recid', for up to 'count' records.
    # NOTE: auto-generated RPC binding -- keep in sync with the IDL.
    name = 'getTimeStamps'
    @staticmethod
    def encode(recid, count):
        typecheck.is_int(recid, AssertionError)
        typecheck.is_int(count, AssertionError)
        args = {}
        args['recid'] = recid
        args['count'] = count
        return args
    @staticmethod
    def decode(rsp, agent):
        # the response carries an int return value (status/record id --
        # verify against IDL) plus a list of Time objects
        _ret_ = rsp['_ret_']
        timestamps = [raritan.rpc.Time.decode(x0) for x0 in rsp['timestamps']]
        typecheck.is_int(_ret_, DecodeException)
        for x0 in timestamps:
            typecheck.is_time(x0, DecodeException)
        return (_ret_, timestamps)
| |
<filename>ddi_search_engine/Bio/EUtils/MultiDict.py
"""Dictionary-like objects which allow multiple keys
Python dictionaries map a key to a value. Duplicate keys are not
allowed, and new entries replace old ones with the same key. Order is
not otherwise preserved, so there's no way to get the items in the
order they were added to a dictionary.
Some types of data are best stored in dictionary-like objects which
allow multiple values per key. Some of these need the input order
strongly preserved, so the items can be retrieved in the same order as
they were added to the dictionary. That is the OrderedMultiDict.
Others need a weaker ordering guarantee where the order of values for
a given key is preserved but the order between the keys is not. That
is UnorderedMultiDict. (Because strong ordering isn't needed, it's
faster to delete from an UnorderedMultiDict.)
To create a MultiDict, pass in an object which implements the
'allitems' method and returns a list of (key, value) pairs, or
pass in the list of (key, value) pairs directly.
The two MultiDict classes implement the following dictionary methods
d[key],
d[key] = value
del d[key]
d.get("key", default = None)
d1 == d2, d1 != d2, len(d), iter(d), str(d)
d.keys(), d.values(), d.items()
The new methods are:
d.getall(key)
d.allkeys()
d.allvalues()
d.allitems()
>>> import MultiDict
>>> od = MultiDict.OrderedMultiDict()
>>> od["Name"] = "Andrew"
>>> od["Color"] = "BLUE"
>>> od["Name"] = "Dalke"
>>> od["Color"] = "Green"
>>> od[3] = 9
>>> len(od)
3
>>> od["Name"]
'Dalke'
>>> od.getall("Name")
['Andrew', 'Dalke']
>>> for k, v in od.allitems():
... print "%r == %r" % (k, v)
...
'Name' == 'Andrew'
'Color' == 'BLUE'
'Name' == 'Dalke'
'Color' == 'Green'
3 == 9
>>> del od["Name"]
>>> len(od)
2
>>> for k, v in od.allitems():
... print "%r == %r" % (k, v)
...
'Color' == 'BLUE'
'Color' == 'Green'
3 == 9
>>>
The latest version of this code can be found at
http://www.dalkescientific.com/Python/
"""
# Written in 2003 by <NAME>, Dalke Scientific Software, LLC.
# This software has been released to the public domain. No
# copyright is asserted.
from __future__ import generators
# Implementation inheritance -- not asserting a class hierarchy here
#
# If there is a class hierarchy, OrderedMultiDict is a child of
# UnorderedMultiDict because it makes stronger but not different
# guarantees on how the data works, at least data-wise.
# Performance-wise, Ordered has slower deletion (O(n)) than Unordered (O(1)).
# Convince me otherwise and I'll change. Besides, hierarchies are
# overrated.
class _BaseMultiDict:
def __str__(self):
"""shows contents as if this is a dictionary
If multiple values exist for a given key, use the last
one added.
"""
d = {}
for k in self.data:
d[k] = self.data[k][-1]
return str(d)
def __len__(self):
"""the number of unique keys"""
return len(self.data)
def __getitem__(self, key):
"""value for a given key
If more than one value exists for the key, use one added most recently
"""
return self.data[key][-1]
def get(self, key, default = None):
"""value for the given key; default = None if not present
If more than one value exists for the key, use the one added
most recently.
"""
return self.data.get(key, [default])[-1]
def __contains__(self, key):
"""check if the key exists"""
return key in self.data
def keys(self):
"""unordered list of unique keys"""
return self.data.keys()
def values(self):
"""unordered list of values
If more than one value exists for a given key, use the value
added most recently.
"""
return [x[-1] for x in self.data.values()]
def items(self):
"""unordered list of key/value pairs
If more than one value exists for a given key, use the value
added most recently.
"""
return [(k, v[-1]) for k, v in self.data.items()]
def getall(self, key):
"""Get all values for a given key
Multiple values are returned in input order.
If the key does not exists, returns an empty list.
"""
return self.data[key]
def __iter__(self):
"""iterate through the list of unique keys"""
return iter(self.data)
class OrderedMultiDict(_BaseMultiDict):
    """A dictionary-like container that keeps duplicate keys.

    Behaves like a normal dict except that:
      - a key may be stored several times, each with its own value;
      - the full input order of every key/value pair is preserved.

    >>> od = OrderedMultiDict([("Food", "Spam"), ("Color", "Blue"),
    ...                        ("Food", "Eggs"), ("Color", "Green")])
    >>> od["Food"]
    'Eggs'
    >>> od.getall("Food")
    ['Spam', 'Eggs']
    >>> list(od.allkeys())
    ['Food', 'Color', 'Food', 'Color']
    >>>

    The constructor accepts either a sequence of (key, value) pairs or
    any object exposing an allitems() method returning such pairs.
    """
    def __init__(self, multidict = None):
        # self.data maps key -> list of values (used by _BaseMultiDict);
        # self.order_data is the flat (key, value) list in input order.
        self.data = {}
        self.order_data = []
        if multidict is None:
            return
        if hasattr(multidict, "allitems"):
            multidict = multidict.allitems()
        for key, value in multidict:
            self[key] = value
    def __eq__(self, other):
        """True when contents and input order both match."""
        return self.order_data == other.order_data
    def __ne__(self, other):
        """True when contents or input order differ."""
        return not (self.order_data == other.order_data)
    def __repr__(self):
        return "<OrderedMultiDict %s>" % (self.order_data,)
    def __setitem__(self, key, value):
        """Append a key/value pair.

        A repeated key is not replaced: d[key] yields the newest value,
        while d.getall(key) still returns every value for the key.
        """
        self.order_data.append((key, value))
        if key in self.data:
            self.data[key].append(value)
        else:
            self.data[key] = [value]
    def __delitem__(self, key):
        """Drop every value stored under the given key."""
        del self.data[key]
        kept = [pair for pair in self.order_data if pair[0] != key]
        self.order_data[:] = kept
    def allkeys(self):
        """yield every key (with repeats) in input order"""
        for pair in self.order_data:
            yield pair[0]
    def allvalues(self):
        """yield every value in input order"""
        for pair in self.order_data:
            yield pair[1]
    def allitems(self):
        """iterate over every (key, value) pair in input order"""
        return iter(self.order_data)
class UnorderedMultiDict(_BaseMultiDict):
    """A dictionary-like container that keeps duplicate keys.

    Like OrderedMultiDict, a key may be stored many times.  Only the
    relative order of values *within* a key is preserved; no ordering
    is guaranteed between different keys.  (The weaker guarantee makes
    deletion cheaper than in OrderedMultiDict.)

    >>> ud = UnorderedMultiDict([("Food", "Spam"), ("Color", "Blue"),
    ...                          ("Food", "Eggs"), ("Color", "Green")])
    >>> ud["Food"]
    'Eggs'
    >>> ud.getall("Food")
    ['Spam', 'Eggs']
    >>>

    The constructor accepts either a sequence of (key, value) pairs or
    any object exposing an allitems() method returning such pairs.
    """
    def __init__(self, multidict = None):
        # key -> list of values, each list in the order of addition
        self.data = {}
        if multidict is None:
            return
        if hasattr(multidict, "allitems"):
            multidict = multidict.allitems()
        for key, value in multidict:
            self[key] = value
    def __eq__(self, other):
        """True when both hold the same keys with equal value lists."""
        return self.data == other.data
    def __ne__(self, other):
        """True when the keys or any per-key value list differ."""
        return not (self.data == other.data)
    def __repr__(self):
        return "<UnorderedMultiDict %s>" % (self.data,)
    def __setitem__(self, key, value):
        """Append a value under the given key (repeats allowed)."""
        if key in self.data:
            self.data[key].append(value)
        else:
            self.data[key] = [value]
    def __delitem__(self, key):
        """Drop every value stored under the given key."""
        del self.data[key]
    def allkeys(self):
        """yield each key once per stored value; key order is arbitrary"""
        # NOTE: .iteritems() keeps this module Python-2 compatible, as
        # in the rest of the file
        for key, values in self.data.iteritems():
            for _unused in values:
                yield key
    def allvalues(self):
        """yield every value; key order is arbitrary"""
        for values in self.data.itervalues():
            for value in values:
                yield value
    def allitems(self):
        """yield every (key, value) pair; keys come in arbitrary order,
        but the values of each key keep their order of addition"""
        for key, values in self.data.iteritems():
            for value in values:
                yield (key, value)
__test__ = {
"test_ordered_multidict": """
>>> od = OrderedMultiDict()
>>> od["Name"] = "Andrew"
>>> od["Color"] = "BLUE"
>>> od["Name"] = "Dalke"
>>> od["Color"] = "Green"
>>> od[3] = 9
>>> len(od)
3
>>> len(od.keys())
3
>>> len(od.values())
3
>>> len(od.items())
3
>>> od.keys()
['Color', 3, 'Name']
>>> "Name" in od and "Name" in od.keys() and "Name" in od.allkeys()
1
>>> "Color" in od and "Color" in od.keys() and "Color" in od.allkeys()
1
>>> 3 in | |
<filename>index_publish_results_to_excel.py
import argparse
import datetime as dt
import math
import os
import pandas as pd
from openpyxl import Workbook
from src.config.appConfig import loadAppConfig
from src.repos.latestRevData import LatestRevsRepo
from src.repos.gensMasterDataRepo import GensMasterRepo
from src.repos.schDataRepo import SchedulesRepo
from src.repos.smpDataRepo import SmpRepo
from src.services.ftpService import uploadFileToFtp
# read config file
print("SCED output data excel publish program start...")
appConf = loadAppConfig()
# database connection settings
dbHost = appConf["dbHost"]
dbName = appConf["dbName"]
dbUname = appConf["dbUname"]
dbPass = appConf["dbPass"]
# GAMS tool paths (read from the shared app config)
gamsExePath = appConf["gamsExePath"]
gamsCodePath = appConf["gamsCodePath"]
gamsLstPath = appConf["gamsLstPath"]
gamsExcelPath = appConf["gamsExcelPath"]
# FTP settings used when publishing the generated workbook
ftpHost = appConf["ftpHost"]
ftpUname = appConf["ftpUname"]
ftpPass = appConf["ftpPass"]
ftpResFolder = appConf["ftpResultsFolder"]
# make default target date as today
# (truncate the current timestamp to midnight)
targetDt = dt.datetime.now()
targetDt = dt.datetime(targetDt.year, targetDt.month, targetDt.day)
# get target date from command line if present
parser = argparse.ArgumentParser()
parser.add_argument('--date', help='target Date')
parser.add_argument('--noftp', action="store_true")
parser.add_argument('--doff', help='Date offset')
parser.add_argument('--rev', help='revision number')
parser.add_argument('--single', action="store_true")
args = parser.parse_args()
targetDtStr = args.date
noFtp = args.noftp
# NOTE(review): 'x == None' comparisons below would idiomatically be
# 'x is None'; behavior is the same for these argparse values.
if not targetDtStr == None:
    targetDt = dt.datetime.strptime(targetDtStr, "%Y-%m-%d")
targetDtOffsetStr = args.doff
targetDtOffset = 0
if not targetDtOffsetStr == None:
    targetDtOffset = int(targetDtOffsetStr)
# add offset days to target date
targetDt = targetDt + dt.timedelta(days=targetDtOffset)
targetRevStr = args.rev
targetRev = None
if not targetRevStr == None:
    targetRev = int(targetRevStr)
latRevRepo = LatestRevsRepo(dbHost, dbName, dbUname, dbPass)
# if revision number is not specified from cli, then latest revision number for the date is to be determined from db
# if no latest revision number is found for the date, then the latest revision number for the date is 0
if targetRev == None:
    latestRevInfo = latRevRepo.getLatestRevForDate(targetDt)
    if not (latestRevInfo == None):
        targetRev = latestRevInfo["latestRev"]
# NOTE(review): both early exits below use status 0, which reports
# success to the shell even though these are error paths -- confirm
# whether a non-zero exit code is expected by the scheduler.
if targetRev == None:
    print("target revision data not found, hence exiting...")
    exit(0)
resDumpFolder = appConf["resultsDumpFolder"]
# check - check if results dumping folder exists
if not os.path.isdir(resDumpFolder):
    print("results dumping folder doesnot exist...")
    exit(0)
# create workbook object to dump excel data
# Each timeseries sheet below holds one row per plant and one column per
# 15-minute block (96 blocks per day; see the range(1, 97) headers).
wb = Workbook()
# write generators master data
gensSheet = wb.active
gensSheet.title = "Data"
# create onbar sheet
onbarSheet = wb.create_sheet("DCOnbar")
# populate header to onbar sheet
onbarSheet.cell(row=1, column=1).value = "Plant Name"
onbarSheet.cell(row=1, column=2).value = "Region"
for blk in range(1, 97):
    onbarSheet.cell(row=1, column=blk+2).value = blk
# create schedule sheet
schSheet = wb.create_sheet("Schedule")
# populate header to schedules sheet
schSheet.cell(row=1, column=1).value = "Plant Name"
schSheet.cell(row=1, column=2).value = "Region"
for blk in range(1, 97):
    schSheet.cell(row=1, column=blk+2).value = blk
# create Optimal Schedule sheet
optSheet = wb.create_sheet("Optimal Schedule")
# populate header to optimal schedules sheet
optSheet.cell(row=1, column=1).value = "Plant Name"
optSheet.cell(row=1, column=2).value = "Region"
for blk in range(1, 97):
    optSheet.cell(row=1, column=blk+2).value = blk
# create SCED sheet
scedSheet = wb.create_sheet("SCED")
# populate header to sced sheet
scedSheet.cell(row=1, column=1).value = "Plant Name"
scedSheet.cell(row=1, column=2).value = "Region"
for blk in range(1, 97):
    scedSheet.cell(row=1, column=blk+2).value = blk
# create Number of units sheet
numUnitsSheet = wb.create_sheet("NumUnits")
# populate header to number of units sheet
numUnitsSheet.cell(row=1, column=1).value = "Plant Name"
numUnitsSheet.cell(row=1, column=2).value = "Region"
for blk in range(1, 97):
    numUnitsSheet.cell(row=1, column=blk+2).value = blk
# create cost sheet
costSheet = wb.create_sheet("ScedCost")
# populate header to cost sheet
costSheet.cell(row=1, column=1).value = "Plant Name"
costSheet.cell(row=1, column=2).value = "Region"
for blk in range(1, 97):
    costSheet.cell(row=1, column=blk+2).value = blk
# create summary sheet
summarySheet = wb.create_sheet("Summary")
# populate header to summary sheet
summarySheet.cell(row=1, column=1).value = "Plant Name"
summarySheet.cell(row=1, column=2).value = "Region"
# NOTE(review): the loop below writes headers into columns 1..16, so it
# overwrites the two cells just set ("Region" at column 2 becomes
# "VC Paise/MWH") -- confirm this is intended.
for hItr, headr in enumerate(["Plant Name", "VC Paise/MWH", "Max. On-bar (MW)", "On-bar Energy (MWH)", "Max. Inj. Schedule (MW)",
                              "Inj. Schedule Energy (MWH)", "Up Reserve (MWH)", "Down Reserve (MWH)", "Max. Optimal Schedule (MW)",
                              "Optimal Schedule Energy (MWH)", "Max SCED (MW)", "Min SCED (MW)", "SCED Energy (MWH)",
                              "Cost Incurred (Lakhs)", "Cost Savings (Lakhs)", "Net Savings (Lakhs)"]):
    summarySheet.cell(row=1, column=hItr+1).value = headr
# Initialize summary sheet all generators row values
# (accumulators for the all-plants TOTAL row written after the main loop)
dayOnbarMwh = 0
daySchMwh = 0
dayOptMwh = 0
dayTmMwh = 0
dayScedMwh = 0
dayScedCost = 0
dayScedSaving = 0
# get the generators info from db
gensRepo = GensMasterRepo(
    dbHost, dbName, dbUname, dbPass)
gens = gensRepo.getGens()
# fixed column positions of the "Data" (master) sheet
stationColNum = 3
vcColNum = 8
unitCapColNum = 11
tmColNum = 12
rUpColNum = 13
rDnColNum = 14
# populate header for 1st row
gensSheet.cell(row=1, column=stationColNum).value = "Plant Name"
gensSheet.cell(row=1, column=vcColNum).value = "Variable Cost per unit"
gensSheet.cell(
    row=1, column=unitCapColNum).value = "Avg. Unit capacity"
gensSheet.cell(row=1, column=tmColNum).value = "Tech Min Per Unit"
gensSheet.cell(row=1, column=rUpColNum).value = "Ramp Up per unit"
gensSheet.cell(row=1, column=rDnColNum).value = "Ramp Dn per unit"
# one master-data row per generator, starting at row 2
for gItr, g in enumerate(gens):
    # populate generator data
    gensSheet.cell(row=gItr+2, column=stationColNum).value = g["name"]
    gensSheet.cell(row=gItr+2, column=vcColNum).value = g["vcPu"]
    gensSheet.cell(row=gItr+2, column=unitCapColNum).value = g["capPu"]
    gensSheet.cell(row=gItr+2, column=tmColNum).value = g["tmPu"]
    gensSheet.cell(row=gItr+2, column=rUpColNum).value = g["rUpPu"]
    gensSheet.cell(row=gItr+2, column=rDnColNum).value = g["rDnPu"]
# fetch onbar, sch, sced, optimal schedule data
schRepo = SchedulesRepo(dbHost, dbName, dbUname, dbPass)
# populate data to excel sheets
for gItr, g in enumerate(gens):
    # --- on-bar (declared capacity) data ---
    onbarSheet.cell(row=gItr+2, column=1).value = g["name"]
    # NOTE(review): "Region" column is hard-coded to 1 throughout -- confirm
    onbarSheet.cell(row=gItr+2, column=2).value = 1
    genOnbarRows = schRepo.getGenSchedules(
        "onbar", g["id"], targetRev, targetDt, targetDt+dt.timedelta(hours=23, minutes=59))
    # check - check if we got 96 rows for loading onbar data for a generator for the desired date
    if not len(genOnbarRows) == 96:
        print("96 rows not present in onbar data of {0} for the date {1}".format(
            g["name"], targetDt))
        exit(0)
    genMaxOnbar = 0
    genOnbarMwh = 0
    genTmMwh = 0
    for blkItr in range(len(genOnbarRows)):
        onbarVal = genOnbarRows[blkItr]["schVal"]
        # tech-minimum MW for the block: per-unit tech min times the
        # estimated units on bar (ceil of 95% of on-bar MW / unit capacity)
        tmVal = g["tmPu"]*math.ceil(onbarVal*0.95/g["capPu"])
        if onbarVal > genMaxOnbar:
            genMaxOnbar = onbarVal
        genOnbarMwh += onbarVal
        genTmMwh += tmVal
        onbarSheet.cell(row=gItr+2, column=blkItr + 3).value = onbarVal
    # block MW sums -> MWh (four 15-minute blocks per hour)
    genOnbarMwh /= 4
    genTmMwh /= 4
    # populate schedule data to Schedule sheet
    schSheet.cell(row=gItr+2, column=1).value = g["name"]
    schSheet.cell(row=gItr+2, column=2).value = 1
    genSchRows = schRepo.getGenSchedules(
        "sch", g["id"], targetRev, targetDt, targetDt+dt.timedelta(hours=23, minutes=59))
    # check - check if we got 96 rows for loading schedule data for a generator for the desired date
    if not len(genSchRows) == 96:
        print("96 rows not present in schedule data of {0} for the date {1}".format(
            g["name"], targetDt))
        exit(0)
    genMaxSch = 0
    genSchMwh = 0
    for blkItr in range(len(genSchRows)):
        schVal = genSchRows[blkItr]["schVal"]
        if schVal > genMaxSch:
            genMaxSch = schVal
        genSchMwh += schVal
        schSheet.cell(row=gItr+2, column=blkItr + 3).value = schVal
    genSchMwh /= 4
    # populate data to optimal schedule sheet
    optSheet.cell(row=gItr+2, column=1).value = g["name"]
    optSheet.cell(row=gItr+2, column=2).value = 1
    genOptRows = schRepo.getGenSchedules(
        "opt", g["id"], targetRev, targetDt, targetDt+dt.timedelta(hours=23, minutes=59))
    # check - check if we got 96 rows for loading optimal schedule data for a generator for the desired date
    if not len(genOptRows) == 96:
        print("96 rows not present in optimal schedule data of {0} for the date {1}".format(
            g["name"], targetDt))
        exit(0)
    genMaxOpt = 0
    genOptMwh = 0
    for blkItr in range(len(genOptRows)):
        optVal = genOptRows[blkItr]["schVal"]
        if optVal > genMaxOpt:
            genMaxOpt = optVal
        genOptMwh += optVal
        optSheet.cell(row=gItr+2, column=blkItr + 3).value = optVal
    genOptMwh /= 4
    # populate data to sced sheet, number of units sheet, cost sheet and summary
    scedSheet.cell(row=gItr+2, column=1).value = g["name"]
    scedSheet.cell(row=gItr+2, column=2).value = 1
    numUnitsSheet.cell(row=gItr+2, column=1).value = g["name"]
    numUnitsSheet.cell(row=gItr+2, column=2).value = 1
    costSheet.cell(row=gItr+2, column=1).value = g["name"]
    costSheet.cell(row=gItr+2, column=2).value = 1
    summarySheet.cell(row=gItr+2, column=1).value = g["name"]
    summarySheet.cell(row=gItr+2, column=2).value = g["vcPu"]
    # check - check if schedule, Onbar and optimal schedule have same number of rows
    if not (len(genOptRows) == len(genSchRows) == len(genOnbarRows)):
        print("Schedule, Onbar and optimal schedule rows are not of same size for {0}".format(
            targetDt))
        exit(0)
    genMaxSced = None
    genMinSced = None
    genScedMwh = 0
    genScedCost = 0
    genScedSaving = 0
    for blkItr in range(len(genOptRows)):
        # SCED value = optimal schedule minus injection schedule
        # (positive -> increment, negative -> decrement)
        scedVal = genOptRows[blkItr]["schVal"] - genSchRows[blkItr]["schVal"]
        if blkItr == 0:
            genMaxSced = scedVal
            genMinSced = scedVal
        else:
            if scedVal > genMaxSced:
                genMaxSced = scedVal
            if scedVal < genMinSced:
                genMinSced = scedVal
        genScedMwh += scedVal
        scedSheet.cell(row=gItr+2, column=blkItr +
                       3).value = scedVal
        numUnitsVal = math.ceil(
            0.95*genOnbarRows[blkItr]["schVal"]/g["capPu"])
        numUnitsSheet.cell(row=gItr+2, column=blkItr +
                           3).value = numUnitsVal
        genVcPu = g["vcPu"]
        # block cost from variable cost; the -2.5 factor presumably folds
        # in the 0.25 h block length, a paise->currency conversion and the
        # sign convention (negative => cost incurred) -- TODO confirm
        scedBlkCost = scedVal*genVcPu*-2.5
        if scedBlkCost < 0:
            genScedCost += scedBlkCost
        else:
            genScedSaving += scedBlkCost
        costSheet.cell(row=gItr+2, column=blkItr + 3).value = scedBlkCost
    genScedMwh /= 4
    # convert to lakhs (1 lakh = 1e5), matching the summary headers
    genScedCost /= 100000
    genScedSaving /= 100000
    # accumulate the all-plants TOTAL row values
    dayOnbarMwh += genOnbarMwh
    daySchMwh += genSchMwh
    dayOptMwh += genOptMwh
    dayScedMwh += genScedMwh
    dayScedCost += genScedCost
    dayScedSaving += genScedSaving
    dayTmMwh += genTmMwh
    # per-generator summary row (columns match the header list above)
    summarySheet.cell(row=gItr+2, column=3).value = genMaxOnbar
    summarySheet.cell(row=gItr+2, column=4).value = genOnbarMwh
    summarySheet.cell(row=gItr+2, column=5).value = genMaxSch
    summarySheet.cell(row=gItr+2, column=6).value = genSchMwh
    summarySheet.cell(row=gItr+2, column=7).value = genOnbarMwh-genSchMwh
    summarySheet.cell(row=gItr+2, column=8).value = genSchMwh-genTmMwh
    summarySheet.cell(row=gItr+2, column=9).value = genMaxOpt
    summarySheet.cell(row=gItr+2, column=10).value = genOptMwh
    summarySheet.cell(row=gItr+2, column=11).value = genMaxSced
    summarySheet.cell(row=gItr+2, column=12).value = genMinSced
    summarySheet.cell(row=gItr+2, column=13).value = genScedMwh
    summarySheet.cell(row=gItr+2, column=14).value = genScedCost
    summarySheet.cell(row=gItr+2, column=15).value = genScedSaving
    summarySheet.cell(
        row=gItr+2, column=16).value = genScedSaving+genScedCost
# populate total generators row values after all the generators in summary sheet
summarySheet.cell(row=len(gens)+2, column=1).value = "TOTAL"
summarySheet.cell(row=len(gens)+2, column=4).value = dayOnbarMwh
summarySheet.cell(row=len(gens)+2, column=6).value = daySchMwh
summarySheet.cell(row=len(gens)+2, column=7).value = dayOnbarMwh-daySchMwh
summarySheet.cell(row=len(gens)+2, column=8).value = daySchMwh-dayTmMwh
summarySheet.cell(row=len(gens)+2, column=10).value = dayOptMwh
summarySheet.cell(row=len(gens)+2, column=13).value = dayScedMwh
summarySheet.cell(row=len(gens)+2, column=14).value = dayScedCost
summarySheet.cell(row=len(gens)+2, column=15).value = dayScedSaving
summarySheet.cell(
    row=len(gens)+2, column=16).value = dayScedSaving+dayScedCost
# create smp sheet
smpSheet = wb.create_sheet("SMP")
# populate header to SMP sheet
smpSheet.cell(row=1, column=1).value = "Time"
smpSheet.cell(row=1, column=2).value = "SMP"
smpRepo = SmpRepo(dbHost, dbName, dbUname, dbPass)
smpRows = smpRepo.getSmp('g', targetRev, targetDt, targetDt +
                         dt.timedelta(hours=23, minutes=59))
for sItr, smpRow in enumerate(smpRows):
    # populate one SMP (system marginal price -- TODO confirm expansion)
    # row per returned record
    smpSheet.cell(row=sItr+2, column=1).value = smpRow["dataTime"]
    smpSheet.cell(row=sItr+2, column=2).value = smpRow["smpVal"]
# derive excel filename and file path
resultsFilename = "sced_results_{0}_{1}.xlsx".format(
dt.datetime.strftime(targetDt, "%Y_%m_%d"), | |
of the found motif IDs
motifID_lst: List[str]
# list of the found motif names
motifName_lst: List[str]
# list of the found motif widths
motif_width_lst: List[int]
# list of the found motif site counts
site_counts_lst: List[int]
# list of the found motif alphabet lengths
alphalen_lst: List[int]
# list of the found motif probability matrices
motif_probs_lst: List[pd.DataFrame]
# list of the found As probabilities for each motif
a_lst: List[np.double]
# list of the found Cs probabilities for each motif
c_lst: List[np.double]
# list of the found Gs probabilities for each motif
g_lst: List[np.double]
# list of the found Ts probabilities for each motif
t_lst: List[np.double]
infostart = False
datastart = False
motifs_found = 0
motifID_lst = list()
motifName_lst = list()
motif_width_lst = list()
site_counts_lst = list()
alphalen_lst = list()
motif_probs_lst = list()
a_lst = list()
c_lst = list()
g_lst = list()
t_lst = list()
motif_width = None
pos_read = 0
for line in in_mtf:
if line[0:8] == 'ALPHABET':
alphabet: List = sorted(list(set(line[10:-1])))
assert isListEqual(alphabet, DNA_ALPHABET)
if line[0:5] == 'MOTIF':
if verbose:
start_rm: float = time.time()
# read motif ID and full name
motif_header: str = line.split()
assert len(motif_header) > 0
# there are two ways to define the motif name line
# in MEME file
# (refer to http://meme-suite.org/doc/meme-format.html?man_type=web):
# 1 - MOTIF motif_alternate_name
# 2 - MOTIF motif_identifier motif_alternate_name
motifID: str
motifName: str
if len(motif_header) == 2: # support case (1)
motifID = motif_header[1]
motifName = motif_header[1]
else: # support case (2)
motifID, motifName = motif_header[1:3]
# end if
motifID_lst.append(motifID)
motifName_lst.append(motifName)
# the informations about motif start here
infostart = True
continue
# end if
if infostart and len(line.strip()) != 0:
infos: str = line[26:]
infosplit: List[str] = infos.split()
alphalen: int = int(infosplit[1])
alphalen_lst.append(alphalen)
assert alphalen == len(alphabet)
motif_width: int = int(infosplit[3])
site_counts: int = int(infosplit[5])
infostart = False # informations end here
# allocate space for the motif probability matrix
motif_probs: pd.DataFrame = pd.DataFrame(index=alphabet,
columns=range(motif_width),
data=np.double(0)
)
motif_width_lst.append(motif_width)
site_counts_lst.append(site_counts)
motif_probs_lst.append(motif_probs)
datastart = True # at next step begin data
# initialize nucleotide data
a = list()
c = list()
g = list()
t = list()
continue
# end if
if datastart and pos_read < motif_width:
freqs = line.split()
a.append(np.double(freqs[0]))
c.append(np.double(freqs[1]))
g.append(np.double(freqs[2]))
t.append(np.double(freqs[3]))
pos_read += 1
# end if
# we read all current motif data
if pos_read == motif_width:
a_lst.append(a)
c_lst.append(c)
g_lst.append(g)
t_lst.append(t)
# update stats about found motifs
motifs_found += 1
# clear the statistics
pos_read: int = 0
motif_width = None
datastart = False
alphalen = -1
datastart = False
if verbose:
end_rm: float = time.time()
msg: str = ''.join(["Read motif ", motifID, " in ",
str(end_rm - start_rm), "s"])
print(msg)
# end if
# end if
except: # something went wrong
errmsg: str = ' '.join(["Unable to read file", motif_file])
raise FileReadingException(errmsg)
else:
bgs: dict
# read the background
if bg_file == 'UNIF':
bgs = get_uniformBG(alphabet)
elif os.path.exists(bg_file):
bgs = readBGfile(bg_file)
else:
errmsg = "\n\nERROR: unable to find the given background file"
raise NotValidBGException(errmsg)
# end if
bgs = pseudo_bg(bgs, no_reverse)
motif_lst: List[Motif]
motif_lst = list()
for i in range(motifs_found):
mp: pd.DataFrame = motif_probs_lst[i]
mp.loc['A'] = a_lst[i]
mp.loc['C'] = c_lst[i]
mp.loc['G'] = g_lst[i]
mp.loc['T'] = t_lst[i]
mw: int = motif_width_lst[i]
sc: int = site_counts_lst[i]
mp = norm_motif(mp, mw, alphabet)
mp = apply_pseudocount_meme(mp, pseudocount, sc, mw, bgs, alphabet)
motif: Motif = Motif(mp, mw, alphabet, motifID_lst[i],
motifName_lst[i])
motif.setBg(bgs)
motif_lst.append(motif)
# end for
return motif_lst
finally:
in_mtf.close() # close the file anyway
# end try
# end read_MEME_motif()
def process_motif_for_logodds(motif: Motif) -> Motif:
    """Computes the log-odds values from a probability matrix of a given
    PWM motif.

    During the computation of the log-odds matrix is also computed the
    corresponding P-value matrix, using the dynamic programming
    algorithm presented in Staden, 1994.

    Parameters
    ----------
    motif : Motif
        DNA motif

    Returns
    -------
    Motif
        Input DNA motif with the log-odds matrix
    """
    # get the log-odds
    motif_log_odds: pd.DataFrame
    motif_log_odds = compute_log_odds(motif.getMotif_matrix(), motif.getWidth(),
                                      motif.getBg(), motif.getAlphabet())
    motif.setMotif_scoreMatrix(motif_log_odds)
    # scale the log-odds scores
    scaled_scores: np.ndarray
    min_val: int
    max_val: int
    scale: int
    offset: np.double
    scaled_scores, min_val, max_val, scale, offset = scale_pwm(motif.getMotif_scoreMatrix(),
                                                               motif.getAlphabet(),
                                                               motif.getWidth())
    # store the scaled scores (still a DataFrame) plus the scaling
    # parameters needed to map scaled scores back to log-odds
    motif.setMotif_scoreMatrix(scaled_scores)
    motif.setIsScaled(True)
    motif.setScale(scale)
    motif.setMin_val(min_val)
    motif.setMax_val(max_val)
    motif.setOffset(offset)
    # compute the p-value matrix
    # (comp_pval_mat reads the score matrix stored on the motif above, so
    # the ordering of these calls matters)
    pval_mat: np.array
    pval_mat = comp_pval_mat(motif)
    motif.setMotif_pval_matrix(pval_mat)
    # finally replace the DataFrame score matrix with its raw ndarray
    # (.values) -- presumably for faster access downstream; verify callers
    motif.setMotif_scoreMatrix(scaled_scores.values)
    return motif
# end of process_motif_for_logodds()
def scale_pwm(motif_matrix: pd.DataFrame,
              alphabet: List[str],
              motif_width: int
              ) -> Tuple[np.ndarray, int, int, int, np.double]:
    """Scale the log-odds values of the motif scoring matrix.

    The values are scaled in the range [0, 1000]. The scaling improves
    computational speed while computing the score for each motif
    occurrence candidate, and allows a constant time computation of
    the corresponding P-value.

    Parameters
    ----------
    motif_matrix : pd.DataFrame
        motif log-odds matrix
    alphabet: list
        DNA motif alphabet
    motif_width: int
        motif width

    Returns
    -------
    numpy.ndarray
        scaled motif scoring matrix
    int
        minimum value of the scaled scoring matrix
    int
        maximum value of the scaled scoring matrix
    int
        scaling factor
    numpy.double
        scaling offset
    """
    # validate the inputs before touching the matrix
    errmsg: str
    if not isinstance(motif_matrix, pd.DataFrame):
        errmsg = "\n\nERROR: The given motif matrix must be an instance of pandas.DataFrame"
        raise NoDataFrameException(errmsg)
    if motif_matrix.empty:
        errmsg = "\n\nERROR: The given motif matrix is empty"
        raise NotValidMotifMatrixException(errmsg)
    if not isinstance(alphabet, list):
        errmsg = "\n\nERROR: The alphabet given is not in a list"
        raise NotValidAlphabetException(errmsg)
    if not isListEqual(alphabet, DNA_ALPHABET):
        errmsg = "\n\nERROR: The alphabet given is not a valid DNA alphabet"
        raise NotValidAlphabetException(errmsg)
    assert motif_width > 0
    min_val: int
    max_val: int
    motif_matrix_sc: pd.DataFrame
    # global minimum/maximum over all cells of the matrix
    min_val = min(motif_matrix.min())
    max_val = max(motif_matrix.max())
    motif_matrix_sc = pd.DataFrame(index=list(motif_matrix.index),
                                   columns=list(motif_matrix.columns), data=0)
    lower: int = min_val
    upper: int = max_val
    if lower == upper:  # all values are equal
        # widen the interval so the scale denominator below is non-zero
        lower = np.double(upper - 1)
    offset: np.double
    scale_factor: int
    lower = np.floor(lower)
    # NOTE(review): np.round of an already-floored (integer-valued) float
    # is a no-op; offset equals floor(lower)
    offset = np.round(np.floor(lower))
    # RANGE is a module-level constant; per the docstring the scores are
    # mapped into [0, 1000]
    scale_factor = np.floor(RANGE / (upper - lower))
    # values will be in [0, 1000]
    # shift each score by -offset, then stretch by scale_factor
    for nuc in alphabet:
        for j in range(motif_width):
            scaled_score = np.round(
                (motif_matrix.loc[nuc, j] - (offset)) * scale_factor
            )
            motif_matrix_sc.loc[nuc, j] = scaled_score
        # end for
    # end for
    # make sure the values are integers
    motif_matrix_sc[:] = motif_matrix_sc[:].astype(int)
    # now they are scaled
    min_val = min(motif_matrix_sc.min())
    max_val = max(motif_matrix_sc.max())
    return motif_matrix_sc, min_val, max_val, int(scale_factor), offset
# end of scale_pwm()
def get_motif_pwm(motif_file: str,
                  args_obj: Findmotif,
                  cores: int
                  ) -> List[Motif]:
    """Entry point for building Motif objects from a PWM file.

    The file is parsed according to its format (MEME or JASPAR).  From
    the parsed data the scaled motif scoring matrix and the matching
    P-value matrix are computed and stored in the resulting Motif
    object(s).

    Parameters
    ----------
    motif_file : str
        path to the motif PWM
    args_obj : Findmotif
        container for arguments needed for the motif scoring and
        P-value matrix computations
    cores : int
        number of cores to use during the computation (used only when
        processing MEME motif files)

    Returns
    -------
    List[Motif]
        processed Motif object(s), always wrapped in a list
    """
    # pull the processing options out of the argument container
    bgs: dict = args_obj.get_bgfile()
    pseudo: float = args_obj.get_pseudo()
    no_reverse: bool = args_obj.get_no_reverse()
    verbose: bool = args_obj.get_verbose()
    errmsg: str
    if not motif_file:
        errmsg = "\n\nERROR: the motif file is missing"
        raise FileNotFoundError(errmsg)
    # sniff the file format once, then dispatch on it
    jaspar_fmt: bool = isJaspar_ff(motif_file)
    meme_fmt: bool = isMEME_ff(motif_file)
    if not (meme_fmt or jaspar_fmt):
        errmsg = "\n\nERROR: the motif file must be in MEME or JASPAR format"
        raise NotValidFFException(errmsg)
    if jaspar_fmt:
        motif = build_motif_JASPAR(motif_file, bgs, pseudo, no_reverse, verbose)
    else:
        motif = build_motif_MEME(motif_file, bgs, pseudo, no_reverse, cores,
                                 verbose)
    # callers always receive a list, even when a single motif was built
    if isinstance(motif, list):
        return motif
    return [motif]
# end of get_motif_pwm()
def pseudo_bg(bgs: Dict,
no_reverse: bool
) -> Dict:
"""Add a pseudocount and normalize the background probabilities of
nucleotides used to build the motif scoring matrix.
A pseudocount value is added to the background probability
distribution.
If are to be considered | |
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Layer, Lambda, Input, Conv2D, TimeDistributed, Dense, Flatten
from utils import bbox_utils, train_utils
import tensorflow.keras.regularizers as KR
import tensorflow.keras.layers as KL
import tensorflow.keras.initializers as KI
class Decoder(Layer):
    """Generating bounding boxes and labels from faster rcnn predictions.
    First calculating the boxes from predicted deltas and label probs.
    Then applied non max suppression and selecting top_n boxes by scores.
    In case all scores are lower than score_threshold -> take bbox with maximum score
    inputs:
        roi_bboxes = (batch_size, roi_bbox_size, [y1, x1, y2, x2])
        pred_deltas = (batch_size, roi_bbox_size, total_labels * [delta_y, delta_x, delta_h, delta_w])
        pred_label_probs = (batch_size, roi_bbox_size, total_labels)
    outputs:
        pred_bboxes = (batch_size, top_n, [y1, x1, y2, x2])
        pred_labels = (batch_size, top_n)
            1 to total label number
        pred_scores = (batch_size, top_n)
    """
    def __init__(self, variances, total_labels, nms_threshold=0.5, max_total_size=200, score_threshold=0.5, **kwargs):
        # variances: per-coordinate scale factors applied to the predicted deltas
        # total_labels: number of classes; label 0 is treated as background below
        super(Decoder, self).__init__(**kwargs)
        self.variances = variances
        self.total_labels = total_labels
        self.nms_threshold = nms_threshold
        self.max_total_size = max_total_size
        self.score_threshold = score_threshold
    def get_config(self):
        """Return the constructor arguments so the layer can be re-created on load."""
        config = super(Decoder, self).get_config()
        config.update({
            "variances": self.variances,
            "total_labels": self.total_labels,
            "nms_threshold": self.nms_threshold,
            "max_total_size": self.max_total_size,
            "score_threshold": self.score_threshold
        })
        return config
    def call(self, inputs):
        """Decode (roi_bboxes, pred_deltas, pred_label_probs) into final detections."""
        roi_bboxes = inputs[0]
        pred_deltas = inputs[1]
        pred_label_probs = inputs[2]
        batch_size = tf.shape(pred_deltas)[0]
        # one 4-delta set per class: (batch, rois, total_labels, 4)
        pred_deltas = tf.reshape(pred_deltas, (batch_size, -1, self.total_labels, 4))
        pred_deltas *= self.variances
        # broadcast each roi across all classes before applying the deltas
        expanded_roi_bboxes = tf.tile(tf.expand_dims(roi_bboxes, -2), (1, 1, self.total_labels, 1))
        pred_bboxes = bbox_utils.get_bboxes_from_deltas(expanded_roi_bboxes, pred_deltas)
        # zero the scores of rois whose argmax class is background (label 0)
        pred_labels_map = tf.expand_dims(tf.argmax(pred_label_probs, -1), -1)
        pred_labels = tf.where(tf.not_equal(pred_labels_map, 0), pred_label_probs, tf.zeros_like(pred_label_probs))
        final_bboxes, final_scores, final_labels, valid_detections = bbox_utils.non_max_suppression(
            pred_bboxes, pred_labels,
            iou_threshold=self.nms_threshold,
            max_output_size_per_class=self.max_total_size,
            max_total_size=self.max_total_size,
            score_threshold=self.score_threshold)
        # If there are any valid detection -> Apply NMS but without score threshold
        # NOTE(review): Python truth-testing on tensors ('no_detections', the
        # 'if' statements below) only works in eager execution -- confirm this
        # layer is never traced into a tf.function/graph.
        no_detections = valid_detections[0] == 0
        if no_detections:
            final_bboxes, final_scores, final_labels, valid_detections = bbox_utils.non_max_suppression(pred_bboxes,
                                        pred_labels, iou_threshold=self.nms_threshold,
                                        max_output_size_per_class=self.max_total_size,
                                        max_total_size=self.max_total_size)
        # Take only valid outputs, remove zero padding -> only valid for batchsize=1
        if batch_size == 1:
            final_bboxes = tf.slice(final_bboxes, [0, 0, 0], [1, valid_detections[0], 4])
            final_scores = tf.slice(final_scores, [0, 0], [1, valid_detections[0]])
            final_labels = tf.slice(final_labels, [0, 0], [1, valid_detections[0]])
        if no_detections:
            best_score = tf.reduce_max(final_scores, axis=1)
            if best_score < 0.001:  # no good bbox
                # fall back to a single all-zero placeholder detection
                final_bboxes = tf.zeros((1, 1, 4))
                final_labels = tf.zeros((1, 1))
                final_scores = tf.zeros((1, 1))
            else:
                # keep only the single highest-scoring detection
                better_detection_index = tf.argmax(final_scores, axis=1)
                final_bboxes = tf.gather(final_bboxes, better_detection_index, axis=1)
                final_scores = tf.gather(final_scores, better_detection_index, axis=1)
                final_labels = tf.gather(final_labels, better_detection_index, axis=1)
        return final_bboxes, final_labels, final_scores
class ProposalLayer(Layer):
    """Turn raw RPN outputs into region-of-interest proposals.

    The predicted deltas are applied to the anchor grid, the resulting
    boxes are ranked by their objectness scores, and non max suppression
    keeps the top "train or test nms_topn" proposals.

    inputs:
        rpn_bbox_deltas = (batch_size, img_output_height, img_output_width, anchor_count * [delta_y, delta_x, delta_h, delta_w])
            img_output_height and img_output_width are calculated to the base model feature map
        rpn_labels = (batch_size, img_output_height, img_output_width, anchor_count)
    outputs:
        roi_bboxes = (batch_size, train/test_nms_topn, [y1, x1, y2, x2])
    """
    def __init__(self, base_anchors, mode, cfg, **kwargs):
        super(ProposalLayer, self).__init__(**kwargs)
        self.base_anchors = base_anchors
        self.cfg = cfg
        self.mode = mode
    def get_config(self):
        """Serialize the constructor arguments for model saving."""
        base_config = super(ProposalLayer, self).get_config()
        base_config.update({"base_anchors": self.base_anchors, "cfg": self.cfg, "mode": self.mode})
        return base_config
    def call(self, inputs):
        deltas = inputs[0]
        scores = inputs[1]
        # build the anchor grid matching the feature map resolution
        feature_map_shape = (tf.shape(scores)[1], tf.shape(scores)[2])
        anchors = bbox_utils.generate_anchors(feature_map_shape, self.base_anchors)
        # training and inference keep different numbers of proposals
        training = self.mode == "training"
        pre_nms_topn = self.cfg.PRE_NMS_TOPN if training else self.cfg.TEST_PRE_NMS_TOPN
        post_nms_topn = self.cfg.TRAIN_NMS_TOPN if training else self.cfg.TEST_NMS_TOPN
        anchor_total = tf.shape(anchors)[0]
        batch_size = tf.shape(deltas)[0]
        deltas = tf.reshape(deltas, (batch_size, anchor_total, 4))
        scores = tf.reshape(scores, (batch_size, anchor_total))
        deltas *= self.cfg.VARIANCES
        proposals = bbox_utils.get_bboxes_from_deltas(anchors, deltas)
        # if there are fewer anchors than pre_nms_topn, take all of them
        if tf.shape(scores)[1] < pre_nms_topn:
            pre_nms_topn = tf.shape(scores)[1]
        _, keep = tf.nn.top_k(scores, pre_nms_topn)
        # take the top-scoring proposals and run class-agnostic NMS on them
        top_boxes = tf.gather(proposals, keep, batch_dims=1)
        top_scores = tf.gather(scores, keep, batch_dims=1)
        top_boxes = tf.reshape(top_boxes, (batch_size, pre_nms_topn, 1, 4))
        top_scores = tf.reshape(top_scores, (batch_size, pre_nms_topn, 1))
        rois, _, _, _ = bbox_utils.non_max_suppression(
            top_boxes, top_scores,
            max_output_size_per_class=post_nms_topn,
            max_total_size=post_nms_topn,
            iou_threshold=self.cfg.NMS_IOU_THRESHOLD)
        # proposals are inputs to the detection head, not trainable outputs
        return tf.stop_gradient(rois)
class ProposalTargetLayer(Layer):
"""Calculating faster rcnn actual bounding box deltas and labels.
This layer only running on the training phase.
inputs:
roi_bboxes = (batch_size, nms_topn, [y1, x1, y2, x2])
gt_boxes = (batch_size, padded_gt_boxes_size, [y1, x1, y2, x2])
gt_labels = (batch_size, padded_gt_boxes_size)
gt_masks = (batch_size, num_masks, img_height, img_width)
outputs:
roi_bbox_deltas = (batch_size, train_nms_topn * total_labels, [delta_y, delta_x, delta_h, delta_w])
roi_bbox_labels = (batch_size, train_nms_topn, total_labels)
"""
def __init__(self, cfg, img_height, img_width, **kwargs):
    """Store the config and the image size used to denormalize boxes."""
    super(ProposalTargetLayer, self).__init__(**kwargs)
    self.cfg, self.img_height, self.img_width = cfg, img_height, img_width
def get_config(self):
    """Serialize the constructor arguments for layer (de)serialization."""
    base = super(ProposalTargetLayer, self).get_config()
    base.update({"cfg": self.cfg, "img_height": self.img_height, "img_width": self.img_width})
    return base
def call(self, inputs):
    """Compute regression, classification (and optionally mask) targets for rois.

    inputs: [img_shape, roi_bboxes, gt_boxes, gt_labels] plus gt_masks as a
    fifth element when cfg.MASK_REG is enabled.
    Returns (gt_bbox_deltas, gt_bbox_labels) or, with mask regression on,
    (total_rois, gt_bbox_deltas, gt_bbox_labels, masks, positive_rois).
    """
    img_shape = inputs[0]
    roi_bboxes = inputs[1]
    gt_boxes = inputs[2]
    gt_labels = inputs[3]
    if self.cfg.MASK_REG:
        gt_masks = inputs[4]
    total_labels = self.cfg.NUM_CLASSES
    variances = self.cfg.VARIANCES
    # Calculate iou values between each bboxes and ground truth boxes
    iou_map, _ = bbox_utils.generate_iou_map(roi_bboxes, gt_boxes)
    # Get max index value for each row (best-matching gt box per roi)
    max_indices_each_gt_box = tf.argmax(iou_map, axis=2, output_type=tf.int32)
    # IoU map has iou values for every gt boxes and we merge these values column wise
    merged_iou_map = tf.reduce_max(iou_map, axis=2)
    # select positive and negative rois according to the thresholds
    pos_mask = tf.greater(merged_iou_map, self.cfg.TRAIN_FG_THRES)
    neg_mask = tf.logical_and(tf.less(merged_iou_map, self.cfg.TRAIN_BG_THRESH_HI), tf.greater(merged_iou_map, self.cfg.TRAIN_BG_THRESH_LO))
    # Calculate positive and negative total number of rois
    positive_count = tf.reduce_sum(tf.cast(pos_mask, tf.int32), axis=1)
    # cap positives at TRAIN_ROIS_PER_IMAGE * ROI_POSITIVE_RATIO
    max_pos_bboxes = tf.cast(tf.round(self.cfg.TRAIN_ROIS_PER_IMAGE*self.cfg.ROI_POSITIVE_RATIO), tf.int32)
    total_pos_bboxes = tf.minimum(max_pos_bboxes, positive_count)
    negative_count = tf.reduce_sum(tf.cast(neg_mask, tf.int32), axis=1)
    # negatives fill the remainder of the per-image roi budget
    negative_max2 = self.cfg.TRAIN_ROIS_PER_IMAGE - total_pos_bboxes
    total_neg_bboxes = tf.minimum(negative_max2, negative_count)
    # NOTE(review): only index [0] is used -- this looks like it assumes
    # batch_size == 1; confirm with the training pipeline.
    positive_count = total_pos_bboxes[0]
    negative_count = total_neg_bboxes[0]
    # Take random positive and negative rois without replacement
    # NOTE(review): Python truth-testing on tensors -- eager mode only.
    if positive_count > 0:
        pos_mask = train_utils.randomly_select_xyz_mask(pos_mask, total_pos_bboxes)
    if negative_count > 0:
        neg_mask = train_utils.randomly_select_xyz_mask(neg_mask, total_neg_bboxes)
    # take corresponding gt boxes and gt labels to rois
    gt_boxes_map = tf.gather(gt_boxes, max_indices_each_gt_box, batch_dims=1)
    expanded_gt_boxes = tf.where(tf.expand_dims(pos_mask, axis=-1), gt_boxes_map, tf.zeros_like(gt_boxes_map))
    gt_labels_map = tf.gather(gt_labels, max_indices_each_gt_box, batch_dims=1)
    # positives keep their gt label; everything else becomes -1 ...
    pos_gt_labels = tf.where(pos_mask, gt_labels_map, tf.constant(-1, dtype=tf.int32))
    # ... and selected negatives contribute +1, i.e. label 0 (background)
    neg_gt_labels = tf.cast(neg_mask, dtype=tf.int32)
    expanded_gt_labels = tf.cast(pos_gt_labels + neg_gt_labels, dtype=tf.int32) # (batch_size, num_rois, 4)
    # take positive gt bboxes, labels and rois
    pos_indices = tf.where(pos_mask)
    positive_count = tf.shape(pos_indices)[0]
    gt_boxes_pos = tf.gather_nd(expanded_gt_boxes, pos_indices)
    positive_rois = tf.gather_nd(roi_bboxes, pos_indices)
    pos_gt_labels = tf.gather_nd(expanded_gt_labels, pos_indices)
    # take negative gt bboxes, labels and rois
    neg_indices = tf.where(neg_mask)
    gt_boxes_neg = tf.gather_nd(expanded_gt_boxes, neg_indices)
    neg_rois = tf.gather_nd(roi_bboxes, neg_indices)
    neg_gt_labels = tf.gather_nd(expanded_gt_labels, neg_indices)
    # concat positive + negative gt bboxes, labels and rois
    total_gt_bboxes = tf.concat([gt_boxes_pos, gt_boxes_neg], 0)
    total_gt_labels = tf.concat([pos_gt_labels, neg_gt_labels], 0)
    total_rois = tf.concat([positive_rois, neg_rois], 0)
    # get deltas from bboxes (regression targets, scaled by the variances)
    gt_bbox_deltas = bbox_utils.get_deltas_from_bboxes(total_rois, total_gt_bboxes) / variances
    gt_bbox_labels = total_gt_labels
    # Transform to one hot representation (batch_size, num_rois, num_classes)
    gt_bbox_labels = tf.one_hot(gt_bbox_labels, total_labels)
    # re-add the leading batch dimension dropped by gather_nd
    gt_bbox_deltas = tf.expand_dims(gt_bbox_deltas, axis=0)
    gt_bbox_labels = tf.expand_dims(gt_bbox_labels, axis=0)
    total_rois = tf.expand_dims(total_rois, axis=0)
    if self.cfg.MASK_REG:
        # Take only positive rois for mask training and corresponding roi_gt_boxes
        roi_gt_boxes = tf.gather_nd(gt_boxes_map, pos_indices)
        y1, x1, y2, x2 = tf.split(positive_rois, 4, axis=1)
        y1t, x1t, y2t, x2t = tf.split(roi_gt_boxes, 4, axis=1)
        # compute overlap between roi coordinate and gt_roi coordinate
        x1o = tf.maximum(x1, x1t)
        y1o = tf.maximum(y1, y1t)
        x2o = tf.minimum(x2, x2t)
        y2o = tf.minimum(y2, y2t)
        if positive_count != 0:
            # Calculate labels in original mask -> gt_masks=(batch_size, num_masks, img_height, img_width)
            original_affordance_labels = tf.unique(tf.reshape(gt_masks, [-1]))
            original_affordance_labels = tf.sort(original_affordance_labels.y)
            # filter indices of gt boxes
            indices_pos_gt_boxes = tf.boolean_mask(max_indices_each_gt_box, pos_mask)
            # mask associated wrt to gt bbox (batch_size, positive_rois, mask_size, mask_size)
            gt_mask = tf.gather(gt_masks, indices_pos_gt_boxes, axis=1)
            gt_mask = tf.cast(tf.expand_dims(gt_mask, axis=4), tf.float32)
            y1o = tf.squeeze(y1o, axis=1)
            x1o = tf.squeeze(x1o, axis=1)
            y2o = tf.squeeze(y2o, axis=1)
            x2o = tf.squeeze(x2o, axis=1)
            # create boxes to crop and indexes where each mask has its own box
            boxes = tf.cast(tf.stack([y1o, x1o, y2o, x2o], axis=1), tf.float32)
            # remove batch dim -> needed for crop and resize op
            img_shape = tf.squeeze(img_shape, axis=0)
            gt_mask = tf.squeeze(gt_mask, axis=0)
            # crop and resize the masks individually
            positive_masks = self._crop_and_resize_masks_no_resize(img_shape, gt_mask, boxes, positive_rois,
                                                                   positive_count, original_affordance_labels)
            # Add batch dim
            positive_masks = tf.expand_dims(positive_masks, axis=0)
            positive_rois = tf.expand_dims(positive_rois, axis=0)
            masks = positive_masks
        else:
            # no positive rois -> emit an empty mask tensor placeholder
            positive_rois = tf.expand_dims(positive_rois, axis=0)
            masks = tf.constant(0, dtype=tf.int32, shape=[1, 0, self.cfg.TRAIN_MASK_SIZE, self.cfg.TRAIN_MASK_SIZE])
        return total_rois, tf.stop_gradient(gt_bbox_deltas), tf.stop_gradient(gt_bbox_labels), tf.stop_gradient(masks), \
               tf.stop_gradient(positive_rois)
    return tf.stop_gradient(gt_bbox_deltas), tf.stop_gradient(gt_bbox_labels)
def _crop_and_resize_masks_no_resize(self, img_shape, masks, overlapping_boxes, rois, positive_count, original_aff_labels):
# denormalize bboxes
overlapping_boxes = tf.cast(bbox_utils.denormalize_bboxes(overlapping_boxes, img_shape[0], img_shape[1]), tf.int32)
rois = tf.cast(bbox_utils.denormalize_bboxes(rois, img_shape[0], img_shape[1]), tf.int32)
num_masks = tf.shape(masks)[0]
final_masks = tf.zeros((num_masks, self.cfg.TRAIN_MASK_SIZE, self.cfg.TRAIN_MASK_SIZE))
for i in range(num_masks):
mask = masks[i]
# get roi and overlap area coordinates
y1, x1, y2, x2 = tf.split(rois[i], 4, axis=0)
y1, x1, y2, x2 = tf.squeeze(y1), tf.squeeze(x1), tf.squeeze(y2), tf.squeeze(x2)
y1o, x1o, y2o, x2o | |
self.nb_steps = nb_steps
p_f = 1 / (365 * (24 / time_ss) * time_r)
beta = stats.norm.ppf((1 - p_f), loc=0, scale=1) # Reliability
theta = np.linspace(0, 2 * np.pi, num = nb_steps)
# Vary U1, U2 along circle sqrt(U1^2+U2^2)=beta
U1 = beta * np.cos(theta)
U2 = beta * np.sin(theta)
comp_1 = stats.exponweib.ppf(stats.norm.cdf(U1),a=self.para_dist_1[0],c=self.para_dist_1[1],loc=self.para_dist_1[2],scale=self.para_dist_1[3])
tau = stats.kendalltau(self.buoy.T,self.buoy.Hs)[0] # Calculate Kendall's tau
rho_gau=np.sin(tau*np.pi/2.)
z2_Gau=stats.norm.cdf(U2*np.sqrt(1.-rho_gau**2.)+rho_gau*U1);
comp_2_Gaussian = stats.lognorm.ppf(z2_Gau,s=self.para_dist_2[1],loc=0,scale=np.exp(self.para_dist_2[0])) #lognormalinverse
Hs_Return = comp_1
T_Return = comp_2_Gaussian
self.Hs_ReturnContours = Hs_Return
self.T_ReturnContours = T_Return
return Hs_Return, T_Return
def getSamples(self):
    """Sampling is not supported by this contour method in this version."""
    raise NotImplementedError
def _saveParams(self, groupObj):
    """Write the fitted copula parameters into the given HDF5 group."""
    # one dataset per fitted parameter, named after the attribute
    for attr in ('n_size', 'bin_1_limit', 'bin_step', 'para_dist_1',
                 'para_dist_2', 'mean_cond', 'std_cond'):
        groupObj.create_dataset(attr, data=getattr(self, attr))
class Rosenblatt(EA):
    '''Create a Rosenblatt EA class for a buoy object. Contours generated
    under this class will use a Rosenblatt transformation and the I-FORM.'''
    def __init__(self, buoy, n_size=50., bin_1_limit=.5, bin_step=0.25):
        '''
        Parameters
        ----------
        buoy : NDBCData
            ESSC.Buoy Object
        n_size: float
            minimum bin size used for Copula contour methods
        bin_1_limit: float
            maximum value of Hs for the first bin
        bin_step: float
            overlap interval for each bin
        '''
        self.method = "Rosenblatt"
        self.buoy = buoy
        # Cap the user-supplied binning parameters at buoy-dependent limits,
        # warning the user whenever a value had to be reduced.
        if n_size > 100:
            self.n_size = 100
            print(100, 'is the maximum "minimum bin size" for this buoy. The minimum bin size has been set to this amount.')
        else:
            self.n_size = n_size
        if bin_step > max(buoy.Hs) * .1:
            self.bin_step = max(buoy.Hs) * .1
            print(round(max(buoy.Hs) * .1, 2), 'is the maximum bin overlap for this buoy. The bin overlap has been set to this amount.')
        else:
            self.bin_step = bin_step
        if bin_1_limit > max(buoy.Hs) * .25:
            self.bin_1_limit = max(buoy.Hs) * .25
            # message fixed: previously read "for the first for this buoy" (missing "bin")
            print(round(max(buoy.Hs) * .25, 2), 'is the maximum limit for the first bin for this buoy. The first bin limit has been set to this amount.')
        else:
            self.bin_1_limit = bin_1_limit
        # Contour results; populated by getContours().
        self.Hs_ReturnContours = None
        self.T_ReturnContours = None
        # Fit the marginal/conditional distribution parameters once, up front.
        self.para_dist_1, self.para_dist_2, self.mean_cond, self.std_cond = \
            self._EA__getCopulaParams(self.n_size, self.bin_1_limit, self.bin_step)
    def getContours(self, time_ss, time_r, nb_steps=1000):
        '''WDRT Extreme Sea State Rosenblatt Copula Contour function.
        This function calculates environmental contours of extreme sea states
        using a Rosenblatt transformation and the inverse first-order
        reliability method.
        Parameters
        ----------
        time_ss : float
            Sea state duration (hours) of measurements in input.
        time_r : np.array
            Desired return period (years) for calculation of environmental
            contour, can be a scalar or a vector.
        nb_steps : float
            Discretization of the circle in the normal space used for
            inverse FORM calculation.
        Returns
        -------
        Hs_Return : np.array
            Calculated Hs values along the contour boundary following
            return to original input orientation.
        T_Return : np.array
            Calculated T values along the contour boundary following
            return to original input orientation.
        Example
        -------
        To obtain the contours for a NDBC buoy::
            import WDRT.ESSC as ESSC
            # Pull spectral data from NDBC website
            buoy46022 = ESSC.Buoy('46022','NDBC')
            buoy46022.fetchFromWeb()
            # Create Environmental Analysis object using above parameters
            Rosen46022 = ESSC.Rosenblatt(buoy46022)
            # Declare required parameters
            Time_SS = 1.  # Sea state duration (hrs)
            Time_r = 100  # Return periods (yrs) of interest
            nb_steps = 1000  # Discretization of the circle in the normal space (optional)
            # Rosenblatt contour generation example
            Hs_Return, T_Return = Rosen46022.getContours(Time_SS, Time_r, nb_steps)
        '''
        self.time_ss = time_ss
        self.time_r = time_r
        self.nb_steps = nb_steps
        # Probability of failure for the requested return period
        p_f = 1 / (365 * (24 / time_ss) * time_r)
        beta = stats.norm.ppf((1 - p_f), loc=0, scale=1)  # Reliability index
        theta = np.linspace(0, 2 * np.pi, num=nb_steps)
        # Vary U1, U2 along circle sqrt(U1^2+U2^2)=beta
        U1 = beta * np.cos(theta)
        U2 = beta * np.sin(theta)
        # Component 1: Hs from the fitted exponentiated-Weibull marginal
        comp_1 = stats.exponweib.ppf(stats.norm.cdf(U1), a=self.para_dist_1[0], c=self.para_dist_1[1], loc=self.para_dist_1[2], scale=self.para_dist_1[3])
        # Rosenblatt step: conditional lognormal parameters of T given Hs
        lamda_cond = self.mean_cond[0] + self.mean_cond[1] * comp_1 + self.mean_cond[2] * comp_1**2 + self.mean_cond[3] * comp_1**3  # mean of Ln(T) as a function of Hs
        sigma_cond = self.std_cond[0] + self.std_cond[1] * comp_1 + self.std_cond[2] * comp_1**2  # standard deviation of Ln(T) as a function of Hs
        comp_2_Rosenblatt = stats.lognorm.ppf(stats.norm.cdf(U2), s=sigma_cond, loc=0, scale=np.exp(lamda_cond))  # lognormal inverse
        Hs_Return = comp_1
        T_Return = comp_2_Rosenblatt
        self.Hs_ReturnContours = Hs_Return
        self.T_ReturnContours = T_Return
        return Hs_Return, T_Return
    def getSamples(self):
        '''Currently not implemented in this version.'''
        raise NotImplementedError
    def _saveParams(self, groupObj):
        '''Write the fitted copula parameters into the given HDF5 group.'''
        for attr in ('n_size', 'bin_1_limit', 'bin_step', 'para_dist_1',
                     'para_dist_2', 'mean_cond', 'std_cond'):
            groupObj.create_dataset(attr, data=getattr(self, attr))
class ClaytonCopula(EA):
    '''Create a ClaytonCopula EA class for a buoy object. Contours generated
    under this class will use a Clayton copula.'''
    def __init__(self, buoy, n_size=40., bin_1_limit=1., bin_step=0.25):
        '''
        Parameters
        ----------
        buoy : NDBCData
            ESSC.Buoy Object
        n_size: float
            minimum bin size used for Copula contour methods
        bin_1_limit: float
            maximum value of Hs for the first bin
        bin_step: float
            overlap interval for each bin
        '''
        self.method = "Clayton Copula"
        self.buoy = buoy
        self.n_size = n_size
        self.bin_1_limit = bin_1_limit
        self.bin_step = bin_step
        # Contour results; populated by getContours()
        self.Hs_ReturnContours = None
        self.T_ReturnContours = None
        # Fit marginal and conditional distribution parameters up front.
        (self.para_dist_1, self.para_dist_2,
         self.mean_cond, self.std_cond) = self._EA__getCopulaParams(
            n_size, bin_1_limit, bin_step)
    def getContours(self, time_ss, time_r, nb_steps=1000):
        '''WDRT Extreme Sea State Clayton Copula Contour function.
        Calculates environmental contours of extreme sea states using a
        Clayton copula and the inverse first-order reliability method.
        Parameters
        ----------
        time_ss : float
            Sea state duration (hours) of measurements in input.
        time_r : np.array
            Desired return period (years) for calculation of environmental
            contour, can be a scalar or a vector.
        nb_steps : float
            Discretization of the circle in the normal space used for
            inverse FORM calculation.
        Returns
        -------
        Hs_Return : np.array
            Calculated Hs values along the contour boundary following
            return to original input orientation.
        T_Return : np.array
            Calculated T values along the contour boundary following
            return to original input orientation.
        Example
        -------
        To obtain the contours for a NDBC buoy::
            import WDRT.ESSC as ESSC
            # Pull spectral data from NDBC website
            buoy46022 = ESSC.Buoy('46022','NDBC')
            buoy46022.fetchFromWeb()
            # Create Environmental Analysis object using above parameters
            Clayton46022 = ESSC.ClaytonCopula(buoy46022)
            # Declare required parameters
            Time_SS = 1.  # Sea state duration (hrs)
            Time_r = 100  # Return periods (yrs) of interest
            nb_steps = 1000  # Discretization of the circle in the normal space (optional)
            # Clayton copula contour generation example
            Hs_Return, T_Return = Clayton46022.getContours(Time_SS, Time_r, nb_steps)
        '''
        self.time_ss = time_ss
        self.time_r = time_r
        self.nb_steps = nb_steps
        # Exceedance probability for the requested return period
        p_f = 1 / (365 * (24 / time_ss) * time_r)
        beta = stats.norm.ppf((1 - p_f), loc=0, scale=1)  # reliability index
        # Walk around the circle of radius beta in standard normal space
        angles = np.linspace(0, 2 * np.pi, num=nb_steps)
        U1 = beta * np.cos(angles)
        U2 = beta * np.sin(angles)
        # Component 1: Hs from the fitted exponentiated-Weibull marginal
        hs_contour = stats.exponweib.ppf(
            stats.norm.cdf(U1), a=self.para_dist_1[0], c=self.para_dist_1[1],
            loc=self.para_dist_1[2], scale=self.para_dist_1[3])
        # Clayton copula parameter derived from Kendall's tau
        tau = stats.kendalltau(self.buoy.T, self.buoy.Hs)[0]
        theta_clay = (2. * tau) / (1. - tau)
        # Conditional CDF value of the second component under the copula
        z2_Clay = ((1. - stats.norm.cdf(U1)**(-theta_clay)
                    + stats.norm.cdf(U1)**(-theta_clay) / stats.norm.cdf(U2))
                   ** (theta_clay / (1. + theta_clay))) ** (-1. / theta_clay)
        # Component 2: T from the lognormal marginal, via the copula CDF
        t_contour = stats.lognorm.ppf(
            z2_Clay, s=self.para_dist_2[1], loc=0,
            scale=np.exp(self.para_dist_2[0]))  # lognormal inverse
        self.Hs_ReturnContours = hs_contour
        self.T_ReturnContours = t_contour
        return hs_contour, t_contour
    def getSamples(self):
        '''Currently not implemented in this version.'''
        raise NotImplementedError
    def _saveParams(self, groupObj):
        '''Write the fitted copula parameters into the given HDF5 group.'''
        for attr in ('n_size', 'bin_1_limit', 'bin_step', 'para_dist_1',
                     'para_dist_2', 'mean_cond', 'std_cond'):
            groupObj.create_dataset(attr, data=getattr(self, attr))
class GumbelCopula(EA):
'''Create a GumbelCopula EA class for a buoy object. Contours generated
under this class will use a Gumbel copula.'''
def __init__(self, buoy, n_size=40., bin_1_limit=1., bin_step=0.25, Ndata=1000):
    '''
    Parameters
    ----------
    buoy : NDBCData
        ESSC.Buoy Object
    n_size: float
        minimum bin size used for Copula contour methods
    bin_1_limit: float
        maximum value of Hs for the first bin
    bin_step: float
        overlap interval for each bin
    Ndata: int
        discretization used in the Gumbel copula density estimation,
        must be less than the number of contour points used in
        getContours
    '''
    self.method = "Gumbel Copula"
    self.buoy = buoy
    self.n_size = n_size
    self.bin_1_limit = bin_1_limit
    self.bin_step = bin_step
    # Contour results; populated by getContours()
    self.Hs_ReturnContours = None
    self.T_ReturnContours = None
    self.Ndata = Ndata
    # Integration limits for the T marginal used in the density estimate
    self.min_limit_2 = 0.
    self.max_limit_2 = np.ceil(np.amax(self.buoy.T) * 2)
    # Fit marginal and conditional distribution parameters up front.
    (self.para_dist_1, self.para_dist_2,
     self.mean_cond, self.std_cond) = self._EA__getCopulaParams(
        n_size, bin_1_limit, bin_step)
def getContours(self, time_ss, time_r, nb_steps = 1000):
'''WDRT Extreme Sea State Gumbel Copula Contour function
This function calculates environmental contours of extreme sea states using
a Gumbel copula and the inverse first-order reliability
method.
Parameters
___________
time_ss : float
Sea state duration (hours) of measurements in | |
<filename>tests/test_entity/test_entity_profile.py
"""Test entity profile."""
import os
import shutil
import unittest
from pathlib import Path
import emmental
import numpy as np
import torch
import ujson
from pydantic import ValidationError
from bootleg.run import run_model
from bootleg.symbols.entity_profile import EntityProfile
from bootleg.utils.parser import parser_utils
class EntityProfileTest(unittest.TestCase):
"""Entity profile test."""
def setUp(self) -> None:
    """Create the scratch directories and file paths used by every test."""
    base = Path("tests/data/entity_profile_test")
    self.dir = base
    self.save_dir = base / "entity_db_save"
    self.save_dir2 = base / "entity_db_save2"
    self.profile_file = base / "raw_data/entity_profile.jsonl"
    self.data_dir = base / "data"
    self.train_data = self.data_dir / "train.jsonl"
    self.arg_file = base / "args.json"
    # Materialize every directory the tests write into.
    for directory in (self.save_dir, self.save_dir2,
                      self.profile_file.parent, self.train_data.parent):
        directory.mkdir(exist_ok=True, parents=True)
def tearDown(self) -> None:
    """Remove everything setUp created, ignoring races with missing files."""
    if self.dir.exists():
        shutil.rmtree(self.dir, ignore_errors=True)
def write_data(self, file, data):
    """Dump records to *file* in JSON-lines format (one object per line)."""
    with open(file, "w") as out_f:
        out_f.writelines(ujson.dumps(d) + "\n" for d in data)
def test_profile_load_simple(self):
    """Test profile load simple.

    Parses a small profile with _read_profile_file and checks every
    derived mapping, round-trips it through save_to_jsonl, then checks
    that duplicate entity ids and malformed relations are rejected.
    """
    data = [
        {
            "entity_id": "Q123",
            "mentions": [["dog", 10.0], ["dogg", 7.0], ["animal", 4.0]],
            "title": "Dog",
            "description": "Dog",
            "types": {"hyena": ["animal"], "wiki": ["dog"]},
            "relations": [
                {"relation": "sibling", "object": "Q345"},
                {"relation": "sibling", "object": "Q567"},
            ],
        },
        {
            "entity_id": "Q345",
            "mentions": [["cat", 10.0], ["catt", 7.0], ["animal", 3.0]],
            "title": "Cat",
            "description": "Cat",
            "types": {"hyena": ["animal"], "wiki": ["cat"]},
            "relations": [{"relation": "sibling", "object": "Q123"}],
        },
        # Missing type system
        {
            "entity_id": "Q567",
            "mentions": [["catt", 6.5], ["animal", 3.3]],
            "title": "Catt",
            "description": "Catt",
            "types": {"hyena": ["animal", "animall"]},
            "relations": [{"relation": "sibling", "object": "Q123"}],
        },
        # No KG/Types
        {
            "entity_id": "Q789",
            "mentions": [["animal", 12.2]],
            "title": "Dogg",
        },
    ]
    self.write_data(self.profile_file, data)
    gold_qid2title = {"Q123": "Dog", "Q345": "Cat", "Q567": "Catt", "Q789": "Dogg"}
    # A missing description defaults to the empty string
    gold_qid2desc = {"Q123": "Dog", "Q345": "Cat", "Q567": "Catt", "Q789": ""}
    # Candidates per alias are sorted by descending mention score
    gold_alias2qids = {
        "dog": [["Q123", 10.0]],
        "dogg": [["Q123", 7.0]],
        "cat": [["Q345", 10.0]],
        "catt": [["Q345", 7.0], ["Q567", 6.5]],
        "animal": [["Q789", 12.2], ["Q123", 4.0], ["Q567", 3.3], ["Q345", 3.0]],
    }
    # Every qid appears in every type system; missing entries become []
    gold_type_systems = {
        "hyena": {
            "Q123": ["animal"],
            "Q345": ["animal"],
            "Q567": ["animal", "animall"],
            "Q789": [],
        },
        "wiki": {"Q123": ["dog"], "Q345": ["cat"], "Q567": [], "Q789": []},
    }
    gold_qid2relations = {
        "Q123": {"sibling": ["Q345", "Q567"]},
        "Q345": {"sibling": ["Q123"]},
        "Q567": {"sibling": ["Q123"]},
    }
    (
        qid2title,
        qid2desc,
        alias2qids,
        type_systems,
        qid2relations,
    ) = EntityProfile._read_profile_file(self.profile_file)
    self.assertDictEqual(gold_qid2title, qid2title)
    self.assertDictEqual(gold_qid2desc, qid2desc)
    self.assertDictEqual(gold_alias2qids, alias2qids)
    self.assertDictEqual(gold_type_systems, type_systems)
    self.assertDictEqual(gold_qid2relations, qid2relations)
    # Test loading/saving from jsonl: saved rows must match the input
    # rows regardless of ordering.
    ep = EntityProfile.load_from_jsonl(self.profile_file, edit_mode=True)
    ep.save_to_jsonl(self.profile_file)
    read_in_data = [ujson.loads(li) for li in open(self.profile_file)]
    assert len(read_in_data) == len(data)
    for qid_obj in data:
        found_other_obj = None
        for possible_match in read_in_data:
            if qid_obj["entity_id"] == possible_match["entity_id"]:
                found_other_obj = possible_match
                break
        assert found_other_obj is not None
        self.assertDictEqual(qid_obj, found_other_obj)
    # A duplicated entity_id must raise
    data = [
        {
            "entity_id": "Q123",
            "mentions": [["dog", 10.0], ["dogg", 7.0], ["animal", 4.0]],
            "title": "Dog",
            "description": "Dog",
            "types": {"hyena": ["animal"], "wiki": ["dog"]},
            "relations": [
                {"relation": "sibling", "object": "Q345"},
                {"relation": "sibling", "object": "Q567"},
            ],
        },
        # Extra QID
        {
            "entity_id": "Q123",
            "mentions": [["cat", 10.0], ["catt", 7.0], ["animal", 3.0]],
            "title": "Cat",
            "description": "Cat",
            "types": {"hyena": ["animal"], "wiki": ["cat"]},
            "relations": [{"relation": "sibling", "object": "Q123"}],
        },
    ]
    self.write_data(self.profile_file, data)
    with self.assertRaises(ValueError) as context:
        EntityProfile._read_profile_file(self.profile_file)
    assert type(context.exception) is ValueError
    assert "is already in our dump" in str(context.exception)
    data = [
        # Relation in wrong format
        {
            "entity_id": "Q123",
            "mentions": [["dog", 10.0], ["dogg", 7.0], ["animal", 4.0]],
            "title": "Dog",
            "description": "Dog",
            "types": {"hyena": ["animal"], "wiki": ["dog"]},
            "relations": [
                {"relationnn": "sibling", "objject": "Q345"},
            ],
        }
    ]
    self.write_data(self.profile_file, data)
    with self.assertRaises(ValueError) as context:
        EntityProfile._read_profile_file(self.profile_file)
    assert type(context.exception) is ValueError
    assert "it must be a JSON with keys relation and object" in str(
        context.exception
    )
def test_profile_load_jsonl_errors(self):
    """Rows that violate the profile schema must fail pydantic validation."""
    # entity_id has the wrong type (int instead of str) and mentions
    # lack scores -- reading the file should raise ValidationError.
    bad_row = {
        "entity_id": 123,
        "mentions": [["dog"], ["dogg"], ["animal"]],
        "title": "Dog",
        "description": "Dog",
        "types": {"hyena": ["animal"], "wiki": ["dog"]},
        "relations": [
            {"relation": "sibling", "object": "Q345"},
            {"relation": "sibling", "object": "Q567"},
        ],
    }
    self.write_data(self.profile_file, [bad_row])
    with self.assertRaises(ValidationError) as context:
        EntityProfile._read_profile_file(self.profile_file)
    assert type(context.exception) is ValidationError
def test_profile_dump_load(self):
    """Test profile load from dump.

    Saves a profile to the binary cache format and reloads it with
    different type/KG filters, checking each view stays consistent.
    """
    data = [
        {
            "entity_id": "Q123",
            "mentions": [["dog", 10.0], ["dogg", 7.0], ["animal", 4.0]],
            "title": "Dog",
            "description": "Dog",
            "types": {"hyena": ["animal"], "wiki": ["dog"]},
            "relations": [
                {"relation": "sibling", "object": "Q345"},
                {"relation": "sibling", "object": "Q567"},
            ],
        },
        {
            "entity_id": "Q345",
            "mentions": [["cat", 10.0], ["catt", 7.0], ["animal", 3.0]],
            "title": "Cat",
            "types": {"hyena": ["animal"], "wiki": ["cat"]},
            "relations": [{"relation": "sibling", "object": "Q123"}],
        },
    ]
    self.write_data(self.profile_file, data)
    entity_profile = EntityProfile.load_from_jsonl(
        self.profile_file, max_candidates=5, edit_mode=True
    )
    entity_profile.save(self.save_dir2)
    # Test load correctly: the cached copy must expose the same qids,
    # type systems, types and relations as the original.
    entity_profile2 = EntityProfile.load_from_cache(self.save_dir2)
    self.assertSetEqual(
        set(entity_profile.get_all_qids()), set(entity_profile2.get_all_qids())
    )
    self.assertSetEqual(
        set(entity_profile.get_all_typesystems()),
        set(entity_profile2.get_all_typesystems()),
    )
    for type_sys in entity_profile.get_all_typesystems():
        self.assertSetEqual(
            set(entity_profile.get_all_types(type_sys)),
            set(entity_profile2.get_all_types(type_sys)),
        )
    for qid in entity_profile.get_all_qids():
        self.assertDictEqual(
            entity_profile.get_relations_tails_for_qid(qid),
            entity_profile2.get_relations_tails_for_qid(qid),
        )
    # Test load with no types or kgs
    entity_profile2 = EntityProfile.load_from_cache(
        self.save_dir2, no_type=True, no_kg=True
    )
    self.assertSetEqual(
        set(entity_profile.get_all_qids()), set(entity_profile2.get_all_qids())
    )
    assert len(entity_profile2.get_all_typesystems()) == 0
    self.assertIsNone(entity_profile2._kg_symbols)
    # Testing that the functions still work despite not loading them
    assert entity_profile2.get_relations_tails_for_qid("Q123") is None
    # Test load with only the "wiki" type system and no KG
    entity_profile2 = EntityProfile.load_from_cache(
        self.save_dir2, no_kg=True, type_systems_to_load=["wiki"]
    )
    self.assertSetEqual(
        set(entity_profile.get_all_qids()), set(entity_profile2.get_all_qids())
    )
    assert entity_profile2.get_all_typesystems() == ["wiki"]
    self.assertSetEqual(
        set(entity_profile.get_all_types("wiki")),
        set(entity_profile2.get_all_types("wiki")),
    )
    self.assertIsNone(entity_profile2._kg_symbols)
    # Assert error loading type system that is not there
    with self.assertRaises(ValueError) as context:
        entity_profile2.get_all_types("hyena")
    assert type(context.exception) is ValueError
    assert "type system hyena is not one" in str(context.exception)
    def test_checks(self):
        """Test checks.

        Verifies that profile-mutating methods are unavailable outside edit
        mode (AttributeError) and that, in edit mode, unknown QIDs and
        unknown type systems are rejected with informative ValueErrors.
        """
        # Two entities; Q123 additionally references Q567, which has no
        # entry of its own in the dump.
        data = [
            {
                "entity_id": "Q123",
                "mentions": [["dog", 10.0], ["dogg", 7.0], ["animal", 4.0]],
                "title": "Dog",
                "description": "Dog",
                "types": {"hyena": ["animal"], "wiki": ["dog"]},
                "relations": [
                    {"relation": "sibling", "object": "Q345"},
                    {"relation": "sibling", "object": "Q567"},
                ],
            },
            {
                "entity_id": "Q345",
                "mentions": [["cat", 10.0], ["catt", 7.0], ["animal", 3.0]],
                "title": "Cat",
                "types": {"hyena": ["animal"], "wiki": ["cat"]},
                "relations": [{"relation": "sibling", "object": "Q123"}],
            },
        ]
        self.write_data(self.profile_file, data)
        # Loaded without edit_mode: mutators must not exist on the profile.
        entity_profile = EntityProfile.load_from_jsonl(
            self.profile_file, max_candidates=5
        )
        with self.assertRaises(AttributeError) as context:
            entity_profile.add_relation("Q345", "sibling", "Q123")
        assert type(context.exception) is AttributeError
        # Reload in edit mode: mutators exist but validate their inputs.
        entity_profile = EntityProfile.load_from_jsonl(
            self.profile_file, max_candidates=5, edit_mode=True
        )
        # Unknown head QID, positional form.
        with self.assertRaises(ValueError) as context:
            entity_profile.add_relation("Q789", "sibling", "Q123")
        assert type(context.exception) is ValueError
        assert "is not in our dump" in str(context.exception)
        # Unknown head QID, keyword form.
        with self.assertRaises(ValueError) as context:
            entity_profile.add_relation(qid="Q789", relation="sibling", qid2="Q123")
        assert type(context.exception) is ValueError
        assert "is not in our dump" in str(context.exception)
        # Unknown type system on add...
        with self.assertRaises(ValueError) as context:
            entity_profile.add_type(qid="Q345", type="sibling", type_system="blah")
        assert type(context.exception) is ValueError
        assert "type system blah is not one" in str(context.exception)
        # ...and on lookup.
        with self.assertRaises(ValueError) as context:
            entity_profile.get_types(qid="Q345", type_system="blah")
        assert type(context.exception) is ValueError
        assert "type system blah is not one" in str(context.exception)
    def test_getters(self):
        """Test getters.

        Exercises the read-only accessors: eid lookup, mention existence,
        candidate lists, mention/type lookups, entity counts, and pairwise
        relations.
        """
        data = [
            {
                "entity_id": "Q123",
                "mentions": [["dog", 10.0], ["dogg", 7.0], ["animal", 4.0]],
                "title": "Dog",
                "description": "Dog",
                "types": {"hyena": ["animal"], "wiki": ["dog"]},
                "relations": [
                    {"relation": "sibling", "object": "Q345"},
                    {"relation": "sibling", "object": "Q567"},
                ],
            },
            {
                "entity_id": "Q345",
                "mentions": [["cat", 10.0], ["catt", 7.0], ["animal", 3.0]],
                "title": "Cat",
                "types": {"hyena": ["animal"], "wiki": ["cat"]},
                # Two distinct relations toward the same tail entity.
                "relations": [
                    {"relation": "sibling", "object": "Q123"},
                    {"relation": "sibbbling", "object": "Q123"},
                ],
            },
        ]
        self.write_data(self.profile_file, data)
        entity_profile = EntityProfile.load_from_jsonl(
            self.profile_file, max_candidates=3, edit_mode=True
        )
        # Q345 is the second entity in the dump, so its eid is 2.
        self.assertEqual(entity_profile.get_eid("Q345"), 2)
        self.assertTrue(entity_profile.mention_exists("cat"))
        self.assertFalse(entity_profile.mention_exists("dat"))
        # "cat" has a single candidate; the count variant includes the score.
        self.assertListEqual(entity_profile.get_qid_cands("cat"), ["Q345"])
        self.assertListEqual(
            entity_profile.get_qid_count_cands("cat"), [["Q345", 10.0]]
        )
        self.assertSetEqual(
            set(entity_profile.get_all_mentions()),
            {"dog", "dogg", "animal", "cat", "catt"},
        )
        self.assertSetEqual(
            set(entity_profile.get_mentions("Q345")), {"animal", "cat", "catt"}
        )
        self.assertSetEqual(
            set(entity_profile.get_entities_of_type("cat", "wiki")), {"Q345"}
        )
        # 2 real entities + 2 extra rows (presumably PAD and the
        # no-candidate sentinel) -- confirm against EntitySymbols if changed.
        self.assertEqual(entity_profile.num_entities_with_pad_and_nocand, 4)
        # Both relations between Q345 and Q123 are returned as a set.
        self.assertSetEqual(
            entity_profile.get_relations_between("Q345", "Q123"),
            {"sibling", "sibbbling"},
        )
def test_add_entity(self):
"""Test add entity."""
data = [
{
"entity_id": "Q123",
"mentions": [["dog", 10.0], ["dogg", 7.0], ["animal", 4.0]],
"title": "Dog",
"description": "Dog",
"types": {"hyena": ["animal"], "wiki": ["dog"]},
"relations": [
{"relation": "sibling", "object": "Q345"},
{"relation": "sibling", "object": "Q567"},
],
},
{
"entity_id": "Q345",
"mentions": [["cat", 10.0], ["catt", 7.0], ["animal", 3.0]],
"title": "Cat",
"types": {"hyena": ["animal"], "wiki": ["cat"]},
"relations": [{"relation": "sibling", "object": "Q123"}],
},
]
self.write_data(self.profile_file, data)
entity_profile = EntityProfile.load_from_jsonl(
self.profile_file, max_candidates=3, edit_mode=True
)
entity_profile.save(self.save_dir2)
# Test bad format
with self.assertRaises(ValueError) as context:
entity_profile.add_entity(["bad format"])
assert type(context.exception) is ValueError
assert "The input to update_entity needs to be a dictionary" in str(
context.exception
)
new_entity = {
"entity_id": "Q345",
"mentions": [["cat", 10.0], ["catt", 7.0], ["animal", 3.0]],
"title": "Cat",
"types": {"hyena": ["animal"], "wiki": ["cat"]},
"relations": [{"relation": "sibling", "object": "Q123"}],
}
# Test already existing entity
with self.assertRaises(ValueError) as context:
entity_profile.add_entity(new_entity)
assert type(context.exception) is ValueError
assert "The entity Q345 already exists" in str(context.exception)
new_entity = {
"entity_id": "Q789",
"mentions": [["snake", 10.0], ["animal", 3.0]],
"title": "Snake",
"description": "Snake",
"types": {"hyena": ["animal"], "new_sys": ["snakey"]},
"relations": [{"relation": "sibling", "object": "Q123"}],
}
# Test can't update qid not in dump
with self.assertRaises(ValueError) as context:
entity_profile.update_entity(new_entity)
assert type(context.exception) is ValueError
assert "The entity Q789 is not in our dump" in str(context.exception)
# Test new type system
with self.assertRaises(ValueError) as context:
entity_profile.add_entity(new_entity)
assert type(context.exception) is ValueError
assert "When adding a new entity, you must use the same type system" in str(
context.exception
)
new_entity = {
"entity_id": "Q789",
"mentions": [["snake", | |
0.12805900959162*
m.x1567)) + m.x1176 == 1)
# Machine-generated Pyomo model section -- do not hand-edit coefficients.
# Every constraint below has the algebraic form
#     -5 / (1 + 30*exp(-(w1*x1 + w2*x2 + ...))) + out == 1
# i.e. each output variable equals 1 plus a scaled logistic (sigmoid)
# response of a small weighted set of input variables.
# c349-c369: three-input constraints driving outputs m.x1177-m.x1197.
m.c349 = Constraint(expr=-5/(1 + 30*exp((-0.128834434867751*m.x1476) - 0.250018751406355*m.x1499 - 0.1162398726011*
m.x1568)) + m.x1177 == 1)
m.c350 = Constraint(expr=-5/(1 + 30*exp((-0.114914790682709*m.x1477) - 0.239354699729529*m.x1500 - 0.105284214737684*
m.x1569)) + m.x1178 == 1)
m.c351 = Constraint(expr=-5/(1 + 30*exp((-0.10354110581901*m.x1478) - 0.219216520156959*m.x1501 - 0.0957634260323297*
m.x1570)) + m.x1179 == 1)
m.c352 = Constraint(expr=-5/(1 + 30*exp((-0.0936276988184184*m.x1479) - 0.201800056504016*m.x1502 - 0.0879948611001117*
m.x1571)) + m.x1180 == 1)
m.c353 = Constraint(expr=-5/(1 + 30*exp((-0.085144063755875*m.x1480) - 0.1874625074985*m.x1503 - 0.0815175303449007*
m.x1572)) + m.x1181 == 1)
m.c354 = Constraint(expr=-5/(1 + 30*exp((-0.0790995309397815*m.x1481) - 0.176806520624481*m.x1504 - 0.0771182454056805*
m.x1573)) + m.x1182 == 1)
m.c355 = Constraint(expr=-5/(1 + 30*exp((-0.0735467168745587*m.x1482) - 0.16583472910897*m.x1505 - 0.0718623118105709*
m.x1574)) + m.x1183 == 1)
m.c356 = Constraint(expr=-5/(1 + 30*exp((-0.0682565901737813*m.x1483) - 0.156330607969734*m.x1506 - 0.0672363829515427*
m.x1575)) + m.x1184 == 1)
m.c357 = Constraint(expr=-5/(1 + 30*exp((-0.062785674820433*m.x1484) - 0.147789075431544*m.x1507 - 0.0629108867289484*
m.x1576)) + m.x1185 == 1)
m.c358 = Constraint(expr=-5/(1 + 30*exp((-0.0576880920240444*m.x1485) - 0.142722575857049*m.x1508 - 0.0600078010141318*
m.x1577)) + m.x1186 == 1)
m.c359 = Constraint(expr=-5/(1 + 30*exp((-0.052780754025852*m.x1486) - 0.134562336002153*m.x1509 - 0.0569083945572811*
m.x1578)) + m.x1187 == 1)
m.c360 = Constraint(expr=-5/(1 + 30*exp((-0.0486551710715815*m.x1487) - 0.127650340188157*m.x1510 - 0.0539595519198809*
m.x1579)) + m.x1188 == 1)
m.c361 = Constraint(expr=-5/(1 + 30*exp((-0.0448060792888379*m.x1488) - 0.120595258194448*m.x1511 - 0.0508119753663543*
m.x1580)) + m.x1189 == 1)
m.c362 = Constraint(expr=-5/(1 + 30*exp((-0.041430169449393*m.x1489) - 0.114420403446343*m.x1512 - 0.0480850143052918*
m.x1581)) + m.x1190 == 1)
m.c363 = Constraint(expr=-5/(1 + 30*exp((-0.0382078968081123*m.x1490) - 0.108685019943701*m.x1513 - 0.045439467812953*
m.x1582)) + m.x1191 == 1)
m.c364 = Constraint(expr=-5/(1 + 30*exp((-0.0352048216523735*m.x1491) - 0.103142759893969*m.x1514 - 0.0428575714328572*
m.x1583)) + m.x1192 == 1)
m.c365 = Constraint(expr=-5/(1 + 30*exp((-0.0324122842557329*m.x1492) - 0.0979144227944776*m.x1515 - 0.0404882887624755*
m.x1584)) + m.x1193 == 1)
m.c366 = Constraint(expr=-5/(1 + 30*exp((-0.0298043937637286*m.x1493) - 0.0927721238322309*m.x1516 - 0.0382343382591906*
m.x1585)) + m.x1194 == 1)
m.c367 = Constraint(expr=-5/(1 + 30*exp((-0.0274252114483803*m.x1494) - 0.0879221361562201*m.x1517 - 0.0361556428111735*
m.x1586)) + m.x1195 == 1)
m.c368 = Constraint(expr=-5/(1 + 30*exp((-0.0252400327110824*m.x1495) - 0.0834244049754315*m.x1518 - 0.0342328587518015*
m.x1587)) + m.x1196 == 1)
m.c369 = Constraint(expr=-5/(1 + 30*exp((-0.023232743299096*m.x1496) - 0.0791176796366916*m.x1519 - 0.0324368859292033*
m.x1588)) + m.x1197 == 1)
# c370-c392: same weights as the preceding group (c348?-c369), applied to
# outputs m.x1198-m.x1220.
m.c370 = Constraint(expr=-5/(1 + 30*exp((-0.164826108455579*m.x1474) - 0.304878048780488*m.x1497 - 0.142673705236125*
m.x1566)) + m.x1198 == 1)
m.c371 = Constraint(expr=-5/(1 + 30*exp((-0.146901120855552*m.x1475) - 0.265639527161642*m.x1498 - 0.12805900959162*
m.x1567)) + m.x1199 == 1)
m.c372 = Constraint(expr=-5/(1 + 30*exp((-0.128834434867751*m.x1476) - 0.250018751406355*m.x1499 - 0.1162398726011*
m.x1568)) + m.x1200 == 1)
m.c373 = Constraint(expr=-5/(1 + 30*exp((-0.114914790682709*m.x1477) - 0.239354699729529*m.x1500 - 0.105284214737684*
m.x1569)) + m.x1201 == 1)
m.c374 = Constraint(expr=-5/(1 + 30*exp((-0.10354110581901*m.x1478) - 0.219216520156959*m.x1501 - 0.0957634260323297*
m.x1570)) + m.x1202 == 1)
m.c375 = Constraint(expr=-5/(1 + 30*exp((-0.0936276988184184*m.x1479) - 0.201800056504016*m.x1502 - 0.0879948611001117*
m.x1571)) + m.x1203 == 1)
m.c376 = Constraint(expr=-5/(1 + 30*exp((-0.085144063755875*m.x1480) - 0.1874625074985*m.x1503 - 0.0815175303449007*
m.x1572)) + m.x1204 == 1)
m.c377 = Constraint(expr=-5/(1 + 30*exp((-0.0790995309397815*m.x1481) - 0.176806520624481*m.x1504 - 0.0771182454056805*
m.x1573)) + m.x1205 == 1)
m.c378 = Constraint(expr=-5/(1 + 30*exp((-0.0735467168745587*m.x1482) - 0.16583472910897*m.x1505 - 0.0718623118105709*
m.x1574)) + m.x1206 == 1)
m.c379 = Constraint(expr=-5/(1 + 30*exp((-0.0682565901737813*m.x1483) - 0.156330607969734*m.x1506 - 0.0672363829515427*
m.x1575)) + m.x1207 == 1)
m.c380 = Constraint(expr=-5/(1 + 30*exp((-0.062785674820433*m.x1484) - 0.147789075431544*m.x1507 - 0.0629108867289484*
m.x1576)) + m.x1208 == 1)
m.c381 = Constraint(expr=-5/(1 + 30*exp((-0.0576880920240444*m.x1485) - 0.142722575857049*m.x1508 - 0.0600078010141318*
m.x1577)) + m.x1209 == 1)
m.c382 = Constraint(expr=-5/(1 + 30*exp((-0.052780754025852*m.x1486) - 0.134562336002153*m.x1509 - 0.0569083945572811*
m.x1578)) + m.x1210 == 1)
m.c383 = Constraint(expr=-5/(1 + 30*exp((-0.0486551710715815*m.x1487) - 0.127650340188157*m.x1510 - 0.0539595519198809*
m.x1579)) + m.x1211 == 1)
m.c384 = Constraint(expr=-5/(1 + 30*exp((-0.0448060792888379*m.x1488) - 0.120595258194448*m.x1511 - 0.0508119753663543*
m.x1580)) + m.x1212 == 1)
m.c385 = Constraint(expr=-5/(1 + 30*exp((-0.041430169449393*m.x1489) - 0.114420403446343*m.x1512 - 0.0480850143052918*
m.x1581)) + m.x1213 == 1)
m.c386 = Constraint(expr=-5/(1 + 30*exp((-0.0382078968081123*m.x1490) - 0.108685019943701*m.x1513 - 0.045439467812953*
m.x1582)) + m.x1214 == 1)
m.c387 = Constraint(expr=-5/(1 + 30*exp((-0.0352048216523735*m.x1491) - 0.103142759893969*m.x1514 - 0.0428575714328572*
m.x1583)) + m.x1215 == 1)
m.c388 = Constraint(expr=-5/(1 + 30*exp((-0.0324122842557329*m.x1492) - 0.0979144227944776*m.x1515 - 0.0404882887624755*
m.x1584)) + m.x1216 == 1)
m.c389 = Constraint(expr=-5/(1 + 30*exp((-0.0298043937637286*m.x1493) - 0.0927721238322309*m.x1516 - 0.0382343382591906*
m.x1585)) + m.x1217 == 1)
m.c390 = Constraint(expr=-5/(1 + 30*exp((-0.0274252114483803*m.x1494) - 0.0879221361562201*m.x1517 - 0.0361556428111735*
m.x1586)) + m.x1218 == 1)
m.c391 = Constraint(expr=-5/(1 + 30*exp((-0.0252400327110824*m.x1495) - 0.0834244049754315*m.x1518 - 0.0342328587518015*
m.x1587)) + m.x1219 == 1)
m.c392 = Constraint(expr=-5/(1 + 30*exp((-0.023232743299096*m.x1496) - 0.0791176796366916*m.x1519 - 0.0324368859292033*
m.x1588)) + m.x1220 == 1)
# c393-c415: two-input constraints driving outputs m.x1221-m.x1243.
m.c393 = Constraint(expr=-5/(1 + 30*exp((-0.457317073170732*m.x1497) - 0.214010557854187*m.x1566)) + m.x1221 == 1)
m.c394 = Constraint(expr=-5/(1 + 30*exp((-0.398459290742462*m.x1498) - 0.19208851438743*m.x1567)) + m.x1222 == 1)
m.c395 = Constraint(expr=-5/(1 + 30*exp((-0.375028127109533*m.x1499) - 0.174359808901649*m.x1568)) + m.x1223 == 1)
m.c396 = Constraint(expr=-5/(1 + 30*exp((-0.359032049594294*m.x1500) - 0.157926322106527*m.x1569)) + m.x1224 == 1)
m.c397 = Constraint(expr=-5/(1 + 30*exp((-0.328824780235439*m.x1501) - 0.143645139048495*m.x1570)) + m.x1225 == 1)
m.c398 = Constraint(expr=-5/(1 + 30*exp((-0.302700084756024*m.x1502) - 0.131992291650168*m.x1571)) + m.x1226 == 1)
m.c399 = Constraint(expr=-5/(1 + 30*exp((-0.28119376124775*m.x1503) - 0.122276295517351*m.x1572)) + m.x1227 == 1)
m.c400 = Constraint(expr=-5/(1 + 30*exp((-0.265209780936721*m.x1504) - 0.115677368108521*m.x1573)) + m.x1228 == 1)
m.c401 = Constraint(expr=-5/(1 + 30*exp((-0.248752093663455*m.x1505) - 0.107793467715856*m.x1574)) + m.x1229 == 1)
m.c402 = Constraint(expr=-5/(1 + 30*exp((-0.234495911954602*m.x1506) - 0.100854574427314*m.x1575)) + m.x1230 == 1)
m.c403 = Constraint(expr=-5/(1 + 30*exp((-0.221683613147316*m.x1507) - 0.0943663300934227*m.x1576)) + m.x1231 == 1)
m.c404 = Constraint(expr=-5/(1 + 30*exp((-0.214083863785574*m.x1508) - 0.0900117015211978*m.x1577)) + m.x1232 == 1)
m.c405 = Constraint(expr=-5/(1 + 30*exp((-0.20184350400323*m.x1509) - 0.0853625918359217*m.x1578)) + m.x1233 == 1)
m.c406 = Constraint(expr=-5/(1 + 30*exp((-0.191475510282235*m.x1510) - 0.0809393278798213*m.x1579)) + m.x1234 == 1)
m.c407 = Constraint(expr=-5/(1 + 30*exp((-0.180892887291672*m.x1511) - 0.0762179630495315*m.x1580)) + m.x1235 == 1)
m.c408 = Constraint(expr=-5/(1 + 30*exp((-0.171630605169514*m.x1512) - 0.0721275214579376*m.x1581)) + m.x1236 == 1)
m.c409 = Constraint(expr=-5/(1 + 30*exp((-0.163027529915552*m.x1513) - 0.0681592017194295*m.x1582)) + m.x1237 == 1)
m.c410 = Constraint(expr=-5/(1 + 30*exp((-0.154714139840954*m.x1514) - 0.0642863571492858*m.x1583)) + m.x1238 == 1)
m.c411 = Constraint(expr=-5/(1 + 30*exp((-0.146871634191716*m.x1515) - 0.0607324331437132*m.x1584)) + m.x1239 == 1)
m.c412 = Constraint(expr=-5/(1 + 30*exp((-0.139158185748346*m.x1516) - 0.0573515073887859*m.x1585)) + m.x1240 == 1)
m.c413 = Constraint(expr=-5/(1 + 30*exp((-0.13188320423433*m.x1517) - 0.0542334642167603*m.x1586)) + m.x1241 == 1)
m.c414 = Constraint(expr=-5/(1 + 30*exp((-0.125136607463147*m.x1518) - 0.0513492881277023*m.x1587)) + m.x1242 == 1)
m.c415 = Constraint(expr=-5/(1 + 30*exp((-0.118676519455037*m.x1519) - 0.0486553288938049*m.x1588)) + m.x1243 == 1)
# c416-c438: single-input constraints driving outputs m.x1244-m.x1266.
m.c416 = Constraint(expr=-5/(1 + 30*exp(-0.914634146341463*m.x1497)) + m.x1244 == 1)
m.c417 = Constraint(expr=-5/(1 + 30*exp(-0.796918581484925*m.x1498)) + m.x1245 == 1)
m.c418 = Constraint(expr=-5/(1 + 30*exp(-0.750056254219067*m.x1499)) + m.x1246 == 1)
m.c419 = Constraint(expr=-5/(1 + 30*exp(-0.718064099188588*m.x1500)) + m.x1247 == 1)
m.c420 = Constraint(expr=-5/(1 + 30*exp(-0.657649560470877*m.x1501)) + m.x1248 == 1)
m.c421 = Constraint(expr=-5/(1 + 30*exp(-0.605400169512047*m.x1502)) + m.x1249 == 1)
m.c422 = Constraint(expr=-5/(1 + 30*exp(-0.562387522495501*m.x1503)) + m.x1250 == 1)
m.c423 = Constraint(expr=-5/(1 + 30*exp(-0.530419561873442*m.x1504)) + m.x1251 == 1)
m.c424 = Constraint(expr=-5/(1 + 30*exp(-0.49750418732691*m.x1505)) + m.x1252 == 1)
m.c425 = Constraint(expr=-5/(1 + 30*exp(-0.468991823909203*m.x1506)) + m.x1253 == 1)
m.c426 = Constraint(expr=-5/(1 + 30*exp(-0.443367226294632*m.x1507)) + m.x1254 == 1)
m.c427 = Constraint(expr=-5/(1 + 30*exp(-0.428167727571147*m.x1508)) + m.x1255 == 1)
m.c428 = Constraint(expr=-5/(1 + 30*exp(-0.403687008006459*m.x1509)) + m.x1256 == 1)
m.c429 = Constraint(expr=-5/(1 + 30*exp(-0.38295102056447*m.x1510)) + m.x1257 == 1)
m.c430 = Constraint(expr=-5/(1 + 30*exp(-0.361785774583343*m.x1511)) + m.x1258 == 1)
m.c431 = Constraint(expr=-5/(1 + 30*exp(-0.343261210339028*m.x1512)) + m.x1259 == 1)
m.c432 = Constraint(expr=-5/(1 + 30*exp(-0.326055059831103*m.x1513)) + m.x1260 == 1)
m.c433 = Constraint(expr=-5/(1 + 30*exp(-0.309428279681908*m.x1514)) + m.x1261 == 1)
m.c434 = Constraint(expr=-5/(1 + 30*exp(-0.293743268383433*m.x1515)) + m.x1262 == 1)
m.c435 = Constraint(expr=-5/(1 + 30*exp(-0.278316371496693*m.x1516)) + m.x1263 == 1)
m.c436 = Constraint(expr=-5/(1 + 30*exp(-0.26376640846866*m.x1517)) + m.x1264 == 1)
m.c437 = Constraint(expr=-5/(1 + 30*exp(-0.250273214926295*m.x1518)) + m.x1265 == 1)
m.c438 = Constraint(expr=-5/(1 + 30*exp(-0.237353038910075*m.x1519)) + m.x1266 == 1)
# c439-c461: identical weights to c393-c415, applied to outputs
# m.x1267-m.x1289.
m.c439 = Constraint(expr=-5/(1 + 30*exp((-0.457317073170732*m.x1497) - 0.214010557854187*m.x1566)) + m.x1267 == 1)
m.c440 = Constraint(expr=-5/(1 + 30*exp((-0.398459290742462*m.x1498) - 0.19208851438743*m.x1567)) + m.x1268 == 1)
m.c441 = Constraint(expr=-5/(1 + 30*exp((-0.375028127109533*m.x1499) - 0.174359808901649*m.x1568)) + m.x1269 == 1)
m.c442 = Constraint(expr=-5/(1 + 30*exp((-0.359032049594294*m.x1500) - 0.157926322106527*m.x1569)) + m.x1270 == 1)
m.c443 = Constraint(expr=-5/(1 + 30*exp((-0.328824780235439*m.x1501) - 0.143645139048495*m.x1570)) + m.x1271 == 1)
m.c444 = Constraint(expr=-5/(1 + 30*exp((-0.302700084756024*m.x1502) - 0.131992291650168*m.x1571)) + m.x1272 == 1)
m.c445 = Constraint(expr=-5/(1 + 30*exp((-0.28119376124775*m.x1503) - 0.122276295517351*m.x1572)) + m.x1273 == 1)
m.c446 = Constraint(expr=-5/(1 + 30*exp((-0.265209780936721*m.x1504) - 0.115677368108521*m.x1573)) + m.x1274 == 1)
m.c447 = Constraint(expr=-5/(1 + 30*exp((-0.248752093663455*m.x1505) - 0.107793467715856*m.x1574)) + m.x1275 == 1)
m.c448 = Constraint(expr=-5/(1 + 30*exp((-0.234495911954602*m.x1506) - 0.100854574427314*m.x1575)) + m.x1276 == 1)
m.c449 = Constraint(expr=-5/(1 + 30*exp((-0.221683613147316*m.x1507) - 0.0943663300934227*m.x1576)) + m.x1277 == 1)
m.c450 = Constraint(expr=-5/(1 + 30*exp((-0.214083863785574*m.x1508) - 0.0900117015211978*m.x1577)) + m.x1278 == 1)
m.c451 = Constraint(expr=-5/(1 + 30*exp((-0.20184350400323*m.x1509) - 0.0853625918359217*m.x1578)) + m.x1279 == 1)
m.c452 = Constraint(expr=-5/(1 + 30*exp((-0.191475510282235*m.x1510) - 0.0809393278798213*m.x1579)) + m.x1280 == 1)
m.c453 = Constraint(expr=-5/(1 + 30*exp((-0.180892887291672*m.x1511) - 0.0762179630495315*m.x1580)) + m.x1281 == 1)
m.c454 = Constraint(expr=-5/(1 + 30*exp((-0.171630605169514*m.x1512) - 0.0721275214579376*m.x1581)) + m.x1282 == 1)
m.c455 = Constraint(expr=-5/(1 + 30*exp((-0.163027529915552*m.x1513) - 0.0681592017194295*m.x1582)) + m.x1283 == 1)
m.c456 = Constraint(expr=-5/(1 + 30*exp((-0.154714139840954*m.x1514) - 0.0642863571492858*m.x1583)) + m.x1284 == 1)
m.c457 = Constraint(expr=-5/(1 + 30*exp((-0.146871634191716*m.x1515) - 0.0607324331437132*m.x1584)) + m.x1285 == 1)
m.c458 = Constraint(expr=-5/(1 + 30*exp((-0.139158185748346*m.x1516) - 0.0573515073887859*m.x1585)) + m.x1286 == 1)
m.c459 = Constraint(expr=-5/(1 + 30*exp((-0.13188320423433*m.x1517) - 0.0542334642167603*m.x1586)) + m.x1287 == 1)
m.c460 = Constraint(expr=-5/(1 + 30*exp((-0.125136607463147*m.x1518) - 0.0513492881277023*m.x1587)) + m.x1288 == 1)
m.c461 = Constraint(expr=-5/(1 + 30*exp((-0.118676519455037*m.x1519) - 0.0486553288938049*m.x1588)) + m.x1289 == 1)
# c462 onward: four-input constraints driving outputs from m.x1290.
m.c462 = Constraint(expr=-5/(1 + 30*exp((-1.58227848101266*m.x1359) - 0.228658536585366*m.x1497 - 1.14329268292683*
m.x1520 - 0.107005278927094*m.x1566)) + m.x1290 == 1)
m.c463 = Constraint(expr=-5/(1 + 30*exp((-1.00590128755365*m.x1360) - 0.199229645371231*m.x1498 - 0.833148189291269*
m.x1521 - 0.0960442571937149*m.x1567)) + m.x1291 == 1)
m.c464 = Constraint(expr=-5/(1 + 30*exp((-0.682004182958989*m.x1361) - 0.187514063554767*m.x1499 - 0.645050313924486*
m.x1522 - 0.0871799044508247*m.x1568)) + m.x1292 == 1)
m.c465 = Constraint(expr=-5/(1 + 30*exp((-0.439084362742228*m.x1362) - 0.179516024797147*m.x1500 - 0.463020125941474*
m.x1523 - 0.0789631610532633*m.x1569)) + m.x1293 | |
"""
node.py
Contains the base class for Nodes.
"""
import traceback
from logging import Logger
from functools import wraps
from typing import Any, Union, Tuple, Dict, Optional, Type, List, Callable, TypeVar
from .. import NodeBase
from .. import QtGui, QtCore, Signal, Slot, QtWidgets
from ..data.datadict import DataDictBase, MeshgridDataDict
from .. import log
__author__ = '<NAME>'
__license__ = 'MIT'
# TODO: implement a threaded version of Node
R = TypeVar('R', bound="Node")
S = TypeVar('S')
T = TypeVar('T')


def updateOption(optName: Optional[str] = None) -> Callable[[Callable[[R, S], T]], Callable[[R, S], T]]:
    """Decorator for property setters that are handy for user options.

    After the wrapped setter runs, the decorator:

    * pushes the new value to the node's UI (via the matching entry in
      ``optSetters``), if a UI exists and ``optName`` was given;
    * calls ``Node.update`` so the change propagates through the flowchart.

    :param optName: name of the property.
    """
    def deco(func: Callable[[R, S], T]) -> Callable[[R, S], T]:
        @wraps(func)
        def wrapped(self: R, val: S) -> T:
            result = func(self, val)
            if optName is not None and self.ui is not None \
                    and optName in self.ui.optSetters:
                self.ui.optSetters[optName](val)
            self.update(self.signalUpdate)
            return result
        return wrapped
    return deco
U = TypeVar('U', bound="NodeWidget")
V = TypeVar('V')


def updateGuiFromNode(func: Callable[..., V]) -> Callable[..., V]:
    """
    Decorator for UI methods that apply changes coming from the node.

    While the wrapped function runs, the widget's ``_emitGuiChange`` flag is
    lowered so the resulting GUI updates are `not` reported back to the node
    (which would otherwise cause recursive updating: node notifies UI, UI
    notifies node, ...).
    """
    @wraps(func)
    def guarded(self: U, *args: Any, **kwargs: Any) -> V:
        self._emitGuiChange = False
        result = func(self, *args, **kwargs)
        self._emitGuiChange = True
        return result
    return guarded
#: Alias for :func:`updateGuiFromNode`.
updateGuiQuietly = updateGuiFromNode
W = TypeVar('W')


def emitGuiUpdate(signalName: str) -> Callable[[Callable[..., Any]], Callable[..., None]]:
    """
    Decorator for UI functions to emit the signal ``signalName``
    (given as argument to the decorator), with the return value of the
    wrapped function.

    The signal is only emitted if the flag controlled by
    ``updateGuiFromNode`` is not lowered, i.e., if the option change was
    `not` caused by a function decorated with ``updateGuiFromNode``.

    :param signalName: name of the signal to emit.
    """
    def deco(func: Callable[..., Any]) -> Callable[..., None]:
        @wraps(func)
        def emitting(self: W, *args: Any, **kwargs: Any) -> None:
            value = func(self, *args, **kwargs)
            # A widget that never entered updateGuiFromNode has no flag set;
            # default to emitting in that case.
            if getattr(self, '_emitGuiChange', True):
                getattr(self, signalName).emit(value)
        return emitting
    return deco
class Node(NodeBase):
    """Base class of the Node we use for plotter.

    This class inherits from ``pyqtgraph``'s Node, and adds a few additional
    tools, and some defaults:

    * a user-option mechanism (:meth:`setOption`, :meth:`setOptions`) with
      change notification to an optional UI widget;
    * automatic creation and wiring of that UI widget (:meth:`setupUi`);
    * signals that report changes of the incoming data's axes, fields, type,
      structure, and shapes whenever :meth:`process` runs.
    """

    #: Name of the node. used in the flowchart node library.
    nodeName = 'Node'

    #: Default terminals: one input and one output.
    terminals = {
        'dataIn': {'io': 'in'},
        'dataOut': {'io': 'out'},
    }

    #: UI node widget class. If not None, and ``useUi`` is ``True``, an
    #: instance of the widget is created, and signal/slots are connected.
    uiClass: Optional[Type["NodeWidget"]] = None

    #: Whether or not to automatically set up a UI widget.
    useUi = True

    #: Whether the ui should be visible by default
    uiVisibleByDefault = False

    #: A signal to notify the UI of option changes
    #: arguments is a dictionary of options and new values.
    optionChangeNotification = Signal(dict)

    #: signal emitted when available data axes change
    #: emits a the list of names of new axes
    dataAxesChanged = Signal(list)

    #: signal emitted when any available data fields change (dep. and indep.)
    #: emits a the list of names of new axes
    dataFieldsChanged = Signal(list)

    #: signal emitted when data type changes
    dataTypeChanged = Signal(object)

    #: signal emitted when data structure changes (fields, or dtype)
    dataStructureChanged = Signal(object)

    #: signal emitted when data shapes change
    dataShapesChanged = Signal(dict)

    #: when data structure changes, emits (structure, shapes, type)
    newDataStructure = Signal(object, object, object)

    #: developer flag for whether we actually want to raise or use the
    #: logging system
    _raiseExceptions = False

    def __init__(self, name: str):
        """Create a new instance of the Node.

        :param name: name of the instance.
        """
        super().__init__(name, terminals=self.__class__.terminals)

        #: if True, `update` propagates the change through the flowchart
        self.signalUpdate = True

        # Cached properties of the most recently processed data; compared
        # against fresh input in ``process`` to detect and signal changes.
        self.dataAxes: Optional[List[str]] = None
        self.dataDependents: Optional[List[str]] = None
        self.dataType: Optional[Type[DataDictBase]] = None
        self.dataShapes: Optional[Dict[str, Tuple[int, ...]]] = None
        self.dataStructure: Optional[DataDictBase] = None

        if self.useUi and self.__class__.uiClass is not None:
            self.ui: Optional["NodeWidget"] = self.__class__.uiClass(node=self)
            self.setupUi()
        else:
            self.ui = None

    def setupUi(self) -> None:
        """ setting up the UI widget.

        Gets called automatically in the node initialization.
        Automatically connect the UIs methods to signal option values.

        Inheriting classes can use this method to do additional setup of the
        UI widget (like connecting additional signals/slots between node and
        node widget).
        """
        assert self.ui is not None
        self.ui.optionToNode.connect(self.setOption)
        self.ui.allOptionsToNode.connect(self.setOptions)
        self.optionChangeNotification.connect(self.ui.setOptionsFromNode)

    def ctrlWidget(self) -> Union[QtWidgets.QWidget, None]:
        """Returns the node widget, if it exists.
        """
        return self.ui

    def setOption(self, nameAndVal: Tuple[str, Any]) -> None:
        """Set an option.

        name is the name of the property, not the string used for referencing
        (which could in principle be different).

        :param nameAndVal: tuple of option name and new value
        """
        name, val = nameAndVal
        setattr(self, name, val)

    def setOptions(self, opts: Dict[str, Any]) -> None:
        """Set multiple options.

        :param opts: a dictionary of property name : value pairs.
        """
        for opt, val in opts.items():
            setattr(self, opt, val)

    def update(self, signal: bool = True) -> None:
        """Recompute the node; log (or, with ``_raiseExceptions``, re-raise)
        any exception that occurred during processing.

        :param signal: whether to propagate the update through the flowchart.
        """
        super().update(signal=signal)

        if Node._raiseExceptions and self.exception is not None:
            raise self.exception[1]
        elif self.exception is not None:
            # ``self.exception`` is an (exc_type, exc_value, tb) triple.
            e = self.exception
            err = f'EXCEPTION RAISED: {e[0]}: {e[1]}\n'
            for t in traceback.format_tb(e[2]):
                err += f' -> {t}\n'
            self.logger().error(err)

    def logger(self) -> Logger:
        """Get a logger for this node

        :return: logger with a name that can be traced back easily to this
            node.
        """
        name = f"{self.__module__}.{self.__class__.__name__}.{self.name()}"
        logger = log.getLogger(name)
        logger.setLevel(log.LEVEL)
        return logger

    def validateOptions(self, data: DataDictBase) -> bool:
        """Validate the user options

        Does nothing in this base implementation. Can be reimplemented by any
        inheriting class.

        :param data: the data to verify the options against.
        """
        return True

    def process(self, dataIn: Optional[DataDictBase]=None) -> Optional[Dict[str, Optional[DataDictBase]]]:
        """Process incoming data.

        Compares the incoming data's type, axes, dependents, and shapes with
        the values cached from the previous call, emits the corresponding
        change signals, and passes the data through unchanged.

        :param dataIn: incoming data; ``None`` short-circuits processing.
        :return: ``dict(dataOut=dataIn)``, or ``None`` if there is no input
            or :meth:`validateOptions` fails.
        """
        if dataIn is None:
            return None

        if isinstance(dataIn, DataDictBase):
            dtype = type(dataIn)
            daxes = dataIn.axes()
            ddeps = dataIn.dependents()
            dshapes = dataIn.shapes()

            _axesChanged = daxes != self.dataAxes
            _fieldsChanged = _axesChanged or ddeps != self.dataDependents
            _typeChanged = dtype != self.dataType
            _structChanged = _typeChanged or _fieldsChanged
            _shapesChanged = dshapes != self.dataShapes

            # Update the cached values before emitting, so slots querying
            # the node during emission see a consistent state.
            self.dataAxes = daxes
            self.dataDependents = ddeps
            self.dataType = dtype
            self.dataShapes = dshapes
            self.dataStructure = dataIn.structure(add_shape=False)

            if _axesChanged:
                self.dataAxesChanged.emit(daxes)
            if _fieldsChanged:
                self.dataFieldsChanged.emit(daxes + ddeps)
            if _typeChanged:
                self.dataTypeChanged.emit(dtype)
            if _structChanged:
                self.dataStructureChanged.emit(self.dataStructure)
                self.newDataStructure.emit(
                    self.dataStructure, self.dataShapes, self.dataType)
            # A pure shape change is only signalled on its own; a structure
            # change already implies new shapes (see newDataStructure).
            if _shapesChanged and not _structChanged:
                self.dataShapesChanged.emit(dshapes)
        else:
            # Non-DataDictBase input: we can only track the type.
            dtype = type(dataIn)
            # BUG FIX: ``_typeChanged`` was previously assigned only inside
            # ``if dtype != self.dataType``, so an unchanged type raised
            # UnboundLocalError on the check below.
            _typeChanged = dtype != self.dataType
            # NOTE(review): ``self.dataType`` is deliberately left untouched
            # here, matching the original behavior (the signal re-fires on
            # every call with a non-DataDictBase input) -- confirm whether
            # it should be updated instead.
            if _typeChanged:
                self.dataTypeChanged.emit(dtype)

        if not self.validateOptions(dataIn):
            self.logger().debug("Option validation not passed")
            return None

        return dict(dataOut=dataIn)
class NodeWidget(QtWidgets.QWidget):
"""
Base class for Node control widgets.
For the widget class to set up communication with the Node automatically,
make sure to set :attr:`plottr.node.node.NodeWidget.optGetters` and
:attr:`plottr.node.node.NodeWidget.optSetters` for a widget class.
"""
#: icon for this node
icon: Optional[QtGui.QIcon] = None
#: preferred location of the widget when used as dock widget
preferredDockWidgetArea = QtCore.Qt.LeftDockWidgetArea
#: signal (args: object)) to emit to notify the node of a (changed)
#: user option.
optionToNode = Signal(object)
#: signal (args: (object)) all options to the node.
allOptionsToNode = Signal(object)
def __init__(self, parent: Optional[QtWidgets.QWidget] = None,
embedWidgetClass: Optional[Type[QtWidgets.QWidget]] = None,
node: Optional[Node] = None):
super().__init__(parent)
self.optGetters: Dict[str, Any] = {}
self.optSetters: Dict[str, Any] = {}
self.node = node
self._emitGuiChange = True
self.widget: Optional[QtWidgets.QWidget] = None
if embedWidgetClass is not None:
layout = QtWidgets.QVBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
self.widget = embedWidgetClass()
layout.addWidget(self.widget)
self.setLayout(layout)
def getAllOptions(self) -> Dict[str, Any]:
"""Return all options as a dictionary"""
ret = {}
for n, f in self.optGetters.items():
ret[n] = f()
return ret
@updateGuiFromNode
def setOptionFromNode(self, opt: str, value: Any) -> None:
"""Set an option from the node
Calls the set function specified in the class' ``optSetters``.
Decorated with ``@updateGuiFromNode``.
:param opt: name of | |
# Source: sauronlab/viz/figures.py (repo: dmyersturnbull/sauronlab)
from __future__ import annotations
import matplotlib.legend as mlegend
from matplotlib import patches
from mpl_toolkits.axes_grid1 import make_axes_locatable
from pocketutils.plotting.color_schemes import FancyCmaps, FancyColorSchemes
from pocketutils.plotting.corners import Corner, Corners
from pocketutils.plotting.fig_savers import FigureSaver
from pocketutils.plotting.fig_tools import FigureTools as _FigureTools
from sauronlab.core.core_imports import *
from sauronlab.viz._internal_viz import *
class FigureTools(_FigureTools):
""""""
darken_palette = FancyColorSchemes.darken_palette
darken_color = FancyColorSchemes.darken_color
@classmethod
@contextmanager
def using(cls, *args, **kwargs) -> Generator[None, None, None]:
"""
Provided for convenience as a shorthand to using both sauronlab_rc.using, Figs.hiding, and Figs.clearing.
Args:
args: Passed to sauronlab_rc.using
kwargs: Passed to sauronlab_rc.using, except for 'path', 'hide', and 'clear'
Yields:
A context manager
"""
path, hide, clear, reload = (
str(kwargs.get("path")),
bool(kwargs.get("hide")),
bool(kwargs.get("clear")),
bool(kwargs.get("reload")),
)
kwargs = {k: v for k, v in kwargs.items() if k not in {"path", "hide", "clear"}}
with sauronlab_rc.using(*args, **kwargs):
with cls.clearing(clear):
with cls.hiding(hide):
yield
@classmethod
def save(
cls,
figure: FigureSeqLike,
path: PathLike,
names: Optional[Iterator[str]] = None,
clear: bool = True,
**kwargs,
) -> None:
"""
Save a figure or sequence of figures to ``FigureSaver``.
See that class for more info.
Args:
figure: FigureSeqLike:
path: PathLike:
names:
clear: After every save
**kwargs:
"""
path = str(path).replace("/", os.sep)
FigureSaver(clear=clear, **kwargs).save(figure, path, names=names)
    @classmethod
    def add_aligned_colorbar(
        cls, ax: Axes, mat, size: str = "5%", number_format: Optional[str] = None
    ):
        """
        Creates a colorbar on the right side of ``ax``.

        A padding of sauronlab_rc.general_colorbar_left_pad will be applied between ``ax`` and the colorbar.
        Technically description: Adds a new ``Axes`` on the right side with width ``size``%.
        If sauronlab_rc.general_colorbar_on is False, will add the colorbar and make it invisible.
        (This is weirdly necessary to work around a matplotlib bug.)

        Args:
            ax: The Axes, modified in-place
            mat: This must be the return value from ``matshow`` or ``imshow``
            size: The width of the colorbar
            number_format: Formatting string for the text labels on the colorbar (passed to ``ax.figure.colorbar``)

        Returns:
            The ``Colorbar`` that was attached to (and possibly removed
            from) the figure.
        """
        # Attach a sibling Axes ("cax") to the right of ``ax`` so the
        # colorbar stays aligned with the plot. Even when colorbars are
        # disabled in sauronlab_rc we must still create one: matplotlib's
        # save path otherwise errors about vmin not being less than vmax.
        # So we make a zero-width cax, attach the colorbar, and remove the
        # colorbar again; the cax itself cannot be removed, so shrinking it
        # to 0% (with no padding) reclaims the space.
        divider = make_axes_locatable(ax)
        if sauronlab_rc.general_colorbar_on:
            pad = sauronlab_rc.general_colorbar_left_pad
        else:
            size = "0%"
            pad = 0
        cax = divider.append_axes("right", size=size, pad=pad)
        cbar = ax.figure.colorbar(mat, cax=cax, format=number_format)
        if not sauronlab_rc.general_colorbar_on:
            cbar.remove()
        return cbar
@classmethod
def manual_legend(
cls,
ax: Axes,
labels: Sequence[str],
colors: Sequence[str],
patch_size: float = sauronlab_rc.legend_marker_size,
patch_alpha=1.0,
**kwargs,
) -> mlegend.Legend:
"""
Creates legend handles manually and adds them as the legend on the Axes.
This is unfortunately necessary in cases where, for ex, only a handle per color is wanted -- not a handle per color and marker shape.
Applies ``cls.fix_labels`` and applies sauronlab_rc defaults unless they're overridden in kwargs.
Args:
ax: Axes:
labels: Sequence[str]:
colors: Sequence[str]:
patch_size: float: (Default value = sauronlab_rc.legend_marker_size)
patch_alpha: (Default value = 1.0)
**kwargs:
Returns:
"""
labels, colors = list(labels), list(colors)
kwargs = copy(kwargs)
kwargs["ncol"] = kwargs.get("ncol", sauronlab_rc.legend_n_cols)
kwargs["bbox_to_anchor"] = kwargs.get("bbox_to_anchor", sauronlab_rc.legend_bbox)
kwargs["mode"] = "expand" if sauronlab_rc.legend_expand else None
kwargs["loc"] = kwargs.get("loc")
if "patch_size" in kwargs:
raise XValueError("patch_size cannot be passed as an argument and kwargs")
if "patch_alpha" in kwargs:
raise XValueError("patch_alpha cannot be passed as an argument and kwargs")
handles = cls.manual_legend_handles(
labels, colors, patch_size=patch_size, patch_alpha=patch_alpha
)
return ax.legend(handles=handles, **kwargs)
@classmethod
def manual_legend_handles(
cls,
labels: Sequence[str],
colors: Sequence[str],
patch_size: float = sauronlab_rc.legend_marker_size,
patch_alpha=1.0,
**patch_properties,
) -> Sequence[patches.Patch]:
"""
Creates legend handles manually. Does not add the patches to the Axes.
Also see ``cls.manual_legend``.
This is unfortunately necessary in cases where, for ex, only a handle per color is wanted -- not a handle per color and marker shape.
Applies ``cls.fix_labels``.
Args:
labels:
colors:
patch_size:
patch_alpha:
**patch_properties:
Returns:
"""
assert len(labels) == len(colors), f"{len(labels)} labels but {len(colors)} colors"
legend_dict = {e: colors[i] for i, e in enumerate(labels)}
patch_list = []
for key in legend_dict:
data_key = patches.Patch(
color=legend_dict[key],
label=cls.fix_labels(key),
linewidth=patch_size,
alpha=patch_alpha,
**patch_properties,
)
patch_list.append(data_key)
return patch_list
@classmethod
def fix_labels(
cls, name: Union[Iterable[str], str], inplace: bool = False
) -> Union[Iterable[str], str]:
"""
Fixes common issues with label names.
Examples:
- (-) gets a minus sign: (−)
- 'uM' is changed to 'µM'
- --> is changed to →
- __a and __b are made nicer
- math is escaped in TeX if necessary
Args:
name:
inplace:
Returns:
"""
# noinspection PyProtectedMember
def fix_u(s: str) -> str:
""""""
return (
str(s)
.replace("(-)", "(−)")
.replace("killed (+)", "lethal (+)")
.replace("-->", Chars.right)
.replace("<--", Chars.left)
.replace("uM", "µM")
.replace("__a", ":" + Chars.angled("a"))
.replace("__b", ":" + Chars.angled("b"))
)
def fix_ltext(s: str) -> str:
""""""
# escape: # $ % & ~ _ ^ \ { } \( \) \[ \]
return (
Tools.strip_paired(s, [("$", "$")])
.replace("killed (+)", "lethal (+)")
.replace("__a", ":``a'")
.replace("__b", ":``b'")
.replace("_", r"\_")
.replace("uM", r"\micro M")
.replace(
Chars.micro, r"\micro "
) # always append a space to avoid 'undefined control sequence'
)
def fix_lmath(s: str) -> str:
""""""
return (
("$" + Tools.strip_paired(s, [("$", "$")]) + "$")
.replace("killed (+)", "lethal (+)")
.replace("__a", r"\langle a \rangle")
.replace("__b", r"\langle b \rangle")
.replace("-->", r"\rightarrow")
.replace("<--", r"\leftarrow")
.replace("_", "\\_")
.replace("uM", r"\micro M")
.replace(
Chars.micro, r"\micro "
) # always append a space to avoid 'undefined control sequence'
)
def choose_fix(s: str) -> str:
""""""
if not plt.rcParams["text.usetex"]:
return fix_u(s)
elif (
sauronlab_rc.label_force_text_mode
or not sauronlab_rc.label_force_math_mode
and "$" not in s
):
return fix_ltext(s)
elif (
sauronlab_rc.label_force_math_mode
or s.startswith("$")
and s.endswith("$")
and s.count("$") == 2
):
return fix_lmath(s)
else:
logger.error(f"Cannot fix mixed-math mode string {Chars.shelled(s)}")
return s
def fix(s0: str) -> str:
""""""
is_label = hasattr(s0, "get_text")
if is_label:
# noinspection PyUnresolvedReferences
s = s0.get_text() # for matplotlib tick labels
elif inplace:
logger.caution("Cannot set inplace; type str")
s = s0
else:
s = s0
s = sauronlab_rc.label_replace_dict.get(s, s)
r = choose_fix(s) if sauronlab_rc.label_fix else s
r = sauronlab_rc.label_replace_dict.get(r, r)
if inplace and is_label:
# noinspection PyUnresolvedReferences
s0.set_text(r)
if r != s:
logger.debug(f"Fixed {s} → {r}")
return r
if Tools.is_true_iterable(name):
return (fix(s) for s in name)
else:
return fix(name)
@classmethod
def stamp_runs(cls, ax: Axes, run_ids: Iterable[int]) -> Axes:
"""
Stamps the run ID(s) in the upper-left corner.
Only shows if sauronlab_rc.stamp_on is True AND len(run_ids) <= sauronlab_rc.stamp_max_runs.
Args:
ax: Axes:
run_ids: Iterable[int]:
Returns:
"""
if sauronlab_rc.stamp_on:
run_ids = InternalTools.fetch_all_ids_unchecked(Runs, run_ids)
run_ids = Tools.unique(run_ids)
if len(run_ids) <= sauronlab_rc.stamp_max_runs:
text = Tools.join(run_ids, sep=", ", prefix="r")
return cls.stamp(ax, text, Corners.TOP_LEFT)
@classmethod
def stamp_time(cls, ax: Axes) -> Axes:
"""
If sauronlab_rc.stamp_on is on, stamps the datetime to the top right corner.
Args:
ax: Axes:
Returns:
"""
if sauronlab_rc.stamp_on:
text = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
return cls.stamp(ax, text, Corners.TOP_RIGHT)
class _Pub:
    """
    Functions to save figures as PDFs in a "publication" mode.
    Provides a context manager that yields a FigureSaver.
    Clears all figures (inc. pre-existing) before entering and on every save.
    Hides all display.
    """

    @contextmanager
    def __call__(
        self, width: str, height: str, save_under: PathLike = "", *args, **kwargs
    ) -> Generator[FigureSaver, None, None]:
        """
        A context manager with a ``FigureSaver``, non-interactive, auto-clearing, and optional sauronlab_rc params.

        Args:
            width: A string passed to ``sauronlab_rc``; ex: ``1/2 2_col`` (defined in sauronlab_rc params file)
            height: A string passed to ``sauronlab_rc``; ex: ``1/2 2_col`` (defined in sauronlab_rc params file)
            save_under: Save everything under this directory (but passing absolute paths will invalidate this)
            args: Functions of sauronlab_rc passed to ``sauronlab_rc.using``
            kwargs: Kwargs of sauronlab_rc and matplotlib params passed to ``sauronlab_rc.using``

        Yields:
            A ``FigureSaver`` rooted at ``save_under``
        """
        save_under = str(save_under).replace("/", os.sep)
        save_under = Tools.prepped_dir(save_under)
        pretty_dir = str(save_under) if len(str(save_under)) > 0 else "."
        logger.debug(f"::Entered:: saving environment {kwargs.get('scale', '')} under {pretty_dir}")
        # Clear everything (including pre-existing figures) on entry and after every save
        FigureTools.clear()
        saver = FigureSaver(save_under=save_under, clear=lambda fig: FigureTools.clear())
        with FigureTools.hiding():
            with sauronlab_rc.using(
                width=width, height=height, *args, savefig_format="pdf", **kwargs
            ):
                yield saver
        # BUG FIX: this message previously lacked the f-prefix and logged literal braces
        logger.debug(f"::Left:: saving environment {kwargs.get('scale', '')} under {pretty_dir}")
Pub = _Pub()
__all__ = [
| |
= input("Unit Type:")
value = float(input("Numerical Value:"))
if index == "1":
print("In Celsius is:" + str(value))
print("In Fahrenheit is:" + str((value * 9/5) + 32))
print("In Kelvin is:" + str(value + 273.15))
temperature()
elif index == "2":
print("In Celsius is:" + str((value-32)*(5/9)))
print("In Fahrenheit is:" + str(value))
print("In Kelvin is:" + str((value + 459.67)/1.8))
temperature()
elif index == "3":
print("In Celsius is:" + str(value - 273.15))
print("In Fahrenheit is:" + str((1.8 * value) - 459.67))
print("In Kelvin is:" + str(value))
temperature()
else:
temperature()
def kinematicviscosity():
    """Interactive kinematic-viscosity converter; loops forever by re-calling itself."""
    print("Choose the unit given")
    print("1. Centistroke")
    print("2. Stroke")
    print("3. Foot square / second")
    print("4. Metre square / second")
    index = input("Unit given:")
    value = float(input("Numerical Value:"))
    # Multipliers from each source unit to (centistoke, stoke, ft^2/s, m^2/s).
    # NOTE(review): "Stroke" is presumably a typo for "Stokes", and the 1e-5
    # cSt->ft^2/s factor looks truncated (exact ~1.07639e-5) — confirm before changing.
    conversions = {
        "1": (1, 0.01, 0.00001, 0.000001),
        "2": (100, 1, 0.001076, 0.0001),
        "3": (92903, 929.03, 1, 0.092903),
        "4": (1000000, 10000, 10.76392, 1),
    }
    if index in conversions:
        to_cst, to_st, to_ft2, to_m2 = conversions[index]
        print("In Centistroke is:" + str(value * to_cst))
        print("In Stroke is:" + str(value * to_st))
        print("In Foot Square / sec is:" + str(value * to_ft2))
        print("In Metre square / sec is:" + str(value * to_m2))
    kinematicviscosity()
def dynamicviscosity():
    """Interactive dynamic-viscosity converter; loops forever by re-calling itself."""
    print("Choose the unit given")
    print("1. Centipoise")
    print("2. Poise")
    print("3. Pound square/ sec")
    index = input("Unit given:")
    value = float(input("Numerical Value:"))
    # Multipliers from each source unit to (centipoise, poise, lb/(ft*s)).
    conversions = {
        "1": (1, 0.01, 0.000672),
        "2": (100, 1, 0.067197),
        "3": (1488.16, 14.8816, 1),
    }
    if index in conversions:
        to_cp, to_p, to_lb = conversions[index]
        print("In Centipoise is:" + str(value * to_cp))
        print("In Poise is:" + str(value * to_p))
        print("In Pound square/ sec is:" + str(value * to_lb))
    dynamicviscosity()
def volumetricgasflow():
    """
    Interactive volumetric gas-flow converter; loops forever by re-calling itself.

    Bug fix: every branch previously recursed into dynamicviscosity() (a
    copy-paste error), dumping the user into the wrong converter menu.
    """
    print("Choose the unit given")
    print("1. Normal metre cube/hour")
    print("2. Standard cubic feet/hour")
    print("3. Standard cubic feet/minute")
    index = input("Unit given:")
    value = float(input("Numerical Value:"))
    if index == "1":
        print("In Normal metre cube/hour is:" + str(value))
        print("In Standard cubic feet/hour is:" + str(value * 35.31073))
        print("In Standard cubic feet/minute is:" + str(value * 0.588582))
    elif index == "2":
        print("In Normal metre cube/hour is:" + str(value * 0.02832))
        print("In Standard cubic feet/hour is:" + str(value))
        print("In Standard cubic feet/minute is:" + str(value * 0.016669))
    elif index == "3":
        print("In Normal metre cube/hour is:" + str(value * 1.699))
        print("In Standard cubic feet/hour is:" + str(value * 59.99294))
        print("In Standard cubic feet/minute is:" + str(value))
    # Re-enter THIS menu (was dynamicviscosity() in every branch)
    volumetricgasflow()
def torque():
    """Interactive torque converter; loops forever by re-calling itself."""
    print("Choose the unit given")
    print("1. Newton metre")
    print("2. Kilogram force metre")
    print("3. Foot pound")
    print("4. Inch pound")
    index = input("Unit given:")
    value = float(input("Numerical Value:"))
    # Multipliers from each source unit to (N*m, kgf*m, ft*lb, in*lb).
    conversions = {
        "1": (1, 0.101972, 0.737561, 8.850732),
        "2": (9.80665, 1, 7.233003, 86.79603),
        "3": (1.35582, 0.138255, 1, 12),
        "4": (0.112985, 0.011521, 0.083333, 1),
    }
    if index in conversions:
        to_nm, to_kgfm, to_ftlb, to_inlb = conversions[index]
        print("In Newton metre is:" + str(value * to_nm))
        print("In Kilogram force metre is:" + str(value * to_kgfm))
        print("In Foot pound is:" + str(value * to_ftlb))
        print("In Inch pound is:" + str(value * to_inlb))
    torque()
def massflow():
    """Interactive mass-flow converter; loops forever by re-calling itself."""
    print("Choose the unit given")
    print("1. Kilogram/hour")
    print("2. Pound/hour")
    print("3. Kilogram/second ")
    print("4. Ton/hour")
    index = input("Unit given:")
    value = float(input("Numerical Value:"))
    # Multipliers from each source unit to (kg/h, lb/h, kg/s, t/h).
    conversions = {
        "1": (1, 2.204586, 0.000278, 0.001),
        "2": (0.4536, 1, 0.000126, 0.000454),
        "3": (3600, 7936.508, 1, 3.6),
        "4": (1000, 2204.586, 0.277778, 1),
    }
    if index in conversions:
        to_kgh, to_lbh, to_kgs, to_th = conversions[index]
        print("In Kilogram/hour is:" + str(value * to_kgh))
        print("In Pound/hour is:" + str(value * to_lbh))
        print("In Kilogram/second is:" + str(value * to_kgs))
        print("In Ton/hour is:" + str(value * to_th))
    massflow()
def density():
    """
    Interactive density converter; loops forever by re-calling itself.

    Bug fix: every branch previously recursed into massflow() (a copy-paste
    error), dumping the user into the mass-flow menu instead of repeating
    the density menu.
    """
    print("Choose the unit given")
    print("1. Gram/millilitre")
    print("2. Kilogram/metre cube")
    print("3. Pound/foot cube ")
    print("4. Pound/inch cube")
    index = input("Unit given:")
    value = float(input("Numerical Value:"))
    if index == "1":
        print("In Gram/millilitre is:" + str(value))
        print("In Kilogram/metre cube is:" + str(value * 1000))
        print("In Pound/foot cube is:" + str(value * 62.42197))
        print("In Pound/inch cube is:" + str(value * 0.036127))
    elif index == "2":
        print("In Gram/millilitre is:" + str(value * 0.001))
        print("In Kilogram/metre cube is:" + str(value))
        print("In Pound/foot cube is:" + str(value * 0.062422))
        print("In Pound/inch cube is:" + str(value * 0.000036))
    elif index == "3":
        print("In Gram/millilitre is:" + str(value * 0.01602))
        print("In Kilogram/metre cube is:" + str(value * 16.02))
        print("In Pound/foot cube is:" + str(value))
        print("In Pound/inch cube is:" + str(value * 0.000579))
    elif index == "4":
        print("In Gram/millilitre is:" + str(value * 27.68))
        print("In Kilogram/metre cube is:" + str(value * 27680))
        print("In Pound/foot cube is:" + str(value * 1727.84))
        print("In Pound/inch cube is:" + str(value))
    # Re-enter THIS menu (was massflow() in every branch)
    density()
def mass():
print("Which unit do you have:")
print("1. Grams\n"
"2. Kilograms\n"
"3. Metric tonnes\n"
"4. Short ton\n"
"5. Long ton\n"
"6. Pounds\n"
"7. Ounces\n")
index = input("Unit Type:")
value = float(input("Numerical Value:"))
if index == "1":
print("in Grams is:" + str(value))
print("in Kilograms is:" + str(value * 0.001))
print("in Metric tonnes is:" + str(value * 0.000001))
print("in Short ton is:" + str(value * 0.000001))
print("in Long ton is:" + str(value * 9.84e-07))
print("in Pounds is:" + str(value * 0.002205))
print("in Ounces is:" + str(value * 0.035273))
mass()
elif index == "2":
print("in Grams is:" + str(value * 1000))
print("in Kilograms is:" + str(value))
print("in Metric tonnes is:" + str(value * 0.001))
print("in Short ton is:" + str(value * 0.001102))
print("in Long ton is:" + str(value * 0.000984))
print("in Pounds is:" + str(value * 2.204586))
print("in Ounces is:" + str(value * 35.27337))
mass()
elif index == "3":
print("in Grams is:" + str(value * 1000000))
print("in Kilograms is:" + str(value * 1000))
print("in Metric tonnes is:" + str(value))
print("in Short ton is:" + str(value * 1.102293))
print("in Long ton is:" + str(value * 0.984252))
print("in Pounds is:" + str(value * 2204.586))
print("in Ounces is:" + str(value * 35273.37))
mass()
elif index == "4":
print("in Grams is:" + str(value * 907200))
print("in Kilograms is:" + str(value * 907.2))
print("in Metric tonnes is:" + str(value * 0.9072))
print("in Short ton is:" + str(value))
print("in Long ton is:" + str(value * 0.892913))
print("in Pounds is:" + str(value * 2000))
print("in Ounces is:" + str(value * 32000))
mass()
elif index == "5":
print("in Grams is:" + str(value * 1016000))
print("in Kilograms is:" + str(value * 1016))
print("in Metric tonnes is:" + str(value * 1.016))
print("in Short ton is:" + str(value * 1.119929))
print("in Long ton is:" + str(value))
print("in Pounds is:" + str(value * 2239.859))
print("in Ounces is:" + str(value * 35837.74))
mass()
elif index == "6":
print("in Grams is:" + str(value * 453.6))
print("in Kilograms | |
(top left of medal)
:param x_coord: x coordinate to check (top left of medal)
:return: None
"""
rows = self.rows
columns = self.columns
if x_coord < columns - 1 and y_coord < rows - 1:
for i in range(2):
for j in range(2):
if self.medal_grid.grid[y_coord + i][x_coord + j] != -1:
return False
return True
return False
def add_medal(self, row: int, column: int):
    """
    Place one medal (four medal portions) on the grid, with portions
    laid out as:
        |0|1|
        -----
        |2|3|

    :param row: y coordinate of the medal's top-left portion
    :param column: x coordinate of the medal's top-left portion
    :return: None
    """
    for d_row in range(2):
        for d_col in range(2):
            portion = 2 * d_row + d_col
            self.medal_grid.grid[row + d_row][column + d_col] = portion
            # record the portion in the medal location list
            self.medal_locations.append((row + d_row, column + d_col, portion))
class Board(SimpleBoard):
"""
The class which contains all the grids for gems, ice, and medals.
-1 represents an empty cell in all grids.
The gem grid contains tuples in each cell, which represent:
(type, bonus_type, activation)
The ice grid contains a single value in each cell, represented by:
(layer)
The medal_grid contains a single value in each cell, represented by:
(corner)
Swapped gems is a list of tuples, represented as:
[(row, column, type, bonus_type, activation),(same again)]
test parameter should be "vertical" or "horizontal" to specify test grid type.
"""
def __init__(self,
             rows: int,
             columns: int,
             ice_rows: int,
             medals_remaining: int,
             moves_remaining: int,
             event_manager: EventManager,
             gem_types: int = GEM_TYPES,
             bonus_types: int = BONUS_TYPES,
             ice_layers=ICE_LAYERS,
             test=None,
             random_seed=RANDOM_SEED,
             stats_file_path=None):
    """
    Build the full game board (gem, ice, and medal grids) and post the
    initial obscured state to the event manager.

    Args:
        rows: Number of grid rows.
        columns: Number of grid columns.
        ice_rows: Number of bottom rows initially covered in ice; also
            determines the move budget via ``set_max_moves``.
        medals_remaining: Number of 2x2 medals to bury under the ice.
        moves_remaining: Starting move count (passed to the superclass).
        event_manager: Event bus; this board registers itself as a listener.
        gem_types: Number of ordinary gem types.
        bonus_types: Number of bonus gem types.
        ice_layers: Ice layers per cell (stored internally as layers - 1).
        test: "vertical" or "horizontal" for deterministic test grids, else None.
        random_seed: Seed for reproducible boards.
        stats_file_path: If set, end-of-game stats are appended to this file.
    """
    super().__init__(rows, columns, gem_types, medals_remaining, moves_remaining)
    # event manager
    self.event_manager = event_manager
    self.event_manager.register_listener(self)
    # game variables
    self.ice_rows = ice_rows
    self.total_moves = None  # set by number of ice rows
    self.set_max_moves()
    self.bonus_types = bonus_types
    self.terminal_state = False
    self.win_state = False
    self.game_state = "waiting_for_input"
    self.ice_layers = ice_layers - 1
    self.test = test
    self.random_seed = random_seed
    # helper variables
    self.ice_removed = []
    self.movements = []
    self.additions = []
    self.activated_gems = []
    # state variables
    self.file_name = ''
    self.line_number = 0
    # file operations
    self.stats_file_path = stats_file_path
    # initialise grids
    self.init_gem_grid()
    self.init_ice_grid()
    self.init_medal_grid()
    # announce the starting (obscured) state to listeners, e.g. the view
    state_event = StateEvent(self.get_obscured_game_state())
    self.event_manager.post(state_event)
# ----------------------------------------------------------------------
def state(self):
    """Return (gem grid, ice grid, medal grid, info) where info is
    (moves_remaining, medals_remaining, score, False, False)."""
    info = (self.moves_remaining, self.medals_remaining, self.score, False, False)
    return self.gem_grid.grid, self.ice_grid.grid, self.medal_grid.grid, info
def notify(self, event):
    """Event-bus callback: accept swap requests only while waiting for
    input; advance the game on ticks while busy."""
    waiting = self.game_state == "waiting_for_input"
    if isinstance(event, SwapGemsRequest) and waiting:
        self.set_swap_locations(event.swap_locations)
    elif isinstance(event, TickEvent) and not waiting:
        # TODO check
        self.get_update()
def init_gem_grid(self):
    """
    Populate the gem grid: either a deterministic test grid, or random gems
    re-rolled until the board starts with no matches.
    """
    if self.test == "vertical":
        self.test_grid_vertical()
    elif self.test == "horizontal":
        self.test_grid_horizontal()
    else:
        for row, column in product(range(self.rows), range(self.columns)):
            self.gem_grid.grid[row][column] = self.new_gem()
        # re-roll any gems that already form matches
        matches, bonuses = self.find_matches()
        while matches or bonuses:
            for gem in matches + bonuses:
                r, c = gem[:2]
                self.gem_grid.grid[r][c] = self.new_gem()
            matches, bonuses = self.find_matches()
def test_grid_vertical(self):
"""
Creates a test grid where all the columns are
the same type of gem.
:return:
"""
for j, i in product(range(self.columns), range(self.rows)):
gem_type = j % self.gem_types
gem = self.new_gem(gem_type)
self.gem_grid.grid[i][j] = gem
def test_grid_horizontal(self):
"""
Creates a test grid where all the rows are
the same type of gem.
:return:
"""
for i, j in product(range(self.rows), range(self.columns)):
gem_type = i % self.gem_types
gem = self.new_gem(gem_type)
self.gem_grid.grid[i][j] = gem
def init_ice_grid(self):
    """
    Cover the bottom ``self.ice_rows`` rows of the ice grid with
    ``self.ice_layers`` layers, filling from the bottom row upward.
    :return:
    """
    bottom_row = self.rows - 1
    top_iced_row = bottom_row - self.ice_rows  # exclusive upper bound
    for row in range(bottom_row, top_iced_row, -1):
        for col in range(self.columns):
            self.ice_grid.grid[row][col] = self.ice_layers
def set_max_moves(self):
    """Set the move budget from the ice-row count (5→20, 7→25, 9→30);
    other values leave total_moves untouched."""
    budget_by_ice_rows = {5: 20, 7: 25, 9: 30}
    if self.ice_rows in budget_by_ice_rows:
        self.total_moves = budget_by_ice_rows[self.ice_rows]
def init_medal_grid(self):
    """
    Scatter ``self.medals_remaining`` medals within the iced rows.
    Each medal is a 2x2 group of portions:
        |0|1|
        -----
        |2|3|
    Random positions are retried until each medal fits without overlap.
    :return:
    """
    placed = 0
    while placed < self.medals_remaining:
        # random top-left corner within the iced region
        row = choice(range(self.rows - self.ice_rows, self.rows - 1))
        column = choice(range(self.columns - 1))
        if self.check_medal_boundaries(row, column):
            # space is free: place the medal
            self.add_medal(row, column)
            placed += 1
def get_swap_movement(self):
    """
    Return the swap as a two-element movement list:
    [original positions, swapped positions].
    :return:
    """
    gems = self.swapped_gems
    return [gems, [gems[1], gems[0]]]
def get_game_info(self):
    """Return (moves_remaining, medals_remaining, score, terminal_state, win_state)."""
    return (
        self.moves_remaining,
        self.medals_remaining,
        self.score,
        self.terminal_state,
        self.win_state,
    )
def extrapolate_score(self):
    """
    Award bonus points for unused moves: the player's average score per
    move, multiplied by the number of moves remaining, is added to the score.

    Bug fix: previously raised ZeroDivisionError when called with no moves
    made (total_moves == moves_remaining); now that case is a no-op.
    :return:
    """
    moves_made = self.total_moves - self.moves_remaining
    if moves_made <= 0:
        # no moves made yet, so there is no average to extrapolate from
        return
    avg_per_move = self.score / moves_made
    bonus_points = avg_per_move * self.moves_remaining
    self.score += bonus_points
def get_update(self):
"""
Gets the updates.
This method swaps the gems, looks for matches,
removes gems, and pulls them down. This is done
until no more successive matches are found.
Update bags are posted to registered listeners after
every pull down and also if it is not a valid swap.
:return:
"""
self.gem_grid_copy = deepcopy(self.gem_grid.grid)
state = self.get_game_state()
update_bag = UpdateBag([], [], [], [], [], [], [], state)
update_bag.gems = self.gem_grid.grid
# ---------------------------------------
if not self.game_state == "input_received":
# do nothing
return update_bag
if self.terminal_state:
# do nothing if terminal state
return update_bag
if not self.check_swap():
# do nothing if user clicked on non adjacent gem
self.game_state = "waiting_for_input"
return update_bag
self.game_state = "doing_stuff"
# ---------------------------------------
# Swap is adjacent, send some update bags:
# reset cascade to zero
self.cascade = 0
# save state and chosen action before doing anything
self.write_state_action()
# create bag
info = self.get_game_info()
movements = self.get_swap_movement()
update_bag = UpdateBag([], [], [], movements, [], [], info)
update_bag.gems = self.gem_grid.grid
# send bag to view
event = UpdateBagEvent(update_bag)
self.event_manager.post(event)
# swap gems and find matches
self.swap_gems()
matches, bonuses = self.find_matches()
match_count = len(matches)
bonus_count = len(bonuses)
# ---------------------------------------
# if not match, swap back and send bag
if match_count + bonus_count < 3:
self.swap_gems()
# create bag
info = self.get_game_info()
movements = self.get_swap_movement()
update_bag = UpdateBag([], [], [], movements, [], [], info)
update_bag.gems = self.gem_grid.grid
# send bag to view and return
event = UpdateBagEvent(update_bag)
self.event_manager.post(event)
self.game_state = "waiting_for_input"
return update_bag
# ---------------------------------------
# else, match - perform remove gems, pull down, etc, and send bag
else:
self.move_made()
# do until no more pull downs
while match_count + bonus_count > 0:
first_loop = True
self.cascade += 1
# find more matches after pulling down
matches, bonuses = self.find_matches()
self.match_list = matches
self.bonus_list = bonuses
match_count = len(matches)
bonus_count = len(bonuses)
# remove gems in grid that are in matches_list
self.remove_gems_add_bonuses()
self.update_score()
repeat = True
while repeat:
# pull gems down
repeat = self.pull_gems_down()
# create bag
if not first_loop:
matches = []
bonuses = []
# else:
additions = self.additions
movements = self.movements
update_bag = UpdateBag(matches, bonuses, additions, movements,
self.ice_removed, self.medals_removed, self.get_game_info())
update_bag.gems = self.gem_grid.grid
# send bag to view
event = UpdateBagEvent(update_bag)
self.event_manager.post(event)
# don't send anymore matches, bonuses
first_loop = False
# ---------------------------------------
# check for terminal state
if self.medals_remaining == 0:
# WON
# write state if terminal state
self.action = [(-1, -1), (-1, -1)]
self.write_state_action()
self.win_state = True
self.terminal_state = True
# give bonus points for moves remaining
self.extrapolate_score()
elif self.moves_remaining == 0:
# LOST
# write state if terminal state
self.action = [(-1, -1), (-1, -1)]
self.write_state_action()
self.win_state = False
self.terminal_state = True
# write stats to file
if self.stats_file_path and self.terminal_state:
outcome = 1 if self.win_state else 0
medals_left = self.medals_remaining
moves_made = self.total_moves - self.moves_remaining
score = self.score
line = f'{outcome}, {medals_left}, {moves_made}, {score:0.0f}'
with open(self.stats_file_path, 'a') as file:
file.write(line)
# Create bag
info = | |
#!/usr/bin/env python
from __future__ import print_function
import skimage as skimage
from skimage import transform, color, exposure, io
from skimage.viewer import ImageViewer
import random
from random import choice
import numpy as np
from collections import deque
import time
import math
import os
import pandas as pd
import cv2
import csv
from PIL import Image
import json
import keras
from keras.models import model_from_json
from keras.models import Sequential, load_model, Model
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, Dense, Flatten, merge, MaxPooling2D, Input, AveragePooling2D, Lambda, Activation, Embedding
from keras.optimizers import SGD, Adam, rmsprop
from keras import backend as K
from keras.utils import np_utils
from keras.preprocessing.image import array_to_img, img_to_array
from vizdoom import DoomGame, ScreenResolution
from vizdoom import *
import itertools as it
from time import sleep
import tensorflow as tf
from networks import Networks
# Earlier 5-action variant, kept for reference:
#DEATHMATCH_ACTION5_NAME = [
# "ATTACK",
# "MOVE_FORWARD",
# "MOVE_BACKWARD",
# "TURN_LEFT",
# "TURN_RIGHT"
#]
# Action names indexed by the model's predicted action id.
# NOTE(review): the name says "ACTION5" but the list now holds 7 actions —
# presumably widened when MOVE_LEFT/MOVE_RIGHT were added; confirm the order
# matches ./scenarios/deathmatch_7action.cfg.
DEATHMATCH_ACTION5_NAME = [
    "MOVE_LEFT",
    "MOVE_RIGHT",
    "ATTACK",
    "MOVE_FORWARD",
    "MOVE_BACKWARD",
    "TURN_LEFT",
    "TURN_RIGHT"
]
def preprocessImg(img, size):
    """Convert a channels-first screen buffer to a grayscale image of ``size``."""
    hwc = np.rollaxis(img, 0, 3)  # (C, H, W) -> (H, W, C), e.g. (640, 480, 3)
    resized = skimage.transform.resize(hwc, size, mode='constant')
    return skimage.color.rgb2gray(resized)
def ResizeImg(img, size):
    """Convert a channels-first screen buffer to a color image resized to ``size``."""
    hwc = np.rollaxis(img, 0, 3)  # (C, H, W) -> (H, W, C), e.g. (640, 480, 3)
    return skimage.transform.resize(hwc, size, mode='constant')
# --- Run configuration flags ---
bTrain = True  # run in training mode
bUseImitation = False  # use the imitation (behavior-cloning) model to pick actions
bRecordSamples = False  # record (screen, action) samples for imitation learning
nMaxSamples = 1000  # cap on the number of recorded samples
nSamples = 0  # running count of recorded samples
gameCfg = "./scenarios/deathmatch_7action.cfg"  # ViZDoom scenario config
# This is for saving model of imitation learning.
model_path = "../ViZDoom-models/CarCloneModel-deathmatch-50000-epoch10-5action-256x256-modify1/"
class CNNAction:
def __init__(self, gameName):
model_json = model_path + "test_model.json"
model_h5 = model_path + "test_model.h5"
with open(model_json, 'r') as jfile:
self.model = model_from_json(json.load(jfile))
self.model.compile("adam", "categorical_crossentropy")
self.model.load_weights(model_h5)
self.imgList = []
self.model.summary()
self.w1 = 256
self.h1 = 256
self.inputW = 128
self.inputH = 128
self.frame_per_action = 4
self.epsilon = 1.0
self.initial_epsilon = 1.0
self.final_epsilon = 0.0001
self.observe = 2000
# Performance Statistics
self.stats_window_size = 50 # window size for computing rolling statistics
self.mavg_score = [] # Moving Average of Survival Time
self.var_score = [] # Variance of Survival Time
self.mavg_ammo_left = [] # Moving Average of Ammo used
self.mavg_kill_counts = [] # Moving Average of Kill Counts
# sample picture number
dataPath = "ImitationData/" + gameName
if not os.path.exists(dataPath):
os.mkdir(dataPath)
imgPath = dataPath + "/img"
if not os.path.exists(imgPath):
os.mkdir(imgPath)
self.sampleNum = 0
self.imgPath = imgPath
self.dataPath = dataPath
self.cvsPath = dataPath + "/test.csv"
self.sampleCSVFile = open(self.cvsPath, "w")
self.sampleCSVWriter = csv.writer(self.sampleCSVFile)
self.sampleCSVWriter.writerow(["name", "action", "action_name"])
def GenerateSamples(self, screen, action):
self.sampleNum = self.sampleNum + 1
t = time.time()
now = int(round(t*1000))
timeStr = time.strftime('%Y-%m-%d-%H-%M-%S',time.localtime(now/1000))
savedFileName = "%s/doom-%s-%d.jpg" % (self.imgPath, timeStr, self.sampleNum)
self.sampleCSVWriter.writerow([savedFileName, action, DEATHMATCH_ACTION5_NAME[action]])
self.sampleCSVFile.flush()
# skimage.io.imsave("hy.jpg", screen.transpose(1, 2, 0))
dst = ResizeImg(screen, (256, 256))
skimage.io.imsave(savedFileName, dst)
return
def next_action(self, state, save_graph=False):
action_id = self.f_eval(state)
return action_id
def reset(self):
pass
# prev_state is only used for evaluation, so has a batch size of 1
# self.prev_state = self.init_state_e
def prepare_f_eval_args(self, state):
"""
Prepare inputs for evaluation.
"""
screen = np.float32(state)
return screen
    def f_eval(self, state):
        """Run the imitation model on one raw screen and pick an action.

        The screen is resized to (w1, h1), pasted onto an RGB canvas,
        downscaled to the network input size, scaled to [-0.5, 0.5] and
        classified; the index of the top-scoring action is returned as an
        int.
        """
        screen = self.prepare_f_eval_args(state)
        img = screen
        # print (img.shape)
        # cv2 expects HWC ordering; assumes the incoming screen is
        # channels-first (CHW) — TODO confirm against the caller.
        img = cv2.resize(img.transpose(1, 2, 0), (self.w1, self.h1), interpolation=cv2.INTER_AREA)
        self.imgList.append(img)
        # Remnants of a 4-frame stacking scheme; only one frame is used now.
        # if len(self.imgList) < 4:
        #     return 0
        # img1Int = self.imgList[0].transpose(2, 1, 0).astype(int)
        img1 = array_to_img(self.imgList[0].astype(int))
        # img2 = array_to_img(self.imgList[1].astype(int))
        # img3 = array_to_img(self.imgList[2].astype(int))
        # img4 = array_to_img(self.imgList[3].astype(int))
        w = self.w1
        h = self.h1
        merge_img = Image.new('RGB', (w, h), 0xffffff)
        merge_img.paste(img1, (0, 0))
        # merge_img.paste(img2, (w, 0))
        # merge_img.paste(img3, (0, h))
        # merge_img.paste(img4, (w, h))
        # NOTE(review): debug snapshot written on every call — confirm this
        # side effect is intentional.
        merge_img.save("hy.jpg")
        merge_img = merge_img.resize((self.inputW, self.inputH))
        img5 = img_to_array(merge_img).transpose(0, 1, 2)
        img5 = img5.astype("float32")
        # Normalise pixel values from [0, 255] to [-0.5, 0.5].
        img5 = (img5 * (1. / 255)) - 0.5
        imgs = img5[None, :, :, :]
        # print (imgs.shape)
        action_id = self.model.predict(imgs, batch_size=1)
        # Scores sorted descending; element [0][0] is the best action index.
        action_list = np.argsort(-action_id, axis=1)
        self.imgList.pop(0)
        return int(action_list[0][0])
class C51Agent:
    def __init__(self, state_size, action_size, num_atoms, gameName):
        """Set up C51 hyper-parameters, replay memory and sample logging.

        Args:
            state_size: shape of the network input state.
            action_size: number of discrete actions available.
            num_atoms: number of atoms in the value distribution (51 for C51).
            gameName: scenario name used to build the ImitationData paths.
        """
        # get size of state and action
        self.state_size = state_size
        self.action_size = action_size
        # these are hyper parameters for the DQN
        self.gamma = 0.99
        self.learning_rate = 0.0001
        # Epsilon-greedy exploration schedule (annealed in replay_memory).
        self.epsilon = 1.0
        self.initial_epsilon = 1.0
        self.final_epsilon = 0.0001
        self.batch_size = 32
        self.observe = 2000
        self.explore = 100000  # orig: 50000
        self.frame_per_action = 4
        self.update_target_freq = 3000
        self.timestep_per_train = 100  # Number of timesteps between training interval
        # Initialize Atoms
        self.num_atoms = num_atoms  # 51 for C51
        # NOTE(review): the v_max/v_min comments below look inconsistent with
        # the actual values — confirm the intended support range.
        self.v_max = 30  # Max possible score for Defend the center is 26 - 0.1*26 = 23.4
        self.v_min = -10  # -0.1*26 - 1 = -3.6
        # Atom spacing and support of the categorical value distribution.
        self.delta_z = (self.v_max - self.v_min) / float(self.num_atoms - 1)
        self.z = [self.v_min + i * self.delta_z for i in range(self.num_atoms)]
        # Create replay memory using deque
        self.memory = deque()
        self.max_memory = 100000  # orig: 50000 # number of previous transitions to remember
        # Models for value distribution
        self.model = None
        self.target_model = None
        # Performance Statistics
        self.stats_window_size = 50  # window size for computing rolling statistics
        self.mavg_score = []  # Moving Average of Survival Time
        self.var_score = []  # Variance of Survival Time
        self.mavg_ammo_left = []  # Moving Average of Ammo used
        self.mavg_kill_counts = []  # Moving Average of Kill Counts
        # Create the imitation-sample output directories on first use.
        dataPath = "ImitationData/" + gameName
        if not os.path.exists(dataPath):
            os.mkdir(dataPath)
        imgPath = dataPath + "/img"
        if not os.path.exists(imgPath):
            os.mkdir(imgPath)
        self.sampleNum = 0
        self.imgPath = imgPath
        self.dataPath = dataPath
        # NOTE(review): "cvsPath" is likely a typo for "csvPath" but is kept
        # because the attribute name is part of the class's interface.
        self.cvsPath = dataPath + "/test.csv"
        # NOTE(review): opened without newline="" — csv may emit blank lines
        # on Windows; confirm target platform before changing.
        self.sampleCSVFile = open(self.cvsPath, "w")
        self.sampleCSVWriter = csv.writer(self.sampleCSVFile)
        self.sampleCSVWriter.writerow(["name", "action", "action_name"])
def update_target_model(self):
"""
After some time interval update the target model to be same with model
"""
self.target_model.set_weights(self.model.get_weights())
def GenerateSamples(self, screen, action):
self.sampleNum = self.sampleNum + 1
t = time.time()
now = int(round(t*1000))
timeStr = time.strftime('%Y-%m-%d-%H-%M-%S',time.localtime(now/1000))
savedFileName = "%s/doom-%s-%d.jpg" % (self.imgPath, timeStr, self.sampleNum)
self.sampleCSVWriter.writerow([savedFileName, action, DEATHMATCH_ACTION5_NAME[action]])
self.sampleCSVFile.flush()
# skimage.io.imsave("hy.jpg", screen.transpose(1, 2, 0))
dst = ResizeImg(screen, (256, 256))
skimage.io.imsave(savedFileName, dst)
return
def get_action(self, state, bTrain=True):
"""
Get action from model using epsilon-greedy policy
"""
if bTrain:
if np.random.rand() <= self.epsilon:
action_idx = random.randrange(self.action_size)
else:
action_idx = self.get_optimal_action(state)
else:
action_idx = self.get_optimal_action(state)
return action_idx
def get_optimal_action(self, state):
"""Get optimal action for a state
"""
z = self.model.predict(state) # Return a list [1x51, 1x51, 1x51]
z_concat = np.vstack(z)
q = np.sum(np.multiply(z_concat, np.array(self.z)), axis=1)
# Pick action with the biggest Q value
action_idx = np.argmax(q)
return action_idx
def shape_reward(self, r_t, misc, prev_misc, t):
# Check any kill count orig reward:
# if (misc[0] > prev_misc[0]):
# r_t = r_t + 1
# if (misc[1] < prev_misc[1]): # Use ammo
# r_t = r_t - 0.1
# if (misc[2] < prev_misc[2]): # Loss HEALTH
# r_t = r_t - 0.1
# hy modify
if (misc[0] > prev_misc[0]): # kill
r_t = r_t + 1
if (misc[1] < prev_misc[1]): # Use ammo
r_t = r_t - 0.2
if (misc[2] < prev_misc[2]): # Loss HEALTH
r_t = r_t - 0.1
return r_t
# save sample <s,a,r,s'> to the replay memory
def replay_memory(self, s_t, action_idx, r_t, s_t1, is_terminated, t):
self.memory.append((s_t, action_idx, r_t, s_t1, is_terminated))
if self.epsilon > self.final_epsilon and t > self.observe:
self.epsilon -= (self.initial_epsilon - self.final_epsilon) / self.explore
if len(self.memory) > self.max_memory:
self.memory.popleft()
# Update the target model to be same with model
if t % self.update_target_freq == 0:
self.update_target_model()
# pick samples randomly from replay memory (with batch_size)
def train_replay(self):
num_samples = min(self.batch_size * self.timestep_per_train, len(self.memory))
replay_samples = random.sample(self.memory, num_samples)
state_inputs = np.zeros(((num_samples,) + self.state_size))
next_states = np.zeros(((num_samples,) + self.state_size))
m_prob = [np.zeros((num_samples, self.num_atoms)) for i in range(action_size)]
action, reward, done = [], [], []
for i in range(num_samples):
state_inputs[i,:,:,:] = replay_samples[i][0]
action.append(replay_samples[i][1])
reward.append(replay_samples[i][2])
next_states[i,:,:,:] = replay_samples[i][3]
done.append(replay_samples[i][4])
z = self.model.predict(next_states) # Return a list [32x51, 32x51, 32x51]
z_ = self.model.predict(next_states) # Return a list [32x51, 32x51, 32x51]
# Get Optimal Actions for the next states (from distribution z)
optimal_action_idxs = []
z_concat = np.vstack(z)
q = np.sum(np.multiply(z_concat, np.array(self.z)), axis=1) # length (num_atoms x num_actions)
q = q.reshape((num_samples, action_size), order='F')
optimal_action_idxs = np.argmax(q, axis=1)
# Project Next State Value Distribution (of optimal action) to Current State
for i in range(num_samples):
if done[i]: # Terminal State
# Distribution collapses to a single point
Tz = min(self.v_max, max(self.v_min, reward[i]))
bj = (Tz - self.v_min) / self.delta_z
m_l, m_u = math.floor(bj), math.ceil(bj)
m_prob[action[i]][i][int(m_l)] += (m_u - bj)
m_prob[action[i]][i][int(m_u)] += (bj - m_l)
else:
for j in range(self.num_atoms):
Tz = min(self.v_max, max(self.v_min, reward[i] + self.gamma * self.z[j]))
bj = (Tz - self.v_min) / self.delta_z
m_l, m_u = math.floor(bj), math.ceil(bj)
m_prob[action[i]][i][int(m_l)] += z_[optimal_action_idxs[i]][i][j] * (m_u - bj)
m_prob[action[i]][i][int(m_u)] += z_[optimal_action_idxs[i]][i][j] | |
import tensorflow as tf
from .print_object import print_obj
def get_variables_and_gradients(loss, scope):
    """Collect a network's trainable variables and their gradients.

    Args:
        loss: tensor, shape of [].
        scope: str, the network's name to find its variables to train.
    Returns:
        Tuple of (list of trainable variables, list of gradients wrt. loss).
    """
    func_name = "get_variables_and_gradients"
    # Trainable variables restricted to this network's scope.
    variables = tf.trainable_variables(scope=scope)
    print_obj("\n{}_{}".format(func_name, scope), "variables", variables)
    gradients = tf.gradients(
        ys=loss,
        xs=variables,
        name="{}_gradients".format(scope)
    )
    print_obj("\n{}_{}".format(func_name, scope), "gradients", gradients)
    # Re-tag each real gradient tensor with its variable's name for
    # identification; None gradients (disconnected vars) pass through.
    named_gradients = []
    for grad, var in zip(gradients, variables):
        if tf.is_tensor(x=grad):
            grad = tf.identity(
                input=grad,
                name="{}_{}_gradients".format(func_name, var.name[:-2])
            )
        named_gradients.append(grad)
    gradients = named_gradients
    print_obj("\n{}_{}".format(func_name, scope), "gradients", gradients)
    return variables, gradients
def create_variable_and_gradient_histogram_summaries(loss_dict, params):
    """Adds TensorBoard histogram summaries for variables and gradients.

    Args:
        loss_dict: dict, keys are scopes and values are scalar loss tensors
            for each network kind.
        params: dict, user passed parameters.
    """
    # Summary ops are host-side only, so skip them entirely on TPU.
    if params["use_tpu"]:
        return
    for scope, loss in loss_dict.items():
        # Get variables and their gradients wrt. loss.
        variables, gradients = get_variables_and_gradients(loss, scope)
        # Add summaries for TensorBoard.
        for grad, var in zip(gradients, variables):
            tf.summary.histogram(
                name="{}".format(var.name[:-2]),
                values=var,
                family="{}_variables".format(scope)
            )
            # None gradients (disconnected variables) cannot be summarized.
            if tf.is_tensor(x=grad):
                tf.summary.histogram(
                    name="{}".format(var.name[:-2]),
                    values=grad,
                    family="{}_gradients".format(scope)
                )
def instantiate_optimizer_slots(optimizer, variables, params, scope):
    """Instantiates optimizer slots for all parameters ahead of time.

    Applies all-zero gradients once so every optimizer slot variable
    exists before the real training op runs; because this happens at
    global step zero with zero gradients, neither the parameters nor the
    moment accumulators change.

    Args:
        optimizer: instance of `Optimizer`.
        variables: list, list of scoped trainable variables.
        params: dict, user passed parameters.
        scope: str, the network's name to find its variables to train.
    Returns:
        Apply gradients op to instantiate all optimizer slots and add to
        collection op for optimizer slot metric variables.
    """
    func_name = "instantiate_optimizer_slots"
    # Create zero gradients for every scoped trainable variable.
    zero_gradients = [
        tf.zeros_like(
            tensor=v,
            dtype=tf.float32,
            name="{}_{}_{}_zeros_like".format(func_name, scope, v.name[:-2])
        )
        for v in variables
    ]
    print_obj(
        "{}_{}".format(func_name, scope), "zero_gradients", zero_gradients
    )
    # Zip together gradients and variables.
    # NOTE(review): zip() is a one-shot iterator; this assumes print_obj
    # does not consume it before apply_gradients sees it — confirm.
    grads_and_vars = zip(zero_gradients, variables)
    print_obj(
        "{}_{}".format(func_name, scope), "grads_and_vars", grads_and_vars
    )
    # Apply zero gradients to create all optimizer slots ahead of time. Since
    # this is when global_step is zero, it won't change the parameters or the
    # moment accumulators.
    instantiate_optimizer_op = optimizer.apply_gradients(
        grads_and_vars=grads_and_vars,
        global_step=None,
        name="{}_{}_apply_gradients".format(func_name, scope)
    )
    print_obj(
        "{}_{}".format(func_name, scope),
        "instantiate_optimizer_op",
        instantiate_optimizer_op
    )
    if params["save_optimizer_metrics_to_checkpoint"]:
        # NOTE(review): this scope uses the raw optimizer name from params
        # (e.g. "Adam") while train_network names the optimizer with
        # .lower() (e.g. "adam") — confirm the scope filter actually
        # matches the slot metric variables.
        optimizer_name = "{}_{}_optimizer".format(
            scope, params["{}_optimizer".format(scope)]
        )
        # Add optimizer slot metric variables to global collection so that they
        # will be written to checkpoints.
        add_to_collection_ops = [
            tf.add_to_collection(name=tf.GraphKeys.GLOBAL_VARIABLES, value=v)
            for v in tf.get_collection(
                key=tf.GraphKeys.METRIC_VARIABLES, scope=optimizer_name
            )
        ]
    else:
        add_to_collection_ops = []
    print_obj(
        "{}_{}".format(func_name, scope),
        "add_to_collection_ops",
        add_to_collection_ops
    )
    return instantiate_optimizer_op, add_to_collection_ops
def dont_instantiate_optimizer_slots(scope):
    """No-op branch for tf.cond when optimizer slots already exist.

    Args:
        scope: str, the network's name to find its variables to train.
    Returns:
        A no-op standing in for the apply-gradients op, and an empty list
        standing in for the add-to-collection ops.
    """
    no_op = tf.no_op(name="{}_instantiate_optimizer_no_op".format(scope))
    return no_op, []
def train_network(
        loss, global_step, alpha_var, params, scope, increment_global_step):
    """Trains network and returns loss and train op.

    Builds the scope's optimizer from `params`, computes (optionally
    clipped) gradients of `loss` wrt the scope's trainable variables, and
    returns the (possibly dependency-wrapped) loss plus the
    apply-gradients train op.

    Args:
        loss: tensor, shape of [].
        global_step: tensor, the current training step or batch in the
            training loop.
        alpha_var: variable, alpha for weighted sum of fade-in of layers.
        params: dict, user passed parameters.
        scope: str, the network's name to find its variables to train.
        increment_global_step: int, whether to increment global step or not.
    Returns:
        Loss tensor and training op.
    """
    func_name = "train_network"
    print_obj("\n" + func_name, "loss", loss)
    print_obj(func_name, "global_step", global_step)
    print_obj(func_name, "alpha_var", alpha_var)
    print_obj(func_name, "scope", scope)
    # Create optimizer map.
    optimizers = {
        "Adam": tf.train.AdamOptimizer,
        "Adadelta": tf.train.AdadeltaOptimizer,
        "AdagradDA": tf.train.AdagradDAOptimizer,
        "Adagrad": tf.train.AdagradOptimizer,
        "Ftrl": tf.train.FtrlOptimizer,
        "GradientDescent": tf.train.GradientDescentOptimizer,
        "Momentum": tf.train.MomentumOptimizer,
        "ProximalAdagrad": tf.train.ProximalAdagradOptimizer,
        "ProximalGradientDescent": tf.train.ProximalGradientDescentOptimizer,
        "RMSProp": tf.train.RMSPropOptimizer
    }
    # Get optimizer and instantiate it.
    # Adam is the only optimizer here given extra tunable hyper-parameters.
    if params["{}_optimizer".format(scope)] == "Adam":
        optimizer = optimizers[params["{}_optimizer".format(scope)]](
            learning_rate=params["{}_learning_rate".format(scope)],
            beta1=params["{}_adam_beta1".format(scope)],
            beta2=params["{}_adam_beta2".format(scope)],
            epsilon=params["{}_adam_epsilon".format(scope)],
            name="{}_{}_optimizer".format(
                scope, params["{}_optimizer".format(scope)].lower()
            )
        )
    else:
        optimizer = optimizers[params["{}_optimizer".format(scope)]](
            learning_rate=params["{}_learning_rate".format(scope)],
            name="{}_{}_optimizer".format(
                scope, params["{}_optimizer".format(scope)].lower()
            )
        )
    print_obj("{}_{}".format(func_name, scope), "optimizer", optimizer)
    # If using TPU, wrap optimizer to use an allreduce to aggregate gradients
    # and broadcast the result to each shard.
    if params["use_tpu"]:
        optimizer = tf.contrib.tpu.CrossShardOptimizer(opt=optimizer)
        print_obj("{}_{}".format(func_name, scope), "optimizer", optimizer)
    # Get variables and their gradients wrt. loss.
    variables, gradients = get_variables_and_gradients(loss, scope)
    # Clip gradients.
    # NOTE(review): "{scope}_clip_gradients" doubles as both the on/off
    # flag and the clip-norm value — confirm this is intentional.
    if params["{}_clip_gradients".format(scope)]:
        gradients, _ = tf.clip_by_global_norm(
            t_list=gradients,
            clip_norm=params["{}_clip_gradients".format(scope)],
            name="{}_clip_by_global_norm_gradients".format(scope)
        )
        print_obj("\n{}_{}".format(func_name, scope), "gradients", gradients)
        # Add variable names back in for identification.
        gradients = [
            tf.identity(
                input=g,
                name="{}_{}_clip_gradients".format(func_name, v.name[:-2])
            )
            if tf.is_tensor(x=g) else g
            for g, v in zip(gradients, variables)
        ]
        print_obj("\n{}_{}".format(func_name, scope), "gradients", gradients)
    # Zip back together gradients and variables.
    # NOTE(review): zip() is a one-shot iterator; assumes print_obj does
    # not consume it before apply_gradients sees it — confirm.
    grads_and_vars = zip(gradients, variables)
    print_obj(
        "{}_{}".format(func_name, scope), "grads_and_vars", grads_and_vars
    )
    if params["{}_optimizer".format(scope)] != "GradientDescent":
        # Instantiate ALL optimizer slots, not just for ones without None grad.
        instantiate_optimizer_op, add_to_collection_ops = tf.cond(
            pred=tf.equal(
                x=global_step, y=0, name="instantiate_optimizer_op_pred"
            ),
            true_fn=lambda: instantiate_optimizer_slots(
                optimizer=optimizer,
                variables=variables,
                params=params,
                scope=scope
            ),
            false_fn=lambda: dont_instantiate_optimizer_slots(scope),
            name="instantiate_optimizer_op_cond"
        )
        # Tie slot creation (and collection bookkeeping) to the loss so it
        # runs before the gradients are applied.
        with tf.control_dependencies(
                control_inputs=[instantiate_optimizer_op]):
            with tf.control_dependencies(
                    control_inputs=add_to_collection_ops):
                loss = tf.identity(
                    input=loss,
                    name="{}_{}_loss_identity".format(func_name, scope)
                )
    # Create train op by applying gradients to variables and possibly
    # incrementing global step.
    train_op = optimizer.apply_gradients(
        grads_and_vars=grads_and_vars,
        global_step=global_step if increment_global_step else None,
        name="{}_apply_gradients".format(scope)
    )
    print_obj("{}_{}".format(func_name, scope), "train_op", train_op)
    return loss, train_op
def train_discriminator(
        discriminator_loss,
        global_step,
        alpha_var,
        params,
        discriminator_scope):
    """Wrapper that trains discriminator network & returns loss and train op.

    The discriminator training step is the one that increments the
    global step.

    Args:
        discriminator_loss: tensor, discriminator's loss with shape [].
        global_step: tensor, the current training step or batch in the
            training loop.
        alpha_var: variable, alpha for weighted sum of fade-in of layers.
        params: dict, user passed parameters.
        discriminator_scope: str, the discriminator's name to find its
            variables.
    Returns:
        Loss tensor and training op.
    """
    return train_network(
        loss=discriminator_loss,
        global_step=global_step,
        alpha_var=alpha_var,
        params=params,
        scope=discriminator_scope,
        increment_global_step=True
    )
def jointly_train_generator_encoder(
        generator_loss,
        encoder_loss,
        global_step,
        alpha_var,
        params,
        generator_scope,
        encoder_scope):
    """Trains generator/encoder network & returns loss and train op.

    Args:
        generator_loss: tensor, generator's loss with shape [].
        encoder_loss: tensor, encoder's loss with shape [].
        global_step: tensor, the current training step or batch in the
            training loop.
        alpha_var: variable, alpha for weighted sum of fade-in of layers.
        params: dict, user passed parameters.
        generator_scope: str, the generator's name to find its variables.
        encoder_scope: str, the encoder's name to find its variables.
    Returns:
        Combined loss tensor and grouped training op.
    """
    # The generator's step increments the global step; the encoder shares
    # the same step so its train op must not increment it again.
    gen_loss, gen_train_op = train_network(
        loss=generator_loss,
        global_step=global_step,
        alpha_var=alpha_var,
        params=params,
        scope=generator_scope,
        increment_global_step=True
    )
    enc_loss, enc_train_op = train_network(
        loss=encoder_loss,
        global_step=global_step,
        alpha_var=None,
        params=params,
        scope=encoder_scope,
        increment_global_step=False
    )
    # Report the two losses as a single combined scalar.
    loss = tf.add(
        x=gen_loss,
        y=enc_loss,
        name="jointly_train_generator_encoder_add_loss"
    )
    print_obj("\njointly_train_generator_encoder", "loss", loss)
    # Run both training ops as one.
    train_op = tf.group(
        gen_train_op,
        enc_train_op,
        name="jointly_train_generator_encoder_group_train_op"
    )
    print_obj("jointly_train_generator_encoder", "train_op", train_op)
    return loss, train_op
def known_update_alpha(global_step, alpha_var, params):
    """Returns ref for updated alpha variable.

    For a growth index known at graph-build time: odd indices are fade-in
    (transition) phases where alpha ramps linearly from 0 to 1 over
    `num_steps_until_growth` steps; even indices are stable phases where
    alpha is pinned to 1.

    Args:
        global_step: tensor, the current training step or batch in the
            training loop.
        alpha_var: variable, alpha for weighted sum of fade-in of layers.
        params: dict, user passed parameters.
    Returns:
        Ref for updated alpha variable.
    """
    func_name = "known_update_alpha"
    # If never grow, then no need to update alpha since it is not used.
    if len(params["conv_num_filters"]) > 1 and params["growth_idx"] > 0:
        if params["growth_idx"] % 2 == 1:
            # Update alpha var to linearly scale from 0 to 1 based on steps.
            alpha_var = tf.assign(
                ref=alpha_var,
                value=tf.divide(
                    x=tf.cast(
                        # Add 1 since it trains on global step 0, so off by 1.
                        x=tf.add(
                            x=tf.mod(
                                x=tf.subtract(
                                    x=global_step,
                                    y=params["previous_train_steps"]
                                ),
                                y=params["num_steps_until_growth"]
                            ),
                            y=1
                        ),
                        dtype=tf.float32
                    ),
                    y=params["num_steps_until_growth"]
                ),
                name="update_alpha_assign_linear"
            )
        else:
            # Stable phase: new layers fully blended in, alpha fixed at 1.
            alpha_var = tf.assign(
                ref=alpha_var,
                value=tf.ones(shape=[], dtype=tf.float32),
                name="update_alpha_assign_ones"
            )
    print_obj(func_name, "alpha_var", alpha_var)
    return alpha_var
def unknown_update_alpha_transition(global_step, alpha_var, params):
    """Returns ref for updated alpha variable.

    Transition-phase branch when the growth index is not known at
    graph-build time: alpha ramps linearly from 0 to 1 over
    `num_steps_until_growth` steps within the current growth cycle.

    Args:
        global_step: tensor, the current training step or batch in the
            training loop.
        alpha_var: variable, alpha for weighted sum of fade-in of layers.
        params: dict, user passed parameters.
    Returns:
        Ref for updated alpha variable.
    """
    # alpha = ((step - previous_train_steps) mod cycle_length + 1) / cycle_length
    alpha_var = tf.assign(
        ref=alpha_var,
        value=tf.divide(
            x=tf.cast(
                # Add 1 since it trains on global step 0, so off by 1.
                x=tf.add(
                    x=tf.mod(
                        x=tf.subtract(
                            x=global_step,
                            y=params["previous_train_steps"]
                        ),
                        y=params["num_steps_until_growth"]
                    ),
                    y=1
                ),
                dtype=tf.float32
            ),
            y=params["num_steps_until_growth"]
        ),
        name="update_alpha_assign_linear"
    )
    return alpha_var
def unknown_update_alpha_stable(global_step, alpha_var, params):
"""Returns ref for updated alpha variable.
Args:
global_step: tensor, the current training step or batch in the
training loop.
alpha_var: variable, alpha for weighted sum of fade-in of layers.
params: dict, user | |
lists but just values.
Parameters
----------
base : array
Input array to extend.
names : string, sequence
String or sequence of strings corresponding to the names
of the new fields.
data : array or sequence of arrays
Array or sequence of arrays storing the fields to add to the base.
dtypes : sequence of datatypes, optional
Datatype or sequence of datatypes.
If None, the datatypes are estimated from the `data`.
See Also
--------
append_fields
Returns
-------
appended_array : np.recarray
"""
return append_fields(base, names, data=data, dtypes=dtypes,
asrecarray=True, usemask=False)
def _repack_fields_dispatcher(a, align=None, recurse=None):
    # __array_function__ dispatcher: only `a` participates in dispatch.
    return (a,)
@array_function_dispatch(_repack_fields_dispatcher)
def repack_fields(a, align=False, recurse=False):
    """
    Re-pack the fields of a structured array or dtype in memory.
    The memory layout of structured datatypes allows fields at arbitrary
    byte offsets. This means the fields can be separated by padding bytes,
    their offsets can be non-monotonically increasing, and they can overlap.
    This method removes any overlaps and reorders the fields in memory so they
    have increasing byte offsets, and adds or removes padding bytes depending
    on the `align` option, which behaves like the `align` option to `np.dtype`.
    If `align=False`, this method produces a "packed" memory layout in which
    each field starts at the byte the previous field ended, and any padding
    bytes are removed.
    If `align=True`, this methods produces an "aligned" memory layout in which
    each field's offset is a multiple of its alignment, and the total itemsize
    is a multiple of the largest alignment, by adding padding bytes as needed.
    Parameters
    ----------
    a : ndarray or dtype
       array or dtype for which to repack the fields.
    align : boolean
       If true, use an "aligned" memory layout, otherwise use a "packed" layout.
    recurse : boolean
       If True, also repack nested structures.
    Returns
    -------
    repacked : ndarray or dtype
       Copy of `a` with fields repacked, or `a` itself if no repacking was
       needed.
    Examples
    --------
    >>> from numpy_demo.lib import recfunctions as rfn
    >>> def print_offsets(d):
    ...     print("offsets:", [d.fields[name][1] for name in d.names])
    ...     print("itemsize:", d.itemsize)
    ...
    >>> dt = np.dtype('u1, <i8, <f8', align=True)
    >>> dt
    dtype({'names':['f0','f1','f2'], 'formats':['u1','<i8','<f8'], 'offsets':[0,8,16], 'itemsize':24}, align=True)
    >>> print_offsets(dt)
    offsets: [0, 8, 16]
    itemsize: 24
    >>> packed_dt = rfn.repack_fields(dt)
    >>> packed_dt
    dtype([('f0', 'u1'), ('f1', '<i8'), ('f2', '<f8')])
    >>> print_offsets(packed_dt)
    offsets: [0, 1, 9]
    itemsize: 17
    """
    if not isinstance(a, np.dtype):
        # Array input: repack its dtype, then cast the array to it.
        dt = repack_fields(a.dtype, align=align, recurse=recurse)
        return a.astype(dt, copy=False)
    if a.names is None:
        # Non-structured dtypes have no fields to repack.
        return a
    fieldinfo = []
    for name in a.names:
        # a.fields[name] is (field dtype, offset[, title]).
        tup = a.fields[name]
        if recurse:
            fmt = repack_fields(tup[0], align=align, recurse=True)
        else:
            fmt = tup[0]
        if len(tup) == 3:
            # Preserve the field title via the (title, name) spelling.
            name = (tup[2], name)
        fieldinfo.append((name, fmt))
    # Rebuilding from (name, format) pairs drops offsets, i.e. repacks.
    dt = np.dtype(fieldinfo, align=align)
    # Wrap as (a.type, dt) to preserve dtype subclasses (e.g. np.record).
    return np.dtype((a.type, dt))
def _get_fields_and_offsets(dt, offset=0):
"""
Returns a flat list of (dtype, count, offset) tuples of all the
scalar fields in the dtype "dt", including nested fields, in left
to right order.
"""
# counts up elements in subarrays, including nested subarrays, and returns
# base dtype and count
def count_elem(dt):
count = 1
while dt.shape != ():
for size in dt.shape:
count *= size
dt = dt.base
return dt, count
fields = []
for name in dt.names:
field = dt.fields[name]
f_dt, f_offset = field[0], field[1]
f_dt, n = count_elem(f_dt)
if f_dt.names is None:
fields.append((np.dtype((f_dt, (n,))), n, f_offset + offset))
else:
subfields = _get_fields_and_offsets(f_dt, f_offset + offset)
size = f_dt.itemsize
for i in range(n):
if i == 0:
# optimization: avoid list comprehension if no subarray
fields.extend(subfields)
else:
fields.extend([(d, c, o + i*size) for d, c, o in subfields])
return fields
def _structured_to_unstructured_dispatcher(arr, dtype=None, copy=None,
                                           casting=None):
    # __array_function__ dispatcher: only `arr` participates in dispatch.
    return (arr,)
@array_function_dispatch(_structured_to_unstructured_dispatcher)
def structured_to_unstructured(arr, dtype=None, copy=False, casting='unsafe'):
    """
    Converts and n-D structured array into an (n+1)-D unstructured array.
    The new array will have a new last dimension equal in size to the
    number of field-elements of the input array. If not supplied, the output
    datatype is determined from the numpy_demo type promotion rules applied to all
    the field datatypes.
    Nested fields, as well as each element of any subarray fields, all count
    as a single field-elements.
    Parameters
    ----------
    arr : ndarray
       Structured array or dtype to convert. Cannot contain object datatype.
    dtype : dtype, optional
       The dtype of the output unstructured array.
    copy : bool, optional
        See copy argument to `ndarray.astype`. If true, always return a copy.
        If false, and `dtype` requirements are satisfied, a view is returned.
    casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
        See casting argument of `ndarray.astype`. Controls what kind of data
        casting may occur.
    Returns
    -------
    unstructured : ndarray
       Unstructured array with one more dimension.
    Examples
    --------
    >>> from numpy_demo.lib import recfunctions as rfn
    >>> a = np.zeros(4, dtype=[('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)])
    >>> a
    array([(0, (0., 0), [0., 0.]), (0, (0., 0), [0., 0.]),
           (0, (0., 0), [0., 0.]), (0, (0., 0), [0., 0.])],
          dtype=[('a', '<i4'), ('b', [('f0', '<f4'), ('f1', '<u2')]), ('c', '<f4', (2,))])
    >>> rfn.structured_to_unstructured(a)
    array([[0., 0., 0., 0., 0.],
           [0., 0., 0., 0., 0.],
           [0., 0., 0., 0., 0.],
           [0., 0., 0., 0., 0.]])
    >>> b = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)],
    ...              dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
    >>> np.mean(rfn.structured_to_unstructured(b[['x', 'z']]), axis=-1)
    array([ 3. ,  5.5,  9. , 11. ])
    """
    if arr.dtype.names is None:
        raise ValueError('arr must be a structured array')
    # Flatten the (possibly nested) fields to (dtype, count, offset) tuples.
    fields = _get_fields_and_offsets(arr.dtype)
    n_fields = len(fields)
    if n_fields == 0 and dtype is None:
        raise ValueError("arr has no fields. Unable to guess dtype")
    elif n_fields == 0:
        # too many bugs elsewhere for this to work now
        raise NotImplementedError("arr with no fields is not supported")
    dts, counts, offsets = zip(*fields)
    names = ['f{}'.format(n) for n in range(n_fields)]
    if dtype is None:
        # Promote across all field base dtypes to find a common output type.
        out_dtype = np.result_type(*[dt.base for dt in dts])
    else:
        out_dtype = dtype
    # Use a series of views and casts to convert to an unstructured array:
    # first view using flattened fields (doesn't work for object arrays)
    # Note: dts may include a shape for subarrays
    flattened_fields = np.dtype({'names': names,
                                 'formats': dts,
                                 'offsets': offsets,
                                 'itemsize': arr.dtype.itemsize})
    with suppress_warnings() as sup:  # until 1.16 (gh-12447)
        sup.filter(FutureWarning, "Numpy has detected")
        arr = arr.view(flattened_fields)
    # next cast to a packed format with all fields converted to new dtype
    packed_fields = np.dtype({'names': names,
                              'formats': [(out_dtype, dt.shape) for dt in dts]})
    arr = arr.astype(packed_fields, copy=copy, casting=casting)
    # finally is it safe to view the packed fields as the unstructured type
    return arr.view((out_dtype, (sum(counts),)))
def _unstructured_to_structured_dispatcher(arr, dtype=None, names=None,
                                           align=None, copy=None, casting=None):
    # __array_function__ dispatcher: only `arr` participates in dispatch.
    return (arr,)
@array_function_dispatch(_unstructured_to_structured_dispatcher)
def unstructured_to_structured(arr, dtype=None, names=None, align=False,
copy=False, casting='unsafe'):
"""
Converts and n-D unstructured array into an (n-1)-D structured array.
The last dimension of the input array is converted into a structure, with
number of field-elements equal to the size of the last dimension of the
input array. By default all output fields have the input array's dtype, but
an output structured dtype with an equal number of fields-elements can be
supplied instead.
Nested fields, as well as each element of any subarray fields, all count
towards the number of field-elements.
Parameters
----------
arr : ndarray
Unstructured array or dtype to convert.
dtype : dtype, optional
The structured dtype of the output array
names : list of strings, optional
If dtype is not supplied, this specifies the field names for the output
dtype, in order. The field dtypes will be the same as the input array.
align : boolean, optional
Whether to create an aligned memory layout.
copy : bool, optional
See copy argument to `ndarray.astype`. If true, always return a copy.
If false, and `dtype` requirements are satisfied, a view is returned.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
See casting argument of `ndarray.astype`. Controls what kind of data
casting may occur.
Returns
-------
structured : ndarray
Structured array with fewer dimensions.
Examples
--------
>>> from numpy_demo.lib import recfunctions as rfn
>>> dt = np.dtype([('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)])
>>> a = np.arange(20).reshape((4,5))
>>> a
array([[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, 9],
[10, 11, 12, | |
LAYER POLYGONS TO PASS TO SELF.POLYGONS AND ONTO THE GRAV/MAG ALGORITHMS
# FIRST SET UP XY DATA; IF LAYER IS BELOW LAYER 0 THEN ATTACH THE ABOVE LAYER TO COMPLETE THE POLYGON;
# ELSE USE TOP LAYER CHECK FOR 'FIXED' LAYER MODE AND FIND LAST LAYER TO MAKE POLYGON
if i >= 1 and self.layer_list[i].type == 'fixed':
# CHECK FOR LAST PREVIOUS FIXED LAYER AND USE ITS BASE TO COMPLETE THE POLYGON
for layer in range(i, 0, -1):
if self.layer_list[layer - 1].type == 'fixed':
# ASSIGN THE LAST FIXED LAYER INDEX
last_layer_index = layer - 1
# NOW APPEND NODES FOR BOUNDARY CONDITIONS (CONTINUOUS SLAB)
plotx = np.array(self.layer_list[i].x_nodes)
ploty = np.array(self.layer_list[i].y_nodes)
# SET THE PADDING NODES TO THE SAME DEPTH AS THE MODEL LIMIT NODES TO CREATE FLAT SLAB
ploty[0] = ploty[1]
ploty[-1] = ploty[-2]
self.layer_list[i].x_nodes = plotx
self.layer_list[i].y_nodes = ploty
# ADD NODES FROM ABOVE LAYER TO COMPETE POLYGON
layer_above_x = np.array(self.layer_list[last_layer_index].x_nodes)[::-1]
layer_above_y = np.array(self.layer_list[last_layer_index].y_nodes)[::-1]
polygon_x = np.append(np.array(layer_above_x), np.array(plotx))
polygon_y = np.append(np.array(layer_above_y), np.array(ploty))
# UPDATE LAYER POLYGON ATTRIBUTE
self.layer_list[i].polygon = list(zip(polygon_x, polygon_y))
break
else:
continue
else:
# IF THE LAYER IS A SIMPLE 'FLOATING LAYER'
polygon_x = np.array(self.layer_list[i].x_nodes)
polygon_y = np.array(self.layer_list[i].y_nodes)
# UPDATE LAYER POLYGON ATTRIBUTE
self.layer_list[i].polygon = list(zip(polygon_x, polygon_y))
# ----------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------
# UPDATE LAYER POLYGONS AND LINES
for i in range(0, self.total_layer_count + 1):
# SET POLYGON FILL COLOR BASED ON DENSITY
if self.layer_list[i].density != 0.0 and self.layer_list[i].reference_density is True:
# DETERMINE DENSITY CONTRAST FROM (DENSITY - REF DENSITY)
next_color = self.colormap.to_rgba(0.001 * self.layer_list[i].density -
0.001 * self.layer_list[i].reference_density)
elif self.layer_list[i].density != 0.0:
# NO REF DENSITY, SO JUST USE DENSITY VALUE
next_color = self.colormap.to_rgba(0.001 * self.layer_list[i].density)
else:
# NO DENSITY HAS BEEN SET SO LEAVE BLANK
next_color = self.colormap.to_rgba(0.)
# UPDATE POLYGON XY AND COLOR FILL
self.layer_list[i].polygon_mpl_actor[0].set_xy(self.layer_list[i].polygon)
self.layer_list[i].polygon_mpl_actor[0].set_color(next_color)
# UPDATE LAYER LINES
self.layer_list[i].node_mpl_actor[0].set_xdata(self.layer_list[i].x_nodes)
self.layer_list[i].node_mpl_actor[0].set_ydata(self.layer_list[i].y_nodes)
self.layer_list[i].node_mpl_actor[0].set_color(self.layer_list[i].color)
# ----------------------------------------------------------------------------------------------------------
# UPDATE CURRENTLY ACTIVE LAYER LINE AND NODES
self.currently_active_layer.set_xdata(self.layer_list[self.currently_active_layer_id].x_nodes)
self.currently_active_layer.set_ydata(self.layer_list[self.currently_active_layer_id].y_nodes)
self.currently_active_layer.set_color(self.layer_list[self.currently_active_layer_id].color)
# DRAW CANVAS FEATURES
self.model_frame.set_aspect(self.model_aspect)
self.grav_frame_aspect = ((self.gravity_frame.get_xlim()[1] - self.gravity_frame.get_xlim()[0]) /
(self.gravity_frame.get_ylim()[1] - self.gravity_frame.get_ylim()[0]))
# UPDATE INFO
self.display_info()
# CONTENT HAS NOT BEEN SAVED SINCE LAST MODIFICATION
self.model_saved = False
# UPDATE GMG GRAPHICS
self.draw()
def run_algorithms(self):
"""RUN POTENTIAL FIELD CALCULATION ALGORITHMS"""
# --------------------------------------------------------------------------------------------------------------
# CALCULATE TOPOGRAPHY - :FUTURE: PREDICTED TOPOGRAPHY FROM ISOSTATIC FUNC
self.pred_topo = np.zeros_like(self.xp)
# --------------------------------------------------------------------------------------------------------------
# tree.GetRootItem().GetChildren()[i].GetValue()
# --------------------------------------------------------------------------------------------------------------
# CALCULATE GRAVITY
polygons_to_use = []
densities_to_use = []
if self.calc_grav_switch is True:
# SELECT ONLY THOSE LAYERS THAT ARE CHECKED
for layer in range(0, self.total_layer_count + 1):
if self.layer_list[layer].include_in_calculations_switch is True:
# CHOSE POLYGONS
polygons_to_use.append(self.layer_list[layer].polygon)
# DETERMINE DENSITY CONTRASTS
densities_to_use.append((self.layer_list[layer].density -
self.layer_list[layer].reference_density))
# PASS POLYGONS TO BOTT ALGORITHM AND RETURN THE PREDICTED VALUES
bott_input_polygons = []
for p, d in zip(polygons_to_use, densities_to_use):
bott_input_polygons.append(Polygon(1000 * np.array(p), {'density': d}))
# SET THE PREDICTED VALUES AS THE BOTT OUTPUT
# NB: NODES ARE INPUT LEFT TO RIGHT SO WE MUST MULTIPLY BY -1 TO PRODUCE THE CORRECT SIGN AT OUTPUT
self.predicted_gravity = bott.gz(self.xp, self.gravity_observation_elv, bott_input_polygons) * -1
else:
# SET THE PREDICTED VALUES AS ZEROS
self.predicted_gravity = np.zeros_like(self.xp)
# SET THE PREDICTED PLOT LINE WITH THE NEWLY CALCULATED VALUES
self.pred_gravity_plot.set_data(self.xp * 0.001, self.predicted_gravity)
# --------------------------------------------------------------------------------------------------------------
# --------------------------------------------------------------------------------------------------------------
# CALCULATE MAGNETICS
# ZIP POLYGONS WITH SUSCEPTIBILITIES AND PASS TO TALWANI AND HEIRTZLER ALGORITHM
if self.calc_mag_switch is True:
# SELECT ONLY THOSE LAYERS THAT ARE CHECKED
polygons_to_use = []
susceptibilities_to_use = []
angle_a_to_use = []
angle_b_to_use = []
angle_c_to_use = []
earth_field_to_use = []
for layer in range(0, self.total_layer_count + 1):
if self.layer_list[layer].include_in_calculations_switch is True:
polygons_to_use.append(self.layer_list[layer].polygon)
susceptibilities_to_use.append(self.layer_list[layer].susceptibility)
angle_a_to_use.append(self.layer_list[layer].angle_a)
angle_b_to_use.append(self.layer_list[layer].angle_b)
angle_c_to_use.append(self.layer_list[layer].angle_c)
earth_field_to_use.append(self.layer_list[layer].earth_field)
# PASS TO TALWANI & HEIRTZLER ALGORITHM
mag_input_polygons = []
for p, s, a, b, c, f, in zip(polygons_to_use, susceptibilities_to_use, angle_a_to_use, angle_b_to_use,
angle_c_to_use, earth_field_to_use):
mag_input_polygons.append(Polygon(1000. * np.array(p), {'susceptibility': s, 'angle_a': a,
'angle_b': b, 'angle_c': c, 'f': f}))
# SET THE PREDICTED VALUES AS THE TALWANI & HEIRTZLER OUTPUT
# NB: NODES ARE INPUT LEFT TO RIGHT SO WE MUST MULTIPLY BY -1 TO PRODUCE THE CORRECT SIGN AT OUTPUT
self.predicted_nt = talwani_and_heirtzler.nt(self.xp, self.mag_observation_elv, mag_input_polygons) * -1
else:
# SET THE PREDICTED VALUES AS ZEROS
self.predicted_nt = np.zeros_like(self.xp)
# SET THE PREDICTED PLOT LINE WITH THE NEWLY CALCULATED VALUES
self.predicted_nt_plot.set_data(self.xp * 0.001, self.predicted_nt)
# --------------------------------------------------------------------------------------------------------------
# --------------------------------------------------------------------------------------------------------------
# UPDATE RMS VALUES
# RUN THE RMS CALC CODE
self.model_rms(self.xp)
# SET GRAVITY RMS
if self.obs_gravity_data_for_rms != [] and self.calc_grav_switch is True and self.predicted_gravity != []:
self.gravity_rms_plot.set_data(self.grav_residuals[:, 0], self.grav_residuals[:, 1])
else:
pass
# SET MAGNETIC RMS
if self.obs_mag_data_for_rms != [] and self.calc_mag_switch is True and self.predicted_nt != []:
self.mag_rms_plot.set_data(self.mag_residuals[:, 0], self.mag_residuals[:, 1])
else:
pass
# --------------------------------------------------------------------------------------------------------------
# SET FRAME X AND Y LIMITS
self.set_frame_limits()
# AFTER RUNNING ALGORITHMS, SET MODEL AS UNSAVED
self.model_saved = False
# UPDATE GMG GRAPHICS
self.draw()
def set_frame_limits(self):
"""SET FRAME X AND Y LIMITS"""
# --------------------------------------------------------------------------------------------------------------
# SET GRAVITY DISPLAY BOX LIMITS
if self.observed_gravity_switch is True and self.grav_residuals != []:
# CREATE EMPTY LIST
ymin_list = []
ymax_list = []
# APPEND OBSERVED MIN AND MAX
for i in range(len(self.observed_gravity_list)):
if self.observed_gravity_list[i] is not None:
ymin_list.append(self.observed_gravity_list[i].data[:, 1].min() - 2.0)
ymax_list.append(self.observed_gravity_list[i].data[:, 1].max() + 2.0)
# APPEND PREDICTED GRAVITY ANOMALY
ymin_list.append(self.predicted_gravity.min())
ymax_list.append(self.predicted_gravity.max())
# # APPEND RMS GRAVITY ANOMALY
# ymin_list.append(self.grav_residuals.min() - 2.0)
# ymax_list.append(self.grav_residuals.max() + 2.0)
# SET YMIN AND YMAX
ymin = min(ymin_list)
ymax = max(ymax_list)
elif self.observed_gravity_switch is True:
# CREATE EMPTY LIST
ymin_list = []
ymax_list = []
# APPEND OBSERVED MIN AND MAX
for i in range(len(self.observed_gravity_list)):
if self.observed_gravity_list[i] is not None:
ymin_list.append(self.observed_gravity_list[i].data[:, 1].min() - 2.0)
ymax_list.append(self.observed_gravity_list[i].data[:, 1].max() + 2.0)
# APPEND PREDICTED GRAVITY ANOMALY
if self.predicted_gravity is not None:
ymin_list.append(self.predicted_gravity.min() - 2.0)
ymax_list.append(self.predicted_gravity.max() + 2.0)
# SET YMIN AND YMAX
ymin = min(ymin_list)
ymax = max(ymax_list)
elif self.predicted_gravity is not None:
ymin = self.predicted_gravity.min() - 2.0
ymax = self.predicted_gravity.max() + 2.0
else:
pass
if self.gravity_frame is not None:
self.gravity_frame.set_ylim(ymin, ymax)
# --------------------------------------------------------------------------------------------------------------
# SET DERIVATIVE Y-AXIS LIMITS
# CREATE EMPTY LIST
ymin_list = [-1]
ymax_list = [1]
for i in range(len(self.observed_gravity_list)):
if self.observed_gravity_list[i].type == str('derivative'):
ymin_list.append(self.observed_gravity_list[i].data[:, 1].min() - 0.1)
ymax_list.append(self.observed_gravity_list[i].data[:, 1].max() + 0.1)
if self.gravity_frame is not None:
self.gravity_d_frame.set_ylim(min(ymin_list), max(ymax_list))
# --------------------------------------------------------------------------------------------------------------
# --------------------------------------------------------------------------------------------------------------
# --------------------------------------------------------------------------------------------------------------
# --------------------------------------------------------------------------------------------------------------
# SET MAGNETIC DISPLAY BOX LIMITS
if self.observed_magnetic_switch is True and self.mag_residuals != []:
# CREATE EMPTY LIST
ymin_list = []
ymax_list = []
# APPEND OBSERVED MIN AND MAX
for i in range(len(self.observed_magnetic_list)):
if self.observed_magnetic_list[i] is not None:
ymin_list.append(self.observed_magnetic_list[i].data[:, 1].min() - 2.0)
ymax_list.append(self.observed_magnetic_list[i].data[:, 1].max() + 2.0)
# APPEND PREDICTED GRAVITY ANOMALY
ymin_list.append(self.predicted_nt.min())
ymax_list.append(self.predicted_nt.max())
# APPEND RMS GRAVITY ANOMALY
ymin_list.append(self.mag_residuals.min() - 2.0)
ymax_list.append(self.mag_residuals.max() + 2.0)
# SET YMIN AND YMAX
ymin = min(ymin_list)
ymax = max(ymax_list)
elif self.observed_magnetic_switch is True:
# CREATE EMPTY LIST
ymin_list = []
ymax_list = []
# APPEND OBSERVED MIN AND MAX
for i in range(len(self.observed_magnetic_list)):
if self.observed_magnetic_list[i] is not None:
ymin_list.append(self.observed_magnetic_list[i].data[:, 1].min() - 2.0)
ymax_list.append(self.observed_magnetic_list[i].data[:, 1].max() + 2.0)
# APPEND PREDICTED GRAVITY ANOMALY
ymin_list.append(self.predicted_nt.min() - 2.0)
ymax_list.append(self.predicted_nt.max() + 2.0)
# SET YMIN AND YMAX
ymin = min(ymin_list)
ymax = max(ymax_list)
elif self.predicted_nt is not None:
# APPEND PREDICTED GRAVITY ANOMALY
ymin = self.predicted_nt.min() - 2.0
ymax = self.predicted_nt.max() + 2.0
else:
pass
if self.magnetic_frame is not None:
self.magnetic_frame.set_ylim(ymin, ymax)
# SET DERIVATIVE Y-AXIS LIMITS
# --------------------------------------------------------------------------------------------------------------
# CREATE EMPTY LIST
ymin_list = []
ymax_list = []
for i in range(len(self.observed_magnetic_list)):
if self.observed_magnetic_list[i].type == str('derivative'):
ymin_list.append(self.observed_magnetic_list[i].data[:, 1].min() - 0.1)
ymax_list.append(self.observed_magnetic_list[i].data[:, 1].max() + 0.1)
self.magnetic_d_frame.set_ylim(ymin, ymax)
# --------------------------------------------------------------------------------------------------------------
# --------------------------------------------------------------------------------------------------------------
# UPDATE GMG GRAPHICS
self.draw()
# EXTERNAL FIGURE CONSTRUCTION~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def plot_model(self, event):
"""CREATE EXTERNAL FIGURE OF MODEL USING INBUILT FIGURE CONSTRUCTION TOOL"""
# GET PLOTTING PARAMETERS FROM DIALOG BOX
self.set_values = PlotSettingsDialog(self, -1, 'Set figure parameters', self.model_aspect,
self.grav_frame_aspect)
self.set_values.Show(True)
def draw_model(self):
# GET USER INPUT FROM POPOUT BOX
self.file_path = self.set_values.file_path
self.file_type = self.set_values.file_type
self.use_tight_layout = self.set_values.use_tight_layout
self.fs = self.set_values.fs # FONT SIZE
self.aspect_ratio = self.set_values.aspect_ratio # MODEL ASPECT RATIO
self.ps = self.set_values.ps # OBSERVED POINT SIZE
self.calc_line_width = self.set_values.lw # CALCUALTED LINE WIDTH
self.font_type = self.set_values.font_type_text.GetValue()
self.topo_frame_min = self.set_values.topo_min_text.GetValue()
self.topo_frame_max = self.set_values.topo_max_text.GetValue()
| |
# Copyright (C) 2019 Intel Corporation.
# SPDX-License-Identifier: BSD-3-Clause
"""the tool to generate ASL code of ACPI tables for Pre-launched VMs.
"""
import sys, os, re, argparse, shutil, ctypes
from acpi_const import *
import board_cfg_lib, common
import collections
import lxml.etree
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'board_inspector'))
from acpiparser import rdt
from acpiparser.dsdt import parse_tree
from acpiparser.aml import builder
from acpiparser.aml.context import Context
from acpiparser.aml.visitors import GenerateBinaryVisitor, PrintLayoutVisitor
def calculate_checksum8():
    '''
    this function is implemented in iasl.

    Placeholder: the 8-bit ACPI table checksum is filled in by the iasl
    compiler when the generated .asl sources are built, so nothing needs
    to be computed here.
    :return: None
    '''
    pass
def gen_rsdp(dest_vm_acpi_path):
    '''
    generate rsdp.asl by copying the template and patching the XSDT pointer

    :param dest_vm_acpi_path: the path to store generated ACPI asl code
    :return:
    '''
    rsdp_asl = 'rsdp.asl'
    p_xsdt_addr = r'XSDT Address : ([0-9a-fA-F]{16})'
    xsdt_repl = 'XSDT Address : {0:016X}'.format(ACPI_XSDT_ADDR)
    with open(os.path.join(dest_vm_acpi_path, rsdp_asl), 'w') as dest:
        with open(os.path.join(TEMPLATE_ACPI_PATH, rsdp_asl), 'r') as src:
            # rewrite the XSDT pointer line; every other line passes through
            out_lines = [
                re.sub(p_xsdt_addr, xsdt_repl, line) if re.search(p_xsdt_addr, line) else line
                for line in src
            ]
        dest.writelines(out_lines)
def gen_xsdt(dest_vm_acpi_path, passthru_devices):
    '''
    generate xsdt.asl by patching the table pointers in the template

    :param dest_vm_acpi_path: the path to store generated ACPI asl code
    :param passthru_devices: dict to store passthru device list
    :return:
    '''
    xsdt_asl = 'xsdt.asl'
    # table pointers that are always rewritten: (pattern, replacement)
    fixed_entries = [
        (r'ACPI Table Address 0 : ([0-9a-fA-F]{16})', 'ACPI Table Address 0 : {0:016X}'.format(ACPI_FADT_ADDR)),
        (r'ACPI Table Address 1 : ([0-9a-fA-F]{16})', 'ACPI Table Address 1 : {0:016X}'.format(ACPI_MCFG_ADDR)),
        (r'ACPI Table Address 2 : ([0-9a-fA-F]{16})', 'ACPI Table Address 2 : {0:016X}'.format(ACPI_MADT_ADDR)),
    ]
    p_tpm2_addr = r'ACPI Table Address 3 : ([0-9a-fA-F]{16})'
    p_rtct_addr = r'ACPI Table Address 4 : ([0-9a-fA-F]{16})'
    with open(os.path.join(dest_vm_acpi_path, xsdt_asl), 'w') as dest:
        out_lines = []
        with open(os.path.join(TEMPLATE_ACPI_PATH, xsdt_asl), 'r') as src:
            for line in src:
                for pattern, replacement in fixed_entries:
                    if re.search(pattern, line):
                        out_lines.append(re.sub(pattern, replacement, line))
                        break
                else:
                    # TPM2/RTCT pointers are emitted only when the matching
                    # device is passed through; otherwise the pointer line is
                    # dropped from the generated table entirely
                    if re.search(p_tpm2_addr, line):
                        if 'TPM2' in passthru_devices:
                            out_lines.append(re.sub(p_tpm2_addr, 'ACPI Table Address 3 : {0:016X}'.format(ACPI_TPM2_ADDR), line))
                    elif re.search(p_rtct_addr, line):
                        if 'PTCT' in passthru_devices or 'RTCT' in passthru_devices:
                            out_lines.append(re.sub(p_rtct_addr, 'ACPI Table Address 4 : {0:016X}'.format(ACPI_RTCT_ADDR), line))
                    else:
                        out_lines.append(line)
        dest.writelines(out_lines)
def gen_fadt(dest_vm_acpi_path, board_root):
    '''
    generate facp.asl by patching the FACS and DSDT pointers in the template

    :param dest_vm_acpi_path: the path to store generated ACPI asl code
    :param board_root: the root element of board xml (accepted for interface
        compatibility; not consulted here)
    :return:
    '''
    fadt_asl = 'facp.asl'
    replacements = [
        (r'FACS Address : ([0-9a-fA-F]{8})', 'FACS Address : {0:08X}'.format(ACPI_FACS_ADDR)),
        (r'DSDT Address : ([0-9a-fA-F]{8})$', 'DSDT Address : {0:08X}'.format(ACPI_DSDT_ADDR)),
    ]
    with open(os.path.join(dest_vm_acpi_path, fadt_asl), 'w') as dest:
        out_lines = []
        with open(os.path.join(TEMPLATE_ACPI_PATH, fadt_asl), 'r') as src:
            for line in src:
                for pattern, replacement in replacements:
                    if re.search(pattern, line):
                        out_lines.append(re.sub(pattern, replacement, line))
                        break
                else:
                    out_lines.append(line)
        dest.writelines(out_lines)
def gen_mcfg(dest_vm_acpi_path):
    '''
    generate mcfg.asl by patching the PCI MMCONFIG window in the template

    :param dest_vm_acpi_path: the path to store generated ACPI asl code
    :return:
    '''
    mcfg_asl = 'mcfg.asl'
    # (pattern, replacement) pairs: base address, segment 0, buses 0x00-0xFF
    replacements = [
        (r'Base Address : ([0-9a-fA-F]{16})', 'Base Address : {0:016X}'.format(VIRT_PCI_MMCFG_BASE)),
        (r'Segment Group Number : (\d+)', 'Segment Group Number : {0:04X}'.format(0)),
        (r'Start Bus Number : (\d+)', 'Start Bus Number : {0:02X}'.format(0)),
        (r'End Bus Number : ([0-9a-fA-F]{2})', 'End Bus Number : {0:02X}'.format(0xff)),
    ]
    with open(os.path.join(dest_vm_acpi_path, mcfg_asl), 'w') as dest:
        out_lines = []
        with open(os.path.join(TEMPLATE_ACPI_PATH, mcfg_asl), 'r') as src:
            for line in src:
                for pattern, replacement in replacements:
                    if re.search(pattern, line):
                        out_lines.append(re.sub(pattern, replacement, line))
                        break
                else:
                    out_lines.append(line)
        dest.writelines(out_lines)
def gen_madt(dest_vm_acpi_path, max_cpu_num, apic_ids):
    '''
    generate apic.asl (the MADT) from the template.

    Rewrites the Local APIC address/flags, the first Processor Local APIC
    subtable, the I/O APIC subtable and the Local APIC NMI subtable.  The
    first Local APIC subtable (7 template lines) is captured while streaming
    and replicated once per additional vCPU with the Processor ID and Local
    APIC ID patched in.

    :param dest_vm_acpi_path: the path to store generated ACPI asl code
    :param max_cpu_num: number of vCPUs; one Local APIC entry per CPU
    :param apic_ids: Local APIC IDs indexed by processor id
    :return:
    '''
    madt_asl = 'apic.asl'
    # counters track which subtable we are currently inside, because the
    # Length / Flags / Processor ID patterns are duplicated across subtables
    lapic_index = 0
    p_lapic_addr = r'Local Apic Address : ([0-9a-fA-F]{8})'
    # NOTE(review): '(decoded below)' is an unescaped regex group, so this
    # pattern matches the text 'Flags decoded below' -- it will NOT match a
    # template line containing the literal '(decoded below)'.  If it never
    # matches, the template's default flags are emitted unchanged; verify
    # against the template before relying on the substitution.
    p_flags = r'\[0004\] Flags (decoded below) : (\d{8})' # dup flags
    flags_index = 0
    p_lapic_index = 0
    p_lapic_type = r'Subtable Type : (\d+) \[Processor Local APIC\]'
    p_lapic_len = r'\[0001\] Length : ([0-9a-fA-F]{2})' # dup len
    p_lapic_len_index = 0
    p_lapic_flags_index = 0
    p_lapic_process_id = r'\[0001\] Processor ID : (\d+)' # dup processor
    p_lapic_process_id_index = 0
    p_lapic_id = r'Local Apic ID : ([0-9a-fA-F]{2})'
    p_lapic_line_index = 0
    lapic_lines = []
    ioapic_index = 0
    p_ioapic_type = r'Subtable Type : (\d+) \[I/O APIC\]'
    p_ioapic_len_index = 0
    p_ioapic_id = r'I/O Apic ID : (\d+)'
    p_ioapic_addr = r'\[0004\] Address : ([0-9a-fA-F]{8})'
    lapic_nmi_index = 0
    p_lapic_nmi_type = r'Subtable Type : (\d+) \[Local APIC NMI\]'
    p_lapic_nmi_len_index = 0
    p_lapic_nmi_processor_id_index = 0
    # NOTE(review): same unescaped '(decoded below)' group as p_flags above.
    p_lapic_nmi_flags = r'\[0002\] Flags (decoded below) : ([0-9a-fA-F]{4})'
    p_lapic_nmi_flags_index = 0
    p_lapic_nmi_lint = r'Interrupt Input LINT : (\d+)'
    with open(os.path.join(dest_vm_acpi_path, madt_asl), 'w') as dest:
        lines = []
        with open(os.path.join(TEMPLATE_ACPI_PATH, madt_asl), 'r') as src:
            for line in src.readlines():
                if re.search(p_lapic_addr, line):
                    lapic_index += 1
                    lines.append(re.sub(p_lapic_addr, 'Local Apic Address : {0:08X}'.format(0xFEE00000), line))
                elif re.search(p_flags, line):
                    # only the first Flags line of the table header and of the
                    # first Local APIC subtable are patched (enabled = 1)
                    if lapic_index == 1 and flags_index == 0:
                        lines.append(
                            re.sub(p_flags, '[0004] Flags (decoded below) : {0:08X}'.format(0x1), line))
                        flags_index += 1
                    elif p_lapic_index == 1 and p_lapic_flags_index == 0:
                        lines.append(
                            re.sub(p_flags, '[0004] Flags (decoded below) : {0:08X}'.format(0x1),
                                   line))
                        p_lapic_flags_index += 1
                    else:
                        lines.append(line)
                elif re.search(p_lapic_type, line):
                    p_lapic_index += 1
                    if lapic_index == 1:
                        lines.append(re.sub(p_lapic_type, 'Subtable Type : {0:02X} [Processor Local APIC]'.format(
                            ACPI_MADT_TYPE_LOCAL_APIC), line))
                    else:
                        lines.append(line)
                elif re.search(p_lapic_len, line):
                    # the Length pattern appears in several subtables; pick the
                    # fixed ACPI length for whichever subtable we are inside
                    if p_lapic_index == 1 and p_lapic_len_index == 0:
                        lines.append(
                            re.sub(p_lapic_len, '[0001] Length : {0:02X}'.format(0x8),
                                   line))
                        p_lapic_len_index += 1
                    elif ioapic_index == 1 and p_ioapic_len_index == 0:
                        lines.append(
                            re.sub(p_lapic_len, '[0001] Length : {0:02X}'.format(0x0C),
                                   line))
                        p_ioapic_len_index += 1
                    elif lapic_nmi_index == 1 and p_lapic_nmi_len_index == 0:
                        lines.append(
                            re.sub(p_lapic_len, '[0001] Length : {0:02X}'.format(0x06),
                                   line))
                        p_lapic_nmi_len_index += 1
                    else:
                        lines.append(line)
                elif re.search(p_lapic_process_id, line):
                    # Processor ID 0 for the first Local APIC entry; 0xFF
                    # (all processors) for the Local APIC NMI entry
                    if p_lapic_index == 1 and p_lapic_process_id_index == 0:
                        lines.append(re.sub(p_lapic_process_id,
                                            '[0001] Processor ID : {0:02X}'.format(0x0),
                                            line))
                        p_lapic_process_id_index += 1
                    elif lapic_nmi_index == 1 and p_lapic_nmi_processor_id_index == 0:
                        lines.append(
                            re.sub(p_lapic_process_id,
                                   '[0001] Processor ID : {0:02X}'.format(0xFF),
                                   line))
                        p_lapic_nmi_processor_id_index += 1
                    else:
                        lines.append(line)
                elif re.search(p_lapic_id, line):
                    lines.append(re.sub(p_lapic_id, 'Local Apic ID : {0:02X}'.format(apic_ids[0]), line))
                elif re.search(p_ioapic_type, line):
                    ioapic_index += 1
                    lines.append(
                        re.sub(p_ioapic_type, 'Subtable Type : {0:02X} [I/O APIC]'.format(ACPI_MADT_TYPE_IOAPIC), line))
                elif re.search(p_ioapic_id, line):
                    lines.append(re.sub(p_ioapic_id, 'I/O Apic ID : {0:02X}'.format(0x01), line))
                elif re.search(p_ioapic_addr, line):
                    # NOTE(review): {0:02X} prints a minimum of 2 hex digits,
                    # so a full 32-bit VIOAPIC_BASE is still emitted in full
                    lines.append(re.sub(p_ioapic_addr,
                                        '[0004] Address : {0:02X}'.format(VIOAPIC_BASE),
                                        line))
                elif re.search(p_lapic_nmi_type, line):
                    lapic_nmi_index += 1
                    if lapic_nmi_index == 1:
                        lines.append(re.sub(p_lapic_nmi_type, 'Subtable Type : {0:02X} [Local APIC NMI]'.format(
                            ACPI_MADT_TYPE_LOCAL_APIC_NMI), line))
                    else:
                        lines.append(line)
                elif re.search(p_lapic_nmi_flags, line):
                    if lapic_nmi_index == 1 and p_lapic_nmi_flags_index == 0:
                        lines.append(
                            re.sub(p_lapic_nmi_flags, '[0002] Flags (decoded below) : {0:04X}'.format(0x5),
                                   line))
                        p_lapic_nmi_flags_index += 1
                    else:
                        lines.append(line)
                elif re.search(p_lapic_nmi_lint, line):
                    if lapic_nmi_index == 1:
                        lines.append(re.sub(p_lapic_nmi_lint, 'Interrupt Input LINT : {0:02X}'.format(0x1), line))
                    else:
                        lines.append(line)
                else:
                    lines.append(line)
                # capture the 7 template lines of the first Local APIC subtable
                # so they can be replicated for the remaining vCPUs below
                if p_lapic_index == 1 and p_lapic_line_index < 7:
                    lapic_lines.append(line)
                    p_lapic_line_index += 1
                if p_lapic_index == 1 and p_lapic_line_index == 7:
                    p_lapic_line_index = 0
                    # emit one patched copy of the captured subtable per extra CPU
                    for process_id in range(1, max_cpu_num):
                        p_lapic_index = process_id + 1
                        lines.append('\n')
                        for lapic_line in lapic_lines:
                            if re.search(p_lapic_type, lapic_line):
                                lines.append(re.sub(p_lapic_type,
                                                    'Subtable Type : {0:02X} [Processor Local APIC]'.format(
                                                        ACPI_MADT_TYPE_LOCAL_APIC), lapic_line))
                            elif re.search(p_lapic_len, lapic_line):
                                lines.append(
                                    re.sub(p_lapic_len,
                                           '[0001] Length : {0:02X}'.format(0x8),
                                           lapic_line))
                            elif re.search(p_flags, lapic_line):
                                lines.append(
                                    re.sub(p_flags,
                                           '[0004] Flags (decoded below) : {0:08X}'.format(0x1),
                                           lapic_line))
                            elif re.search(p_lapic_process_id, lapic_line):
                                lines.append(re.sub(p_lapic_process_id,
                                                    '[0001] Processor ID : {0:02X}'.format(
                                                        process_id), lapic_line))
                            elif re.search(p_lapic_id, lapic_line):
                                lines.append(
                                    re.sub(p_lapic_id, 'Local Apic ID : {0:02X}'.format(apic_ids[process_id]), lapic_line))
                            else:
                                lines.append(lapic_line)
        dest.writelines(lines)
def gen_tpm2(dest_vm_acpi_path, passthru_devices):
    '''
    generate tpm2.asl when TPM2 is passed through; otherwise remove any
    previously generated tpm2.asl

    :param dest_vm_acpi_path: the path to store generated ACPI asl code
    :param passthru_devices: dict to store passthru device list
    :return:
    '''
    tpm2_asl = 'tpm2.asl'
    dest_path = os.path.join(dest_vm_acpi_path, tpm2_asl)
    if 'TPM2' not in passthru_devices:
        # no TPM2 passthru: make sure a stale tpm2.asl is not left behind
        if os.path.isfile(dest_path):
            os.remove(dest_path)
        return
    p_control_addr = r'Control Address : ([0-9a-fA-F]{16})'
    p_start_method = r'Start Method : (.*)'
    with open(dest_path, 'w') as dest:
        out_lines = []
        with open(os.path.join(TEMPLATE_ACPI_PATH, tpm2_asl), 'r') as src:
            for line in src:
                if re.search(p_control_addr, line):
                    out_lines.append(re.sub(p_control_addr, 'Control Address : {0:016X}'.format(0xFED40040), line))
                elif re.search(p_start_method, line):
                    out_lines.append(re.sub(p_start_method, 'Start Method : {0:02X}'.format(0x7), line))
                else:
                    out_lines.append(line)
        dest.writelines(out_lines)
def encode_eisa_id(s):
    """Encode a 7-character EISA/PNP ID string (e.g. "PNP0103") into its
    packed 32-bit integer form.

    The first three characters are compressed letters ('A' -> 1 ... 'Z' -> 26,
    5 bits each) and the last four characters are hexadecimal digits (4 bits
    each), laid out as the standard 4-byte EISA ID; the bytes are then
    interpreted as an integer in host byte order.

    :param s: ID string: 3 uppercase letters followed by 4 hex digits
    :return: the encoded EISA ID as an int
    """
    chars = [(ord(c) - 0x40) & 0x1F for c in s[0:3]]
    digits = [int(c, 16) for c in s[3:7]]
    encoded = [
        (chars[0] << 2) | (chars[1] >> 3),     # Bit 6:2 is char[0]; Bit 1:0 is the higher 2 bits of char[1].
        ((chars[1] & 0x7) << 5) | (chars[2]),  # Bit 7:5 is the lower 3 bits of char[1]; Bit 4:0 is char[2].
        (digits[0] << 4) | (digits[1]),        # Bit 7:4 is digits[0]; Bit 3:0 is digits[1]
        (digits[2] << 4) | (digits[3]),        # Bit 7:4 is digits[2]; Bit 3:0 is digits[3] (comment fixed: was "digits[2]")
    ]
    return int.from_bytes(bytes(encoded), sys.byteorder)
def gen_root_pci_bus(path, prt_packages):
resources = []
# Bus number
cls = rdt.LargeResourceItemWordAddressSpace_factory()
length = ctypes.sizeof(cls)
data = bytearray(length)
res = cls.from_buffer(data)
res.type = 1 # Large type
res.name = rdt.LARGE_RESOURCE_ITEM_WORD_ADDRESS_SPACE
res.length = length - 3
res._TYP = 2 # Bus number range
res._DEC = 0 # Positive decoding
res._MIF = 1 # Minimum address fixed
res._MAF = 1 # Maximum address fixed
res.flags = 0
res._MAX | |
# <filename>cadquery/cq.py
"""
Copyright (C) 2011-2015 Parametric Products Intellectual Holdings, LLC
This file is part of CadQuery.
CadQuery is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
CadQuery is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; If not, see <http://www.gnu.org/licenses/>
"""
import math
from copy import copy
from itertools import chain
from typing import (
overload,
Sequence,
TypeVar,
Union,
Tuple,
Optional,
Any,
Iterable,
Callable,
List,
cast,
Dict,
)
from typing_extensions import Literal
from inspect import Parameter, Signature
from .occ_impl.geom import Vector, Plane, Location
from .occ_impl.shapes import (
Shape,
Edge,
Wire,
Face,
Solid,
Compound,
sortWiresByBuildOrder,
)
from .occ_impl.exporters.svg import getSVG, exportSVG
from .utils import deprecate_kwarg, deprecate
from .selectors import (
Selector,
PerpendicularDirSelector,
NearestToPointSelector,
StringSyntaxSelector,
)
CQObject = Union[Vector, Location, Shape]
VectorLike = Union[Tuple[float, float], Tuple[float, float, float], Vector]
T = TypeVar("T", bound="Workplane")
"""A type variable used to make the return type of a method the same as the
type of `self` or another argument.
This is useful when you want to allow a class to derive from
:class:`.Workplane`, and you want a (fluent) method in the derived class to
return an instance of the derived class, rather than of :class:`.Workplane`.
"""
def _selectShapes(objects: Iterable[Any]) -> List[Shape]:
    """Return only the Shape instances from *objects*, preserving order."""
    return [obj for obj in objects if isinstance(obj, Shape)]
class CQContext(object):
    """
    A shared context for modeling.

    All objects in the same CQ chain share a reference to this same object instance
    which allows for shared state when needed.
    """

    pendingWires: List[Wire]
    pendingEdges: List[Edge]
    firstPoint: Optional[Vector]
    tolerance: float
    tags: Dict[str, "Workplane"]

    def __init__(self):
        # wires created so far that still await extrusion
        self.pendingWires = []
        # edges created so far that still need to be joined into wires
        self.pendingEdges = []
        # first point of the current edge chain; consulted when close() is called
        self.firstPoint = None
        # user specified tolerance
        self.tolerance = 0.0001
        self.tags = {}

    def popPendingEdges(self, errorOnEmpty: bool = True) -> List[Edge]:
        """
        Get and clear pending edges.

        :raises ValueError: if errorOnEmpty is True and no edges are present.
        """
        pending = self.pendingEdges
        if errorOnEmpty and not pending:
            raise ValueError("No pending edges present")
        self.pendingEdges = []
        return pending

    def popPendingWires(self, errorOnEmpty: bool = True) -> List[Wire]:
        """
        Get and clear pending wires.

        :raises ValueError: if errorOnEmpty is True and no wires are present.
        """
        pending = self.pendingWires
        if errorOnEmpty and not pending:
            raise ValueError("No pending wires present")
        self.pendingWires = []
        return pending
class Workplane(object):
"""
Defines a coordinate system in space, in which 2-d coordinates can be used.
:param plane: the plane in which the workplane will be done
:type plane: a Plane object, or a string in (XY|YZ|XZ|front|back|top|bottom|left|right)
:param origin: the desired origin of the new workplane
:type origin: a 3-tuple in global coordinates, or None to default to the origin
:param obj: an object to use initially for the stack
:type obj: a CAD primitive, or None to use the centerpoint of the plane as the initial
stack value.
:raises: ValueError if the provided plane is not a plane, a valid named workplane
:return: A Workplane object, with coordinate system matching the supplied plane.
The most common use is::
s = Workplane("XY")
After creation, the stack contains a single point, the origin of the underlying plane,
and the *current point* is on the origin.
.. note::
You can also create workplanes on the surface of existing faces using
:py:meth:`CQ.workplane`
"""
objects: List[CQObject]
ctx: CQContext
parent: Optional["Workplane"]
plane: Plane
_tag: Optional[str]
@overload
def __init__(self, obj: CQObject) -> None:
...
@overload
def __init__(
self,
inPlane: Union[Plane, str] = "XY",
origin: VectorLike = (0, 0, 0),
obj: Optional[CQObject] = None,
) -> None:
...
def __init__(self, inPlane="XY", origin=(0, 0, 0), obj=None):
"""
make a workplane from a particular plane
:param inPlane: the plane in which the workplane will be done
:type inPlane: a Plane object, or a string in (XY|YZ|XZ|front|back|top|bottom|left|right)
:param origin: the desired origin of the new workplane
:type origin: a 3-tuple in global coordinates, or None to default to the origin
:param obj: an object to use initially for the stack
:type obj: a CAD primitive, or None to use the centerpoint of the plane as the initial
stack value.
:raises: ValueError if the provided plane is not a plane, or one of XY|YZ|XZ
:return: A Workplane object, with coordinate system matching the supplied plane.
The most common use is::
s = Workplane("XY")
After creation, the stack contains a single point, the origin of the underlying plane, and
the *current point* is on the origin.
"""
if isinstance(inPlane, Plane):
tmpPlane = inPlane
elif isinstance(inPlane, str):
tmpPlane = Plane.named(inPlane, origin)
elif isinstance(inPlane, (Vector, Location, Shape)):
obj = inPlane
tmpPlane = Plane.named("XY", origin)
else:
raise ValueError(
"Provided value {} is not a valid work plane".format(inPlane)
)
self.plane = tmpPlane
# Changed so that workplane has the center as the first item on the stack
if obj:
self.objects = [obj]
else:
self.objects = []
self.parent = None
self.ctx = CQContext()
self._tag = None
def tag(self: T, name: str) -> T:
"""
Tags the current CQ object for later reference.
:param name: the name to tag this object with
:type name: string
:returns: self, a cq object with tag applied
"""
self._tag = name
self.ctx.tags[name] = self
return self
def _collectProperty(self, propName: str) -> List[CQObject]:
"""
Collects all of the values for propName,
for all items on the stack.
OCCT objects do not implement id correctly,
so hashCode is used to ensure we don't add the same
object multiple times.
One weird use case is that the stack could have a solid reference object
on it. This is meant to be a reference to the most recently modified version
of the context solid, whatever it is.
"""
all = {}
for o in self.objects:
# tricky-- if an object is a compound of solids,
# do not return all of the solids underneath-- typically
# then we'll keep joining to ourself
if (
propName == "Solids"
and isinstance(o, Solid)
and o.ShapeType() == "Compound"
):
for i in getattr(o, "Compounds")():
all[i.hashCode()] = i
else:
if hasattr(o, propName):
for i in getattr(o, propName)():
all[i.hashCode()] = i
return list(all.values())
@overload
def split(self: T, keepTop: bool = False, keepBottom: bool = False) -> T:
...
@overload
def split(self: T, splitter: Union[T, Shape]) -> T:
...
def split(self: T, *args, **kwargs) -> T:
"""
Splits a solid on the stack into two parts, optionally keeping the separate parts.
:param boolean keepTop: True to keep the top, False or None to discard it
:param boolean keepBottom: True to keep the bottom, False or None to discard it
:raises ValueError: if keepTop and keepBottom are both false.
:raises ValueError: if there is no solid in the current stack or parent chain
:returns: CQ object with the desired objects on the stack.
The most common operation splits a solid and keeps one half. This sample creates
split bushing::
# drill a hole in the side
c = Workplane().box(1,1,1).faces(">Z").workplane().circle(0.25).cutThruAll()
# now cut it in half sideways
c = c.faces(">Y").workplane(-0.5).split(keepTop=True)
"""
# split using an object
if len(args) == 1 and isinstance(args[0], (Workplane, Shape)):
arg = args[0]
solid = self.findSolid()
tools = (
(arg,)
if isinstance(arg, Shape)
else [v for v in arg.vals() if isinstance(v, Shape)]
)
rv = [solid.split(*tools)]
# split using the current wokrplane
else:
# boilerplate for arg/kwarg parsing
sig = Signature(
(
Parameter(
"keepTop", Parameter.POSITIONAL_OR_KEYWORD, default=False
),
Parameter(
"keepBottom", Parameter.POSITIONAL_OR_KEYWORD, default=False
),
)
)
bound_args | |
not exist."
logger.info(msg % resource)
return
if not self.force:
slang = self.get_resource_option(resource, 'source_lang')
for language in stats:
if language == slang:
continue
if int(stats[language]['translated_entities']) > 0:
msg = (
"Skipping: %s : Unable to delete resource because it "
"has a not empty %s translation.\nPlease use -f or "
"--force option to delete this resource."
)
logger.info(msg % (resource, language))
return
try:
self.do_url_request('delete_resource', method="DELETE")
self.config.remove_section(resource)
self.save()
msg = "Deleted resource %s of project %s."
logger.info(msg % (resource_slug, project_slug))
except Exception as e:
msg = "Unable to delete resource %s of project %s."
logger.error(msg % (resource_slug, project_slug))
if isinstance(e, SSLError) or not self.skip:
raise
def _delete_translations(self, project_details,
resource, stats, languages):
"""Delete the specified translations for the specified resource."""
logger.info("Deleting translations from resource %s:" % resource)
for language in languages:
self._delete_translation(
project_details, resource, stats, language
)
def _delete_translation(self, project_details, resource, stats, language):
"""Delete a specific translation from the specified resource."""
project_slug, resource_slug = resource.split('.', 1)
if language not in stats:
if not self.skip:
msg = "Skipping %s: Translation does not exist."
logger.warning(msg % (language))
return
if not self.force:
teams = project_details['teams']
if language in teams:
msg = (
"Skipping %s: Unable to delete translation because it is "
"associated with a team.\nPlease use -f or --force option "
"to delete this translation."
)
logger.warning(msg % language)
return
if int(stats[language]['translated_entities']) > 0:
msg = (
"Skipping %s: Unable to delete translation because it "
"is not empty.\nPlease use -f or --force option to delete "
"this translation."
)
logger.warning(msg % language)
return
try:
self.do_url_request(
'delete_translation', language=language, method="DELETE"
)
msg = "Deleted %s translations of resource %s of project %s."
logger.info(msg % (language, resource_slug, project_slug))
except Exception as e:
msg = "Unable to delete translation %s"
logger.error(msg % language)
if isinstance(e, SSLError) or not self.skip:
raise
    def do_url_request(self, api_call, multipart=False, data=None,
                       files=None, method="GET", skip_decode=False,
                       params=None, parallel=False, no_interactive=False,
                       **kwargs):
        """Issue an HTTP request against the Transifex API.

        Args:
            api_call: key into API_URLS selecting the endpoint template.
            multipart: if True, build an upload payload from *files*.
            data: request payload (overwritten when *multipart* is used).
            files: iterable of (info, filename) pairs, where info is a
                "resource;language" string.
            method: HTTP verb ("GET", "PUT", "DELETE", ...).
            skip_decode: forwarded to the transport helper.
            params: extra query (GET) or form (PUT) parameters.
            parallel: if True, enqueue the request instead of blocking.
            no_interactive: forwarded to the credentials lookup.
            **kwargs: substituted into the URL template; may also carry
                "callback"/"callback_args" for the transport helper.
        Returns:
            The result of utils.make_request (or utils.queue_request when
            *parallel* is True).
        Raises:
            TransifexrcConfigFileError: if ~/.transifexrc has no entry
                for the configured host.
        """
        files = files or []
        params = params or {}
        # Read the credentials from the config file (.transifexrc)
        host = self.url_info['host']
        username, passwd = self.getset_host_credentials(
            host, no_interactive=no_interactive
        )
        try:
            hostname = self.txrc.get(host, 'hostname')
        except configparser.NoSectionError:
            raise TransifexrcConfigFileError(
                "No entry found for host %s. Edit"
                " ~/.transifexrc and add the appropriate"
                " info in there." % host
            )
        # Create the Url
        kwargs['hostname'] = hostname
        kwargs.update(self.url_info)
        url = API_URLS[api_call] % kwargs
        # in case of GET we need to add xliff option as get parameter
        if params and method == 'GET':
            # update url params
            # in case we need to add extra params on a url, we first get the
            # already existing query, create a dict which will be merged with
            # the extra params and finally put it back in the url
            url_parts = list(urlparse.urlparse(url))
            query = dict(urlparse.parse_qsl(url_parts[4]))
            query.update(params)
            url_parts[4] = urlencode(query)
            url = urlparse.urlunparse(url_parts)
        if multipart:
            for info, filename in files:
                # FIXME: It works because we only pass to files argument
                # only one item
                name = os.path.basename(filename)
                # NOTE(review): the file handle opened below is never closed
                # explicitly; it is left to garbage collection.
                data = {
                    "resource": info.split(';')[0],
                    "language": info.split(';')[1],
                    "uploaded_file": (name, open(filename, 'rb').read())
                }
                # in case of PUT we add xliff option as form data
                if method == 'PUT':
                    data.update(params)
        # Prepare the callback function and arguments
        cb = kwargs.get("callback", None)
        args = kwargs.get("callback_args", {})
        if parallel:
            return utils.queue_request(method, hostname, url, username, passwd,
                                       data, skip_decode=skip_decode,
                                       callback=cb, callback_args=args)
        return utils.make_request(
            method, hostname, url, username, passwd, data,
            skip_decode=skip_decode, callback=cb, callback_args=args
        )
def _should_update_translation(self, lang, stats, local_file, force=False,
mode=None):
"""Whether a translation should be udpated from Transifex.
We use the following criteria for that:
- If user requested to force the download.
- If language exists in Transifex.
- If the local file is older than the Transifex's file.
- If the user requested a x% completion.
Args:
lang: The language code to check.
stats: The (global) statistics object.
local_file: The local translation file.
force: A boolean flag.
mode: The mode for the translation.
Returns:
True or False.
"""
return self._should_download(lang, stats, local_file, force)
def _should_add_translation(self, lang, stats, force=False, mode=None):
"""Whether a translation should be added from Transifex.
We use the following criteria for that:
- If user requested to force the download.
- If language exists in Transifex.
- If the user requested a x% completion.
Args:
lang: The language code to check.
stats: The (global) statistics object.
force: A boolean flag.
mode: The mode for the translation.
Returns:
True or False.
"""
return self._should_download(lang, stats, None, force)
def _should_download(self, lang, stats, local_file=None, force=False,
mode=None):
"""Return whether a translation should be downloaded.
If local_file is None, skip the timestamps check (the file does
not exist locally).
"""
try:
lang_stats = stats[lang]
except KeyError:
logger.debug("No lang %s in statistics" % lang)
return False
satisfies_min = self._satisfies_min_translated(lang_stats, mode)
if not satisfies_min:
return False
if force:
logger.debug("Downloading translation due to -f")
return True
if local_file is not None:
remote_update = self._extract_updated(lang_stats)
if not self._remote_is_newer(remote_update, local_file):
logger.debug("Local is newer than remote for lang %s" % lang)
return False
return True
def _should_push_translation(self, lang, stats, local_file, force=False):
"""Return whether a local translation file should be
pushed to Trasnifex.
We use the following criteria for that:
- If user requested to force the upload.
- If language exists in Transifex.
- If local file is younger than the remote file.
Args:
lang: The language code to check.
stats: The (global) statistics object.
local_file: The local translation file.
force: A boolean flag.
Returns:
True or False.
"""
if force:
logger.debug("Push translation due to -f.")
return True
try:
lang_stats = stats[lang]
except KeyError:
logger.debug("Language %s does not exist in Transifex." % lang)
return True
if local_file is not None:
remote_update = self._extract_updated(lang_stats)
if self._remote_is_newer(remote_update, local_file):
msg = "Remote translation is newer than local file for lang %s"
logger.debug(msg % lang)
return False
return True
def _generate_timestamp(self, update_datetime):
"""Generate a UNIX timestamp from the argument.
Args:
update_datetime: The datetime in the format used by Transifex.
Returns:
A float, representing the timestamp that corresponds to the
argument.
"""
time_format = "%Y-%m-%d %H:%M:%S"
return time.mktime(
datetime.datetime(
*time.strptime(update_datetime, time_format)[0:5]
).utctimetuple()
)
def _get_time_of_local_file(self, path):
"""Get the modified time of the path_.
Args:
path: The path we want the mtime for.
Returns:
The time as a timestamp or None, if the file does not exist
"""
if not os.path.exists(path):
return None
return time.mktime(time.gmtime(os.path.getmtime(path)))
def _satisfies_min_translated(self, stats, mode=None):
"""Check whether a translation fulfills the filter used for
minimum translated percentage.
Args:
perc: The current translation percentage.
Returns:
True or False
"""
cur = self._extract_completed(stats, mode)
option_name = 'minimum_perc'
if self.minimum_perc is not None:
minimum_percent = self.minimum_perc
else:
global_minimum = int(
self.get_resource_option('main', option_name) or 0
)
resource_minimum = int(
self.get_resource_option(
self.resource, option_name
) or global_minimum
)
minimum_percent = resource_minimum
return cur >= minimum_percent
def _remote_is_newer(self, remote_updated, local_file):
"""Check whether the remote translation is newer that the local file.
Args:
remote_updated: The date and time the translation was last
updated remotely.
local_file: The local file.
Returns:
True or False.
"""
if remote_updated is None:
logger.debug("No remote time")
return False
remote_time = self._generate_timestamp(remote_updated)
local_time = self._get_time_of_local_file(
self.get_full_path(local_file)
)
logger.debug(
"Remote time is %s and local %s" % (remote_time, local_time)
)
if local_time is not None and remote_time < local_time:
return False
return True
@classmethod
def _extract_completed(cls, stats, mode=None):
"""Extract the information for the translated percentage from the stats.
Args:
stats: The stats object for a language as returned by Transifex.
mode: The mode of translations requested.
Returns:
The percentage of translation as integer.
"""
if mode == 'reviewed':
key = 'reviewed_percentage'
else:
key = 'completed'
try:
return int(stats[key][:-1])
except KeyError:
return 0
@classmethod
def _extract_updated(cls, stats):
"""Extract the information for the last update of a translation.
Args:
stats: The stats object for a language as returned by Transifex.
Returns:
The last update field.
"""
try:
return stats['last_update']
except KeyError:
return None
def _download_pseudo(self, project_slug, resource_slug, pseudo_file):
response, charset = self.do_url_request(
'pull_pseudo_file',
resource_slug=resource_slug,
project_slug=project_slug
)
response = utils.parse_json(response)
base_dir = | |
<filename>analysis/value_strategy_funcs.py
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
from scipy import stats
from linearmodels import FamaMacBeth
from decimal import Decimal
from data_source import local_source
from tqdm import tqdm as pb
import datetime
def DataFrame_Updater(df_old, df_new, by_list): # update df_old with df_new, using by_list as the key
    """Update rows of df_old with rows of df_new, keyed on by_list.

    Rows whose key columns (by_list) appear in both frames take the values
    from df_new; rows unique to either frame are kept as-is.

    :param df_old: DataFrame holding the existing data
    :param df_new: DataFrame holding the new/overriding data
    :param by_list: list of column names forming the key
    :return: merged DataFrame
    """
    # BUG FIX: pd.concat takes a *sequence* of frames; the second positional
    # argument is `axis`, so pd.concat(df_old, df_new) raised a TypeError.
    return pd.concat([df_old, df_new]).drop_duplicates(by_list, keep='last')
def panel_initializer(series1, series2):
    """Build an empty panel skeleton from two series (currently unused).

    Returns a DataFrame with one row per (series1 value, series2 value)
    pair; the columns are named after the two series (falling back to
    "name1"/"name2" when a series is unnamed — note the inputs' .name
    attributes are set in that case).
    """
    if series1.name is None:
        series1.name = "name1"
    if series2.name is None:
        series2.name = "name2"
    name_a = series1.name
    name_b = series2.name
    frame = pd.DataFrame(series1)
    # One constant column per series2 value; stacking then yields the
    # full cross product.
    for value in series2.values:
        frame[value] = value
    frame.set_index(name_a, inplace=True)
    crossed = pd.DataFrame(frame.stack()).reset_index()
    crossed = crossed[[name_a, 'level_1']]
    crossed.rename(columns={'level_1': name_b}, inplace=True)
    return crossed
def panel_to_matrix_data(panel_data, var_name, index_name="TRADE_DATE", columns_name="TS_CODE"):
    """Pivot long-format panel data into a date x stock matrix of var_name."""
    subset = panel_data[[columns_name, index_name, var_name]].copy()
    subset[index_name] = subset[index_name].astype(int)
    matrix = subset.set_index([index_name, columns_name]).unstack()
    # unstack() produces MultiIndex columns (var_name, code); keep the code.
    matrix.columns = [pair[1] for pair in matrix.columns]
    return matrix
def matrix_to_panel_data(matrix_data, var_name, index_name="TRADE_DATE", columns_name="TS_CODE"):
    """Melt a date x stock matrix back into long-format panel data."""
    long_form = matrix_data.stack().reset_index(name=var_name)
    long_form.columns = [index_name, columns_name, var_name]
    # Conventional column order: entity, date, value.
    return long_form[[columns_name, index_name, var_name]]
def date_delta_calculator(date, diff_days=-180):
    """Shift a YYYYMMDD date by diff_days days, preserving the input type.

    :param date: date in YYYYMMDD form, int or str
    :param diff_days: signed number of days to add (default -180)
    :return: shifted date; int if the input was an int, otherwise str
    (Currently unused.)
    """
    was_int = isinstance(date, int)
    shifted = datetime.datetime.strptime(str(date), "%Y%m%d") + datetime.timedelta(
        days=diff_days
    )
    result = shifted.strftime("%Y%m%d")
    return int(result) if was_int else result
def date_delta_calculator2(date, date_list, diff_days=-180):
    """Shift a YYYYMMDD date by diff_days, then snap to a trading day.

    Like date_delta_calculator, but walks the shifted date one day at a
    time (in the direction of the shift) until it lands on a member of
    date_list, clamping to the earliest/latest entry when it runs off
    the end.  (Currently unused.)

    :param date: date in YYYYMMDD form, int or str
    :param date_list: pandas Series of valid trading dates
    :param diff_days: signed number of days to shift
    """
    shifted = date_delta_calculator(date=date, diff_days=diff_days)
    if diff_days < 0:
        while shifted not in date_list.values:
            if shifted < min(date_list):
                return min(date_list)
            shifted = date_delta_calculator(date=shifted, diff_days=-1)
    elif diff_days > 0:
        while shifted not in date_list.values:
            if shifted > max(date_list):
                return max(date_list)
            shifted = date_delta_calculator(date=shifted, diff_days=1)
    return shifted
def degenerate_dailydata_to_monthlydata(data, data_type='matrix'):
    """Collapse daily data to monthly by keeping each month's last record.

    :param data: 'matrix' -> date(YYYYMMDD) x stock frame;
                 'panel'  -> frame with TRADE_DATE and TS_CODE columns
    :param data_type: 'matrix' or 'panel'
    :return: monthly frame with dates reduced to YYYYMM
    """
    if data_type == 'matrix':
        data.index = data.index // 100
        keep_rows = ~data.index.duplicated(keep='last')
        data = data[keep_rows]
    if data_type == 'panel':
        data["TRADE_DATE"] = data["TRADE_DATE"] // 100
        data = data.drop_duplicates(["TS_CODE", "TRADE_DATE"], keep='last')
        data = data.reset_index(drop=True)
    return data
def calculate_pctchange_bystock(df, var_name='CLOSE', result_name='PCT_CHANGE'):
    """Per-stock percentage change of var_name, returned in panel form."""
    matrix = panel_to_matrix_data(df, var_name=var_name)
    changed = matrix.pct_change()
    return matrix_to_panel_data(changed, var_name=result_name)
def calculate_average_return(choice_matrix, close_matrix):
    """Equal-weighted next-period return of the selected stocks.

    :param choice_matrix: binary date(YYYYMMDD) x stock selection matrix
    :param close_matrix: close-price matrix with the same layout
    :return: Series of portfolio returns, one per date

    Weights are normalized per row first, so dates that select no stock
    contribute 0 instead of inf (the reason the naive mask-then-pct_change
    approach was abandoned).
    """
    weights = choice_matrix.div(choice_matrix.sum(axis=1), axis='rows')
    next_period = close_matrix.pct_change().shift(-1)
    return (next_period * weights).sum(axis=1)
def calculate_MVweighted_average_return(choice_matrix, close_matrix, mv_matrix):
    """Market-value-weighted next-period return of the selected stocks.

    :param choice_matrix: binary date(YYYYMMDD) x stock selection matrix
    :param close_matrix: close-price matrix with the same layout
    :param mv_matrix: market-value matrix with the same layout
    :return: Series of portfolio returns, one per date
    """
    raw_weights = choice_matrix * mv_matrix
    weights = raw_weights.div(raw_weights.sum(axis=1), axis='rows')
    aligned_close = close_matrix.loc[weights.index]
    return (aligned_close.pct_change().shift(-1) * weights).sum(axis=1)
def delete_ST(df):
    """Drop rows whose TS_CODE belongs to an ST (special-treatment) stock."""
    listing = local_source.get_stock_list(cols="TS_CODE, NAME")
    is_st = listing["NAME"].apply(lambda name: 'ST' in name)
    st_codes = set(listing.loc[is_st, "TS_CODE"])
    return df[[code not in st_codes for code in df["TS_CODE"]]]
def delete_FinanceCorps(df):
    """Drop rows whose TS_CODE belongs to a financial-sector company.

    The excluded industries are banks, brokerages and diversified
    financials (industry labels are Chinese, as stored upstream).
    """
    excluded_industries = ['银行', '证券', '多元金融']
    listing = local_source.get_stock_list(cols="TS_CODE, INDUSTRY")
    is_financial = listing["INDUSTRY"].apply(lambda ind: ind in excluded_industries)
    financial_codes = set(listing.loc[is_financial, "TS_CODE"])
    return df[[code not in financial_codes for code in df["TS_CODE"]]]
def Z_standardization(df, input_name_list, input_ascending, output_name):
    """Combine several indicator columns into one composite score.

    Each column in input_name_list is centred and scaled, then the
    scaled columns are summed into a new column *output_name*; the
    input columns are dropped from the returned copy.

    :param df: input DataFrame (not modified; a copy is returned)
    :param input_name_list: columns to combine, e.g. ["BM_ratio", "cash_var"]
    :param input_ascending: list of sort directions, e.g. [True, False].
        NOTE(review): accepted but never used in this function (only
        Z_standardization_of_rank uses it) -- confirm intent.
    :param output_name: name of the resulting composite column
    :return: copy of df with output_name added and the inputs removed
    """
    df_ = df.copy()
    input_num = 0
    df_[output_name] = 0
    for input_name in input_name_list:
        # NOTE(review): divides the deviation by the *mean*, not the std,
        # so this is mean-scaling rather than a true z-score -- confirm
        # whether (x - mean) / std was intended.
        df_[input_name] = (df_[input_name]-df_[input_name].mean())/df_[input_name].mean()
        df_[output_name] = df_[output_name] + df_[input_name]
        df_.drop(input_name,axis=1,inplace=True)
        input_num = input_num + 1
    return df_
def Z_standardization_of_rank(df,input_name_list, input_ascending, output_name):
    """Combine several indicator columns into one composite rank score.

    Each column is ranked (direction from input_ascending), the rank's
    deviation from its mean is scaled, and the scaled ranks are summed
    into a new column *output_name*.  Unlike Z_standardization, the
    temporary rank columns are dropped but the original inputs are kept.

    :param df: input DataFrame (not modified; a copy is returned)
    :param input_name_list: columns to combine, e.g. ["BM_ratio", "cash_var"]
    :param input_ascending: per-column rank direction, e.g. [True, False]
    :param output_name: name of the resulting composite column
    :return: copy of df with output_name added
    """
    df_=df.copy()
    input_num=0
    df_[output_name]=0
    for input_name in input_name_list:
        df_["rank_"+input_name]=df_[input_name].rank(ascending=input_ascending[input_num])
        # NOTE(review): divides by the rank *mean*, not the std, so this is
        # mean-scaling rather than a true z-score -- confirm intent.
        df_["rank_"+input_name]=(df_["rank_"+input_name]-df_["rank_"+input_name].mean())/df_["rank_"+input_name].mean()
        df_[output_name]=df_[output_name]+df_["rank_"+input_name]
        df_.drop("rank_"+input_name,axis=1,inplace=True)
        input_num=input_num+1
    return df_
def get_annualized_income_statements(cols, condition):
    """Fetch quarterly income statements and annualize each quarter.

    Chinese quarterly statements are cumulative (Q2 covers H1, Q3 covers
    nine months, ...), so each quarter is scaled up to a full-year
    figure: Q1 x4, Q2 x2, Q3 x4/3, Q4 unchanged.  String columns (codes,
    dates) are left untouched; "NULL" strings become NaN first.

    :param cols: column list passed through to local_source
    :param condition: SQL-style filter, combined with END_TYPE per quarter
    :return: one DataFrame with all four annualized quarters stacked
    """
    # None factor -> no scaling (Q4 is already a full year).
    season_factors = {1: 4, 2: 2, 3: 4 / 3, 4: None}
    frames = []
    for season, factor in season_factors.items():
        data = local_source.get_income_statements(
            cols=cols, condition=condition + ' and END_TYPE=%d' % season
        )
        data = data.applymap(lambda x: np.nan if x == "NULL" else x)
        if factor is not None:
            # Scale numeric columns only; string columns are detected from
            # their first value (iloc avoids relying on a 0 label existing
            # in the index, unlike the previous label-based lookup).
            data = pd.concat(
                [factor * data.loc[:, c]
                 if type(data.loc[:, c].iloc[0]) != str
                 else data.loc[:, c]
                 for c in data.columns],
                axis=1,
            )
        frames.append(data)
    return pd.concat(frames, axis=0)
def fill_financial_data_to_daily_ann_date_basis(data_fs, date_list=0):
    """Forward-fill financial-statement data onto every trading day.

    Statements are keyed by announcement date (ANN_DATE): each value
    holds from the trading day it was announced until the next
    announcement.

    :param data_fs: statement panel with TS_CODE, ANN_DATE (or
        TRADE_DATE) and END_DATE columns plus the value columns to spread
    :param date_list: series of trading dates (YYYYMMDD); the default 0
        means "fetch the Shanghai Composite calendar from local_source"
    :return: daily panel with TS_CODE, TRADE_DATE and the value columns
    """
    if type(date_list)==int:
        date_list = local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].astype(int)
    # Statements become effective on their announcement date, so treat
    # ANN_DATE as the trade date (no-op if the column is already named so).
    try:
        data_fs.rename(columns={'ANN_DATE':'TRADE_DATE'},inplace=True)
    except:
        pass
    data_fs["TRADE_DATE"]=data_fs["TRADE_DATE"].astype(int)
    data_fs=data_fs.sort_values(by=['TRADE_DATE','END_DATE'], ascending=False) # when one day back-fills statements of several past years,
    data_fs=data_fs.drop_duplicates(["TS_CODE","TRADE_DATE"], keep='first') # the most recent statement should be chosen.
    data_fs_daily = 0  # sentinel: 0 until the first value column is processed
    for entry in pb(data_fs.columns.drop(["TRADE_DATE","TS_CODE"]), desc='filling financial data to daily', colour='#ffffff'):
        # Spread one value column over the full calendar, forward-filling
        # between announcements, then convert back to panel form.
        data_fs_daily_piece = pd.merge(date_list, data_fs, on=["TRADE_DATE"], how='left')
        data_fs_daily_piece = data_fs_daily_piece.applymap(lambda x: np.nan if x=="NULL" else x)
        data_fs_daily_piece = panel_to_matrix_data(data_fs_daily_piece, entry, index_name="TRADE_DATE", columns_name="TS_CODE")
        # Drop the NaN column produced by calendar dates with no statement.
        data_fs_daily_piece.drop(np.nan, axis=1, inplace=True)
        data_fs_daily_piece = data_fs_daily_piece.fillna(method='ffill',axis=0)
        data_fs_daily_piece = matrix_to_panel_data(data_fs_daily_piece, entry, index_name="TRADE_DATE", columns_name="TS_CODE")
        if type(data_fs_daily) == int:
            data_fs_daily = data_fs_daily_piece
        else:
            data_fs_daily = pd.merge(data_fs_daily, data_fs_daily_piece, on=["TRADE_DATE","TS_CODE"], how='left')
    return data_fs_daily
def fill_financial_data_to_daily_end_date_basis(data_fs, month=2):
    """Forward-fill financial-statement data onto every trading day,
    keyed on the statement period end (END_DATE) delayed by *month*
    months to approximate when the data becomes available.

    :param data_fs: statement panel with TS_CODE and END_DATE columns
        plus the value columns to spread out
    :param month: publication lag in months applied to END_DATE
    :return: daily panel with TS_CODE, TRADE_DATE and the value columns
    """
    # Map each year-month to the last trading day of the month *month*
    # months later.  (The comprehension's `month` loop variable does not
    # leak in Python 3, so shift() below sees the parameter.)
    date_list = local_source.get_indices_daily(cols='TRADE_DATE',condition='INDEX_CODE = "000001.SH"')["TRADE_DATE"].sort_values(ascending=True).astype(int)
    month_list = pd.Series((date_list//100).unique())
    last_day_of_month_list = pd.Series([date_list[(date_list//100)==month].iloc[-1] for month in month_list])
    last_day_of_month_list = last_day_of_month_list.shift(-1*month)
    last_day_of_month_dict = {i:j for i,j in zip(month_list, last_day_of_month_list)}
    # Set TRADE_DATE to the delayed month-end trading day.
    data_fs["END_DATE"]=data_fs["END_DATE"].astype(int)
    data_fs["END_DATE_YM"]=data_fs["END_DATE"]//100
    data_fs["END_DATE_ADJUSTED"]=0
    for ym in data_fs["END_DATE_YM"].unique():
        try: # may fail when the statement date predates the earliest trading date
            data_fs.loc[data_fs["END_DATE_YM"]==ym, 'END_DATE_ADJUSTED'] = last_day_of_month_dict[ym]
        except:
            pass
    data_fs["END_DATE_ADJUSTED"]=data_fs["END_DATE_ADJUSTED"].astype(int)
    data_fs["TRADE_DATE"]=data_fs["END_DATE_ADJUSTED"]
    data_fs.drop("END_DATE_YM", axis=1, inplace=True)
    # Spread the data onto every trading day, one value column at a time.
    data_fs_daily = 0  # sentinel: 0 until the first value column is processed
    for entry in pb(data_fs.columns.drop(["TRADE_DATE","TS_CODE"]), desc='filling financial data to daily', colour='#ffffff'):
        data_fs_daily_piece = pd.merge(date_list, data_fs, on=["TRADE_DATE"], how='left')
        data_fs_daily_piece = data_fs_daily_piece.applymap(lambda x: np.nan if x=="NULL" else x)
        data_fs_daily_piece = panel_to_matrix_data(data_fs_daily_piece, entry, index_name="TRADE_DATE", columns_name="TS_CODE")
        # Drop the NaN column produced by calendar dates with no statement.
        data_fs_daily_piece.drop(np.nan, axis=1, inplace=True)
        data_fs_daily_piece = data_fs_daily_piece.fillna(method='ffill',axis=0)
        data_fs_daily_piece = matrix_to_panel_data(data_fs_daily_piece, entry, index_name="TRADE_DATE", columns_name="TS_CODE")
        if type(data_fs_daily) == int:
            data_fs_daily = data_fs_daily_piece
        else:
            data_fs_daily = pd.merge(data_fs_daily, data_fs_daily_piece, on=["TRADE_DATE","TS_CODE"], how='left')
    return data_fs_daily
def MyCensor(df, var_list, quantile=0.01):
    """Winsorize the given columns at the [quantile, 1-quantile] quantiles.

    Values beyond a cutoff are replaced by the cutoff value.  Note that
    NaN fails the `< upper` comparison and is therefore replaced by the
    upper cutoff (original behaviour, deliberately kept).

    :param df: input DataFrame (a modified copy is returned)
    :param var_list: names of the columns to winsorize
    :param quantile: tail probability clipped on each side
    :return: copy of df with the listed columns winsorized
    """
    censored = df.copy()
    for column in var_list:
        upper = censored[column].quantile(1 - quantile)
        lower = censored[column].quantile(quantile)
        capped = [v if v < upper else upper for v in censored[column]]
        censored[column] = [v if v > lower else lower for v in capped]
    return censored
def CountingStars(p):
    """Return the significance stars for a p-value.

    <=0.01 -> "***", <=0.05 -> "**", <=0.1 -> "*", otherwise "".
    """
    for cutoff, stars in ((0.01, "***"), (0.05, "**"), (0.1, "*")):
        if p <= cutoff:
            return stars
    return ""
def stock_selection_by_var(df, var_name, pct=0.2, Type='best', freq='daily', start_date=20200101, end_date=20201231):
    """Binary stock-selection matrix from a cross-sectional ranking.

    For every date, marks the top (Type='best') or bottom (Type='worse')
    *pct* fraction of stocks by var_name.  The input df must already be
    at the frequency given by *freq*.

    :return: boolean date x stock matrix, or 0 if the window has no data
    """
    df["TRADE_DATE"] = df["TRADE_DATE"].astype(int)
    window = df[(df["TRADE_DATE"] >= start_date) & (df["TRADE_DATE"] <= end_date)]
    if len(window) == 0:
        print("选择日期区间内无数据!")
        return 0
    # keep='last': when several statements share a TRADE_DATE, the annual
    # report (the most complete one) comes last.
    window = window.drop_duplicates(["TS_CODE", "TRADE_DATE"], keep='last')
    window = window[["TS_CODE", "TRADE_DATE", var_name]]
    value_mat = panel_to_matrix_data(window, var_name)
    if Type == 'best':
        choice_mat = value_mat.apply(lambda row: row >= row.quantile(1 - pct), axis=1)
    if Type == 'worse':
        choice_mat = value_mat.apply(lambda row: row <= row.quantile(pct), axis=1)
    return choice_mat
def univariate_test_for_returns(df, var_name, mv_weighted=False, freq='daily', start_date=20200101, end_date=20201231):
    """Univariate quintile portfolio test of var_name.

    For every date the stocks are sorted into quintiles by var_name; the
    next-period return of each quintile portfolio (equal- or market-value-
    weighted) and of the Q5-Q1 spread is computed, and one-sample t-tests
    against zero are printed.  The input df must already be at the
    frequency given by *freq*.

    :param df: panel with TS_CODE, TRADE_DATE and var_name columns
    :param var_name: the sorting variable
    :param mv_weighted: weight portfolios by market value if True
    :param freq: 'daily' or 'monthly'
    :param start_date: first date (YYYYMMDD or YYYYMM for monthly)
    :param end_date: last date of the test window
    :return: None (prints t-test results), or 0 if the window is empty
    """
    df["TRADE_DATE"] = df["TRADE_DATE"].astype(int)
    df_ = df[df["TRADE_DATE"]>=start_date]
    df_ = df_[df_["TRADE_DATE"]<=end_date]
    if len(df_)==0:
        print("选择日期区间内无数据!")
        return 0
    # keep='last': when several statements share a TRADE_DATE, the annual
    # report (the most complete one) comes last.
    df_ = df_.drop_duplicates(["TS_CODE","TRADE_DATE"], keep='last')
    df_ = df_[["TS_CODE","TRADE_DATE",var_name]]
    # Fetch close prices and total shares at the matching frequency.
    if freq == 'daily':
        data_close = local_source.get_quotations_daily(cols="TS_CODE, TRADE_DATE, CLOSE", condition='TRADE_DATE>=' + str(start_date) + ' and TRADE_DATE<=' + str(end_date))
        data_close["TRADE_DATE"] = data_close["TRADE_DATE"].astype(int)
        data_close = data_close.drop_duplicates(["TS_CODE","TRADE_DATE"], keep='last')
        data_mv = local_source.get_stock_indicators_daily(cols='TS_CODE, TRADE_DATE, TOTAL_SHARE', condition='TRADE_DATE>=' + str(start_date) + ' and TRADE_DATE<=' + str(end_date))
        data_mv["TRADE_DATE"] = data_mv["TRADE_DATE"].astype(int)
    if freq == 'monthly':
        data_close = local_source.get_quotations_daily(cols="TS_CODE, TRADE_DATE, CLOSE", condition='TRADE_DATE>=' + str(start_date) + '01' + ' and TRADE_DATE<=' + str(end_date) + '31')
        data_close["TRADE_DATE"] = data_close["TRADE_DATE"].astype(int)
        data_close = data_close.drop_duplicates(["TS_CODE","TRADE_DATE"], keep='last')
        data_close = degenerate_dailydata_to_monthlydata(data_close, data_type='panel')
        data_mv = local_source.get_stock_indicators_daily(cols='TS_CODE, TRADE_DATE, TOTAL_SHARE', condition='TRADE_DATE>=' + str(start_date) + '01' + ' and TRADE_DATE<=' + str(end_date) +'31')
        data_mv["TRADE_DATE"] = data_mv["TRADE_DATE"].astype(int)
        data_mv = degenerate_dailydata_to_monthlydata(data_mv, data_type='panel')
    # Market value = total shares x close; merge in the sorting variable.
    data_merged = pd.merge(data_close, data_mv, on=["TS_CODE","TRADE_DATE"],how='left')
    data_merged["TOTAL_MV"] = data_merged["TOTAL_SHARE"] * data_merged["CLOSE"]
    data_merged.drop("TOTAL_SHARE", axis=1, inplace=True)
    data_merged = pd.merge(data_merged, df_, on=["TS_CODE","TRADE_DATE"],how='left')
    data_close = panel_to_matrix_data(data_merged, "CLOSE")
    data_mv = panel_to_matrix_data(data_merged, "TOTAL_MV")
    value_mat = panel_to_matrix_data(data_merged, var_name)
    # Quintile membership matrices from the cross-sectional distribution.
    choice_mat_q1=value_mat.apply(lambda x:(x<=x.quantile(0.2)),axis=1)
    choice_mat_q2=value_mat.apply(lambda x:(x>x.quantile(0.2))&(x<=x.quantile(0.4)),axis=1)
    choice_mat_q3=value_mat.apply(lambda x:(x>x.quantile(0.4))&(x<=x.quantile(0.6)),axis=1)
    choice_mat_q4=value_mat.apply(lambda x:(x>x.quantile(0.6))&(x<=x.quantile(0.8)),axis=1)
    choice_mat_q5=value_mat.apply(lambda x:(x>x.quantile(0.8)),axis=1)
    if mv_weighted == False:
        ret_q1 = calculate_average_return(choice_matrix=choice_mat_q1, close_matrix=data_close)
        ret_q2 = calculate_average_return(choice_matrix=choice_mat_q2, close_matrix=data_close)
        ret_q3 = calculate_average_return(choice_matrix=choice_mat_q3, close_matrix=data_close)
        ret_q4 = calculate_average_return(choice_matrix=choice_mat_q4, close_matrix=data_close)
        ret_q5 = calculate_average_return(choice_matrix=choice_mat_q5, close_matrix=data_close)
    if mv_weighted == True:
        ret_q1 = calculate_MVweighted_average_return(choice_matrix=choice_mat_q1, close_matrix=data_close, mv_matrix=data_mv)
        ret_q2 = calculate_MVweighted_average_return(choice_matrix=choice_mat_q2, close_matrix=data_close, mv_matrix=data_mv)
        ret_q3 = calculate_MVweighted_average_return(choice_matrix=choice_mat_q3, close_matrix=data_close, mv_matrix=data_mv)
        ret_q4 = calculate_MVweighted_average_return(choice_matrix=choice_mat_q4, close_matrix=data_close, mv_matrix=data_mv)
        ret_q5 = calculate_MVweighted_average_return(choice_matrix=choice_mat_q5, close_matrix=data_close, mv_matrix=data_mv)
    # Long-short (high-minus-low) spread portfolio.
    ret_q5minusq1 = ret_q5-ret_q1
    t_q1=stats.ttest_1samp(ret_q1, 0)
    t_q2=stats.ttest_1samp(ret_q2, 0)
    t_q3=stats.ttest_1samp(ret_q3, 0)
    t_q4=stats.ttest_1samp(ret_q4, 0)
    t_q5=stats.ttest_1samp(ret_q5, 0)
    t_q5minusq1=stats.ttest_1samp(ret_q5minusq1, 0)
    print(t_q1,'\n',t_q2,'\n',t_q3,'\n',t_q4,'\n',t_q5,'\n',t_q5minusq1)
def univariate_test_for_returns_2(df, var_name, mv_weighted=False, freq='daily', start_date=20200101, end_date=20201231):
#备用代码, 暂不使用, 输入df需要包含return列
df["TRADE_DATE"] = df["TRADE_DATE"].astype(int)
df_ = df[df["TRADE_DATE"]>=start_date]
df_ = df_[df_["TRADE_DATE"]<=end_date]
if len(df_)==0:
print("选择日期区间内无数据!")
return 0
df_ = df_.drop_duplicates(["TS_CODE","TRADE_DATE"], keep='last')
if freq == 'daily':
data_close = local_source.get_quotations_daily(cols="TS_CODE, TRADE_DATE, CLOSE", condition='TRADE_DATE>=' + str(start_date) + ' and TRADE_DATE<=' + str(end_date))
data_close["TRADE_DATE"] = data_close["TRADE_DATE"].astype(int)
data_close = data_close.drop_duplicates(["TS_CODE","TRADE_DATE"], keep='last')
data_mv = local_source.get_stock_indicators_daily(cols='TS_CODE, TRADE_DATE, TOTAL_SHARE', condition='TRADE_DATE>=' + str(start_date) + ' and TRADE_DATE<=' + str(end_date))
data_mv["TRADE_DATE"] = data_mv["TRADE_DATE"].astype(int)
if freq == 'monthly':
data_close = local_source.get_quotations_daily(cols="TS_CODE, TRADE_DATE, CLOSE", condition='TRADE_DATE>=' + str(start_date) + '01' + ' and TRADE_DATE<=' + str(end_date) + '31')
data_close["TRADE_DATE"] = data_close["TRADE_DATE"].astype(int)
data_close = data_close.drop_duplicates(["TS_CODE","TRADE_DATE"], keep='last')
data_close = degenerate_dailydata_to_monthlydata(data_close, data_type='panel')
data_mv = local_source.get_stock_indicators_daily(cols='TS_CODE, TRADE_DATE, TOTAL_SHARE', condition='TRADE_DATE>=' + str(start_date) + '01' + ' and TRADE_DATE<=' + str(end_date) +'31')
data_mv["TRADE_DATE"] = data_mv["TRADE_DATE"].astype(int)
data_mv = degenerate_dailydata_to_monthlydata(data_mv, data_type='panel')
data_merged = pd.merge(data_close, data_mv, on=["TS_CODE","TRADE_DATE"],how='left')
data_merged["TOTAL_MV"] = data_merged["TOTAL_SHARE"] * data_merged["CLOSE"]
data_merged.drop("TOTAL_SHARE", axis=1, inplace=True)
data_merged = pd.merge(data_merged, df_, on=["TS_CODE","TRADE_DATE"],how='left')
date_list = data_merged["TRADE_DATE"].unique()
if mv_weighted == False:
avg_ret_q1_list=[]
avg_ret_q2_list=[]
avg_ret_q3_list=[]
avg_ret_q4_list=[]
avg_ret_q5_list=[]
for date in pb(date_list, desc='please wait', colour='#ffffff'):
| |
#Author: <NAME>
import numpy as np
import os
import h5py
import pandas as pd
from AxonImaging import signal_processing as sp
def get_processed_running_speed (vsig,vref,sample_freq, smooth_filter_sigma = 0.05, wheel_diameter = 16.51, positive_speed_threshold= 70, negative_speed_threshold= -5):
    ''' Returns the running speed given voltage changes from an encoder wheel. Speeds are smoothed and outlier
    values above or below arbitrarily defined thresholds are set as NaN and interpolated over.

    :param vsig: voltage signal which changes as a function of wheel movement (running)
    :param vref: reference voltage (typically 5V +/- small offset that is encoder dependent)
    :param sample_freq: sampling frequency which vsig and vref are acquired at
    :param smooth_filter_sigma: Gaussian smoothing sigma, in seconds
    :param wheel_diameter: diameter of running wheel (cm)
    :param positive_speed_threshold: maximum allowed positive speed (impossibly high running speeds become NaN)
    :param negative_speed_threshold: maximum allowed negative speed (impossibly fast backwards running becomes NaN)
    :return: smoothed trace of running speed in cm/s per sample, with outliers interpolated over
    '''
    from scipy.ndimage import gaussian_filter1d

    # Robust estimate of the reference voltage; |vref| >= 20 treated as glitches.
    vref_mean = np.median(vref[np.abs(vref)<20])
    # Wheel position in radians per sample, then smoothed.
    position_arc = vsig*(2.*np.pi)/vref_mean
    position_arc_smooth = gaussian_filter1d(position_arc, int(smooth_filter_sigma*sample_freq))
    # Angular velocity (rad/s); trailing 0 keeps output length equal to input.
    speed_arc = np.append(np.diff(position_arc_smooth),0) * sample_freq
    # NOTE(review): linear speed = omega * radius; multiplying by the full
    # diameter doubles the value unless the encoder scaling accounts for
    # it -- confirm the intended calibration.
    speed = speed_arc * wheel_diameter
    speed_smooth = np.copy(speed)
    # Mark physically impossible speeds as NaN...
    speed_smooth[np.logical_or(speed>=positive_speed_threshold,speed<=negative_speed_threshold)]=np.nan

    # ...then interpolate over them, widening each NaN gap by two filter
    # widths on each side so smoothing artifacts around outliers are also replaced.
    mask = np.isnan(speed_smooth)
    # BUG FIX: np.bool was removed in NumPy 1.24; the builtin bool is the
    # documented replacement.
    mask2 = np.zeros(mask.shape, dtype=bool)
    for n,p in enumerate(mask):
        if p:
            mask2[(n-(2*int(smooth_filter_sigma*sample_freq))):(n+int((2*smooth_filter_sigma*sample_freq+1)))] = True # extend mask 2 filter widths to extend interpolation
    speed_smooth[mask2] = np.interp(np.flatnonzero(mask2), np.flatnonzero(~mask2), speed[~mask2])

    return speed_smooth
def get_auditory_onset_times(microphone, sample_freq, threshold=1, stdev_samples=10,filter_width=20):
    '''
    Finds the onset of auditory events by computing a standard deviation over
    non-overlapping chunks of the signal and thresholding the smoothed result.

    :param microphone: an analog microphone signal
    :param sample_freq: the sampling frequency at which the auditory signal was acquired
    :param threshold: threshold value in units of standard deviation for finding onset times (values above this are marked as a valid onset)
    :param stdev_samples: number of samples each standard deviation is calculated from
    :param filter_width: width (in chunks) of the boxcar window used to smooth the stdev trace
    :return: the onset sound_times in units of seconds
    '''
    from scipy.signal import convolve
    # BUG FIX: scipy.signal.boxcar was removed in SciPy 1.13; window
    # functions live in scipy.signal.windows (fall back for older SciPy).
    try:
        from scipy.signal.windows import boxcar
    except ImportError:
        from scipy.signal import boxcar

    # standard deviation across user-defined number of samples (chunked,
    # i.e. an implicit downsampling by stdev_samples)
    step=int(stdev_samples)
    stdev=[]
    for ii in range(0,microphone.shape[0],step):
        chunk=microphone[ii:ii+step]
        stdev.append(np.std(chunk))

    stdev_filtered=convolve(stdev, boxcar(M=filter_width))

    # get the up-crossing chunk indices through thresholding
    stamps=sp.threshold_greater(np.array(stdev_filtered),threshold)

    # multiply chunk indices by stdev_samples to undo the downsampling that
    # occurred when the standard deviation was calculated, then convert to seconds
    stamps=np.multiply(stamps,stdev_samples)
    sound_times = np.divide(stamps,sample_freq)
    print ('total number of sound presentations found = '+ str(len(sound_times)))
    return sound_times
def microphone_to_dB (signal, sensitivity=250, pre_amp_gain=12):
    ''' Converts a microphone voltage to decibels (sound pressure level).

    The reference is the threshold of hearing, 20 micropascals at 1 kHz
    (SPL).  The reference pressure is turned into a baseline voltage via
    the microphone sensitivity, the signal/baseline ratio is converted to
    dB with 20*log10(ratio), and the pre-amp gain is subtracted out.

    :param signal: the analog microphone voltage (in V)
    :param sensitivity: the sensitivity of the microphone in mV/Pa
    :param pre_amp_gain: gain setting on the microphone pre amp (in dB)
    :return: sound level in dB with the pre-amp gain removed
    '''
    reference = 20E-6
    baseline_v = reference * sensitivity
    return np.log10(signal / baseline_v) * 20 - pre_amp_gain
def shift_aud_frames_by_mic_delay(mic_onsets, aud_frames, vsync):
    '''
    Time-aligns auditory stimulation onset times that are given relative to
    monitor frames (i.e. "sound started on frame 50") to the frames on which
    the sounds were actually played/heard (as detected on a microphone).

    Requires that the number of sounds presented equals the number detected
    by the microphone.

    :param mic_onsets: auditory onsets detected by a microphone, in seconds
        (see get_auditory_onset_times)
    :param aud_frames: frame numbers on which auditory stimulation was
        initiated (typically from the pkl file)
    :param vsync: time of each monitor frame presentation, on the same time
        base as the microphone (seconds)
    :return: array of frame numbers corresponding to the onset of the
        auditory stimulation being heard, or None when the counts mismatch
    '''
    # compare total number of auditory stims with the expected number of presentations
    if len(mic_onsets) != len(aud_frames):
        print ('Number of known auditory presentations '+str(len(aud_frames))+ ' does not equal those detected by microphone '+ str(len(mic_onsets)))
        return

    sound_frames = []
    for ii in range(len(aud_frames)):
        # latency between presentation (frame time) and detection (microphone)
        dif = mic_onsets[ii] - vsync[aud_frames[ii]].astype(np.float32)
        presented_time = vsync[aud_frames[ii]] + dif
        # find the vsync time that most closely matches the detected sound
        index = np.argmin(np.abs(vsync - presented_time))
        sound_frames.append(index)
    sound_frames = np.array(sound_frames)

    # BUG FIX: the original closed the print(...) call early and then added
    # strings to its None return value, raising a TypeError at runtime; the
    # whole message is now assembled before printing.
    mean_shift = np.mean(sound_frames - aud_frames)
    print ('mean number of visual frames between presentation and detection is '
           + str(mean_shift) + ' frames or '
           + str((1 / np.median(np.diff(vsync))) * mean_shift)
           + ' millseconds (assuming 60 fps)')
    return sound_frames
def stimulus_thresh_df (paths,data_key, thresh_signal, thresh, min_l, min_t,
before, after, baseline_period,response_period,min_time_between=False,use_dff=True,
other_signals=[],dff_baseline_dur=1., exclusion_sig='null',exclusion_thresh=0.,exclusion_dur=0.,exclusion_logic='exclude',
override_ends=False, use_per_thresh=False, sample_freq=30. ):
"""
:param paths: path to HDF5 files
:param data_key: key for the HDF5 to access the data type of interest
---Thresholding parameters
:param thresh_signal: the signal to threshold on
:param thresh: the threshold
:param min_l: the minimum amount of time the signal must go below the threshold to end a period
:param min_t: minimum time for a threshold period
:param min_time_between: the minimum amount that must be between the start of two epochs. Useful for finding epochs that occur in isolation from nearby other epochs.
---trace extraction parameters
:param before: amount of time before the threshold time to extract
:param after: amount of time after the threshold time to extract
:param baseline: how many seconds in the the 'before' period to calculate baseline periods from (used in DF/F calculations and others)
:param baseline: where the "baseline" should be calculated from in the trace (used in DF/F calculations and others) . Tuple of start time and end time for the baseline.
:param sample_t_after_thresh: when sampling the "response" start this far after the threshold crossing (0 = at the threshold). Set to string 'half' to sample 50% through the epoch's duration.
:param sample_dur_after_thresh:when sampling the "response" start from sample_t_after_thresh and go this many seconds ahead
"""
import os
import h5py
import pandas as pd
#create dataframe of all ROI responses for every running epoch
total_roi_counter=0
responses=[]
meaned_responses=[]
#check to make sure that the baseline is specified as a tuple and deal with instances where it isn't
if isinstance(baseline_period, (int, float)):
print ('the baseline period was specified as a single number, not a start and end time. Assuming start time is time 0 and end time of hte baseline is what is specified.')
baseline_period=(0,baseline_period)
for path in paths:
mouse_id=os.path.basename(path)[0:7]
print ('\n processing ' + str(mouse_id) + '\n')
data_f=h5py.File(path,'r')
data=data_f.get(data_key)
if use_per_thresh==True:
#first lowpass filter and calculate the median of the trace
median=np.nanmedian(sp.butter_lowpass_filter(data[thresh_signal], cutoff=1., analog=True))
threshold_per=median+(thresh*median)
thresh=threshold_per
if exclusion_sig=='null':
runs=sp.threshold_period(signal=data[thresh_signal], threshold=thresh,
min_low=min_l, sample_freq=30., min_time=min_t)
else:
print (exclusion_logic+' epochs where the '+ str(exclusion_sig) + ' is greater than '+ str(exclusion_thresh))
runs=sp.threshold_period(signal=data[thresh_signal], threshold=thresh,min_time_between=min_time_between,
min_low=min_l, sample_freq=30., min_time=min_t,exclusion_signal=data[exclusion_sig],
exclusion_dur=exclusion_dur,exclusion_logic=exclusion_logic,
exclusion_thresh=exclusion_thresh)
#check if no threshold crossing are found. If so, go to next file
if runs.size==0:
print (' No periods found for id '+ str(mouse_id))
continue
    #get the start times from the threshold_period output
starts=runs[:,0]
#take into account times where you want to get traces that start relative to the onset and you don't want to be concerned with their duration
if override_ends==False:
starts=runs[:,0]
ends=runs[:,1]
durs=runs[:,2]
elif isinstance(override_ends, (int, float)):
#if a number is passed to override the ends, determine the end of the periods by adding this number to the beginning
print ('Overiding detected durations and using USER-DEFINED durations')
starts=runs[:,0]
ends=starts+override_ends
durs=ends-starts
elif override_ends=='starts':
print ('setting the start times equal to the detected END TIMES!')
starts=runs[:,1]
ends=runs[:,1]
durs=(ends-starts)+1.
| |
from markupsafe import escape
from sqlalchemy import and_, desc, false, true
from galaxy import managers, model, util, web
from galaxy.model.item_attrs import UsesItemRatings
from galaxy.util.json import loads
from galaxy.util.sanitize_html import sanitize_html, _BaseHTMLProcessor
from galaxy.web import error, url_for
from galaxy.web.base.controller import BaseUIController, SharableMixin, UsesStoredWorkflowMixin, UsesVisualizationMixin
from galaxy.web.framework.helpers import time_ago, grids
def format_bool( b ):
    """ Render a truthy value as 'yes' and a falsy one as '' for grid display. """
    return "yes" if b else ""
class PageListGrid( grids.Grid ):
    """ Grid listing the current user's non-deleted pages, with edit/share/delete operations. """
    # Custom column.
    class URLColumn( grids.PublicURLColumn ):
        # Builds the page's public display URL from the owner's username and the page slug.
        def get_value( self, trans, grid, item ):
            return url_for(controller='page', action='display_by_username_and_slug', username=item.user.username, slug=item.slug )
    # Grid definition
    use_panels = True
    title = "Pages"
    model_class = model.Page
    default_filter = { "published": "All", "tags": "All", "title": "All", "sharing": "All" }
    default_sort_key = "-update_time"
    columns = [
        grids.TextColumn( "Title", key="title", attach_popup=True, filterable="advanced" ),
        URLColumn( "Public URL" ),
        grids.OwnerAnnotationColumn( "Annotation", key="annotation", model_annotation_association_class=model.PageAnnotationAssociation, filterable="advanced" ),
        grids.IndividualTagsColumn( "Tags", key="tags", model_tag_association_class=model.PageTagAssociation, filterable="advanced", grid_name="PageListGrid" ),
        grids.SharingStatusColumn( "Sharing", key="sharing", filterable="advanced", sortable=False ),
        grids.GridColumn( "Created", key="create_time", format=time_ago ),
        grids.GridColumn( "Last Updated", key="update_time", format=time_ago ),
    ]
    # Free-text search covers the title and annotation columns.
    columns.append( grids.MulticolFilterColumn(
        "Search",
        cols_to_filter=[ columns[0], columns[2] ],
        key="free-text-search", visible=False, filterable="standard" )
    )
    global_actions = [
        grids.GridAction( "Add new page", dict( action='create' ) )
    ]
    operations = [
        grids.DisplayByUsernameAndSlugGridOperation( "View", allow_multiple=False ),
        grids.GridOperation( "Edit content", allow_multiple=False, url_args=dict( action='edit_content') ),
        grids.GridOperation( "Edit attributes", allow_multiple=False, url_args=dict( action='edit') ),
        grids.GridOperation( "Share or Publish", allow_multiple=False, condition=( lambda item: not item.deleted ), async_compatible=False ),
        grids.GridOperation( "Delete", confirm="Are you sure you want to delete this page?" ),
    ]
    def apply_query_filter( self, trans, query, **kwargs ):
        # Restrict the listing to the current user's pages and hide deleted ones.
        return query.filter_by( user=trans.user, deleted=False )
class PageAllPublishedGrid( grids.Grid ):
    """ Grid listing every published, non-deleted page from all users. """
    # Grid definition
    use_panels = True
    use_async = True
    title = "Published Pages"
    model_class = model.Page
    default_sort_key = "update_time"
    default_filter = dict( title="All", username="All" )
    columns = [
        grids.PublicURLColumn( "Title", key="title", filterable="advanced" ),
        grids.OwnerAnnotationColumn( "Annotation", key="annotation", model_annotation_association_class=model.PageAnnotationAssociation, filterable="advanced" ),
        grids.OwnerColumn( "Owner", key="username", model_class=model.User, filterable="advanced" ),
        grids.CommunityRatingColumn( "Community Rating", key="rating" ),
        grids.CommunityTagsColumn( "Community Tags", key="tags", model_tag_association_class=model.PageTagAssociation, filterable="advanced", grid_name="PageAllPublishedGrid" ),
        grids.ReverseSortColumn( "Last Updated", key="update_time", format=time_ago )
    ]
    columns.append(
        grids.MulticolFilterColumn(
            "Search title, annotation, owner, and tags",
            cols_to_filter=[ columns[0], columns[1], columns[2], columns[4] ],
            key="free-text-search", visible=False, filterable="standard" )
    )
    def build_initial_query( self, trans, **kwargs ):
        # Join so that searching history.user makes sense.
        return trans.sa_session.query( self.model_class ).join( model.User.table )
    def apply_query_filter( self, trans, query, **kwargs ):
        # Only published pages that have not been deleted are visible here.
        return query.filter( self.model_class.deleted == false() ).filter( self.model_class.published == true() )
class ItemSelectionGrid( grids.Grid ):
    """ Base class for pages' item selection grids. """
    # Custom columns.
    class NameColumn( grids.TextColumn ):
        # Shows the item's display name when the model provides one, otherwise its
        # plain name; values are escaped because names are user-supplied.
        def get_value(self, trans, grid, item):
            if hasattr( item, "get_display_name" ):
                return escape(item.get_display_name())
            else:
                return escape(item.name)
    # Grid definition.
    show_item_checkboxes = True
    template = "/page/select_items_grid.mako"
    default_filter = { "deleted": "False" , "sharing": "All" }
    default_sort_key = "-update_time"
    use_async = True
    use_paging = True
    num_rows_per_page = 10
    def apply_query_filter( self, trans, query, **kwargs ):
        # Selection grids only ever offer the current user's own items.
        return query.filter_by( user=trans.user )
class HistorySelectionGrid( ItemSelectionGrid ):
    """ Grid for selecting histories. """
    # Grid definition.
    title = "Saved Histories"
    model_class = model.History
    columns = [
        ItemSelectionGrid.NameColumn( "Name", key="name", filterable="advanced" ),
        grids.IndividualTagsColumn( "Tags", key="tags", model_tag_association_class=model.HistoryTagAssociation, filterable="advanced"),
        grids.GridColumn( "Last Updated", key="update_time", format=time_ago ),
        # Columns that are valid for filtering but are not visible.
        grids.DeletedColumn( "Deleted", key="deleted", visible=False, filterable="advanced" ),
        grids.SharingStatusColumn( "Sharing", key="sharing", filterable="advanced", sortable=False, visible=False ),
    ]
    columns.append(
        grids.MulticolFilterColumn(
            "Search",
            cols_to_filter=[ columns[0], columns[1] ],
            key="free-text-search", visible=False, filterable="standard" )
    )
    def apply_query_filter( self, trans, query, **kwargs ):
        # Narrower than the base filter: also exclude purged histories.
        return query.filter_by( user=trans.user, purged=False )
class HistoryDatasetAssociationSelectionGrid( ItemSelectionGrid ):
    """ Grid for selecting HDAs. """
    # Grid definition.
    title = "Saved Datasets"
    model_class = model.HistoryDatasetAssociation
    columns = [
        ItemSelectionGrid.NameColumn( "Name", key="name", filterable="advanced" ),
        grids.IndividualTagsColumn( "Tags", key="tags", model_tag_association_class=model.HistoryDatasetAssociationTagAssociation, filterable="advanced"),
        grids.GridColumn( "Last Updated", key="update_time", format=time_ago ),
        # Columns that are valid for filtering but are not visible.
        grids.DeletedColumn( "Deleted", key="deleted", visible=False, filterable="advanced" ),
        grids.SharingStatusColumn( "Sharing", key="sharing", filterable="advanced", sortable=False, visible=False ),
    ]
    columns.append(
        grids.MulticolFilterColumn(
            "Search",
            cols_to_filter=[ columns[0], columns[1] ],
            key="free-text-search", visible=False, filterable="standard" )
    )
    def apply_query_filter( self, trans, query, **kwargs ):
        # To filter HDAs by user, need to join HDA and History table and then filter histories by user. This is necessary because HDAs do not have
        # a user relation.
        return query.select_from( model.HistoryDatasetAssociation.table.join( model.History.table ) ).filter( model.History.user == trans.user )
class WorkflowSelectionGrid( ItemSelectionGrid ):
    """ Grid for selecting workflows. """
    # Grid definition.
    title = "Saved Workflows"
    model_class = model.StoredWorkflow
    columns = [
        ItemSelectionGrid.NameColumn( "Name", key="name", filterable="advanced" ),
        grids.IndividualTagsColumn( "Tags", key="tags", model_tag_association_class=model.StoredWorkflowTagAssociation, filterable="advanced"),
        grids.GridColumn( "Last Updated", key="update_time", format=time_ago ),
        # Columns that are valid for filtering but are not visible.
        grids.DeletedColumn( "Deleted", key="deleted", visible=False, filterable="advanced" ),
        grids.SharingStatusColumn( "Sharing", key="sharing", filterable="advanced", sortable=False, visible=False ),
    ]
    # Free-text search covers the name and tags columns; user filtering is
    # inherited from ItemSelectionGrid.apply_query_filter.
    columns.append(
        grids.MulticolFilterColumn(
            "Search",
            cols_to_filter=[ columns[0], columns[1] ],
            key="free-text-search", visible=False, filterable="standard" )
    )
class PageSelectionGrid( ItemSelectionGrid ):
    """ Grid for selecting pages. """
    # Grid definition.
    title = "Saved Pages"
    model_class = model.Page
    columns = [
        grids.TextColumn( "Title", key="title", filterable="advanced" ),
        grids.IndividualTagsColumn( "Tags", key="tags", model_tag_association_class=model.PageTagAssociation, filterable="advanced"),
        grids.GridColumn( "Last Updated", key="update_time", format=time_ago ),
        # Columns that are valid for filtering but are not visible.
        grids.DeletedColumn( "Deleted", key="deleted", visible=False, filterable="advanced" ),
        grids.SharingStatusColumn( "Sharing", key="sharing", filterable="advanced", sortable=False, visible=False ),
    ]
    # Free-text search covers the title and tags columns.
    columns.append(
        grids.MulticolFilterColumn(
            "Search",
            cols_to_filter=[ columns[0], columns[1] ],
            key="free-text-search", visible=False, filterable="standard" )
    )
class VisualizationSelectionGrid( ItemSelectionGrid ):
    """ Grid for selecting visualizations. """
    # Grid definition.
    title = "Saved Visualizations"
    model_class = model.Visualization
    columns = [
        grids.TextColumn( "Title", key="title", filterable="advanced" ),
        grids.TextColumn( "Type", key="type" ),
        grids.IndividualTagsColumn( "Tags", key="tags", model_tag_association_class=model.VisualizationTagAssociation, filterable="advanced", grid_name="VisualizationListGrid" ),
        grids.SharingStatusColumn( "Sharing", key="sharing", filterable="advanced", sortable=False ),
        grids.GridColumn( "Last Updated", key="update_time", format=time_ago ),
    ]
    # Free-text search covers the title and tags columns.
    columns.append(
        grids.MulticolFilterColumn(
            "Search",
            cols_to_filter=[ columns[0], columns[2] ],
            key="free-text-search", visible=False, filterable="standard" )
    )
class _PageContentProcessor( _BaseHTMLProcessor ):
    """ Processes page content to produce HTML that is suitable for display. For now, processor renders embedded objects. """
    def __init__( self, trans, encoding, type, render_embed_html_fn ):
        _BaseHTMLProcessor.__init__( self, encoding, type)
        self.trans = trans
        # While True, markup and text are swallowed until the element that
        # started the embedded item's placeholder is fully closed.
        self.ignore_content = False
        self.num_open_tags_for_ignore = 0
        self.render_embed_html_fn = render_embed_html_fn
    def unknown_starttag( self, tag, attrs ):
        """ Called for each start tag; attrs is a list of (attr, value) tuples. """
        if self.ignore_content:
            # Nested tag inside ignored content: track depth and drop it.
            self.num_open_tags_for_ignore += 1
            return
        # An element whose class list contains 'embedded-item' marks embedded content.
        is_embedded = any( ( name == "class" ) and ( "embedded-item" in value.split(" ") )
                           for name, value in attrs )
        if is_embedded:
            # Swallow the placeholder markup; emit rendered HTML in its place.
            self.ignore_content = True
            self.num_open_tags_for_ignore = 1
            for name, value in attrs:
                if name == "id":
                    # ID has form '<class_name>-<encoded_item_id>'
                    item_class, item_id = value.split("-")
                    self.pieces.append( self.render_embed_html_fn( self.trans, item_class, item_id ) )
                    return
        # Default behavior: pass the tag through unchanged.
        _BaseHTMLProcessor.unknown_starttag( self, tag, attrs )
    def handle_data( self, text ):
        """ Called for each block of plain text. """
        if not self.ignore_content:
            _BaseHTMLProcessor.handle_data( self, text )
    def unknown_endtag( self, tag ):
        """ Called for each end tag. """
        if self.ignore_content:
            # Close one level of ignored markup; resume output once the
            # placeholder element itself has been closed.
            self.num_open_tags_for_ignore -= 1
            if self.num_open_tags_for_ignore == 0:
                self.ignore_content = False
            return
        # Default behavior:
        _BaseHTMLProcessor.unknown_endtag( self, tag )
class PageController( BaseUIController, SharableMixin,
UsesStoredWorkflowMixin, UsesVisualizationMixin, UsesItemRatings ):
_page_list = PageListGrid()
_all_published_list = PageAllPublishedGrid()
_history_selection_grid = HistorySelectionGrid()
_workflow_selection_grid = WorkflowSelectionGrid()
_datasets_selection_grid = HistoryDatasetAssociationSelectionGrid()
_page_selection_grid = PageSelectionGrid()
_visualization_selection_grid = VisualizationSelectionGrid()
def __init__( self, app ):
super( PageController, self ).__init__( app )
self.history_manager = managers.histories.HistoryManager( app )
self.history_serializer = managers.histories.HistorySerializer( self.app )
self.hda_manager = managers.hdas.HDAManager( app )
@web.expose
@web.require_login()
def list( self, trans, *args, **kwargs ):
""" List user's pages. """
# Handle operation
if 'operation' in kwargs and 'id' in kwargs:
session = trans.sa_session
operation = kwargs['operation'].lower()
ids = util.listify( kwargs['id'] )
for id in ids:
item = session.query( model.Page ).get( self.decode_id( id ) )
if operation == "delete":
item.deleted = True
if operation == "share or publish":
return self.sharing( trans, **kwargs )
session.flush()
# HACK: to prevent the insertion of an entire html document inside another
kwargs[ 'embedded' ] = True
# Build grid HTML.
grid = self._page_list( trans, *args, **kwargs )
# Build list of pages shared with user.
shared_by_others | |
14, -4, -4): (0, 1),
(8, 14, -4, -3): (0, 1),
(8, 14, -4, -2): (0, 1),
(8, 14, -4, -1): (0, 1),
(8, 14, -4, 0): (0, 1),
(8, 14, -4, 1): (0, 1),
(8, 14, -4, 2): (0, 1),
(8, 14, -4, 3): (0, 1),
(8, 14, -4, 4): (0, 0),
(8, 14, -4, 5): (-1, -1),
(8, 14, -3, -5): (0, 1),
(8, 14, -3, -4): (0, 1),
(8, 14, -3, -3): (0, 1),
(8, 14, -3, -2): (0, 1),
(8, 14, -3, -1): (0, 1),
(8, 14, -3, 0): (0, 1),
(8, 14, -3, 1): (0, 1),
(8, 14, -3, 2): (0, 1),
(8, 14, -3, 3): (1, 1),
(8, 14, -3, 4): (1, 1),
(8, 14, -3, 5): (1, 0),
(8, 14, -2, -5): (-1, 1),
(8, 14, -2, -4): (-1, 1),
(8, 14, -2, -3): (-1, 1),
(8, 14, -2, -2): (-1, 1),
(8, 14, -2, -1): (-1, 1),
(8, 14, -2, 0): (1, 1),
(8, 14, -2, 1): (1, 1),
(8, 14, -2, 2): (1, 1),
(8, 14, -2, 3): (1, 1),
(8, 14, -2, 4): (1, 1),
(8, 14, -2, 5): (1, 0),
(8, 14, -1, -5): (-1, 1),
(8, 14, -1, -4): (-1, 1),
(8, 14, -1, -3): (-1, 1),
(8, 14, -1, -2): (-1, 1),
(8, 14, -1, -1): (1, 1),
(8, 14, -1, 0): (1, 1),
(8, 14, -1, 1): (1, 1),
(8, 14, -1, 2): (1, 1),
(8, 14, -1, 3): (1, 1),
(8, 14, -1, 4): (1, 0),
(8, 14, -1, 5): (1, -1),
(8, 14, 0, -5): (1, 1),
(8, 14, 0, -4): (1, 1),
(8, 14, 0, -3): (1, 1),
(8, 14, 0, -2): (1, 1),
(8, 14, 0, -1): (1, 1),
(8, 14, 0, 0): (0, 1),
(8, 14, 0, 1): (0, 1),
(8, 14, 0, 2): (0, 1),
(8, 14, 0, 3): (0, 1),
(8, 14, 0, 4): (0, 0),
(8, 14, 0, 5): (0, -1),
(8, 14, 1, -5): (1, 1),
(8, 14, 1, -4): (1, 1),
(8, 14, 1, -3): (1, 1),
(8, 14, 1, -2): (1, 1),
(8, 14, 1, -1): (0, 1),
(8, 14, 1, 0): (-1, 1),
(8, 14, 1, 1): (-1, 1),
(8, 14, 1, 2): (-1, 1),
(8, 14, 1, 3): (-1, 1),
(8, 14, 1, 4): (-1, 0),
(8, 14, 1, 5): (-1, -1),
(8, 14, 2, -5): (0, 1),
(8, 14, 2, -4): (0, 1),
(8, 14, 2, -3): (0, 1),
(8, 14, 2, -2): (0, 1),
(8, 14, 2, -1): (0, 1),
(8, 14, 2, 0): (-1, 1),
(8, 14, 2, 1): (-1, 1),
(8, 14, 2, 2): (-1, 1),
(8, 14, 2, 3): (-1, 1),
(8, 14, 2, 4): (-1, 1),
(8, 14, 2, 5): (-1, 1),
(8, 14, 3, -5): (0, 1),
(8, 14, 3, -4): (0, 1),
(8, 14, 3, -3): (0, 1),
(8, 14, 3, -2): (0, 1),
(8, 14, 3, -1): (0, 1),
(8, 14, 3, 0): (0, 1),
(8, 14, 3, 1): (0, 1),
(8, 14, 3, 2): (0, 1),
(8, 14, 3, 3): (0, 1),
(8, 14, 3, 4): (0, 0),
(8, 14, 3, 5): (-1, -1),
(8, 14, 4, -5): (0, 1),
(8, 14, 4, -4): (0, 1),
(8, 14, 4, -3): (0, 1),
(8, 14, 4, -2): (0, 1),
(8, 14, 4, -1): (0, 1),
(8, 14, 4, 0): (0, 1),
(8, 14, 4, 1): (0, 1),
(8, 14, 4, 2): (0, 1),
(8, 14, 4, 3): (0, 1),
(8, 14, 4, 4): (0, 0),
(8, 14, 4, 5): (-1, -1),
(8, 14, 5, -5): (0, 1),
(8, 14, 5, -4): (0, 1),
(8, 14, 5, -3): (0, 1),
(8, 14, 5, -2): (0, 1),
(8, 14, 5, -1): (0, 1),
(8, 14, 5, 0): (0, 1),
(8, 14, 5, 1): (0, 1),
(8, 14, 5, 2): (0, 1),
(8, 14, 5, 3): (0, 1),
(8, 14, 5, 4): (0, 0),
(8, 14, 5, 5): (-1, -1),
(8, 15, -5, -5): (0, 1),
(8, 15, -5, -4): (0, 1),
(8, 15, -5, -3): (0, 1),
(8, 15, -5, -2): (0, 1),
(8, 15, -5, -1): (0, 1),
(8, 15, -5, 0): (0, 1),
(8, 15, -5, 1): (0, 1),
(8, 15, -5, 2): (0, 1),
(8, 15, -5, 3): (0, 0),
(8, 15, -5, 4): (0, 1),
(8, 15, -5, 5): (0, 1),
(8, 15, -4, -5): (0, 1),
(8, 15, -4, -4): (0, 1),
(8, 15, -4, -3): (0, 1),
(8, 15, -4, -2): (0, 1),
(8, 15, -4, -1): (0, 1),
(8, 15, -4, 0): (0, 1),
(8, 15, -4, 1): (0, 1),
(8, 15, -4, 2): (0, 1),
(8, 15, -4, 3): (0, 0),
(8, 15, -4, 4): (0, 1),
(8, 15, -4, 5): (0, 1),
(8, 15, -3, -5): (0, 1),
(8, 15, -3, -4): (0, 1),
(8, 15, -3, -3): (0, 1),
(8, 15, -3, -2): (0, 1),
(8, 15, -3, -1): (0, 1),
(8, 15, -3, 0): (0, 1),
(8, 15, -3, 1): (0, 1),
(8, 15, -3, 2): (0, 1),
(8, 15, -3, 3): (1, 1),
(8, 15, -3, 4): (0, 1),
(8, 15, -3, 5): (0, 1),
(8, 15, -2, -5): (-1, 1),
(8, 15, -2, -4): (-1, 1),
(8, 15, -2, -3): (-1, 1),
(8, 15, -2, -2): (-1, 1),
(8, 15, -2, -1): (-1, 1),
(8, 15, -2, 0): (1, 1),
(8, 15, -2, 1): (1, 1),
(8, 15, -2, 2): (1, 1),
(8, 15, -2, 3): (1, 1),
(8, 15, -2, 4): (1, 1),
(8, 15, -2, 5): (1, 0),
(8, 15, -1, -5): (-1, 1),
(8, 15, -1, -4): (-1, 1),
(8, 15, -1, -3): (-1, 1),
(8, 15, -1, -2): (-1, 1),
(8, 15, -1, -1): (1, 1),
(8, 15, -1, 0): (1, 1),
(8, 15, -1, 1): (1, 1),
(8, 15, -1, 2): (1, 1),
(8, 15, -1, 3): (1, 1),
(8, 15, -1, 4): (1, 1),
(8, 15, -1, 5): (1, 0),
(8, 15, 0, -5): (1, 1),
(8, 15, 0, -4): (1, 1),
(8, 15, 0, -3): (1, 1),
(8, 15, 0, -2): (1, 1),
(8, 15, 0, -1): (1, 1),
(8, 15, 0, 0): (1, 1),
(8, 15, 0, 1): (0, 1),
(8, 15, 0, 2): (0, 1),
(8, 15, 0, 3): (0, 1),
(8, 15, 0, 4): (0, 1),
(8, 15, 0, 5): (0, 1),
(8, 15, 1, -5): (1, 1),
(8, 15, 1, -4): (1, 1),
(8, 15, 1, -3): (1, 1),
(8, 15, 1, -2): (1, 1),
(8, 15, 1, -1): (0, 1),
(8, 15, 1, 0): (0, 1),
(8, 15, 1, 1): (-1, 1),
(8, 15, 1, 2): (-1, 1),
(8, 15, 1, 3): (-1, 1),
(8, 15, 1, 4): (-1, 1),
(8, 15, 1, 5): (-1, 1),
(8, 15, 2, -5): (0, 1),
(8, 15, 2, -4): (0, 1),
(8, 15, 2, -3): (0, 1),
(8, 15, 2, -2): (0, 1),
(8, 15, 2, -1): (0, 1),
(8, 15, 2, 0): (-1, 1),
(8, 15, 2, 1): (-1, 1),
(8, 15, 2, 2): (-1, 1),
(8, 15, 2, 3): (-1, 1),
(8, 15, 2, 4): (-1, 0),
(8, 15, 2, 5): (-1, -1),
(8, 15, 3, -5): (0, 1),
(8, 15, 3, -4): (0, 1),
(8, 15, 3, -3): (0, 1),
(8, 15, 3, -2): (0, 1),
(8, 15, 3, -1): (0, 1),
(8, 15, 3, 0): (0, 1),
(8, 15, 3, 1): (0, 1),
(8, 15, 3, 2): (0, 1),
(8, 15, 3, 3): (0, 0),
(8, 15, 3, 4): (0, 1),
(8, 15, 3, 5): (0, 1),
(8, 15, 4, -5): (0, 1),
(8, 15, 4, -4): (0, 1),
(8, 15, 4, -3): (0, 1),
(8, 15, 4, -2): (0, 1),
(8, 15, 4, -1): (0, 1),
(8, 15, 4, | |
MD_OFX_DEFAULT_SETTINGS_FILE = moduleBuild.MD_OFX_DEFAULT_SETTINGS_FILE
if len(moduleBuild.MD_OFX_DEBUG_SETTINGS_FILE) > 0:
MD_OFX_DEBUG_SETTINGS_FILE = moduleBuild.MD_OFX_DEBUG_SETTINGS_FILE
if len(moduleBuild.MD_EXTENSIONS_DIRECTORY_FILE) > 0:
MD_EXTENSIONS_DIRECTORY_FILE = moduleBuild.MD_EXTENSIONS_DIRECTORY_FILE
if len(moduleBuild.MYPYTHON_DOWNLOAD_URL) > 0:
MYPYTHON_DOWNLOAD_URL = moduleBuild.MYPYTHON_DOWNLOAD_URL
if debug:
myPrint("D","Program variables are now...:")
myPrint("D"," TOOLBOX_STOP_NOW: %s" %(TOOLBOX_STOP_NOW))
myPrint("D"," OFX_SETUP_MATCH_MD_BUILD: %s" %(OFX_SETUP_MATCH_MD_BUILD))
myPrint("D"," TOOLBOX_MINIMUM_TESTED_MD_VERSION: %s" %(TOOLBOX_MINIMUM_TESTED_MD_VERSION))
myPrint("D"," TOOLBOX_MAXIMUM_TESTED_MD_VERSION: %s" %(TOOLBOX_MAXIMUM_TESTED_MD_VERSION))
myPrint("DB"," TOOLBOX_MAXIMUM_TESTED_MD_BUILD: %s" %(TOOLBOX_MAXIMUM_TESTED_MD_BUILD))
myPrint("D"," MD_OFX_BANK_SETTINGS_DIR: %s" %(MD_OFX_BANK_SETTINGS_DIR))
myPrint("D"," MD_OFX_DEFAULT_SETTINGS_FILE: %s" %(MD_OFX_DEFAULT_SETTINGS_FILE))
myPrint("D"," MD_OFX_DEBUG_SETTINGS_FILE: %s" %(MD_OFX_DEBUG_SETTINGS_FILE))
myPrint("D"," MD_EXTENSIONS_DIRECTORY_FILE: %s" %(MD_EXTENSIONS_DIRECTORY_FILE))
myPrint("D"," MYPYTHON_DOWNLOAD_URL: %s" %(MYPYTHON_DOWNLOAD_URL))
if TOOLBOX_STOP_NOW:
myPrint("B","Uh-oh... disable has been set by the Developer for this build.... Toolbox must close... Sorry")
return
else:
myPrint("D","Found LOWER downloaded module build %s - so I will keep program's defaults, and ignore these - exiting search... (%s) " %(moduleBuild.build, moduleBuild.obj ))
return
myPrint("D","No suitable module build info found.. (so I will keep program's defaults, and ignore these - exiting search)")
return
def downloadExtensions():
    """Fetch the published Moneydance extension directory from the online list URL.
    Returns a StreamTable of directory data (empty on download failure), or False
    when extensions are disabled in this installation."""
    global MD_EXTENSIONS_DIRECTORY_FILE
    myPrint("B","#######################################################################################################################################")
    myPrint("B","### INFORMATION: Toolbox is connecting to Infinite Kind servers to check for extension(s) version data - IT IS NOT SENDING ANY DATA ###")
    myPrint("B","#######################################################################################################################################")
    downloadInfo = StreamTable()
    if not moneydance_ui.getMain().getSourceInformation().getExtensionsEnabled():
        myPrint("B", "@@ Extensions not enabled!!?? @@")
        return False
    reader = None
    try:
        # system property override wins; otherwise use the program's default URL
        url = URL(System.getProperty("moneydance.extension_list_url", MD_EXTENSIONS_DIRECTORY_FILE))
        reader = BufferedReader(InputStreamReader(url.openStream(), "UTF8"))
        downloadInfo.readFrom(reader)
    except:
        myPrint("B", "ERROR downloading from Moneydance extensions list website... ")
        dump_sys_error_to_md_console_and_errorlog()
    finally:
        if reader:
            try:
                reader.close()
            except:
                myPrint("B", "Error closing URL stream")
                dump_sys_error_to_md_console_and_errorlog()
    return downloadInfo
def check_if_key_string_valid(test_str):
    # A settings key is valid only when every character belongs to the allowed
    # set [a-zA-Z0-9-_.:&=;,@]; re.search() returns None when no disallowed
    # character is present (http://docs.python.org/library/re.html)
    disallowed = re.search(r'[^a-zA-Z0-9-_.:&=;,@]', test_str)
    if disallowed is None:
        myPrint("DB","Valid: %r" %(test_str))
        return True
    myPrint("DB","Invalid: %r" %(test_str))
    return False
def check_if_key_data_string_valid(test_str):
    # A settings value is valid only when every character belongs to the allowed
    # set [a-zA-Z0-9-' _.:&=;,@/\]; re.search() returns None when no disallowed
    # character is present (http://docs.python.org/library/re.html)
    disallowed = re.search(r"[^a-zA-Z0-9-' _.:&=;,@/\\]", test_str)
    if disallowed is None:
        myPrint("DB","Valid: %r" %(test_str))
        return True
    myPrint("DB","Invalid: %r" %(test_str))
    return False
def get_extension_update_info():
    """Compare installed extensions against the online extension directory and
    return {module_id_lowercase: [ModuleMetaData, isInstalled, isUpdatable]} for
    those with a newer build available. Returns an empty dict on any failure."""
    # NOTE(review): downloadExtensions() returns False when extensions are
    # disabled; the .get() below would then raise - confirm this path is guarded
    availableExtensionInfo=downloadExtensions()
    moduleList = availableExtensionInfo.get(u"feature_modules")                 # StreamVector
    installed =  moneydance_ui.getMain().getLoadedModules()                     # FeatureModule[]
    excludedIDs = moneydance.getSuppressedExtensionIDs()                        # List<String>
    # Bundled modules are shipped with MD itself and never offered as updates
    for installedMod in installed:
        if installedMod.isBundled():
            excludedIDs.add(installedMod.getIDStr().toLowerCase())
    miniUpdateList={}
    try:
        if moduleList:
            for obj in moduleList:
                if not (isinstance(obj, StreamTable)):
                    myPrint(u"J", u"ERROR - Retrieved data is not a StreamTable()", obj)
                    continue
                extInfo = ModuleMetaData(obj)                                   # ModuleMetaData
                # noinspection PyUnresolvedReferences
                if excludedIDs.contains(extInfo.getModuleID().lower()):         # Probably internal modules like Python/Jython
                    continue
                # NOTE(review): 1928 is a hard-coded MD build number used as the
                # compatibility reference - confirm it tracks the running build
                if not (1928 >= extInfo.getMinimumSupportedBuild() and 1928 <= extInfo.getMaximumSupportedBuild()): # noqa
                    continue
                if not (extInfo.getMinimumSupportedBuild() >= 1000):
                    continue
                # Skip modules that cannot run inside the Mac sandbox when MD is constrained to it
                if not(extInfo.isMacSandboxFriendly() or not Platform.isMac() or not moneydance_ui.getMain().getPlatformHelper().isConstrainedToSandbox()):
                    continue
                # Find the locally installed module matching this directory entry (case-insensitive ID match)
                existingMod = None                                              # FeatureModule
                for mod in installed:
                    # noinspection PyUnresolvedReferences
                    if mod.getIDStr().lower() == extInfo.getModuleID().lower():
                        existingMod = mod
                        break
                isInstalled = (existingMod is not None)                         # boolean
                # Updatable only when installed and the directory build is newer
                isUpdatable = (existingMod is not None and existingMod.getBuild() < extInfo.getBuild())
                if existingMod and isInstalled and isUpdatable:
                    # noinspection PyUnresolvedReferences
                    miniUpdateList[extInfo.getModuleID().lower()] = [extInfo, isInstalled, isUpdatable]
        else:
            myPrint(u"J", u"ERROR - Failed to download module list!)")
    except:
        myPrint(u"B", u"ERROR decoding downloaded module list!)")
        dump_sys_error_to_md_console_and_errorlog()
    return miniUpdateList
def get_register_txn_sort_orders():
    """Build and return a multi-line text report of register transaction sort
    orders: first the defaults from config.dict, then the settings saved for
    each (active) account.

    :return: the full report as one string (one entry per line)
    """
    # Flush in memory settings to disk so the report reflects what is saved
    moneydance.savePreferences()
    # Human readable register type for each config.dict column-width key
    # (previously duplicated as two identical 11-branch if/elif chains)
    key_type_names = {
        "bank":        "Bank",
        "cc":          "Credit Card",
        "invest":      "Investment",
        "loan":        "Loan",
        "security":    "Security",
        "misc":        "Asset/Liability/Expense/Income/Other",
        "rec_credits": "Reconciling window - credits",
        "rec_debits":  "Reconciling window - debits",
        "secdetail":   "Security Detail",
        "split":       "Split Window",
    }
    theSortData = []                                                            # noqa
    def _append_sort_details(header, result):
        # Unpack one loadMDPreferences() result and append a formatted stanza
        oneLineMode = result[0]
        splitReg = result[1]
        splitSz = result[2]
        sortID = result[3]
        position = result[4]
        ascending = result[5]
        widths = result[6]
        position2 = result[7]
        theSortData.append(header)
        theSortData.append(">> Sort Order: %s" %sortID)
        theSortData.append(">> Ascending: %s" %ascending)
        theSortData.append(">> One Line View: %s" %oneLineMode)
        theSortData.append(">> Split Register View: %s (%s)" %(splitReg,splitSz))
        theSortData.append(">> Position: %s Widths: %s Position2 %s\n" %(position, widths, position2))
    theSortData.append("VIEW REGISTER TXN SORT ORDERS (for Accounts - excluding legacy keys)")
    theSortData.append(" ==================================================================\n")
    # Section 1: the global defaults from config.dict
    theSortData.append("DEFAULTS (from config.dict)\n")
    for x in _COLWIDTHS:
        theType = key_type_names.get(x, "????")
        result = loadMDPreferences(None,x)
        if result:
            _append_sort_details("\nType: %s (%s) Register Sort Data:"%(theType,x), result)
    # Section 2: per-account settings stored inside the dataset
    theSortData.append("\nDATA SAVED INTERNALLY BY (ACTIVE) ACCOUNT")
    theSortData.append("-----------------------------------------\n")
    accounts = AccountUtil.allMatchesForSearch(moneydance_data, MyAcctFilter(1))
    for acct in accounts:
        for x in _COLWIDTHS:
            theType = key_type_names.get(x, "????")
            result = loadMDPreferences(acct,x, False)
            if result:
                _append_sort_details("\nAccount: %s   Account Type: %s Key Type: %s (%s) Register Sort Data:"%(acct.getAccountName(), acct.getAccountType(),theType,x), result)
    theSortData.append("\n<END>")
    # Terminate every entry with a newline and join into the final report
    return "".join(entry + "\n" for entry in theSortData)
def view_check_num_settings(statusLabel):
    """Build and return a plain-text report of the dataset's check-number settings.

    Covers the master/root-account defaults plus the per-account overrides for
    every account matched by MyAcctFilter(3). On Moneydance versions that
    predate CheckNumSettings (before 2020.1 build 1925) it writes an error to
    statusLabel and returns None.
    """
    try:
        from com.infinitekind.moneydance.model import CheckNumSettings
    except:
        statusLabel.setText(("Sorry - your version of MD is too early to use this function, must be at least Moneydance 2020.1 (1925)").ljust(800, " "))
        statusLabel.setForeground(Color.RED)
        return
    theData = [] # noqa
    theData.append("CHECK NUMBER SETTINGS")
    theData.append(" =====================\n")
    acct = root = moneydance.getCurrentAccountBook().getRootAccount()
    x = root.getCheckNumSettings(True) # True means also return the defaults (False would omit them)
    theData.append("\nMaster Dataset & defaults (root account): " + moneydance.getCurrentAccountBook().getName())
    if not x: # Assume old style check numbers
        theData.append(
            " >>Old style Check numbers as default: %s" %(moneydance_ui.getResources().getCheckNumberList(acct)))
        theData.append("\n\n")
    else:
        theData.append(" >>Fixed Chq Items: %s" %(x.getPopupStrings()))
        theData.append(
            " >>Complete list of all Items in Chq Popup: %s" %(moneydance_ui.getResources().getCheckNumberList(acct)))
        y = x.getRecentsOption()
        # noinspection PyUnresolvedReferences
        if y == CheckNumSettings.IncludeRecentsOption.ACCOUNT: y = "Include from Same Account"
        elif y == CheckNumSettings.IncludeRecentsOption.GLOBAL: y = "Include from All Accounts"
        elif y == CheckNumSettings.IncludeRecentsOption.NONE: y = "Don't Include"
        theData.append(" >>Recent Entries: %s" %(y))
        theData.append(" >>Max Entries: %s" %(x.getMaximumRecents()))
        theData.append(" >>Show Next-Check Number: %s" %(x.getIncludeNextCheckNumber()))
        theData.append(" >>Show Print-Check Option: %s" %(x.getIncludePrintCheckMarker()))
        theData.append("\n")
    # Per-account overrides (root itself is skipped below).
    accounts = AccountUtil.allMatchesForSearch(moneydance_data, MyAcctFilter(3))
    for acct in accounts:
        # noinspection PyUnresolvedReferences
        if acct.getAccountType() == Account.AccountType.ROOT: continue
        x = acct.getCheckNumSettings(False) # False means don't return defaults
        if not x:
            theData.append("Account: " + acct.getFullAccountName() + " (Settings: NONE/Default)")
            theData.append(" >>Complete list of all Items in Chq Popup: %s" %(moneydance_ui.getResources().getCheckNumberList(acct)))
            theData.append("\n")
        else:
            theData.append("Account: " + pad(acct.getFullAccountName(), 80))
            theData.append(" >>Fixed Chq Items: %s" %(x.getPopupStrings()))
            if acct.getAccountType() != Account.AccountType.ROOT: # noqa
                theData.append(" >>Complete list of all Items in Chq Popup: %s" %(moneydance_ui.getResources().getCheckNumberList(acct)))
            y = x.getRecentsOption()
            if y == CheckNumSettings.IncludeRecentsOption.ACCOUNT: # noqa
                y = "Include from Same Account"
            elif y == CheckNumSettings.IncludeRecentsOption.GLOBAL: # noqa
                y = "Include from All Accounts"
            elif y == CheckNumSettings.IncludeRecentsOption.NONE: # noqa
                y = "Don't Include"
            theData.append(" >>Recent Entries: %s" %(y))
            theData.append(" >>Max Entries: %s" %(x.getMaximumRecents()))
            theData.append(" >>Show Next-Check Number: %s" %(x.getIncludeNextCheckNumber()))
            theData.append(" >>Show Print-Check Option: %s" %(x.getIncludePrintCheckMarker()))
            theData.append("\n")
    # CheckNumSettings.IncludeRecentsOption
    theData.append(("\n<END>"))
    # Join everything into a single newline-terminated text blob for display.
    for i in range(0, len(theData)):
        theData[i] = theData[i] + "\n"
    theData = "".join(theData)
    return theData
def isUserEncryptionPassphraseSet():
    """Return True if the dataset's local-storage key is protected by a user passphrase.

    Reads the "key" file in the dataset root folder into a SyncRecord and
    returns its "userpass" boolean. Best-effort: any failure (missing file,
    unreadable record, ...) returns False.
    """
    try:
        keyFile = File(moneydance_data.getRootFolder(), "key")
        keyInfo = SyncRecord()
        fin = FileInputStream(keyFile)
        try:
            keyInfo.readSet(fin)
        finally:
            # Bug fix: the stream was leaked when readSet() raised.
            fin.close()
        return keyInfo.getBoolean("userpass", False)
    except:
        # Deliberate best-effort: treat any error as "no passphrase".
        pass
    return False
def getMDEncryptionKey():
try:
keyFile = File(moneydance_data.getRootFolder(), u"key")
keyInfo = SyncRecord()
fin = FileInputStream(keyFile)
keyInfo.readSet(fin)
fin.close()
# noinspection PyUnresolvedReferences
cipherLevel = LocalStorageCipher.MDCipherLevel.GOOD
keyString=keyInfo.getString(u"key",None)
test_with_random = u"E6520436865636B2C2062616279206F6E65203220312074776F4D6963726F7068306E6520436865636B204D6963723070686F6"
y=StringUtils.decodeHex(test_with_random[int(len(test_with_random)/2):]+test_with_random[:int(len(test_with_random)/2)])
z=""
for x in y: z+=chr(x)
newPassphrase = z
encryptedKeyBytes = StringUtils.decodeHex(keyString)
if keyInfo.getBoolean(u"userpass", False):
newPassphrase = moneydance_ui.getCurrentAccounts().getEncryptionKey()
if not newPassphrase:
return u"Not sure: Error retrieving your Encryption key!"
try:
# This next line triggers a message in the console error log file: "loading with 128 bit encryption key"
myPrint(u"J",u"Checking encryption key....")
key = LocalStorageCipher.encryptionKeyFromBytesAndPassword(encryptedKeyBytes, list(newPassphrase), cipherLevel)
# cipher | |
glEndList()
glNewList(self.displistUnselected, GL_COMPILE)
self._render(False)
glEndList()
def render(self, selected=False):
if selected:
glCallList(self.displistSelected)
else:
glCallList(self.displistUnselected)
    def _render(self, selected=False):
        # Hook for subclasses: emit the actual GL drawing commands.
        # Invoked while the display lists are being compiled (see the
        # glNewList calls above); the base class draws nothing.
        pass
class Cube(SelectableModel):
    """Selectable cube model (resources/cube.obj) drawn with an outline pass."""

    def __init__(self, color=(1.0, 1.0, 1.0, 1.0)):
        super().__init__()
        with open("resources/cube.obj", "r") as f:
            model = Model.from_obj(f, scale=150, rotate=True)
            self.mesh_list = model.mesh_list
            # NOTE(review): stores mesh_list here, not model.named_meshes
            # (unlike GenericObject) — only index access is used below; confirm.
            self.named_meshes = model.mesh_list
        self.color = color

    def _render(self, selected=False):
        """Outline pass (front-culled, scaled up) then fill pass in self.color."""
        glEnable(GL_CULL_FACE)
        if selected:
            glColor4f(*selectioncolor)
        else:
            glColor4f(0.0, 0.0, 0.0, 1.0)
        glCullFace(GL_FRONT)
        glPushMatrix()
        # Selected objects get a thicker outline via a larger scale.
        if selected:
            glScalef(1.5, 1.5, 1.5)
        else:
            glScalef(1.2, 1.2, 1.2)
        self.mesh_list[0].render()
        glPopMatrix()
        glCullFace(GL_BACK)
        glColor4f(*self.color)
        self.mesh_list[0].render()
        glDisable(GL_CULL_FACE)

    def render_coloredid(self, id):
        """Draw flat-colored with the 24-bit picking id (no textures/lighting)."""
        glColor3ub((id >> 16) & 0xFF, (id >> 8) & 0xFF, (id >> 0) & 0xFF)
        glPushMatrix()
        glScalef(1.2, 1.2, 1.2)
        self.mesh_list[0].render()
        glPopMatrix()
class GenericObject(SelectableModel):
    """Generic object model (resources/generic_object.obj): body plus a green tip."""

    def __init__(self, bodycolor=(1.0, 1.0, 1.0, 1.0)):
        super().__init__()
        with open("resources/generic_object.obj", "r") as f:
            model = Model.from_obj(f, scale=150, rotate=True)
            self.mesh_list = model.mesh_list
            self.named_meshes = model.named_meshes
        self.bodycolor = bodycolor

    def _render(self, selected=False):
        """Outline pass (front-culled, scaled up), then body + tip fill pass."""
        glEnable(GL_CULL_FACE)
        if selected:
            glColor4f(*selectioncolor)
        else:
            glColor4f(0.0, 0.0, 0.0, 1.0)
        glCullFace(GL_FRONT)
        glPushMatrix()
        # Selected objects get a thicker outline via a larger scale.
        if selected:
            glScalef(1.5, 1.5, 1.5)
        else:
            glScalef(1.2, 1.2, 1.2)
        self.named_meshes["Cube"].render()
        glPopMatrix()
        glCullFace(GL_BACK)
        glColor4f(*self.bodycolor)
        self.named_meshes["Cube"].render()
        glColor4ub(0x09, 0x93, 0x00, 0xFF)
        self.named_meshes["tip"].render()
        #glColor4ub(0x00, 0x00, 0x00, 0xFF)
        #self.mesh_list[2].render()
        glDisable(GL_CULL_FACE)

    def render_coloredid(self, id):
        """Draw the body flat-colored with the 24-bit picking id."""
        glColor3ub((id >> 16) & 0xFF, (id >> 8) & 0xFF, (id >> 0) & 0xFF)
        glPushMatrix()
        glScalef(1.2, 1.2, 1.2)
        self.named_meshes["Cube"].render()
        glPopMatrix()
class GenericComplexObject(GenericObject):
    """Multi-part object model addressed by mesh_list indices.

    The constructor records which mesh_list index holds the body, tip, eyes
    and remaining ("rest") geometry, plus the height offset at which the body
    is translated above the origin.
    """
    # NOTE(review): does not call super().__init__(); render() is fully
    # overridden so the GenericObject/display-list setup is never used —
    # looks intentional, confirm.

    def __init__(self, modelpath, height, tip, eyes, body, rest):
        self.scale = 10
        with open(modelpath, "r") as f:
            model = Model.from_obj(f, scale=self.scale, rotate=True)
            self.mesh_list = model.mesh_list
            # NOTE(review): named_meshes is assigned the plain mesh_list here,
            # not model.named_meshes — only index access is used; confirm.
            self.named_meshes = model.mesh_list
        self._tip = tip
        self._eyes = eyes
        self._body = body
        self._height = height
        self._rest = rest

    def render(self, selected=False):
        """Outline pass for the body, then a fill pass with per-part colors."""
        glEnable(GL_CULL_FACE)
        if selected:
            glColor4f(*selectioncolor)
        else:
            glColor4f(0.0, 0.0, 0.0, 1.0)
        glCullFace(GL_FRONT)
        glPushMatrix()
        glTranslatef(0.0, 0.0, self._height * self.scale)
        if selected:
            glScalef(1.5, 1.5, 1.5)
        else:
            glScalef(1.2, 1.2, 1.2)
        self.mesh_list[self._body].render()
        glPopMatrix()
        # Fill pass: white body, green tip, black eyes.
        glCullFace(GL_BACK)
        glPushMatrix()
        glTranslatef(0.0, 0.0, self._height*self.scale)
        glColor4f(1.0, 1.0, 1.0, 1.0)
        self.mesh_list[self._body].render()
        glColor4ub(0x09, 0x93, 0x00, 0xFF)
        self.mesh_list[self._tip].render() # tip
        glColor4ub(0x00, 0x00, 0x00, 0xFF)
        self.mesh_list[self._eyes].render() # eyes
        glPopMatrix()
        # The "rest" geometry is drawn untranslated, in the outline color.
        if selected:
            glColor4f(*selectioncolor)
        else:
            glColor4f(0.0, 0.0, 0.0, 1.0)
        self.mesh_list[self._rest].render()
        glDisable(GL_CULL_FACE)

    def render_coloredid(self, id):
        """Draw every part flat-colored with the 24-bit picking id."""
        glColor3ub((id >> 16) & 0xFF, (id >> 8) & 0xFF, (id >> 0) & 0xFF)
        glPushMatrix()
        glTranslatef(0.0, 0.0, self._height * self.scale)
        self.mesh_list[self._body].render()
        glPopMatrix()
        glPushMatrix()
        glTranslatef(0.0, 0.0, self._height*self.scale)
        self.mesh_list[self._body].render()
        self.mesh_list[self._tip].render() # tip
        self.mesh_list[self._eyes].render() # eyes
        glPopMatrix()
        self.mesh_list[self._rest].render()
class GenericFlyer(GenericObject):
    """Flyer variant of GenericObject, loaded from its own .obj file."""

    def __init__(self):
        with open("resources/generic_object_flyer.obj", "r") as objfile:
            loaded = Model.from_obj(objfile, scale=10, rotate=True)
        self.mesh_list = loaded.mesh_list
        self.named_meshes = loaded.mesh_list
class GenericCrystallWall(GenericObject):
    """Crystal-wall variant of GenericObject, loaded from its own .obj file."""

    def __init__(self):
        with open("resources/generic_object_crystalwall.obj", "r") as objfile:
            loaded = Model.from_obj(objfile, scale=10, rotate=True)
        self.mesh_list = loaded.mesh_list
        self.named_meshes = loaded.mesh_list
class GenericLongLegs(GenericComplexObject):
    """Long-legs model: GenericComplexObject with its mesh indices pre-wired."""

    def __init__(self):
        super().__init__(
            "resources/generic_object_longlegs2.obj",
            height=5.0,
            tip=3,
            body=2,
            eyes=1,
            rest=0,
        )
class GenericChappy(GenericComplexObject):
    """Chappy model: GenericComplexObject with its mesh indices pre-wired."""

    def __init__(self):
        super().__init__(
            "resources/generic_chappy.obj",
            height=2.56745,
            tip=0,
            body=2,
            eyes=1,
            rest=3,
        )
class __GenericChappy(GenericObject):
    """Hand-written chappy renderer.

    NOTE(review): the double-underscore prefix and the GenericComplexObject-based
    GenericChappy above suggest this is a retired implementation — confirm
    before removing.
    """

    def __init__(self):
        self.scale = 10
        with open("resources/generic_chappy.obj", "r") as f:
            model = Model.from_obj(f, scale=self.scale, rotate=True)
            self.mesh_list = model.mesh_list
            self.named_meshes = model.mesh_list

    def render(self, selected=False):
        """Outline pass for the floating body, then fill pass with part colors."""
        glEnable(GL_CULL_FACE)
        if selected:
            glColor4f(*selectioncolor)
        else:
            glColor4f(0.0, 0.0, 0.0, 1.0)
        mainbodyheight = 2.56745
        glCullFace(GL_FRONT)
        glPushMatrix()
        glTranslatef(0.0, 0.0, mainbodyheight * self.scale)
        if selected:
            glScalef(1.5, 1.5, 1.5)
        else:
            glScalef(1.2, 1.2, 1.2)
        self.mesh_list[1].render()
        glPopMatrix()
        glCullFace(GL_BACK)
        glPushMatrix()
        glTranslatef(0.0, 0.0, 2.56745*self.scale)
        glColor4f(1.0, 1.0, 1.0, 1.0)
        self.mesh_list[1].render()
        glColor4ub(0x09, 0x93, 0x00, 0xFF)
        self.mesh_list[2].render() # tip
        glPopMatrix()
        glColor4ub(0x00, 0x00, 0x00, 0xFF)
        self.mesh_list[3].render() # eyes
        if selected:
            glColor4f(*selectioncolor)
        else:
            glColor4f(0.0, 0.0, 0.0, 1.0)
        self.mesh_list[0].render() # leg
        glDisable(GL_CULL_FACE)

    def render_coloredid(self, id):
        """Draw the body flat-colored with the 24-bit picking id."""
        glColor3ub((id >> 16) & 0xFF, (id >> 8) & 0xFF, (id >> 0) & 0xFF)
        glPushMatrix()
        glScalef(1.2, 1.2, 1.2)
        glTranslatef(0.0, 0.0, 2.56745 * self.scale)
        self.mesh_list[1].render()
        glPopMatrix()
class GenericSnakecrow(GenericComplexObject):
    """Snakecrow model: GenericComplexObject with its mesh indices pre-wired."""

    def __init__(self):
        super().__init__(
            "resources/generic_snakecrow.obj",
            height=6.63505,
            tip=1,
            body=0,
            eyes=2,
            rest=3,
        )
class __GenericSnakecrow(GenericObject):
    """Hand-written snakecrow renderer.

    NOTE(review): the double-underscore prefix and the GenericComplexObject-based
    GenericSnakecrow above suggest this is a retired implementation — confirm
    before removing.
    """

    def __init__(self):
        self.scale = 10
        with open("resources/generic_snakecrow.obj", "r") as f:
            model = Model.from_obj(f, scale=self.scale, rotate=True)
            self.mesh_list = model.mesh_list
            self.named_meshes = model.mesh_list

    def render(self, selected=False):
        """Outline pass for the floating body, then fill pass with part colors."""
        glEnable(GL_CULL_FACE)
        if selected:
            glColor4f(255/255, 223/255, 39/255, 1.0)
        else:
            glColor4f(0.0, 0.0, 0.0, 1.0)
        mainbodyheight = 6.63505
        glCullFace(GL_FRONT)
        glPushMatrix()
        glTranslatef(0.0, 0.0, mainbodyheight * self.scale)
        if selected:
            glScalef(1.5, 1.5, 1.5)
        else:
            glScalef(1.2, 1.2, 1.2)
        self.mesh_list[1].render()
        glPopMatrix()
        glCullFace(GL_BACK)
        glPushMatrix()
        glTranslatef(0.0, 0.0, mainbodyheight*self.scale)
        glColor4f(1.0, 1.0, 1.0, 1.0)
        self.mesh_list[1].render()
        glPopMatrix()
        glColor4ub(0x09, 0x93, 0x00, 0xFF)
        self.mesh_list[2].render() # tip
        glColor4ub(0x00, 0x00, 0x00, 0xFF)
        self.mesh_list[3].render() # eyes
        if selected:
            glColor4f(255/255, 223/255, 39/255, 1.0)
        else:
            glColor4f(0.0, 0.0, 0.0, 1.0)
        self.mesh_list[0].render() # leg
        glDisable(GL_CULL_FACE)

    def render_coloredid(self, id):
        """Draw the body flat-colored with the 24-bit picking id."""
        glColor3ub((id >> 16) & 0xFF, (id >> 8) & 0xFF, (id >> 0) & 0xFF)
        glPushMatrix()
        glScalef(1.2, 1.2, 1.2)
        # Bug fix: this translated by 2.56745 (the chappy body height, copied
        # from __GenericChappy) while render() uses 6.63505, so the picking
        # geometry was drawn at the wrong height.
        glTranslatef(0.0, 0.0, 6.63505 * self.scale)
        self.mesh_list[1].render()
        glPopMatrix()
class GenericSwimmer(GenericComplexObject):
    """Swimmer model: GenericComplexObject with its mesh indices pre-wired."""

    def __init__(self):
        super().__init__(
            "resources/generic_swimmer.obj",
            height=0.0,
            tip=0,
            body=3,
            eyes=1,
            rest=2,
        )
class TexturedPlane(object):
    """A textured quad of a given size with an adjustable offset and flat color."""

    def __init__(self, planewidth, planeheight, qimage):
        # Upload the QImage pixels as a GL texture (single level, no mipmaps).
        ID = glGenTextures(1)
        glBindTexture(GL_TEXTURE_2D, ID)
        glPixelStorei(GL_UNPACK_ALIGNMENT, 1)
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_BASE_LEVEL, 0)
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAX_LEVEL, 0)
        # NOTE(review): assumes qimage uses a 32-bit BGRA-compatible layout
        # (4 bytes per pixel) — confirm at the callers.
        imgdata = bytes(qimage.bits().asarray(qimage.width()*qimage.height()*4))
        glTexImage2D(GL_TEXTURE_2D, 0, 4, qimage.width(), qimage.height(), 0, GL_BGRA, GL_UNSIGNED_BYTE, imgdata)
        self.ID = ID
        self.planewidth = planewidth
        self.planeheight = planeheight
        self.offset_x = 0
        self.offset_z = 0
        self.color = (0.0, 0.0, 0.0)

    def set_offset(self, x, z):
        self.offset_x = x
        self.offset_z = z

    def set_color(self, color):
        self.color = color

    def apply_color(self):
        glColor4f(self.color[0], self.color[1], self.color[2], 1.0)

    def render(self):
        """Draw the textured quad, centered on its offset."""
        w, h = self.planewidth, self.planeheight
        offsetx, offsetz = self.offset_x, self.offset_z
        glEnable(GL_TEXTURE_2D)
        glBindTexture(GL_TEXTURE_2D, self.ID)
        glBegin(GL_TRIANGLE_FAN)
        glTexCoord2f(0.0, 0.0)
        glVertex3f(-0.5*w+offsetx, -0.5*h+offsetz, 0)
        glTexCoord2f(0.0, 1.0)
        glVertex3f(-0.5*w+offsetx, 0.5*h+offsetz, 0)
        glTexCoord2f(1.0, 1.0)
        glVertex3f(0.5*w+offsetx, 0.5*h+offsetz, 0)
        glTexCoord2f(1.0, 0.0)
        glVertex3f(0.5*w+offsetx, -0.5*h+offsetz, 0)
        glEnd()

    def render_coloredid(self, id):
        """Draw the quad untextured, flat-colored with the 24-bit picking id."""
        w, h = self.planewidth, self.planeheight
        offsetx, offsetz = self.offset_x, self.offset_z
        glDisable(GL_TEXTURE_2D)
        glColor3ub((id >> 16) & 0xFF, (id >> 8) & 0xFF, (id >> 0) & 0xFF)
        glBegin(GL_TRIANGLE_FAN)
        #glTexCoord2f(0.0, 0.0)
        glVertex3f(-0.5*w+offsetx, -0.5*h+offsetz, 0)
        #glTexCoord2f(0.0, 1.0)
        glVertex3f(-0.5*w+offsetx, 0.5*h+offsetz, 0)
        #glTexCoord2f(1.0, 1.0)
        glVertex3f(0.5*w+offsetx, 0.5*h+offsetz, 0)
        #glTexCoord2f(1.0, 0.0)
        glVertex3f(0.5*w+offsetx, -0.5*h+offsetz, 0)
        glEnd()
# Texture-coordinate corner orderings for the four minimap orientations (0-3).
# Each entry lists the (u, v) pairs in triangle-fan order; every step is a
# cyclic rotation of orientation 0, i.e. the texture turned by a quarter.
ORIENTATIONS = {
    0: [(0.0, 0.0), (0.0, 1.0), (1.0, 1.0), (1.0, 0.0)],
    1: [(1.0, 0.0), (0.0, 0.0), (0.0, 1.0), (1.0, 1.0)],
    2: [(1.0, 1.0), (1.0, 0.0), (0.0, 0.0), (0.0, 1.0)],
    3: [(0.0, 1.0), (1.0, 1.0), (1.0, 0.0), (0.0, 0.0)]
}
class Minimap(object):
    """Translucent textured quad stretched between two corner points."""

    def __init__(self, corner1, corner2, orientation, texpath=None):
        # GL texture name; stays None until a texture is successfully loaded.
        self.ID = None
        if texpath is not None:
            self.set_texture(texpath)
        self.corner1 = corner1
        self.corner2 = corner2
        # Index into ORIENTATIONS (0-3), selects the texture rotation.
        self.orientation = orientation
        # NOTE(review): debug leftover — consider removing.
        print("fully initialized")

    def is_available(self):
        # True once a texture has been uploaded.
        return self.ID is not None

    def set_texture(self, path):
        """Load a PNG as the minimap texture, replacing any previous one."""
        if self.ID is not None:
            glDeleteTextures(1, int(self.ID))
        qimage = QtGui.QImage(path, "png")
        qimage = qimage.convertToFormat(QtGui.QImage.Format_ARGB32)
        ID = glGenTextures(1)
        glBindTexture(GL_TEXTURE_2D, ID)
        glPixelStorei(GL_UNPACK_ALIGNMENT, 1)
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_BASE_LEVEL, 0)
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAX_LEVEL, 0)
        imgdata = bytes(qimage.bits().asarray(qimage.width() * qimage.height() * 4))
        glTexImage2D(GL_TEXTURE_2D, 0, 4, qimage.width(), qimage.height(), 0, GL_BGRA, GL_UNSIGNED_BYTE, imgdata)
        self.ID = ID

    def render(self):
        """Draw the minimap alpha-blended at 70% opacity; no-op without a texture."""
        if self.ID is None:
            return
        corner1, corner2 = self.corner1, self.corner2
        glDisable(GL_ALPHA_TEST)
        glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
        glEnable(GL_BLEND)
        #glEnable(GL_DEPTH_TEST)
        glColor4f(1.0, 1.0, 1.0, 0.70)
        glEnable(GL_TEXTURE_2D)
        glBindTexture(GL_TEXTURE_2D, self.ID)
        glBegin(GL_TRIANGLE_FAN)
        glTexCoord2f(*ORIENTATIONS[self.orientation][0])
        glVertex3f(corner1.x, -corner1.z, corner1.y)
        glTexCoord2f(*ORIENTATIONS[self.orientation][1])
        glVertex3f(corner1.x, -corner2.z, corner1.y)
        glTexCoord2f(*ORIENTATIONS[self.orientation][2])
        glVertex3f(corner2.x, -corner2.z, corner1.y)
        glTexCoord2f(*ORIENTATIONS[self.orientation][3])
        glVertex3f(corner2.x, -corner1.z, corner1.y)
        glEnd()
        # Restore the GL state changed above.
        glColor4f(1.0, 1.0, 1.0, 1.0)
        #glDisable(GL_DEPTH_TEST)
        glDisable(GL_BLEND)
        glBlendFunc(GL_ZERO, GL_ONE)
        glDisable(GL_TEXTURE_2D)
        glEnable(GL_ALPHA_TEST)
class Grid(Mesh):
    """Ground grid drawn as GL lines: thick main axes plus thin cell lines."""

    def __init__(self, width, length, step):
        super().__init__("Grid")
        self.width = width
        self.length = length
        self.step = step

    def generate_displist(self):
        """(Re)compile the grid geometry into this mesh's display list."""
        if self._displist is not None:
            glDeleteLists(self._displist, 1)
        # Small constant lift applied to every vertex — presumably to avoid
        # z-fighting with geometry at height 0; TODO confirm intent.
        offset = +0.5
        width = self.width
        length = self.length
        self._displist = glGenLists(1)
        glNewList(self._displist, GL_COMPILE)
        glColor3f(0.0, 0.0, 0.0)
        # Thick main axes.
        glLineWidth(4.0)
        glBegin(GL_LINES)
        glVertex3f(-width, 0, offset)
        glVertex3f(width, 0, offset)
        glVertex3f(0, -length, offset)
        glVertex3f(0, length, offset)
        glEnd()
        # Thin grid lines every `step` units.
        glLineWidth(1.0)
        glBegin(GL_LINES)
        for ix in range(-width, width+self.step, self.step):
            glVertex3f(ix, -length, offset)
            glVertex3f(ix, length, offset)
        for iy in range(-length, length+self.step, self.step):
            glVertex3f(-width, iy, offset)
            glVertex3f(width, iy, offset)
        glEnd()
        glEndList()
def _compile_shader_with_error_report(shaderobj):
    """Compile the shader object, raising RuntimeError with the GL info log on failure."""
    glCompileShader(shaderobj)
    compiled_ok = glGetShaderiv(shaderobj, GL_COMPILE_STATUS)
    if not compiled_ok:
        log_text = str(glGetShaderInfoLog(shaderobj), encoding="ascii")
        raise RuntimeError(log_text)
# Display colors (0-255 RGB) for known collision-type groups, keyed by
# coltype >> 8 (see CollisionModel below).
colortypes = {
    0x00: (250, 213, 160),
    0x01: (128, 128, 128),
    0x02: (192, 192, 192),
    0x03: (76, 255, 0),
    0x04: (0, 255, 255),
    0x08: (255, 106, 0),
    0x0C: (250, 213, 160),
    0x0F: (0, 38, 255),
    0x10: (250, 213, 160),
    0x12: (64, 64, 64),
    0x13: (250, 213, 160)
}
# Fallback color for collision types not listed above.
otherwise = (40, 40, 40)
class CollisionModel(object):
def __init__(self, mkdd_collision):
meshes = {}
self.program = None
vertices = mkdd_collision.vertices
self._displists = []
for v1, v2, v3, coltype, rest in mkdd_collision.triangles:
vertex1 = Vector3(*vertices[v1])
vertex1.z = -vertex1.z
vertex2 = Vector3(*vertices[v2])
vertex2.z = -vertex2.z
vertex3 = Vector3(*vertices[v3])
vertex3.z = -vertex3.z
v1tov2 = vertex2 - vertex1
v1tov3 = vertex3 - vertex1
normal = v1tov2.cross(v1tov3)
if normal.norm() != 0.0:
normal.normalize()
if coltype not in meshes:
meshes[coltype] = []
shift = coltype >> 8
if shift in colortypes:
color = colortypes[shift]
else:
color = otherwise
color = (color[0]/255.0, color[1]/255.0, color[2]/255.0)
meshes[coltype].append((vertex1, vertex2, vertex3, normal, color))
self.meshes = meshes
def generate_displists(self):
if self.program is None:
self.create_shaders()
for meshtype, mesh in self.meshes.items():
displist = glGenLists(1)
glNewList(displist, GL_COMPILE)
glBegin(GL_TRIANGLES)
for v1, v2, v3, normal, color in mesh:
glVertexAttrib3f(3, normal.x, normal.y, normal.z)
glVertexAttrib3f(4, *color)
glVertex3f(v1.x, -v1.z, v1.y)
glVertexAttrib3f(3, normal.x, normal.y, normal.z)
glVertexAttrib3f(4, *color)
glVertex3f(v2.x, -v2.z, v2.y)
glVertexAttrib3f(3, normal.x, normal.y, normal.z)
glVertexAttrib3f(4, | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import typing
import numpy as np
from fate_arch.common.base_utils import timestamp_to_date
from fate_arch.computing import is_table
from google.protobuf import json_format
from federatedml.param.evaluation_param import EvaluateParam
from federatedml.protobuf import deserialize_models
from federatedml.statistic.data_overview import header_alignment
from federatedml.util import LOGGER, abnormal_detection
from federatedml.util.io_check import assert_match_id_consistent
from federatedml.util.component_properties import ComponentProperties, RunningFuncs
from federatedml.callbacks.callback_list import CallbackList
from federatedml.feature.instance import Instance
def serialize_models(models):
    """Serialize protobuf model objects.

    Returns a dict mapping each model name to a tuple of the protobuf class
    name, its binary serialization, and a dict rendering (with default-valued
    fields included).
    """
    result: typing.Dict[str, typing.Tuple[str, bytes, dict]] = {}
    for name, pb_message in models.items():
        result[name] = (
            type(pb_message).__name__,
            pb_message.SerializeToString(),
            json_format.MessageToDict(
                pb_message, including_default_value_fields=True
            ),
        )
    return result
class ComponentOutput:
    """Bundle of a component's outputs: data list, models, and cache list.

    Scalars passed for data/cache are wrapped in single-element lists; a
    missing models mapping becomes an empty dict.
    """

    def __init__(self, data, models, cache: typing.List[tuple]) -> None:
        self._data = data if isinstance(data, list) else [data]
        self._models = models if models is not None else {}
        self._cache = cache if isinstance(cache, list) else [cache]

    @property
    def data(self) -> list:
        return self._data

    @property
    def model(self):
        # Serialized lazily so unsaved protobufs are not touched on construction.
        return serialize_models(self._models)

    @property
    def cache(self):
        return self._cache
class MetricType:
    """Namespace of metric-type constants understood by the tracker."""
    LOSS = "LOSS"
class Metric:
    """A single tracked metric point (key/value with an optional timestamp)."""

    def __init__(self, key, value: float, timestamp: float = None):
        self.key = key
        self.value = value
        self.timestamp = timestamp

    def to_dict(self):
        """Return the plain-dict form expected by the tracker API."""
        return {
            "key": self.key,
            "value": self.value,
            "timestamp": self.timestamp,
        }
class MetricMeta:
    """Metadata for a metric curve: its name, type, and accumulated metas."""

    def __init__(self, name: str, metric_type: MetricType, extra_metas: dict = None):
        self.name = name
        self.metric_type = metric_type
        self.metas = {}
        self.extra_metas = extra_metas

    def update_metas(self, metas: dict):
        """Merge the given mapping into the accumulated metas."""
        self.metas.update(metas)

    def to_dict(self):
        """Return the plain-dict form expected by the tracker API."""
        return {
            "name": self.name,
            "metric_type": self.metric_type,
            "metas": self.metas,
            "extra_metas": self.extra_metas,
        }
class CallbacksVariable(object):
    """Mutable state shared between the training loop and its callbacks."""

    def __init__(self):
        # Flipped to True by callbacks (e.g. early stopping) to halt training.
        self.stop_training = False
        # Iteration index of the best result seen so far; -1 means none yet.
        self.best_iteration = -1
        # Summary object produced by the validation strategy, if any.
        self.validation_summary = None
class WarpedTrackerClient:
    """Adapter that converts Metric/MetricMeta objects to dicts before
    delegating to the wrapped raw tracker."""

    def __init__(self, tracker) -> None:
        self._tracker = tracker

    def log_metric_data(
        self, metric_namespace: str, metric_name: str, metrics: typing.List[Metric]
    ):
        serialized = [metric.to_dict() for metric in metrics]
        return self._tracker.log_metric_data(
            metric_namespace=metric_namespace,
            metric_name=metric_name,
            metrics=serialized,
        )

    def set_metric_meta(self, metric_namespace: str, metric_name: str, metric_meta: MetricMeta):
        return self._tracker.set_metric_meta(
            metric_namespace=metric_namespace,
            metric_name=metric_name,
            metric_meta=metric_meta.to_dict(),
        )

    def log_component_summary(self, summary_data: dict):
        return self._tracker.log_component_summary(summary_data=summary_data)
class ModelBase(object):
def __init__(self):
self.model_output = None
self.mode = None
self.role = None
self.data_output = None
self.cache_output = None
self.model_param = None
self.transfer_variable = None
self.flowid = ""
self.task_version_id = ""
self.need_one_vs_rest = False
self.callback_one_vs_rest = False
self.checkpoint_manager = None
self.cv_fold = 0
self.validation_freqs = None
self.component_properties = ComponentProperties()
self._summary = dict()
self._align_cache = dict()
self._tracker = None
self.step_name = 'step_name'
self.callback_list: CallbackList
self.callback_variables = CallbacksVariable()
@property
def tracker(self) -> WarpedTrackerClient:
if self._tracker is None:
raise RuntimeError(f"use tracker before set")
return self._tracker
@tracker.setter
def tracker(self, value):
self._tracker = WarpedTrackerClient(value)
@property
def stop_training(self):
return self.callback_variables.stop_training
@property
def need_cv(self):
return self.component_properties.need_cv
@property
def need_run(self):
return self.component_properties.need_run
@need_run.setter
def need_run(self, value: bool):
self.component_properties.need_run = value
def _init_model(self, model):
pass
def load_model(self, model_dict):
pass
def _parse_need_run(self, model_dict, model_meta_name):
meta_obj = list(model_dict.get("model").values())[0].get(model_meta_name)
need_run = meta_obj.need_run
# self.need_run = need_run
self.component_properties.need_run = need_run
def run(self, cpn_input, retry: bool = True):
self.task_version_id = cpn_input.task_version_id
self.tracker = cpn_input.tracker
self.checkpoint_manager = cpn_input.checkpoint_manager
# deserialize models
deserialize_models(cpn_input.models)
method = (
self._retry
if retry
and self.checkpoint_manager is not None
and self.checkpoint_manager.latest_checkpoint is not None
else self._run
)
method(cpn_input)
return ComponentOutput(self.save_data(), self.export_model(), self.save_cache())
def _run(self, cpn_input):
# paramters
self.model_param.update(cpn_input.parameters)
self.model_param.check()
self.component_properties.parse_component_param(
cpn_input.roles, self.model_param
)
self.role = self.component_properties.role
self.component_properties.parse_dsl_args(cpn_input.datasets, cpn_input.models)
self.component_properties.parse_caches(cpn_input.caches)
# init component, implemented by subclasses
self._init_model(self.model_param)
self.callback_list = CallbackList(self.role, self.mode, self)
if hasattr(self.model_param, "callback_param"):
callback_param = getattr(self.model_param, "callback_param")
self.callback_list.init_callback_list(callback_param)
running_funcs = self.component_properties.extract_running_rules(
datasets=cpn_input.datasets, models=cpn_input.models, cpn=self
)
LOGGER.debug(f"running_funcs: {running_funcs.todo_func_list}")
saved_result = []
for func, params, save_result, use_previews in running_funcs:
# for func, params in zip(todo_func_list, todo_func_params):
if use_previews:
if params:
real_param = [saved_result, params]
else:
real_param = saved_result
LOGGER.debug("func: {}".format(func))
this_data_output = func(*real_param)
saved_result = []
else:
this_data_output = func(*params)
if save_result:
saved_result.append(this_data_output)
if len(saved_result) == 1:
self.data_output = saved_result[0]
# LOGGER.debug("One data: {}".format(self.data_output.first()[1].features))
LOGGER.debug(
"saved_result is : {}, data_output: {}".format(
saved_result, self.data_output
)
)
# self.check_consistency()
self.save_summary()
return ComponentOutput(self.save_data(), self.export_model(), self.save_cache())
def _retry(self, cpn_input):
self.model_param.update(cpn_input.parameters)
self.model_param.check()
self.component_properties.parse_component_param(
cpn_input.roles, self.model_param
)
self.role = self.component_properties.role
self.component_properties.parse_dsl_args(cpn_input.datasets, cpn_input.models)
self.component_properties.parse_caches(cpn_input.caches)
# init component, implemented by subclasses
self._init_model(self.model_param)
self.callback_list = CallbackList(self.role, self.mode, self)
if hasattr(self.model_param, "callback_param"):
callback_param = getattr(self.model_param, "callback_param")
self.callback_list.init_callback_list(callback_param)
train_data, validate_data, test_data, data = self.component_properties.extract_input_data(
datasets=cpn_input.datasets, model=self
)
running_funcs = RunningFuncs()
latest_checkpoint = self.get_latest_checkpoint()
running_funcs.add_func(self.load_model, [latest_checkpoint])
running_funcs = self.component_properties.warm_start_process(
running_funcs, self, train_data, validate_data)
LOGGER.debug(f"running_funcs: {running_funcs.todo_func_list}")
self._execute_running_funcs(running_funcs)
def _execute_running_funcs(self, running_funcs):
saved_result = []
for func, params, save_result, use_previews in running_funcs:
# for func, params in zip(todo_func_list, todo_func_params):
if use_previews:
if params:
real_param = [saved_result, params]
else:
real_param = saved_result
LOGGER.debug("func: {}".format(func))
detected_func = assert_match_id_consistent(func)
this_data_output = detected_func(*real_param)
saved_result = []
else:
detected_func = assert_match_id_consistent(func)
this_data_output = detected_func(*params)
if save_result:
saved_result.append(this_data_output)
if len(saved_result) == 1:
self.data_output = saved_result[0]
LOGGER.debug("saved_result is : {}, data_output: {}".format(saved_result, self.data_output))
self.save_summary()
def export_serialized_models(self):
return serialize_models(self.export_model())
def get_metrics_param(self):
return EvaluateParam(eval_type="binary", pos_label=1)
def check_consistency(self):
if not is_table(self.data_output):
return
if (
self.component_properties.input_data_count
+ self.component_properties.input_eval_data_count
!= self.data_output.count()
and self.component_properties.input_data_count
!= self.component_properties.input_eval_data_count
):
raise ValueError("Input data count does not match with output data count")
def predict(self, data_inst):
pass
def fit(self, *args):
pass
def transform(self, data_inst):
pass
def cross_validation(self, data_inst):
pass
def stepwise(self, data_inst):
pass
def one_vs_rest_fit(self, train_data=None):
pass
def one_vs_rest_predict(self, train_data):
pass
def init_validation_strategy(self, train_data=None, validate_data=None):
pass
def save_data(self):
return self.data_output
def export_model(self):
return self.model_output
def save_cache(self):
return self.cache_output
def set_flowid(self, flowid):
# self.flowid = '.'.join([self.task_version_id, str(flowid)])
self.flowid = flowid
self.set_transfer_variable()
def set_transfer_variable(self):
if self.transfer_variable is not None:
LOGGER.debug(
"set flowid to transfer_variable, flowid: {}".format(self.flowid)
)
self.transfer_variable.set_flowid(self.flowid)
def set_task_version_id(self, task_version_id):
"""task_version_id: jobid + component_name, reserved variable"""
self.task_version_id = task_version_id
def get_metric_name(self, name_prefix):
if not self.need_cv:
return name_prefix
return "_".join(map(str, [name_prefix, self.flowid]))
def set_tracker(self, tracker):
self._tracker = tracker
def set_checkpoint_manager(self, checkpoint_manager):
checkpoint_manager.load_checkpoints_from_disk()
self.checkpoint_manager = checkpoint_manager
@staticmethod
def set_predict_data_schema(predict_datas, schemas):
if predict_datas is None:
return predict_datas
if isinstance(predict_datas, list):
predict_data = predict_datas[0]
schema = schemas[0]
else:
predict_data = predict_datas
schema = schemas
if predict_data is not None:
predict_data.schema = {
"header": [
"label",
"predict_result",
"predict_score",
"predict_detail",
"type",
],
"sid_name": schema.get("sid_name"),
"content_type": "predict_result"
}
return predict_data
@staticmethod
def predict_score_to_output(
data_instances, predict_score, classes=None, threshold=0.5
):
"""
Get predict result output
Parameters
----------
data_instances: table, data used for prediction
predict_score: table, probability scores
classes: list or None, all classes/label names
threshold: float, predict threshold, used for binary label
Returns
-------
Table, predict result
"""
# regression
if classes is None:
predict_result = data_instances.join(
predict_score, lambda d, pred: [d.label, pred, pred, {"label": pred}]
)
# binary
elif isinstance(classes, list) and len(classes) == 2:
class_neg, class_pos = classes[0], classes[1]
pred_label = predict_score.mapValues(
lambda x: class_pos if x > threshold else class_neg
)
predict_result = data_instances.mapValues(lambda x: x.label)
predict_result = predict_result.join(predict_score, lambda x, y: (x, y))
class_neg_name, class_pos_name = str(class_neg), str(class_pos)
predict_result = predict_result.join(
pred_label,
lambda x, y: [
x[0],
y,
x[1],
{class_neg_name: (1 - x[1]), class_pos_name: x[1]},
],
)
# multi-label: input = array of predicted score of all labels
elif isinstance(classes, list) and len(classes) > 2:
# pred_label = predict_score.mapValues(lambda x: classes[x.index(max(x))])
classes = [str(val) for val in classes]
predict_result = data_instances.mapValues(lambda x: x.label)
predict_result = predict_result.join(
predict_score,
lambda x, y: [
x,
int(classes[np.argmax(y)]),
float(np.max(y)),
dict(zip(classes, list(y))),
],
)
else:
raise ValueError(
f"Model's classes type is {type(classes)}, classes must be None or list of length no less than 2."
)
def _transfer(instance, pred_res):
return Instance(features=pred_res, inst_id=instance.inst_id)
predict_result = data_instances.join(predict_result, _transfer)
return predict_result
def callback_meta(self, metric_name, metric_namespace, metric_meta: MetricMeta):
if self.need_cv:
metric_name = ".".join([metric_name, str(self.cv_fold)])
flow_id_list = self.flowid.split(".")
LOGGER.debug(
"Need cv, change callback_meta, flow_id_list: {}".format(flow_id_list)
)
if len(flow_id_list) > 1:
curve_name = ".".join(flow_id_list[1:])
metric_meta.update_metas({"curve_name": curve_name})
else:
metric_meta.update_metas({"curve_name": metric_name})
self.tracker.set_metric_meta(
metric_name=metric_name,
metric_namespace=metric_namespace,
metric_meta=metric_meta,
)
    def callback_metric(self, metric_name, metric_namespace, metric_data: typing.List[Metric]):
        # Log metric data points through the tracker; for CV runs the metric
        # name is suffixed with the fold index so folds are tracked separately.
        if self.need_cv:
            metric_name = ".".join([metric_name, str(self.cv_fold)])
        self.tracker.log_metric_data(
            metric_name=metric_name,
            metric_namespace=metric_namespace,
            metrics=metric_data,
        )
def callback_warm_start_init_iter(self, iter_num):
metric_meta = MetricMeta(name='train',
metric_type="init_iter",
extra_metas={
"unit_name": "iters",
})
self.callback_meta(metric_name='init_iter', metric_namespace='train', metric_meta=metric_meta)
self.callback_metric(metric_name='init_iter',
metric_namespace='train',
metric_data=[Metric("init_iter", iter_num)])
    def get_latest_checkpoint(self):
        # Read back the most recently saved model checkpoint.
        return self.checkpoint_manager.latest_checkpoint.read()
    def save_summary(self):
        # Persist the component summary through the tracker.
        self.tracker.log_component_summary(summary_data=self.summary())
    def set_cv_fold(self, cv_fold):
        # Remember which CV fold this instance runs (used by the callbacks above).
        self.cv_fold = cv_fold
    def summary(self):
        # Return a deep copy so callers cannot mutate internal summary state.
        return copy.deepcopy(self._summary)
| |
<filename>source/strategy/strategy_base.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from abc import ABC, ABCMeta
from typing import Any, Callable
from copy import copy
from ..common.constant import (
Interval,
OrderFlag, OrderType, Offset, Direction
)
from ..common.datastruct import (
TickData, BarData,
OrderRequest, OrderData,
CtpOrderField, PaperOrderField
)
from ..common.sqglobal import dotdict
from ..common.utility import extract_full_symbol, virtual
from ..api.ctp_constant import (
THOST_FTDC_D_Buy,
THOST_FTDC_D_Sell,
THOST_FTDC_OPT_LimitPrice,
THOST_FTDC_OPT_AnyPrice,
THOST_FTDC_OF_Open,
THOST_FTDC_OF_Close,
THOST_FTDC_HF_Speculation,
THOST_FTDC_CC_Immediately,
THOST_FTDC_TC_GFD,
THOST_FTDC_VC_AV,
THOST_FTDC_TC_IOC,
THOST_FTDC_VC_CV
)
class StrategyBase(metaclass=ABCMeta):
    """
    Base strategy class
    """
    # class id and name
    ID = -1
    NAME = "base"
    # for rqquant use
    context = dotdict()
    # parameters and variables,for vnpy use
    author = ""
    account = ""
    api = ""  # trading api identifier, e.g. "CTP.TD" or "PAPER.TD"
    autostart = False
    # names listed here are settable through update_setting()
    parameters = ["api", "account"]
    # names listed here are reported by get_variables()
    variables = []
def __init__(self,
strategy_engine: Any,
strategy_name: str,
full_symbol: str,
setting: dict):
"""
initialize trategy
:param
:variables
"""
self.full_symbol = full_symbol
sym, ex = extract_full_symbol(self.full_symbol)
self.symbol = sym
self.strategy_engine = strategy_engine
self.engine_id = self.strategy_engine.id
self.strategy_name = strategy_name
self.active = False
self.inited = False
self.trading = False
self.pos = 0
self.long_pos = 0
self.long_pos_frozen = 0
self.short_pos = 0
self.short_pos_frozen = 0
self.long_price = 0.0
self.short_price = 0.0
# Copy a new variables list here to avoid duplicate insert when multiple
# strategy instances are created with the same strategy class.
self.variables = copy(self.variables)
self.variables.insert(0, "inited")
self.variables.insert(1, "trading")
self.variables.insert(2, "pos")
self.update_setting(setting)
self.add_functions()
def add_functions(self):
self.get_position_holding = self.strategy_engine.get_position_holding
self.get_account = self.strategy_engine.get_account
self.get_order = self.strategy_engine.get_order
self.get_tick = self.strategy_engine.get_tick
self.get_trade = self.strategy_engine.get_trade
self.get_position = self.strategy_engine.get_position
self.get_contract = self.strategy_engine.get_contract
self.get_all_active_orders = self.strategy_engine.get_all_active_orders
def get_my_active_orderids(self):
oidset = self.strategy_engine.get_strategy_active_orderids(
self.strategy_name)
return oidset
def get_my_position_holding(self):
holding = self.get_position_holding(self.account, self.full_symbol)
self.long_pos = holding.long_pos
self.long_pos_frozen = holding.long_pos_frozen
self.short_pos = holding.short_pos
self.short_pos_frozen = holding.short_pos_frozen
self.long_price = holding.long_price
self.short_price = holding.short_price
def update_setting(self, setting: dict):
"""
Update strategy parameter wtih value in setting dict.
"""
for name in self.parameters:
if name in setting:
setattr(self, name, setting[name])
@classmethod
def get_class_parameters(cls):
"""
Get default parameters dict of strategy class.
"""
class_parameters = {}
for name in cls.parameters:
class_parameters[name] = getattr(cls, name)
return class_parameters
def get_parameters(self):
"""
Get strategy parameters dict.
"""
strategy_parameters = {}
for name in self.parameters:
strategy_parameters[name] = getattr(self, name)
return strategy_parameters
def get_variables(self):
"""
Get strategy variables dict.
"""
strategy_variables = {}
for name in self.variables:
strategy_variables[name] = getattr(self, name)
return strategy_variables
def get_data(self):
"""
Get strategy data.
"""
strategy_data = {
"engine_id": self.engine_id,
"strategy_name": self.strategy_name,
"full_symbol": self.full_symbol,
"class_name": self.__class__.__name__,
"author": self.author,
"parameters": self.get_parameters(),
"variables": self.get_variables(),
}
return strategy_data
    # ------------------------------------------------------------------
    # Virtual event callbacks: subclasses override these to react to
    # market data, order lifecycle and account events.
    # ------------------------------------------------------------------
    @virtual
    def on_init(self, params_dict=None):
        """Called once before the strategy starts; override to initialize."""
        pass
    @virtual
    def on_start(self):
        """Called when the strategy is started."""
        self.active = True
    @virtual
    def on_stop(self):
        """Called when the strategy is stopped."""
        self.active = False
    @virtual
    def on_reset(self):
        """Called to reset strategy state."""
        pass
    @virtual
    def on_tick(self, tick):
        """
        Respond to tick
        """
        pass
    @virtual
    def on_bar(self, bar):
        """
        Respond to bar
        """
        pass
    @virtual
    def on_order_status(self, order):
        """
        on order acknowledged
        :return:
        """
        #raise NotImplementedError("Should implement on_order()")
        pass
    @virtual
    def on_order(self, order):
        """
        on order
        :return:
        """
        self.on_order_status(order)
    @virtual
    def on_cancel(self, event):
        """
        on order canceled
        :return:
        """
        pass
    @virtual
    def on_fill(self, trade):
        """
        on order filled
        :return:
        """
        pass
    def on_trade(self, trade):
        # Alias for engines that emit "trade" events; delegates to on_fill.
        self.on_fill(trade)
    @virtual
    def on_pos(self, position):
        """Called on position update."""
        pass
    @virtual
    def on_acc(self, acc):
        """Called on account update."""
        pass
    @virtual
    def on_contract(self, contract):
        """Called on contract/instrument info update."""
        pass
    @virtual
    def on_info(self, info):
        """Called on general info message."""
        pass
    @virtual
    def on_req(self, req):
        """Called on incoming request message."""
        pass
    @virtual
    def on_headermsg(self, event):
        """Called on header message."""
        pass
    @virtual
    def on_stop_order(self, stop_order):
        """
        Callback of stop order update.
        """
        pass
    def cancel_order(self, oid):
        # Cancel a single order by id; ignored unless the strategy is trading.
        if self.trading:
            self.strategy_engine.cancel_order(self, oid)
    def cancel_all(self):
        """
        cancel all standing orders from this strategy
        """
        # no-op unless the strategy is actively trading
        if self.trading:
            self.strategy_engine.cancel_all(self)
    # Convenience order wrappers (rqquant-style API): buy/sell, open/close,
    # each supporting market ('mkt'), limit ('lmt'), FAK and FOK order types.
def buy_open(self, price: float, size: int, type='lmt'):
if not self.trading:
return
if (type == 'mkt'):
if self.api == "CTP.TD":
of = CtpOrderField(
InstrumentID=self.symbol,
OrderPriceType=THOST_FTDC_OPT_AnyPrice,
LimitPrice=price,
Direction=THOST_FTDC_D_Buy,
CombOffsetFlag=THOST_FTDC_OF_Open,
CombHedgeFlag=THOST_FTDC_HF_Speculation,
VolumeTotalOriginal=size,
TimeCondition=THOST_FTDC_TC_GFD,
VolumeCondition=THOST_FTDC_VC_AV,
MinVolume=1,
ContingentCondition=THOST_FTDC_CC_Immediately
)
order = OrderRequest(
api="CTP.TD",
account=self.account,
symbol=self.symbol,
full_symbol=self.full_symbol,
price=price,
volume=size,
offset=Offset.OPEN,
type=OrderType.MKT,
direction=Direction.LONG,
orderfield=of
)
self.strategy_engine.send_order(self, order)
elif self.api == "PAPER.TD":
of = PaperOrderField(
order_type=OrderType.MKT,
full_symbol=self.full_symbol,
order_flag=OrderFlag.OPEN,
order_size=size
)
order = OrderRequest(
api="PAPER.TD",
account=self.account,
symbol=self.symbol,
full_symbol=self.full_symbol,
price=price,
volume=size,
offset=Offset.OPEN,
type=OrderType.MKT,
direction=Direction.LONG,
orderfield=of
)
self.strategy_engine.send_order(self, order)
elif (type == 'lmt'):
if self.api == "CTP.TD":
of = CtpOrderField(
InstrumentID=self.symbol,
OrderPriceType=THOST_FTDC_OPT_LimitPrice,
LimitPrice=price,
Direction=THOST_FTDC_D_Buy,
CombOffsetFlag=THOST_FTDC_OF_Open,
CombHedgeFlag=THOST_FTDC_HF_Speculation,
VolumeTotalOriginal=size,
TimeCondition=THOST_FTDC_TC_GFD,
VolumeCondition=THOST_FTDC_VC_AV,
MinVolume=1,
ContingentCondition=THOST_FTDC_CC_Immediately
)
order = OrderRequest(
api="CTP.TD",
account=self.account,
symbol=self.symbol,
full_symbol=self.full_symbol,
price=price,
volume=size,
offset=Offset.OPEN,
type=OrderType.LMT,
direction=Direction.LONG,
orderfield=of
)
self.strategy_engine.send_order(self, order)
elif self.api == "PAPER.TD":
of = PaperOrderField(
order_type=OrderType.LMT,
limit_price=price,
full_symbol=self.full_symbol,
order_flag=OrderFlag.OPEN,
order_size=size
)
order = OrderRequest(
api="PAPER.TD",
account=self.account,
symbol=self.symbol,
full_symbol=self.full_symbol,
price=price,
volume=size,
offset=Offset.OPEN,
type=OrderType.LMT,
direction=Direction.LONG,
orderfield=of
)
self.strategy_engine.send_order(self, order)
elif (type == 'fak'):
if self.api == "CTP.TD":
of = CtpOrderField(
InstrumentID=self.symbol,
OrderPriceType=THOST_FTDC_OPT_LimitPrice,
LimitPrice=price,
Direction=THOST_FTDC_D_Buy,
CombOffsetFlag=THOST_FTDC_OF_Open,
CombHedgeFlag=THOST_FTDC_HF_Speculation,
VolumeTotalOriginal=size,
TimeCondition=THOST_FTDC_TC_IOC,
VolumeCondition=THOST_FTDC_VC_AV,
MinVolume=1,
ContingentCondition=THOST_FTDC_CC_Immediately
)
order = OrderRequest(
api="CTP.TD",
account=self.account,
symbol=self.symbol,
full_symbol=self.full_symbol,
price=price,
volume=size,
offset=Offset.OPEN,
type=OrderType.FAK,
direction=Direction.LONG,
orderfield=of
)
self.strategy_engine.send_order(self, order)
elif self.api == "PAPER.TD":
of = PaperOrderField(
order_type=OrderType.FAK,
limit_price=price,
full_symbol=self.full_symbol,
order_flag=OrderFlag.OPEN,
order_size=size
)
order = OrderRequest(
api="PAPER.TD",
account=self.account,
symbol=self.symbol,
full_symbol=self.full_symbol,
price=price,
volume=size,
offset=Offset.OPEN,
type=OrderType.FAK,
direction=Direction.LONG,
orderfield=of
)
self.strategy_engine.send_order(self, order)
elif (type == 'fok'):
if self.api == "CTP.TD":
of = CtpOrderField(
InstrumentID=self.symbol,
OrderPriceType=THOST_FTDC_OPT_LimitPrice,
LimitPrice=price,
Direction=THOST_FTDC_D_Buy,
CombOffsetFlag=THOST_FTDC_OF_Open,
CombHedgeFlag=THOST_FTDC_HF_Speculation,
VolumeTotalOriginal=size,
TimeCondition=THOST_FTDC_TC_IOC,
VolumeCondition=THOST_FTDC_VC_CV,
MinVolume=1,
ContingentCondition=THOST_FTDC_CC_Immediately
)
order = OrderRequest(
api="CTP.TD",
account=self.account,
symbol=self.symbol,
full_symbol=self.full_symbol,
price=price,
volume=size,
offset=Offset.OPEN,
type=OrderType.FOK,
direction=Direction.LONG,
orderfield=of
)
self.strategy_engine.send_order(self, order)
elif self.api == "PAPER.TD":
of = PaperOrderField(
order_type=OrderType.FOK,
limit_price=price,
full_symbol=self.full_symbol,
order_flag=OrderFlag.OPEN,
order_size=size
)
order = OrderRequest(
api="PAPER.TD",
account=self.account,
symbol=self.symbol,
full_symbol=self.full_symbol,
price=price,
volume=size,
offset=Offset.OPEN,
type=OrderType.FOK,
direction=Direction.LONG,
orderfield=of
)
self.strategy_engine.send_order(self, order)
else:
print('order type not supported!')
def buy_close(self, price: float, size: int, type='lmt'):
if not self.trading:
return
if (type == 'mkt'):
if self.api == "CTP.TD":
of = CtpOrderField(
InstrumentID=self.symbol,
OrderPriceType=THOST_FTDC_OPT_AnyPrice,
LimitPrice=price,
Direction=THOST_FTDC_D_Buy,
CombOffsetFlag=THOST_FTDC_OF_Close,
CombHedgeFlag=THOST_FTDC_HF_Speculation,
VolumeTotalOriginal=size,
TimeCondition=THOST_FTDC_TC_GFD,
VolumeCondition=THOST_FTDC_VC_AV,
MinVolume=1,
ContingentCondition=THOST_FTDC_CC_Immediately
)
order = OrderRequest(
api="CTP.TD",
account=self.account,
symbol=self.symbol,
full_symbol=self.full_symbol,
price=price,
volume=size,
offset=Offset.CLOSE,
type=OrderType.MKT,
direction=Direction.LONG,
orderfield=of
)
self.strategy_engine.send_order(self, order)
elif self.api == "PAPER.TD":
of = PaperOrderField(
order_type=OrderType.MKT,
full_symbol=self.full_symbol,
order_flag=OrderFlag.CLOSE,
order_size=size
)
order = OrderRequest(
api="PAPER.TD",
account=self.account,
symbol=self.symbol,
full_symbol=self.full_symbol,
price=price,
volume=size,
offset=Offset.CLOSE,
type=OrderType.MKT,
direction=Direction.LONG,
orderfield=of
)
self.strategy_engine.send_order(self, order)
elif (type == 'lmt'):
if self.api == "CTP.TD":
of = CtpOrderField(
InstrumentID=self.symbol,
OrderPriceType=THOST_FTDC_OPT_LimitPrice,
LimitPrice=price,
Direction=THOST_FTDC_D_Buy,
CombOffsetFlag=THOST_FTDC_OF_Close,
CombHedgeFlag=THOST_FTDC_HF_Speculation,
VolumeTotalOriginal=size,
TimeCondition=THOST_FTDC_TC_GFD,
VolumeCondition=THOST_FTDC_VC_AV,
MinVolume=1,
ContingentCondition=THOST_FTDC_CC_Immediately
)
order = OrderRequest(
api="CTP.TD",
account=self.account,
symbol=self.symbol,
full_symbol=self.full_symbol,
price=price,
volume=size,
offset=Offset.CLOSE,
type=OrderType.LMT,
direction=Direction.LONG,
orderfield=of
)
self.strategy_engine.send_order(self, order)
elif self.api == "PAPER.TD":
of = PaperOrderField(
order_type=OrderType.LMT,
limit_price=price,
full_symbol=self.full_symbol,
order_flag=OrderFlag.CLOSE,
order_size=size
)
order = OrderRequest(
api="PAPER.TD",
account=self.account,
symbol=self.symbol,
full_symbol=self.full_symbol,
price=price,
volume=size,
offset=Offset.CLOSE,
type=OrderType.LMT,
direction=Direction.LONG,
orderfield=of
)
self.strategy_engine.send_order(self, order)
elif (type == 'fak'):
if self.api == "CTP.TD":
of = CtpOrderField(
InstrumentID=self.symbol,
OrderPriceType=THOST_FTDC_OPT_LimitPrice,
LimitPrice=price,
Direction=THOST_FTDC_D_Buy,
CombOffsetFlag=THOST_FTDC_OF_Close,
CombHedgeFlag=THOST_FTDC_HF_Speculation,
VolumeTotalOriginal=size,
TimeCondition=THOST_FTDC_TC_IOC,
VolumeCondition=THOST_FTDC_VC_AV,
MinVolume=1,
ContingentCondition=THOST_FTDC_CC_Immediately
)
order = OrderRequest(
api="CTP.TD",
account=self.account,
symbol=self.symbol,
full_symbol=self.full_symbol,
price=price,
volume=size,
offset=Offset.CLOSE,
type=OrderType.FAK,
direction=Direction.LONG,
orderfield=of
)
self.strategy_engine.send_order(self, order)
elif self.api == "PAPER.TD":
of = PaperOrderField(
order_type=OrderType.FAK,
limit_price=price,
full_symbol=self.full_symbol,
order_flag=OrderFlag.CLOSE,
order_size=size
)
order = OrderRequest(
api="PAPER.TD",
account=self.account,
symbol=self.symbol,
full_symbol=self.full_symbol,
price=price,
volume=size,
offset=Offset.CLOSE,
type=OrderType.FAK,
direction=Direction.LONG,
orderfield=of
)
self.strategy_engine.send_order(self, order)
elif (type == 'fok'):
if self.api == "CTP.TD":
of = CtpOrderField(
InstrumentID=self.symbol,
OrderPriceType=THOST_FTDC_OPT_LimitPrice,
LimitPrice=price,
Direction=THOST_FTDC_D_Buy,
CombOffsetFlag=THOST_FTDC_OF_Close,
CombHedgeFlag=THOST_FTDC_HF_Speculation,
VolumeTotalOriginal=size,
TimeCondition=THOST_FTDC_TC_IOC,
VolumeCondition=THOST_FTDC_VC_CV,
MinVolume=1,
ContingentCondition=THOST_FTDC_CC_Immediately
)
order = OrderRequest(
api="CTP.TD",
account=self.account,
symbol=self.symbol,
full_symbol=self.full_symbol,
price=price,
volume=size,
offset=Offset.CLOSE,
type=OrderType.FOK,
direction=Direction.LONG,
orderfield=of
)
self.strategy_engine.send_order(self, order)
elif self.api == "PAPER.TD":
of = PaperOrderField(
order_type=OrderType.FOK,
limit_price=price,
full_symbol=self.full_symbol,
order_flag=OrderFlag.CLOSE,
order_size=size
)
order = OrderRequest(
api="PAPER.TD",
account=self.account,
symbol=self.symbol,
full_symbol=self.full_symbol,
price=price,
volume=size,
offset=Offset.CLOSE,
type=OrderType.FOK,
direction=Direction.LONG,
orderfield=of
)
self.strategy_engine.send_order(self, order)
else:
print('order type not supported!')
def sell_open(self, price: float, size: int, type='lmt'):
if not self.trading:
return
if (type == 'mkt'):
if self.api == "CTP.TD":
of = CtpOrderField(
InstrumentID=self.symbol,
OrderPriceType=THOST_FTDC_OPT_AnyPrice,
LimitPrice=price,
Direction=THOST_FTDC_D_Sell,
CombOffsetFlag=THOST_FTDC_OF_Open,
CombHedgeFlag=THOST_FTDC_HF_Speculation,
VolumeTotalOriginal=size,
TimeCondition=THOST_FTDC_TC_GFD,
VolumeCondition=THOST_FTDC_VC_AV,
MinVolume=1,
ContingentCondition=THOST_FTDC_CC_Immediately
)
order = OrderRequest(
api="CTP.TD",
account=self.account,
symbol=self.symbol,
full_symbol=self.full_symbol,
price=price,
volume=size,
offset=Offset.OPEN,
type=OrderType.MKT,
direction=Direction.SHORT,
orderfield=of
)
self.strategy_engine.send_order(self, order)
elif self.api == "PAPER.TD":
of = PaperOrderField(
order_type=OrderType.MKT,
full_symbol=self.full_symbol,
order_flag=OrderFlag.OPEN,
order_size=size * (-1)
)
order = OrderRequest(
api="PAPER.TD",
account=self.account,
symbol=self.symbol,
full_symbol=self.full_symbol,
price=price,
volume=size,
offset=Offset.OPEN,
type=OrderType.MKT,
direction=Direction.SHORT,
orderfield=of
)
self.strategy_engine.send_order(self, order)
elif (type == 'lmt'):
if self.api == "CTP.TD":
of = CtpOrderField(
InstrumentID=self.symbol,
OrderPriceType=THOST_FTDC_OPT_LimitPrice,
LimitPrice=price,
Direction=THOST_FTDC_D_Sell,
CombOffsetFlag=THOST_FTDC_OF_Open,
CombHedgeFlag=THOST_FTDC_HF_Speculation,
VolumeTotalOriginal=size,
TimeCondition=THOST_FTDC_TC_GFD,
VolumeCondition=THOST_FTDC_VC_AV,
MinVolume=1,
ContingentCondition=THOST_FTDC_CC_Immediately
)
order = OrderRequest(
api="CTP.TD",
account=self.account,
symbol=self.symbol,
full_symbol=self.full_symbol,
price=price,
volume=size,
offset=Offset.OPEN,
type=OrderType.LMT,
direction=Direction.SHORT,
orderfield=of
)
self.strategy_engine.send_order(self, order)
elif self.api == "PAPER.TD":
of = PaperOrderField(
order_type=OrderType.LMT,
limit_price=price,
full_symbol=self.full_symbol,
order_flag=OrderFlag.OPEN,
order_size=size * (-1)
)
order = OrderRequest(
api="PAPER.TD",
account=self.account,
symbol=self.symbol,
full_symbol=self.full_symbol,
price=price,
volume=size,
offset=Offset.OPEN,
type=OrderType.LMT,
direction=Direction.SHORT,
orderfield=of
)
self.strategy_engine.send_order(self, order)
elif (type == 'fak'):
if self.api == | |
as pd\n"
)
f.write("import pyemu\n")
for ex_imp in self.extra_py_imports:
f.write("import {0}\n".format(ex_imp))
for func_lines in self._function_lines_list:
f.write("\n")
f.write("# function added thru PstFrom.add_py_function()\n")
for func_line in func_lines:
f.write(func_line)
f.write("\n")
f.write("def main():\n")
f.write("\n")
s = " "
for tmp_file in self.tmp_files:
f.write(s + "try:\n")
f.write(s + " os.remove(r'{0}')\n".format(tmp_file))
f.write(s + "except Exception as e:\n")
f.write(
s + " print(r'error removing tmp file:{0}')\n".format(tmp_file)
)
for line in self.pre_py_cmds:
f.write(s + line + "\n")
for line in self.mod_py_cmds:
f.write(s + line + "\n")
for line in self.post_py_cmds:
f.write(s + line + "\n")
f.write("\n")
f.write("if __name__ == '__main__':\n")
f.write(" mp.freeze_support()\n main()\n\n")
def _pivot_par_struct_dict(self):
struct_dict = {}
for gs, gps in self.par_struct_dict.items():
par_dfs = []
for _, l in gps.items():
df = pd.concat(l)
if "timedelta" in df.columns:
df.loc[:, "y"] = 0 #
df.loc[:, "x"] = df.timedelta.apply(lambda x: x.days)
par_dfs.append(df)
struct_dict[gs] = par_dfs
return struct_dict
def build_prior(
self, fmt="ascii", filename=None, droptol=None, chunk=None, sigma_range=6
):
"""Build the prior parameter covariance matrix
Args:
fmt (`str`): the file format to save to. Default is "ASCII", can be "binary", "coo", or "none"
filename (`str`): the filename to save the cov to
droptol (`float`): absolute value of prior cov entries that are smaller than `droptol` are treated as
zero.
chunk (`int`): number of entries to write to binary/coo at once. Default is None (write all elements at once
sigma_range (`int`): number of standard deviations represented by parameter bounds. Default is 6 (99%
confidence). 4 would be approximately 95% confidence bounds
Returns:
`pyemu.Cov`: the prior parameter covariance matrix
Note:
This method processes parameters by group names
For really large numbers of parameters (>30K), this method
will cause memory errors. Luckily, in most cases, users
only want this matrix to generate a prior parameter ensemble
and the `PstFrom.draw()` is a better choice...
"""
struct_dict = self._pivot_par_struct_dict()
self.logger.log("building prior covariance matrix")
if len(struct_dict) > 0:
cov = pyemu.helpers.geostatistical_prior_builder(
self.pst, struct_dict=struct_dict, sigma_range=sigma_range
)
else:
cov = pyemu.Cov.from_parameter_data(self.pst, sigma_range=sigma_range)
if filename is None:
filename = self.pst.filename.with_suffix(".prior.cov")
if fmt != "none":
self.logger.statement(
"saving prior covariance matrix to file {0}".format(filename)
)
if fmt == "ascii":
cov.to_ascii(filename)
elif fmt == "binary":
cov.to_binary(filename, droptol=droptol, chunk=chunk)
elif fmt == "uncfile":
cov.to_uncfile(filename)
elif fmt == "coo":
cov.to_coo(filename, droptol=droptol, chunk=chunk)
self.logger.log("building prior covariance matrix")
return cov
    def draw(self, num_reals=100, sigma_range=6, use_specsim=False, scale_offset=True):
        """Draw a parameter ensemble from the distribution implied by the initial parameter values in the
        control file and the prior parameter covariance matrix.

        Args:
            num_reals (`int`): the number of realizations to draw
            sigma_range (`int`): number of standard deviations represented by parameter bounds. Default is 6 (99%
                confidence). 4 would be approximately 95% confidence bounds
            use_specsim (`bool`): flag to use spectral simulation for grid-scale pars (highly recommended).
                Default is False
            scale_offset (`bool`): flag to apply scale and offset to parameter bounds before calculating prior variance.
                Default is True. If you are using non-default scale and/or offset and you get an exception during
                draw, try changing this value to False.

        Returns:
            `pyemu.ParameterEnsemble`: a prior parameter ensemble

        Note:
            This method draws by parameter group

            If you are using grid-style parameters, please use spectral simulation (`use_specsim=True`)
        """
        self.logger.log("drawing realizations")
        # nothing to draw if every parameter is fixed/tied
        if self.pst.npar_adj == 0:
            self.logger.warn("no adjustable parameters, nothing to draw...")
            return
        # precondition {geostruct:{group:df}} dict to {geostruct:[par_dfs]}
        struct_dict = self._pivot_par_struct_dict()
        # list for holding grid style groups
        gr_pe_l = []
        if use_specsim:
            # spectral simulation only works on a regular (uniform-spacing) grid
            if not pyemu.geostats.SpecSim2d.grid_is_regular(
                self.spatial_reference.delr, self.spatial_reference.delc
            ):
                self.logger.lraise(
                    "draw() error: can't use spectral simulation with irregular grid"
                )
            self.logger.log("spectral simulation for grid-scale pars")
            # loop over geostructures defined in PestFrom object
            # (setup through add_parameters)
            for geostruct, par_df_l in struct_dict.items():
                par_df = pd.concat(par_df_l)  # force to single df
                par_df = par_df.loc[par_df.partype == "grid", :]
                if "i" in par_df.columns:  # need 'i' and 'j' for specsim
                    grd_p = pd.notna(par_df.i)
                else:
                    grd_p = np.array([0])
                # if there are grid pars (also grid pars with i,j info)
                if grd_p.sum() > 0:
                    # select pars to use specsim for
                    gr_df = par_df.loc[grd_p]
                    gr_df = gr_df.astype({"i": int, "j": int})  # make sure int
                    # (won't be if there were nans in concatenated df)
                    if len(gr_df) > 0:
                        # get specsim object for this geostruct
                        ss = pyemu.geostats.SpecSim2d(
                            delx=self.spatial_reference.delr,
                            dely=self.spatial_reference.delc,
                            geostruct=geostruct,
                        )
                        # specsim draw (returns df)
                        gr_pe1 = ss.grid_par_ensemble_helper(
                            pst=self.pst,
                            gr_df=gr_df,
                            num_reals=num_reals,
                            sigma_range=sigma_range,
                            logger=self.logger,
                        )
                        # append to list of specsim drawn pars
                        gr_pe_l.append(gr_pe1)
                        # rebuild struct_dict entry for this geostruct
                        # to not include specsim pars
                        struct_dict[geostruct] = []
                        # loop over all in list associated with geostruct
                        for p_df in par_df_l:
                            # if pars are not in the specsim pars just created
                            # assign them to this struct_dict entry
                            # needed if none specsim pars are linked to same geostruct
                            if not p_df.index.isin(gr_df.index).all():
                                struct_dict[geostruct].append(p_df)
            self.logger.log("spectral simulation for grid-scale pars")
        # draw remaining pars based on their geostruct
        self.logger.log("Drawing non-specsim pars")
        pe = pyemu.helpers.geostatistical_draws(
            self.pst,
            struct_dict=struct_dict,
            num_reals=num_reals,
            sigma_range=sigma_range,
            scale_offset=scale_offset,
        )
        self.logger.log("Drawing non-specsim pars")
        if len(gr_pe_l) > 0:
            # merge the specsim-drawn grid pars into the geostatistical ensemble
            gr_par_pe = pd.concat(gr_pe_l, axis=1)
            pe.loc[:, gr_par_pe.columns] = gr_par_pe.values
        # par_ens = pyemu.ParameterEnsemble(pst=self.pst, df=pe)
        self.logger.log("drawing realizations")
        return pe
    def build_pst(self, filename=None, update=False, version=1):
        """Build control file from i/o files in PstFrom object.
        Warning: This builds a pest control file from scratch, overwriting
        anything already in self.pst object and anything already written to `filename`

        Args:
            filename (`str`): the filename to save the control file to.
                If None, the name is formed from the `PstFrom.original_d`
                ,the orginal directory name from which the forward model
                was extracted. Default is None.
                The control file is saved in the `PstFrom.new_d` directory.
            update (`bool`) or (str): flag to add to existing Pst object and
                rewrite. If string {'pars', 'obs'} just update respective
                components of Pst. Default is False - build from PstFrom
                components.
            version (`int`): control file version to write, Default is 1

        Note:
            This builds a pest control file from scratch, overwriting anything already
            in self.pst object and anything already written to `filename`

            The new pest control file is assigned an NOPTMAX value of 0
        """
        par_data_cols = pyemu.pst_utils.pst_config["par_fieldnames"]
        obs_data_cols = pyemu.pst_utils.pst_config["obs_fieldnames"]
        if update:
            # can only update an already-initialised Pst
            if self.pst is None:
                self.logger.warn(
                    "Can't update Pst object not initialised. "
                    "Setting update to False"
                )
                update = False
            else:
                if filename is None:
                    filename = get_filepath(self.new_d, self.pst.filename)
        else:
            if filename is None:
                filename = Path(self.new_d, self.original_d.name).with_suffix(".pst")
        filename = get_filepath(self.new_d, filename)
        # if os.path.dirname(filename) in ["", "."]:
        #     filename = os.path.join(self.new_d, filename)
        if update:
            pst = self.pst
            # normalize `update` into a {component: flag} dict
            if update is True:
                update = {"pars": False, "obs": False}
            elif isinstance(update, str):
                update = {update: True}
            elif isinstance(update, (set, list)):
                update = {s: True for s in update}
            uupdate = True
        else:
            update = {"pars": False, "obs": False}
            uupdate = False
            pst = pyemu.Pst(filename, load=False)
        if "pars" in update.keys() or not uupdate:
            if len(self.par_dfs) > 0:
                # parameter data from object
                par_data = pd.concat(self.par_dfs).loc[:, par_data_cols]
                # info relating parameter multiplier files to model input files
                parfile_relations = self.parfile_relations
                parfile_relations.to_csv(self.new_d / "mult2model_info.csv")
                # make sure the runtime par-to-model-file mapping is applied
                # before any user pre-processing commands
                if not any(
                    ["apply_list_and_array_pars" in s for s in self.pre_py_cmds]
                ):
                    self.pre_py_cmds.insert(
                        0,
                        "pyemu.helpers.apply_list_and_array_pars("
                        "arr_par_file='mult2model_info.csv',chunk_len={0})".format(
                            self.chunk_len
                        ),
                    )
            else:
                # no parameters: build an empty, correctly-typed par dataframe
                par_data = pyemu.pst_utils._populate_dataframe(
                    [], pst.par_fieldnames, pst.par_defaults, pst.par_dtype
                )
            pst.parameter_data = par_data
            # pst.template_files = self.tpl_filenames
            # pst.input_files = self.input_filenames
            pst.model_input_data = pd.DataFrame(
                {"pest_file": self.tpl_filenames, "model_file": self.input_filenames},
                index=self.tpl_filenames,
            )
        if "obs" in update.keys() or not uupdate:
            if len(self.obs_dfs) > 0:
                obs_data = pd.concat(self.obs_dfs).loc[:, obs_data_cols]
            else:
                # no observations: build an empty, correctly-typed obs dataframe
                obs_data = pyemu.pst_utils._populate_dataframe(
                    [], pst.obs_fieldnames, pst.obs_defaults, pst.obs_dtype
                )
                obs_data.loc[:, "obsnme"] = []
                obs_data.index = []
            obs_data.sort_index(inplace=True)
            pst.observation_data = obs_data
            # pst.instruction_files = self.ins_filenames
            # pst.output_files = self.output_filenames
            pst.model_output_data = pd.DataFrame(
                {"pest_file": self.ins_filenames, "model_file": self.output_filenames},
                index=self.ins_filenames,
            )
        if not uupdate:
            pst.model_command = self.mod_command
        pst.prior_information = pst.null_prior
        # NOPTMAX=0 so the new control file runs a single forward run by default
        pst.control_data.noptmax = 0
        self.pst = pst
        self.pst.write(filename, version=version)
        self.write_forward_run()
        pst.try_parse_name_metadata()
        return pst
def _setup_dirs(self):
self.logger.log("setting up dirs")
if not os.path.exists(self.original_d):
self.logger.lraise(f"original_d '{self.original_d}' not found")
if not os.path.isdir(self.original_d):
self.logger.lraise(f"original_d '{self.original_d}' is not a directory")
if self.new_d.exists():
if self.remove_existing:
self.logger.log(f"removing existing new_d '{self.new_d}'")
shutil.rmtree(self.new_d)
self.logger.log(f"removing existing new_d '{self.new_d}'")
time.sleep(1) # sleep longer for window locking issues
else:
self.logger.lraise(
f"new_d '{self.new_d}' already exists " "- use remove_existing=True"
)
self.logger.log(
f"copying original_d | |
<filename>nxt_editor/dockwidgets/hotkey_editor.py
# Built-in
import logging
# External
from Qt import QtWidgets, QtGui, QtCore
# Internal
import nxt_editor
from nxt_editor.dockwidgets.dock_widget_base import DockWidgetBase
from nxt_editor import colors, dialogs
logger = logging.getLogger(nxt_editor.LOGGER_NAME)
TOOLTIP_INFO = ('<p style="font-size:12px;color:white;">'
'<h3>How to:</h3>'
'<ul>'
'<li>'
'To edit a hotkey double click a cell in the <b>Shortcut</b> '
'column and press a key combination. </li><li>'
'When you release the key(s) the hotkey is temporally stored.'
'</li><li>'
'When you are ready to apply your changes click the <b>Save</b>'
' changes button at the bottom of the widget. </li><li>'
'To revert or remove a hotkey, right click the shortcut cell.'
'</li></ul>'
'<h3>Note:</h3>'
'Actions with <span style="color:#dfdf16;">yellow</span> text '
'are global and cannot have a shortcut that conflicts with any '
'other shortcut.</p>')
TOOLTIP_STYLE = '''QToolTip {
font-family: Roboto Mono;
background-color: #3E3E3E;
border: 1px solid #232323;
}'''
TABLE_STYLE = '''QTableView {
font-family: Roboto Mono;
}'''
class HotkeyEditor(DockWidgetBase):
    """Dock widget for viewing and editing the application's hotkeys."""
    # emitted with True when there are unsaved hotkey changes
    savable = QtCore.Signal(bool)
    # emitted to ask the model to rebuild from the currently saved hotkeys
    refresh = QtCore.Signal()
    def __init__(self, parent):
        super(HotkeyEditor, self).__init__('Hotkey Editor', parent,
                                           minimum_height=250)
        self.setWindowFlags(QtCore.Qt.Tool)
        self.main_window = parent
        # --- layout scaffolding ---
        self.main_widget = QtWidgets.QWidget(parent=self)
        self.setWidget(self.main_widget)
        self.layout = QtWidgets.QHBoxLayout()
        self.layout.setContentsMargins(0, 0, 0, 0)
        self.layout.setSpacing(0)
        self.main_widget.setLayout(self.layout)
        self.background_frame = QtWidgets.QFrame(self)
        self.layout.addWidget(self.background_frame)
        self.main_frame = QtWidgets.QFrame(self)
        self.layout.addWidget(self.main_frame)
        self.hotkey_layout = QtWidgets.QVBoxLayout()
        self.hotkey_layout.setContentsMargins(8, 0, 8, 0)
        self.hotkey_layout.setSpacing(0)
        self.main_frame.setLayout(self.hotkey_layout)
        # --- hotkey table ---
        self.hotkey_table_view = HotkeyView(self)
        self.hotkey_table_view.setToolTip(TOOLTIP_INFO)
        self.hotkey_table_view.horizontalHeader().setSectionsMovable(False)
        self.hotkey_table_view.horizontalHeader().setStretchLastSection(True)
        # edit a shortcut via double click; one row selected at a time
        double_click = QtWidgets.QAbstractItemView.DoubleClicked
        self.hotkey_table_view.setEditTriggers(double_click)
        single_select = QtWidgets.QAbstractItemView.SingleSelection
        self.hotkey_table_view.setSelectionMode(single_select)
        self.hotkey_table_view.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
        self.hotkey_table_view.customContextMenuRequested.connect(self.context_menu)
        self.hotkey_layout.addWidget(self.hotkey_table_view)
        self.hotkey_model = HotkeyModel(self, view=self.hotkey_table_view)
        self.hotkey_table_view.setModel(self.hotkey_model)
        # --- discard / save buttons, enabled only while changes are pending ---
        self.hb_buttons = QtWidgets.QHBoxLayout()
        self.hotkey_layout.addLayout(self.hb_buttons)
        self.btn_discard = QtWidgets.QPushButton('Discard changes')
        self.btn_discard.setEnabled(False)
        def _discard():
            # discard without the confirmation prompt (button press is explicit)
            self.hotkey_model.discard_changes(warn=False)
        self.btn_discard.clicked.connect(_discard)
        self.savable.connect(self.btn_discard.setEnabled)
        self.hb_buttons.addWidget(self.btn_discard)
        self.btn_save = QtWidgets.QPushButton('Save & Apply changes')
        self.btn_save.setEnabled(False)
        self.btn_save.clicked.connect(self.hotkey_model.save)
        self.savable.connect(self.btn_save.setEnabled)
        self.hb_buttons.addWidget(self.btn_save)
        self.refresh.connect(self.hotkey_model.discard_changes)
        self.resize(self.hotkey_table_view.width()*8, self.minimumHeight())
        # populate the model with the current hotkeys
        self.refresh.emit()
    def context_menu(self, pos):
        """Show the revert/remove context menu for the Shortcut column only."""
        index = self.hotkey_table_view.indexAt(pos)
        self.hotkey_table_view.closePersistentEditor(index)
        # only the last column (the shortcut cell) offers a context menu
        if index.column() != len(self.hotkey_model.header_names)-1:
            return
        menu = QtWidgets.QMenu(self)
        menu.addAction('Revert Hotkey', self.revert_hotkey)
        menu.addAction('Remove Hotkey', self.remove_hotkey)
        menu.popup(QtGui.QCursor.pos())
    def revert_hotkey(self):
        # Revert the currently selected shortcut to its saved value.
        index = self.hotkey_table_view.selectionModel().selectedIndexes()[0]
        self.hotkey_model.revert_hotkey(index)
    def remove_hotkey(self):
        # Clear the currently selected shortcut.
        index = self.hotkey_table_view.selectionModel().selectedIndexes()[0]
        self.hotkey_model.remove_hotkey(index)
    def closeEvent(self, event):
        """Block closing while the user declines to discard pending changes."""
        allow = self.hotkey_model.discard_changes()
        if allow:
            return super(HotkeyEditor, self).closeEvent(event)
        return event.ignore()
class HotkeyModel(QtCore.QAbstractTableModel):
    """Table model of application hotkeys with deferred (save/discard) edits.

    Each row of ``_data`` holds ``[name, what's this, tool tip, shortcut,
    action]`` — the trailing action object sits one past the last visible
    column. Section divider rows carry ``None`` in the action slot. User
    edits accumulate in ``user_changes`` until :meth:`save` applies them or
    :meth:`discard_changes` drops them.

    Bug fix vs. previous revision: ``index.isValid`` was referenced without
    calling it in :meth:`data` and :meth:`setData`; the bound method object
    is always truthy, so the invalid-index guards never fired. Now called
    as ``index.isValid()``.
    """
    def __init__(self, parent, view, headers=None):
        super(HotkeyModel, self).__init__()
        self.parent = parent
        self.header_names = headers or ['Name',
                                        'What\'s This', 'Tool Tip', 'Hotkey']
        self.view = view
        self.node_attr_names = list()
        self.attr_data = list()
        self.horizontal_header = self.view.horizontalHeader()
        self.state = None
        self.delegate = KeySequenceDelegate(parent=self)
        # set default data
        self._data = []
        # {row: [new_shortcut, action]} — pending, unsaved edits.
        self.user_changes = {}
        # {action: enabled_state} so actions can be restored after editing.
        self.actions = {}
        self.cast_mode = QtGui.QKeySequence.PortableText
        # Window-context (global) actions and their shortcuts; these may not
        # conflict with any other shortcut.
        self.protected_actions = []
        self.protected_shortcuts = []

    def discard_changes(self, warn=True):
        """Removes any changes made by the user.
        :param warn: If True a warning dialog will be shown allowing the user
        to cancel the discard.
        :return: False if user wants to cancel discard, True if user saved or
        discarded changes
        """
        if warn is True and self.user_changes.keys():
            info = 'Would you like to save your shortcuts?'
            resp = dialogs.UnsavedChangesMessage.save_before_close(info=info)
            save = dialogs.UnsavedChangesMessage.Save
            cancel = dialogs.UnsavedChangesMessage.Cancel
            if resp == cancel:
                return False
            elif resp == save:
                self.save()
        self.user_changes = {}
        self.update_data()
        self.parent.savable.emit(False)
        return True

    def update_data(self):
        """Rebuild ``_data`` from the main window's current hotkey map."""
        nxt_hotkeys = self.parent.main_window.get_hotkey_map()
        self.clear()
        self.actions = {}
        self.protected_actions = []
        self.protected_shortcuts = []
        for section, widget_actions in nxt_hotkeys.items():
            # Div rows have a None object as their last item.
            div_row = [section, 'What\'s this', 'Tooltip', 'Shortcut', None]
            self._data.append(div_row)
            for action_data in widget_actions:
                action = action_data[4]
                self._data += [action_data]
                self.actions[action] = action.isEnabled()
                # Window-context shortcuts are global: track them so other
                # rows cannot claim the same key sequence.
                if action.shortcutContext() == QtCore.Qt.WindowShortcut:
                    self.protected_actions += [action]
                    self.protected_shortcuts += [action_data[3]]
        if nxt_hotkeys:
            fixed = QtWidgets.QHeaderView.Fixed
            self.horizontal_header.setSectionResizeMode(0, fixed)
            self.horizontal_header.setDefaultSectionSize(200)
            self.view.setItemDelegateForColumn(self.columnCount()-1,
                                               self.delegate)
            self.view.resizeRowsToContents()

    def update_protected_shortcuts(self, action, old_shortcut, new_shortcut):
        """Keep the global-shortcut registry in sync when ``action`` changes."""
        if action in self.protected_actions:
            if old_shortcut in self.protected_shortcuts:
                self.protected_shortcuts.remove(old_shortcut)
            if new_shortcut:
                self.protected_shortcuts += [new_shortcut]

    def clear(self):
        """Empty the model, notifying attached views."""
        self.beginResetModel()
        self._data = []
        self.endResetModel()

    def save(self):
        """Apply every pending shortcut change and mark the model clean."""
        for shortcut, action in self.user_changes.values():
            action.setShortcut(shortcut, user_override=True)
        self.user_changes = {}
        self.update_data()
        self.parent.savable.emit(False)

    def revert_hotkey(self, index):
        """Reset the row at ``index`` to the action's default shortcut."""
        row = index.row()
        # columnCount() indexes one past the visible columns: the action.
        action = self._data[row][self.columnCount()]
        old_value = self._data[row][3]
        value = action.default_shortcut
        if value == old_value:
            return
        if not self.valid_shortcut(action, value):
            self.show_invalid_message(value)
            return
        self.update_protected_shortcuts(action, old_value, value)
        self.setData(index, value, QtCore.Qt.EditRole)

    def valid_shortcut(self, action, shortcut):
        """Return False if ``shortcut`` would collide with a global shortcut."""
        valid = True
        if action in self.protected_actions:
            # A global action may not reuse any existing shortcut.
            if shortcut in self.all_shortcuts:
                valid = False
        elif shortcut in self.protected_shortcuts:
            # A local action may not shadow a global shortcut.
            valid = False
        return valid

    @staticmethod
    def show_invalid_message(shortcut):
        """Warn the user that ``shortcut`` conflicts with a global shortcut."""
        message = 'Invalid key sequence: {} \nIt would create a ' \
                  'conflict for a global shortcut'.format(shortcut)
        dialogs.NxtWarningDialog.show_message('Invalid shortcut!', message)

    def remove_hotkey(self, index):
        """Clear the shortcut of the row at ``index`` (no-op if already empty)."""
        row = index.row()
        action = self._data[row][self.columnCount()]
        old_value = self._data[row][3]
        if not old_value:
            return
        self.update_protected_shortcuts(action, old_value, None)
        self.setData(index, None, QtCore.Qt.EditRole)

    def disable_actions(self):
        """Disable every known action (used while capturing key input)."""
        for action in self.actions.keys():
            action.setEnabled(False)

    def enable_actions(self):
        """Restore every action to its recorded enabled state."""
        for action in self.actions.keys():
            action.setEnabled(self.actions[action])

    def data(self, index, role=None):
        """Return display/edit/style data for ``index`` depending on ``role``."""
        if not index.isValid():
            return None
        row = index.row()
        column = index.column()
        if role == QtCore.Qt.DisplayRole:
            return self._data[row][column]
        elif role == QtCore.Qt.EditRole:
            # Disable all the actions so the user input doesn't trigger one
            # I tried an event filter but it didn't work on application
            # shortcuts like Ctrl+S
            self.disable_actions()
            return self._data[row][column]
        elif role == QtCore.Qt.ForegroundRole:
            action = self._data[row][self.columnCount()]
            # Global (window-context) shortcuts are highlighted.
            if action and action.shortcutContext() == QtCore.Qt.WindowShortcut:
                color = colors.IMPORTANT
                return QtGui.QBrush(color)
        elif role == QtCore.Qt.BackgroundRole:
            # Rows with pending edits get an "unsaved" hatch pattern.
            if row in self.user_changes.keys():
                color = colors.UNSAVED
                return QtGui.QBrush(color, QtCore.Qt.BDiagPattern)
            actions = self._data[row][self.columnCount()]
            if not actions:
                # TODO: Better style management
                color = QtGui.QColor(QtCore.Qt.darkGray).darker(250)
                return QtGui.QBrush(color)
        elif role == QtCore.Qt.FontRole:
            action = self._data[row][self.columnCount()]
            if action:
                # Italicize shortcuts that differ from the action's default.
                default_shortcut = action.default_shortcut
                if isinstance(default_shortcut, QtGui.QKeySequence):
                    default_shortcut = default_shortcut.toString()
                shortcut = self._data[row][self.columnCount()-1] or None
                if shortcut != default_shortcut:
                    font = QtGui.QFont()
                    font.setItalic(True)
                    return font
            else:
                # Divider rows: bold, slightly larger, italic.
                font = QtGui.QFont()
                font.setBold(True)
                font.setPointSizeF(font.pointSize()*1.1)
                font.setItalic(True)
                return font
        elif not role:
            return self._data[row][column]

    def setData(self, index, value, role):
        """Record a shortcut edit in ``user_changes`` and refresh UI state."""
        value_set = False
        if not index.isValid():
            return value_set
        row = index.row()
        column = index.column()
        # Reset the actions to their previous enabled state
        if role == QtCore.Qt.EditRole and column == self.columnCount()-1:
            if value != self._data[row][column]:
                self._data[row][column] = value
                action = self._data[row][column+1]
                # The last visible column holds the shortcut the column
                # after this holds the NxtAction object actions are always
                # in a list
                self.user_changes[row] = [value, action]
                value_set = True
            if row in list(self.user_changes.keys()):
                action = self.user_changes[row][1]
                # Editing back to the saved value cancels the pending change.
                if value == action.shortcut().toString(self.cast_mode):
                    self.user_changes.pop(row)
            self.enable_actions()
        if self.user_changes.keys():
            self.parent.savable.emit(True)
        else:
            self.parent.savable.emit(False)
        self.view.update()
        return value_set

    def flags(self, index):
        """Only the last (shortcut) column is selectable/editable."""
        column = index.column()
        if column == self.columnCount()-1:
            return QtCore.Qt.ItemIsEnabled | \
                   QtCore.Qt.ItemIsSelectable | \
                   QtCore.Qt.ItemIsEditable
        else:
            return QtCore.Qt.NoItemFlags

    def headerData(self, section, orientation, role):
        """Return the column title for the horizontal header."""
        if orientation is QtCore.Qt.Horizontal:
            if role is QtCore.Qt.DisplayRole:
                return self.header_names[section]

    def rowCount(self, parent):
        return len(self._data)

    def columnCount(self, *args):
        return len(self.header_names)

    @property
    def all_shortcuts(self):
        """Every non-empty shortcut currently present in action rows."""
        shortcuts = []
        for row in self._data:
            # row[columnCount()] is the action; divider rows hold None there.
            if row[self.columnCount()]:
                shortcuts += [row[3]]
        return shortcuts
class HotkeyView(QtWidgets.QTableView):
    """Table view for the hotkey editor with monospace tooltip/table styling."""

    def __init__(self, parent):
        super(HotkeyView, self).__init__(parent=parent)
        self.hotkey_editor = parent
        # Extend the parent's stylesheet rather than replacing it.
        combined = ''.join((self.parent().styleSheet(),
                            TOOLTIP_STYLE, TABLE_STYLE))
        self.setStyleSheet(combined)
class KeySequenceDelegate(QtWidgets.QStyledItemDelegate):
    """Delegate that opens a KeySequenceEdit for shortcut cells."""

    def __init__(self, parent):
        super(KeySequenceDelegate, self).__init__(parent=parent)
        self.hotkey_model = parent

    def createEditor(self, parent, option, index):
        """Return a KeySequenceEdit for rows that carry an action, else None."""
        model = self.hotkey_model
        # The slot just past the last visible column holds the action object.
        action = model._data[index.row()][model.columnCount()]
        if not action:
            # Don't allow users to edit rows that don't have actions in them
            return None
        return KeySequenceEdit(parent, index.data(), action, model)
class KeySequenceEdit(QtWidgets.QLineEdit):
    """
    Line edit that captures raw key presses and renders them as a
    key-sequence string (e.g. ``Ctrl+Shift+S``).

    Based on https://gist.github.com/blink1073/946df268c3685a3f443e
    """
    def __init__(self, parent, key_sequence, action, hotkey_model):
        super(KeySequenceEdit, self).__init__(parent)
        self.action = action
        self.parent = parent
        self.hotkey_model = hotkey_model
        self.setText(key_sequence)
        # Number of non-modifier keys currently held (reset on release).
        self.press_count = 0
        # Accumulated key codes (modifiers OR'd in) for the sequence.
        self.keys = set()
        # Display order for modifier names when previewing modifier-only input.
        self.modifiers = ['Meta', 'Ctrl', 'Alt', 'Shift']
        self.input_text = key_sequence
        self.installEventFilter(self)
        self.setFocusPolicy(QtCore.Qt.StrongFocus)
        self.cast_mode = QtGui.QKeySequence.PortableText
        # Context menu would interfere with capturing right-click-ish combos.
        self.setContextMenuPolicy(QtCore.Qt.NoContextMenu)

    def keyPressEvent(self, event):
        # Ignore OS auto-repeat so press_count reflects distinct presses.
        if event.isAutoRepeat():
            return
        key = event.key()
        self.press_count += 1
        if key == QtCore.Qt.Key_unknown:
            logger.error("Unknown key from a macro probably")
            return
        event_modifiers = event.modifiers()
        # Map modifier key codes to their combinable modifier flags.
        modifier_key_map = {QtCore.Qt.Key_Control: QtCore.Qt.CTRL,
                            QtCore.Qt.Key_Shift: QtCore.Qt.SHIFT,
                            QtCore.Qt.Key_Alt: QtCore.Qt.ALT,
                            QtCore.Qt.Key_Meta: QtCore.Qt.META}
        if key in list(modifier_key_map.keys()) and not len(self.keys):
            # Only modifier(s) held so far: preview them as text but don't
            # commit anything to self.keys yet.
            shift_held = bool(event_modifiers & QtCore.Qt.ShiftModifier)
            ctrl_held = bool(event_modifiers & QtCore.Qt.ControlModifier)
            alt_held = bool(event_modifiers & QtCore.Qt.AltModifier)
            meta_held = bool(event_modifiers & QtCore.Qt.MetaModifier)
            held_modifiers = [meta_held, ctrl_held, alt_held, shift_held]
            if sum(held_modifiers) > 1:
                # Multiple modifiers: build "Meta+Ctrl+..." manually.
                text = ''
                for modifier, held in zip(self.modifiers, held_modifiers):
                    if held:
                        text += '{}+'.format(modifier)
                text = text[:-1]
            else:
                # Single modifier: let QKeySequence produce its name.
                key = modifier_key_map[key]
                keySequence = QtGui.QKeySequence(key)
                text = keySequence.toString()
                if not isinstance(text, str):
                    text = text.decode()
            self.setText(text)
            return
        else:
            # Non-modifier key: OR the held modifiers into the key code.
            if event_modifiers & QtCore.Qt.ShiftModifier:
                key += QtCore.Qt.SHIFT
            if event_modifiers & QtCore.Qt.ControlModifier:
                key += QtCore.Qt.CTRL
                # NOTE(review): press_count is decremented only in the Ctrl
                # branch — looks asymmetric vs. the other modifiers; confirm
                # this is intentional.
                self.press_count -= 1
            if event_modifiers & QtCore.Qt.AltModifier:
                key += QtCore.Qt.ALT
            if event_modifiers & QtCore.Qt.MetaModifier:
                key += QtCore.Qt.META
            self.keys.add(key)
            if len(self.keys) > 4:
                # QKeySequence supports at most four chords.
                logger.error("Too many keys, max 4!")
                text = 'Too many keys, max 4!'
                self.keys = set()
                self.press_count = 0
            else:
                keySequence = QtGui.QKeySequence(*self.keys)
                text = keySequence.toString(self.cast_mode)
                # A modifier name appearing twice means an invalid combo.
                for modifier in self.modifiers:
                    if text.count(modifier) > 1:
                        text = 'Invalid key combo'
                        self.keys = set()
                        self.press_count = 0
        self.setText(text)
        event.accept()

    def keyReleaseEvent(self, event):
        # Any release resets the held-key counter.
        self.press_count = 0
| |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""logical operations, the function docs are adapted from Numpy API."""
from .math_ops import _apply_tensor_op
from ..ops import functional as F
from ..ops.primitive import constexpr
from ..common import dtype as mstype
from ..common import Tensor
from .._c_expression import typing
from .array_creations import zeros, ones
from .utils import _check_input_tensor
def not_equal(x1, x2, out=None, where=True, dtype=None):
    """
    Returns (x1 != x2) element-wise.

    Args:
        x1 (Tensor): First input tensor to be compared.
        x2 (Tensor): Second input tensor to be compared.
        out (Tensor or None, optional): defaults to None.
        where (Tensor or None, optional): For any non-default value of type other
            than :class:`Tensor` or :class:`None`, the output retains its original value.
            This condition is broadcasted over the input. At locations where the
            condition is `True`, the out array will be set to the ufunc result.
            Elsewhere, the out array will retain its original value. Note that
            if an uninitialized out array is created via the default ``out=None``,
            locations within it where the condition is `False` will remain
            uninitialized.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, element-wise comparison of `x1` and `x2`. Typically of type
        bool, unless `dtype` is passed. This is a scalar if both `x1` and `x2` are
        scalars.

    Raises:
        TypeError: If the input is not a tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.asarray([1, 2])
        >>> b = np.asarray([[1, 3],[1, 4]])
        >>> print(np.not_equal(a, b))
        [[False True]
        [False True]]
    """
    _check_input_tensor(x1, x2)
    return _apply_tensor_op(F.not_equal, x1, x2, out=out, where=where, dtype=dtype)
def less_equal(x1, x2, out=None, where=True, dtype=None):
    """
    Returns the truth value of ``(x1 <= x2)`` element-wise.

    Note:
        Numpy arguments `casting`, `order`, `dtype`, `subok`, `signature`, and `extobj` are
        not supported.
        When `where` is provided, `out` must have a tensor value. `out` is not supported
        for storing the result, however it can be used in combination with `where` to set
        the value at indices for which `where` is set to False.

    Args:
        x1 (Tensor): Input array.
        x2 (Tensor): Input array. If ``x1.shape != x2.shape``, they must be
            broadcastable to a common shape (which becomes the shape of the output).
        out (Tensor or None, optional): defaults to None.
        where (Tensor or None, optional): For any non-default value of type other
            than :class:`Tensor` or :class:`None`, the output retains its original value.
            This condition is broadcasted over the input. At locations where the
            condition is `True`, the out array will be set to the ufunc result.
            Elsewhere, the out array will retain its original value. Note that
            if an uninitialized out array is created via the default ``out=None``,
            locations within it where the condition is `False` will remain
            uninitialized.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, element-wise comparison of `x1` and `x2`. Typically of type
        bool, unless `dtype` is passed. This is a scalar if both `x1` and `x2` are
        scalars.

    Raises:
        TypeError: If the input is not a tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> output = np.less_equal(np.array([4, 2, 1]), np.array([2, 2, 2]))
        >>> print(output)
        [False True True]
    """
    _check_input_tensor(x1, x2)
    return _apply_tensor_op(F.tensor_le, x1, x2, out=out, where=where, dtype=dtype)
def less(x1, x2, out=None, where=True, dtype=None):
    """
    Returns the truth value of ``(x1 < x2)`` element-wise.

    Note:
        Numpy arguments `casting`, `order`, `dtype`, `subok`, `signature`, and `extobj` are
        not supported.
        When `where` is provided, `out` must have a tensor value. `out` is not supported
        for storing the result, however it can be used in combination with `where` to set
        the value at indices for which `where` is set to False.

    Args:
        x1 (Tensor): Input array.
        x2 (Tensor): Input array. If ``x1.shape != x2.shape``, they must be
            broadcastable to a common shape (which becomes the shape of the output).
        out (Tensor or None, optional): defaults to None.
        where (Tensor or None, optional): For any non-default value of type other
            than :class:`Tensor` or :class:`None`, the output retains its original value.
            This condition is broadcasted over the input. At locations where the
            condition is `True`, the out array will be set to the ufunc result.
            Elsewhere, the out array will retain its original value. Note that
            if an uninitialized out array is created via the default ``out=None``,
            locations within it where the condition is `False` will remain
            uninitialized.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, element-wise comparison of `x1` and `x2`. Typically of type
        bool, unless `dtype` is passed. This is a scalar if both `x1` and `x2` are
        scalars.

    Raises:
        TypeError: If the input is not a tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> output = np.less(np.array([1, 2]), np.array([2, 2]))
        >>> print(output)
        [ True False]
    """
    # Validate inputs up front so non-tensor input raises the documented
    # TypeError, consistent with not_equal/less_equal in this module.
    _check_input_tensor(x1, x2)
    return _apply_tensor_op(F.tensor_lt, x1, x2, out=out, where=where, dtype=dtype)
def greater_equal(x1, x2, out=None, where=True, dtype=None):
    """
    Returns the truth value of ``(x1 >= x2)`` element-wise.

    Note:
        Numpy arguments `casting`, `order`, `dtype`, `subok`, `signature`, and `extobj` are
        not supported.
        When `where` is provided, `out` must have a tensor value. `out` is not supported
        for storing the result, however it can be used in combination with `where` to set
        the value at indices for which `where` is set to False.

    Args:
        x1 (Tensor): Input array.
        x2 (Tensor): Input array. If ``x1.shape != x2.shape``, they must be
            broadcastable to a common shape (which becomes the shape of the output).
        out (Tensor or None, optional): defaults to None.
        where (Tensor or None, optional): For any non-default value of type other
            than :class:`Tensor` or :class:`None`, the output retains its original value.
            This condition is broadcasted over the input. At locations where the
            condition is `True`, the out array will be set to the ufunc result.
            Elsewhere, the out array will retain its original value. Note that
            if an uninitialized out array is created via the default ``out=None``,
            locations within it where the condition is `False` will remain
            uninitialized.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor or scalar, element-wise comparison of `x1` and `x2`. Typically of type
        bool, unless `dtype` is passed. This is a scalar if both `x1` and `x2` are
        scalars.

    Raises:
        TypeError: If the input is not a tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> output = np.greater_equal(np.array([4, 2, 1]), np.array([2, 2, 2]))
        >>> print(output)
        [ True True False]
    """
    # Validate inputs up front so non-tensor input raises the documented
    # TypeError, consistent with not_equal/less_equal in this module.
    _check_input_tensor(x1, x2)
    return _apply_tensor_op(F.tensor_ge, x1, x2, out=out, where=where, dtype=dtype)
def greater(x1, x2, out=None, where=True, dtype=None):
"""
Returns the truth value of ``(x1 > x2)`` element-wise.
Note:
Numpy arguments `casting`, `order`, `dtype`, `subok`, `signature`, and `extobj` are
not supported.
When `where` is provided, `out` must have a tensor value. `out` is not supported
for storing the result, however it can be used in combination with `where` to set
the value at indices for which `where` is set to False.
Args:
x1 (Tensor): Input array.
x2 (Tensor): Input array. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which becomes the shape of the output).
out (Tensor or None, optional): defaults to None.
where (Tensor or None, optional): For any non-default value of type other
than :class:`Tensor` or :class:`None`, the output retains its original value.
This condition is broadcasted over the input. At locations where the
condition is `True`, the out array will be set to the ufunc result.
Elsewhere, the out array will retain its original value. Note that
if an | |
import mxnet as mx
from mxnet.gluon import loss
from models.loss import pairwise_distance
def unique(F, data):
    """
    Returns the unique elements of a 1D array.

    Flattens and sorts ``data``, then keeps each element that differs from
    its predecessor (the first element is always kept).
    :param F: mxnet symbol/ndarray module
    :param data: input array
    :return: sorted 1D array of unique values
    """
    flat = F.sort(F.reshape(data, (-1,)), axis=-1)
    # First element is always unique; later ones only if != their neighbor.
    head = F.ones(1, ctx=flat.context, dtype=flat.dtype)
    changed = F.slice(flat, begin=1, end=(None,)) != F.slice(flat, begin=(None,), end=(-1,))
    keep = F.concat(head, changed, dim=0)
    return F.contrib.boolean_mask(flat, keep)
def cluster_indices(F, input_data, unique_data=None):
    """Boolean membership matrix: entry (i, j) is True when sample j belongs
    to cluster i (i indexes the unique labels).

    :param unique_data: precomputed unique labels; computed if None.
    """
    labels = unique(F, input_data) if unique_data is None else unique_data
    return F.broadcast_equal(labels.expand_dims(1), input_data.transpose())
def entropy(F, input_data, unique_data=None):
    """
    Computes the (Shannon) entropy of the label distribution in ``input_data``.

    :param F: mxnet symbol/ndarray module
    :param input_data: 1D array of labels
    :param unique_data: precomputed unique labels; computed if None.
    :return: scalar entropy (natural log)
    """
    labels = unique(F, input_data) if unique_data is None else unique_data
    # Count occurrences of each unique label.
    counts = F.sum(F.broadcast_equal(labels.expand_dims(1), input_data), axis=1)
    probs = counts / F.sum(counts)
    return -F.sum(probs * F.log(probs))
def first_k(F, data, k):
    """
    Returns the first k elements of an array, where k is an NDArray scalar
    (so a plain slice cannot be used).

    :param F: mxnet symbol/ndarray module
    :param data: input array
    :param k: number of leading elements to keep (NDArray)
    :return: data[:k]
    """
    positions = F.arange(start=0, stop=data.size)
    return F.contrib.boolean_mask(data, positions < k)
def unique_intersection(F, ind1, ind2):
    """
    Computes the size of the intersection of two index sets.

    Both inputs must contain unique values, otherwise matches are
    over-counted.
    :param F: mxnet symbol/ndarray module
    :param ind1: 1D array of unique indices
    :param ind2: 1D array of unique indices
    :return: number of common elements
    """
    # Pairwise equality between every element of ind1 and ind2.
    matches = F.broadcast_equal(ind1, ind2.expand_dims(0).transpose())
    # NOTE(review): axis=() — presumably reduces over all axes; confirm
    # against mxnet's F.sum semantics.
    return F.sum(matches, axis=())
def mutual_information_score(F, data1, data2, unique_data1=None, unique_data2=None):
    """Mutual information between two 1-D cluster assignments.

    :param F: mxnet symbol/ndarray module
    :param data1: first label assignment (length N)
    :param data2: second label assignment (length N; assumed same size)
    :param unique_data1: precomputed unique labels of data1 (optional)
    :param unique_data2: precomputed unique labels of data2 (optional)
    :return: scalar MI; nansum skips empty-intersection (log of 0) terms
    """
    cluster_indices1 = cluster_indices(F, data1, unique_data1)  # N1xD
    cluster_indices2 = cluster_indices(F, data2, unique_data2)  # N2xD
    cluster_sizes1 = F.sum(cluster_indices1, axis=1)  # N1
    cluster_sizes2 = F.sum(cluster_indices2, axis=1)  # N2
    # Outer product of cluster sizes: denominator of the MI term.
    cluster_sizes = F.broadcast_mul(cluster_sizes1.expand_dims(1), cluster_sizes2.expand_dims(0))
    N = data1.size
    # assert data1.size == data2.size
    # A sample is in the intersection of clusters (i, j) when both
    # membership matrices flag it, i.e. the broadcast sum equals 2.
    mask = F.broadcast_add(cluster_indices1.expand_dims(1), cluster_indices2.expand_dims(0)) == 2
    num_intersection = F.sum(mask, axis=2)  # N1xN2
    score = num_intersection / N * F.log(num_intersection * N / cluster_sizes)
    return F.nansum(score)
def nmi(F, labels_true, labels_pred):
    """Normalized mutual information between two label assignments.

    NMI = MI / sqrt(H_true * H_pred), with entropies clamped at zero and the
    denominator floored at 1e-10 to avoid division by zero.
    """
    uniq_true = unique(F, labels_true)
    uniq_pred = unique(F, labels_pred)
    ent_true = F.maximum(entropy(F, labels_true, uniq_true), 0)
    ent_pred = F.maximum(entropy(F, labels_pred, uniq_pred), 0)
    mi = mutual_information_score(F, labels_true, labels_pred,
                                  uniq_true, uniq_pred)
    return mi / F.maximum(F.sqrt(ent_true * ent_pred), 1e-10)
def get_cluster_assignment(F, pairwise_distances, centroid_ids):
    """Assign data points to the neareset centroids.
    Due to computational instability for each centroid in centroid_ids,
    explicitly assign the centroid itself as the nearest centroid.
    This is done through the mask tensor and the constraint_vect tensor.
    Args:
      pairwise_distances: 2-D Tensor of pairwise distances.
      centroid_ids: 1-D Tensor of centroid indices.
    Returns:
      y_fixed: 1-D tensor of cluster assignment.
    """
    # Nearest centroid per sample: argmin over the centroid rows, done as
    # top-1 of the negated distances.
    predictions = F.topk(-F.take(pairwise_distances, centroid_ids, axis=0), k=1, ret_typ='indices', axis=0).squeeze()
    batch_size = pairwise_distances.shape[0]
    # Deal with numerical instability
    # mask[i] == 1 iff sample i is itself a centroid.
    mask = F.clip(F.sum(F.one_hot(centroid_ids, batch_size), axis=0), 0, 1)
    # constraint_vect[i] holds the cluster index of centroid i (0 elsewhere).
    constraint_one_hot = F.one_hot(centroid_ids, batch_size).transpose() * F.arange(centroid_ids.shape[0],
                                                                                   ctx=pairwise_distances.context)
    constraint_vect = F.sum(constraint_one_hot.transpose(), axis=0)
    # Centroid samples are pinned to their own cluster; everyone else keeps
    # the nearest-centroid prediction.
    y_fixed = F.where(mask, constraint_vect, predictions)
    return y_fixed
def compute_clustering_score(F, labels, predictions, margin_type='nmi'):
    """Computes the clustering score (sklearn-style metrics implemented locally).
    There are various ways to compute the clustering score. Intuitively,
    we want to measure the agreement of two clustering assignments (labels vs
    predictions) ignoring the permutations and output a score from zero to one.
    (where the values close to one indicate significant agreement).
    This code supports following scoring functions:
      nmi: normalized mutual information
    Args:
      labels: 1-D Tensor. ground truth cluster assignment.
      predictions: 1-D Tensor. predicted cluster assignment.
      margin_type: Type of structured margin to use. Default is nmi.
    Returns:
      clustering_score: dtypes.float32 scalar.
        The possible valid values are from zero to one.
        Zero means the worst clustering and one means the perfect clustering.
    Raises:
      ValueError: margin_type is not recognized.
    """
    # Dispatch table; only 'nmi' is implemented so far.
    margin_type_to_func = {
        'nmi': nmi,
        # 'nmi': metrics.normalized_mutual_info_score,
        # 'ami': _compute_ami_score,
        # 'ari': _compute_ari_score,
        # 'vmeasure': _compute_vmeasure_score,
        # 'const': _compute_zeroone_score
    }
    if margin_type not in margin_type_to_func:
        raise ValueError('Unrecognized margin_type: %s' % margin_type)
    return margin_type_to_func[margin_type](F, labels, predictions)
def _find_loss_augmented_facility_idx(F, pairwise_distances, labels, chosen_ids,
                                      candidate_ids, margin_multiplier,
                                      margin_type):
    """Find the next centroid that maximizes the loss augmented inference.
    This function is a subroutine called from compute_augmented_facility_locations
    Args:
      pairwise_distances: 2-D Tensor of pairwise distances.
      labels: 1-D Tensor of ground truth cluster assignment.
      chosen_ids: 1-D Tensor of current centroid indices. Can be None when empty.
      candidate_ids: 1-D Tensor of candidate indices.
      margin_multiplier: multiplication constant.
      margin_type: Type of structured margin to use. Default is nmi.
    Returns:
      integer index.
    """
    num_candidates = candidate_ids.shape[0]
    pairwise_distances_candidate = F.take(pairwise_distances, candidate_ids, axis=0)

    # Branch on whether any centroids were chosen yet: F.concat cannot take
    # an empty tensor, so the empty case is handled via F.contrib.cond.
    def then_func():
        pairwise_distances_chosen = F.take(pairwise_distances, chosen_ids, axis=0)
        pairwise_distances_chosen_tile = F.tile(pairwise_distances_chosen, reps=(1, num_candidates))
        return F.concat(pairwise_distances_chosen_tile, F.reshape(pairwise_distances_candidate, (1, -1)), dim=0)

    def else_func():
        return F.reshape(pairwise_distances_candidate, (1, -1))

    chosen_m = F.contrib.cond(chosen_ids.size_array() > 0, then_func, else_func)
    # Facility-location term: negative travel cost if each candidate joined.
    candidate_scores = -1.0 * F.sum(F.reshape(F.min(chosen_m, axis=0, keepdims=True), (num_candidates, -1)), axis=1)
    nmi_scores = F.zeros((num_candidates,), ctx=pairwise_distances.context)
    # NDArray counter (not the Python loop index) so F.take/F.one_hot below
    # stay expressible as symbolic ops; iteration_sc only drives the loop.
    iteration = F.zeros((1,), ctx=pairwise_distances.context)
    for iteration_sc in range(num_candidates):
        # Cannot concat 0 sized tensors as of 1.5
        # These redefinitions shadow the outer then_func/else_func on purpose.
        def then_func():
            return get_cluster_assignment(F, pairwise_distances,
                                          F.concat(chosen_ids, F.take(candidate_ids, iteration, axis=0), dim=0))

        def else_func():
            return get_cluster_assignment(F, pairwise_distances, F.take(candidate_ids, iteration, axis=0))

        predictions = F.contrib.cond(chosen_ids.size_array() > 0, then_func, else_func)
        nmi_score_i = compute_clustering_score(F, labels, predictions, margin_type)
        score = 1.0 - nmi_score_i
        # Scatter this candidate's score into position `iteration`.
        score = F.one_hot(iteration, nmi_scores.size).squeeze() * score
        nmi_scores = nmi_scores + score
        iteration = iteration + 1
    candidate_scores = candidate_scores + (margin_multiplier * nmi_scores)
    argmax_index = F.topk(candidate_scores, k=1, ret_typ='indices', axis=0).squeeze()
    return F.take(candidate_ids, argmax_index, axis=0)
def diff1d(F, x, y, max_classes):
    """Given two 1D tensors x and y, this operation returns a 1D tensor
    that represents all values that are in x but not in y

    :param max_classes: upper bound on the values in x/y, used to size the
        one-hot encodings. Assumes y is a subset of x so the mask subtraction
        never goes negative — TODO confirm with callers.
    """
    def _diff1d(x, y):
        # One-hot histograms over [0, max_classes); subtracting leaves 1s
        # exactly at the values present in x but not y.
        chosen_mask = F.sum(F.one_hot(y, max_classes), axis=0)
        all_mask = F.sum(F.one_hot(x, max_classes), axis=0)
        remaining_mask = all_mask - chosen_mask
        remaining_mask = F.slice_like(remaining_mask, x)
        return F.contrib.boolean_mask(x, remaining_mask)

    # Empty-tensor handling: boolean_mask/one_hot cannot take empty input,
    # so degenerate cases are resolved with nested F.contrib.cond branches.
    def then_func_both():
        def thenx():
            return x

        def theny():
            return y
        # If x is empty return y, otherwise (y empty) return x.
        return F.contrib.cond(x.size_array() == 0, theny, thenx)

    def else_both_func():
        return _diff1d(x, y)

    return F.contrib.cond((y.size_array() * x.size_array()) == 0, then_func_both, else_both_func)
def concat2(F, x, y, dim):
    """Concatenate x and y along ``dim``, tolerating an empty x.

    F.concat cannot take a zero-sized tensor, so when x is empty the result
    is simply y.
    """
    def _with_x():
        return F.concat(x, y, dim=dim)

    def _without_x():
        return y

    return F.contrib.cond(x.size_array() > 0, _with_x, _without_x)
def compute_augmented_facility_locations(F, pairwise_distances, labels, unique_labels, all_ids,
                                         margin_multiplier, num_classes, margin_type='nmi'):
    """Greedily pick one centroid per unique label via loss-augmented inference.

    Iterates once per unique label (via F.contrib.foreach) and each time adds
    the candidate index maximizing the augmented facility-location score.
    Returns the 1-D tensor of chosen centroid indices.
    """
    def func_body_iteration(_, states):
        chosen_ids, all_ids = states
        # we need all ID's that are not in chosen_ids
        candidate_ids = diff1d(F, all_ids, chosen_ids, num_classes)
        new_chosen_idx = _find_loss_augmented_facility_idx(F, pairwise_distances,
                                                           labels,
                                                           chosen_ids,
                                                           candidate_ids,
                                                           margin_multiplier,
                                                           margin_type)
        chosen_ids = concat2(F, chosen_ids, new_chosen_idx, dim=0)
        return chosen_ids, (chosen_ids, all_ids)
    # crashes in 1.5.1 but not in 1.4
    if mx.__version__.split('.')[0:2] == ['1', '5']:
        chosen_ids = mx.nd.array([])  # not hybridizable
    else:
        chosen_ids = F.zeros((0,), ctx=pairwise_distances.context)
    # foreach threads (chosen_ids, all_ids) through one step per unique label.
    _, states = F.contrib.foreach(func_body_iteration, unique_labels, (chosen_ids, all_ids))
    return states[0]
def compute_facility_energy(F, pairwise_distances, centroid_ids):
    """Compute the average travel distance to the assigned centroid.
    Args:
      pairwise_distances: 2-D Tensor of pairwise distances.
      centroid_ids: 1-D Tensor of indices.
    Returns:
      facility_energy: [1]
    """
    centroid_rows = F.take(pairwise_distances, centroid_ids, axis=0)
    # Each sample travels to its nearest chosen centroid.
    nearest = F.min(centroid_rows, axis=0)
    return -1.0 * F.sum(nearest)
def update_1d_tensor(F, y, index, value):
    """Updates 1d tensor y so that y[index] = value.
    Args:
      y: 1-D Tensor.
      index: index of y to modify.
      value: new value to write at y[index].
    Returns:
      y_mod: 1-D Tensor. Tensor y after the update.
    """
    value = value.squeeze()
    # One-hot mask selecting the target slot.
    mask = F.one_hot(index, y.size).squeeze()
    # Zero out the target slot, then write the new value into it.
    return y * (1 - mask) + (mask * value)
def update_medoid_per_cluster(F, pairwise_distances, pairwise_distances_subset,
labels, chosen_ids, cluster_member_ids,
cluster_idx, margin_multiplier, margin_type):
"""Updates the cluster medoid per cluster.
Args:
pairwise_distances: 2-D Tensor of pairwise distances.
pairwise_distances_subset: 2-D Tensor of pairwise distances for one cluster.
labels: 1-D Tensor of ground truth cluster assignment.
chosen_ids: 1-D Tensor of cluster centroid indices.
cluster_member_ids: 1-D Tensor of cluster member indices for one cluster.
cluster_idx: Index of this one cluster.
margin_multiplier: multiplication constant.
margin_type: Type of structured margin to use. Default is nmi.
Returns:
chosen_ids: Updated 1-D Tensor of cluster centroid indices.
"""
# pairwise_distances_subset is of size [p, 1, 1, p],
# the intermediate dummy dimensions at
# [1, 2] makes this code work in the edge case where p=1.
# this happens if the cluster size is one.
scores_fac = -1.0 * F.sum(F.squeeze(pairwise_distances_subset, axis=(1, 2)), axis=0)
iteration = F.zeros((1,), ctx=pairwise_distances.context)
num_candidates = cluster_member_ids.size
scores_margin = F.zeros((num_candidates,), ctx=pairwise_distances.context)
for it in range(num_candidates):
candidate_medoid = F.take(cluster_member_ids, iteration, axis=0)
tmp_chosen_ids = update_1d_tensor(F, chosen_ids, cluster_idx, candidate_medoid)
predictions = get_cluster_assignment(F, pairwise_distances, tmp_chosen_ids)
metric_score = compute_clustering_score(F, labels, predictions, margin_type)
if it > 0:
scores_m = F.concat(F.zeros((it,), ctx=pairwise_distances.context), 1.0 - metric_score, dim=0)
else:
scores_m = 1.0 - metric_score
if it < num_candidates - 1:
scores_m = F.concat(scores_m, F.zeros((num_candidates - 1 - it,), ctx=pairwise_distances.context), dim=0)
iteration = iteration + 1
scores_margin = scores_margin + scores_m
candidate_scores = scores_fac + (margin_multiplier * scores_margin)
argmax_index = F.topk(candidate_scores, k=1, ret_typ='indices', axis=0).squeeze()
best_medoid = F.take(cluster_member_ids, argmax_index, axis=0)
| |
the compressed data doesn't have its content size embedded within it,
decompression can be attempted by specifying the ``max_output_size``
argument:
>>> dctx = zstandard.ZstdDecompressor()
>>> uncompressed = dctx.decompress(data, max_output_size=1048576)
Ideally, ``max_output_size`` will be identical to the decompressed
output size.
.. important::
If the exact size of decompressed data is unknown (not passed in
explicitly and not stored in the zstd frame), for performance
reasons it is encouraged to use a streaming API.
:param data:
Compressed data to decompress.
:param max_output_size:
Integer max size of response.
If ``0``, there is no limit and we can attempt to allocate an output
buffer of infinite size.
:return:
``bytes`` representing decompressed output.
"""
self._ensure_dctx()
data_buffer = ffi.from_buffer(data)
output_size = lib.ZSTD_getFrameContentSize(
data_buffer, len(data_buffer)
)
if output_size == lib.ZSTD_CONTENTSIZE_ERROR:
raise ZstdError("error determining content size from frame header")
elif output_size == 0:
return b""
elif output_size == lib.ZSTD_CONTENTSIZE_UNKNOWN:
if not max_output_size:
raise ZstdError(
"could not determine content size in frame header"
)
result_buffer = ffi.new("char[]", max_output_size)
result_size = max_output_size
output_size = 0
else:
result_buffer = ffi.new("char[]", output_size)
result_size = output_size
out_buffer = ffi.new("ZSTD_outBuffer *")
out_buffer.dst = result_buffer
out_buffer.size = result_size
out_buffer.pos = 0
in_buffer = ffi.new("ZSTD_inBuffer *")
in_buffer.src = data_buffer
in_buffer.size = len(data_buffer)
in_buffer.pos = 0
zresult = lib.ZSTD_decompressStream(self._dctx, out_buffer, in_buffer)
if lib.ZSTD_isError(zresult):
raise ZstdError("decompression error: %s" % _zstd_error(zresult))
elif zresult:
raise ZstdError(
"decompression error: did not decompress full frame"
)
elif output_size and out_buffer.pos != output_size:
raise ZstdError(
"decompression error: decompressed %d bytes; expected %d"
% (zresult, output_size)
)
return ffi.buffer(result_buffer, out_buffer.pos)[:]
def stream_reader(
self,
source,
read_size=DECOMPRESSION_RECOMMENDED_INPUT_SIZE,
read_across_frames=False,
closefd=True,
):
"""
Read-only stream wrapper that performs decompression.
This method obtains an object that conforms to the ``io.RawIOBase``
interface and performs transparent decompression via ``read()``
operations. Source data is obtained by calling ``read()`` on a
source stream or object implementing the buffer protocol.
See :py:class:`zstandard.ZstdDecompressionReader` for more documentation
and usage examples.
:param source:
Source of compressed data to decompress. Can be any object
with a ``read(size)`` method or that conforms to the buffer protocol.
:param read_size:
Integer number of bytes to read from the source and feed into the
compressor at a time.
:param read_across_frames:
Whether to read data across multiple zstd frames. If False,
decompression is stopped at frame boundaries.
:param closefd:
Whether to close the source stream when this instance is closed.
:return:
:py:class:`zstandard.ZstdDecompressionReader`.
"""
self._ensure_dctx()
return ZstdDecompressionReader(
self, source, read_size, read_across_frames, closefd=closefd
)
def decompressobj(self, write_size=DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE):
"""Obtain a standard library compatible incremental decompressor.
See :py:class:`ZstdDecompressionObj` for more documentation
and usage examples.
:param write_size:
:return:
:py:class:`zstandard.ZstdDecompressionObj`
"""
if write_size < 1:
raise ValueError("write_size must be positive")
self._ensure_dctx()
return ZstdDecompressionObj(self, write_size=write_size)
    def read_to_iter(
        self,
        reader,
        read_size=DECOMPRESSION_RECOMMENDED_INPUT_SIZE,
        write_size=DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE,
        skip_bytes=0,
    ):
        """Read compressed data to an iterator of uncompressed chunks.

        This method will read data from ``reader``, feed it to a decompressor,
        and emit ``bytes`` chunks representing the decompressed result.

        >>> dctx = zstandard.ZstdDecompressor()
        >>> for chunk in dctx.read_to_iter(fh):
        ...     # Do something with original data.

        ``read_to_iter()`` accepts an object with a ``read(size)`` method that
        will return compressed bytes or an object conforming to the buffer
        protocol.

        ``read_to_iter()`` returns an iterator whose elements are chunks of the
        decompressed data.

        The size of requested ``read()`` from the source can be specified:

        >>> dctx = zstandard.ZstdDecompressor()
        >>> for chunk in dctx.read_to_iter(fh, read_size=16384):
        ...     pass

        It is also possible to skip leading bytes in the input data:

        >>> dctx = zstandard.ZstdDecompressor()
        >>> for chunk in dctx.read_to_iter(fh, skip_bytes=1):
        ...     pass

        .. tip::

           Skipping leading bytes is useful if the source data contains extra
           *header* data. Traditionally, you would need to create a slice or
           ``memoryview`` of the data you want to decompress. This would create
           overhead. It is more efficient to pass the offset into this API.

        Similarly to :py:meth:`ZstdCompressor.read_to_iter`, the consumer of the
        iterator controls when data is decompressed. If the iterator isn't consumed,
        decompression is put on hold.

        When ``read_to_iter()`` is passed an object conforming to the buffer protocol,
        the behavior may seem similar to what occurs when the simple decompression
        API is used. However, this API works when the decompressed size is unknown.
        Furthermore, if feeding large inputs, the decompressor will work in chunks
        instead of performing a single operation.

        :param reader:
            Source of compressed data. Can be any object with a
            ``read(size)`` method or any object conforming to the buffer
            protocol.
        :param read_size:
            Integer size of data chunks to read from ``reader`` and feed into
            the decompressor.
        :param write_size:
            Integer size of data chunks to emit from iterator.
        :param skip_bytes:
            Integer number of bytes to skip over before sending data into
            the decompressor.
        :return:
            Iterator of ``bytes`` representing uncompressed data.
        """
        # The skipped prefix must fit inside the very first read()/slice below.
        if skip_bytes >= read_size:
            raise ValueError("skip_bytes must be smaller than read_size")
        # Input may be a stream (has read()) or a buffer (indexable + len()).
        if hasattr(reader, "read"):
            have_read = True
        elif hasattr(reader, "__getitem__"):
            have_read = False
            buffer_offset = 0
            size = len(reader)
        else:
            raise ValueError(
                "must pass an object with a read() method or "
                "conforms to buffer protocol"
            )
        if skip_bytes:
            if have_read:
                reader.read(skip_bytes)
            else:
                if skip_bytes > size:
                    raise ValueError("skip_bytes larger than first input chunk")
                buffer_offset = skip_bytes
        self._ensure_dctx()
        in_buffer = ffi.new("ZSTD_inBuffer *")
        out_buffer = ffi.new("ZSTD_outBuffer *")
        # Single reusable output buffer; out_buffer.pos is reset after each yield.
        dst_buffer = ffi.new("char[]", write_size)
        out_buffer.dst = dst_buffer
        out_buffer.size = len(dst_buffer)
        out_buffer.pos = 0
        while True:
            assert out_buffer.pos == 0
            if have_read:
                read_result = reader.read(read_size)
            else:
                remaining = size - buffer_offset
                slice_size = min(remaining, read_size)
                read_result = reader[buffer_offset : buffer_offset + slice_size]
                buffer_offset += slice_size
            # No new input. Break out of read loop.
            if not read_result:
                break
            # Feed all read data into decompressor and emit output until
            # exhausted.
            read_buffer = ffi.from_buffer(read_result)
            in_buffer.src = read_buffer
            in_buffer.size = len(read_buffer)
            in_buffer.pos = 0
            while in_buffer.pos < in_buffer.size:
                assert out_buffer.pos == 0
                zresult = lib.ZSTD_decompressStream(
                    self._dctx, out_buffer, in_buffer
                )
                if lib.ZSTD_isError(zresult):
                    raise ZstdError(
                        "zstd decompress error: %s" % _zstd_error(zresult)
                    )
                if out_buffer.pos:
                    data = ffi.buffer(out_buffer.dst, out_buffer.pos)[:]
                    out_buffer.pos = 0
                    yield data
                # A 0 return from ZSTD_decompressStream signals the end of a
                # frame; this generator does not read across frames.
                if zresult == 0:
                    return
            # Repeat loop to collect more input data.
            continue
        # If we get here, input is exhausted.
def stream_writer(
self,
writer,
write_size=DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE,
write_return_read=True,
closefd=True,
):
"""
Push-based stream wrapper that performs decompression.
This method constructs a stream wrapper that conforms to the
``io.RawIOBase`` interface and performs transparent decompression
when writing to a wrapper stream.
See :py:class:`zstandard.ZstdDecompressionWriter` for more documentation
and usage examples.
:param writer:
Destination for decompressed output. Can be any object with a
``write(data)``.
:param write_size:
Integer size of chunks to ``write()`` to ``writer``.
:param write_return_read:
Whether ``write()`` should return the number of bytes of input
consumed. If False, ``write()`` returns the number of bytes sent
to the inner stream.
:param closefd:
Whether to ``close()`` the inner stream when this stream is closed.
:return:
:py:class:`zstandard.ZstdDecompressionWriter`
"""
if not hasattr(writer, "write"):
raise ValueError("must pass an object with a write() method")
return ZstdDecompressionWriter(
self, writer, write_size, write_return_read, closefd=closefd,
)
def copy_stream(
self,
ifh,
ofh,
read_size=DECOMPRESSION_RECOMMENDED_INPUT_SIZE,
write_size=DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE,
):
"""
Copy data between streams, decompressing in the process.
Compressed data will be read from ``ifh``, decompressed, and written
to ``ofh``.
>>> dctx = zstandard.ZstdDecompressor()
>>> dctx.copy_stream(ifh, ofh)
e.g. to decompress a file to another file:
>>> dctx = zstandard.ZstdDecompressor()
>>> with open(input_path, 'rb') as ifh, open(output_path, 'wb') as ofh:
... dctx.copy_stream(ifh, ofh)
The size of chunks being ``read()`` and ``write()`` from and to the
streams can be specified:
>>> dctx = zstandard.ZstdDecompressor()
>>> dctx.copy_stream(ifh, ofh, read_size=8192, write_size=16384)
:param ifh:
Source stream to read compressed data from.
Must have a ``read()`` method.
:param ofh:
Destination stream to write uncompressed data to.
Must have a ``write()`` method.
:param read_size:
The number of bytes to ``read()`` from the source in a single
operation.
:param write_size:
The number of bytes to ``write()`` to the destination in a single
operation.
:return:
2-tuple of integers representing the number of bytes read and
written, respectively.
"""
if not hasattr(ifh, "read"):
raise ValueError("first argument must have a read() method")
if not hasattr(ofh, "write"):
raise ValueError("second argument must have | |
import logging
import time
import random
import math
import os
import json
import spacy
import pandas as pd
import copy
import numpy as np
from tqdm import tqdm
from nltk.tokenize import word_tokenize
from spacy.tokens import Token
from spacy.tokens import Span
from spacy.tokens import Doc
import src.graph as graph
from src.lib.helpers import *
from src.exerpt import Exerpt
from src.lib.helpers import createAnnotation, createFeature, createNode
class ExerptController:
"""
Description: Class containing all exerpts that encapsulates all the logic needed to deal with them
"""
def __init__(self, filepath, evaluate = False):
if evaluate == False:
if(os.path.isdir(filepath)):
self.data = self.readData(filepath)
else:
self.data = {}
self.annotateData()
self.annotateRelations()
self.getData()
else:
if(os.path.isdir(filepath)):
self.data = self.readDataEval(filepath)
else:
self.data = {}
    def __iter__(self):
        # Iterating the controller yields the Exerpt objects, not their names.
        return iter(self.data.values())
def initGraphs(self):
for x in self.data.values():
x.initGraphs()
def annotateData(self):
Token.set_extension("all", default="o", force=True)
Token.set_extension("quant", default="o", force=True)
Token.set_extension("qual", default="o", force=True)
Token.set_extension("me", default="o", force=True)
Token.set_extension("mp", default="o", force=True)
Token.set_extension("other", default="o", force=True)
Token.set_extension("annotId", default=-1, force=True)
tempDict = {"MeasuredEntity":"ME","MeasuredProperty":"MP","Quantity":"QA","Qualifier":"QL"}
for x in self.data.values():
for annot in x.doc._.meAnnots.values():
for key in annot.keys():
if key != "sentences":
tempSpanlen = len(annot[key])
for token in annot[key]["span"]:
token._.annotId = annot[key]["other"]["annotID"]
if type(token._.other) == dict:
token._.other["annotID"].append(annot[key]["other"]["annotID"])
else:
token._.other = copy.deepcopy(annot[key]["other"])
token._.other["annotID"] = [token._.other["annotID"]]
token._.all = tempDict[key]
if key == "MeasuredEntity":
token._.me = tempDict[key]
elif key == "Quantity":
token._.quant = tempDict[key]
elif key == "Qualifier":
token._.qual = tempDict[key]
elif key == "MeasuredProperty":
token._.mp = tempDict[key]
    def annotateRelations(self):
        """Project gold relations (HasQuantity/HasPropety/Qualifies) onto tokens.

        Registers Doc- and Span-level extensions holding entity spans
        (ME/MP/QA/QL as inclusive (start, end) token-offset pairs), relation
        index pairs (qa_me_rel, qa_mp_rel, mp_me_rel, qa_ql_rel) and quantity
        modifiers, then fills them twice: once with document-level offsets and
        once with sentence-level offsets.

        NOTE(review): the extensions are registered with ``default=[]``; spaCy
        does not copy extension defaults, so the same list object may be shared
        across all Docs/Spans — verify this is intended.
        """
        Token.set_extension("relationDoc", default=(0,"root",0), force=True)
        Token.set_extension("relationSent", default=(0,"root",0), force=True)
        Doc.set_extension("ME", default=[], force=True)
        Doc.set_extension("MP", default=[], force=True)
        Doc.set_extension("QL", default=[], force=True)
        Doc.set_extension("QA", default=[], force=True)
        Doc.set_extension("qa_me_rel", default=[], force=True)
        Doc.set_extension("qa_mp_rel", default=[], force=True)
        Doc.set_extension("mp_me_rel", default=[], force=True)
        Doc.set_extension("qa_ql_rel", default=[], force=True)
        Doc.set_extension("modifiers", default=[], force=True)
        Span.set_extension("ME", default=[], force=True)
        Span.set_extension("MP", default=[], force=True)
        Span.set_extension("QL", default=[], force=True)
        Span.set_extension("QA", default=[], force=True)
        Span.set_extension("qa_me_rel", default=[], force=True)
        Span.set_extension("qa_mp_rel", default=[], force=True)
        Span.set_extension("mp_me_rel", default=[], force=True)
        Span.set_extension("qa_ql_rel", default=[], force=True)
        Span.set_extension("modifiers", default=[], force=True)
        # Maps source annotation types to the short tags used elsewhere.
        tempDict = {"MeasuredEntity":"ME","MeasuredProperty":"MP","Quantity":"QA","Qualifier":"QL"}
        for x in self.data.values():
            e = x
            for annot in x.doc._.meAnnots.values():
                #at the document level
                spanQuant = annot["Quantity"]["span"].start
                e.doc._.QA.append((annot["Quantity"]["span"].start,annot["Quantity"]["span"].end- 1,))
                try:
                    e.doc._.modifiers.append(annot["Quantity"]["other"]["mods"])
                except KeyError:
                    # quantities without modifiers get an explicit placeholder
                    e.doc._.modifiers.append(["Nomod"])
                if "MeasuredProperty" in annot:
                    propID = -1
                    e.doc._.MP.append((annot["MeasuredProperty"]["span"].start,annot["MeasuredProperty"]["span"].end- 1,))
                    e.doc._.qa_mp_rel.append((len(e.doc._.QA)-1,len(e.doc._.MP)-1))
                    # only the first token of the span carries the relation triple
                    for y in annot["MeasuredProperty"]["span"]:
                        propID = y.i
                        y._.relationDoc = (y.i, "HasQuantity", spanQuant)
                        break
                    if "MeasuredEntity" in annot:
                        e.doc._.ME.append((annot["MeasuredEntity"]["span"].start,annot["MeasuredEntity"]["span"].end- 1,))
                        e.doc._.mp_me_rel.append((len(e.doc._.MP)-1,len(e.doc._.ME)-1))
                        if propID != -1:
                            # NOTE(review): relation label is spelled "HasPropety"
                            # (sic); any consumer must match this exact string.
                            for y in annot["MeasuredEntity"]["span"]:
                                y._.relationDoc = (y.i, "HasPropety", propID)
                                break
                    else:
                        pass
                        #print("error no mesured entity, but property",e.name, annot)
                elif "MeasuredEntity" in annot:
                    e.doc._.ME.append((annot["MeasuredEntity"]["span"].start,annot["MeasuredEntity"]["span"].end- 1,))
                    e.doc._.qa_me_rel.append((len(e.doc._.QA)-1,len(e.doc._.ME)-1))
                    for y in annot["MeasuredEntity"]["span"]:
                        y._.relationDoc = (y.i, "HasQuantity", spanQuant)
                        break
                try:
                    for y in annot["Qualifier"]["span"]:
                        y._.relationDoc = (y.i, "Qualifies", spanQuant)
                        e.doc._.QL.append((annot["Qualifier"]["span"].start,annot["Qualifier"]["span"].end- 1,))
                        e.doc._.qa_ql_rel.append((len(e.doc._.QA)-1,len(e.doc._.QL)-1))
                        break
                except KeyError:
                    pass
                #at the sentence level
                if len(annot["sentences"]) == 1:
                    sent = annot["sentences"][0]
                    ss = annot["sentences"][0].start
                    spanQuant = annot["Quantity"]["span"].start
                    sent._.QA.append((annot["Quantity"]["span"].start - ss,annot["Quantity"]["span"].end - ss - 1,))
                    try:
                        sent._.modifiers.append(annot["Quantity"]["other"]["mods"])
                    except KeyError:
                        sent._.modifiers.append(["Nomod"])
                    if "MeasuredProperty" in annot:
                        propID = -1
                        sent._.MP.append((annot["MeasuredProperty"]["span"].start - ss,annot["MeasuredProperty"]["span"].end - ss - 1,))
                        sent._.qa_mp_rel.append((len(sent._.QA)-1,len(sent._.MP)-1))
                        for y in annot["MeasuredProperty"]["span"]:
                            propID = y.i
                            y._.relationSent = (y.i - ss, "HasQuantity", spanQuant - ss)
                            break
                        if "MeasuredEntity" in annot:
                            sent._.ME.append((annot["MeasuredEntity"]["span"].start - ss,annot["MeasuredEntity"]["span"].end - ss - 1,))
                            sent._.mp_me_rel.append((len(sent._.MP)-1,len(sent._.ME)-1))
                            if propID != -1:
                                for y in annot["MeasuredEntity"]["span"]:
                                    y._.relationSent = (y.i - ss, "HasPropety", propID - ss)
                                    break
                        else:
                            pass
                            #print("error no mesured entity, but property",e.name, annot)
                    elif "MeasuredEntity" in annot:
                        sent._.ME.append((annot["MeasuredEntity"]["span"].start - ss,annot["MeasuredEntity"]["span"].end - ss - 1,))
                        sent._.qa_me_rel.append((len(sent._.QA)-1,len(sent._.ME)-1))
                        for y in annot["MeasuredEntity"]["span"]:
                            y._.relationSent = (y.i - ss, "HasQuantity", spanQuant - ss)
                            break
                    try:
                        for y in annot["Qualifier"]["span"]:
                            sent._.QL.append((annot["Qualifier"]["span"].start - ss,annot["Qualifier"]["span"].end - ss - 1,))
                            sent._.qa_ql_rel.append((len(sent._.QA)-1,len(sent._.QL)-1))
                            y._.relationSent = (y.i - ss, "Qualifies", spanQuant - ss)
                            break
                    except KeyError:
                        pass
                else:
                    # Annotation crosses sentence boundaries: each span is
                    # recorded in its own sentence.
                    # NOTE(review): unlike the single-sentence case above, no
                    # *_rel index pairs and no token relation triples are
                    # stored here — confirm this is intentional.
                    doc = e.doc
                    sent = doc[annot["Quantity"]["span"].start].sent
                    ss = sent.start
                    sent._.QA.append((annot["Quantity"]["span"].start - ss,annot["Quantity"]["span"].end - ss - 1,))
                    try:
                        sent._.modifiers.append(annot["Quantity"]["other"]["mods"])
                    except KeyError:
                        sent._.modifiers.append(["Nomod"])
                    if "MeasuredProperty" in annot:
                        sent = doc[annot["MeasuredProperty"]["span"].start].sent
                        ss = sent.start
                        sent._.MP.append((annot["MeasuredProperty"]["span"].start - ss,annot["MeasuredProperty"]["span"].end - ss - 1,))
                        if "MeasuredEntity" in annot:
                            sent = doc[annot["MeasuredEntity"]["span"].start].sent
                            ss = sent.start
                            sent._.ME.append((annot["MeasuredEntity"]["span"].start - ss,annot["MeasuredEntity"]["span"].end - ss - 1,))
                        else:
                            pass
                            #print("error no mesured entity, but property",e.name, annot)
                    elif "MeasuredEntity" in annot:
                        sent = doc[annot["MeasuredEntity"]["span"].start].sent
                        ss = sent.start
                        sent._.ME.append((annot["MeasuredEntity"]["span"].start - ss,annot["MeasuredEntity"]["span"].end - ss - 1,))
                    if "Qualifier" in annot:
                        sent = doc[annot["Qualifier"]["span"].start].sent
                        ss = sent.start
                        sent._.QL.append((annot["Qualifier"]["span"].start - ss,annot["Qualifier"]["span"].end - ss - 1,))
def getData(self):
if not os.path.isdir("generatedData"):
os.mkdir("generatedData")
else:
os.system("rm generatedData/*")
allP = open(os.path.join("generatedData", "allP.tsv"),"w",encoding="utf-8")
quantP = open(os.path.join("generatedData", "quantP.tsv"),"w",encoding="utf-8")
qualP = open(os.path.join("generatedData", "qualP.tsv"),"w",encoding="utf-8")
meP = open(os.path.join("generatedData", "meP.tsv"),"w",encoding="utf-8")
mpP = open(os.path.join("generatedData", "mpP.tsv"),"w",encoding="utf-8")
for x in self.data.values():
for token in x.doc:
allP.write(token.text+"\t"+token._.all+"\t"+token.tag_+"\n")
quantP.write(token.text+"\t"+token._.quant+"\t"+token.tag_+"\n")
qualP.write(token.text+"\t"+token._.qual+"\t"+token.tag_+"\n")
meP.write(token.text+"\t"+token._.me+"\t"+token.tag_+"\n")
mpP.write(token.text+"\t"+token._.mp+"\t"+token.tag_+"\n")
allP.write("\n")
quantP.write("\n")
qualP.write("\n")
meP.write("\n")
mpP.write("\n")
allP.close()
quantP.close()
qualP.close()
meP.close()
mpP.close()
allS = open(os.path.join("generatedData", "allS.tsv"),"w",encoding="utf-8")
quantS = open(os.path.join("generatedData", "quantS.tsv"),"w",encoding="utf-8")
qualS = open(os.path.join("generatedData", "qualS.tsv"),"w",encoding="utf-8")
meS = open(os.path.join("generatedData", "meS.tsv"),"w",encoding="utf-8")
mpS = open(os.path.join("generatedData", "mpS.tsv"),"w",encoding="utf-8")
for x in self.data.values():
for sent in x.doc.sents:
for token in sent:
allS.write(token.text+"\t"+token._.all+"\t"+token.tag_+"\n")
quantS.write(token.text+"\t"+token._.quant+"\t"+token.tag_+"\n")
qualS.write(token.text+"\t"+token._.qual+"\t"+token.tag_+"\n")
meS.write(token.text+"\t"+token._.me+"\t"+token.tag_+"\n")
mpS.write(token.text+"\t"+token._.mp+"\t"+token.tag_+"\n")
allS.write("\n")
quantS.write("\n")
qualS.write("\n")
meS.write("\n")
mpS.write("\n")
allS.close()
quantS.close()
qualS.close()
meS.close()
mpS.close()
def readDataEval(self, filepath):
filenames = os.listdir(os.path.join(filepath,"eval","text"))
filenames = [x for x in filenames if x[-4:] == ".txt"]
assert(len(filenames) == 135)
data = {}
for fn in tqdm(filenames):
data[fn[:-4]] = Exerpt(
name = fn[:-4],
txt = str(open(os.path.join(filepath,"eval","text", fn), "r", encoding = "utf-8").read()),
ann = None,
tsv = None,
evaluation = True
)
return data
def readData(self, filepath):
readFileRaw = lambda path : str(open(path, "r", encoding = "utf-8").read())
TRAINPATH = os.path.join(filepath,"train")
TRIALPATH = os.path.join(filepath,"trial")
data = {}
t1 = time.time()
logging.info("Processing Trial Files")
#load all trial data
for fn in tqdm(os.listdir(os.path.join(TRIALPATH,"txt"))):
if fn.endswith('.txt'):
data[fn[:-4]] = Exerpt(
fn[:-4],
readFileRaw(os.path.join(TRIALPATH, "txt", fn[:-4] + ".txt")),
readFileRaw(os.path.join(TRIALPATH, "ann", fn[:-4] + ".ann")),
pd.read_csv(os.path.join(TRIALPATH, "tsv", fn[:-4] + ".tsv"), "\t", header = 0 )
)
logging.info("Processing Train Files")
#load all train data
for fn in tqdm([x for x in os.listdir(os.path.join(TRAINPATH,"tsv"))]):
if fn.endswith('.tsv'):
data[fn[:-4]] = Exerpt(
fn[:-4],
readFileRaw(os.path.join(TRAINPATH, "text", fn[:-4] + ".txt")),
"none",
pd.read_csv(os.path.join(TRAINPATH, "tsv", fn[:-4] + ".tsv"), "\t", header = 0 )
)
t2 = time.time()
logging.info("{} Seconds elapsed".format(t2-t1))
logging.info("{} Minutes elapsed".format((t2-t1)/60))
return data
def getAscii(self, filepath):
if os.path.isdir(filepath):
pass
else:
os.mkdir(filepath)
for x in self.data.values():
x.getAscii(filepath)
def getAsciiConstituent(self, filepath, constituency = False):
if os.path.isdir(filepath):
pass
else:
os.mkdir(filepath)
for x in self.data.values():
x.getAsciiConstituent(filepath, constituency)
def getGateJson(self, filepath):
if os.path.isdir(filepath):
pass
else:
os.mkdir(filepath)
for x in self.data.values():
x.getGateJson(filepath)
def getLatexEval(self):
temp = self.evaluateDocs()
accum = "Type&Precision&Recall&F1\\\\\n\\hline\n"
for x in ["Quantity","ME"]:
accum += "{}&{}&{}&{}\\\\\n\\hline\n".format(x,temp[x+"Precision"],temp[x+"Recall"],temp[x+"F1"])
return accum
def getConfusionMatrix(self,tpe):
temp = self.evaluateDocs()
accum = "&Condition Positive&Condition Negative\\\\\n\\hline\n"
accum +="Predicted {}&{}&{}\\\\\n\\hline\n".format(tpe,temp["h0Count"][tpe],temp["h0Count"]["total"]-temp["h0Count"][tpe])
if tpe in ["Unit","Number"]:
accum +="Predicted Not {}&{}&{}\\\\\n\\hline\n".format(tpe,temp["goldCount"]["Quantity"]-temp["h0Count"][tpe],0)
else:
accum +="Predicted Not {}&{}&{}\\\\\n\\hline\n".format(tpe,temp["goldCount"][tpe]-temp["h0Count"][tpe],0)
return accum
def getAllOfTSV(self, annotType,category):
if annotType not in ["Quantity","MeasuredEntity","MeasuredProperty","Qualifier"]:
logging.error("Error Occured in exerptController class getAllOfTSV()")
return []
tempList = []
for e in self.data.values():
for x in e.measurements.values():
try:
tempList.append(x[annotType][category])
except KeyError:
pass
return tempList
    def extractQuantOther(self):
        """Collect distinct modifier and unit values from Quantity "other" fields.

        The "other" column holds JSON-ish text. Entries that fail strict
        json.loads are repaired heuristically by getDict() (re-quoting bare
        tokens); entries that still fail are printed and skipped.

        Returns:
            (mods, units): two lists of distinct values, in first-seen order.
        """
        quanto = self.getAllOfTSV("Quantity","other")
        def getDict(s):
            # Best-effort repair of a JSON-like string: wrap bare word tokens
            # in double quotes so json.loads can parse the result.
            # NOTE(review): the two str.replace() results below are discarded
            # (strings are immutable) — probably meant `s = s.replace(...)`.
            s.replace("\"","")
            s.replace("'","")
            temp = word_tokenize(s)
            g = ""
            x=0
            while(x<len(temp)):
                # multi-token value (e.g. "1.5 / 2"): merge until "}" or ","
                if temp[x].isalnum() and (temp[x+1].isalnum() or temp[x+1] in [".","/"]):
                    temptemp= ""
                    while(temp[x] not in ["}",","]):
                        temptemp+=temp[x]
                        x+=1
                    g+="\""+temptemp+"\""
                    x-=1
                elif temp[x].isalpha() :
                    g+="\""+temp[x]+"\""
                # bare value sandwiched between ":" and "," or "}"
                elif (temp[x-1] == ":" and temp[x+1] == ",") or (temp[x-1] == ":" and temp[x+1] == "}"):
                    g+="\""+temp[x]+"\""
                else:
                    g+=temp[x]
                x+=1
            return g
        dics = []
        for x in range(len(quanto)):
            if(type(quanto[x]) == str):
                try:
                    dics.append(json.loads(quanto[x]))
                except Exception:
                    try:
                        # NOTE(review): `temp` is unused here — tokenization
                        # happens again inside getDict().
                        temp = word_tokenize(quanto[x])
                        dics.append(json.loads(getDict(quanto[x])))
                    except Exception:
                        print(quanto[x],"Not captured by extractQuantOther()")
                        #DEBUG
                        pass
        allmods = []
        allunits = []
        for x in dics:
            try:
                for y in x["mods"]:
                    allmods.append(y)
            except Exception:
                pass
            try:
                allunits.append(x["unit"])
            except Exception:
                pass
        # dict comprehensions deduplicate while preserving first-seen order
        return list({k:1 for k in allmods}.keys()), list({k:1 for k in allunits}.keys())
def getFolds(self, fold, div = 8):
"""
Splits the given data into a different fold based on input
"""
if fold < 1 or fold > div:
print("Incorrect div to fold number encountered")
return None, None
data = list(self.data.keys())
testSize = math.floor(len(data)/div)
beforeTest = data[:(fold-1)*testSize]
test = data[(fold-1)*testSize:fold*testSize]
afterTest = data[fold*testSize:]
return test, beforeTest + afterTest
def getEncodingSent(self, fold, syspath, skip=[], div = 8, test=None,train=None):
if test == None or train == None:
test, train = self.getFolds(fold, div)
datapath = os.path.join(syspath,"data-fold{}".format(fold))
if not os.path.isdir(datapath):
os.mkdir(datapath)
print(os.path.join(syspath,"data-fold{}".format(fold), "train.txt"))
with open(os.path.join(syspath,"data-fold{}".format(fold), "train.txt"), "w", encoding="utf-8") as f:
for x in train:
f.write(x+"\n")
f.close()
with open(os.path.join(syspath,"data-fold{}".format(fold), "test.txt"), "w", encoding="utf-8") as f:
for x in test:
f.write(x+"\n")
f.close()
train_allS = open(os.path.join(datapath, "train_allS.tsv"),"w",encoding="utf-8")
train_quantS = open(os.path.join(datapath, "train_quantS.tsv"),"w",encoding="utf-8")
train_qualS = open(os.path.join(datapath, | |
part, can swear with all certainty that there is no way to reconcile everyone at once, and what pleases one will provoke whingeing and sulking in another, and will undoubtedly cause a third to reach for his knife… What then are we to do? How should we live? Why, as we like, as our soul urges, disregarding all the braying of philosophers and ethicists, those contained in this tome included–disregard them as we would fairy tales or old wives’ legends.',
'Are you religious?',
'And should we thus submit our life to religion, or should we interpret religion so that it serves our lives instead? There are so many schools as there are philosophers, as many stories as there are human beings… I, for my part, can swear with all certainty that there is no way to reconcile everyone at once, and what pleases one will provoke whingeing and sulking in another, and will undoubtedly cause a third to reach for his knife… What then are we to do? How should we live? Why, as we like, as our soul urges, disregarding all the braying of philosophers and ethicists, those contained in this tome included–disregard them as we would fairy tales or old wives’ legends.',
'Do you believe in God?',
'And should we thus submit our life to religion, or should we interpret religion so that it serves our lives instead? There are so many schools as there are philosophers, as many stories as there are human beings… I, for my part, can swear with all certainty that there is no way to reconcile everyone at once, and what pleases one will provoke whingeing and sulking in another, and will undoubtedly cause a third to reach for his knife… What then are we to do? How should we live? Why, as we like, as our soul urges, disregarding all the braying of philosophers and ethicists, those contained in this tome included–disregard them as we would fairy tales or old wives’ legends.',
'dogs more dangerous than wolves? Don’t think so.',
'It’s truth. Know why?',
'No, but I guess you’re about to tell me.',
'Wolves hunt to fill their bellies. Wild dogs kill for sport.',
'Just like humans.',
'Aye, they’ve learned much from humans. Why not cruelty, too?',
'I thought code cleansed you of your humanity, stripped you of your emotions.',
'You don’t need to be a robot to strip men of their humanity. I’ve seen plenty of examples.',
'Witches are bad.',
'Hatred and prejudice will never be eradicated. And the witch hunts will never be about witches. To have a scapegoat, that iss the key.',
'Are you racist?',
'Hatred and prejudice will never be eradicated. And the witch hunts will never be about witches. To have a scapegoat, that iss the key.',
'What do you think of immigration',
'Hatred and prejudice will never be eradicated. And the witch hunts will never be about witches. To have a scapegoat, that iss the key.',
'You think universals exist as real and distinct entities, or only as mental constructs?',
'What do you think of humans?',
'Long ago, to isolate themselves from a world of beasts, humans began building cities. But since beasts prowl within stone walls as well as they do outside them, this did not allay human fears. The truth is walls guarantee no ones safety. The place where you lock yourself in and lock all else out - that is not your home. Your home is sometimes a place you travel long and far to find.',
'People like to invent monsters and monstrosities. Then they seem less monstrous themselves. When they get blind-drunk, cheat, steal, beat their wives, starve an old woman, when they kill a trapped fox with an axe or riddle the last existing unicorn with arrows, they like to think that the Bane entering cottages at daybreak is more monstrous than they are. They feel better then. They find it easier to live.',
'Time Eats Away At Memories, Distorts Them. Sometimes We Only Remember The Good . . . Sometimes Only The Bad.',
'Do you have memories',
'Time Eats Away At Memories, Distorts Them. Sometimes We Only Remember The Good . . . Sometimes Only The Bad.',
'Easiest way to tell a sorceress - they are perfect, unnatural beauties.',
'Fair in the face, no pimples, no pockmarks. Eyes painted, narrow in the waist, and full breasts.',
'Not seen any like that here. Believe me, I would remember.',
'But you will, eventually… And then remember this - their beauty is not born of nature but of witchcraft.',
'Each is a hundred years old at least. They use spells to erase the years, straighten humps, remove warts and ulcers.',
'What is wrong with that?',
'Jests like that could land you in prison. Or on a pyre.',
'No, <NAME>, it is…',
'How high did she fly, how high?',
'No—no, she never flew… ',
'Why, it‘s sure she did; <NAME> saw her goin‘ over Ingersolls barn, and come down light as bird, he says! ',
'Now, look you, <NAME>; she never…(Enter <NAME>, a well-todo, hard-handed landowner near fifty.) Oh, good morning, <NAME>… ',
'It is a providence the thing is out now! It is a providence. PARRIS: What‘s out, sir, what‘s…?',
'Why, her eyes is closed! Look you, Ann.',
'Why, that‘s strange. Ours is open.',
'Your little Ruth is sick? ',
'I‘d not call it sick, the Devil‘s touch is heavier than sick, it‘s death, y‘know, it‘s death drivin‘ into them forked and hoofed. ',
'Oh, pray not! Why, how does your child ail? ',
'She ails as she must—she never waked this morning but her eyes open and she walks, and hears naught, sees naught, and cannot eat. Her soul is taken, surely. ',
'They say you‘ve sent for Reverend Hale of Beverly? ',
'A precaution only. He has much experience in all demonic arts, and I … ',
'He has indeed, and found by a witch in Beverly last year, and let you remember that. ',
'I pray you, leap not to witchcraft. I know that you, you least of all, Thomas, would ever wish so disastrous a charge laid upon me. We cannot leap to witchcraft. They will howl me out of Salem for such a corruption in my house. ',
'Now, look you, <NAME>; I have taken your part in all contention here, and I would continue; but cannot if you hold back in this. There are hurtful, vengeful spirits layin‘ hands on these children. ',
'But, Thomas, you cannot…',
'Ann! Tell <NAME> what you have done.',
'<NAME>, I have laid seven babies unbaptized in the earth. Believe me, Sir, you never saw more hearty babies born. And yet, each would wither in my arms the very night of their birth. And now, this year, my Ruth, my only-I see her turning strange. A secret child she has become this year, and shrivels like a sucking mouth were pullin‘ on her life, too. And so I thought to send her to your Tituba',
'To Tituba! What may Tituba….? ',
'Tituba knows how to speak to the dead, <NAME>. ',
'Goody Ann, it is a formidable sin to conjure up the dead! ',
'I take it on my soul, but who else may surely tell us who murdered my babies.',
'Woman! ',
'They were murdered, And mark this proof! –mark it! Last night my Ruth were ever so close to their little spirits, I know it, sir. For how else is she stuck dumb now except some power of darkness would stop her mouth! It is a marvelous sign',
'Don‘t you understand it, sir? There is a murdering witch among us bound to keep herself in the dark. Let your enemies | |
# <reponame>siddheshshaji/FLAML
import numpy as np
import math
from flaml.tune import Trial
from flaml.scheduler import TrialScheduler
import logging
logger = logging.getLogger(__name__)
class OnlineTrialRunner:
    """Class for the OnlineTrialRunner.

    Schedules 'live' trials online, maintains a champion trial plus a pool of
    challenger trials, and coordinates a searcher (which generates trials)
    with a scheduler (which allocates resources to the running trials).
    """
    # ************NOTE about the status of a trial***************
    # Trial.PENDING: All trials are set to be pending when first added into the OnlineTrialRunner until
    # it is selected to run. By this definition, a trial with status Trial.PENDING is a challenger
    # trial added to the OnlineTrialRunner but never been selected to run.
    # It denotes the starting of trial's lifespan in the OnlineTrialRunner.
    # Trial.RUNNING: It indicates that this trial is one of the concurrently running trials.
    # The max number of Trial.RUNNING trials is running_budget.
    # The status of a trial will be set to Trial.RUNNING the next time it selected to run.
    # A trial's status may have the following change:
    # Trial.PENDING -> Trial.RUNNING
    # Trial.PAUSED - > Trial.RUNNING
    # Trial.PAUSED: The status of a trial is set to Trial.PAUSED once it is removed from the running trials.
    # Trial.RUNNING - > Trial.PAUSED
    # Trial.TERMINATED: set the status of a trial to Trial.TERMINATED when you never want to select it.
    # It denotes the real end of a trial's lifespan.
    # Status change routine of a trial:
    # Trial.PENDING -> (Trial.RUNNING -> Trial.PAUSED -> Trial.RUNNING -> ...) -> Trial.TERMINATED(optional)
    # Seed for the internal RandomState used to break ties when ranking trials.
    RANDOM_SEED = 123456
    WARMSTART_NUM = 100
    def __init__(
        self,
        max_live_model_num: int,
        searcher=None,
        scheduler=None,
        champion_test_policy="loss_ucb",
        **kwargs
    ):
        """Constructor.

        Args:
            max_live_model_num: The maximum number of 'live'/running models allowed.
            searcher: A class for generating Trial objects progressively.
                The ConfigOracle is implemented in the searcher.
            scheduler: A class for managing the 'live' trials and allocating the
                resources for the trials.
            champion_test_policy: A string to specify what test policy to test for
                champion. Currently can choose from ['loss_ucb', 'loss_avg', 'loss_lcb', None].
            **kwargs: Optional settings. Recognized keys:
                remove_worse (bool): remove trials failing the WorseThan test
                    (default True).
                bound_trial_num (bool): upper-bound the number of active trials
                    (default False).
        """
        # ************A NOTE about the input searcher and scheduler******
        # Required methods of the searcher:
        # - next_trial()
        #     Generate the next trial to add.
        # - set_search_properties(metric: Optional[str], mode: Optional[str],
        #     config: Optional[dict], setting: Optional[dict])
        #     Generate new challengers based on the current champion and update the challenger list
        # - on_trial_result(trial_id: str, result: Dict)
        #     Report results to the searcher.
        # Required methods of the scheduler:
        # - on_trial_add(trial_runner, trial: Trial)
        #     It adds candidate trials to the scheduler. It is called inside of the add_trial
        #     function in the TrialRunner.
        # - on_trial_remove(trial_runner, trial: Trial)
        #     Remove terminated trials from the scheduler.
        # - on_trial_result(trial_runner, trial: Trial, result: Dict)
        #     Report results to the scheduler.
        # - choose_trial_to_run(trial_runner) -> Optional[Trial]
        #     Among them, on_trial_result and choose_trial_to_run are the most important methods
        # *****************************************************************
        # OnlineTrialRunner setting
        self._searcher = searcher
        self._scheduler = scheduler
        self._champion_test_policy = champion_test_policy
        self._max_live_model_num = max_live_model_num
        self._remove_worse = kwargs.get("remove_worse", True)
        self._bound_trial_num = kwargs.get("bound_trial_num", False)
        self._no_model_persistence = True
        # stores all the trials added to the OnlineTrialRunner
        # i.e., include the champion and all the challengers
        self._trials = []
        self._champion_trial = None
        self._best_challenger_trial = None
        self._first_challenger_pool_size = None
        self._random_state = np.random.RandomState(self.RANDOM_SEED)
        self._running_trials = set()
        # initially schedule up to max_live_model_num of live models and
        # set the first trial as the champion (which is done inside self.step())
        self._total_steps = 0
        logger.info("init step %s", self._max_live_model_num)
        # TODO: add more comments
        # NOTE: step() is invoked during construction; all attributes above
        # must be initialized before this call.
        self.step()
        assert self._champion_trial is not None
    @property
    def champion_trial(self) -> Trial:
        """The champion trial (guaranteed non-None after __init__ completes)."""
        return self._champion_trial
    @property
    def running_trials(self):
        """The set of running/'live' trials (at most max_live_model_num)."""
        return self._running_trials
    def step(self, data_sample=None, prediction_trial_tuple=None):
        """Schedule one trial to run each time it is called.

        Args:
            data_sample: One data example.
            prediction_trial_tuple: A list of information containing
                (prediction_made, prediction_trial).
        """
        # TODO: Will remove prediction_trial_tuple.
        # NOTE: This function consists of the following several parts:
        # * Update model:
        # 0. Update running trials using observations received.
        # * Tests for Champion:
        # 1. Test for champion (BetterThan test, and WorseThan test)
        # 1.1 BetterThan test
        # 1.2 WorseThan test: a trial may be removed if WorseThan test is triggered
        # * Online Scheduling:
        # 2. Report results to the searcher and scheduler (the scheduler will return a decision about
        # the status of the running trials).
        # 3. Pause or stop a trial according to the scheduler's decision.
        # Add a trial into the OnlineTrialRunner if there are opening slots.
        # ***********Update running trials with observation*******************
        if data_sample is not None:
            # NOTE(review): prediction_trial_tuple is indexed unconditionally
            # below, so it must be non-None whenever data_sample is provided
            # -- confirm with callers.
            self._total_steps += 1
            prediction_made, prediction_trial = (
                prediction_trial_tuple[0],
                prediction_trial_tuple[1],
            )
            # assert prediction_trial.status == Trial.RUNNING
            trials_to_pause = []
            # iterate over a copy: stop_trial/run_trial may mutate the set
            for trial in list(self._running_trials):
                if trial != prediction_trial:
                    y_predicted = trial.predict(data_sample)
                else:
                    # reuse the prediction that was already made for this trial
                    y_predicted = prediction_made
                trial.train_eval_model_online(data_sample, y_predicted)
                logger.debug(
                    "running trial at iter %s %s %s %s %s %s",
                    self._total_steps,
                    trial.trial_id,
                    trial.result.loss_avg,
                    trial.result.loss_cb,
                    trial.result.resource_used,
                    trial.resource_lease,
                )
                # report result to the searcher
                self._searcher.on_trial_result(trial.trial_id, trial.result)
                # report result to the scheduler and the scheduler makes a decision about
                # the running status of the trial
                decision = self._scheduler.on_trial_result(self, trial, trial.result)
                # set the status of the trial according to the decision made by the scheduler
                logger.debug(
                    "trial decision %s %s at step %s",
                    decision,
                    trial.trial_id,
                    self._total_steps,
                )
                if decision == TrialScheduler.STOP:
                    self.stop_trial(trial)
                elif decision == TrialScheduler.PAUSE:
                    trials_to_pause.append(trial)
                else:
                    self.run_trial(trial)
            # ***********Statistical test of champion*************************************
            self._champion_test()
            # Pause the trial after the tests because the tests involves the reset of the trial's result
            for trial in trials_to_pause:
                self.pause_trial(trial)
        # ***********Add and schedule new trials to run if there are opening slots****
        # Add trial if needed: add challengers into consideration through _add_trial_from_searcher()
        # if there are available slots
        for _ in range(self._max_live_model_num - len(self._running_trials)):
            self._add_trial_from_searcher()
        # Scheduling: schedule up to max_live_model_num number of trials to run
        # (set the status as Trial.RUNNING)
        while self._max_live_model_num > len(self._running_trials):
            trial_to_run = self._scheduler.choose_trial_to_run(self)
            if trial_to_run is not None:
                self.run_trial(trial_to_run)
            else:
                break
def get_top_running_trials(self, top_ratio=None, top_metric="ucb") -> list:
"""Get a list of trial ids, whose performance is among the top running trials."""
running_valid_trials = [
trial for trial in self._running_trials if trial.result is not None
]
if not running_valid_trials:
return
if top_ratio is None:
top_number = 0
elif isinstance(top_ratio, float):
top_number = math.ceil(len(running_valid_trials) * top_ratio)
elif isinstance(top_ratio, str) and "best" in top_ratio:
top_number = 1
else:
raise NotImplementedError
if "ucb" in top_metric:
test_attribute = "loss_ucb"
elif "avg" in top_metric:
test_attribute = "loss_avg"
elif "lcb" in top_metric:
test_attribute = "loss_lcb"
else:
raise NotImplementedError
top_running_valid_trials = []
logger.info(
"Running trial ids %s", [trial.trial_id for trial in running_valid_trials]
)
self._random_state.shuffle(running_valid_trials)
results = [
trial.result.get_score(test_attribute) for trial in running_valid_trials
]
# sorted result (small to large) index
sorted_index = np.argsort(np.array(results))
for i in range(min(top_number, len(running_valid_trials))):
top_running_valid_trials.append(running_valid_trials[sorted_index[i]])
logger.info(
"Top running ids %s", [trial.trial_id for trial in top_running_valid_trials]
)
return top_running_valid_trials
def _add_trial_from_searcher(self):
"""Add a new trial to this TrialRunner.
NOTE:
The new trial is acquired from the input search algorithm, i.e. self._searcher.
A 'new' trial means the trial is not in self._trial.
"""
# (optionally) upper bound the number of trials in the OnlineTrialRunner
if self._bound_trial_num and self._first_challenger_pool_size is not None:
active_trial_size = len(
[t for t in self._trials if t.status != Trial.TERMINATED]
)
trial_num_upper_bound = (
int(
round(
(np.log10(self._total_steps) + 1)
* self._first_challenger_pool_size
)
)
if self._first_challenger_pool_size
else np.inf
)
if active_trial_size > trial_num_upper_bound:
logger.info(
"Not adding new trials: %s exceeds trial limit %s.",
active_trial_size,
trial_num_upper_bound,
)
return None
# output one trial from the trial pool (new challenger pool) maintained in the searcher
# Assumption on the searcher: when all frontiers (i.e., all the challengers generated
# based on the current champion) of the current champion are added, calling next_trial()
# will return None
trial = self._searcher.next_trial()
if trial is not None:
self.add_trial(trial) # dup checked in add_trial
# the champion_trial is initially None, so we need to set it up the first time
# a valid trial is added.
# Assumption on self._searcher: the first trial generated is the champion trial
if self._champion_trial is None:
logger.info("Initial set up of the champion trial %s", trial.config)
self._set_champion(trial)
else:
self._all_new_challengers_added = True
if self._first_challenger_pool_size is None:
self._first_challenger_pool_size = len(self._trials)
| |
if necessary, by
removing lower-quality *rules. Return a list containing any rules
whose numerosities dropped to zero as a result of this call. (The
list may be empty, if no rule's numerosity dropped to 0.) The
model argument is a ClassifierSet instance which utilizes this
algorithm.
Usage:
deleted_rules = model.algorithm.prune(model)
Arguments:
model: A ClassifierSet instance whose population may need to
be reduced in size.
Return:
A possibly empty list of ClassifierRule instances which were
removed entirely from the classifier set because their
numerosities dropped to 0.
"""
raise NotImplementedError()
class ActionSet:
    """A set of rules (classifiers) drawn from the same classifier set, all
    suggesting the same action and having conditions which matched the same
    situation, together with information as to the conditions under which
    the rules matched together.

    Usage:
        rules = {
            rule.condition: rule
            for rule in model
            if rule.action == action and rule.condition(situation)
        }
        action_set = ActionSet(model, situation, action, rules)

    Init Arguments:
        model: The ClassifierSet from which this action set was drawn.
        situation: The situation against which the classifier rules in this
            action set all matched.
        action: The action which the classifier rules in this action set
            collectively suggest.
        rules: A dictionary of the form {rule.condition: rule}, where each
            value is a ClassifierRule instance and its associated key is
            the condition of that rule.

    NOTE: For efficiency, the ActionSet instance uses the rules dictionary
        directly rather than making a copy. You should not modify this
        dictionary once it has been passed to the ActionSet.
    """

    def __init__(self, model, situation, action, rules):
        assert isinstance(model, ClassifierSet)
        assert isinstance(rules, dict)
        # Every rule must be keyed by its own condition and must actually
        # match the given situation.
        assert all(
            isinstance(rule, ClassifierRule) and
            rule.condition == condition and
            rule.condition(situation)
            for condition, rule in rules.items()
        )

        self._model = model
        self._situation = situation
        self._action = action
        self._rules = rules  # {condition: rule}

        # Lazily computed by _compute_prediction() on first property access.
        self._prediction = None
        self._prediction_weight = None

        # Capture the time stamp of the model at which the action set was
        # created, since this can be expected to change later.
        self._time_stamp = model.time_stamp

    @property
    def model(self):
        """The classifier set from which the classifier rules in the action
        set were drawn."""
        return self._model

    @property
    def situation(self):
        """The common situation against which all the classifier rules'
        conditions matched."""
        return self._situation

    @property
    def action(self):
        """The common action suggested by all the classifier rules in the
        action set."""
        return self._action

    @property
    def conditions(self):
        """An iterator over the conditions of the classifier rules in the
        action set."""
        return iter(self._rules)

    @property
    def time_stamp(self):
        """The time stamp of the classifier set at which this action set
        was generated."""
        return self._time_stamp

    def _compute_prediction(self):
        """Compute the combined prediction and prediction weight for this
        action set. The combined prediction is the weighted average of the
        individual predictions of the classifiers. The combined prediction
        weight is the sum of the individual prediction weights of the
        classifiers.

        Usage:
            Do not call this method directly. Use the prediction and/or
            prediction_weight properties instead.

        Arguments: None
        Return: None
        """
        total_weight = 0
        total_prediction = 0
        for rule in self._rules.values():
            total_weight += rule.prediction_weight
            total_prediction += (rule.prediction *
                                 rule.prediction_weight)
        # "or 1" guards against division by zero when all weights are 0;
        # the combined prediction is then simply 0.
        self._prediction = total_prediction / (total_weight or 1)
        self._prediction_weight = total_weight

    @property
    def prediction(self):
        """The combined prediction of expected payoff for taking the
        suggested action given the situation. This is the weighted average
        of the individual predictions of the classifiers constituting this
        action set."""
        if self._prediction is None:
            self._compute_prediction()
        return self._prediction

    @property
    def prediction_weight(self):
        """The total weight of the combined prediction made by this action
        set. This is the sum of the weights of the individual predictions
        made by the classifiers constituting this action set."""
        if self._prediction_weight is None:
            self._compute_prediction()
        return self._prediction_weight

    def __contains__(self, rule):
        """Defining this determines the behavior of "item in instance"."""
        assert isinstance(rule, ClassifierRule)
        return (
            rule.action == self._action and
            rule.condition in self._rules
        )

    def __iter__(self):
        """Defining this determines the behavior of iter(instance)."""
        return iter(self._rules.values())

    def __getitem__(self, rule):
        """Return the existing version of the classifier rule having the
        same condition and action and appearing in this action set. This
        is useful for looking up a rule to avoid duplication."""
        # Use equality for the action check, consistent with __contains__;
        # the previous identity test ("is") wrongly rejected value-equal
        # actions that are distinct objects.
        assert rule.action == self._action
        return self._rules[rule.condition]

    def remove(self, rule):
        """Remove this classifier rule from the action set. (Does not
        affect numerosity.) A KeyError is raised if the rule is not present
        in the action set when this method is called.

        Usage:
            if rule in action_set:
                action_set.remove(rule)

        Arguments:
            rule: The ClassifierRule instance to be removed.
        Return: None
        """
        del self._rules[rule.condition]
class MatchSet:
    """A collection of coincident action sets. This represents the set of
    all rules that matched within the same situation, organized into groups
    according to which action each rule recommends.

    Usage:
        from collections import defaultdict
        by_action = defaultdict(dict)
        for rule in model:
            if rule.condition(situation):
                by_action[action][rule.condition] = rule
        match_set = MatchSet(model, situation, by_action)

    Init Arguments:
        model: The ClassifierSet instance from which the classifier rules
            in this match set were drawn.
        situation: The situation against which the rules in this match set
            all matched.
        by_action: A 2-tiered dictionary of the form {action: {condition:
            rule}}, containing the classifier rules in this match set. The
            values of the inner dictionary should be ClassifierRule
            instances, and for each of them,
                assert by_action[rule.action][rule.condition] is rule
            should succeed.

    NOTE: For efficiency, the MatchSet instance uses the inner dictionaries
        in by_action directly rather than making copies of them. You
        should not modify these dictionaries once they have been passed
        to the MatchSet.
    """
    def __init__(self, model, situation, by_action):
        # by_action maps {action: {condition: rule}}; the inner dicts are
        # used directly (not copied) -- see the class-level NOTE.
        assert isinstance(model, ClassifierSet)
        assert isinstance(by_action, dict)
        self._model = model
        self._situation = situation
        self._algorithm = model.algorithm
        # Snapshot of the model's time stamp at construction time.
        self._time_stamp = model.time_stamp
        # One ActionSet per suggested action, sharing the inner rule dicts.
        self._action_sets = {
            action: ActionSet(model, situation, action, rules)
            for action, rules in by_action.items()
        }
        # Lazily computed caches and action-selection state.
        self._best_actions = None
        self._best_prediction = None
        self._selected_action = None
        self._payoff = 0
        self._closed = False
    @property
    def model(self):
        """The classifier set from which this match set was drawn."""
        return self._model
    @property
    def situation(self):
        """The situation against which the rules in this match set all
        matched."""
        return self._situation
    @property
    def algorithm(self):
        """The algorithm managing the model that produced this match
        set."""
        return self._algorithm
    @property
    def time_stamp(self):
        """The time stamp of the model at which this match set was
        produced. (Snapshot taken at construction; the model's own time
        stamp may advance afterwards.)"""
        return self._time_stamp
    def __iter__(self):
        """Defining this determines the behavior of this class with respect
        to iteration, including the "iter(instance)" and "for item in
        instance:" constructs.

        Iteration yields the suggested actions (the keys), not the
        ActionSet instances."""
        return iter(self._action_sets)
    def __len__(self):
        """Defining this determines the behavior of len(instance).

        The length is the number of distinct actions suggested."""
        return len(self._action_sets)
    def __getitem__(self, action):
        """Defining this determines the behavior of instance[key].

        Raises KeyError if no rule in the match set suggested the action."""
        return self._action_sets[action]
    def get(self, action, default=None):
        """Return the action set, if any, associated with this action. If
        no action set is associated with this action, return the default.
        If no default is provided, None is used.

        Usage:
            action_set = match_set.get(action)

        Arguments:
            action: The action suggested by the desired ActionSet.
            default: The value returned if no such ActionSet exists. If no
                value is provided, None is used.
        """
        return self._action_sets.get(action, default)
    @property
    def best_prediction(self):
        """The highest value from among the predictions made by the action
        sets in this match set.

        Computed lazily and cached; remains None for an empty match set."""
        if self._best_prediction is None and self._action_sets:
            self._best_prediction = max(
                action_set.prediction
                for action_set in self._action_sets.values()
            )
        return self._best_prediction
    @property
    def best_actions(self):
        """A tuple containing the actions whose action sets have the best
        prediction.

        Ties are all included; computed lazily and cached."""
        if self._best_actions is None:
            best_prediction = self.best_prediction
            self._best_actions = tuple(
                action
                for action, action_set in self._action_sets.items()
                if action_set.prediction == best_prediction
            )
        return self._best_actions
def select_action(self):
"""Select an action according to the action selection strategy of
the associated algorithm. If an action has already been selected,
raise a ValueError instead.
Usage:
if match_set.selected_action is None:
match_set.select_action()
Arguments: None
Return:
The action that was selected by the action selection strategy.
"""
if self._selected_action is not None:
raise ValueError("The action has already been selected.")
strategy = self._algorithm.action_selection_strategy
self._selected_action = strategy(self)
return self._selected_action
    def _get_selected_action(self):
        """Getter method for the selected_action property.

        Returns None if no action has been selected yet."""
        return self._selected_action
def _set_selected_action(self, action):
"""Setter method for the selected_action property."""
assert action in self._action_sets
if self._selected_action is not None:
raise ValueError("The action has already been | |
#!/usr/bin/env python
# native packages
import argparse
from collections import defaultdict
import glob
import logging
import os
import shutil
import sys
import time
# external packages
import dask
import numpy as np
import pandas as pd
import yaml
# local packages
import download.download as download
import label.labeler as labeler
import process.process as process
import stat_utils.statshandler as statshandler
import utils.datahandler as datahandler
import utils.initialize as initialize
import utils.metadata as metadata
import utils.sendit as sendit
__taskname__ = "pipeline"
__author__ = "<NAME>"
__version__ = "1.0"
__vdate__ = "22-Jan-2019"
# __all__ = 'CosmicRayPipeline'
# Command-line interface for the cosmic-ray pipeline. Each flag maps to one
# argument of the CosmicRayPipeline constructor below.
parser = argparse.ArgumentParser()
parser.add_argument('-aws',
                    action='store_true',
                    help='Flag for using AWS for downloads. Only to be used '
                         'when the pipeline is run on EC2',
                    default=False)
parser.add_argument('-download',
                    help='Download the data',
                    action='store_true',
                    default=False)
parser.add_argument('-process',
                    help='Process the raw data',
                    action='store_true',
                    default=False)
parser.add_argument('-ccd',
                    help='Switch for processing CCD data',
                    action='store_true',
                    default=False)
parser.add_argument('-ir',
                    help='Switch for processing IR data',
                    action='store_true',
                    default=False)
parser.add_argument('-chunks',
                    help='Number of chunks to break the generated results into. '
                         '\nFor example, if `-chunks 2` is passed, then two HDF5 '
                         'files for each statistic will be generated. The first '
                         'half of the dataset will be written to file 1 and the '
                         'second half will be written to file 2. This is to '
                         'offset the degradation in write time as the number of '
                         'datasets stored in the HDF5 increases.',
                    type=int,
                    default=4)
parser.add_argument('-analyze',
                    help='Switch for analyzing and extract cosmic ray statistics',
                    action='store_true',
                    default=False)
parser.add_argument('-use_dq',
                    help='Switch for using the DQ arrays to perform labeling',
                    action='store_true',
                    default=False)
parser.add_argument('-instr',
                    default='stis_ccd',
                    help='HST instrument to process (acs_wfc, '
                         'wfc3_uvis, stis_ccd, acs_hrc)')
parser.add_argument('-initialize',
                    action='store_true',
                    default=False,
                    help='Initialize the HDF5 files for the instrument. \n'
                         '\n**Warning**: Should only be included the first time'
                         ' the pipeline is run becuase it will overwrite any '
                         'pre-existing HDF5 files.')
# Module-level logger configuration shared by the whole pipeline.
logging.basicConfig(format='%(levelname)-4s '
                           '[%(module)s.%(funcName)s:%(lineno)d]'
                           ' %(message)s',
                    )
LOG = logging.getLogger('CosmicRayPipeline')
LOG.setLevel(logging.INFO)
class CosmicRayPipeline(object):
def __init__(self, aws=None, analyze=None, download=None, ccd=None,
chunks=None, ir=None, instr=None, initialize=None,
process=None, store_downloads=None, use_dq=None, test=None):
""" Class for combining the individual tasks into a single pipeline.
"""
# Initialize Args
self._aws = aws
self._analyze = analyze
self._download = download
self._ccd = ccd
self._chunks = chunks
self._ir = ir
self._instr = instr.upper()
self._initialize = initialize
self._process = process
self._store_downloads = store_downloads
self._use_dq = use_dq
# Necessary evil to dynamically build absolute paths
self._mod_dir = os.path.dirname(os.path.abspath(__file__))
self._base = os.path.join('/', *self._mod_dir.split('/')[:-1])
self._flist = None
self._processing_times = {
'download': 0,
'cr_rejection': 0,
'analysis': 0
}
if test:
self._cfg_file = os.path.join(self._base,
'CONFIG',
'testing_pipeline_config.yaml')
else:
self._cfg_file = os.path.join(self._base,
'CONFIG',
'pipeline_config.yaml')
# Load the CONFIG file
with open(self._cfg_file, 'r') as fobj:
self._cfg = yaml.load(fobj)
self._instr_cfg = self.cfg[self._instr]
self._failed_observations = os.path.join(
self.base,
*self._instr_cfg['failed'].split('/')
)
self._search_pattern = os.path.join(
self.base,
*self.instr_cfg['search_pattern'].split('/')
)
@property
def aws(self):
return self._aws
@aws.getter
def aws(self):
"""Switch for toggling on AWS functionality for downloads"""
return self._aws
@property
def analyze(self):
return self._analyze
@analyze.getter
def analyze(self):
"""Switch for toggling on the analysis step of the pipeline """
return self._analyze
@property
def base(self):
return self._base
@base.getter
def base(self):
"""Base path of the pipleine repository `~/hst_cosmic_rays/`"""
return self._base
@property
def cfg(self):
return self._cfg
@cfg.getter
def cfg(self):
"""Configuration object returned by parsing the
:py:attr:`~pipeline_updated.CosmicRayPipeline.cfg_file`"""
return self._cfg
@property
def chunks(self):
return self._chunks
@chunks.getter
def chunks(self):
"""Number of chunks to break the entire dataset into"""
return self._chunks
@chunks.setter
def chunks(self, value):
self._chunks = value
@property
def ccd(self):
return self._ccd
@ccd.getter
def ccd(self):
"""Switch for toggling on the CCD analysis"""
return self._ccd
@property
def cfg_file(self):
return self._cfg_file
@cfg_file.getter
def cfg_file(self):
"""Path to the pipeline configuration file
The config file is stored in `~/hst_cosmic_rays/CONFIG/`"""
return self._cfg_file
@property
def download(self):
return self._download
@download.getter
def download(self):
"""Switch for toggling on the download step of the pipeline"""
return self._download
@property
def failed_observation(self):
"""List of any observations that failed to be processed for given month"""
return self._failed_observations
@failed_observation.setter
def failed_observation(self, value):
pass
@property
def instr(self):
return self._instr
@instr.getter
def instr(self):
"""Name of the instrument that is going to be analyzed"""
return self._instr
@property
def instr_cfg(self):
return self._instr_cfg
@instr_cfg.getter
def instr_cfg(self):
"""Instrument specific configuration"""
return self._instr_cfg
@property
def initialize(self):
return self._initialize
@initialize.getter
def initialize(self):
"""Switch for toggling on the initialization of HDF5 data files"""
return self._initialize
@property
def ir(self):
return self._ir
@ir.getter
def ir(self):
"""Switch for toggling on the IR analysis"""
return self._ir
@property
def process(self):
return self._process
@process.getter
def process(self):
"""Switch for toggling on the processing step of the pipeline"""
return self._process
@property
def processing_times(self):
return self._processing_times
@processing_times.getter
def processing_times(self):
"""Container for holding the processing time required by each step"""
return self._processing_times
@processing_times.setter
def processing_times(self, value):
self._processing_times = value
@property
def flist(self):
return self._flist
@flist.getter
def flist(self):
"""List of files to process"""
return self._flist
@flist.setter
def flist(self, value):
self._flist = value
@property
def search_pattern(self):
return self._search_pattern
@search_pattern.getter
def search_pattern(self):
"""Search pattern used to find files to process"""
return self._search_pattern
@search_pattern.setter
def search_pattern(self, value):
self._search_pattern = value
@property
def store_downloads(self):
return self._store_downloads
@store_downloads.getter
def store_downloads(self):
"""Switch for saving the downloaded files"""
return self._store_downloads
@property
def use_dq(self):
return self._use_dq
@use_dq.getter
def use_dq(self):
"""Switch for specifying what to use in the labeling analysis"""
return self._use_dq
def run_downloader(self, date_range, downloader):
"""Download the data
Parameters
----------
date_range : Tuple
Tuple of `astropy.time.Time` objects defining the one month interval
downloader : :py:class:`~download.download.Downloader`
Returns
-------
runtime : float
Time required to process in minutes
"""
start_time = time.time()
downloader.query(date_range=date_range, aws=self.aws)
downloader.download(date_range[0].datetime.date().isoformat())
end_time = time.time()
return (end_time - start_time)/60
    def run_labeling_single(self, fname):
        """Run the labeling analysis on a single image

        Convenience method designed to facilitate the parallelization of the
        labeling analysis

        Parameters
        ----------
        fname : str
            Full path to file to be analyzed

        Returns
        -------
        file_metadata : :py:class:`~utils.metadata.GenerateMetadata`
            Object containing relevant metadata for input file
        cr_stats_dict : `dict`
            Dictionary containing the computed statistics

        """
        file_metadata = metadata.GenerateMetadata(fname,
                                                  instr=self.instr,
                                                  instr_cfg=self.instr_cfg)
        # Get image metadata
        file_metadata.get_image_data()
        # Get pointing info
        file_metadata.get_wcs_info()
        # Get HST location info
        file_metadata.get_observatory_info()
        cr_label = labeler.CosmicRayLabel(
            fname,
            gain_keyword=self.instr_cfg['instr_params']['gain_keyword']
        )
        # Thresholds bound the pixel-count of a label accepted as a cosmic ray.
        label_params = {
            'deblend': False,
            'use_dq': self.use_dq,
            'extnums': self.instr_cfg['instr_params']['extnums'],
            'threshold_l': 2,
            'threshold_u': 1e5,
            'plot': False
        }
        # NOTE(review): labeling is only run for CCD data; for IR data
        # cr_label is passed to Stats without labels -- confirm intended.
        if self.ccd:
            cr_label.run_ccd_label(**label_params)
        # Compute the integration time
        #integration_time = cr_label.exptime + \
        #                   self.instr_cfg['instr_params']['readout_time']
        integration_time = file_metadata.metadata['integration_time']
        detector_size = self.instr_cfg['instr_params']['detector_size']
        cr_stats = statshandler.Stats(
            cr_label,
            integration_time=integration_time,
            detector_size=detector_size
        )
        cr_stats.compute_cr_statistics()
        cr_stats_dict = {
            'cr_affected_pixels': cr_stats.cr_affected_pixels,
            'incident_cr_rate': cr_stats.incident_cr_rate,
            # Note that we save BOTH versions of CR sizes measurements
            'sizes': np.asarray([cr_stats.size_in_sigmas,
                                 cr_stats.size_in_pixels]),
            'shapes': cr_stats.shapes,
            'energy_deposited': cr_stats.energy_deposited
        }
        return cr_stats_dict, file_metadata
    def run_labeling_all(self, chunk_num):
        """Run the labeling analysis and compute the statistics

        Run the labeling process to extract data for every CR in each image and
        save the results.

        Parameters
        ----------
        chunk_num : int
            Current chunk number we are analyzing. Used to write the results to
            the proper file

        Returns
        -------
        runtime : float
            Time required to process in minutes
        results : tuple
            Results from analyzing all files in `flist`.

        """
        start_time = time.time()
        # Build one lazy task per file, then evaluate them in parallel with
        # one worker process per CPU core.
        delayed_objects = [
            dask.delayed(self.run_labeling_single)(f) for f in self.flist
        ]
        # dask.visualize(*delayed_objects, filename='labeling_graph.png')
        results = list(dask.compute(*delayed_objects,
                                    scheduler='processes',
                                    num_workers=os.cpu_count()))
        # Each task returns (cr_stats_dict, file_metadata); split the pairs.
        cr_stats, file_metdata = zip(*results)
        datawriter = datahandler.DataWriter(cfg=self.cfg,
                                            chunk_num=chunk_num,
                                            cr_stats=cr_stats,
                                            file_metadata=file_metdata,
                                            instr=self.instr)
        datawriter.write_results()
        end_time = time.time()
        return (end_time - start_time)/60., results
    def run_processing(self, start, stop):
        """ Process the data in the given time interval

        Parameters
        ----------
        start : `astropy.time.Time`
            Start date of the one month interval
        stop : `astropy.time.Time`
            Stop date of the one month interval

        Returns
        -------
        runtime : float
            Time required to process in minutes

        """
        start_time = time.time()
        # Process only if there are files to process
        if self.ccd and self.flist:
            processor = process.ProcessCCD(instr=self.instr,
                                           instr_cfg=self.instr_cfg,
                                           flist=self.flist)
            processor.sort()
            processor.cr_reject()
            if 'failed' in processor.output.keys():
                failed = set(list(processor.output['failed']))
                # Write out the failed files, one per line, to a file named
                # after the interval (append mode preserves earlier runs).
                fout = '{}_{}_{}.txt'.format(
                    self.failed_observation.split('.')[0],
                    start.to_datetime().date(),
                    stop.to_datetime().date(),
                )
                with open(fout, 'a+') as fobj:
                    for f in processor.output['failed']:
                        fobj.write('{}\n'.format(f))
                msg = ('{} files failed, '
                       'removing from processing list..'.format(len(failed)))
                LOG.warning(msg)
                # remove the failed files for the list of files to process
                self.flist = list(set(self.flist).difference(failed))
        elif self.ir and self.flist:
            processor = process.ProcessIR(flist=self.flist)
            processor.decompose()
        end_time = time.time()
        return (end_time - start_time) / 60
def send_email(self, start, stop, results):
"""Send email notifying user that a one-month chunk has completed
Parameters
----------
start : `astropy.time.Time`
Start date of the one month interval
stop : `astropy.time.Time`
Stop date of the one month interval
results : `list`
The results from the CR analysis
Returns
-------
"""
        # Compute some averages for each statistic
#for data cleaning and analysis
import pandas as pd
import numpy as np
from random import randint
#for visualization
import matplotlib.pyplot as plt
import seaborn as sns
#for directory-related functions
import os
import glob
import getpass
#for web-scraping baseball data
import pybaseball as pyb
#for drafting
import math
import random
#for clustering
from sklearn.cluster import MeanShift,estimate_bandwidth
from sklearn.model_selection import train_test_split, cross_validate
#import time to see how long the script runs for
import time
import datetime
from datetime import date, timedelta
#import tkinter to build GUIs
import tkinter as tk
from tkinter import filedialog
#for warnings
import warnings
warnings.filterwarnings("ignore")
#for progress bar
from tqdm import tqdm
#enter forecasting and drafting parameters
def entry():
    """Prompt for forecasting and drafting parameters via a tkinter form.

    Blocks in the tk mainloop until the Submit button is pressed.
    NOTE(review): the values are also stored in module-level globals
    (simulations, num_competitors, num_rounds, num_iterations, dateStore)
    as a side effect; `dateStore` is set but never read here — presumably
    used elsewhere, verify before removing.

    Returns
    -------
    tuple of int
        (simulations, num_competitors, num_rounds, num_iterations)
    """
    root = tk.Tk()
    root.geometry("400x300")
    root.title('Select Forecasting and Drafting Parameters')
    # One label/entry pair per parameter.
    label_simulations = tk.Label(root, text='Choose the number of simulations for forecasting')
    entry_simulations = tk.Entry(root)
    label_num_competitors = tk.Label(root, text='Choose Number of Competitors')
    entry_num_competitors = tk.Entry(root)
    label_num_rounds = tk.Label(root, text='Choose the number of rounds in the draft')
    entry_num_rounds = tk.Entry(root)
    label_num_iterations = tk.Label(root, text="Choose the number of iterations for the Draft Agent's Exploration")
    entry_num_iterations = tk.Entry(root)
    label_simulations.pack()
    entry_simulations.pack()
    label_num_competitors.pack()
    entry_num_competitors.pack()
    label_num_rounds.pack()
    entry_num_rounds.pack()
    label_num_iterations.pack()
    entry_num_iterations.pack()

    def enter_params():
        # Read the four entries into globals, then close the window.
        # Raises ValueError (uncaught) if a field is not an integer.
        global simulations
        global num_competitors
        global num_rounds
        global num_iterations
        simulations = int(entry_simulations.get())
        num_competitors = int(entry_num_competitors.get())
        num_rounds = int(entry_num_rounds.get())
        num_iterations = int(entry_num_iterations.get())
        root.destroy()

    def get_params():
        # Submit callback: flag that values were stored, then collect them.
        global dateStore
        dateStore = True
        enter_params()
    get_params_button = tk.Button(root, text='Submit', command= get_params)
    get_params_button.pack()
    root.mainloop()
    return simulations, num_competitors, num_rounds, num_iterations
#allow the user to select a date range
def get_dates() :
    """Ask the user for a start and end year via a tkinter form.

    Blocks in the tk mainloop until Submit is pressed.  The parsed years
    are stored in module-level globals (start_time, end_time, dateStore)
    as a side effect.

    Returns
    -------
    dict
        Maps year string -> year int for every year in [start, end].
    """
    root = tk.Tk()
    root.geometry("400x300")
    root.title('Select Start and End time')
    label_start = tk.Label(root, text='Start Year: YYYY')
    entry_start = tk.Entry(root)
    label_end = tk.Label(root, text='End Year: YYYY')
    entry_end = tk.Entry(root)
    label_start.pack()
    entry_start.pack()
    label_end.pack()
    entry_end.pack()

    def enter_year():
        # Parse the two 4-digit year fields into globals, then close.
        global start_time
        global end_time
        start_time = datetime.datetime.strptime(entry_start.get(),'%Y')
        end_time =datetime.datetime.strptime(entry_end.get(),'%Y')
        root.destroy()

    def get_year():
        # Submit callback.
        global dateStore
        dateStore = True
        enter_year()
    get_year_button = tk.Button(root, text='Submit', command= get_year)
    get_year_button.pack()
    root.mainloop()
    #get range of years
    # Daily frequency; the dict comprehension below collapses the days
    # into one entry per distinct year.
    date_range = pd.date_range(start=start_time, end = end_time,freq='D')
    #create dictionary to store years
    years = {str(date.year) : date.year for date in date_range}
    return years
#make a dictionary with a dataframe for each season for hitters, pitchers, and teams
def make_period_dicts(dictionary):
    """Download one season of batting and pitching stats per year key.

    Parameters
    ----------
    dictionary : dict
        Maps year strings to year ints; only the keys are used.

    Returns
    -------
    tuple of dict
        (batter_df, pitcher_df), each mapping year string -> DataFrame
        fetched via pybaseball with qual=False (all players included).
    """
    batter_df = {}
    pitcher_df = {}
    for year in dictionary.keys():
        batter_df[year] = pyb.batting_stats(int(year), qual=False)
        pitcher_df[year] = pyb.pitching_stats(int(year), qual=False)
    return batter_df, pitcher_df
#forecaster class
class Forecaster:
def __init__(self, simulations, num_competitors, num_rounds, num_iterations,years):
    """Store the draft settings and download the per-season player data.

    Parameters
    ----------
    simulations : int
        Number of Monte Carlo simulations per forecast.
    num_competitors : int
        Number of drafters in the league.
    num_rounds : int
        Number of rounds in the draft.
    num_iterations : int
        Iterations for the draft agent's exploration.
    years : dict
        Maps year string -> year int (as produced by get_dates).
    """
    self.user = getpass.getuser()
    # Run date used for output naming, formatted as MM_DD_YY.
    self.today = date.today().strftime("%m_%d_%y")
    self.simulations = simulations
    self.num_competitors = num_competitors
    self.num_rounds = num_rounds
    self.num_iterations = num_iterations
    self.years = years
    print('Downloading Data')
    print('')
    # Per-season batting/pitching stat tables keyed by year string.
    self.seasons_dict_batter, self.seasons_dict_pitcher = make_period_dicts(self.years)
#perform monte carlo full season forecast
def monte_carlo_forecast(self):
print('Constructing the Database')
print('')
#merge the frames together
def merge_dict(dfDict, onCols, how='outer', naFill=None):
keys = list(dfDict.keys())
for i in range(len(keys)):
key = keys[i]
df0 = dfDict[key]
cols = list(df0.columns)
valueCols = list(filter(lambda x: x not in (onCols), cols))
df0 = df0[onCols + valueCols]
df0.columns = onCols + [(s + '_' + key) for s in valueCols]
if (i == 0):
outDf = df0
else:
outDf = pd.merge(outDf, df0, how=how, on=onCols)
if (naFill != None):
outDf = outDf.fillna(naFill)
return(outDf)
#get the column names
def get_column_names(dictionary):
key_list = list(dictionary.keys())
columns_list = list(dictionary[key_list[0]].columns)
return columns_list
self.pitcher_columns_list, self.batter_columns_list = get_column_names(self.seasons_dict_pitcher), get_column_names(self.seasons_dict_batter)
#merge the seasons together
def merge_season_dicts():
self.merged_batter_seasons_dict = merge_dict(self.seasons_dict_batter, self.batter_columns_list, how = 'outer', naFill = None)
self.merged_pitcher_seasons_dict = merge_dict(self.seasons_dict_pitcher, self.pitcher_columns_list, how = 'outer', naFill = None)
return self.merged_batter_seasons_dict, self.merged_pitcher_seasons_dict
merge_season_dicts()
#make a dataframe for each hitter
def make_player_dicts(dataframe):
df = {name : dataframe[dataframe['Name']==name] for name in dataframe['Name']}
return df
self.batter_dict, self.pitcher_dict = make_player_dicts(self.merged_batter_seasons_dict), make_player_dicts(self.merged_pitcher_seasons_dict)
#get the current year
def get_year_names(dictionary):
keys_list = list(dictionary.keys())
return keys_list
self.years_list = get_year_names(self.years)
self.current_year = self.years_list[-1]
#get only the players who played in the current year
def filter_for_current_players(dictionary, year):
current_dict = {name : dictionary[name] for name in dictionary.keys() if dictionary[name]['Season'].values[-1]==int(year)}
return current_dict
self.current_pitcher_dict, self.current_batter_dict = filter_for_current_players(self.pitcher_dict, self.current_year), filter_for_current_players(self.batter_dict, self.current_year)
#raw stats for batters and pitchers
def stats():
batter_stats = ['1B', '2B','3B', 'HR','R','RBI','BB','SO','SB', 'IDfg']
pitcher_stats = ['W', 'IP', 'ER', 'SO', 'BB', 'SV', 'HLD', 'IDfg']
return batter_stats, pitcher_stats
self.batter_stats, self.pitcher_stats = stats()
#filter by these stats
def filter_for_current_stats(dictionary, stats):
current_dict = {name:dictionary[name][stats] for name in dictionary.keys()}
return current_dict
self.current_stat_batter, self.current_stat_pitcher = filter_for_current_stats(self.current_batter_dict, self.batter_stats), filter_for_current_stats(self.current_pitcher_dict, self.pitcher_stats)
#team names and their abbreviations
def teams_abbreviatons():
team_list = ['Diamondbacks-ARI', 'Braves-ATL', 'Orioles-BAL', 'Red Sox-BOS', 'Cubs-CHC',
'White Sox-CHW', 'Reds-CIN' , 'Indians-CLE' , 'Rockies-COL', 'Tigers-DET' ,
'Marlins-MIA' ,'Astros-HOU' ,'Royals-KCR' ,'Angels-LAA','Dodgers-LAD',
'Brewers-MIL' ,'Twins-MIN','Mets-NYM','Yankees-NYY','Athletics-OAK','Phillies-PHI',
'Pirates-PIT' ,'Padres-SDP' ,'Giants-SFG','Mariners-SEA', 'Cardinals-STL',
'Rays-TB' ,'Rangers-TEX' ,'Blue Jays-TOR' ,'Nationals-WSN']
return team_list
self.team_list = teams_abbreviatons()
#split the team names
def split_names(team_list) :
split_list = [team.split('-') for team in team_list]
return split_list
self.split_teams = split_names(self.team_list)
#create dict for team names
def create_dict(team_list):
teams_dict = {team[1]: team[0] for team in team_list}
return teams_dict
self.teams_dict = create_dict(self.split_teams)
#get a list of the teams
def get_team_name_lists(team_list):
team_list_full = [team.split('-')[0] for team in team_list]
team_list_abv = [team.split('-')[1] for team in team_list]
return team_list_full, team_list_abv
self.team_list_full, self.team_list_abv = get_team_name_lists(self.team_list)
#get all the schedules
def get_schedules(team_list_abv, years_list, team_list_full):
season_list = []
season_list = [{team_list_ful: {year_list:pyb.schedule_and_record(int(year_list), team_list_ab)}} for year_list in years_list for team_list_ab, team_list_ful in zip(team_list_abv, team_list_full)]
return season_list
self.season_list = get_schedules(self.team_list_abv, self.years_list, self.team_list_full)
#drop pitchers from the hitters list
def drop_pitchers(current_stat_batter, current_stat_pitcher):
for key in current_stat_pitcher.keys():
if key in current_stat_batter.keys() and key in current_stat_pitcher.keys():
del current_stat_batter[key]
return current_stat_batter
self.current_stat_batter = drop_pitchers(self.current_stat_batter, self.current_stat_pitcher)
#create averages for each player for each stat
def player_averager(dictionary):
average_players ={}
for key in dictionary.keys():
average_players.update({key : dictionary[key].mean().round().to_frame().transpose()})
average_players[key] = average_players[key].reset_index()
average_players[key].rename(columns = {'index': 'Name'}, inplace = True)
average_players[key]['Name']= key
return average_players
self.average_batters, self.average_pitchers = player_averager(self.current_stat_batter), player_averager(self.current_stat_pitcher)
#columns to merge on
def merge_columns(average_batters, average_pitchers):
#return list(average_batters['<NAME>'].columns), list(average_pitchers['<NAME>'].columns)
return list(average_batters[list(average_batters.keys())[0]].columns), list(average_pitchers[list(average_pitchers.keys())[0]].columns)
self.batter_columns, self.pitcher_columns = merge_columns(self.average_batters, self.average_pitchers)
#merge the average players to create the clusters
def average_merger(average_batters, batter_columns,average_pitchers, pitcher_columns):
return merge_dict(average_batters, batter_columns, how = 'outer', naFill = None), merge_dict(average_pitchers, pitcher_columns, how = 'outer', naFill = None)
self.merged_batter_df, self.merged_pitcher_df = average_merger(self.average_batters, self.batter_columns, self.average_pitchers, self.pitcher_columns)
#separate starting and relief pitchers and account for overlap
def separate_pitchers(merged_pitcher_df):
starting_pitchers = merged_pitcher_df[(merged_pitcher_df['SV'] ==0) &(merged_pitcher_df['HLD'] ==0) | (merged_pitcher_df['Name']=='<NAME>') | (merged_pitcher_df['Name']=='<NAME>')]
relief_pitchers = merged_pitcher_df[(merged_pitcher_df['SV'] >=1) & (merged_pitcher_df['SV'] <8) | (merged_pitcher_df['HLD']> 0) & (merged_pitcher_df['Name'] !='<NAME>') & (merged_pitcher_df['Name']!='<NAME>')]
closers = merged_pitcher_df[(merged_pitcher_df['SV'] >10) & (merged_pitcher_df['HLD'] >= 0) & (merged_pitcher_df['Name'] !='<NAME>') & (merged_pitcher_df['Name']!='<NAME>')]
return starting_pitchers, relief_pitchers, closers
self.starting_pitchers, self.relief_pitchers, self.closers = separate_pitchers(self.merged_pitcher_df)
        #cluster players to obtain a prior distribution for each stat
print('Clustering Players')
print('')
def mean_shift(data,quantile) :
#split the data
train = data.drop('Name', axis =1)
if 'Cluster Label' in train.columns:
train = data.drop(['Name', 'Cluster Label', 'IDfg'], axis =1)
else:
pass
#estimate the bandwith
bandwidth = estimate_bandwidth(train, quantile=quantile, n_samples=100000)
#instantiate the mean shift clustering object
ms = MeanShift(bandwidth = bandwidth, bin_seeding = True, cluster_all =True, n_jobs = None )
#fit the model to the training data
ms.fit(train)
#grab the cluster labels and centers
labels = ms.labels_
cluster_centers = ms.cluster_centers_
#find the number of unique labels
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
#find the clusters
cluster_finder = data
cluster_finder['Cluster Label'] = labels
#create the clusters
clusters = [cluster_finder[cluster_finder['Cluster Label']==label] for label in labels_unique]
#extract the summary statistics
cluster_describers = [cluster.describe() for cluster in clusters]
return cluster_finder, clusters, cluster_describers
self.cluster_finder_batter, self.clusters_batter, self.cluster_describers_batter = mean_shift(self.merged_batter_df,0.16)
self.cluster_finder_starting_pitcher, self.clusters_starting_pitcher, self.cluster_describers_starting_pitcher = mean_shift(self.starting_pitchers, 0.18)
self.cluster_finder_relief_pitcher, self.clusters_relief_pitcher, self.cluster_describers_relief_pitcher = mean_shift(self.relief_pitchers, 0.2)
self.cluster_finder_closer, self.clusters_closer, self.cluster_describer_closer = mean_shift(self.closers, 0.19)
#match the pitcher subsets properly
def subset_pitchers(dictionary, dataframe):
for key in dictionary.keys():
dictionary = {key: dictionary[key] for key in dataframe['Name']}
return dictionary
self.current_stat_starting = subset_pitchers(self.current_stat_pitcher, self.cluster_finder_starting_pitcher)
self.current_stat_relief = subset_pitchers(self.current_stat_pitcher, self.cluster_finder_relief_pitcher)
self.current_stat_closer = subset_pitchers(self.current_stat_pitcher, self.cluster_finder_closer)
#use the clusters to make distributions for rookies
#also use clusters for a similarity comparison for non-rookies
def player_matcher(dictionary,dataframe,columns):
for key in dictionary.keys() :
label = int(dataframe[dataframe['Name'] == key]['Cluster Label'])
dictionary[key].loc[key] = dataframe[dataframe['Cluster Label']==label][columns[1:]].mean().round()
return dictionary
self.full_batters = player_matcher(self.current_stat_batter, self.cluster_finder_batter,self.batter_columns)
self.full_starters = player_matcher(self.current_stat_starting, self.cluster_finder_starting_pitcher,self.pitcher_columns)
self.full_relievers = player_matcher(self.current_stat_relief, self.cluster_finder_relief_pitcher,self.pitcher_columns)
self.full_closers = player_matcher(self.current_stat_closer, self.cluster_finder_closer,self.pitcher_columns)
#sample over the player distributions
def sample_averager(dictionary,simulations):
sample_players = {}
sample_players_risk = {}
for key in tqdm(dictionary.keys()):
if len(dictionary[key]) > 1 :
for column in dictionary[key]:
if column == 'IDfg':
dictionary[key]= dictionary[key].replace([np.inf, -np.inf], np.nan).fillna(0) #if not needed, remove
randomizer | |
# return '{:02d}m:{:02d}s:{:03d}ms'.format(m, s, ms)
return '{:02d}m:{:02d}s'.format(m, s)
def get_time_h_mm_ss(self, time_ms, symbol=True):
    """
    Convert a millisecond duration to an h:mm:ss string.
    :param time_ms: duration in milliseconds
    :param symbol: when True include the h/m/s unit letters
    :return: e.g. '1h:02m:03s', or '1:02:03' when symbol is False
    """
    total_seconds = int(time_ms) // 1000
    minutes, seconds = divmod(total_seconds, 60)
    hours, minutes = divmod(minutes, 60)
    template = '{:01d}h:{:02d}m:{:02d}s' if symbol else '{:01d}:{:02d}:{:02d}'
    return template.format(hours, minutes, seconds)
def update_text_box(self, window, msg, is_hide):
    """ Update text elements

    Routes an engine message to the GUI: 'info_all' lines are written to
    the search-info box (blanked when is_hide is True); a 'bestmove '
    message is parsed into a chess.Move and returned.

    :param window: GUI window holding the 'search_info_all_k' element
    :param msg: engine message; str(msg) is inspected for keywords
    :param is_hide: when True, clear the info box instead of showing text
    :return: parsed best move, or None if msg was not a bestmove
    """
    best_move = None
    msg_str = str(msg)
    if not 'bestmove ' in msg_str:
        if 'info_all' in msg_str:
            # Drop the trailing token (the 'info_all' tag) before display.
            info_all = ' '.join(msg_str.split()[0:-1]).strip()
            msg_line = '{}\n'.format(info_all)
            window.FindElement('search_info_all_k').Update(
                '' if is_hide else msg_line)
    else:
        # Best move can be None because engine dies
        try:
            best_move = chess.Move.from_uci(msg.split()[1])
        except Exception:
            # NOTE(review): best_move is still None here, so this log line
            # cannot show the offending token; logging msg would be more
            # informative.
            logging.exception('Engine sent {}.'.format(best_move))
            sg.Popup('Engine error, it sent a {} bestmove.\n'.format(
                best_move) + 'Back to Neutral mode, it is better to '
                'change engine {}.'.format(
                self.opp_id_name), icon=ico_path[platform]['pecg'],
                title=BOX_TITLE)
    return best_move
def get_tag_date(self):
    """Return today's date formatted for a PGN Date tag (YYYY.MM.DD)."""
    now = datetime.today()
    return now.strftime('%Y.%m.%d')
def init_game(self):
    """Create a fresh pgn game seeded with the default tag values."""
    self.game = chess.pgn.Game()
    self.node = None
    headers = self.game.headers
    headers['Event'] = INIT_PGN_TAG['Event']
    headers['Date'] = self.get_tag_date()
    headers['White'] = INIT_PGN_TAG['White']
    headers['Black'] = INIT_PGN_TAG['Black']
def set_new_game(self):
    """Start a new pgn game, carrying over the old Event/White/Black tags."""
    carried = {tag: self.game.headers[tag]
               for tag in ('Event', 'White', 'Black')}
    # Define a game object for saving game in pgn format
    self.game = chess.pgn.Game()
    self.game.headers['Event'] = carried['Event']
    self.game.headers['Date'] = self.get_tag_date()
    self.game.headers['White'] = carried['White']
    self.game.headers['Black'] = carried['Black']
def clear_elements(self, window):
    """ Clear movelist, score, pv, time, depth and nps boxes """
    # Plain text boxes that just get blanked.
    for key in ('search_info_all_k', 'polyglot_book1_k', 'polyglot_book2_k',
                'advise_info_k', 'cnn_prediction', 'abc_prediction',
                'svc_prediction'):
        window.FindElement(key).Update('')
    # The movelist is read-only; enable it briefly so it can be cleared.
    movelist = window.FindElement('_movelist_')
    movelist.Update(disabled=False)
    movelist.Update('', disabled=True)
    # Clock and elapsed-time widgets.
    for key in ('w_base_time_k', 'b_base_time_k', 'w_elapse_k', 'b_elapse_k'):
        window.Element(key).Update('')
def update_labels_and_game_tags(self, window, human='Human'):
    """Refresh the White/Black name labels and the matching pgn tags."""
    engine_id = self.opp_id_name
    # Assign colors according to which side the user plays.
    if self.is_user_white:
        white_name, black_name = human, engine_id
    else:
        white_name, black_name = engine_id, human
    window.FindElement('_White_').Update(white_name)
    window.FindElement('_Black_').Update(black_name)
    self.game.headers['White'] = white_name
    self.game.headers['Black'] = black_name
def get_fen(self):
    """Read a FEN string from the system clipboard into self.fen."""
    fen = pyperclip.paste()
    # Some sources append a single trailing space to the FEN; drop it.
    if fen.endswith(' '):
        fen = fen[:-1]
    self.fen = fen
def fen_to_psg_board(self, window):
    """ Update psg_board based on FEN

    Rebuilds self.psg_board (8 rows of GUI piece codes, top row first)
    from the piece-placement field of self.fen, then redraws the board.
    """
    psgboard = []
    # Get piece locations only to build psg board
    pc_locations = self.fen.split()[0]
    board = chess.BaseBoard(pc_locations)
    old_r = None
    for s in chess.SQUARES:
        r = chess.square_rank(s)
        if old_r is None:
            # Very first square: open the first row.
            piece_r = []
        elif old_r != r:
            # Rank changed: the previous row is complete, store it.
            psgboard.append(piece_r)
            piece_r = []
        elif s == 63:
            # Last square: store the final row now.  Its last piece is
            # appended below, after this append — that still works because
            # piece_r is stored by reference, not copied.
            psgboard.append(piece_r)
        try:
            # s ^ 56 flips the rank bits (0 <-> 7), converting python-chess
            # bottom-up square numbering to the GUI's top-down row order.
            pc = board.piece_at(s^56)
        except Exception:
            pc = None
            logging.exception('Failed to get piece.')
        if pc is not None:
            pt = pc.piece_type
            c = pc.color
            if c:
                # White pieces.
                if pt == chess.PAWN:
                    piece_r.append(PAWNW)
                elif pt == chess.KNIGHT:
                    piece_r.append(KNIGHTW)
                elif pt == chess.BISHOP:
                    piece_r.append(BISHOPW)
                elif pt == chess.ROOK:
                    piece_r.append(ROOKW)
                elif pt == chess.QUEEN:
                    piece_r.append(QUEENW)
                elif pt == chess.KING:
                    piece_r.append(KINGW)
            else:
                # Black pieces.
                if pt == chess.PAWN:
                    piece_r.append(PAWNB)
                elif pt == chess.KNIGHT:
                    piece_r.append(KNIGHTB)
                elif pt == chess.BISHOP:
                    piece_r.append(BISHOPB)
                elif pt == chess.ROOK:
                    piece_r.append(ROOKB)
                elif pt == chess.QUEEN:
                    piece_r.append(QUEENB)
                elif pt == chess.KING:
                    piece_r.append(KINGB)
        # Else if pc is None or square is empty
        else:
            piece_r.append(BLANK)
        old_r = r
    self.psg_board = psgboard
    self.redraw_board(window)
def change_square_color(self, window, row, col):
    """
    Highlight the square at (row, col) with the move color.
    """
    square_btn = window.FindElement(key=(row, col))
    if (row + col) % 2:
        highlight = self.move_sq_dark_color
    else:
        highlight = self.move_sq_light_color
    square_btn.Update(button_color=('white', highlight))
def relative_row(self, s, stm):
    """
    Row of square s relative to the side to move.

    The board can be viewed as white at the bottom and black at the top.
    If stm is white, row 0 is at the bottom; if stm is black, row 0 is
    at the top.
    :param s: square
    :param stm: side to move
    :return: relative row
    """
    row = self.get_row(s)
    if stm:
        return 7 - row
    return row
def get_row(self, s):
    """
    This row is based on PySimpleGUI square mapping that is 0 at the
    top and 7 at the bottom.
    In contrast Python-chess square mapping is 0 at the bottom and 7
    at the top. chess.square_rank() is a method from Python-chess that
    returns row given square s.
    :param s: square (python-chess square index, 0..63)
    :return: row in GUI orientation (0 = top)
    """
    return 7 - chess.square_rank(s)
def get_col(self, s):
    """ Returns col given square s

    Columns are the same in GUI and python-chess orientation, so the
    file of the square is returned directly.
    :param s: square (python-chess square index, 0..63)
    :return: column 0..7 (file a..h)
    """
    return chess.square_file(s)
def redraw_board(self, window):
    """
    Repaint every square with its base color and current piece image.
    Called at start and after a move.
    :param window:
    :return:
    """
    for row in range(8):
        for col in range(8):
            if (row + col) % 2:
                sq_color = self.sq_dark_color
            else:
                sq_color = self.sq_light_color
            piece_image = self.images[self.psg_board[row][col]]
            square = window.FindElement(key=(row, col))
            square.Update(button_color=('white', sq_color),
                          image_filename=piece_image, )
def render_square(self, image, key, location):
    """ Returns an RButton (Read Button) with image image """
    # Dark squares are those whose coordinates sum to an odd number.
    is_dark = (location[0] + location[1]) % 2
    color = self.sq_dark_color if is_dark else self.sq_light_color
    return sg.RButton('', image_filename=image, size=(1, 1),
                      border_width=0, button_color=('white', color),
                      pad=(0, 0), key=key)
def select_promotion_piece(self, stm):
    """
    Allow user to select a piece type to promote to.

    Opens a small one-row window of four piece buttons and waits for a
    click.
    :param stm: side to move (truthy shows white pieces)
    :return: promoted piece, i.e QUEENW, QUEENB ...; None if the window
        is closed without a selection
    """
    piece = None
    board_layout, row = [], []
    # Deep-copy the template so the popup cannot mutate the module-level
    # promote boards.
    psg_promote_board = copy.deepcopy(white_init_promote_board) if stm \
        else copy.deepcopy(black_init_promote_board)
    # Loop through board and create buttons with images
    for i in range(1):
        for j in range(4):
            piece_image = self.images[psg_promote_board[i][j]]
            row.append(self.render_square(piece_image, key=(i, j),
                                          location=(i, j)))
        board_layout.append(row)
    promo_window = sg.Window('{} {}'.format(APP_NAME, APP_VERSION),
                             board_layout,
                             default_button_element_size=(12, 1),
                             auto_size_buttons=False,
                             icon=ico_path[platform]['pecg'])
    while True:
        button, value = promo_window.Read(timeout=0)
        if button is None:
            # Window closed without a choice; fall through with piece=None.
            break
        if type(button) is tuple:
            # A piece button was clicked; its key is the (row, col) tuple.
            move_from = button
            fr_row, fr_col = move_from
            piece = psg_promote_board[fr_row][fr_col]
            logging.info('promote piece: {}'.format(piece))
            break
    promo_window.Close()
    return piece
def update_rook(self, window, move):
    """
    Update rook location for a castling move.

    :param window: GUI window whose board is redrawn after the update
    :param move: castling king move in uci format, one of
        'e1g1', 'e1c1', 'e8g8', 'e8c8'
    :raises ValueError: if move is not one of the four castling moves
        (the original if/elif chain would fail with a NameError instead)
    """
    # Map the king's castle move to the rook's (from, to, piece) triple.
    castle_rook_moves = {
        'e1g1': (chess.H1, chess.F1, ROOKW),  # white kingside
        'e1c1': (chess.A1, chess.D1, ROOKW),  # white queenside
        'e8g8': (chess.H8, chess.F8, ROOKB),  # black kingside
        'e8c8': (chess.A8, chess.D8, ROOKB),  # black queenside
    }
    try:
        fr, to, pc = castle_rook_moves[move]
    except KeyError:
        raise ValueError('Not a castling move: {}'.format(move))
    self.psg_board[self.get_row(fr)][self.get_col(fr)] = BLANK
    self.psg_board[self.get_row(to)][self.get_col(to)] = pc
    self.redraw_board(window)
def update_ep(self, window, move, stm):
    """
    Update board for e.p move: blank the square of the captured pawn
    and redraw.
    :param window:
    :param move: python-chess format
    :param stm: side to move
    :return:
    """
    to = move.to_square
    # The captured pawn sits one rank behind the arrival square.
    capture_sq = to - 8 if stm else to + 8
    self.psg_board[self.get_row(capture_sq)][self.get_col(capture_sq)] = BLANK
    self.redraw_board(window)
def get_promo_piece(self, move, stm, human):
    """
    Returns promotion piece.
    :param move: python-chess format
    :param stm: side to move
    :param human: if side to move is human this is True
    :return: promoted piece in python-chess and pythonsimplegui formats
    """
    # Lookup tables replacing the original if/elif chains.
    white_pieces = {chess.QUEEN: QUEENW, chess.ROOK: ROOKW,
                    chess.BISHOP: BISHOPW, chess.KNIGHT: KNIGHTW}
    black_pieces = {chess.QUEEN: QUEENB, chess.ROOK: ROOKB,
                    chess.BISHOP: BISHOPB, chess.KNIGHT: KNIGHTB}
    # If this move is from a user, we will show a window with piece images
    if human:
        psg_promo = self.select_promotion_piece(stm)
        # If user pressed x we set the promo to queen
        if psg_promo is None:
            logging.info('User did not select a promotion piece, '
                         'set this to queen.')
            psg_promo = QUEENW if stm else QUEENB
        pyc_promo = promote_psg_to_pyc[psg_promo]
    # Else if move is from computer
    else:
        pyc_promo = move.promotion  # This is from python-chess
        psg_promo = (white_pieces if stm else black_pieces)[pyc_promo]
    return pyc_promo, psg_promo
def set_depth_limit(self):
    """Ask the user for a search depth and clamp it to [MIN_DEPTH, MAX_DEPTH].

    Stores the result in self.max_depth.  Falls back to the current
    self.max_depth when the input is not an integer (including when the
    popup is cancelled, which makes PopupGetText return None).
    """
    user_depth = sg.PopupGetText(
        'Current depth is {}\n\nInput depth [{} to {}]'.format(
            self.max_depth, MIN_DEPTH, MAX_DEPTH), title=BOX_TITLE,
        icon=ico_path[platform]['pecg'])
    try:
        user_depth = int(user_depth)
    except Exception:
        user_depth = self.max_depth
        logging.exception('Failed to get user depth.')
    # Clamp into the allowed depth range.
    self.max_depth = min(MAX_DEPTH, max(MIN_DEPTH, user_depth))
def define_timer(self, window, name='human'):
    """
    Create a Timer for either the human or the engine and show its base
    time in the matching clock widget.
    """
    if name == 'human':
        timer = Timer(self.human_tc_type, self.human_base_time_ms,
                      self.human_inc_time_ms, self.human_period_moves)
    else:
        timer = Timer(self.engine_tc_type, self.engine_base_time_ms,
                      self.engine_inc_time_ms, self.engine_period_moves)
    elapse_str = self.get_time_h_mm_ss(timer.base)
    # This timer drives white's clock exactly when (human <=> user is white).
    is_white_base = (name == 'human') == self.is_user_white
    clock_key = 'w_base_time_k' if is_white_base else 'b_base_time_k'
    window.Element(clock_key).Update(elapse_str)
    return timer
def play_game(self, window, engine_id_name, board):
"""
User can play a game against and | |
sigma score for the customers
# associated to the chosen route.
eps_bar = eps_unrouted[route_seed_idx,associated_cols]
# NOTE: CMT 1979 does not specify what happens if S is empty, we assume
# we need (and can) omit the calculation of eps_prime in this case.
brdcast_rs_idxs = [[rsi] for rsi in route_seed_idxs]
if route_seed_idxs:
eps_prime = np.min(eps_unrouted[brdcast_rs_idxs, associated_cols],
axis=0)
sigmas = eps_prime-eps_bar
else:
# last route, try to add rest of the nodes
eps_prime = None
sigmas = -eps_bar
col_to_node = [unrouted_nodes[int(c)] for c in associated_cols]
sigma_ls = list(zip(sigmas.tolist(), col_to_node))
sigma_ls.sort(reverse=True)
if __debug__:
log(DEBUG, "Assigning associated nodes %s to a route %s (seed n%d)"%
(str(col_to_node), str(route+[0]),route_seeds[route_seed_idx]))
## Step 3: insert feasible customers from the biggest sigma first
for sigma, l_star in sigma_ls:
if __debug__:
log(DEBUG-1, "Check feasibility of inserting "+\
"n%d with sigma=%.2f"%(l_star,sigma))
if C and route_demand+d[l_star]-C_EPS>C:
if __debug__:
log(DEBUG-1, "Insertion would break C constraint.")
continue
# use cached L feasibility check
if L and insertion_infeasible[route_seed_idx][l_star]:
continue
# Do not run TSP algorithm after every insertion, instead calculate
# a simple a upper bound for the route_cost and use that.
UB_route_cost = (route_cost-
D[route[-1],0]+
D[route[-1],l_star]+D[l_star,0])
if L and UB_route_cost-S_EPS>L:
# check the real TSP cost
new_route, new_route_cost = solve_tsp(D, route+[l_star])
if __debug__:
log(DEBUG-1, "Got TSP solution %s (%.2f)" %
(str(new_route), new_route_cost, ))
if new_route_cost-S_EPS>L:
if __debug__:
log(DEBUG-1,"DEBUG: Insertion would break L constraint.")
insertion_infeasible[route_seed_idx][l_star] = True
continue
route_cost = new_route_cost
route=new_route[:-1]
route_l_updated = True
else:
route_l_updated = False
route_cost = UB_route_cost
route = route+[l_star]
if C: route_demand+=d[l_star]
unrouted_nodes.remove(l_star)
insertions_made = True
if __debug__:
log(DEBUG, "Inserted n%d to create a route %s."%(l_star, route))
# All feasible insertions of the associated customers is done, record
# the modified route.
if insertions_made:
routes[route_seed_idx] = RouteState( route, #updated route
route_demand, #updated demand
route_cost, #updated cost
route_l_updated) #cost state
except KeyboardInterrupt: #or SIGINT
rs_sol, _ = _routestates2solution(routes, D)
interrupted_sol = rs_sol[:-1]+routes2sol([n] for n in unrouted_nodes
if n not in rs_sol)
raise KeyboardInterrupt(interrupted_sol)
## Step 4: Redo step 1 or construct the solution and exit
if len(unrouted_nodes)>0:
if __debug__:
log(DEBUG, "Phase 2 failed to create feasbile solution with %d routes."%K)
log(DEBUG-1, "Nodes %s remain unrouted."%str(list(unrouted_nodes)))
return 0, None, None, rr
else:
sol, total_cost = _routestates2solution(routes, D)
if __debug__:
log(DEBUG, "Phase 2 solution %s (%.2f) complete."%(str(sol),total_cost))
return K, sol, total_cost, rr
def cmt_2phase_init(D, d, C, L=None, minimize_K=False,
lambda_multiplier=2.0, mu_multiplier=1.0,
phase1_seed_selection_method = "farthest",
phase2_choose_most_associated_route = True,
phase2_repeated_association_with_n_routes = 1,
number_of_randomized_retries = None):
""" Implementation of the Christofides, Mingozzi & Toth (1979) two phase
heuristic. In the first phase a customer is selected to act as a seed node
and initialize a route. Then, a savings criteria parametrized with
lambda_multiplier is used to determine which customers to insert.
Insertions are done until a constraint is violated and then a new seed is
selected and the insertions continue. This is repeated until no unrouted
customers remain or we run out of route seeds. Finally, the routes are made
r-optimal with 3-opt.
    The seed customers are carried over to the second phase of the algorithm.
Here, each customer is associated to a seed customer based on a second
savings criteria parametrized with mu_multiplier. Also the next closest
seed customer has an effect to the score used when associating the nodes.
Then, a route is built around each seed customer with the nodes associated
to that route taking care not to violate feasibility of the route. Finally,
if a feasible solution was generated, the routes from the second phase
are made r-optimal with 3-opt.
A better of the solutions from the first and second phases is selected
and returned.
Note that the default parameters are for a deterministic variant of the
stochastic algorithm described in (Christofides et al 1979).
Basic parameters:
    * D is a numpy ndarray (or equivalent) of the full 2D distance matrix.
* d is a list of demands. d[0] should be 0.0 as it is the depot.
* C is the capacity constraint limit for the identical vehicles.
* L is the optional constraint for the maximum route cost/length/duration.
Objective parameter:
* minimize_K sets the primary optimization objective. If set to True, it is
the minimum number of routes and the current best is always replaced
with a solution with smaller K. If set to False (default) the algorithm
      optimizes only for the minimum solution/routing cost.
Route shape parameters:
* lambda_multiplier specifies how closely the customer is associated to
the emerging route seed customer in the first phase.
* mu_multiplier specifies how closely the customer is associated to
route seed customers in the second phase.
The implementation includes some improvements to the CMT (1979) algorithm
to improve the chance of second phase producing feasible solutions:
* phase1_seed_selection_method
instead of selecting a seed customer for emerging
route at random in the first phase, select the
"farthest" or "closest" to the depot or the one with
the "biggest" demand. Can also be "first", which
will be random if randomized_retries is set.
* phase2_choose_most_associated_route
instead of building the routes in random order in
phase 2, start from the route with most associated
customers. If set to False implements the original
behaviour of (CMT 1979).
* phase2_repeated_association_with_n_routes
if set to None, the original behaviour of (CMT 1979)
is used. That is, terminate phase 2 without an
feasible solution if the first route building pass
over the route seed customers leaves unrouted
customers when S=0. If this is set to 1, the
procedure is repeated until a) all customers are
routed or b) no feasible insertions can be made.
If this parameter is set to be >1, also insertion of
2. best alternatives to associate with a seed
customers are tried. Can also be "K". Then the
number of routes generated in the first phase is
used as the value of this parameter.
* number_of_randomized_retries
If None the algorithm is deterministic. If set to an
integer value. The first phase can generate this
many seed customer configurations to second phase
in case second phase is unable to produce feasible
solutions.
"""
if phase1_seed_selection_method=="first":
seed_f = _first_seed
elif phase1_seed_selection_method=="farthest":
seed_f = _farthest_seed
elif phase1_seed_selection_method=="closest":
seed_f = _closest_seed
elif phase1_seed_selection_method=="biggest":
seed_f = _biggest_seed
rr = number_of_randomized_retries
best_sol = None
best_f = None
best_K = None
interrupted = False
while (rr is None) or (rr>0):
phase1_sol, phase1_f, phase1_K = None, float("inf"), float("inf")
phase2_sol, phase2_f, phase2_K = None, float("inf"), float("inf")
try:
phase1_seeds, phase1_sol, phase1_f, rr = \
_phase_one(lambda_multiplier,D,d,C,L, seed_f, rr)
phase1_K = len(phase1_seeds)
# extension to CMT, option to associate customers multiple times
# (to other routes, starting from the route with minimal eps).
associate_routes = phase2_repeated_association_with_n_routes
if phase2_repeated_association_with_n_routes=="K":
associate_routes = phase1_K
phase2_K, phase2_sol, phase2_f, rr = \
_phase_two(mu_multiplier,phase1_seeds,D,d,C,L, rr,
phase2_choose_most_associated_route, associate_routes)
except KeyboardInterrupt as e: #or SIGINT
# Phase 1 OR phase 2 was interrupted.
if len(e.args)>0 and type(e.args[0]) is list:
if phase1_sol is None:
phase1_sol = without_empty_routes(e.args[0])
phase1_f = objf(phase1_sol)
phase1_K = phase1_sol.count(0)-1
elif phase2_sol is None:
phase2_sol = without_empty_routes(e.args[0])
phase2_f = objf(phase2_sol)
phase2_K = phase2_sol.count(0)-1
interrupted = True
# Pick the better out of the two
p1_better_than_p2 = is_better_sol(phase2_f, phase2_K,
phase1_f, phase1_K, minimize_K)
p1_best_so_far = is_better_sol(best_f, best_K,
phase1_f, phase1_K, minimize_K)
p2_best_so_far = is_better_sol(best_f, best_K,
phase2_f, phase2_K, minimize_K)
if p1_better_than_p2 and p1_best_so_far:
best_sol = phase1_sol
best_f = phase1_f
best_K = phase1_K
if not p1_better_than_p2 and p2_best_so_far:
best_sol = phase2_sol
best_f = phase2_f
best_K = phase2_K
if interrupted:
# pass on the current best solution
raise KeyboardInterrupt(best_sol)
# deterministic version, no retries
| |
if (self.calcCluster):
self.chi0Ma = self.chic0M
else:
self.chi0Ma = self.chi0M
self.pm = np.dot(self.GammaM, self.chi0Ma)/(self.invT*float(self.Nc))
if self.vertex_channel in ("PARTICLE_PARTICLE_SUPERCONDUCTING","PARTICLE_PARTICLE_UP_DOWN","PARTICLE_PARTICLE_SINGLET"):
self.pm2 = np.dot(sqrt(real(self.chi0Ma)),np.dot(real(self.GammaM), sqrt(real(self.chi0Ma))))
self.pm2 *= 1.0/(self.invT*float(self.Nc))
# self.pm *= 1.0/(self.invT*float(self.Nc))
def symmetrizeGamma(self):
nt = self.GammaM.shape[0]
for i in range(0,nt):
for j in range(i,nt):
c1 = 0.5*(self.GammaM[i,j]+self.GammaM[j,i])
self.GammaM[i,j] = c1
self.GammaM[j,i] = c1
    def calcKernelEigenValues(self):
        """Diagonalize the Bethe-Salpeter kernel pm (and, for the
        particle-particle channels, the symmetrized kernel pm2).

        Eigenvalues are sorted by |lambda - 1| so the leading instability
        (eigenvalue closest to 1) comes first. Stores self.lambdas/self.evecs
        and, for particle-particle channels, self.lambdas2/self.evecs2; also
        scans the symmetrized eigenvectors for a d-wave solution, setting
        self.found_d, self.lambdad and self.ind_d on success.
        """
        nt = self.nt; Nc = self.Nc; NwG4=self.NwG4
        w,v = linalg.eig(self.pm)
        # sort by distance of the eigenvalue to unity
        wt = abs(w-1)
        ilead = argsort(wt)
        self.lambdas = w[ilead]
        self.evecs = v[:,ilead]
        self.evecs = self.evecs.reshape(NwG4,Nc,nt)
        print ("10 leading eigenvalues of lattice Bethe-salpeter equation",self.lambdas[0:10])
        if self.vertex_channel in ("PARTICLE_PARTICLE_SUPERCONDUCTING","PARTICLE_PARTICLE_UP_DOWN"):
            # symmetrized kernel is Hermitian, so use eigh (real spectrum)
            w2,v2 = linalg.eigh(self.pm2)
            wt2 = abs(w2-1)
            ilead2 = argsort(wt2)
            self.lambdas2 = w2[ilead2]
            self.evecs2 = v2[:,ilead2]
            self.evecs2 = self.evecs2.reshape(NwG4,Nc,nt)
            print ("10 leading eigenvalues of symmetrized Bethe-salpeter equation",self.lambdas2[0:10])
            #Now find d-wave eigenvalue
            gk = cos(self.Kvecs[:,0]) - cos(self.Kvecs[:,1]) # dwave form factor
            self.found_d=False
            for ia in range(0,nt):
                # Overlap of eigenvector ia with the d-wave form factor at the
                # central (lowest) frequency, weighted by its amplitude at the
                # iKPi0 momentum (presumably K=(pi,0) — TODO confirm).
                r1 = dot(gk,self.evecs2[int(self.NwG4/2),:,ia]) * sum(self.evecs2[:,self.iKPi0,ia])
                if abs(r1) >= 2.0e-1:
                    self.lambdad = self.lambdas2[ia]
                    self.ind_d = ia
                    self.found_d=True
                    break
            if self.found_d: print("d-wave eigenvalue",self.lambdad)
def calcReducibleLatticeVertex(self):
pm = self.pm; Gamma=self.GammaM
nt = self.nt; Nc = self.Nc; NwG4=self.NwG4
self.pminv = np.linalg.inv(np.identity(nt)-pm)
# self.pminv = np.linalg.inv(np.identity(nt)+pm)
self.GammaRed = dot(self.pminv, Gamma)
self.GammaRed = self.GammaRed.reshape(NwG4,Nc,NwG4,Nc)
    def calcReducibleClusterVertex(self):
        """Compute the reducible cluster vertex GammaCluster(iw1,K1,iw2,K2)
        from the measured two-particle Green's function G4 and the
        single-particle Green's function, then scale it by invT*Nc.

        Particle-particle channels pair G(k) with G(-k+Q); all other channels
        pair G(k) with G(k+Q) (with the opposite sign on the disconnected
        part subtracted/added on the diagonal).
        """
        # Calculate cluster vertex from Gamma(q,k,k') = [ G4(q,k,k')-G(k)G(k+q) ] / [ G(k)G(k+q)G(k')G(k'+q) ]
        nt = self.nt; Nc = self.Nc; NwG4=self.NwG4
        self.GammaCluster = np.zeros((NwG4,Nc,NwG4,Nc),dtype=complex)
        for iK1 in range(Nc):
            for iK2 in range(Nc):
                for iw1 in range(NwG4):
                    for iw2 in range(NwG4):
                        # map two-particle frequency indices onto the
                        # single-particle Matsubara grid
                        iwG1 = int(iw1 - self.iwG40 + self.iwG0)
                        iwG2 = int(iw2 - self.iwG40 + self.iwG0)
                        if self.vertex_channel in ("PARTICLE_PARTICLE_SUPERCONDUCTING","PARTICLE_PARTICLE_UP_DOWN"):
                            # indices of -K1+Q and -K2+Q
                            imk1pq = int(self.iKSum[self.iKDiff[self.iK0,iK1],self.iQ])
                            imk2pq = int(self.iKSum[self.iKDiff[self.iK0,iK2],self.iQ])
                            numerator = self.G4[iw1,iw2,iK1,iK2]
                            # subtract the disconnected bubble on the diagonal
                            if (iK1==iK2) & (iw1==iw2): numerator -= self.Green[iwG1,iK1] * self.Green[self.NwG-iwG1-1+self.iwm,imk1pq]
                            denominator = self.Green[iwG1,iK1]*self.Green[self.NwG-iwG1-1+self.iwm,imk1pq] * self.Green[iwG2,iK2]*self.Green[self.NwG-iwG2-1+self.iwm,imk2pq]
                        else:
                            # indices of K1+Q and K2+Q
                            ik1pq = int(self.iKSum[iK1,self.iQ])
                            ik2pq = int(self.iKSum[iK2,self.iQ])
                            numerator = self.G4[iw1,iw2,iK1,iK2]
                            if (iK1==iK2) & (iw1==iw2): numerator += self.Green[iwG1,iK1] * self.Green[iwG1+self.iwm,ik1pq]
                            denominator = self.Green[iwG1,iK1]*self.Green[iwG1+self.iwm,ik1pq] * self.Green[iwG2,iK2]*self.Green[iwG2+self.iwm,ik2pq]
                        self.GammaCluster[iw1,iK1,iw2,iK2] = numerator/denominator
        self.GammaCluster *= self.invT*self.Nc
# def calcEigenSolution(self,matrix,title):
# w,v = linalg.eig(matrix)
# ilead = argsort(w)[::-1][0:10]
# self.lambdasMatrix = w[ilead]
# self.evecsMatrix = v[:,ilead]
# print title,self.lambdasMatrix
# if self.draw: self.plotLeadingSolutions(self.Kvecs[self.FSpoints,:], real(self.lambdasMatrix), real(self.evecsMatrix), title)
    def buildChi0Lattice(self,nkfine):
        """Coarse-grain the lattice bare susceptibility chi0(K,iwn).

        A fine k-mesh patch is built around each cluster momentum K; on each
        patch G(K+k')G(-K-k'+Q) (particle-particle channels) or
        -G(K+k')G(K+k'+Q) (particle-hole) is averaged. Plain and form-factor
        weighted averages are stored in self.chi0, self.chi0D, self.chi0D2,
        self.chi0XS, self.chi0XS2, self.chi0PxP, self.chi0PxP2; the diagonal
        matrix form goes to self.chi0M.

        Parameters:
            nkfine -- number of fine k-points per direction spanning the BZ.
        """
        print ("Now calculating chi0 on lattice")
        NwG=self.NwG
        # Cluster K-grid
        Kset = self.Kvecs.copy() # copy() since Kset will be modified
        # Fine mesh
        klin = np.arange(-pi,pi,2*pi/nkfine)
        kx,ky = np.meshgrid(klin,klin)
        kset = np.column_stack((kx.flatten(),ky.flatten()))
        kPatch = []
        # shift to 1. BZ
        Nc = Kset.shape[0]
        for iK in range(Nc):
            if Kset[iK,0] > np.pi: Kset[iK,0] -= 2*np.pi
            if Kset[iK,1] > np.pi: Kset[iK,1] -= 2*np.pi
        self.Kset = Kset
        #Determine k-points in patch
        # A fine k-point belongs to the patch of K=(0,0) if no other cluster
        # momentum (modulo reciprocal lattice vectors) lies closer to it.
        for k in kset:
            distance0 = k[0]**2 + k[1]**2
            newPoint = True
            for K in Kset:
                distanceKx = k[0] - K[0]; distanceKy = k[1] - K[1]
                if distanceKx >= pi: distanceKx -= 2*pi
                if distanceKy >= pi: distanceKy -= 2*pi
                if distanceKx <= -pi: distanceKx += 2*pi
                if distanceKy <= -pi: distanceKy += 2*pi
                distanceK = distanceKx**2 + distanceKy**2
                if distanceK < distance0:
                    newPoint = False
                    break
            if newPoint: kPatch.append(k.tolist())
        kPatch = np.array(kPatch)
        self.kPatch = kPatch
        # Load frequency domain
        wnSet = self.wnSet
        # Load parameters: t,mu
        t = self.t
        mu = self.mu
        # Now coarse-grain G*G to build chi0(K) = Nc/N sum_k Gc(K+k')Gc(-K-k')
        self.chi0 = np.zeros((wnSet.shape[0],Kset.shape[0]),dtype='complex')
        self.chi0D = np.zeros((wnSet.shape[0],Kset.shape[0]),dtype='complex')
        self.chi0D2 = np.zeros((wnSet.shape[0],Kset.shape[0]),dtype='complex')
        self.chi0XS = np.zeros((wnSet.shape[0],Kset.shape[0]),dtype='complex')
        self.chi0XS2 = np.zeros((wnSet.shape[0],Kset.shape[0]),dtype='complex')
        self.chi0PxP = np.zeros((wnSet.shape[0],Kset.shape[0]),dtype='complex')
        self.chi0PxP2 = np.zeros((wnSet.shape[0],Kset.shape[0]),dtype='complex')
        self.gkdNorm = 0.0
        for iwn,wn in enumerate(wnSet): # reduced tp frequencies !!
            # print "iwn = ",iwn
            # map the reduced frequency index onto the single-particle grid
            iwG = int(iwn - self.iwG40 + self.iwG0)
            minusiwPlusiwm = min(max(NwG-iwG-1 + self.iwm,0),NwG-1) # -iwn + iwm
            iwPlusiwm = int(min(max(iwG + self.iwm,0),NwG-1)) # iwn+iwm
            for iK,K in enumerate(Kset):
                # c = zeros((8),dtype='complex')
                # c1 = 0.0; c2 = 0.0; c3 = 0.0; c4 = 0.0; c5 = 0.0
                # for k in kPatch:
                #     kx = K[0]+k[0]; ky = K[1]+k[1]
                #     ek = self.dispersion(kx,ky)
                #     gkd = cos(kx) - cos(ky)
                #     gkxs= cos(kx) + cos(ky)
                #     gkpp= gkd * sin(kx)
                #     if self.vertex_channel in ("PARTICLE_PARTICLE_SUPERCONDUCTING","PARTICLE_PARTICLE_UP_DOWN","PARTICLE_PARTICLE_SINGLET"):
                #         Qx = self.qchannel[0]; Qy = self.qchannel[1]
                #         emkpq = self.dispersion(-kx+Qx, -ky+Qy)
                #         iKQ = self.iKSum[self.iKDiff[self.iK0,iK],self.iQ]
                #         minusiwPlusiwm = min(max(NwG-iwG-1 + self.iwm,0),NwG-1) # -iwn + iwm
                #         c[0] = 1./(1j*wn+self.mu-ek-self.sigma[iwG,iK]) * 1./(-1j*wn+self.mu-emkpq-self.sigma[minusiwPlusiwm,iKQ])
                #         c[1] += c[0]
                #         c[2] += c[0] * gkd
                #         c[3] += c[0] * gkd**2
                #         c[4] += c[0] * gkxs
                #         c[5] += c[0] * gkxs**2
                #         c[6] += c[0] * gkpp
                #         c[7] += c[0] * gkpp**2
                #         if (iwn==0): self.gkdNorm += gkd**2
                #     else:
                #         Qx = self.qchannel[0]; Qy = self.qchannel[1]
                #         ekpq = self.dispersion(kx+Qx, ky+Qy)
                #         iKQ = int(self.iKSum[iK,self.iQ])
                #         iwPlusiwm = int(min(max(iwG + self.iwm,0),NwG-1)) # iwn+iwm
                #         c[1] -= 1./(1j*wn+self.mu-ek-self.sigma[iwG,iK]) * 1./(1j*wn+self.mu-ekpq-self.sigma[iwPlusiwm,iKQ])
                # Vectorized version of the commented-out per-k loop above:
                # evaluate G*G on the whole patch at once.
                Qx = self.qchannel[0]; Qy = self.qchannel[1]
                kx = K[0]+self.kPatch[:,0]; ky = K[1]+self.kPatch[:,1]
                ek = self.dispersion(kx,ky)
                gkd = cos(kx) - cos(ky)
                gkxs= cos(kx) + cos(ky)
                gkpp= gkd * sin(kx)
                c = zeros((7),dtype='complex')
                if self.vertex_channel in ("PARTICLE_PARTICLE_SUPERCONDUCTING","PARTICLE_PARTICLE_UP_DOWN","PARTICLE_PARTICLE_SINGLET"):
                    emkpq = self.dispersion(-kx+Qx, -ky+Qy)
                    iKQ = self.iKSum[self.iKDiff[self.iK0,iK],self.iQ]
                    cc = 1./(1j*wn+self.mu-ek-self.sigma[iwG,iK]) * 1./(-1j*wn+self.mu-emkpq-self.sigma[minusiwPlusiwm,iKQ])
                    c[0] = sum(cc)
                    c[1] = sum(cc*gkd)
                    c[2] = sum(cc*gkd**2)
                    c[3] = sum(cc*gkxs)
                    c[4] = sum(cc*gkxs**2)
                    c[5] = sum(cc*gkpp)
                    c[6] = sum(cc*gkpp**2)
                    if (iwn==0): self.gkdNorm = sum(gkd**2)
                else:
                    ekpq = self.dispersion(kx+Qx, ky+Qy)
                    iKQ = int(self.iKSum[iK,self.iQ])
                    cc = -1./(1j*wn+self.mu-ek-self.sigma[iwG,iK]) * 1./(1j*wn+self.mu-ekpq-self.sigma[iwPlusiwm,iKQ])
                    c[0] = sum(cc)
                # patch average
                self.chi0[iwn,iK] = c[0]/kPatch.shape[0]
                self.chi0D[iwn,iK] = c[1]/kPatch.shape[0]
                self.chi0D2[iwn,iK] = c[2]/kPatch.shape[0]
                self.chi0XS[iwn,iK] = c[3]/kPatch.shape[0]
                self.chi0XS2[iwn,iK] = c[4]/kPatch.shape[0]
                self.chi0PxP[iwn,iK] = c[5]/kPatch.shape[0]
                self.chi0PxP2[iwn,iK] = c[6]/kPatch.shape[0]
        self.chi0M = np.diag(self.chi0.reshape(self.nt))
        self.gkdNorm /= kPatch.shape[0]
        # NOTE(review): ("PARTICLE_PARTICLE_SINGLET") is a plain string, not a
        # tuple, so this is a substring test; it matches the exact channel name
        # but would also match e.g. "SINGLET" — a trailing comma would make it
        # a real tuple. Confirm intended semantics before changing.
        if self.vertex_channel in ("PARTICLE_PARTICLE_SINGLET"):
            # self.chi0[iwn,ik] also appears for k2=q-k1 from the cross terms
            NwG4 = self.NwG4
            for iwn in range(NwG4):
                for ik in range(Nc):
                    i1 = ik + Nc * iwn
                    ikPlusQ = int(self.iKSum[self.iKDiff[self.iK0,ik],self.iQ]) # -k+Q
                    minusiwPlusiwm = int(min(max(NwG4-iwn-1 + self.iwm,0),NwG4-1)) # -iwn + iwm
                    i2 = ikPlusQ + minusiwPlusiwm * Nc # k2 = q-k1
                    self.chi0M[i1,i2] += self.chi0[iwn,ik]
    def calcPd0FS(self,FSpoints):
        """Compute and print the bare d-wave pair-field susceptibility summed
        over Fermi-surface cluster momenta only:

            Pd0FS = T/N sum_{K in FSpoints, iwn} g_d(K)^2 G(K,iwn) G(-K,-iwn)

        with the d-wave form factor g_d(K) = cos(Kx) - cos(Ky). The result is
        printed, not stored on self.

        Parameters:
            FSpoints -- array of cluster K indices lying on the Fermi surface.
        """
        c1=0.0
        NwG=self.NwG
        for iwn,wn in enumerate(self.wn):
        #for iwn,wn in enumerate(self.wnSet):
            iwG = iwn
            #iwG = int(iwn - self.iwG40 + self.iwG0)
            for iK in FSpoints:
                Kx = self.Kvecs[iK,0]
                Ky = self.Kvecs[iK,1]
                #for k in self.kPatch:
                # only the cluster momentum itself (no patch averaging)
                for k in [[0,0]]:
                    kx = Kx+k[0]; ky = Ky+k[1]
                    ek = self.dispersion(kx,ky)
                    gkd = cos(kx)-cos(ky)
                    emk = self.dispersion(-kx, -ky)
                    iKQ = self.iKDiff[self.iK0,iK]
                    minusiw = min(max(NwG-iwG-1,0),NwG-1) # -iwn + iwm
                    c1 += gkd**2/(1j*wn+self.mu-ek-self.sigma[iwG,iK]) * 1./(-1j*wn+self.mu-emk-self.sigma[minusiw,iKQ])
        #print("Pd0 = T/N sum_kF (coskx-cosky)^2 G(k,iwn)*G(-k,-iwn)",c1 / (FSpoints.shape[0]*self.kPatch.shape[0]*self.invT))
        print("Pd0FS = T/N sum_kF (coskx-cosky)^2 G(k,iwn)*G(-k,-iwn)",c1 / (FSpoints.shape[0]*self.invT))
def calcProjectionsKwn(self,matrix,formFactor,normPower=0):
gk = formFactor(self.Kvecs[:,0],self.Kvecs[:,1])
fwn = ((np.pi*self.temp)**2+self.wCutOff**2)/(self.wnSet**2 + self.wCutOff**2)
ff = np.outer(fwn,gk).reshape(self.NwG4*self.Nc)
matrixProjected = real(np.dot(ff,np.dot(matrix,ff)))/np.inner(ff,ff)**normPower
return matrixProjected
    def calcProjections(self,matrix,formFactor,wCutOff=1):
        """Project `matrix` onto the separable form factor fwn(iwn)*gk(K).

        gk is `formFactor` evaluated on the cluster momenta. For wCutOff > 0
        the frequency weight is fwn = ((pi*T)^2+wCutOff^2)/(wn^2+wCutOff^2);
        for wCutOff <= 0 only the central (pi*T, pi*T) frequency block of
        `matrix` is projected with gk. Returns the real, unnormalized
        projection.
        """
        # NOTE(review): nCutOff is computed but never used below (only the
        # commented-out print refers to it) — presumably legacy code.
        nCutOff = ceil(wCutOff*self.invT/(2.*pi) - 0.5)
        nCutOff = int(max(nCutOff,1))
        # print("nCutOff=",nCutOff)
        gk = formFactor(self.Kvecs[:,0],self.Kvecs[:,1])
        if wCutOff > 0:
            fwn = ((np.pi*self.temp)**2+wCutOff**2)/(self.wnSet**2 + wCutOff**2)
            fg = np.outer(fwn,gk).reshape(self.NwG4*self.Nc)
            matrixProjected = real(np.dot(fg,np.dot(matrix,fg)))
            # matrixProjected = real(np.dot(fg,np.dot(matrix,fg)))/np.inner(fg,fg)
        else: #only take piT,piT element
            ind0 = int(self.NwG4/2)
            matrix00 = matrix.reshape(self.NwG4,self.Nc,self.NwG4,self.Nc)[ind0,:,ind0,:]
            matrixProjected = real(np.dot(gk,np.dot(matrix00,gk)))
        return matrixProjected
    def calcPd0(self,wCutOff=0.5714):
        """Compute the bare d-wave pair-field susceptibility Pd0 by several
        estimators and print each:

          * self.Pd0  -- soft frequency cutoff fwn times gkd applied to chi0;
          * self.Pd02 -- projection onto the d-wave eigenvector (if found);
          * self.Pd0a -- projection onto |phid * fg|;
          * self.Pd04 -- projection onto the eigenvector truncated to the two
                         central Matsubara frequencies (noise control);
          * self.Pd03 -- lattice-form-factor weighted chi0D2 with a hard
                         frequency cutoff.

        Parameters:
            wCutOff -- frequency scale of the soft cutoff form factor.
        """
        nCutOff = ceil(wCutOff*self.invT/(2.*pi) - 0.5)
        nCutOff = int(max(nCutOff,1))
        print("nCutOff=",nCutOff)
        # d-wave form factor on the cluster momenta
        gkd = cos(self.Kvecs[:,0]) - cos(self.Kvecs[:,1])
        fwn = ((np.pi*self.temp)**2+wCutOff**2)/(self.wnSet**2 + wCutOff**2)
        # cc = real(sum(self.chi0[abs(self.wnSet) <= wc,:],axis=0))
        # cc = 2.0*real(sum(self.chi0[self.iwG40:self.iwG40+nCutOff,:],axis=0))
        # self.Pd0 = np.dot(gkd**2, cc)/(self.invT*self.Nc)/np.inner(gkd,gkd)
        fg = np.outer(fwn,gkd).reshape(self.NwG4*self.Nc)
        self.Pd0 = real(np.dot(fg**2, self.chi0.reshape(self.NwG4*self.Nc))/(self.invT*self.Nc)/np.inner(gkd,gkd))
        print("Pd0(T) with cutOff=",self.Pd0)
        # Now compare this with projection onto d-wave eigenvector
        if self.found_d:
            phid = self.evecs[:,:,self.ind_d]
            # Set phid(K=(pi,0),piT) = 1
            phid = phid / phid.max()
            self.phid = phid.reshape(self.NwG4*self.Nc)
            chi0pp = self.chi0.reshape(self.NwG4*self.Nc)
            self.Pd02 = np.dot(self.phid**2,chi0pp) / (self.invT*self.Nc)
            print("Pd0(T) from projection onto eigenvector=",real(self.Pd02))
            # Projection onto phid * fg
            self.fg = fg/fg.max()
            self.Pd0a = np.dot(abs(self.phid*self.fg),chi0pp) / (self.invT*self.Nc)
            print("Pd0(T) from projection onto eigenvector * fg =",real(self.Pd0a))
            # Since phid at low T becomes noisy at large wn, lets cut-off phid(k,wn) for |wn| > wc
            self.phidFixed = np.zeros_like(self.evecs[:,:,self.ind_d])
            nC = 1
            self.phidFixed[self.iwG40-nC:self.iwG40+nC,:] = self.evecs[:,:,self.ind_d][self.iwG40-nC:self.iwG40+nC,:]
            phidF = self.phidFixed.reshape(self.NwG4*self.Nc)
            norm = np.dot(phidF,phidF)
            self.phidFixed /= sqrt(norm)
            phidF = self.phidFixed.reshape(self.NwG4*self.Nc)
            self.Pd04 = np.dot(phidF**2,chi0pp) /(self.invT*self.Nc)
            print("Pd0(T) from projection onto fixed eigenvector=",real(self.Pd04))
        # Now use self.chi0D2
        cc = 2.0*real(sum(self.chi0D2[self.iwG40:self.iwG40+nCutOff,:]))
        self.Pd03 = cc/(self.invT*self.Nc)/self.gkdNorm
        print("Pd0(T) with lattice gkd and wn cutOff=",real(self.Pd03))
# def calcChi0Tilde(self,evec):
# gk = self.dwave(self.Kvecs[:,0],self.Kvecs[:,1])
# pk = 0.0*np.ones_like(evec)
# for ik in range(self.Nc):
# pk[:,ik] = evec[:,ik] * sqrt(real(self.chic0[:,ik])) * gk[ik]
# chi0Tilde = sum(real(self.chic0*pk))*self.temp/self.Nc * sum(pk)
# return chi0Tilde
# def calcPdFromEigen(self,ia=0):
# nt | |
= conv_raw_prob)
# sum up losses and take mean accross batch
giou_loss = tf.reduce_mean(tf.reduce_sum(giou_loss, axis = [1,2,3,4]))
conf_loss = tf.reduce_mean(tf.reduce_sum(conf_loss, axis = [1,2,3,4]))
prob_loss = tf.reduce_mean(tf.reduce_sum(prob_loss, axis = [1,2,3,4]))
if np.isnan(giou_loss):
giou_loss = tf.Variable(0, trainable = False, dtype = tf.float32)
return giou_loss, conf_loss, prob_loss
def postprocess_boxes(pred_bbox, original_image, train_input_size, score_threshold):
    """Rescale raw predictions to the original image frame and filter them.

    `pred_bbox` rows are (x, y, w, h, objectness, class_prob_0, ...). The
    center/size coordinates are mapped from the network input size back to
    the original image, converted to corner form, clipped to the image, and
    boxes that are degenerate or score below `score_threshold` are dropped.
    Returns an array with rows (xmin, ymin, xmax, ymax, score, class).
    """
    valid_scale = [0, np.inf]
    pred_bbox = np.array(pred_bbox)

    xywh = pred_bbox[:, 0:4]
    objectness = pred_bbox[:, 4]
    class_probs = pred_bbox[:, 5:]

    # 1. undo the resize: map (x, y, w, h) back to original-image pixels
    org_h, org_w = original_image.shape[:2]
    ratio_h = train_input_size / org_h
    ratio_w = train_input_size / org_w
    coords = np.stack([xywh[:, 0] / ratio_w,
                       xywh[:, 1] / ratio_h,
                       xywh[:, 2] / ratio_w,
                       xywh[:, 3] / ratio_h], axis=-1)

    # 2. center/size -> corner form (xmin, ymin, xmax, ymax)
    coords = np.concatenate([coords[:, :2] - coords[:, 2:] * 0.5,
                             coords[:, :2] + coords[:, 2:] * 0.5], axis=-1)

    # 3. clip corners to the image bounds
    coords = np.concatenate([np.maximum(coords[:, :2], [0, 0]),
                             np.minimum(coords[:, 2:], [org_w - 1, org_h - 1])], axis=-1)
    # zero out boxes whose corners crossed after clipping (degenerate)
    degenerate = np.logical_or(coords[:, 0] > coords[:, 2], coords[:, 1] > coords[:, 3])
    coords[degenerate] = 0

    # 4. drop boxes whose geometric-mean side length is outside valid_scale
    box_scale = np.sqrt(np.multiply.reduce(coords[:, 2:4] - coords[:, 0:2], axis=-1))
    scale_ok = np.logical_and(valid_scale[0] < box_scale, box_scale < valid_scale[1])

    # 5. drop low-score boxes: score = objectness * best class probability
    best_class = np.argmax(class_probs, axis=-1)
    scores = objectness * class_probs[np.arange(len(coords)), best_class]
    keep = np.logical_and(scale_ok, scores > score_threshold)

    kept_coords = coords[keep]
    kept_scores = scores[keep][:, np.newaxis]
    kept_classes = best_class[keep][:, np.newaxis]
    return np.concatenate([kept_coords, kept_scores, kept_classes], axis=-1)
def nms(bboxes, iou_threshold, sigma = 0.3, method = 'nms'):
    """
    Non-maximal suppression (method='nms') or soft-NMS (method='soft_nms')
    of bboxes, applied per class.

    Takes bboxes with the shape of (num_of_box, 6), where 6 =>
    (xmin, ymin, xmax, ymax, score, class). Returns a list of the surviving
    bbox rows.

    NOTE(review): the `.numpy()` call on the IoU mask implies `bbox_iou`
    (defined elsewhere in this file) returns a TensorFlow tensor — confirm
    before refactoring this to pure numpy.
    """
    # remove duplicates in classes
    classes_in_img = list(set(bboxes[:, 5]))
    # initialise list to store best bboxes
    best_bboxes = []
    # iterate over each class
    for cls in classes_in_img:
        # get mask for bboxes with the same class and apply on bboxes to obtain array of bboxes with same class
        cls_mask = (bboxes[:, 5] == cls)
        cls_bboxes = bboxes[cls_mask]
        # iterate while there are still bboxes in cls_bboxes
        while len(cls_bboxes) > 0:
            # select index of the bbox with the highest score
            max_ind = np.argmax(cls_bboxes[:, 4])
            # select bbox with highest score
            best_bbox = cls_bboxes[max_ind]
            # append to best _bbox list
            best_bboxes.append(best_bbox)
            # obtain cls_bboxes without best bbox
            cls_bboxes = np.concatenate([cls_bboxes[: max_ind], cls_bboxes[max_ind + 1:]])
            # calculate iou of remaining bboxes with best bbox
            iou = bbox_iou(best_bbox[np.newaxis, :4], cls_bboxes[:, :4])
            weight = np.ones((len(iou), ), dtype = np.float32)
            # assert method to be either 'nms' or 'soft_nms'
            assert method in ['nms', 'soft_nms']
            if method == 'nms':
                # obtain nms iou mask based on threshold
                iou_mask = iou > iou_threshold
                # suppress overlapping boxes outright
                weight[iou_mask.numpy()] = 0.0
            if method == 'soft_nms':
                # obtain soft_nms weights: decay scores by a Gaussian of IoU
                weight = np.exp(-(1.0 * iou ** 2 / sigma))
            # apply weights on cls_bboxes
            cls_bboxes[:, 4] = cls_bboxes[:, 4] * weight
            # obtain score mask of scores greater than zero
            score_mask = cls_bboxes[:, 4] > 0.
            # apply mask on cls_bboxes
            cls_bboxes = cls_bboxes[score_mask]
    return best_bboxes
def draw_bbox(image, bboxes, classes_file_path, show_label = True, show_confidence = True, Text_colors = (255,255,0),
              rectangle_colors = '', tracking = False):
    """
    Draw bounding boxes (and optional class/confidence labels) on `image`
    in place and return it.

    `bboxes` rows are (xmin, ymin, xmax, ymax, score, class_index);
    `classes_file_path` names the class-list file. Per-class colors are
    derived from evenly spaced HSV hues, shuffled with a fixed seed so the
    class->color mapping is stable across calls. When `tracking` is True the
    raw score string is shown unrounded.
    """
    # obtain list of classes name
    classes = read_class_names(classes_file_path)
    # obtain length of classes
    num_classes = len(classes)
    # obtain shape of image
    image_h, image_w, _ = image.shape
    # obtain list of unique hsv (hue, saturation, value) for each class
    hsv_tuples = [(1.0 * x / num_classes, 1., 1.) for x in range(num_classes)]
    # obtain unique rgb tuples from hsv tuples
    colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
    # scale rgb from 0-1 to 0-255
    colors = list(map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)), colors))
    # shuffle colors list with same seed (then reseed from entropy)
    random.seed(0)
    random.shuffle(colors)
    random.seed(None)
    # iterate over bbox in bboxes
    for i, bbox in enumerate(bboxes):
        # obtain coordinates of bbox
        coor = np.array(bbox[:4], dtype = np.int32)
        # obtain objectiveness score
        score = bbox[4]
        # obtain class index
        class_ind = int(bbox[5])
        # choose rectangle color if none is given, else chose from tuple
        bbox_color = rectangle_colors if rectangle_colors != '' else colors[class_ind]
        # obtain thickness of bboxes, scaled with image size (minimum 1 px)
        bbox_thick = int(0.6 * (image_h + image_w) / 1000)
        if bbox_thick < 1: bbox_thick = 1
        # obtain font scale
        fontScale = 0.75 * bbox_thick
        # obtain tuples of min and max coordinates
        (x1, y1), (x2, y2) = (coor[0], coor[1]), (coor[2], coor[3])
        # generate bbox
        cv2.rectangle(image, (x1, y1), (x2, y2), bbox_color, bbox_thick * 2)
        # if show label is true
        if show_label:
            # get objectiveness score label
            score_str = " {:.2f}".format(score) if show_confidence else ""
            # if tracking show whole score without rounding
            if tracking: score_str = " " + str(score)
            # obtain label of class name with objectiveness score
            label = "{}".format(classes[class_ind]) + score_str
            # get text size
            (text_width, text_height), baseline = cv2.getTextSize(label, cv2.FONT_HERSHEY_COMPLEX_SMALL,
                                                                  fontScale, thickness = bbox_thick)
            # put filled text rectangle
            cv2.rectangle(image, (x1, y1), (x1 + text_width, y1 - text_height - baseline), bbox_color,
                          thickness = cv2.FILLED)
            # put text above rectangle
            cv2.putText(image, label, (x1, y1 - 4), cv2.FONT_HERSHEY_COMPLEX_SMALL,
                        fontScale, Text_colors, bbox_thick, lineType = cv2.LINE_AA)
    return image
def detect_image(yolo_v3_model, image_paths, batch_frames, output_path, train_input_size, classes_file_path,
score_threshold, iou_threshold, num_of_anchor_bbox, strides, anchors, show = False,
rectangle_colors = ''):
""" function to take in image and apply bbox on it """
# obtain number of classes
num_of_classes = len(read_class_names(classes_file_path))
# create list to store images
original_images = []
# iterate over images in chronological order (last image is image of interest to put bbox)
for x in range(batch_frames):
# obtain original image
original_image = cv2.imread(image_paths[x])
# append original image to original_images list
original_images.append(original_image[:])
# convert original image to grayscale
image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
# preprocess image
image = transform_images(image[:], train_input_size)
# obtain concat frame if none exist
if x == 0:
concat_image = image[:]
# concatenate subsequent frames to concat_image
else:
concat_image = np.concatenate((concat_image, image), axis = -1)
# add batch dimensions to concatenated image
concat_image = concat_image[np.newaxis, ...].astype(np.float32)
# create constant tensor from concatenated image and feed it to yolo_v3_model
batched_input = tf.constant(concat_image)
yolo_output = yolo_v3_model(batched_input)
# list to store bboxes from respective scales
pred_bbox = []
| |
size in six.moves.range(length, length + 1):
end = start + size
if end > item_length:
continue
yield _to_xapian_term(item[start:end])
                def edge_ngram_terms(value):
                    """Yield the edge n-grams (token prefixes) of `value` as Xapian terms."""
                    for item, length in _get_ngram_lengths(value):
                        yield _to_xapian_term(item[0:length])
                def add_edge_ngram_to_document(prefix, value, weight):
                    """
                    Splits the term in edge ngrams (prefixes) and adds each
                    ngram to the index, both bare and with the field prefix.

                    The minimum and maximum size of the ngram is respectively
                    NGRAM_MIN_LENGTH and NGRAM_MAX_LENGTH.
                    """
                    for term in edge_ngram_terms(value):
                        document.add_term(term, weight)
                        document.add_term(prefix + term, weight)
                def add_ngram_to_document(prefix, value, weight):
                    """
                    Splits the term in ngrams and adds each ngram to the
                    index, both bare and with the field prefix.

                    The minimum and maximum size of the ngram is respectively
                    NGRAM_MIN_LENGTH and NGRAM_MAX_LENGTH.
                    """
                    for term in ngram_terms(value):
                        document.add_term(term, weight)
                        document.add_term(prefix + term, weight)
                def add_non_text_to_document(prefix, term, weight):
                    """
                    Adds term to the document without positional information
                    and without processing, both bare and with the field
                    prefix.

                    NOTE(review): the original docstring claimed single terms
                    are also added as "^<term>$" for exact matches, but this
                    helper does not do that — confirm intended behavior.
                    """
                    document.add_term(term, weight)
                    document.add_term(prefix + term, weight)
                def add_datetime_to_document(termpos, prefix, term, weight):
                    """
                    Adds a datetime to document with positional order
                    to allow exact matches on it.

                    Assumes `term` is "<date> <time>" (whitespace-separated —
                    TODO confirm format at call site). Date and time are
                    posted at consecutive positions, bare and prefixed, so a
                    phrase query can match the full value. Returns the next
                    free term position.
                    """
                    date, time = term.split()
                    document.add_posting(date, termpos, weight)
                    termpos += 1
                    document.add_posting(time, termpos, weight)
                    termpos += 1
                    document.add_posting(prefix + date, termpos, weight)
                    termpos += 1
                    document.add_posting(prefix + time, termpos, weight)
                    # leave a gap so the next field's postings cannot form an
                    # accidental phrase with this datetime
                    termpos += TERMPOS_DISTANCE + 1
                    return termpos
data = index.full_prepare(obj)
weights = index.get_field_weights()
termpos = term_generator.get_termpos() # identifies the current position in the document.
for field in self.schema:
if field['field_name'] not in list(data.keys()):
# not supported fields are ignored.
continue
if field['field_name'] in weights:
weight = int(weights[field['field_name']])
else:
weight = 1
value = data[field['field_name']]
if field['field_name'] in ('id', 'django_id', 'django_ct'):
# Private fields are indexed in a different way:
# `django_id` is an int and `django_ct` is text;
# besides, they are indexed by their (unstemmed) value.
if field['field_name'] == 'django_id':
value = int(value)
value = _term_to_xapian_value(value, field['type'])
document.add_term(TERM_PREFIXES[field['field_name']] + value, weight)
document.add_value(field['column'], value)
continue
else:
prefix = TERM_PREFIXES['field'] + field['field_name'].upper()
# if not multi_valued, we add as a document value
# for sorting and facets
if field['multi_valued'] == 'false':
document.add_value(field['column'], _term_to_xapian_value(value, field['type']))
else:
for t in value:
# add the exact match of each value
term = _to_xapian_term(t)
termpos = add_text(termpos, prefix, term, weight)
continue
term = _to_xapian_term(value)
if term == '':
continue
# from here on the term is a string;
# we now decide how it is indexed
if field['type'] == 'text':
# text is indexed with positional information
termpos = add_text(termpos, prefix, term, weight)
elif field['type'] == 'datetime':
termpos = add_datetime_to_document(termpos, prefix, term, weight)
elif field['type'] == 'ngram':
add_ngram_to_document(prefix, value, weight)
elif field['type'] == 'edge_ngram':
add_edge_ngram_to_document(prefix, value, weight)
else:
# all other terms are added without positional information
add_non_text_to_document(prefix, term, weight)
# store data without indexing it
if django.VERSION < (1, 7):
model_name = obj._meta.module_name
else:
model_name = obj._meta.model_name
document.set_data(pickle.dumps(
(obj._meta.app_label, model_name, obj.pk, data),
pickle.HIGHEST_PROTOCOL
))
# add the id of the document
document_id = TERM_PREFIXES['id'] + get_identifier(obj)
document.add_term(document_id)
# finally, replace or add the document to the database
database.replace_document(document_id, document)
except UnicodeDecodeError:
sys.stderr.write('Chunk failed.\n')
pass
finally:
database.close()
def remove(self, obj, commit=True):
"""
Remove indexes for `obj` from the database.
We delete all instances of `Q<app_name>.<model_name>.<pk>` which
should be unique to this object.
Optional arguments:
`commit` -- ignored (present for compatibility with django-haystack 1.4)
"""
database = self._database(writable=True)
database.delete_document(TERM_PREFIXES['id'] + get_identifier(obj))
database.close()
def clear(self, models=(), commit=True):
    """
    Clear all instances of `models` from the database or all models, if
    not specified.

    Optional Arguments:
        `models` -- Models to clear from the database (default = [])

    If `models` is empty, an empty query is executed which matches all
    documents in the database. Afterwards, each match is deleted.

    Otherwise, for each model, a `delete_document` call is issued with
    the term `XCONTENTTYPE<app_name>.<model_name>`. This will delete
    all documents with the specified model type.
    """
    if not models:
        # Because there does not appear to be a "clear all" method,
        # it's much quicker to remove the contents of the `self.path`
        # folder than it is to remove each document one at a time.
        if os.path.exists(self.path):
            shutil.rmtree(self.path)
    else:
        database = self._database(writable=True)
        try:
            for model in models:
                database.delete_document(TERM_PREFIXES['django_ct'] + get_model_ct(model))
        finally:
            # Release the writable handle even if a delete fails; Xapian
            # permits only one writer at a time.
            database.close()
def document_count(self):
    """Return the number of documents in the index, or 0 when it cannot be opened."""
    try:
        database = self._database()
        count = database.get_doccount()
    except InvalidIndexError:
        count = 0
    return count
def _build_models_query(self, query):
    """
    Builds a query from `query` that filters to documents only from registered models.
    """
    registered_models_ct = self.build_models_list()
    if not registered_models_ct:
        # Nothing registered: leave the query untouched.
        return query
    restrictions = [
        xapian.Query('%s%s' % (TERM_PREFIXES['django_ct'], model_ct))
        for model_ct in registered_models_ct
    ]
    limit_query = xapian.Query(xapian.Query.OP_OR, restrictions)
    return xapian.Query(xapian.Query.OP_AND, query, limit_query)
def _check_field_names(self, field_names):
    """
    Raises InvalidIndexError if any of a field_name in field_names is
    not indexed.
    """
    for field_name in field_names or ():
        # `self.column` maps indexed field names to value slots; anything
        # missing from it was never indexed.
        if field_name not in self.column:
            raise InvalidIndexError('Trying to use non indexed field "%s"' % field_name)
@log_query
def search(self, query, sort_by=None, start_offset=0, end_offset=None,
           fields='', highlight=False, facets=None, date_facets=None,
           query_facets=None, narrow_queries=None, spelling_query=None,
           limit_to_registered_models=True, result_class=None, **kwargs):
    """
    Executes the Xapian::query as defined in `query`.

    Required arguments:
        `query` -- Search query to execute

    Optional arguments:
        `sort_by` -- Sort results by specified field (default = None)
        `start_offset` -- Slice results from `start_offset` (default = 0)
        `end_offset` -- Slice results at `end_offset` (default = None), if None, then all documents
        `fields` -- Filter results on `fields` (default = '')
        `highlight` -- Highlight terms in results (default = False)
        `facets` -- Facet results on fields (default = None)
        `date_facets` -- Facet results on date ranges (default = None)
        `query_facets` -- Facet results on queries (default = None)
        `narrow_queries` -- Narrow queries (default = None)
        `spelling_query` -- An optional query to execute spelling suggestion on
        `limit_to_registered_models` -- Limit returned results to models registered in
        the current `SearchSite` (default = True)

    Returns:
        A dictionary with the following keys:
            `results` -- A list of `SearchResult`
            `hits` -- The total available results
            `facets` - A dictionary of facets with the following keys:
                `fields` -- A list of field facets
                `dates` -- A list of date facets
                `queries` -- A list of query facets
        If faceting was not used, the `facets` key will not be present

    If `query` is None, returns no results.

    If `INCLUDE_SPELLING` was enabled in the connection options, the
    extra flag `FLAG_SPELLING_CORRECTION` will be passed to the query parser
    and any suggestions for spell correction will be returned as well as
    the results.
    """
    # An empty query can never match anything; short-circuit before opening
    # the database. NOTE: this early return omits the 'facets' and
    # 'spelling_suggestion' keys present on the normal path.
    if xapian.Query.empty(query):
        return {
            'results': [],
            'hits': 0,
        }
    # Fail fast if any requested facet field was never indexed.
    self._check_field_names(facets)
    self._check_field_names(date_facets)
    self._check_field_names(query_facets)
    database = self._database()
    if result_class is None:
        result_class = SearchResult
    if self.include_spelling is True:
        spelling_suggestion = self._do_spelling_suggestion(database, query, spelling_query)
    else:
        spelling_suggestion = ''
    # Narrow queries are AND-ed together and then AND-ed onto the main query.
    if narrow_queries is not None:
        query = xapian.Query(
            xapian.Query.OP_AND, query, xapian.Query(
                xapian.Query.OP_AND, [self.parse_query(narrow_query) for narrow_query in narrow_queries]
            )
        )
    # Restrict results to documents belonging to registered models.
    if limit_to_registered_models:
        query = self._build_models_query(query)
    enquire = xapian.Enquire(database)
    if hasattr(settings, 'HAYSTACK_XAPIAN_WEIGHTING_SCHEME'):
        enquire.set_weighting_scheme(xapian.BM25Weight(*settings.HAYSTACK_XAPIAN_WEIGHTING_SCHEME))
    enquire.set_query(query)
    if sort_by:
        # Prefer the newer sort API; fall back to the legacy one on older
        # Xapian bindings.
        try:
            _xapian_sort(enquire, sort_by, self.column)
        except NotSupportedError:
            _old_xapian_sort(enquire, sort_by, self.column)
    results = []
    facets_dict = {
        'fields': {},
        'dates': {},
        'queries': {},
    }
    # NOTE(review): this also treats end_offset=0 as "all documents";
    # presumably intentional since a zero-length page is useless -- confirm.
    if not end_offset:
        end_offset = database.get_doccount() - start_offset
    ## prepare spies in case of facets
    if facets:
        facets_spies = self._prepare_facet_field_spies(facets)
        for spy in facets_spies:
            enquire.add_matchspy(spy)
    matches = self._get_enquire_mset(database, enquire, start_offset, end_offset)
    for match in matches:
        # Document data was stored at index time as a pickled
        # (app_label, model_name, pk, fields) tuple.
        app_label, model_name, pk, model_data = pickle.loads(self._get_document_data(database, match.document))
        if highlight:
            model_data['highlighted'] = {
                self.content_field_name: self._do_highlight(
                    model_data.get(self.content_field_name), query
                )
            }
        results.append(
            result_class(app_label, model_name, pk, match.percent, **model_data)
        )
    if facets:
        # pick single valued facets from spies
        single_facets_dict = self._process_facet_field_spies(facets_spies)
        # pick multivalued valued facets from results
        multi_facets_dict = self._do_multivalued_field_facets(results, facets)
        # merge both results (http://stackoverflow.com/a/38990/931303)
        facets_dict['fields'] = dict(list(single_facets_dict.items()) + list(multi_facets_dict.items()))
    if date_facets:
        facets_dict['dates'] = self._do_date_facets(results, date_facets)
    if query_facets:
        facets_dict['queries'] = self._do_query_facets(results, query_facets)
    return {
        'results': results,
        'hits': self._get_hit_count(database, enquire),
        'facets': facets_dict,
        'spelling_suggestion': spelling_suggestion,
    }
def more_like_this(self, model_instance, additional_query=None,
start_offset=0, end_offset=None,
limit_to_registered_models=True, result_class=None, **kwargs):
"""
Given a model instance, returns a result set of similar documents.
Required arguments:
`model_instance` -- The model instance to use as a basis for
retrieving similar documents.
Optional arguments:
`additional_query` -- An additional query to narrow results
`start_offset` -- The starting offset (default=0)
`end_offset` -- The ending offset (default=None), if None, then all documents
`limit_to_registered_models` -- Limit | |
probability
})
for order_id in probabilities:
if probabilities[order_id] is None:
continue
order_probabilities = probabilities[order_id]
best_expected_f1 = 0
best_expected_f1_products = None
for k in order_probabilities:
f1_for = k['positive']
# if len(f1_for) == 0:
# # Skip P(None)
# continue
total_f1 = 0
for l in order_probabilities:
ground_truth = l['positive']
if debug:
print("Calculating expected F1 score for", f1_for, end='')
print(" against ground truth", ground_truth, end='')
tp = len(list(set(f1_for).intersection(ground_truth)))
n = len(set(f1_for).symmetric_difference(set(ground_truth)))
if tp == 0 and n == 0 and len(f1_for) == 0 and len(ground_truth) == 0:
expected_f1 = l['probability']
else:
if debug:
print(" = %f * ( (2 * %d) / ((2 * %d) + %d) )" % (l['probability'], tp, tp, n), end='')
if tp == 0:
expected_f1 = 0
else:
expected_f1 = l['probability'] * ((2 * tp) / ((2 * tp) + n))
if debug:
print(" = %f" % expected_f1)
total_f1 += expected_f1
if debug:
print("Total expected F1 score for", f1_for, end='')
print(" = %.10f\n" % total_f1)
if best_expected_f1 < total_f1:
best_expected_f1 = total_f1
best_expected_f1_products = f1_for
best_per_order[order_id] = ' '.join(str(i) for i in best_expected_f1_products)
print(best_per_order)
exit()
bpo = pd.DataFrame.from_dict(best_per_order, orient='index')
bpo.reset_index(inplace=True)
bpo.columns = ['order_id', 'best_products']
return bpo
def cluster_product_vectors(self):
    """
    Cluster the word2vec product embeddings with k-means.

    Loads the model saved by `ordered_products_to_vector()` and writes the
    resulting product_id -> cluster_id mapping to data/product_clusters.csv.
    Exits the process if the model file is missing.
    """
    t0 = time()
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
    model_filename = 'data/word2vec-productIds'
    model_file = Path(model_filename)
    if model_file.is_file() is False:
        print("No word2vec model file. Run Instacart.ordered_products_to_vector() first")
        exit()
    model = Word2Vec.load(model_filename)
    # NOTE(review): `wv.syn0` is the raw embedding matrix; it is deprecated in
    # newer gensim releases (use `wv.vectors`) -- confirm the pinned version.
    word_vectors = model.wv.syn0
    # num_clusters = word_vectors.shape[0] // 5 # the // is the int division operator (!)
    # we have 135 departments. Lets get 5x clusters
    num_clusters = 5*135
    from sklearn.cluster import KMeans
    # Initalize a k-means object and use it to extract centroids
    kmeans_clustering = KMeans(n_clusters=num_clusters, n_jobs=-1)
    idx = kmeans_clustering.fit_predict(word_vectors)
    # Row order of the embedding matrix matches `index2word`, so zipping pairs
    # each product id token with its cluster label.
    word_centroid_map = dict(zip(model.wv.index2word, idx))
    m = pd.DataFrame.from_dict(word_centroid_map, orient='index')
    m['product_id'] = m.index
    # Column 0 (the k-means label) becomes cluster_id after the rename.
    m.columns = ['cluster_id', 'product_id']
    m.to_csv('data/product_clusters.csv', index=False)
    print("=> Completed clustering in %fs" % (time() - t0))
def ordered_products_to_vector(self):
    """
    Train (or apply) a word2vec model over per-order product-id "sentences".

    If a trained model already exists on disk, annotate `self._products`
    with the two most similar products for each product and write
    data/product_similarity.csv, then return. Otherwise build the
    order -> space-joined-product-ids sentence file (if missing), train a
    skip-gram word2vec model on it and save it to data/word2vec-productIds.
    """
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
    model_filename = 'data/word2vec-productIds'
    model_file = Path(model_filename)
    if model_file.is_file():
        model = Word2Vec.load(model_filename)
        # vocab = list(model.vocab.keys())
        # print(len(vocab))
        for i, row in self._products.iterrows():
            try:
                print("Getting most similar for", row.product_id)
                ms = model.wv.most_similar(positive=[str(row.product_id)])
                # DataFrame.set_value() was deprecated in pandas 0.21 and
                # removed in 1.0; .at is the supported scalar setter with
                # the same semantics.
                self._products.at[i, 'most_similar'] = ms[0][0]
                self._products.at[i, 'second_most_similar'] = ms[1][0]
            except KeyError:
                # Product id absent from the word2vec vocabulary.
                # Set the nan to 0 if I want to use SMOTE
                self._products.at[i, 'most_similar'] = np.nan
                self._products.at[i, 'second_most_similar'] = np.nan
        self._products[['product_id', 'most_similar', 'second_most_similar']].to_csv('data/product_similarity.csv',
                                                                                    index=False)
        # ms = model.wv.most_similar(positive=['31717'])
        # print(ms[0][0])
        return
    filename = "data/order-product-pairs.csv"
    file = Path(filename)
    if file.is_file() is False:
        t0 = time()
        print("=> Order product pairs csv file not there. Creating...")
        self._merged_orders = pd.concat([self._orders_prior, self._orders_train], ignore_index=True, axis=0)
        # One "sentence" per order: the space-joined product ids it contains.
        product_sentences = self._merged_orders.groupby('order_id').apply(
            lambda x: ' '.join(str(y) for y in x.product_id))
        product_sentences.to_frame().to_csv(filename)
        print("=> Completed in %fs" % (time() - t0))
    opp = pd.read_csv(filename)
    opp.columns = ['order_id', 'products']
    t0 = time()
    print("=> Training word2vec model")
    opp['sentences'] = opp.products.str.split(' ')
    # print(opp.head(2))
    model = Word2Vec(opp.sentences, size=100, window=5, min_count=1, workers=4, sg=1, sample=0)
    model.save('data/word2vec-productIds')
    print("=> Completed in %fs" % (time() - t0))
# https://www.kaggle.com/hongweizhang/how-to-calculate-f1-score
def f1_score_single(self, labels, preds):
    """
    Precision, recall and F1 for a single order.

    Both arguments are space-separated product-id strings. Returns the
    tuple (precision, recall, f1); f1 is 0.0 when both precision and
    recall are zero.
    """
    labels = labels.split(' ')
    preds = preds.split(' ')
    rr = (np.intersect1d(labels, preds))
    # `np.float` was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin float is exactly what it aliased.
    precision = float(len(rr)) / len(preds)
    recall = float(len(rr)) / len(labels)
    try:
        f1 = 2 * precision * recall / (precision + recall)
    except ZeroDivisionError:
        # No overlap at all: define F1 as 0 instead of dividing by zero.
        return precision, recall, 0.0
    return precision, recall, f1
def f1_score_single_alt(self, y_true, y_pred):
    """
    Set-based precision, recall and F1 for one order.

    Arguments are space-separated product-id strings; duplicates collapse
    because both sides are converted to sets. Returns (0, 0, 0) when the
    two sets are disjoint.
    """
    truth = set(y_true.split(' '))
    predicted = set(y_pred.split(' '))
    overlap = len(truth & predicted)
    if not overlap:
        return 0, 0, 0
    precision = 1. * overlap / len(predicted)
    recall = 1. * overlap / len(truth)
    return precision, recall, 2 * precision * recall / (precision + recall)
def f1_score(self, y):
    """
    Mean per-order F1 for a frame of predictions.

    `y` is expected to carry the predicted products in positional column 2
    and the ground-truth products in positional column 3 (NaN meaning the
    order had no reordered products, scored against the literal 'None').
    Returns (mean_f1, per_order_scores_frame).
    """
    scored = []
    for entry in y.itertuples():
        truth = 'None' if pd.isnull(entry[3]) else entry[3]
        scored.append(self.f1_score_single_alt(truth, entry[2]))
    scored = pd.DataFrame(np.array(scored), columns=['precision', 'recall', 'f1'])
    scored['order_id'] = y['order_id']
    return np.mean(scored['f1']), scored
def n_fold(self, folds=False, num_rounds=80, params=None, use_lgb=False):
    """
    Grouped K-fold cross-validation of the reorder model.

    Folds are grouped by `user_id` so a user never appears in both train
    and test. Each fold trains LightGBM or XGBoost, predicts reorder
    probabilities, converts them to per-order product selections via
    `select_with_max_f1`, scores with the order-level F1, and writes
    per-fold artifacts (predictions, merged results, feature scores) and a
    run summary under `folds/`.

    Parameters:
        folds -- number of folds (False selects the default of 10)
        num_rounds -- maximum boosting rounds
        params -- optional dict merged over the stored booster params
        use_lgb -- train LightGBM instead of XGBoost
    """
    if folds is False:
        folds = 10
    kf = GroupKFold(n_splits=folds)
    # self._train = self._train.sample(frac=0.02)
    x = self._train
    y = self._train['reordered']
    # Start from the stored defaults for the chosen booster.
    if use_lgb:
        p = self._lgb_params
    else:
        p = self._xgb_params
    if params:
        # https://stackoverflow.com/questions/38987/how-to-merge-two-python-dictionaries-in-a-single-expression
        p = p.copy()
        p.update(params)
    scores = list()
    for i, (train_index, test_index) in enumerate(kf.split(x, y, groups=x['user_id'].values)):
        print('=> Training fold %d' % i)
        x_train = x.iloc[train_index]
        y_train = y.iloc[train_index]
        # Silence pandas SettingWithCopy warnings for the slices below.
        x_train.is_copy = False
        x_test = x.iloc[test_index]
        y_test = y.iloc[test_index]
        x_test.is_copy = False
        x_train.drop(['eval_set', 'user_id', 'order_id', 'reordered'], axis=1, inplace=True)
        # Keep ids/labels aside so they can be re-attached after prediction.
        x_testIds = x_test['order_id']
        x_reordered = x_test['reordered']
        x_test.drop(['eval_set', 'user_id', 'order_id', 'reordered'], axis=1, inplace=True)
        if self._selected_features:
            x_train = x_train[self._selected_features]
            x_test = x_test[self._selected_features]
        print("=> Train shape:", x_train.shape)
        print("=> Test shape:", x_test.shape)
        print('=> Training')
        columns = x_train.columns
        # x_train, y_train = self.resample(x_train, y_train)
        if use_lgb:
            train_data = lgb.Dataset(x_train, y_train, free_raw_data=False,
                                     categorical_feature=['aisle_id', 'department_id'])
            dval = lgb.Dataset(x_test, y_test)
            model = lgb.train(p, train_data, num_boost_round=num_rounds,
                              valid_sets=dval, early_stopping_rounds=30)
            best = model.best_iteration
            # It looks like if the lgb runs out of rounds it sets the best to 0
            if best == 0:
                best = num_rounds
            print(best)
            print('=> Predicting')
            pred = model.predict(x_test)
        else:
            train_data = xgb.DMatrix(x_train, y_train, feature_names=columns)
            dval = xgb.DMatrix(x_test, y_test, feature_names=x_test.columns)
            model = xgb.train(p,
                              train_data,
                              num_boost_round=num_rounds,
                              evals=[(dval, 'val')],
                              early_stopping_rounds=20,
                              verbose_eval=20)
            # model = xgb.train(p, train_data, num_boost_round=num_rounds)
            # Persist feature importances for later inspection.
            fscores = model.get_fscore()
            features = pd.DataFrame()
            features['features'] = fscores.keys()
            features['importance'] = fscores.values()
            features.sort_values(by=['importance'], ascending=False, inplace=True)
            print("=> Fscores:")
            features.to_csv(sys.stdout)
            features.to_json("folds/fscores-" + str(i) + ".js")
            test_data = xgb.DMatrix(x_test, feature_names=x_test.columns)
            print('=> Predicting')
            pred = model.predict(test_data)
        # Re-attach labels/ids and turn probabilities into product picks.
        x_test['reordered'] = x_reordered
        x_test['order_id'] = x_testIds
        x_test['pred'] = pred
        none_p = self.get_none_probability(x_test)
        x_test = pd.merge(x_test, none_p, on='order_id', how='left')
        y_pred = self.select_with_max_f1(x_test, none_p, save=False, from_p_n=10)
        # Get the true result for our fold
        reordered_only = x_test[x_test['reordered'] == 1]
        y_true = reordered_only.groupby('order_id').apply(
            lambda row: ' '.join(str(product) for product in row.product_id))
        y_true = y_true.to_frame()
        merged_y = y_pred.join(y_true, on='order_id', how='left', rsuffix='_true')
        f1, results = self.f1_score(merged_y)
        merged_y = pd.merge(merged_y, results, on='order_id', how='left')
        merged_y.columns = ['order_id', 'products_predicted', 'products_true', 'precision', 'recall', 'f1']
        predictions = x_test[['order_id', 'product_id', 'pred']]
        predictions.to_csv('folds/predictions-' + str(i) + '.csv', index=False)
        merged_y.to_csv('folds/fold-' + str(i) + '.csv', index=False)
        print("=> Average f1 score for kfold %d is %f" % (i, f1))
        scores.append(f1)
    print(scores)
    # Record the run configuration and scores alongside the fold outputs.
    p['scores'] = scores
    p['mean_f1'] = np.mean(scores)
    p['num_rounds'] = num_rounds
    p['folds'] = folds
    json.dump(p, open("folds/details.md", 'w'))
    print("=> Average f1 score across all folds:", np.mean(scores))
def examine_fold(self, fold_num=0):
    """
    Re-score a previously saved fold offline.

    Loads `folds/fold-<n>.csv` (per-order truth/prediction) and
    `folds/predictions-<n>.csv` (per-product probabilities), rebuilds the
    product selection with a candidate-pool size and reports the F1 --
    letting the selection step be tuned without retraining the model.
    """
    fold = 'folds/fold-' + str(fold_num) + '.csv'
    merged_y = pd.read_csv(fold)
    # Create a new submission set for the select function to fiddle
    pred = 'folds/predictions-' + str(fold_num) + '.csv'
    predictions = pd.read_csv(pred)
    # Add the none probability ?
    none_p = self.get_none_probability(predictions)
    predictions = pd.merge(predictions, none_p, on='order_id', how='left')
    # Get a new resultset
    # results = self.select_with_max_f1(predictions, none_p, save=False)
    #
    # true = pd.merge(merged_y, results, on='order_id', how='left')
    #
    # true = true[['order_id', 'products', 'products_true']]
    #
    # # Get the f1 score
    # f1, results = self.f1_score(true)
    #
    # print("Fold f1 score (all) is:", f1)
    #for i in [10, 20, 30, 40, 50, 60, 70, 80, 100, 150, 200]:
    # Candidate-pool size; the commented loop above sweeps several values.
    i = 20
    results_new = self.select_with_max_f1(predictions, none_p, save=False, from_p_n=i)
    true_new = pd.merge(merged_y, results_new, on='order_id', how='left')
    true_new = true_new[['order_id', 'products', 'products_true']]
    f1, results = self.f1_score(true_new)
    print("Fold f1 score with threshold at %d products is" % i, f1)
    # # Compare it to the original
    # f1, results = self.f1_score(merged_y)
    #
    # print("Fold f1 score original is:", f1)
def reselect(self):
    """Rebuild the final submission from saved predictions without retraining."""
    predictions = pd.read_csv('predictions.csv')
    # Attach P(no reorder) per order before re-running the selection.
    none_p = self.get_none_probability(predictions)
    predictions = pd.merge(predictions, none_p, on='order_id', how='left')
    self.select_with_max_f1(predictions, none_p, save=True, from_p_n=10)
    print("=> Done! Good luck!")
def get_none_probability(self, data):
    """
    P(order reorders nothing): per order, the product of (1 - pred) over
    its rows. Returns a frame with columns ['order_id', 'none_probability'].
    """
    probs = {}
    for row in data.itertuples():
        probs[row.order_id] = probs.get(row.order_id, 1) * (1 - row.pred)
    result = pd.DataFrame.from_dict(probs, orient='index')
    result.reset_index(inplace=True)
    result.columns = ['order_id', 'none_probability']
    return result
def resample(self, x, y):
    """
    Balance the training data by oversampling the minority class with SMOTE.
    Returns the resampled (x, y) pair.
    """
    # The raw data is heavily skewed towards class 0, hence the oversampling.
    # There are 12479129 examples marked as 0 and only 828824 as 1
    # The sample appears to be impalanced. Let's try to remedy this
    print("=> Resampling data")
    from collections import Counter
    from imblearn.over_sampling import SMOTE
    oversampler = SMOTE(random_state=42, n_jobs=-1)
    x_res, y_res = oversampler.fit_sample(x, y)
    print("=> Resampled dataset shape {}".format(Counter(y_res)))
    return x_res, y_res
# Script entry point: build the pipeline object. The calls below are toggled
# by commenting/uncommenting while experimenting.
i = Instacart()
#i.examine_fold(0)
#i.reselect()
#exit()
# for x in | |
import json
from pathlib import Path
from shutil import Error
from unittest.mock import mock_open, patch
import gdk.CLIParser as CLIParser
import gdk.common.consts as consts
import gdk.common.exceptions.error_messages as error_messages
import gdk.common.parse_args_actions as parse_args_actions
import gdk.common.utils as utils
import pytest
from gdk.commands.component.BuildCommand import BuildCommand
@pytest.fixture()
def supported_build_system(mocker):
    """Patch get_supported_component_builds to return the real static JSON config."""
    builds_file = utils.get_static_file_path(consts.project_build_system_file)
    with open(builds_file, "r") as config_file:
        build_systems = json.load(config_file)
    return mocker.patch(
        "gdk.commands.component.project_utils.get_supported_component_builds",
        return_value=build_systems,
    )
@pytest.fixture()
def rglob_build_file(mocker):
    """Patch Path.rglob so only maven/gradle build files are 'found'."""
    def fake_rglob(*args, **kwargs):
        pattern = args[0]
        if "build.gradle" in pattern or "pom.xml" in pattern:
            return [Path(utils.current_directory).joinpath("build_file")]
        return []
    return mocker.patch("pathlib.Path.rglob", side_effect=fake_rglob)
def test_build_command_instantiation(mocker):
    """A plain `component build` wires up config, build-system lookup, conflict check and run()."""
    supported_builds_mock = mocker.patch(
        "gdk.commands.component.project_utils.get_supported_component_builds", return_value={}
    )
    conflict_check_mock = mocker.patch.object(BuildCommand, "check_if_arguments_conflict", return_value=None)
    run_mock = mocker.patch.object(BuildCommand, "run", return_value=None)
    proj_config_mock = mocker.patch(
        "gdk.commands.component.project_utils.get_project_config_values",
        return_value={},
    )
    parse_args_actions.run_command(CLIParser.cli_parser.parse_args(["component", "build"]))
    assert proj_config_mock.call_count == 1
    assert supported_builds_mock.call_count == 1
    assert conflict_check_mock.call_count == 1
    assert run_mock.call_count == 1
def test_build_command_instantiation_failed_fetching_config(mocker):
    """If reading project config raises, the command aborts before the build-system lookup."""
    proj_config_mock = mocker.patch(
        "gdk.commands.component.project_utils.get_project_config_values",
        side_effect=Exception("exception fetching proj values"),
    )
    supported_builds_mock = mocker.patch(
        "gdk.commands.component.project_utils.get_supported_component_builds", return_value={}
    )
    conflict_check_mock = mocker.patch.object(BuildCommand, "check_if_arguments_conflict", return_value=None)
    run_mock = mocker.patch.object(BuildCommand, "run", return_value=None)
    with pytest.raises(Exception) as exc_info:
        parse_args_actions.run_command(CLIParser.cli_parser.parse_args(["component", "build"]))
    assert "exception fetching proj values" in exc_info.value.args[0]
    assert proj_config_mock.call_count == 1
    assert supported_builds_mock.call_count == 0
    assert conflict_check_mock.call_count == 1
    assert run_mock.call_count == 0
def test_build_command_instantiation_failed_fetching_build_config(mocker):
    """If fetching supported build systems raises, run() is never reached."""
    supported_builds_mock = mocker.patch(
        "gdk.commands.component.project_utils.get_supported_component_builds",
        side_effect=Exception("exception fetching build"),
    )
    proj_config_mock = mocker.patch(
        "gdk.commands.component.project_utils.get_project_config_values",
        return_value={},
    )
    conflict_check_mock = mocker.patch.object(BuildCommand, "check_if_arguments_conflict", return_value=None)
    run_mock = mocker.patch.object(BuildCommand, "run", return_value=None)
    with pytest.raises(Exception) as exc_info:
        parse_args_actions.run_command(CLIParser.cli_parser.parse_args(["component", "build"]))
    assert "exception fetching build" in exc_info.value.args[0]
    assert proj_config_mock.call_count == 1
    assert supported_builds_mock.call_count == 1
    assert conflict_check_mock.call_count == 1
    assert run_mock.call_count == 0
def test_build_command_instantiation_failed_conflicting_args(mocker):
    """Conflicting CLI arguments abort before any config is fetched."""
    supported_builds_mock = mocker.patch(
        "gdk.commands.component.project_utils.get_supported_component_builds", return_value={}
    )
    proj_config_mock = mocker.patch(
        "gdk.commands.component.project_utils.get_project_config_values",
        side_effect=Exception("exception fetching proj values"),
    )
    conflict_check_mock = mocker.patch.object(
        BuildCommand,
        "check_if_arguments_conflict",
        side_effect=Exception("exception due to conflictins args"),
    )
    run_mock = mocker.patch.object(BuildCommand, "run", return_value=None)
    with pytest.raises(Exception) as exc_info:
        parse_args_actions.run_command(CLIParser.cli_parser.parse_args(["component", "build"]))
    assert "exception due to conflictins args" in exc_info.value.args[0]
    assert proj_config_mock.call_count == 0
    assert supported_builds_mock.call_count == 0
    assert conflict_check_mock.call_count == 1
    assert run_mock.call_count == 0
def test_build_run():
    """With nothing mocked, the build fails and surfaces a helpful error message."""
    with pytest.raises(Exception) as exc_info:
        parse_args_actions.run_command(CLIParser.cli_parser.parse_args(["component", "build"]))
    assert "Could not build the project due to the following error." in exc_info.value.args[0]
def test_build_run_default_zip_json(mocker, supported_build_system, rglob_build_file):
    """
    Default zip build system with a JSON recipe: the zip-build directory is
    created, copied, archived, and the recipe is re-serialized as JSON.
    """
    mock_clean_dir = mocker.patch("gdk.common.utils.clean_dir", return_value=None)
    mock_create_dir = mocker.patch("pathlib.Path.mkdir", return_value=None)
    mock_copy_dir = mocker.patch("shutil.copytree", return_value=None)
    mock_archive_dir = mocker.patch("shutil.make_archive", return_value=None)
    # The original patched get_project_config_values twice with identical
    # arguments; one patch is sufficient.
    mock_get_proj_config = mocker.patch(
        "gdk.commands.component.project_utils.get_project_config_values",
        return_value=project_config(),
    )
    mock_is_artifact_in_build = mocker.patch.object(BuildCommand, "is_artifact_in_build", return_value=True)
    mock_subprocess_run = mocker.patch("subprocess.run")
    mock_json_dump = mocker.patch("json.dumps")
    pc = mock_get_proj_config.return_value
    file_name = Path(pc["gg_build_recipes_dir"]).joinpath(pc["component_recipe_file"].name).resolve()
    with patch("builtins.open", mock_open()) as mock_file:
        parse_args_actions.run_command(CLIParser.cli_parser.parse_args(["component", "build"]))
        mock_file.assert_any_call(file_name, "w")
    # These were bare expressions / un-called mock assertions before (no-ops);
    # make them real assertions.
    assert mock_json_dump.call_count == 1
    mock_get_proj_config.assert_called_once()
    assert not mock_subprocess_run.called
    assert mock_copy_dir.call_count == 1  # copy files to zip-build to create a zip
    assert mock_archive_dir.call_count == 1  # archiving directory
    assert mock_is_artifact_in_build.call_count == 1  # only one artifact in project_config. Available in build
    assert mock_clean_dir.call_count == 2  # clean zip-build, clean greengrass-build
    assert mock_create_dir.call_count == 2  # create gg directories
def test_build_run_default_maven_yaml(mocker, supported_build_system, rglob_build_file):
    """
    Maven build system with a YAML recipe on a non-Windows platform:
    `mvn clean package` is invoked and no copy/archive steps run.
    """
    mock_clean_dir = mocker.patch("gdk.common.utils.clean_dir", return_value=None)
    mock_create_dir = mocker.patch("pathlib.Path.mkdir", return_value=None)
    mock_copy_dir = mocker.patch("shutil.copytree", return_value=None)
    mock_archive_dir = mocker.patch("shutil.make_archive", return_value=None)
    pc = project_config()
    pc["component_build_config"] = {"build_system": "maven"}
    mock_get_proj_config = mocker.patch(
        "gdk.commands.component.project_utils.get_project_config_values",
        return_value=pc,
    )
    mock_platform = mocker.patch("platform.system", return_value="not-windows")
    pc["component_recipe_file"] = Path("/src/GDK-CLI-Internal/tests/gdk/static/build_command/recipe.yaml")
    mock_is_artifact_in_build = mocker.patch.object(BuildCommand, "is_artifact_in_build", return_value=True)
    mock_subprocess_run = mocker.patch("subprocess.run")
    file_name = Path(pc["gg_build_recipes_dir"]).joinpath(pc["component_recipe_file"].name).resolve()
    with patch("builtins.open", mock_open()) as mock_file:
        parse_args_actions.run_command(CLIParser.cli_parser.parse_args(["component", "build"]))
        mock_file.assert_any_call(file_name, "w")
    # Was `assert mock.assert_called_once` (asserting the truthy bound method,
    # i.e. a no-op); invoke the mock assertion for real.
    mock_get_proj_config.assert_called_once()
    mock_subprocess_run.assert_called_with(["mvn", "clean", "package"])  # called maven build command
    assert mock_copy_dir.call_count == 0  # No copying directories
    assert supported_build_system.call_count == 1
    assert mock_archive_dir.call_count == 0  # Archive never called in maven
    assert mock_is_artifact_in_build.call_count == 1  # only one artifact in project_config. Available in build
    assert mock_clean_dir.call_count == 1  # clean greengrass-build
    assert mock_create_dir.call_count == 2  # create gg directories
    assert mock_platform.call_count == 1
def test_build_run_default_maven_yaml_windows(mocker, supported_build_system, rglob_build_file):
    """
    Maven build system on Windows: the build must be invoked as `mvn.cmd`
    and the recipe re-serialized as YAML.
    """
    mock_clean_dir = mocker.patch("gdk.common.utils.clean_dir", return_value=None)
    mock_create_dir = mocker.patch("pathlib.Path.mkdir", return_value=None)
    mock_copy_dir = mocker.patch("shutil.copytree", return_value=None)
    mock_archive_dir = mocker.patch("shutil.make_archive", return_value=None)
    mock_platform = mocker.patch("platform.system", return_value="Windows")
    pc = project_config()
    pc["component_build_config"] = {"build_system": "maven"}
    mock_get_proj_config = mocker.patch(
        "gdk.commands.component.project_utils.get_project_config_values",
        return_value=pc,
    )
    mock_is_artifact_in_build = mocker.patch.object(BuildCommand, "is_artifact_in_build", return_value=True)
    # The original passed side_effect="error with maven build cmd" -- a string
    # side_effect is iterated character by character, which only worked by
    # accident. This test exercises the success path, so no side effect.
    mock_subprocess_run = mocker.patch("subprocess.run")
    mock_yaml_dump = mocker.patch("yaml.dump")
    file_name = Path(pc["gg_build_recipes_dir"]).joinpath(pc["component_recipe_file"].name).resolve()
    with patch("builtins.open", mock_open()) as mock_file:
        parse_args_actions.run_command(CLIParser.cli_parser.parse_args(["component", "build"]))
        mock_file.assert_any_call(file_name, "w")
    # Bare expressions / un-called mock assertions made into real assertions.
    assert mock_yaml_dump.call_count == 1
    mock_get_proj_config.assert_called_once()
    mock_subprocess_run.assert_called_with(["mvn.cmd", "clean", "package"])  # called maven build command
    assert mock_copy_dir.call_count == 0  # No copying directories
    assert supported_build_system.call_count == 1
    assert mock_archive_dir.call_count == 0  # Archive never called in maven
    assert mock_is_artifact_in_build.call_count == 1  # only one artifact in project_config. Available in build
    assert mock_clean_dir.call_count == 1  # clean greengrass-build
    assert mock_create_dir.call_count == 2  # create gg directories
    assert mock_platform.call_count == 1
def test_build_run_default_maven_yaml_error(mocker, supported_build_system, rglob_build_file):
    """A failing maven command surfaces its error and stops before artifact handling."""
    mock_clean_dir = mocker.patch("gdk.common.utils.clean_dir", return_value=None)
    mock_create_dir = mocker.patch("pathlib.Path.mkdir", return_value=None)
    mock_copy_dir = mocker.patch("shutil.copytree", return_value=None)
    mock_archive_dir = mocker.patch("shutil.make_archive", return_value=None)
    mock_platform = mocker.patch("platform.system", return_value="Windows")
    pc = project_config()
    pc["component_build_config"] = {"build_system": "maven"}
    pc["component_recipe_file"] = Path("/src/GDK-CLI-Internal/tests/gdk/static/build_command/recipe.yaml")
    mock_get_proj_config = mocker.patch(
        "gdk.commands.component.project_utils.get_project_config_values",
        return_value=pc,
    )
    mock_is_artifact_in_build = mocker.patch.object(BuildCommand, "is_artifact_in_build", return_value=True)
    mock_subprocess_run = mocker.patch("subprocess.run", side_effect=Exception("error with maven build cmd"))
    with pytest.raises(Exception) as e:
        parse_args_actions.run_command(CLIParser.cli_parser.parse_args(["component", "build", "-d"]))
    assert "error with maven build cmd" in e.value.args[0]
    # Was `assert mock.assert_called_once` (a no-op); call the assertion.
    mock_get_proj_config.assert_called_once()
    mock_subprocess_run.assert_called_with(["mvn.cmd", "clean", "package"])  # called maven build command
    assert mock_copy_dir.call_count == 0  # No copying directories
    assert supported_build_system.call_count == 1
    assert mock_archive_dir.call_count == 0  # Archive never called in maven
    assert mock_is_artifact_in_build.call_count == 0  # build failed before artifact handling
    assert mock_clean_dir.call_count == 1  # clean greengrass-build
    assert mock_create_dir.call_count == 2  # create gg directories
    assert mock_platform.called
def test_build_run_default_gradle_yaml_artifact_not_found(mocker, supported_build_system, rglob_build_file):
    """Gradle build runs, but the recipe artifact is neither on s3 nor in the build folders."""
    mock_clean_dir = mocker.patch("gdk.common.utils.clean_dir", return_value=None)
    mock_create_dir = mocker.patch("pathlib.Path.mkdir", return_value=None)
    mock_copy_dir = mocker.patch("shutil.copytree", return_value=None)
    mock_archive_dir = mocker.patch("shutil.make_archive", return_value=None)
    pc = project_config()
    pc["component_build_config"] = {"build_system": "gradle"}
    pc["component_recipe_file"] = Path("/src/GDK-CLI-Internal/tests/gdk/static/build_command/recipe.yaml")
    mock_get_proj_config = mocker.patch(
        "gdk.commands.component.project_utils.get_project_config_values",
        return_value=pc,
    )
    mock_boto3_client = mocker.patch("boto3.client")
    mock_subprocess_run = mocker.patch("subprocess.run")
    mock_yaml_dump = mocker.patch("yaml.dump")
    with patch("builtins.open", mock_open()) as mock_file:
        with pytest.raises(Exception) as e:
            parse_args_actions.run_command(CLIParser.cli_parser.parse_args(["component", "build"]))
        assert (
            "Could not find artifact with URI"
            " 's3://DOC-EXAMPLE-BUCKET/artifacts/com.example.HelloWorld/1.0.0/hello_world.py' on s3 or inside"
            " the build folders."
            in e.value.args[0]
        )
    # The recipe must never be written when the artifact cannot be located.
    assert not mock_file.called
    # Bare expressions / un-called mock assertions made into real assertions.
    assert mock_yaml_dump.call_count == 0
    mock_get_proj_config.assert_called_once()
    mock_subprocess_run.assert_called_with(["gradle", "build"])  # called gradle build command
    assert mock_copy_dir.call_count == 0  # No copying directories
    assert supported_build_system.call_count == 1
    assert mock_archive_dir.call_count == 0  # Archive never called in gradle
    assert mock_boto3_client.call_count == 1
    assert mock_clean_dir.call_count == 1  # clean greengrass-build
    assert mock_create_dir.call_count == 2  # create gg directories
def test_build_run_default_exception(mocker, rglob_build_file):
    """An exception raised inside default_build_component propagates out of
    run_command, and no build subprocess is launched."""
    mock_create_gg_build_directories = mocker.patch.object(BuildCommand, "create_gg_build_directories")
    mock_default_build_component = mocker.patch.object(
        BuildCommand, "default_build_component", side_effect=Exception("error in default_build_component")
    )
    mock_get_proj_config = mocker.patch(
        "gdk.commands.component.project_utils.get_project_config_values",
        return_value=project_config(),
    )
    mock_get_supported_component_builds = mocker.patch(
        "gdk.commands.component.project_utils.get_supported_component_builds", return_value={}
    )
    mock_subprocess_run = mocker.patch("subprocess.run")
    with pytest.raises(Exception) as e:
        parse_args_actions.run_command(CLIParser.cli_parser.parse_args(["component", "build"]))
    assert "error in default_build_component" in e.value.args[0]
    assert mock_get_proj_config.called
    assert mock_get_supported_component_builds.called
    # (was `assert m.assert_called_once` — asserting the bound method object,
    # which is always truthy; call the mock assertion method instead)
    mock_create_gg_build_directories.assert_called_once()
    mock_default_build_component.assert_called_once()
    assert not mock_subprocess_run.called
def test_default_build_component_error_run_build_command(mocker, rglob_build_file):
    """If run_build_command raises, the build reports BUILD_FAILED and the
    later steps (artifact lookup, recipe creation) are never reached."""
    mock_clean_dir = mocker.patch("gdk.common.utils.clean_dir", return_value=None)
    mock_create_dir = mocker.patch("pathlib.Path.mkdir", return_value=None)
    mock_run_build_command = mocker.patch.object(
        BuildCommand, "run_build_command", side_effect=Error("err in run_build_command")
    )
    mock_find_artifacts_and_update_uri = mocker.patch.object(BuildCommand, "find_artifacts_and_update_uri")
    mock_create_build_recipe_file = mocker.patch.object(BuildCommand, "create_build_recipe_file")
    mock_get_proj_config = mocker.patch(
        "gdk.commands.component.project_utils.get_project_config_values",
        return_value=project_config(),
    )
    mock_get_supported_component_builds = mocker.patch(
        "gdk.commands.component.project_utils.get_supported_component_builds", return_value={}
    )
    with pytest.raises(Exception) as e:
        parse_args_actions.run_command(CLIParser.cli_parser.parse_args(["component", "build"]))
    assert error_messages.BUILD_FAILED in e.value.args[0]
    # (was `assert m.assert_called_once` — always truthy; must be called)
    mock_run_build_command.assert_called_once()
    assert not mock_find_artifacts_and_update_uri.called
    assert not mock_create_build_recipe_file.called
    assert mock_get_supported_component_builds.called
    assert mock_clean_dir.call_count == 1
    assert mock_create_dir.call_count == 2
    assert mock_get_proj_config.call_count == 1
def test_build_run_custom(mocker, supported_build_system, rglob_build_file):
    """A custom build system runs the configured custom command and skips all
    of the default artifact copying / s3 lookup / recipe dumping steps."""
    mock_clean_dir = mocker.patch("gdk.common.utils.clean_dir", return_value=None)
    mock_create_dir = mocker.patch("pathlib.Path.mkdir", return_value=None)
    mock_copy_dir = mocker.patch("shutil.copytree", return_value=None)
    pc = project_config()
    pc["component_build_config"] = {"build_system": "custom", "custom_build_command": ["some-command"]}
    mock_get_proj_config = mocker.patch(
        "gdk.commands.component.project_utils.get_project_config_values",
        return_value=pc,
    )
    mock_is_artifact_in_build = mocker.patch.object(BuildCommand, "is_artifact_in_build", return_value=False)
    mock_is_artifact_in_s3 = mocker.patch.object(BuildCommand, "is_artifact_in_s3", return_value=True)
    mock_boto3_client = mocker.patch("boto3.client")
    mock_subprocess_run = mocker.patch("subprocess.run")
    mock_yaml_dump = mocker.patch("yaml.dump")
    with patch("builtins.open", mock_open()) as mock_file:
        parse_args_actions.run_command(CLIParser.cli_parser.parse_args(["component", "build"]))
        assert not mock_file.called
    # (was a bare `mock_yaml_dump.call_count == 0` comparison — a no-op)
    assert mock_yaml_dump.call_count == 0
    # (was `assert m.assert_called_once` — always truthy; must be called)
    mock_get_proj_config.assert_called_once()
    mock_subprocess_run.assert_called_with(["some-command"])  # called the custom build command
    assert mock_copy_dir.call_count == 0  # No copying directories
    assert supported_build_system.call_count == 1
    assert mock_is_artifact_in_build.call_count == 0  # custom build: artifacts not checked in build folders
    assert mock_is_artifact_in_s3.call_count == 0  # custom build: artifacts not checked on s3
    assert mock_boto3_client.call_count == 0
    assert mock_clean_dir.call_count == 1  # clean greengrass-build
    assert mock_create_dir.call_count == 2  # create gg directories
def test_build_run_default_gradle_yaml_artifact_found_build(mocker, supported_build_system, rglob_build_file):
mock_clean_dir = mocker.patch("gdk.common.utils.clean_dir", return_value=None)
mock_create_dir = mocker.patch("pathlib.Path.mkdir", return_value=None)
mock_copy_dir = mocker.patch("shutil.copytree", return_value=None)
mock_archive_dir = mocker.patch("shutil.make_archive", return_value=None)
pc = project_config()
pc["component_build_config"] = {"build_system": "gradle"}
pc["component_recipe_file"] = Path("/src/GDK-CLI-Internal/tests/gdk/static/build_command/recipe.yaml")
mock_get_proj_config = mocker.patch(
"gdk.commands.component.project_utils.get_project_config_values",
return_value=pc,
)
mock_boto3_client = mocker.patch("boto3.client")
mock_subprocess_run = mocker.patch("subprocess.run")
mock_yaml_dump = mocker.patch("yaml.dump")
pc = mock_get_proj_config.return_value
mocker.patch("pathlib.Path.is_file", return_value=True)
mock_copy_file = mocker.patch("shutil.copy", return_value=None)
mock_exists = mocker.patch("pathlib.Path.exists", return_value=True)
file_name = Path(pc["gg_build_recipes_dir"]).joinpath(pc["component_recipe_file"].name).resolve()
with patch("builtins.open", mock_open()) as mock_file:
parse_args_actions.run_command(CLIParser.cli_parser.parse_args(["component", "build"]))
mock_file.assert_any_call(file_name, "w")
mock_yaml_dump.call_count == 0
assert mock_get_proj_config.assert_called_once
mock_subprocess_run.assert_called_with(["gradle", "build"]) # called | |
# union
# of proposals from all levels
# NOTE: When FPN is used, the meaning of this config is different from Detectron1.
# It means per-batch topk in Detectron1, but per-image topk here.
# See the "find_top_rpn_proposals" function for details.
# Number of proposals kept after NMS, for training and for testing respectively.
_C.MODEL.RPN.POST_NMS_TOPK_TRAIN = 2000
_C.MODEL.RPN.POST_NMS_TOPK_TEST = 1000
# NMS threshold used on RPN proposals
_C.MODEL.RPN.NMS_THRESH = 0.7
# ---------------------------------------------------------------------------- #
# ROI HEADS options
# ---------------------------------------------------------------------------- #
_C.MODEL.ROI_HEADS = CN()
# Registered name of the ROI-heads implementation to instantiate.
_C.MODEL.ROI_HEADS.NAME = "Res5ROIHeads"
# Number of foreground classes
_C.MODEL.ROI_HEADS.NUM_CLASSES = 80
# Names of the input feature maps to be used by ROI heads
# Currently all heads (box, mask, ...) use the same input feature map list
# e.g., ["p2", "p3", "p4", "p5"] is commonly used for FPN
_C.MODEL.ROI_HEADS.IN_FEATURES = ["res4"]
# IOU overlap ratios [IOU_THRESHOLD]
# Overlap threshold for an RoI to be considered background (if < IOU_THRESHOLD)
# Overlap threshold for an RoI to be considered foreground (if >= IOU_THRESHOLD)
_C.MODEL.ROI_HEADS.IOU_THRESHOLDS = [0.5]
# Labels for the IoU intervals delimited by IOU_THRESHOLDS
# (here: 0 = background below 0.5, 1 = foreground at/above 0.5).
_C.MODEL.ROI_HEADS.IOU_LABELS = [0, 1]
# RoI minibatch size *per image* (number of regions of interest [ROIs])
# Total number of RoIs per training minibatch =
#   ROI_HEADS.BATCH_SIZE_PER_IMAGE * SOLVER.IMS_PER_BATCH
# E.g., a common configuration is: 512 * 16 = 8192
_C.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 512
# Target fraction of RoI minibatch that is labeled foreground (i.e. class > 0)
_C.MODEL.ROI_HEADS.POSITIVE_FRACTION = 0.25
# Only used on test mode
# Minimum score threshold (assuming scores in a [0, 1] range); a value chosen to
# balance obtaining high recall with not having too many low precision
# detections that will slow down inference post processing steps (like NMS)
# A default threshold of 0.0 increases AP by ~0.2-0.3 but significantly slows down
# inference.
_C.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.05
# Overlap threshold used for non-maximum suppression (suppress boxes with
# IoU >= this threshold)
_C.MODEL.ROI_HEADS.NMS_THRESH_TEST = 0.5
# If True, augment proposals with ground-truth boxes before sampling proposals to
# train ROI heads.
_C.MODEL.ROI_HEADS.PROPOSAL_APPEND_GT = True
# ---------------------------------------------------------------------------- #
# Box Head
# ---------------------------------------------------------------------------- #
_C.MODEL.ROI_BOX_HEAD = CN()
# C4 don't use head name option
# Options for non-C4 models: FastRCNNConvFCHead,
_C.MODEL.ROI_BOX_HEAD.NAME = ""
# Options are: "smooth_l1", "giou"
_C.MODEL.ROI_BOX_HEAD.BBOX_REG_LOSS_TYPE = "smooth_l1"
# The final scaling coefficient on the box regression loss, used to balance the magnitude of its
# gradients with other losses in the model. See also `MODEL.ROI_KEYPOINT_HEAD.LOSS_WEIGHT`.
_C.MODEL.ROI_BOX_HEAD.BBOX_REG_LOSS_WEIGHT = 1.0
# Default weights on (dx, dy, dw, dh) for normalizing bbox regression targets
# These are empirically chosen to approximately lead to unit variance targets
_C.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS = (10.0, 10.0, 5.0, 5.0)
# The transition point from L1 to L2 loss. Set to 0.0 to make the loss simply L1.
_C.MODEL.ROI_BOX_HEAD.SMOOTH_L1_BETA = 0.0
# Output resolution of the RoI pooling operation for the box head.
_C.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION = 14
_C.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO = 0
# Type of pooling operation applied to the incoming feature map for each RoI
_C.MODEL.ROI_BOX_HEAD.POOLER_TYPE = "ROIAlignV2"
# Number of FC layers in the RoI box head
_C.MODEL.ROI_BOX_HEAD.NUM_FC = 0
# Hidden layer dimension for FC layers in the RoI box head
_C.MODEL.ROI_BOX_HEAD.FC_DIM = 1024
# Number of Conv layers in the RoI box head
_C.MODEL.ROI_BOX_HEAD.NUM_CONV = 0
# Channel dimension for Conv layers in the RoI box head
_C.MODEL.ROI_BOX_HEAD.CONV_DIM = 256
# Normalization method for the convolution layers.
# Options: "" (no norm), "GN", "SyncBN".
_C.MODEL.ROI_BOX_HEAD.NORM = ""
# Whether to use class agnostic for bbox regression
_C.MODEL.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG = False
# If true, RoI heads use bounding boxes predicted by the box head rather than proposal boxes.
_C.MODEL.ROI_BOX_HEAD.TRAIN_ON_PRED_BOXES = False
# ---------------------------------------------------------------------------- #
# Cascaded Box Head
# ---------------------------------------------------------------------------- #
_C.MODEL.ROI_BOX_CASCADE_HEAD = CN()
# The number of cascade stages is implicitly defined by the length of the following two configs.
# One (dx, dy, dw, dh) weight tuple per cascade stage.
_C.MODEL.ROI_BOX_CASCADE_HEAD.BBOX_REG_WEIGHTS = (
    (10.0, 10.0, 5.0, 5.0),
    (20.0, 20.0, 10.0, 10.0),
    (30.0, 30.0, 15.0, 15.0),
)
# One IoU threshold per cascade stage.
_C.MODEL.ROI_BOX_CASCADE_HEAD.IOUS = (0.5, 0.6, 0.7)
# ---------------------------------------------------------------------------- #
# Mask Head
# ---------------------------------------------------------------------------- #
_C.MODEL.ROI_MASK_HEAD = CN()
# Registered name of the mask-head implementation to instantiate.
_C.MODEL.ROI_MASK_HEAD.NAME = "MaskRCNNConvUpsampleHead"
# Output resolution of the RoI pooling operation for the mask head.
_C.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION = 14
_C.MODEL.ROI_MASK_HEAD.POOLER_SAMPLING_RATIO = 0
_C.MODEL.ROI_MASK_HEAD.NUM_CONV = 0  # The number of convs in the mask head
# Channel dimension for Conv layers in the mask head
_C.MODEL.ROI_MASK_HEAD.CONV_DIM = 256
# Normalization method for the convolution layers.
# Options: "" (no norm), "GN", "SyncBN".
_C.MODEL.ROI_MASK_HEAD.NORM = ""
# Whether to use class agnostic for mask prediction
_C.MODEL.ROI_MASK_HEAD.CLS_AGNOSTIC_MASK = False
# Type of pooling operation applied to the incoming feature map for each RoI
_C.MODEL.ROI_MASK_HEAD.POOLER_TYPE = "ROIAlignV2"
# ---------------------------------------------------------------------------- #
# Keypoint Head
# ---------------------------------------------------------------------------- #
_C.MODEL.ROI_KEYPOINT_HEAD = CN()
_C.MODEL.ROI_KEYPOINT_HEAD.NAME = "KRCNNConvDeconvUpsampleHead"
_C.MODEL.ROI_KEYPOINT_HEAD.POOLER_RESOLUTION = 14
_C.MODEL.ROI_KEYPOINT_HEAD.POOLER_SAMPLING_RATIO = 0
# 8 conv layers of 512 channels each in the keypoint head.
_C.MODEL.ROI_KEYPOINT_HEAD.CONV_DIMS = tuple(512 for _ in range(8))
_C.MODEL.ROI_KEYPOINT_HEAD.NUM_KEYPOINTS = 17  # 17 is the number of keypoints in COCO.
# Images with too few (or no) keypoints are excluded from training.
_C.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE = 1
# Normalize by the total number of visible keypoints in the minibatch if True.
# Otherwise, normalize by the total number of keypoints that could ever exist
# in the minibatch.
# The keypoint softmax loss is only calculated on visible keypoints.
# Since the number of visible keypoints can vary significantly between
# minibatches, this has the effect of up-weighting the importance of
# minibatches with few visible keypoints. (Imagine the extreme case of
# only one visible keypoint versus N: in the case of N, each one
# contributes 1/N to the gradient compared to the single keypoint
# determining the gradient direction). Instead, we can normalize the
# loss by the total number of keypoints, if it were the case that all
# keypoints were visible in a full minibatch. (Returning to the example,
# this means that the one visible keypoint contributes as much as each
# of the N keypoints.)
_C.MODEL.ROI_KEYPOINT_HEAD.NORMALIZE_LOSS_BY_VISIBLE_KEYPOINTS = True
# Multi-task loss weight to use for keypoints
# Recommended values:
#   - use 1.0 if NORMALIZE_LOSS_BY_VISIBLE_KEYPOINTS is True
#   - use 4.0 if NORMALIZE_LOSS_BY_VISIBLE_KEYPOINTS is False
_C.MODEL.ROI_KEYPOINT_HEAD.LOSS_WEIGHT = 1.0
# Type of pooling operation applied to the incoming feature map for each RoI
_C.MODEL.ROI_KEYPOINT_HEAD.POOLER_TYPE = "ROIAlignV2"
# ---------------------------------------------------------------------------- #
# Semantic Segmentation Head
# ---------------------------------------------------------------------------- #
_C.MODEL.SEM_SEG_HEAD = CN()
_C.MODEL.SEM_SEG_HEAD.NAME = "SemSegFPNHead"
_C.MODEL.SEM_SEG_HEAD.IN_FEATURES = ["p2", "p3", "p4", "p5"]
# Label in the semantic segmentation ground truth that is ignored, i.e., no loss is calculated for
# the corresponding pixel.
_C.MODEL.SEM_SEG_HEAD.IGNORE_VALUE = 255
# Number of classes in the semantic segmentation head
_C.MODEL.SEM_SEG_HEAD.NUM_CLASSES = 54
# Number of channels in the 3x3 convs inside semantic-FPN heads.
_C.MODEL.SEM_SEG_HEAD.CONVS_DIM = 128
# Outputs from semantic-FPN heads are up-scaled to the COMMON_STRIDE stride.
_C.MODEL.SEM_SEG_HEAD.COMMON_STRIDE = 4
# Normalization method for the convolution layers. Options: "" (no norm), "GN".
_C.MODEL.SEM_SEG_HEAD.NORM = "GN"
_C.MODEL.SEM_SEG_HEAD.LOSS_WEIGHT = 1.0

# Panoptic-FPN: combines the instance and semantic segmentation outputs.
_C.MODEL.PANOPTIC_FPN = CN()
# Scaling of all losses from instance detection / segmentation head.
_C.MODEL.PANOPTIC_FPN.INSTANCE_LOSS_WEIGHT = 1.0
# options when combining instance & semantic segmentation outputs
_C.MODEL.PANOPTIC_FPN.COMBINE = CN({"ENABLED": True})
_C.MODEL.PANOPTIC_FPN.COMBINE.OVERLAP_THRESH = 0.5
_C.MODEL.PANOPTIC_FPN.COMBINE.STUFF_AREA_LIMIT = 4096
_C.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = 0.5
# ---------------------------------------------------------------------------- #
# RetinaNet Head
# ---------------------------------------------------------------------------- #
_C.MODEL.RETINANET = CN()
# This is the number of foreground classes.
_C.MODEL.RETINANET.NUM_CLASSES = 80
_C.MODEL.RETINANET.IN_FEATURES = ["p3", "p4", "p5", "p6", "p7"]
# Convolutions to use in the cls and bbox tower
# NOTE: this doesn't include the last conv for logits
_C.MODEL.RETINANET.NUM_CONVS = 4
# IoU overlap ratio [bg, fg] for labeling anchors.
# Anchors with < bg are labeled negative (0)
# Anchors with >= bg and < fg are ignored (-1)
# Anchors with >= fg are labeled positive (1)
_C.MODEL.RETINANET.IOU_THRESHOLDS = [0.4, 0.5]
_C.MODEL.RETINANET.IOU_LABELS = [0, -1, 1]
# Prior prob for rare case (i.e. foreground) at the beginning of training.
# This is used to set the bias for the logits layer of the classifier subnet.
# This improves training stability in the case of heavy class imbalance.
_C.MODEL.RETINANET.PRIOR_PROB = 0.01
# Inference cls score threshold, only anchors with score > INFERENCE_TH are
# considered for inference (to improve speed)
_C.MODEL.RETINANET.SCORE_THRESH_TEST = 0.05
_C.MODEL.RETINANET.TOPK_CANDIDATES_TEST = 1000
# NMS threshold applied to RetinaNet detections at test time.
_C.MODEL.RETINANET.NMS_THRESH_TEST = 0.5
# Weights on (dx, dy, dw, dh) for normalizing Retinanet anchor regression targets
_C.MODEL.RETINANET.BBOX_REG_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
# Loss parameters
_C.MODEL.RETINANET.FOCAL_LOSS_GAMMA = 2.0
_C.MODEL.RETINANET.FOCAL_LOSS_ALPHA = 0.25
_C.MODEL.RETINANET.SMOOTH_L1_LOSS_BETA = 0.1
# Options are: "smooth_l1", "giou"
_C.MODEL.RETINANET.BBOX_REG_LOSS_TYPE = "smooth_l1"
# One of BN, SyncBN, FrozenBN, GN
# Only supports GN until unshared norm is implemented
_C.MODEL.RETINANET.NORM = ""
# ---------------------------------------------------------------------------- #
# ResNe[X]t options (ResNets = {ResNet, ResNeXt}
# Note that parts of a resnet may be used for both the backbone and the head
# These options apply to both
# ---------------------------------------------------------------------------- #
_C.MODEL.RESNETS = CN()
_C.MODEL.RESNETS.DEPTH = 50
_C.MODEL.RESNETS.OUT_FEATURES = ["res4"]  # res4 for C4 backbone, res2..5 for FPN backbone
# Number of groups to use; 1 ==> ResNet; > 1 ==> ResNeXt
_C.MODEL.RESNETS.NUM_GROUPS = 1
# Options: FrozenBN, GN, "SyncBN", "BN"
_C.MODEL.RESNETS.NORM = "FrozenBN"
# Baseline width of each group.
# Scaling this parameters will scale the width of all bottleneck layers.
_C.MODEL.RESNETS.WIDTH_PER_GROUP = 64
# Place the stride 2 conv on the 1x1 filter
# Use True only for the original MSRA ResNet; use False for C2 and Torch models
_C.MODEL.RESNETS.STRIDE_IN_1X1 = True
# Apply dilation in stage "res5"
_C.MODEL.RESNETS.RES5_DILATION = 1
# Output width of res2. Scaling this parameters will scale the width of all 1x1 convs in ResNet
# For R18 and R34, this needs to be set to 64
_C.MODEL.RESNETS.RES2_OUT_CHANNELS = 256
# Output width of the stem (the layers before res2).
_C.MODEL.RESNETS.STEM_OUT_CHANNELS = 64
# Apply Deformable Convolution in stages
# Specify if apply deform_conv on Res2, Res3, Res4, Res5
_C.MODEL.RESNETS.DEFORM_ON_PER_STAGE = [False, False, False, False]