input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
import argparse
import os
import pytest
import vcf
from ..dumpSTR import *
# Set up base argparser
@pytest.fixture
def args(tmpdir):
    """Baseline dumpSTR arguments with every option at its default value.

    Returns an ``argparse.Namespace`` — a plain attribute container, which
    is all the tests need — instead of abusing an ``ArgumentParser``
    instance as an attribute bag (the original only happened to work
    because ArgumentParser tolerates arbitrary attribute assignment).
    Individual tests override only the attributes they exercise.
    """
    args = argparse.Namespace()
    args.vcf = None
    args.vcftype = "auto"
    args.out = str(tmpdir / "test")  # output prefix inside the per-test tmpdir
    args.min_locus_callrate = None
    args.min_locus_hwep = None
    args.min_locus_het = None
    args.max_locus_het = None
    args.use_length = False
    args.filter_regions = None
    args.filter_regions_names = None
    args.filter_hrun = False
    args.drop_filtered = False
    args.hipstr_min_call_DP = None
    args.hipstr_max_call_DP = None
    args.hipstr_min_call_Q = None
    args.hipstr_max_call_flank_indel = None
    args.hipstr_max_call_stutter = None
    args.hipstr_min_supp_reads = None
    args.gangstr_expansion_prob_het = None
    args.gangstr_expansion_prob_hom = None
    args.gangstr_expansion_prob_total = None
    args.gangstr_filter_span_only = False
    args.gangstr_filter_spanbound_only = False
    args.gangstr_filter_badCI = None
    args.gangstr_require_support = None
    args.gangstr_readlen = None
    args.gangstr_min_call_DP = None
    args.gangstr_max_call_DP = None
    args.gangstr_min_call_Q = None
    args.advntr_min_call_DP = None
    args.advntr_max_call_DP = None
    args.advntr_min_spanning = None
    args.advntr_min_flanking = None
    args.advntr_min_ML = None
    args.eh_min_ADFL = None
    args.eh_min_ADIR = None
    args.eh_min_ADSP = None
    args.eh_min_call_LC = None
    args.eh_max_call_LC = None
    args.popstr_min_call_DP = None
    args.popstr_max_call_DP = None
    args.popstr_require_support = None
    args.num_records = None
    args.die_on_warning = False
    args.verbose = False
    return args
# Test no such file or directory
def test_WrongFile(args, vcfdir):
    """main() must return 1 when the input VCF does not exist."""
    missing = os.path.join(vcfdir, "test_non_existent.vcf")
    if os.path.exists(missing):
        os.remove(missing)  # make sure the path really is absent
    args.vcf = missing
    assert main(args) == 1
# Test if basic inputs and threshold filters work for each file
def test_GangSTRFile(args, vcfdir):
    """Basic run plus GangSTR call-level threshold filters succeed."""
    args.vcf = os.path.join(vcfdir, "test_gangstr.vcf")
    args.num_records = 10
    args.gangstr_min_call_DP = 10
    args.gangstr_max_call_DP = 20
    args.gangstr_min_call_Q = 0.99
    args.gangstr_filter_span_only = True
    args.gangstr_filter_spanbound_only = True
    args.gangstr_filter_badCI = True
    args.gangstr_require_support = 2
    args.gangstr_readlen = 100
    assert main(args) == 0
    # Exercise each expansion-probability option on its own.
    for suffix in ("het", "hom", "total"):
        args.gangstr_expansion_prob_het = None
        args.gangstr_expansion_prob_hom = None
        args.gangstr_expansion_prob_total = None
        setattr(args, "gangstr_expansion_prob_" + suffix, 0.8)
        assert main(args) == 0
def test_HipSTRFile(args, vcfdir):
    """Basic run with HipSTR call-level filters enabled succeeds."""
    args.vcf = os.path.join(vcfdir, "test_hipstr.vcf")
    args.num_records = 10
    settings = {
        "hipstr_min_call_DP": 10,
        "hipstr_max_call_DP": 100,
        "hipstr_min_call_Q": 0.9,
        "hipstr_min_supp_reads": 2,
        "hipstr_max_call_flank_indel": 0.05,
        "hipstr_max_call_stutter": 0.01,
    }
    for option, value in settings.items():
        setattr(args, option, value)
    assert main(args) == 0
def test_AdVNTRFile(args, vcfdir):
    """Basic run with adVNTR call-level filters enabled succeeds."""
    args.vcf = os.path.join(vcfdir, "test_advntr.vcf")
    args.num_records = 10
    settings = {
        "advntr_min_call_DP": 10,
        "advntr_max_call_DP": 20,
        "advntr_min_spanning": 2,
        "advntr_min_flanking": 2,
        "advntr_min_ML": 0,
    }
    for option, value in settings.items():
        setattr(args, option, value)
    assert main(args) == 0
# TODO: uncomment. EH not implemented yet in TR Harmonizer
"""
def test_EHFile(args, vcfdir):
fname = os.path.join(vcfdir, "test_ExpansionHunter.vcf")
args.vcf = fname
args.num_records = 10
retcode = main(args)
assert retcode==0
"""
def test_PopSTRFile(args, vcfdir):
    """popSTR run with call-level filters; a 'fabricated' warning is expected."""
    args.vcf = os.path.join(vcfdir, "test_popstr.vcf")
    args.num_records = 10
    args.popstr_min_call_DP = 5
    args.popstr_max_call_DP = 100
    args.popstr_require_support = 2
    with pytest.warns(UserWarning, match="fabricated"):
        result = main(args)
    assert result == 0
# Test invalid options
def test_InvalidOptions(args, vcfdir):
    """Out-of-range locus-level thresholds must make main() return 1."""
    args.vcf = os.path.join(vcfdir, "test_popstr.vcf")
    # Mutations are applied cumulatively; each resulting state must fail.
    bad_states = [
        {"min_locus_hwep": -1},
        {"min_locus_hwep": 2},
        {"min_locus_hwep": None, "min_locus_het": -1},
        {"min_locus_het": 2},
        {"min_locus_het": None, "max_locus_het": -1},
        {"max_locus_het": 2},
        {"min_locus_het": 0.5, "max_locus_het": 0.2},  # min > max
    ]
    for state in bad_states:
        for option, value in state.items():
            setattr(args, option, value)
        assert main(args) == 1
# Test locus-level filters
def test_LocusLevel(args, vcfdir):
    """Locus-level filters run cleanly for every supported genotyper."""
    for tool in ("hipstr", "gangstr", "popstr", "advntr"):
        args.vcf = os.path.join(vcfdir, "test_%s.vcf" % tool)
        args.num_records = 10
        args.min_locus_callrate = 0.8
        args.min_locus_hwep = 10e-4
        args.min_locus_het = 0.1
        args.max_locus_het = 0.3
        args.use_length = True
        args.filter_hrun = True
        # Run once keeping filtered loci, once dropping them.
        for drop in (False, True):
            args.drop_filtered = drop
            assert main(args) == 0
def test_RegionFilters(args, regiondir, vcfdir):
    """Region-based filters: valid configurations succeed, broken ones fail."""
    def region(name):
        return os.path.join(regiondir, name)

    args.vcf = os.path.join(vcfdir, "test_gangstr.vcf")
    args.num_records = 10
    # One valid region file, first unnamed then named.
    args.filter_regions = region("test_regions1.bed.gz")
    assert main(args) == 0
    args.filter_regions_names = "test"
    assert main(args) == 0
    # Two region files with a matching pair of names.
    args.filter_regions = region("test_regions1.bed.gz") + "," + region("test_regions2.bed.gz")
    args.filter_regions_names = "test1,test2"
    assert main(args) == 0
    # Name count no longer matches region count.
    args.filter_regions_names = "test1"
    assert main(args) == 1
    # Region file does not exist.
    args.filter_regions = region("test_nonexistent.bed")
    assert main(args) == 1
    # Region file lacks a tabix index.
    args.filter_regions = region("test_regions3.bed.gz")
    assert main(args) == 1
    # Region file without 'chr' prefixes works with either VCF naming style.
    args.filter_regions = region("test_regions4.bed.gz")
    assert main(args) == 0
    args.vcf = os.path.join(vcfdir, "test_gangstr_nochr.vcf")
    assert main(args) == 0
def test_InvalidHipstrOptions(args, vcfdir):
    """Invalid HipSTR call-level thresholds must make main() return 1."""
    args.vcf = os.path.join(vcfdir, "test_hipstr.vcf")
    args.num_records = 10
    # Mutations are applied cumulatively; each resulting state must fail.
    bad_states = [
        {"hipstr_max_call_flank_indel": -1},
        {"hipstr_max_call_flank_indel": 2},
        {"hipstr_max_call_flank_indel": None, "hipstr_max_call_stutter": -1},
        {"hipstr_max_call_stutter": 2},
        {"hipstr_max_call_stutter": None, "hipstr_min_supp_reads": -1},
        {"hipstr_min_supp_reads": None, "hipstr_min_call_DP": -1},
        {"hipstr_min_call_DP": None, "hipstr_max_call_DP": -1},
        {"hipstr_min_call_DP": 5, "hipstr_max_call_DP": 2},  # min > max
        {"hipstr_min_call_DP": None, "hipstr_max_call_DP": None,
         "hipstr_min_call_Q": -1},
        {"hipstr_min_call_Q": 2},
    ]
    for state in bad_states:
        for option, value in state.items():
            setattr(args, option, value)
        assert main(args) == 1
def test_InvalidGangSTROptions(args, vcfdir):
    """Invalid GangSTR call-level thresholds must make main() return 1."""
    args.vcf = os.path.join(vcfdir, "test_gangstr.vcf")
    args.num_records = 10
    # Mutations are applied cumulatively; each resulting state must fail.
    bad_states = [
        {"gangstr_min_call_DP": -1},
        {"gangstr_min_call_DP": None, "gangstr_max_call_DP": -1},
        {"gangstr_min_call_DP": 5, "gangstr_max_call_DP": 2},  # min > max
        {"gangstr_min_call_DP": None, "gangstr_max_call_DP": None,
         "gangstr_min_call_Q": -1},
        {"gangstr_min_call_Q": 2},
        {"gangstr_min_call_Q": None, "gangstr_expansion_prob_het": -1},
        {"gangstr_expansion_prob_het": 2},
        {"gangstr_expansion_prob_het": None, "gangstr_expansion_prob_hom": -1},
        {"gangstr_expansion_prob_hom": 2},
        {"gangstr_expansion_prob_hom": None, "gangstr_expansion_prob_total": -1},
        {"gangstr_expansion_prob_total": 2},
        {"gangstr_expansion_prob_total": None, "gangstr_require_support": -1},
        {"gangstr_require_support": 2},  # support without a read length
        {"gangstr_readlen": 1},          # read length too small
    ]
    for state in bad_states:
        for option, value in state.items():
            setattr(args, option, value)
        assert main(args) == 1
def test_InvalidAdVNTROptions(args, vcfdir):
    """Invalid adVNTR call-level thresholds must make main() return 1."""
    args.vcf = os.path.join(vcfdir, "test_advntr.vcf")
    args.num_records = 10
    # Mutations are applied cumulatively; each resulting state must fail.
    bad_states = [
        {"advntr_min_call_DP": -1},
        {"advntr_min_call_DP": None, "advntr_max_call_DP": -1},
        {"advntr_min_call_DP": 5, "advntr_max_call_DP": 2},  # min > max
        {"advntr_min_call_DP": None, "advntr_max_call_DP": None,
         "advntr_min_ML": -1},
        {"advntr_min_ML": None, "advntr_min_flanking": -1},
        {"advntr_min_spanning": -1},
    ]
    for state in bad_states:
        for option, value in state.items():
            setattr(args, option, value)
        assert main(args) == 1
"""
def test_InvalidEHOptions(args, vcfdir):
fname = os.path.join(vcfdir, "test_ExpansionHunter.vcf")
args.vcf = fname
args.num_records = 10
# TODO add once EH is implemented
"""
def test_InvalidPopSTROptions(args, vcfdir):
    """Invalid popSTR call-level thresholds must make main() return 1."""
    args.vcf = os.path.join(vcfdir, "test_popstr.vcf")
    args.num_records = 10
    # Mutations are applied cumulatively; each resulting state must fail.
    bad_states = [
        {"popstr_min_call_DP": -1},
        {"popstr_min_call_DP": None, "popstr_max_call_DP": -1},
        {"popstr_min_call_DP": 5, "popstr_max_call_DP": 2},  # min > max
        {"popstr_min_call_DP": None, "popstr_max_call_DP": None,
         "popstr_require_support": -1},
    ]
    for state in bad_states:
        for option, value in state.items():
            setattr(args, option, value)
        assert main(args) == 1
def test_InvalidGenotyperOptions(args, vcfdir):
    """Tool-specific options must be rejected when the VCF comes from a
    different genotyper."""
    args.num_records = 10
    mismatches = [
        ("test_popstr.vcf", "hipstr_min_call_DP", 10),
        ("test_popstr.vcf", "gangstr_min_call_DP", 10),
        ("test_hipstr.vcf", "popstr_min_call_DP", 10),
        ("test_hipstr.vcf", "advntr_min_call_DP", 10),
        ("test_hipstr.vcf", "eh_min_call_LC", 5),
    ]
    for vcfname, option, value in mismatches:
        args.vcf = os.path.join(vcfdir, vcfname)
        setattr(args, option, value)
        assert main(args) == 1
        setattr(args, option, None)  # reset before the next mismatch
def test_InvalidOutput(capsys, args, vcfdir, tmpdir):
    """Unwritable output prefixes must fail (retcode 1) with a clear message."""
    fname = os.path.join(vcfdir, "test_popstr.vcf")
    args.vcf = fname
    # Fail when trying to output inside a nonexistant directory
    args.out = str(tmpdir / "notadirectory" / "somefilename")
    assert main(args) == 1
    # To simulate a permissions issue: fail when trying to write a file in a location
    # that is already a directory
    capsys.readouterr()  # discard any output captured so far
    (tmpdir / "foo.vcf").mkdir()  # main() will try to write <out>.vcf == this dir
    args.out = str(tmpdir / "foo")
    assert main(args) == 1
    # Make sure we produce a meaningful error message for this issue
    assert 'Is a directory:' in str(capsys.readouterr())
def test_TwoDumpSTRRounds(args, vcfdir, tmpdir):
    """Output of one dumpSTR run must be accepted as input to a second run."""
    args.num_records = 10
    args.vcf = os.path.join(vcfdir, "test_gangstr.vcf")
    args.min_locus_callrate = 0
    main(args)  # first round writes <tmpdir>/test.vcf
    args.vcf = str(tmpdir / "test.vcf")
    args.out = str(tmpdir / "test2")
    assert main(args) == 0
def test_BrokenVCF(args, vcfdir):
    """A malformed VCF must abort (retcode 1) when die_on_warning is set."""
    args.num_records = 10
    args.vcf = os.path.join(vcfdir, "test_broken.vcf")
    args.die_on_warning = True
    args.verbose = True
    assert main(args) == 1
"""
def test_Filters(args, vcfdir):
fname = os.path.join(vcfdir, "artificial_gangstr.vcf")
args.vcf = fname
args.vcftype = "gangstr"
artificial_vcf = vcf.Reader(filename=args.vcf)
## Test1: call passes with no filter
vcfcall = vcf.model._Call # Blank call
call_filters = []
reasons1 = FilterCall(vcfcall, call_filters)
assert reasons1==[]
# Line 1 of artificial vcf
record1 = next(artificial_vcf)
call1 = record1.samples[0]
## Check call1 attributes:
assert record1.CHROM=='chr1'
assert record1.POS==3004986
assert record1.REF=='tctgtctgtctg'
assert record1.INFO['RU']=='tctg'
assert call1['DP']==31
assert call1['Q']==0.999912
assert call1['QEXP']==[0.0, 5.1188e-05, 0.999949]
assert call1['RC']=='17,12,0,2'
## Test2: call filter: LowCallDepth
vcfcall = call1
args = base_argparse()
args.min_call_DP = 50
call_filters = BuildCallFilters(args)
reasons2 = FilterCall(vcfcall, call_filters)
assert reasons2==['LowCallDepth']
## Test3: call filter: HighCallDepth
vcfcall = call1
args = base_argparse()
args.max_call_DP = 10
call_filters = BuildCallFilters(args)
reasons3 = FilterCall(vcfcall, call_filters)
assert reasons3==['HighCallDepth']
## Test4: call filter: LowCallQ
vcfcall = call1
args = base_argparse()
args.min_call_Q = | |
<reponame>leo-oliveiraa/wa-automation-tool
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.chrome.options import Options
from time import sleep
from colorama import Fore
from colorama import Style
import os
import csv
# ---
# Global variables
# ---
# Shared mutable session state for the CLI.
contacts = []   # contact names typed by the user (see add_contacts)
numbers = []    # phone numbers; manual entry stores ints (see add_contacts)
message = []    # message content to send — managed outside this chunk
media = []      # image/video attachment names
documents = []  # document attachment names, resolved against ./doc
driver = None   # selenium webdriver; not initialized in this chunk
# ---
# Main menu
# ---
def main_menu():
    """Top-level interactive menu; dispatches to the chosen sub-flow."""
    print(f'{Fore.CYAN}# Whatsapp Automation Tool')
    print(f'{Fore.CYAN}# Menu principal\n')
    print(f'{Fore.CYAN}1.{Style.RESET_ALL} Iniciar envio')
    print(f'{Fore.CYAN}2.{Style.RESET_ALL} Gerenciar contatos')
    print(f'{Fore.CYAN}3.{Style.RESET_ALL} Gerenciar anexos')
    print(f'{Fore.CYAN}4.{Style.RESET_ALL} Gerenciar mensagem')
    print(f'{Fore.CYAN}5.{Style.RESET_ALL} Sair\n')
    actions = {
        1: start_sending,
        2: mng_contacts,
        3: mng_attachments,
        4: mng_message,
    }
    try:
        choice = int(input(f'{Style.RESET_ALL}Digite a opção desejada [1-5]:{Fore.CYAN} '))
        if choice in actions:
            clear()
            actions[choice]()
        elif choice != 5:  # 5 == quit: simply return
            send_error('ERRO: Opção inválida!')
            main_menu()
    except ValueError:
        send_error('ERRO: Opção inválida!')
        main_menu()
# ---
# Manage contacts
# ---
def mng_contacts():
    """Contacts-management sub-menu: names, numbers, or back to main."""
    print(f'{Fore.CYAN}# Whatsapp Automation Tool')
    print(f'{Fore.CYAN}# Gerenciar contatos\n')
    print(f'{Fore.CYAN}1.{Style.RESET_ALL} Contatos')
    print(f'{Fore.CYAN}2.{Style.RESET_ALL} Números')
    print(f'{Fore.CYAN}3.{Style.RESET_ALL} Voltar\n')
    actions = {1: contacts_menu, 2: numbers_menu, 3: main_menu}
    try:
        choice = int(input(f'{Style.RESET_ALL}Digite a opção desejada [1-3]:{Fore.CYAN} '))
        if choice in actions:
            clear()
            actions[choice]()
        else:
            send_error('ERRO: Opção inválida!')
            mng_contacts()
    except ValueError:
        send_error('ERRO: Opção inválida!')
        mng_contacts()
def contacts_menu():
    """Named-contacts menu: list current names, then add/remove/import/export."""
    print(f'{Fore.CYAN}# Whatsapp Automation Tool')
    print(f'{Fore.CYAN}# Contatos')
    if contacts:
        print(f'\n{Style.RESET_ALL}{contacts}\n')
    else:
        print(f'\n{Fore.RED}# Você ainda não adicionou contatos a lista!\n')
    print(f'{Fore.CYAN}1.{Style.RESET_ALL} Adicionar contatos')
    print(f'{Fore.CYAN}2.{Style.RESET_ALL} Remover contato')
    print(f'{Fore.CYAN}3.{Style.RESET_ALL} Importar lista de contatos')
    print(f'{Fore.CYAN}4.{Style.RESET_ALL} Exportar lista de contatos')
    print(f'{Fore.CYAN}5.{Style.RESET_ALL} Voltar\n')
    # All helpers take 1 == "contacts" mode.
    actions = {
        1: lambda: add_contacts(1),
        2: lambda: rmv_contacts(1),
        3: lambda: import_contacts(1),
        4: lambda: export_contacts(1),
        5: mng_contacts,
    }
    try:
        choice = int(input(f'{Style.RESET_ALL}Digite a opção desejada [1-5]:{Fore.CYAN} '))
        if choice in actions:
            clear()
            actions[choice]()
        else:
            send_error('ERRO: Opção inválida!')
            contacts_menu()
    except ValueError:
        send_error('ERRO: Opção inválida!')
        contacts_menu()
def numbers_menu():
    """Phone-numbers menu: list current numbers, then add/remove/import/export."""
    print(f'{Fore.CYAN}# Whatsapp Automation Tool')
    print(f'{Fore.CYAN}# Números')
    if numbers:
        print(f'\n{Style.RESET_ALL}{numbers}\n')
    else:
        print(f'\n{Fore.RED}# Você ainda não adicionou números a lista!\n')
    print(f'{Fore.CYAN}1.{Style.RESET_ALL} Adicionar números')
    print(f'{Fore.CYAN}2.{Style.RESET_ALL} Remover número')
    print(f'{Fore.CYAN}3.{Style.RESET_ALL} Importar lista de números')
    print(f'{Fore.CYAN}4.{Style.RESET_ALL} Exportar lista de números')
    print(f'{Fore.CYAN}5.{Style.RESET_ALL} Voltar\n')
    # All helpers take 2 == "numbers" mode.
    actions = {
        1: lambda: add_contacts(2),
        2: lambda: rmv_contacts(2),
        3: lambda: import_contacts(2),
        4: lambda: export_contacts(2),
        5: mng_contacts,
    }
    try:
        choice = int(input(f'{Style.RESET_ALL}Digite a opcão desejada [1-5]:{Fore.CYAN} '))
        if choice in actions:
            clear()
            actions[choice]()
        else:
            send_error('ERRO: Opção inválida!')
            numbers_menu()
    except ValueError:
        send_error('ERRO: Opção inválida!')
        numbers_menu()
def add_contacts(type):
    """Prompt for up to 20 new entries and append them to the global
    ``contacts`` list (type == 1) or ``numbers`` list (any other value),
    then redisplay the corresponding menu.

    Fixes: the numbers branch used to restart the *contacts* flow on
    errors, and failed guards fell through into the input loop (with
    ``quantity`` unbound after a ValueError).
    """
    if type == 1:
        global contacts
        print(f'{Fore.CYAN}# Whatsapp Automation Tool')
        print(f'{Fore.CYAN}# Adicionar contatos\n')
        try:
            print(f'{Fore.YELLOW}# Máximo de 20 contatos por vez.')
            print(f'{Fore.YELLOW}# Se quiser adicionar mais de 20 contatos, importe um arquivo .csv!\n')
            quantity = int(input(f'{Fore.CYAN}#{Style.RESET_ALL} Digite a quantidade que deseja adicionar:{Fore.CYAN} '))
            if quantity > 20:
                send_error('ERRO: Máximo de 20 contatos por vez!')
                add_contacts(1)
                return  # the retry above owns the rest of the flow
        except ValueError:
            send_error('ERRO: Use apenas números!')
            add_contacts(1)
            return  # quantity is unbound here; do not fall into the loop
        clear()
        print(f'{Fore.CYAN}# Whatsapp Automation Tool')
        print(f'{Fore.CYAN}# Adicionar contatos\n')
        for x in range(0, quantity):
            contact = str(input(f'{Fore.CYAN}#{Style.RESET_ALL} Digite o nome do contato {x+1}/{quantity}:{Fore.CYAN} '))
            contacts.append(contact)
        input(Style.RESET_ALL + '\nPressione Enter para continuar...')
        clear()
        contacts_menu()
    else:
        global numbers
        print(f'{Fore.CYAN}# Whatsapp Automation Tool')
        print(f'{Fore.CYAN}# Adicionar números\n')
        try:
            print(f'{Fore.YELLOW}# Máximo de 20 números por vez.')
            print(f'{Fore.YELLOW}# Se quiser adicionar mais de 20 números, importe um arquivo .csv!\n')
            quantity = int(input(f'{Fore.CYAN}#{Style.RESET_ALL} Digite a quantidade que deseja adicionar:{Fore.CYAN} '))
            if quantity > 20:
                send_error('ERRO: Máximo de 20 números por vez!')
                add_contacts(2)  # BUGFIX: retry the numbers flow (was add_contacts(1))
                return
        except ValueError:
            send_error('ERRO: Use apenas números!')
            add_contacts(2)  # BUGFIX: retry the numbers flow (was add_contacts(1))
            return
        clear()
        print(f'{Fore.CYAN}# Whatsapp Automation Tool')
        print(f'{Fore.CYAN}# Adicionar números\n')
        for x in range(0, quantity):
            # Re-prompt until the phone number parses as an integer.
            while True:
                try:
                    number = int(input(f'{Fore.CYAN}#{Style.RESET_ALL} Digite o número de celular {x+1}/{quantity}:{Fore.CYAN} '))
                    break
                except ValueError:
                    print(f'{Fore.RED}ERRO: Use apenas números, sem pontos ou hífens!')
            numbers.append(number)
        input(Style.RESET_ALL + '\nPressione Enter para continuar...')
        clear()
        numbers_menu()
def rmv_contacts(type):
    """Remove one entry (or all entries) from the global ``contacts``
    list (type == 1) or ``numbers`` list, then return to the matching menu.

    Fixes: empty-list guards and error retries now ``return`` instead of
    falling through into the removal flow / extra prompts.
    """
    if type == 1:
        global contacts
        if len(contacts) == 0:
            send_error('ERRO: Você ainda não adicionou nenhum contato!')
            contacts_menu()
            return  # guard: nothing to remove
        print(f'{Fore.CYAN}# Whatsapp Automation Tool')
        print(f'{Fore.CYAN}# Remover contatos')
        print(f'\n{Style.RESET_ALL}{contacts}\n')
        print(f"{Fore.YELLOW}# Digite 'Remover Todos' caso queira remover todos os contatos!")
        print(f"{Fore.YELLOW}# Digite 'Voltar' caso tenha desistido de remover!\n")
        try:
            contact = str(input(f'{Fore.CYAN}#{Style.RESET_ALL} Digite o contato que deseja remover:{Fore.CYAN} '))
            if contact == 'Remover Todos' or contact == 'remover todos':
                contacts = []
                print(f'{Fore.RED}# Todos os contatos foram removidos!')
            elif contact == 'Voltar' or contact == 'voltar':
                clear()
                contacts_menu()
                return  # back to the menu; skip the pause below
            else:
                contacts.remove(contact)  # raises ValueError if absent
                print(f'{Fore.RED}# O contato {contact} foi removido!')
            input(f'{Style.RESET_ALL}\nPressione Enter para continuar...')
            clear()
            contacts_menu()
        except ValueError:
            send_error('ERRO: O contato digitado não está na lista!')
            rmv_contacts(1)
    else:
        global numbers
        if len(numbers) == 0:
            send_error('ERRO: Você ainda não adicionou nenhum número!')
            numbers_menu()
            return  # guard: nothing to remove
        print(f'{Fore.CYAN}# Whatsapp Automation Tool')
        print(f'{Fore.CYAN}# Remover números')
        print(f'\n{Style.RESET_ALL}{numbers}\n')
        print(f"{Fore.YELLOW}# Digite 'Remover Todos' caso queira remover todos os números!")
        print(f"{Fore.YELLOW}# Digite 'Voltar' caso tenha desistido de remover!\n")
        try:
            number = str(input(f'{Fore.CYAN}#{Style.RESET_ALL} Digite o número que deseja remover:{Fore.CYAN} '))
            if number == 'Remover Todos' or number == 'remover todos':
                numbers = []
                print(f'{Fore.RED}# Todos os números foram removidos!')
            elif number == 'Voltar' or number == 'voltar':
                clear()
                numbers_menu()
                return  # back to the menu; skip the pause below
            else:
                try:
                    numbers.remove(int(number))  # int() or remove() may raise
                    print(f'{Fore.RED}# O número {number} foi removido!')
                except ValueError:
                    send_error('ERRO: O número digitado não está na lista!')
                    rmv_contacts(2)
                    return  # BUGFIX: do not fall through after the retry
            input(f'{Style.RESET_ALL}\nPressione Enter para continuar...')
            clear()
            numbers_menu()
        except ValueError:
            send_error('ERRO: O número digitado não está na lista!')
            rmv_contacts(2)
def import_contacts(type):
    """Load entries from ``contatos.csv`` (type == 1) or ``numeros.csv``
    into the matching global list, then redisplay that list's menu.

    Fix: digit-only number rows are stored as ``int`` so they match the
    ints appended by add_contacts and can later be removed by
    rmv_contacts, which calls ``numbers.remove(int(...))``.
    """
    if type == 1:
        global contacts
        print(f'{Fore.CYAN}# Whatsapp Automation Tool')
        print(f'{Fore.CYAN}# Importar contatos\n')
        try:
            with open('contatos.csv') as f:
                for row in f:
                    contacts.append(row.replace('\n', ''))
            print(f'{Fore.GREEN}# Contatos importados com sucesso!')
            input(f'{Style.RESET_ALL}\nPressione Enter para continuar...')
            clear()
            contacts_menu()
        except FileNotFoundError:
            send_error('ERRO: O arquivo "contatos.csv" não foi encontrado!')
            contacts_menu()
    else:
        global numbers
        print(f'{Fore.CYAN}# Whatsapp Automation Tool')
        print(f'{Fore.CYAN}# Importar números\n')
        try:
            with open('numeros.csv') as f:
                for row in f:
                    value = row.replace('\n', '')
                    # BUGFIX: keep numbers as ints (non-digit rows kept verbatim).
                    numbers.append(int(value) if value.isdigit() else value)
            print(f'{Fore.GREEN}# Números importados com sucesso!')
            input(f'{Style.RESET_ALL}\nPressione Enter para continuar...')
            clear()
            numbers_menu()
        except FileNotFoundError:
            send_error('ERRO: O arquivo "numeros.csv" não foi encontrado!')
            numbers_menu()
def export_contacts(type):
    """Write the global ``contacts`` (type == 1) or ``numbers`` list to a
    one-column CSV file, then return to the matching menu.

    Fixes: the empty-numbers guard used to send the user to the contacts
    menu, and both guards fell through into the export code.
    """
    if type == 1:
        global contacts
        if len(contacts) == 0:
            send_error('ERRO: Você ainda não adicionou nenhum contato!')
            contacts_menu()
            return  # guard: nothing to export
        print(f'{Fore.CYAN}# Whatsapp Automation Tool')
        print(f'{Fore.CYAN}# Exportar contatos\n')
        with open('contatos.csv', 'w', newline='') as f:
            wt = csv.writer(f)
            for i in contacts:
                wt.writerow([i])
        print(f'{Fore.GREEN}# Contatos exportados com sucesso!')
        input(f'{Style.RESET_ALL}\nPressione Enter para continuar...')
        clear()
        contacts_menu()
    else:
        global numbers
        if len(numbers) == 0:
            send_error('ERRO: Você ainda não adicionou nenhum número!')
            numbers_menu()  # BUGFIX: was contacts_menu()
            return  # guard: nothing to export
        print(f'{Fore.CYAN}# Whatsapp Automation Tool')
        print(f'{Fore.CYAN}# Exportar números\n')
        with open('numeros.csv', 'w', newline='') as f:
            wt = csv.writer(f)
            for i in numbers:
                wt.writerow([i])
        print(f'{Fore.GREEN}# Números exportados com sucesso!')
        input(f'{Style.RESET_ALL}\nPressione Enter para continuar...')
        clear()
        numbers_menu()
# ---
# Manage attachments
# ---
def mng_attachments():
    """Attachments-management sub-menu: documents, media, or back to main."""
    print(f'{Fore.CYAN}# Whatsapp Automation Tool')
    print(f'{Fore.CYAN}# Gerenciar anexos\n')
    print(f'{Fore.CYAN}1.{Style.RESET_ALL} Documentos')
    print(f'{Fore.CYAN}2.{Style.RESET_ALL} Imagens/Videos')
    print(f'{Fore.CYAN}3.{Style.RESET_ALL} Voltar\n')
    actions = {1: documents_menu, 2: media_menu, 3: main_menu}
    try:
        choice = int(input(f'{Style.RESET_ALL}Digite a opção desejada [1-3]:{Fore.CYAN} '))
        if choice in actions:
            clear()
            actions[choice]()
        else:
            send_error('ERRO: Opção inválida!')
            mng_attachments()
    except ValueError:
        send_error('ERRO: Opção inválida!')
        mng_attachments()
def documents_menu():
    """Document attachments menu: list current documents, attach or remove."""
    print(f'{Fore.CYAN}# Whatsapp Automation Tool')
    print(f'{Fore.CYAN}# Documentos')
    if documents:
        print(f'\n{Style.RESET_ALL}{documents}\n')
    else:
        print(f'\n{Fore.RED}# Você ainda não anexou documentos!\n')
    print(f'{Fore.CYAN}1.{Style.RESET_ALL} Anexar documento')
    print(f'{Fore.CYAN}2.{Style.RESET_ALL} Remover documento')
    print(f'{Fore.CYAN}3.{Style.RESET_ALL} Voltar\n')
    # All helpers take 1 == "documents" mode.
    actions = {
        1: lambda: add_attachment(1),
        2: lambda: rmv_attachment(1),
        3: mng_attachments,
    }
    try:
        choice = int(input(f'{Style.RESET_ALL}Digite a opção desejada [1-3]:{Fore.CYAN} '))
        if choice in actions:
            clear()
            actions[choice]()
        else:
            send_error('ERRO: Opção inválida!')
            documents_menu()
    except ValueError:
        send_error('ERRO: Opção inválida!')
        documents_menu()
def media_menu():
    """Image/video attachments menu: list current media, attach or remove."""
    print(f'{Fore.CYAN}# Whatsapp Automation Tool')
    print(f'{Fore.CYAN}# Imagens/videos')
    if media:
        print(f'\n{Style.RESET_ALL}{media}\n')
    else:
        print(f'\n{Fore.RED}# Você ainda não anexou imagens/videos!\n')
    print(f'{Fore.CYAN}1.{Style.RESET_ALL} Anexar imagem/video')
    print(f'{Fore.CYAN}2.{Style.RESET_ALL} Remover imagem/video')
    print(f'{Fore.CYAN}3.{Style.RESET_ALL} Voltar\n')
    # All helpers take 2 == "media" mode.
    actions = {
        1: lambda: add_attachment(2),
        2: lambda: rmv_attachment(2),
        3: mng_attachments,
    }
    try:
        choice = int(input(f'{Style.RESET_ALL}Digite a opção desejada [1-3]:{Fore.CYAN} '))
        if choice in actions:
            clear()
            actions[choice]()
        else:
            send_error('ERRO: Opção inválida!')
            media_menu()
    except ValueError:
        send_error('ERRO: Opção inválida!')
        media_menu()
def add_attachment(type):
if type == 1:
global documents
print(f'{Fore.CYAN}# Whatsapp Automation Tool')
print(f'{Fore.CYAN}# Anexar documento\n')
print(f"{Fore.YELLOW}# Digite 'Voltar' caso tenha desistido de anexar!")
attach = str(input(f'{Fore.CYAN}#{Style.RESET_ALL} Digite o nome do documento que deseja anexar:{Fore.CYAN} '))
if attach == 'Voltar' or attach == 'voltar':
clear()
documents_menu()
else:
file = f'./doc/{attach}'
if os.path.exists(file):
documents.append(attach)
print(f'{Fore.GREEN}# O documento {attach} foi anexado!')
input(f'{Style.RESET_ALL}\nPressione Enter para continuar...')
clear()
documents_menu()
else:
send_error('ERRO: Arquivo não encontrado na pasta documentos!')
add_attachment(1)
else:
global media
print(f'{Fore.CYAN}# Whatsapp Automation Tool')
print(f'{Fore.CYAN}# Anexar imagem/video\n')
print(f"{Fore.YELLOW}# Digite 'Voltar' caso tenha desistido de anexar!")
attach = | |
"champion's",
'champions',
'championship',
'champs',
'chan',
'chance',
'chanced',
'chances',
'chancing',
'chancy',
'chandelier',
'chandler',
'chanel',
'change',
'change-o',
'changeable',
'changed',
'changer',
'changers',
'changes',
"changin'",
'changing',
'channel',
"channel's",
'channeled',
'channels',
'chansey',
'chant',
"chant's",
'chanted',
'chanting',
'chants',
'chanty',
'chanukah',
'chaos',
'chaotic',
'chap',
'chapeau',
'chapel',
'chappy',
'chaps',
'chapsitck',
'chapter',
"chapter's",
'chaptered',
'chaptering',
'chapters',
'char',
'character',
"character's",
'charactered',
'charactering',
'characteristics',
'characters',
'charade',
"charade's",
'charades',
'charcoal',
'chard',
'chare',
'chares',
'charge',
'charged',
'charger',
"charger's",
'chargers',
'charges',
'charging',
'chariot',
'charisma',
'charismatic',
'charitable',
'charity',
'charizard',
'charles',
'charley',
'charlie',
"charlie's",
'charlotte',
'charlottes',
'charm',
"charm's",
'charmander',
'charmed',
'charmeleon',
'charmer',
'charmers',
'charming',
"charming's",
'charmingly',
'charms',
'charred',
'chars',
'chart',
"chart's",
'charted',
'charter',
'chartered',
'charters',
'charting',
'chartings',
'chartreuse',
'charts',
'chase',
"chase's",
'chased',
'chaser',
'chasers',
'chases',
'chasing',
'chasse',
'chassed',
'chastise',
'chastised',
'chastity',
'chat',
"chat's",
'chateau',
'chatoyant',
'chats',
'chatted',
'chatter',
"chatter's",
'chattering',
'chatters',
'chatting',
'chatty',
'chauffer',
'chauffeur',
'chd',
'cheap',
'cheapen',
'cheapens',
'cheaper',
'cheapest',
'cheaply',
'cheapo',
'cheapskate',
'cheapskates',
'cheat',
"cheat's",
'cheated',
'cheater',
"cheater's",
'cheaters',
'cheating',
'cheats',
'check',
"check's",
'checkbook',
'checkbox',
'checked',
'checker',
"checker's",
'checkerboard',
'checkered',
'checkers',
'checking',
'checklist',
'checkmark',
'checkout',
'checkpoint',
'checks',
'checkup',
'cheddar',
'cheek',
'cheeks',
'cheeky',
'cheep',
'cheer',
"cheer's",
'cheered',
'cheerer',
'cheerers',
'cheerful',
'cheerier',
'cheering',
'cheerio',
'cheerios',
'cheerleader',
'cheerleaders',
'cheerleading',
'cheerly',
'cheers',
'cheery',
'cheese',
"cheese's",
'cheeseburger',
"cheeseburger's",
'cheeseburgers',
'cheesecake',
'cheesed',
'cheeses',
'cheesey',
'cheesier',
'cheesiest',
'cheesiness',
'cheesing',
'cheesy',
'cheetah',
"cheetah's",
'cheetahs',
'cheezer',
'cheezy',
'cheezybee',
'cheezyberry',
'cheezyblabber',
'cheezybocker',
'cheezyboing',
'cheezyboom',
'cheezybounce',
'cheezybouncer',
'cheezybrains',
'cheezybubble',
'cheezybumble',
'cheezybump',
'cheezybumper',
'cheezyburger',
'cheezychomp',
'cheezycorn',
'cheezycrash',
'cheezycrumbs',
'cheezycrump',
'cheezycrunch',
'cheezydoodle',
'cheezydorf',
'cheezyface',
'cheezyfidget',
'cheezyfink',
'cheezyfish',
'cheezyflap',
'cheezyflapper',
'cheezyflinger',
'cheezyflip',
'cheezyflipper',
'cheezyfoot',
'cheezyfuddy',
'cheezyfussen',
'cheezygadget',
'cheezygargle',
'cheezygloop',
'cheezyglop',
'cheezygoober',
'cheezygoose',
'cheezygrooven',
'cheezyhoffer',
'cheezyhopper',
'cheezyjinks',
'cheezyklunk',
'cheezyknees',
'cheezymarble',
'cheezymash',
'cheezymonkey',
'cheezymooch',
'cheezymouth',
'cheezymuddle',
'cheezymuffin',
'cheezymush',
'cheezynerd',
'cheezynoodle',
'cheezynose',
'cheezynugget',
'cheezyphew',
'cheezyphooey',
'cheezypocket',
'cheezypoof',
'cheezypop',
'cheezypounce',
'cheezypow',
'cheezypretzel',
'cheezyquack',
'cheezyroni',
'cheezyscooter',
'cheezyscreech',
'cheezysmirk',
'cheezysnooker',
'cheezysnoop',
'cheezysnout',
'cheezysocks',
'cheezyspeed',
'cheezyspinner',
'cheezysplat',
'cheezysprinkles',
'cheezysticks',
'cheezystink',
'cheezyswirl',
'cheezyteeth',
'cheezythud',
'cheezytoes',
'cheezyton',
'cheezytoon',
'cheezytooth',
'cheezytwist',
'cheezywhatsit',
'cheezywhip',
'cheezywig',
'cheezywoof',
'cheezyzaner',
'cheezyzap',
'cheezyzapper',
'cheezyzilla',
'cheezyzoom',
'chef',
"chef's",
'chefs',
'chelsea',
"chelsea's",
'chelseas',
'chemical',
'chemicals',
'cherish',
'cherishes',
'chernabog',
"chernabog's",
'cherries',
'cherry',
'cherrywood',
'cherubfish',
'cheryl',
'cheshire',
"cheshire's",
'chess',
'chest',
'chester',
'chestnut',
'chestnut-shell',
'chetagua',
'chetermo',
'chetik',
'chetros',
'chettia',
'chetuan',
'chevalle',
'chew',
'chewchow',
'chewed',
'chewing',
'cheyenne',
"cheyenne's",
'cheyennes',
'chez',
'chic',
'chick',
"chick's",
'chickadee',
'chickadees',
'chicken',
"chicken's",
'chickened',
'chickenhearted',
'chickening',
'chickens',
'chicks',
'chief',
"chief's",
'chiefly',
'chiefs',
'chiffon',
'chihuahua',
'child',
"child's",
'childcare',
'childhood',
'childish',
'childlike',
'children',
"children's",
'chili',
"chili's",
'chill',
"chill's",
'chilled',
'chillin',
"chillin'",
'chilling',
'chills',
'chilly',
'chillycog',
'chim',
'chime',
"chime's",
'chimes',
'chiming',
'chimnes',
'chimney',
'chimneys',
'chimp',
'chin',
"chin's",
'china',
'chinchilla',
"chinchilla's",
'chinchillas',
'chine',
'ching',
'chino',
'chins',
'chip',
"chip's",
'chipmunk',
"chipmunk's",
'chipmunks',
'chipotle',
'chipped',
'chipper',
'chipping',
'chips',
'chiropractic',
'chiropractor',
'chirp',
'chirping',
'chirpy',
'chisel',
'chit',
'chit-chat',
'chivalrous',
'chivalry',
'chloe',
'choc',
'chock',
'chocolate',
"chocolate's",
'chocolates',
'choice',
'choicely',
'choicer',
'choices',
'choicest',
'choir',
'choirs',
'choking',
'chomp',
'chomping',
'chomugon',
'choo',
'choo-choo',
"choo-choo's",
'choo-choos',
'chook',
'choose',
'chooser',
'choosers',
'chooses',
'choosey',
'choosing',
'choosy',
'chop',
'chopin',
'chopped',
'chopper',
'choppers',
'choppier',
'choppiness',
'chopping',
'choppy',
'chops',
'chopsticks',
'choral',
'chord',
'chords',
'chore',
'choreographed',
'chores',
'chortle',
'chortled',
'chortles',
'chortling',
'chorus',
'chose',
'chosen',
'chow',
'chowder',
'chris',
'christina',
"christina's",
'christinas',
'christmas',
'christmastime',
'christopher',
'chrome',
"chrome's",
'chromed',
'chromes',
'chronicle',
'chronicled',
'chronicles',
'chronicling',
'chuck',
"chuck's",
'chucked',
'chucking',
'chuckle',
"chuckle's",
'chuckled',
'chuckles',
'chuckling',
'chucks',
'chucky',
'chuff',
'chug',
'chugged',
'chugging',
'chugyo',
'chum',
"chum's",
'chummed',
'chums',
'chunk',
'chunked',
'chunking',
'chunks',
'chunky',
'church',
'churches',
'churn',
'churned',
'churning',
'churro',
"churro's",
'churros',
'chute',
'chutes',
'cia',
'ciao',
'ciaran',
'cid',
'cider',
'cienfuegos',
'cierra',
'cimson',
'cinammon',
'cinch',
'cinda',
"cinda's",
'cinder',
"cinder's",
'cinderbones',
'cinderella',
"cinderella's",
'cinderellas',
'cinders',
'cindy',
'cine',
'cinema',
"cinema's",
'cinemas',
'cinematic',
'cinematics',
'cineplex',
'cinerama',
'cinnamon',
"cinnamon's",
'cinnamons',
'cir',
'circ',
'circle',
"circle's",
'circled',
'circler',
'circlers',
'circles',
'circling',
'circuit',
"circuit's",
'circuited',
'circuiting',
'circuits',
'circular',
"circular's",
'circularly',
'circulars',
'circumnavigate',
'circumstance',
'circumstances',
'circumvent',
'circus',
"circus's",
'circuses',
'citadel',
'citations',
'cite',
'cites',
'cities',
'citified',
'citing',
'citizen',
"citizen's",
'citizenly',
'citizens',
'citn',
'citrine',
'citrus',
'city',
'civics',
'civil',
'civilians',
'civility',
'civilization',
'civilizations',
'civilized',
'cj',
"cj'd",
"cj's",
'cjed',
'cjing',
'cjs',
'clack',
'clad',
'clafairy',
'claim',
"claim's",
'claimed',
'claimer',
'claiming',
'claims',
'claire',
'clam',
"clam's",
'clamed',
'clammed',
'clams',
'clan',
'clang',
'clangs',
'clank',
'clans',
'clap',
'clapped',
'clapping',
'claps',
'clara',
'clarabelle',
"clarabelle's",
'clarence',
'clarified',
'clarify',
'clarifying',
'clarion',
'clarissa',
'clarity',
'clark',
'clash',
'clashes',
'clashing',
'class',
"class's",
'classed',
'classer',
'classes',
'classic',
"classic's",
'classical',
'classics',
'classiest',
'classifications',
'classified',
'classify',
'classing',
'classmate',
"classmate's",
'classmates',
'classy',
'claus',
'clause',
'clauses',
'claustrophobia',
'claustrophobic',
'clavier',
'claw',
"claw's",
'clawed',
'clawing',
'claws',
'clawssified',
'clay',
'clayton',
'clean',
'cleaned',
'cleaner',
"cleaner's",
'cleaners',
'cleanest',
'cleaning',
'cleanliness',
'cleanly',
'cleanout',
'cleans',
'cleanse',
'cleansing',
'cleanup',
'clear',
'clearance',
'cleared',
'clearer',
'clearest',
'clearing',
'clearings',
'clearly',
'clears',
"cleat's",
'cleats',
'cleave',
'cleaved',
'cleaver',
'cleaves',
'clefable',
'cleff',
'clementine',
'cleric',
'clerics',
'clerk',
"clerk's",
'clerks',
'clever',
'clew',
'click',
'click-and-drag',
'clickable',
'clickables',
'clicked',
'clicker',
'clickers',
'clicking',
'clicks',
'client',
"client's",
'clientele',
'clients',
'cliff',
"cliff's",
'cliffs',
'climactic',
'climate',
"climate's",
'climates',
'climb',
'climbed',
'climber',
'climbers',
'climbing',
'climbs',
'clime',
'cling',
'clinger',
'clinging',
'clings',
'clingy',
'clinic',
'clinics',
'clink',
'clinker',
'clip',
'clipboard',
'clipped',
'clipper',
'clippers',
'clipping',
'clips',
'clique',
'cloak',
'cloaking',
'clobber',
'clobbered',
'clock',
'clocked',
'clocker',
'clockers',
'clocking',
'clockings',
'clocks',
'clockwise',
'clockwork',
'clockworks',
'clod',
'clodley',
'clods',
'clog',
'clogged',
'clogging',
'clogs',
'clomping',
'clone',
"clone's",
'cloned',
'clones',
'cloning',
'clonk',
'clopping',
'close',
'close-up',
'closed',
'closely',
'closeness',
'closer',
'closers',
'closes',
'closest',
'closet',
'closets',
'closing',
'closings',
'closure',
'cloth',
'clothe',
'clothed',
'clothes',
'clothesline',
'clothespins',
'clothing',
'cloths',
'clots',
'cloture',
'cloud',
"cloud's",
'clouded',
'clouding',
'clouds',
'cloudy',
'clout',
'clove',
'clover',
"clover's",
'cloverleaf',
"cloverleaf's",
'cloverleafs',
'clovers',
'cloves',
'clovi',
'clovinia',
'clovis',
'clowder',
'clown',
'clowns',
'cloyster',
'clu',
'club',
"club's",
'club33',
'clubbing',
'clubhouse',
'clubpenguin',
'clubs',
'clucked',
'clucking',
'clue',
"clue's",
'clued',
'clueing',
'clueless',
'clues',
'clump',
'clumped',
'clumsies',
"clumsies'",
'clumsily',
'clumsy',
'clunk',
'clunker',
'clunkers',
'clunky',
'cluster',
"cluster's",
'clustered',
'clusters',
'clutch',
'clutches',
'clutching',
'clutter',
'cluttered',
'clyde',
'clydesdale',
'co',
'co-starred',
'coach',
"coach's",
"coache's",
'coached',
'coacher',
'coaches',
'coaching',
'coal',
"coal's",
'coaled',
'coaler',
'coalfire',
'coaling',
'coalmine',
'coals',
'coarse',
'coast',
'coastal',
'coasted',
'coaster',
"coaster's",
'coasters',
'coasting',
'coastline',
'coasts',
'coat',
"coat's",
'coated',
'coater',
'coatings',
'coats',
'coax',
'coaxes',
'cobalt',
'cobber',
'cobble',
'cobbler',
'cobbles',
'cobblestone',
'cobra',
"cobra's",
'cobras',
'cobweb',
'cobwebs',
'coca',
'coco',
"coco's",
'cocoa',
'coconut',
"coconut's",
'coconuts',
'cod',
'coda',
'codas',
'coddles',
'code',
"code's",
'codec',
'coded',
'coder',
'coders',
'codes',
'codex',
'codfish',
'coding',
'codings',
'cods',
'cody',
"cody's",
'coed',
'coerce',
'coffee',
"coffee's",
'coffees',
'coffer',
'coffers',
'cog',
"cog's",
'cog-o-war',
'cog-tastrophe',
'cog0war',
"cogbuck's",
'cogbucks',
'cogcicle',
'cognation',
'cogowar',
'cogs',
'cogwar',
'coherently',
'cohesive',
'cohort',
'coiffure',
'coil',
'coiled',
'coin',
"coin's",
'coinage',
'coincide',
'coincidence',
'coincidences',
'coined',
'coiner',
'coining',
'coins',
'cola',
'colada',
"colada's",
'coladas',
'colby',
'cold',
"cold's",
'colder',
'coldest',
'coldly',
'colds',
'cole',
"cole's",
'coleman',
"coleman's",
'colemans',
'colestra',
'colette',
"colette's",
'colettes',
'colin',
"colin's",
'coliseum',
"coliseum's",
'coliseums',
'collaborate',
'collaboration',
'collage',
'collapse',
'collapsed',
'collapsing',
'collar',
'collard',
'collars',
'collateral',
'collaterals',
'colleague',
'colleagues',
'collect',
"collect's",
'collectable',
'collectables',
'collected',
'collectible',
'collectibles',
'collecting',
'collection',
"collection's",
'collections',
'collective',
'collector',
'collectors',
'collects',
'colleen',
'colleens',
'college',
'colleting',
'collette',
"collette's",
'collettes',
'collide',
'collie',
'collier',
'collision',
"collision's",
'collisions',
'colm',
'cologne',
'colombia',
'colonel',
'colonial',
'colonials',
'colonies',
'colonized',
'colony',
'color',
"color's",
'colorblind',
'colored',
'colorfast',
'colorful',
'colorhost',
'coloring',
'colorless',
'colors',
'colossal',
'colossus',
'colour',
"colour's",
'coloured',
'colourful',
'colouring',
'colours',
'cols',
'colt',
'coltello',
'coltellos',
'colts',
'columbia',
"columbia's",
'columbus',
'column',
'columns',
'coma',
'comatose',
'comb',
"comb's",
'combat',
"combat's",
'combatants',
'combater',
'combative',
'combats',
'combination',
"combination's",
'combinations',
'combine',
'combined',
'combiner',
'combiners',
'combines',
'combing',
'combining',
'combo',
"combo's",
'combos',
'combs',
'combustible',
'combustion',
'come',
'comeback',
'comebacks',
'comedian',
'comedians',
'comedies',
'comedown',
'comedy',
'comely',
'comer',
'comers',
'comes',
'comet',
'comfort',
'comfortable',
'comfortably',
'comforted',
'comforter',
'comforters',
'comforting',
'comforts',
'comfy',
'comic',
"comic's",
'comic-con',
'comical',
'comics',
'coming',
'comings',
'comma',
'command',
"command's",
'commandant',
'commanded',
'commandeer',
'commandeered',
'commandeering',
'commander',
"commander's",
'commanders',
'commanding',
'commando',
'commands',
'commence',
'commencer',
'commences',
'commencing',
'commend',
'commendations',
'comment',
"comment's",
'commentary',
'commented',
'commenter',
'commenting',
'comments',
'commerce',
'commercial',
'commercially',
'commercials',
'commissar',
'commission',
"commission's",
'commissioned',
'commissioner',
'commissioners',
'commissioning',
'commissions',
'commit',
'commitment',
"commitment's",
'commitments',
'commits',
'committed',
'committee',
"committee's",
'committees',
'committing',
'commodore',
"commodore's",
'commodores',
'common',
'commoner',
"commoner's",
'commoners',
'commonest',
'commonly',
'commons',
'commotion',
'communal',
'communicate',
'communicated',
'communicates',
'communicating',
'communication',
'communications',
'communicative',
'communist',
'communities',
'community',
"community's",
'commute',
'como',
'comp',
"comp's",
'compact',
'companies',
'companion',
'companions',
'companionships',
'company',
"company's",
'companying',
'comparable',
'comparatively',
'compare',
'compared',
'comparer',
'compares',
'comparing',
'comparison',
"comparison's",
'comparisons',
'compartments',
'compass',
'compassed',
'compasses',
'compassing',
'compassion',
'compassionate',
'compatibility',
'compatible',
'compel',
'compelled',
'compensate',
'compensates',
'compensating',
'compensation',
'compete',
'competed',
'competence',
'competences',
'competent',
'competes',
'competing',
'competition',
"competition's",
'competitions',
'competitive',
'complain',
'complained',
'complainer',
"complainer's",
'complainers',
'complaining',
'complains',
'complaint',
"complaint's",
'complaints',
'complement',
'complete',
'completed',
'completely',
'completer',
'completes',
'completing',
'completion',
"completion's",
'completions',
'completive',
'complex',
'complexes',
'complexly',
'complicate',
| |
{float: 'float', int: 'short'}
if dtype in dtypes:
dtype = dtypes[dtype]
prefix, ext = afni.split_out_file(out_file)
outputs = {'out_file': f"{prefix}{ext}"}
mean_file = utils.temp_prefix(suffix=ext)
utils.run(f"3dTstat -mean -prefix {mean_file} -overwrite {in_file}")
if mask_file is not None:
utils.run(f"3dcalc -a {in_file} -b {mean_file} -c {mask_file} \
-expr 'min(200,a/b*100)*step(a)*step(b)*c' \
{f'-datum {dtype}' if dtype is not None else ''} \
-prefix {out_file} -overwrite")
else:
utils.run(f"3dcalc -a {in_file} -b {mean_file} \
-expr 'min(200,a/b*100)*step(a)*step(b)' \
{f'-datum {dtype}' if dtype is not None else ''} \
-prefix {outputs['out_file']} -overwrite")
os.remove(mean_file)
all_finished(outputs)
return outputs
def zscore(in_file, out_file):
    '''Voxelwise z-score a time series: (a - mean)/stdev over time, zeroed where mean <= 0.'''
    prefix, ext = afni.split_out_file(out_file)
    outputs = {'out_file': f"{prefix}{ext}"}
    # One 3dTstat pass yields both statistics: sub-brick 0 = mean, sub-brick 1 = stdev.
    stat_file = utils.temp_prefix(suffix=ext)
    utils.run(f"3dTstat -mean -stdev -prefix {stat_file} -overwrite {in_file}")
    utils.run(f"3dcalc -a {in_file} -b {stat_file}'[0]' -c {stat_file}'[1]' \
        -expr '(a-b)/c*step(b)' \
        -prefix {outputs['out_file']} -overwrite")
    os.remove(stat_file)
    all_finished(outputs)
    return outputs
def skullstrip(in_file, out_file=None):
    '''Strip the skull with AFNI 3dSkullStrip, keeping original intensities (-orig_vol).

    If `out_file` is omitted, "_ns" (no skull) is appended to the input prefix.
    '''
    if out_file is None:
        in_prefix, in_ext = afni.split_out_file(in_file)
        out_file = f"{in_prefix}_ns{in_ext}"
    prefix, ext = afni.split_out_file(out_file)
    outputs = {'out_file': f"{prefix}{ext}"}
    utils.run(f"3dSkullStrip -orig_vol -prefix {out_file} -overwrite -input {in_file}")
    all_finished(outputs)
    return outputs
def _ants_sanitize_input(in_file, overwrite=True):
    '''
    It is important that the input is 3D (not 4D) dataset.
    But it is OK that the input is not in ORIG space (e.g., MNI), or RAS+/NIFTI orientation.
    Otherwise antsRegistration will spit WEIRD result, e.g., rotate cerebellum to forehead...

    Returns (out_file, is_temp): `is_temp` is True only when a temporary
    single-volume copy was created (i.e., 4D input and overwrite=False).
    '''
    # Single-volume input needs no sanitizing.
    if afni.get_dims(in_file)[3] <= 1:
        return in_file, False
    print(f'+* WARNING: "{in_file}" contains multiple volumes. Only the first volume will be considered in the registration.')
    out_file = in_file if overwrite else utils.temp_prefix(suffix='.nii')
    is_temp = not overwrite
    # Extract only the first volume.
    utils.run(f"3dTcat -prefix {out_file} -overwrite {in_file}'[0]'", error_pattern='error')
    return out_file, is_temp
def align_ants(base_file, in_file, out_file, strip=None, base_mask=None, in_mask=None,
    base_mask_SyN=None, in_mask_SyN=None, init_transform=None, preset=None, n_jobs=None):
    '''
    Nonlinearly align `in_file` to `base_file` using ANTs' SyN method via antsRegistration.

    Examples
    --------
    1. Align MNI template to T1_al using default preset. Skullstrip T1_al.
       Apply inversed mask (1-mask) to MNI template at the nonlinear (SyN) stage.
    >>> prep.align_ants("T1_al.nii", "MNI152_2009_template.nii.gz", "MNI_al.nii", strip="base", in_mask_SyN="mask.nii -I")
    2. Align T1_ns_al.nii to MNI template using "test" preset.
       For a quick test of the parameters in a few minutes. The result will not be good, but should not be weird.
    >>> prep.align_ants("MNI152_2009_template.nii.gz", "T1_ns_al.nii", "T1_ns_MNI.nii", preset="test")

    Parameters
    ----------
    strip : None | str | list
        None means no stripping. A str is one of 'both'|'base'|'input'|'none'.
        A 2-element list gives per-image behavior (bool, or a callable with
        the same signature as `skullstrip`), for [base, input] respectively.
    base_mask, in_mask : str or None
        Mask for the linear stages. 'strip' derives a mask via `skullstrip`;
        a path ending in ' -I' is inverted (1-mask) before use.
    base_mask_SyN, in_mask_SyN : str or None
        Same as above, but applied at the nonlinear (SyN) stage only.
    init_transform : str or None
        None uses center-of-intensity initialization; 'none'/'identity'
        disables initialization; otherwise passed to --initial-moving-transform.
    preset : str
        None | 'default' | 'test' | 'path/to/my/preset.json'
        For production, just leave it as None or use the 'default' preset (est. time: 3 hr).
        For quick test, use the 'test' preset (est. time: 3 min).
    n_jobs : int or None
        Number of ITK threads; defaults to DEFAULT_JOBS.

    Returns
    -------
    outputs : dict
        outputs['transform'] : ANTsTransform object
            You can apply the forward or inverse transform to other volumes. E.g.,
            >>> outputs['transform'].apply(in_file, out_file)
            >>> outputs['transform'].apply_inverse(in_file, out_file)
    '''
    temp_dir = utils.temp_folder()
    pc = utils.PooledCaller()
    prefix, ext = afni.split_out_file(out_file)
    # File names follow antsRegistration's "--output [prefix_, ...]" convention.
    outputs = {
        'out_file': f"{prefix}{ext}",
        'fwd_affine': f"{prefix}_0GenericAffine.mat",
        'fwd_warp': f"{prefix}_1Warp.nii.gz",
        'inv_warp': f"{prefix}_1InverseWarp.nii.gz",
        'fwd_warped': f"{prefix}_fwd_warped.nii.gz",
        'inv_warped': f"{prefix}_inv_warped.nii.gz",
    }
    # Strip and sanitize base_file or in_file if required
    # Normalize `strip` into a 2-element list of callables: one for base, one for input.
    if strip is None:
        strip = [False, False]
    if isinstance(strip, str):
        strip = {'both': [True, True], 'base': [True, False], 'input': [False, True], 'none': [False, False]}[strip]
    # bool -> skullstrip/copy_dset; non-bool entries are assumed to be callables already.
    strip = [((skullstrip if s else copy_dset) if isinstance(s, bool) else s) for s in strip]
    fixed = f"{temp_dir}/base.nii"
    moving = f"{temp_dir}/input.nii"
    # Strip (or copy) in parallel, then reduce each to a single volume (sanitize
    # depends on the corresponding strip job having finished).
    D1 = pc.run(strip[0], base_file, out_file=fixed)
    D2 = pc.run(strip[1], in_file, out_file=moving)
    pc.run(_ants_sanitize_input, fixed, _depends=[D1])
    pc.run(_ants_sanitize_input, moving, _depends=[D2])
    pc.wait()
    # Prepare masks (e.g., strip, inversion, etc.)
    def populate_mask(mask_file, temp_dir=None, ref_file=None):
        # Resolve a mask spec into a concrete file path (or the string 'None',
        # which antsRegistration accepts as "no mask").
        if mask_file is None:
            out_file = 'None'
        elif mask_file == 'strip':
            # Derive a binary mask by skullstripping the reference image.
            out_file = utils.temp_prefix(prefix=f"{temp_dir}/tmp_", suffix='.nii')
            skullstrip(ref_file, out_file)
            utils.run(f"3dcalc -a {out_file} -expr 'step(a)' -prefix {out_file} -overwrite")
        elif mask_file.endswith(' -I'):
            # Trailing " -I" requests mask inversion (1-a).
            out_file = utils.temp_prefix(prefix=f"{temp_dir}/tmp_", suffix='.nii')
            utils.run(f"3dcalc -a {mask_file[:-3]} -expr '1-a' -prefix {out_file} -overwrite")
        else:
            out_file = mask_file
        return out_file
    pc.run(populate_mask, base_mask, temp_dir, ref_file=base_file)
    pc.run(populate_mask, in_mask, temp_dir, ref_file=in_file)
    pc.run(populate_mask, base_mask_SyN, temp_dir, ref_file=base_file)
    pc.run(populate_mask, in_mask_SyN, temp_dir, ref_file=in_file)
    base_mask, in_mask, base_mask_SyN, in_mask_SyN = pc.wait()
    # Init moving transform
    if init_transform is None:
        # Align the geometric center of the images (=0), the image intensities (=1), or the origin of the images (=2).
        init_moving_cmd = f"--initial-moving-transform [ {fixed}, {moving}, 1 ]"
    elif init_transform in ['none', 'identity']:
        init_moving_cmd = ''
    else:
        init_moving_cmd = f"--initial-moving-transform {init_transform}"
    # Estimate transforms
    os.environ['ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS'] = str(DEFAULT_JOBS) if n_jobs is None else str(n_jobs)
    if preset is None:
        # Hard-coded default pipeline: translation -> rigid -> affine -> SyN.
        # Linear stages use mattes MI; the SyN stage mixes mattes MI and CC,
        # and uses the *_SyN masks instead of the linear-stage masks.
        pc.run1(f"antsRegistration -d 3 --float 1 --verbose \
            --output [ {prefix}_, {outputs['fwd_warped']}, {outputs['inv_warped']} ] \
            --interpolation LanczosWindowedSinc \
            --collapse-output-transforms 1 \
            {init_moving_cmd} \
            --winsorize-image-intensities [0.005,0.995] \
            --use-histogram-matching 1 \
            --transform translation[ 0.1 ] \
            --metric mattes[ {fixed}, {moving}, 1, 32, regular, 0.3 ] \
            --convergence [ 1000x300x100, 1e-6, 10 ] \
            --smoothing-sigmas 4x2x1vox \
            --shrink-factors 8x4x2 \
            --use-estimate-learning-rate-once 1 \
            --masks [ {base_mask}, {in_mask} ] \
            -t rigid[ 0.1 ] \
            -m mattes[ {fixed}, {moving}, 1, 32, regular, 0.3 ] \
            -c [ 1000x300x100, 1e-6, 10 ] \
            -s 4x2x1vox \
            -f 4x2x1 -l 1 \
            -x [ {base_mask}, {in_mask} ] \
            -t affine[ 0.1 ] \
            -m mattes[ {fixed}, {moving}, 1, 32, regular, 0.3 ] \
            -c [ 1000x300x100, 1e-6, 10 ] \
            -s 2x1x0vox \
            -f 4x2x1 -l 1 \
            -x [ {base_mask}, {in_mask} ] \
            -t SyN[ 0.1, 3, 0 ] \
            -m mattes[ {fixed}, {moving}, 0.5 , 32 ] \
            -m cc[ {fixed}, {moving}, 0.5 , 4 ] \
            -c [ 100x100x50, 1e-6, 10 ] \
            -s 1x0.5x0vox \
            -f 4x2x1 -l 1 \
            -x [ {base_mask_SyN}, {in_mask_SyN} ]", _error_pattern='error')
    else:
        if isinstance(preset, str):
            if not preset.endswith('.json'): # Built-in preset (located in mripy/data/align_ants_presets)
                preset = f"{utils.package_dir}/data/align_ants_presets/{preset}.json"
            # Otherwise should be the fullpath to a custom .json file
            with open(preset) as json_file:
                preset = json.load(json_file)
        # `preset` is now a dict
        # Generate antsRegistration command line
        cmd = f"antsRegistration -d {preset['dimension']} --float 1 --verbose \
            --output [ {prefix}_, {outputs['fwd_warped']}, {outputs['inv_warped']} ] \
            --interpolation {preset['interpolation']} \
            --collapse-output-transforms 1 \
            --write-composite-transform {int(preset['write_composite_transform'])} \
            {init_moving_cmd} \
            --winsorize-image-intensities [ {preset['winsorize_lower_quantile']}, {preset['winsorize_upper_quantile']} ] "
        # One --transform/--metric/--convergence group per stage listed in the preset.
        for k in range(len(preset['transforms'])):
            cmd += f"--transform {preset['transforms'][k]}[ {', '.join([f'{x:g}' for x in preset['transform_parameters'][k]])} ] \
                --metric {preset['metric'][k]}[ {fixed}, {moving}, {preset['metric_weight'][k]}, {preset['radius_or_number_of_bins'][k]}, {preset['sampling_strategy'][k]}, {preset['sampling_percentage'][k]} ] \
                --convergence [ {'x'.join([str(int(x)) for x in preset['number_of_iterations'][k]])}, {preset['convergence_threshold'][k]}, {preset['convergence_window_size'][k]} ] \
                --smoothing-sigmas {'x'.join([str(int(x)) for x in preset['smoothing_sigmas'][k]])}{preset['sigma_units'][k]} \
                --shrink-factors {'x'.join([str(int(x)) for x in preset['shrink_factors'][k]])} \
                --use-histogram-matching {int(preset['use_histogram_matching'][k])} \
                --use-estimate-learning-rate-once {int(preset['use_estimate_learning_rate_once'][k])} \
                --masks [ {base_mask_SyN if preset['transforms'][k].lower()=='syn' else base_mask}, {in_mask_SyN if preset['transforms'][k].lower()=='syn' else in_mask} ] "
        pc.run1(cmd, _error_pattern='error')
    # Apply transforms
    apply_ants([outputs['fwd_warp'], outputs['fwd_affine']], base_file, in_file, outputs['out_file'])
    shutil.rmtree(temp_dir)
    all_finished(outputs)
    # Bundle a reusable transform object (plus a JSON sidecar) for later use.
    outputs['base_file'] = base_file
    outputs['in_file'] = in_file
    outputs['transform'] = ANTsTransform.from_align_ants(outputs)
    outputs['transform_file'] = f"{prefix}_transform.json"
    outputs['transform'].to_json(outputs['transform_file'])
    outputs['pc'] = pc
    return outputs
def apply_ants(transforms, base_file, in_file, out_file, interp=None, dim=None, image_type=None, n_jobs=None):
'''
Parameters
----------
transforms : list of file names
Online matrix inversion is supported as `*_0GenericAffine.mat -I`.
base_file : str
If None, apply transforms to point list using `antsApplyTransformsToPoints`,
and `in_file` is expected to be a `*.csv` file.
Otherwise, apply transforms to image using `antsApplyTransforms`.
interp : str
'LanczosWindowedSinc', 'NearestNeighbor', 'Linear', 'BSpline[<order=3>]', etc.
Note that for volumes, last transform applies first (pulling from base grid),
as in AFNI, as well as in ANTs command line.
However for point lists, FIRST transform applies first (pushing input points),
and INVERSE transforms should be used compared with volume case, as in ANTs.
'''
if interp is None:
interp = 'LanczosWindowedSinc'
prefix, ext = afni.split_out_file(out_file)
outputs = {'out_file': f"{prefix}{ext}"}
xform_cmds = []
for transform in transforms:
if transform.endswith(' -I'):
xform_cmds.append(f"-t [ {transform[:-3]}, 1 ]")
else:
xform_cmds.append(f"-t {transform}")
if n_jobs is not None:
os.environ['ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS'] = str(n_jobs)
if base_file is None or path.splitext(in_file)[1] in ['.csv']:
# Apply transforms to point list (e.g., surface mesh)
data = np.loadtxt(in_file, skiprows=1, delimiter=',')
if dim is None:
dim = 4 if any(data[:,3]) else 3
utils.run(f"antsApplyTransformsToPoints --precision 0 \
-i {in_file} -d {dim} \
-o {outputs['out_file']} \
{' '.join(xform_cmds)}")
else: # Apply transforms to image (e.g., 3D volume)
if dim is None:
dim = 3
if image_type is None: # 0/1/2/3 for scalar/vector/tensor/time-series
image_type = 3 if afni.get_dims(in_file)[3] > 1 else 0
# Make sure the image is timeseries, rather than afni bucket, during transform
is_bucket = (io.get_dim_order(in_file) == 'bucket')
if is_bucket:
io.change_dim_order(in_file, dim_order='timeseries', method='afni') # Change in_file into timeseries
# Sanitize base_file
sanitized_base, is_temp = _ants_sanitize_input(base_file, overwrite=False)
# Apply transforms
utils.run(f"antsApplyTransforms | |
* t_values[37]))
+ (R_values[38] / (1 + w * 1j * t_values[38]))
+ (R_values[39] / (1 + w * 1j * t_values[39]))
+ (R_values[40] / (1 + w * 1j * t_values[40]))
+ (R_values[41] / (1 + w * 1j * t_values[41]))
+ (R_values[42] / (1 + w * 1j * t_values[42]))
+ (R_values[43] / (1 + w * 1j * t_values[43]))
+ (R_values[44] / (1 + w * 1j * t_values[44]))
+ (R_values[45] / (1 + w * 1j * t_values[45]))
)
def KK_RC47(w, Rs, R_values, t_values):
    """
    Kramers-Kronig Function: -RC-
    <NAME> (<EMAIL> / <EMAIL>)

    Series resistance Rs plus 47 parallel RC elements evaluated at
    angular frequency w. Accumulation is left-to-right, matching the
    original explicit term-by-term expansion.
    """
    z = Rs
    for k in range(47):
        z = z + R_values[k] / (1 + w * 1j * t_values[k])
    return z
def KK_RC48(w, Rs, R_values, t_values):
    """
    Kramers-Kronig Function: -RC-
    <NAME> (<EMAIL> / <EMAIL>)

    Series resistance Rs plus 48 parallel RC elements at angular
    frequency w. sum() with start=Rs folds the terms left-to-right,
    reproducing the original expansion exactly.
    """
    return sum((R_values[k] / (1 + w * 1j * t_values[k]) for k in range(48)), Rs)
def KK_RC49(w, Rs, R_values, t_values):
"""
Kramers-Kronig Function: -RC-
<NAME> (<EMAIL> / <EMAIL>)
"""
return (
Rs
+ (R_values[0] / (1 + w * 1j * t_values[0]))
+ (R_values[1] / (1 + w * 1j * t_values[1]))
+ (R_values[2] / (1 + w * 1j * t_values[2]))
+ (R_values[3] / (1 + w * 1j * t_values[3]))
+ (R_values[4] / (1 + w * 1j * t_values[4]))
+ (R_values[5] / (1 + w * 1j * t_values[5]))
+ (R_values[6] / (1 + w * 1j * t_values[6]))
+ (R_values[7] / (1 + w * 1j * t_values[7]))
+ (R_values[8] / (1 + w * 1j * t_values[8]))
+ (R_values[9] / (1 + w * 1j * t_values[9]))
+ (R_values[10] / (1 + w * 1j * t_values[10]))
+ (R_values[11] / (1 + w * 1j * t_values[11]))
+ (R_values[12] / (1 + w * 1j * t_values[12]))
+ (R_values[13] / (1 + w * 1j * t_values[13]))
+ (R_values[14] / (1 + w * 1j * t_values[14]))
+ (R_values[15] / (1 + w * 1j * t_values[15]))
+ (R_values[16] / (1 + w * 1j * t_values[16]))
+ (R_values[17] / (1 + w * 1j * t_values[17]))
+ (R_values[18] / (1 + w * 1j * t_values[18]))
+ (R_values[19] / (1 + w * | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of web2py Web Framework (Copyrighted, 2007-2009).
Developed by <NAME> <<EMAIL>>.
License: GPL v2
Holds:
- SQLFORM: provide a form for a table (with/without record)
- SQLTABLE: provides a table for a set of records
- form_factory: provides a SQLFORM for an non-db backed table
"""
from http import HTTP
from html import *
from validators import *
from sql import SQLDB, Table, KeyedTable, Row
from storage import Storage
import logging
import urllib
import re
import sys
import os
import cStringIO
import copy
import base64
# Matches "tablename.fieldname" references. Raw string avoids the invalid
# escape sequences ('\w', '\.') that a plain string literal would carry.
table_field = re.compile(r'[\w_]+\.[\w_]+')
class FormWidget(object):
    """
    helper for SQLFORM to generate form input fields (widget),
    related to the fieldtype
    """

    @staticmethod
    def _attributes(field, widget_attributes, **attributes):
        """
        helper to build a common set of attributes

        :param field: the field involved, some attributes are derived from this
        :param widget_attributes: widget related attributes
        :param attributes: any other supplied attributes
        """
        # Field-derived defaults come first; widget-level entries override
        # them, and caller-supplied keyword attributes override both.
        attr = {
            '_id': '%s_%s' % (field._tablename, field.name),
            '_class': field.type,
            '_name': field.name,
            'requires': field.requires,
            }
        for extra in (widget_attributes, attributes):
            attr.update(extra)
        return attr

    @staticmethod
    def widget(field, value, **attributes):
        """
        generates the widget for the field.

        When serialized, will provide an INPUT tag:

        - id = tablename_fieldname
        - class = field.type
        - name = fieldname

        :param field: the field needing the widget
        :param value: value
        :param attributes: any other attributes to be applied
        """
        # Concrete widgets (StringWidget, TextWidget, ...) must override this.
        raise NotImplementedError
class StringWidget(FormWidget):

    @staticmethod
    def widget(field, value, **attributes):
        """
        generates an INPUT text tag.

        see also: :meth:`FormWidget.widget`
        """
        # Render None as an empty string. Identity comparison (is not None)
        # replaces the original "value!=None" (PEP 8 E711), which could be
        # subverted by a value type overriding __ne__.
        default = dict(
            _type = 'text',
            value = str(value) if value is not None else '',
            )
        attr = StringWidget._attributes(field, default, **attributes)
        return INPUT(**attr)
class IntegerWidget(StringWidget):
    """Plain text INPUT; integer validation is left to the field's requires."""
    pass

class DoubleWidget(StringWidget):
    """Plain text INPUT; float validation is left to the field's requires."""
    pass

class TimeWidget(StringWidget):
    """Plain text INPUT; time parsing/validation is left to the field's requires."""
    pass

class DateWidget(StringWidget):
    """Plain text INPUT; date parsing/validation is left to the field's requires."""
    pass

class DatetimeWidget(StringWidget):
    """Plain text INPUT; datetime parsing/validation is left to the field's requires."""
    pass
class TextWidget(FormWidget):

    @staticmethod
    def widget(field, value, **attributes):
        """
        generates a TEXTAREA tag.

        see also: :meth:`FormWidget.widget`
        """
        # The textarea content travels in 'value'; everything else comes
        # from the shared attribute builder.
        attr = TextWidget._attributes(field, {'value': value}, **attributes)
        return TEXTAREA(**attr)
class BooleanWidget(FormWidget):

    @staticmethod
    def widget(field, value, **attributes):
        """
        generates an INPUT checkbox tag.

        see also: :meth:`FormWidget.widget`
        """
        defaults = {'_type': 'checkbox', 'value': value}
        attr = BooleanWidget._attributes(field, defaults, **attributes)
        return INPUT(**attr)
class OptionsWidget(FormWidget):

    @staticmethod
    def has_options(field):
        """
        checks if the field has selectable options

        :param field: the field needing checking
        :returns: True if the field has options
        """
        # Either the validator itself exposes options(), or it is an
        # IS_EMPTY_OR wrapper whose inner (.other) validator does.
        return hasattr(field.requires, 'options')\
            or isinstance(field.requires, IS_EMPTY_OR)\
            and hasattr(field.requires.other, 'options')

    @staticmethod
    def widget(field, value, **attributes):
        """
        generates a SELECT tag, including OPTIONs (only 1 option allowed)

        see also: :meth:`FormWidget.widget`

        :raises SyntaxError: if the field's validator exposes no options
        """
        default = dict(
            value=value,
            )
        attr = OptionsWidget._attributes(field, default, **attributes)
        requires = field.requires
        if not isinstance(requires, (list, tuple)):
            requires = [requires]
        if requires:
            requires0 = requires[0]
            if isinstance(requires0, IS_EMPTY_OR)\
               and hasattr(requires0.other, 'options'):
                # Optional field: prepend a blank choice.
                opts = [OPTION(_value='')]
                options = requires0.other.options()
            elif hasattr(requires0, 'options'):
                opts = []
                options = requires0.options()
            else:
                # Call form is valid in both Python 2 and 3, unlike the
                # original "raise SyntaxError, msg" statement syntax.
                raise SyntaxError('widget cannot determine options of %s'
                                  % field)
        opts += [OPTION(v, _value=k) for (k, v) in options]
        return SELECT(*opts, **attr)
class MultipleOptionsWidget(OptionsWidget):

    @staticmethod
    def widget(field, value, size=5, **attributes):
        """
        generates a SELECT tag, including OPTIONs (multiple options allowed)

        see also: :meth:`FormWidget.widget`

        :param size: optional param (default=5) to indicate how many rows must
            be shown
        """
        # Force multi-select mode and the requested visible row count,
        # then delegate to the single-select widget.
        attributes['_size'] = size
        attributes['_multiple'] = True
        return OptionsWidget.widget(field, value, **attributes)
class RadioWidget(OptionsWidget):

    @staticmethod
    def widget(field, value, **attributes):
        """
        generates a TABLE tag, including INPUT radios (only 1 option allowed)

        see also: :meth:`FormWidget.widget`

        :raises SyntaxError: if the field's validator exposes no options
        """
        attr = OptionsWidget._attributes(field, {}, **attributes)
        if isinstance(field.requires, IS_EMPTY_OR)\
           and hasattr(field.requires.other, 'options'):
            # Optional field: start with a blank radio choice.
            opts = [TR(INPUT(_type='radio', _name=field.name,
                             _value='', value=value), '')]
            options = field.requires.other.options()
        elif hasattr(field.requires, 'options'):
            opts = []
            options = field.requires.options()
        else:
            # Call form is valid in both Python 2 and 3, unlike the
            # original "raise SyntaxError, msg" statement syntax.
            raise SyntaxError('widget cannot determine options of %s' % field)
        opts += [TR(INPUT(_type='radio', _name=field.name,
                          requires=attr.get('requires', None),
                          _value=k, value=value), v) for (k, v) in options]
        return TABLE(*opts, **attr)
class CheckboxesWidget(OptionsWidget):

    @staticmethod
    def widget(field, value, **attributes):
        """
        generates a TABLE tag, including INPUT checkboxes (multiple allowed)

        see also: :meth:`FormWidget.widget`

        :raises SyntaxError: if the field's validator exposes no options
        """
        # Split the stored value into tokens; raw string avoids the invalid
        # escape sequences a plain regex literal would carry.
        values = re.compile(r'[\w\-:]+').findall(str(value))
        attr = OptionsWidget._attributes(field, {}, **attributes)
        if hasattr(field.requires, 'options'):
            opts = []
            options = field.requires.options()
        else:
            # Call form is valid in both Python 2 and 3, unlike the
            # original "raise SyntaxError, msg" statement syntax.
            raise SyntaxError('widget cannot determine options of %s' % field)
        # One row per option; a checkbox is pre-checked when its key is
        # among the currently stored values.
        opts += [TR(INPUT(_type='checkbox', _name=field.name,
                          requires=attr.get('requires', None),
                          _value=k, value=(k in values)), v)
                 for (k, v) in options]
        return TABLE(*opts, **attr)
class PasswordWidget(FormWidget):

    DEFAULT_PASSWORD_DISPLAY = 8 * ('*')

    @staticmethod
    def widget(field, value, **attributes):
        """
        generates a INPUT password tag.

        If a value is present it will be shown as a number of '*', not related
        to the length of the actual value.

        see also: :meth:`FormWidget.widget`
        """
        # Never echo the real password back; show a fixed-width mask instead.
        masked = PasswordWidget.DEFAULT_PASSWORD_DISPLAY if value else ''
        attr = PasswordWidget._attributes(
            field, {'_type': 'password', '_value': masked}, **attributes)
        return INPUT(**attr)
class UploadWidget(FormWidget):
    DEFAULT_WIDTH = '150px'
    ID_DELETE_SUFFIX = '__delete'
    GENERIC_DESCRIPTION = 'file'

    @staticmethod
    def widget(field, value, download_url=None, **attributes):
        """
        generates a INPUT file tag.
        Optionally provides an A link to the file, including a checkbox so
        the file can be deleted.
        All is wrapped in a DIV.
        see also: :meth:`FormWidget.widget`
        :param download_url: Optional URL to link to the file (default = None)
        """
        attr = UploadWidget._attributes(field, dict(_type='file'), **attributes)
        inp = INPUT(**attr)
        if not (download_url and value):
            # nothing uploaded yet (or no way to link it): bare input only
            return inp
        url = download_url + '/' + value
        br, image = '', ''
        if UploadWidget.is_image(value):
            br = BR()
            image = IMG(_src = url, _width = UploadWidget.DEFAULT_WIDTH)
        # input + link + delete checkbox (+ inline preview for images)
        return DIV(inp, '[',
                   A(UploadWidget.GENERIC_DESCRIPTION, _href = url), '|',
                   INPUT(_type='checkbox',
                         _name=field.name + UploadWidget.ID_DELETE_SUFFIX),
                   'delete]', br, image)

    @staticmethod
    def represent(field, value, download_url=None):
        """
        how to represent the file:

        - with download url and if it is an image: <A href=...><IMG ...></A>
        - otherwise with download url: <A href=...>file</A>
        - otherwise: file

        :param field: the field
        :param value: the field value
        :param download_url: url for the file download (default = None)
        """
        if not (download_url and value):
            return UploadWidget.GENERIC_DESCRIPTION
        url = download_url + '/' + value
        if UploadWidget.is_image(value):
            return A(IMG(_src = url, _width = UploadWidget.DEFAULT_WIDTH),
                     _href = url)
        return A(UploadWidget.GENERIC_DESCRIPTION, _href = url)

    @staticmethod
    def is_image(value):
        """
        Tries to check if the filename provided references to an image
        Checking is based on filename extension. Currently recognized:
        gif, png, jp(e)g, bmp
        :param value: filename
        """
        extension = value.split('.')[-1].lower()
        return extension in ('gif', 'png', 'jpg', 'jpeg', 'bmp')
class SQLFORM(FORM):
"""
SQLFORM is used to map a table (and a current record) into an HTML form
given a SQLTable stored in db.table
generates an insert form::
SQLFORM(db.table)
generates an update form::
record=db(db.table.id==some_id).select()[0]
SQLFORM(db.table, record)
generates an update with a delete button::
SQLFORM(db.table, record, deletable=True)
if record is an int::
record=db(db.table.id==record).select()[0]
optional arguments:
:param fields: a list of fields that should be placed in the form,
default is all.
:param labels: a dictionary with labels for each field, keys are the field
names.
:param col3: a dictionary with content for an optional third column
(right of each field). keys are field names.
:param linkto: the URL of a controller/function to access referencedby
records
see controller appadmin.py for examples
:param upload: the URL of a controller/function to download an uploaded file
see controller appadmin.py for examples
any named optional attribute is passed to the <form> tag
for example _class, _id, _style, _action, _method, etc.
"""
# usability improvements proposal by fpp - 4 May 2008 :
# - correct labels (for points to field id, not field name)
# - add label for delete checkbox
# - add translatable label for record ID
# - add third column to right of fields, populated from the col3 dict
widgets = Storage(dict(
string = StringWidget,
text = TextWidget,
password = <PASSWORD>,
integer = IntegerWidget,
double = DoubleWidget,
time = TimeWidget,
date = DateWidget,
datetime = DatetimeWidget,
upload = UploadWidget,
boolean = BooleanWidget,
blob = None,
options = OptionsWidget,
multiple = MultipleOptionsWidget,
radio = RadioWidget,
checkboxes = CheckboxesWidget,
))
FIELDNAME_REQUEST_DELETE = 'delete_this_record'
FIELDKEY_DELETE_RECORD = 'delete_record'
def __init__(
self,
table,
record = None,
deletable = False,
linkto = None,
upload = None,
fields = None,
labels = None,
col3 = {},
submit_button = 'Submit',
delete_label = 'Check to delete:',
showid = True,
readonly = False,
comments = True,
keepopts = [],
ignore_rw = False,
**attributes
):
"""
SQLFORM(db.table,
record=None,
fields=['name'],
labels={'name': '<NAME>'},
linkto=URL(r=request, f='table/db/')
"""
ID_LABEL_SUFFIX = 'label'
ID_ROW_SUFFIX = 'row'
self.ignore_rw = ignore_rw
nbsp = XML(' ') # Firefox2 does not display fields with blanks
FORM.__init__(self, *[], **attributes)
ofields = fields
keyed = isinstance(table,KeyedTable)
# if no fields | |
% (
dbid, current["entitytype"], network_type, throughput)))
update_virtual_resoures_tree_network(db, current["parententityid"], network_type, slice_id, class_id,
reset_flag, LOG=LOG)
except:
cloud_utils.log_exception(sys.exc_info(), LOG=LOG)
# Entity types of the network services whose throughput counts toward the
# per-VDC "deployed" network resources; other entity types are skipped by
# the resource-accounting loops below.
process_network_services = [
    "nat_network_service",
    "rts_network_service",
    "fws_network_service",
    "lbs_network_service",
    "ipsecvpn_network_service",
    "nms_network_service",
]
def update_network_resources(db, slice_id, vdc, LOG=LOG):
    """Recompute the 'deployed' network throughput rows for one VDC.

    Walks every active network service under *vdc*, sums the throughput
    reported in each service's stored REST response (scaled by the live
    instance count), upserts per-class/per-service rows in
    tblResourcesNetwork, propagates the change up the virtual-resources
    tree, and refreshes the slice-level aggregate row.

    :param db: project database handle
    :param slice_id: slice whose aggregate row is refreshed
    :param vdc: entity row (dict-like) of the VDC being updated
    """
    try:
        class_resources = setup_resource_record()
        for service in get_next_service(db, vdc["id"]):
            # only known, currently-active network services are counted
            if service["entitytype"] not in process_network_services:
                continue
            # if service["servicetype"] not in throughputs:
            #     throughputs[service["servicetype"]] = 0
            if service["entitystatus"].lower() != "active":
                continue
            uris_row = db.get_row("tblUris", "tblEntities = %s AND deleted=0 " % service["id"])
            if not uris_row or "rest_response" not in uris_row:
                continue
            try:
                rest_response = ujson.loads(uris_row["rest_response"])
            except:
                # unparsable REST payload - skip this service
                continue
            throughput = 0
            if "params" in rest_response and "throughput" in rest_response["params"]:
                throughput = rest_response["params"]["throughput"]
            if "service_status" in rest_response and "current_instances_count" in rest_response["service_status"]:
                # scale per-instance throughput by the live instance count
                throughput = throughput * rest_response["service_status"]["current_instances_count"]
            add_network_resources(class_resources, service, throughput=throughput,
                                  maximum_throughput=(service["throughput"] * service["maxinstancescount"]))
        for class_id in class_resources["network_resources"]:
            resources = class_resources["network_resources"][class_id]
            for service_type in resources:
                throughput = resources[service_type]["throughput"]
                if throughput == 0:
                    continue
                current = cloud_utils.lower_key(
                    db.get_row_dict("tblResourcesNetwork",
                                    {"tblEntities": vdc["id"], "catagory": "deployed", "type": service_type,
                                     "networkclassesid": class_id, "sliceid": slice_id},
                                    order="ORDER BY id LIMIT 1"))
                if not current or current["throughput"] != throughput:
                    cloud_utils.update_or_insert(db, "tblResourcesNetwork", {"tblentities": vdc["id"],
                                                                             "throughput": throughput,
                                                                             "catagory": "deployed",
                                                                             "type": service_type,
                                                                             "networkclassesid": class_id,
                                                                             "sliceid": slice_id},
                                                 {"tblentities": vdc["id"], "type": service_type,
                                                  "catagory": "deployed", "networkclassesid": class_id, })
                if not current:
                    # row was just inserted - re-read it so the tree walk
                    # below has parententityid/entitytype to work with
                    current = cloud_utils.lower_key(db.get_row_dict("tblResourcesNetwork",
                                                                    {"tblEntities": vdc["id"],
                                                                     "catagory": "deployed", "type": service_type,
                                                                     "networkclassesid": class_id,
                                                                     "sliceid": slice_id},
                                                                    order="ORDER BY id LIMIT 1"))
                LOG.info(_("Network resources updated for dbid:%s sliceid: %s EntityType:%s "
                           "network type %s: throughput:%s" % (
                               vdc["id"], slice_id, current["entitytype"], service_type, throughput)))
                update_virtual_resoures_tree_network(db, current["parententityid"], service_type, slice_id,
                                                     class_id, LOG=LOG)
                sums = db.execute_db("SELECT sum(throughput) FROM tblResourcesNetwork "
                                     " WHERE entitytype='vdc' and type = '%s' and catagory = 'deployed' and sliceid =%s "
                                     "and networkclassesid = %s" % (service_type, slice_id, class_id))
                # NOTE(review): this "return" aborts the whole function,
                # skipping any remaining class/service combinations -
                # confirm a "continue" was not intended here.
                if not sums or not isinstance(sums, tuple) or "sum(throughput)" not in sums[0] or sums[0][
                        "sum(throughput)"] is None:
                    return
                throughput = int(sums[0]["sum(throughput)"])
                cloud_utils.update_or_insert(db, "tblResourcesNetwork",
                                             {"tblentities": slice_id, "throughput": throughput,
                                              "type": service_type, "catagory": "deployed",
                                              "networkclassesid": class_id},
                                             {"tblentities": slice_id, "type": service_type, "catagory": "deployed",
                                              "networkclassesid": class_id})
    except:
        # project-wide pattern: log and swallow so resource accounting
        # never kills the caller
        cloud_utils.log_exception(sys.exc_info(), LOG=LOG)
def old_reset_network_resources(db, vdc, LOG=LOG):
    """Zero out the 'deployed' network rows of *vdc* (legacy variant).

    Builds a zeroed resource record for every known network service of
    the VDC, then rewrites each existing deployed row whose throughput
    differs, propagating the reset up the tree and into slice totals.
    Superseded by reset_network_resources(), which deletes rows instead.
    """
    try:
        class_resources = setup_resource_record()
        for service in get_next_service(db, vdc["id"]):
            if service["entitytype"] not in process_network_services:
                continue
            # override=True forces a zero entry even if one already exists
            add_network_resources(class_resources, service, throughput=0, maximum_throughput=0, override=True)
        for class_id in class_resources["network_resources"]:
            resources = class_resources["network_resources"][class_id]
            for service_type in resources:
                current_id = 0
                throughput = resources[service_type]["throughput"]
                # walk matching rows one at a time in ascending id order
                while True:
                    current = cloud_utils.lower_key(
                        db.get_row("tblResourcesNetwork",
                                   "id > %s AND tblEntities = %s AND catagory = 'deployed' AND "
                                   "type = '%s' and networkclassesid = %s " % (
                                       current_id, vdc["id"], service_type, class_id),
                                   order="ORDER BY id LIMIT 1"))
                    if not current:
                        break
                    current_id = current["id"]
                    if current["throughput"] == throughput:
                        continue
                    slice_id = current["sliceid"]
                    cloud_utils.update_or_insert(db, "tblResourcesNetwork",
                                                 {"tblentities": vdc["id"], "networkclassesid": class_id,
                                                  "throughput": throughput, "catagory": "deployed",
                                                  "type": service_type, "sliceid": 0},
                                                 {"tblentities": vdc["id"], "type": service_type,
                                                  "catagory": "deployed", "networkclassesid": class_id})
                    if current["parententityid"] != 0:
                        update_virtual_resoures_tree_network(db, current["parententityid"], service_type, slice_id,
                                                             class_id, reset_flag=True)
                    update_network_slice_resources(db, service_type, slice_id, class_id, LOG=LOG)
    except:
        cloud_utils.log_exception(sys.exc_info(), LOG=LOG)
def reset_network_resources(db, vdc, LOG=LOG):
    """Delete every 'deployed' network row of *vdc* and re-aggregate.

    For each deleted row the virtual-resources tree is walked with
    reset_flag=True and the owning slice's aggregate is recomputed.
    """
    try:
        current_id = 0
        # walk the VDC's deployed rows one at a time in ascending id order
        while True:
            current = cloud_utils.lower_key(db.get_row("tblResourcesNetwork",
                                                       "id > %s AND tblEntities = %s AND catagory = 'deployed' " % (
                                                           current_id, vdc["id"]),
                                                       order="ORDER BY id LIMIT 1"))
            if not current:
                break
            current_id = current["id"]
            db.delete_row_id("tblResourcesNetwork", current["id"])
            if current["parententityid"] != 0:
                update_virtual_resoures_tree_network(db, current["parententityid"], current["type"], current["sliceid"],
                                                     current["networkclassesid"], reset_flag=True)
            update_network_slice_resources(db, current["type"], current["sliceid"], current["networkclassesid"],
                                           LOG=LOG)
    except:
        cloud_utils.log_exception(sys.exc_info(), LOG=LOG)
def update_network_slice_resources(db, service_type, slice_id, class_id, LOG=LOG):
    """Recompute the slice-level 'deployed' network throughput aggregate.

    Sums deployed throughput over all VDCs in *slice_id* for the given
    service type and network class, then upserts the slice's own row.
    A slice_id of 0 means "no slice" and is a no-op.
    """
    try:
        if slice_id == 0:
            return
        sums = db.execute_db("SELECT sum(throughput) FROM tblResourcesNetwork "
                             " WHERE entitytype='vdc' and type = '%s' and "
                             "catagory = 'deployed' and sliceid =%s and networkclassesid = %s" % (
                                 service_type, slice_id, class_id))
        # bail out unless the aggregate came back non-NULL
        if not sums or not isinstance(sums, tuple) or "sum(throughput)" not in sums[0] or sums[0][
                "sum(throughput)"] is None:
            return
        sthroughput = int(sums[0]["sum(throughput)"])
        cloud_utils.update_or_insert(db, "tblResourcesNetwork", {"tblentities": slice_id, "throughput": sthroughput,
                                                                 "type": service_type, "catagory": "deployed",
                                                                 "networkclassesid": class_id},
                                     {"tblentities": slice_id, "type": service_type, "catagory": "deployed",
                                      "networkclassesid": class_id})
    except:
        cloud_utils.log_exception(sys.exc_info(), LOG=LOG)
def update_compute_slice_resources(db, slice_id, class_id, LOG=LOG):
    """Recompute the slice-level 'deployed' compute aggregate.

    Sums deployed cpu/ram/network over all VDCs in *slice_id* for the
    given compute class and upserts the slice's own row. A slice_id of 0
    means "no slice" and is a no-op.
    """
    try:
        # Update slice resources
        if slice_id == 0:
            return
        sums = db.execute_db("SELECT sum(cpu), sum(ram), sum(network) FROM tblResourcesCompute "
                             " WHERE entitytype='vdc' and catagory = 'deployed' and sliceid =%s and computeclassesid = %s " % (
                                 slice_id, class_id))
        # bail out unless all three aggregates came back non-NULL
        if not sums or not isinstance(sums, tuple) or "sum(cpu)" not in sums[0] or \
                "sum(ram)" not in sums[0] or "sum(network)" not in sums[0] or sums[0]["sum(cpu)"] is None or \
                sums[0]["sum(ram)"] is None or sums[0]["sum(network)"] is None:
            return
        scpu = int(sums[0]["sum(cpu)"])
        sram = int(sums[0]["sum(ram)"])
        snetwork = int(sums[0]["sum(network)"])
        cloud_utils.update_or_insert(db, "tblResourcesCompute", {"tblentities": slice_id, "computeclassesid": class_id,
                                                                 "cpu": scpu, "ram": sram, "network": snetwork,
                                                                 "catagory": "deployed", },
                                     {"tblentities": slice_id, "catagory": "deployed", "computeclassesid": class_id})
    except:
        cloud_utils.log_exception(sys.exc_info(), LOG=LOG)
def update_storage_slice_resources(db, slice_id, class_id, LOG=LOG):
    """Recompute the slice-level 'deployed' storage aggregate.

    Sums deployed capacity/iops/network over all VDCs in *slice_id* for
    the given storage class and upserts the slice's own row. A slice_id
    of 0 means "no slice" and is a no-op.
    """
    try:
        if slice_id == 0:
            return
        sums = db.execute_db("SELECT sum(capacity), sum(iops), sum(network) FROM tblResourcesStorage "
                             " WHERE entitytype='vdc' and storageclassesid= %s and catagory = 'deployed' and sliceid ='%s' " % (
                                 class_id, slice_id))
        # bail out unless all three aggregates came back non-NULL
        if not sums or not isinstance(sums, tuple) or "sum(capacity)" not in sums[0] \
                or "sum(iops)" not in sums[0] or "sum(network)" not in sums[0] \
                or sums[0]["sum(capacity)"] is None or sums[0]["sum(iops)"] is None or sums[0]["sum(network)"] is None:
            return
        capacity = int(sums[0]["sum(capacity)"])
        iops = int(sums[0]["sum(iops)"])
        network = int(sums[0]["sum(network)"])
        cloud_utils.update_or_insert(db, "tblResourcesStorage",
                                     {"tblentities": slice_id, "capacity": capacity, "iops": iops,
                                      "storageclassesid": class_id, "network": network, "catagory": "deployed"},
                                     {"tblentities": slice_id, "storageclassesid": class_id, "catagory": "deployed"})
    except:
        cloud_utils.log_exception(sys.exc_info(), LOG=LOG)
def get_slice(db, entity):
    """Return the slice id recorded in the first tblUris row of *entity*,
    or None when the entity has no URI row."""
    uri_row = cloud_utils.lower_key(
        db.get_row_dict("tblUris", {"tblEntities": entity["id"]}, order="ORDER BY id LIMIT 1"))
    return uri_row["tblslices"] if uri_row else None
def update_vdc_resources(db, LOG=LOG):
    """Refresh deployed compute/storage/network resources for every VDC.

    Active VDCs get all three resource kinds recomputed; inactive VDCs
    that still own active containers only get storage recomputed.
    VDCs without a slice mapping in tblUris are skipped.
    """
    for entity in cloud_utils.get_entity(db, "vdc"):
        if entity["entitymode"].lower() == "active":
            slice_id = get_slice(db, entity)
            if not slice_id:
                continue
            update_compute_resources(db, slice_id, entity, LOG=LOG)
            update_storage_resources(db, slice_id, entity, LOG=LOG)
            update_network_resources(db, slice_id, entity, LOG=LOG)
        else:
            count = db.get_rowcount("tblEntities", "EntityType = 'container' AND deleted=0 AND "
                                                   "EntityStatus = 'Active' AND parententityid = '%s'" % entity["id"])
            if count > 0:
                slice_id = get_slice(db, entity)
                if not slice_id:
                    continue
                update_storage_resources(db, slice_id, entity, LOG=LOG)
def update_destination_port_name(db, dbid, name):
    """Rename every entity whose service port targets entity *dbid*.

    NOTE(review): *name* is interpolated straight into the SQL string -
    safe only while callers pass trusted names; switch to a parameterized
    query if db.execute_db supports one (confirm).
    """
    db.execute_db("UPDATE tblEntities JOIN tblServicePorts SET tblEntities.name='%s' "
                  " WHERE ( tblServicePorts.tblEntities = tblEntities.id AND "
                  " tblServicePorts.DestinationServiceEntityId = '%s') " % (name, dbid))
def log_entity_message(db, dbid, msg, entity=None, created_at=None, source=None, type="Info"):
    """Attach *msg* to the nearest loggable ancestor of entity *dbid*.

    Grandchild entity types log against their grandparent with a
    "<child>.<parent>" name prefix; child entity types log against their
    parent; any other entity type is silently ignored.

    NOTE(review): created_at and source are accepted but never used -
    confirm whether they should be forwarded to cloud_utils.log_message.

    :param entity: pre-fetched entity row; looked up from *dbid* when None
    :param type: severity label passed through to log_message
    """
    if not entity:
        entity = read_partial_entity(db, dbid)
    if not entity:
        return
    if entity["entitytype"] in entity_constants.entity_grandchildren:
        parent = read_partial_entity(db, entity["parententityid"])
        log_entity = read_partial_entity(db, parent["parententityid"])
        name = "%s.%s" % (entity["name"], parent["name"])
    elif entity["entitytype"] in entity_constants.entity_children:
        log_entity = read_partial_entity(db, entity["parententityid"])
        name = entity["name"]
    else:
        return
    cloud_utils.log_message(db, log_entity["id"], "%s: %s" % (name, msg), type=type)
def validate_network_resources(db, vdc_dbid, vdc_row, resources, class_id, reserve=False):
try:
if not resources:
return "success"
dept_check = True
class_name = id2name(db, class_id)
for svc in resources:
if resources[svc]["throughput"] == 0: # if no resources aare requested for this service.
continue
allocated = db.get_row("tblResourcesNetwork",
"tblEntities = %s AND catagory = 'total' AND type = '%s' AND networkclassesid = %s " % (
vdc_row["parententityid"], svc, class_id),
order="ORDER BY id LIMIT 1") or {"Throughput": 0}
deploy_id = vdc_row["parententityid"]
if allocated["Throughput"] == 0: ##### and int(class_id) == 0:
dept_check = False
cloud_utils.log_message(db, vdc_dbid, "%s - Skipping department resource check for %s - none assigned"
% (vdc_row["name"], entity_constants.resource_network_services_2_names[svc]),
type="Info")
dept_row = db.get_row_dict("tblEntities", {"id": vdc_row["parententityid"]},
order="ORDER BY id LIMIT 1")
allocated = db.get_row("tblResourcesNetwork",
"tblEntities = %s AND catagory = 'total' AND type = '%s' AND networkclassesid = %s " % (
dept_row["ParentEntityId"], svc, class_id),
order="ORDER BY id LIMIT 1") or {"Throughput": 0}
deploy_id = dept_row["ParentEntityId"]
deployed = db.get_row("tblResourcesNetwork",
"tblEntities = %s AND catagory = 'deployed' AND type = '%s' AND networkclassesid = %s " % (
deploy_id, svc, class_id),
order="ORDER BY id LIMIT 1") or {"Throughput": 0}
available = allocated["Throughput"] - deployed["Throughput"]
### if (allocated["Throughput"] == 0 and int(class_id) == 0) or available >= resources[svc]["throughput"]:
### if allocated["Throughput"] == 0 and int(class_id) == 0:
if (allocated["Throughput"] == 0) or available >= resources[svc]["throughput"]:
if allocated["Throughput"] == 0:
cloud_utils.log_message(db, vdc_dbid,
"%s - Skipping organization resource check for %s - none assigned"
% (
vdc_row["name"],
entity_constants.resource_network_services_2_names[svc]),
type="Info")
if reserve:
deployed = db.get_row("tblResourcesNetwork",
"tblEntities = %s AND catagory = 'deployed' AND type = '%s' AND networkclassesid = %s " % (
vdc_row["id"], svc, class_id),
order="ORDER BY id LIMIT 1") or {"Throughput": 0}
cloud_utils.log_message(db, vdc_dbid,
"%s - Resources (Mbps) consumed for %s. Class: %s Allocated: %s Deployed: %s Available: %s Provisioning: %s "
% (vdc_row["name"],
entity_constants.resource_network_services_2_names[svc], class_name,
allocated["Throughput"], deployed["Throughput"],
available, resources[svc]["throughput"]), type="Info")
cloud_utils.update_or_insert(db, "tblResourcesNetwork", {"tblentities": vdc_row["id"],
"throughput": (
resources[svc]["throughput"] +
deployed[
"Throughput"]),
| |
#!/router/bin/python
from collections import namedtuple, OrderedDict, deque
from client_utils import text_tables
from common.text_opts import format_text, format_threshold, format_num
from client.trex_async_client import CTRexAsyncStats
import copy
import datetime
import time
import re
import math
import copy
# Single-letter selectors used to choose which statistic view to render.
GLOBAL_STATS = 'g'
PORT_STATS = 'p'
PORT_STATUS = 'ps'
# All supported selectors, and the subset shown in compact mode.
ALL_STATS_OPTS = {GLOBAL_STATS, PORT_STATS, PORT_STATUS}
COMPACT = {GLOBAL_STATS, PORT_STATS}
# Pairs the raw stats data with its pre-rendered text table.
ExportableStats = namedtuple('ExportableStats', ['raw_data', 'text_table'])
# use to calculate diffs relative to the previous values
# for example, BW
def calculate_diff (samples):
    """Weighted percent-change trend over *samples*.

    Later sample-to-sample changes receive linearly larger weights; each
    single step is clamped to +100% so one spike cannot dominate.
    Non-positive samples are treated as 1 to avoid division by zero.

    :param samples: sequence of numeric samples (oldest first)
    :returns: weighted percent change (float); 0.0 for fewer than 2 samples
    """
    if len(samples) < 2:
        # no pair to diff (also avoids a zero weight divisor below)
        return 0.0
    total = 0.0
    # range instead of Python-2-only xrange (NameError on Python 3)
    weight_step = 1.0 / sum(range(0, len(samples)))
    weight = weight_step
    for i in range(0, len(samples) - 1):
        current = samples[i] if samples[i] > 0 else 1
        next_val = samples[i + 1] if samples[i + 1] > 0 else 1  # renamed: 'next' shadows the builtin
        s = 100 * ((float(next_val) / current) - 1.0)
        # block change by 100%
        total += (min(s, 100) * weight)
        weight += weight_step
    return total
# calculate by absolute values and not relatives (useful for CPU usage in % and etc.)
def calculate_diff_raw (samples):
    """Weighted absolute-change trend over *samples*.

    Like calculate_diff() but sums raw (absolute) deltas instead of
    relative percentages - useful for values already expressed in %,
    such as CPU utilization.

    :param samples: sequence of numeric samples (oldest first)
    :returns: weighted absolute change (float); 0.0 for fewer than 2 samples
    """
    if len(samples) < 2:
        # no pair to diff (also avoids a zero weight divisor below)
        return 0.0
    total = 0.0
    # range instead of Python-2-only xrange (NameError on Python 3)
    weight_step = 1.0 / sum(range(0, len(samples)))
    weight = weight_step
    for i in range(0, len(samples) - 1):
        total += ((samples[i + 1] - samples[i]) * weight)
        weight += weight_step
    return total
class CTRexInfoGenerator(object):
    """
    This object is responsible of generating stats and information from objects maintained at
    CTRexStatelessClient and the ports.
    """

    def __init__(self, global_stats_ref, ports_dict_ref):
        # references (not copies) to the live stats objects
        self._global_stats = global_stats_ref
        self._ports_dict = ports_dict_ref

    def generate_single_statistic(self, port_id_list, statistic_type):
        # Dispatch on the single-letter selector (see module constants).
        if statistic_type == GLOBAL_STATS:
            return self._generate_global_stats()
        elif statistic_type == PORT_STATS:
            return self._generate_port_stats(port_id_list)
            pass  # NOTE(review): unreachable after the return above - dead code
        elif statistic_type == PORT_STATUS:
            return self._generate_port_status(port_id_list)
        else:
            # ignore by returning empty object
            return {}

    def generate_streams_info(self, port_id_list, stream_id_list):
        # One table per port; ports without loaded streams are skipped.
        relevant_ports = self.__get_relevant_ports(port_id_list)
        return_data = OrderedDict()
        for port_obj in relevant_ports:
            streams_data = self._generate_single_port_streams_info(port_obj, stream_id_list)
            if not streams_data:
                continue
            hdr_key = "Port {port}: {yaml_file}".format(port= port_obj.port_id,
                                                        yaml_file= streams_data.raw_data.get('referring_file', ''))
            # TODO: test for other ports with same stream structure, and join them
            return_data[hdr_key] = streams_data
        return return_data

    def _generate_global_stats(self):
        stats_data = self._global_stats.generate_stats()
        # build table representation
        stats_table = text_tables.TRexTextInfo()
        stats_table.set_cols_align(["l", "l"])
        stats_table.add_rows([[k.replace("_", " ").title(), v]
                              for k, v in stats_data.iteritems()],
                             header=False)
        return {"global_statistics": ExportableStats(stats_data, stats_table)}

    def _generate_port_stats(self, port_id_list):
        relevant_ports = self.__get_relevant_ports(port_id_list)
        return_stats_data = {}
        # row order of the rendered table; keys of dashes render as separators
        per_field_stats = OrderedDict([("owner", []),
                                       ("state", []),
                                       ("--", []),
                                       ("opackets", []),
                                       ("ipackets", []),
                                       ("obytes", []),
                                       ("ibytes", []),
                                       ("oerrors", []),
                                       ("ierrors", []),
                                       ("tx-bytes", []),
                                       ("rx-bytes", []),
                                       ("tx-pkts", []),
                                       ("rx-pkts", []),
                                       ("---", []),
                                       ("Tx bps", []),
                                       ("Rx bps", []),
                                       ("----", []),
                                       ("Tx pps", []),
                                       ("Rx pps", [])
                                       ]
                                      )
        total_stats = CPortStats(None)
        for port_obj in relevant_ports:
            # fetch port data
            port_stats = port_obj.generate_port_stats()
            total_stats += port_obj.port_stats
            # populate to data structures
            return_stats_data[port_obj.port_id] = port_stats
            self.__update_per_field_dict(port_stats, per_field_stats)
        total_cols = len(relevant_ports)
        header = ["port"] + [port.port_id for port in relevant_ports]
        if (total_cols > 1):
            # aggregate column only makes sense for more than one port
            self.__update_per_field_dict(total_stats.generate_stats(), per_field_stats)
            header += ['total']
            total_cols += 1
        stats_table = text_tables.TRexTextTable()
        stats_table.set_cols_align(["l"] + ["r"] * total_cols)
        stats_table.set_cols_width([10] + [17] * total_cols)
        stats_table.set_cols_dtype(['t'] + ['t'] * total_cols)
        stats_table.add_rows([[k] + v
                              for k, v in per_field_stats.iteritems()],
                             header=False)
        stats_table.header(header)
        return {"port_statistics": ExportableStats(return_stats_data, stats_table)}

    def _generate_port_status(self, port_id_list):
        relevant_ports = self.__get_relevant_ports(port_id_list)
        return_stats_data = {}
        per_field_status = OrderedDict([("type", []),
                                        ("maximum", []),
                                        ("status", [])
                                        ]
                                       )
        for port_obj in relevant_ports:
            # fetch port data
            # port_stats = self._async_stats.get_port_stats(port_obj.port_id)
            port_status = port_obj.generate_port_status()
            # populate to data structures
            return_stats_data[port_obj.port_id] = port_status
            self.__update_per_field_dict(port_status, per_field_status)
        stats_table = text_tables.TRexTextTable()
        stats_table.set_cols_align(["l"] + ["c"]*len(relevant_ports))
        stats_table.set_cols_width([10] + [20] * len(relevant_ports))
        stats_table.add_rows([[k] + v
                              for k, v in per_field_status.iteritems()],
                             header=False)
        stats_table.header(["port"] + [port.port_id
                                       for port in relevant_ports])
        return {"port_status": ExportableStats(return_stats_data, stats_table)}

    def _generate_single_port_streams_info(self, port_obj, stream_id_list):
        return_streams_data = port_obj.generate_loaded_streams_sum(stream_id_list)
        if not return_streams_data.get("streams"):
            # we got no streams available
            return None
        # FORMAT VALUES ON DEMAND
        # because we mutate this - deep copy before
        return_streams_data = copy.deepcopy(return_streams_data)
        for stream_id, stream_id_sum in return_streams_data['streams'].iteritems():
            stream_id_sum['rate_pps'] = format_num(stream_id_sum['rate_pps'], suffix='pps')
            stream_id_sum['packet_type'] = self._trim_packet_headers(stream_id_sum['packet_type'], 20)
        info_table = text_tables.TRexTextTable()
        info_table.set_cols_align(["c"] + ["l"] + ["r"] + ["c"] + ["r"] + ["c"])
        info_table.set_cols_width([4] + [20] + [8] + [16] + [10] + [12])
        info_table.add_rows([v.values()
                             for k, v in return_streams_data['streams'].iteritems()],
                            header=False)
        info_table.header(["ID", "packet type", "length", "mode", "rate", "next stream"])
        return ExportableStats(return_streams_data, info_table)

    def __get_relevant_ports(self, port_id_list):
        # fetch owned ports
        ports = [port_obj
                 for _, port_obj in self._ports_dict.iteritems()
                 if port_obj.port_id in port_id_list]
        # display only the first FOUR options, by design
        if len(ports) > 4:
            print format_text("[WARNING]: ", 'magenta', 'bold'), format_text("displaying up to 4 ports", 'magenta')
            ports = ports[:4]
        return ports

    def __update_per_field_dict(self, dict_src_data, dict_dest_ref):
        # append each source value under its matching row key; unknown keys dropped
        for key, val in dict_src_data.iteritems():
            if key in dict_dest_ref:
                dict_dest_ref[key].append(val)

    @staticmethod
    def _trim_packet_headers(headers_str, trim_limit):
        # shorten long packet-header descriptions with a "..." ellipsis
        if len(headers_str) < trim_limit:
            # do nothing
            return headers_str
        else:
            return (headers_str[:trim_limit-3] + "...")
class CTRexStats(object):
    """Abstract base class for a stats object (global or per port).

    Holds the latest snapshot, a reference snapshot used for relative
    counters, and a short history window (10 samples) used for trend
    estimation. Subclasses must implement generate_stats().
    """
    def __init__(self):
        self.reference_stats = None      # baseline snapshot for relative counters
        self.latest_stats = {}
        self.last_update_ts = time.time()
        self.history = deque(maxlen = 10)  # recent snapshots for trends

    def __getitem__(self, item):
        # override this to allow quick and clean access to fields
        if not item in self.latest_stats:
            return "N/A"
        # rate fields ("*_bps" / "*_pps"): absolute value with a unit suffix
        m = re.search(r'_(([a-z])ps)$', item)
        if m:
            unit = m.group(2)
            if unit == "b":
                return self.get(item, format=True, suffix="b/sec")
            elif unit == "p":
                return self.get(item, format=True, suffix="pkt/sec")
            else:
                return self.get(item, format=True, suffix=m.group(1))
        # counter fields ("ibytes", "opackets", ...): relative to reference.
        # BUGFIX: the pattern used to be '^[i|o](a-z+)$', which matched the
        # literal text "a", "-", "z+" (and a stray '|' in the class), so this
        # branch could never fire; '^[io]([a-z]+)$' is what was intended.
        m = re.search(r'^[io]([a-z]+)$', item)
        if m:
            type = m.group(1)
            if type == "bytes":
                return self.get_rel(item, format=True, suffix="B")
            elif type == "packets":
                return self.get_rel(item, format=True, suffix="pkts")
            else:
                # do not format with suffix
                return self.get_rel(item, format=True)
        # can't match to any known pattern, return N/A
        return "N/A"

    def generate_stats(self):
        # must be implemented by designated classes (such as port/ global stats)
        raise NotImplementedError()

    def update(self, snapshot):
        """Store a new snapshot; re-baseline when >3s passed since the last one."""
        self.latest_stats = snapshot
        self.history.append(snapshot)
        diff_time = time.time() - self.last_update_ts
        # 3 seconds is too much - this is the new reference
        if (self.reference_stats is None) or (diff_time > 3):
            self.reference_stats = self.latest_stats
        self.last_update_ts = time.time()

    def clear_stats(self):
        """Make the current snapshot the zero point for relative values."""
        self.reference_stats = self.latest_stats

    def invalidate(self):
        """Drop the latest snapshot; lookups return "N/A" until updated again."""
        self.latest_stats = {}

    def get(self, field, format=False, suffix=""):
        """Absolute value of *field*, optionally human-formatted with *suffix*."""
        if not field in self.latest_stats:
            return "N/A"
        if not format:
            return self.latest_stats[field]
        else:
            return format_num(self.latest_stats[field], suffix)

    def get_rel(self, field, format=False, suffix=""):
        """Value of *field* relative to the reference snapshot."""
        if not field in self.latest_stats:
            return "N/A"
        if not format:
            return (self.latest_stats[field] - self.reference_stats[field])
        else:
            return format_num(self.latest_stats[field] - self.reference_stats[field], suffix)

    # get trend for a field
    def get_trend(self, field, use_raw = False, percision = 10.0):
        """Weighted trend of *field* over the history window (0 when unknown,
        when history is short, or when the value is below the noise floor)."""
        if not field in self.latest_stats:
            return 0
        # not enough history - no trend
        if len(self.history) < 5:
            return 0
        # absolute value is too low 0 considered noise
        if self.latest_stats[field] < percision:
            return 0
        field_samples = [sample[field] for sample in self.history]
        if use_raw:
            return calculate_diff_raw(field_samples)
        else:
            return calculate_diff(field_samples)

    def get_trend_gui(self, field, show_value = False, use_raw = False, up_color = 'red', down_color = 'green'):
        """Colored arrow string (1-3 arrows by magnitude) describing the trend."""
        v = self.get_trend(field, use_raw)
        value = abs(v)
        arrow = u'\u25b2' if v > 0 else u'\u25bc'
        color = up_color if v > 0 else down_color
        # change in 1% is not meaningful
        if value < 1:
            return ""
        elif value > 5:
            if show_value:
                return format_text(u"{0}{0}{0} {1:.2f}%".format(arrow, v), color)
            else:
                return format_text(u"{0}{0}{0}".format(arrow), color)
        elif value > 2:
            if show_value:
                return format_text(u"{0}{0} {1:.2f}%".format(arrow, v), color)
            else:
                return format_text(u"{0}{0}".format(arrow), color)
        else:
            if show_value:
                return format_text(u"{0} {1:.2f}%".format(arrow, v), color)
            else:
                return format_text(u"{0}".format(arrow), color)
class CGlobalStats(CTRexStats):
    """Server-wide statistics: connection info, CPU, aggregate BW/PPS, drops."""

    def __init__(self, connection_info, server_version, ports_dict_ref):
        super(CGlobalStats, self).__init__()
        self.connection_info = connection_info
        self.server_version = server_version
        self._ports_dict = ports_dict_ref

    def generate_stats(self):
        # Ordered mapping of display-row name -> formatted value string.
        # NOTE(review): both separator entries use the same key " ", so the
        # second assignment collapses onto the first OrderedDict slot -
        # confirm whether two distinct separator rows were intended.
        return OrderedDict([("connection", "{host}, Port {port}".format(host=self.connection_info.get("server"),
                                                                        port=self.connection_info.get("sync_port"))),
                            ("version", "{ver}, UUID: {uuid}".format(ver=self.server_version.get("version", "N/A"),
                                                                     uuid="N/A")),
                            ("cpu_util", u"{0}% {1}".format( format_threshold(self.get("m_cpu_util"), [85, 100], [0, 85]),
                                                             self.get_trend_gui("m_cpu_util", use_raw = True))),
                            (" ", ""),
                            ("total_tx", u"{0} {1}".format( self.get("m_tx_bps", format=True, suffix="b/sec"),
                                                            self.get_trend_gui("m_tx_bps"))),
                            ("total_rx", u"{0} {1}".format( self.get("m_rx_bps", format=True, suffix="b/sec"),
                                                            self.get_trend_gui("m_rx_bps"))),
                            ("total_pps", u"{0} {1}".format( self.get("m_tx_pps", format=True, suffix="pkt/sec"),
                                                             self.get_trend_gui("m_tx_pps"))),
                            (" ", ""),
                            ("drop_rate", "{0}".format( format_num(self.get("m_rx_drop_bps"),
                                                                   suffix = 'b/sec',
                                                                   opts = 'green' if (self.get("m_rx_drop_bps")== 0) else 'red'))),
                            ("queue_full", "{0}".format( format_num(self.get_rel("m_total_queue_full"),
                                                                    suffix = 'pkts',
                                                                    compact = False,
                                                                    opts = 'green' if (self.get_rel("m_total_queue_full")== 0) else 'red'))),
                            ]
                           )
class CPortStats(CTRexStats):
def __init__(self, port_obj):
super(CPortStats, self).__init__()
self._port_obj = port_obj
@staticmethod
def __merge_dicts (target, src):
for k, v in src.iteritems():
if k in target:
target[k] += v
else:
target[k] = v
def __add__ (self, x):
if not isinstance(x, CPortStats):
raise TypeError("cannot add non stats object to stats")
# main stats
self.__merge_dicts(self.latest_stats, x.latest_stats)
# reference stats
if x.reference_stats:
if not self.reference_stats:
self.reference_stats | |
index0 = 1
output = "permutation"
if self.base_ring().is_exact():
def rational_approximation(c):
return c
else:
c_list = []
def rational_approximation(c):
# Implementation detail: Return unique integer if two
# c-values are the same up to machine precision. But
# you can think of it as a uniquely-chosen rational
# approximation.
for i, x in enumerate(c_list):
if self._is_zero(x - c):
return i
c_list.append(c)
return len(c_list) - 1
if self.is_compact():
def edge_label(i, j, c_ij):
return c_ij
else:
# In the non-compact case, we also label the edges by the
# type of the V-representation object. This ensures that
# vertices, rays, and lines are only permuted amongst
# themselves.
def edge_label(i, j, c_ij):
return (self.Vrepresentation(i).type(), c_ij, self.Vrepresentation(j).type())
# Homogeneous coordinates for the V-representation objects.
# Mathematically, V is a matrix. For efficiency however, we
# represent it as a list of column vectors.
V = [v.homogeneous_vector() for v in self.Vrepresentation()]
# Pseudoinverse of V Vt
Qplus = sum(v.column() * v.row() for v in V).pseudoinverse()
# Construct the graph.
G = Graph()
for i in range(len(V)):
for j in range(i+1, len(V)):
c_ij = rational_approximation(V[i] * Qplus * V[j])
G.add_edge(index0+i, index0+j, edge_label(i, j, c_ij))
permgroup = G.automorphism_group(edge_labels=True)
if output == "permutation":
return permgroup
elif output == "matrix":
permgroup = permgroup.gens()
# Compute V+ = Vt Q+ as list of row vectors
Vplus = list(matrix(V) * Qplus) # matrix(V) is Vt
# Compute W = 1 - V V+
W = 1 - sum(V[i].column() * Vplus[i].row() for i in range(len(V)))
# Convert the permutation group to a matrix group.
# If P is a permutation, then we return the matrix
# B = (V P V+) + W.
#
# If output == "matrix", we loop over the generators of the group.
# Otherwise, we loop over all elements.
matrices = []
for perm in permgroup:
A = sum(V[perm(i)].column() * Vplus[i].row() for i in range(len(V)))
matrices.append(A + W)
for mat in matrices:
mat.set_immutable()
if output == "matrixlist":
return tuple(matrices)
else:
return MatrixGroup(matrices)
def is_full_dimensional(self):
"""
Return whether the polyhedron is full dimensional.
OUTPUT:
Boolean. Whether the polyhedron is not contained in any strict
affine subspace.
EXAMPLES::
sage: polytopes.hypercube(3).is_full_dimensional()
True
sage: Polyhedron(vertices=[(1,2,3)], rays=[(1,0,0)]).is_full_dimensional()
False
"""
return self.dim() == self.ambient_dim()
def is_combinatorially_isomorphic(self, other, algorithm='bipartite_graph'):
r"""
Return whether the polyhedron is combinatorially isomorphic to another polyhedron.
We only consider bounded polyhedra. By definition, they are
combinatorially isomorphic if their face lattices are isomorphic.
INPUT:
- ``other`` -- a polyhedron object
- ``algorithm`` (default = ``bipartite_graph``) -- the algorithm to use.
The other possible value is ``face_lattice``.
OUTPUT:
- ``True`` if the two polyhedra are combinatorially isomorphic
- ``False`` otherwise
.. SEEALSO::
:meth:`combinatorial_automorphism_group`,
:meth:`vertex_facet_graph`.
REFERENCES:
For the equivalence of the two algorithms see [KK1995]_, p. 877-878
EXAMPLES:
The square is combinatorially isomorphic to the 2-dimensional cube::
sage: polytopes.hypercube(2).is_combinatorially_isomorphic(polytopes.regular_polygon(4))
True
All the faces of the 3-dimensional permutahedron are either
combinatorially isomorphic to a square or a hexagon::
sage: H = polytopes.regular_polygon(6)
sage: S = polytopes.hypercube(2)
sage: P = polytopes.permutahedron(4)
sage: all(F.as_polyhedron().is_combinatorially_isomorphic(S) or F.as_polyhedron().is_combinatorially_isomorphic(H) for F in P.faces(2))
True
Checking that a regular simplex intersected with its reflection
through the origin is combinatorially isomorphic to the intersection
of a cube with a hyperplane perpendicular to its long diagonal::
sage: def simplex_intersection(k):
....: S1 = Polyhedron([vector(v)-vector(polytopes.simplex(k).center()) for v in polytopes.simplex(k).vertices_list()])
....: S2 = Polyhedron([-vector(v) for v in S1.vertices_list()])
....: return S1.intersection(S2)
sage: def cube_intersection(k):
....: C = polytopes.hypercube(k+1)
....: H = Polyhedron(eqns=[[0]+[1 for i in range(k+1)]])
....: return C.intersection(H)
sage: [simplex_intersection(k).is_combinatorially_isomorphic(cube_intersection(k)) for k in range(2,5)]
[True, True, True]
sage: simplex_intersection(2).is_combinatorially_isomorphic(polytopes.regular_polygon(6))
True
sage: simplex_intersection(3).is_combinatorially_isomorphic(polytopes.octahedron())
True
Two polytopes with the same `f`-vector, but different combinatorial types::
sage: P = Polyhedron([[-605520/1525633, -605520/1525633, -1261500/1525633, -52200/1525633, 11833/1525633],\
[-720/1769, -600/1769, 1500/1769, 0, -31/1769], [-216/749, 240/749, -240/749, -432/749, 461/749], \
[-50/181, 50/181, 60/181, -100/181, -119/181], [-32/51, -16/51, -4/51, 12/17, 1/17],\
[1, 0, 0, 0, 0], [16/129, 128/129, 0, 0, 1/129], [64/267, -128/267, 24/89, -128/267, 57/89],\
[1200/3953, -1200/3953, -1440/3953, -360/3953, -3247/3953], [1512/5597, 1512/5597, 588/5597, 4704/5597, 2069/5597]])
sage: C = polytopes.cyclic_polytope(5,10)
sage: C.f_vector() == P.f_vector(); C.f_vector()
True
(1, 10, 45, 100, 105, 42, 1)
sage: C.is_combinatorially_isomorphic(P)
False
sage: S = polytopes.simplex(3)
sage: S = S.face_truncation(S.faces(0)[3])
sage: S = S.face_truncation(S.faces(0)[4])
sage: S = S.face_truncation(S.faces(0)[5])
sage: T = polytopes.simplex(3)
sage: T = T.face_truncation(T.faces(0)[3])
sage: T = T.face_truncation(T.faces(0)[4])
sage: T = T.face_truncation(T.faces(0)[4])
sage: T.is_combinatorially_isomorphic(S)
False
sage: T.f_vector(), S.f_vector()
((1, 10, 15, 7, 1), (1, 10, 15, 7, 1))
sage: C = polytopes.hypercube(5)
sage: C.is_combinatorially_isomorphic(C)
True
sage: C.is_combinatorially_isomorphic(C, algorithm='magic')
Traceback (most recent call last):
...
AssertionError: `algorithm` must be 'bipartite graph' or 'face_lattice'
sage: G = Graph()
sage: C.is_combinatorially_isomorphic(G)
Traceback (most recent call last):
...
AssertionError: input `other` must be a polyhedron
sage: H = Polyhedron(eqns=[[0,1,1,1,1]]); H
A 3-dimensional polyhedron in QQ^4 defined as the convex hull of 1 vertex and 3 lines
sage: C.is_combinatorially_isomorphic(H)
Traceback (most recent call last):
...
AssertionError: polyhedron `other` must be bounded
"""
assert isinstance(other, Polyhedron_base), "input `other` must be a polyhedron"
assert self.is_compact(), "polyhedron `self` must be bounded"
assert other.is_compact(), "polyhedron `other` must be bounded"
assert algorithm in ['bipartite_graph', 'face_lattice'], "`algorithm` must be 'bipartite graph' or 'face_lattice'"
# For speed, we check if the polyhedra have the same number of facets and vertices.
# This is faster than building the bipartite graphs first and
# then check that they won't be isomorphic.
if self.n_vertices() != other.n_vertices() or self.n_facets() != other.n_facets():
return False
if algorithm == 'bipartite_graph':
G_self = self.vertex_facet_graph(False)
G_other = other.vertex_facet_graph(False)
return G_self.is_isomorphic(G_other)
else:
return self.face_lattice().is_isomorphic(other.face_lattice())
def _test_is_combinatorially_isomorphic(self, tester=None, **options):
"""
Run tests on the method :meth:`.is_combinatorially_isomorphic`.
TESTS::
sage: polytopes.cross_polytope(3)._test_is_combinatorially_isomorphic()
"""
if tester is None:
tester = self._tester(**options)
if not self.is_compact():
with tester.assertRaises(AssertionError):
self.is_combinatorially_isomorphic(self)
return
if self.n_vertices() > 200 or self.n_facets() > 200:
# Avoid very long doctests.
return
tester.assertTrue(self.is_combinatorially_isomorphic(ZZ(4)*self))
if self.n_vertices():
tester.assertTrue(self.is_combinatorially_isomorphic(self + self.center()))
if self.n_vertices() < 20 and self.n_facets() < 20:
tester.assertTrue(self.is_combinatorially_isomorphic(ZZ(4)*self, algorithm='face_lattice'))
if self.n_vertices():
tester.assertTrue(self.is_combinatorially_isomorphic(self + self.center(), algorithm='face_lattice'))
def affine_hull_projection(self, as_affine_map=False, orthogonal=False, orthonormal=False, extend=False):
"""
Return the polyhedron projected into its affine hull.
Each polyhedron is contained in some smallest affine subspace
(possibly the entire ambient space) -- its affine hull.
We provide a projection of the ambient
space of the polyhedron to Euclidian space of dimension of the
polyhedron. Then the image of the polyhedron under this
projection (or, depending on the parameter ``as_affine_map``,
the projection itself) is returned.
INPUT:
- ``as_affine_map`` (boolean, default = False) -- If ``False``, return
a polyhedron. If ``True``, return the affine transformation,
that sends the embedded polytope to a fulldimensional one.
It is given as a pair ``(A, b)``, where A is a linear transformation
and ``b`` is a vector, and the affine transformation sends ``v`` to
``A(v)+b``.
- ``orthogonal`` (boolean, default = False) -- if ``True``,
provide an orthogonal transformation.
- ``orthonormal`` (boolean, default = False) -- if ``True``,
provide an orthonormal transformation. If the base ring does not
provide the necessary square roots, the extend parameter
needs to be set to ``True``.
- ``extend`` (boolean, default = False) -- if ``True``,
allow base ring to be extended if necessary. This becomes
relevant when requiring an orthonormal transformation.
OUTPUT:
A full-dimensional polyhedron or an affine transformation,
depending on the parameter ``as_affine_map``.
.. TODO::
- make the parameters ``orthogonal`` and ``orthonormal`` work
with unbounded polyhedra.
EXAMPLES::
sage: triangle = Polyhedron([(1,0,0), (0,1,0), (0,0,1)]); triangle
A 2-dimensional polyhedron in ZZ^3 defined as the convex hull of 3 vertices
sage: triangle.affine_hull_projection()
A 2-dimensional polyhedron in ZZ^2 defined as the convex hull of 3 vertices
sage: half3d = Polyhedron(vertices=[(3,2,1)], rays=[(1,0,0)])
sage: half3d.affine_hull_projection().Vrepresentation()
(A ray in the direction (1), A vertex at (3))
The resulting affine hulls depend on the parameter ``orthogonal`` and ``orthonormal``::
sage: L = Polyhedron([[1,0],[0,1]]); L
A 1-dimensional polyhedron in ZZ^2 defined as the convex hull of 2 vertices
sage: A = L.affine_hull_projection(); A
A 1-dimensional polyhedron in ZZ^1 defined as the convex hull of 2 vertices
sage: A.vertices()
(A vertex at | |
# <gh_stars>1-10  (dataset artifact, commented out so the module parses)
import tkinter as tk
import copy as copy
import time
import random
import math
import sys
import queue
import cProfile
import operator
class MCTSNode:
    """A single node of the Monte Carlo search tree."""

    def __init__(self, state, player):
        """Create an unexpanded, unvisited node.

        state  -- State: the game position this node represents
        player -- "black" or "white"
        """
        self.state, self.player = state, player
        self.parent = None          # set when the node is attached to a tree
        self.children = []          # list of MCTSNode, filled on expansion
        # Visit statistics updated by back-propagation.
        self.noOfSimulations = 0
        self.wins = 0
        self.completed = False      # True once the game is decided here
        # Move that led to this node; presumably (label, row, col) --
        # printed as coordinates in levelOrderTraversal. TODO confirm.
        self.play = ("", 0, 0)
class MonteCarloSearchTree:
def __init__(self, root, startTurn):
## #assume win when score difference > 20
## self.maxScoreDifference = 20
self.turn = startTurn #"black" or "white"
self.root = root
self.depth = 1
"""
Finds the best move to play from the root of the tree
by running the 4 stages of the Monte Carlo Tree Search
"""
def findNextMove(self):
start = int(round(time.time()))
end = start + 120
nodesSearched = 0
while(int(round(time.time())) < end):
#Selection
bestNode = self.selectBestNode(self.root)
#Expansion
#check if game ended:
self.checkGameOver(bestNode)
if bestNode.completed == False:
self.expandNode(bestNode)
#Simulation
simulateNode = bestNode
#if children, pick random child
if len(simulateNode.children) > 0:
randomIndex = random.randint(0,len(simulateNode.children)-1)
simulateNode = simulateNode.children[randomIndex]
result = self.simulatePlay(simulateNode)
#Update
self.backPropogation(simulateNode, result)
###Code that runs each simulation 50 times, takes the average and counts
###a win if more wins happened than losses.
## while simulations < 50:
## result = self.simulatePlay(simulateNode)
#### print(result)
## simulations += 1
## if result == True:
## wins += 1
## else:
## losses += 1
## simulateNode.won = False
## simulateNode.lost = False
## if wins > losses:
## #Update
## self.backPropogation(simulateNode, True)
## simulateNode.wins += 1
## else:
## #Update
## self.backPropogation(simulateNode, False)
## simulateNode.noOfSimulations += 1
## simulateNode.completed = True
#get winner
winnerPlay = self.getMaxChild()
#store level order traversal for debugging
self.levelOrderTraversal()
return winnerPlay.play
"""
Checks if there are any valid moves left for black and white.
If not, marks node as terminal.
"""
def checkGameOver(self, node):
copyNode = [list(x) for x in node.state.pieces]
playHistory = [list(x) for x in node.state.history]
rows = len(copyNode)
columns = len(copyNode[0])
lenOfBlack = 0
lenOfWhite = 0
#white turn
validWhite = []
for r in range(rows):
for c in range(columns):
if copyNode[r][c] == "empty":
#check for suicide and kill
aCopy = [list(x) for x in copyNode]
scoreBefore = gameScore(aCopy)
aCopy[r][c] = "white"
#store deleted pieces (if any)
deleted = []
#check all surroundings of row, column
for x in _getSurrounding(aCopy, r, c):
#if surrounding = color of next player's turn and no liberties
if (x[2] == "black" and len(getLiberties(aCopy, x[0], x[1])) == 0):
#kill group
group = getGroup(aCopy, x[0], x[1])
for g in group:
aCopy[g[0]][g[1]] = "empty"
deleted.append((g[2] + " " + str(g[0]) + " " + str(g[1]), g[0], g[1]))
#if not suicide or filling in an eye
if len(getLiberties(aCopy, r, c)) > 1:
scoreAfter = gameScore(aCopy)
scoreDifference = scoreAfter[1] - scoreBefore[1]
if len(playHistory) < 2:
validWhite.append((r, c, scoreDifference, deleted))
else:
#check for ko
if aCopy != playHistory[-2]:
validWhite.append((r, c, scoreDifference, deleted))
lenOfWhite = len(validWhite)
#black turn
validBlack = []
for r in range(rows):
for c in range(columns):
if copyNode[r][c] == "empty":
#check for suicide and kill
aCopy = [list(x) for x in copyNode]
scoreBefore = gameScore(aCopy)
aCopy[r][c] = "black"
#store deleted pieces (if any)
deleted = []
#check all surroundings of row, column
for x in _getSurrounding(aCopy, r, c):
#if surrounding = color of next player's turn and no liberties
if (x[2] == "white" and len(getLiberties(aCopy, x[0], x[1])) == 0):
#kill group
group = getGroup(aCopy, x[0], x[1])
for g in group:
aCopy[g[0]][g[1]] = "empty"
deleted.append((g[2] + " " + str(g[0]) + " " + str(g[1]), g[0], g[1]))
#if not suicide or filling in an eye
if len(getLiberties(aCopy, r, c)) > 1:
scoreAfter = gameScore(aCopy)
scoreDifference = scoreAfter[0] - scoreBefore[0]
if len(playHistory) < 2:
validBlack.append((r, c, scoreDifference))
else:
#check for ko
if aCopy != playHistory[-2]:
validBlack.append((r, c, scoreDifference, deleted))
lenOfBlack = len(validBlack)
if lenOfBlack == 0 or lenOfWhite == 0:
#mark node as terminal
node.completed = True
return True
node.completed = False
return False
"""
Method gets root child with highest wins/simulations ratio.
"""
def getMaxChild(self):
best = self.root.children[0]
random.shuffle(self.root.children)
for x in self.root.children:
if x.wins / (x.noOfSimulations+1) > best.wins / (best.noOfSimulations+1):
best = x
return best
"""
Updates all nodes leading up to current node that was just simulated.
"""
def backPropogation(self, node, result):
nodeCopy = node
while nodeCopy:
nodeCopy.wins += result
nodeCopy.noOfSimulations += 1
nodeCopy = nodeCopy.parent
"""
Stores level order traversal of the Monte Carlo Search Tree to out.txt
"""
def levelOrderTraversal(self):
orig_stdout = sys.stdout
f = open('out.txt', 'w')
sys.stdout = f
q = queue.Queue()
q.put(self.root)
q.put("delimiter")
while(not q.empty()):
top = q.get()
if top == "delimiter":
if not q.empty():
q.put("delimiter")
print("========================", "\n", "\n", "\n")
else:
for child in top.children:
q.put(child)
if top != "delimiter":
print(top.player[0] +'(' +str(top.play[1]) + ", " + str(top.play[2]) + '),', top.wins, "/", top.noOfSimulations, end='\t')
sys.stdout = orig_stdout
f.close()
"""
Heavy playout of a game from current node state. The simulation
continues until one of the two players runs out of moves or the number
of moves exceeds N * N * 3 (where N * N is board size).
Heavy playout means black and white will choose the move that results in
the highest score increase. If any player has a chance to imprison a
stone or surround territory they will.
"""
def simulatePlay(self, node):
#Checks if terminal node reached. Immediately stop simulation
#and propogate score up
if node.completed == True:
score = gameScore(node.state.pieces)
if score[0] > score[1]:
if node.parent:
node.parent.wins = -sys.maxsize
else:
node.wins = -sys.maxsize
return score[1] - score[0]
return score[0] - score[1]
## #simulated board
copyNode = [list(x) for x in node.state.pieces]
playHistory = [list(x) for x in node.state.history]
## copyNode = copy.deepcopy(node.state.pieces)
rows = len(copyNode)
columns = len(copyNode[0])
#move one turn forward from state
turn = ""
if node.player == "black":
turn = "white"
else:
turn = "black"
lenOfBlack = 0
lenOfWhite = 0
moves = 0
maxGameLength = rows * columns * 3
score = 0
whitePlayed = False
blackPlayed = False
#While there are valid moves or maxGameLength is not reached
while True:
#update history
playHistory.append([list(x) for x in copyNode])
#play white move
if turn == "white":
#find all valid white plays
valid = []
for r in range(rows):
for c in range(columns):
if copyNode[r][c] == "empty":
#check for suicide, ko and kill
aCopy = [list(x) for x in copyNode]
scoreBefore = gameScore(aCopy)
aCopy[r][c] = "white"
#store deleted pieces (if any)
deleted = []
#check all surroundings of row, column
for x in _getSurrounding(aCopy, r, c):
#if surrounding = color of next player's turn and no liberties
if (x[2] == "black" and len(getLiberties(aCopy, x[0], x[1])) == 0):
#kill group
group = getGroup(aCopy, x[0], x[1])
for g in group:
aCopy[g[0]][g[1]] = "empty"
deleted.append((g[2] + " " + str(g[0]) + " " + str(g[1]), g[0], g[1]))
#if not suicide or filling in an eye
if len(getLiberties(aCopy, r, c)) > 1:
scoreAfter = gameScore(aCopy)
scoreDifference = scoreAfter[1] - scoreBefore[1]
if len(playHistory) < 2:
valid.append((r, c, scoreDifference, deleted))
else:
#check for ko
if aCopy != playHistory[-2]:
valid.append((r, c, scoreDifference, deleted))
lenOfWhite = len(valid)
if len(valid) > 0:
#pick highest value play
#shuffle for random max pick
random.shuffle(valid)
bestPlay = max(valid,key=operator.itemgetter(2))
row = bestPlay[0]
col = bestPlay[1]
copyNode[row][col] = "white"
if len(bestPlay) == 4:
for deletedStone in bestPlay[3]:
copyNode[deletedStone[1]][deletedStone[2]] = "empty"
whitePlayed = True
#play black move
else:
valid = []
for r in range(rows):
for c in range(columns):
if copyNode[r][c] == "empty":
#check for suicide, ko and kill
aCopy = [list(x) for x in copyNode]
scoreBefore = gameScore(aCopy)
aCopy[r][c] = "black"
#store deleted pieces (if any)
deleted = []
#check all surroundings of row, column
for x in _getSurrounding(aCopy, r, c):
#if surrounding = color of next player's turn and no liberties
if (x[2] == "white" and len(getLiberties(aCopy, x[0], x[1])) == 0):
#kill group
group = getGroup(aCopy, x[0], x[1])
for g in group:
aCopy[g[0]][g[1]] = "empty"
deleted.append((g[2] + " " + str(g[0]) + " " + str(g[1]), g[0], g[1]))
#if not suicide or filling in an eye
if len(getLiberties(aCopy, r, c)) > 1:
scoreAfter = gameScore(aCopy)
scoreDifference | |
# <reponame>Bhaskers-Blu-Org1/ImageNet-Robustness <filename>l1_attack.py  (dataset artifact, commented out so the module parses)
## l1_attack.py -- attack a network optimizing elastic-net distance with an l1 decision rule
##
## Copyright (C) 2017, <NAME> <<EMAIL>>.
## Copyright (C) 2016, <NAME> <<EMAIL>>.
##
## This program is licenced under the BSD 2-Clause licence,
## contained in the LICENCE file in this directory.
import sys
import tensorflow as tf
import numpy as np
import time
# Default hyper-parameters for the EAD L1 attack; every one of them can be
# overridden through the EADL1 constructor arguments below.
BINARY_SEARCH_STEPS = 9  # number of times to adjust the constant with binary search
MAX_ITERATIONS = 10000   # number of iterations to perform gradient descent
ABORT_EARLY = True       # if we stop improving, abort gradient descent early
LEARNING_RATE = 1e-2     # larger values converge faster to less accurate results
TARGETED = True          # should we target one specific class? or just be wrong?
CONFIDENCE = 0           # how strong the adversarial example should be
INITIAL_CONST = 1e-3     # the initial constant c to pick as a first guess
BETA = 1e-3              # Hyperparameter trading off L2 minimization for L1 minimization
class EADL1:
    def __init__(self, sess, model, batch_size=1, confidence = CONFIDENCE,
                 targeted = TARGETED, learning_rate = LEARNING_RATE,
                 binary_search_steps = BINARY_SEARCH_STEPS, max_iterations = MAX_ITERATIONS,
                 print_every = 100, early_stop_iters = 0,
                 abort_early = ABORT_EARLY,
                 initial_const = INITIAL_CONST, beta = BETA):
        """
        EAD with L1 Decision Rule

        This attack is the most efficient and should be used as the primary
        attack to evaluate potential defenses.

        Returns adversarial examples for the supplied model.

        confidence: Confidence of adversarial examples: higher produces examples
          that are farther away, but more strongly classified as adversarial.
        batch_size: Number of attacks to run simultaneously.
        targeted: True if we should perform a targetted attack, False otherwise.
        learning_rate: The learning rate for the attack algorithm. Smaller values
          produce better results but are slower to converge.
        binary_search_steps: The number of times we perform binary search to
          find the optimal tradeoff-constant between distance and confidence.
        max_iterations: The maximum number of iterations. Larger values are more
          accurate; setting too small will require a large learning rate and will
          produce poor results.
        abort_early: If true, allows early aborts if gradient descent gets stuck.
        initial_const: The initial tradeoff-constant to use to tune the relative
          importance of distance and confidence. If binary_search_steps is large,
          the initial constant is not important.
        """
        # Model geometry: inputs are (image_size, image_size, num_channels),
        # outputs are pre-softmax scores over num_labels classes.
        image_size, num_channels, num_labels = model.image_size, model.num_channels, model.num_labels
        self.sess = sess
        self.TARGETED = targeted
        self.LEARNING_RATE = learning_rate
        self.MAX_ITERATIONS = max_iterations
        self.print_every = print_every
        # Default the early-stop window to 10% of the iteration budget.
        self.early_stop_iters = early_stop_iters if early_stop_iters != 0 else max_iterations // 10
        print("early stop:", self.early_stop_iters)
        self.BINARY_SEARCH_STEPS = binary_search_steps
        self.ABORT_EARLY = abort_early
        self.CONFIDENCE = confidence
        self.initial_const = initial_const
        self.batch_size = batch_size
        self.beta = beta
        self.beta_t = tf.cast(self.beta, tf.float32)
        # With >= 10 search steps, the last step repeats the search at the
        # upper bound (see attack_batch).
        self.repeat = binary_search_steps >= 10
        shape = (batch_size,image_size,image_size,num_channels)
        # these are variables to be more efficient in sending data to tf
        # timg: original images; newimg: current adversarial images (x);
        # slack: FISTA auxiliary iterate (y); tlab: one-hot target labels;
        # const: per-example tradeoff constant c.
        self.timg = tf.Variable(np.zeros(shape), dtype=tf.float32)
        self.newimg = tf.Variable(np.zeros(shape), dtype=tf.float32)
        self.slack = tf.Variable(np.zeros(shape), dtype=tf.float32)
        self.tlab = tf.Variable(np.zeros((batch_size,num_labels)), dtype=tf.float32)
        self.const = tf.Variable(np.zeros(batch_size), dtype=tf.float32)
        # and here's what we use to assign them
        self.assign_timg = tf.placeholder(tf.float32, shape)
        self.assign_newimg = tf.placeholder(tf.float32, shape)
        self.assign_slack = tf.placeholder(tf.float32, shape)
        self.assign_tlab = tf.placeholder(tf.float32, (batch_size,num_labels))
        self.assign_const = tf.placeholder(tf.float32, [batch_size])
        # global_step counts optimizer updates and drives both the FISTA
        # momentum term and the polynomial learning-rate decay.
        self.global_step = tf.Variable(0, trainable=False)
        self.global_step_t = tf.cast(self.global_step, tf.float32)
        """Fast Iterative Soft Thresholding"""
        """--------------------------------"""
        # FISTA momentum coefficient z_t = t / (t + 3).
        self.zt = tf.divide(self.global_step_t, self.global_step_t+tf.cast(3, tf.float32))
        # Element-wise soft-thresholding of (slack - timg) at threshold beta:
        # cond1: difference above beta, cond2: within [-beta, beta],
        # cond3: below -beta.
        cond1 = tf.cast(tf.greater(tf.subtract(self.slack, self.timg),self.beta_t), tf.float32)
        cond2 = tf.cast(tf.less_equal(tf.abs(tf.subtract(self.slack,self.timg)),self.beta_t), tf.float32)
        cond3 = tf.cast(tf.less(tf.subtract(self.slack, self.timg),tf.negative(self.beta_t)), tf.float32)
        # Shrunk values, clipped to the valid pixel range [-0.5, 0.5].
        upper = tf.minimum(tf.subtract(self.slack,self.beta_t), tf.cast(0.5, tf.float32))
        lower = tf.maximum(tf.add(self.slack,self.beta_t), tf.cast(-0.5, tf.float32))
        # NOTE(review): assign_newimg/assign_slack are rebound here from the
        # placeholders created above to computed tensors (the placeholders
        # become dead names); attack_batch still feeds these tensors directly
        # through feed_dict, which TF1 permits -- confirm before refactoring.
        self.assign_newimg = tf.multiply(cond1,upper)+tf.multiply(cond2,self.timg)+tf.multiply(cond3,lower)
        self.assign_slack = self.assign_newimg+tf.multiply(self.zt, self.assign_newimg-self.newimg)
        self.setter = tf.assign(self.newimg, self.assign_newimg)
        self.setter_y = tf.assign(self.slack, self.assign_slack)
        """--------------------------------"""
        # prediction BEFORE-SOFTMAX of the model
        self.output = model.predict(self.newimg)
        self.output_y = model.predict(self.slack)
        # distance to the input data (L2 and L1, per example; the _y
        # variants are measured at the slack iterate)
        self.l2dist = tf.reduce_sum(tf.square(self.newimg-self.timg),[1,2,3])
        self.l2dist_y = tf.reduce_sum(tf.square(self.slack-self.timg),[1,2,3])
        self.l1dist = tf.reduce_sum(tf.abs(self.newimg-self.timg),[1,2,3])
        self.l1dist_y = tf.reduce_sum(tf.abs(self.slack-self.timg),[1,2,3])
        # Elastic-net distance: L2 + beta * L1.
        self.elasticdist = self.l2dist + tf.multiply(self.l1dist, self.beta_t)
        self.elasticdist_y = self.l2dist_y + tf.multiply(self.l1dist_y, self.beta_t)
        # compute the probability of the label class versus the maximum other
        # (the -10000 mask excludes the target class from the max)
        self.real = tf.reduce_sum((self.tlab)*self.output,1)
        self.real_y = tf.reduce_sum((self.tlab)*self.output_y,1)
        self.other = tf.reduce_max((1-self.tlab)*self.output - (self.tlab*10000),1)
        self.other_y = tf.reduce_max((1-self.tlab)*self.output_y - (self.tlab*10000),1)
        if self.TARGETED:
            # if targeted, optimize for making the other class most likely
            loss1 = tf.maximum(0.0, self.other-self.real+self.CONFIDENCE)
            loss1_y = tf.maximum(0.0, self.other_y-self.real_y+self.CONFIDENCE)
        else:
            # if untargeted, optimize for making this class least likely.
            loss1 = tf.maximum(0.0, self.real-self.other+self.CONFIDENCE)
            loss1_y = tf.maximum(0.0, self.real_y-self.other_y+self.CONFIDENCE)
        # sum up the losses
        # (the reduce_sum variants were replaced with per-example tensors;
        # kept here for reference)
        # self.loss21 = tf.reduce_sum(self.l1dist)
        # # self.loss21_y = tf.reduce_sum(self.l1dist_y)
        # self.loss2 = tf.reduce_sum(self.l2dist)
        # self.loss2_y = tf.reduce_sum(self.l2dist_y)
        # self.loss1 = tf.reduce_sum(self.const*loss1)
        # self.loss1_y = tf.reduce_sum(self.const*loss1_y)
        self.loss21 = self.l1dist
        # self.loss21_y = tf.reduce_sum(self.l1dist_y)
        self.loss2 = self.l2dist
        self.loss2_y = self.l2dist_y
        self.loss1 = self.const*loss1
        self.loss1_y = self.const*loss1_y
        # loss_opt (at the slack iterate, no L1 term) is what gradient
        # descent minimizes; the L1 part is handled by the soft-threshold
        # step above. self.loss is the full elastic-net objective used for
        # reporting.
        self.loss_opt = self.loss1_y+self.loss2_y
        self.loss = self.loss1+self.loss2+tf.multiply(self.beta_t,self.loss21)
        print("self.loss = ", self.loss)
        print("self.loss_opt = ", self.loss_opt)
        print("self.loss1_y = ", self.loss1_y)
        print("self.loss2_y = ", self.loss2_y)
        print("self.real = ", self.real)
        print("self.other = ", self.other)
        # Learning rate decays polynomially (sqrt schedule) to 0 over
        # MAX_ITERATIONS steps.
        self.learning_rate = tf.train.polynomial_decay(self.LEARNING_RATE, self.global_step, self.MAX_ITERATIONS, 0, power=0.5)
        # Track variables created by the optimizer so they can be
        # re-initialized between binary-search steps.
        start_vars = set(x.name for x in tf.global_variables())
        optimizer = tf.train.GradientDescentOptimizer(self.learning_rate)
        self.train = optimizer.minimize(self.loss_opt, var_list=[self.slack], global_step=self.global_step)
        end_vars = tf.global_variables()
        new_vars = [x for x in end_vars if x.name not in start_vars]
        # these are the variables to initialize when we run
        self.setup = []
        self.setup.append(self.timg.assign(self.assign_timg))
        self.setup.append(self.tlab.assign(self.assign_tlab))
        self.setup.append(self.const.assign(self.assign_const))
        self.init = tf.variables_initializer(var_list=[self.global_step]+[self.slack]+[self.newimg]+new_vars)
def attack(self, imgs, targets):
"""
Perform the EAD attack on the given images for the given targets.
If self.targeted is true, then the targets represents the target labels.
If self.targeted is false, then targets are the original class labels.
"""
r = []
print('go up to',len(imgs))
for i in range(0,len(imgs),self.batch_size):
print('tick',i)
#r.extend(self.attack_batch(imgs[i:i+self.batch_size], targets[i:i+self.batch_size]))
r.extend(self.attack_batch(imgs[i:i+self.batch_size], targets[i:i+self.batch_size])[0])
return np.array(r)
def attack_batch(self, imgs, labs):
"""
Run the attack on a batch of images and labels.
"""
def compare(x,y):
if not isinstance(x, (float, int, np.int64)):
x = np.copy(x)
if self.TARGETED:
x[y] -= self.CONFIDENCE
else:
x[y] += self.CONFIDENCE
x = np.argmax(x)
if self.TARGETED:
return x == y
else:
return x != y
batch_size = self.batch_size
print("="*10, "batch_size = ", batch_size, "="*10)
# # convert to tanh-space
# imgs = np.arctanh(imgs*1.999999)
# set the lower and upper bounds accordingly
lower_bound = np.zeros(batch_size)
CONST = np.ones(batch_size)*self.initial_const
upper_bound = np.ones(batch_size)*1e10
n_success = 0
# the best l2, score, and image attack
o_bestl1 = [1e10]*batch_size
o_bestscore = [-1]*batch_size
o_bestattack = [np.zeros(imgs[0].shape)]*batch_size
# fill the array as nan to indicate attack failure
for b in o_bestattack:
b.fill(np.nan)
o_best_const = [self.initial_const]*batch_size
for outer_step in range(self.BINARY_SEARCH_STEPS):
print("current best l1", o_bestl1)
# completely reset adam's internal state.
self.sess.run(self.init)
batch = imgs[:batch_size]
batchlab = labs[:batch_size]
bestl1 = [1e10]*batch_size
bestscore = [-1]*batch_size
# The last iteration (if we run many steps) repeat the search once.
if self.repeat == True and outer_step == self.BINARY_SEARCH_STEPS-1:
CONST = upper_bound
# set the variables so that we don't have to send them over again
self.sess.run(self.setup, {self.assign_timg: batch,
self.assign_tlab: batchlab,
self.assign_const: CONST})
self.sess.run(self.setter, feed_dict={self.assign_newimg: batch})
self.sess.run(self.setter_y, feed_dict={self.assign_slack: batch})
prev = 1e6
train_timer = 0.0
for iteration in range(self.MAX_ITERATIONS):
# print out the losses every 10%
# print("iteration = ", iteration)
if iteration%(self.MAX_ITERATIONS//self.print_every) == 0:
# print(iteration,self.sess.run((self.loss,self.real,self.other,self.loss1,self.loss2)))
# grad = self.sess.run(self.grad_op)
# old_modifier = self.sess.run(self.modifier)
# np.save('white_iter_{}'.format(iteration), modifier)
loss, real, other, loss1, loss2, loss21 = self.sess.run((self.loss,self.real,self.other,self.loss1,self.loss2, self.loss21))
# print("loss = ", loss)
# print("real = ", real)
# print("other = ", other)
# print("loss1 = ", loss1)
# print("loss2 = ", loss2)
if self.batch_size == 1:
print("[STATS][L2] iter = {}, time = {:.3f}, loss = {:.5g}, real = {:.5g}, other = {:.5g}, loss1 = {:.5g}, loss2 = {:.5g}, loss21 = {:.5g}".format(iteration, train_timer, loss[0], real[0], other[0], loss1[0], loss2[0], loss21[0]))
#print("[STATS][L2] iter = {}, time = {:.3f}, real = {:.5g}, other = {:.5g}".format(iteration, train_timer, real[0], other[0]))
elif self.batch_size > 10:
print("[STATS][L2][SUM of {}] iter = {}, time = {:.3f}, batch_size = {}, n_success = {:.5g}, loss = {:.5g}, real = {:.5g}, other = {:.5g}, loss1 = {:.5g}, loss2 = {:.5g}, loss21 = {:.5g}".format(self.batch_size, iteration, train_timer, batch_size, n_success, sum(loss), sum(real), sum(other), sum(loss1), sum(loss2), sum(loss21)))
# print("[STATS][L2][SUM of {}] iter = {}, time = {:.3f}, batch_size = {}, n_success = {:.5g}, real = {:.5g}, other = {:.5g}".format(self.batch_size, iteration, train_timer, batch_size, n_success, sum(real), sum(other)))
else:
print("[STATS][L2] iter = {}, time = {:.3f}".format(iteration, train_timer))
print("[STATS][L2] real =", real)
print("[STATS][L2] other =", other)
print("[STATS][L2] loss1 =", loss1)
print("[STATS][L2] loss2 =", loss2)
print("[STATS][L2] loss21 =", loss21)
print("[STATS][L2] loss =", loss)
sys.stdout.flush()
attack_begin_time = time.time()
# perform the attack
self.sess.run([self.train])
self.sess.run([self.setter, | |
# <reponame>SmartElect/SmartElect  (dataset artifact, commented out so the module parses)
# -*- coding: utf-8 -*-
# Python imports
import base64
from decimal import Decimal, InvalidOperation
import functools
from http.client import UNAUTHORIZED
import logging
import random
import re
import string
# Django imports
from django.conf import settings
from django.contrib.auth.models import Permission
from django.contrib.auth.views import redirect_to_login
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ValidationError
from django.db.models import Q
from django.db.models.fields import FieldDoesNotExist
from django.http import HttpResponseBadRequest, HttpResponse
from django.template.defaultfilters import yesno, capfirst
from django.utils.functional import lazy
from django.utils.translation import get_language, ugettext as _
from django.utils.timezone import now
from django.views.decorators.debug import sensitive_variables
# 3rd party imports
from braces.views import PermissionRequiredMixin, MultiplePermissionsRequiredMixin
from pytz import utc
# This project's imports
from .constants import ARABIC_COMMA
# Convenience alias for the HTTP 400 status code.
BAD_REQUEST = HttpResponseBadRequest.status_code
# Module-level logger, per the standard logging convention.
logger = logging.getLogger(__name__)
def ensure_unique(model, instance, fieldname, **kwargs):
    """Ensure no other undeleted record of ``model`` has the same value of
    ``fieldname`` as ``instance``.

    :param kwargs: additional query params when checking for dupes
    :raises ValidationError: if a duplicate undeleted record exists
    """
    if instance.deleted:
        # Deleted records are exempt from the uniqueness check.
        return
    filters = {
        'deleted': False,
        fieldname: getattr(instance, fieldname),
    }
    filters.update(**kwargs)
    dupes = model.objects.filter(**filters)
    if instance.pk:
        # Don't count the instance itself when it is already saved.
        dupes = dupes.exclude(pk=instance.pk)
    if dupes.exists():
        verbose_name = model._meta.get_field(fieldname).verbose_name
        msg = _("Duplicate value for {fieldname}").format(fieldname=verbose_name)
        raise ValidationError(msg)
def get_permission_object_by_name(name,
                                  permission_class=None,
                                  contenttype_class=None,
                                  create_if_needed=False):
    """Given a Django permission name like `app_label.change_thing`,
    return its Permission object.

    You can pass in the Permission class when using this from a migration.
    Pass in create_if_needed=True to have the permission created if it doesn't exist.

    :param name: dotted permission name, "app_label.codename"
    :param permission_class: Permission model to query (defaults to
        django.contrib.auth.models.Permission)
    :param contenttype_class: ContentType model used when creating a missing
        permission (defaults to ContentType)
    :param create_if_needed: if True, create the permission (and its content
        type) when it does not already exist
    :raises Permission.DoesNotExist: when the permission is missing and
        create_if_needed is False
    """
    app_label, codename = name.split(".", 1)
    if not permission_class:
        permission_class = Permission
    # (content_type.app_label, codename) is unique for Django permissions.
    try:
        return permission_class.objects.get(content_type__app_label=app_label,
                                            codename=codename)
    except permission_class.DoesNotExist:
        if create_if_needed:
            if not contenttype_class:
                contenttype_class = ContentType
            # Codename convention is "<action>_<model>", e.g. "add_user".
            perm_name, model_name = codename.split("_", 1)
            ct, unused = contenttype_class.objects.get_or_create(app_label=app_label,
                                                                 model=model_name)
            # Come up with a permission name. E.g. if code name is 'add_user',
            # the full name might be 'Can add user'
            full_name = "Can %s %s" % (perm_name, model_name)
            return permission_class.objects.create(content_type=ct, codename=codename,
                                                   name=full_name)
        # Use the module logger rather than print() so the failure is captured
        # by whatever logging configuration is active.
        logger.error("NO SUCH PERMISSION: %s, %s", app_label, codename)
        raise
def permission_names_to_objects(names):
    """
    Given an iterable of permission names (e.g. 'app_label.add_model'),
    return an iterable of Permission objects for them.
    """
    return list(map(get_permission_object_by_name, names))
def astz(dt, tz):
    """Express the moment `dt` in terms of the timezone `tz`.

    :param dt: an aware datetime object
    :param tz: a pytz-style timezone object (must provide `normalize`)
    :return: a datetime object representing the same instant in `tz`
    """
    # pytz timezones need normalize() applied after astimezone(); see
    # http://pythonhosted.org/pytz/ for why this is not as trivial as it
    # might first appear.
    converted = dt.astimezone(tz)
    return tz.normalize(converted)
def max_non_none_datetime(*values):
    """
    Of the given datetime objects, return the one occurring latest in
    absolute time, ignoring None values.  Returns None if nothing is left.
    """
    # Pair each value with its UTC equivalent so comparison is well-defined
    # even when the inputs carry different timezones.
    utc_pairs = [(astz(value, utc), value) for value in values if value is not None]
    if not utc_pairs:
        return None
    # max() compares the UTC timestamps first; hand back the original object.
    return max(utc_pairs)[1]
def min_non_none_datetime(*values):
    """
    Of the given datetime objects, return the one occurring earliest in
    absolute time, ignoring None values.  Returns None if nothing is left.
    """
    # Pair each value with its UTC equivalent so comparison is well-defined
    # even when the inputs carry different timezones.
    utc_pairs = [(astz(value, utc), value) for value in values if value is not None]
    if not utc_pairs:
        return None
    # min() compares the UTC timestamps first; hand back the original object.
    return min(utc_pairs)[1]
class FormErrorReturns400Mixin(object):
    """View mixin: when a form fails validation, render the usual page but
    with HTTP status 400 so clients can detect the failure from the status."""
    def form_invalid(self, form):
        # If the form is not valid, return the usual page but with a 400 status
        return self.render_to_response(self.get_context_data(form=form), status=BAD_REQUEST)
# Number of decimal places kept for latitude/longitude values.
NUM_LATLONG_DECIMAL_PLACES = 8
# Decimal exponent used with Decimal.quantize() to truncate to that precision.
LATLONG_QUANTIZE_PLACES = Decimal(10) ** -NUM_LATLONG_DECIMAL_PLACES
# Largest magnitude accepted for either coordinate (longitudes span ±180).
MAX_LATLONG = Decimal('180.0')
def parse_latlong(value):
    """Parse `value` (a decimal string or a float) into a Decimal truncated
    to the number of places we keep for lat/long values.

    :param value: string or float representing degrees
    :return: Decimal quantized to LATLONG_QUANTIZE_PLACES
    :raises InvalidOperation: if the value cannot be parsed or exceeds 180
    """
    quantized = Decimal(value).quantize(LATLONG_QUANTIZE_PLACES)
    if quantized <= MAX_LATLONG:
        return quantized
    raise InvalidOperation("Lat or long too large")
def cleanup_lat_or_long(latlng):
    """
    Given character string that is supposed to contain a latitude or longitude,
    return either a valid Decimal value, or None.
    Note: This assumes E/N and does not handle anything west of Greenwich or
    south of the equator! If the input has a - or W or S in it, it'll probably
    just fail to recognize it as a valid coordinate and return None.

    Formats are tried in order until one parses:
      1. plain decimal degrees (contains '.')
      2. exactly six digits, read as DDMMSS
      3. deg/min/sec.fraction separated by arbitrary non-digit runs
      4. 4, 3 or 2 loose digit groups (D M S.f / D M S / D M)
    """
    # Strip whitespace and degree signs
    s = latlng.strip().rstrip('E\xb0')
    # If nothing left, we have no data.
    if len(s) == 0:
        return None
    d = None
    # NOTE(review): this first "if d is None" is trivially true right after
    # the assignment above; it is kept for symmetry with the fallbacks below.
    if d is None:
        # See if it's a simple decimal value
        if '.' in s:
            try:
                d = parse_latlong(s)
            except InvalidOperation:
                pass
    if d is None:
        # 290250
        # 204650
        # Assume DDMMSS
        m = re.match(r'^(\d\d)(\d\d)(\d\d)$', s)
        if m:
            val = float(m.group(1)) + float(m.group(2)) / 60.0 + float(m.group(3)) / 3600.0
            d = parse_latlong(val)
    if d is None:
        # 12°37'49.30"
        # 20° 6'9.54"E
        # 20°29'33.84"E
        # 10ْ .05 30 63
        # 12° 2'54.62"
        # 12°37'7.00"
        # Assume the format is: degrees minutes seconds.milliseconds
        m = re.match(r'^(\d\d?)\D+(\d\d?)\D+(\d\d?)\D+(\d\d?)$', s)
        if m:
            parts = m.groups()
            val = (float(parts[0])
                   + float(parts[1]) / 60.0
                   + float('%s.%s' % (parts[2], parts[3])) / 3600.0)
            d = parse_latlong(val)
    if d is None:
        # Pick out the groups of digits
        parts = _extract_numerals(s)
        if len(parts) == 4:
            # 12°37'49.30"
            # 20° 6'9.54"E
            # 20°29'33.84"E
            # 10ْ .05 30 63
            # 12° 2'54.62"
            # 12°37'7.00"
            # Assume the format is: degrees minutes seconds.fractionalseconds
            val = (float(parts[0])
                   + float(parts[1]) / 60.0
                   + float('%s.%s' % (parts[2], parts[3])) / 3600.0)
            d = parse_latlong(val)
        elif len(parts) == 3:
            # 12ْ 14 23
            # 14ْ 25 816
            # 57 " .579" .12
            # 32ْ 453 700
            # Hmm - assume degrees minutes seconds?
            if float(parts[1]) > 60.0 or float(parts[2]) > 60.0:
                # Just makes no sense - ignore it
                return None
            val = (float(parts[0])
                   + float(parts[1]) / 60.0
                   + float(parts[2]) / 3600.0)
            d = parse_latlong(val)
        elif len(parts) == 2:
            # 12° 2
            d = parse_latlong(float(parts[0]) + float(parts[1]) / 60.0)
    if d is None:
        return None
    # Final sanity bound; mirrors the check inside parse_latlong.
    if d > Decimal('180.0'):
        return None
    return d
# Matches a single non-digit character (Unicode-aware, so Eastern Arabic
# numerals count as digits); used to split text into runs of digits.
NONDIGITS_RE = re.compile(r'[^\d]', flags=re.UNICODE)
@sensitive_variables()
def clean_input_msg(msg_text):
    """Process a user's message: find every digit run, convert each to a
    canonical American decimal string, and join them with '*'.

    No validation happens here; that is left to the handlers.
    """
    canonical = (str(int(token)) for token in _extract_numerals(msg_text))
    return '*'.join(canonical)
def _extract_numerals(msg_text):
"""Return a list of all strings of numerals. Works on american and
eastern arabic numerals (Python FTW!)
"""
# split string using any non-digits as a delimiter, then drop the empty strings
number_list = [n for n in NONDIGITS_RE.split(msg_text) if n]
return number_list
def get_now():
    """Return the current time via django.utils.timezone.now (timezone-aware
    assuming USE_TZ is enabled in settings)."""
    # make sure this is timezone-aware
    return now()
class LoginPermissionRequiredMixin(PermissionRequiredMixin):
    """Combines LoginRequiredMixin and PermissionRequiredMixin, according to our rules.
    When an unauthenticated user visits a page that requires login, s/he gets redirected to the
    login page.
    When an authenticated user lacks the permission for a page, s/he gets a 403.
    In contrast to the LoginRequiredMixin and PermissionRequiredMixin, the subclass need not
    set the raise_exception attribute. (It's ignored.)
    """
    def dispatch(self, request, *args, **kwargs):
        # User has to be logged in
        if not request.user.is_authenticated:
            return redirect_to_login(request.get_full_path(),
                                     self.get_login_url(),
                                     self.get_redirect_field_name())
        # Force raise_exception to be True when invoking PermissionRequiredMixin.
        # With raise_exception set, a failed permission check yields a 403
        # rather than another redirect to the login page.
        self.raise_exception = True
        return super(LoginPermissionRequiredMixin, self).dispatch(request, *args, **kwargs)
class LoginMultiplePermissionsRequiredMixin(MultiplePermissionsRequiredMixin):
"""Combines LoginRequiredMixin and MultiplePermissionsRequiredMixin, according to our rules.
When an unauthenticated user visits a page that requires login, s/he gets redirected to the
login page.
When an authenticated user lacks the permission for a page, s/he gets a 403.
In contrast to the LoginRequiredMixin and PermissionRequiredMixin, the subclass need not
set the raise_exception attribute. (It's ignored.)
Also provides the non-standard pre_dispatch_check which runs before invoking
the parent dispatch method, and if it returns a response, will
return that instead of calling parent dispatch.
"""
def pre_dispatch_check(self, request, *args, **kwargs):
return None
def dispatch(self, request, *args, **kwargs):
# User has to be logged in
if not request.user.is_authenticated:
return redirect_to_login(request.get_full_path(),
| |
"",
"SMKE" : "",
"LEHC" : "HSK",
"LRBG" : "",
"GUFA" : "FIG",
"SYOR" : "ORJ",
"ZUYB" : "YBP",
"KTPL" : "TPL",
"KTPA" : "TPA",
"KIPP" : "",
"KIPL" : "IPL",
"NZWO" : "",
"HEBL" : "ABS",
"HEBA" : "HBE",
"CNU8" : "NU8",
"OOKB" : "KHS",
"KHVN" : "HVN",
"KHVR" : "HVR",
"EINC" : "",
"EDJA" : "FMM",
"RCCM" : "CMJ",
"EINN" : "SNN",
"HSSW" : "WHF",
"YWYM" : "",
"YWYY" : "BWT",
"YDKI" : "DKI",
"LFVP" : "FSP",
"VOTX" : "",
"EHVK" : "",
"FLMF" : "MFU",
"NZQN" : "ZQN",
"VOTV" : "TRV",
"VOTP" : "TIR",
"VOTR" : "TRZ",
"VOTJ" : "",
"LFVM" : "MQC",
"EDQT" : "",
"KBXS" : "BXS",
"FZAM" : "MAT",
"EDQD" : "BYU",
"EDQE" : "",
"EDQF" : "",
"EDQM" : "HOQ",
"OOTH" : "TTH",
"KBXK" : "BXK",
"FZAJ" : "BOA",
"KDDC" : "DDC",
"ENDU" : "BDU",
"EGPT" : "PSL",
"EGPU" : "TRE",
"VYNS" : "",
"EGPW" : "UNT",
"VYNT" : "NYT",
"EGPR" : "BRR",
"EGPL" : "BEB",
"EGPM" : "SDZ",
"EGPN" : "DND",
"EGPO" : "SYY",
"EGPH" : "EDI",
"EGPI" : "ILY",
"EGPK" : "PIK",
"EGPD" : "ABZ",
"EGPE" : "INV",
"EGPF" : "GLA",
"EGPA" : "KOI",
"EGPB" : "LSI",
"EGPC" : "WIC",
"CYQM" : "YQM",
"CYQK" : "YQK",
"SVRS" : "LRV",
"SPBC" : "LHC",
"ZSFZ" : "FOC",
"ZSFY" : "FUG",
"DNJO" : "JOS",
"SPBR" : "",
"UHMH" : "",
"UJAP" : "UJE",
"KSRQ" : "SRQ",
"KSRR" : "SRR",
"KJBR" : "JBR",
"CNH2" : "",
"UHMI" : "",
"FZSA" : "KMN",
"UWOD" : "",
"UWOH" : "",
"UWOO" : "REN",
"UWOR" : "OSW",
"KWST" : "WST",
"SBES" : "",
"SBEK" : "",
"CZKE" : "ZKE",
"MMJA" : "JAL",
"SBEG" : "MAO",
"TJAB" : "ARE",
"KMLT" : "MLT",
"KMLU" : "MLU",
"NZNR" : "NPE",
"NZNS" : "NSN",
"NZNP" : "NPL",
"KMLS" : "MLS",
"KMLI" : "MLI",
"KMLJ" : "MLJ",
"KMLD" : "MLD",
"KMLB" : "MLB",
"KMLC" : "MLC",
"UIAA" : "HTA",
"KEUF" : "EUF",
"UIAR" : "",
"SDSC" : "QSC",
"SKBU" : "BUN",
"CCP4" : "YHA",
"LYBE" : "BEG",
"SEMO" : "",
"UTSS" : "SKD",
"KGSB" : "GSB",
"UTST" : "TMJ",
"SEMH" : "MCH",
"KGSO" : "GSO",
"SEMC" : "XMS",
"SEMA" : "",
"UTSB" : "BHK",
"UTSA" : "NVI",
"SEMX" : "",
"PTSA" : "KSA",
"SEMT" : "MEC",
"UTSN" : "AFS",
"UTSL" : "KSQ",
"CYMG" : "YMG",
"AYTK" : "RAB",
"YLST" : "LER",
"AYTB" : "TBG",
"AYTA" : "TIZ",
"KEYW" : "EYW",
"KPOB" : "POB",
"KPOC" : "POC",
"KPOE" : "POE",
"KPOF" : "POF",
"OAJL" : "JAA",
"LBWN" : "VAR",
"BING" : "",
"BINF" : "NOR",
"SKFL" : "FLA",
"HKEL" : "EDL",
"HKEM" : "",
"YSRI" : "RCM",
"YSRN" : "SRN",
"ZHLY" : "LYA",
"KFST" : "FST",
"NGTU" : "BBG",
"UARR" : "URA",
"EKYT" : "AAL",
"NGTM" : "TMN",
"KFSD" : "FSD",
"UHBB" : "BQS",
"KLKV" : "",
"NGTE" : "TBF",
"KFSM" : "FSM",
"NGTA" : "TRW",
"KFSI" : "FSI",
"GAA1" : "",
"KDUJ" : "DUJ",
"FNDU" : "DUE",
"MMEP" : "TPQ",
"MTRL" : "",
"BRKM" : "",
"KDUG" : "DUG",
"BRKV" : "",
"DFOO" : "BOY",
"SUPE" : "MDO",
"EISG" : "SXL",
"MYCI" : "CRI",
"MYCB" : "CAT",
"MYCA" : "ATC",
"LPLA" : "TER",
"KFCM" : "FCM",
"KVPS" : "VPS",
"GOOK" : "KLC",
"YMLT" : "LST",
"SWJI" : "JPR",
"USSS" : "SVX",
"KVPC" : "VPC",
"CYWP" : "YWP",
"MSLP" : "SAL",
"CYWY" : "YWY",
"SAAV" : "SFN",
"KUMP" : "UMP",
"SAAR" : "ROS",
"SAAP" : "PRA",
"CYWA" : "YWA",
"PHNG" : "NGF",
"SAAJ" : "",
"CYWG" : "YWG",
"SAAG" : "GHU",
"CYWH" : "YWH",
"CYWK" : "YWK",
"CYWJ" : "YWJ",
"SAAC" : "COC",
"CYWL" : "YWL",
"WIPP" : "PLM",
"WIPQ" : "PDO",
"WIPR" : "RGT",
"WIPT" : "PDG",
"WIPK" : "PGK",
"WIPL" : "BKS",
"WIPA" : "DJB",
"YBTI" : "BRT",
"YBTH" : "BHS",
"YBTL" : "TSV",
"ZBSH" : "SHP",
"ZBSJ" : "SJW",
"KGLS" : "GLS",
"ZBSN" : "TVS",
"KGLD" : "GLD",
"YBTR" : "BLT",
"FVTL" : "GWE",
"YTGM" : "XTG",
"VEIM" : "IMF",
"ZLHZ" : "HZG",
"SVIE" : "",
"VHSK" : "",
"VHST" : "",
"KTNT" : "TNT",
"KTNX" : "TNX",
"KIRK" : "IRK",
"KCHO" : "CHO",
"KHTL" : "HTL",
"KCHD" : "",
"KCHA" : "CHA",
"OEHL" : "HAS",
"KHTS" : "HTS",
"KCHS" : "CHS",
"KHII" : "HII",
"KHIO" : "HIO",
"VORY" : "RJA",
"LFTW" : "FNI",
"LFTU" : "FRJ",
"LFTZ" : "LTT",
"EDVU" : "",
"LKTB" : "BRQ",
"LFTF" : "",
"LFTH" : "TLN",
"NZSP" : "",
"FLCP" : "CIP",
"KNXX" : "NXX",
"KNXP" : "NXP",
"LTBY" : "AOE",
"EPGO" : "",
"EPGD" : "GDN",
"EDSB" : "FKB",
"KBZN" : "BZN",
"EPGR" : "",
"SLBJ" : "BJO",
"KPRC" : "PRC",
"HALA" : "AWA",
"MHUT" : "UII",
"HALL" : "LLI",
"ENJA" : "ZXB",
"FYWE" : "ERS",
"FYWB" : "WVB",
"HTDA" : "DAR",
"HTDO" : "DOD",
"FYWV" : "WDH",
"VLPS" : "PKZ",
"VLPV" : "",
"YAMB" : "",
"MLIP" : "MIJ",
"MBMC" : "MDS",
"ZSDY" : "DOY",
"KSPW" : "SPW",
"GMTT" : "TNG",
"KSPS" : "SPS",
"KSPZ" : "SPZ",
"KSPF" : "SPF",
"KSPG" : "SPG",
"KSPB" : "SPB",
"CJX7" : "YAB",
"GMTN" : "TTU",
"KSPI" : "SPI",
"HBBA" : "BJM",
"FZUA" : "KGA",
"FZUK" : "TSH",
"CNJ4" : "",
"VTPI" : "",
"CZMT" : "ZMT",
"CZML" : "ZML",
"EDCL" : "",
"CZMD" : "MSA",
"MMHC" : "TCN",
"FABR" : "",
"SBGO" : "GYN",
"SBGL" : "GIG",
"SBGM" : "",
"EDCP" : "PEF",
"FABE" : "BIY",
"SBGW" : "",
"SBGU" : "GPB",
"SBGR" : "GRU",
"SBGP" : "",
"FABB" : "",
"FABM" : "",
"FABL" : "BFN",
"EFUT" : "QVY",
"KMNM" : "MNM",
"RKTN" : "TAE",
"RKTH" : "KPO",
"VGZR" : "DAC",
"RKTY" : "YEC",
"EBFN" : "",
"RKTU" : "CJJ",
"KXNA" : "XNA",
"FAMM" : "MBD",
"ZWHM" : "HMI",
"KRSW" : "RSW",
"KRST" : "RST",
"NFTV" : "VAV",
"NFTP" : "NTT",
"LRTC" : "TCE",
"LRTM" : "TGM",
"NFTE" : "EUA",
"NFTF" : "TBU",
"LRTR" : "TSR",
"NFTL" : "HPA",
"KGQQ" : "GQQ",
"KPMP" : "PMP",
"LRT2" : "CIO",
"KPMD" : "PMD",
"KPMB" : "PMB",
"VETZ" : "TEZ",
"BIHN" : "HFN",
"HKGX" : "",
"HKGT" : "",
"OATN" : "TII",
"BIHU" : "HZK",
"HKGA" : "GAS",
"YSTW" : "TMW",
"SURV" : "RVY",
"KAGS" : "AGS",
"MBPV" : "PLS",
"KAGC" : "AGC",
"HDAM" : "JIB",
"KLIT" : "LIT",
"RJOT" : "TAK",
"RJOW" : "IWJ",
"RJOS" : "TKS",
"RJOR" : "TTJ",
"TNCS" : "SAB",
"RJOY" : "",
"RJOZ" : "",
"RJOE" : "",
"RJOF" : "",
"RJOA" : "HIJ",
"RJOC" : "IZO",
"RJOB" : "OKJ",
"RJOM" : "MYJ",
"TNCB" : "BON",
"RJOO" : "ITM",
"RJOI" : "",
"RJOH" : "YGJ",
"RJOK" : "KCZ",
"NGAB" : "ABF",
"SACO" : "COR",
"YHML" : "HLT",
"SACT" : "",
"MYAT" : "TCB",
"EYPP" : "PNV",
"MYAP" : "AXP",
"MYAS" : "",
"MYAF" : "ASD",
"MYAB" : "",
"MYAM" : "MHH",
"MYAN" : "SAQ",
"MYAK" : "COX",
"KP52" : "P52",
"SWHT" : "HUW",
"KVRB" : "VRB",
"CYYZ" : "YYZ",
"CYYY" : "YYY",
"OPNH" : "WNS",
"CYYW" : "YYW",
"CYYU" : "YYU",
"CYYT" : "YYT",
"CYYR" : "YYR",
"CYYQ" : "YYQ",
"CYYN" : "YYN",
"CYYL" : "YYL",
"CYYJ" : "YYJ",
"CYYH" : "YYH",
"CYYG" : "YYG",
"CYYF" : "YYF",
"CYYE" : "YYE",
"CYYD" : "YYD",
"CYYC" : "YYC",
"CYYB" : "YYB",
"DISP" : "SPY",
"FTTY" : "FYT",
"KMPT" : "",
"FTTD" : "MQQ",
"FTTC" : "AEH",
"FTTA" : "SRH",
"FTTN" : "AMC",
"FTTJ" : "NDJ",
"OTBH" : "IUD",
"YIVL" : "IVR",
"EAR" : "EAR",
"VEKU" : "IXS",
"VEKR" : "IXH",
"OSDZ" : "DEZ",
"PHBG" : "",
"OSDI" : "DAM",
"PGUA" : "UAM",
"KITH" : "ITH",
"BGJN" : "JAV",
"SSFB" : "FBE",
"MGSJ" : "",
"BGJH" : "JJU",
"FNCA" : "CAB",
"OIZI" : "",
"UKBB" : "KBP",
"VAGN" : "",
"VAGO" : "GOI",
"ZGKL" : "KWL",
"LELL" : "QSA",
"LELO" : "RJL",
"LLHS" : "",
"LELC" : "MJV",
"LLHA" : "HFA",
"YXCM" : "DEE",
"LILY" | |
+ (R_values[31] / (1 + w * 1j * t_values[31]))
+ (R_values[32] / (1 + w * 1j * t_values[32]))
+ (R_values[33] / (1 + w * 1j * t_values[33]))
+ (R_values[34] / (1 + w * 1j * t_values[34]))
+ (R_values[35] / (1 + w * 1j * t_values[35]))
+ (R_values[36] / (1 + w * 1j * t_values[36]))
+ (R_values[37] / (1 + w * 1j * t_values[37]))
+ (R_values[38] / (1 + w * 1j * t_values[38]))
+ (R_values[39] / (1 + w * 1j * t_values[39]))
+ (R_values[40] / (1 + w * 1j * t_values[40]))
+ (R_values[41] / (1 + w * 1j * t_values[41]))
+ (R_values[42] / (1 + w * 1j * t_values[42]))
+ (R_values[43] / (1 + w * 1j * t_values[43]))
+ (R_values[44] / (1 + w * 1j * t_values[44]))
+ (R_values[45] / (1 + w * 1j * t_values[45]))
+ (R_values[46] / (1 + w * 1j * t_values[46]))
+ (R_values[47] / (1 + w * 1j * t_values[47]))
+ (R_values[48] / (1 + w * 1j * t_values[48]))
+ (R_values[49] / (1 + w * 1j * t_values[49]))
+ (R_values[50] / (1 + w * 1j * t_values[50]))
+ (R_values[51] / (1 + w * 1j * t_values[51]))
+ (R_values[52] / (1 + w * 1j * t_values[52]))
+ (R_values[53] / (1 + w * 1j * t_values[53]))
+ (R_values[54] / (1 + w * 1j * t_values[54]))
+ (R_values[55] / (1 + w * 1j * t_values[55]))
+ (R_values[56] / (1 + w * 1j * t_values[56]))
+ (R_values[57] / (1 + w * 1j * t_values[57]))
+ (R_values[58] / (1 + w * 1j * t_values[58]))
+ (R_values[59] / (1 + w * 1j * t_values[59]))
+ (R_values[60] / (1 + w * 1j * t_values[60]))
+ (R_values[61] / (1 + w * 1j * t_values[61]))
+ (R_values[62] / (1 + w * 1j * t_values[62]))
+ (R_values[63] / (1 + w * 1j * t_values[63]))
+ (R_values[64] / (1 + w * 1j * t_values[64]))
+ (R_values[65] / (1 + w * 1j * t_values[65]))
+ (R_values[66] / (1 + w * 1j * t_values[66]))
+ (R_values[67] / (1 + w * 1j * t_values[67]))
+ (R_values[68] / (1 + w * 1j * t_values[68]))
+ (R_values[69] / (1 + w * 1j * t_values[69]))
+ (R_values[70] / (1 + w * 1j * t_values[70]))
+ (R_values[71] / (1 + w * 1j * t_values[71]))
+ (R_values[72] / (1 + w * 1j * t_values[72]))
+ (R_values[73] / (1 + w * 1j * t_values[73]))
+ (R_values[74] / (1 + w * 1j * t_values[74]))
+ (R_values[75] / (1 + w * 1j * t_values[75]))
+ (R_values[76] / (1 + w * 1j * t_values[76]))
+ (R_values[77] / (1 + w * 1j * t_values[77]))
+ (R_values[78] / (1 + w * 1j * t_values[78]))
+ (R_values[79] / (1 + w * 1j * t_values[79]))
)
### Fitting Functions
##
#
def KK_RC2_fit(params, w, t_values):
    """Kramers-Kronig test circuit: series resistance plus 2 -RC- elements.

    Z(w) = Rs + sum_k R_k / (1 + j*w*tau_k), k = 1..2

    :param params: mapping with keys "Rs" and "R1".."R2"
    :param w: angular frequency (scalar or array)
    :param t_values: time constants; t_values[k-1] pairs with R_k
    """
    impedance = params["Rs"]
    for k in range(2):
        impedance = impedance + params["R%d" % (k + 1)] / (1 + w * 1j * t_values[k])
    return impedance
def KK_RC3_fit(params, w, t_values):
    """Kramers-Kronig test circuit: series resistance plus 3 -RC- elements.

    Z(w) = Rs + sum_k R_k / (1 + j*w*tau_k), k = 1..3

    :param params: mapping with keys "Rs" and "R1".."R3"
    :param w: angular frequency (scalar or array)
    :param t_values: time constants; t_values[k-1] pairs with R_k
    """
    impedance = params["Rs"]
    for k in range(3):
        impedance = impedance + params["R%d" % (k + 1)] / (1 + w * 1j * t_values[k])
    return impedance
def KK_RC4_fit(params, w, t_values):
    """Kramers-Kronig test circuit: series resistance plus 4 -RC- elements.

    Z(w) = Rs + sum_k R_k / (1 + j*w*tau_k), k = 1..4

    :param params: mapping with keys "Rs" and "R1".."R4"
    :param w: angular frequency (scalar or array)
    :param t_values: time constants; t_values[k-1] pairs with R_k
    """
    impedance = params["Rs"]
    for k in range(4):
        impedance = impedance + params["R%d" % (k + 1)] / (1 + w * 1j * t_values[k])
    return impedance
def KK_RC5_fit(params, w, t_values):
    """Kramers-Kronig test circuit: series resistance plus 5 -RC- elements.

    Z(w) = Rs + sum_k R_k / (1 + j*w*tau_k), k = 1..5

    :param params: mapping with keys "Rs" and "R1".."R5"
    :param w: angular frequency (scalar or array)
    :param t_values: time constants; t_values[k-1] pairs with R_k
    """
    impedance = params["Rs"]
    for k in range(5):
        impedance = impedance + params["R%d" % (k + 1)] / (1 + w * 1j * t_values[k])
    return impedance
def KK_RC6_fit(params, w, t_values):
    """Kramers-Kronig test circuit: series resistance plus 6 -RC- elements.

    Z(w) = Rs + sum_k R_k / (1 + j*w*tau_k), k = 1..6

    :param params: mapping with keys "Rs" and "R1".."R6"
    :param w: angular frequency (scalar or array)
    :param t_values: time constants; t_values[k-1] pairs with R_k
    """
    impedance = params["Rs"]
    for k in range(6):
        impedance = impedance + params["R%d" % (k + 1)] / (1 + w * 1j * t_values[k])
    return impedance
def KK_RC7_fit(params, w, t_values):
    """Kramers-Kronig test circuit: series resistance plus 7 -RC- elements.

    Z(w) = Rs + sum_k R_k / (1 + j*w*tau_k), k = 1..7

    :param params: mapping with keys "Rs" and "R1".."R7"
    :param w: angular frequency (scalar or array)
    :param t_values: time constants; t_values[k-1] pairs with R_k
    """
    impedance = params["Rs"]
    for k in range(7):
        impedance = impedance + params["R%d" % (k + 1)] / (1 + w * 1j * t_values[k])
    return impedance
def KK_RC8_fit(params, w, t_values):
    """Kramers-Kronig test circuit: series resistance plus 8 -RC- elements.

    Z(w) = Rs + sum_k R_k / (1 + j*w*tau_k), k = 1..8

    :param params: mapping with keys "Rs" and "R1".."R8"
    :param w: angular frequency (scalar or array)
    :param t_values: time constants; t_values[k-1] pairs with R_k
    """
    impedance = params["Rs"]
    for k in range(8):
        impedance = impedance + params["R%d" % (k + 1)] / (1 + w * 1j * t_values[k])
    return impedance
def KK_RC9_fit(params, w, t_values):
    """Kramers-Kronig test circuit: series resistance plus 9 -RC- elements.

    Z(w) = Rs + sum_k R_k / (1 + j*w*tau_k), k = 1..9

    :param params: mapping with keys "Rs" and "R1".."R9"
    :param w: angular frequency (scalar or array)
    :param t_values: time constants; t_values[k-1] pairs with R_k
    """
    impedance = params["Rs"]
    for k in range(9):
        impedance = impedance + params["R%d" % (k + 1)] / (1 + w * 1j * t_values[k])
    return impedance
def KK_RC10_fit(params, w, t_values):
"""
Kramers-Kronig Function: -RC-
<NAME> (<EMAIL> / <EMAIL>)
"""
Rs = params["Rs"]
R1 = params["R1"]
R2 = params["R2"]
R3 = params["R3"]
R4 = params["R4"]
R5 = params["R5"]
R6 = params["R6"]
R7 = params["R7"]
R8 = params["R8"]
R9 = params["R9"]
R10 = params["R10"]
return (
Rs
+ (R1 / (1 + w * 1j * t_values[0]))
+ (R2 / (1 + w * | |
"1.B-10510", "9374-10510"],
"info": {"numerical_ids": ["9374"]},
"children": [["1.B.1", "1.B.2"]],
},
"1.B.1": {
"title": "Solid Fuels",
"alternative_codes": ["9455", "1B1", "1 B 1", "1.B.1-10510", "9455-10510"],
"info": {"numerical_ids": ["9455"]},
"children": [["1.B.1.a", "1.B.1.b", "1.B.1.c"]],
},
"1.B.1.a": {
"title": "Coal Mining and Handling",
"alternative_codes": [
"9835",
"1B1a",
"1 B 1 a",
"1.B.1.a-10510",
"9835-10510",
],
"info": {"numerical_ids": ["9835"]},
"children": [["1.B.1.a.i", "1.B.1.a.ii"]],
},
"1.B.1.a.i": {
"title": "Underground Mines",
"alternative_codes": [
"9116",
"1B1ai",
"1 B 1 a i",
"1.B.1.a.i-10510",
"9116-10510",
],
"info": {"numerical_ids": ["9116"]},
"children": [["1.B.1.a.i.1", "1.B.1.a.i.2", "1.B.1.a.i.3"]],
},
"1.B.1.a.i.1": {
"title": "Mining Activities",
"alternative_codes": [
"9933",
"1B1ai1",
"1 B 1 a i 1",
"1.B.1.a.i.1-10510",
"9933-10510",
],
"info": {"numerical_ids": ["9933"]},
},
"1.B.1.a.i.2": {
"title": "Post-Mining Activities",
"alternative_codes": [
"9427",
"1B1ai2",
"1 B 1 a i 2",
"1.B.1.a.i.2-10510",
"9427-10510",
],
"info": {"numerical_ids": ["9427"]},
},
"1.B.1.a.i.3": {
"title": "Abandoned Underground Mines",
"alternative_codes": [
"8211",
"1B1ai3",
"1 B 1 a i 3",
"1.B.1.a.i.3-10510",
"8211-10510",
],
"info": {"numerical_ids": ["8211"]},
},
"1.B.1.a.ii": {
"title": "Surface Mines",
"alternative_codes": [
"8573",
"1B1aii",
"1 B 1 a ii",
"1.B.1.a.ii-10510",
"8573-10510",
],
"info": {"numerical_ids": ["8573"]},
"children": [["1.B.1.a.ii.1", "1.B.1.a.ii.2"]],
},
"1.B.1.a.ii.1": {
"title": "Mining Activities",
"alternative_codes": [
"10282",
"1B1aii1",
"1 B 1 a ii 1",
"1.B.1.a.ii.1-10510",
"10282-10510",
],
"info": {"numerical_ids": ["10282"]},
},
"1.B.1.a.ii.2": {
"title": "Post-Mining Activities",
"alternative_codes": [
"9601",
"1B1aii2",
"1 B 1 a ii 2",
"1.B.1.a.ii.2-10510",
"9601-10510",
],
"info": {"numerical_ids": ["9601"]},
},
"1.B.1.b": {
"title": "Solid Fuel Transformation",
"alternative_codes": [
"8300",
"1B1b",
"1 B 1 b",
"1.B.1.b-10510",
"8300-10510",
],
"info": {"numerical_ids": ["8300"]},
},
"1.B.1.c": {
"title": "Other",
"alternative_codes": [
"8964",
"1B1c",
"1 B 1 c",
"1.B.1.c-10510",
"8964-10510",
],
"info": {"numerical_ids": ["8964"]},
},
"1.B.2": {
"title": "Oil and Natural Gas and Other Emissions from Energy Production",
"alternative_codes": ["8806", "1B2", "1 B 2", "1.B.2-10510", "8806-10510"],
"info": {"numerical_ids": ["8806"]},
"children": [["1.B.2.a", "1.B.2.b", "1.B.2.c", "1.B.2.d"]],
},
"1.B.2.a": {
"title": "Oil",
"alternative_codes": [
"9822",
"1B2a",
"1 B 2 a",
"1.B.2.a-10510",
"9822-10510",
],
"info": {"numerical_ids": ["9822"]},
"children": [
[
"1.B.2.a.i",
"1.B.2.a.ii",
"1.B.2.a.iii",
"1.B.2.a.iv",
"1.B.2.a.v",
"1.B.2.a.vi",
]
],
},
"1.B.2.a.i": {
"title": "Exploration",
"alternative_codes": [
"8165",
"1B2ai",
"1 B 2 a i",
"1.B.2.a.i-10510",
"8165-10510",
],
"info": {"numerical_ids": ["8165"]},
},
"1.B.2.a.ii": {
"title": "Production",
"alternative_codes": [
"10078",
"1B2aii",
"1 B 2 a ii",
"1.B.2.a.ii-10510",
"10078-10510",
],
"info": {"numerical_ids": ["10078"]},
},
"1.B.2.a.iii": {
"title": "Transport",
"alternative_codes": [
"9550",
"1B2aiii",
"1 B 2 a iii",
"1.B.2.a.iii-10510",
"9550-10510",
],
"info": {"numerical_ids": ["9550"]},
},
"1.B.2.a.iv": {
"title": "Refining / Storage",
"alternative_codes": [
"9891",
"1B2aiv",
"1 B 2 a iv",
"1.B.2.a.iv-10510",
"9891-10510",
],
"info": {"numerical_ids": ["9891"]},
},
"1.B.2.a.v": {
"title": "Distribution of Oil Products",
"alternative_codes": [
"8371",
"1B2av",
"1 B 2 a v",
"1.B.2.a.v-10510",
"8371-10510",
],
"info": {"numerical_ids": ["8371"]},
},
"1.B.2.a.vi": {
"title": "Other",
"alternative_codes": [
"8228",
"1B2avi",
"1 B 2 a vi",
"1.B.2.a.vi-10510",
"8228-10510",
],
"info": {"numerical_ids": ["8228"]},
},
"1.B.2.b": {
"title": "Natural Gas",
"alternative_codes": [
"8493",
"1B2b",
"1 B 2 b",
"1.B.2.b-10510",
"8493-10510",
],
"info": {"numerical_ids": ["8493"]},
"children": [
[
"1.B.2.b.i",
"1.B.2.b.ii",
"1.B.2.b.iii",
"1.B.2.b.iv",
"1.B.2.b.v",
"1.B.2.b.vi",
]
],
},
"1.B.2.b.i": {
"title": "Exploration",
"alternative_codes": [
"9314",
"1B2bi",
"1 B 2 b i",
"1.B.2.b.i-10510",
"9314-10510",
],
"info": {"numerical_ids": ["9314"]},
},
"1.B.2.b.ii": {
"title": "Production",
"alternative_codes": [
"8474",
"1B2bii",
"1 B 2 b ii",
"1.B.2.b.ii-10510",
"8474-10510",
],
"info": {"numerical_ids": ["8474"]},
},
"1.B.2.b.iii": {
"title": "Processing",
"alternative_codes": [
"9999",
"1B2biii",
"1 B 2 b iii",
"1.B.2.b.iii-10510",
"9999-10510",
],
"info": {"numerical_ids": ["9999"]},
},
"1.B.2.b.iv": {
"title": "Transmission and Storage",
"alternative_codes": [
"9902",
"1B2biv",
"1 B 2 b iv",
"1.B.2.b.iv-10510",
"9902-10510",
],
"info": {"numerical_ids": ["9902"]},
},
"1.B.2.b.v": {
"title": "Distribution",
"alternative_codes": [
"9728",
"1B2bv",
"1 B 2 b v",
"1.B.2.b.v-10510",
"9728-10510",
],
"info": {"numerical_ids": ["9728"]},
},
"1.B.2.b.vi": {
"title": "Other",
"alternative_codes": [
"9974",
"1B2bvi",
"1 B 2 b vi",
"1.B.2.b.vi-10510",
"9974-10510",
],
"info": {"numerical_ids": ["9974"]},
},
"1.B.2.c": {
"title": "Venting and Flaring",
"alternative_codes": [
"8333",
"1B2c",
"1 B 2 c",
"1.B.2.c-10510",
"8333-10510",
],
"info": {"numerical_ids": ["8333"]},
"children": [["1.B.2.c.i", "1.B.2.c.ii"]],
},
"1.B.2.c.i": {
"title": "Venting",
"alternative_codes": [
"10303",
"1B2ci",
"1 B 2 c i",
"1.B.2.c.i-10510",
"10303-10510",
],
"info": {"numerical_ids": ["10303"]},
"children": [["1.B.2.c.i.1", "1.B.2.c.i.2", "1.B.2.c.i.3"]],
},
"1.B.2.c.i.1": {
"title": "Oil",
"alternative_codes": [
"9827",
"1B2ci1",
"1 B 2 c i 1",
"1.B.2.c.i.1-10510",
"9827-10510",
],
"info": {"numerical_ids": ["9827"]},
},
"1.B.2.c.i.2": {
"title": "Gas",
"alternative_codes": [
"10239",
"1B2ci2",
"1 B 2 c i 2",
"1.B.2.c.i.2-10510",
"10239-10510",
],
"info": {"numerical_ids": ["10239"]},
},
"1.B.2.c.i.3": {
"title": "Combined",
"alternative_codes": [
"8576",
"1B2ci3",
"1 B 2 c i 3",
"1.B.2.c.i.3-10510",
"8576-10510",
],
"info": {"numerical_ids": ["8576"]},
},
"1.B.2.c.ii": {
"title": "Flaring",
"alternative_codes": [
"10392",
"1B2cii",
"1 B 2 c ii",
"1.B.2.c.ii-10510",
"10392-10510",
],
"info": {"numerical_ids": ["10392"]},
"children": [["1.B.2.c.ii.1", "1.B.2.c.ii.2", "1.B.2.c.ii.3"]],
},
"1.B.2.c.ii.1": {
"title": "Oil",
"alternative_codes": [
"9210",
"1B2cii1",
"1 B 2 c ii 1",
"1.B.2.c.ii.1-10510",
"9210-10510",
],
"info": {"numerical_ids": ["9210"]},
},
"1.B.2.c.ii.2": {
"title": "Gas",
"alternative_codes": [
"8904",
"1B2cii2",
"1 B 2 c ii 2",
"1.B.2.c.ii.2-10510",
"8904-10510",
],
"info": {"numerical_ids": ["8904"]},
},
"1.B.2.c.ii.3": {
"title": "Combined",
"alternative_codes": [
"9824",
"1B2cii3",
"1 B 2 c ii 3",
"1.B.2.c.ii.3-10510",
"9824-10510",
],
"info": {"numerical_ids": ["9824"]},
},
"1.B.2.d": {
"title": "Other",
"alternative_codes": [
"9077",
"1B2d",
"1 B 2 d",
"1.B.2.d-10510",
"9077-10510",
],
"info": {"numerical_ids": ["9077"]},
},
"1.C": {
"title": "CO₂ Transport and Storage",
"alternative_codes": ["9070", "1C", "1 C", "1.C-10510", "9070-10510"],
"info": {"numerical_ids": ["9070"]},
"children": [["1.C.1", "1.C.2", "1.C.3", "11029"]],
},
"1.C.1": {
"title": "Transport of CO₂",
"alternative_codes": ["9365", "1C1", "1 C 1", "1.C.1-10510", "9365-10510"],
"info": {"numerical_ids": ["9365"]},
"children": [["1.C.1.a", "1.C.1.b", "1.C.1.c"]],
},
"1.C.1.a": {
"title": "Pipelines",
"alternative_codes": [
"9769",
"1C1a",
"1 C 1 a",
"1.C.1.a-10510",
"9769-10510",
],
"info": {"numerical_ids": ["9769"]},
},
"1.C.1.b": {
"title": "Ships",
"alternative_codes": [
"10197",
"1C1b",
"1 C 1 b",
"1.C.1.b-10510",
"10197-10510",
],
"info": {"numerical_ids": ["10197"]},
},
"1.C.1.c": {
"title": "Other",
"alternative_codes": [
"9366",
"1C1c",
"1 C 1 c",
"1.C.1.c-10510",
"9366-10510",
],
"info": {"numerical_ids": ["9366"]},
},
"1.C.2": {
"title": "Injection and Storage",
"alternative_codes": ["9474", "1C2", "1 C 2", "1.C.2-10510", "9474-10510"],
"info": {"numerical_ids": ["9474"]},
"children": [["1.C.2.a", "1.C.2.b"]],
},
"1.C.2.a": {
"title": "Injection",
"alternative_codes": [
"8741",
"1C2a",
"1 C 2 a",
"1.C.2.a-10510",
"8741-10510",
],
"info": {"numerical_ids": ["8741"]},
},
"1.C.2.b": {
"title": "Storage",
"alternative_codes": [
"10333",
"1C2b",
"1 C 2 b",
"1.C.2.b-10510",
"10333-10510",
],
"info": {"numerical_ids": ["10333"]},
},
"1.C.3": {
"title": "Other",
"alternative_codes": ["9330", "1C3", "1 C 3", "1.C.3-10510", "9330-10510"],
"info": {"numerical_ids": ["9330"]},
},
"2": {
"title": "Industrial Processes and Product Use",
"alternative_codes": [
"2.",
"10393",
"10482",
"2-10510",
"10393-10510",
"10482-10510",
],
"info": {"numerical_ids": ["10393", "10482"]},
"children": [["2.A", "2.B", "2.C", "2.D", "2.E", "2.F", "2.G", "2.H"]],
},
"2.A": {
"title": "Mineral Industry",
"alternative_codes": ["8452", "2A", "2 A", "2.A-10510", "8452-10510"],
"info": {"numerical_ids": ["8452"]},
"children": [["2.A.1", "2.A.2", "2.A.3", "2.A.4"]],
},
"2.A.1": {
"title": "Cement Production",
"alternative_codes": ["8787", "2A1", "2 A 1", "2.A.1-10510", "8787-10510"],
"info": {"numerical_ids": ["8787"]},
},
"2.A.2": {
"title": "Lime Production",
"alternative_codes": ["8702", "2A2", "2 A 2", "2.A.2-10510", "8702-10510"],
"info": {"numerical_ids": ["8702"]},
},
"2.A.3": {
"title": "Glass production",
"alternative_codes": ["8579", "2A3", "2 A 3", "2.A.3-10510", "8579-10510"],
"info": {"numerical_ids": ["8579"]},
},
"2.A.4": {
"title": "Other Process Uses of Carbonates",
"alternative_codes": ["9731", "2A4", "2 A 4", "2.A.4-10510", "9731-10510"],
"info": {"numerical_ids": ["9731"]},
"children": [["2.A.4.a", "2.A.4.b", "2.A.4.c", "2.A.4.d"]],
},
"2.A.4.a": {
"title": "Ceramics",
"alternative_codes": [
"8539",
"2A4a",
"2 A 4 a",
"2.A.4.a-10510",
"8539-10510",
],
"info": {"numerical_ids": ["8539"]},
},
"2.A.4.b": {
"title": "Other Uses of Soda Ash",
"alternative_codes": [
"9452",
"2A4b",
"2 A 4 b",
"2.A.4.b-10510",
"9452-10510",
],
"info": {"numerical_ids": ["9452"]},
},
"2.A.4.c": {
"title": "Non-metallurgical Magnesium Production",
"alternative_codes": [
"10101",
"2A4c",
"2 A 4 c",
"2.A.4.c-10510",
"10101-10510",
],
"info": {"numerical_ids": ["10101"]},
},
"2.A.4.d": {
"title": "Other",
"alternative_codes": [
"9342",
"2A4d",
"2 A 4 d",
"2.A.4.d-10510",
"9342-10510",
],
"info": {"numerical_ids": ["9342"]},
},
"2.B": {
"title": "Chemical Industry",
"alternative_codes": ["9304", "2B", "2 B", "2.B-10510", "9304-10510"],
"info": {"numerical_ids": ["9304"]},
"children": [
[
"2.B.1",
"2.B.10",
"2.B.2",
"2.B.3",
"2.B.4",
"2.B.5",
"2.B.6",
"2.B.7",
"2.B.8",
"2.B.9",
]
],
},
"2.B.1": {
"title": "Ammonia Production",
"alternative_codes": ["9658", "2B1", "2 B 1", "2.B.1-10510", "9658-10510"],
"info": {"numerical_ids": ["9658"]},
},
"2.B.2": {
"title": "Nitric Acid Production",
"alternative_codes": ["9410", "2B2", "2 B 2", "2.B.2-10510", "9410-10510"],
"info": {"numerical_ids": ["9410"]},
},
"2.B.3": {
"title": "Adipic Acid Production",
"alternative_codes": ["8468", "2B3", "2 B 3", "2.B.3-10510", "8468-10510"],
"info": {"numerical_ids": ["8468"]},
| |
# Repository: vidkidz/crossbridge
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import urlparse, urllib, time
from zope.interface import Interface
from twisted.web import html, resource
from buildbot.status import builder
from buildbot.status.builder import SUCCESS, WARNINGS, FAILURE, SKIPPED, EXCEPTION
from buildbot import version, util
class ITopBox(Interface):
    """I represent a box in the top row of the waterfall display: the one
    which shows the status of the last build for each builder."""

    def getBox(self, request):
        """Return a Box instance, which can produce a <td> cell.
        """

class ICurrentBox(Interface):
    """I represent the 'current activity' box, just above the builder name."""

    def getBox(self, status):
        """Return a Box instance, which can produce a <td> cell.
        """

class IBox(Interface):
    """I represent a box in the waterfall display."""

    def getBox(self, request):
        """Return a Box instance, which wraps an Event and can produce a <td>
        cell.
        """

class IHTMLLog(Interface):
    # Marker interface: identifies logs that can be rendered as HTML.
    # No methods are required.
    pass
# Map build-result codes (from buildbot.status.builder) to the CSS class
# used when rendering that result in the waterfall display.
css_classes = {SUCCESS: "success",
               WARNINGS: "warnings",
               FAILURE: "failure",
               SKIPPED: "skipped",
               EXCEPTION: "exception",
               }

# HTML skeleton for a single label/value row; filled in by make_row().
ROW_TEMPLATE = '''
<div class="row">
<span class="label">%(label)s</span>
<span class="field">%(field)s</span>
</div>
'''
def make_row(label, field):
    """Render one name/value row of HTML.

    `label` is plain text and is HTML-escaped before use.
    `field` is a bit of HTML structure and is inserted verbatim,
    without any encoding.
    """
    return ROW_TEMPLATE % {"label": html.escape(label), "field": field}
def make_stop_form(stopURL, on_all=False, label="Build"):
    """Build the HTML form used to stop one build (or, with on_all,
    all builds).  `label` is interpolated into the submit button."""
    if on_all:
        intro = """<form action="%s" class='command stopbuild'>
<p>To stop all builds, fill out the following fields and
push the 'Stop' button</p>\n""" % stopURL
    else:
        intro = """<form action="%s" class='command stopbuild'>
<p>To stop this build, fill out the following fields and
push the 'Stop' button</p>\n""" % stopURL
    rows = (make_row("Your name:",
                     "<input type='text' name='username' />")
            + make_row("Reason for stopping build:",
                       "<input type='text' name='comments' />"))
    return intro + rows + '<input type="submit" value="Stop %s" /></form>\n' % label
def make_force_build_form(forceURL, on_all=False):
    """Build the HTML form used to force a build (optionally on all
    Builders)."""
    if on_all:
        intro = """<form action="%s" class="command forcebuild">
<p>To force a build on all Builders, fill out the following fields
and push the 'Force Build' button</p>""" % forceURL
    else:
        intro = """<form action="%s" class="command forcebuild">
<p>To force a build, fill out the following fields and
push the 'Force Build' button</p>""" % forceURL
    rows = [
        make_row("Your name:", "<input type='text' name='username' />"),
        make_row("Reason for build:", "<input type='text' name='comments' />"),
        make_row("Branch to build:", "<input type='text' name='branch' />"),
        make_row("Revision to build:", "<input type='text' name='revision' />"),
    ]
    return intro + "".join(rows) + '<input type="submit" value="Force Build" /></form>\n'
def td(text="", parms=None, **props):
    """Render a single HTML <td> cell.

    Args:
        text: cell contents - a string, or a list of strings which are
            joined with "<br />".  Empty text renders as a single space
            so the cell is not collapsed.
        parms: optional dict of extra HTML attributes, merged over any
            keyword attributes.  (Previously a mutable default `{}`;
            changed to None to avoid the shared-mutable-default pitfall.
            Behavior is unchanged for all callers.)
        **props: HTML attributes.  `class_` sets the reserved-word
            attribute `class`; `comment` emits an HTML comment before
            the cell.  Only the whitelist below is actually emitted.

    Returns:
        The rendered '<td ...>...</td>\\n' string.
    """
    data = ""
    data += " "
    if parms:
        props.update(parms)
    comment = props.get("comment", None)
    if comment:
        data += "<!-- %s -->" % comment
    data += "<td"
    # allow 'class' to be passed via the non-reserved name 'class_'
    class_ = props.get('class_', None)
    if class_:
        props["class"] = class_
    for prop in ("align", "colspan", "rowspan", "border",
                 "valign", "halign", "class"):
        p = props.get(prop, None)
        if p is not None:  # was `p != None`
            data += " %s=\"%s\"" % (prop, p)
    data += ">"
    if not text:
        text = " "
    if isinstance(text, list):
        data += "<br />".join(text)
    else:
        data += text
    data += "</td>\n"
    return data
def build_get_class(b):
    """
    Return the CSS class to use for a finished build or buildstep,
    based on the result.

    `b` must be a builder.BuildStatus or builder.BuildStepStatus;
    anything else raises TypeError.  Returns "running" when no result
    is available yet.
    """
    # The redundant unconditional b.getResults() call noted by the old
    # FIXME ("getResults duplicity") has been removed; each branch now
    # fetches the result exactly once.
    if isinstance(b, builder.BuildStatus):
        result = b.getResults()
    elif isinstance(b, builder.BuildStepStatus):
        result = b.getResults()[0]
        # after forcing a build, b.getResults() returns ((None, []), []), ugh
        if isinstance(result, tuple):
            result = result[0]
    else:
        # Python-3-compatible raise syntax (was: raise TypeError, "...")
        raise TypeError("%r is not a BuildStatus or BuildStepStatus" % b)
    if result is None:
        # FIXME: this happens when a buildstep is running ?
        return "running"
    return builder.Results[result]
def path_to_root(request):
    """Return the relative prefix that leads from the requested page
    back to the site root.

    Examples:
        /waterfall         : ['waterfall']               -> ''
        /somewhere/lower   : ['somewhere', 'lower']      -> '../'
        /somewhere/indexy/ : ['somewhere', 'indexy', ''] -> '../../'
        /                  : []                          -> ''
    """
    depth = len(request.prepath) - 1 if request.prepath else 0
    return "../" * depth
def path_to_builder(request, builderstatus):
    """Relative URL of the page for the given builder."""
    quoted_name = urllib.quote(builderstatus.getName(), safe='')
    return "%sbuilders/%s" % (path_to_root(request), quoted_name)
def path_to_build(request, buildstatus):
    """Relative URL of the page for the given build."""
    builder_url = path_to_builder(request, buildstatus.getBuilder())
    return "%s/builds/%d" % (builder_url, buildstatus.getNumber())
def path_to_step(request, stepstatus):
    """Relative URL of the page for the given build step."""
    quoted_name = urllib.quote(stepstatus.getName(), safe='')
    return "%s/steps/%s" % (path_to_build(request, stepstatus.getBuild()), quoted_name)
def path_to_slave(request, slave):
    """Relative URL of the page for the given buildslave."""
    quoted_name = urllib.quote(slave.getName(), safe='')
    return "%sbuildslaves/%s" % (path_to_root(request), quoted_name)
class Box:
    # a Box wraps an Event. The Box has HTML <td> parameters that Events
    # lack, and it has a base URL to which each File's name is relative.
    # Events don't know about HTML.
    spacer = False

    def __init__(self, text=None, class_=None, urlbase=None,
                 **parms):
        # `text` used to default to a shared mutable list ([]); use None
        # and create a fresh list per instance instead.  Behavior is
        # unchanged for every caller.
        self.text = text if text is not None else []
        self.class_ = class_
        self.urlbase = urlbase
        self.show_idle = 0
        # 'show_idle' is a Box option, not an HTML attribute: consume it
        # here rather than passing it through to the <td>.
        # (was parms.has_key('show_idle') - Python 2 only; `in` works on
        # both Python 2 and 3)
        if 'show_idle' in parms:
            del parms['show_idle']
            self.show_idle = 1
        # parms is a dict of HTML parameters for the <td> element that will
        # represent this Event in the waterfall display.
        self.parms = parms

    def td(self, **props):
        """Render this box as a <td> cell via the module-level td()."""
        props.update(self.parms)
        text = self.text
        if not text and self.show_idle:
            text = ["[idle]"]
        return td(text, props, class_=self.class_)
class HtmlResource(resource.Resource):
    # this is a cheap sort of template thingy
    contentType = "text/html; charset=UTF-8"
    title = "Buildbot"
    addSlash = False # adapted from Nevow

    def getChild(self, path, request):
        # Serve ourselves for a trailing-slash request when addSlash is
        # set; otherwise defer to the normal Resource child lookup.
        if self.addSlash and path == "" and len(request.postpath) == 0:
            return self
        return resource.Resource.getChild(self, path, request)
    def render(self, request):
        """Twisted render entry point: registers the HTTP channel, then
        returns the page body produced by self.content()."""
        # tell the WebStatus about the HTTPChannel that got opened, so they
        # can close it if we get reconfigured and the WebStatus goes away.
        # They keep a weakref to this, since chances are good that it will be
        # closed by the browser or by us before we get reconfigured. See
        # ticket #102 for details.
        if hasattr(request, "channel"):
            # web.distrib.Request has no .channel
            request.site.buildbot_service.registerChannel(request.channel)

        # Our pages no longer require that their URL end in a slash. Instead,
        # they all use request.childLink() or some equivalent which takes the
        # last path component into account. This clause is left here for
        # historical and educational purposes.
        # NOTE: `if False and ...` means this whole branch is dead code.
        if False and self.addSlash and request.prepath[-1] != '':
            # this is intended to behave like request.URLPath().child('')
            # but we need a relative URL, since we might be living behind a
            # reverse proxy
            #
            # note that the Location: header (as used in redirects) are
            # required to have absolute URIs, and my attempt to handle
            # reverse-proxies gracefully violates rfc2616. This frequently
            # works, but single-component paths sometimes break. The best
            # strategy is to avoid these redirects whenever possible by using
            # HREFs with trailing slashes, and only use the redirects for
            # manually entered URLs.
            url = request.prePathURL()
            scheme, netloc, path, query, fragment = urlparse.urlsplit(url)
            new_url = request.prepath[-1] + "/"
            if query:
                new_url += "?" + query
            request.redirect(new_url)
            return ''

        data = self.content(request)
        if isinstance(data, unicode):
            # the transport wants bytes, not a (Python 2) unicode object
            data = data.encode("utf-8")
        request.setHeader("content-type", self.contentType)
        if request.method == "HEAD":
            # HEAD: advertise the length but send no body
            request.setHeader("content-length", len(data))
            return ''
        return data
    def getStatus(self, request):
        # The buildmaster's Status object.
        return request.site.buildbot_service.getStatus()

    def getControl(self, request):
        # The buildmaster's Control object (force/stop builds).
        return request.site.buildbot_service.getControl()

    def getChangemaster(self, request):
        # The buildmaster's change service (source-change tracking).
        return request.site.buildbot_service.getChangeSvc()

    def path_to_root(self, request):
        # Convenience wrapper around the module-level path_to_root().
        return path_to_root(request)
    def footer(self, s, req):
        """Render the common page footer: welcome link, buildbot version,
        optional project credit, and the page-build timestamp.

        `s` is the Status object, `req` the current request.
        """
        # TODO: this stuff should be generated by a template of some sort
        projectURL = s.getProjectURL()
        projectName = s.getProjectName()
        data = '<hr /><div class="footer">\n'

        welcomeurl = self.path_to_root(req) + "index.html"
        data += '[<a href="%s">welcome</a>]\n' % welcomeurl
        data += "<br />\n"

        data += '<a href="http://buildbot.sourceforge.net/">Buildbot</a>'
        data += "-%s " % version
        if projectName:
            data += "working for the "
            if projectURL:
                data += "<a href=\"%s\">%s</a> project." % (projectURL,
                                                            projectName)
            else:
                data += "%s project." % projectName
        data += "<br />\n"
        data += ("Page built: " +
                 time.strftime("%a %d %b %Y %H:%M:%S",
                               time.localtime(util.now()))
                 + "\n")
        data += '</div>\n'
        return data
    def getTitle(self, request):
        # Subclasses may override this to compute a per-request title.
        return self.title

    def fillTemplate(self, template, request):
        """%-interpolate *template* with the site-wide template values
        plus per-request 'root' and 'title' entries."""
        s = request.site.buildbot_service
        values = s.template_values.copy()
        values['root'] = self.path_to_root(request)
        # e.g. to reference the top-level 'buildbot.css' page, use
        # "%(root)sbuildbot.css"
        values['title'] = self.getTitle(request)
        return template % values
def content(self, request):
s = request.site.buildbot_service
data = ""
data += self.fillTemplate(s.header, request)
data += "<head>\n"
for he in s.head_elements:
data += " " + self.fillTemplate(he, request) + "\n"
data += self.head(request)
data += "</head>\n\n"
data += '<body %s>\n' % " ".join(['%s="%s"' % (k,v)
for (k,v) in s.body_attrs.items()])
data += self.body(request)
| |
# source file: sfoda/suntans/sundriver.py
# -*- coding: utf-8 -*-
"""
Driver classes for generating suntans input files
<NAME>
Stanford University
April 2013
"""
from .sunpy import Grid, Spatial
from .sundepths import DepthDriver, adjust_channel_depth
from .sunboundary import modifyBCmarker, Boundary, InitialCond
from .metfile import SunMet, metfile
from sfoda.utils import timeseries
import numpy as np
import scipy.io as io
from scipy.interpolate import interp1d
import matplotlib.pyplot as plt
import pdb
PI = 3.141592653589793
class sundriver(object):
"""
Driver class for generating SUNTANS input files
"""
# Switches to generate bathymetry, boundary, meteorology and initial condition input files
makebathy=False
makebnd=False
makewinds=False
makeinitial=False
###
# General options
###
# Grid projection variables
convert2utm=False
CS='NAD83'
utmzone=51
isnorth=False
vdatum = 'MSL'
# Verical grid options
Nkmax = 1 # number of layers
r = 1.01 # vertical stretching parameter
setconstantdepth=False # Option to set constant depth
H0 = 10.0 # Constant depth
###
# Bathymetry interpolation options
###
depthfile = None
depthmax=0.1
interpmethod='idw' # Interpolation method: 'nn', 'idw', 'kriging', 'griddata'
plottype='mpl' # Type of plot: 'mpl', 'vtk2' or 'vtk3'
shapefieldname='contour' # name of shapefile attribute with contour data
scalefac = 1.0
# Interpolation options
NNear=3
# Interpolate to nodes then take maximum depth for cell
interpnodes=False
# IF interpmethod = 'idw'
p = 1.0 # power for inverse distance weighting
# IF interpmethod = 'kriging'
varmodel = 'spherical'
nugget = 0.1
sill = 0.8
vrange = 250.0
# Smoothing options
smooth=True
smoothmethod='kriging' # USe kriging or idw for smoothing
smoothnear=4 # No. of points to use for smoothing
# Option to adjust channel depths using a shapefile
adjust_depths=False
channel_shpfile='channel.shp'
####
# Open boundary options
####
opt_bcseg = 'constant' # Segment boundary condition option: 'constant' or 'file'
opt_bctype2 = 'constant' # Type 2 boundary condition option: 'constant' or 'file'
opt_bctype3 = 'constant' # Type 3 boundary condition option: 'constant', ,'file','OTIS', 'ROMS', 'ROMSOTIS','ROMSFILE', 'OTISFILE', 'ROMSOTISFILE'
modifyedges = False # Option to modify the boundary edges
bcpolygonfile = None # Shape file with fields 'marker' and 'edge_id'
bcfile = 'SUNTANS_BC.nc' # Input boundary condition file
# IF opt_bcseg = 'consant
Q0 = 100.0 # m3/s
# IF opt_bctype2/opt_bctype3 = 'constant'
T0 = 0 # Open boundary background temperature
S0 = 0 # Open boundary background salinity
# IF opt_bctype3 = 'file' or 'ROMSFILE'
waterlevelstationID = None
# IF opt_bctype3 = 'harmonic'
amp = 0.25
omega = 2*PI/(24.0*3600.0)
# IF opt_bctype2 = 'file'
TairstatationID = None
# Filter type (waterlevel)
filttype='low'
cutoff=3600.0
# Air temp cuttoff (bctype2 = file)
tairfilttype = 'low'
taircutoff = 14.0*24.0*3600.0
####
# Atmospheric input options
####
opt_met = 'constant' # Met file creation options: 'constant'
metfile = 'SUNTANS_MetForcing.nc'
# IF opt_met = 'consant'
Uwind = 0.0
Vwind = 5.0
RH = 50.0
Tair = 30.0
Pair = 1010.0
rain = 0.0
cloud = 0.0
####
# Initial condition options
####
opt_ic = 'constant', 'depth_profile', 'ROMS' , 'SUNTANS'
icfile = 'SUNTANS_IC.nc'
icfilterdx = 0.0 # Filtering length scale
# Age source term polygon
agesourcepoly = None
# Initial condition temperature and salinity
T0ic = 0
S0ic = 0
###
# Input file names
###
romsfile = None
suntansicfile = None
otisfile = None
dbasefile = None
# Use ROMS u,v and eta
useROMSuv=False
useROMSeta=False
# Use OTIS u & v
useOTISuv=False
############################
def __init__(self,**kwargs):
self.__dict__.update(kwargs)
def __call__(self,suntanspath,starttime,endtime,dt):
self.suntanspath = suntanspath
self.starttime = starttime
self.endtime = endtime
self.dt = dt
# Step through and perform each step
self._makebathy()
if self.makebnd:
self._makebnd()
if self.makeinitial:
self._makeinitial()
if self.makewinds:
self._makewinds()
print('###########\n Completed generating input files. Check text for errors!!!!\n##########')
    def _makebathy(self):
        """
        Loads the grid object and interpolates the depths

        Three modes: interpolate depths from self.depthfile (makebathy),
        set a constant depth (setconstantdepth), or load existing depths
        from the grid folder.  In every case the vertical grid spacing is
        then computed and cells/edges files are re-written.
        """
        # Interpolate the depths onto the grid
        if self.makebathy:
            if self.depthfile == None:
                raise Exception('need to set "depthfile" parameter')
            else:
                print('Interpolation depths onto grid from file:\n%s'%self.depthfile)
                D = DepthDriver(self.depthfile,interpmethod=self.interpmethod,\
                    plottype=self.plottype,NNear=self.NNear,\
                    p=self.p,varmodel=self.varmodel,nugget=self.nugget,sill=self.sill,vrange=self.vrange,\
                    convert2utm=self.convert2utm,utmzone=self.utmzone,isnorth=self.isnorth,vdatum=self.vdatum,\
                    shapefieldname=self.shapefieldname,\
                    smooth=self.smooth,smoothmethod=self.smoothmethod,smoothnear=self.smoothnear)
                D(self.suntanspath,depthmax=self.depthmax,interpnodes=self.interpnodes,\
                    scalefac=self.scalefac)
                self.grd = D.grd
            # Now go through and adjust the channel depths from a shapefile
            if self.adjust_depths:
                self.grd = adjust_channel_depth(self.grd,self.channel_shpfile)
            # Write the depths to file
            print('Writing depths.dat (again)...')
            self.grd.saveBathy(self.suntanspath+'/depths.dat-voro')
            print('Data (re-)saved to %s.'%self.suntanspath+'/depths.dat-voro')
            print('SUNTANS depths saved to: %s'%(self.suntanspath+'/depths.dat-voro'))
        elif self.setconstantdepth:
            print('Using constant depth (%6.2f m)...'%self.H0)
            self.grd = Grid(self.suntanspath)
            self.grd.dv = np.zeros_like(self.grd.xv)
            self.grd.dv[:] = self.H0
        else:
            print('Loading grid from folder:\n%s'%self.suntanspath)
            # Load the grid
            self.grd = Grid(self.suntanspath)
            # Load the depth data into the grid object
            self.grd.loadBathy(self.suntanspath+'/depths.dat-voro')

        # Vertical grid spacing is based on the deepest cell
        zmax = np.abs(self.grd.dv.max())
        print('Calculating vertical grid spacing for Nk = %d, r = %1.3f, %6.3f...'%(self.Nkmax,self.r,zmax))
        # Set up the vertical coordinates
        dz = self.grd.calcVertSpace(self.Nkmax,self.r,zmax)
        self.grd.setDepth(dz)
        # Save vertspace.dat
        self.grd.saveVertspace(self.suntanspath+'/vertspace.dat')
        # Write cells.dat and edges.dat to ensure they are in the right format
        print('Overwriting cells.dat and edges.dat to ensure format consistency.')
        self.grd.saveCells(self.suntanspath+'/cells.dat')
        self.grd.saveEdges(self.suntanspath+'/edges.dat')
def _makebnd(self):
"""
Generate boundary condition files
"""
if self.modifyedges:
modifyBCmarker(self.suntanspath,self.bcpolygonfile)
#Load the boundary object from the grid
bnd = Boundary(self.suntanspath,(self.starttime,self.endtime,self.dt))
###
# Segment (flux) boundaries
###
if self.opt_bcseg == 'constant':
print('Setting %d boundary segments to discharge of %6.3f m3/s'%(bnd.Nseg,self.Q0))
bnd.boundary_Q[:]=self.Q0
elif self.opt_bcseg == 'file':
print('Loading river segment data from file...\n')
for ii, ID in enumerate(bnd.segp):
print('Loading discahrge data for boundary segment (%d) StationID: %d...'%(ii,ID))
ts = timeseries.loadDBstation(self.dbasefile,ID,'discharge',timeinfo=(self.starttime,self.endtime,self.dt),\
filttype=self.filttype,cutoff=self.cutoff)
bnd.boundary_Q[:,ii]=ts.y.copy()
else:
print('Unknown option: opt_bcseg = %s. Not setting boundary segment data.'%self.opt_bcseg)
###
# Type-3 boundaries
###
self.useROMS = False
self.useOTIS = False
self.useFILE = False
self.useOTISFILE = False
if self.opt_bctype3=='constant':
print('Setting constant type-3 boundary conditions...')
print('Setting salinity = %f, temperature = %f'%(self.S0,self.T0))
bnd.S[:]=self.S0
bnd.T[:]=self.T0
elif self.opt_bctype3=='depth_profile':
print('Setting type-3 boundary T/S from profile...')
self.loadTSprofile()
for ii in range(0,bnd.N3):
bnd.T[0,:,ii] = self.Tz
bnd.S[0,:,ii] = self.Sz
elif self.opt_bctype3 in ('ROMS'):
self.useROMS = True
elif self.opt_bctype3 in ('OTIS'):
self.useOTIS = True
elif self.opt_bctype3 in ('file'):
self.useFILE = True
elif self.opt_bctype3 in ('ROMSOTIS'):
self.useROMS = True
self.useOTIS = True
elif self.opt_bctype3 in ('ROMSFILE'):
self.useROMS = True
self.useFILE = True
elif self.opt_bctype3 in ('OTISFILE'):
self.useOTISFILE = True
elif self.opt_bctype3 in ('ROMSOTISFILE'):
self.useOTISFILE = True
self.useROMS = True
else:
print('Unknown option: opt_bctype3 = %s. Not setting type-3 boundaries.'%self.opt_bctype3)
if self.useROMS:
bnd.roms2boundary(self.romsfile,setUV=self.useROMSuv,seth=self.useROMSeta)
if self.useOTIS:
bnd.otis2boundary(self.otisfile,setUV=self.useOTISuv)
if self.useOTISFILE:
bnd.otisfile2boundary(self.otisfile,self.dbasefile,self.waterlevelstationID,setUV=self.useOTISuv)
if self.useFILE:
ID = self.waterlevelstationID
print('Loading waterlevel onto all type-3 points from stationID: %d...'%(ID))
ts = timeseries.loadDBstation(self.dbasefile,ID,'waterlevel',timeinfo=(self.starttime,self.endtime,self.dt),\
filttype=self.filttype,cutoff=self.cutoff)
for ii in range(bnd.N3):
bnd.h[:,ii] += ts.y.copy()
###
# Type-2 boundaries
###
self.useFILE2 = False
if self.opt_bctype2 == 'constant':
print('Setting constant type-2 boundary conditions...')
print('Setting salinity = %f, temperature = %f'%(self.S0,self.T0))
bnd.boundary_S[:]=self.S0
bnd.boundary_T[:]=self.T0
elif self.opt_bctype2 == 'file':
print('Using file for type-2 boundary condition (temperature only)')
print('Setting salinity = %f'%(self.S0))
bnd.boundary_S[:]=self.S0
self.useFILE2 = True
else:
print('Unknown option: opt_bctype2 = %s. Not setting type-2 boundaries.'%self.opt_bctype3)
if self.useFILE2:
ID = self.TairstationID
print('Loading air temperature onto all type-2 points from stationID: %s...'%(ID))
ts = timeseries.loadDBstation(self.dbasefile,ID,'Tair',timeinfo=(self.starttime,self.endtime,self.dt),\
filttype=self.tairfilttype,cutoff=self.taircutoff)
for ii in range(bnd.N2):
for kk in range(bnd.Nk):
bnd.boundary_T[:,kk,ii] += ts.y.copy()
# Write to netcdf
bnd.write2NC(self.suntanspath+'/'+self.bcfile)
def _makeinitial(self):
"""
Generate initial conditions
"""
# Initialise the class
IC = InitialCond(self.suntanspath,self.starttime)
if self.opt_ic=='constant':
print('Setting constant initial conditions...')
print('Setting salinity = %f, temperature = %f'%(self.S0ic,self.T0ic))
IC.T[:]=self.T0ic
IC.S[:]=self.S0ic
elif self.opt_ic=='depth_profile':
print('Setting depth-varying initial conditions...')
self.loadTSprofile()
for ii in range(0,IC.Nc):
IC.T[0,:,ii] = self.Tz
IC.S[0,:,ii] = self.Sz
elif self.opt_ic=='ROMS':
print('Setting initial conditions from ROMS model output...')
IC.roms2ic(self.romsfile,setUV=self.useROMSuv,seth=self.useROMSeta,interpmethod='idw',NNear=5,p=2)
#interpmethod=self.interpmethod,NNear=self.NNear,p=self.p,\
#varmodel=self.varmodel,nugget=self.nugget,sill=self.sill,\
#vrange=self.vrange)
elif self.opt_ic=='SUNTANS':
IC.suntans2ic(self.suntansicfile,setUV=self.useROMSuv,seth=self.useROMSeta)
else:
print('Unknown option: opt_ic = %s. Not setting initial conditions.'%self.opt_ic)
# Filter the variables in space
if self.icfilterdx>0:
IC.filteric(self.icfilterdx)
# Set the age source term from a polygon
if not self.agesourcepoly==None:
print('Setting age source term with shapefile: %s...'%self.agesourcepoly)
IC.setAgeSource(self.agesourcepoly)
# Write the initial condition file
IC.writeNC(self.suntanspath+'/'+self.icfile,dv=self.grd.dv)
def _makewinds(self):
"""
Generate a metfile
"""
if self.opt_met=='constant':
print('Setting constant met forcing with: Uwind = %6.2f\nVwind = %6.2f\nTair = %6.2f\nPair = %6.2f\nRH = %6.2f\nrain = %6.2f\ncloud = %6.2f\n'\
%(self.Uwind,self.Vwind,self.Tair,self.Pair,self.RH,self.cloud,self.rain))
xpt = self.grd.xv.mean()
ypt = self.grd.yv.mean()
| |
# -*- coding: utf-8 -*-
# Copyright: (c) 2021, <NAME> (@jborean93) <<EMAIL>>
# MIT License (see LICENSE or https://opensource.org/licenses/MIT)
import logging
import typing
import uuid
from xml.etree import ElementTree
from psrpcore._crypto import PSRemotingCrypto
from psrpcore._events import PSRPEvent, SessionCapabilityEvent
from psrpcore._exceptions import (
InvalidPipelineState,
InvalidRunspacePoolState,
PSRPCoreError,
)
from psrpcore._payload import (
ProtocolVersion,
PSRPMessage,
PSRPPayload,
StreamType,
create_fragment,
create_message,
unpack_fragment,
unpack_message,
)
from psrpcore.types import (
ApartmentState,
EndOfPipelineInput,
ErrorRecord,
HostInfo,
PSInvocationState,
PSObject,
PSRPMessageType,
PSThreadOptions,
PSVersion,
RunspacePoolState,
SessionCapability,
deserialize,
serialize,
)
log = logging.getLogger(__name__)
T1 = typing.TypeVar("T1", bound="Pipeline")
T2 = typing.TypeVar("T2", bound="RunspacePool")
class RunspacePool(typing.Generic[T1]):
"""Runspace Pool base class.
This is the base class for a Runspace Pool. It contains the common
attributes and methods used by both a client and server based Runspace
Pool.
Args:
runspace_pool_id: The UUID that identified the Runspace Pool.
application_arguments: Any arguments supplied when creating the
Runspace Pool as a client.
application_private_data: Any special data supplied by the Runspace
Pool as a server.
ps_version: The PowerShell version.
protocol_version: The PSRP protocol version that the pool understands.
serialization_version: The serialization version used by the pool.
Attributes:
runspace_pool_id: See args.
our_capability: The SessionCapability of the caller.
their_capability: The SessionCapability of the peer, only populated
after the Runspace Pool has been opened.
application_arguments: The application arguments from the client, will
be populated for the server after the Runspace Pool has been
opened.
application_private_data: The app private data supplied by the server,
will be populated for the client after the Runspace Pool has been
opened.
host: The HostInfo that contains host information of the client.
state: The current state of the Runspace Pool.
apartment_state: The apartment state of the thread used to execute
commands within this Runspace Pool.
thread_options: Determines whether a new thread is created for each
invocation.
pipeline_table: A dictionary that contains associated pipelines with
this Runspace Pool.
"""
def __new__(
cls,
*args: typing.Any,
**kwargs: typing.Any,
) -> "RunspacePool":
if cls == RunspacePool:
raise TypeError(
f"Type {cls.__qualname__} cannot be instantiated; it can be used only as a base class for "
f"client/server runspace pool types."
)
return super().__new__(cls)
    def __init__(
        self,
        runspace_pool_id: uuid.UUID,
        application_arguments: typing.Dict[str, typing.Any],
        application_private_data: typing.Dict[str, typing.Any],
        ps_version: typing.Optional[PSVersion] = None,
        protocol_version: typing.Optional[PSVersion] = None,
        serialization_version: typing.Optional[PSVersion] = None,
    ) -> None:
        log.debug(
            "Creating Runspace Pool - RPID: %s, Type: %s",
            runspace_pool_id,
            type(self).__name__,
        )
        self.runspace_pool_id = runspace_pool_id
        # Capabilities advertised to the peer during the session
        # capability exchange.
        self.our_capability = SessionCapability(
            PSVersion=ps_version or PSVersion("2.0"),
            protocolversion=protocol_version or ProtocolVersion.Pwsh5.value,
            SerializationVersion=serialization_version or PSVersion("1.1.0.1"),
        )
        # Peer capability; populated once the pool has been opened.
        self.their_capability: typing.Optional[SessionCapability] = None
        self.application_arguments = application_arguments
        self.application_private_data = application_private_data
        self.host: typing.Optional[HostInfo] = None
        self.state = RunspacePoolState.BeforeOpen
        self.apartment_state = ApartmentState.Unknown
        self.thread_options = PSThreadOptions.Default
        self.pipeline_table: typing.Dict[uuid.UUID, T1] = {}
        self._is_client = True
        # Callback/event bookkeeping keyed by call id ("ci").
        self._ci_handlers: typing.Dict[int, typing.Optional[typing.Callable[[PSRPEvent], None]]] = {}
        self._ci_events: typing.Dict[int, PSRPEvent] = {}
        self._ci_count = 1
        self._fragment_count = 1
        self._key_requested = False
        self._cipher = PSRemotingCrypto()
        self._min_runspaces = 0
        self._max_runspaces = 0
        # Outgoing messages not yet handed to the transport.
        self._send_buffer: typing.List[PSRPMessage] = []
        # Raw bytes received but not yet processed.
        self._receive_buffer = bytearray()
        # Fragments for each object_id that have been received but not yet combined to a message.
        self._incoming_fragments: typing.Dict[int, typing.List[bytearray]] = {}
        # Messages from combined fragments that have been received but not yet returned.
        self._incoming_messages: typing.Dict[int, PSRPMessage] = {}
    @property
    def max_runspaces(
        self,
    ) -> int:
        """The maximum number of runspaces the pool maintains."""
        return self._max_runspaces

    @property
    def min_runspaces(
        self,
    ) -> int:
        """The minimum number of runspaces the pool maintains."""
        return self._min_runspaces

    @property
    def _ci_counter(
        self,
    ) -> int:
        """Counter used for ci calls."""
        # NOTE: reading this property has a side effect - each access
        # increments the counter and yields a fresh call id.
        ci = self._ci_count
        self._ci_count += 1
        return ci

    @property
    def _fragment_counter(
        self,
    ) -> int:
        """Counter used for fragment object IDs."""
        # Same side-effecting read pattern as _ci_counter.
        count = self._fragment_count
        self._fragment_count += 1
        return count
    def begin_close(self) -> None:
        """Marks the Runspace Pool to be in the closing phase."""
        self._change_state(RunspacePoolState.Closing)

    def close(self) -> None:
        """Marks the Runspace Pool as closed.

        This closes the Runspace Pool. Communicating to the peer that the pool
        is being closed is done through a connection specific process. This
        method just verifies the Runspace Pool is in a state that can be closed
        and that no pipelines are still running.
        """
        if self.pipeline_table:
            raise PSRPCoreError("Must close existing pipelines before closing the pool")

        valid_states = [RunspacePoolState.Closed, RunspacePoolState.Closing, RunspacePoolState.Opened]
        if self.state not in valid_states:
            raise InvalidRunspacePoolState("close Runspace Pool", self.state, valid_states)

        self._change_state(RunspacePoolState.Closed)

    def begin_disconnect(self) -> None:
        """Marks the Runspace Pool to be in the disconnecting phase."""
        self._change_state(RunspacePoolState.Disconnecting)

    def disconnect(self) -> None:
        """Marks the Runspace Pool as disconnected.

        This disconnects the Runspace Pool. Communicating to the peer that the
        pool is disconnected is done through a connection specific process.
        """
        valid_states = [RunspacePoolState.Opened, RunspacePoolState.Disconnecting, RunspacePoolState.Disconnected]
        if self.state not in valid_states:
            raise InvalidRunspacePoolState("disconnect a Runspace Pool", self.state, valid_states)

        self._change_state(RunspacePoolState.Disconnected)

    def reconnect(self) -> None:
        """Marks the Runspace Pool as reconnected and opened."""
        valid_states = [RunspacePoolState.Disconnected, RunspacePoolState.Opened]
        if self.state not in valid_states:
            raise InvalidRunspacePoolState("reconnect to a Runspace Pool", self.state, valid_states)

        self._change_state(RunspacePoolState.Opened)
    def data_to_send(
        self,
        amount: typing.Optional[int] = None,
    ) -> typing.Optional[PSRPPayload]:
        """Gets the next PSRP payload.

        Returns the PSRPPayload that contains the data that needs to be sent
        to the peer. This is a non-blocking call and is used by the
        implementer to get the next PSRP payload that is then sent over it's
        transport.

        Args:
            amount: The maximum size of the data fragment that can be sent.
                This must be 22 or larger to fit the fragment headers.

        Returns:
            typing.Optional[PSRPPayload]: The payload (if any) that needs to
                be sent to the peer.
        """
        if amount is not None and amount < 22:
            raise ValueError("amount must be 22 or larger to fit a PSRP fragment")

        current_buffer = bytearray()
        stream_type = StreamType.default
        pipeline_id = None
        fragment_size = 21  # bytes of fragment header overhead per fragment
        # TODO: prioritise prompt_response over default if the last fragment was an end fragment.

        for message in list(self._send_buffer):
            if amount is not None and amount < fragment_size:
                # Not enough room left for even a fragment header.
                break

            if not current_buffer:
                # First fragment in this payload fixes the stream/pipeline.
                stream_type = message.stream_type
                pipeline_id = message.pipeline_id

            # We can only combine fragments if they are for the same target.
            if pipeline_id != message.pipeline_id:
                break

            if amount is None:
                allowed_length = len(message)
            else:
                allowed_length = amount - fragment_size
                amount -= fragment_size + len(message)

            fragment = message.fragment(allowed_length)
            log.debug(
                "Packing fragment - OID: %s, FID: %s, Start: %s, End: %s, Length: %s",
                fragment.object_id,
                fragment.fragment_id,
                fragment.start,
                fragment.end,
                len(fragment.data),
            )
            current_buffer += create_fragment(fragment.object_id, fragment.fragment_id, fragment.data, fragment.end)
            if len(message) == 0:
                # Message fully fragmented; drop it from the send queue.
                self._send_buffer.remove(message)

        return PSRPPayload(bytes(current_buffer), stream_type, pipeline_id) if current_buffer else None
def receive_data(
self,
data: PSRPPayload,
) -> None:
"""Store any incoming data.
Stores any incoming payloads in an internal buffer to be processed.
This buffer is read when calling :meth:`next_event()`.
Args:
data: The PSRP payload data received from the transport.
"""
self._receive_buffer += data.data
    def next_event(
        self,
    ) -> typing.Optional[PSRPEvent]:
        """Process data received from the peer.

        This processes any PSRP data that has been received from the peer. Will
        return the next PSRP event in the receive buffer or `None` if not
        enough data is available.

        Returns:
            typing.Optional[PSRPEvent]: The next event present in the incoming
                data buffer or `None` if not enough data has been received.
        """
        # First unpacks the raw receive buffer into messages.
        while self._receive_buffer:
            fragment = unpack_fragment(self._receive_buffer)
            log.debug(
                "Unpacked fragment - OID: %s, FID: %s, Start: %s, End: %s, Length: %s",
                fragment.object_id,
                fragment.fragment_id,
                fragment.start,
                fragment.end,
                len(fragment.data),
            )
            # 21 == fragment header size; consume header + data.
            self._receive_buffer = self._receive_buffer[21 + len(fragment.data) :]

            buffer = self._incoming_fragments.setdefault(fragment.object_id, [])
            # Fragments must arrive in order for a given object id.
            if fragment.fragment_id != len(buffer):
                raise PSRPCoreError(
                    f"Expecting fragment with a fragment id of {len(buffer)} not {fragment.fragment_id}"
                )
            buffer.append(fragment.data)

            if fragment.end:
                # Final fragment: reassemble the full message.
                raw_message = unpack_message(bytearray(b"".join(buffer)))
                message = PSRPMessage(
                    raw_message.message_type, raw_message.data, raw_message.rpid, raw_message.pid, fragment.object_id
                )
                self._incoming_messages[fragment.object_id] = message
                del self._incoming_fragments[fragment.object_id]

        # Return the first pending message's event (at most one per call).
        for object_id in list(self._incoming_messages.keys()):
            message = self._incoming_messages[object_id]
            # In case of a failure it is expected for the client to receive the correct data instead
            del self._incoming_messages[object_id]
            event = self._process_message(message)
            return event

        # Need more data from peer to produce an event.
        return None
def prepare_message(
self,
message: typing.Optional[PSObject],
message_type: typing.Optional[PSRPMessageType] = None,
pipeline_id: typing.Optional[uuid.UUID] = None,
stream_type: StreamType = StreamType.default,
) -> None:
"""Adds a PSRP message to send buffer.
Adds the given PSRP message to the send buffer to be sent when the
caller requires it to.
Args:
message: The PSObject to be send.
message_type: | |
# source repository: KVSlab/vascularManipulationToolkit
import math
from scipy.interpolate import splrep, splev
from morphman.common.common import get_distance
from morphman.common.vmtk_wrapper import *
from morphman.common.vtk_wrapper import *
### The following code is adapted from:
### https://github.com/vmtk/vmtk/tree/master/vmtkApps/CerebralAneurysms/ParentVesselReconstruction
### Written by the vmtk developers, and distributed within vmtk.
def create_parent_artery_patches(parentCenterlines, clipPoints, siphon=False, bif=False):
    """Clip out a segment of the centerline, and create new centerlines with new end and
    starting points.

    Args:
        parentCenterlines (vtkPolyData): Original centerline
        clipPoints (vtkPoints): The points where to clip the centerline.
        siphon (bool): On/off clipping a siphon
        bif (bool): On/off bifurcation.

    Returns:
        centerline (vtkPolyData): New centerline without the segment.
    """
    numberOfDaughterPatches = parentCenterlines.GetNumberOfCells()

    # Find, per line, the point IDs that bound the segment to clip away.
    if siphon:
        clipIds, numberOfPatchedCenterlinesPoints = extract_patches_ids_siphon(parentCenterlines,
                                                                               clipPoints)
    else:
        clipIds, numberOfPatchedCenterlinesPoints = extract_patches_ids(parentCenterlines,
                                                                        clipPoints)

    patchedCenterlines = vtk.vtkPolyData()
    patchedCenterlinesPoints = vtk.vtkPoints()
    patchedCenterlinesCellArray = vtk.vtkCellArray()
    radiusArray = get_vtk_array(radiusArrayName, 1, numberOfPatchedCenterlinesPoints)

    if bif:
        clipIds = sorted(clipIds)

    # clipIds[0] bounds the upstream patch that all daughter branches share.
    numberOfCommonPatch = clipIds[0] + 1
    patchedCenterlinesCellArray.InsertNextCell(numberOfCommonPatch)

    count = 0
    line = extract_single_line(parentCenterlines, 0)
    getData = line.GetPointData().GetArray(radiusArrayName).GetTuple1
    # Copy the common upstream patch (points 0..clipIds[0]) from the first line.
    for i in range(0, numberOfCommonPatch):
        patchedCenterlinesPoints.InsertNextPoint(line.GetPoint(i))
        patchedCenterlinesCellArray.InsertCellPoint(i)
        radiusArray.SetTuple1(i, getData(i))
        count += 1

    # Copy, for each daughter line j, the downstream part starting at clipIds[j + 1].
    # NOTE(review): assumes len(clipIds) == numberOfDaughterPatches + 1 — holds for the
    # extract_patches_ids* helpers in this file.
    for j in range(numberOfDaughterPatches):
        cell = extract_single_line(parentCenterlines, j)
        getData = cell.GetPointData().GetArray(radiusArrayName).GetTuple1
        numberOfCellPoints = cell.GetNumberOfPoints()
        startId = clipIds[j + 1]

        patchNumberOfPoints = numberOfCellPoints - startId
        patchedCenterlinesCellArray.InsertNextCell(patchNumberOfPoints)

        for i in range(startId, cell.GetNumberOfPoints()):
            point = cell.GetPoint(i)
            patchedCenterlinesPoints.InsertNextPoint(point)
            patchedCenterlinesCellArray.InsertCellPoint(count)
            radiusArray.SetTuple1(count, getData(i))
            count += 1

    patchedCenterlines.SetPoints(patchedCenterlinesPoints)
    patchedCenterlines.SetLines(patchedCenterlinesCellArray)
    patchedCenterlines.GetPointData().AddArray(radiusArray)

    return patchedCenterlines
def extract_patches_ids_siphon(parentCl, clipPts, clipped=False):
    """Locate the clip-point IDs on every centerline of a siphon.

    For each of the two clipping points, find the closest point ID on each
    line of the centerline. The first line contributes both the upstream and
    downstream IDs; every other line contributes only a downstream ID.

    Args:
        parentCl (vtkPolyData): Centerline to search.
        clipPts (vtkPoints): Two clipping points (upstream, downstream).
        clipped (bool): Shift the IDs by one point inward when True.

    Returns:
        clipIds (list): A list of IDs.
        numberOfPoints (int): Total number of points.
    """
    ids = []
    total_points = 0
    point_up = clipPts.GetPoint(0)
    point_down = clipPts.GetPoint(1)

    # When the segment is already clipped, move one point further in.
    shift = 1 if clipped else 0

    for line_index in range(parentCl.GetNumberOfCells()):
        current_line = extract_single_line(parentCl, line_index)
        line_locator = get_vtk_point_locator(current_line)

        id_up = line_locator.FindClosestPoint(point_up)
        id_down = line_locator.FindClosestPoint(point_down)

        if line_index == 0:
            # Only the first line keeps an upstream patch.
            ids.append(id_up - shift)
            total_points += id_up + 1
        ids.append(id_down + shift)
        total_points += current_line.GetNumberOfPoints() - id_down

    return ids, total_points
def extract_patches_ids(parentCl, clipPts):
    """For each clipping points (clipPts) extract the corresponding ID for each line in
    the centerline.

    Args:
        parentCl (vtkPolyData): Centerline to search.
        clipPts (vtkPoints): Two or three clipping points; with three, the
            first is a common (upstream) point shared by all lines.

    Returns:
        clipIds (list): A list of IDs.
        numberOfPoints (int): Total number of points.
    """
    clipIds = []
    numberOfPoints = 0
    N = clipPts.GetNumberOfPoints()

    if N == 3:
        commonPoint = clipPts.GetPoint(0)
        pnt_1 = clipPts.GetPoint(1)
        pnt_2 = clipPts.GetPoint(2)
    else:
        # NOTE(review): commonPoint is undefined here; safe only because the
        # branch using it is guarded by N == 3 below — verify callers never
        # pass other point counts.
        pnt_1 = clipPts.GetPoint(0)
        pnt_2 = clipPts.GetPoint(1)

    for j in range(parentCl.GetNumberOfCells()):
        cellLine = extract_single_line(parentCl, j)
        locator = get_vtk_point_locator(cellLine)

        if j == 0 and N == 3:
            # The common upstream point only bounds the first line's patch.
            upstreamId = locator.FindClosestPoint(commonPoint)
            clipIds.append(upstreamId)
            numberOfPoints += upstreamId + 1

        ID1 = locator.FindClosestPoint(pnt_1)
        ID2 = locator.FindClosestPoint(pnt_2)

        distance1 = get_distance(pnt_1, cellLine.GetPoints().GetPoint(ID1))
        distance2 = get_distance(pnt_2, cellLine.GetPoints().GetPoint(ID2))

        # If neither clip point lies on (close to) this line, clip from the start;
        # otherwise pick the closer of the two candidates.
        if distance1 > 1 and distance2 > 1:
            ID = 0
        else:
            ID = ID1 if distance1 < distance2 else ID2

        if N == 2:
            # NOTE(review): with two clip points these are overwritten on every
            # iteration, so only the last line's result is returned — presumably
            # intended for single-line centerlines; confirm against callers.
            clipIds = [ID1, ID2]
            numberOfPoints = cellLine.GetNumberOfPoints()
        else:
            clipIds.append(ID)
            numberOfPoints += cellLine.GetNumberOfPoints() - ID

    return clipIds, numberOfPoints
def interpolate_patch_centerlines(patchCenterlines, parentCenterlines,
                                  additionalPoint, lower, version, tension=0,
                                  continuity=0):
    """Interpolate new centerlines between end and starting points. Given
    additionalPoint, lower, and version, then number and method for interpolation varies.

    Args:
        patchCenterlines (vtkPolyData): Clipped centerline.
        parentCenterlines (vtkPolyData): The original centerline.
        additionalPoint (vtkPoints): Additional point to interpolate through.
        lower (str): None / 'lower' / 'bif' to indicate how to interpolate.
        version (bool): Method for interpolation.
        tension (float): Variable for the Kochanek spline
        continuity (float): Variable for the Kochanek spline

    Returns:
        centerline (vtkPolyData): The new centerline, including the new interpolated
        segment.
    """
    # NOTE(review): `lower` is not referenced in this body — confirm whether it
    # is consumed elsewhere or is a leftover parameter.
    if additionalPoint is not None:
        # Pre-compute, per parent line, the point ID closest to the extra point.
        additionalPointIds = []
        for i in range(parentCenterlines.GetNumberOfCells()):
            line = extract_single_line(parentCenterlines, i)
            additionalPointIds.append(line.FindPoint(additionalPoint))
    else:
        additionalPointIds = ["" for i in range(parentCenterlines.GetNumberOfCells())]

    interpolatedLines = vtk.vtkPolyData()
    interpolatedPoints = vtk.vtkPoints()
    interpolatedCellArray = vtk.vtkCellArray()

    pointsInserted = 0
    interpolatedCellArray.Initialize()

    for i in range(parentCenterlines.GetNumberOfCells()):
        startingCell = vtk.vtkGenericCell()
        endingCell = vtk.vtkGenericCell()

        numberOfInterpolationPoints = parentCenterlines.GetCell(i).GetNumberOfPoints()

        # Cell 0 of the patched centerlines is the common upstream patch; the
        # daughter patch for parent line i is cell i + 1.
        patchCenterlines.GetCell(0, startingCell)
        patchCenterlines.GetCell(i + 1, endingCell)

        if version:
            splinePoints = interpolate_spline(startingCell, endingCell, additionalPoint)
        else:
            splinePoints = interpolate_two_cells(startingCell, endingCell,
                                                 numberOfInterpolationPoints,
                                                 additionalPointIds[i], additionalPoint,
                                                 tension, continuity)

        interpolatedCellArray.InsertNextCell(splinePoints.GetNumberOfPoints())
        for j in range(splinePoints.GetNumberOfPoints()):
            interpolatedPoints.InsertNextPoint(splinePoints.GetPoint(j))
            interpolatedCellArray.InsertCellPoint(pointsInserted + j)
        pointsInserted += splinePoints.GetNumberOfPoints()

    interpolatedLines.SetPoints(interpolatedPoints)
    interpolatedLines.SetLines(interpolatedCellArray)

    # Recompute abscissas and parallel-transport normals on the new geometry.
    attributeFilter = vtkvmtk.vtkvmtkCenterlineAttributesFilter()
    attributeFilter.SetInputData(interpolatedLines)
    attributeFilter.SetAbscissasArrayName(abscissasArrayName)
    attributeFilter.SetParallelTransportNormalsArrayName(parallelTransportNormalsArrayName)
    attributeFilter.Update()

    attributeInterpolatedLines = attributeFilter.GetOutput()

    return attributeInterpolatedLines
def interpolate_spline(startCell, endCell, additionalPoint):
    """Interpolate between two lines using splrep from scipy, potentially with an
    additional point (additionalPoint).

    Args:
        startCell (vtkPolyData): Start line
        endCell (tkPolyData): End line
        additionalPoint (list): A list with the coordinates to the additional point.

    Returns:
        centerline (vtkPolyData): The new interpolated centerline.
    """
    # If the centerline does not pass the bifurcation, return the centerline
    if startCell.GetPoints().GetPoint(0) == endCell.GetPoints().GetPoint(0):
        return endCell.GetPoints()

    # Get number of cells
    num_start = startCell.GetNumberOfPoints()
    num_end = endCell.GetNumberOfPoints()
    get_startCell = startCell.GetPoints()
    get_endCell = endCell.GetPoints()

    points = []
    n = 5                      # stride between sampled control points
    N = 100                    # number of points on the interpolated segment
    num_centerline_points = 3  # control points taken from each side

    # Sample the last control points of the start line (in forward order) ...
    for i in range(num_centerline_points - 1, -1, -1):
        points.append(get_startCell.GetPoint(num_start - n * i - 1))

    # ... optionally the extra point ...
    if additionalPoint is not None:
        points.append(additionalPoint)

    # ... and the first control points of the end line.
    for i in range(num_centerline_points):
        points.append(get_endCell.GetPoint(i * n))

    # Parameterize the control points by cumulative arc length.
    curv_coor = np.zeros(len(points))
    for i in range(len(points) - 1):
        curv_coor[i + 1] = curv_coor[i] + get_distance(points[i], points[i + 1])

    points = np.asarray(points)

    # Fit a cubic B-spline per coordinate and resample it with N points.
    fx = splrep(curv_coor, points[:, 0], k=3)
    fy = splrep(curv_coor, points[:, 1], k=3)
    fz = splrep(curv_coor, points[:, 2], k=3)

    curv_coor = np.linspace(curv_coor[0], curv_coor[-1], N)
    fx_ = splev(curv_coor, fx)
    fy_ = splev(curv_coor, fy)
    fz_ = splev(curv_coor, fz)

    # Assemble: untouched start points + interpolated segment + untouched end points.
    tmp = []
    for i in range(num_start - n * num_centerline_points):
        tmp.append(get_startCell.GetPoint(i))

    for j in range(N):
        tmp.append([fx_[j], fy_[j], fz_[j]])

    for k in range(n * num_centerline_points, num_end):
        tmp.append(get_endCell.GetPoint(k))

    points = vtk.vtkPoints()
    points.SetNumberOfPoints(len(tmp))
    for l in range(len(tmp)):
        points.SetPoint(l, tmp[l])

    return points
def interpolate_two_cells(startCell, endCell, numberOfSplinePoints, additionalPointId,
                          additionalPoint, tension, continuitiy):
    """Interpolate between two lines with Kochanek splines, optionally passing
    through one additional point.

    Args:
        startCell (vtkPolyData): Start line
        endCell (tkPolyData): End line
        numberOfSplinePoints (int): Number of spline point.
        additionalPointId (int): Id of the additional point.
        additionalPoint (list): A list with the coordinates to the additional point.
        tension (float): Variable for the Kochanek spline
        continuitiy (float): Variable for the Kochanek spline

    Returns:
        centerline (vtkPolyData): The new interpolated centerline.
    """
    # One spline per coordinate (x, y, z), all sharing the same parameters.
    splines = [vtk.vtkKochanekSpline() for _ in range(3)]
    for spline in splines:
        spline.SetDefaultTension(tension)
        spline.SetDefaultContinuity(continuitiy)

    def add_point(param_value, coordinates):
        # Feed one 3D point into the three coordinate splines.
        for spline, coordinate in zip(splines, coordinates):
            spline.AddPoint(param_value, coordinate)

    # Control points from the start line occupy parameters 0..len(start)-1.
    start_points = startCell.GetPoints()
    for index in range(startCell.GetNumberOfPoints()):
        add_point(float(index), start_points.GetPoint(index))

    if additionalPoint is not None:
        add_point(float(additionalPointId), additionalPoint)

    # The end line is anchored so its last point lands at numberOfSplinePoints - 1.
    first_end_param = numberOfSplinePoints - endCell.GetNumberOfPoints()
    end_points = endCell.GetPoints()
    for index in range(endCell.GetNumberOfPoints()):
        add_point(float(first_end_param + index), end_points.GetPoint(index))

    for spline in splines:
        spline.Compute()

    result = vtk.vtkPoints()
    result.SetNumberOfPoints(numberOfSplinePoints)
    for index in range(numberOfSplinePoints):
        parameter = float(index)
        result.SetPoint(index,
                        splines[0].Evaluate(parameter),
                        splines[1].Evaluate(parameter),
                        splines[2].Evaluate(parameter))

    return result
def extract_cylindric_interpolation_voronoi_diagram(cellId, pointId, cylinderRadius,
                                                    voronoi, centerlines,
                                                    interpolationHalfSize=3):
    """Extract the voronoi diagram within a cylinder to be used for extrapolation.

    Args:
        cellId (int): LineId of the centerline.
        pointId (int): Point Id of where to extract the cylinder.
        cylinderRadius (float): The radius of the cylinder.
        voronoi (vtkPolyData): The voronoi diagram to extract cylinder from.
        centerlines (vtkPolyData): Centerline corresponding to the Voronoi diagram.
        interpolationHalfSize (int): Number of centerline points between top and
            center (and center and bottom) of the cylinder.

    Returns:
        interpolationDataset (vtkPolyData): The extracted cylinder from the Voronoi
        diagram.
    """
    # The cylinder axis runs along the centerline: backwards from pointId on the
    # first line, forwards on every other line.
    if cellId == 0:
        cylinderTop = centerlines.GetPoint(pointId)
        cylinderCenter = centerlines.GetPoint(pointId - interpolationHalfSize)
        cylinderBottom = centerlines.GetPoint(pointId - 2 * interpolationHalfSize)
    else:
        cylinderTop = centerlines.GetPoint(pointId)
        cylinderCenter = centerlines.GetPoint(pointId + interpolationHalfSize)
        cylinderBottom = centerlines.GetPoint(pointId + 2 * interpolationHalfSize)

    interpolationDataset = vtk.vtkPolyData()
    interpolationDatasetPoints = vtk.vtkPoints()
    interpolationDatasetCellArray = vtk.vtkCellArray()

    # Pass 1: mark every Voronoi point that falls inside the cylinder.
    maskArray = vtk.vtkIntArray()
    maskArray.SetNumberOfComponents(1)
    maskArray.SetNumberOfTuples(voronoi.GetNumberOfPoints())
    maskArray.FillComponent(0, 0)

    for i in range(voronoi.GetNumberOfPoints()):
        point = voronoi.GetPoint(i)
        isInside = is_point_inside_interpolation_cylinder(point, cylinderTop,
                                                          cylinderCenter, cylinderBottom,
                                                          cylinderRadius)
        if isInside == 1:
            maskArray.SetTuple1(i, 1)

    numberOfInterpolationPoints = compute_number_of_masked_points(maskArray)

    radiusArray = get_vtk_array(radiusArrayName, 1, numberOfInterpolationPoints)

    # Pass 2: copy the masked points (and their radii) into the output dataset.
    count = 0
    for i in range(voronoi.GetNumberOfPoints()):
        value = maskArray.GetTuple1(i)
        if value == 1:
            interpolationDatasetPoints.InsertNextPoint(voronoi.GetPoint(i))
            interpolationDatasetCellArray.InsertNextCell(1)
            interpolationDatasetCellArray.InsertCellPoint(count)
            radius = voronoi.GetPointData().GetArray(radiusArrayName).GetTuple1(i)
            radiusArray.SetTuple1(count, radius)
            count += 1

    interpolationDataset.SetPoints(interpolationDatasetPoints)
    interpolationDataset.SetVerts(interpolationDatasetCellArray)
    interpolationDataset.GetPointData().AddArray(radiusArray)

    return interpolationDataset
def is_point_inside_interpolation_cylinder(x, t, c, b, r):
"""Check if a (Voronoi) point is inside a cylinder.
Args:
x (list): Point to check.
t (list): Top of the cylinder.
c (list): Center of the cylinder.
b (list): Bottom of the cylinder.
r (float): Radius of the cylinder.
Returns:
inside (bool): True if inside, False if outside.
"""
halfheigth = get_distance(b, t) / 2
xc = [x[i] - c[i] | |
format=("%Y-%m-%dT%H:%M:%S.%f", "%Y-%m-%dT%H:%M:%S", "%Y-%m-%d")
):
super(DefaultDateDeserializer, self).__init__()
self.format = format
    def deserialize(
        self, name: str, block: Any, parameterized: param.Parameterized
    ) -> None:
        """Convert ``block`` to a datetime and set it on parameter ``name``.

        Tries, in order: a :obj:`None` check, a :class:`datetime.datetime`
        passthrough, the configured string formats, a numeric interpretation
        (ordinal day or POSIX timestamp), and finally any extra datetime types
        registered in ``param.dt_types``.

        Raises:
            ParamConfigTypeError: If no conversion succeeds.
        """
        if self.check_if_allow_none_and_set(name, block, parameterized):
            return
        from datetime import datetime

        # Already a datetime: set directly.
        if isinstance(block, datetime):
            parameterized.param.set_param(name, block)
            return
        # String: try each configured strptime format.
        if self.format is not None and isinstance(block, str):
            v = _get_datetime_from_formats(block, self.format)
            if v is not None:
                parameterized.param.set_param(name, v)
                return
        # Numeric: integral values small enough are treated as proleptic
        # ordinal days; everything else as a POSIX timestamp (UTC).
        try:
            float_block = float(block)
            if float_block % 1 or float_block > datetime.max.toordinal():
                block = datetime.utcfromtimestamp(float_block)
            else:
                block = datetime.fromordinal(int(float_block))
            parameterized.param.set_param(name, block)
            return
        except Exception:
            pass
        # Last resort: any other datetime-like types param knows about.
        for dt_type in param.dt_types:
            try:
                block = dt_type(block)
                parameterized.param.set_param(name, block)
                return
            except Exception:
                pass
        raise ParamConfigTypeError(
            parameterized, name, 'cannot convert "{}" to datetime'.format(block)
        )
class DefaultDateRangeDeserializer(ParamConfigDeserializer):
    """Default date range deserializer

    Similar to deserializing a single `datetime`, but applied to each element
    separately. Cast to a tuple.
    """

    def __init__(
        self, format=("%Y-%m-%dT%H:%M:%S.%f", "%Y-%m-%dT%H:%M:%S", "%Y-%m-%d")
    ):
        super(DefaultDateRangeDeserializer, self).__init__()
        # strptime formats tried, in order, for string elements.
        self.format = format

    def deserialize(
        self, name: str, block: Any, parameterized: param.Parameterized
    ) -> None:
        """Convert every element of ``block`` to a datetime and set the tuple.

        Each element goes through the same fallback chain as the single-date
        deserializer: datetime passthrough, configured string formats, numeric
        (ordinal day / POSIX timestamp), then ``param.dt_types``.

        Raises:
            ParamConfigTypeError: If any element cannot be converted, or the
                resulting tuple is rejected by the parameter.
        """
        if self.check_if_allow_none_and_set(name, block, parameterized):
            return
        from datetime import datetime

        val = []
        for elem in block:
            # Already a datetime: accept as-is.
            if isinstance(elem, datetime):
                val.append(elem)
                continue
            # String: try each configured strptime format.
            if self.format is not None and isinstance(elem, str):
                v = _get_datetime_from_formats(elem, self.format)
                if v is not None:
                    val.append(v)
                    continue
            # Numeric: ordinal day for small integral values, else POSIX stamp.
            try:
                float_elem = float(elem)
                if float_elem % 1 or float_elem > datetime.max.toordinal():
                    elem = datetime.utcfromtimestamp(float_elem)
                else:
                    elem = datetime.fromordinal(int(float_elem))
                val.append(elem)
                continue
            except Exception:
                pass
            # Last resort: any other datetime-like types param knows about.
            # BUG FIX: the previous code used ``continue`` inside this inner
            # loop, which only advanced to the next dt_type (possibly appending
            # twice) and then always fell through to the raise below, even
            # after a successful conversion. With for/else, a successful
            # conversion breaks out and is appended once; only exhausting every
            # dt_type raises.
            for dt_type in param.dt_types:
                try:
                    elem = dt_type(elem)
                    break
                except Exception:
                    pass
            else:
                raise ParamConfigTypeError(
                    parameterized,
                    name,
                    'cannot convert "{}" from "{}" to datetime'.format(elem, block),
                )
            val.append(elem)
        val = tuple(val)
        try:
            parameterized.param.set_param(name, val)
        except ValueError as e:
            raise ParamConfigTypeError(parameterized, name) from e
class DefaultListDeserializer(ParamConfigDeserializer):
    """Default list deserializer

    The process:

    1. :obj:`None` check
    2. When the parameter declares a ``class_``, each element of `block`
       (always assumed iterable) is kept if it is already an instance of that
       class, and otherwise used as the first argument to construct one,
       together with any positional/keyword arguments given to this
       deserializer at initialization.
    3. The result is cast to a list and set.
    """

    def __init__(self, *args, **kwargs):
        super(DefaultListDeserializer, self).__init__()
        # Extra arguments forwarded to the element class constructor.
        self.args = args
        self.kwargs = kwargs

    def deserialize(
        self, name: str, block: Any, parameterized: param.Parameterized
    ) -> None:
        """Coerce `block` into a list (of ``class_`` instances, if declared)."""
        if self.check_if_allow_none_and_set(name, block, parameterized):
            return
        param_obj = parameterized.param.params()[name]
        try:
            if param_obj.class_:
                converted = []
                for element in block:
                    if isinstance(element, param_obj.class_):
                        converted.append(element)
                    else:
                        converted.append(
                            param_obj.class_(element, *self.args, **self.kwargs)
                        )
                block = converted
            else:
                block = list(block)
            parameterized.param.set_param(name, block)
        except (TypeError, ValueError) as e:
            raise ParamConfigTypeError(parameterized, name) from e
class DefaultListSelectorDeserializer(ParamConfigDeserializer):
    """Default ListSelector deserializer

    Every element of `block` (assumed iterable) is matched against a value or
    name in the selector's :func:`param.ListSelector.get_range` method.
    """

    def deserialize(
        self, name: str, block: Any, parameterized: param.Parameterized
    ) -> None:
        """Resolve each element of `block` against the selector's range."""
        # A list selector can only be empty, never None, so there is no
        # "None" check here.
        try:
            resolved = []
            for element in block:
                resolved.append(
                    _find_object_in_object_selector(name, element, parameterized)
                )
            parameterized.param.set_param(name, resolved)
        except TypeError as e:
            raise ParamConfigTypeError(parameterized, name) from e
class _CastDeserializer(ParamConfigDeserializer):
    """Default {0} deserializer
    The process:
    1. :obj:`None` check
    2. If `block` is a(n) {0}, set it
    3. Initialize a(n) {0} instance with `block` as the first argument
    plus any extra positional or keyword arguments passed to the
    deserializer on initialization
    """

    def __init__(self, *args, **kwargs):
        super(_CastDeserializer, self).__init__()
        # Extra arguments forwarded to the target-class constructor.
        self.args = args
        self.kwargs = kwargs

    @classmethod
    def class_(cls, x, *args, **kwargs):
        # Concrete subclasses replace this with the target type (e.g. int).
        raise NotImplementedError(
            "class_ must be specified in definition of {}".format(cls)
        )

    def deserialize(
        self, name: str, block: Any, parameterized: param.Parameterized
    ) -> None:
        """Cast `block` to the target class (if needed) and set it."""
        if self.check_if_allow_none_and_set(name, block, parameterized):
            return
        try:
            if isinstance(block, self.class_):
                cast_value = block
            else:
                cast_value = self.class_(block, *self.args, **self.kwargs)
            parameterized.param.set_param(name, cast_value)
            return
        except ValueError as e:
            raise ParamConfigTypeError(parameterized, name) from e
class DefaultIntegerDeserializer(_CastDeserializer):
    # Casts `block` to int; documentation is the shared _CastDeserializer recipe.
    __doc__ = _CastDeserializer.__doc__.format("int")
    class_ = int
class DefaultNumberDeserializer(_CastDeserializer):
    # Casts `block` to float; documentation is the shared _CastDeserializer recipe.
    __doc__ = _CastDeserializer.__doc__.format("float")
    class_ = float
class DefaultNumericTupleDeserializer(ParamConfigDeserializer):
    """Default numeric tuple deserializer

    After the usual :obj:`None` check, every element of `block` is cast to a
    :class:`float` and the result is stored as a :class:`tuple`.
    """

    def deserialize(
        self, name: str, block: Any, parameterized: param.Parameterized
    ) -> None:
        """Cast the elements of `block` to floats and set them as a tuple."""
        if self.check_if_allow_none_and_set(name, block, parameterized):
            return
        try:
            as_floats = [float(element) for element in block]
            parameterized.param.set_param(name, tuple(as_floats))
            return
        except ValueError as e:
            raise ParamConfigTypeError(parameterized, name) from e
class DefaultObjectSelectorDeserializer(ParamConfigDeserializer):
    """Default ObjectSelector deserializer

    After a :obj:`None` check, `block` is matched against a value or name in
    the selector's :func:`param.ObjectSelector.get_range` method.
    """

    def deserialize(
        self, name: str, block: Any, parameterized: param.Parameterized
    ) -> None:
        """Resolve `block` against the selector's range and set it."""
        if self.check_if_allow_none_and_set(name, block, parameterized):
            return
        match = _find_object_in_object_selector(name, block, parameterized)
        parameterized.param.set_param(name, match)
class DefaultSeriesDeserializer(_CastDeserializer):
    __doc__ = _CastDeserializer.__doc__.format("pandas.Series")

    @property
    def class_(self):
        # Imported lazily so pandas is only required when this deserializer
        # is actually used.
        import pandas

        return pandas.Series
class DefaultStringDeserializer(_CastDeserializer):
    # Casts `block` to str; documentation is the shared _CastDeserializer recipe.
    __doc__ = _CastDeserializer.__doc__.format("str")
    class_ = str
class DefaultTupleDeserializer(_CastDeserializer):
    # Casts `block` to tuple; documentation is the shared _CastDeserializer recipe.
    __doc__ = _CastDeserializer.__doc__.format("tuple")
    class_ = tuple
class JsonStringArrayDeserializer(DefaultArrayDeserializer):
    """Parses a block as JSON before converting it into a numpy array

    The default deserializer used in INI files. Input is always assumed to be a
    string or :obj:`None`. If :obj:`None`, a none check is performed.
    Otherwise, it parses the value as JSON, then does the same as
    :class:`DefaultArrayDeserializer`. However, if the input ends in one of
    the suffixes in :obj:`file_suffixes` (currently only ".csv"), the input is
    treated as a file path and passed directly to
    :class:`DefaultArrayDeserializer`

    See Also
    --------
    deserialize_to_json
        To deserialize JSON into :class:`param.parameterized.Parameterized` instances
    """

    # Inputs whose extension is listed here bypass JSON parsing entirely.
    file_suffixes = {"csv"}

    def deserialize(
        self, name: str, block: Any, parameterized: param.Parameterized
    ) -> None:
        """JSON-decode `block` (unless it names a file) and delegate to the parent."""
        if self.check_if_allow_none_and_set(name, block, parameterized):
            return
        bs = block.split(".")
        # Looks like a file path with a recognized suffix: let the parent load it.
        if len(bs) > 1 and bs[-1] in self.file_suffixes:
            return super(JsonStringArrayDeserializer, self).deserialize(
                name, block, parameterized
            )
        try:
            block = json.loads(block)
        except json.JSONDecodeError as e:
            raise ParamConfigTypeError(parameterized, name) from e
        super(JsonStringArrayDeserializer, self).deserialize(name, block, parameterized)
class JsonStringDataFrameDeserializer(DefaultDataFrameDeserializer):
    """Parses block as JSON before converting to pandas.DataFrame

    The default deserializer used in INI files. Input is always assumed to be a
    string or :obj:`None`. If :obj:`None`, a none check is performed.
    Otherwise, it parses the value as JSON, then does the same as
    :class:`DefaultDataFrameSerializer`. However, if the input ends in a file
    suffix like ".csv", ".xls", etc., the input will be immediately passed to
    :class:`DefaultDataFrameSerializer`

    See Also
    --------
    deserialize_to_json
        To deserialize JSON into :class:`param.parameterized.Parameterized` instances
    """

    # Inputs whose extension is listed here bypass JSON parsing and are treated
    # as file paths. Suffixes are compared against block.split(".")[-1], so they
    # must NOT carry a leading dot.
    # BUG FIX: ".xls" previously had a leading dot, so "sheet.xls" (whose last
    # split component is "xls") could never match; normalized to "xls".
    file_suffixes = {
        "csv",
        "json",
        "html",
        "xls",
        "h5",
        "feather",
        "parquet",
        "dta",
        "sas7bdat",
        "pkl",
    }

    def deserialize(
        self, name: str, block: Any, parameterized: param.Parameterized
    ) -> None:
        """JSON-decode `block` (unless it names a file) and delegate to the parent."""
        if self.check_if_allow_none_and_set(name, block, parameterized):
            return
        bs = block.split(".")
        # Looks like a file path with a recognized suffix: let the parent load it.
        if len(bs) > 1 and bs[-1] in self.file_suffixes:
            return super(JsonStringDataFrameDeserializer, self).deserialize(
                name, block, parameterized
            )
        try:
            block = json.loads(block)
        except json.JSONDecodeError as e:
            raise ParamConfigTypeError(parameterized, name) from e
        super(JsonStringDataFrameDeserializer, self).deserialize(
            name, block, parameterized
        )
def _to_json_string_deserializer(cls, typename):
    """Derive from `cls` a deserializer that JSON-decodes its input first.

    Args:
        cls: The default deserializer class to wrap.
        typename: Human-readable type name, interpolated into the generated
            class documentation.

    Returns:
        The derived deserializer class.
    """
    class _JsonStringDeserializer(cls):
        # NOTE(review): a string literal followed by .format(...) is an
        # expression, not a docstring literal, so __doc__ may not actually be
        # set from this — verify whether that is intended.
        """Parses block as json before converting into {}
        The default deserializer used in INI files.
        1. :obj:`None` check
        2. It parses the value as a JSON string
        3. Does the same as :class:`{}`
        See Also
        --------
        deserialize_to_json
            To deserialize json into :class:`param.parameterized.Parameterized`
            instances
        """.format(
            typename, cls.__name__
        )

        def deserialize(
            self, name: str, block: Any, parameterized: param.Parameterized
        ) -> None:
            # None check, then JSON-decode, then defer to the wrapped class.
            if self.check_if_allow_none_and_set(name, block, parameterized):
                return
            try:
                block = json.loads(block)
            except json.JSONDecodeError as e:
                raise ParamConfigTypeError(parameterized, name) from e
            super(_JsonStringDeserializer, self).deserialize(name, block, parameterized)

    return _JsonStringDeserializer
JsonStringDateRangeDeserializer = _to_json_string_deserializer(
DefaultDateRangeDeserializer, "date range"
)
JsonStringDictDeserializer = _to_json_string_deserializer(DefaultDeserializer, "dict")
JsonStringListDeserializer = _to_json_string_deserializer(
DefaultListDeserializer, "list"
)
JsonStringListSelectorDeserializer = _to_json_string_deserializer(
DefaultListSelectorDeserializer, "list selector"
)
JsonStringNumericTupleDeserializer = _to_json_string_deserializer(
DefaultNumericTupleDeserializer, "numeric tuple"
)
JsonStringSeriesDeserializer = _to_json_string_deserializer(
DefaultSeriesDeserializer, "pandas.Series"
)
JsonStringTupleDeserializer = _to_json_string_deserializer(
DefaultTupleDeserializer, "tuple"
)
"""Default deserializers by parameter type
See Also
--------
deserialize_from_dict
How these are used
"""
DEFAULT_DESERIALIZER_DICT = {
param.Array: DefaultArrayDeserializer(),
param.Boolean: DefaultBooleanDeserializer(),
param.ClassSelector: DefaultClassSelectorDeserializer(),
param.DataFrame: DefaultDataFrameDeserializer(),
param.Date: DefaultDateDeserializer(),
param.DateRange: DefaultDateRangeDeserializer(),
param.HookList: DefaultListDeserializer(),
param.Integer: DefaultIntegerDeserializer(),
param.List: DefaultListDeserializer(),
param.ListSelector: DefaultListSelectorDeserializer(),
param.Magnitude: DefaultNumberDeserializer(),
param.MultiFileSelector: DefaultListSelectorDeserializer(),
param.Number: DefaultNumberDeserializer(),
param.NumericTuple: DefaultNumericTupleDeserializer(),
param.ObjectSelector: DefaultObjectSelectorDeserializer(),
param.Range: DefaultNumericTupleDeserializer(),
param.Series: DefaultSeriesDeserializer(),
param.String: DefaultStringDeserializer(),
param.Tuple: DefaultTupleDeserializer(),
param.XYCoordinates: DefaultNumericTupleDeserializer(),
}
"""Default deserializer that is not type specific
See Also
--------
deserialize_from_dict
How this is used
"""
DEFAULT_BACKUP_DESERIALIZER = DefaultDeserializer()
"""JSON string deserializers by param type
Used as defaults when parsing an INI file
See Also
--------
deserialize_to_ini
How these are used
"""
JSON_STRING_DESERIALIZER_DICT = {
param.Array: JsonStringArrayDeserializer(),
param.DataFrame: JsonStringDataFrameDeserializer(),
param.DateRange: JsonStringDateRangeDeserializer(),
param.Dict: JsonStringDictDeserializer(),
param.List: JsonStringListDeserializer(),
param.ListSelector: JsonStringListSelectorDeserializer(),
param.MultiFileSelector: JsonStringListSelectorDeserializer(),
param.NumericTuple: JsonStringNumericTupleDeserializer(),
param.Range: JsonStringNumericTupleDeserializer(),
param.Series: JsonStringSeriesDeserializer(),
param.Tuple: JsonStringTupleDeserializer(),
param.XYCoordinates: JsonStringNumericTupleDeserializer(),
}
def _deserialize_from_dict_flat(
dict_, parameterized, deserializer_name_dict, deserializer_type_dict, on_missing
):
if deserializer_type_dict is not None:
deserializer_type_dict2 = dict(DEFAULT_DESERIALIZER_DICT)
deserializer_type_dict2.update(deserializer_type_dict)
deserializer_type_dict = deserializer_type_dict2
else:
deserializer_type_dict = DEFAULT_DESERIALIZER_DICT
if deserializer_name_dict is None:
deserializer_name_dict = dict()
for name, block in list(dict_.items()):
if name not in parameterized.param.params():
msg = 'No param "{}" to | |
"""
Project Module.
"""
import os
from optparse import Values
from pathlib import Path
from typing import List, Optional, Any, Dict, Callable, TYPE_CHECKING
# from pineboolib.fllegacy.flaccesscontrollists import FLAccessControlLists # FIXME: Not allowed yet
from PyQt5 import QtWidgets
from pineboolib.core.utils import logging, utils_base
from pineboolib.core.utils.struct import AreaStruct
from pineboolib.core import exceptions, settings, message_manager
from pineboolib.application.database import pnconnectionmanager
from pineboolib.application.utils import path, xpm
from pineboolib.application import module, file
if TYPE_CHECKING:
from pineboolib.interfaces.dgi_schema import dgi_schema
from pineboolib.application.database import pnconnection
from pineboolib.core.utils.struct import ActionStruct # noqa: F401
LOGGER = logging.getLogger(__name__)
class Project(object):
"""
Singleton for the whole application.
Can be accessed with pineboolib.project from anywhere.
"""
_conn_manager: "pnconnectionmanager.PNConnectionManager"
_app: Optional[QtWidgets.QApplication] = None
# _conn: Optional["PNConnection"] = None # Almacena la conexión principal a la base de datos
debug_level = 100
options: Values
# _initModules = None
main_form: Any = None # FIXME: How is this used? Which type?
main_window: Any = None
acl_ = None
dgi: Optional["dgi_schema"] = None
delete_cache: bool = False
parse_project: bool = False
path = None
_splash = None
sql_drivers_manager = None
timer_ = None
no_python_cache = False # TODO: Fill this one instead
_msg_mng = None
alternative_folder: Optional[str]
_session_func_: Optional[Callable]
areas: Dict[str, AreaStruct]
files: Dict[Any, Any]
tables: Dict[Any, Any]
actions: Dict[Any, "ActionStruct"]
translator_: List[Any]
modules: Dict[str, "module.Module"]
pending_conversion_list: List[str]
    def __init__(self) -> None:
        """Initialize empty project state and prepare the temporary directory."""
        # self._conn = None
        self.dgi = None
        self.tree = None
        self.root = None
        self.alternative_folder = None
        self.apppath = ""
        # Temporary directory is taken from settings; created below if missing.
        self.tmpdir = settings.config.value("ebcomportamiento/temp_dir")
        self.parser = None
        # self.main_form_name: Optional[str] = None
        self.delete_cache = False
        self.parse_project = False
        self.translator_ = []  # FIXME: Add proper type
        self.actions = {}  # FIXME: Add proper type
        self.tables = {}  # FIXME: Add proper type
        self.files = {}  # FIXME: Add proper type
        self.areas = {}
        self.modules = {}
        self.options = Values()
        if self.tmpdir is None:
            # First run: default to ~/Pineboo/tempdata and persist the choice.
            self.tmpdir = utils_base.filedir("%s/Pineboo/tempdata" % Path.home())
            settings.config.set_value("ebcomportamiento/temp_dir", self.tmpdir)
        if not os.path.exists(self.tmpdir):
            Path(self.tmpdir).mkdir(parents=True, exist_ok=True)
        self._session_func_ = None
        self._conn_manager = pnconnectionmanager.PNConnectionManager()
        self.pending_conversion_list = []
@property
def app(self) -> QtWidgets.QApplication:
"""Retrieve current Qt Application or throw error."""
if self._app is None:
raise Exception("No application set")
return self._app
    def set_app(self, app: QtWidgets.QApplication) -> None:
        """Set Qt Application."""
        # Stored for later retrieval via the ``app`` property.
        self._app = app
@property
def conn_manager(self) -> "pnconnectionmanager.PNConnectionManager":
"""Retrieve current connection or throw."""
if self._conn_manager is None:
raise Exception("Project is not initialized")
return self._conn_manager
@property
def DGI(self) -> "dgi_schema":
"""Retrieve current DGI or throw."""
if self.dgi is None:
raise Exception("Project is not initialized")
return self.dgi
    def init_conn(self, connection: "pnconnection.PNConnection") -> bool:
        """Initialize project with a connection.

        Args:
            connection: The connection to install as the main one.

        Returns:
            True if the connection manager accepted the connection.
        """
        # if self._conn is not None:
        #     del self._conn
        #     self._conn = None
        result = self._conn_manager.setMainConn(connection)
        if result:
            # Only configure paths and cache flags once a main connection exists.
            self.apppath = utils_base.filedir("..")
            self.delete_cache = settings.config.value("ebcomportamiento/deleteCache", False)
            self.parse_project = settings.config.value("ebcomportamiento/parseProject", False)
        return result
    def init_dgi(self, dgi: "dgi_schema") -> None:
        """Load and associate the defined DGI onto this project.

        Args:
            dgi: The DGI schema instance to attach.
        """
        # FIXME: Actually, DGI should be loaded here, or kind of.
        self.dgi = dgi
        # The message manager routes user-facing messages through the DGI.
        self._msg_mng = message_manager.Manager(dgi)
        self.dgi.extraProjectInit()
def load_modules(self) -> None:
"""Load all modules."""
for module_name, mod_obj in self.modules.items():
mod_obj.load()
self.tables.update(mod_obj.tables)
    def setDebugLevel(self, level: int) -> None:
        """
        Set debug level for application.

        @param level Number with the specified debug level.

        ***DEPRECATED***
        """
        self.debug_level = level
        # self.dgi.pnqt3ui.Options.DEBUG_LEVEL = q
# def acl(self) -> Optional[FLAccessControlLists]:
# """
# Retorna si hay o no acls cargados
# @return Objeto acl_
# """
# return self.acl_
def acl(self):
    """Return loaded ACL.

    Deprecated: ACL handling was moved out of the project; calling this
    always raises CodeDoesNotBelongHereException.
    """
    raise exceptions.CodeDoesNotBelongHereException("ACL Does not belong to PROJECT. Go away.")
def run(self) -> bool:
    """Run project. Connects to DB and loads data.

    Verifies the main connection, prepares the cache directory, makes sure
    the system tables exist, then loads areas, modules and files from the
    database, dumping file contents into the on-disk cache. Returns True
    on success.

    NOTE(review): indentation reconstructed from a whitespace-mangled
    source; nesting of the cache-cleanup and file-dump branches should be
    confirmed against the upstream pineboo sources.
    """
    self.pending_conversion_list = []
    # Reset any state left over from a previous run.
    if self.actions:
        del self.actions
    if self.tables:
        del self.tables
    self.actions = {}
    self.tables = {}
    if self.dgi is None:
        raise Exception("DGI not loaded")
    if not self.conn_manager or "main_conn" not in self.conn_manager.connections_dict.keys():
        raise exceptions.NotConnectedError(
            "Cannot execute Pineboo Project without a connection in place"
        )
    conn = self.conn_manager.mainConn()
    db_name = conn.DBName()
    # TODO: refactor this function into smaller, simpler helpers.
    # Prepare the temporary/cache area.
    if self.delete_cache and os.path.exists(path._dir("cache/%s" % db_name)):
        self.message_manager().send("splash", "showMessage", ["Borrando caché ..."])
        LOGGER.debug(
            "DEVELOP: delete_cache Activado\nBorrando %s", path._dir("cache/%s" % db_name)
        )
        # Walk bottom-up so files are removed before their directories.
        for root, dirs, files in os.walk(path._dir("cache/%s" % db_name), topdown=False):
            for name in files:
                os.remove(os.path.join(root, name))
            for name in dirs:
                os.rmdir(os.path.join(root, name))
    else:
        keep_images = settings.config.value("ebcomportamiento/keep_general_cache", False)
        if keep_images is False:
            # Best-effort cleanup of temp files; sqlite3 databases are kept.
            for file_name in os.listdir(self.tmpdir):
                if file_name.find(".") > -1 and not file_name.endswith("sqlite3"):
                    file_path = os.path.join(self.tmpdir, file_name)
                    try:
                        os.remove(file_path)
                    except Exception:
                        LOGGER.warning(
                            "No se ha podido borrar %s al limpiar la cache", file_path
                        )
                        pass
    if not os.path.exists(path._dir("cache")):
        os.makedirs(path._dir("cache"))
    if not os.path.exists(path._dir("cache/%s" % db_name)):
        os.makedirs(path._dir("cache/%s" % db_name))
    # Connect: make sure the required system tables exist.
    for table in (
        "flareas",
        "flmodules",
        "flfiles",
        "flgroups",
        "fllarge",
        "flserial",
        "flusers",
        "flvar",
        "flmetadata",
        "flsettings",
        "flupdates",
        "flmetadata",
        "flseqs",
        "flsettings",
    ):
        if not self.conn_manager.manager().existsTable(table):
            self.conn_manager.manager().createSystemTable(table)
    cursor_ = self.conn_manager.dbAux().cursor()
    # Load areas from the database, plus the implicit system area.
    self.areas = {}
    cursor_.execute(""" SELECT idarea, descripcion FROM flareas WHERE 1 = 1""")
    for idarea, descripcion in list(cursor_):
        self.areas[idarea] = AreaStruct(idarea=idarea, descripcion=descripcion)
    self.areas["sys"] = AreaStruct(idarea="sys", descripcion="Area de Sistema")
    # Fetch active modules.
    cursor_.execute(
        """ SELECT idarea, idmodulo, descripcion, icono FROM flmodules WHERE bloqueo = %s """
        % conn.driver().formatValue("bool", "True", False)
    )
    self.modules = {}
    for idarea, idmodulo, descripcion, icono in cursor_:
        icono = xpm.cache_xpm(icono)
        self.modules[idmodulo] = module.Module(idarea, idmodulo, descripcion, icono)
    # The system module is always present; its icon lives on disk.
    file_object = open(
        utils_base.filedir(utils_base.get_base_dir(), "system_module", "sys.xpm"), "r"
    )
    icono = file_object.read()
    file_object.close()
    # icono = clearXPM(icono)
    self.modules["sys"] = module.Module("sys", "sys", "Administración", icono)
    cursor_.execute(
        """ SELECT idmodulo, nombre, sha FROM flfiles WHERE NOT sha = '' ORDER BY idmodulo, nombre """
    )
    file_1 = open(path._dir("project.txt"), "w")
    self.files = {}
    count = 0
    list_files: List[str] = []
    for idmodulo, nombre, sha in list(cursor_):
        if not self.dgi.accept_file(nombre):
            continue
        count += 1
        if idmodulo not in self.modules:
            continue  # I
        fileobj = file.File(idmodulo, nombre, sha, db_name=db_name)
        if nombre in self.files:
            LOGGER.warning("run: file %s already loaded, overwritting..." % nombre)
        self.files[nombre] = fileobj
        self.modules[idmodulo].add_project_file(fileobj)
        file_1.write(fileobj.filekey + "\n")
        fileobjdir = os.path.dirname(path._dir("cache", fileobj.filekey))
        file_name = path._dir("cache", fileobj.filekey)
        if not os.path.exists(fileobjdir):
            os.makedirs(fileobjdir)
        if os.path.exists(file_name):
            if file_name.endswith(".qs"):
                folder_path = os.path.dirname(file_name)
                static_flag = "%s/STATIC" % folder_path
                file_name_py = "%s.py" % file_name[:-3]
                # A STATIC flag forces a re-dump: drop the cached .qs/.py.
                if os.path.exists(static_flag):
                    os.remove(static_flag)
                    if os.path.exists(file_name):
                        os.remove(file_name)
                    if os.path.exists(file_name_py):
                        os.remove(file_name_py)
                elif os.path.exists(file_name_py):
                    continue
            elif file_name.endswith(".mtd"):
                if settings.config.value(
                    "ebcomportamiento/orm_enabled", False
                ) and not settings.config.value("ebcomportamiento/orm_parser_disabled", False):
                    if os.path.exists("%s_model.py" % path._dir("cache", fileobj.filekey[:-4])):
                        continue
                else:
                    continue
        cur2 = self.conn_manager.useConn("dbAux").cursor()
        sql = (
            "SELECT contenido FROM flfiles WHERE idmodulo = %s AND nombre = %s AND sha = %s"
            % (
                conn.driver().formatValue("string", idmodulo, False),
                conn.driver().formatValue("string", nombre, False),
                conn.driver().formatValue("string", sha, False),
            )
        )
        cur2.execute(sql)
        for (contenido,) in list(cur2):
            # Source-like files are stored UTF-8, everything else Latin-9.
            encode_ = "utf-8" if str(nombre).endswith((".kut", ".ts", ".py")) else "ISO-8859-15"
            folder = path._dir(
                "cache",
                "/".join(fileobj.filekey.split("/")[: len(fileobj.filekey.split("/")) - 1]),
            )
            if os.path.exists(folder) and not os.path.exists(
                file_name
            ):  # Clear the folder when the target file does not exist
                for root, dirs, files in os.walk(folder):
                    for file_item in files:
                        os.remove(os.path.join(root, file_item))
            if contenido and not os.path.exists(file_name):
                self.message_manager().send(
                    "splash", "showMessage", ["Volcando a caché %s..." % nombre]
                )
                file_2 = open(file_name, "wb")
                txt = contenido.encode(encode_, "replace")
                file_2.write(txt)
                file_2.close()
        if self.parse_project and nombre.endswith(".qs"):
            if os.path.exists(file_name):
                list_files.append(file_name)
    file_1.close()
    self.message_manager().send("splash", "showMessage", ["Convirtiendo a Python ..."])
    if list_files:
        self.parse_script_list(list_files)
    # Load the project's common core (system module files on disk).
    for root, dirs, files in os.walk(
        utils_base.filedir(utils_base.get_base_dir(), "system_module")
    ):
        # list_files = []
        for nombre in files:
            if root.find("modulos") == -1:
                fileobj = file.File("sys", nombre, basedir=root, db_name=db_name)
                self.files[nombre] = fileobj
                self.modules["sys"].add_project_file(fileobj)
                # if self.parse_project and nombre.endswith(".qs"):
                #     self.parseScript(path._dir(root, nombre))
                #     list_files.append(path._dir(root, nombre))
        # self.parse_script_lists(list_files)
    if settings.config.value(
        "ebcomportamiento/orm_enabled", False
    ) and not settings.config.value("ebcomportamiento/orm_load_disabled", False):
        self.message_manager().send("splash", "showMessage", ["Cargando objetos ..."])
        from pineboolib.application.parsers.mtdparser import pnormmodelsfactory
        pnormmodelsfactory.load_models()
    # FIXME: ACLs needed at this level?
    # self.acl_ = FLAccessControlLists()
    # self.acl_.init()
    return True
def call(
self,
function: str,
args: List[Any],
object_context: Any = None,
show_exceptions: bool = True,
) -> Optional[Any]:
"""
Call to a QS project function.
@param function. Nombre de la función a llamar.
@param args. Array con los argumentos.
@param object_context. Contexto en el que se ejecuta la función.
@param show_exceptions. Boolean que especifica si se muestra los errores.
@return Boolean con el resultado.
"""
# FIXME: No deberíamos usar este método. En Python hay formas mejores
# de hacer esto.
LOGGER.trace(
"JS.CALL: fn:%s args:%s ctx:%s", function, args, object_context, stack_info=True
)
# Tipicamente flfactalma.iface.beforeCommit_articulos()
if function[-2:] == "()":
function = function[:-2]
| |
<gh_stars>1-10
#!/usr/bin/env python
import string, re
from DOM import Element, Text, Node, DocumentFragment, Document
from Tokenizer import Token, BeginGroup, EndGroup, Other
from plasTeX import Logging
log = Logging.getLogger()
status = Logging.getLogger('status')
deflog = Logging.getLogger('parse.definitions')
#
# Utility functions
#
def idgen():
    """ Yield an endless sequence of unique IDs: 'a' + 10-digit counter """
    counter = 1
    while True:
        yield 'a%.10d' % counter
        counter += 1
# Replace the factory with a single shared generator instance.
idgen = idgen()
def subclasses(o):
    """ Return the given class followed by all of its subclasses, depth-first """
    found = [o]
    for sub in o.__subclasses__():
        found += subclasses(sub)
    return found
def sourceChildren(o, par=True):
    """ Return the LaTeX source of the child nodes.

    With par=True children are joined directly; with par=False each child
    is treated as an iterable of nodes (a paragraph) and flattened first.
    """
    if not o.hasChildNodes():
        return u''
    if par:
        return u''.join(child.source for child in o.childNodes)
    pieces = []
    for paragraph in o.childNodes:
        pieces.extend(node.source for node in paragraph)
    return u''.join(pieces)
def sourceArguments(o):
    """ Return the LaTeX source of the macro's arguments (its argSource) """
    return o.argSource
def ismacro(o):
    """ Is the given object a macro? (duck-typed on a 'macroName' attribute) """
    return hasattr(o, 'macroName')
def issection(o):
    """ Is the given object a sectioning node?

    True when its level falls within [DOCUMENT_LEVEL, END_SECTIONS_LEVEL).
    """
    return Node.DOCUMENT_LEVEL <= o.level < Node.END_SECTIONS_LEVEL
def macroName(o):
    """ Return the TeX macro name of the given object.

    Falls back to the class name when no explicit macroName is set;
    accepts either a class or an instance.
    """
    if o.macroName is not None:
        return o.macroName
    cls = o if type(o) is type else type(o)
    return cls.__name__
class Argument(object):
    """
    Macro argument

    Argument strings in macros are compiled into Arguments
    once. Then the compiled arguments can be used to get the
    arguments thereafter.
    """
    def __init__(self, name, index, options={}):
        # NOTE: the mutable default is safe here because it is always
        # copied before being stored.
        self.name = name
        self.index = index
        self.source = ''
        self.options = options.copy()
    def __repr__(self):
        return '%s: %s' % (self.name, self.options)
    def __cmp__(self, other):
        # Python 2 ordering hook: order by name first, then by options.
        c = cmp(self.name, other.name)
        if c: return c
        return cmp(self.options, other.options)
class CSSStyles(dict):
    """ Dictionary of CSS property/value pairs """
    @property
    def inline(self):
        """
        Render the styles as a single inline-CSS string.

        Returns:
        '; '-separated 'prop:value' string, or None when empty
        """
        if not self:
            return None
        return u'; '.join(u'%s:%s' % pair for pair in self.items())
class Macro(Element):
"""
Base class for all macros
"""
MODE_NONE = 0
MODE_BEGIN = 1
MODE_END = 2
macroName = None # TeX macro name (instead of class name)
macroMode = MODE_NONE # begin, end, or none
mathMode = None
# Node variables
level = Node.COMMAND_LEVEL
nodeType = Node.ELEMENT_NODE
nodeValue = None
# Counter associated with this macro
counter = None
# Value to return when macro is referred to by \ref
ref = None
# Attributes that should be persisted between runs for nodes
# that can be referenced. This allows for cross-document links.
refAttributes = ['macroName','ref','title','captionName','id','url']
# Source of the TeX macro arguments
argSource = ''
# LaTeX argument template
args = ''
# Force there to be at least on paragraph in the content
forcePars = False
def persist(self, attrs=None):
    """
    Store attributes needed for cross-document links

    This method really needs to be called by the renderer because
    the rendered versions of the attributes are needed. If nested
    classes could be pickeled, we could just pickle the attributes.

    Keyword Arguments:
    attrs -- dictionary to populate with values. If set to None,
        a new dictionary should be created.

    Returns: dictionary containing attributes to be persisted
    """
    if attrs is None:
        attrs = {}
    for name in self.refAttributes:
        value = getattr(self, name, None)
        if value is None:
            continue
        if isinstance(value, Node):
            # Flatten node values to their text form (Python 2 unicode API).
            value = u'%s' % unicode(value)
        attrs[name] = value
    return attrs
def restore(self, attrs):
    """
    Restore attributes needed for cross-document links

    Required Attributes:
    attrs -- dictionary of attributes to be set on self
    """
    # A persisted 'url' is restored under 'urloverride'.
    remap = {'url':'urloverride'}
    for key, value in attrs.items():
        setattr(self, remap.get(key, key), value)
@property
def config(self):
    """ Shortcut to the owning document's config """
    return self.ownerDocument.config
@property
def idref(self):
    """ Storage area for idref argument types (dict, created lazily) """
    if hasattr(self, '@idref'):
        return getattr(self, '@idref')
    # First access: create and cache the dict on the instance.
    d = {}
    setattr(self, '@idref', d)
    return d
def captionName():
    """ Property factory: name associated with the counter """
    def fget(self):
        if hasattr(self, '@captionName'):
            return getattr(self, '@captionName')
        # Default to an empty text node owned by this macro's document.
        self.captionName = name = self.ownerDocument.createTextNode('')
        return name
    def fset(self, value):
        setattr(self, '@captionName', value)
    return locals()
captionName = property(**captionName())
def title():
    """ Property factory: title from '@title' or the attributes dictionary """
    def fget(self):
        try:
            return getattr(self, '@title')
        except AttributeError:
            try:
                return self.attributes['title']
            except KeyError:
                pass
            # Python 2 raise syntax, kept verbatim.
            raise AttributeError, 'could not find attribute "title"'
    def fset(self, value):
        setattr(self, '@title', value)
    return locals()
title = property(**title())
def fullTitle():
    """ Property factory: title prefixed with the section number (ref) """
    def fget(self):
        try:
            return getattr(self, '@fullTitle')
        except AttributeError:
            if self.ref is not None:
                # Build '<ref> <title>' as a document fragment, cached.
                fullTitle = self.ownerDocument.createDocumentFragment()
                fullTitle.extend([self.ref, ' ', self.title], setParent=False)
            else:
                fullTitle = self.title
            setattr(self, '@fullTitle', fullTitle)
            return fullTitle
    def fset(self, value):
        setattr(self, '@fullTitle', value)
    return locals()
fullTitle = property(**fullTitle())
def tocEntry():
    """ Property factory: table-of-contents entry (falls back to title) """
    def fget(self):
        try:
            return getattr(self, '@tocEntry')
        except AttributeError:
            try:
                # Python 2 dict API (has_key), kept verbatim.
                if self.attributes.has_key('toc'):
                    toc = self.attributes['toc']
                    if toc is None:
                        toc = self.title
                    setattr(self, '@tocEntry', toc)
                    return toc
            except (KeyError, AttributeError):
                pass
        return self.title
    def fset(self, value):
        setattr(self, '@tocEntry', value)
    return locals()
tocEntry = property(**tocEntry())
def fullTocEntry():
    """ Property factory: TOC entry prefixed with the section number (ref) """
    def fget(self):
        try:
            try:
                return getattr(self, '@fullTocEntry')
            except AttributeError:
                if self.ref is not None:
                    fullTocEntry = self.ownerDocument.createDocumentFragment()
                    fullTocEntry.extend([self.ref, ' ', self.tocEntry], setParent=False)
                else:
                    fullTocEntry = self.tocEntry
                setattr(self, '@fullTocEntry', fullTocEntry)
                return fullTocEntry
        # Python 2 except syntax, kept verbatim; any failure falls back to title.
        except Exception, msg:
            return self.title
    def fset(self, value):
        setattr(self, '@fullTocEntry', value)
    return locals()
fullTocEntry = property(**fullTocEntry())
@property
def style(self):
    """ CSS styles for this node, created lazily and cached on the instance """
    try:
        return getattr(self, '@style')
    except AttributeError:
        style = CSSStyles()
        setattr(self, '@style', style)
        return style
def digest(self, tokens):
    """ Absorb tokens from the stream; no-op for simple macros """
    pass
def locals(self):
    """ Retrieve all macros local to this namespace """
    tself = type(self)
    localsname = '@locals'
    # Check for cached versions first
    try:
        return vars(tself)[localsname]
    except KeyError:
        pass
    # Walk the MRO from base to derived so derived classes win on collisions.
    mro = list(tself.__mro__)
    mro.reverse()
    loc = {}
    for cls in mro:
        for value in vars(cls).values():
            if ismacro(value):
                loc[macroName(value)] = value
    # Cache the locals in a unique name
    setattr(tself, localsname, loc)
    return loc
def id():
    """ Property factory: unique id, auto-generated on first read """
    def fset(self, value):
        if value:
            setattr(self, '@id', value)
        else:
            delattr(self, '@id')
    def fget(self):
        id = getattr(self, '@id', None)
        if id is None:
            # Pull one value from the shared idgen generator and flag
            # that this id was auto-generated.
            for id in idgen:
                setattr(self, '@hasgenid', True)
                self.id = id
                break
        return id
    return locals()
id = property(**id())
def expand(self, tex):
    """ Fully expand the macro; returns self when invoke produced nothing """
    tokens = self.invoke(tex)
    return self if tokens is None else tex.expandTokens(tokens)
def invoke(self, tex):
    """ Process this macro against the TeX stream, by macroMode.

    MODE_END pops the context; MODE_BEGIN pushes and parses; the default
    mode pushes, parses and pops so the token is emitted only once.
    """
    # Just pop the context if this is a \end token
    if self.macroMode == Macro.MODE_END:
        self.ownerDocument.context.pop(self)
        # If a unicode value is set, just return that
        # if self.unicode is not None:
        #     return tex.textTokens(self.unicode)
        return
    # If this is a \begin token or the element needs to be
    # closed automatically (i.e. \section, \item, etc.), just
    # push the new context and return the instance.
    elif self.macroMode == Macro.MODE_BEGIN:
        self.ownerDocument.context.push(self)
        self.parse(tex)
        # If a unicode value is set, just return that
        # if self.unicode is not None:
        #     return tex.textTokens(self.unicode)
        self.setLinkType()
        return
    # Push, parse, and pop. The command doesn't need to stay on
    # the context stack. We push an empty context so that the
    # `self' token doesn't get put into the output stream twice
    # (once here and once with the pop).
    self.ownerDocument.context.push(self)
    self.parse(tex)
    self.ownerDocument.context.pop(self)
    # If a unicode value is set, just return that
    # if self.unicode is not None:
    #     return tex.textTokens(self.unicode)
    self.setLinkType()
def setLinkType(self, key=None):
    """
    Set up navigation links

    Keyword Arguments:
    key -- the name or names of the navigation keys to set
        instead of using self.linkType
    """
    if key is None:
        key = self.linkType
    if key:
        # Register this node under each key in the document's link table.
        userdata = self.ownerDocument.userdata
        if 'links' not in userdata:
            userdata['links'] = {}
        # Python 2 string check (basestring), kept verbatim.
        if isinstance(key, basestring):
            userdata['links'][key] = self
        else:
            for k in key:
                userdata['links'][k] = self
@property
def tagName(self):
    """ DOM tag name: the TeX macro name, or the class name as fallback """
    t = type(self)
    if t.macroName is None:
        return t.__name__
    return t.macroName
# DOM alias: nodeName mirrors tagName.
nodeName = tagName
@property
def source(self):
name = self.nodeName
# Automatically revert internal names like "active::~"
escape = '\\'
if '::' in name:
name = name.split('::').pop()
escape = ''
# \begin environment
# If self.childNodes is not empty, print out the entire environment
if self.macroMode == Macro.MODE_BEGIN:
argSource = sourceArguments(self)
if not argSource:
argSource = ' '
s = '%sbegin{%s}%s' % (escape, name, argSource)
| |
"ve\u010der",
"vlastn\u011b",
"vy",
"v\u00e1m",
"v\u00e1mi",
"v\u00e1s",
"v\u00e1\u0161",
"v\u00edce",
"v\u0161ak",
"v\u0161echno",
"v\u0161ichni",
"v\u016fbec",
"v\u017edy",
"z",
"za",
"zat\u00edmco",
"za\u010d",
"zda",
"zde",
"ze",
"zpr\u00e1vy",
"zp\u011bt",
"\u010dau",
"\u010di",
"\u010dl\u00e1nku",
"\u010dl\u00e1nky",
"\u010dtrn\u00e1ct",
"\u010dty\u0159i",
"\u0161est",
"\u0161estn\u00e1ct",
"\u017ee",
],
"da": [
"af",
"alle",
"andet",
"andre",
"at",
"begge",
"da",
"de",
"den",
"denne",
"der",
"deres",
"det",
"dette",
"dig",
"din",
"dog",
"du",
"ej",
"eller",
"en",
"end",
"ene",
"eneste",
"enhver",
"et",
"fem",
"fire",
"flere",
"fleste",
"for",
"fordi",
"forrige",
"fra",
"f\u00e5",
"f\u00f8r",
"god",
"han",
"hans",
"har",
"hendes",
"her",
"hun",
"hvad",
"hvem",
"hver",
"hvilken",
"hvis",
"hvor",
"hvordan",
"hvorfor",
"hvorn\u00e5r",
"i",
"ikke",
"ind",
"ingen",
"intet",
"jeg",
"jeres",
"kan",
"kom",
"kommer",
"lav",
"lidt",
"lille",
"man",
"mand",
"mange",
"med",
"meget",
"men",
"mens",
"mere",
"mig",
"ned",
"ni",
"nogen",
"noget",
"ny",
"nyt",
"n\u00e6r",
"n\u00e6ste",
"n\u00e6sten",
"og",
"op",
"otte",
"over",
"p\u00e5",
"se",
"seks",
"ses",
"som",
"stor",
"store",
"syv",
"ti",
"til",
"to",
"tre",
"ud",
"var",
],
"de": [
"Ernst",
"Ordnung",
"Schluss",
"a",
"ab",
"aber",
"ach",
"acht",
"achte",
"achten",
"achter",
"achtes",
"ag",
"alle",
"allein",
"allem",
"allen",
"aller",
"allerdings",
"alles",
"allgemeinen",
"als",
"also",
"am",
"an",
"andere",
"anderen",
"andern",
"anders",
"au",
"auch",
"auf",
"aus",
"ausser",
"ausserdem",
"au\u00dfer",
"au\u00dferdem",
"b",
"bald",
"bei",
"beide",
"beiden",
"beim",
"beispiel",
"bekannt",
"bereits",
"besonders",
"besser",
"besten",
"bin",
"bis",
"bisher",
"bist",
"c",
"d",
"d.h",
"da",
"dabei",
"dadurch",
"daf\u00fcr",
"dagegen",
"daher",
"dahin",
"dahinter",
"damals",
"damit",
"danach",
"daneben",
"dank",
"dann",
"daran",
"darauf",
"daraus",
"darf",
"darfst",
"darin",
"darum",
"darunter",
"dar\u00fcber",
"das",
"dasein",
"daselbst",
"dass",
"dasselbe",
"davon",
"davor",
"dazu",
"dazwischen",
"da\u00df",
"dein",
"deine",
"deinem",
"deiner",
"dem",
"dementsprechend",
"demgegen\u00fcber",
"demgem\u00e4ss",
"demgem\u00e4\u00df",
"demselben",
"demzufolge",
"den",
"denen",
"denn",
"denselben",
"der",
"deren",
"derjenige",
"derjenigen",
"dermassen",
"derma\u00dfen",
"derselbe",
"derselben",
"des",
"deshalb",
"desselben",
"dessen",
"deswegen",
"dich",
"die",
"diejenige",
"diejenigen",
"dies",
"diese",
"dieselbe",
"dieselben",
"diesem",
"diesen",
"dieser",
"dieses",
"dir",
"doch",
"dort",
"drei",
"drin",
"dritte",
"dritten",
"dritter",
"drittes",
"du",
"durch",
"durchaus",
"durfte",
"durften",
"d\u00fcrfen",
"d\u00fcrft",
"e",
"eben",
"ebenso",
"ehrlich",
"ei",
"ei,",
"eigen",
"eigene",
"eigenen",
"eigener",
"eigenes",
"ein",
"einander",
"eine",
"einem",
"einen",
"einer",
"eines",
"einige",
"einigen",
"einiger",
"einiges",
"einmal",
"eins",
"elf",
"en",
"ende",
"endlich",
"entweder",
"er",
"erst",
"erste",
"ersten",
"erster",
"erstes",
"es",
"etwa",
"etwas",
"euch",
"euer",
"eure",
"f",
"folgende",
"fr\u00fcher",
"f\u00fcnf",
"f\u00fcnfte",
"f\u00fcnften",
"f\u00fcnfter",
"f\u00fcnftes",
"f\u00fcr",
"g",
"gab",
"ganz",
"ganze",
"ganzen",
"ganzer",
"ganzes",
"gar",
"gedurft",
"gegen",
"gegen\u00fcber",
"gehabt",
"gehen",
"geht",
"gekannt",
"gekonnt",
"gemacht",
"gemocht",
"gemusst",
"genug",
"gerade",
"gern",
"gesagt",
"geschweige",
"gewesen",
"gewollt",
"geworden",
"gibt",
"ging",
"gleich",
"gott",
"gross",
"grosse",
"grossen",
"grosser",
"grosses",
"gro\u00df",
"gro\u00dfe",
"gro\u00dfen",
"gro\u00dfer",
"gro\u00dfes",
"gut",
"gute",
"guter",
"gutes",
"h",
"habe",
"haben",
"habt",
"hast",
"hat",
"hatte",
"hatten",
"hattest",
"hattet",
"heisst",
"her",
"heute",
"hier",
"hin",
"hinter",
"hoch",
"h\u00e4tte",
"h\u00e4tten",
"i",
"ich",
"ihm",
"ihn",
"ihnen",
"ihr",
"ihre",
"ihrem",
"ihren",
"ihrer",
"ihres",
"im",
"immer",
"in",
"indem",
"infolgedessen",
"ins",
"irgend",
"ist",
"j",
"ja",
"jahr",
"jahre",
"jahren",
"je",
"jede",
"jedem",
"jeden",
"jeder",
"jedermann",
"jedermanns",
"jedes",
"jedoch",
"jemand",
"jemandem",
"jemanden",
"jene",
"jenem",
"jenen",
"jener",
"jenes",
"jetzt",
"k",
"kam",
"kann",
"kannst",
"kaum",
"kein",
"keine",
"keinem",
"keinen",
"keiner",
"kleine",
"kleinen",
"kleiner",
"kleines",
"kommen",
"kommt",
"konnte",
"konnten",
"kurz",
"k\u00f6nnen",
"k\u00f6nnt",
"k\u00f6nnte",
"l",
"lang",
"lange",
"leicht",
"leide",
"lieber",
"los",
"m",
"machen",
"macht",
"machte",
"mag",
"magst",
"mahn",
"mal",
"man",
"manche",
"manchem",
"manchen",
"mancher",
"manches",
"mann",
"mehr",
"mein",
"meine",
"meinem",
"meinen",
"meiner",
"meines",
"mensch",
"menschen",
"mich",
"mir",
"mit",
"mittel",
"mochte",
"mochten",
"morgen",
"muss",
"musst",
"musste",
"mussten",
"mu\u00df",
"mu\u00dft",
"m\u00f6chte",
"m\u00f6gen",
"m\u00f6glich",
"m\u00f6gt",
"m\u00fcssen",
"m\u00fcsst",
"m\u00fc\u00dft",
"n",
"na",
"nach",
"nachdem",
"nahm",
"nat\u00fcrlich",
"neben",
"nein",
"neue",
"neuen",
"neun",
"neunte",
"neunten",
"neunter",
"neuntes",
"nicht",
"nichts",
"nie",
"niemand",
"niemandem",
"niemanden",
"noch",
"nun",
"nur",
"o",
"ob",
"oben",
"oder",
"offen",
"oft",
"ohne",
"p",
"q",
"r",
"recht",
"rechte",
"rechten",
"rechter",
"rechtes",
"richtig",
"rund",
"s",
"sa",
"sache",
"sagt",
"sagte",
"sah",
"satt",
"schlecht",
"schon",
"sechs",
"sechste",
"sechsten",
"sechster",
"sechstes",
"sehr",
"sei",
"seid",
"seien",
"sein",
"seine",
"seinem",
"seinen",
"seiner",
"seines",
"seit",
"seitdem",
"selbst",
"sich",
"sie",
"sieben",
"siebente",
"siebenten",
"siebenter",
"siebentes",
"sind",
"so",
"solang",
"solche",
"solchem",
"solchen",
"solcher",
"solches",
"soll",
"sollen",
"sollst",
"sollt",
"sollte",
"sollten",
"sondern",
"sonst",
"soweit",
"sowie",
"sp\u00e4ter",
"startseite",
"statt",
"steht",
"suche",
"t",
"tag",
"tage",
"tagen",
"tat",
"teil",
"tel",
"tritt",
"trotzdem",
"tun",
"u",
"uhr",
"um",
"und",
"und?",
"uns",
"unser",
"unsere",
"unserer",
"unter",
"v",
"vergangenen",
"viel",
"viele",
"vielem",
"vielen",
"vielleicht",
"vier",
"vierte",
"vierten",
"vierter",
"viertes",
"vom",
"von",
"vor",
"w",
"wahr?",
"wann",
"war",
"waren",
"wart",
"warum",
"was",
"wegen",
"weil",
"weit",
"weiter",
"weitere",
"weiteren",
"weiteres",
"welche",
"welchem",
"welchen",
"welcher",
"welches",
"wem",
"wen",
"wenig",
"wenige",
"weniger",
"weniges",
"wenigstens",
"wenn",
"wer",
"werde",
"werden",
"werdet",
"weshalb",
"wessen",
"wie",
"wieder",
"wieso",
"will",
"willst",
"wir",
"wird",
"wirklich",
"wirst",
"wissen",
"wo",
"wohl",
"wollen",
"wollt",
"wollte",
"wollten",
"worden",
"wurde",
"wurden",
"w\u00e4hrend",
"w\u00e4hrenddem",
"w\u00e4hrenddessen",
"w\u00e4re",
"w\u00fcrde",
"w\u00fcrden",
"x",
"y",
"z",
"z.b",
"zehn",
"zehnte",
"zehnten",
"zehnter",
"zehntes",
"zeit",
"zu",
"zuerst",
"zugleich",
"zum",
"zun\u00e4chst",
"zur",
"zur\u00fcck",
"zusammen",
"zwanzig",
"zwar",
"zwei",
"zweite",
"zweiten",
"zweiter",
"zweites",
"zwischen",
"zw\u00f6lf",
"\u00fcber",
"\u00fcberhaupt",
"\u00fcbrigens",
],
"el": [
"\u03b1\u03bb\u03bb\u03b1",
"\u03b1\u03bd",
"\u03b1\u03bd\u03c4\u03b9",
"\u03b1\u03c0\u03bf",
"\u03b1\u03c5\u03c4\u03b1",
"\u03b1\u03c5\u03c4\u03b5\u03c3",
"\u03b1\u03c5\u03c4\u03b7",
"\u03b1\u03c5\u03c4\u03bf",
"\u03b1\u03c5\u03c4\u03bf\u03b9",
"\u03b1\u03c5\u03c4\u03bf\u03c3",
"\u03b1\u03c5\u03c4\u03bf\u03c5\u03c3",
"\u03b1\u03c5\u03c4\u03c9\u03bd",
"\u03b3\u03b9\u03b1",
"\u03b4\u03b5",
"\u03b4\u03b5\u03bd",
"\u03b5\u03b1\u03bd",
"\u03b5\u03b9\u03bc\u03b1\u03b9",
"\u03b5\u03b9\u03bc\u03b1\u03c3\u03c4\u03b5",
"\u03b5\u03b9\u03bd\u03b1\u03b9",
"\u03b5\u03b9\u03c3\u03b1\u03b9",
"\u03b5\u03b9\u03c3\u03c4\u03b5",
"\u03b5\u03ba\u03b5\u03b9\u03bd\u03b1",
"\u03b5\u03ba\u03b5\u03b9\u03bd\u03b5\u03c3",
"\u03b5\u03ba\u03b5\u03b9\u03bd\u03b7",
"\u03b5\u03ba\u03b5\u03b9\u03bd\u03bf",
"\u03b5\u03ba\u03b5\u03b9\u03bd\u03bf\u03b9",
"\u03b5\u03ba\u03b5\u03b9\u03bd\u03bf\u03c3",
"\u03b5\u03ba\u03b5\u03b9\u03bd\u03bf\u03c5\u03c3",
"\u03b5\u03ba\u03b5\u03b9\u03bd\u03c9\u03bd",
"\u03b5\u03bd\u03c9",
"\u03b5\u03c0\u03b9",
"\u03b7",
"\u03b8\u03b1",
"\u03b9\u03c3\u03c9\u03c3",
"\u03ba",
"\u03ba\u03b1\u03b9",
"\u03ba\u03b1\u03c4\u03b1",
"\u03ba\u03b9",
"\u03bc\u03b1",
"\u03bc\u03b5",
"\u03bc\u03b5\u03c4\u03b1",
"\u03bc\u03b7",
"\u03bc\u03b7\u03bd",
"\u03bd\u03b1",
"\u03bf",
"\u03bf\u03b9",
"\u03bf\u03bc\u03c9\u03c3",
"\u03bf\u03c0\u03c9\u03c3",
"\u03bf\u03c3\u03bf",
"\u03bf\u03c4\u03b9",
"\u03c0\u03b1\u03c1\u03b1",
"\u03c0\u03bf\u03b9\u03b1",
"\u03c0\u03bf\u03b9\u03b5\u03c3",
"\u03c0\u03bf\u03b9\u03bf",
"\u03c0\u03bf\u03b9\u03bf\u03b9",
"\u03c0\u03bf\u03b9\u03bf\u03c3",
"\u03c0\u03bf\u03b9\u03bf\u03c5\u03c3",
"\u03c0\u03bf\u03b9\u03c9\u03bd",
"\u03c0\u03bf\u03c5",
"\u03c0\u03c1\u03bf\u03c3",
"\u03c0\u03c9\u03c3",
"\u03c3\u03b5",
"\u03c3\u03c4\u03b7",
"\u03c3\u03c4\u03b7\u03bd",
"\u03c3\u03c4\u03bf",
"\u03c3\u03c4\u03bf\u03bd",
"\u03c4\u03b1",
"\u03c4\u03b7\u03bd",
"\u03c4\u03b7\u03c3",
"\u03c4\u03bf",
"\u03c4\u03bf\u03bd",
"\u03c4\u03bf\u03c4\u03b5",
"\u03c4\u03bf\u03c5",
"\u03c4\u03c9\u03bd",
"\u03c9\u03c3",
],
"en": [
"a",
"a's",
"able",
"about",
"above",
"according",
"accordingly",
"across",
"actually",
"after",
"afterwards",
"again",
"against",
"ain't",
"all",
"allow",
"allows",
"almost",
"alone",
"along",
"already",
"also",
"although",
"always",
"am",
"among",
"amongst",
"an",
"and",
"another",
"any",
"anybody",
"anyhow",
"anyone",
"anything",
"anyway",
"anyways",
"anywhere",
"apart",
"appear",
"appreciate",
"appropriate",
"are",
"aren't",
"around",
"as",
"aside",
"ask",
"asking",
"associated",
"at",
"available",
"away",
"awfully",
"b",
"be",
"became",
"because",
"become",
"becomes",
"becoming",
"been",
"before",
"beforehand",
"behind",
"being",
"believe",
"below",
"beside",
"besides",
"best",
"better",
"between",
"beyond",
"both",
"brief",
"but",
"by",
"c",
"c'mon",
"c's",
"came",
"can",
"can't",
"cannot",
"cant",
"cause",
"causes",
"certain",
"certainly",
"changes",
"clearly",
"co",
"com",
"come",
"comes",
"concerning",
"consequently",
"consider",
"considering",
"contain",
"containing",
"contains",
"corresponding",
"could",
"couldn't",
"course",
"currently",
"d",
"definitely",
"described",
"despite",
"did",
"didn't",
"different",
"do",
"does",
"doesn't",
"doing",
"don't",
"done",
"down",
"downwards",
"during",
"e",
"each",
"edu",
"eg",
"eight",
"either",
"else",
"elsewhere",
"enough",
"entirely",
"especially",
"et",
"etc",
"even",
"ever",
"every",
"everybody",
"everyone",
"everything",
"everywhere",
"ex",
"exactly",
"example",
"except",
"f",
"far",
"few",
"fifth",
"first",
"five",
"followed",
"following",
"follows",
"for",
"former",
"formerly",
"forth",
"four",
"from",
"further",
"furthermore",
"g",
"get",
"gets",
"getting",
"given",
"gives",
"go",
"goes",
"going",
"gone",
"got",
"gotten",
"greetings",
"h",
"had",
"hadn't",
"happens",
"hardly",
"has",
"hasn't",
"have",
"haven't",
"having",
"he",
"he's",
"hello",
"help",
"hence",
"her",
"here",
"here's",
"hereafter",
"hereby",
"herein",
"hereupon",
"hers",
"herself",
"hi",
"him",
"himself",
"his",
"hither",
"hopefully",
"how",
"howbeit",
"however",
"i",
"i'd",
"i'll",
"i'm",
"i've",
"ie",
"if",
"ignored",
"immediate",
"in",
"inasmuch",
"inc",
"indeed",
"indicate",
"indicated",
"indicates",
"inner",
"insofar",
"instead",
"into",
"inward",
"is",
"isn't",
"it",
"it'd",
"it'll",
"it's",
"its",
"itself",
"j",
"just",
"k",
"keep",
"keeps",
"kept",
"know",
"known",
"knows",
"l",
"last",
"lately",
"later",
"latter",
"latterly",
"least",
"less",
"lest",
"let",
"let's",
"like",
"liked",
"likely",
"little",
"look",
"looking",
"looks",
"ltd",
"m",
"mainly",
"many",
"may",
"maybe",
"me",
"mean",
"meanwhile",
"merely",
"might",
"more",
"moreover",
"most",
"mostly",
"much",
"must",
"my",
"myself",
"n",
"name",
"namely",
"nd",
"near",
"nearly",
"necessary",
"need",
"needs",
"neither",
"never",
"nevertheless",
"new",
"next",
"nine",
"no",
"nobody",
"non",
"none",
"noone",
"nor",
"normally",
"not",
"nothing",
"novel",
"now",
"nowhere",
"o",
"obviously",
"of",
"off",
"often",
"oh",
"ok",
"okay",
"old",
"on",
"once",
"one",
"ones",
"only",
"onto",
"or",
"other",
"others",
"otherwise",
"ought",
"our",
"ours",
"ourselves",
"out",
"outside",
"over",
"overall",
"own",
"p",
"particular",
"particularly",
"per",
"perhaps",
"placed",
"please",
"plus",
"possible",
"presumably",
"probably",
"provides",
"q",
"que",
"quite",
"qv",
"r",
"rather",
"rd",
"re",
"really",
"reasonably",
"regarding",
"regardless",
"regards",
"relatively",
"respectively",
"right",
"s",
"said",
"same",
"saw",
"say",
"saying",
"says",
"second",
"secondly",
"see",
"seeing",
"seem",
"seemed",
"seeming",
"seems",
"seen",
"self",
"selves",
"sensible",
"sent",
"serious",
"seriously",
"seven",
"several",
"shall",
"she",
"should",
"shouldn't",
"since",
"six",
"so",
"some",
"somebody",
"somehow",
"someone",
"something",
"sometime",
"sometimes",
"somewhat",
"somewhere",
"soon",
"sorry",
"specified",
"specify",
"specifying",
"still",
"sub",
"such",
"sup",
"sure",
"t",
"t's",
"take",
"taken",
"tell",
"tends",
"th",
"than",
"thank",
"thanks",
"thanx",
"that",
"that's",
"thats",
"the",
"their",
"theirs",
"them",
"themselves",
"then",
"thence",
"there",
"there's",
"thereafter",
"thereby",
"therefore",
"therein",
"theres",
"thereupon",
"these",
"they",
"they'd",
"they'll",
"they're",
"they've",
"think",
"third",
"this",
"thorough",
"thoroughly",
"those",
"though",
"three",
"through",
"throughout",
"thru",
| |
from functools import reduce
import networkx as nx
def identity(*args):
    """Return a single argument unchanged, or multiple arguments as a tuple."""
    return args[0] if len(args) == 1 else args
def er(e):
    """
    Return the reverse of an edge represented as a (source, dest) tuple.
    """
    src, dst = e
    return (dst, src)
def cer(e):
    """
    Canonicalize the representation of an undirected edge
    by returning its endpoints as a sorted tuple.
    """
    lo, hi = sorted(e)
    return (lo, hi)
def countedgesin(path, edges, directed=False):
    """
    Count how many edges of the given path appear in the edge collection.

    When directed is False, an edge also matches if its reverse is present.
    """
    known = set(edges)
    total = 0
    for edge in edgesin(path):
        if edge in known or (not directed and er(edge) in known):
            total += 1
    return total
def triple_count_from_pd(pd, directed=False, return_sets=False):
    """
    Compute edge, node, and probing-node frequency counts for a path
    dictionary in a single pass.

    This combines the functionality of edgesinpd, nodesinpd, and probesinpd
    (with repeat=False, trim=True, transit=False). For transit==True
    semantics, subtract the probe counts (return[2]) from the node counts
    (return[1]).

    :param pd: two-level path dictionary: pd[source][destination] -> path
    :param directed: when True, an edge and its reverse are counted
        separately; when False (default) edges are canonicalized via cer().
    :param return_sets: when True, return sets of keys instead of the
        frequency-count dictionaries.
    :return: (edge_dict, node_dict, probe_dict) — or their key sets
    """
    edge_dict, node_dict, probe_dict = dict(), dict(), dict()
    for s in pd:
        for d in pd[s]:
            # Tracks whether this (s, d) entry is a self-loop; loop entries
            # contribute nothing (the trim=True behavior).
            only_loops = True
            if d != s:
                only_loops = False
                # The destination is both a probe endpoint and a node of the
                # path; the walk below covers all path nodes except the last,
                # so counting d here completes the node tally.
                probe_dict[d] = probe_dict.get(d, 0) + 1
                node_dict[d] = node_dict.get(d, 0) + 1
                path = pd[s][d]
                for i in range(len(path) - 1):
                    node = path[i]
                    node_dict[node] = node_dict.get(node, 0) + 1
                    edge = (node, path[i + 1])
                    if not directed: edge = cer(edge)
                    edge_dict[edge] = edge_dict.get(edge, 0) + 1
            if not only_loops:
                # Count the source as a probe once per non-loop destination.
                probe_dict[s] = probe_dict.get(s, 0) + 1
    if return_sets:
        return set(edge_dict), set(node_dict), set(probe_dict)
    else:
        return edge_dict, node_dict, probe_dict
def edgesin(path, directed=False, trim=True):
    """
    Return the list of edges (node-pair tuples) along *path*.

    :param path: sequence of nodes
    :param directed: when False (default) edges are canonicalized; when
        True they keep the orientation they have in the path.
    :param trim: when True (default), self-loop edges are omitted.
    :return: list of edge tuples; empty for single-node paths
    """
    canon = identity if directed else cer
    result = []
    for a, b in zip(path, path[1:]):
        if trim and a == b:
            continue
        result.append(canon((a, b)))
    return result
def histogram_to_list(histo, repeat=False):
    """
    Flatten a frequency-count dictionary into a list of its keys.

    :param histo: dict mapping items to their integer counts
    :param repeat: when False (default), each key appears once; when True,
        each key appears as many times as its count.
    :return: list of keys (possibly with repetition)
    """
    if not repeat:
        return list(histo.keys())
    return_list = []
    # Bug fix: dict.iteritems() exists only on Python 2 and raises
    # AttributeError on Python 3; .items() behaves identically on both.
    for item, count in histo.items():
        return_list += [item] * count
    return return_list
def all_edge_set(pseq, directed=False, trim=True):
    """
    Collect the set of all edges appearing in a path list or a two-level
    path dictionary. Raises TypeError for any other container type.
    """
    collected = set()
    if isinstance(pseq, list):
        for path in pseq:
            collected.update(edgesin(path, directed=directed, trim=trim))
    elif isinstance(pseq, dict):
        for src, dests in pseq.items():
            for dst, path in dests.items():
                # With trim on, whole loop paths are skipped outright.
                if not trim or src != dst:
                    collected.update(edgesin(path, directed=directed, trim=trim))
    else:
        raise TypeError
    return collected
def edgesinpaths(pl, histo=False, repeat=False, directed=False, trim=True):
    """
    Return the edges occurring in a list of paths.

    :param pl: list of paths (node sequences)
    :param histo: when True, return a dict mapping each edge to its
        frequency instead of a list.
    :param repeat: when returning a list, repeat each edge as many times as
        it occurs (default: each edge at most once).
    :param directed: when False (default) a tuple and its reverse are the
        same edge and edges come back canonicalized; when True they are
        distinct.
    :param trim: when True (default), self-loop edges are excluded.
    """
    counts = {}
    for path in pl:
        for edge in edgesin(path, directed=directed, trim=trim):
            counts[edge] = counts.get(edge, 0) + 1
    return counts if histo else histogram_to_list(counts, repeat=repeat)
def edgesinpd(pd, histo=False, repeat=False, directed=False, trim=True):
    """
    Return the edges occurring in a path dictionary.

    :param pd: two-level path dictionary: pd[source][destination] -> path
    :param histo: when True, return a dict mapping each edge to its
        frequency instead of a list.
    :param repeat: when returning a list, repeat each edge as many times as
        it occurs (default: each edge at most once).
    :param directed: when False (default) a tuple and its reverse are the
        same edge and edges come back canonicalized; when True they are
        distinct.
    :param trim: when True (default), self-loop edges and whole loop paths
        are excluded.
    """
    counts = {}
    for src in pd:
        for dst, path in pd[src].items():
            if trim and src == dst:
                continue
            for edge in edgesin(path, directed=directed, trim=trim):
                counts[edge] = counts.get(edge, 0) + 1
    return counts if histo else histogram_to_list(counts, repeat=repeat)
def probesinpd(pd, trim=True):
    """
    Return the set of all source and destination nodes in a path dictionary.

    With trim=True (default) a source contributes only if it has at least
    one non-loop destination; set trim=False to include loop endpoints.
    """
    probes = set()
    for src, dests in pd.items():
        has_non_loop = any(dst != src for dst in dests)
        if has_non_loop or not trim:
            probes.add(src)
            probes.update(dests)
    return probes
def nodesinpd(pd, histo=False, repeat=False, transit=False, trim=True):
    """
    Return the nodes appearing in a path dictionary.

    :param pd: two-level path dictionary: pd[source][destination] -> path
    :param histo: when True, return a dict mapping each node to its count.
    :param repeat: when returning a list, repeat each node per occurrence.
    :param transit: when True, exclude each path's source and destination.
    :param trim: when True (default), loop paths are ignored.
    """
    counts = {}
    for src in pd:
        for dst, path in pd[src].items():
            if trim and src == dst:
                continue
            nodes = path[1:-1] if transit else path
            for node in nodes:
                counts[node] = counts.get(node, 0) + 1
    return counts if histo else histogram_to_list(counts, repeat=repeat)
def plfrompd(pd, trim=True):
    """
    Flatten a path dictionary into a plain list of paths.

    :param pd: two-level path dictionary: pd[source][destination] -> path
    :param trim: when True (default), loop paths (source == destination)
        are left out.
    """
    return [
        pd[src][dst]
        for src in pd
        for dst in pd[src]
        if not trim or src != dst
    ]
def pdfrompl(pl, trim=True):
    """
    Build a two-level path dictionary (by source, then destination node)
    from a list of paths.

    :param pl: list of paths (node sequences)
    :param trim: when True (default), loop paths are dropped.
    :return: path dictionary pd[source][destination] -> path
    """
    pd = {}
    for path in pl:
        first, last = path[0], path[-1]
        if trim and first == last:
            continue
        pd.setdefault(first, {})[last] = path
    return pd
def filternodes(pseq, R, trim=True):
    """
    Restrict a collection of paths to those whose endpoints both lie in R.

    Accepts either a two-level path dictionary or a list of paths; any other
    type raises TypeError. With trim=True (default), loop paths are also
    dropped, so nodes having only loops disappear from the result entirely.
    """
    if isinstance(pseq, dict):
        filtered = {}
        for src in pseq:
            if src not in R:
                continue
            for dst, path in pseq[src].items():
                if dst in R and (src != dst or not trim):
                    filtered.setdefault(src, {})[dst] = path
        return filtered
    if isinstance(pseq, list):
        return [
            path for path in pseq
            if path[0] in R and path[-1] in R and (path[0] != path[-1] or not trim)
        ]
    raise TypeError
def removeSelfLoops(pd):
"""
This function trims pd of all full loops
When trimmed, nodes | |
called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_clus_run_job_summary_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_clus_run_job_summary_with_http_info(**kwargs) # noqa: E501
return data
def get_clus_run_job_summary_with_http_info(self, **kwargs):  # noqa: E501
    """Get summary of ClusRun jobs  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.get_clus_run_job_summary_with_http_info(async=True)
    >>> result = thread.get()

    :param async bool
    :return: JobSummary
             If the method is called asynchronously,
             returns the request thread.
    """
    all_params = []  # noqa: E501
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    # Reject any keyword argument this endpoint does not understand.
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_clus_run_job_summary" % key
            )
        params[key] = val
    del params['kwargs']

    collection_formats = {}

    path_params = {}

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['aad']  # noqa: E501

    # Bug fix: `async` is a reserved word from Python 3.7, so a literal
    # `async=...` keyword argument is a SyntaxError; forward it through
    # dict unpacking instead, which is valid on Python 2 and 3 alike.
    return self.api_client.call_api(
        '/dashboard/clusrun', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='JobSummary',  # noqa: E501
        auth_settings=auth_settings,
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats,
        **{'async': params.get('async')})
def get_clusrun_events(self, id, **kwargs):  # noqa: E501
    """Get clusrun events  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.get_clusrun_events(id, async=True)
    >>> result = thread.get()

    :param async bool
    :param int id: Job id (required)
    :param int last_id: The object id since which(but not included) the objects are requested
    :param int count: Requested number of objects
    :return: list[Event]
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Whether invoked asynchronously (yielding the request thread) or
    # synchronously (yielding the data), the underlying call's result is
    # returned unchanged, so no branching on kwargs['async'] is needed.
    return self.get_clusrun_events_with_http_info(id, **kwargs)  # noqa: E501
def get_clusrun_events_with_http_info(self, id, **kwargs):  # noqa: E501
    """Get clusrun events  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.get_clusrun_events_with_http_info(id, async=True)
    >>> result = thread.get()

    :param async bool
    :param int id: Job id (required)
    :param int last_id: The object id since which(but not included) the objects are requested
    :param int count: Requested number of objects
    :return: list[Event]
             If the method is called asynchronously,
             returns the request thread.
    """
    all_params = ['id', 'last_id', 'count']  # noqa: E501
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    # Reject any keyword argument this endpoint does not understand.
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_clusrun_events" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params or
            params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `get_clusrun_events`")  # noqa: E501

    collection_formats = {}

    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']  # noqa: E501

    query_params = []
    if 'last_id' in params:
        query_params.append(('lastId', params['last_id']))  # noqa: E501
    if 'count' in params:
        query_params.append(('count', params['count']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['aad']  # noqa: E501

    # Bug fix: `async` is a reserved word from Python 3.7, so a literal
    # `async=...` keyword argument is a SyntaxError; forward it through
    # dict unpacking instead, which is valid on Python 2 and 3 alike.
    return self.api_client.call_api(
        '/clusrun/{id}/events', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='list[Event]',  # noqa: E501
        auth_settings=auth_settings,
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats,
        **{'async': params.get('async')})
def get_clusrun_job(self, id, **kwargs):  # noqa: E501
    """Get a clusrun  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.get_clusrun_job(id, async=True)
    >>> result = thread.get()

    :param async bool
    :param int id: Job id (required)
    :return: Job
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Whether invoked asynchronously (yielding the request thread) or
    # synchronously (yielding the data), the underlying call's result is
    # returned unchanged, so no branching on kwargs['async'] is needed.
    return self.get_clusrun_job_with_http_info(id, **kwargs)  # noqa: E501
def get_clusrun_job_with_http_info(self, id, **kwargs):  # noqa: E501
    """Get a clusrun  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.get_clusrun_job_with_http_info(id, async=True)
    >>> result = thread.get()

    :param async bool
    :param int id: Job id (required)
    :return: Job
             If the method is called asynchronously,
             returns the request thread.
    """
    all_params = ['id']  # noqa: E501
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    # Reject any keyword argument this endpoint does not understand.
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_clusrun_job" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params or
            params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `get_clusrun_job`")  # noqa: E501

    collection_formats = {}

    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['aad']  # noqa: E501

    # Bug fix: `async` is a reserved word from Python 3.7, so a literal
    # `async=...` keyword argument is a SyntaxError; forward it through
    # dict unpacking instead, which is valid on Python 2 and 3 alike.
    return self.api_client.call_api(
        '/clusrun/{id}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='Job',  # noqa: E501
        auth_settings=auth_settings,
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats,
        **{'async': params.get('async')})
def get_clusrun_job_aggregation_result(self, id, **kwargs):  # noqa: E501
    """Get aggregation result of a clusrun job  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.get_clusrun_job_aggregation_result(id, async=True)
    >>> result = thread.get()

    :param async bool
    :param int id: Job id (required)
    :return: object
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Whether invoked asynchronously (yielding the request thread) or
    # synchronously (yielding the data), the underlying call's result is
    # returned unchanged, so no branching on kwargs['async'] is needed.
    return self.get_clusrun_job_aggregation_result_with_http_info(id, **kwargs)  # noqa: E501
def get_clusrun_job_aggregation_result_with_http_info(self, id, **kwargs):  # noqa: E501
    """Get aggregation result of a clusrun job  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.get_clusrun_job_aggregation_result_with_http_info(id, async=True)
    >>> result = thread.get()

    :param async bool
    :param int id: Job id (required)
    :return: object
             If the method is called asynchronously,
             returns the request thread.
    """
    all_params = ['id']  # noqa: E501
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    # Reject any keyword argument this endpoint does not understand.
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_clusrun_job_aggregation_result" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params or
            params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `get_clusrun_job_aggregation_result`")  # noqa: E501

    collection_formats = {}

    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['aad']  # noqa: E501

    # Bug fix: `async` is a reserved word from Python 3.7, so a literal
    # `async=...` keyword argument is a SyntaxError; forward it through
    # dict unpacking instead, which is valid on Python 2 and 3 alike.
    return self.api_client.call_api(
        '/clusrun/{id}/aggregationResult', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='object',  # noqa: E501
        auth_settings=auth_settings,
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats,
        **{'async': params.get('async')})
def get_clusrun_jobs(self, **kwargs): # noqa: E501
"""Get a list of clusruns # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_clusrun_jobs(async=True)
>>> result = thread.get()
:param async bool
:param int last_id: The object id since which(but not included) the objects are requested
:param int count: Requested number of objects
:param bool reverse: Get the results in reverse order
:return: list[Job]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_clusrun_jobs_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_clusrun_jobs_with_http_info(**kwargs) | |
# src/whacked4/ui/mainwindow.py
#!/usr/bin/env python
#coding=utf8
from whacked4 import config, utils
from whacked4.dehacked import engine, patch
from whacked4.doom import wadlist, wad
from whacked4.ui import windows, workspace
from whacked4.ui.dialogs import startdialog, aboutdialog, patchinfodialog
from whacked4.ui.editors import thingsframe, statesframe, soundsframe, stringsframe, weaponsframe, ammoframe, \
cheatsframe, miscframe, parframe
from collections import OrderedDict
import glob
import os.path
import shutil
import webbrowser
import wx
import sys
class MainWindow(windows.MainFrameBase):
"""
The main MDI parent window.
"""
def __init__(self, parent):
    """
    Build the main MDI frame: patch\workspace\WAD state, engine configs,
    all editor child frames and dialogs, then restore the saved window
    state. The ordering below matters: engines must load before editor
    frames are created, and the move\size events are bound last so that
    restoring window state does not overwrite saved workspace data.

    @param parent: the wx parent window.
    """
    windows.MainFrameBase.__init__(self, parent)

    wx.BeginBusyCursor()
    self.SetIcon(wx.Icon(u'res/icon-hatchet.ico'))

    # Patch-related data.
    self.patch = None
    self.patch_info = None
    self.patch_modified = False

    # Workspace info.
    self.workspace = None
    self.workspace_modified = False

    # WAD\lump management.
    self.iwad = None
    self.pwads = wadlist.WADList()

    # Engine configuration related data.
    self.engines = OrderedDict()
    self.load_engines()

    # Window\ID relationships: toolbar tool id -> editor frame.
    self.editor_windows = {
        windows.MAIN_TOOL_THINGS: thingsframe.ThingsFrame(self),
        windows.MAIN_TOOL_STATES: statesframe.StatesFrame(self),
        windows.MAIN_TOOL_SOUNDS: soundsframe.SoundsFrame(self),
        windows.MAIN_TOOL_STRINGS: stringsframe.StringsFrame(self),
        windows.MAIN_TOOL_WEAPONS: weaponsframe.WeaponsFrame(self),
        windows.MAIN_TOOL_AMMO: ammoframe.AmmoFrame(self),
        windows.MAIN_TOOL_CHEATS: cheatsframe.CheatsFrame(self),
        windows.MAIN_TOOL_MISC: miscframe.MiscFrame(self),
        windows.MAIN_TOOL_PAR: parframe.ParFrame(self)
    }
    # Menu item id -> toolbar tool id.
    self.menu_windows = {
        windows.MAIN_MENU_THINGS: windows.MAIN_TOOL_THINGS,
        windows.MAIN_MENU_STATES: windows.MAIN_TOOL_STATES,
        windows.MAIN_MENU_SOUNDS: windows.MAIN_TOOL_SOUNDS,
        windows.MAIN_MENU_STRINGS: windows.MAIN_TOOL_STRINGS,
        windows.MAIN_MENU_WEAPONS: windows.MAIN_TOOL_WEAPONS,
        windows.MAIN_MENU_AMMO: windows.MAIN_TOOL_AMMO,
        windows.MAIN_MENU_CHEATS: windows.MAIN_TOOL_CHEATS,
        windows.MAIN_MENU_MISC: windows.MAIN_TOOL_MISC,
        windows.MAIN_MENU_PAR: windows.MAIN_TOOL_PAR
    }
    # Workspace section name -> editor frame.
    self.workspace_windows = {
        'things': self.editor_windows[windows.MAIN_TOOL_THINGS],
        'states': self.editor_windows[windows.MAIN_TOOL_STATES],
        'sounds': self.editor_windows[windows.MAIN_TOOL_SOUNDS],
        'strings': self.editor_windows[windows.MAIN_TOOL_STRINGS],
        'weapons': self.editor_windows[windows.MAIN_TOOL_WEAPONS],
        'ammo': self.editor_windows[windows.MAIN_TOOL_AMMO],
        'cheats': self.editor_windows[windows.MAIN_TOOL_CHEATS],
        'misc': self.editor_windows[windows.MAIN_TOOL_MISC],
        'par': self.editor_windows[windows.MAIN_TOOL_PAR]
    }

    # Reset editor window states.
    self.editor_windows_show(False)
    self.toolbar_set_enabled(False)
    self.Show()

    # Dialogs.
    self.start_dialog = startdialog.StartDialog(self)
    self.about_dialog = aboutdialog.AboutDialog(self)

    self.update_recent_files_menu()
    config.settings.main_window_state_restore(self)

    wx.EndBusyCursor()

    # Late bind these to prevent bad workspace data from being saved.
    self.Bind(wx.EVT_MOVE, self.workspace_update_data)
    self.Bind(wx.EVT_SIZE, self.workspace_update_data)

    self.editor_window_set_edit()
    self.file_set_state()
def show_start(self):
    """
    Show the start dialog modally.
    """
    self.start_dialog.ShowModal()
def load_engines(self):
    """
    Load every engine configuration file (cfg/tables_*.json). These are
    kept in memory for patch compatibility auto-detection.
    """
    for file_name in sorted(glob.glob('cfg/tables_*.json')):
        loaded = engine.Engine()
        try:
            loaded.merge_data(file_name)
            loaded.apply_defaults()
        except engine.DehackedEngineError as e:
            # Report the broken configuration but keep loading the rest.
            wx.MessageBox(message='Invalid engine configuration file "{}". Exception: {}'.format(file_name, e),
                          caption='Engine configuration error', style=wx.OK | wx.ICON_EXCLAMATION, parent=self)
        else:
            # Key the engine by its filename without directory or extension.
            key = os.path.splitext(os.path.basename(file_name))[0]
            self.engines[key] = loaded
def view_patch_settings(self, event):
    """
    Show the patch settings dialog and apply any changes the user accepted.
    """
    self.patch_info.set_state(self.patch, self.engines, self.workspace, modify_engine=False)
    self.patch_info.ShowModal()

    # A None selected engine means the user cancelled out of the dialog.
    if self.patch_info.selected_engine is None:
        return

    self.workspace.iwad = self.patch_info.selected_iwad
    self.workspace.pwads = self.patch_info.selected_pwads
    self.load_wads()

    self.set_modified(True)
    self.workspace_modified = True

    self.editor_windows[windows.MAIN_TOOL_STATES].update_properties()
def open_file_dialog(self, force_show_settings=False):
    """
    Ask the user for a Dehacked patch file and open it.

    @param force_show_settings: if True, always show the patch settings dialog.
    """
    # Give the user a chance to save (or abort) before discarding changes.
    if not self.save_if_needed():
        return

    chosen = utils.file_dialog(self, message='Choose a Dehacked file to open',
                               wildcard='All supported files|*.deh;*.bex|Dehacked files (*.deh)|*.deh|'
                                        'Extended Dehacked files (*.bex)|*.bex|All files|*.*',
                               style=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST)
    if chosen is None:
        return
    self.open_file(chosen, force_show_settings)
def open_file_merge_dialog(self):
    """
    Confirm with the user, then ask for a Dehacked patch file to merge
    into the currently loaded patch.
    """
    result = wx.MessageBox(
        message='Merging a patch cannot be undone, so make sure you have saved this file before '
                'attempting a merge. Canceling a merge will not restore this patch to it\'s '
                'original state.\n\nDo you want to continue?',
        caption='Merge patch',
        style=wx.YES_NO | wx.ICON_EXCLAMATION, parent=self)
    if result == wx.NO:
        return

    chosen = utils.file_dialog(self, message='Choose a Dehacked file to merge',
                               wildcard='All supported files|*.deh;*.bex|Dehacked files (*.deh)|*.deh|'
                                        'Extended Dehacked files (*.bex)|*.bex|All files|*.*',
                               style=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST)
    if chosen is not None:
        self.merge_file(chosen)
def open_file(self, filename, force_show_settings=False):
    """
    Opens and reads a new Dehacked patch.

    @param filename: the filename of the file to open.
    @param force_show_settings: if True, will always display the patch settings dialog.
    """
    # Load the accompanying workspace file if it exists.
    new_workspace = workspace.Workspace()
    workspace_file = workspace.get_filename(filename)
    if os.path.exists(workspace_file):
        # Bug fix: load the workspace file itself, not the patch filename it
        # was derived from (new_workspace.save(workspace_file) below is
        # symmetric and also takes the workspace path).
        new_workspace.load(workspace_file)

    # Analyze the patch file to determine what engines support it.
    new_patch = patch.Patch()
    try:
        new_patch.analyze_patch(filename, self.engines)
    except patch.DehackedPatchError as e:
        wx.MessageBox(message=e.__str__(), caption='Patch error', style=wx.OK | wx.ICON_ERROR, parent=self)
        return

    # Display the patch info dialog to let the user select patch settings.
    # Do not show the info dialog if a workspace was found, unless forced.
    patch_info = patchinfodialog.PatchInfoDialog(self)
    patch_info.set_state(new_patch, self.engines, new_workspace)
    if new_workspace.engine is None or force_show_settings:
        patch_info.ShowModal()

    # User cancelled out of the patch info dialog.
    if patch_info.selected_engine is None:
        return

    # Store patch info in the new workspace.
    new_workspace.iwad = patch_info.selected_iwad
    new_workspace.pwads = patch_info.selected_pwads
    new_workspace.engine = patch_info.selected_engine
    new_workspace.save(workspace_file)

    # Initialize the patch with tables from the selected engine.
    selected_engine = self.engines[new_workspace.engine]
    new_patch.initialize_from_engine(selected_engine)

    # Attempt to parse the patch file.
    try:
        messages = new_patch.read_dehacked(filename)
    except patch.DehackedVersionError as e:
        wx.MessageBox(message=e.__str__(), caption='Patch version error', style=wx.OK | wx.ICON_ERROR, parent=self)
        return
    except patch.DehackedPatchError as e:
        wx.MessageBox(message=e.__str__(), caption='Patch error', style=wx.OK | wx.ICON_ERROR, parent=self)
        return

    # Display any messages from the patch load process.
    for message in messages.values():
        message += '\n\nPress Yes to continue loading, No to stop displaying messages or Cancel to abort ' \
                   'loading this patch.'
        result = wx.MessageBox(message=message, caption='Patch message',
                               style=wx.YES_NO | wx.CANCEL | wx.ICON_EXCLAMATION, parent=self)
        if result == wx.NO:
            break
        elif result == wx.CANCEL:
            return

    # Store new patch info.
    self.patch = new_patch
    self.patch_modified = False
    self.patch_info = patch_info
    self.workspace = new_workspace

    # Refresh user interface contents.
    self.load_wads()
    self.update_ui()
    self.file_set_state()

    # Store potentially updated workspace.
    self.workspace_save()

    # Add item to recent files.
    config.settings.recent_files_add(filename)
    self.update_recent_files_menu()
def merge_file(self, filename):
    """
    Opens and reads a new Dehacked patch then merges it into the current patch.

    The merge happens by reading the new file directly into self.patch via
    read_dehacked, so it cannot be undone; the patch is marked modified.

    @param filename: the filename of the file to open.
    """
    # Analyze the patch file to determine what engines support it.
    new_patch = patch.Patch()
    try:
        new_patch.analyze_patch(filename, self.engines)
    except patch.DehackedPatchError as e:
        wx.MessageBox(message=e.__str__(), caption='Patch error', style=wx.OK | wx.ICON_ERROR, parent=self)
        return

    # Check for compatibility: version and extended flag must match the
    # currently loaded patch before a merge is attempted.
    if new_patch.version != self.patch.version or new_patch.extended != self.patch.extended:
        wx.MessageBox(message='The patch is not compatible with the currently loaded patch. Check the patch version and any extended features.', caption='Patch not compatible', style=wx.OK | wx.ICON_ERROR, parent=self)
        return

    # Attempt to parse the patch file. Note: this reads into self.patch,
    # which is what performs the actual merge.
    try:
        messages = self.patch.read_dehacked(filename)
    except patch.DehackedVersionError as e:
        wx.MessageBox(message=e.__str__(), caption='Patch version error', style=wx.OK | wx.ICON_ERROR, parent=self)
        return
    except patch.DehackedPatchError as e:
        wx.MessageBox(message=e.__str__(), caption='Patch error', style=wx.OK | wx.ICON_ERROR, parent=self)
        return

    # Display any messages from the patch load process. Yes continues, No
    # suppresses further messages, Cancel aborts the UI refresh (the merge
    # itself has already happened at this point).
    for message in messages.values():
        message += '\n\nPress Yes to continue loading, No to stop displaying messages or Cancel to abort ' \
                   'loading this patch.'
        result = wx.MessageBox(message=message, caption='Patch message',
                               style=wx.YES_NO | wx.CANCEL | wx.ICON_EXCLAMATION, parent=self)
        if result == wx.NO:
            break
        elif result == wx.CANCEL:
            return

    # Store new patch info.
    self.patch_modified = True

    # Refresh user interface contents.
    self.load_wads()
    self.update_ui()
    self.file_set_state()
def load_wads(self):
    """
    Loads the WAD files that are selected in the current workspace.

    Missing IWAD\PWAD files are reported to the user and removed from the
    workspace; without an IWAD or PLAYPAL palette, sprite previews are
    disabled.
    """
    self.iwad = None
    self.pwads.clear()

    if self.workspace.iwad is None:
        return

    # Verify if the IWAD file exists at all.
    if not os.path.exists(self.workspace.iwad):
        # Message fix: the literal fragments previously joined as
        # "...will bedisabled." — a separating space was missing.
        wx.MessageBox(message='The IWAD {} could not be found. Sprite previews will be '
                              'disabled.'.format(self.workspace.iwad),
                      caption='Missing IWAD', style=wx.OK | wx.ICON_INFORMATION, parent=self)
        self.workspace.iwad = None
        # NOTE(review): marks the *patch* modified for a workspace-only
        # change — presumably so the state is persisted on save; verify.
        self.patch_modified = True
        return

    wx.BeginBusyCursor()

    # Load and add the IWAD to the WAD list.
    self.iwad = wad.WADReader(self.workspace.iwad)
    self.pwads.add_wad(self.iwad)

    # Load PWADs. Bug fix: iterate over a copy, because missing entries are
    # removed from the list during the loop (mutating a list while
    # iterating it skips the element after each removal).
    for pwad_file in list(self.workspace.pwads):
        if not os.path.exists(pwad_file):
            wx.MessageBox(message='The PWAD {} could not be found.'.format(pwad_file), caption='Missing PWAD',
                          style=wx.OK | wx.ICON_EXCLAMATION, parent=self)
            self.workspace.pwads.remove(pwad_file)
            self.patch_modified = True
        else:
            pwad = wad.WADReader(pwad_file)
            self.pwads.add_wad(pwad)

    # Build the sprite lookup tables.
    self.pwads.build_sprite_list()
    if self.pwads.palette is None:
        # Message fix: fragments previously joined as "...previewswill be...".
        wx.MessageBox(message='No PLAYPAL lump could be found in any of the loaded WAD files. Sprite previews '
                              'will be disabled.', caption='Missing PLAYPAL', style=wx.OK | wx.ICON_INFORMATION,
                      parent=self)
        self.workspace.iwad = None

    wx.EndBusyCursor()
def save_file_dialog(self):
    """
    Show a save dialog for the current patch.

    @return: True if the patch was saved, False if the user cancelled.
    """
    # Reuse the existing filename if the patch was saved before; otherwise
    # pick a default whose extension matches the patch type.
    if self.patch.filename is not None:
        use_filename = self.patch.filename
    else:
        use_filename = 'unnamed.bex' if self.patch.extended else 'unnamed.deh'

    chosen = utils.file_dialog(self, message='Save Dehacked file', default_file=use_filename,
                               wildcard='All supported files|*.deh;*.bex|Dehacked files (*.deh)|*.deh|'
                                        'Extended Dehacked files (*.bex)|*.bex|All files|*.*',
                               style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
    if chosen is None:
        return False

    self.save_file(chosen)
    return True
def save_file(self, filename):
    """
    Saves a Dehacked patch file.

    A .bak copy of any existing file is written first. On a write error a
    message box is shown and the patch's filename\modified state are left
    untouched.

    @param filename: destination path for the patch.
    """
    wx.BeginBusyCursor()
    # Bug fix: the error path used to return without calling
    # wx.EndBusyCursor(), leaving the busy cursor stuck; the try\finally
    # now guarantees it is always restored.
    try:
        for window in self.editor_windows.values():
            window.before_save()

        # Create a backup of the existing file.
        if os.path.exists(filename):
            shutil.copyfile(filename, filename + '.bak')

        # Write patch.
        message = self.patch.write_dehacked(filename)
        if message is not None:
            wx.MessageBox(message=message, caption='Patch write error', style=wx.OK | wx.ICON_ERROR,
                          parent=self)
            return

        self.patch.filename = filename
        self.set_modified(False)

        # Store workspace info.
        self.workspace_save()

        # Add to recent files.
        config.settings.recent_files_add(filename)
        self.update_recent_files_menu()
    finally:
        wx.EndBusyCursor()
def update_recent_files_menu(self):
"""
Updates the recent files submenu.
"""
# Remove the old items.
while self.MenuFileRecent.GetMenuItemCount() > 0:
item = self.MenuFileRecent.FindItemByPosition(0)
self.MenuFileRecent.DestroyItem(item)
# Add all recent files again.
recent_files = config.settings['recent_files']
for recent_file in recent_files:
item = self.MenuFileRecent.Append(wx.ID_ANY, | |
(1 - ps) * c
return new_h, new_h
else:
with tf.variable_scope(scope or type(self).__name__): # "GRUCell"
with tf.variable_scope("Gates"): # Reset gate and update gate.
# We start with bias of 1.0 to not reset and not udpate.
r, u = tf.split(1, 2, lfe.enhanced_linear([inputs, state],
2 * self._num_units, True, 1.0, weight_initializer = self._weight_initializer, orthogonal_scale_factor = self._orthogonal_scale_factor))
r, u = tf.sigmoid(r), tf.sigmoid(u)
with tf.variable_scope("Candidate"): #you need a different one because you're doing a new linear
#notice they have the activation/non-linear step right here!
c = tf.tanh(linear.linear([inputs, r * state], self._num_units, True))
new_h = u * state + (1 - u) * c
return new_h, new_h
class BasicLSTMCell(RNNCell):
    """Basic LSTM recurrent network cell.
    The implementation is based on: http://arxiv.org/pdf/1409.2329v5.pdf.
    It does not allow cell clipping, a projection layer, and does not
    use peep-hole connections: it is the basic baseline.
    Biases of the forget gate are initialized by default to 1 in order to reduce
    the scale of forgetting in the beginning of the training.
    """

    def __init__(self, num_units, gpu_for_layer=0, weight_initializer="uniform_unit",
                 orthogonal_scale_factor=1.1, forget_bias=1.0):
        """Initialize the basic LSTM cell.

        Args:
          num_units: int, number of units in the cell.
          gpu_for_layer: int, index of the GPU this layer is pinned to.
          weight_initializer: str, initializer scheme forwarded to
            lfe.enhanced_linear.
          orthogonal_scale_factor: float, scale forwarded to the orthogonal
            initializer.
          forget_bias: float, bias added to the forget gate (default 1.0,
            see class docstring). Appended last so existing positional
            callers are unaffected.
        """
        self._num_units = num_units
        self._gpu_for_layer = gpu_for_layer
        self._weight_initializer = weight_initializer
        self._orthogonal_scale_factor = orthogonal_scale_factor
        # BUG FIX: _forget_bias was read in __call__ but never assigned in
        # __init__, so the first call raised AttributeError.
        self._forget_bias = forget_bias

    @property
    def input_size(self):
        return self._num_units

    @property
    def output_size(self):
        return self._num_units

    @property
    def state_size(self):
        # The state is the concatenation [c, h], hence 2 * num_units.
        return 2 * self._num_units

    def __call__(self, inputs, state, scope=None):
        """Long short-term memory cell (LSTM): one step, returns (output, new_state)."""
        with tf.device("/gpu:" + str(self._gpu_for_layer)):
            with tf.variable_scope(scope or type(self).__name__):  # "BasicLSTMCell"
                # Parameters of gates are concatenated into one multiply for efficiency.
                # NOTE: tf.split/tf.concat use the pre-1.0 TF argument order
                # (axis first), consistent with the rest of this file.
                c, h = tf.split(1, 2, state)
                concat = lfe.enhanced_linear([inputs, h], 4 * self._num_units, True,
                                             weight_initializer=self._weight_initializer,
                                             orthogonal_scale_factor=self._orthogonal_scale_factor)
                # i = input_gate, j = new_input, f = forget_gate, o = output_gate
                i, j, f, o = tf.split(1, 4, concat)
                new_c = c * tf.sigmoid(f + self._forget_bias) + tf.sigmoid(i) * tf.tanh(j)
                new_h = tf.tanh(new_c) * tf.sigmoid(o)
                # Important: the second element is the full hidden state [c, h];
                # an LSTM with n units therefore has a state of dimension 2n,
                # and the emitted output differs from the hidden state.
                return new_h, tf.concat(1, [new_c, new_h])
class LSTMCell(RNNCell):
    """Long short-term memory unit (LSTM) recurrent network cell.
    This implementation is based on:
    https://research.google.com/pubs/archive/43905.pdf
    <NAME>, <NAME>, and <NAME>.
    "Long short-term memory recurrent neural network architectures for
    large scale acoustic modeling." INTERSPEECH, 2014.
    It uses peep-hole connections, optional cell clipping, and an optional
    projection layer.
    """

    def __init__(self, num_units, input_size,
                 use_peepholes=False, cell_clip=None,
                 initializer=None, num_proj=None,
                 num_unit_shards=1, num_proj_shards=1,
                 gpu_for_layer = 0, weight_initializer = "uniform_unit", orthogonal_scale_factor = 1.1):
        """Initialize the parameters for an LSTM cell.
        Args:
          num_units: int, The number of units in the LSTM cell
          input_size: int, The dimensionality of the inputs into the LSTM cell
          use_peepholes: bool, set True to enable diagonal/peephole connections.
          cell_clip: (optional) A float value, if provided the cell state is clipped
            by this value prior to the cell output activation.
          initializer: (optional) The initializer to use for the weight and
            projection matrices.
          num_proj: (optional) int, The output dimensionality for the projection
            matrices. If None, no projection is performed.
          num_unit_shards: How to split the weight matrix. If >1, the weight
            matrix is stored across num_unit_shards.
            Note that num_unit_shards must evenly divide num_units * 4.
          num_proj_shards: How to split the projection matrix. If >1, the
            projection matrix is stored across num_proj_shards.
            Note that num_proj_shards must evenly divide num_proj
            (if num_proj is not None).
          gpu_for_layer: int, index of the GPU this layer is pinned to.
          weight_initializer: str, initializer scheme (kept for signature
            parity with the other cells in this file; the weights here use
            `initializer` instead).
          orthogonal_scale_factor: float, scale for the orthogonal initializer
            (kept for signature parity with the other cells in this file).
        Raises:
          ValueError: if num_unit_shards doesn't divide 4 * num_units or
            num_proj_shards doesn't divide num_proj
        """
        self._num_units = num_units
        self._input_size = input_size
        self._use_peepholes = use_peepholes
        self._cell_clip = cell_clip
        self._initializer = initializer
        self._num_proj = num_proj
        self._num_unit_shards = num_unit_shards
        self._num_proj_shards = num_proj_shards
        self._gpu_for_layer = gpu_for_layer
        self._weight_initializer = weight_initializer
        self._orthogonal_scale_factor = orthogonal_scale_factor
        # Each shard must hold an integral slice of the gate weights.
        if (num_units * 4) % num_unit_shards != 0:
            raise ValueError("num_unit_shards must evently divide 4 * num_units")
        if num_proj and num_proj % num_proj_shards != 0:
            raise ValueError("num_proj_shards must evently divide num_proj")
        if num_proj:
            # With a projection layer the recurrent output (m) has num_proj
            # units, so the concatenated state [c, m] is num_units + num_proj.
            self._state_size = num_units + num_proj
            self._output_size = num_proj
        else:
            self._state_size = 2 * num_units
            self._output_size = num_units

    @property
    def input_size(self):
        return self._input_size

    @property
    def output_size(self):
        return self._output_size

    @property
    def state_size(self):
        return self._state_size

    def __call__(self, input_, state, scope=None):
        with tf.device("/gpu:"+str(self._gpu_for_layer)):
            """Run one step of LSTM.
            Args:
              input_: input Tensor, 2D, batch x num_units.
              state: state Tensor, 2D, batch x state_size.
              scope: VariableScope for the created subgraph; defaults to "LSTMCell".
            Returns:
              A tuple containing:
              - A 2D, batch x output_dim, Tensor representing the output of the LSTM
                after reading "input_" when previous state was "state".
                Here output_dim is:
                   num_proj if num_proj was set,
                   num_units otherwise.
              - A 2D, batch x state_size, Tensor representing the new state of LSTM
                after reading "input_" when previous state was "state".
            """
            num_proj = self._num_units if self._num_proj is None else self._num_proj
            # Unpack state = [c_prev | m_prev] along the feature axis.
            c_prev = tf.slice(state, [0, 0], [-1, self._num_units])
            m_prev = tf.slice(state, [0, self._num_units], [-1, num_proj])
            dtype = input_.dtype
            unit_shard_size = (4 * self._num_units) // self._num_unit_shards
            with tf.variable_scope(scope or type(self).__name__):  # "LSTMCell"
                # Gate weights may be sharded across variables W_0..W_{k-1};
                # concatenating them along the output axis rebuilds the full
                # [input + recurrent, 4 * num_units] matrix.
                # NOTE(review): xrange (and the axis-first tf.split/tf.concat
                # API) implies Python 2 / pre-1.0 TensorFlow.
                w = tf.concat(
                    1,
                    [tf.get_variable("W_%d" % i,
                                     shape=[self.input_size + num_proj, unit_shard_size],
                                     initializer=self._initializer,
                                     dtype=dtype) for i in xrange(self._num_unit_shards)])
                b = tf.get_variable(
                    "B", shape=[4 * self._num_units],
                    initializer=tf.zeros_initializer, dtype=dtype)
                # i = input_gate, j = new_input, f = forget_gate, o = output_gate
                cell_inputs = tf.concat(1, [input_, m_prev])
                i, j, f, o = tf.split(1, 4, tf.nn.bias_add(tf.matmul(cell_inputs, w), b))
                # Diagonal connections
                if self._use_peepholes:
                    w_f_diag = tf.get_variable(
                        "W_F_diag", shape=[self._num_units], dtype=dtype)
                    w_i_diag = tf.get_variable(
                        "W_I_diag", shape=[self._num_units], dtype=dtype)
                    w_o_diag = tf.get_variable(
                        "W_O_diag", shape=[self._num_units], dtype=dtype)
                # The "+ 1" is a fixed forget-gate bias (see BasicLSTMCell's
                # docstring for the rationale).
                if self._use_peepholes:
                    c = (tf.sigmoid(f + 1 + w_f_diag * c_prev) * c_prev +
                         tf.sigmoid(i + w_i_diag * c_prev) * tf.tanh(j))
                else:
                    c = (tf.sigmoid(f + 1) * c_prev + tf.sigmoid(i) * tf.tanh(j))
                if self._cell_clip is not None:
                    c = tf.clip_by_value(c, -self._cell_clip, self._cell_clip)
                if self._use_peepholes:
                    # Output peephole looks at the *new* cell state c.
                    m = tf.sigmoid(o + w_o_diag * c) * tf.tanh(c)
                else:
                    m = tf.sigmoid(o) * tf.tanh(c)
                if self._num_proj is not None:
                    # Optional (possibly sharded) linear projection of the output.
                    proj_shard_size = self._num_proj // self._num_proj_shards
                    w_proj = tf.concat(
                        1,
                        [tf.get_variable("W_P_%d" % i,
                                         shape=[self._num_units, proj_shard_size],
                                         initializer=self._initializer,
                                         dtype=dtype)
                         for i in xrange(self._num_proj_shards)])
                    # TODO(ebrevdo), use matmulsum
                    m = tf.matmul(m, w_proj)
                return m, tf.concat(1, [c, m])
class IdentityRNNCell(RNNCell):
    """Identity RNN from http://arxiv.org/pdf/1504.00941v2.pdf"""
    # If you want only short-term memory, you can use a small scalar in the
    # initialization of the identity matrix.

    def __init__(self, num_units, gpu_for_layer, weight_initializer="uniform_unit",
                 orthogonal_scale_factor=1.1):
        """Initialize the identity-RNN cell.

        Args:
          num_units: int, number of units in the cell.
          gpu_for_layer: int, index of the GPU this layer is pinned to.
          weight_initializer: str, initializer scheme (stored for parity with
            the other cells; this cell uses fixed "constant"/"identity"
            initializers for its own weights).
          orthogonal_scale_factor: float, stored for parity with the other
            cells in this file.
        """
        # BUG FIX: weight_initializer and orthogonal_scale_factor were read
        # here but were not parameters, so constructing this cell raised
        # NameError. They are now optional parameters with the same defaults
        # as the sibling cells, keeping existing two-argument callers working.
        self._num_units = num_units
        self._gpu_for_layer = gpu_for_layer
        self._weight_initializer = weight_initializer
        self._orthogonal_scale_factor = orthogonal_scale_factor

    @property
    def input_size(self):
        return self._num_units

    @property
    def output_size(self):
        return self._num_units

    @property
    def state_size(self):
        return self._num_units

    def __call__(self, inputs, state, scope=None):
        """Most basic RNN: output = new_state = relu(W * input + U * state + B)."""
        # We need separate matmuls because the state weights use an identity
        # initialization while the input weights use a constant one.
        with tf.device("/gpu:" + str(self._gpu_for_layer)):
            with tf.variable_scope(scope or type(self).__name__):  # "IdentityRNNCell"
                with tf.variable_scope("inputs_weights"):
                    # BUG FIX: the result was previously wrapped in a second
                    # lfe.linear() call with a single positional argument
                    # (no output size), which cannot be a valid call; the
                    # redundant outer wrapper has been removed.
                    input_weight_matrix_updated = lfe.linear([inputs], self._num_units, True,
                                                             weight_initializer="constant",
                                                             bias_start=0.0)
                with tf.variable_scope("state_weights"):
                    # The recurrent weights start as the identity matrix.
                    state_weight_matrix_updated = lfe.linear([state], self._num_units, True,
                                                             weight_initializer="identity",
                                                             bias_start=0.0)
                # Add the two projections together and apply the ReLU.
                output = tf.nn.relu(tf.add(input_weight_matrix_updated, state_weight_matrix_updated))
                return output, output
class OutputProjectionWrapper(RNNCell):
"""Operator adding an output projection to the given cell.
Note: in many cases it may be more efficient to not use this wrapper,
but instead concatenate the whole sequence of your outputs in time,
do the projection on this batch-concated sequence, then split it
if needed or directly feed into a softmax.
"""
def __init__(self, cell, output_size):
"""Create a cell with output projection.
Args:
cell: an RNNCell, a projection to output_size is added to it.
output_size: integer, the size of the output after projection.
Raises:
TypeError: if cell is not an RNNCell.
ValueError: if output_size is not positive.
"""
if not isinstance(cell, RNNCell):
raise TypeError("The parameter cell is not RNNCell.")
if output_size < 1:
raise ValueError("Parameter output_size must be > 0: %d." % output_size)
self._cell = cell
self._output_size = output_size
@property
def input_size(self):
return self._cell.input_size
@property
def output_size(self):
return self._output_size
@property
def state_size(self):
return self._cell.state_size
def __call__(self, inputs, state, scope=None):
"""Run the cell and output projection on inputs, starting from state."""
output, res_state = self._cell(inputs, state)
# Default scope: "OutputProjectionWrapper"
with tf.variable_scope(scope or type(self).__name__):
projected = linear.linear(output, self._output_size, True)
| |
<gh_stars>10-100
from __future__ import absolute_import, division, print_function
import collections
import torch
import torch.nn as nn
import torch.nn.functional as F
# from torch.nn.utils.spectral_norm import spectral_norm
from model.pwc_modules import conv, rescale_flow, upsample2d_as, initialize_msra, upsample2d_flow_as, upsample_flow, FlowEstimatorDense_v2, ContextNetwork_v2_
from model.pwc_modules import WarpingLayer_no_div, FeatureExtractor, ContextNetwork, FlowEstimatorDense, App_model_level_select, WarpingLayer, Appearance_flow_net_for_disdiilation
from model.correlation_package.correlation import Correlation
import numpy as np
from utils_luo.tools import tools
from utils_luo.loss import loss_functions
import cv2
import os
class network_tools():
    # Collection of stateless loss / normalization helpers (all classmethods)
    # used by the unsupervised PWC-Net models defined later in this file.

    @classmethod
    def normalize_features(cls, feature_list, normalize, center, moments_across_channels=True, moments_across_images=True):
        """Normalizes feature tensors (e.g., before computing the cost volume).
        Args:
            feature_list: list of torch tensors, each with dimensions [b, c, h, w]
            normalize: bool flag, divide features by their standard deviation
            center: bool flag, subtract feature mean
            moments_across_channels: bool flag, compute mean and std across channels (UFlow defaults to True)
            moments_across_images: bool flag, compute mean and std across images (UFlow defaults to True)
        Returns:
            list, normalized feature_list
        """
        # Compute feature statistics.
        statistics = collections.defaultdict(list)
        axes = [1, 2, 3] if moments_across_channels else [2, 3]  # [b, c, h, w]
        for feature_image in feature_list:
            mean = torch.mean(feature_image, dim=axes, keepdim=True)  # [b,1,1,1] or [b,c,1,1]
            variance = torch.var(feature_image, dim=axes, keepdim=True)  # [b,1,1,1] or [b,c,1,1]
            statistics['mean'].append(mean)
            statistics['var'].append(variance)
        if moments_across_images:
            # Share a single mean/var across all images so corresponding
            # features of the two frames remain directly comparable.
            statistics['mean'] = ([torch.mean(torch.stack(statistics['mean'], dim=0), dim=(0,))] * len(feature_list))
            statistics['var'] = ([torch.var(torch.stack(statistics['var'], dim=0), dim=(0,))] * len(feature_list))
        # Epsilon keeps the sqrt (and the later division) numerically safe.
        statistics['std'] = [torch.sqrt(v + 1e-16) for v in statistics['var']]
        # Center and normalize features.
        if center:
            feature_list = [
                f - mean for f, mean in zip(feature_list, statistics['mean'])
            ]
        if normalize:
            feature_list = [f / std for f, std in zip(feature_list, statistics['std'])]
        return feature_list

    @classmethod
    def weighted_ssim(cls, x, y, weight, c1=float('inf'), c2=9e-6, weight_epsilon=0.01):
        """Computes a weighted structured image similarity measure.
        Args:
            x: a batch of images, of shape [B, C, H, W].
            y: a batch of images, of shape [B, C, H, W].
            weight: shape [B, 1, H, W], representing the weight of each
                pixel in both images when we come to calculate moments (means and
                correlations). values are in [0,1]
            c1: A floating point number, regularizes division by zero of the means.
            c2: A floating point number, regularizes division by zero of the second
                moments.
            weight_epsilon: A floating point number, used to regularize division by the
                weight.
        Returns:
            A tuple of two pytorch Tensors. First, of shape [B, C, H-2, W-2], is scalar
            similarity loss per pixel per channel, and the second, of shape
            [B, 1, H-2. W-2], is the average pooled `weight`. It is needed so that we
            know how much to weigh each pixel in the first tensor. For example, if
            `'weight` was very small in some area of the images, the first tensor will
            still assign a loss to these pixels, but we shouldn't take the result too
            seriously.
        """
        def _avg_pool3x3(x):
            # 3x3 average pooling, stride 1, no padding: output loses a
            # one-pixel border (H-2, W-2), matching the tf 'VALID' behaviour.
            return F.avg_pool2d(x, (3, 3), (1, 1))
        if c1 == float('inf') and c2 == float('inf'):
            raise ValueError('Both c1 and c2 are infinite, SSIM loss is zero. This is '
                             'likely unintended.')
        average_pooled_weight = _avg_pool3x3(weight)
        weight_plus_epsilon = weight + weight_epsilon
        inverse_average_pooled_weight = 1.0 / (average_pooled_weight + weight_epsilon)

        def weighted_avg_pool3x3(z):
            # Weighted local average: pool z * w, then renormalize by the
            # pooled weight so low-weight pixels contribute less.
            wighted_avg = _avg_pool3x3(z * weight_plus_epsilon)
            return wighted_avg * inverse_average_pooled_weight
        # Local (weighted) first and second moments.
        mu_x = weighted_avg_pool3x3(x)
        mu_y = weighted_avg_pool3x3(y)
        sigma_x = weighted_avg_pool3x3(x ** 2) - mu_x ** 2
        sigma_y = weighted_avg_pool3x3(y ** 2) - mu_y ** 2
        sigma_xy = weighted_avg_pool3x3(x * y) - mu_x * mu_y
        if c1 == float('inf'):
            # Infinite c1 drops the luminance term of SSIM.
            ssim_n = (2 * sigma_xy + c2)
            ssim_d = (sigma_x + sigma_y + c2)
        elif c2 == float('inf'):
            # Infinite c2 drops the contrast/structure term of SSIM.
            ssim_n = 2 * mu_x * mu_y + c1
            ssim_d = mu_x ** 2 + mu_y ** 2 + c1
        else:
            ssim_n = (2 * mu_x * mu_y + c1) * (2 * sigma_xy + c2)
            ssim_d = (mu_x ** 2 + mu_y ** 2 + c1) * (sigma_x + sigma_y + c2)
        result = ssim_n / ssim_d
        # Map similarity (<= 1) to a loss clamped to [0, 1].
        return torch.clamp((1 - result) / 2, 0, 1), average_pooled_weight

    @classmethod
    def edge_aware_smoothness_order1(cls, img, pred):
        # First-order edge-aware smoothness: gradients of `pred` are
        # penalized less where the image `img` itself has strong gradients.
        def gradient_x(img):
            # NOTE(review): this differences along dim 2 despite the _x name
            # (gradient_y uses dim 3); both directions are summed
            # symmetrically below, so the total is unaffected.
            gx = img[:, :, :-1, :] - img[:, :, 1:, :]
            return gx

        def gradient_y(img):
            gy = img[:, :, :, :-1] - img[:, :, :, 1:]
            return gy
        pred_gradients_x = gradient_x(pred)
        pred_gradients_y = gradient_y(pred)
        image_gradients_x = gradient_x(img)
        image_gradients_y = gradient_y(img)
        # Exponential edge weights: near 0 at strong image edges, 1 in flat areas.
        weights_x = torch.exp(-torch.mean(torch.abs(image_gradients_x), 1, keepdim=True))
        weights_y = torch.exp(-torch.mean(torch.abs(image_gradients_y), 1, keepdim=True))
        smoothness_x = torch.abs(pred_gradients_x) * weights_x
        smoothness_y = torch.abs(pred_gradients_y) * weights_y
        return torch.mean(smoothness_x) + torch.mean(smoothness_y)

    @classmethod
    def edge_aware_smoothness_order2(cls, img, pred):
        # Second-order variant: penalizes the *second* derivative of `pred`,
        # weighted by image gradients computed with stride 2 so that the
        # weight covers the same spatial span as the second difference.
        def gradient_x(img, stride=1):
            gx = img[:, :, :-stride, :] - img[:, :, stride:, :]
            return gx

        def gradient_y(img, stride=1):
            gy = img[:, :, :, :-stride] - img[:, :, :, stride:]
            return gy
        pred_gradients_x = gradient_x(pred)
        pred_gradients_xx = gradient_x(pred_gradients_x)
        pred_gradients_y = gradient_y(pred)
        pred_gradients_yy = gradient_y(pred_gradients_y)
        image_gradients_x = gradient_x(img, stride=2)
        image_gradients_y = gradient_y(img, stride=2)
        weights_x = torch.exp(-torch.mean(torch.abs(image_gradients_x), 1, keepdim=True))
        weights_y = torch.exp(-torch.mean(torch.abs(image_gradients_y), 1, keepdim=True))
        smoothness_x = torch.abs(pred_gradients_xx) * weights_x
        smoothness_y = torch.abs(pred_gradients_yy) * weights_y
        return torch.mean(smoothness_x) + torch.mean(smoothness_y)

    @classmethod
    def flow_smooth_delta(cls, flow, if_second_order=False):
        # Plain (not edge-aware) smoothness loss on a flow field: mean
        # absolute first (and optionally second) differences.
        def gradient(x):
            D_dy = x[:, :, 1:] - x[:, :, :-1]
            D_dx = x[:, :, :, 1:] - x[:, :, :, :-1]
            return D_dx, D_dy
        dx, dy = gradient(flow)
        if if_second_order:
            dx2, dxdy = gradient(dx)
            dydx, dy2 = gradient(dy)
            smooth_loss = dx.abs().mean() + dy.abs().mean() + dx2.abs().mean() + dxdy.abs().mean() + dydx.abs().mean() + dy2.abs().mean()
        else:
            smooth_loss = dx.abs().mean() + dy.abs().mean()
        # For now the second-order term is off by default; it appeared too
        # strong and kept the photo loss from decreasing. TODO
        return smooth_loss

    @classmethod
    def photo_loss_multi_type(cls, x, y, occ_mask, photo_loss_type='abs_robust',  # abs_robust, charbonnier, L1, SSIM
                              photo_loss_delta=0.4, photo_loss_use_occ=False,
                              ):
        """Photometric loss between x and y under one of four penalty types.
        Args:
            x, y: image tensors [B, C, H, W].
            occ_mask: occlusion weight tensor, used only if photo_loss_use_occ.
            photo_loss_type: one of 'abs_robust', 'charbonnier', 'L1', 'SSIM'.
            photo_loss_delta: exponent for the robust/charbonnier penalties.
            photo_loss_use_occ: if True, compute a weighted mean over occ_mask;
                otherwise an unweighted mean.
        Returns:
            scalar loss tensor.
        Raises:
            ValueError: on an unknown photo_loss_type.
        """
        occ_weight = occ_mask
        if photo_loss_type == 'abs_robust':
            photo_diff = x - y
            loss_diff = (torch.abs(photo_diff) + 0.01).pow(photo_loss_delta)
        elif photo_loss_type == 'charbonnier':
            photo_diff = x - y
            loss_diff = ((photo_diff) ** 2 + 1e-6).pow(photo_loss_delta)
        elif photo_loss_type == 'L1':
            photo_diff = x - y
            loss_diff = torch.abs(photo_diff + 1e-6)
        elif photo_loss_type == 'SSIM':
            # weighted_ssim also returns the pooled mask to use as weight.
            loss_diff, occ_weight = cls.weighted_ssim(x, y, occ_mask)
        else:
            raise ValueError('wrong photo_loss type: %s' % photo_loss_type)
        if photo_loss_use_occ:
            photo_loss = torch.sum(loss_diff * occ_weight) / (torch.sum(occ_weight) + 1e-6)
        else:
            photo_loss = torch.mean(loss_diff)
        return photo_loss

    @classmethod
    def compute_inpaint_photo_loss_mask(cls, img_raw, img_restore, mask, q=0.4, if_l1=False):
        # Photometric loss restricted to the masked-out (inpainted) region:
        # the loss is weighted by (1 - mask), so it only counts where
        # mask == 0. NOTE(review): presumably mask marks the known pixels;
        # confirm against the caller.
        diff = img_raw - img_restore
        loss_mask = 1 - mask
        if if_l1:
            # NOTE(review): abs(diff).mean() reduces to a scalar *before* the
            # mask is applied — this looks suspicious (the masked branch below
            # keeps diff per-pixel); confirm this reduction is intended.
            diff = torch.abs(diff).mean()
            diff = diff * loss_mask
            loss_mean = diff.mean() / (loss_mask.mean() * 2 + 1e-6)
        else:
            # Robust penalty restricted to the masked region, normalized by
            # the (doubled) mask area.
            diff = (torch.abs(diff) + 0.01).pow(q)
            diff = diff * loss_mask
            diff_sum = torch.sum(diff)
            loss_mean = diff_sum / (torch.sum(loss_mask) * 2 + 1e-6)
        return loss_mean

    @classmethod
    def compute_inpaint_photo_loss_mask_multi_type(cls, img_raw, img_restore, mask, photo_loss_type='abs_robust',  # abs_robust, charbonnier, L1, SSIM
                                                   q=0.4):
        # Multi-penalty variant of compute_inpaint_photo_loss_mask: same
        # masked-region weighting, but the per-pixel penalty is selectable
        # like in photo_loss_multi_type.
        loss_mask = 1 - mask
        occ_weight = loss_mask
        if photo_loss_type == 'abs_robust':
            photo_diff = img_raw - img_restore
            loss_diff = (torch.abs(photo_diff) + 0.01).pow(q)
        elif photo_loss_type == 'charbonnier':
            photo_diff = img_raw - img_restore
            loss_diff = ((photo_diff) ** 2 + 1e-6).pow(q)
        elif photo_loss_type == 'L1':
            photo_diff = img_raw - img_restore
            loss_diff = torch.abs(photo_diff + 1e-6)
        elif photo_loss_type == 'SSIM':
            loss_diff, occ_weight = cls.weighted_ssim(img_raw, img_restore, loss_mask)
        else:
            raise ValueError('wrong photo_loss type: %s' % photo_loss_type)
        # Weighted mean over the masked region (denominator doubled, matching
        # compute_inpaint_photo_loss_mask).
        diff = loss_diff * occ_weight
        diff_sum = torch.sum(diff)
        loss_mean = diff_sum / (torch.sum(occ_weight) * 2 + 1e-6)
        return loss_mean
# 这一版改掉了很多原有的细节,因此收敛稍微慢一些了
class PWCNet_unsup_irr_bi_v2(tools.abstract_model):
def __init__(self, occ_type='for_back_check', occ_alpha_1=0.1, occ_alpha_2=0.5, occ_check_sum_abs_or_squar=True, occ_check_obj_out_all='obj',
photo_loss_use_occ=False, photo_loss_delta=0.4,
flow_resize_conf='up_flow', multi_scale_weight=(1, 1, 1, 1)):
super(PWCNet_unsup_irr_bi_v2, | |
import yaml
from collections import namedtuple
from github import Github
from psycopg2 import connect, Error
from tqdm import tqdm
from .generator import ExtractorGenerator
from .models.model_manager import ModelManager
from .scanners.git_scanner import GitScanner
# Row types for the database tables. Field order must match the column order
# of the corresponding tables, since rows are unpacked positionally
# (e.g. Rule(*row)._asdict()).
Rule = namedtuple('Rule', 'id regex category description')
Repo = namedtuple('Repo', 'url last_commit')
Discovery = namedtuple('Discovery',
                       'id file_name commit_id snippet repo_url rule_id state \
timestamp')
class Client:
def __init__(self, dbname, dbuser, dbpassword,
             dbhost='localhost', dbport=5432):
    """ Create a connection to the database.
    The Client is the object in charge of all the operations on the
    database, and in charge of launching the scans.
    Parameters
    ----------
    dbname: str
        The name of the database
    dbuser: str
        The user of the database
    dbpassword: str
        The password for the user
    dbhost: str, default `localhost`
        The host of the database
    dbport: int, default `5432`
        The port for the database connection
    Raises
    ------
    OperationalError
        If the Client cannot connect to the database
    """
    # BUG FIX: the password argument was the literal placeholder
    # '<PASSWORD>' (invalid Python); pass the dbpassword parameter through.
    self.db = connect(host=dbhost,
                      dbname=dbname,
                      user=dbuser,
                      password=dbpassword,
                      port=dbport)
def add_discovery(self, file_name, commit_id, snippet, repo_url, rule_id,
                  state='new'):
    """ Add a new discovery.
    Parameters
    ----------
    file_name: str
        The name of the file that produced the discovery
    commit_id: str
        The id of the commit introducing the discovery
    snippet: str
        The line matched during the scan
    repo_url: str
        The url of the repository
    rule_id: str
        The id of the rule used during the scan
    state: str, default `new`
        The state of the discovery
    Returns
    -------
    int
        The id of the new discovery (-1 in case of error)
    """
    query = 'INSERT INTO discoveries (file_name, commit_id, snippet, \
            repo_url, rule_id, state) VALUES (%s, %s, %s, %s, %s, %s) \
            RETURNING id'
    cursor = self.db.cursor()
    try:
        cursor.execute(query, (file_name, commit_id, snippet, repo_url,
                               rule_id, state))
        self.db.commit()
        return int(cursor.fetchone()[0])
    except (TypeError, IndexError, Error):
        # TypeError/IndexError cover missing arguments or an empty result
        # set; Error covers any database failure. All roll back and signal
        # failure with -1.
        self.db.rollback()
        return -1
def add_repo(self, repo_url):
    """ Add a new repository.
    Do not set the latest commit (it will be set when the repository is
    scanned).
    Parameters
    ----------
    repo_url: str
        The url of the repository
    Returns
    -------
    bool
        `True` if the insert was successfull, `False` otherwise
    """
    query = 'INSERT INTO repos (url) VALUES (%s) RETURNING true'
    cursor = self.db.cursor()
    try:
        cursor.execute(query, (repo_url,))
        self.db.commit()
        return bool(cursor.fetchone()[0])
    except (TypeError, IndexError, Error):
        # Missing argument, empty result, or database failure: roll back
        # and report failure.
        self.db.rollback()
        return False
def add_rule(self, regex, category, description=''):
    """ Add a new rule.
    Parameters
    ----------
    regex: str
        The regex to be matched
    category: str
        The category of the rule
    description: str, optional
        The description of the rule
    Returns
    -------
    int
        The id of the new rule (-1 in case of errors)
    """
    query = 'INSERT INTO rules (regex, category, description) VALUES (%s, \
            %s, %s) RETURNING id'
    cursor = self.db.cursor()
    try:
        cursor.execute(query, (regex, category, description))
        self.db.commit()
        return int(cursor.fetchone()[0])
    except (TypeError, IndexError, Error):
        # Missing argument, empty result, or database failure: roll back
        # and report failure with -1.
        self.db.rollback()
        return -1
def add_rules_from_files(self, filename):
    """ Add rules from a file.
    Parameters
    ----------
    filename: str
        The file containing the rules
    Raises
    ------
    FileNotFoundError
        If the file does not exist
    ParserError
        If the file is malformed
    KeyError
        If one of the required attributes in the file (i.e., rules, regex,
        and category) is missing
    """
    # Load the whole YAML document, then insert every rule it declares.
    with open(filename, 'r') as rules_file:
        data = yaml.safe_load(rules_file)
    for rule in data['rules']:
        # `description` is the only optional attribute.
        self.add_rule(rule['regex'],
                      rule['category'],
                      rule.get('description', ''))
def delete_repo(self, repo_url):
    """ Delete a repository.
    Parameters
    ----------
    repo_url: str
        The url of the repo to delete
    Returns
    -------
    bool
        `True` if the repo was successfully deleted, `False` otherwise
    """
    query = 'DELETE FROM repos WHERE url=%s RETURNING true'
    cursor = self.db.cursor()
    try:
        cursor.execute(query, (repo_url,))
        self.db.commit()
        return bool(cursor.fetchone()[0])
    except (TypeError, IndexError, Error):
        # Missing argument, nothing deleted, or database failure: roll
        # back and report failure.
        self.db.rollback()
        return False
def get_repos(self):
    """ Get all the repositories.
    Returns
    -------
    list
        A list of repositories (dictionaries).
        An empty list if there are no repos (or in case of errors)
    """
    query = 'SELECT * FROM repos'
    cursor = self.db.cursor()
    try:
        cursor.execute(query)
        # Convert each row to a dict through the Repo namedtuple so the
        # keys match the table's column names.
        return [dict(Repo(*row)._asdict()) for row in cursor.fetchall()]
    except (TypeError, IndexError, Error):
        # Malformed row or database failure: roll back and return nothing.
        self.db.rollback()
        return []
def get_repo(self, repo_url):
    """ Get a repository.
    Parameters
    ----------
    repo_url: str
        The url of the repo
    Returns
    -------
    dict
        A repository (an empty dictionary if the url does not exist)
    """
    query = 'SELECT * FROM repos WHERE url=%s'
    cursor = self.db.cursor()
    try:
        cursor.execute(query, (repo_url,))
        row = cursor.fetchone()
        # An unknown url yields no row; report it as an empty dict.
        return dict(Repo(*row)._asdict()) if row else {}
    except (TypeError, IndexError, Error):
        # Missing argument, malformed row, or database failure.
        self.db.rollback()
        return {}
def get_rules(self, category=None):
    """ Get the rules.
    Differently from other get methods, here we pass the category as
    argument. This is due to the fact that categories may have a slash
    (e.g., `auth/password`). Encoding such categories in the url would
    cause an error on the server side.
    Parameters
    ----------
    category: str, optional
        If specified get all the rules, otherwise get all the rules of this
        category
    Returns
    -------
    list
        A list of rules (dictionaries)
    """
    cursor = self.db.cursor()
    try:
        if category:
            cursor.execute('SELECT * FROM rules WHERE category=%s',
                           (category,))
        else:
            # BUG FIX: the original always executed with (category,) as
            # query parameters, even for the parameterless query. psycopg2
            # raises TypeError when arguments are supplied for a query with
            # no placeholders, so get_rules() without a category always
            # returned [].
            cursor.execute('SELECT * FROM rules')
        all_rules = []
        result = cursor.fetchone()
        while result:
            all_rules.append(dict(Rule(*result)._asdict()))
            result = cursor.fetchone()
        return all_rules
    except (TypeError, IndexError):
        # A TypeError is raised if any of the required arguments is missing.
        self.db.rollback()
        return []
    except Error:
        self.db.rollback()
        return []
def get_rule(self, rule_id):
    """ Get a rule.
    Parameters
    ----------
    rule_id: int
        The id of the rule
    Returns
    -------
    dict
        A rule (an empty dictionary in case of errors)
    """
    query = 'SELECT * FROM rules WHERE id=%s'
    cursor = self.db.cursor()
    try:
        cursor.execute(query, (rule_id,))
        return dict(Rule(*cursor.fetchone())._asdict())
    except (TypeError, IndexError):
        """ A TypeError is raised if any of the required arguments is
        missing. """
        self.db.rollback()
        # CONSISTENCY FIX: previously returned () on error, although the
        # docstring promises a dict and the sibling get_discovery returns
        # {}. Both are falsy, so truthiness-based callers are unaffected.
        return {}
    except Error:
        self.db.rollback()
        return {}
def get_discoveries(self, repo_url):
    """ Get all the discoveries of a repository.
    Parameters
    ----------
    repo_url: str
        The url of the repository
    Returns
    -------
    list
        A list of discoveries (dictionaries)
    """
    query = 'SELECT * FROM discoveries WHERE repo_url=%s'
    cursor = self.db.cursor()
    try:
        cursor.execute(query, (repo_url,))
        # Convert each row to a dict via the Discovery namedtuple so the
        # keys match the table's column names.
        return [dict(Discovery(*row)._asdict()) for row in cursor.fetchall()]
    except (TypeError, IndexError, Error):
        # Missing argument, malformed row, or database failure.
        self.db.rollback()
        return []
def get_discovery(self, discovery_id):
    """ Get a discovery.
    Parameters
    ----------
    discovery_id: int
        The id of the discovery
    Returns
    -------
    dict
        A discovery
    """
    query = 'SELECT * FROM discoveries WHERE id=%s'
    cursor = self.db.cursor()
    try:
        cursor.execute(query, (discovery_id,))
        # fetchone() returns None for an unknown id; Discovery(*None)
        # raises TypeError, which the handler below maps to {}.
        return dict(Discovery(*cursor.fetchone())._asdict())
    except (TypeError, IndexError, Error):
        self.db.rollback()
        return {}
def get_discovery_group(self, repo_url, state=None):
    """ Get all the discoveries of a repository, grouped by file_name,
    snippet, and state.
    Parameters
    ----------
    repo_url: str
        The url of the repository
    state: str, optional
        The state of the discoveries. If not set, get all the discoveries
        independently from their state
    Returns
    -------
    list
        A list of tuples. Each tuple is composed by file_name, snippet,
        number of times that this couple occurs, and the state of the
        couple.
    """
    # Pick the query and its parameters together, so execute() always
    # receives a matching placeholder/parameter pair.
    if state:
        query = 'SELECT file_name, snippet, count(id), state FROM \
            discoveries WHERE repo_url=%s AND state=%s GROUP BY file_name,\
            snippet, state'
        params = (repo_url, state)
    else:
        query = 'SELECT file_name, snippet, count(id), state FROM discoveries \
            WHERE repo_url=%s GROUP BY file_name, snippet, state'
        params = (repo_url,)
    cursor = self.db.cursor()
    try:
        cursor.execute(query, params)
        return cursor.fetchall()
    except (TypeError, IndexError, Error):
        # Missing argument or database failure: roll back, return nothing.
        self.db.rollback()
        return []
def update_repo(self, url, last_commit):
""" Update the last | |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['InstanceArgs', 'Instance']
@pulumi.input_type
class InstanceArgs:
def __init__(__self__, *,
             layer_ids: pulumi.Input[Sequence[pulumi.Input[str]]],
             stack_id: pulumi.Input[str],
             agent_version: Optional[pulumi.Input[str]] = None,
             ami_id: Optional[pulumi.Input[str]] = None,
             architecture: Optional[pulumi.Input[str]] = None,
             auto_scaling_type: Optional[pulumi.Input[str]] = None,
             availability_zone: Optional[pulumi.Input[str]] = None,
             created_at: Optional[pulumi.Input[str]] = None,
             delete_ebs: Optional[pulumi.Input[bool]] = None,
             delete_eip: Optional[pulumi.Input[bool]] = None,
             ebs_block_devices: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceEbsBlockDeviceArgs']]]] = None,
             ebs_optimized: Optional[pulumi.Input[bool]] = None,
             ecs_cluster_arn: Optional[pulumi.Input[str]] = None,
             elastic_ip: Optional[pulumi.Input[str]] = None,
             ephemeral_block_devices: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceEphemeralBlockDeviceArgs']]]] = None,
             hostname: Optional[pulumi.Input[str]] = None,
             infrastructure_class: Optional[pulumi.Input[str]] = None,
             install_updates_on_boot: Optional[pulumi.Input[bool]] = None,
             instance_profile_arn: Optional[pulumi.Input[str]] = None,
             instance_type: Optional[pulumi.Input[str]] = None,
             last_service_error_id: Optional[pulumi.Input[str]] = None,
             os: Optional[pulumi.Input[str]] = None,
             platform: Optional[pulumi.Input[str]] = None,
             private_dns: Optional[pulumi.Input[str]] = None,
             private_ip: Optional[pulumi.Input[str]] = None,
             public_dns: Optional[pulumi.Input[str]] = None,
             public_ip: Optional[pulumi.Input[str]] = None,
             registered_by: Optional[pulumi.Input[str]] = None,
             reported_agent_version: Optional[pulumi.Input[str]] = None,
             reported_os_family: Optional[pulumi.Input[str]] = None,
             reported_os_name: Optional[pulumi.Input[str]] = None,
             reported_os_version: Optional[pulumi.Input[str]] = None,
             root_block_devices: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceRootBlockDeviceArgs']]]] = None,
             root_device_type: Optional[pulumi.Input[str]] = None,
             root_device_volume_id: Optional[pulumi.Input[str]] = None,
             security_group_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
             ssh_host_dsa_key_fingerprint: Optional[pulumi.Input[str]] = None,
             ssh_host_rsa_key_fingerprint: Optional[pulumi.Input[str]] = None,
             ssh_key_name: Optional[pulumi.Input[str]] = None,
             state: Optional[pulumi.Input[str]] = None,
             status: Optional[pulumi.Input[str]] = None,
             subnet_id: Optional[pulumi.Input[str]] = None,
             tenancy: Optional[pulumi.Input[str]] = None,
             virtualization_type: Optional[pulumi.Input[str]] = None):
    """
    The set of arguments for constructing a Instance resource.
    :param pulumi.Input[Sequence[pulumi.Input[str]]] layer_ids: The ids of the layers the instance will belong to.
    :param pulumi.Input[str] stack_id: The id of the stack the instance will belong to.
    :param pulumi.Input[str] agent_version: The AWS OpsWorks agent to install. Defaults to `"INHERIT"`.
    :param pulumi.Input[str] ami_id: The AMI to use for the instance. If an AMI is specified, `os` must be `"Custom"`.
    :param pulumi.Input[str] architecture: Machine architecture for created instances. Can be either `"x86_64"` (the default) or `"i386"`
    :param pulumi.Input[str] auto_scaling_type: Creates load-based or time-based instances. If set, can be either: `"load"` or `"timer"`.
    :param pulumi.Input[str] availability_zone: Name of the availability zone where instances will be created
           by default.
    :param pulumi.Input[Sequence[pulumi.Input['InstanceEbsBlockDeviceArgs']]] ebs_block_devices: Additional EBS block devices to attach to the
           instance. See Block Devices below for details.
    :param pulumi.Input[bool] ebs_optimized: If true, the launched EC2 instance will be EBS-optimized.
    :param pulumi.Input[Sequence[pulumi.Input['InstanceEphemeralBlockDeviceArgs']]] ephemeral_block_devices: Customize Ephemeral (also known as
           "Instance Store") volumes on the instance. See Block Devices below for details.
    :param pulumi.Input[str] hostname: The instance's host name.
    :param pulumi.Input[bool] install_updates_on_boot: Controls where to install OS and package updates when the instance boots. Defaults to `true`.
    :param pulumi.Input[str] instance_type: The type of instance to start
    :param pulumi.Input[str] os: Name of operating system that will be installed.
    :param pulumi.Input[str] private_dns: The private DNS name assigned to the instance. Can only be
           used inside the Amazon EC2, and only available if you've enabled DNS hostnames
           for your VPC
    :param pulumi.Input[str] private_ip: The private IP address assigned to the instance
    :param pulumi.Input[str] public_dns: The public DNS name assigned to the instance. For EC2-VPC, this
           is only available if you've enabled DNS hostnames for your VPC
    :param pulumi.Input[str] public_ip: The public IP address assigned to the instance, if applicable.
    :param pulumi.Input[Sequence[pulumi.Input['InstanceRootBlockDeviceArgs']]] root_block_devices: Customize details about the root block
           device of the instance. See Block Devices below for details.
    :param pulumi.Input[str] root_device_type: Name of the type of root device instances will have by default. Can be either `"ebs"` or `"instance-store"`
    :param pulumi.Input[Sequence[pulumi.Input[str]]] security_group_ids: The associated security groups.
    :param pulumi.Input[str] ssh_key_name: Name of the SSH keypair that instances will have by default.
    :param pulumi.Input[str] state: The desired state of the instance. Can be either `"running"` or `"stopped"`.
    :param pulumi.Input[str] subnet_id: Subnet ID to attach to
    :param pulumi.Input[str] tenancy: Instance tenancy to use. Can be one of `"default"`, `"dedicated"` or `"host"`
    :param pulumi.Input[str] virtualization_type: Keyword to choose what virtualization mode created instances
           will use. Can be either `"paravirtual"` or `"hvm"`.
    """
    # The two required arguments are always stored.
    pulumi.set(__self__, "layer_ids", layer_ids)
    pulumi.set(__self__, "stack_id", stack_id)
    # Optional arguments: store only those the caller actually supplied.
    # The dict preserves insertion order, so the pulumi.set calls happen
    # in the same sequence as the original per-argument `if` blocks.
    optional_args = {
        "agent_version": agent_version,
        "ami_id": ami_id,
        "architecture": architecture,
        "auto_scaling_type": auto_scaling_type,
        "availability_zone": availability_zone,
        "created_at": created_at,
        "delete_ebs": delete_ebs,
        "delete_eip": delete_eip,
        "ebs_block_devices": ebs_block_devices,
        "ebs_optimized": ebs_optimized,
        "ecs_cluster_arn": ecs_cluster_arn,
        "elastic_ip": elastic_ip,
        "ephemeral_block_devices": ephemeral_block_devices,
        "hostname": hostname,
        "infrastructure_class": infrastructure_class,
        "install_updates_on_boot": install_updates_on_boot,
        "instance_profile_arn": instance_profile_arn,
        "instance_type": instance_type,
        "last_service_error_id": last_service_error_id,
        "os": os,
        "platform": platform,
        "private_dns": private_dns,
        "private_ip": private_ip,
        "public_dns": public_dns,
        "public_ip": public_ip,
        "registered_by": registered_by,
        "reported_agent_version": reported_agent_version,
        "reported_os_family": reported_os_family,
        "reported_os_name": reported_os_name,
        "reported_os_version": reported_os_version,
        "root_block_devices": root_block_devices,
        "root_device_type": root_device_type,
        "root_device_volume_id": root_device_volume_id,
        "security_group_ids": security_group_ids,
        "ssh_host_dsa_key_fingerprint": ssh_host_dsa_key_fingerprint,
        "ssh_host_rsa_key_fingerprint": ssh_host_rsa_key_fingerprint,
        "ssh_key_name": ssh_key_name,
        "state": state,
        "status": status,
        "subnet_id": subnet_id,
        "tenancy": tenancy,
        "virtualization_type": virtualization_type,
    }
    for attr_name, attr_value in optional_args.items():
        if attr_value is not None:
            pulumi.set(__self__, attr_name, attr_value)
# Required property: `layer_ids` has no default in InstanceArgs.__init__.
@property
@pulumi.getter(name="layerIds")
def layer_ids(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
    """
    The ids of the layers the instance will belong to.
    """
    return pulumi.get(self, "layer_ids")

@layer_ids.setter
def layer_ids(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
    pulumi.set(self, "layer_ids", value)

# Required property: `stack_id` has no default in InstanceArgs.__init__.
@property
@pulumi.getter(name="stackId")
def stack_id(self) -> pulumi.Input[str]:
    """
    The id of the stack the instance will belong to.
    """
    return pulumi.get(self, "stack_id")

@stack_id.setter
def stack_id(self, value: pulumi.Input[str]):
    pulumi.set(self, "stack_id", value)

@property
@pulumi.getter(name="agentVersion")
def agent_version(self) -> Optional[pulumi.Input[str]]:
    """
    The AWS OpsWorks agent to install. Defaults to `"INHERIT"`.
    """
    return pulumi.get(self, "agent_version")

@agent_version.setter
def agent_version(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "agent_version", value)

@property
@pulumi.getter(name="amiId")
def ami_id(self) -> Optional[pulumi.Input[str]]:
    """
    The AMI to use for the instance. If an AMI is specified, `os` must be `"Custom"`.
    """
    return pulumi.get(self, "ami_id")

@ami_id.setter
def ami_id(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "ami_id", value)

# No name= override: the snake_case name already matches the wire name.
@property
@pulumi.getter
def architecture(self) -> Optional[pulumi.Input[str]]:
    """
    Machine architecture for created instances. Can be either `"x86_64"` (the default) or `"i386"`
    """
    return pulumi.get(self, "architecture")

@architecture.setter
def architecture(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "architecture", value)

@property
@pulumi.getter(name="autoScalingType")
def auto_scaling_type(self) -> Optional[pulumi.Input[str]]:
    """
    Creates load-based or time-based instances. If set, can be either: `"load"` or `"timer"`.
    """
    return pulumi.get(self, "auto_scaling_type")

@auto_scaling_type.setter
def auto_scaling_type(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "auto_scaling_type", value)

@property
@pulumi.getter(name="availabilityZone")
def availability_zone(self) -> Optional[pulumi.Input[str]]:
    """
    Name of the availability zone where instances will be created
    by default.
    """
    return pulumi.get(self, "availability_zone")

@availability_zone.setter
def availability_zone(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "availability_zone", value)
@property
@pulumi.getter(name="createdAt")
def created_at(self) -> Optional[pulumi.Input[str]]:
    """
    Creation timestamp of the instance (assumed from the name — not
    documented upstream; confirm against the AWS OpsWorks API).
    """
    return pulumi.get(self, "created_at")

@created_at.setter
def created_at(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "created_at", value)

@property
@pulumi.getter(name="deleteEbs")
def delete_ebs(self) -> Optional[pulumi.Input[bool]]:
    """
    NOTE(review): undocumented upstream — presumably whether EBS volumes
    are deleted when the instance is destroyed; verify before relying on it.
    """
    return pulumi.get(self, "delete_ebs")

@delete_ebs.setter
def delete_ebs(self, value: Optional[pulumi.Input[bool]]):
    pulumi.set(self, "delete_ebs", value)

@property
@pulumi.getter(name="deleteEip")
def delete_eip(self) -> Optional[pulumi.Input[bool]]:
    """
    NOTE(review): undocumented upstream — presumably whether the Elastic IP
    is released when the instance is destroyed; verify before relying on it.
    """
    return pulumi.get(self, "delete_eip")

@delete_eip.setter
def delete_eip(self, value: Optional[pulumi.Input[bool]]):
    pulumi.set(self, "delete_eip", value)

@property
@pulumi.getter(name="ebsBlockDevices")
def ebs_block_devices(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['InstanceEbsBlockDeviceArgs']]]]:
    """
    Additional EBS block devices to attach to the
    instance. See Block Devices below for details.
    """
    return pulumi.get(self, "ebs_block_devices")
@ebs_block_devices.setter
def ebs_block_devices(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceEbsBlockDeviceArgs']]]]):
pulumi.set(self, "ebs_block_devices", | |
= X[:, None]
_check_assertions(X, binary_operators, unary_operators,
use_custom_variable_names, variable_names, weights, y)
if len(X) > 10000 and not batching:
warnings.warn("Note: you are running with more than 10,000 datapoints. You should consider turning on batching (https://pysr.readthedocs.io/en/latest/docs/options/#batching). You should also reconsider if you need that many datapoints. Unless you have a large amount of noise (in which case you should smooth your dataset first), generally < 10,000 datapoints is enough to find a functional form with symbolic regression. More datapoints will lower the search speed.")
X, variable_names = _handle_feature_selection(
X, select_k_features,
use_custom_variable_names, variable_names, y
)
if maxdepth is None:
maxdepth = maxsize
if populations is None:
populations = procs
if isinstance(binary_operators, str):
binary_operators = [binary_operators]
if isinstance(unary_operators, str):
unary_operators = [unary_operators]
if X is None:
X, y = _using_test_input(X, test, y)
kwargs = dict(X=X, y=y, weights=weights,
alpha=alpha, annealing=annealing, batchSize=batchSize,
batching=batching, binary_operators=binary_operators,
fast_cycle=fast_cycle,
fractionReplaced=fractionReplaced,
ncyclesperiteration=ncyclesperiteration,
niterations=niterations, npop=npop, topn=topn,
verbosity=verbosity, progress=progress, update=update,
julia_optimization=julia_optimization, timeout=timeout,
fractionReplacedHof=fractionReplacedHof,
hofMigration=hofMigration, maxdepth=maxdepth,
maxsize=maxsize, migration=migration, nrestarts=nrestarts,
parsimony=parsimony, perturbationFactor=perturbationFactor,
populations=populations, procs=procs,
shouldOptimizeConstants=shouldOptimizeConstants,
unary_operators=unary_operators, useFrequency=useFrequency,
use_custom_variable_names=use_custom_variable_names,
variable_names=variable_names, warmupMaxsizeBy=warmupMaxsizeBy,
weightAddNode=weightAddNode,
weightDeleteNode=weightDeleteNode,
weightDoNothing=weightDoNothing,
weightInsertNode=weightInsertNode,
weightMutateConstant=weightMutateConstant,
weightMutateOperator=weightMutateOperator,
weightRandomize=weightRandomize,
weightSimplify=weightSimplify,
constraints=constraints,
extra_sympy_mappings=extra_sympy_mappings,
julia_project=julia_project, loss=loss,
output_jax_format=output_jax_format)
kwargs = {**_set_paths(tempdir), **kwargs}
if temp_equation_file:
equation_file = kwargs['tmpdir'] / f'hall_of_fame.csv'
elif equation_file is None:
date_time = datetime.now().strftime("%Y-%m-%d_%H%M%S.%f")[:-3]
equation_file = 'hall_of_fame_' + date_time + '.csv'
kwargs = {**dict(equation_file=equation_file), **kwargs}
pkg_directory = kwargs['pkg_directory']
kwargs['need_install'] = False
if not (pkg_directory / 'Manifest.toml').is_file():
kwargs['need_install'] = (not user_input) or _yesno("I will install Julia packages using PySR's Project.toml file. OK?")
if kwargs['need_install']:
print("OK. I will install at launch.")
assert update
kwargs['def_hyperparams'] = _create_inline_operators(**kwargs)
_handle_constraints(**kwargs)
kwargs['constraints_str'] = _make_constraints_str(**kwargs)
kwargs['def_hyperparams'] = _make_hyperparams_julia_str(**kwargs)
kwargs['def_datasets'] = _make_datasets_julia_str(**kwargs)
_create_julia_files(**kwargs)
_final_pysr_process(**kwargs)
_set_globals(**kwargs)
equations = get_hof(**kwargs)
if delete_tempfiles:
shutil.rmtree(kwargs['tmpdir'])
return equations
def _set_globals(X, equation_file, extra_sympy_mappings, variable_names, **kwargs):
global global_n_features
global global_equation_file
global global_variable_names
global global_extra_sympy_mappings
global_n_features = X.shape[1]
global_equation_file = equation_file
global_variable_names = variable_names
global_extra_sympy_mappings = extra_sympy_mappings
def _final_pysr_process(julia_optimization, runfile_filename, timeout, **kwargs):
    """Launch the generated Julia runfile, optionally wrapped in the Unix
    `timeout` utility; remaining kwargs are forwarded to _cmd_runner."""
    command = ['julia', f'-O{julia_optimization:d}', str(runfile_filename)]
    if timeout is not None:
        # Prepend so `timeout` supervises the whole julia invocation.
        command = ['timeout', f'{timeout}'] + command
    _cmd_runner(command, **kwargs)
def _cmd_runner(command, **kwargs):
if kwargs['verbosity'] > 0:
print("Running on", ' '.join(command))
process = subprocess.Popen(command, stdout=subprocess.PIPE, bufsize=-1)
try:
while True:
line = process.stdout.readline()
if not line: break
decoded_line = (line.decode('utf-8')
.replace('\\033[K', '\033[K')
.replace('\\033[1A', '\033[1A')
.replace('\\033[1B', '\033[1B')
.replace('\\r', '\r'))
print(decoded_line, end='')
process.stdout.close()
process.wait()
except KeyboardInterrupt:
print("Killing process... will return when done.")
process.kill()
def _create_julia_files(dataset_filename, def_datasets, hyperparam_filename, def_hyperparams,
                        fractionReplaced, ncyclesperiteration, niterations, npop,
                        runfile_filename, topn, verbosity, julia_project, procs, weights,
                        X, variable_names, pkg_directory, need_install, update, **kwargs):
    """Write the three generated Julia files for a run: hyperparameter
    definitions, the dataset loader, and the runfile that activates the
    project and starts EquationSearch."""
    with open(hyperparam_filename, 'w') as f:
        f.write(def_hyperparams + '\n')
    with open(dataset_filename, 'w') as f:
        f.write(def_datasets + '\n')
    with open(runfile_filename, 'w') as f:
        # Use the bundled project unless the caller supplied their own.
        project = pkg_directory if julia_project is None else Path(julia_project)
        f.write('import Pkg\n')
        f.write(f'Pkg.activate("{_escape_filename(project)}")\n')
        if need_install:
            # First launch: resolve, fetch, and precompile dependencies.
            f.write('Pkg.instantiate()\n')
            f.write('Pkg.update()\n')
            f.write('Pkg.precompile()\n')
        elif update:
            f.write('Pkg.update()\n')
        f.write('using SymbolicRegression\n')
        f.write(f'include("{_escape_filename(hyperparam_filename)}")\n')
        f.write(f'include("{_escape_filename(dataset_filename)}")\n')
        # Default variable names are x0..x{d-1} when none were provided.
        if variable_names:
            quoted_names = ['"' + vname + '"' for vname in variable_names]
        else:
            quoted_names = [f'"x{i}"' for i in range(X.shape[1])]
        varMap = "[" + ",".join(quoted_names) + "]"
        weight_arg = 'weights=weights, ' if weights is not None else ''
        f.write(f'EquationSearch(X, y, {weight_arg}niterations={niterations:d}, '
                f'varMap={varMap}, options=options, numprocs={procs})\n')
def _make_datasets_julia_str(X, X_filename, weights, weights_filename, y, y_filename, **kwargs):
    """Serialize X, y (and optional weights) to CSV files and return the
    Julia source that reads them back via DelimitedFiles."""
    # Float32 on disk matches the precision used on the Julia side.
    np.savetxt(X_filename, X.astype(np.float32), delimiter=',')
    np.savetxt(y_filename, y.reshape(-1, 1).astype(np.float32), delimiter=',')
    if weights is not None:
        np.savetxt(weights_filename, weights.reshape(-1, 1), delimiter=',')
    julia_lines = [
        "using DelimitedFiles",
        f"X = copy(transpose(readdlm(\"{_escape_filename(X_filename)}\", ',', Float32, '\\n')))",
        f"y = readdlm(\"{_escape_filename(y_filename)}\", ',', Float32, '\\n')[:, 1]",
    ]
    if weights is not None:
        julia_lines.append(
            f"weights = readdlm(\"{_escape_filename(weights_filename)}\", ',', Float32, '\\n')[:, 1]")
    return "\n".join(julia_lines)
def _make_hyperparams_julia_str(X, alpha, annealing, batchSize, batching, binary_operators, constraints_str,
                                def_hyperparams, equation_file, fast_cycle, fractionReplacedHof, hofMigration,
                                maxdepth, maxsize, migration, nrestarts, npop,
                                parsimony, perturbationFactor, populations, procs, shouldOptimizeConstants,
                                unary_operators, useFrequency, use_custom_variable_names,
                                variable_names, warmupMaxsizeBy, weightAddNode,
                                ncyclesperiteration, fractionReplaced, topn, verbosity, progress, loss,
                                weightDeleteNode, weightDoNothing, weightInsertNode, weightMutateConstant,
                                weightMutateOperator, weightRandomize, weightSimplify, weights, **kwargs):
    """Render the Julia source string that defines the operator aliases and
    the SymbolicRegression.Options object for this run.

    Returns `def_hyperparams` with the generated Julia code appended."""
    # Terminal width is forwarded to Julia so its progress bar fits the screen.
    try:
        term_width = shutil.get_terminal_size().columns
    except Exception:  # was a bare `except:`, which also swallowed SystemExit
        # Fallback: `stty size` prints b"rows cols". The split() result is
        # bytes; it must be converted to int, otherwise the `{term_width:d}`
        # format spec below raises TypeError.
        _, term_width = subprocess.check_output(['stty', 'size']).split()
        term_width = int(term_width)

    def tuple_fix(ops):
        # Julia tuples of length 1 need a trailing comma: `(sin,)`.
        if len(ops) > 1:
            return ', '.join(ops)
        elif len(ops) == 0:
            return ''
        else:
            return ops[0] + ','

    def_hyperparams += f"""\n
plus=(+)
sub=(-)
mult=(*)
square=SymbolicRegression.square
cube=SymbolicRegression.cube
pow=(^)
div=(/)
logm=SymbolicRegression.logm
logm2=SymbolicRegression.logm2
logm10=SymbolicRegression.logm10
sqrtm=SymbolicRegression.sqrtm
neg=SymbolicRegression.neg
greater=SymbolicRegression.greater
relu=SymbolicRegression.relu
logical_or=SymbolicRegression.logical_or
logical_and=SymbolicRegression.logical_and
_custom_loss = {loss}
options = SymbolicRegression.Options(binary_operators={'(' + tuple_fix(binary_operators) + ')'},
unary_operators={'(' + tuple_fix(unary_operators) + ')'},
{constraints_str}
parsimony={parsimony:f}f0,
loss=_custom_loss,
alpha={alpha:f}f0,
maxsize={maxsize:d},
maxdepth={maxdepth:d},
fast_cycle={'true' if fast_cycle else 'false'},
migration={'true' if migration else 'false'},
hofMigration={'true' if hofMigration else 'false'},
fractionReplacedHof={fractionReplacedHof}f0,
shouldOptimizeConstants={'true' if shouldOptimizeConstants else 'false'},
hofFile="{_escape_filename(equation_file)}",
npopulations={populations:d},
nrestarts={nrestarts:d},
perturbationFactor={perturbationFactor:f}f0,
annealing={"true" if annealing else "false"},
batching={"true" if batching else "false"},
batchSize={min([batchSize, len(X)]) if batching else len(X):d},
mutationWeights=[
{weightMutateConstant:f},
{weightMutateOperator:f},
{weightAddNode:f},
{weightInsertNode:f},
{weightDeleteNode:f},
{weightSimplify:f},
{weightRandomize:f},
{weightDoNothing:f}
],
warmupMaxsizeBy={warmupMaxsizeBy:f}f0,
useFrequency={"true" if useFrequency else "false"},
npop={npop:d},
ncyclesperiteration={ncyclesperiteration:d},
fractionReplaced={fractionReplaced:f}f0,
topn={topn:d},
verbosity=round(Int32, {verbosity:f}),
progress={'true' if progress else 'false'},
terminal_width={term_width:d}
"""
    def_hyperparams += '\n)'
    return def_hyperparams
def _make_constraints_str(binary_operators, constraints, unary_operators, **kwargs):
constraints_str = "una_constraints = ["
first = True
for op in unary_operators:
val = constraints[op]
if not first:
constraints_str += ", "
constraints_str += f"{val:d}"
first = False
constraints_str += """],
bin_constraints = ["""
first = True
for op in binary_operators:
tup = constraints[op]
if not first:
constraints_str += ", "
constraints_str += f"({tup[0]:d}, {tup[1]:d})"
first = False
constraints_str += "],"
return constraints_str
def _handle_constraints(binary_operators, constraints, unary_operators, **kwargs):
for op in unary_operators:
if op not in constraints:
constraints[op] = -1
for op in binary_operators:
if op not in constraints:
constraints[op] = (-1, -1)
if op in ['plus', 'sub']:
if constraints[op][0] != constraints[op][1]:
raise NotImplementedError(
"You need equal constraints on both sides for - and *, due to simplification strategies.")
elif op == 'mult':
# Make sure the complex expression is in the left side.
if constraints[op][0] == -1:
continue
elif constraints[op][1] == -1 or constraints[op][0] < constraints[op][1]:
constraints[op][0], constraints[op][1] = constraints[op][1], constraints[op][0]
def _create_inline_operators(binary_operators, unary_operators, **kwargs):
def_hyperparams = ""
for op_list in [binary_operators, unary_operators]:
for i in range(len(op_list)):
op = op_list[i]
is_user_defined_operator = '(' in op
if is_user_defined_operator:
def_hyperparams += op + "\n"
# Cut off from the first non-alphanumeric char:
first_non_char = [
j for j in range(len(op))
if not (op[j].isalpha() or op[j].isdigit())][0]
function_name = op[:first_non_char]
op_list[i] = function_name
return def_hyperparams
def _using_test_input(X, test, y):
if test == 'simple1':
eval_str = "np.sign(X[:, 2])*np.abs(X[:, 2])**2.5 + 5*np.cos(X[:, 3]) - 5"
elif test == 'simple2':
eval_str = "np.sign(X[:, 2])*np.abs(X[:, 2])**3.5 + 1/(np.abs(X[:, 0])+1)"
elif test == 'simple3':
eval_str = "np.exp(X[:, 0]/2) + 12.0 + np.log(np.abs(X[:, 0])*10 + 1)"
elif test == 'simple4':
eval_str = "1.0 + 3*X[:, 0]**2 - 0.5*X[:, 0]**3 + 0.1*X[:, 0]**4"
elif test == 'simple5':
eval_str = "(np.exp(X[:, 3]) + 3)/(np.abs(X[:, 1]) + np.cos(X[:, 0]) + 1.1)"
X = np.random.randn(100, 5) * 3
y = eval(eval_str)
print("Running on", eval_str)
return X, y
def _handle_feature_selection(X, select_k_features, use_custom_variable_names, variable_names, y):
if select_k_features is not None:
selection = run_feature_selection(X, y, select_k_features)
print(f"Using features {selection}")
X = X[:, selection]
if use_custom_variable_names:
variable_names = [variable_names[selection[i]] for i in range(len(selection))]
return X, variable_names
def _set_paths(tempdir):
# System-independent paths
pkg_directory = Path(__file__).parents[1]
default_project_file = pkg_directory / "Project.toml"
tmpdir = Path(tempfile.mkdtemp(dir=tempdir))
hyperparam_filename = tmpdir / f'hyperparams.jl'
dataset_filename = tmpdir / f'dataset.jl'
runfile_filename = tmpdir / f'runfile.jl'
X_filename = tmpdir / "X.csv"
y_filename = tmpdir / "y.csv"
weights_filename = tmpdir / "weights.csv"
return dict(pkg_directory=pkg_directory,
default_project_file=default_project_file,
X_filename=X_filename,
dataset_filename=dataset_filename,
hyperparam_filename=hyperparam_filename,
runfile_filename=runfile_filename, tmpdir=tmpdir,
weights_filename=weights_filename, y_filename=y_filename)
def _check_assertions(X, binary_operators, unary_operators, use_custom_variable_names, variable_names, weights, y):
# Check for potential errors before they happen
assert len(unary_operators) + len(binary_operators) > 0
assert len(X.shape) == 2
assert len(y.shape) == 1
assert X.shape[0] == y.shape[0]
if weights is not None:
assert len(weights.shape) == 1
assert X.shape[0] == weights.shape[0]
if use_custom_variable_names:
assert len(variable_names) == X.shape[1]
def run_feature_selection(X, y, select_k_features):
    """Use a gradient boosting tree regressor as a proxy for finding
    the k most important features in X, returning indices for those
    features as output."""
    # Only import what is used (RandomForestRegressor and SelectKBest were
    # imported but never referenced).
    from sklearn.ensemble import GradientBoostingRegressor
    from sklearn.feature_selection import SelectFromModel

    # Shallow boosted stumps rank features by total impurity reduction;
    # a RandomForestRegressor would be a drop-in alternative.
    clf = GradientBoostingRegressor(n_estimators=100, learning_rate=0.1,
                                    max_depth=1, random_state=0, loss='ls')
    clf.fit(X, y)
    # threshold=-inf disables the score cutoff so exactly max_features
    # features are kept.
    selector = SelectFromModel(clf, threshold=-np.inf,
                               max_features=select_k_features, prefit=True)
    return selector.get_support(indices=True)
def get_hof(equation_file=None, n_features=None, variable_names=None,
            extra_sympy_mappings=None, output_jax_format=False, **kwargs):
    """Get the equations from a hall of fame file. If no arguments
    entered, the ones used previously from a call to PySR will be used."""
    # Module-level cache of the settings used by the most recent run: any
    # argument left as None falls back to the cached value, and the cache is
    # then refreshed with whatever values end up being used.
    global global_n_features
    global global_equation_file
    global global_variable_names
    global global_extra_sympy_mappings
    if equation_file is None: equation_file = global_equation_file
    if n_features is None: n_features = global_n_features
    if variable_names is None: variable_names = global_variable_names
    if extra_sympy_mappings is None: extra_sympy_mappings = global_extra_sympy_mappings
    global_equation_file = equation_file
    global_n_features = n_features
    global_variable_names = variable_names
    global_extra_sympy_mappings = extra_sympy_mappings
    try:
        # Read the '|'-separated backup copy of the hall-of-fame file.
        # NOTE(review): assumes the backend has written <equation_file>.bkup
        # — confirm against the code that launches the Julia search.
        output = pd.read_csv(str(equation_file) + '.bkup', sep="|")
    except FileNotFoundError:
        print("Couldn't find equation file!")
        return
life easier! If you'd like to read more on how the compiler operates, check the docs [here](https://www.tensorflow.org/api_docs/python/tf/keras/Model#compile).
# In[69]:
# Adam + binary cross-entropy for the single-output classifier; track accuracy.
happy_model.compile(
    optimizer='adam',
    loss='binary_crossentropy',
    metrics=['accuracy'],
)
# It's time to check your model's parameters with the `.summary()` method. This will display the types of layers you have, the shape of the outputs, and how many parameters are in each layer.
# In[70]:
# Print layer types, output shapes and per-layer parameter counts.
happy_model.summary()
# <a name='3-2'></a>
# ### 3.2 - Train and Evaluate the Model
#
# After creating the model, compiling it with your choice of optimizer and loss function, and doing a sanity check on its contents, you are now ready to build!
#
# Simply call `.fit()` to train. That's it! No need for mini-batching, saving, or complex backpropagation computations. That's all been done for you, as you're using a TensorFlow dataset with the batches specified already. You do have the option to specify epoch number or minibatch size if you like (for example, in the case of an un-batched dataset).
# In[71]:
# Train for 10 epochs on minibatches of 16 images.
happy_model.fit(X_train, Y_train, epochs=10, batch_size=16)
# After that completes, just use `.evaluate()` to evaluate against your test set. This function will print the value of the loss function and the performance metrics specified during the compilation of the model. In this case, the `binary_crossentropy` and the `accuracy` respectively.
# In[72]:
# Report test-set loss (binary cross-entropy) and accuracy.
happy_model.evaluate(X_test, Y_test)
# Easy, right? But what if you need to build a model with shared layers, branches, or multiple inputs and outputs? This is where Sequential, with its beautifully simple yet limited functionality, won't be able to help you.
#
# Next up: Enter the Functional API, your slightly more complex, highly flexible friend.
# <a name='4'></a>
# ## 4 - The Functional API
# Welcome to the second half of the assignment, where you'll use Keras' flexible [Functional API](https://www.tensorflow.org/guide/keras/functional) to build a ConvNet that can differentiate between 6 sign language digits.
#
# The Functional API can handle models with non-linear topology, shared layers, as well as layers with multiple inputs or outputs. Imagine that, where the Sequential API requires the model to move in a linear fashion through its layers, the Functional API allows much more flexibility. Where Sequential is a straight line, a Functional model is a graph, where the nodes of the layers can connect in many more ways than one.
#
# In the visual example below, the single possible direction of movement of a Sequential model is shown in contrast to a skip connection, which is just one of the many ways a Functional model can be constructed. A skip connection, as you might have guessed, skips some layer in the network and feeds the output to a later layer in the network. Don't worry, you'll be spending more time with skip connections very soon!
# <img src="images/seq_vs_func.png" style="width:350px;height:200px;">
# <a name='4-1'></a>
# ### 4.1 - Load the SIGNS Dataset
#
# As a reminder, the SIGNS dataset is a collection of 6 signs representing numbers from 0 to 5.
# In[73]:
# Loading the data (signs): raw train/test images with their 0-5 labels
X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_signs_dataset()
# <img src="images/SIGNS.png" style="width:800px;height:300px;">
#
# The next cell will show you an example of a labelled image in the dataset. Feel free to change the value of `index` below and re-run to see different examples.
# In[74]:
# Example of an image from the dataset
index = 9
plt.imshow(X_train_orig[index])
# np.squeeze drops the singleton axis so the label prints as a scalar
# (presumably Y_train_orig is shaped (1, m) — verify against the loader)
print ("y = " + str(np.squeeze(Y_train_orig[:, index])))
# <a name='4-2'></a>
# ### 4.2 - Split the Data into Train/Test Sets
#
# In Course 2, you built a fully-connected network for this dataset. But since this is an image dataset, it is more natural to apply a ConvNet to it.
#
# To get started, let's examine the shapes of your data.
# In[75]:
# Scale pixel values into [0, 1]
X_train = X_train_orig/255.
X_test = X_test_orig/255.
# One-hot encode the 6 classes; transpose so rows are examples
Y_train = convert_to_one_hot(Y_train_orig, 6).T
Y_test = convert_to_one_hot(Y_test_orig, 6).T
print ("number of training examples = " + str(X_train.shape[0]))
print ("number of test examples = " + str(X_test.shape[0]))
print ("X_train shape: " + str(X_train.shape))
print ("Y_train shape: " + str(Y_train.shape))
print ("X_test shape: " + str(X_test.shape))
print ("Y_test shape: " + str(Y_test.shape))
# <a name='4-3'></a>
# ### 4.3 - Forward Propagation
#
# In TensorFlow, there are built-in functions that implement the convolution steps for you. By now, you should be familiar with how TensorFlow builds computational graphs. In the [Functional API](https://www.tensorflow.org/guide/keras/functional), you create a graph of layers. This is what allows such great flexibility.
#
# However, the following model could also be defined using the Sequential API since the information flow is on a single line. But don't deviate. What we want you to learn is to use the functional API.
#
# Begin building your graph of layers by creating an input node that functions as a callable object:
#
# - **input_img = tf.keras.Input(shape=input_shape):**
#
# Then, create a new node in the graph of layers by calling a layer on the `input_img` object:
#
# - **tf.keras.layers.Conv2D(filters= ... , kernel_size= ... , padding='same')(input_img):** Read the full documentation on [Conv2D](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Conv2D).
#
# - **tf.keras.layers.MaxPool2D(pool_size=(f, f), strides=(s, s), padding='same'):** `MaxPool2D()` downsamples your input using a window of size (f, f) and strides of size (s, s) to carry out max pooling over each window. For max pooling, you usually operate on a single example at a time and a single channel at a time. Read the full documentation on [MaxPool2D](https://www.tensorflow.org/api_docs/python/tf/keras/layers/MaxPool2D).
#
# - **tf.keras.layers.ReLU():** computes the elementwise ReLU of Z (which can be any shape). You can read the full documentation on [ReLU](https://www.tensorflow.org/api_docs/python/tf/keras/layers/ReLU).
#
# - **tf.keras.layers.Flatten()**: given a tensor "P", this function takes each training (or test) example in the batch and flattens it into a 1D vector.
#
# * If a tensor P has the shape (batch_size,h,w,c), it returns a flattened tensor with shape (batch_size, k), where $k=h \times w \times c$. "k" equals the product of all the dimension sizes other than the first dimension.
#
# * For example, given a tensor with dimensions [100, 2, 3, 4], it flattens the tensor to be of shape [100, 24], where 24 = 2 * 3 * 4. You can read the full documentation on [Flatten](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Flatten).
#
# - **tf.keras.layers.Dense(units= ... , activation='softmax')(F):** given the flattened input F, it returns the output computed using a fully connected layer. You can read the full documentation on [Dense](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Dense).
#
# In the last function above (`tf.keras.layers.Dense()`), the fully connected layer automatically initializes weights in the graph and keeps on training them as you train the model. Hence, you did not need to initialize those weights when initializing the parameters.
#
# Lastly, before creating the model, you'll need to define the output using the last of the function's compositions (in this example, a Dense layer):
#
# - **outputs = tf.keras.layers.Dense(units=6, activation='softmax')(F)**
#
#
# #### Window, kernel, filter, pool
#
# The words "kernel" and "filter" are used to refer to the same thing. The word "filter" accounts for the amount of "kernels" that will be used in a single convolution layer. "Pool" is the name of the operation that takes the max or average value of the kernels.
#
# This is why the parameter `pool_size` refers to `kernel_size`, and you use `(f,f)` to refer to the filter size.
#
# Pool size and kernel size refer to the same thing in different objects - They refer to the shape of the window where the operation takes place.
# <a name='ex-2'></a>
# ### Exercise 2 - convolutional_model
#
# Implement the `convolutional_model` function below to build the following model: `CONV2D -> RELU -> MAXPOOL -> CONV2D -> RELU -> MAXPOOL -> FLATTEN -> DENSE`. Use the functions above!
#
# Also, plug in the following parameters for all the steps:
#
# - [Conv2D](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Conv2D): Use 8 4 by 4 filters, stride 1, padding is "SAME"
# - [ReLU](https://www.tensorflow.org/api_docs/python/tf/keras/layers/ReLU)
# - [MaxPool2D](https://www.tensorflow.org/api_docs/python/tf/keras/layers/MaxPool2D): Use an 8 by 8 filter size and an 8 by 8 stride, padding is "SAME"
# - **Conv2D**: Use 16 2 by 2 filters, stride 1, padding is "SAME"
# - **ReLU**
# - **MaxPool2D**: Use a 4 by 4 filter size and a 4 by 4 | |
'due', 'id']) for header in headers_as_dicts ]
matching_forms = []
matching_forms += add_and_replace_objects(headers_to_match_against, {"id": "matched_to"}, {"value": -100})
# by entering -100 we are creating a negative payment which has a positive balance
# so we match the positive balance of 100.00
matching_data = create_formset_data(match_form_prefix, matching_forms)
line_data = create_formset_data(LINE_FORM_PREFIX, [])
data.update(matching_data)
data.update(line_data)
# WE ARE CREATING A NEW INVOICE FOR 2400.00 and matching against -1000 worth of invoices (across 10 invoices)
response = self.client.post(self.url, data)
self.assertEqual(response.status_code, 302)
headers = PurchaseHeader.objects.all()
self.assertEqual(len(headers), 11)
header = headers[0]
self.assertEqual(
header.total,
-2400
)
self.assertEqual(
header.goods,
-2400
)
self.assertEqual(
header.vat,
0
)
self.assertEqual(
header.ref,
self.ref
)
self.assertEqual(
header.paid,
-1000
)
self.assertEqual(
header.due,
header.total + 1000
)
lines = PurchaseLine.objects.all()
self.assertEqual(len(lines), 0)
matched_headers = header.matched_to.all()
for _header in matched_headers: # _header to avoid overwriting header above
self.assertEqual(
_header.paid,
100
)
matches = PurchaseMatching.objects.all()
self.assertEqual(len(matches), 10)
seen = {}
for match in matches:
if match.matched_to_id in seen:
self.fail("Matching record with same matched_to found")
seen[match.matched_to_id] = True # any value will do
self.assertEqual(
match.matched_by,
header
)
self.assertEqual(
match.value,
100
)
self.assertEqual(
len(VatTransaction.objects.all()),
0
)
# CORRECT USAGE
def test_header_total_is_non_zero_and_with_matching_transactions_equal_to_total(self):
self.client.force_login(self.user)
data = {}
header_data = create_header(
HEADER_FORM_PREFIX,
{
"cash_book": self.cash_book.pk,
"type": "pp",
"supplier": self.supplier.pk,
"period": self.period.pk,
"ref": self.ref,
"date": self.date,
"total": 2400
}
)
data.update(header_data)
headers_to_match_against = create_payments(self.supplier, "inv", 24,self.period, -100) # Negative payments of 2400 on account
headers_to_match_against_orig = headers_to_match_against
headers_as_dicts = [ to_dict(header) for header in headers_to_match_against ]
headers_to_match_against = [ get_fields(header, ['type', 'ref', 'total', 'paid', 'due', 'id']) for header in headers_as_dicts ]
matching_forms = []
matching_forms += add_and_replace_objects(headers_to_match_against, {"id": "matched_to"}, {"value": -100})
matching_data = create_formset_data(match_form_prefix, matching_forms)
line_data = create_formset_data(LINE_FORM_PREFIX, [])
data.update(matching_data)
data.update(line_data)
# WE ARE CREATING A NEW INVOICE FOR 2400.00 and matching against -1000 worth of invoices (across 10 invoices)
response = self.client.post(self.url, data)
self.assertEqual(response.status_code, 302)
headers = PurchaseHeader.objects.all().order_by("-pk")
# this was seemingly ordering by primary key in ascending order but now does not. So added order_by('-pk').
self.assertEqual(len(headers), 25)
header = headers[0]
self.assertEqual(
header.total,
-2400
)
self.assertEqual(
header.goods,
-2400
)
self.assertEqual(
header.vat,
0
)
self.assertEqual(
header.ref,
self.ref
)
self.assertEqual(
header.paid,
-2400
)
self.assertEqual(
header.due,
0
)
lines = PurchaseLine.objects.all()
self.assertEqual(len(lines), 0)
matched_headers = header.matched_to.all()
for _header in matched_headers: # _header to avoid overwriting header above
self.assertEqual(
_header.paid,
100
)
matches = PurchaseMatching.objects.all()
self.assertEqual(len(matches), 24)
seen = {}
for match in matches:
if match.matched_to_id in seen:
self.fail("Matching record with same matched_to found")
seen[match.matched_to_id] = True # any value will do
self.assertEqual(
match.matched_by,
header
)
self.assertEqual(
match.value,
100
)
self.assertEqual(
len(VatTransaction.objects.all()),
0
)
# INCORRECT USAGE
def test_header_total_is_non_zero_and_with_matching_transactions_above_the_total(self):
self.client.force_login(self.user)
data = {}
header_data = create_header(
HEADER_FORM_PREFIX,
{
"cash_book": self.cash_book.pk,
"type": "pp",
"supplier": self.supplier.pk,
"period": self.period.pk,
"ref": self.ref,
"date": self.date,
"due_date": self.due_date,
"total": 2400
}
)
data.update(header_data)
headers_to_match_against = create_payments(self.supplier, "inv", 25,self.period, -100)
headers_to_match_against_orig = headers_to_match_against
headers_as_dicts = [ to_dict(header) for header in headers_to_match_against ]
headers_to_match_against = [ get_fields(header, ['type', 'ref', 'total', 'paid', 'due', 'id']) for header in headers_as_dicts ]
matching_forms = []
matching_forms += add_and_replace_objects(headers_to_match_against, {"id": "matched_to"}, {"value": -100})
matching_data = create_formset_data(match_form_prefix, matching_forms)
line_data = create_formset_data(LINE_FORM_PREFIX, [])
data.update(matching_data)
data.update(line_data)
# WE ARE CREATING A NEW INVOICE FOR 2400.00 and matching against -1000 worth of invoices (across 10 invoices)
response = self.client.post(self.url, data)
self.assertEqual(response.status_code, 200)
self.assertEqual(
len(PurchaseHeader.objects.all()),
25 # the 25 trans created in set up; so not including the one we tried to just create
)
self.assertEqual(
len(PurchaseLine.objects.all()),
0
)
self.assertEqual(
len(PurchaseMatching.objects.all()),
0
)
self.assertContains(
response,
'<li class="py-1">Please ensure the total of the transactions you are matching is between 0 and 2400.00</li>',
html=True
)
self.assertEqual(
len(VatTransaction.objects.all()),
0
)
# INCORRECT - Cannot match header to matching transactions with same sign
def test_header_total_is_non_zero_and_with_matching_transactions_have_same_sign_as_new_header(self):
self.client.force_login(self.user)
data = {}
header_data = create_header(
HEADER_FORM_PREFIX,
{
"cash_book": self.cash_book.pk,
"type": "pp",
"supplier": self.supplier.pk,
"period": self.period.pk,
"ref": self.ref,
"date": self.date,
"due_date": self.due_date,
"total": 100
}
)
data.update(header_data)
headers_to_match_against = create_payments(self.supplier, "inv", 1,self.period, 100)
headers_to_match_against_orig = headers_to_match_against
headers_as_dicts = [ to_dict(header) for header in headers_to_match_against ]
headers_to_match_against = [ get_fields(header, ['type', 'ref', 'total', 'paid', 'due', 'id']) for header in headers_as_dicts ]
matching_forms = []
matching_forms += add_and_replace_objects(headers_to_match_against, {"id": "matched_to"}, {"value": 100})
matching_data = create_formset_data(match_form_prefix, matching_forms)
line_data = create_formset_data(LINE_FORM_PREFIX, [])
data.update(matching_data)
data.update(line_data)
# WE ARE CREATING A NEW INVOICE FOR 2400.00 and matching against -1000 worth of invoices (across 10 invoices)
response = self.client.post(self.url, data)
self.assertEqual(response.status_code, 200)
self.assertEqual(
len(PurchaseHeader.objects.all()),
1 # the 1 trans created in set up; so not including the one we tried to just create
)
self.assertEqual(
len(PurchaseLine.objects.all()),
0
)
self.assertEqual(
len(PurchaseMatching.objects.all()),
0
)
self.assertContains(
response,
'<li class="py-1">Please ensure the total of the transactions you are matching is between 0 and 100.00</li>',
html=True
)
self.assertEqual(
len(VatTransaction.objects.all()),
0
)
"""
As with the invoice tests we now test the non-zero total tests but this time entering negatives
"""
# CORRECT USAGE -- BUT THIS MEANS THE TOTAL OF THE LINES IS USED FOR THE HEADER
# SO THIS IS NOT A ZERO VALUE MATCHING TRANSACTION
def test_header_total_is_non_zero_and_no_matching_transactions_selected_NEGATIVE(self):
self.client.force_login(self.user)
data = {}
header_data = create_header(
HEADER_FORM_PREFIX,
{
"cash_book": self.cash_book.pk,
"type": "pp",
"supplier": self.supplier.pk,
"period": self.period.pk,
"ref": self.ref,
"date": self.date,
"total": -100
}
)
data.update(header_data)
matching_data = create_formset_data(match_form_prefix, [])
line_data = create_formset_data(LINE_FORM_PREFIX, [])
data.update(matching_data)
data.update(line_data)
response = self.client.post(self.url, data)
self.assertEqual(response.status_code, 302)
headers = PurchaseHeader.objects.all()
self.assertEqual(len(headers), 1)
header = headers[0]
self.assertEqual(
header.total,
100
)
self.assertEqual(
header.goods,
100
)
self.assertEqual(
header.vat,
0
)
self.assertEqual(
header.ref,
self.ref
)
self.assertEqual(
header.paid,
0
)
self.assertEqual(
header.due,
header.total
)
lines = PurchaseLine.objects.all()
self.assertEqual(
len(lines),
0
)
self.assertEqual(
len(VatTransaction.objects.all()),
0
)
# CORRECT USAGE
def test_header_total_is_non_zero_and_with_matching_transactions_less_than_total_NEGATIVE(self):
self.client.force_login(self.user)
data = {}
header_data = create_header(
HEADER_FORM_PREFIX,
{
"cash_book": self.cash_book.pk,
"type": "pp",
"supplier": self.supplier.pk,
"period": self.period.pk,
"ref": self.ref,
"date": self.date,
"total": -2400
}
)
data.update(header_data)
headers_to_match_against = create_payments(self.supplier, "pay", 10,self.period, 100)
headers_to_match_against_orig = headers_to_match_against
headers_as_dicts = [ to_dict(header) for header in headers_to_match_against ]
headers_to_match_against = [ get_fields(header, ['type', 'ref', 'total', 'paid', 'due', 'id']) for header in headers_as_dicts ]
matching_forms = []
matching_forms += add_and_replace_objects(headers_to_match_against, {"id": "matched_to"}, {"value": 100})
# by entering -100 we are creating a negative payment which has a positive balance
# so we match the positive balance of 100.00
matching_data = create_formset_data(match_form_prefix, matching_forms)
line_data = create_formset_data(LINE_FORM_PREFIX, [])
data.update(matching_data)
data.update(line_data)
# WE ARE CREATING A NEW INVOICE FOR 2400.00 and matching against -1000 worth of invoices (across 10 invoices)
response = self.client.post(self.url, data)
self.assertEqual(response.status_code, 302)
headers = PurchaseHeader.objects.all()
self.assertEqual(len(headers), 11)
header = headers[0]
self.assertEqual(
header.total,
2400
)
self.assertEqual(
header.goods,
2400
)
self.assertEqual(
header.vat,
0
)
self.assertEqual(
header.ref,
self.ref
)
self.assertEqual(
header.paid,
1000
)
self.assertEqual(
header.due,
header.total - 1000
)
lines = PurchaseLine.objects.all()
self.assertEqual(len(lines), 0)
matched_headers = header.matched_to.all()
for _header in matched_headers: # _header to avoid overwriting header above
self.assertEqual(
_header.paid,
-100
)
matches = PurchaseMatching.objects.all()
self.assertEqual(len(matches), 10)
seen = {}
for match in matches:
if match.matched_to_id in seen:
self.fail("Matching record with same matched_to found")
seen[match.matched_to_id] = True # any value will do
self.assertEqual(
match.matched_by,
header
)
self.assertEqual(
match.value,
-100
)
self.assertEqual(
len(VatTransaction.objects.all()),
0
)
# CORRECT USAGE
def test_header_total_is_non_zero_and_with_matching_transactions_equal_to_total_NEGATIVE(self):
self.client.force_login(self.user)
data = {}
header_data = create_header(
HEADER_FORM_PREFIX,
{
"cash_book": self.cash_book.pk,
"type": "pp",
"supplier": self.supplier.pk,
"period": self.period.pk,
"ref": self.ref,
"date": self.date,
"total": -2400
}
)
data.update(header_data)
headers_to_match_against = create_payments(self.supplier, "inv", 24,self.period, 100) # Negative payments of 2400 on account
headers_to_match_against_orig = headers_to_match_against
headers_as_dicts = [ to_dict(header) for header in headers_to_match_against ]
headers_to_match_against = [ get_fields(header, ['type', 'ref', 'total', 'paid', 'due', 'id']) for header in headers_as_dicts ]
matching_forms = []
matching_forms += add_and_replace_objects(headers_to_match_against, {"id": "matched_to"}, {"value": 100})
matching_data = create_formset_data(match_form_prefix, matching_forms)
line_data = create_formset_data(LINE_FORM_PREFIX, [])
data.update(matching_data)
data.update(line_data)
# WE ARE CREATING A NEW INVOICE FOR 2400.00 and matching against -1000 worth of invoices (across 10 invoices)
response = self.client.post(self.url, data)
self.assertEqual(response.status_code, 302)
headers = PurchaseHeader.objects.all()
self.assertEqual(len(headers), 25)
header = headers[0]
self.assertEqual(
header.total,
2400
)
self.assertEqual(
header.goods,
2400
)
self.assertEqual(
header.vat,
0
)
self.assertEqual(
header.ref,
self.ref
)
self.assertEqual(
header.paid,
2400
)
self.assertEqual(
header.due,
0
)
lines = PurchaseLine.objects.all()
self.assertEqual(len(lines), 0)
matched_headers = header.matched_to.all()
for _header in matched_headers: # _header to avoid overwriting header above
self.assertEqual(
_header.paid,
-100
| |
"""Data structure for CAtlas."""
import argparse
import copy
import cProfile
import gzip
import os
import sys
import tempfile
from collections import defaultdict
from io import TextIOWrapper
from typing import Dict, List, Optional, Set

from spacegraphcats.utils.logging import log_command

from .graph import Graph
from .graph_io import read_from_gxt, write_to_gxt
from .rdomset import rdomset, domination_graph
# Domination radius used for every level above the base level.
UPPER_RADIUS = 1
class Project(object):
    """Methods for coordinating whole projects."""
    def __init__(self, directory, r, checkpoint=True):
        """
        Make a project in directory at radius r.

        This object stores the intermediate variables for the CAtlas building
        so that they can be checkpointed as necessary.

        Arguments:
            directory: project directory containing cdbg.gxt
            r: domination radius for the base level
            checkpoint: if True, finished levels may be written to and
                restored from "<r>_<level>.checkpoint" files in directory
        """
        self.dir = directory
        self.r = r
        self.checkpoint = checkpoint
        # build state advanced level by level in CAtlas.build()
        self.graph = None        # graph currently being dominated
        self.idx = 0             # next unused CAtlas node id
        self.level = 1           # level currently under construction
        self.level_nodes = None  # vertex -> CAtlas node of the previous level
        self.root = CAtlas(self.idx, -1, self.level, list())
        # project file names
        self.domfilename = os.path.join(self.dir, "first_doms.txt")
        self.graphfilename = os.path.join(self.dir, "cdbg.gxt")
        self.catlasfilename = os.path.join(self.dir, "catlas.csv")
    def existing_checkpoints(self):
        """Get the existing checkpoint files.

        Returns the sorted list of levels for which a checkpoint file with
        this project's radius exists in the project directory.
        """
        files = []
        for f in os.listdir(self.dir):
            name, ext = os.path.splitext(f)
            if ext == ".checkpoint":
                # checkpoint files are named "<radius>_<level>.checkpoint"
                r, level = map(int, name.split("_"))
                if r == self.r:
                    files.append(level)
        return list(sorted(files))
    def cp_name(self, level):
        """Return the name of the checkpoint file after level level."""
        return os.path.join(self.dir,
                            "{}_{}.checkpoint".format(self.r, level))
    def load_furthest_checkpoint(self):
        """Load the checkpoint that is furthest along."""
        existing = self.existing_checkpoints()
        # if there are no checkpoints or we don't want to load from one,
        # just read G from the graph file
        if len(existing) == 0 or not self.checkpoint:
            print("Loading graph from {}".format(self.graphfilename))
            # we only need to set the graph variable since index, level, and
            # previous nodes have the proper values by default
            with open(self.graphfilename, 'r') as graph_file:
                self.graph = read_from_gxt(graph_file, self.r, False)
        else:
            # existing is sorted ascending, so [-1] is the highest level
            self.load_checkpoint(existing[-1])
    def load_checkpoint(self, level):
        """Read cached information from a partial catlas computation."""
        if not self.checkpoint:
            raise IOError("I told you I didn't want to load from checkpoint!")
        print("Loading results of building level {}".format(level))
        # the temp file contains catlas and graph information. To use the
        # readers for catlas and graph, we need to temporarily split them into
        # separate files
        tmpf = tempfile.TemporaryFile(mode='r+')
        infile = self.cp_name(level)
        with gzip.open(infile, 'rt') as f:
            # read until the end of the catlas ("###\n" is the section
            # divider written by _save)
            for line in f:
                if line == "###\n":
                    break
                tmpf.write(line)
            # once we are at the graph section, start reading from there
            self.graph = read_from_gxt(f, radius=UPPER_RADIUS, directed=False,
                                       sequential=False)
        # move back to the beginning of the temporary file and read the
        # catlas
        tmpf.seek(0)
        root = CAtlas.read(tmpf)
        tmpf.close()
        # the checkpointed CAtlas has a dummy root. The nodes in the
        # current level need to be removed from the root because we haven't
        # finished constructing their parents.
        # NOTE(review): assumes the last len(graph) children of the dummy
        # root are exactly the unfinished current-level nodes — this mirrors
        # _save, which appends them after the finished children; confirm.
        unfinished_idx = -1*len(self.graph)
        unfinished = root.children[unfinished_idx:]
        root.children = root.children[:unfinished_idx]
        self.level_nodes = {node.vertex: node for node in unfinished}
        self.idx = root.idx
        self.level = root.level - 1
        self.root = root
    def _save(self):
        """Method used by the thread to write out."""
        outfile = self.cp_name(self.level - 1)
        print("Writing to file {}".format(outfile))
        with gzip.open(outfile, 'wt') as f:
            # make a dummy root to write the catlas using catlas.write method
            # we add all current level nodes as children of the root
            root = CAtlas(self.idx, -1, self.level,
                          copy.copy(self.root.children))
            root.children.extend(self.level_nodes.values())
            root.write(f)
            # divider between the catlas section and the graph section
            f.write("###\n")
            write_to_gxt(f, self.graph)
    def save_checkpoint(self):
        """Write out a partial computation."""
        if not self.checkpoint:
            return
        else:
            self._save()
class CAtlas(object):
    """Hierarchical atlas for querying graphs."""
    # levels with at most this many dominators stop the hierarchy and become
    # children of the root
    LEVEL_THRESHOLD = 10
    def __init__(self, idx, vertex, level, children):
        """
        Construct a CAtlas node.
        Arguments:
            idx: Integer identifier of the node. A CAtlas with n nodes will
                have ids 0,1,...,n-1. The root will always have id n-1.
            vertex: Name of vertex in the cDBG
            level: The height of the node in the hierarchy. The leaves are at
                level 1, their parents at level 2, etc.
            children: the CAtlas nodes for which this is a parent
        """
        self.idx = idx
        self.vertex = vertex
        self.children = children
        self.level = level
    @staticmethod
    def build(proj, benchmark_only=False):
        """Build a CAtlas at a given radius.

        proj carries all intermediate state (graph, ids, level) so partial
        results can be checkpointed between levels; returns the root node,
        or None when benchmarking or when nothing remains to attach.
        """
        # keep creating progressively smaller graphs until we hit the level
        # threshold or steady state
        while True:
            print()
            # the base level should have a large radius, others are just 1
            if proj.level == 1:
                r = proj.r
            else:
                r = UPPER_RADIUS
            # build the current level
            nodes, domgraph, dominated = CAtlas._build_level(proj.graph,
                                                             r,
                                                             proj.level,
                                                             proj.idx,
                                                             proj.level_nodes)
            print("Catlas level {} complete".format(proj.level))
            # at the bottom level we need to write out the domination
            # assignment
            if proj.level == 1 and not benchmark_only:
                with open(proj.domfilename, 'w') as domfile:
                    # one line per dominator: "<v> <u1> <u2> ..."
                    for v, shadow in dominated.items():
                        domstr = str(v)
                        for u in shadow:
                            domstr += " {}".format(u)
                        domstr += "\n"
                        domfile.write(domstr)
            # increment the index and level now so they are correctly adjusted
            # if we happen to return
            proj.idx += len(nodes)
            proj.level += 1
            # Keeping isolated vertices as parents of themselves blows up the
            # CAtlas size unnecessarily. We need to immediately make them
            # children of the root.
            for v in dominated:
                if v not in domgraph:
                    proj.root.children.append(nodes.pop(v))
            # quit if our level is sufficiently small
            if len(domgraph) <= CAtlas.LEVEL_THRESHOLD or \
                    len(domgraph) == len(proj.graph):
                break
            # prep for the next iteration
            proj.graph = domgraph
            proj.level_nodes = nodes
            # write level results to the checkpoint file if applicable
            proj.save_checkpoint()
        if benchmark_only:
            return None
        if not nodes:
            return None
        # place all remaining nodes as children of the root
        proj.root.children.extend(nodes.values())
        proj.root.level = proj.level
        proj.root.vertex = list(nodes.keys())[0]
        proj.root.idx = proj.idx
        return proj.root
    @staticmethod
    def _build_level(graph: "Graph", radius: int, level: int, min_id: int = 0,
                     prev_nodes: Optional[Dict[int, "CAtlas"]] = None):
        """Compute one level of the hierarchy.

        Returns (nodes, domgraph, dominated): nodes maps each dominator
        vertex to its new CAtlas node, domgraph is the graph induced on the
        dominators, and dominated maps dominators to the vertices they cover.
        """
        # find the dominating set of the current graph
        domset = rdomset(graph, radius)
        # dominated maps dominating vertices to a list of the vertices they
        # optimally dominate
        domgraph, dominated = domination_graph(graph, domset, radius)
        # create the CAtlas nodes
        nodes = {}  # type: Dict[int, CAtlas]
        for offset, v in enumerate(domset):
            # if no previous nodes were supplied, we assume we are on the
            # bottom level and thus the children field is empty
            if prev_nodes is None:
                children = []  # type: List[CAtlas]
            else:
                children = [prev_nodes[u] for u in dominated[v]]
            nodes[v] = CAtlas(min_id + offset, v, level, children)
        return nodes, domgraph, dominated
    def leaves(self, visited: Optional[Set["CAtlas"]] = None) -> Set["CAtlas"]:
        """Find the descendants of this node with no children."""
        # this function is recursive so we need to keep track of nodes we
        # already visited
        if visited is None:
            visited = {self}
        # base case: nodes at level 1 are the leaves
        if self.level == 1:
            return {self}
        # otherwise gather the leaves of the children
        res = set()  # type: Set[CAtlas]
        for child in self.children:
            if child not in visited:
                visited.add(child)
                res |= child.leaves(visited)
        return res
    def write(self, outfile: TextIOWrapper):
        """Write the connectivity of the CAtlas to file."""
        # doesn't matter how we traverse the graph, so we use DFS for ease of
        # implementation
        stack = [self]
        seen = set()
        while len(stack) > 0:
            curr = stack.pop()
            # one CSV row per node: idx, cdbg vertex, level, child ids
            child_str = " ".join(str(child.idx) for child in curr.children)
            outfile.write("{},{},{},{}\n".format(curr.idx,
                                                 curr.vertex,
                                                 curr.level,
                                                 child_str))
            # all nodes already seen don't get re-added
            seen.add(curr)
            stack.extend(filter(lambda x: x not in seen, curr.children))
    @classmethod
    def read(cls, catlas_file):
        """Load the catlas Directed Acyclic Graph."""
        children = []  # children[idx] -> list of child ids for node idx
        nodes = []     # nodes[idx] -> CAtlas node with id idx
        # load everything from the catlas file
        for line in catlas_file:
            catlas_node, cdbg_node, level, beneath = line.strip().split(',')
            level = int(level)
            catlas_node = int(catlas_node)
            cdbg_node = int(cdbg_node)
            # extend arrays as necessary
            while len(children) <= catlas_node:
                children.append([])
                nodes.append(None)
            # parse out the children
            beneath = beneath.strip()
            if beneath:
                children[catlas_node].extend(map(int, beneath.split(' ')))
            # make the new node with empty children
            nodes[catlas_node] = cls(catlas_node, cdbg_node, level, [])
        # update the nodes with pointers to their children
        for node in nodes:
            for child_idx in children[node.idx]:
                node.children.append(nodes[child_idx])
        # the root always has the largest id, hence the last slot
        return nodes[-1]
def main(args):
"""Build a CAtlas for the provided input graph."""
# unpack command line arguments
r = args.radius
proj_dir = args.project
checkpoint = not args.no_checkpoint
level = args.level
# make checkpoint
proj = Project(proj_dir, r, checkpoint)
print("reading graph")
if level:
print("Loading checkpoint at level | |
import os
import io
import sys
import stat
import glob
import click
import shlex
import subprocess
import pandas as pd
from datetime import datetime, timedelta
from typing import Tuple, List, Dict, Optional
from fabrica import date_format, control_prefix, calculate_prefix, Operator, GenerateMode, EntityFactory, MMPFactory, CustomFactory, EvaluationContext, Verifier
from data_layer import Redshift as SQL
from .config import get_sql_config
from moda import style, log
from moda.command import invoke_subcommand
from subir import Uploader
def load_template(templates_path: str, template_name: str, template_dfs: Dict[str, pd.DataFrame]):
    """Read ``<templates_path>/<template_name>.csv`` into the cache.

    No-op when the template is already cached. When the template declares a
    target-id column indirection, each row's target id is copied from the
    column that the row itself names.
    """
    if template_name in template_dfs.keys():
        return
    df = pd.read_csv(f'{templates_path}/{template_name}.csv', dtype='object')
    target_column_key = f'{control_prefix}target_id_column'
    if target_column_key in df.columns:
        # each row names the column holding its target id; materialize it
        df[f'{control_prefix}target_id'] = df.apply(lambda r: r[r[target_column_key]], axis=1)
    template_dfs[template_name] = df
def load_source_table(source_path: str, table_name: str, source_dfs: Dict[str, pd.DataFrame]):
    """Load and cache every ``<table_name>.csv`` found under ``source_path``.

    Files are concatenated shallowest-path-first (sorted by number of path
    components). No-op when the table is already cached.

    Fix: uses ``pd.concat`` instead of ``DataFrame.append``, which was
    deprecated in pandas 1.4 and removed in pandas 2.0.
    """
    if table_name in source_dfs.keys():
        return
    source_file_paths = sorted([f for f in glob.glob(f'{source_path}/**/{table_name}.csv', recursive=True)], key=lambda p: len(p.split('/')))
    frames = [pd.read_csv(path, dtype='object') for path in source_file_paths]
    # concat preserves the old append semantics (indices kept, sort=False)
    df = pd.concat(frames, sort=False) if frames else pd.DataFrame()
    print(f'Loaded {len(df)} rows from {len(source_file_paths)} source files for table {table_name} at\n' + '\n'.join(source_file_paths))
    source_dfs[table_name] = df
def load_query(queries_path: str, query_name: str, query_dfs: Dict[str, pd.DataFrame], format_parameters: Dict[str, str], data_layer: SQL.Layer):
    """Run every ``<query_name>.sql`` under ``queries_path`` and cache the rows.

    Query files are read shallowest-path-first, optionally ``str.format``-ted
    with ``format_parameters``, executed against ``data_layer.connection``,
    and the results concatenated. No-op when the query is already cached.

    Fix: uses ``pd.concat`` instead of ``DataFrame.append``, which was
    deprecated in pandas 1.4 and removed in pandas 2.0.
    """
    if query_name in query_dfs.keys():
        return
    query_file_paths = sorted([f for f in glob.glob(f'{queries_path}/**/{query_name}.sql', recursive=True)], key=lambda p: len(p.split('/')))
    frames = []
    for path in query_file_paths:
        with open(path) as f:
            query = f.read()
        if format_parameters:
            query = query.format(**format_parameters)
        frames.append(pd.read_sql_query(query, con=data_layer.connection))
    df = pd.concat(frames, sort=False) if frames else pd.DataFrame()
    print(f'Loaded {len(df)} rows from {len(query_file_paths)} query files for query {query_name} at\n' + '\n'.join(query_file_paths))
    query_dfs[query_name] = df
def quote_command(run_args: List[str]) -> str:
    """Shell-quote each argument and join them into one command string.

    Fix: the parameter was annotated ``str``, but the function iterates it
    argument-wise and quotes each element, i.e. it expects an argv-style
    list of strings; the annotation is corrected accordingly.
    """
    return ' '.join(shlex.quote(a) for a in run_args)
# One value per GenerateMode member; used below as both the set of valid
# choices and the default for the --mode option.
all_modes = [m.value for m in GenerateMode]
class Fabrica:
    """CLI context object: database/schema selection plus credential overrides."""

    database: str            # key into the SQL config (an alias, not the real name)
    schema: str              # target schema for generated/uploaded data
    user: Optional[str]      # optional credential override for every database
    password: Optional[str]  # optional credential override for every database

    def __init__(self, database: str, schema: str, user: Optional[str], password: Optional[str]):
        self.database = database
        self.schema = schema
        self.user = user
        self.password = password

    @property
    def database_name(self) -> str:
        """Resolve the configured database alias to its actual database name."""
        return get_sql_config()[self.database]['database']

    def configure_data_layer(self):
        """Apply credential overrides and point the data layer at our database.

        Fix: previously the config was fetched a second time after the
        user/password overrides were applied, which silently discarded the
        overrides whenever ``get_sql_config()`` returns a fresh mapping.
        """
        sql_config = get_sql_config()
        for database in sql_config:
            if self.user is not None:
                sql_config[database]['user'] = self.user
            if self.password is not None:
                sql_config[database]['password'] = self.password
        # use the mutated config -- do NOT re-fetch it here
        database_options = sql_config[self.database]
        SQL.Layer.configure_connection(options=database_options)
@click.group(name='run')
@click.option('-db', '--database', 'database', type=str, default='default')
@click.option('-s', '--schema', 'schema', type=str, default='demo')
@click.option('-u', '--database-user', 'database_user', type=str)
@click.option('-p', '--database-password', 'database_password', type=str)
@click.pass_context
@invoke_subcommand()
def run(ctx: any, database: str, schema: str, database_user: Optional[str], database_password: Optional[str]):
    """Root command group: build the Fabrica context, configure the data
    layer, and hand the context to subcommands via ``ctx.obj``.

    Fix: ``password`` was assigned the literal ``<PASSWORD>`` (a redaction
    placeholder, which is not valid Python); it now forwards the
    ``--database-password`` option as clearly intended by the signature.
    """
    fabrica = Fabrica(
        database=database,
        schema=schema,
        user=database_user,
        password=database_password
    )
    fabrica.configure_data_layer()
    ctx.obj = fabrica
@run.command()
@click.option('-t', '--templates', 'templates_path', type=str, default='input/templates')
@click.option('-q', '--queries', 'queries_path', type=str, default='input/queries')
@click.option('-s', '--sources', 'source_path', type=str, default='output/csv')
@click.option('-r', '--results', 'results_path', type=str, default='output/csv')
@click.option('-p', '--parameters', 'parameters_path', type=str, default='input/parameters')
@click.option('-m', '--mode', 'mode_values', type=click.Choice(all_modes), multiple=True, default=all_modes)
@click.pass_obj
def generate(
    context: Fabrica,
    templates_path: str,
    queries_path: str,
    source_path: str,
    results_path: str,
    parameters_path: str,
    mode_values: List[str],
):
    """Generate CSV result tables for each requested GenerateMode.

    For every selected mode this reads the mode's parameters CSV, loads the
    templates / source tables / SQL queries each parameter row references,
    produces rows through the mode's factory (custom / entity / mmp), and
    finally writes one CSV per output table under ``results_path``.
    """
    template_dfs = {}  # template name -> DataFrame; cached across modes
    for mode in GenerateMode:
        if mode.value not in mode_values:
            continue
        result_dfs = {}  # output table name -> accumulated result rows
        parameters_file_path = f'{parameters_path}/{mode.parameters_file_name}.csv'
        parameters_df = pd.read_csv(parameters_file_path)
        # normalize the optional 'disabled' column into clean booleans
        if 'disabled' not in parameters_df.columns:
            parameters_df['disabled'] = False
        parameters_df.disabled = parameters_df.disabled.apply(lambda v: v and not pd.isna(v))
        log.log(f'Generating data for {len(parameters_df)} parameter rows.')
        for index, parameters in parameters_df.iterrows():
            if parameters.disabled:
                print(f'Skipping disabled parameter row {index}')
                continue
            # NOTE(review): reassigning parameters_df while iterating it looks
            # suspect -- iterrows() keeps walking the original frame, so this
            # only affects later uses of parameters_df; confirm intent.
            parameters_df = parameters_df[parameters_df.disabled == False]
            load_template(
                templates_path=templates_path,
                template_name=parameters.template,
                template_dfs=template_dfs
            )
            # keep only the template rows whose target id matches this row
            template_df = template_dfs[parameters.template]
            template_df = template_df[template_df[f'{control_prefix}target_id'].str.match(str(parameters.target_id))]
            log.log(style.Format().green()(f'Generating data for paramerter row {index} ({len(template_df)} template rows)'))
            log.log(style.Format().cyan()(parameters))
            table = pd.DataFrame()
            # load the auxiliary inputs this parameter row declares
            source_templates = {}
            if parameters.source_templates and not pd.isna(parameters.source_templates):
                for source_template in parameters.source_templates.split(' '):
                    load_template(
                        templates_path=templates_path,
                        template_name=source_template,
                        template_dfs=source_templates
                    )
            source_tables = {}
            if parameters.source_tables and not pd.isna(parameters.source_tables):
                for source_table in parameters.source_tables.split(' '):
                    load_source_table(
                        source_path=source_path,
                        table_name=source_table,
                        source_dfs=source_tables
                    )
            source_queries = {}
            if parameters.source_queries and not pd.isna(parameters.source_queries):
                # open one connection for all queries of this parameter row
                layer = SQL.Layer()
                layer.connect()
                for source_query in parameters.source_queries.split(' '):
                    load_query(
                        queries_path=queries_path,
                        query_name=source_query,
                        query_dfs=source_queries,
                        format_parameters={'SCHEMA': f'{context.schema}.'},
                        data_layer=layer
                    )
                layer.disconnect()
            for _, template in template_df.iterrows():
                if mode is GenerateMode.custom:
                    # custom mode: evaluate user-supplied expressions from the
                    # parameters row to build source context/iterator, then
                    # generate rows in one shot (no date expansion)
                    parameter_context = EvaluationContext(row=parameters)
                    template_context = EvaluationContext(row=template)
                    factory = CustomFactory(
                        parameter_context=parameter_context,
                        template_context=template_context,
                        source_tables=source_tables,
                        source_templates=source_templates,
                        source_queries=source_queries,
                        evaluate_directory='output/python/evaluate',
                    )
                    factory.source_context = factory.evaluate(
                        context=parameter_context,
                        column='source_context',
                        default_value={
                            'pd': pd,
                            'factory': factory,
                            'parameters': parameters,
                            'template': template,
                            'source_tables': source_tables,
                            'source_templates': source_templates,
                            'source_queries': source_queries,
                        },
                        require_repair=True
                    )
                    # TODO: assemble source from source templates, output files, and query sources
                    factory.source = factory.evaluate(context=parameter_context, column='source', default_value=pd.DataFrame(), require_repair=True)
                    factory.source_context['s'] = factory.source
                    factory.source_iterator = factory.evaluate(context=parameter_context, column='source_iterator', default_value=[], require_repair=True)
                    factory.source_context['source_iterator'] = factory.source_iterator
                    factory.finish_repairs()
                    results = pd.DataFrame()
                    results = results.append(factory.generate_custom_results(
                        table=results
                    ))
                    # strip internal control columns from the output
                    results.drop(columns=[k for k in results.columns if k.startswith(control_prefix)], inplace=True)
                    table_name = parameters.table if not pd.isna(parameters.table) else parameters.template
                else:
                    # date-driven modes: one generated batch per day between
                    # start_date and end_date (inclusive)
                    table_name = template[f'{control_prefix}table']
                    start_date = datetime.strptime(parameters.start_date, date_format)
                    end_date = datetime.strptime(parameters.end_date, date_format)
                    days = (end_date - start_date).days + 1
                    print(f'Generating data for {days} days for parameter row {index} with generated daily row counts:')
                    for day in range(0, days):
                        result = template.copy()
                        date_column = template[f'{control_prefix}date_column'] if f'{control_prefix}date_column' in template else None
                        date = start_date + timedelta(days=day)
                        if date_column:
                            result[date_column] = date.strftime(date_format)
                        if mode is GenerateMode.entity:
                            factory = EntityFactory()
                            results = factory.generate_entity_results(
                                result=result,
                                day=day,
                                parameters=parameters,
                                template=template
                            )
                        elif mode is GenerateMode.mmp:
                            # mmp rows are derived from the entity rows that
                            # were generated for the same date
                            factory = MMPFactory()
                            load_template(
                                templates_path=templates_path,
                                template_name=parameters.entity_template,
                                template_dfs=template_dfs
                            )
                            entity_template_df = template_dfs[parameters.entity_template]
                            entity_template = entity_template_df.iloc[0]
                            load_source_table(
                                source_path=results_path,
                                table_name=entity_template[f'{control_prefix}table'],
                                source_dfs=source_tables
                            )
                            source_df = source_tables[entity_template[f'{control_prefix}table']]
                            source_df = source_df[source_df[entity_template[f'{control_prefix}date_column']] == date.strftime(date_format)]
                            results = pd.DataFrame()
                            for _, entity_result in source_df.iterrows():
                                results = results.append(factory.generate_mmp_results(
                                    result=result.copy(),
                                    date=date,
                                    parameters=parameters,
                                    template=template,
                                    entity_result=entity_result,
                                    entity_template=entity_template
                                ), sort=False)
                            if not results.empty:
                                # coerce count columns to nullable integers
                                results[template[f'{control_prefix}events_column']] = results[template[f'{control_prefix}events_column']].astype(pd.Int64Dtype())
                                results[template[f'{control_prefix}day_column']] = results[template[f'{control_prefix}day_column']].astype(pd.Int64Dtype())
                        # evaluate calculated columns declared on the template
                        # NOTE(review): original indentation was lost in
                        # extraction; this post-processing is placed inside the
                        # per-day loop (matching the per-day dot output below)
                        # -- confirm against the original file.
                        calculated_columns = [c for c in template.keys() if c.startswith(calculate_prefix)]
                        for calculated_column in calculated_columns:
                            results[calculated_column[len(calculate_prefix):]] = results.apply(
                                lambda r: factory.calculate_column(
                                    expression=str(template[calculated_column]),
                                    result=r,
                                    parameters=parameters,
                                    template=template
                                ),
                                axis=1
                            )
                        results.drop(columns=[k for k in results.columns if k.startswith(control_prefix)], inplace=True)
                        table = table.append(results, sort=False)
                        # progress indicator: one daily row count per day
                        sys.stdout.write(f'{len(results)}.')
                        sys.stdout.flush()
            if table_name not in result_dfs.keys():
                result_dfs[table_name] = pd.DataFrame()
            result_dfs[table_name] = result_dfs[table_name].append(table, sort=False)
            print(f'\n{table.iloc[-1] if len(table) else None}\n{len(table)} rows generated for row {index + 1} in {parameters_file_path}\nfor table {table_name} using template {parameters.template}')
        # write every accumulated table for this mode to disk
        for table, df in result_dfs.items():
            path = f'{results_path}/{table}.csv'
            df.to_csv(path, index=False)
            print(f'{len(df)} total rows generated for table {table} at {path}')
@run.command()
@click.option('-r', '--results', 'results_path', type=str, default='output/csv')
@click.option('-m', '--merge', 'merge_columns', type=str, multiple=True)
@click.option('-t', '--table', 'tables', type=str, multiple=True)
@click.option('-d', '--drop', 'should_drop', is_flag=True)
@click.pass_obj
def upload(
    context: Fabrica,
    results_path: str,
    merge_columns: List[str],
    tables: List[str],
    should_drop: bool
):
    """Upload generated result CSVs into the configured schema, after an
    explicit confirmation prompt.

    Fix: removed the ``database_name`` parameter from the signature -- it had
    no corresponding click option (so click could not invoke the command with
    it) and was immediately overwritten from ``context`` anyway.
    """
    database_name = context.database_name
    schema = context.schema
    # map each CSV path to its table name (file stem)
    result_file_paths = {f: os.path.splitext(os.path.basename(f))[0] for f in glob.glob(f'{results_path}/**/*.csv', recursive=True)}
    result_file_text = '\n'.join(f'{k} ——> {result_file_paths[k]}' for k in sorted(result_file_paths.keys()) if not tables or result_file_paths[k] in tables)
    confirm_action = 'drop and replace' if should_drop else 'append'
    # require an explicit y/n confirmation before touching the database
    if not click.prompt(
        f'Confirm {confirm_action} demo data to schema {schema} in database {database_name} from files\n{result_file_text}',
        type=click.Choice(['y', 'n']),
        confirmation_prompt=True
    ) == 'y':
        return
    uploader = Uploader()
    action = 'Truncating table and uploading' if should_drop else 'Adding'
    for path in sorted(result_file_paths.keys()):
        table_name = result_file_paths[path]
        if tables and table_name not in tables:
            log.log(f'Skipping table {table_name}')
            continue
        df = pd.read_csv(path, dtype='object')
        print(f'{action} {len(df)} rows to {schema}.{table_name} in database {database_name}')
        uploader.upload_data_frame(
            schema_name=schema,
            table_name=table_name,
            merge_column_names=merge_columns,
            data_frame=df,
            column_type_transform_dictionary={},
            replace=should_drop
        )
@run.command()
@click.option('-db', '--database', 'database', type=str)
@click.option('-db1', '--database-1', 'database_a', type=str)
@click.option('-db2', '--database-2', 'database_b', type=str)
@click.option('-csv1', '--csv-1', 'csv_a', is_flag=True)
@click.option('-csv2', '--csv-2', 'csv_b', is_flag=True)
@click.option('-e1/-E1', '--escape-1/--no-escape-1', 'escape_a', is_flag=True, default=True)
@click.option('-e2/-E2', '--escape-2/--no-escape-2', 'escape_b', is_flag=True, default=True)
@click.option('-s', '--script', 'script_path', type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True))
@click.option('-s1', '--script-1', 'script_path_a', type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True))
@click.option('-s2', '--script-2', 'script_path_b', type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True))
@click.option('-c', '--column', 'columns', type=str, multiple=True)
@click.option('-xc', '--exclude-column', 'exclude_columns', type=str, multiple=True)
@click.option('-c1', '--column-1', 'columns_a', type=str, multiple=True)
@click.option('-xc1', '--exclude-column-1', 'exclude_columns_a', type=str, multiple=True)
@click.option('-c2', '--column-2', 'columns_b', type=str, multiple=True)
@click.option('-xc2', '--exclude-column-2', 'exclude_columns_b', type=str, multiple=True)
@click.option('-i/-I', '--interactive/--no-interactive', 'interactive', is_flag=True, default=True)
@click.option('-v/-V', '--verbose/--no-verbose', 'verbose', is_flag=True, default=True)
@click.option('-o', '--output', 'output_directory', type=click.Path(exists=True, file_okay=False, dir_okay=True, writable=True))
@click.option('-in', '--input-directory', 'input_directory', type=click.Path(exists=True, file_okay=False, dir_okay=True, readable=True))
@click.option('-df', '--diff-tool', 'diff_tool', type=str, default='vimdiff')
@click.option('-at', '--absolute-tolerance', type=float, default=0)
@click.option('-rt', '--relative-tolerance', type=float, default=0)
@click.option('-fn', '--format-name', 'format_names', multiple=True)
@click.option('-fv', '--format-value', 'format_values', multiple=True)
@click.argument('path_a')
@click.argument('path_b')
@click.pass_obj
def verify(context: Fabrica, database: Optional[str], database_a: Optional[str], database_b: Optional[str], csv_a: bool, csv_b: bool, escape_a: bool, escape_b: bool, script_path: Optional[str], script_path_a: Optional[str], script_path_b: Optional[str], columns: Tuple[str], columns_a: Tuple[str], columns_b: Tuple[str], exclude_columns: Tuple[str], exclude_columns_a: Tuple[str], exclude_columns_b: Tuple[str], interactive: bool, verbose: bool, output_directory: Optional[str], input_directory: Optional[str], diff_tool: str, absolute_tolerance: float, relative_tolerance: float, format_names: Tuple[str], format_values: Tuple[str], path_a: str, path_b: str):
    """Compare the two inputs ``path_a`` and ``path_b`` with a Verifier.

    All per-side options come in matched "-1"/"-2" (a/b) pairs; unsuffixed
    options apply to both sides. The --format-name/--format-value pairs are
    zipped into format parameters passed to the verifier (in addition to the
    context's schema). Fails the command in non-interactive mode when the
    verification does not succeed.
    """
    # one --format-value is required for every --format-name
    assert len(format_names) == len(format_values)
    # resolve both inputs relative to --input-directory when given
    file_a = click.File()(os.path.join(input_directory, path_a) if input_directory else path_a)
    file_b = click.File()(os.path.join(input_directory, path_b) if input_directory else path_b)
    verifier = Verifier(
        database=database if database is not None else context.database,
        interactive=interactive,
        verbose=verbose,
        output_directory=output_directory if output_directory else os.path.join('output', 'verify'),
        diff_command=diff_tool
    )
    verification = verifier.verify(
        name_a=os.path.splitext(os.path.basename(file_a.name))[0],
        name_b=os.path.splitext(os.path.basename(file_b.name))[0],
        stream_a=file_a,
        stream_b=file_b,
        script_path=script_path,
        script_path_a=script_path_a,
        script_path_b=script_path_b,
        database_a=database_a,
        database_b=database_b,
        csv_a=csv_a,
        csv_b=csv_b,
        escape_a=escape_a,
        escape_b=escape_b,
        columns=list(columns) if columns else None,
        columns_a=list(columns_a) if columns_a else None,
        columns_b=list(columns_b) if columns_b else None,
        exclude_columns=list(exclude_columns) if exclude_columns else None,
        exclude_columns_a=list(exclude_columns_a) if exclude_columns_a else None,
        exclude_columns_b=list(exclude_columns_b) if exclude_columns_b else None,
        format_parameters={
            'SCHEMA': context.schema,
            **dict(zip(format_names, format_values)),
        },
        absolute_tolerance=absolute_tolerance,
        relative_tolerance=relative_tolerance
    )
    if verification.success:
        print('Verification succeeded.')
    else:
        print('Verification FAILED.')
        if not interactive:
            # make scripted/CI runs fail loudly
            raise click.ClickException('Non-interactive verification failed.')
@run.group()
def docker():
    # Container for docker-related subcommands (e.g. `run docker build`).
    pass
@docker.command()
@click.option('-c/-C' '--cache/-no-cache', 'cache', is_flag=True, default=True)
def build(cache: bool=True):
run_args = [
'docker',
'build',
*(['--no-cache'] if not cache else []),
'-t', 'xyla/fabrica',
'-f', 'Dockerfile',
'..',
| |
# Repository: poly451/Tutorials
import random
import constants
import os, sys
from shutil import copyfile
import math
import time
def get_unique_number():
    """Return the current UTC time as a 14-character 'YYYYMMDDHHMMSS' string.

    Fix: the previous implementation concatenated *unpadded* fields, so e.g.
    month=1/day=11 and month=11/day=1 produced identical strings; zero-padded
    strftime keeps the format fixed-width and unambiguous.
    """
    return time.strftime("%Y%m%d%H%M%S", time.gmtime())
def reset_data(player_name, profession):
    """Reset a player's inventory and all quest/event progress files.

    The two quest-history resets were previously duplicated inline; they are
    factored into ``_reset_quest_flags`` and the repeated file-writing loop
    into ``_write_key_values``.
    """
    # reset player's inventory from the profession's master files
    filepath_master_consumables = os.path.join("data", "master_files", "player_types", profession, "inventory", "consumable_items.txt")
    filepath_master_weapons = os.path.join("data", "master_files", "player_types", profession, "inventory", "weapon_items.txt")
    filepath_player_consumables = os.path.join("data", "playing_characters", player_name, "inventory", "consumable_items.txt")
    filepath_player_weapons = os.path.join("data", "playing_characters", player_name, "inventory", "weapon_items.txt")
    copyfile(filepath_master_consumables, filepath_player_consumables)
    copyfile(filepath_master_weapons, filepath_player_weapons)
    # ---- reset dark_alley ----
    # every recorded event flag goes back to False
    filepath = os.path.join("data", "zones", "dark_alley", "map01", "texts", "event_record.txt")
    mydict = read_file(filepath)[0]
    _write_key_values(filepath, {key: False for key in mydict})
    _reset_quest_flags(os.path.join("data", "zones", "dark_alley", "map01", "texts", "old_ben_history.txt"))
    # NOTE(review): the original comment called this section "reset
    # green_lawn" but the path points at the bridge zone -- confirm.
    _reset_quest_flags(os.path.join("data", "zones", "bridge", "map00", "texts", "westley_history.txt"))


def _write_key_values(filepath, mydict):
    """Overwrite filepath with one 'key: value' line per dict entry."""
    with open(filepath, "w") as f:
        for key, value in mydict.items():
            f.write("{}: {}\n".format(key, value))


def _reset_quest_flags(filepath):
    """Set quest_accepted/quest_completed to False; keep every other key."""
    mydict = read_file(filepath)[0]
    new_dict = {
        key: False if key in ("quest_accepted", "quest_completed") else value
        for key, value in mydict.items()
    }
    _write_key_values(filepath, new_dict)
def npc_fight_on_contact(player_name, npc_name):
    """Flag an NPC as hostile by setting on_contact to 'fight' in its file."""
    npc_path = os.path.join(
        "data", "playing_characters", player_name, "npcs",
        "{}.txt".format(npc_name))
    npc_data = read_file(npc_path)[0]
    npc_data["on_contact"] = "fight"
    lines = ["{}: {}\n".format(key, value) for key, value in npc_data.items()]
    with open(npc_path, "w") as f:
        f.writelines(lines)
def get_player_initial_direction(zone_name, map_name):
    """Return the constants.* direction the player initially faces on a map.

    Reads ``player.txt`` for the zone/map and maps its ``npc_position`` value
    to a direction constant. Raises ValueError for an unrecognized value.

    Fix: the if/elif chain is replaced with a lookup table, and the former
    uninformative ``ValueError("Error")`` now reports the offending value.
    """
    directions = {
        "up": constants.UP,
        "down": constants.DOWN,
        "right": constants.RIGHT,
        "left": constants.LEFT,
    }
    filepath = os.path.join("data", "zones", zone_name, map_name, "player.txt")
    mydict = read_file(filepath)[0]
    try:
        return directions[mydict["npc_position"]]
    except KeyError:
        raise ValueError(
            "Unrecognized npc_position: {!r}".format(mydict["npc_position"]))
def get_coords_from_map(zone_name, map_name):
    """Read NPC tile coordinates from a map's ``<map>_npcs.txt`` grid file.

    Returns a list of ``{"x": col, "y": row, "tile": name}`` dicts, one per
    non-empty tile. ".." marks an empty tile and is skipped.
    """
    filename = "{}_npcs.txt".format(map_name)
    filepath = os.path.join("data", "zones", zone_name, map_name, filename)
    print("opening zone filepath: {}".format(filepath))
    with open(filepath, "r") as f:
        raw_lines = [line.strip() for line in f.readlines() if len(line.strip()) > 0]
    # the first two lines are headers and the first 3 characters of each
    # remaining line are a row label; both are discarded
    grid_rows = [line[3:] for line in raw_lines[2:]]
    coords = []
    for y, row_text in enumerate(grid_rows):
        cells = [c.strip() for c in row_text.split(";") if len(c.strip()) > 0]
        for x, cell in enumerate(cells):
            if cell != "..":
                coords.append({"x": x, "y": y, "tile": cell})
    return coords
def health_percent(max_health, current_health):
    """Return current health as an integer percentage of max, rounded down."""
    scaled = current_health * 100
    return math.floor(scaled / max_health)
def is_int(mystring):
    """Return True if mystring can be parsed as an int, else False.

    Fix: catches only (TypeError, ValueError) -- the previous bare ``except``
    also swallowed KeyboardInterrupt/SystemExit.
    """
    try:
        int(mystring)
        return True
    except (TypeError, ValueError):
        return False
def is_real(mystring):
    """Return True if mystring can be parsed as a float, else False.

    Fix: catches only (TypeError, ValueError) -- the previous bare ``except``
    also swallowed KeyboardInterrupt/SystemExit.
    """
    try:
        float(mystring)
        return True
    except (TypeError, ValueError):
        return False
def is_int_or_real(mystring):
    """True when mystring parses as an int or as a float."""
    return is_int(mystring) or is_real(mystring)
def is_alpha(mystring):
    """True when every character of mystring is in constants.ALPHABET.

    Vacuously True for the empty string, matching the original behavior.
    """
    return all(ch in constants.ALPHABET for ch in mystring)
def key_value(mystring, mydict):
    """Parse one 'tag: value' line into mydict and return the dict.

    The value is converted to int when it parses as one, otherwise kept as a
    string. Raises ValueError for missing colon, empty tag, or empty value.
    """
    if mystring.find(":") == -1:
        s = "Error! A colon (:) was not found in mystring: {}".format(mystring)
        raise ValueError(s)
    if len(mystring) == 0:
        s = "The length of this string is 0."
        raise ValueError(s)
    # split on the FIRST colon only
    colon_at = mystring.find(":")
    tag = mystring[:colon_at].strip()
    value = mystring[colon_at + 1:].strip()
    if not tag:
        raise ValueError("Error: mystring = {}".format(mystring))
    if not value:
        s = "mystring: {}\n".format(mystring)
        s += "Error: there is no value. Here is mystring: {}".format(mystring)
        raise ValueError(s)
    try:
        mydict[tag] = int(value) if is_int(value) else value
    except Exception as e:
        print("tag: {}; value: {}".format(tag, value))
        raise ValueError(e)
    return mydict
def copy_original_player_files(profession_type, character_name):
    """Seed a new character's data directory from the profession's masters."""
    source_dir = os.path.join("data", "master_files", "player_types", profession_type)
    target_dir = os.path.join("data", "playing_characters", character_name)
    # NOTE(review): copy_directory is not defined or imported in this chunk;
    # assumed to be provided elsewhere in the module -- confirm.
    copy_directory(source_dir, target_dir)
def get_players_position_on_map():
    """Scan the map file for the 'p' marker.

    Returns (x, y) of the last occurrence found, or (-1, -1) when absent
    (same semantics as the original, which did not break on the first hit).
    """
    filepath = os.path.join("data", constants.MAPFILE)
    with open(filepath, "r") as f:
        rows = [line.strip() for line in f.readlines() if len(line.strip()) > 0]
    x, y = -1, -1
    for row_index, row in enumerate(rows):
        for col_index, cell in enumerate(row):
            if cell == 'p':
                x, y = col_index, row_index
    return x, y
def read_data_file(filepath, num_of_fields):
    """Read a flat 'key: value' file into a list of dicts.

    Every consecutive run of ``num_of_fields`` non-blank lines becomes one
    dict. Raises ValueError when num_of_fields is not an integer or the line
    count is not a multiple of num_of_fields.
    """
    if not is_int(num_of_fields):
        s = "Error! num_of_fields is NOT an integer: {} != {}".format(type(num_of_fields), type(123))
        raise ValueError(s)
    print("opening filepath in utils.py-->read_data_file: {}".format(filepath))
    with open(filepath, "r") as f:
        lines = [ln.strip() for ln in f.readlines() if len(ln.strip()) > 0]
    records = []
    for start in range(0, len(lines), num_of_fields):
        record = {}
        for offset in range(num_of_fields):
            try:
                entry = lines[start + offset]
            except Exception as e:
                t = "the index: {}, filepath: {}".format(start + offset, filepath)
                s = "{}\n{}\n".format(e, t)
                raise ValueError(s)
            record = key_value(entry, record)
        records.append(record)
    return records
def read_file(filepath):
    """Read an entire 'key: value' file into a single dict.

    Returns a one-element list ``[dict]`` (callers index ``[0]``), or None
    for an empty file. Raises ValueError when the path is not a file, cannot
    be read, or contains no non-blank lines.

    Note: the original used a nested loop sized by the total line count,
    which only ever ran one outer iteration -- this version folds all lines
    into one dict directly, with identical behavior.
    """
    print("opening filepath in utils.py-->read_file: {}".format(filepath))
    if not os.path.isfile(filepath):
        raise ValueError("Error! This is not a file: --{}--".format(filepath))
    if file_is_empty(filepath) == True: return None
    with open(filepath, "r") as f:
        try:
            lines = [ln.strip() for ln in f.readlines() if len(ln.strip()) > 0]
        except Exception as e:
            s = "I'm having trouble reading this file: {}".format(filepath)
            raise ValueError("{}\n{}".format(e, s))
    if len(lines) == 0:
        raise ValueError("Error!")
    record = {}
    for entry in lines:
        try:
            record = key_value(entry, record)
        except Exception as e:
            s = "Having trouble with this file: {}".format(filepath)
            raise ValueError("{}\n{}".format(e, s))
    return [record]
def get_record(filepath, key_name, value_name, number_of_fields):
    """Return the first record whose key_name field equals value_name.

    Returns None when no record matches.
    """
    records = read_data_file(filepath, number_of_fields)
    return next((rec for rec in records if rec[key_name] == value_name), None)
def convert_direction_to_integer(the_direction):
    """Map a direction name (case-insensitive) to its angle in degrees.

    up -> 90, down -> -90, right -> 0, left -> 180.
    Raises ValueError for anything else.

    Fix: the if/elif chain (and its unreachable trailing else, which was
    already guarded by the validation above it) is replaced by one lookup.
    """
    angles = {"up": 90, "down": -90, "right": 0, "left": 180}
    key = the_direction.lower()
    if key not in angles:
        raise ValueError("I don't recognize this: {}".format(the_direction))
    return angles[key]
def convert_integer_to_direction(my_int):
    """Map an angle in degrees to its direction name.

    90 -> "UP", -90 -> "DOWN", 0 -> "RIGHT", 180 -> "LEFT".
    Raises ValueError when my_int is not an int (including bools, since the
    exact type is checked) or is not one of the four known angles.

    Fix: the if/elif chain is replaced by one lookup table; the exact-type
    checks and all error messages are preserved.
    """
    if type(my_int) == type("abc"):
        raise ValueError("my_int is actually of type string: {}".format(my_int))
    if type(my_int) != type(123):
        s = "Error! myint: {} ({})".format(my_int, type(my_int))
        raise ValueError(s)
    names = {90: "UP", -90: "DOWN", 0: "RIGHT", 180: "LEFT"}
    if my_int not in names:
        raise ValueError("Error! I don't recognize this: {}".format(my_int))
    return names[my_int]
def get_player_position_from_map(filepath):
    """Return (x, y) of the first 'p' marker in a map file.

    Raises ValueError when the map contains no player marker.

    Fix: removed the unused ``big_list`` local that was allocated and never
    read.
    """
    with open(filepath, "r") as f:
        rows = [line.strip() for line in f.readlines() if len(line.strip()) > 0]
    for y, row in enumerate(rows):
        for x, cell in enumerate(row):
            if cell == "p":
                return x, y
    raise ValueError("Player not found!")
def separate_text_into_lines(mytext, line_length):
    """Greedy word-wrap: split mytext at spaces into lines shorter than
    line_length, returning the list of lines.

    Fix: the break-position local was named ``int``, shadowing the builtin;
    renamed to ``cut``.
    """
    lines = []
    while len(mytext) >= line_length:
        # break at the last space within the first line_length characters
        cut = mytext[0:line_length].rfind(" ")
        lines.append(mytext[0:cut].strip())
        mytext = mytext[cut:].strip()
    lines.append(mytext)
    return lines
def _top_height(text_list, myfont):
# print("this is the type of text_list: {}".format(type(text_list)))
# print("t--- his is the type of 'font': {}".format(type(myfont)))
if not type(text_list) == type([]):
raise ValueError("Error")
tallest = -1
for elem in text_list:
try:
_, text_height = myfont.size(elem)
except Exception as e:
s = "elem: {}, type: {}\n".format(elem, type(elem))
s += "type of myfont: {}".format(type(myfont))
s = "{}\n{}".format(s, e)
raise ValueError(s)
if text_height > tallest:
tallest = text_height
return tallest
def convert_list_of_lists(a_list):
    """Join each inner list of strings with commas."""
    joined = []
    for inner in a_list:
        joined.append(",".join(inner))
    return joined
def command_okay(todo_list):
    """True when the joined todo words contain any conversation-ending command."""
    todo_string = ' '.join(todo_list)
    return any(command in todo_string for command in constants.CONVERSATION_ENDINGS)
def talk_dialog(screen, text, font, width_offset, height_offset, line_length=32, color=(0,0,0)):
# text_list = separate_text_into_lines(text, line_length)
text_list = []
if type(text) == type("abc"):
text_list = separate_text_into_lines(text, | |
(
self._validate_external_net_create(
net_data, az._default_tier0_router,
self._tier0_validator))
nsx_net_id = None
is_backend_network = False
else:
is_provider_net, net_type, physical_net, vlan_id, nsx_net_id = (
self._create_network_at_the_backend(context, net_data, az,
vlt))
is_backend_network = True
try:
rollback_network = False
with db_api.CONTEXT_WRITER.using(context):
# Create network in Neutron
created_net = super(NsxV3Plugin, self).create_network(context,
network)
self._extension_manager.process_create_network(
context, net_data, created_net)
if psec.PORTSECURITY not in net_data:
net_data[psec.PORTSECURITY] = True
self._process_network_port_security_create(
context, net_data, created_net)
self._process_l3_create(context, created_net, net_data)
self._add_az_to_net(context, created_net['id'], net_data)
if is_provider_net:
# Save provider network fields, needed by get_network()
net_bindings = [nsx_db.add_network_binding(
context.session, created_net['id'],
net_type, physical_net, vlan_id)]
self._extend_network_dict_provider(context, created_net,
bindings=net_bindings)
if is_backend_network:
# Add neutron-id <-> nsx-id mapping to the DB
# after the network creation is done
neutron_net_id = created_net['id']
nsx_db.add_neutron_nsx_network_mapping(
context.session,
neutron_net_id,
nsx_net_id)
if extensions.is_extension_supported(self, 'vlan-transparent'):
super(NsxV3Plugin, self).update_network(context,
created_net['id'],
{'network': {'vlan_transparent': vlt}})
rollback_network = True
# this extra lookup is necessary to get the
# latest db model for the extension functions
net_model = self._get_network(context, created_net['id'])
resource_extend.apply_funcs('networks', created_net, net_model)
if is_backend_network:
self._create_net_mp_mdproxy_port(
context, created_net, az, nsx_net_id)
except Exception:
with excutils.save_and_reraise_exception():
# Undo creation on the backend
LOG.exception('Failed to create network')
if (nsx_net_id and
net_type != utils.NsxV3NetworkTypes.NSX_NETWORK):
self.nsxlib.logical_switch.delete(nsx_net_id)
if (cfg.CONF.nsx_v3.native_dhcp_metadata and
is_backend_network and is_ddi_network):
# Delete the mdproxy port manually
self._delete_nsx_port_by_network(created_net['id'])
if rollback_network:
super(NsxV3Plugin, self).delete_network(
context, created_net['id'])
# Update the QoS policy (will affect only future compute ports)
qos_com_utils.set_qos_policy_on_new_net(
context, net_data, created_net)
if net_data.get(qos_consts.QOS_POLICY_ID):
LOG.info("QoS Policy %(qos)s will be applied to future compute "
"ports of network %(net)s",
{'qos': net_data[qos_consts.QOS_POLICY_ID],
'net': created_net['id']})
return created_net
def _ens_qos_supported(self):
return self.nsxlib.feature_supported(
nsxlib_consts.FEATURE_ENS_WITH_QOS)
    def delete_network(self, context, network_id):
        """Delete a neutron network and, when this plugin owns it, the NSX
        backend logical switch.

        Order matters: DHCP teardown and backend-id lookups happen before the
        neutron DB delete (which also validates there are no active ports);
        the backend switch is removed only afterwards.
        """
        if cfg.CONF.nsx_v3.native_dhcp_metadata:
            # tear down native DHCP for the network before removal
            self._delete_network_disable_dhcp(context, network_id)
        # gather backend facts while the DB row still exists
        nsx_net_id = self._get_network_nsx_id(context, network_id)
        is_nsx_net = self._network_is_nsx_net(context, network_id)
        is_ddi_network = self._is_ddi_supported_on_network(context, network_id)
        # First call DB operation for delete network as it will perform
        # checks on active ports
        self._retry_delete_network(context, network_id)
        if (not self._network_is_external(context, network_id) and
            not is_nsx_net):
            # plugin-created switch: remove it from the backend
            # TODO(salv-orlando): Handle backend failure, possibly without
            # requiring us to un-delete the DB object. For instance, ignore
            # failures occurring if logical switch is not found
            self.nsxlib.logical_switch.delete(nsx_net_id)
        else:
            # external or pre-existing NSX network: leave the switch alone
            if (cfg.CONF.nsx_v3.native_dhcp_metadata and is_nsx_net and
                is_ddi_network):
                # Delete the mdproxy port manually
                self._delete_nsx_port_by_network(network_id)
            # TODO(berlin): delete subnets public announce on the network
def _get_network_nsx_id(self, context, neutron_id):
# get the nsx switch id from the DB mapping
mappings = nsx_db.get_nsx_switch_ids(context.session, neutron_id)
if not mappings or len(mappings) == 0:
LOG.debug("Unable to find NSX mappings for neutron "
"network %s.", neutron_id)
# fallback in case we didn't find the id in the db mapping
# This should not happen, but added here in case the network was
# created before this code was added.
return neutron_id
return mappings[0]
    def update_network(self, context, id, network):
        """Update a neutron network and mirror the change to the NSX backend.

        The neutron DB is updated first; if the subsequent backend update
        fails, the DB change is rolled back and the error re-raised.
        """
        original_net = super(NsxV3Plugin, self).get_network(context, id)
        net_data = network['network']
        # Neutron does not support changing provider network values
        utils.raise_if_updates_provider_attributes(net_data)
        extern_net = self._network_is_external(context, id)
        is_nsx_net = self._network_is_nsx_net(context, id)
        # Validate the updated parameters
        self._validate_update_network(context, id, original_net, net_data)
        updated_net = super(NsxV3Plugin, self).update_network(context, id,
                                                              network)
        self._extension_manager.process_update_network(context, net_data,
                                                       updated_net)
        if psec.PORTSECURITY in net_data:
            self._process_network_port_security_update(
                context, net_data, updated_net)
        self._process_l3_update(context, updated_net, network['network'])
        self._extend_network_dict_provider(context, updated_net)
        # Only networks whose logical switch is owned by this plugin are
        # updated on the backend
        if (not extern_net and not is_nsx_net and
            ('name' in net_data or 'admin_state_up' in net_data or
             'description' in net_data)):
            try:
                # get the nsx switch id from the DB mapping
                nsx_id = self._get_network_nsx_id(context, id)
                net_name = net_data.get('name',
                                        original_net.get('name')) or 'network'
                self.nsxlib.logical_switch.update(
                    nsx_id,
                    name=utils.get_name_and_uuid(net_name, id),
                    admin_state=net_data.get('admin_state_up'),
                    description=net_data.get('description'))
                # Backend does not update the admin state of the ports on
                # the switch when the switch's admin state changes. Do not
                # update the admin state of the ports in neutron either.
            except nsx_lib_exc.ManagerError:
                LOG.exception("Unable to update NSX backend, rolling "
                              "back changes on neutron")
                with excutils.save_and_reraise_exception():
                    # remove the AZ from the network before rollback because
                    # it is read only, and breaks the rollback
                    if 'availability_zone_hints' in original_net:
                        del original_net['availability_zone_hints']
                    super(NsxV3Plugin, self).update_network(
                        context, id, {'network': original_net})
        if qos_consts.QOS_POLICY_ID in net_data:
            # attach the policy to the network in neutron DB
            # (will affect only future compute ports)
            qos_com_utils.update_network_policy_binding(
                context, id, net_data[qos_consts.QOS_POLICY_ID])
            if net_data[qos_consts.QOS_POLICY_ID]:
                LOG.info("QoS Policy %(qos)s will be applied to future "
                         "compute ports of network %(net)s",
                         {'qos': net_data[qos_consts.QOS_POLICY_ID],
                          'net': id})
        if not extern_net and not is_nsx_net:
            # update the network name & attributes in related NSX objects:
            if 'name' in net_data or 'dns_domain' in net_data:
                # update the dhcp server after finding it by tags
                self._update_dhcp_server_on_net_update(context, updated_net)
            if 'name' in net_data:
                # update the mdproxy port after finding it by tags
                self._update_mdproxy_port_on_net_update(context, updated_net)
                # update the DHCP port after finding it by tags
                self._update_dhcp_port_on_net_update(context, updated_net)
        return updated_net
    def _update_dhcp_port_on_net_update(self, context, network):
        """Update the NSX DHCP port when the neutron network changes.

        Best effort: a failure to rename the backend port is logged but
        does not fail the enclosing network update.
        """
        dhcp_service = nsx_db.get_nsx_service_binding(
            context.session, network['id'], nsxlib_consts.SERVICE_DHCP)
        if dhcp_service and dhcp_service['port_id']:
            # get the neutron port id and search by it
            port_tag = [{'scope': 'os-neutron-dport-id',
                         'tag': dhcp_service['port_id']}]
            dhcpports = self.nsxlib.search_by_tags(
                tags=port_tag,
                resource_type=self.nsxlib.logical_port.resource_type)
            if dhcpports['results']:
                # There should be only 1 dhcp port
                # update the port name by the new network name
                name = self._get_dhcp_port_name(network['name'], network['id'])
                try:
                    # attachment_type=False means "leave attachment as-is"
                    self.nsxlib.logical_port.update(
                        dhcpports['results'][0]['id'],
                        False, name=name, attachment_type=False)
                except Exception as e:
                    LOG.warning("Failed to update network %(id)s DHCP port "
                                "on the NSX: %(e)s", {'id': network['id'],
                                                      'e': e})
    def _update_mdproxy_port_on_net_update(self, context, network):
        """Update the NSX MDPROXY port when the neutron network changes.

        Best effort: a failure to rename the backend port is logged but
        does not fail the enclosing network update.
        """
        net_tag = [{'scope': 'os-neutron-net-id', 'tag': network['id']}]
        # find the logical port by the neutron network id & attachment
        mdproxy_list = self.nsxlib.search_by_tags(
            tags=net_tag,
            resource_type=self.nsxlib.logical_port.resource_type)
        if not mdproxy_list['results']:
            return
        for port in mdproxy_list['results']:
            # The network tag matches all of the network's ports; pick the
            # one with a METADATA_PROXY attachment
            if (port.get('attachment') and
                port['attachment'].get('attachment_type') == 'METADATA_PROXY'):
                # update the port name by the new network name
                name = self._get_mdproxy_port_name(network['name'],
                                                   network['id'])
                try:
                    # attachment_type=False means "leave attachment as-is"
                    self.nsxlib.logical_port.update(
                        port['id'], False, name=name, attachment_type=False)
                except Exception as e:
                    LOG.warning("Failed to update network %(id)s mdproxy port "
                                "on the NSX: %(e)s", {'id': network['id'],
                                                      'e': e})
                # There should be only 1 mdproxy port so it is safe to return
                return
    def _update_dhcp_server_on_net_update(self, context, network):
        """Update the NSX DHCP server when the neutron network changes.

        Best effort: a failure to update the backend server is logged but
        does not fail the enclosing network update.
        """
        net_tag = [{'scope': 'os-neutron-net-id', 'tag': network['id']}]
        # Find the DHCP server by the neutron network tag
        dhcp_srv_list = self.nsxlib.search_by_tags(
            tags=net_tag,
            resource_type=self.nsxlib.dhcp_server.resource_type)
        if dhcp_srv_list['results']:
            # Calculate the new name and domain by the network data
            dhcp_name = self.nsxlib.native_dhcp.build_server_name(
                network['name'], network['id'])
            az = self.get_network_az_by_net_id(context, network['id'])
            domain_name = common_utils.get_network_dns_domain(
                az, network)
            try:
                # There should be only 1 dhcp server
                # Update its name and domain
                self.nsxlib.dhcp_server.update(
                    dhcp_srv_list['results'][0]['id'],
                    name=dhcp_name,
                    domain_name=domain_name)
            except Exception as e:
                LOG.warning("Failed to update network %(id)s dhcp server on "
                            "the NSX: %(e)s", {'id': network['id'], 'e': e})
    @nsx_plugin_common.api_replay_mode_wrapper
    def create_subnet(self, context, subnet):
        """Create a subnet, delegating MP DHCP handling to the common code."""
        return self._create_subnet_with_mp_dhcp(context, subnet)
    def delete_subnet(self, context, subnet_id):
        """Delete a subnet together with its MP DHCP configuration."""
        # Call common V3 code to delete the subnet
        self.delete_subnet_with_mp_dhcp(context, subnet_id)
    def update_subnet(self, context, subnet_id, subnet):
        """Update a subnet and keep metadata access in sync.

        When on-demand metadata is configured (and native DHCP metadata is
        not used), toggling enable_dhcp on a router-attached subnet must
        also toggle the internal metadata network.
        """
        updated_subnet = self.update_subnet_with_mp_dhcp(
            context, subnet_id, subnet)
        if (cfg.CONF.nsx_v3.metadata_on_demand and
            not self._has_native_dhcp_metadata()):
            # If enable_dhcp is changed on a subnet attached to a router,
            # update internal metadata network accordingly.
            if 'enable_dhcp' in subnet['subnet']:
                port_filters = {'device_owner': const.ROUTER_INTERFACE_OWNERS,
                                'fixed_ips': {'subnet_id': [subnet_id]}}
                ports = self.get_ports(context, filters=port_filters)
                for port in ports:
                    nsx_rpc.handle_router_metadata_access(
                        self, context, port['device_id'],
                        interface=not updated_subnet['enable_dhcp'])
        return updated_subnet
def _build_address_bindings(self, port):
address_bindings = []
for fixed_ip in port['fixed_ips']:
address_bindings.append(nsx_resources.PacketAddressClassifier(
fixed_ip['ip_address'], port['mac_address'], None))
for pair in port.get(addr_apidef.ADDRESS_PAIRS):
address_bindings.append(nsx_resources.PacketAddressClassifier(
pair['ip_address'], pair['mac_address'], None))
for binding1 in address_bindings[:]:
for binding2 in address_bindings[:]:
cidr1 = netaddr.IPNetwork(binding1.ip_address)
cidr2 = netaddr.IPNetwork(binding2.ip_address)
if cidr1 != cidr2 and cidr1 in cidr2:
try:
address_bindings.remove(binding1)
except ValueError:
# item already removed
pass
return address_bindings
    def _get_qos_profile_id(self, context, policy_id):
        """Return the NSX switching-profile id mapped to a QoS policy.

        Raises:
            n_exc.InvalidInput: when no matching switching profile exists
                on the backend.
        """
        switch_profile_id = nsx_db.get_switch_profile_by_qos_policy(
            context.session, policy_id)
        nsxlib_qos = self.nsxlib.qos_switching_profile
        qos_profile = nsxlib_qos.get(switch_profile_id)
        if qos_profile:
            profile_ids = nsxlib_qos.build_switch_profile_ids(
                self.nsxlib.switching_profile, qos_profile)
            if profile_ids and len(profile_ids) > 0:
                # We have only 1 QoS profile, so this array is of size 1
                return profile_ids[0]
        # Didn't find it
        err_msg = _("Could not find QoS switching profile for policy "
                    "%s") % policy_id
        LOG.error(err_msg)
        raise n_exc.InvalidInput(error_message=err_msg)
    def get_lport_tags(self, secgroups):
        """Build the NSX tag list encoding a port's security-group membership.

        Args:
            secgroups: security-group ids the port belongs to.

        Returns:
            List of {'scope': ..., 'tag': ...} dicts, one per group, or a
            single tag with a None value when there are no groups.

        Raises:
            nsx_exc.NumberOfNsgroupCriteriaTagsReached: when the number of
                groups exceeds the NSX per-port tag limit.
        """
        if len(secgroups) > nsxlib_utils.MAX_NSGROUPS_CRITERIA_TAGS:
            raise nsx_exc.NumberOfNsgroupCriteriaTagsReached(
                max_num=nsxlib_utils.MAX_NSGROUPS_CRITERIA_TAGS)
        tags = []
        for sg in secgroups:
            tags = nsxlib_utils.add_v3_tag(
                tags, common_utils.PORT_SG_SCOPE, sg)
        if not tags:
            # This port shouldn't be associated with any security-group
            tags = [{'scope': common_utils.PORT_SG_SCOPE, 'tag': None}]
        return tags
def _create_port_at_the_backend(self, context, port_data,
l2gw_port_check, psec_is_on,
is_ens_tz_port):
device_owner = port_data.get('device_owner')
device_id = port_data.get('device_id')
if device_owner == const.DEVICE_OWNER_DHCP:
resource_type = 'os-neutron-dport-id'
elif device_owner == l3_db.DEVICE_OWNER_ROUTER_INTF:
resource_type = 'os-neutron-rport-id'
else:
| |
mesh = TensorMesh([h, h], x0="CC")
Then we create a discrete vector on mesh edges
>>> edges_x = mesh.edges_x
>>> edges_y = mesh.edges_y
>>> u_ex = -(edges_x[:, 1] / np.sqrt(np.sum(edges_x ** 2, axis=1))) * np.exp(
... -(edges_x[:, 0] ** 2 + edges_x[:, 1] ** 2) / 6 ** 2
... )
>>> u_ey = (edges_y[:, 0] / np.sqrt(np.sum(edges_y ** 2, axis=1))) * np.exp(
... -(edges_y[:, 0] ** 2 + edges_y[:, 1] ** 2) / 6 ** 2
... )
>>> u_e = np.r_[u_ex, u_ey]
Next, we construct the averaging operator and apply it to
the discrete vector quantity to approximate the value at cell centers.
>>> Aec = mesh.average_edge_to_cell_vector
>>> u_c = Aec @ u_e
And plot the results:
.. collapse:: Expand to show scripting for plot
>>> fig = plt.figure(figsize=(11, 5))
>>> ax1 = fig.add_subplot(121)
>>> mesh.plot_image(u_e, ax=ax1, v_type="E", view='vec')
>>> ax1.set_title("Variable at edges", fontsize=16)
>>> ax2 = fig.add_subplot(122)
>>> mesh.plot_image(u_c, ax=ax2, v_type="CCv", view='vec')
>>> ax2.set_title("Averaged to cell centers", fontsize=16)
>>> plt.show()
Below, we show a spy plot illustrating the sparsity and mapping
of the operator
.. collapse:: Expand to show scripting for plot
>>> fig = plt.figure(figsize=(9, 9))
>>> ax1 = fig.add_subplot(111)
>>> ax1.spy(Aec, ms=1)
>>> ax1.set_title("Edge Index", fontsize=12, pad=5)
>>> ax1.set_ylabel("Cell Vector Index", fontsize=12)
>>> plt.show()
"""
if getattr(self, "_average_edge_to_cell_vector", None) is None:
if self.dim == 1:
self._average_edge_to_cell_vector = self.aveEx2CC
elif self.dim == 2:
self._average_edge_to_cell_vector = sp.block_diag(
(self.aveEx2CC, self.aveEy2CC), format="csr"
)
elif self.dim == 3:
self._average_edge_to_cell_vector = sp.block_diag(
(self.aveEx2CC, self.aveEy2CC, self.aveEz2CC), format="csr"
)
return self._average_edge_to_cell_vector
@property
def average_edge_x_to_cell(self):
"""Averaging operator from x-edges to cell centers (scalar quantities).
This property constructs a 2nd order averaging operator that maps scalar
quantities from x-edges to cell centers. This averaging operator is
used when a discrete scalar quantity defined on x-edges must be
projected to cell centers. Once constructed, the operator is stored
permanently as a property of the mesh. *See notes*.
Returns
-------
(n_cells, n_edges_x) scipy.sparse.csr_matrix
The scalar averaging operator from x-edges to cell centers
Notes
-----
Let :math:`\\boldsymbol{\\phi_x}` be a discrete scalar quantity that
lives on x-edges. **average_edge_x_to_cell** constructs a discrete
linear operator :math:`\\mathbf{A_{xc}}` that projects
:math:`\\boldsymbol{\\phi_x}` to cell centers, i.e.:
.. math::
\\boldsymbol{\\phi_c} = \\mathbf{A_{xc}} \\, \\boldsymbol{\\phi_x}
where :math:`\\boldsymbol{\\phi_c}` approximates the value of the scalar
quantity at cell centers. For each cell, we are simply averaging
the values defined on its x-edges. The operation is implemented as a
matrix vector product, i.e.::
phi_c = Axc @ phi_x
Examples
--------
Here we compute the values of a scalar function on the x-edges. We then create
an averaging operator to approximate the function at cell centers. We choose
to define a scalar function that is strongly discontinuous in some places to
demonstrate how the averaging operator will smooth out discontinuities.
We start by importing the necessary packages and defining a mesh.
>>> from discretize import TensorMesh
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> h = np.ones(40)
>>> mesh = TensorMesh([h, h], x0="CC")
Then we create a scalar variable on x-edges,
>>> phi_x = np.zeros(mesh.nEx)
>>> xy = mesh.edges_x
>>> phi_x[(xy[:, 1] > 0)] = 25.0
>>> phi_x[(xy[:, 1] < -10.0) & (xy[:, 0] > -10.0) & (xy[:, 0] < 10.0)] = 50.0
Next, we construct the averaging operator and apply it to
the discrete scalar quantity to approximate the value at cell centers.
>>> Axc = mesh.average_edge_x_to_cell
>>> phi_c = Axc @ phi_x
And plot the results,
.. collapse:: Expand to show scripting for plot
>>> fig = plt.figure(figsize=(11, 5))
>>> ax1 = fig.add_subplot(121)
>>> v = np.r_[phi_x, np.zeros(mesh.nEy)] # create vector for plotting function
>>> mesh.plot_image(v, ax=ax1, v_type="Ex")
>>> ax1.set_title("Variable at x-edges", fontsize=16)
>>> ax2 = fig.add_subplot(122)
>>> mesh.plot_image(phi_c, ax=ax2, v_type="CC")
>>> ax2.set_title("Averaged to cell centers", fontsize=16)
>>> plt.show()
Below, we show a spy plot illustrating the sparsity and mapping
of the operator
.. collapse:: Expand to show scripting for plot
>>> fig = plt.figure(figsize=(9, 9))
>>> ax1 = fig.add_subplot(111)
>>> ax1.spy(Axc, ms=1)
>>> ax1.set_title("X-Edge Index", fontsize=12, pad=5)
>>> ax1.set_ylabel("Cell Index", fontsize=12)
>>> plt.show()
"""
if getattr(self, "_average_edge_x_to_cell", None) is None:
# The number of cell centers in each direction
n = self.vnC
if self.dim == 1:
self._average_edge_x_to_cell = speye(n[0])
elif self.dim == 2:
self._average_edge_x_to_cell = sp.kron(av(n[1]), speye(n[0]))
elif self.dim == 3:
self._average_edge_x_to_cell = kron3(av(n[2]), av(n[1]), speye(n[0]))
return self._average_edge_x_to_cell
@property
def average_edge_y_to_cell(self):
"""Averaging operator from y-edges to cell centers (scalar quantities).
This property constructs a 2nd order averaging operator that maps scalar
quantities from y-edges to cell centers. This averaging operator is
used when a discrete scalar quantity defined on y-edges must be
projected to cell centers. Once constructed, the operator is stored
permanently as a property of the mesh. *See notes*.
Returns
-------
(n_cells, n_edges_y) scipy.sparse.csr_matrix
The scalar averaging operator from y-edges to cell centers
Notes
-----
Let :math:`\\boldsymbol{\\phi_y}` be a discrete scalar quantity that
lives on y-edges. **average_edge_y_to_cell** constructs a discrete
linear operator :math:`\\mathbf{A_{yc}}` that projects
:math:`\\boldsymbol{\\phi_y}` to cell centers, i.e.:
.. math::
\\boldsymbol{\\phi_c} = \\mathbf{A_{yc}} \\, \\boldsymbol{\\phi_y}
where :math:`\\boldsymbol{\\phi_c}` approximates the value of the scalar
quantity at cell centers. For each cell, we are simply averaging
the values defined on its y-edges. The operation is implemented as a
matrix vector product, i.e.::
phi_c = Ayc @ phi_y
Examples
--------
Here we compute the values of a scalar function on the y-edges. We then create
an averaging operator to approximate the function at cell centers. We choose
to define a scalar function that is strongly discontinuous in some places to
demonstrate how the averaging operator will smooth out discontinuities.
We start by importing the necessary packages and defining a mesh.
>>> from discretize import TensorMesh
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> h = np.ones(40)
>>> mesh = TensorMesh([h, h], x0="CC")
Then we create a scalar variable on y-edges,
>>> phi_y = np.zeros(mesh.nEy)
>>> xy = mesh.edges_y
>>> phi_y[(xy[:, 1] > 0)] = 25.0
>>> phi_y[(xy[:, 1] < -10.0) & (xy[:, 0] > -10.0) & (xy[:, 0] < 10.0)] = 50.0
Next, we construct the averaging operator and apply it to
the discrete scalar quantity to approximate the value at cell centers.
>>> Ayc = mesh.average_edge_y_to_cell
>>> phi_c = Ayc @ phi_y
And plot the results,
.. collapse:: Expand to show scripting for plot
>>> fig = plt.figure(figsize=(11, 5))
>>> ax1 = fig.add_subplot(121)
>>> v = np.r_[np.zeros(mesh.nEx), phi_y] # create vector for plotting function
>>> mesh.plot_image(v, ax=ax1, v_type="Ey")
>>> ax1.set_title("Variable at y-edges", fontsize=16)
>>> ax2 = fig.add_subplot(122)
>>> mesh.plot_image(phi_c, ax=ax2, v_type="CC")
>>> ax2.set_title("Averaged to cell centers", fontsize=16)
>>> plt.show()
Below, we show a spy plot illustrating the sparsity and mapping
of the operator
.. collapse:: Expand to show scripting for plot
>>> fig = plt.figure(figsize=(9, 9))
>>> ax1 = fig.add_subplot(111)
>>> ax1.spy(Ayc, ms=1)
>>> ax1.set_title("Y-Edge Index", fontsize=12, pad=5)
>>> ax1.set_ylabel("Cell Index", fontsize=12)
>>> plt.show()
"""
if self.dim < 2:
return None
if getattr(self, "_average_edge_y_to_cell", None) is None:
# The number of cell centers in each direction
n = self.vnC
if self.dim == 2:
self._average_edge_y_to_cell = sp.kron(speye(n[1]), av(n[0]))
elif self.dim == 3:
self._average_edge_y_to_cell = kron3(av(n[2]), speye(n[1]), av(n[0]))
return self._average_edge_y_to_cell
@property
def average_edge_z_to_cell(self):
"""Averaging operator from z-edges to cell centers (scalar quantities).
This property constructs a 2nd order averaging operator that maps scalar
quantities from z-edges to cell centers. This averaging operator is
used when a discrete scalar quantity defined on z-edges must be
projected to cell centers. Once constructed, the operator is stored
permanently as a property of the mesh. *See notes*.
Returns
-------
(n_cells, n_edges_z) scipy.sparse.csr_matrix
The scalar averaging operator from z-edges to cell centers
Notes
-----
Let :math:`\\boldsymbol{\\phi_z}` be a discrete scalar quantity that
lives on z-edges. **average_edge_z_to_cell** constructs a discrete
linear operator :math:`\\mathbf{A_{zc}}` that projects
:math:`\\boldsymbol{\\phi_z}` to cell centers, i.e.:
.. math::
\\boldsymbol{\\phi_c} = \\mathbf{A_{zc}} \\, \\boldsymbol{\\phi_z}
where :math:`\\boldsymbol{\\phi_c}` approximates the value of the scalar
quantity at cell | |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pandas
from pandas.compat import string_types
from pandas.core.dtypes.cast import find_common_type
from pandas.core.dtypes.common import (
is_list_like,
is_numeric_dtype,
is_datetime_or_timedelta_dtype,
)
from pandas.core.index import ensure_index
from pandas.core.base import DataError
from modin.engines.base.frame.partition_manager import BaseFrameManager
from modin.error_message import ErrorMessage
from modin.backends.base.query_compiler import BaseQueryCompiler
class PandasQueryCompiler(BaseQueryCompiler):
"""This class implements the logic necessary for operating on partitions
with a Pandas backend. This logic is specific to Pandas."""
    def __init__(
        self, block_partitions_object, index, columns, dtypes=None, is_transposed=False
    ):
        """Build a query compiler over a partition manager plus axis metadata.

        Args:
            block_partitions_object: BaseFrameManager holding the partitions.
            index: row labels for the frame.
            columns: column labels for the frame.
            dtypes: optional precomputed dtypes used to seed the lazy cache.
            is_transposed: whether the underlying data is stored transposed.
        """
        assert isinstance(block_partitions_object, BaseFrameManager)
        self.data = block_partitions_object
        self.index = index
        self.columns = columns
        if dtypes is not None:
            # Seed the lazy dtype cache so it need not be recomputed
            self._dtype_cache = dtypes
        # Normalized to 0/1 rather than bool
        self._is_transposed = int(is_transposed)
    # Index, columns and dtypes objects
    _dtype_cache = None

    def _get_dtype(self):
        """Lazily compute and cache the per-column dtypes.

        Partition dtypes are gathered with a map step and reduced per
        column with pandas' find_common_type.
        """
        if self._dtype_cache is None:

            def dtype_builder(df):
                # Reduce each column of gathered dtypes to a common dtype
                return df.apply(lambda row: find_common_type(row.values), axis=0)

            map_func = self._prepare_method(
                self._build_mapreduce_func(lambda df: df.dtypes)
            )
            reduce_func = self._build_mapreduce_func(dtype_builder)
            # For now we will use a pandas Series for the dtypes.
            if len(self.columns) > 0:
                self._dtype_cache = (
                    self._full_reduce(0, map_func, reduce_func).to_pandas().iloc[0]
                )
            else:
                self._dtype_cache = pandas.Series([])
            # reset name to None because we use "__reduced__" internally
            self._dtype_cache.name = None
        return self._dtype_cache

    dtypes = property(_get_dtype)
    def compute_index(self, axis, data_object, compute_diff=True):
        """Computes the index after a number of rows have been removed.

        Note: In order for this to be used properly, the indexes must not be
        changed before you compute this.

        Args:
            axis: The axis to extract the index from.
            data_object: The new data object to extract the index from.
            compute_diff: True to use `self` to compute the index from self
                rather than data_object. This is used when the dimension of the
                index may have changed, but the deleted rows/columns are
                unknown.

        Returns:
            A new pandas.Index object.
        """

        def pandas_index_extraction(df, axis):
            # Pull row labels (axis 0) or column labels (axis 1) from a
            # partition; partitions without columns yield an empty Index.
            if not axis:
                return df.index
            else:
                try:
                    return df.columns
                except AttributeError:
                    return pandas.Index([])

        index_obj = self.index if not axis else self.columns
        old_blocks = self.data if compute_diff else None
        new_indices = data_object.get_indices(
            axis=axis,
            index_func=lambda df: pandas_index_extraction(df, axis),
            old_blocks=old_blocks,
        )
        # With compute_diff, positions are resolved against our own labels
        return index_obj[new_indices] if compute_diff else new_indices
def _validate_set_axis(self, new_labels, old_labels):
new_labels = ensure_index(new_labels)
old_len = len(old_labels)
new_len = len(new_labels)
if old_len != new_len:
raise ValueError(
"Length mismatch: Expected axis has %d elements, "
"new values have %d elements" % (old_len, new_len)
)
return new_labels
    # Cached axis labels; None until first assignment
    _index_cache = None
    _columns_cache = None

    def _get_index(self):
        """Return the cached row labels."""
        return self._index_cache

    def _get_columns(self):
        """Return the cached column labels."""
        return self._columns_cache

    def _set_index(self, new_index):
        """Set row labels, validating length when replacing existing ones."""
        if self._index_cache is None:
            self._index_cache = ensure_index(new_index)
        else:
            new_index = self._validate_set_axis(new_index, self._index_cache)
            self._index_cache = new_index

    def _set_columns(self, new_columns):
        """Set column labels, validating length when replacing existing ones."""
        if self._columns_cache is None:
            self._columns_cache = ensure_index(new_columns)
        else:
            new_columns = self._validate_set_axis(new_columns, self._columns_cache)
            self._columns_cache = new_columns

    columns = property(_get_columns, _set_columns)
    index = property(_get_index, _set_index)
# END Index, columns, and dtypes objects
# Internal methods
# These methods are for building the correct answer in a modular way.
# Please be careful when changing these!
def _prepare_method(self, pandas_func, **kwargs):
"""Prepares methods given various metadata.
Args:
pandas_func: The function to prepare.
Returns
Helper function which handles potential transpose.
"""
if self._is_transposed:
def helper(df, internal_indices=[]):
if len(internal_indices) > 0:
return pandas_func(
df.T, internal_indices=internal_indices, **kwargs
)
return pandas_func(df.T, **kwargs)
else:
def helper(df, internal_indices=[]):
if len(internal_indices) > 0:
return pandas_func(df, internal_indices=internal_indices, **kwargs)
return pandas_func(df, **kwargs)
return helper
def numeric_columns(self, include_bool=True):
"""Returns the numeric columns of the Manager.
Returns:
List of index names.
"""
columns = []
for col, dtype in zip(self.columns, self.dtypes):
if is_numeric_dtype(dtype) and (
include_bool or (not include_bool and dtype != np.bool_)
):
columns.append(col)
return columns
    def numeric_function_clean_dataframe(self, axis):
        """Preprocesses numeric functions to clean dataframe and pick numeric indices.

        Args:
            axis: '0' if columns and '1' if rows.

        Returns:
            Tuple with return value(if any), indices to apply func to & cleaned Manager.
        """
        result = None
        query_compiler = self
        # If no numeric columns and over columns, then return empty Series
        if not axis and len(self.index) == 0:
            result = pandas.Series(dtype=np.int64)

        nonnumeric = [
            col
            for col, dtype in zip(self.columns, self.dtypes)
            if not is_numeric_dtype(dtype)
        ]
        if len(nonnumeric) == len(self.columns):
            # If over rows and no numeric columns, return this
            if axis:
                result = pandas.Series([np.nan for _ in self.index])
            else:
                result = pandas.Series([0 for _ in self.index])
        else:
            # Drop non-numeric columns so the numeric function only sees
            # numeric data
            query_compiler = self.drop(columns=nonnumeric)
        return result, query_compiler
# END Internal methods
# Metadata modification methods
def add_prefix(self, prefix, axis=1):
if axis == 1:
new_columns = self.columns.map(lambda x: str(prefix) + str(x))
if self._dtype_cache is not None:
new_dtype_cache = self._dtype_cache.copy()
new_dtype_cache.index = new_columns
else:
new_dtype_cache = None
new_index = self.index
else:
new_index = self.index.map(lambda x: str(prefix) + str(x))
new_columns = self.columns
new_dtype_cache = self._dtype_cache
return self.__constructor__(
self.data, new_index, new_columns, new_dtype_cache, self._is_transposed
)
def add_suffix(self, suffix, axis=1):
if axis == 1:
new_columns = self.columns.map(lambda x: str(x) + str(suffix))
if self._dtype_cache is not None:
new_dtype_cache = self._dtype_cache.copy()
new_dtype_cache.index = new_columns
else:
new_dtype_cache = None
new_index = self.index
else:
new_index = self.index.map(lambda x: str(x) + str(suffix))
new_columns = self.columns
new_dtype_cache = self._dtype_cache
return self.__constructor__(
self.data, new_index, new_columns, new_dtype_cache, self._is_transposed
)
# END Metadata modification methods
# Copy
# For copy, we don't want a situation where we modify the metadata of the
# copies if we end up modifying something here. We copy all of the metadata
# to prevent that.
def copy(self):
return self.__constructor__(
self.data.copy(),
self.index.copy(),
self.columns.copy(),
self._dtype_cache,
self._is_transposed,
)
# END Copy
# Append/Concat/Join (Not Merge)
# The append/concat/join operations should ideally never trigger remote
# compute. These operations should only ever be manipulations of the
# metadata of the resulting object. It should just be a simple matter of
# appending the other object's blocks and adding np.nan columns for the new
# columns, if needed. If new columns are added, some compute may be
# required, though it can be delayed.
#
# Currently this computation is not delayed, and it may make a copy of the
# DataFrame in memory. This can be problematic and should be fixed in the
# future. TODO (devin-petersohn): Delay reindexing
def _join_index_objects(self, axis, other_index, how, sort=True):
"""Joins a pair of index objects (columns or rows) by a given strategy.
Args:
axis: The axis index object to join (0 for columns, 1 for index).
other_index: The other_index to join on.
how: The type of join to join to make (e.g. right, left).
Returns:
Joined indices.
"""
if isinstance(other_index, list):
joined_obj = self.columns if not axis else self.index
# TODO: revisit for performance
for obj in other_index:
joined_obj = joined_obj.join(obj, how=how)
return joined_obj
if not axis:
return self.columns.join(other_index, how=how, sort=sort)
else:
return self.index.join(other_index, how=how, sort=sort)
def join(self, other, **kwargs):
"""Joins a list or two objects together.
Args:
other: The other object(s) to join on.
Returns:
Joined objects.
"""
if not isinstance(other, list):
other = [other]
return self._join_list_of_managers(other, **kwargs)
    def concat(self, axis, other, **kwargs):
        """Concatenates two objects together.

        Args:
            axis: The axis index object to join (0 for columns, 1 for index).
            other: The other_index to concat with.

        Returns:
            Concatenated objects.
        """
        return self._append_list_of_managers(other, axis, **kwargs)
def _append_list_of_managers(self, others, axis, **kwargs):
if not isinstance(others, list):
others = [others]
if self._is_transposed:
# If others are transposed, we handle that behavior correctly in
# `copartition`, but it is not handled correctly in the case that `self` is
# transposed.
return (
self.transpose()
._append_list_of_managers(
[o.transpose() for o in others], axis ^ 1, **kwargs
)
.transpose()
)
assert all(
isinstance(other, type(self)) for other in others
), "Different Manager objects are being used. This is not allowed"
sort = kwargs.get("sort", None)
join = kwargs.get("join", "outer")
ignore_index = kwargs.get("ignore_index", False)
new_self, to_append, joined_axis = self.copartition(
axis ^ 1,
others,
join,
sort,
force_repartition=any(obj._is_transposed for obj in [self] + others),
)
new_data = new_self.concat(axis, to_append)
if axis == 0:
# The indices will be appended to form the final index.
# If `ignore_index` is true, we create a RangeIndex that is the
# length of all of the index objects combined. This is the same
# behavior as pandas.
new_index = (
self.index.append([other.index for other in others])
if not ignore_index
else pandas.RangeIndex(
len(self.index) + sum(len(other.index) for other in others)
)
)
return self.__constructor__(new_data, new_index, joined_axis)
else:
# The columns will | |
# gh_stars: 1-10
# Copyright (C) 2021 University of Glasgow
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import csv
import email.header
import email.utils
import os
import re
import string
import sys
import pprint
import json
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from dataclasses import dataclass, field
from pathlib import Path
from ietfdata.datatracker import *
from ietfdata.datatracker_ext import *
from ietfdata.mailarchive import *
from ietfdata.mailhelper_headerdata import *
from ietfdata.mailhelper_datatracker import *
# Datatracker API client; used for all mailing-list/group/meeting lookups below.
dt = DataTrackerExt()
# archive.download_all_messages()
# =================================================================================================================================
# Find and categorise the mailing lists:
# `lists` maps a lowercased list name to {"name", "category", "active"}.
# Every list starts out in category "other" and inactive; the sections below
# re-categorise them into ietf-admin / ietf-technical / irtf / iab.
lists = {}
for ml in dt.mailing_lists():
    lists[ml.name.lower()] = {"name": ml.name.lower(), "category" : "other", "active": False}
print(f"Found {len(lists)} mailing lists")
print("")
# Per-meeting administrative lists have used many ad-hoc naming patterns over
# the years; probe every known variant for each IETF plenary meeting.
# A meeting's lists are marked active only if it is the next IETF meeting.
print(f"Categorising meeting lists:")
next_meeting = dt.next_ietf_meeting()
for meeting in dt.meetings(meeting_type = dt.meeting_type_from_slug("ietf")):
    if next_meeting is not None and next_meeting.number == meeting.number:
        active = True
    else:
        active = False
    # Known historic naming variants for meeting-related lists:
    for list_name in [
            F"ietf-{meeting.number}",
            F"ietf{meeting.number}bnb",
            F"ietf{meeting.number}-1st-timers",
            F"ietf{meeting.number}-bitsnbites",
            F"ietf{meeting.number}-sponsor-info",
            F"ietf{meeting.number}-team",
            F"ietf{meeting.number}-ieee",
            F"ietf{meeting.number}planning",
            F"{meeting.number}_attendees",
            F"{meeting.number}_all",
            F"{meeting.number}-1st-timers",
            F"{meeting.number}all",
            F"{meeting.number}attendees",
            F"{meeting.number}companion",
            F"{meeting.number}companions",
            F"{meeting.number}guestpass",
            F"{meeting.number}hackathon",
            F"{meeting.number}onsite",
            F"{meeting.number}newcomers",
            F"{meeting.number}reg",
            F"{meeting.number}remote",
            F"{meeting.number}remote-all",
            F"{meeting.number}-mentees",
            F"{meeting.number}-mentors",
            F"{meeting.number}-newcomers",
        ]:
        if list_name in lists:
            lists[list_name]["category"] = "ietf-admin"
            lists[list_name]["active"] = active
            print(f"  {list_name:25} -> {lists[list_name]['category']:14} {active:1} IETF {meeting.number})")
print("")
print(f"Categorising IAB lists:")
# For each IAB group, derive the mailing-list name: prefer the archive URL
# (two known prefix forms), then the local part of the list email address,
# and finally fall back to the group acronym.
for group in dt.groups(parent = dt.group_from_acronym("iab")):
    if group.list_archive.startswith("https://mailarchive.ietf.org/arch/browse/"):
        # slice off the 41-character URL prefix and the trailing character
        # (presumably a "/" -- the list name is the last path component)
        list_name = group.list_archive[41:-1]
    elif group.list_archive.startswith("https://mailarchive.ietf.org/arch/search/?email_list="):
        # slice off the 53-character URL prefix; the query value is the list name
        list_name = group.list_archive[53:]
    elif group.list_email.count("@") == 1:
        list_name, domain = group.list_email.split("@")
    else:
        list_name = group.acronym
    group_state = dt.group_state(group.state).slug
    # groups that are active, in BoF state, or proposed count as active
    if group_state == "active" or group_state == "bof" or group_state == "proposed":
        active = True
    else:
        active = False
    # both the main list and its "-discuss" companion belong to the IAB
    for ln in [list_name, F"{list_name}-discuss"]:
        if ln in lists:
            lists[ln]["category"] = "iab"
            lists[ln]["active"] = active
            print(f"  {ln:25} -> {lists[ln]['category']:14} {active:1} {group.name}")
print("")
print(f"Categorising IRTF lists:")
# Same list-name derivation as for the IAB groups above, with two additional
# legacy irtf.org archive URL forms.
for group in dt.groups(parent = dt.group_from_acronym("irtf")):
    if group.list_archive.startswith ("http://www.irtf.org/mail-archive/web/"):
        # slice off the 37-character prefix and the trailing character
        list_name = group.list_archive[37:-1]
    elif group.list_archive.startswith("https://irtf.org/mail-archive/web/"):
        # slice off the 34-character prefix and the trailing character
        list_name = group.list_archive[34:-1]
    elif group.list_archive.startswith("https://mailarchive.ietf.org/arch/browse/"):
        list_name = group.list_archive[41:-1]
    elif group.list_archive.startswith("https://mailarchive.ietf.org/arch/search/?email_list="):
        list_name = group.list_archive[53:]
    elif group.list_email.count("@") == 1:
        list_name, domain = group.list_email.split("@")
    else:
        list_name = group.acronym
    group_state = dt.group_state(group.state).slug
    # groups that are active, in BoF state, or proposed count as active
    if group_state == "active" or group_state == "bof" or group_state == "proposed":
        active = True
    else:
        active = False
    if list_name in lists:
        lists[list_name]["category"] = "irtf"
        lists[list_name]["active"] = active
        print(f"  {list_name:25} -> {lists[list_name]['category']:14} {active:1} {group.name}")
print("")
print(f"Categorising IETF area, working group, BoF, and directorate lists:")
for area in dt.groups(parent = dt.group_from_acronym("iesg")):
    print(F"  {area.name} ({area.acronym})")
    # Area-wide lists ("<area>-area", "<area>-discuss"):
    for ln in [F"{area.acronym}-area", F"{area.acronym}-discuss"]:
        if ln in lists:
            lists[ln]["category"] = "ietf-technical"
            # NOTE(review): at this point `active` has not been set for this
            # area -- it still holds the value left over from the previous loop
            # (IRTF groups, or the previous area's last group). Probably meant
            # to be True here; confirm intent.
            lists[ln]["active"] = active
            print(f"  {ln:25} -> {lists[ln]['category']:14} {active:1} {area.name}")
    # Per-working-group lists, with the same name derivation as above:
    for group in dt.groups(parent = area):
        if group.list_archive.startswith("https://mailarchive.ietf.org/arch/browse/"):
            list_name = group.list_archive[41:-1]
        elif group.list_archive.startswith("https://mailarchive.ietf.org/arch/search/?email_list="):
            list_name = group.list_archive[53:]
        elif group.list_email.count("@") == 1:
            list_name, domain = group.list_email.split("@")
        else:
            list_name = group.acronym
        group_state = dt.group_state(group.state).slug
        if group_state == "active" or group_state == "bof" or group_state == "proposed":
            active = True
        else:
            active = False
        if list_name in lists:
            # groups in the "gen" and "usv" areas are treated as administrative
            # rather than technical (presumably process-oriented areas -- confirm)
            if area.acronym == "gen" or area.acronym == "usv":
                lists[list_name]["category"] = "ietf-admin"
                lists[list_name]["active"] = active
            else:
                lists[list_name]["category"] = "ietf-technical"
                lists[list_name]["active"] = active
            print(f"  {list_name:25} -> {lists[list_name]['category']:14} {active:1} {group.name}")
            # Any still-uncategorised list whose name extends this one with a
            # "-" suffix inherits this list's category and activity.
            for ml in lists:
                if ml.startswith(F"{list_name}-") and lists[ml]["category"] == "other":
                    lists[ml]["category"] = lists[list_name]["category"]
                    lists[ml]["active"] = lists[list_name]["active"]
                    print(f"  {ml:25} -> {lists[ml]['category']:14} {active:1} {group.name} (inferred)")
print("")
print(f"Categorising IETF administrative lists:")
# Name-based heuristics for the remaining lists; only lists still in
# category "other" are re-categorised (first matching rule wins).
for ml in lists:
    if ml.startswith("llc") or ml.startswith("iaoc") or ml.startswith("isoc") or ml.startswith("rsoc") \
       or ml.startswith("icann") or ml.startswith("interim-board") or ml.startswith("ipr-") \
       or ml.startswith("iesg") or ml.startswith("iab") or ml.startswith("iana"):
        categorise = True
        category = "ietf-admin"
    elif ml.startswith("anrw") or ml.startswith("anrp"):
        categorise = True
        category = "irtf"
    elif ml.endswith("-vote") or ml.endswith("-coord"):
        categorise = True
        category = "ietf-admin"
    elif "workshop" in ml or "program" in ml:
        categorise = True
        category = "iab"
    # NOTE(review): "datatravker" looks like a typo for "datatracker" --
    # confirm against the actual list names before changing it.
    elif "nomcom" in ml or "-llc-" in ml or "venue" in ml or "datatravker" in ml or "3gpp" in ml or "sponsorship" in ml:
        categorise = True
        category = "ietf-admin"
    else:
        categorise = False
    if categorise and lists[ml]["category"] == "other":
        lists[ml]["category"] = category
        lists[ml]["active"] = True
        # NOTE(review): the printed `active` value is stale (left over from the
        # loops above); this list has just been marked active=True regardless.
        print(f"  {ml:25} -> {lists[ml]['category']:14} {active:1}")
print("")
print(f"Categorising miscellaneous lists:")
# Hand-curated one-off lists with fixed category assignments.
# NOTE(review): unlike the sections above, this indexes `lists` directly, so a
# missing list name would raise KeyError -- confirm these names always exist.
for list_name, category, active in [
        ("admin-discuss", "ietf-admin", True),
        ("ietf-announce", "ietf-admin", True),
        ("bofchairs", "ietf-admin", True),
        ("recentattendees", "ietf-admin", True),
        ("training-wgchairs", "ietf-admin", True),
        ("wgchairs", "ietf-admin", True),
        ("irtf-announce", "irtf", True),
        ("irtf-discuss", "irtf", True),
        ("model-t", "iab", True),
        ("rfc-markdown", "ietf-admin", True),
    ]:
    lists[list_name]["category"] = category
    lists[list_name]["active"] = active
    print(f"  {list_name:25} -> {lists[list_name]['category']:14} {active:1}")
print("")
# Print a summary of the list names in every (category, activity) bucket,
# wrapping the output at roughly 80 columns.
for category in ["ietf-admin", "ietf-technical", "irtf", "iab", "other"]:
    for active in [True, False]:
        active_label = "(active)" if active else "(inactive)"
        print(f"Mailing lists in category {category} {active_label}:")
        print("  ", end="")
        line_len = 3
        for name in lists:
            entry = lists[name]
            if entry["category"] != category or entry["active"] != active:
                continue
            line_len += len(name) + 1
            if line_len > 80:
                # wrap: start a fresh indented line for this name
                print("")
                print("  ", end="")
                line_len = 3 + len(name) + 1
            print(f"{name} ", end="")
        print("")
        print("")
# =================================================================================================================================
# Walk every message of every mailing list, and for messages sent in 2020
# (excluding spam) record the sender address, the matching datatracker person,
# and which lists/categories each address/person posted to.
print("")
print("Analysing Mailing Lists:")
archive = MailArchive(cache_dir=Path("cache"))
addrs = {}                  # sender addresses seen (dict used as an ordered set)
people = {}                 # datatracker person id -> person object
person_for_addr = {}        # "<name>+<addr>" -> person (or None); memoises datatracker lookups
addr_no_person = {}         # addresses with no matching datatracker person (dict used as a set)
lists_for_email = {}        # address -> mailing lists posted to
lists_for_person = {}       # person id -> mailing lists posted to
categories_for_email = {}   # address -> list categories posted to
categories_for_person = {}  # person id -> list categories posted to
total_spam = 0
total_mail = 0
index = 1
for ml_name in sorted(lists):
    print(F"{index:5d} /{len(lists):5d} {ml_name:40}", end="")
    index += 1
    for msg_id, msg in archive.mailing_list(ml_name).messages():
        date_str = msg.message["Date"]
        try:
            date = email.utils.parsedate_to_datetime(date_str)
            year = date.timetuple().tm_year
        except:
            # NOTE(review): bare except also swallows KeyboardInterrupt etc.;
            # catching Exception (or the specific parse errors) would be safer.
            print(f"[{msg_id:06}] can't parse date {date_str}")
            year = 0
        if year == 2020:
            total_mail += 1
            # messages flagged by the spam filter are counted but not analysed
            spam = msg.message["X-Spam-Flag"]
            if spam is not None and spam == "YES":
                total_spam += 1
            else:
                #try:
                name, addr = email.utils.parseaddr(msg.message["from"])
                if addr != "":
                    # Record email address:
                    if addr not in addrs:
                        addrs[addr] = addr
                    # Record mailing lists for email address:
                    if addr not in lists_for_email:
                        lists_for_email[addr] = [ml_name]
                    else:
                        if not ml_name in lists_for_email[addr]:
                            lists_for_email[addr].append(ml_name)
                    # Record categories for email address:
                    if addr not in categories_for_email:
                        categories_for_email[addr] = [lists[ml_name]["category"]]
                    else:
                        if not lists[ml_name]["category"] in categories_for_email[addr]:
                            categories_for_email[addr].append(lists[ml_name]["category"])
                    # Record person (cache keyed on name+address so each unique
                    # sender is looked up in the datatracker only once):
                    if F"{name}+{addr}" not in person_for_addr:
                        p = dt.person_from_name_email(name, addr)
                        person_for_addr[F"{name}+{addr}"] = p
                    p = person_for_addr[F"{name}+{addr}"]
                    if p is not None:
                        people[p.id] = p
                        # Record Mailing lists for person:
                        if p.id not in lists_for_person:
                            lists_for_person[p.id] = [ml_name]
                        else:
                            if not ml_name in lists_for_person[p.id]:
                                lists_for_person[p.id].append(ml_name)
                        # Record categories for person:
                        if p.id not in categories_for_person:
                            categories_for_person[p.id] = [lists[ml_name]["category"]]
                        else:
                            if not lists[ml_name]["category"] in categories_for_person[p.id]:
                                categories_for_person[p.id].append(lists[ml_name]["category"])
                    else:
                        addr_no_person[addr] = addr
                #except:
                #    print(f"[{msg_id:06}] can't parse From: {msg.message['From']}")
    print(F" {len(addrs):6} unique emails; {len(addr_no_person)} unmapped; {total_mail} messages; {total_spam} spam")
# =================================================================================================================================
# Save Results:
# One value per line for the address/person files; pretty-printed JSON for
# the address/person -> lists/categories mapping files.
with open(Path("2020-emails.txt"), "w") as outf:
    for e in addrs.values():
        print(e, file=outf)
with open(Path("2020-emails-no-person.txt"), "w") as outf:
    for e in addr_no_person.values():
        print(e, file=outf)
with open(Path("2020-people.txt"), "w") as outf:
    for p in people.values():
        print(F"{p.id:8} {p.name}", file=outf)
with open(Path("2020-lists-for-email.txt"), "w") as outf:
    print(json.dumps(lists_for_email, sort_keys=True, indent=2), file=outf)
with open(Path("2020-lists-for-person.txt"), "w") as outf:
    print(json.dumps(lists_for_person, sort_keys=True, indent=2), file=outf)
with open(Path("2020-categories-for-email.txt"), "w") as outf:
    print(json.dumps(categories_for_email, sort_keys=True, indent=2), file=outf)
with open(Path("2020-categories-for-person.txt"), "w") as outf:
    print(json.dumps(categories_for_person, sort_keys=True, indent=2), file=outf)
for category in ["ietf-admin", "ietf-technical", "irtf", "iab", "other"]:
print("")
print(f"Finding people only in category {category}:")
with open(Path(F"2020-people-only-in-category-{category}.txt"), "w") as outf:
for p in people.values():
if categories_for_person[p.id] == [category]:
print(f" {p.name}")
print(f"{p.id:8} | {p.name:36} | ", end="", file=outf)
for l in lists_for_person[p.id]:
print(f"{l} ", end="", file=outf)
print("", file=outf)
print("")
print(f"Finding drafts for people only in category {category}")
with open(Path(F"2020-drafts-for-people-only-in-category-{category}.txt"), "w") as outf:
for p in people.values():
if categories_for_person[p.id] == [category]:
print(f" {p.name}")
print(f"{p.id:8} | {p.name:36} | ", end="", file=outf)
drafts = {}
for da in dt.documents_authored_by_person(p):
d = dt.document(da.document)
if dt.document_type(d.type) == dt.document_type_from_slug("draft"):
print(f" {d.name}")
for submit_uri in d.submissions:
submission = dt.submission(submit_uri)
if submission is | |
the 2.5th, 50th and 97.5th percentiles of parameter distributions are extracted
# params_cred then contains the tuple (median (50th), lower bound (2.5th), upper bound (97.5th))
# to provide a 95%-credible interval
params_cred = tuple(map(lambda v: (v[1], v[0], v[2]),
zip(*np.percentile(samples, [2.5, 50, 97.5], axis=0))))
return params_cred
@staticmethod
def get_model_evidence(sampler_result):
"""Obtain logarithmic evidence value and its error estimate from the
nested sampling result.
`Note`: After running a memocell estimation there is no need to run this
method, one can simply access the logarithmic model evidence and its error
with `est.bay_est_log_evidence` and `est.bay_est_log_evidence_error` for the
estimation instance `est`.
Parameters
----------
sampler_result : dynesty.results.Results
Nested sampling result of a memocell estimation. Typically available at
`est.bay_nested_sampler_res`.
Returns
-------
log_evid_dynesty : float
Logarithmic evidence of the estimated model. Typically available at
`est.bay_est_log_evidence`.
log_evid_err_dynesty : float
Error of the logarithmic evidence of the estimated model. Typically
available at `est.bay_est_log_evidence_error`.
Examples
--------
>>> # est is a memocell estimation instance obtained by est.estimate(...)
>>> est.get_model_evidence(est.bay_nested_sampler_res)
(28.139812540432732, 0.11225503808864087)
>>> est.bay_est_log_evidence
28.139812540432732
>>> est.bay_est_log_evidence_error
0.11225503808864087
"""
# value of log evidence (logZ) (last entry of nested sampling results)
log_evid_dynesty = sampler_result.logz[-1]
# estimate of the statistical uncertainty on logZ
log_evid_err_dynesty = sampler_result.logzerr[-1]
return log_evid_dynesty, log_evid_err_dynesty
@staticmethod
def get_maximal_log_likelihood(sampler_result):
"""Obtain the maximal logarithmic likelihood value from the nested
sampling result.
`Note`: After running a memocell estimation there is no need to run this
method, one can simply access the maximal log-likelihood
with `est.bay_est_log_likelihood_max` for the estimation instance `est`.
Parameters
----------
sampler_result : dynesty.results.Results
Nested sampling result of a memocell estimation. Typically available at
`est.bay_nested_sampler_res`.
Returns
-------
logl_max : float
Maximal logarithmic likelihood value of the estimated model.
Typically available at `est.bay_est_log_likelihood_max`.
Examples
--------
>>> # est is a memocell estimation instance obtained by est.estimate(...)
>>> est.get_maximal_log_likelihood(est.bay_nested_sampler_res)
35.48531419345989
>>> est.bay_est_log_likelihood_max
35.48531419345989
"""
# get the value of the maximal log likelihood as last entry of nested sampling results
return sampler_result.logl[-1]
@staticmethod
def compute_bayesian_information_criterion(num_data, num_params, log_likelihood_max):
"""Compute the Bayesian information criterion (BIC). Calculation is based
on :math:`\\mathrm{BIC} = k \\cdot \\mathrm{ln}(n) - 2 \\, \\mathrm{ln}(L_{max})`
where :math:`k` is the number of parameters (`num_params`), :math:`n` is the number of
data points (`num_data`) and :math:`\\mathrm{ln}(L_{max})` is the maximal
log-likelihood value (`log_likelihood_max`).
`Note`: After running a memocell estimation there is no need to run this
method, one can simply access the BIC with `est.bay_est_bayesian_information_criterion`
for the estimation instance `est`.
Parameters
----------
num_data : int or float
Number of data points. Typically available at `est.data_num_values`.
num_params : int or float
Number of estimated parameters. Typically available at `est.bay_nested_ndims`.
log_likelihood_max : float
Maximal logarithmic likelihood value of the estimated model.
Typically available at `est.bay_est_log_likelihood_max`.
Returns
-------
bic : float
Bayesian information criterion of the estimated model.
Typically available at `est.bay_est_bayesian_information_criterion`.
Examples
--------
>>> # est is a memocell estimation instance obtained by est.estimate(...)
>>> est.compute_bayesian_information_criterion(
>>> est.data_num_values,
>>> est.bay_nested_ndims,
>>> est.bay_est_log_likelihood_max)
-65.55452798471536
>>> est.bay_est_bayesian_information_criterion
-65.55452798471536
>>> est.compute_bayesian_information_criterion(15, 2, -35.49)
-65.56389959779558
"""
# the BIC (bayesian_information_criterion) is defined as
# BIC = ln(n) k - 2 ln(Lmax)
# with n being the number of data points, k the number of estimated
# parameters, Lmax the maximal likelihood and ln() the natural logarithm
return np.log(num_data) * num_params - 2.0 * log_likelihood_max
@staticmethod
def compute_log_evidence_from_bic(bic):
"""Under certain assumptions one can approximate the logarithmic evidence
value with :math:`\\mathrm{ln}(p(D | M)) \\approx -\\frac{1}{2} \\mathrm{BIC}` where
:math:`M` is the model, :math:`D` is the data and :math:`\\mathrm{BIC}` is
the Bayesian information criterion, see
`BIC (wiki) <https://en.wikipedia.org/wiki/Bayesian_information_criterion>`_.
`Note`: This calculation is more a consistency check and can be accessed
with `est.bay_est_log_evidence_from_bic` after a memocell estimation for `est`.
The more accurate value of the logarithmic evidence from the nested sampling
should be preferred for serious tasks (at `est.bay_est_log_evidence`).
Parameters
----------
bic : float
Bayesian information criterion of the estimated model.
Typically available at `est.bay_est_bayesian_information_criterion`.
Returns
-------
log_evidence_from_bic : float
Logarithmic evidence of the estimated model, approximated from the BIC.
Typically available at `est.log_evidence_from_bic`.
Examples
--------
>>> # est is a memocell estimation instance obtained by est.estimate(...)
>>> est.compute_log_evidence_from_bic(est.bay_est_bayesian_information_criterion)
32.77726399235768
>>> est.bay_est_log_evidence_from_bic
32.77726399235768
>>> # compare with the more accurate log evid from nested sampling
>>> est.bay_est_log_evidence
28.139812540432732
"""
# under certain assumptions the log evidence might be approximated from
# the BIC (bayesian_information_criterion) via evidence ≈ exp(-BIC / 2)
return - 0.5 * bic
def prior_transform(self, theta_unit):
"""Transform parameter values :math:`\\theta` from the unit hypercube form
(as used in the nested sampling) to the original prior space.
For uniform parameter priors (as generally used) this transformation is
achieved with the respective lower and upper parameter bounds
:math:`[b_l, b_u]` as
:math:`\\theta_{\\mathrm{orig}} = \\theta_{\\mathrm{unit}} (b_u - b_l) + b_l`.
Parameter bounds can be accessed with `est.net_theta_bounds`.
Parameters
----------
theta_unit : 1d numpy.ndarray
Values for parameters :math:`\\theta` in unit hypercube space.
Returns
-------
theta_orig : 1d numpy.ndarray
Values for parameters :math:`\\theta` in original prior space.
Examples
--------
>>> # est is a memocell estimation instance obtained by est.estimate(...)
>>> est.net_theta_bounds
array([[0. , 0.15],
[0. , 0.15]])
>>> theta_unit = np.array([0.2, 0.5])
>>> est.prior_transform(theta_unit)
array([0.03 , 0.075])
"""
# we receive theta here in the unit hypercube form
# and have to transform it back into the true parametrisation
# since we use uniform priors we have to do in principle:
# theta_true = theta_unit * (upper_bound-lower_bound) + lower_bound
# if the lower_bound is zero, we would simply have:
# theta_true = theta_unit * upper_bound
return theta_unit * (self.net_theta_bounds[:, 1] - self.net_theta_bounds[:, 0]) + self.net_theta_bounds[:, 0]
# def log_prior(self, theta):
# """docstring for ."""
# # st = time.time()
#
# # log_prior is based on a uniform prior distribution with finite support
# # on_support is a boolean; True if all parameters/theta's are on the support (prior > 0) else False (prior = 0)
# on_support = np.all(( self.net_theta_bounds[:, 0] <= theta ) & ( theta <= self.net_theta_bounds[:, 1] ))
#
# # log_prior returns its log value > -infinity (if on_support) or -infinity (if not on_support)
# if on_support:
# # et = time.time()
# # print('log_prior (ms)', (et - st)*1000)
# return self.bay_log_prior_supp
# else:
# # et = time.time()
# # print('log_prior (ms)', (et - st)*1000)
# return -np.inf
def log_likelihood(self, theta_values, moment_initial_values,
time_values, time_ind,
mean_data, var_data, cov_data):
"""Compute the logarithmic likelihood :math:`\\mathrm{ln}(\\mathcal{L(\\theta)}) =
\\mathrm{ln}(p(D | \\theta, M))` for parameter values :math:`\\theta` of a given
model :math:`M` and given data :math:`D`. This method is used in the nested
sampling.
The computation is based on the following formula. Under the assumption
of :math:`r` independent and normally distributed errors, the likelihood
function is given by :math:`\\mathcal{L(\\theta)} = p(D | \\theta, M) =
\\prod_{i=1}^{r} f_{\\mu_i, \\sigma_i}(x_i)`, where
- :math:`D = (x_1,\\,..., x_r)` are the data points,
- :math:`\\Sigma = (\\sigma_1,\\,..., \\sigma_r)` are the data standard errors,
- :math:`M_{\\theta} = (\\mu_1,\\,..., \\mu_r)` are the model evaluations and
- :math:`f_{\\mu, \\sigma}(x) = \\frac{1}{\\sqrt{2\\pi\\sigma^2}}\\,\\mathrm{exp}\\big(-\\frac{1}{2} \\big( \\frac{x-\\mu}{\\sigma} \\big)^2\\big)` is the normal density.
The log-likelihood is then given by :math:`\\mathrm{ln}(\\mathcal{L(\\theta)})
=-\\tfrac{1}{2} \\sum_{i=1}^{r} \\big( \\frac{x_i - \\mu_i}{\\sigma_i}
\\big)^2 \\,+\\, \\eta`, where :math:`\\eta` is the model-independent
normalisation term :math:`\\eta` computed as :math:`\\eta = -\\tfrac{1}{2}
\\sum_{i=1}^{r} \\mathrm{ln}(2 \\pi \\sigma_{i}^{2})`; also see at the
`compute_log_likelihood_norm` method.
Parameters
----------
theta_values : 1d numpy.ndarray
Values for parameters :math:`\\theta` in the model order (according to `net.net_theta_symbolic`
via `net.net_rates_identifier`); passed to a moment simulation method.
moment_initial_values : dict
Initial values for all moments of the hidden network layer;
passed to a moment simulation method. Typically available at
`est.net_simulation.sim_moments.moment_initial_values`; order of the
moments corresponds to
`est.net_simulation.sim_moments.moment_order_hidden`.
time_values : 1d numpy.ndarray
Time values for which model simulations are solved;
passed to a moment simulation method. After estimation
initialisation available at `est.net_time_values`.
time_ind : slice or tuple of int
Indexing information to read out model simulations at the time points
of the data to allow comparison. After estimation
initialisation available at `est.net_time_ind`.
mean_data : numpy.ndarray
Data mean statistics and standard errors with shape
(2, `number of | |
(tuple(succStatements), state[1])
results.append((whitespace + "Or Introduction: " + disjunction, succState, cost))
# We deliberately hamper this rule because it is not very interesting.
if len(sentence) == 1:
for symbol in self.symbols:
disjunction = sentence + " || " + symbol
if disjunction not in sentences:
addDisjunction(disjunction)
not_disjunction = sentence + " || ~" + symbol
if not_disjunction not in sentences:
addDisjunction(not_disjunction)
# Or Elimination
sentenceCopy = statement[0]
disjunct = [] # List of statements connected by "||"
or_index = sentenceCopy.find(" || ")
if or_index > -1:
while or_index > -1:
atom = sentenceCopy[:(or_index)]
# If the parentheses are balanced for the atom, we take it to properly be part of a disjunction
if parensBalanced(atom):
disjunct.append(atom)
sentenceCopy = sentenceCopy[(or_index + 4):]
or_index = sentenceCopy.find(" || ")
# Loop and a half
disjunct.append(sentenceCopy)
disjuncts.add(tuple(disjunct))
# Negation Introduction
sentence = statement[0]
isImplication, antecedent, consequent = processImplication(sentence)
if isImplication:
if consequent[0] == "~":
if antecedent not in phi_to_not_psi.keys():
phi_to_not_psi[antecedent] = list()
phi_to_not_psi[antecedent].append(consequent[1:])
else:
if antecedent not in phi_to_psi.keys():
phi_to_psi[antecedent] = list()
phi_to_psi[antecedent].append(consequent)
# Negation Elimination
if sentence[:2] == "~~":
newSentence = stripOuterParens(sentence[2:])
if newSentence not in sentences:
succStatements = list(state[0])
succStatements.append((newSentence, "NE", state[1]))
succState = (tuple(succStatements), state[1])
results.append((whitespace + "Negation Elimination: " + newSentence, succState, 1))
# Implication Elimination
# Reuses the implication processing from the NI step.
if isImplication:
# Loops through all statements and sees if antecedent appears enabling us to derive consequent
for statement2 in statements:
if statement2 == statement:
continue
sentence2 = statement2[0]
if sentence2 == antecedent and consequent not in sentences:
succStatements = list(state[0])
succStatements.append((consequent, "IE", state[1]))
succState = (tuple(succStatements), state[1])
results.append((whitespace + "Implication Elimination: " + consequent, succState, 1))
# Biconditional Elimination
def addBicondElimStatement(lhs, rhs, statements):
newImplication = lhs + " -> " + rhs
if newImplication not in sentences:
succStatements = list(state[0])
succStatements.append((newImplication, "BE", state[1]))
succState = (tuple(succStatements), state[1])
results.append((whitespace + "Biconditional Elimination: " + newImplication, succState, 1))
if "<->" in self.connectiveSet:
sentence = statement[0]
bicond_index = sentence.find(" <-> ")
while bicond_index > 0:
first = sentence[:(bicond_index)]
second = sentence[(bicond_index + 5):]
addBicondElimStatement(first, second, statements)
addBicondElimStatement(second, first, statements)
bicond_index = sentence[(bicond_index + 5):].find(" <-> ")
if "&&" in self.connectiveSet:
# Finishes adding all possible statements from And Introduction
for atom1 in atoms:
for atom2 in atoms:
conjunction = atom1.strip() + " && " + atom2.strip()
if conjunction not in sentences:
# We punish this rule of inference as well for being, quite frankly, not very interesting
cost = 10 if atom1 == atom2 else 3
succStatements = list(state[0])
succStatements.append((conjunction, "AI", state[1]))
succState = (tuple(succStatements), state[1])
results.append((whitespace + "And Introduction: " + conjunction, succState, cost))
# Finishes adding all possible statements from And Elimination
for conjunct in conjuncts:
if conjunct not in sentences:
succStatements = list(state[0])
succStatements.append((conjunct, "AE", state[1]))
succState = (tuple(succStatements), state[1])
results.append((whitespace + "And Elimination: " + conjunct, succState, 1))
if "||" in self.connectiveSet:
# Finishes adding all possible statement from Or Elimination
def addOrElimStatement(psi):
succStatements = list(state[0])
succStatements.append((psi, "OE", state[1]))
succState = (tuple(succStatements), state[1])
results.append((whitespace + "Or Elimination: " + psi, succState, 1))
# Iterates through atoms in each disjunct and derives all things implied by every disjuncted unit
for disjunction in list(disjuncts): # List of disjuncted atoms
isEntailed = True
# Populate a list with all examples of psi for some implication statement (phi -> psi)
psi_list = []
for psi_l in phi_to_psi.values():
for psi in psi_l:
psi_list.append(psi)
for not_psi_l in phi_to_not_psi.values():
for not_psi in not_psi_l:
psi_list.append("~" + not_psi)
# Iterate through every psi and, if (phi -> psi) exists for every phi in the disjunction,
# then we can add an or elimination statement containing psi
for psi in psi_list:
if psi[:1] == "~":
for phi in disjunction:
if phi not in phi_to_not_psi.keys() or psi.strip("~") not in phi_to_not_psi[phi]:
isEntailed = False
else:
for phi in disjunction:
if phi not in phi_to_psi.keys() or psi not in phi_to_psi[phi]:
isEntailed = False
if isEntailed and psi not in sentences:
addOrElimStatement(psi)
# Processes the dicts built during the above for loop to cover Negation Introduction cases
for phi in phi_to_psi.keys():
for psi in phi_to_psi[phi]:
if phi in phi_to_not_psi.keys() and psi in phi_to_not_psi[phi]:
negation = "~" + phi
if negation not in sentences:
succStatements = list(state[0])
succStatements.append((negation, "NI", state[1]))
succState = (tuple(succStatements), state[1])
results.append((whitespace + "Negation Introduction: " + negation, succState, 1))
# Implication Introduction and Reiteration
if proofDepth > 0:
subproof = []
# Gets a list of all statements in the most recent subproof
for i in range(len(statements)):
if statements[i][2] < proofDepth:
subproof = []
continue
elif statements[i][2] > proofDepth:
continue
subproof.append(statements[i])
# Allows Implication Introduction from the assumption to any reached conclusion
assumption = subproof[0]
assert assumption[1] == "A"
for statement in subproof[1:]:
# If the antecedent contains an implication or biconditional, we need parens around it
if "->" in assumption[0]:
antecedent = "(" + assumption[0] + ")"
else:
antecedent = assumption[0]
newImplication = antecedent + " -> " + statement[0]
if newImplication not in sentences:
succStatements = list(state[0])
succStatements.append((newImplication, "II", state[1] - 1))
succState = (tuple(succStatements), state[1] - 1)
II_whitespace = whitespace[2:]
results.append((II_whitespace + "Implication Introduction: " + newImplication, succState, 1))
# Reiteration of statements allowed if we're inside a subproof
for sentence in sentences:
if sentence not in subproof or self.goal == sentence:
succStatements = list(state[0])
succStatements.append((sentence, "R", state[1]))
succState = (tuple(succStatements), state[1])
results.append((whitespace + "Reiteration: " + sentence, succState, 1))
# Biconditional Introduction
# If 'phi -> psi' and 'psi -> phi' for any phi and psi, can derive 'phi <-> psi'
if "<->" in self.connectiveSet:
phi_to_psi.update(phi_to_not_psi) # Merges the 2 implication dicts
for phi in phi_to_psi.keys():
psi = phi_to_psi[phi]
if psi in phi_to_psi.keys() and phi in phi_to_psi[psi]:
newBicond = phi + " <-> " + psi
if newBicond not in sentences:
succStatements = list(state[0])
succStatements.append((newBicond, "BI", state[1]))
succState = (tuple(succStatements), state[1])
results.append((whitespace + "Biconditional Introduction: " + newBicond, succState, 1))
return results
# Uses a search problem and UCS to find a proof of a goal given premises
def solveFitchProof(premises, goal):
# The first section formats the input into a usable format and extracts symbols
symbolSet = set()
# The statement set is used to keep track of full, parenthesized statements
statementSet = set()
connectiveSet = set()
formattedPremises = []
if premises != None:
premiseSymbols = premises.split()
else:
premiseSymbols = []
goalSymbols = goal.split()
currPremise = 0 # The number of premises added to the list so far
currUnit = "" # The formatted premise so far
inParens = False # Keeps track of whether inside parentheses
parensDepth = 0 # Depth of parentheses
currParenUnit = "" # The parenthesized unit made so far
for symbol in premiseSymbols:
if symbol == "*":
if currPremise == 0:
currPremise += 1
continue
formattedPremises.append(currUnit)
currPremise += 1
currUnit = ""
elif symbol == "(":
parensDepth += 1
inParens = True
currUnit += "("
elif symbol == ")":
parensDepth -= 1
if parensDepth == 0: inParens = False
currUnit += ")"
currParenUnit = ""
elif symbol == "AND" or symbol == "and" or symbol == "&&" or symbol == "&":
currUnit += " && "
connectiveSet.add("&&")
if inParens: currParenUnit += " && "
elif symbol == "OR" or symbol == "or" or symbol == "||" or symbol == "|":
currUnit += " || "
connectiveSet.add("||")
if inParens: currParenUnit += " || "
elif symbol == "NOT" or symbol == "not" or symbol == "~":
currUnit += "~"
if inParens: currParenUnit += "~"
elif symbol == "=>" or symbol == "->":
currUnit += " -> "
if inParens: currParenUnit += " -> "
elif symbol == "<=>" or symbol == "<->":
currUnit += " <-> "
connectiveSet.add("<->")
if inParens: currParenUnit += " <-> "
else:
currUnit += symbol
if inParens: currParenUnit += symbol
symbolSet.add(symbol)
# One and a half loops (less than a half, actually)
| |
ing2, contours, hierarchy = cv2.findContours(edgedForProp, method,
cv2.CHAIN_APPROX_SIMPLE)
innerCnts = []
for cnt, h in zip (contours, hierarchy[0]):
if h[2] == -1 :
innerCnts.append(cnt)
sortedContours = sorted(innerCnts, key = cv2.contourArea, reverse = True)
selectedContours = [cnt for cnt in sortedContours if cv2.contourArea(cnt) > minArea]
for cnt in selectedContours[0: numberOfCnts]:
cv2.drawContours(goldenPropImg, [cnt], -1, (255, 0, 255), 1)
# get all the ratio to check
ratioAreas = []
for index, cnt in enumerate(selectedContours[0: numberOfCnts]):
if index < len(selectedContours[0: numberOfCnts]) -1:
areaGoldenToCheck_previous = cv2.contourArea(selectedContours[index])
areaGoldenToCheck_next = cv2.contourArea(selectedContours[index + 1])
ratioArea = areaGoldenToCheck_previous / areaGoldenToCheck_next
ratioAreas.append(ratioArea)
meanAreaRatio = (np.mean(ratioAreas))
diffFromGoldenRatio = abs(1.618 - meanAreaRatio)
scoreProportionAreaVsGoldenRatio = np.exp(-diffFromGoldenRatio)
cv2.putText(goldenPropImg, "GoldPr: {:.3f}".format(scoreProportionAreaVsGoldenRatio), (20, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 255, 255), 1)
self.scoreProportionAreaVsGoldenRatio = scoreProportionAreaVsGoldenRatio
return goldenPropImg, scoreProportionAreaVsGoldenRatio
def cornerDetectionVisualBalance (self, maxCorners = 40 , minDistance = 6, midlineOnCornersCnt = True):
# based on the idea that there is a balance in balanced distribution of corner
# the mid axis is the mid of the extremes corners detected
corners = cv2.goodFeaturesToTrack(self.gray, maxCorners, 0.01, minDistance )
cornerimg = self.image.copy()
cornersOntheLeft = 0
cornersOntheRight = 0
cornersOnTop = 0
cornersOnBottom = 0
# find the limit x and y of the detected corners
listX = [corner[0][0] for corner in corners]
listY = [corner[0][1] for corner in corners]
minX = min(listX)
maxX = max (listX)
minY = min(listY)
maxY = max (listY)
for corner in corners:
x, y = corner[0]
x = int(x)
y = int(y)
if midlineOnCornersCnt:
# find the middle x and middle y
midx = minX + int((maxX - minX)/2)
midy = minY + int((maxY - minY)/2)
pass
else:
midx = int(self.image.shape[1] / 2)
midy = int(self.image.shape[0] / 2)
cv2.rectangle(cornerimg,(x-2,y-2),(x+2,y+2),(0,255,0), 1)
if x < midx:
cornersOntheLeft += 1
if x > midx:
cornersOntheRight += 1
if y < midy:
cornersOnTop += 1
if y > midy:
cornersOnBottom += 1
scoreHorizzontalCorners = np.exp(-(abs(cornersOntheLeft - cornersOntheRight )/(maxCorners/3.14)))
scoreVerticalCorners = np.exp(-(abs(cornersOnTop - cornersOnBottom )/(maxCorners/3.14)))
cv2.putText(cornerimg, "Corn H: {:.3f} V: {:.3f}".format(scoreHorizzontalCorners, scoreVerticalCorners), (20, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 255, 255), 1)
self.cornersBalance = (scoreHorizzontalCorners + scoreVerticalCorners) / 2
return cornerimg, scoreHorizzontalCorners, scoreVerticalCorners
def goldenSpiralAdaptedDetection (self, displayall = False , displayKeypoints = True, maxKeypoints = 100, edged = True):
goldenImgDisplay = self.image.copy()
# segmentation with orb and edges
ImgImpRegion, contours, keypoints = self._orbSegmentation ( maxKeypoints = maxKeypoints, edged = edged, edgesdilateOpen = False, method = cv2.RETR_EXTERNAL)
# find the center zig zag orb silhoutte
copyZigZag, ratioGoldenRectangleZigZagOrb , sorted_contoursZigZag, zigzagPerimeterScore= self._zigzagCntsArea()
#draw the bounding box
c = max(sorted_contoursZigZag, key=cv2.contourArea)
x,y,w,h = cv2.boundingRect(c)
if x==0 or x+w == self.image.shape[1] or y==0 or y+w == self.image.shape[0]:
cv2.rectangle(goldenImgDisplay, (0,0), (self.image.shape[1], self.image.shape[0]), (0,255,0), 1)
else:
cv2.rectangle(goldenImgDisplay,(x,y),(x+w,y+h),(0,255,0),1)
# create the guidelines
im, im2,im3, im4 = self._drawGoldenSpiral(drawRectangle=False, drawEllipses = True, x = w, y = h)
transX = x
transY = y
T = np.float32([[1,0,transX], [0,1, transY]])
imTranslated = cv2.warpAffine(im, T, (self.image.shape[1], self.image.shape[0]))
T2 = np.float32([[1,0, -self.image.shape[1] + transX + w], [0,1, -self.image.shape[0] + transY + h]])
imTranslated2 = cv2.warpAffine(im2, T2, (self.image.shape[1], self.image.shape[0]))
T3 = np.float32([[1,0, transX], [0,1, -self.image.shape[0] + transY + h]])
imTranslated3 = cv2.warpAffine(im3, T3, (self.image.shape[1], self.image.shape[0]))
T4 = np.float32([[1,0, -self.image.shape[1] + transX + w], [0,1, transY ]])
imTranslated4 = cv2.warpAffine(im4, T4, (self.image.shape[1], self.image.shape[0]))
# bitwise the guidlines for one display img
goldenImgDisplay = cv2.bitwise_or(goldenImgDisplay, imTranslated)
goldenImgDisplay = cv2.bitwise_or(goldenImgDisplay, imTranslated2)
if displayall:
goldenImgDisplay = cv2.bitwise_or(goldenImgDisplay, imTranslated3)
goldenImgDisplay = cv2.bitwise_or(goldenImgDisplay, imTranslated4)
if displayKeypoints:
goldenImgDisplay = cv2.drawKeypoints(goldenImgDisplay, keypoints,goldenImgDisplay, flags =
cv2.DRAW_MATCHES_FLAGS_NOT_DRAW_SINGLE_POINTS)
# dilate the spirals
kernel = np.ones((5,5),np.uint8)
imTranslated = cv2.dilate(imTranslated,kernel,iterations = 3)
imTranslated2 = cv2.dilate(imTranslated2,kernel,iterations = 3)
imTranslated3 = cv2.dilate(imTranslated3,kernel,iterations = 3)
imTranslated4 = cv2.dilate(imTranslated4,kernel,iterations = 3)
# loop to collect the intersection
intersection = cv2.bitwise_and(ImgImpRegion,imTranslated)
intersection2 = cv2.bitwise_and(ImgImpRegion,imTranslated2)
intersection3 = cv2.bitwise_and(ImgImpRegion,imTranslated3)
intersection4 = cv2.bitwise_and(ImgImpRegion,imTranslated4)
# sum of imgImpRegion
sumOfAllPixelInImgImpRegion = (ImgImpRegion>0).sum()
# sum of all intersections
sum1 = (intersection>0).sum()
sum2 = (intersection2>0).sum()
sum3 = (intersection3>0).sum()
sum4 = (intersection4>0).sum()
maxSumIntersection = max(sum1, sum2, sum3, sum4)
# calculate the ratio of the max vs whole
scoreSpiralGoldenRatio = maxSumIntersection / sumOfAllPixelInImgImpRegion
cv2.putText(goldenImgDisplay, "Gold: {:.3f}".format(scoreSpiralGoldenRatio), (20, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 255, 255), 1)
self.scoreSpiralGoldenRatio = scoreSpiralGoldenRatio
# =============================================================================
# cv2.imshow('ImgImpRegion', ImgImpRegion)
# cv2.imshow('imTranslated', imTranslated)
# cv2.imshow('inter', intersection)
# cv2.waitKey()
# cv2.destroyAllWindows()
# =============================================================================
return goldenImgDisplay, scoreSpiralGoldenRatio
def goldenSpiralFixDetection (self, displayall = False , displayKeypoints = True, maxKeypoints = 100, edged = True, numberOfCnts = 40, scaleFactor = 0.5, bonus = 10):
#goldenImgDisplay = self.image.copy()
# segmentation with orb and edges
ImgImpRegion, contours, keypoints = self._orbSegmentation ( maxKeypoints = maxKeypoints, edged = edged, edgesdilateOpen = False, method = cv2.RETR_EXTERNAL)
# implement the segmentation including the edges
edgedImg = self._edgeDetection(scalarFactor = 1, meanShift = 0, edgesdilateOpen = False, kernel = 5)
edgedImg = cv2.cvtColor(edgedImg, cv2.COLOR_GRAY2BGR)
# give a weight to the edges detection smaller than the orb
#edgedImg[np.where((edgedImg ==[255,255,255]).all(axis=2))] = [255,255,255]
# implement with inner shape
segmentationOnInnerCnts, contours = self._innerCntsSegmentation(numberOfCnts = numberOfCnts, method = cv2.RETR_CCOMP, minArea = 5)
segmentationOnInnerCnts[np.where((segmentationOnInnerCnts ==[255,255,255]).all(axis=2))] = [40,40,40]
# merge the masks
ImgImpRegion = cv2.bitwise_or(ImgImpRegion,edgedImg)
ImgImpRegion = cv2.bitwise_or(ImgImpRegion,segmentationOnInnerCnts)
goldenImgDisplay = ImgImpRegion.copy()
# =============================================================================
# # find the center zig zag orb silhoutte
# copyZigZag, ratioGoldenRectangleZigZagOrb , sorted_contoursZigZag, zigzagPerimeterScore= self._zigzagCntsArea()
#
# #draw the bounding box
# c = max(sorted_contoursZigZag, key=cv2.contourArea)
# x,y,w,h = cv2.boundingRect(c)
# =============================================================================
# set this way to make the boundig box the size of the frame.. for adaptive unmask above and adjust
x=0
y=0
w = self.image.shape[1]
h = self.image.shape[0]
if x==0 or x+w == self.image.shape[1] or y==0 or y+h == self.image.shape[0]:
cv2.rectangle(goldenImgDisplay, (0,0), (self.image.shape[1], self.image.shape[0]), (0,255,0), 1)
else:
cv2.rectangle(goldenImgDisplay,(x,y),(x+w,y+h),(0,255,0),1)
# create the guidelines
im, im2,im3, im4 = self._drawGoldenSpiral(drawRectangle=False, drawEllipses = True, x = w, y = h)
transX = x
transY = y
T = np.float32([[1,0,transX], [0,1, transY]])
imTranslated = cv2.warpAffine(im, T, (self.image.shape[1], self.image.shape[0]))
T2 = np.float32([[1,0, -self.image.shape[1] + transX + w], [0,1, -self.image.shape[0] + transY + h]])
imTranslated2 = cv2.warpAffine(im2, T2, (self.image.shape[1], self.image.shape[0]))
T3 = np.float32([[1,0, transX], [0,1, -self.image.shape[0] + transY + h]])
imTranslated3 = cv2.warpAffine(im3, T3, (self.image.shape[1], self.image.shape[0]))
T4 = np.float32([[1,0, -self.image.shape[1] + transX + w], [0,1, transY ]])
imTranslated4 = cv2.warpAffine(im4, T4, (self.image.shape[1], self.image.shape[0]))
# dilate the spirals
kernel = np.ones((5,5),np.uint8)
AimTranslated = cv2.dilate(imTranslated,kernel,iterations = 3)
AimTranslated2 = cv2.dilate(imTranslated2,kernel,iterations = 3)
AimTranslated3 = cv2.dilate(imTranslated3,kernel,iterations = 3)
AimTranslated4 = cv2.dilate(imTranslated4,kernel,iterations = 3)
# loop to collect the intersection
intersection = cv2.bitwise_and(ImgImpRegion,AimTranslated)
intersection2 = cv2.bitwise_and(ImgImpRegion,AimTranslated2)
intersection3 = cv2.bitwise_and(ImgImpRegion,AimTranslated3)
intersection4 = cv2.bitwise_and(ImgImpRegion,AimTranslated4)
# sum of imgImpRegion
sumOfAllPixelInSilhoutte = (ImgImpRegion > 0).sum()
sumofAlledgedandorb = (ImgImpRegion==255).sum()
sumofAllInnerCnts = (ImgImpRegion==40).sum()
sumOfAllPixelInImgImpRegion = sumofAlledgedandorb + (scaleFactor* sumofAllInnerCnts)
# sum of all intersections
sum1_orb = (intersection==255).sum()
sum2_orb = (intersection2==255).sum()
sum3_orb = (intersection3==255).sum()
sum4_orb = (intersection4==255).sum()
# for the inner shape
sum1_inn = (intersection==40).sum()
sum2_inn = (intersection2==40).sum()
sum3_inn = (intersection3==40).sum()
sum4_inn = (intersection4==40).sum()
# weight
sum1 = sum1_orb * bonus + (scaleFactor * sum1_inn)
sum2 = sum2_orb * bonus + (scaleFactor * sum2_inn)
sum3 = sum3_orb * bonus + (scaleFactor * sum3_inn)
sum4 = sum4_orb * bonus + (scaleFactor * sum4_inn)
maxSumIntersection = max(sum1, sum2, sum3, sum4)
# calculate the ratio of the max vs whole and weighted with the overall area of te silhoutte compare to the size of the frame
scoreSpiralGoldenRatio = maxSumIntersection / sumOfAllPixelInImgImpRegion * (sumOfAllPixelInSilhoutte / self.gray.size)
cv2.putText(goldenImgDisplay, "Gold: {:.3f}".format(scoreSpiralGoldenRatio), (20, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 255, 255), 1)
self.scoreSpiralGoldenRatio = scoreSpiralGoldenRatio
# bitwise the guidlines for one display img
if displayall == False:
if sum1 == max(sum1, sum2, sum3, sum4):
goldenImgDisplay = cv2.bitwise_or(goldenImgDisplay, imTranslated)
if sum2 == max(sum1, sum2, sum3, sum4):
goldenImgDisplay = cv2.bitwise_or(goldenImgDisplay, imTranslated2)
if sum3 == max(sum1, sum2, sum3, sum4):
goldenImgDisplay = cv2.bitwise_or(goldenImgDisplay, imTranslated3)
if sum4 == max(sum1, sum2, sum3, sum4):
goldenImgDisplay = cv2.bitwise_or(goldenImgDisplay, imTranslated4)
if displayall:
goldenImgDisplay = cv2.bitwise_or(goldenImgDisplay, imTranslated)
goldenImgDisplay = cv2.bitwise_or(goldenImgDisplay, imTranslated2)
goldenImgDisplay = cv2.bitwise_or(goldenImgDisplay, imTranslated3)
goldenImgDisplay = cv2.bitwise_or(goldenImgDisplay, imTranslated4)
if displayKeypoints:
goldenImgDisplay = cv2.drawKeypoints(goldenImgDisplay, keypoints,goldenImgDisplay, flags =
cv2.DRAW_MATCHES_FLAGS_NOT_DRAW_SINGLE_POINTS)
return | |
import asyncio
import os
import random
from contextlib import suppress
from time import sleep
from unittest import mock
import pytest
from tlz import first, partition_all
from dask import delayed
from distributed import Client, Nanny, wait
from distributed.comm import CommClosedError
from distributed.compatibility import MACOS
from distributed.metrics import time
from distributed.scheduler import COMPILED
from distributed.utils import CancelledError, sync
from distributed.utils_test import (
captured_logger,
cluster,
div,
gen_cluster,
inc,
slowadd,
slowinc,
)
pytestmark = pytest.mark.ci1
def test_submit_after_failed_worker_sync(loop):
    """A submit depending on keys lost with a dead worker still succeeds."""
    with cluster() as (s, [a, b]):
        with Client(s["address"], loop=loop) as c:
            futures = c.map(inc, range(10))
            wait(futures)
            # kill one worker after the mapped futures have completed
            a["proc"]().terminate()
            combined = c.submit(sum, futures)
            expected = sum(inc(i) for i in range(10))
            assert combined.result() == expected
@gen_cluster(client=True, timeout=60, active_rpc_timeout=10)
async def test_submit_after_failed_worker_async(c, s, a, b):
    """A submit over completed futures survives an asynchronous nanny kill."""
    n = await Nanny(s.address, nthreads=2, loop=s.loop)
    while len(s.workers) < 3:
        await asyncio.sleep(0.1)
    L = c.map(inc, range(10))
    await wait(L)
    # kill the extra nanny concurrently with the dependent submit
    s.loop.add_callback(n.kill)
    total = c.submit(sum, L)
    result = await total
    assert result == sum(map(inc, range(10)))
    await n.close()
@gen_cluster(client=True, timeout=60)
async def test_submit_after_failed_worker(c, s, a, b):
    """Cleanly closing a worker after map() does not break a dependent submit."""
    L = c.map(inc, range(10))
    await wait(L)
    await a.close()
    total = c.submit(sum, L)
    result = await total
    assert result == sum(map(inc, range(10)))
@pytest.mark.slow
def test_gather_after_failed_worker(loop):
    """Gather recomputes keys that died with a terminated worker."""
    with cluster() as (s, [a, b]):
        with Client(s["address"], loop=loop) as c:
            futures = c.map(inc, range(10))
            wait(futures)
            # terminate one worker; gather must recover its results
            a["proc"]().terminate()
            gathered = c.gather(futures)
            assert gathered == [inc(i) for i in range(10)]
@pytest.mark.slow
@gen_cluster(client=True, Worker=Nanny, nthreads=[("127.0.0.1", 1)] * 4, timeout=60)
async def test_gather_then_submit_after_failed_workers(c, s, w, x, y, z):
    """The reduction survives losing a mapped worker and then, repeatedly, the worker holding the result."""
    L = c.map(inc, range(20))
    await wait(L)
    w.process.process._process.terminate()
    total = c.submit(sum, L)
    for _ in range(3):
        await wait(total)
        # find and kill whichever worker currently holds the reduced result
        addr = first(s.tasks[total.key].who_has).address
        for worker in [x, y, z]:
            if worker.worker_address == addr:
                worker.process.process._process.terminate()
                break
    result = await c.gather([total])
    assert result == [sum(map(inc, range(20)))]
@pytest.mark.xfail(COMPILED, reason="Fails with cythonized scheduler")
@gen_cluster(Worker=Nanny, client=True, timeout=60)
async def test_failed_worker_without_warning(c, s, a, b):
    """A worker that hard-exits is replaced by its nanny and work continues."""
    L = c.map(inc, range(10))
    await wait(L)
    original_pid = a.pid
    with suppress(CommClosedError):
        # hard-kill the worker process from the inside; the comm dies abruptly
        await c._run(os._exit, 1, workers=[a.worker_address])
    start = time()
    # wait for the nanny to spawn a replacement process (new pid)
    while a.pid == original_pid:
        await asyncio.sleep(0.01)
    assert time() - start < 10
    await asyncio.sleep(0.5)
    start = time()
    while len(s.nthreads) < 2:
        await asyncio.sleep(0.01)
    assert time() - start < 10
    await wait(L)
    L2 = c.map(inc, range(10, 20))
    await wait(L2)
    assert all(len(keys) > 0 for keys in s.has_what.values())
    nthreads2 = dict(s.nthreads)
    await c.restart()
    L = c.map(inc, range(10))
    await wait(L)
    assert all(len(keys) > 0 for keys in s.has_what.values())
    # restart must produce a fresh set of worker addresses
    assert not (set(nthreads2) & set(s.nthreads))  # no overlap
@pytest.mark.xfail(COMPILED, reason="Fails with cythonized scheduler")
@gen_cluster(Worker=Nanny, client=True, timeout=60)
async def test_restart(c, s, a, b):
    """restart() clears scheduler state and cancels all client futures."""
    assert s.nthreads == {a.worker_address: 1, b.worker_address: 2}
    x = c.submit(inc, 1)
    y = c.submit(inc, x)
    z = c.submit(div, 1, 0)
    await y
    assert set(s.who_has) == {x.key, y.key}
    f = await c.restart()
    # restart returns the client itself
    assert f is c
    assert len(s.workers) == 2
    assert not any(ws.occupancy for ws in s.workers.values())
    assert not s.who_has
    # all futures, including the failed division, are cancelled
    assert x.cancelled()
    assert y.cancelled()
    assert z.cancelled()
    assert z.key not in s.exceptions
    assert not s.who_wants
    assert not any(cs.wants_what for cs in s.clients.values())
@pytest.mark.xfail(COMPILED, reason="Fails with cythonized scheduler")
@gen_cluster(Worker=Nanny, client=True, timeout=60)
async def test_restart_cleared(c, s, a, b):
    """After a restart the scheduler holds no tasks and no unrunnable keys."""
    graph = 2 * delayed(1) + 1
    fut = c.compute(graph)
    await wait([fut])
    await c.restart()
    assert not s.tasks
    assert not s.unrunnable
@pytest.mark.xfail(COMPILED, reason="Fails with cythonized scheduler")
def test_restart_sync_no_center(loop):
    """restart() cancels in-flight futures but leaves the client usable."""
    with cluster(nanny=True) as (s, [a, b]):
        with Client(s["address"], loop=loop) as c:
            before = c.submit(inc, 1)
            c.restart()
            assert before.cancelled()
            # new work after the restart completes normally
            after = c.submit(inc, 2)
            assert after.result() == 3
            assert len(c.nthreads()) == 2
@pytest.mark.xfail(COMPILED, reason="Fails with cythonized scheduler")
def test_restart_sync(loop):
    """Synchronous restart clears data, cancels futures, and leaves the client usable."""
    with cluster(nanny=True) as (s, [a, b]):
        with Client(s["address"], loop=loop) as c:
            x = c.submit(div, 1, 2)
            x.result()
            assert sync(loop, c.scheduler.who_has)
            c.restart()
            assert not sync(loop, c.scheduler.who_has)
            assert x.cancelled()
            assert len(c.nthreads()) == 2
            # accessing a cancelled future's result raises
            with pytest.raises(CancelledError):
                x.result()
            y = c.submit(div, 1, 3)
            assert y.result() == 1 / 3
@pytest.mark.xfail(COMPILED, reason="Fails with cythonized scheduler")
@gen_cluster(Worker=Nanny, client=True, timeout=60)
async def test_restart_fast(c, s, a, b):
    """restart() does not wait for long-running tasks to finish."""
    L = c.map(sleep, range(10))
    start = time()
    await c.restart()
    # must return quickly even though tasks sleep up to 9 seconds
    assert time() - start < 10
    assert len(s.nthreads) == 2
    assert all(x.status == "cancelled" for x in L)
    x = c.submit(inc, 1)
    result = await x
    assert result == 2
@pytest.mark.xfail(COMPILED, reason="Fails with cythonized scheduler")
def test_worker_doesnt_await_task_completion(loop):
    """restart() kills workers promptly instead of waiting for running tasks."""
    with cluster(nanny=True, nworkers=1) as (s, [w]):
        with Client(s["address"], loop=loop) as c:
            future = c.submit(sleep, 100)
            sleep(0.1)
            start = time()
            c.restart()
            stop = time()
            # far less than the 100s the running task would have taken
            assert stop - start < 5
@pytest.mark.xfail(COMPILED, reason="Fails with cythonized scheduler")
def test_restart_fast_sync(loop):
    """Synchronous variant: restart() is fast and cancels sleeping tasks."""
    with cluster(nanny=True) as (s, [a, b]):
        with Client(s["address"], loop=loop) as c:
            L = c.map(sleep, range(10))
            start = time()
            c.restart()
            assert time() - start < 10
            assert len(c.nthreads()) == 2
            assert all(x.status == "cancelled" for x in L)
            x = c.submit(inc, 1)
            assert x.result() == 2
@pytest.mark.xfail(COMPILED, reason="Fails with cythonized scheduler")
@gen_cluster(Worker=Nanny, client=True, timeout=60)
async def test_fast_kill(c, s, a, b):
    """Nanny-backed workers are killed quickly on restart."""
    L = c.map(sleep, range(10))
    start = time()
    await c.restart()
    assert time() - start < 10
    assert all(x.status == "cancelled" for x in L)
    x = c.submit(inc, 1)
    result = await x
    assert result == 2
@pytest.mark.xfail(COMPILED, reason="Fails with cythonized scheduler")
@gen_cluster(Worker=Nanny, timeout=60)
async def test_multiple_clients_restart(s, a, b):
    """A restart triggered by one client also cancels other clients' futures."""
    c1 = await Client(s.address, asynchronous=True)
    c2 = await Client(s.address, asynchronous=True)
    x = c1.submit(inc, 1)
    y = c2.submit(inc, 2)
    xx = await x
    yy = await y
    assert xx == 2
    assert yy == 3
    await c1.restart()
    assert x.cancelled()
    start = time()
    # the second client's future is cancelled shortly after
    while not y.cancelled():
        await asyncio.sleep(0.01)
    assert time() < start + 5
    await c1.close()
    await c2.close()
@gen_cluster(Worker=Nanny, timeout=60)
async def test_restart_scheduler(s, a, b):
    """Scheduler restart replaces every nanny's worker process."""
    assert len(s.nthreads) == 2
    old_pids = (a.pid, b.pid)
    assert old_pids[0]
    assert old_pids[1]
    await s.restart()
    assert len(s.nthreads) == 2
    new_pids = (a.pid, b.pid)
    assert new_pids[0]
    assert new_pids[1]
    # fresh worker processes imply fresh pids
    assert new_pids != old_pids
@gen_cluster(Worker=Nanny, client=True, timeout=60)
async def test_forgotten_futures_dont_clean_up_new_futures(c, s, a, b):
    """Releasing a pre-restart future must not release an identical post-restart one."""
    x = c.submit(inc, 1)
    await c.restart()
    y = c.submit(inc, 1)
    del x
    import gc
    gc.collect()
    await asyncio.sleep(0.1)
    # y shares its key with the forgotten x; it must still be awaitable
    await y
@pytest.mark.slow
@pytest.mark.flaky(condition=MACOS, reruns=10, reruns_delay=5)
@gen_cluster(client=True, timeout=60, active_rpc_timeout=10)
async def test_broken_worker_during_computation(c, s, a, b):
    """A tree-reduction completes despite workers being hard-killed mid-computation."""
    s.allowed_failures = 100
    n = await Nanny(s.address, nthreads=2, loop=s.loop)
    start = time()
    while len(s.nthreads) < 3:
        await asyncio.sleep(0.01)
    assert time() < start + 5
    N = 256
    # sum of inc(0)..inc(N-1) == 1 + 2 + ... + N
    expected_result = N * (N + 1) // 2
    i = 0
    L = c.map(inc, range(N), key=["inc-%d-%d" % (i, j) for j in range(N)])
    # pairwise tree reduction: halve the future list until one remains
    while len(L) > 1:
        i += 1
        L = c.map(
            slowadd,
            *zip(*partition_all(2, L)),
            key=["add-%d-%d" % (i, j) for j in range(len(L) // 2)],
        )
    await asyncio.sleep(random.random() / 20)
    with suppress(CommClosedError):  # comm will be closed abruptly
        await c._run(os._exit, 1, workers=[n.worker_address])
    await asyncio.sleep(random.random() / 20)
    while len(s.workers) < 3:
        await asyncio.sleep(0.01)
    with suppress(
        CommClosedError, EnvironmentError
    ):  # perhaps new worker can't be contacted yet
        await c._run(os._exit, 1, workers=[n.worker_address])
    [result] = await c.gather(L)
    assert isinstance(result, int)
    assert result == expected_result
    await n.close()
@pytest.mark.xfail(COMPILED, reason="Fails with cythonized scheduler")
@gen_cluster(client=True, Worker=Nanny, timeout=60)
async def test_restart_during_computation(c, s, a, b):
    """restart() while a large graph is executing leaves a clean scheduler."""
    xs = [delayed(slowinc)(i, delay=0.01) for i in range(50)]
    ys = [delayed(slowinc)(i, delay=0.01) for i in xs]
    zs = [delayed(slowadd)(x, y, delay=0.01) for x, y in zip(xs, ys)]
    total = delayed(sum)(zs)
    result = c.compute(total)
    await asyncio.sleep(0.5)
    # tasks must actually be processing when we restart
    assert s.rprocessing
    await c.restart()
    assert not s.rprocessing
    assert len(s.nthreads) == 2
    assert not s.tasks
class SlowTransmitData:
    """Test payload that is artificially slow to (de)serialize.

    Used to keep worker-to-worker transfers in flight long enough for the
    surrounding tests to kill a worker mid-transfer.
    """

    def __init__(self, data, delay=0.1):
        self.delay = delay
        self.data = data

    def __reduce__(self):
        import time
        # simulate an expensive serialization step
        time.sleep(self.delay)
        # fix: preserve both the payload and the delay; the original
        # returned (self.delay,), which dropped the data and reset the
        # delay of the reconstructed object to the default
        return (SlowTransmitData, (self.data, self.delay))

    def __sizeof__(self) -> int:
        # Report a size just above the configured offload threshold so the
        # comm layer offloads (de)serialization instead of blocking the loop
        import dask
        from dask.utils import parse_bytes

        return parse_bytes(dask.config.get("distributed.comm.offload")) + 1
@pytest.mark.flaky(reruns=10, reruns_delay=5)
@gen_cluster(client=True)
async def test_worker_who_has_clears_after_failed_connection(c, s, a, b):
    """who_has entries for a dead worker are dropped once connecting to it fails."""
    n = await Nanny(s.address, nthreads=2, loop=s.loop)
    while len(s.nthreads) < 3:
        await asyncio.sleep(0.01)
    def slow_ser(x, delay):
        # payload that is slow to (de)serialize, keeping transfers in flight
        return SlowTransmitData(x, delay=delay)
    n_worker_address = n.worker_address
    futures = c.map(
        slow_ser,
        range(20),
        delay=0.1,
        key=["f%d" % i for i in range(20)],
        workers=[n_worker_address],
        allow_other_workers=True,
    )
    def sink(*args):
        pass
    await wait(futures)
    result_fut = c.submit(sink, futures, workers=a.address)
    with suppress(CommClosedError):
        # hard-kill the nanny's worker while `a` is fetching dependencies
        await c._run(os._exit, 1, workers=[n_worker_address])
    while len(s.workers) > 2:
        await asyncio.sleep(0.01)
    await result_fut
    # worker `a` must no longer list the dead worker as a data source
    assert not a.has_what.get(n_worker_address)
    assert not any(n_worker_address in s for ts in a.tasks.values() for s in ts.who_has)
    await n.close()
@gen_cluster(
client=True,
nthreads=[("127.0.0.1", 1), ("127.0.0.1", 2), ("127.0.0.1", 3)],
)
async def test_worker_same_host_replicas_missing(c, s, a, b, x):
# See GH4784
def mock_address_host(addr):
# act as if A and X are on the same host
nonlocal a, b, x
if addr in [a.address, x.address]:
return "A"
else:
return "B"
with mock.patch("distributed.worker.get_address_host", mock_address_host):
futures = c.map(
slowinc,
range(20),
delay=0.1,
key=["f%d" % i for i in range(20)],
workers=[a.address],
allow_other_workers=True,
)
await wait(futures)
# replicate data to avoid the scheduler retriggering the computation
# retriggering cleans up the state nicely but doesn't reflect real world
# scenarios where there may be replicas on the cluster, e.g. they are
# replicated as a dependency somewhere else
await c.replicate(futures, n=2, workers=[a.address, b.address])
def sink(*args):
pass
# Since | |
def get_text_for_premisegroup_uid(uid):
    """
    Returns joined text of the premise group and the premise ids

    :param uid: premisegroup_uid
    :return: text, uids
    """
    # fix: the leading ``def `` keyword was missing from the original line,
    # leaving the module with a syntax error
    warnings.warn("Use PremiseGroup.get_text() instead.", DeprecationWarning)
    db_premises = DBDiscussionSession.query(Premise).filter_by(premisegroup_uid=uid).join(Statement).all()
    if not db_premises:
        return ''
    texts = [premise.get_text() for premise in db_premises]
    # language of the first premise's statement decides the conjunction word
    lang = DBDiscussionSession.query(Statement).get(db_premises[0].statement.uid).lang
    _t = Translator(lang)
    return ' {} '.format(_t.get(_.aand)).join(texts)
def get_text_for_statement_uid(uid: int, colored_position=False) -> Optional[str]:
    """
    Returns text of statement with given uid

    :param uid: Statement.uid
    :param colored_position: True to wrap the text in a position-highlighting html tag
    :return: String, or None if uid is not an int or unknown
    """
    warnings.warn("Use Statement.get_text() or Statement.get_html() instead.", DeprecationWarning)
    if not isinstance(uid, int):
        return None
    db_statement = DBDiscussionSession.query(Statement).get(uid)
    if not db_statement:
        return None
    content = db_statement.get_text()
    # strip every trailing sentence-ending punctuation mark
    while content.endswith(('.', '?', '!')):
        content = content[:-1]
    sb, se = '', ''
    if colored_position:
        # NOTE(review): tag_type is presumably a module-level constant
        # (e.g. 'span') -- not visible in this chunk, confirm before editing
        sb = f'<{tag_type} data-argumentation-type="position">'
        se = f'</{tag_type}>'
    return sb + content + se
def get_text_for_conclusion(argument, start_with_intro=False, rearrange_intro=False, is_users_opinion=True):
    """
    Check the arguments conclusion whether it is an statement or an argument and returns the text

    :param argument: Argument
    :param start_with_intro: Boolean
    :param rearrange_intro: Boolean
    :param is_users_opinion: True if the conclusion represents the user's own opinion
    :return: String
    """
    if argument.argument_uid:
        # the conclusion is itself an argument -> build its full text
        return get_text_for_argument_uid(argument.argument_uid, start_with_intro=start_with_intro,
                                         rearrange_intro=rearrange_intro,
                                         is_users_opinion=is_users_opinion)
    else:
        # the conclusion is a plain statement
        return argument.get_conclusion_text()
def get_all_attacking_arg_uids_from_history(history):
    """
    Returns all arguments of the history, which attacked the user

    :param history: SessionHistory
    :return: [Arguments.uid]
    :rtype: list
    """
    try:
        steps = history.get_session_history_as_list()
        attacking_uids = []
        for step in steps:
            if 'reaction' not in step:
                continue
            # the uid sits three segments after the 'reaction' marker
            segments = step.split('/')
            attacking_uids.append(segments[segments.index('reaction') + 3])
        return attacking_uids
    except AttributeError:
        # no history object (or malformed entries) -> nothing attacked
        return []
def get_user_by_private_or_public_nickname(nickname: str) -> Optional[User]:
    """
    Gets the user by his (public) nickname, based on the option, whether his nickname is public or not

    :param nickname: Nickname of the user
    :return: Current user or None
    """
    user: User = get_user_by_case_insensitive_nickname(nickname)
    public_user: User = get_user_by_case_insensitive_public_nickname(nickname)
    # both lookups must succeed, otherwise the nickname is unknown
    if not (user and public_user):
        return None
    settings: Settings = user.settings
    if not settings:
        return None
    # honour the user's privacy preference
    return user if settings.should_show_public_nickname else public_user
def get_user_by_case_insensitive_nickname(nickname):
    """
    Returns user with given nickname

    :param nickname: String
    :return: User or None
    """
    # case-insensitive match on the private nickname
    query = DBDiscussionSession.query(User)
    return query.filter(User.nickname.ilike(nickname)).first()
def get_user_by_case_insensitive_public_nickname(public_nickname):
    """
    Returns user with given public nickname

    :param public_nickname: String
    :return: User or None
    """
    # compare both sides lower-cased inside the database
    query = DBDiscussionSession.query(User)
    lowered = func.lower(public_nickname)
    return query.filter(func.lower(User.public_nickname) == lowered).first()
def pretty_print_options(message):
    """
    Some modifications for pretty printing.
    Use uppercase for first letter in text and a single dot for the end if there isn't one already.

    :param message: String
    :return: String
    """
    # uppercase the first text letter, skipping over a leading html tag
    if message[0:1] == '<':
        pos = message.index('>')
        message = message[0:pos + 1] + message[pos + 1:pos + 2].upper() + message[pos + 2:]
    else:
        message = start_with_capital(message)
    # ensure the text ends with punctuation, inserting the dot before a
    # trailing closing html tag when necessary
    if message[-1] == '>':
        pos = message.rfind('<')
        if message[pos - 1] not in ['.', '?', '!']:
            message = message[0:pos] + '.' + message[pos:]
    elif not message.endswith(('.', '?', '!')):
        # fix: the original also tested ``id != 'now'``; ``id`` is the
        # builtin function, so that clause was always true and is dropped
        # (behavior unchanged)
        message += '.'
    return message
def create_speechbubble_dict(bubble_type: BubbleTypes, is_markable: bool = False, is_author: bool = False,
                             uid: str = '', bubble_url: str = '', content: str = '', omit_bubble_url: bool = False,
                             omit_vote_info: bool = False, argument_uid: int = None, statement_uid: int = None,
                             is_supportive: bool = False, db_user: User = None, lang: str = 'en',
                             is_users_opinion: bool = False, other_author: User = None):
    """
    Creates a dictionary which includes every information needed for a bubble.

    :param bubble_type: BubbleTypes
    :param is_markable: True if the content itself could be flagged
    :param is_author: True if the current user is author of the content
    :param uid: Identifier for the bubble
    :param bubble_url: URL for the click event of the bubble
    :param content: Text of the bubble
    :param omit_bubble_url: True if the bubble should have a link
    :param omit_vote_info: True if the bubble have the little, grey information text
    :param argument_uid: Argument.uid
    :param statement_uid: Statement.uid
    :param is_supportive: Boolean
    :param db_user: current user
    :param lang: ui_locales for the vote-count texts
    :param is_users_opinion: True if the bubble contains the user's own opinion
    :param other_author: author of the content if it is not the current user
    :return: dict()
    """
    gravatar_link = get_global_url() + '/static/images/icon.png'
    profile = None
    is_enemy_user = {
        'admin': False,
        'author': False,
        'special': False
    }
    if uid != 'now':
        content = pretty_print_options(content)
    if bubble_type is BubbleTypes.SYSTEM and other_author is not None:
        gravatar_link = get_profile_picture(other_author, 25)
        # fix: the original had a trailing comma here, turning the profile
        # url into a 1-tuple instead of a string
        profile = '/user/{}'.format(other_author.uid)
        is_enemy_user['admin'] = other_author.is_admin()
        is_enemy_user['author'] = other_author.is_author()
        is_enemy_user['special'] = other_author.is_special()
    # check for users opinion
    if bubble_type is BubbleTypes.USER and db_user and db_user.nickname != nick_of_anonymous_user:
        db_marked = None
        gravatar_link = get_profile_picture(db_user, 25)
        if argument_uid is not None and db_user is not None:
            db_marked = DBDiscussionSession.query(MarkedArgument).filter(
                MarkedArgument.argument_uid == argument_uid,
                MarkedArgument.author_uid == db_user.uid).first()
        if statement_uid is not None and db_user is not None:
            db_marked = DBDiscussionSession.query(MarkedStatement).filter(
                MarkedStatement.statement_uid == statement_uid,
                MarkedStatement.author_uid == db_user.uid).first()
        is_users_opinion = db_marked is not None
    speech = {
        'is_user': bubble_type is BubbleTypes.USER,
        'is_system': bubble_type is BubbleTypes.SYSTEM,
        'is_status': bubble_type is BubbleTypes.STATUS,
        'is_info': bubble_type is BubbleTypes.INFO,
        'is_markable': is_markable,
        'is_author': is_author,
        'is_enemy_user': is_enemy_user,
        'id': uid if len(str(uid)) > 0 else uuid4().hex,
        'bubble_url': bubble_url,
        'message': content,
        'omit_bubble_url': omit_bubble_url,
        'omit_vote_info': omit_vote_info,
        'data_type': 'argument' if argument_uid else 'statement' if statement_uid else 'None',
        'data_argument_uid': argument_uid,
        'data_statement_uid': statement_uid,
        'data_is_supportive': is_supportive,
        'is_users_opinion': is_users_opinion,
        'sender': db_user,
        'enemy': {
            'avatar': gravatar_link,
            'profile': profile,
            'available': profile is not None
        }
    }
    votecount_keys = _get_text_for_click_and_mark_count(db_user, bubble_type is BubbleTypes.USER, argument_uid,
                                                        statement_uid, speech, lang)
    speech['votecounts_message'] = votecount_keys[speech['votecounts']]
    LOG.debug(speech)
    return speech
def _get_text_for_click_and_mark_count(db_user: User, is_user: bool, argument_uid: int, statement_uid: int,
                                       speech: dict, lang: str):
    """
    Build text for a bubble, how many other participants have the same interest?

    :param db_user: current user (falls back to the anonymous user if None)
    :param is_user: boolean
    :param argument_uid: Argument.uid
    :param statement_uid: Statement.uid
    :param speech: dict()
    :param lang: ui_locales
    :return: [String]
    """
    if not db_user:
        db_user = DBDiscussionSession.query(User).filter_by(nickname=nick_of_anonymous_user).first()
    db_clicks, db_marks = _get_clicks_and_marks(argument_uid, statement_uid, db_user)
    _t = Translator(lang)
    # total number of other participants' clicks and marks
    speech['votecounts'] = len(db_clicks) if db_clicks else 0
    if db_marks:
        speech['votecounts'] += len(db_marks)
    # default text for any count; counts 0 and 1 get specialised texts below
    votecount_keys = defaultdict(lambda: "{} {}.".format(speech['votecounts'], _t.get(_.voteCountTextMore)))
    if is_user and db_user.gender == 'm':
        gender_key = _.voteCountTextFirstM
    elif is_user and db_user.gender == 'f':
        gender_key = _.voteCountTextFirstF
    else:
        gender_key = _.voteCountTextFirst
    votecount_keys[0] = '{}.'.format(_t.get(gender_key))
    votecount_keys[1] = _t.get(_.voteCountTextOneOther) + '.'
    return votecount_keys
def _get_clicks_and_marks(argument_uid, statement_uid, db_user):
    """
    Fetch other users' valid up-votes and marks for the given argument or statement.

    :param argument_uid: Argument.uid or None
    :param statement_uid: Statement.uid or None
    :param db_user: current user, excluded from both result sets
    :return: (list of clicks or None, list of marks or None)
    """
    db_clicks = None
    db_marks = None
    if argument_uid:
        db_clicks = DBDiscussionSession.query(ClickedArgument). \
            filter(ClickedArgument.argument_uid == argument_uid,
                   ClickedArgument.is_up_vote == True,
                   ClickedArgument.is_valid,
                   ClickedArgument.author_uid != db_user.uid).all()
        db_marks = DBDiscussionSession.query(MarkedArgument). \
            filter(MarkedArgument.argument_uid == argument_uid,
                   MarkedArgument.author_uid != db_user.uid).all()
    elif statement_uid:
        db_clicks = DBDiscussionSession.query(ClickedStatement). \
            filter(ClickedStatement.statement_uid == statement_uid,
                   ClickedStatement.is_up_vote == True,
                   ClickedStatement.is_valid,
                   ClickedStatement.author_uid != db_user.uid).all()
        db_marks = DBDiscussionSession.query(MarkedStatement). \
            filter(MarkedStatement.statement_uid == statement_uid,
                   MarkedStatement.author_uid != db_user.uid).all()
    return db_clicks, db_marks
def is_argument_disabled_due_to_disabled_statements(argument):
    """
    Returns true if any involved statement is disabled.

    :param argument: Argument
    :return: Boolean
    """
    if argument.conclusion_uid is None:
        # The argument's conclusion is another argument -> inspect that
        # argument's (statement) conclusion instead.
        db_argument = DBDiscussionSession.query(Argument).get(argument.argument_uid)
        # BUG FIX: was `DBDiscussionSession(Statement).get(...)`, which calls the
        # session object itself and raises a TypeError; it must be `.query(...)`
        # like every other lookup in this module.
        conclusion = DBDiscussionSession.query(Statement).get(db_argument.conclusion_uid)
        if conclusion.is_disabled:
            return True
        # check premisegroup of given arguments conclusion
        premises = _get_all_premises_of_argument(db_argument)
        for premise in premises:
            if premise.statement.is_disabled:
                return True
    else:
        # check conclusion of given argument
        conclusion = DBDiscussionSession.query(Statement).get(argument.conclusion_uid)
        if conclusion.is_disabled:
            return True
        # check premisegroup of given argument
        premises = _get_all_premises_of_argument(argument)
        for premise in premises:
            if premise.statement.is_disabled:
                return True
    return False
def is_author_of_statement(db_user: User, statement_uid: int) -> bool:
    """
    Is the user with given nickname author of the statement?

    The author is whoever wrote the statement's very first text version.

    :param db_user: User
    :param statement_uid: Statement.uid
    :return: Boolean
    """
    # Anonymous or missing users can never be authors.
    if not db_user or db_user.nickname == nick_of_anonymous_user:
        return False
    earliest_version = DBDiscussionSession.query(TextVersion) \
        .filter_by(statement_uid=statement_uid) \
        .order_by(TextVersion.uid.asc()) \
        .first()
    if earliest_version is None:
        return False
    return earliest_version.author_uid == db_user.uid
def is_author_of_argument(db_user: User, argument_uid: int) -> bool:
    """
    Is the user with given nickname author of the argument?

    :param db_user: User
    :param argument_uid: Argument.uid
    :return: Boolean
    """
    # Anonymous or missing users can never be authors.
    if not db_user or db_user.nickname == nick_of_anonymous_user:
        return False
    match = DBDiscussionSession.query(Argument).filter(
        Argument.uid == argument_uid,
        Argument.author_uid == db_user.uid).first()
    return match is not None
def _get_all_premises_of_argument(argument):
    """
    Returns list with all premises of the argument.

    :param argument: Argument
    :return: list of Premise
    """
    # The join to Statement mirrors the original query; premises of the
    # argument's premisegroup are returned as a plain list.
    query = DBDiscussionSession.query(Premise) \
        .filter_by(premisegroup_uid=argument.premisegroup_uid) \
        .join(Statement)
    return list(query.all())
def get_profile_picture(user: User, size: int = 80, ignore_privacy_settings: bool = False) -> str:
    """
    Returns the user's profile picture with the specified size.

    :param user: User (may be falsy; a random identicon seed is used then)
    :param size: Integer, default 80
    :param ignore_privacy_settings: treat the nickname as public when True
    :return: String (gravatar URL)
    """
    if not user:
        # No user: derive the avatar from a random number instead of an email.
        seed = str(random.randint(0, 999999)).encode('utf-8')
    else:
        # Users hiding their public nickname get an extra marker so their
        # identicon differs from the public one.
        suffix = '' if user.settings.should_show_public_nickname or ignore_privacy_settings else 'x'
        seed = (user.email + suffix).encode('utf-8')
    digest = hashlib.md5(seed.lower()).hexdigest()
    query = parse.urlencode({'d': 'identicon', 's': str(size)})
    return 'https://secure.gravatar.com/avatar/{}?'.format(digest) + query
def get_author_data(user: User, gravatar_on_right_side=True, linked_with_users_page=True, profile_picture_size=20) \
| |
<filename>src/ramprate/build_features.py
# -*- coding: utf-8 -*-
from typing import Dict, Optional, Union, Tuple
import pandas as pd
import numpy as np
import networkx as nx
idx = pd.IndexSlice
# Map CAMD's verbose fuel-type labels onto the four coarse fuel categories
# ("gas", "coal", "oil", "other") used throughout this module.
CAMD_FUEL_MAP = {
    "Pipeline Natural Gas": "gas",
    "Coal": "coal",
    "Diesel Oil": "oil",
    "Natural Gas": "gas",
    "Process Gas": "gas",
    "Residual Oil": "oil",
    "Other Gas": "gas",
    "Wood": "other",
    "Other Oil": "oil",
    "Coal Refuse": "coal",
    "Petroleum Coke": "oil",
    "Tire Derived Fuel": "other",
    "Other Solid Fuel": "other",
}
# Map EIA energy-source codes onto the same coarse fuel categories as
# CAMD_FUEL_MAP ("gas", "coal", "oil", "other").
EIA_FUEL_MAP = {
    "AB": "other",
    "ANT": "coal",
    "BFG": "gas",
    "BIT": "coal",
    "BLQ": "other",
    # BUG FIX: was "Coal" (capitalized); every other value is lowercase and
    # category comparisons are case-sensitive, so "CBL" would have formed a
    # spurious fifth category.
    "CBL": "coal",
    "DFO": "oil",
    "JF": "oil",
    "KER": "oil",
    "LFG": "gas",
    "LIG": "coal",
    "MSB": "other",
    "MSN": "other",
    "MSW": "other",
    "MWH": "other",
    "NG": "gas",
    "OBG": "gas",
    "OBL": "other",
    "OBS": "other",
    "OG": "gas",
    "OTH": "other",
    "PC": "oil",
    "PG": "gas",
    "PUR": "other",
    "RC": "coal",
    "RFO": "oil",
    "SC": "coal",
    "SGC": "gas",
    "SGP": "gas",
    "SLW": "other",
    "SUB": "coal",
    "SUN": "gas",  # mis-categorized gas plants with 'solar' in the name
    "TDF": "other",
    "WC": "coal",
    "WDL": "other",
    "WDS": "other",
    "WH": "other",
    "WO": "oil",
}
# Map the set of prime-mover codes appearing together in one unit to a
# technology label. Keys are frozensets so the lookup is order-independent.
TECH_TYPE_MAP = {
    frozenset({"ST"}): "steam_turbine",
    frozenset({"GT"}): "gas_turbine",
    frozenset({"CT"}): "combined_cycle",  # in 2019 about half of solo CTs might be GTs
    # Could classify by operational characteristics but there were only 20 total so didn't bother
    frozenset({"CA"}): "combined_cycle",
    frozenset({"CS"}): "combined_cycle",
    frozenset({"IC"}): "internal_combustion",
    frozenset({"CT", "CA"}): "combined_cycle",
    frozenset({"ST", "GT"}): "combined_cycle",  # I think industrial cogen or mistaken
    frozenset({"CA", "GT"}): "combined_cycle",  # most look mistaken
    frozenset({"CT", "CA", "ST"}): "combined_cycle",  # most look mistaken
}
# duration of exclusion zone around startup/shutdown, per technology type,
# in hours; -1 disables the exclusion entirely.
# values are based on plots in cell 78 of notebook 5.0
# https://github.com/catalyst-cooperative/epacems_ramp_rates/blob/main/notebooks/5.0-tb-one_to_one_ramp_rates_by_plant_type.ipynb
EXCLUSION_SIZE_HOURS = {
    "steam_turbine": 5,
    "combined_cycle": 7,
    "gas_turbine": -1,  # no exclusions
    "internal_combustion": -1,  # no exclusions
}
def _find_uptime(
    ser: pd.Series, multiindex_key: Optional[Union[str, int]] = None, downtime: bool = False
) -> pd.DataFrame:
    """summarize contiguous subsequences of non-zero values in a generation time series

    Args:
        ser (pd.Series): pandas series with datetime index (tz-aware UTC is assumed
            by the NaT sentinel below — TODO confirm upstream data is always UTC)
        multiindex_key (Optional[Union[str, int]], optional): if not None, assign new
            multiindex level to output. Used in manual groupby loops. Defaults to None.
        downtime (bool, optional): rearrange output events to refer to downtime
            instead of uptime. Defaults to False.

    Raises:
        NotImplementedError: when ser has a MultiIndex but no multiindex_key is given

    Returns:
        pd.DataFrame: table of events, with shutdown and startup timestamps
    """
    # TODO: all this multiindex stuff could be a separate wrapper function
    if isinstance(ser.index, pd.MultiIndex):
        if multiindex_key is None:
            raise NotImplementedError(
                "groupby functionality not yet implemented. Pass multiindex_key manually per group"
            )
        else:
            names = ser.index.names
            ser = ser.copy()
            ser.index = ser.index.droplevel(0)  # for groupby
    # else single plant

    # binarize and find edges: +1 marks a zero -> non-zero transition,
    # -1 marks a non-zero -> zero transition
    diff = ser.gt(0).astype(np.int8).diff()
    # last zero value of a block
    # shift(-1) to select last zero instead of first non-zero
    startups = ser.index[diff.shift(-1) == 1]
    # first zero value of a block
    # no shift needed for this side
    shutdowns = ser.index[diff == -1]
    generator_starts_with_zero = ser.iat[0] == 0
    generator_ends_with_zero = ser.iat[-1] == 0
    events = {}
    # NaT sentinel used when an event's boundary falls outside the data period
    nan = pd.Series([pd.NaT]).dt.tz_localize("UTC")
    # Events (uptime or downtime) are defined as having a start and end.
    # If the start (or end) of an event occurs outside the data
    # period, it is marked with pd.NaT
    # NOTE: 'startup' refers to generators transitioning from
    # zero power to positive power.
    # For downtime=True, this can be confusing because
    # 'startup' then indicates the END of a downtime event
    # Vice versa for 'shutdown', which indicates
    # the START of a downtime block.
    # The difference between downtime=False and downtime=True
    # is the shutdown timestamps are shifted 1 row,
    # plus some NaT accounting on the ends,
    # and the shutdown/startup column order is switched to
    # reflect the opposite begin/end convention for the events
    # NOTE(review): pd.Series.append was removed in pandas 2.0 — this function
    # requires pandas < 2 (or a port to pd.concat).
    if downtime:  # events table refers to downtime periods (blocks of zeros)
        if (
            generator_starts_with_zero
        ):  # first downtime period has unknown shutdown time, known startup
            events["shutdown"] = nan.append(pd.Series(shutdowns), ignore_index=True)
        else:  # first downtime period is fully defined
            events["shutdown"] = shutdowns
        if generator_ends_with_zero:  # last downtime period has known shutdown but unknown startup
            events["startup"] = pd.Series(startups).append(nan, ignore_index=True)
        else:  # last downtime period is fully defined
            events["startup"] = startups
    else:  # events table refers to uptime periods (blocks of non-zeros)
        if generator_starts_with_zero:  # first uptime period is fully defined
            events["startup"] = startups
        else:  # first uptime period has unknown startup time, known shutdown
            events["startup"] = nan.append(pd.Series(startups), ignore_index=True)
        if generator_ends_with_zero:  # last uptime period is fully defined
            events["shutdown"] = shutdowns
        else:  # last uptime period has known startup but unknown shutdown
            events["shutdown"] = pd.Series(shutdowns).append(nan, ignore_index=True)
    if multiindex_key is None:
        return pd.DataFrame(events)
    else:
        # re-attach the group key as the first index level so callers can
        # concatenate per-unit event tables
        events = pd.DataFrame(events)
        events.index = pd.MultiIndex.from_arrays(
            [np.full(len(events), multiindex_key), np.arange(len(events))],
            names=[names[0], "event"],
        )
        return events
def _binarize(ser: pd.Series):
"""modularize this in case I want to do more smoothing later"""
return ser.gt(0).astype(np.int8)
def _find_edges(cems: pd.DataFrame, drop_intermediates=True) -> None:
    """find timestamps of startups and shutdowns based on transition from zero to non-zero generation

    Adds 'startups' and 'shutdowns' timestamp columns to ``cems`` in place
    (NaT everywhere except at the transition rows).
    """
    cems["binarized"] = _binarize(cems["gross_load_mw"])
    # Diff the 0/1 series to find transitions, masking rows where the unit id
    # changes so we never diff across two different units. This is much faster
    # than the equivalent groupby:
    # cems.groupby(level='unit_id_epa')['binarized_col'].transform(lambda x: x.diff())
    same_unit_as_previous_row = cems["unit_id_epa"].diff().eq(0)
    cems["binary_diffs"] = cems["binarized"].diff().where(same_unit_as_previous_row)
    went_offline = cems["binary_diffs"] == -1
    came_online = cems["binary_diffs"] == 1
    cems["shutdowns"] = cems["operating_datetime_utc"].where(went_offline, pd.NaT)
    cems["startups"] = cems["operating_datetime_utc"].where(came_online, pd.NaT)
    if drop_intermediates:
        cems.drop(columns=["binarized", "binary_diffs"], inplace=True)
    return
def _distance_from_downtime(
    cems: pd.DataFrame, drop_intermediates=True, boundary_offset_hours: int = 24
) -> None:
    """calculate two columns: the number of hours to the next shutdown; and from the last startup

    Operates on ``cems`` in place. Expects the 'startups'/'shutdowns' columns
    produced by _find_edges, and a 2-level index whose first level is
    'unit_id_epa' and whose second level holds timestamps (x.index[0][1] below
    relies on that — TODO confirm at the call site).

    :param cems: CEMS frame with 'startups', 'shutdowns', 'operating_datetime_utc'
    :param drop_intermediates: drop the filled 'startups'/'shutdowns' columns afterwards
    :param boundary_offset_hours: assumed distance of a missing boundary beyond
        the edge of the dataset
    """
    # fill startups forward and shutdowns backward
    # Note that this leaves NaT values for any uptime periods at the very start/end of the timeseries
    # The second fillna handles this by assuming the real boundary is the edge of the dataset + an offset
    offset = pd.Timedelta(boundary_offset_hours, unit="h")
    cems["startups"] = (
        cems["startups"]
        .groupby(level="unit_id_epa")
        .transform(lambda x: x.ffill().fillna(x.index[0][1] - offset))
    )
    cems["shutdowns"] = (
        cems["shutdowns"]
        .groupby(level="unit_id_epa")
        .transform(lambda x: x.bfill().fillna(x.index[-1][1] + offset))
    )
    cems["hours_from_startup"] = (
        cems["operating_datetime_utc"]
        .sub(cems["startups"])
        .dt.total_seconds()
        .div(3600)
        .astype(np.float32)
    )
    # invert sign so distances are all positive
    cems["hours_to_shutdown"] = (
        cems["shutdowns"]
        .sub(cems["operating_datetime_utc"])
        .dt.total_seconds()
        .div(3600)
        .astype(np.float32)
    )
    if drop_intermediates:
        cems.drop(columns=["startups", "shutdowns"], inplace=True)
    return None
def calc_distance_from_downtime(
    cems: pd.DataFrame, classify_startup=False, drop_intermediates=True
) -> None:
    """calculate two columns: the number of hours to the next shutdown; and from the last startup

    Operates on ``cems`` in place; also adds 'hours_distance' (distance to the
    nearest downtime boundary) and, optionally, 'nearest_to_startup'.
    """
    _find_edges(cems, drop_intermediates)
    _distance_from_downtime(cems, drop_intermediates)
    cems["hours_distance"] = cems[["hours_from_startup", "hours_to_shutdown"]].min(axis=1)
    if classify_startup:
        from_start = cems["hours_from_startup"]
        to_shutdown = cems["hours_to_shutdown"]
        cems["nearest_to_startup"] = from_start < to_shutdown
        # Exact midpoints are tie-broken randomly but reproducibly (fixed seed).
        rng = np.random.default_rng(seed=42)
        coin_flips = rng.choice(np.array([True, False]), size=len(cems))
        midpoints_won_by_startup = (from_start == to_shutdown) & coin_flips
        cems.loc[midpoints_won_by_startup, "nearest_to_startup"] = True
    return None
def uptime_events(cems: pd.DataFrame, infer_boundaries=True) -> pd.DataFrame:
    """convert timeseries of generation to a table of uptime events

    :param cems: frame indexed with a 'unit_id_epa' level, with 'gross_load_mw'
        and 'operating_datetime_utc' columns
    :param infer_boundaries: fill event boundaries that fall outside the data
        range with the dataset's first/last timestamp
    :return: event table with 'startup', 'shutdown' and 'duration_hours'
    """
    units = cems.groupby(level="unit_id_epa")
    events = pd.concat(
        [_find_uptime(grp_ser, multiindex_key=grp_key) for grp_key, grp_ser in units["gross_load_mw"]]
    )
    if infer_boundaries:
        # if a timeseries starts (or ends) with uptime, the first (last) boundary is
        # outside our data range. This method uses the first (last) timestamp as the
        # boundary: a lower bound on duration.
        for col, boundary in {"startup": "first", "shutdown": "last"}.items():
            # __getattr__ doesn't work here, so resolve the groupby method by name
            boundary_timestamps = getattr(units, boundary)()[["operating_datetime_utc"]]
            filler = events.join(boundary_timestamps, on="unit_id_epa", how="left")[
                "operating_datetime_utc"
            ]
            events[col].fillna(filler, inplace=True)
    events["duration_hours"] = (
        events["shutdown"].sub(events["startup"]).dt.total_seconds().div(3600)
    )
    return events
def _filter_retirements(df: pd.DataFrame, year_range: Tuple[int, int]) -> pd.DataFrame:
"""remove retired or not-yet-existing units that have zero overlap with year_range"""
min_year = year_range[0]
max_year = year_range[1]
not_retired_before_start = df["CAMD_RETIRE_YEAR"].replace(0, 3000) >= min_year
not_built_after_end = (pd.to_datetime(df["CAMD_STATUS_DATE"]).dt.year <= max_year) & df[
"CAMD_STATUS"
].ne("RET")
return df.loc[not_retired_before_start & not_built_after_end]
def _remove_irrelevant(df: pd.DataFrame):
"""remove unmatched or excluded (non-exporting) units"""
bad = df["MATCH_TYPE_GEN"].isin({"CAMD Unmatched", "Manual CAMD Excluded"})
return df.loc[~bad]
def _prep_crosswalk_for_networkx(
xwalk: pd.DataFrame, remove_retired_or_irrelevant=False, **kwargs
) -> pd.DataFrame:
if remove_retired_or_irrelevant:
filtered = _filter_retirements(xwalk, **kwargs)
filtered = _remove_irrelevant(filtered).copy()
else:
filtered = xwalk.copy()
# networkx can't handle composite keys, so make surrogates
filtered["combustor_id"] = filtered.groupby(by=["CAMD_PLANT_ID", "CAMD_UNIT_ID"]).ngroup()
# node IDs can't overlap so add (max + 1)
filtered["generator_id"] = (
filtered.groupby(by=["CAMD_PLANT_ID", "EIA_GENERATOR_ID"]).ngroup()
+ filtered["combustor_id"].max()
+ 1
)
return filtered
def _subcomponent_ids_from_prepped_crosswalk(prepped: pd.DataFrame) -> pd.DataFrame:
    """Label each connected combustor/generator component with a 'component_id'.

    Builds a bipartite graph from the surrogate IDs created by
    _prep_crosswalk_for_networkx and tags every edge with the index of its
    connected component.

    :param prepped: crosswalk with 'combustor_id' and 'generator_id' columns
    :return: edge list frame including the original crosswalk columns plus 'component_id'
    """
    graph = nx.from_pandas_edgelist(
        prepped,
        source="combustor_id",
        target="generator_id",
        edge_attr=True,
    )
    for i, node_set in enumerate(nx.connected_components(graph)):
        subgraph = graph.subgraph(node_set)
        # combustor->generator links should never connect two nodes of the same kind
        assert nx.algorithms.bipartite.is_bipartite(
            subgraph
        ), f"non-bipartite: i={i}, node_set={node_set}"
        # NOTE(review): subgraph is a view; writing edge attributes through it
        # appears to rely on the view sharing edge-data dicts with `graph` —
        # confirm against the installed networkx version.
        nx.set_edge_attributes(subgraph, name="component_id", values=i)
    return nx.to_pandas_edgelist(graph)
def make_subcomponent_ids(
xwalk: pd.DataFrame, cems: pd.DataFrame, remove_retired_or_irrelevant=False
) -> pd.DataFrame:
column_order = list(xwalk.columns)
year_range = None
if remove_retired_or_irrelevant:
| |
<gh_stars>10-100
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
'''
@File : final_subpipe.py
@Time : 2019/11/28 20:05:43
@Author : <NAME>
@Contact : <EMAIL>
@Desc : None
'''
# .::::.
# .::::::::.
# :::::::::::
# ..:::::::::::'
# '::::::::::::'
# .::::::::::
# '::::::::::::::..
# ..::::::::::::.
# ``::::::::::::::::
# ::::``:::::::::' .:::.
# ::::' ':::::' .::::::::.
# .::::' :::: .:::::::'::::.
# .:::' ::::: .:::::::::' ':::::.
# .::' :::::.:::::::::' ':::::.
# .::' ::::::::::::::' ``::::.
# ...::: ::::::::::::' ``::.
# ```` ':. ':::::::::' ::::..
# '.:::::' ':'````..
# 美女保佑 永无BUG
import os
from collections import defaultdict
import numpy as np
import tqdm
import torch
from triplet_model import TripletModel
import pandas as pd
from utils import load_json, load_pickle, save_json, save_pickle, clean_name, get_name_index, SK_MLP
import time
from utils import get_coauthor_v2, get_year_diff_v2, get_venue_score_v2, get_org_score_v2, get_key_word_num_v2, get_relative_year_feature
from utils import get_org_with_set_score, get_venue_with_set_score, get_keywords_with_set_score
import matplotlib.pyplot as plt
import seaborn as sns
from utils import TextToVec
from multiprocessing import Pool
import math
from nltk.corpus import stopwords
import re
import string
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
sns.set()
# Input/output locations for the final (competition) scoring pipeline.
TEST_PUB_PATH = './final_dir/data/cna_test_pub.json'
TEST_UNASS_PATH = './final_dir/data/cna_test_unass_competition.json'
TEST_FEATURE_DIR = './final_dir/feature'
FINAL_DIR = './final_dir'
RESULT_SAVE_DIR = './final_dir/save'
NEW_DATA_DIR = './new-data'  # original info, for test
STACK_MODEL_DIR_v2 = './stack_model_v2'
RANDOM_SEED = 1129
np.random.seed(RANDOM_SEED)
os.makedirs(TEST_FEATURE_DIR, exist_ok=True)
os.makedirs(RESULT_SAVE_DIR, exist_ok=True)
# Feature column names; the order must line up with the flat vector returned by
# get_features(). NOTE(review): spellings like 'meadian_diff' and
# 'this_paper_coathor_count_by_all_coauthor' look like typos but must stay
# as-is — the trained models were fit against these exact column names.
BASE_COLS = [
    'coauthors_count', 'coauthors_count_by_all_count', 'coauthors_count_by_this_coauthor_count',
    'this_paper_coauthor_count', 'this_paper_coathor_count_by_all_coauthor', 'this_paper_coauthor_count_by_this_paper_coauthor_count',
    'min_diff', 'max_diff', 'mean_diff', 'meadian_diff', 'min_max_avg_diff', 'is_in_range',
    'this_year_count', 'this_year_count_by_all_year', 'is_in_cate_range', 'before_one', 'before_two',
    'later_one', 'later_two', 'venue_max_score', 'venue_mean_score', 'venue_max_add_score',
    'venue_mean_add_score', 'venue_is_match', 'venue_score_add_score', 'org_max_score', 'org_mean_score',
    'org_max_add_score', 'org_mean_add_score', 'org_is_match', 'org_score_add_score', 'org_year_abs_diff',
    'keywords_max_score', 'keywords_mean_score', 'rela_year_diff', 'rela_coauthor_count',
    'rela_coauthor_count_by1', 'rela_coauthor_count_by2', 'rela_org_max_score',
    'rela_org_mean_score', 'rela_org_max_add_score', 'rela_org_mean_add_score',
    'rela_org_score_add_score', 'rela_venue_max_score', 'rela_venue_mean_score',
    'rela_venue_max_add_score', 'rela_venue_mean_add_score', 'rela_venue_score_add_score',
    'rela_keyword_max_score', 'rela_keyword_mean_score'
]
# length: 50 !
SET_INFO_COLS = [
    'org_set_count', 'org_set_count_by_all_count',
    'org_set_count_by_this_count', 'venue_word_count', 'venue_word_count_by_all_count',
    'venue_word_count_by_this_venue_count', 'keyword_count', 'keyword_count_by_all_count',
    'keyword_count_by_this_keyword_count'
]
# length: 9 !
TITLE_COLS = [
    'title'
]
# NOTE(review): the RNG was already seeded above — this second call is redundant
# but harmless.
np.random.seed(RANDOM_SEED)
def get_features(aid_pid_pair, pid_info_dict, aid_author_info_dict, aid_year_info_dict, aid_venue_dict, aid_org_year_list, aid_keywords_dict, aid_year_all_info_dict, org_info_set, aid_venue_set, aid_keywords_set):
    """Concatenate all (author, paper) feature groups into one flat feature vector.

    The group order is significant: it must line up with BASE_COLS + SET_INFO_COLS.
    """
    feature_groups = (
        get_coauthor_v2(aid_pid_pair, aid_author_info_dict, pid_info_dict),
        get_year_diff_v2(aid_year_info_dict, pid_info_dict),
        get_venue_score_v2(aid_venue_dict, pid_info_dict),
        get_org_score_v2(aid_pid_pair, aid_org_year_list, pid_info_dict),
        get_key_word_num_v2(aid_keywords_dict, pid_info_dict),
        get_relative_year_feature(aid_pid_pair, aid_year_all_info_dict, pid_info_dict),
        get_org_with_set_score(aid_pid_pair, pid_info_dict, org_info_set),
        get_venue_with_set_score(aid_venue_set, pid_info_dict),
        get_keywords_with_set_score(aid_keywords_set, pid_info_dict),
    )
    feature = []
    for group in feature_groups:
        feature.extend(group)
    return feature
def gen_base_feature(index, multi_size):
    """Compute base + set-info features for one worker's slice of the unassigned papers.

    :param index: this worker's slice number in [0, multi_size)
    :param multi_size: total number of worker slices
    :return: dict {pid-with-index: {'candidate-aids': ndarray, 'data': ndarray}}
    """
    # process test data and save in pickle
    # testdatafeatures --> {pid-with-index: {candidate-aids: [...], data: [[xxx], [xxx], [xxx]...]}}
    test_unass = load_json(TEST_UNASS_PATH)
    test_pub = load_json(TEST_PUB_PATH)
    # whole_author_profile_pub = load_json(WHOLE_AUTHOR_PROFILE_PUB_PATH)
    # precomputed per-author profile lookups
    aid2yearinfo = load_pickle(os.path.join(NEW_DATA_DIR, 'aid2yearinfo.pkl'))
    aid2coauthor = load_pickle(os.path.join(NEW_DATA_DIR, 'aid2coauthor.pkl'))
    aid2venue = load_pickle(os.path.join(NEW_DATA_DIR, 'aid2venue.pkl'))
    aid2keywords = load_pickle(os.path.join(NEW_DATA_DIR, 'aid2keywords.pkl'))
    aid2year = load_pickle(os.path.join(NEW_DATA_DIR, 'aid2year.pkl'))
    aid2orgwithyear = load_pickle(os.path.join(NEW_DATA_DIR, 'aid2orgwithyear.pkl'))
    name2aids = load_pickle(os.path.join(NEW_DATA_DIR, 'name2aids.pkl'))
    # aid2pids = load_pickle(os.path.join(NEW_DATA_DIR, 'aid2pids.pkl'))
    aid2orgset = load_pickle(os.path.join(NEW_DATA_DIR, 'aid2orgset.pkl'))
    aid2venueset = load_pickle(os.path.join(NEW_DATA_DIR, 'aid2venueset.pkl'))
    aid2keywordsset = load_pickle(os.path.join(NEW_DATA_DIR, 'aid2keywordsset.pkl'))
    # manual name-normalization tables: pairs of (original name, corrected name)
    name_map = load_json(os.path.join(FINAL_DIR, 'name.different.modified.json'))
    original_name = [pair[0] for pair in name_map]
    changed_name = [pair[1] for pair in name_map]
    name_map2 = load_json(os.path.join(FINAL_DIR, 'name.different.2.modified.json'))
    original_name2 = [pair[0] for pair in name_map2]
    changed_name2 = [pair[1] for pair in name_map2]
    # slice boundaries for this worker
    single_range = math.ceil(len(test_unass) / multi_size)
    start = index * single_range
    end = (index + 1) * single_range if (index + 1) * single_range < len(test_unass) else len(test_unass)
    testdatafeatures = {}
    all_authors_name = list(name2aids.keys())
    print('Gen test features ...')
    for pid_with_index in tqdm.tqdm(test_unass[start:end]):
        inner_dict = {}
        # NOTE(review): this rebinds the function parameter `index`; harmless
        # here because the slice bounds were computed above, but fragile.
        now_pid, index = pid_with_index.split('-')
        author_name = test_pub[now_pid]['authors'][int(index)]['name']
        author_name = clean_name(author_name)
        # two hand-patched paper/author pairs
        if pid_with_index == 'ToCcabLT-1':
            author_name = 'junliang_wang'
        if pid_with_index == 'cVvvcFzj-1':
            author_name = 'xiaojun_liu'
        if author_name in original_name2:
            name_index = original_name2.index(author_name)
            author_name = changed_name2[name_index]
        elif author_name in original_name:
            name_index = original_name.index(author_name)
            author_name = changed_name[name_index]
        else:
            # fall back to fuzzy matching against all known author names
            index = get_name_index(author_name, all_authors_name)
            author_name = all_authors_name[index]
        if isinstance(author_name, str):
            candidate_aids = name2aids[author_name]
        elif isinstance(author_name, list):
            # a corrected entry may map to several profile names
            candidate_aids = []
            for name in author_name:
                candidate_aids.extend(name2aids[name].tolist())
            candidate_aids = np.array(candidate_aids)
        else:
            raise ValueError("check author name ! ! !")
        inner_dict['candidate-aids'] = candidate_aids
        data = []
        for aid in candidate_aids:
            new_pair = (aid, pid_with_index)
            pid_info_dict = test_pub[now_pid]
            aid_author_info_dict = aid2coauthor[aid]
            aid_year_info_dict = aid2year[aid]
            aid_venue_dict = aid2venue[aid]
            aid_org_year_list = aid2orgwithyear[aid]
            aid_keywords_dict = aid2keywords[aid]
            aid_year_all_info_dict = aid2yearinfo[aid]
            org_info_set = aid2orgset[aid]
            aid_venue_set = aid2venueset[aid]
            aid_keywords_set = aid2keywordsset[aid]
            data.append(get_features(new_pair, pid_info_dict, aid_author_info_dict, aid_year_info_dict, aid_venue_dict, aid_org_year_list, aid_keywords_dict, aid_year_all_info_dict, org_info_set, aid_venue_set, aid_keywords_set))
        data = np.array(data)
        inner_dict['data'] = data
        testdatafeatures[pid_with_index] = inner_dict
    # save_pickle(testdatafeatures, os.path.join(TEST_FEATURE_DIR, 'u6uRzaff-5.pkl'))
    return testdatafeatures
def emb_pair_to_distance(text_model_name, mission, original_emb, save_path):
    """Project embedding pairs through the triplet model and save their distances.

    original_emb shape: [(emb0, emb1), ...]
    type: numpy.ndarray

    Writes a single-column pickled DataFrame (column name = ``mission``) of
    euclidean distances in the learned embedding space to ``save_path``.
    """
    if mission not in ('title', 'abstract'):
        raise ValueError('mission value error')
    triplet_model = TripletModel()
    triplet_model.load_state_dict(torch.load(os.path.join('./text-model', text_model_name)))
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    triplet_model = triplet_model.to(device)
    triplet_model.eval()
    # split the pairs into two aligned [N, 1, emb_dim] float tensors
    left = np.expand_dims(np.stack([pair[0].tolist() for pair in original_emb]), axis=1)
    right = np.expand_dims(np.stack([pair[1].tolist() for pair in original_emb]), axis=1)
    left = torch.from_numpy(left).to(device).to(torch.float)
    right = torch.from_numpy(right).to(device).to(torch.float)
    with torch.no_grad():
        emb0 = triplet_model.get_emb(left)
        emb1 = triplet_model.get_emb(right)
        distances = torch.sqrt(torch.sum(torch.pow(emb0 - emb1, 2), dim=1))
    distances = distances.cpu().numpy()
    pd.DataFrame(data=distances, columns=[mission]).to_pickle(save_path)
def gen_title_feature():
    """Build (author-title-vector, paper-title-vector) pairs for every unassigned
    paper, cache them, then convert them to triplet-model distances on disk.
    """
    aid2titlevec = load_pickle(os.path.join(NEW_DATA_DIR, 'aid2titlevec.pkl'))
    test_unass = load_json(TEST_UNASS_PATH)
    test_pub = load_json(TEST_PUB_PATH)
    name2aids = load_pickle(os.path.join(NEW_DATA_DIR, 'name2aids.pkl'))
    texttovec = TextToVec()
    # manual name-normalization tables: pairs of (original name, corrected name)
    name_map = load_json(os.path.join(FINAL_DIR, 'name.different.modified.json'))
    original_name = [pair[0] for pair in name_map]
    changed_name = [pair[1] for pair in name_map]
    name_map2 = load_json(os.path.join(FINAL_DIR, 'name.different.2.modified.json'))
    original_name2 = [pair[0] for pair in name_map2]
    changed_name2 = [pair[1] for pair in name_map2]
    all_authors_name = list(name2aids.keys())
    # test_title_feature --> {pid-with-index: {candidate-aids: [...], data: [(emb0, meb1), ...]}}
    test_title_feature = {}
    print('Gen title emb pair ...')
    for pid_with_index in tqdm.tqdm(test_unass):
        inner_dict = {}
        now_pid, index = pid_with_index.split('-')
        author_name = test_pub[now_pid]['authors'][int(index)]['name']
        author_name = clean_name(author_name)
        # two hand-patched paper/author pairs (same as gen_base_feature)
        if pid_with_index == 'ToCcabLT-1':
            author_name = 'junliang_wang'
        if pid_with_index == 'cVvvcFzj-1':
            author_name = 'xiaojun_liu'
        if author_name in original_name2:
            name_index = original_name2.index(author_name)
            author_name = changed_name2[name_index]
        elif author_name in original_name:
            name_index = original_name.index(author_name)
            author_name = changed_name[name_index]
        else:
            # fall back to fuzzy matching against all known author names
            index = get_name_index(author_name, all_authors_name)
            author_name = all_authors_name[index]
        if isinstance(author_name, str):
            candidate_aids = name2aids[author_name]
        elif isinstance(author_name, list):
            candidate_aids = []
            for name in author_name:
                candidate_aids.extend(name2aids[name].tolist())
            candidate_aids = np.array(candidate_aids)
        else:
            raise ValueError("check author name !!!")
        inner_dict['candidate-aids'] = candidate_aids
        # vectorize this paper's title once; zero vector if the title is absent
        info = test_pub[now_pid].get('title')
        if info is None:
            emb = np.zeros(300)
        else:
            emb = texttovec.get_vec(info)
        data = []
        for aid in candidate_aids:
            emb_pair = (aid2titlevec[aid], emb)
            data.append(emb_pair)
        inner_dict['data'] = data
        test_title_feature[pid_with_index] = inner_dict
    save_pickle(test_title_feature, os.path.join(TEST_FEATURE_DIR, 'test-title-emb-pair-name-clean-2.pkl'))
    print('Gen title distance ...')
    # flatten all pairs in test_unass order; predict/save_time later consume the
    # resulting distance array in the same order
    test_title_emb_pair = load_pickle(os.path.join(TEST_FEATURE_DIR, 'test-title-emb-pair-name-clean-2.pkl'))
    test_unass = load_json(TEST_UNASS_PATH)
    title_emb_pair = []
    for pid_with_index in tqdm.tqdm(test_unass):
        for pair in test_title_emb_pair[pid_with_index]['data']:
            title_emb_pair.append(pair)
    emb_pair_to_distance(
        'tm.title.1.checkpoint.pth', 'title', title_emb_pair,
        os.path.join(TEST_FEATURE_DIR, 'test-title-distance-df-name-clean-2.pkl')
    )
def predict(models):
    """Score every candidate author with a score-weighted ensemble and write the submission.

    :param models: list of dicts with 'model'/'ss' pickle paths, a 'cols' feature
        subset, and a validation 'score' used as the ensemble weight
    """
    test_unass = load_json(TEST_UNASS_PATH)
    testdatafeatures = load_pickle(os.path.join(TEST_FEATURE_DIR, 'testdatafeatures-withsetinfo.pkl'))
    title_feature_df = pd.read_pickle(os.path.join(TEST_FEATURE_DIR, 'test-title-distance-df.pkl'))
    title_feature = title_feature_df.values
    models_loaded = []
    for model_info in models:
        model = {
            'model': load_pickle(model_info['model']),
            'ss': load_pickle(model_info['ss']),
            'cols': model_info['cols'],
            'score': model_info['score']
        }
        models_loaded.append(model)
    # normalize validation scores into ensemble weights summing to 1
    scores = [model_info['score'] for model_info in models_loaded]
    weights = [score / sum(scores) for score in scores]
    weights = np.array(weights).reshape(1, len(models_loaded))
    print(weights)
    submission = defaultdict(list)
    for pid_with_index in tqdm.tqdm(test_unass):
        candidate_aids = testdatafeatures[pid_with_index]['candidate-aids']
        data = testdatafeatures[pid_with_index]['data']
        # consume this paper's rows from the flat title-distance array; relies
        # on gen_title_feature having flattened pairs in the same order
        data_length = len(candidate_aids)
        title_data = title_feature[:data_length]
        title_feature = title_feature[data_length:]
        data = np.concatenate((data, title_data), axis=1)
        default_cols = BASE_COLS + SET_INFO_COLS + TITLE_COLS
        df = pd.DataFrame(data=data, columns=default_cols)
        inner_data = np.zeros((len(candidate_aids), len(models_loaded)))
        for num, model_info in enumerate(models_loaded):
            model = model_info['model']
            ss = model_info['ss']
            data = df[model_info['cols']].values
            data = ss.transform(data)
            # NOTE(review): assigning predict_proba output into a 1-D column
            # implies these models return a 1-D positive-class score (custom
            # SK_MLP?), not sklearn's usual (n, 2) array — confirm.
            output = model.predict_proba(data)
            inner_data[:, num] = output
        final_output = np.sum((inner_data * weights), axis=1)
        predict_author = candidate_aids[np.argmax(final_output)]
        submission[predict_author].append(pid_with_index.split('-')[0])
    save_json(submission, os.path.join(FINAL_DIR, 'result-top3models.json'))
def see_year_distribution():
    """Plot the distribution of the 'max_diff' year feature over all candidates.

    The commented-out block below is an earlier variant that plotted raw
    publication years from the test set instead.
    """
    # test_pub = load_json(TEST_PUB_PATH)
    # # test_pub = load_json('./data2/cna_data/whole_author_profile_pub.json')
    # year_count = []
    # for pid in test_pub:
    #     year = test_pub[pid].get('year', '0')
    #     if year == '':
    #         year = 0
    #     else:
    #         year = int(year)
    #     if year <= 1500 or year >= 2100:
    #         year = 0
    #     if year != 0:
    #         year_count.append(year)
    # df = pd.DataFrame(data=year_count, columns=['year'])
    # plt.figure(figsize=(10, 5))
    # sns.countplot(x='year', data=df)
    # plt.xticks(rotation='vertical')
    # plt.savefig(os.path.join(FINAL_DIR, 'test.year.png'))
    diff_year = []
    df_dict = load_pickle('./final_dir/feature/testdatafeatures-withsetinfo.pkl')
    for pid_with_index in df_dict:
        data = df_dict[pid_with_index]['data']
        default_cols = BASE_COLS + SET_INFO_COLS
        df = pd.DataFrame(data=data, columns=default_cols)
        diff_year.extend(df['max_diff'].values.tolist())
    df = pd.DataFrame(data=diff_year, columns=['diff_year'])
    plt.figure(figsize=(15, 5))
    sns.countplot(x='diff_year', data=df)
    plt.xticks(rotation='vertical')
    plt.savefig(os.path.join(FINAL_DIR, 'test.max.diff.year.png'))
def multi_gen_base_feature(multi_size):
    """Run gen_base_feature across multi_size worker processes and merge the results.

    The merged feature dict is pickled to TEST_FEATURE_DIR.
    """
    pool = Pool(multi_size)
    async_results = []
    for worker_index in range(multi_size):
        async_results.append(pool.apply_async(gen_base_feature, args=(worker_index, multi_size)))
        print('Process %d start' % worker_index)
    pool.close()
    pool.join()
    # each worker returns a disjoint slice keyed by pid-with-index, so a plain
    # dict update merges them safely
    testdatafeatures = {}
    for async_result in async_results:
        testdatafeatures.update(async_result.get())
    save_pickle(testdatafeatures, os.path.join(TEST_FEATURE_DIR, 'testdatafeatures-withsetinfo-name-clean-2.pkl'))
def save_time(model):
    """Score every candidate author with a single model and cache the raw
    probabilities, so later ensembling does not need to re-run the model.

    :param model: dict with 'model'/'ss' pickle paths, 'cols' feature subset,
        validation 'score', and a 'name' used in the output filename
    """
    test_unass = load_json(TEST_UNASS_PATH)
    testdatafeatures = load_pickle(os.path.join(TEST_FEATURE_DIR, 'testdatafeatures-withsetinfo-name-clean-2.pkl'))
    title_feature_df = pd.read_pickle(os.path.join(TEST_FEATURE_DIR, 'test-title-distance-df-name-clean-2.pkl'))
    title_feature = title_feature_df.values
    models_loaded = {
        'model': load_pickle(model['model']),
        'ss': load_pickle(model['ss']),
        'cols': model['cols'],
        'score': model['score'],
        'name': model['name'],
    }
    print(models_loaded['name'])
    model_result = {}
    for pid_with_index in tqdm.tqdm(test_unass):
        inner_dict = {}
        candidate_aids = testdatafeatures[pid_with_index]['candidate-aids']
        data = testdatafeatures[pid_with_index]['data']
        # consume this paper's rows from the flat title-distance array; relies
        # on gen_title_feature having flattened pairs in the same order
        data_length = len(candidate_aids)
        title_data = title_feature[:data_length]
        title_feature = title_feature[data_length:]
        data = np.concatenate((data, title_data), axis=1)
        default_cols = BASE_COLS + SET_INFO_COLS + TITLE_COLS
        df = pd.DataFrame(data=data, columns=default_cols)
        model = models_loaded['model']
        ss = models_loaded['ss']
        data = df[models_loaded['cols']].values
        data = ss.transform(data)
        output = model.predict_proba(data)
        inner_dict['candidate-aids'] = candidate_aids
        inner_dict['result-score'] = output
        model_result[pid_with_index] = inner_dict
    save_pickle(model_result, os.path.join(RESULT_SAVE_DIR, 'name.clean.2.%s.result.score.pkl' % models_loaded['name']))
def get_coauthor_count_for_enhence(aid_pid_pair, aid_author_info_dict, pid_info_dict):
index = int(aid_pid_pair[1].split('-')[1])
authors = pid_info_dict['authors']
authors = [clean_name(item['name']) for item in authors]
authors.pop(index)
count = 0
for author_name | |
<gh_stars>1-10
from enum import Enum, unique
from nmigen import *
from nmigen.cli import main
from nmigen.asserts import *
from .muxing import *
from .arch import Registers
from .mcycler import *
from .transparent_latch import TransparentLatch
from ..z80fi.z80fi import Z80fiInterface
class Sequencer(Elaboratable):
def __init__(self, include_z80fi=False):
    """Sets up the sequencer's signals.

    :param include_z80fi: if True, also instantiate the Z80fi formal
        verification interface and drive its control signals.
    """
    # Counts T-cycles within the current instruction (reset on prefix/fetch).
    self.cycle_num = Signal.range(0, 10)
    self.dataBusIn = Signal(8)
    # Control outputs for the current cycle.
    self.controls = SequencerControls(name="ctrls")
    # Controls latched for an EXTENDED cycle (see extendCycle()).
    self.extended_cycle_controls = SequencerControls(name="extcyc_ctrls")
    # Instruction register; latches dataBusIn while enabled.
    self.instr = TransparentLatch(8)
    # During M1, whether this is the beginning of the instruction.
    self.start_insn = Signal()
    # Where the data from a memory read should be stored.
    self.memrd_addr = Signal.enum(Register16)
    self.memrd_dest = Signal.enum(Register8)
    # Address / source register for a memory write.
    self.memwr_addr = Signal.enum(Register16)
    self.memwr_src = Signal.enum(Register8)
    # DD/FD prefix state and register-set selection.
    self.useIX = Signal()
    self.useIY = Signal()
    self.registerSet = Signal()
    #
    # Signals going to the mcycler
    #
    self.extend = Signal()
    self.last_cycle = Signal()
    self.cycle = Signal.enum(MCycle)
    #
    # Signals coming from the mcycler
    #
    # Tells the sequencer that all the actions it set up are to be
    # registered on the positive edge of the clock.
    self.act = Signal()
    self.include_z80fi = include_z80fi
    if self.include_z80fi:
        self.z80fi = Z80fiInterface()
def ports(self):
    """Returns the list of externally visible signals of the sequencer.

    NOTE: the z80fi interface ports are deliberately not included here.
    """
    return [
        self.extend,
        self.last_cycle,
        self.cycle,
        self.dataBusIn,
        self.act,
    ]
def elaborate(self, platform):
    """Builds the sequencer as an FSM with one state per T-cycle.

    States named *_T2 can be waitstated by the MCycler; they only
    advance while `self.act` is asserted.
    """
    m = Module()

    # INSTR register
    m.submodules.instr = self.instr
    m.d.comb += self.instr.input.eq(self.dataBusIn)

    # When the MCycler is waitstated, there will be no act. In any other
    # case, every state transition leads to an act.
    with m.FSM(domain="pos", reset="RESET") as fsm:
        # defaults -- every control is inactive unless a state drives it.
        m.d.comb += self.controls.eq(0)
        m.d.comb += self.controls.useIX.eq(self.useIX)
        m.d.comb += self.controls.useIY.eq(self.useIY)
        m.d.comb += self.controls.registerSet.eq(self.registerSet)
        m.d.comb += self.controls.readRegister8.eq(Register8.NONE)
        m.d.comb += self.controls.incR.eq(0)
        if self.include_z80fi:
            # z80fi control strobes default to inactive each cycle.
            m.d.comb += self.z80fi.control.add_operand.eq(0)
            m.d.comb += self.z80fi.control.add_memrd_access.eq(0)
            m.d.comb += self.z80fi.control.add_memwr_access.eq(0)
            m.d.comb += self.z80fi.control.add_iord_access.eq(0)
            m.d.comb += self.z80fi.control.add_iowr_access.eq(0)
            m.d.comb += self.z80fi.control.add_tcycle.eq(0)
            m.d.comb += self.z80fi.control.add_mcycle.eq(MCycle.NONE)
            m.d.comb += self.z80fi.control.save_registers_in.eq(0)
            m.d.comb += self.z80fi.control.save_registers_out.eq(0)
            m.d.comb += self.z80fi.control.save_instruction.eq(0)
            m.d.comb += self.z80fi.control.set_valid.eq(0)
            m.d.comb += self.z80fi.control.clear.eq(0)

        with m.State("RESET"):
            # Clear prefix and register-set state, then start fetching.
            m.d.pos += self.useIX.eq(0)
            m.d.pos += self.useIY.eq(0)
            m.d.pos += self.registerSet.eq(0)
            self.initiateInstructionFetch(m)

        with m.State("M1_T1"):
            # PC drives the address bus; data comes from the MCycler.
            m.d.comb += self.controls.readRegister16.eq(Register16.PC)
            m.d.comb += self.controls.readRegister8.eq(
                Register8.MCYCLER_RDATA)
            if self.include_z80fi:
                with m.If(self.start_insn):
                    # Take a snapshot of the state. This is the state coming
                    # out of the previous instruction. We want to keep the
                    # state going in to the previous instruction, and we'll
                    # load up the state going in to this instruction on the
                    # next cycle.
                    m.d.comb += self.z80fi.control.set_valid.eq(1)
                    m.d.comb += self.z80fi.control.save_registers_out.eq(1)
            m.next = "M1_T2"

        # This state can be waitstated. If waitstated, self.act will be 0.
        with m.State("M1_T2"):
            m.d.comb += self.controls.readRegister16.eq(Register16.PC)
            m.d.comb += self.controls.readRegister8.eq(
                Register8.MCYCLER_RDATA)
            with m.If(self.act):
                # Advance PC and close the instruction latch.
                m.d.comb += self.controls.addrIncDecSetting.eq(
                    IncDecSetting.INC)
                m.d.comb += self.controls.writeRegister16.eq(Register16.PC)
                m.d.pos += self.instr.en.eq(0)
                # Take a snapshot of the state. This is the state going
                # in to this instruction. Also the instruction register.
                # We do this even if the instruction was prefixed, because
                # we don't count the prefix in the collected data --
                # otherwise we'd have to allow infinite prefixes!
                if self.include_z80fi:
                    m.d.comb += [
                        self.z80fi.control.save_registers_in.eq(1),
                        self.z80fi.control.clear.eq(1),
                        self.z80fi.control.add_mcycle.eq(MCycle.M1),
                        self.z80fi.control.save_instruction.eq(1),
                        self.z80fi.control.instr.eq(self.instr.input),
                        self.z80fi.control.useIX.eq(self.controls.useIX),
                        self.z80fi.control.useIY.eq(self.controls.useIY),
                    ]
                m.next = "M1_T3"

        with m.State("M1_T3"):
            # Refresh: R register drives the address bus.
            m.d.comb += self.controls.readRegister16.eq(Register16.R)
            if self.include_z80fi:
                m.d.comb += self.z80fi.control.add_tcycle.eq(1)
            m.next = "M1_T4"

        with m.State("M1_T4"):
            m.d.comb += self.controls.readRegister16.eq(Register16.R)
            m.d.comb += self.controls.incR.eq(1)
            m.d.pos += self.start_insn.eq(0)
            if self.include_z80fi:
                m.d.comb += self.z80fi.control.add_tcycle.eq(1)
            self.execute(m)

        with m.State("EXTENDED"):
            # Controls for extended cycles were latched by extendCycle().
            m.d.comb += self.controls.eq(self.extended_cycle_controls)
            if self.include_z80fi:
                m.d.comb += self.z80fi.control.add_tcycle.eq(1)
            self.execute(m)

        with m.State("RDOPERAND_T1"):
            m.d.comb += self.controls.readRegister16.eq(Register16.PC)
            m.d.comb += self.controls.readRegister8.eq(
                Register8.MCYCLER_RDATA)
            if self.include_z80fi:
                m.d.comb += self.z80fi.control.add_mcycle.eq(MCycle.MEMRD)
            m.next = "RDOPERAND_T2"

        # This state can be waitstated. If waitstated, self.act will be 0.
        with m.State("RDOPERAND_T2"):
            m.d.comb += self.controls.readRegister16.eq(Register16.PC)
            m.d.comb += self.controls.readRegister8.eq(
                Register8.MCYCLER_RDATA)
            with m.If(self.act):
                if self.include_z80fi:
                    m.d.comb += self.z80fi.control.add_tcycle.eq(1)
                m.next = "RDOPERAND_T3"

        with m.State("RDOPERAND_T3"):
            # Store the fetched operand and advance PC past it.
            m.d.comb += self.controls.readRegister16.eq(Register16.PC)
            m.d.comb += self.controls.addrIncDecSetting.eq(
                IncDecSetting.INC)
            m.d.comb += self.controls.writeRegister16.eq(Register16.PC)
            m.d.comb += self.controls.readRegister8.eq(
                Register8.MCYCLER_RDATA)
            m.d.comb += self.controls.writeRegister8.eq(self.memrd_dest)
            self.execute(m)
            if self.include_z80fi:
                m.d.comb += self.z80fi.control.add_operand.eq(1)
                m.d.comb += self.z80fi.control.data.eq(self.z80fi.bus.data)
                m.d.comb += self.z80fi.control.addr.eq(self.z80fi.bus.addr)
                m.d.comb += self.z80fi.control.add_tcycle.eq(1)

        with m.State("RDMEM_T1"):
            m.d.comb += self.controls.readRegister16.eq(self.memrd_addr)
            m.d.comb += self.controls.readRegister8.eq(
                Register8.MCYCLER_RDATA)
            if self.include_z80fi:
                m.d.comb += self.z80fi.control.add_mcycle.eq(MCycle.MEMRD)
            m.next = "RDMEM_T2"

        # This state can be waitstated. If waitstated, self.act will be 0.
        with m.State("RDMEM_T2"):
            m.d.comb += self.controls.readRegister16.eq(self.memrd_addr)
            m.d.comb += self.controls.readRegister8.eq(
                Register8.MCYCLER_RDATA)
            with m.If(self.act):
                if self.include_z80fi:
                    m.d.comb += self.z80fi.control.add_tcycle.eq(1)
                m.next = "RDMEM_T3"

        with m.State("RDMEM_T3"):
            # Deliver the read byte to its destination register.
            m.d.comb += self.controls.readRegister16.eq(self.memrd_addr)
            m.d.comb += self.controls.readRegister8.eq(
                Register8.MCYCLER_RDATA)
            m.d.comb += self.controls.writeRegister8.eq(self.memrd_dest)
            self.execute(m)
            if self.include_z80fi:
                m.d.comb += self.z80fi.control.add_memrd_access.eq(1)
                m.d.comb += self.z80fi.control.data.eq(self.z80fi.bus.data)
                m.d.comb += self.z80fi.control.addr.eq(self.z80fi.bus.addr)
                m.d.comb += self.z80fi.control.add_tcycle.eq(1)

        with m.State("WRMEM_T1"):
            m.d.comb += self.controls.readRegister16.eq(self.memwr_addr)
            m.d.comb += self.controls.readRegister8.eq(self.memwr_src)
            if self.include_z80fi:
                m.d.comb += self.z80fi.control.add_mcycle.eq(MCycle.MEMWR)
            m.next = "WRMEM_T2"

        # This state can be waitstated. If waitstated, self.act will be 0.
        with m.State("WRMEM_T2"):
            m.d.comb += self.controls.readRegister16.eq(self.memwr_addr)
            m.d.comb += self.controls.readRegister8.eq(self.memwr_src)
            with m.If(self.act):
                if self.include_z80fi:
                    m.d.comb += self.z80fi.control.add_tcycle.eq(1)
                m.next = "WRMEM_T3"

        with m.State("WRMEM_T3"):
            m.d.comb += self.controls.readRegister16.eq(self.memwr_addr)
            m.d.comb += self.controls.readRegister8.eq(self.memwr_src)
            self.execute(m)
            if self.include_z80fi:
                m.d.comb += self.z80fi.control.add_memwr_access.eq(1)
                m.d.comb += self.z80fi.control.data.eq(self.z80fi.bus.data)
                m.d.comb += self.z80fi.control.addr.eq(self.z80fi.bus.addr)
                m.d.comb += self.z80fi.control.add_tcycle.eq(1)

        with m.State("INTERNAL_T1"):
            if self.include_z80fi:
                m.d.comb += self.z80fi.control.add_mcycle.eq(MCycle.INTERNAL)
            m.next = "INTERNAL_T2"
            m.d.comb += self.cycle.eq(MCycle.INTERNAL)
            self.execute(m)

        with m.State("INTERNAL_T2"):
            if self.include_z80fi:
                m.d.comb += self.z80fi.control.add_tcycle.eq(1)
            m.next = "INTERNAL_T3"
            m.d.comb += self.cycle.eq(MCycle.INTERNAL)
            self.execute(m)

        with m.State("INTERNAL_T3"):
            if self.include_z80fi:
                m.d.comb += self.z80fi.control.add_tcycle.eq(1)
            self.execute(m)

        with m.State("HALT"):
            # Terminal state: loop forever.
            m.next = "HALT"

    return m
def initiateInstructionFetch(self, m):
    """Initiates an M1 cycle for the first byte in an instruction.

    This resets any setting from prefixes and resets the cycle number.
    """
    self.initiateOpcodeFetch(m)
    # A fresh instruction fetch always terminates the previous instruction.
    m.d.comb += self.last_cycle.eq(1)
    # Clear any DD/FD prefix state carried from the previous instruction.
    m.d.pos += self.useIX.eq(0)
    m.d.pos += self.useIY.eq(0)
    m.d.pos += self.start_insn.eq(1)
    m.d.pos += self.instr.en.eq(1)
    m.d.pos += self.cycle_num.eq(0)
def initiateOpcodeFetch(self, m):
    """Initiates an M1 cycle for the first byte in an opcode.

    Differs from initiateInstructionFetch() in that it retains
    prefix settings and cycle numbers.

    * Registers (PC) -> addr bus (MCycler always gets this)
    * Enable INSTR.
    * PC is automatically incremented by state machine
    """
    m.next = "M1_T1"
    m.d.comb += self.cycle.eq(MCycle.M1)
    m.d.pos += self.instr.en.eq(1)
def initiateOperandRead(self, m):
    """Initiates a memory read of an instruction operand.

    * Registers (PC) -> addr bus (MCycler always gets this)
    * Disable INSTR.
    * Instruction decides where data bus goes once read is done.
    * PC is automatically incremented by state machine
    """
    m.next = "RDOPERAND_T1"
    m.d.comb += self.cycle.eq(MCycle.MEMRD)
    m.d.pos += self.instr.en.eq(0)
def initiateOperandReadInto(self, m, reg):
    """Initiates a memory read of an instruction operand into reg.

    * Registers (PC) -> addr bus (MCycler always gets this)
    * Disable INSTR.
    * Data goes into reg when done.
    * PC is automatically incremented by state machine
    """
    m.next = "RDOPERAND_T1"
    m.d.comb += self.cycle.eq(MCycle.MEMRD)
    m.d.pos += self.instr.en.eq(0)
    # RDOPERAND_T3 routes the fetched byte into memrd_dest.
    m.d.pos += self.memrd_dest.eq(reg)
def initiateMemRead(self, m, reg_addr, reg_data):
    """Initiates a memory read using a 16-bit register as address.

    :param reg_addr: 16-bit register supplying the address
    :param reg_data: 8-bit register receiving the data when done
    """
    m.next = "RDMEM_T1"
    m.d.comb += self.cycle.eq(MCycle.MEMRD)
    m.d.pos += self.memrd_addr.eq(reg_addr)
    m.d.pos += self.memrd_dest.eq(reg_data)
    m.d.pos += self.instr.en.eq(0)
def initiateMemWrite(self, m, reg_addr, reg_data):
    """Initiates a memory write using a 16-bit register as address.

    :param reg_addr: 16-bit register supplying the address
    :param reg_data: 8-bit register supplying the data to write
    """
    m.next = "WRMEM_T1"
    m.d.comb += self.cycle.eq(MCycle.MEMWR)
    m.d.pos += self.memwr_addr.eq(reg_addr)
    m.d.pos += self.memwr_src.eq(reg_data)
    m.d.pos += self.instr.en.eq(0)
def initiateInternalOperation(self, m):
    """Initiates an internal (no-bus-access) machine cycle."""
    m.next = "INTERNAL_T1"
    m.d.comb += self.cycle.eq(MCycle.INTERNAL)
    m.d.pos += self.instr.en.eq(0)
def setDataBusSource(self, m, reg):
    """Selects which 8-bit register drives the data bus this cycle."""
    m.d.comb += self.controls.readRegister8.eq(reg)
def writeRegister8(self, m, reg):
    """Selects which 8-bit register is written from the data bus this cycle."""
    m.d.comb += self.controls.writeRegister8.eq(reg)
def aluAddrAddLow(self, m, reg16operand, reg_dest):
    """Runs the address ALU on reg16operand in an extended cycle and stores
    the low byte of the result into reg_dest.

    NOTE(review): exact ALU operation depends on the addrALU implementation
    (not visible here) -- presumably an index-offset add; confirm.
    """
    self.extendCycle(m)
    m.d.pos += self.extended_cycle_controls.addrALUInput.eq(reg16operand)
    # Byte select 0 = low byte of the ALU result.
    m.d.pos += self.extended_cycle_controls.addrALUInputByte.eq(0)
    m.d.pos += self.extended_cycle_controls.readRegister8.eq(
        Register8.ADDR_ALU)
    m.d.pos += self.extended_cycle_controls.writeRegister8.eq(reg_dest)
def aluAddrAddHigh(self, m, reg16operand, reg_dest):
    """Runs the address ALU on reg16operand in an extended cycle and stores
    the high byte of the result into reg_dest.

    Counterpart of aluAddrAddLow() for the upper result byte.
    """
    self.extendCycle(m)
    m.d.pos += self.extended_cycle_controls.addrALUInput.eq(reg16operand)
    # Byte select 1 = high byte of the ALU result.
    m.d.pos += self.extended_cycle_controls.addrALUInputByte.eq(1)
    m.d.pos += self.extended_cycle_controls.readRegister8.eq(
        Register8.ADDR_ALU)
    m.d.pos += self.extended_cycle_controls.writeRegister8.eq(reg_dest)
def extendCycle(self, m):
    """Moves the FSM into the EXTENDED state for one extra T-cycle.

    The controls to apply during that cycle are latched into
    extended_cycle_controls (cleared here; callers then set fields).
    """
    m.next = "EXTENDED"
    m.d.comb += self.extend.eq(1)
    m.d.pos += self.extended_cycle_controls.eq(0)
    # Carry the prefix / register-set context into the extended cycle.
    m.d.pos += self.extended_cycle_controls.useIX.eq(self.useIX)
    m.d.pos += self.extended_cycle_controls.useIY.eq(self.useIY)
    m.d.pos += self.extended_cycle_controls.registerSet.eq(
        self.registerSet)
def execute(self, m):
    """Decodes INSTR and dispatches to the instruction's handler.

    Called in the final T-cycle of a machine cycle to decide what the
    next machine cycle should be.
    """
    m.d.pos += self.cycle_num.eq(self.cycle_num + 1)
    with m.Switch(self.instr.output):
        with m.Case(0xDD):
            # DD prefix: select IX, restart cycle count, fetch next opcode.
            m.d.pos += self.useIX.eq(1)
            m.d.pos += self.useIY.eq(0)
            m.d.pos += self.cycle_num.eq(0)
            self.initiateOpcodeFetch(m)
        with m.Case(0xFD):
            # FD prefix: select IY.
            m.d.pos += self.useIX.eq(0)
            m.d.pos += self.useIY.eq(1)
            m.d.pos += self.cycle_num.eq(0)
            self.initiateOpcodeFetch(m)
        with m.Case("00000000"):
            self.NOP(m)
        with m.Case("01------"):
            # LD r, r' family (includes (HL) forms and HALT).
            self.LD_REG_REG(m)
        with m.Case("00---110"):
            # LD r, n family.
            self.LD_REG_N(m)
def NOP(self, m):
    """NOP instruction: simply fetch the next instruction."""
    self.initiateInstructionFetch(m)
def LD_REG_REG_gen(self, m):
dst_r = self.instr.output[3:6]
src_r = self.instr.output[0:3]
dst_hl = (dst_r == 6)
src_hl = (src_r == 6)
indexed = self.controls.useIX | self.controls.useIY
reg_to_reg = ~dst_hl & ~src_hl
halt = dst_hl & src_hl
rd_indirect = src_hl & ~dst_hl & ~indexed
wr_indirect = ~src_hl & dst_hl & ~indexed
rd_indexed = src_hl & ~dst_hl & indexed
wr_indexed = ~src_hl & dst_hl & indexed
conditions = [reg_to_reg, halt, rd_indirect, wr_indirect, rd_indexed, wr_indexed]
programs = [
[(Step.COPY_REG8, Arg.REG8_R, Arg.REG8_R, MCycle.M1, 0)],
[(Step.HALT, Arg.NONE, Arg.NONE, MCycle.HALT, 0)],
[(Step.MEM_RD, Arg.HL, Arg.REG8_R, MCycle.MEMRD, 0)],
[(Step.MEM_WR, Arg.HL, Arg.REG8_R, MCycle.MEMWR, 0)],
[(Step.MEM_RD, Arg.PC, Arg.OFFSET, MCycle.MEMRD, 1),
(Step.NOP, Arg.NONE, Arg.NONE, MCycle.INTERNAL, 2),
(Step.NOP, Arg.NONE, Arg.NONE, MCycle.EXTEND, 3),
(Step.NOP, Arg.NONE, Arg.NONE, MCycle.EXTEND, 4),
(Step.ADDR_ALU_ADD_LO, Arg.HL, Arg.Z, MCycle.EXTEND, 5),
(Step.ADDR_ALU_ADD_HI, Arg.HL, Arg.W, MCycle.EXTEND, | |
identifier (see below and Section 9.7.7).
value (required, string): The identifier itself.
expires (optional, string): The timestamp after which the server will consider this authorization invalid, encoded in the format specified in [RFC3339]. This field is REQUIRED for objects with "valid" in the "status" field.
status (required, string):
challenges (required, array of objects):
wildcard (optional, boolean)
potentially raises:
errors.AcmeMissingChallenges
"""
log.info("getcreate__AcmeAuthorization(")
if not dbAcmeOrder:
raise ValueError("do not invoke this without a `dbAcmeOrder`")
is_created__AcmeAuthorization = None
dbAcmeAuthorization = get__AcmeAuthorization__by_authorization_url(
ctx, authorization_url
)
if not dbAcmeAuthorization:
#
dbAcmeAuthorization = model_objects.AcmeAuthorization()
dbAcmeAuthorization.authorization_url = authorization_url
dbAcmeAuthorization.timestamp_created = ctx.timestamp
dbAcmeAuthorization.acme_status_authorization_id = (
model_utils.Acme_Status_Authorization.ID_DEFAULT
)
dbAcmeAuthorization.acme_order_id__created = dbAcmeOrder.id
ctx.dbSession.add(dbAcmeAuthorization)
ctx.dbSession.flush(
objects=[
dbAcmeAuthorization,
]
)
is_created__AcmeAuthorization = True
dbOrder2Auth = model_objects.AcmeOrder2AcmeAuthorization()
dbOrder2Auth.acme_order_id = dbAcmeOrder.id
dbOrder2Auth.acme_authorization_id = dbAcmeAuthorization.id
dbOrder2Auth.is_present_on_new_order = is_via_new_order
ctx.dbSession.add(dbOrder2Auth)
ctx.dbSession.flush(
objects=[
dbOrder2Auth,
]
)
is_created__AcmeAuthorization2Order = True
_result = process__AcmeAuthorization_payload(
ctx,
authorization_payload=authorization_payload,
authenticatedUser=authenticatedUser,
dbAcmeAuthorization=dbAcmeAuthorization,
dbAcmeOrder=dbAcmeOrder,
transaction_commit=transaction_commit,
)
# persist this to the db
if transaction_commit:
ctx.pyramid_transaction_commit()
return (dbAcmeAuthorization, is_created__AcmeAuthorization)
def process__AcmeAuthorization_payload(
    ctx,
    authorization_payload=None,
    authenticatedUser=None,
    dbAcmeAuthorization=None,
    dbAcmeOrder=None,
    transaction_commit=None,
):
    """
    Process an RFC-8555 authorization payload against the local database:
    associate the AcmeAuthorization with the AcmeOrder (if not already),
    update the authorization record from the payload, and get-or-create the
    AcmeChallenges it contains.

    :param ctx: (required) A :class:`lib.utils.ApiContext` instance
    :param authorization_payload: (required) an RFC-8555 authorization payload
    :param authenticatedUser: (optional) an object which contains a `accountkey_thumbprint` attribute
    :param dbAcmeAuthorization: (required) The :class:`model.objects.AcmeAuthorization` associated with the discovered item
    :param dbAcmeOrder: (required) The :class:`model.objects.AcmeOrder` associated with the discovered item
    :param transaction_commit: (required) Boolean value. required to indicate this persists to the database.

    :returns: ``True``
    """
    log.info("process__AcmeAuthorization_payload")
    # is this AcmeAuthorization already associated with the AcmeOrder?
    dbOrder2Auth = (
        ctx.dbSession.query(model_objects.AcmeOrder2AcmeAuthorization)
        .filter(
            model_objects.AcmeOrder2AcmeAuthorization.acme_order_id == dbAcmeOrder.id,
            model_objects.AcmeOrder2AcmeAuthorization.acme_authorization_id
            == dbAcmeAuthorization.id,
        )
        .first()
    )
    if not dbOrder2Auth:
        dbOrder2Auth = model_objects.AcmeOrder2AcmeAuthorization()
        dbOrder2Auth.acme_order_id = dbAcmeOrder.id
        dbOrder2Auth.acme_authorization_id = dbAcmeAuthorization.id
        dbOrder2Auth.is_present_on_new_order = False
        ctx.dbSession.add(dbOrder2Auth)
        ctx.dbSession.flush(
            objects=[
                dbOrder2Auth,
            ]
        )
    # no matter what, update
    # this will set the following:
    # `dbAcmeAuthorization.timestamp_expires`
    # `dbAcmeAuthorization.domain_id`
    # `dbAcmeAuthorization.acme_status_authorization_id`
    # `dbAcmeAuthorization.timestamp_updated`
    update_AcmeAuthorization_from_payload(
        ctx, dbAcmeAuthorization, authorization_payload
    )
    # parse the payload for our http01 challenge
    try:
        getcreate__AcmeChallenges_via_payload(
            ctx,
            authenticatedUser=authenticatedUser,
            dbAcmeAuthorization=dbAcmeAuthorization,
            authorization_payload=authorization_payload,
        )
    except errors.AcmeMissingChallenges:
        # best-effort: a payload without the required challenges is tolerated
        pass
    # persist this to the db
    if transaction_commit:
        ctx.pyramid_transaction_commit()
    return True
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def getcreate__AcmeChallenges_via_payload(
    ctx,
    authenticatedUser=None,
    dbAcmeAuthorization=None,
    authorization_payload=None,
):
    """
    Get-or-create the AcmeChallenges described by an authorization payload.

    :param ctx: (required) A :class:`lib.utils.ApiContext` instance
    :param authenticatedUser: (optional) an object which contains a `accountkey_thumbprint` attribute
    :param dbAcmeAuthorization: (required) The :class:`model.objects.AcmeAuthorization` associated with the payload
    :param authorization_payload: (required) an RFC-8555 authorization payload

    returns:
        a list of (`model.objects.AcmeChallenge`, is_created) tuples

    potentially raises:
        errors.AcmeMissingChallenges
    """
    results = []
    payload_challenges = lib.acme_v2.get_authorization_challenges(
        authorization_payload,
        required_challenges=[
            "http-01",
        ],
    )
    for _challenge in payload_challenges.values():
        if _challenge is None:
            continue
        _url = _challenge["url"]
        _type_id = model_utils.AcmeChallengeType.from_string(_challenge["type"])
        _status_id = model_utils.Acme_Status_Challenge.from_string(
            _challenge["status"]
        )
        _created = False
        dbAcmeChallenge = get__AcmeChallenge__by_challenge_url(ctx, _url)
        if dbAcmeChallenge is None:
            _token = _challenge["token"]
            # TODO: should we build an authenticatedUser here?
            _keyauthorization = None
            if authenticatedUser:
                _keyauthorization = lib.acme_v2.create_challenge_keyauthorization(
                    _token, authenticatedUser.accountKeyData
                )
            dbAcmeChallenge = create__AcmeChallenge(
                ctx,
                dbAcmeAuthorization=dbAcmeAuthorization,
                dbDomain=dbAcmeAuthorization.domain,
                challenge_url=_url,
                token=_token,
                keyauthorization=_keyauthorization,
                acme_challenge_type_id=_type_id,
                acme_status_challenge_id=_status_id,
                is_via_sync=True,
            )
            _created = True
        elif dbAcmeChallenge.acme_status_challenge_id != _status_id:
            # the upstream status changed; mirror it locally
            dbAcmeChallenge.acme_status_challenge_id = _status_id
            dbAcmeChallenge.timestamp_updated = datetime.datetime.utcnow()
            ctx.dbSession.add(dbAcmeChallenge)
            ctx.dbSession.flush(objects=[dbAcmeChallenge])
        results.append((dbAcmeChallenge, _created))
    return results
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def getcreate__AcmeDnsServer(ctx, root_url, is_global_default=None):
    """
    Get-or-create an acme-dns server record (AcmeDnsServer).

    :param ctx: (required) A :class:`lib.utils.ApiContext` instance
    :param root_url: root url of the acme-dns server
    :param is_global_default: if truthy, mark this server as the global default

    return (dbAcmeDnsServer, is_created)
    """
    dbAcmeDnsServer = get__AcmeDnsServer__by_root_url(ctx, root_url)
    is_created = dbAcmeDnsServer is None
    if is_created:
        # bookkeeping: record the insert as an operations event
        event_payload_dict = utils.new_event_payload_dict()
        dbOperationsEvent = log__OperationsEvent(
            ctx, model_utils.OperationsEventType.from_string("AcmeDnsServer__insert")
        )
        dbAcmeDnsServer = model_objects.AcmeDnsServer()
        dbAcmeDnsServer.root_url = root_url
        dbAcmeDnsServer.timestamp_created = ctx.timestamp
        dbAcmeDnsServer.operations_event_id__created = dbOperationsEvent.id
        dbAcmeDnsServer.is_active = True
        ctx.dbSession.add(dbAcmeDnsServer)
        ctx.dbSession.flush(objects=[dbAcmeDnsServer])
        event_payload_dict["domain.id"] = dbAcmeDnsServer.id
        dbOperationsEvent.set_event_payload(event_payload_dict)
        ctx.dbSession.flush(objects=[dbOperationsEvent])
        _log_object_event(
            ctx,
            dbOperationsEvent=dbOperationsEvent,
            event_status_id=model_utils.OperationsObjectEventStatus.from_string(
                "AcmeDnsServer__insert"
            ),
            dbAcmeDnsServer=dbAcmeDnsServer,
        )
    if is_global_default:
        _res = update_AcmeDnsServer__set_global_default(ctx, dbAcmeDnsServer)
    return (dbAcmeDnsServer, is_created)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def getcreate__CertificateCAChain__by_pem_text(
    ctx,
    chain_pem,
    display_name=None,
):
    """
    Get-or-create a CertificateCAChain from PEM text.

    :param ctx: A :class:`lib.utils.ApiContext` instance
    :param chain_pem: PEM text containing one or more CA certificates
    :param display_name: optional display name for the chain and its certs

    return (dbCertificateCAChain, is_created)
    """
    chain_pem = lib.cert_utils.cleanup_pem_text(chain_pem)
    chain_certs = lib.cert_utils.split_pem_chain(chain_pem)  # this will clean it
    if not chain_certs:
        raise ValueError("Did not find at least 1 Certificate in this Chain.")
    dbCertificateCAChain = get__CertificateCAChain__by_pem_text(ctx, chain_pem)
    # Ensure the certificate chain is structured front to back;
    # this will raise an error otherwise.
    if len(chain_certs) > 1:
        lib.cert_utils.ensure_chain_order(chain_certs)
    is_created = dbCertificateCAChain is None
    if is_created:
        chain_pem_md5 = utils.md5_text(chain_pem)
        # get-or-create every certificate in the chain
        dbCertificateCAs = [
            getcreate__CertificateCA__by_pem_text(
                ctx, _pem, display_name=display_name
            )[0]
            for _pem in chain_certs
        ]
        # bookkeeping
        event_payload_dict = utils.new_event_payload_dict()
        dbOperationsEvent = log__OperationsEvent(
            ctx,
            model_utils.OperationsEventType.from_string("CertificateCAChain__insert"),
        )
        dbCertificateCAChain = model_objects.CertificateCAChain()
        dbCertificateCAChain.display_name = display_name or "discovered"
        dbCertificateCAChain.timestamp_created = ctx.timestamp
        dbCertificateCAChain.chain_pem = chain_pem
        dbCertificateCAChain.chain_pem_md5 = chain_pem_md5
        dbCertificateCAChain.certificate_ca_0_id = dbCertificateCAs[0].id
        dbCertificateCAChain.certificate_ca_n_id = dbCertificateCAs[-1].id
        dbCertificateCAChain.chain_length = len(dbCertificateCAs)
        dbCertificateCAChain.certificate_ca_ids_string = ",".join(
            str(_ca.id) for _ca in dbCertificateCAs
        )
        dbCertificateCAChain.operations_event_id__created = dbOperationsEvent.id
        ctx.dbSession.add(dbCertificateCAChain)
        ctx.dbSession.flush(objects=[dbCertificateCAChain])
        event_payload_dict["certificate_ca_chain.id"] = dbCertificateCAChain.id
        dbOperationsEvent.set_event_payload(event_payload_dict)
        ctx.dbSession.flush(objects=[dbOperationsEvent])
        _log_object_event(
            ctx,
            dbOperationsEvent=dbOperationsEvent,
            event_status_id=model_utils.OperationsObjectEventStatus.from_string(
                "CertificateCAChain__insert"
            ),
            dbCertificateCAChain=dbCertificateCAChain,
        )
    return (dbCertificateCAChain, is_created)
def getcreate__CertificateCA__by_pem_text(
    ctx,
    cert_pem,
    display_name=None,
    is_trusted_root=None,
    key_technology_id=None,
):
    """
    Gets or Creates CertificateCAs

    :param ctx: (required) A :class:`lib.utils.ApiContext` instance
    :param cert_pem: (required) PEM text containing exactly one certificate
    :param display_name: a name to display this as
    :param is_trusted_root:
    :param key_technology_id: :class:`lib.utils.KeyTechnology` value; if
        supplied, it must match the technology detected in the certificate

    :returns: (dbCertificateCA, is_created)
    :raises ValueError: if the PEM does not contain exactly one certificate,
        or if `key_technology_id` conflicts with the detected value
    """
    cert_pem = lib.cert_utils.cleanup_pem_text(cert_pem)
    _certs = lib.cert_utils.split_pem_chain(cert_pem)  # this will clean it
    if len(_certs) > 1:
        raise ValueError("More than 1 Certificate in this PEM.")
    elif len(_certs) != 1:
        raise ValueError("Did not find 1 Certificate in this PEM.")
    is_created = False
    dbCertificateCA = get__CertificateCA__by_pem_text(ctx, cert_pem)
    if not dbCertificateCA:
        cert_pem_md5 = utils.md5_text(cert_pem)
        _tmpfile = None
        # FIX: the original wrapped this in `except Exception as exc: raise`,
        # a no-op clause; try/finally alone guarantees tempfile cleanup.
        try:
            if lib.cert_utils.NEEDS_TEMPFILES:
                _tmpfile = lib.cert_utils.new_pem_tempfile(cert_pem)
            # validate
            lib.cert_utils.validate_cert(
                cert_pem=cert_pem, cert_pem_filepath=_tmpfile.name if _tmpfile else None
            )
            _key_technology = lib.cert_utils.parse_cert__key_technology(
                cert_pem=cert_pem, cert_pem_filepath=_tmpfile.name if _tmpfile else None
            )
            _key_technology_id = model_utils.KeyTechnology.from_string(_key_technology)
            if key_technology_id is None:
                key_technology_id = _key_technology_id
            elif key_technology_id != _key_technology_id:
                raise ValueError(
                    "Detected a different `key_technology_id` than submitted"
                )
            # bookkeeping
            event_payload_dict = utils.new_event_payload_dict()
            dbOperationsEvent = log__OperationsEvent(
                ctx,
                model_utils.OperationsEventType.from_string("CertificateCA__insert"),
            )
            dbCertificateCA = model_objects.CertificateCA()
            dbCertificateCA.display_name = display_name or "unknown"
            dbCertificateCA.key_technology_id = key_technology_id
            dbCertificateCA.is_trusted_root = is_trusted_root
            dbCertificateCA.timestamp_created = ctx.timestamp
            dbCertificateCA.cert_pem = cert_pem
            dbCertificateCA.cert_pem_md5 = cert_pem_md5
            _cert_data = lib.cert_utils.parse_cert(
                cert_pem=cert_pem, cert_pem_filepath=_tmpfile.name if _tmpfile else None
            )
            dbCertificateCA.timestamp_not_before = _cert_data["startdate"]
            dbCertificateCA.timestamp_not_after = _cert_data["enddate"]
            dbCertificateCA.cert_subject = _cert_data["subject"]
            dbCertificateCA.cert_issuer = _cert_data["issuer"]
            dbCertificateCA.fingerprint_sha1 = _cert_data["fingerprint_sha1"]
            # NOTE(review): this overwrites the `key_technology_id` assigned
            # above; both values derive from the same certificate and should
            # agree -- confirm before removing either assignment.
            dbCertificateCA.key_technology_id = model_utils.KeyTechnology.from_string(
                _cert_data["key_technology"]
            )
            dbCertificateCA.spki_sha256 = _cert_data["spki_sha256"]
            dbCertificateCA.cert_issuer_uri = _cert_data["issuer_uri"]
            dbCertificateCA.cert_authority_key_identifier = _cert_data[
                "authority_key_identifier"
            ]
            dbCertificateCA.operations_event_id__created = dbOperationsEvent.id
            ctx.dbSession.add(dbCertificateCA)
            ctx.dbSession.flush(objects=[dbCertificateCA])
            is_created = True
            event_payload_dict["certificate_ca.id"] = dbCertificateCA.id
            dbOperationsEvent.set_event_payload(event_payload_dict)
            ctx.dbSession.flush(objects=[dbOperationsEvent])
            _log_object_event(
                ctx,
                dbOperationsEvent=dbOperationsEvent,
                event_status_id=model_utils.OperationsObjectEventStatus.from_string(
                    "CertificateCA__insert"
                ),
                dbCertificateCA=dbCertificateCA,
            )
        finally:
            if _tmpfile is not None:
                _tmpfile.close()
    return (dbCertificateCA, is_created)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def getcreate__CertificateRequest__by_pem_text(
    ctx,
    csr_pem,
    certificate_request_source_id=None,
    dbPrivateKey=None,
    dbCertificateSigned__issued=None,
    domain_names=None,
):
    """
    Get-or-create a CertificateRequest (CSR) by its PEM text.

    This is only used for inserting test records.

    If uploading CSR is enabled, ensure it conforms to LetsEncrypt practices:
    * CN=/
    * all domains in SubjectAlternateNames
    LetsEncrypt will not process a CSR if the domain in CN is not duplicated
    as a SAN.

    :param ctx: (required) A :class:`lib.utils.ApiContext` instance
    :param csr_pem: the CSR's PEM text
    :param certificate_request_source_id: Must match an option in :class:`model.utils.CertificateRequestSource`
    :param dbPrivateKey: (required) The :class:`model.objects.PrivateKey` that signed the certificate
    :param dbCertificateSigned__issued: (required) The :class:`model.objects.CertificateSigned` this issued as
    :param domain_names: (required) A list of fully qualified domain names

    log__OperationsEvent takes place in `create__CertificateRequest`
    """
    dbCertificateRequest = get__CertificateRequest__by_pem_text(ctx, csr_pem)
    if dbCertificateRequest is not None:
        return (dbCertificateRequest, False)
    dbCertificateRequest = create__CertificateRequest(
        ctx,
        csr_pem,
        certificate_request_source_id=certificate_request_source_id,
        dbPrivateKey=dbPrivateKey,
        dbCertificateSigned__issued=dbCertificateSigned__issued,
        domain_names=domain_names,
    )
    return (dbCertificateRequest, True)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - | |
# <reponame>isandlaTech/cohorte-devtools
#!/usr/bin/python
# -- Content-Encoding: UTF-8 --
"""
Python modules repository
:author: <NAME>
:license: Apache Software License 2.0
..
Copyright 2014 isandlaTech
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Standard library
import ast
import imp
import logging
import os
# ######### added by: <NAME>.
import json
# #########
# Pelix
from pelix.ipopo.decorators import ComponentFactory, Provides, Property, \
Invalidate, Validate
from pelix.utilities import is_string
# Repository beans
import cohorte
import cohorte.repositories
from cohorte.repositories.beans import Artifact, Version
# ------------------------------------------------------------------------------
# Documentation strings format
__docformat__ = "restructuredtext en"

# Version of this module
__version_info__ = (1, 1, 0)
__version__ = ".".join(str(x) for x in __version_info__)

# ------------------------------------------------------------------------------

# Module-level logger
_logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
class Module(Artifact):
    """
    Represents a Python module (bundle) artifact.
    """
    def __init__(self, name, version, imports, filename):
        """
        Sets up the module details.

        :param name: Name of the module
        :param version: Version of the module (as a string)
        :param imports: List of names of imported modules
        :param filename: Path to the .py file
        :raise ValueError: Invalid argument
        """
        Artifact.__init__(self, "python", name, version, filename)
        # Names of every module this one imports
        self.all_imports = imports

    def imports(self, artifact):
        """
        Tests if this module might import the given artifact.

        :param artifact: Another artifact
        :return: True if this module imports the given one
        """
        # Inter-language imports are impossible; otherwise match by name.
        return artifact.language == self.language \
            and artifact.name in self.all_imports
# ------------------------------------------------------------------------------
class AstVisitor(ast.NodeVisitor):
    """
    AST visitor to extract the imports and the version of a Python module.

    After ``visit()`` runs over a parsed module:
    - ``self.imports`` holds the set of absolute imported module names
    - ``self.version`` holds the module version string, if one was found
    """
    # pylint: disable=invalid-name
    def __init__(self, module_name, is_package):
        """
        Sets up the visitor

        :param module_name: The module name
        :param is_package: Whether the name is a package name
        """
        ast.NodeVisitor.__init__(self)
        self.imports = set()
        self.version = None
        self.module_parts = module_name.split(".")
        # Drop module name, keeping only packages' names
        if not is_package:
            self.module_parts = self.module_parts[:-1]
        self.module_name = module_name

    def generic_visit(self, node):
        """
        Custom default visit method that avoids to visit further that the
        module level.
        """
        if type(node) is ast.Module:
            ast.NodeVisitor.generic_visit(self, node)

    def visit_Import(self, node):
        """
        Found an "import"
        """
        for alias in node.names:
            self.imports.add(alias.name)

    def visit_ImportFrom(self, node):
        """
        Found a "from ... import ..."
        """
        imported = self.resolve_relative_import_from(node)
        self.imports.add(imported)

    def resolve_relative_import_from(self, node):
        """
        Converts a relative import (import .module) into an absolute one

        :param node: An ImportFrom AST node
        :return: The absolute module name
        """
        if node.level > 0:
            # Relative import
            if node.level == 1:
                parent = '.'.join(self.module_parts)
            else:
                parent = '.'.join(self.module_parts[:-node.level + 1])
            if node.module:
                # from .module import ...
                return '.'.join((parent, node.module))
            else:
                # from . import ...
                return parent
        else:
            # Absolute import
            return node.module

    def visit_Assign(self, node):
        """
        Found an assignment: records __version__ / __version_info__.
        """
        field = getattr(node.targets[0], 'id', None)
        if not self.version \
                and field in ('__version__', '__version_info__'):
            try:
                version_parsed = ast.literal_eval(node.value)
                if isinstance(version_parsed, (tuple, list)):
                    # BUG FIX: ".".join(str(version_parsed)) iterated over
                    # the characters of the tuple's repr; join the stringified
                    # elements instead, so (1, 1, 0) becomes "1.1.0".
                    self.version = ".".join(str(part) for part in version_parsed)
                else:
                    self.version = str(version_parsed)
            except ValueError:
                # Not a literal value: ignore it
                pass
def _extract_module_info(filename, module_name, is_package):
    """
    Extract the version and the imports from the given Python file

    :param filename: Path to the file to parse
    :param module_name: The fully-qualified module name
    :param is_package: Whether the name is a package name
    :return: A (version, [imports]) tuple
    :raise ValueError: Unreadable file
    """
    # Load the source code
    try:
        with open(filename) as filep:
            source = filep.read()
    except (OSError, IOError) as ex:
        raise ValueError("Error reading {0}: {1}".format(filename, ex))

    # Parse it into an AST
    try:
        module = ast.parse(source, filename, 'exec')
    except (ValueError, SyntaxError, TypeError) as ex:
        raise ValueError("Error parsing {0}: {1}".format(filename, ex))

    # Walk the module-level nodes to collect imports and version
    visitor = AstVisitor(module_name, is_package)
    visitor.visit(module)
    return visitor.version, visitor.imports
# ------------------------------------------------------------------------------
@ComponentFactory("cohorte-repository-artifacts-python-factory")
@Provides(cohorte.repositories.SERVICE_REPOSITORY_ARTIFACTS)
@Property('_language', cohorte.repositories.PROP_REPOSITORY_LANGUAGE, "python")
class PythonModuleRepository(object):
"""
Represents a repository
"""
def __init__(self):
"""
Sets up the repository
"""
self._language = "python"
# Name -> [Modules]
self._modules = {}
# Directory name -> Package name
self._directory_package = {}
# File -> Module
self._files = {}
def __contains__(self, item):
"""
Tests if the given item is in the repository
:param item: Item to be tested
:return: True if the item is in the repository
"""
if isinstance(item, Artifact):
# Test artifact language
if item.language != "python":
return False
# Test if the name is in the modules
return item.name in self._modules
elif item in self._modules:
# Item matches a module name
return True
else:
# Test the file name
for name in (item, os.path.realpath(item)):
if name in self._files:
return True
# No match
return False
def __len__(self):
"""
Length of a repository <=> number of individual artifacts
"""
return sum((len(modules) for modules in self._modules.values()))
def __add_module(self, module, registry=None):
"""
Adds a module to the registry
:param module: A Module object
:param registry: Registry where to store the module
"""
if registry is None:
registry = self._modules
# Add the module to the registry
modules_list = registry.setdefault(module.name, [])
if module not in modules_list:
modules_list.append(module)
modules_list.sort(reverse=True)
# Associate the file name with the module
self._files[module.file] = module
@staticmethod
def __compute_name(root, filename):
"""
Computes the module name of the given file by looking for '__init__.py'
files in its parent directories
:param filename: Path of the module file
:return: The Python name of the module, and a boolean indicating
whether the name is a package name
:raise ValueError: Invalid directory name
"""
# Subtract the root part
filename = os.path.relpath(filename, root)
# Drop extension
filename = os.path.splitext(filename)[0]
name_parts = filename.split(os.path.sep)
is_package = name_parts[len(name_parts)-1] == "__init__"
if is_package:
name_parts = name_parts[:-1]
return ".".join(name_parts), is_package
    @staticmethod
    def __test_import(name):
        """
        Tries to import the given module, using imp.find_module().

        NOTE(review): the "imp" module is deprecated and removed in
        Python 3.12; importlib.util.find_spec() is the modern equivalent.
        Also note find_module() does not handle dotted names, hence the
        conversion to a path-like name below.

        :param name: A module name (dotted form)
        :return: True if the module can be imported
        """
        try:
            # find_module() uses a path-like name, not a dotted one
            path_name = name.replace('.', os.sep)
            result = imp.find_module(path_name)
        except ImportError:
            # Module not found
            return False
        else:
            # Module found: close the file opened by find_module(), if any
            if result[0] is not None:
                result[0].close()
            return True
def add_file(self, root, filename):
"""
Adds a Python file to the repository
:param root: Path to the python package base of the added file
:param filename: A Python full-path file name
:raise ValueError: Unreadable file
"""
# Compute the real name of the Python file
realfile = os.path.realpath(filename)
if realfile in self._files:
# Already read it: ignore
return
if os.path.basename(filename).startswith('.'):
# Hidden file: ignore
return
# Compute the complete module name
name, is_package = self.__compute_name(root, filename)
# Parse the file
version, imports = _extract_module_info(realfile, name, is_package)
# Store the module
self.__add_module(Module(name, version, imports, realfile))
@staticmethod
def __is_module(dirname):
"""
Class method testing whether a directory, given its name, contains a
valid python package.
:param dirname: The directory' name
:return: True if the directory contains a valid python package.
False otherwise.
"""
init_file = os.path.join(dirname, "__init__.py")
return os.path.exists(init_file)
def add_directory(self, dirname):
"""
Recursively adds all .py modules found in the given directory into the
repository
:param dirname: A path to a directory
"""
for root, dirnames, filenames in os.walk(dirname, followlinks=True):
# Check if the current directory, ie. root, is either the base
# directory or a valid python package.
# Otherwise, do not walk through sub-directories.
if not os.path.samefile(dirname, root) \
and not self.__is_module(root):
continue
for filename in filenames:
if os.path.splitext(filename)[1] == '.py':
fullname = os.path.join(root, filename)
try:
self.add_file(dirname, fullname)
except ValueError as ex:
_logger.warning("Error analyzing %s: %s", fullname, ex)
def clear(self):
"""
Clears the repository content
"""
self._modules.clear()
self._files.clear()
self._directory_package.clear()
def get_artifact(self, name=None, version=None, filename=None,
registry=None):
"""
Retrieves a module from the repository
:param name: The module name (mutually exclusive with filename)
:param version: The module version (None or '0.0.0' for any), ignored
if filename is used
:param filename: The module file name (mutually exclusive with name)
:param registry: Registry where to look for the module
:return: The first matching module
:raise ValueError: If the module can't be found
"""
if registry is None:
registry = self._modules
if filename:
# Use the file name (direct search)
module = self._files.get(filename)
if module:
# Found it
return module
for bundle_file | |
# -*- coding: utf-8 -*-
#
# ramstk.db.base.py is part of The RAMSTK Project
#
# All rights reserved.
# Copyright 2019 <NAME> doyle.rowland <AT> reliaqual <DOT> com
"""RAMSTK Base Database Module."""
# Standard Library Imports
import sqlite3
from typing import Any, Dict, List, TextIO, Tuple
# Third Party Imports
import psycopg2 # type: ignore
from psycopg2 import sql # type: ignore
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT # type: ignore
from pubsub import pub
# noinspection PyPackageRequirements
from sqlalchemy import create_engine, exc
# noinspection PyPackageRequirements,PyProtectedMember
from sqlalchemy.engine import Engine # type: ignore
# noinspection PyPackageRequirements
from sqlalchemy.orm import query, scoped_session, sessionmaker # type: ignore
from sqlalchemy.orm.exc import FlushError # type: ignore
# RAMSTK Package Imports
from ramstk.exceptions import DataAccessError
def do_create_program_db(database: Dict[str, str], sql_file: TextIO) -> None:
    """Create a shiny new, unpopulated RAMSTK program database.

    :param database: a dict containing the database connection arguments.
    :param sql_file: the open text file containing the SQL statements for
        creating a bare RAMSTK Program Database.
    :return: None
    :rtype: None
    """
    conn: Any = ""
    if database["dialect"] == "sqlite":
        conn = sqlite3.connect(database["database"])
        try:
            # executescript() commits any pending transaction itself.
            conn.executescript(sql_file.read().strip())
        finally:
            # Always release the connection; the original leaked it.
            conn.close()
    elif database["dialect"] == "postgres":
        # Drop and (re-)create the database itself.  This must run on the
        # maintenance "postgres" database with autocommit enabled, since
        # CREATE/DROP DATABASE cannot run inside a transaction.
        conn = psycopg2.connect(
            host=database["host"],
            dbname="postgres",
            user=database["user"],
            # deepcode ignore NoHardcodedPasswords:
            password=database["password"],
        )
        conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
        cursor = conn.cursor()
        cursor.execute(
            sql.SQL("DROP DATABASE IF EXISTS {}").format(
                sql.Identifier(database["database"])
            )
        )
        cursor.execute(
            sql.SQL("CREATE DATABASE {}").format(sql.Identifier(database["database"]))
        )
        cursor.close()
        conn.close()
        # Populate the fresh database from the SQL script.
        conn = psycopg2.connect(
            host=database["host"],
            dbname=database["database"],
            user=database["user"],
            # deepcode ignore NoHardcodedPasswords:
            password=database["password"],
        )
        conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
        conn.set_session(autocommit=True)
        cursor = conn.cursor()
        cursor.execute(sql_file.read())
        cursor.close()
        conn.close()
def do_open_session(database: str) -> Tuple[Engine, scoped_session]:
    """Create a session to be used with an instance of the BaseDatabase.

    :param database: the database URL to connect the engine to.
    :return: an (engine, scoped_session) tuple bound together.
    """
    engine: Any = create_engine(database)
    # deepcode ignore missing~close~connect: engines are disposed
    engine.connect()
    session_factory = sessionmaker(autocommit=False, autoflush=False, bind=engine)
    return engine, scoped_session(session_factory)
# noinspection PyUnresolvedReferences
class BaseDatabase:
    """The BaseDatabase class.

    Wraps a SQLAlchemy engine/scoped session and provides the CRUD
    operations used by the RAMSTK data access layer.
    """

    # Define public class dict attributes.
    # NOTE(review): this is a *class* attribute mutated in place by
    # do_connect(), so its contents are shared by every BaseDatabase
    # instance -- confirm that sharing is intentional.
    cxnargs: Dict[str, str] = {
        "dialect": "",
        "user": "",
        "password": "",
        "host": "",
        "port": "",
        "dbname": "",
    }

    # Define public class scalar attributes.
    engine: Engine = None  # type: ignore
    session: scoped_session = None  # type: ignore
    database: str = ""
    # SQL fragments used to assemble simple SELECT statements.
    sqlstatements: Dict[str, str] = {
        "select": "SELECT {0:s} ",
        "from": "FROM {0:s} ",
        "order": "ORDER BY {0:s} DESC LIMIT 1",
    }
    def __init__(self) -> None:
        """Initialize an instance of the BaseDatabase.

        All public state (cxnargs, engine, session, database) is declared
        at class level; nothing is (re-)bound per instance here.
        """
        # Initialize private dictionary instance attributes.

        # Initialize private list instance attributes.

        # Initialize private scalar instance attributes.

        # Initialize public dictionary instance attributes.

        # Initialize public list instance attributes.

        # Initialize public scalar instance attributes.
def do_connect(self, database: Dict) -> None:
"""Connect to the database.
:param database: the connection information for the database to
connect to.
:return: None
:rtype: None
:raise: sqlalchemy.exc.OperationalError if passed an invalid database
URL.
:raise: sqlalchemy.exc.ArgumentError if passed a database URL with
an unknown/unsupported SQL dialect.
"""
self.cxnargs["dialect"] = database["dialect"]
self.cxnargs["user"] = database["user"]
self.cxnargs["password"] = database["password"]
self.cxnargs["host"] = database["host"]
self.cxnargs["port"] = database["port"]
self.cxnargs["dbname"] = database["database"]
try:
if self.cxnargs["dialect"] == "sqlite":
self.database = "sqlite:///" + self.cxnargs["dbname"]
elif self.cxnargs["dialect"] == "postgres":
self.database = (
"postgresql+psycopg2://"
+ self.cxnargs["user"]
+ ":"
+ self.cxnargs["password"]
+ "@"
+ self.cxnargs["host"]
+ ":"
+ self.cxnargs["port"]
+ "/"
+ self.cxnargs["dbname"]
)
else:
raise DataAccessError(
"Unknown database dialect in database " "connection dict."
)
except TypeError as _error:
raise DataAccessError(
"Unknown dialect or non-string value in " "database connection dict."
) from _error
if self.database != "":
self.engine, self.session = do_open_session(self.database)
    def do_delete(self, item: object) -> None:
        """Delete a record from the RAMSTK Program database.

        On failure the session is rolled back, a "fail_delete_record"
        message is published, and a DataAccessError is raised.

        :param item: the item to remove from the RAMSTK Program database.
        :type item: Object()
        :return: None
        :rtype: None
        :raise: DataAccessError if the delete or commit fails.
        """
        try:
            self.session.delete(item)
            self.session.commit()
        except exc.InvalidRequestError as _error:
            # This exception generally corresponds to runtime state errors.
            # These types of errors are unlikely to be user errors and will
            # most likely be the result of a corrupted database.  Some
            # situations that can raise this exception are:
            #   1. Attempting to delete a record from a non-existent table.
            self.session.rollback()
            _error_message = (
                "There was an database error when attempting to delete a "
                "record. Error returned from database was:\n\t{0:s}.".format(
                    str(_error)
                )
            )
            pub.sendMessage("fail_delete_record", error_message=_error_message)
            raise DataAccessError(_error_message) from _error
        except exc.ProgrammingError as _error:
            # This exception is raised when there is an error during
            # execution of a SQL statement.  These types of errors are
            # unlikely to be user errors and will most likely be the result of
            # a corrupted database.  Some situations that can raise this
            # exception are:
            #   1. Foreign key exists, but foreign table does not.
            self.session.rollback()
            _error_message = (
                "There was an database error when attempting to delete a "
                "record. Error returned from database was:\n\t{0:s}.".format(
                    str(_error.orig)
                )
            )
            pub.sendMessage("fail_delete_record", error_message=_error_message)
            raise DataAccessError(_error_message) from _error
def do_disconnect(self) -> None:
"""Close the current session.
:return: None
:rtype: None
"""
self.session.close()
self.engine.dispose()
# noinspection PyTypeChecker
self.session = None # type: ignore
self.database = ""
    def do_insert(self, record: object) -> None:
        """Add a new record to a database table.

        On failure (other than exc.InternalError) the session is rolled
        back, a "fail_insert_record" message is published, and a
        DataAccessError is raised.

        :param record: the object to add to the RAMSTK Program database.
        :return: None
        :rtype: None
        :raise: DataAccessError if the insert fails.
        """
        try:
            self.session.add(record)
            self.session.commit()
        except AttributeError as _error:
            # This exception is raised when there is no database connection.
            _error_message = (
                "dao.do_insert: No database connected when attempting to add a record."
            )
            pub.sendMessage(
                "fail_insert_record",
                error_message=_error_message,
            )
            raise DataAccessError(_error_message) from _error
        except exc.InternalError as _error:
            # NOTE(review): this branch swallows the error after a rollback
            # and reports via print() instead of pubsub/logging -- looks
            # like a debugging leftover; confirm whether it should raise
            # like the other branches.
            print("postgresql error: {}".format(_error.orig.pgcode))
            self.session.rollback()
        except (exc.DataError, exc.IntegrityError, exc.StatementError) as _error:
            # This exception is raised when there is an error during
            # execution of a SQL statement.  These types of errors are
            # unlikely to be user errors as the programmer should ensure
            # everything is ready to insert.  Some situations that can raise
            # this exception are:
            #   1. Primary key violations.
            #   2. Non-date data supplied to date type fields.
            #   3. Foreign key violations.
            #   4. np.nan data supplied to any field type.
            #
            # With psycopg2, _error will have attributes pgcode and pgerror.
            # The first is a code associated with the error captured by
            # psycopg2 and the second is the original error message from the
            # database.  These should be used to generate the error message
            # to send to the client.  Error codes are defined in the
            # errorcodes.py file in the psycopg2 code base.
            self.session.rollback()
            _error_message = (
                "do_insert: Database error when attempting to add a record. "
                "Database returned:\n\t{0:s}".format(
                    str(_error.orig.pgerror.split(":")[2].strip())
                )
            )
            pub.sendMessage(
                "fail_insert_record",
                error_message=_error_message,
            )
            raise DataAccessError(_error_message) from _error
        except FlushError as _error:
            self.session.rollback()
            _error_message = (
                "do_insert: Flush error when attempting to add records. "
                "Database returned:\n\t{0:s}".format(str(_error))
            )
            pub.sendMessage(
                "fail_insert_record",
                error_message=_error_message,
            )
            raise DataAccessError(_error_message) from _error
def do_insert_many(self, records: List[object]) -> None:
"""Add a group of new records to a database table.
:param list records: the list of objects to add to the RAMSTK database.
:return: None
:rtype: None
"""
for _record in records:
self.do_insert(_record)
def do_select_all(self, table, **kwargs) -> query.Query:
"""Select all records from the RAMSTK database for table.
:param table: the database table object to select all from.
:return: a list of table instances; one for each record.
"""
_keys: List[str] = kwargs.get("key", None)
_values: List[Any] = kwargs.get("value", None)
_order: Any = kwargs.get("order", None)
_all: bool = kwargs.get("_all", True)
_filters = {}
if _values[0] is not None:
for _idx, _key in enumerate(_keys):
_filters[_key] = _values[_idx]
_results = self.session.query(table).filter_by(**_filters)
if isinstance(_order, list):
_results = _results.order_by(*_order)
else:
_results = _results.order_by(_order)
if _all:
_results = _results.all()
else:
_results = _results.first()
return _results
    def do_update(self, record: object = None) -> None:
        """Update the RAMSTK database with any pending changes.

        On failure the session is rolled back, a "fail_update_record"
        message is published, and a DataAccessError is raised.

        :keyword record: the record to update in the database; when None,
            only the pending session changes are committed.
        :return: None
        :rtype: None
        :raise: DataAccessError if the commit fails.
        """
        if record is not None:
            self.session.add(record)

        try:
            self.session.commit()
        except (
            exc.DataError,
            exc.IntegrityError,
            exc.InvalidRequestError,
            exc.ProgrammingError,
        ) as _error:
            self.session.rollback()
            _error_message = (
                "There was an database error when attempting to update a "
                "record. Faulty SQL statement was:\n\t{0:s}.\nParameters "
                "were:\n\t{1:s}.".format(
                    str(_error.statement), str(_error.params)  # type: ignore
                )
            )  # type: ignore
            pub.sendMessage("fail_update_record", error_message=_error_message)
            raise DataAccessError(_error_message) from _error
def get_database_list(self, database: Dict[str, str]) -> List:
"""Retrieve the list of program databases available to RAMSTK.
This method is used | |
# azure-iot-device/tests/iothub/pipeline/test_pipeline_stages_iothub.py
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import json
import logging
import pytest
import sys
from azure.iot.device.exceptions import ServiceError
from azure.iot.device.common import handle_exceptions
from azure.iot.device.iothub.pipeline import (
pipeline_events_iothub,
pipeline_ops_iothub,
pipeline_stages_iothub,
constant as pipeline_constants,
)
from azure.iot.device.common.pipeline import pipeline_events_base, pipeline_ops_base
from tests.common.pipeline.helpers import StageRunOpTestBase, StageHandlePipelineEventTestBase
from tests.common.pipeline import pipeline_stage_test
logging.basicConfig(level=logging.DEBUG)
this_module = sys.modules[__name__]
pytestmark = pytest.mark.usefixtures("fake_pipeline_thread")
fake_device_id = "__fake_device_id__"
fake_module_id = "__fake_module_id__"
fake_hostname = "__fake_hostname__"
fake_gateway_hostname = "__fake_gateway_hostname__"
fake_server_verification_cert = "__fake_server_verification_cert__"
fake_sas_token = "__fake_sas_token__"
fake_symmetric_key = "<KEY>"
fake_x509_cert_file = "fantastic_beasts"
fake_x509_cert_key_file = "where_to_find_them"
fake_pass_phrase = "<PASSWORD>"
###################
# COMMON FIXTURES #
###################
@pytest.fixture(params=[True, False], ids=["With error", "No error"])
def op_error(request, arbitrary_exception):
    """Parametrized fixture returning either an arbitrary exception or None."""
    return arbitrary_exception if request.param else None
@pytest.fixture
def mock_handle_background_exception(mocker):
    """Patch handle_exceptions.handle_background_exception; return the mock."""
    return mocker.patch.object(handle_exceptions, "handle_background_exception")
#########################################
# ENSURE DESIRED PROPERTIES STAGE STAGE #
#########################################
class EnsureDesiredPropertiesStageTestConfig(object):
    """Shared fixtures for the EnsureDesiredPropertiesStage test classes."""

    @pytest.fixture
    def cls_type(self):
        return pipeline_stages_iothub.EnsureDesiredPropertiesStage

    @pytest.fixture
    def init_kwargs(self):
        return {}

    @pytest.fixture
    def stage(self, mocker, cls_type, init_kwargs):
        # Instantiate the stage and stub out its pipeline neighbors.
        new_stage = cls_type(**init_kwargs)
        new_stage.send_op_down = mocker.MagicMock()
        new_stage.send_event_up = mocker.MagicMock()
        return new_stage
class EnsureDesiredPropertiesStageInstantiationTests(EnsureDesiredPropertiesStageTestConfig):
    """Instantiation checks for EnsureDesiredPropertiesStage."""

    @pytest.mark.it("Initializes 'last_version_seen' None")
    def test_last_version_seen(self, init_kwargs):
        new_stage = pipeline_stages_iothub.EnsureDesiredPropertiesStage(**init_kwargs)
        assert new_stage.last_version_seen is None

    @pytest.mark.it("Initializes 'pending_get_request' None")
    def test_pending_get_request(self, init_kwargs):
        new_stage = pipeline_stages_iothub.EnsureDesiredPropertiesStage(**init_kwargs)
        assert new_stage.pending_get_request is None
# Generate the standard battery of pipeline-stage tests for
# EnsureDesiredPropertiesStage and register them on this module.
pipeline_stage_test.add_base_pipeline_stage_tests(
    test_module=this_module,
    stage_class_under_test=pipeline_stages_iothub.EnsureDesiredPropertiesStage,
    stage_test_config_class=EnsureDesiredPropertiesStageTestConfig,
    extended_stage_instantiation_test_class=EnsureDesiredPropertiesStageInstantiationTests,
)
@pytest.mark.describe(
    "EnsureDesiredPropertiesStage - .run_op() -- Called with EnableFeatureOperation"
)
class TestEnsureDesiredPropertiesStageRunOpWithEnableFeatureOperation(
    StageRunOpTestBase, EnsureDesiredPropertiesStageTestConfig
):
    """Behavior of .run_op() when given an EnableFeatureOperation."""

    @pytest.fixture
    def op(self, mocker):
        return pipeline_ops_base.EnableFeatureOperation(
            feature_name="fake_feature_name", callback=mocker.MagicMock()
        )

    @pytest.mark.it("Sets `last_version_seen` to -1 if `op.feature_name` is 'twin_patches'")
    def test_sets_last_version_seen(self, mocker, stage, op):
        op.feature_name = pipeline_constants.TWIN_PATCHES
        # Starts unset, becomes the -1 sentinel once twin patches enabled.
        assert stage.last_version_seen is None
        stage.run_op(op)
        assert stage.last_version_seen == -1

    @pytest.mark.parametrize(
        "feature_name",
        [
            pipeline_constants.C2D_MSG,
            pipeline_constants.INPUT_MSG,
            pipeline_constants.METHODS,
            pipeline_constants.TWIN,
        ],
    )
    @pytest.mark.it(
        "Does not change `last_version_seen` if `op.feature_name` is not 'twin_patches'"
    )
    def test_doesnt_set_last_version_seen(self, mocker, stage, op, feature_name):
        op.feature_name = feature_name
        previous_value = mocker.MagicMock()
        stage.last_version_seen = previous_value
        stage.run_op(op)
        assert stage.last_version_seen == previous_value

    @pytest.mark.parametrize(
        "feature_name",
        [
            pipeline_constants.C2D_MSG,
            pipeline_constants.INPUT_MSG,
            pipeline_constants.METHODS,
            pipeline_constants.TWIN,
            pipeline_constants.TWIN_PATCHES,
        ],
    )
    @pytest.mark.it(
        "Sends the EnableFeatureOperation op to the next stage for all valid `op.feature_name` values"
    )
    def test_passes_all_other_features_down(self, mocker, stage, op, feature_name):
        op.feature_name = feature_name
        stage.run_op(op)
        assert stage.send_op_down.call_count == 1
        assert stage.send_op_down.call_args == mocker.call(op)
@pytest.mark.describe("EnsureDesiredPropertiesStage - OCCURRENCE: ConnectedEvent received")
class TestEnsureDesiredPropertiesStageWhenConnectedEventReceived(
    EnsureDesiredPropertiesStageTestConfig, StageHandlePipelineEventTestBase
):
    """Behavior when a ConnectedEvent arrives at the stage."""

    @pytest.fixture
    def stage(self, mocker, cls_type, init_kwargs):
        stage = cls_type(**init_kwargs)
        stage.send_op_down = mocker.MagicMock()
        stage.send_event_up = mocker.MagicMock()
        return stage

    @pytest.fixture
    def event(self):
        return pipeline_events_base.ConnectedEvent()

    @pytest.mark.it(
        "Sends a GetTwinOperation if last_version_seen is set and there is no pending GetTwinOperation"
    )
    def test_last_version_seen_no_pending(self, mocker, stage, event):
        stage.last_version_seen = mocker.MagicMock()
        stage.pending_get_request = None

        stage.handle_pipeline_event(event)

        assert stage.send_op_down.call_count == 1
        assert isinstance(stage.send_op_down.call_args[0][0], pipeline_ops_iothub.GetTwinOperation)

    # FIX: description typo "last verion seen" -> "last_version_seen"
    @pytest.mark.it(
        "Does not send a GetTwinOperation if last_version_seen is set and there is already a pending GetTwinOperation"
    )
    def test_last_version_seen_pending(self, mocker, stage, event):
        stage.last_version_seen = mocker.MagicMock()
        stage.pending_get_request = mocker.MagicMock()

        stage.handle_pipeline_event(event)

        assert stage.send_op_down.call_count == 0

    @pytest.mark.it(
        "Does not send a GetTwinOperation if last_version_seen is not set and there is no pending GetTwinOperation"
    )
    def test_no_last_version_seen_no_pending(self, mocker, stage, event):
        stage.last_version_seen = None
        stage.pending_get_request = None

        stage.handle_pipeline_event(event)

        assert stage.send_op_down.call_count == 0

    # FIX: description typo "last verion seen" -> "last_version_seen"
    @pytest.mark.it(
        "Does not send a GetTwinOperation if last_version_seen is not set and there is already a pending GetTwinOperation"
    )
    def test_no_last_version_seen_pending(self, mocker, stage, event):
        stage.last_version_seen = None
        stage.pending_get_request = mocker.MagicMock()

        stage.handle_pipeline_event(event)

        assert stage.send_op_down.call_count == 0
@pytest.mark.describe(
    "EnsureDesiredPropertiesStage - OCCURRENCE: TwinDesiredPropertiesPatchEvent received"
)
class TestEnsureDesiredPropertiesStageWhenTwinDesiredPropertiesPatchEventReceived(
    EnsureDesiredPropertiesStageTestConfig, StageHandlePipelineEventTestBase
):
    """Behavior when a desired-properties patch event arrives at the stage."""

    @pytest.fixture
    def stage(self, mocker, cls_type, init_kwargs):
        new_stage = cls_type(**init_kwargs)
        new_stage.send_op_down = mocker.MagicMock()
        new_stage.send_event_up = mocker.MagicMock()
        return new_stage

    @pytest.fixture
    def version(self, mocker):
        return mocker.MagicMock()

    @pytest.fixture
    def event(self, version):
        return pipeline_events_iothub.TwinDesiredPropertiesPatchEvent(patch={"$version": version})

    @pytest.mark.it("Saves the `$version` attribute of the patch into `last_version_seen`")
    def test_saves_the_last_version_seen(self, mocker, stage, event, version):
        stage.last_version_seen = mocker.MagicMock()
        stage.handle_pipeline_event(event)
        assert stage.last_version_seen == version

    @pytest.mark.it("Sends the event to the previous stage")
    def test_sends_event_up(self, mocker, stage, event, version):
        stage.handle_pipeline_event(event)
        assert stage.send_event_up.call_count == 1
        assert stage.send_event_up.call_args == mocker.call(event)
@pytest.mark.describe(
    "EnsureDesiredPropertiesStage - OCCURRENCE: GetTwinOperation that was sent down by this stage completes"
)
class TestEnsureDesiredPropertiesStageWhenGetTwinOperationCompletes(
    EnsureDesiredPropertiesStageTestConfig
):
    """Behavior when the stage's own GetTwinOperation completes."""

    @pytest.fixture
    def stage(self, mocker, cls_type, init_kwargs):
        stage = cls_type(**init_kwargs)
        stage.send_op_down = mocker.MagicMock()
        stage.send_event_up = mocker.MagicMock()
        return stage

    @pytest.fixture
    def get_twin_op(self, stage):
        # Trigger the stage into sending its own GetTwinOperation, then
        # capture it and reset the mocks so tests see a clean slate.
        stage.last_version_seen = -1
        stage.handle_pipeline_event(pipeline_events_base.ConnectedEvent())
        get_twin_op = stage.send_op_down.call_args[0][0]
        assert isinstance(get_twin_op, pipeline_ops_iothub.GetTwinOperation)
        stage.send_op_down.reset_mock()
        stage.send_event_up.reset_mock()
        return get_twin_op

    @pytest.fixture
    def new_version(self):
        return 1234

    @pytest.fixture
    def new_twin(self, new_version):
        return {"desired": {"$version": new_version}, "reported": {}}

    @pytest.mark.it("Does not send a new GetTwinOperation if the op completes with success")
    def test_does_not_send_new_get_twin_operation_on_success(self, stage, get_twin_op, new_twin):
        get_twin_op.twin = new_twin
        get_twin_op.complete()

        assert stage.send_op_down.call_count == 0

    @pytest.mark.it("Sets `pending_get_request` to None if the op completes with success")
    def test_sets_pending_request_to_none_on_success(self, mocker, stage, get_twin_op, new_twin):
        stage.pending_get_request = mocker.MagicMock()
        get_twin_op.twin = new_twin
        get_twin_op.complete()

        assert stage.pending_get_request is None

    @pytest.mark.it("Sends a new GetTwinOperation if the op completes with an error")
    def test_sends_new_get_twin_operation_on_failure(self, stage, get_twin_op, arbitrary_exception):
        assert stage.send_op_down.call_count == 0
        get_twin_op.complete(error=arbitrary_exception)
        assert stage.send_op_down.call_count == 1
        assert isinstance(stage.send_op_down.call_args[0][0], pipeline_ops_iothub.GetTwinOperation)

    @pytest.mark.it(
        "Sets `pending_get_request` to the new GetTwinOperation if the op completes with an error"
    )
    # NOTE(review): name says "to_none" but the test asserts the pending
    # request is the *new* GetTwinOperation -- consider renaming.
    def test_sets_pending_request_to_none_on_failure(
        self, mocker, stage, get_twin_op, arbitrary_exception
    ):
        old_get_request = mocker.MagicMock()
        stage.pending_get_request = old_get_request
        get_twin_op.complete(error=arbitrary_exception)

        assert stage.pending_get_request is not old_get_request
        assert isinstance(stage.pending_get_request, pipeline_ops_iothub.GetTwinOperation)

    # FIX: description typo "copmletes" -> "completes"
    @pytest.mark.it(
        "Does not send a `TwinDesiredPropertiesPatchEvent` if the op completes with an error"
    )
    def test_doesnt_send_patch_event_if_error(self, stage, get_twin_op, arbitrary_exception):
        get_twin_op.complete(arbitrary_exception)

        assert stage.send_event_up.call_count == 0

    @pytest.mark.it(
        "Sends a `TwinDesiredPropertiesPatchEvent` if the desired properties '$version' doesn't match the `last_version_seen`"
    )
    def test_sends_patch_event_if_different_version(
        self, mocker, stage, get_twin_op, new_twin, new_version
    ):
        stage.last_version_seen = mocker.MagicMock()
        get_twin_op.twin = new_twin
        get_twin_op.complete()

        assert stage.send_event_up.call_count == 1
        assert isinstance(
            stage.send_event_up.call_args[0][0],
            pipeline_events_iothub.TwinDesiredPropertiesPatchEvent,
        )

    @pytest.mark.it(
        "Does not send a `TwinDesiredPropertiesPatchEvent` if the desired properties '$version' matches the `last_version_seen`"
    )
    def test_doesnt_send_patch_event_if_same_version(
        self, stage, get_twin_op, new_twin, new_version
    ):
        stage.last_version_seen = new_version
        get_twin_op.twin = new_twin
        get_twin_op.complete()

        assert stage.send_event_up.call_count == 0

    @pytest.mark.it(
        "Does not change the `last_version_seen` attribute if the op completes with an error"
    )
    def test_doesnt_change_last_version_seen_if_error(
        self, mocker, stage, get_twin_op, arbitrary_exception
    ):
        old_version = mocker.MagicMock()
        stage.last_version_seen = old_version
        get_twin_op.complete(error=arbitrary_exception)

        assert stage.last_version_seen == old_version

    @pytest.mark.it(
        "Sets the `last_version_seen` attribute to the new version if the desired properties '$version' doesn't match the `last_version_seen`"
    )
    def test_changes_last_version_seen_if_different_version(
        self, mocker, stage, get_twin_op, new_twin, new_version
    ):
        stage.last_version_seen = mocker.MagicMock()
        get_twin_op.twin = new_twin
        get_twin_op.complete()

        assert stage.last_version_seen == new_version

    @pytest.mark.it(
        "Does not change the `last_version_seen` attribute if the desired properties '$version' matches the `last_version_seen`"
    )
    def test_does_not_change_last_version_seen_if_same_version(
        self, stage, get_twin_op, new_twin, new_version
    ):
        stage.last_version_seen = new_version
        get_twin_op.twin = new_twin
        get_twin_op.complete()

        assert stage.last_version_seen == new_version
###############################
# TWIN REQUEST RESPONSE STAGE #
###############################
class TwinRequestResponseStageTestConfig(object):
    """Shared fixtures for the TwinRequestResponseStage test classes."""

    @pytest.fixture
    def cls_type(self):
        return pipeline_stages_iothub.TwinRequestResponseStage

    @pytest.fixture
    def init_kwargs(self):
        return {}

    @pytest.fixture
    def stage(self, mocker, cls_type, init_kwargs):
        # Instantiate the stage and stub out its pipeline neighbors.
        new_stage = cls_type(**init_kwargs)
        new_stage.send_op_down = mocker.MagicMock()
        new_stage.send_event_up = mocker.MagicMock()
        return new_stage
# Generate the standard battery of pipeline-stage tests for
# TwinRequestResponseStage and register them on this module.
pipeline_stage_test.add_base_pipeline_stage_tests(
    test_module=this_module,
    stage_class_under_test=pipeline_stages_iothub.TwinRequestResponseStage,
    stage_test_config_class=TwinRequestResponseStageTestConfig,
)
@pytest.mark.describe("TwinRequestResponseStage - .run_op() -- Called with GetTwinOperation")
class TestTwinRequestResponseStageRunOpWithGetTwinOperation(
    StageRunOpTestBase, TwinRequestResponseStageTestConfig
):
    """Behavior of .run_op() when given a GetTwinOperation."""

    @pytest.fixture
    def op(self, mocker):
        return pipeline_ops_iothub.GetTwinOperation(callback=mocker.MagicMock())

    @pytest.mark.it(
        "Sends a new RequestAndResponseOperation down the pipeline, configured to request a twin"
    )
    def test_request_and_response_op(self, mocker, stage, op):
        stage.run_op(op)

        assert stage.send_op_down.call_count == 1
        sent_op = stage.send_op_down.call_args[0][0]
        assert isinstance(sent_op, pipeline_ops_base.RequestAndResponseOperation)
        # The twin GET addresses the resource root with a blank body.
        assert sent_op.request_type == "twin"
        assert sent_op.method == "GET"
        assert sent_op.resource_location == "/"
        assert sent_op.request_body == " "
@pytest.mark.describe(
    "TwinRequestResponseStage - .run_op() -- Called with PatchTwinReportedPropertiesOperation"
)
class TestTwinRequestResponseStageRunOpWithPatchTwinReportedPropertiesOperation(
    StageRunOpTestBase, TwinRequestResponseStageTestConfig
):
    """Behavior of .run_op() when given a PatchTwinReportedPropertiesOperation."""

    @pytest.fixture(params=["Dictionary Patch", "String Patch", "Integer Patch", "None Patch"])
    def json_patch(self, request):
        # Map each parametrized case to a representative JSON-serializable patch.
        patches = {
            "Dictionary Patch": {"json_key": "json_val"},
            "String Patch": "some_json",
            "Integer Patch": 1234,
            "None Patch": None,
        }
        return patches[request.param]

    @pytest.fixture
    def op(self, mocker, json_patch):
        return pipeline_ops_iothub.PatchTwinReportedPropertiesOperation(
            patch=json_patch, callback=mocker.MagicMock()
        )

    @pytest.mark.it(
        "Sends a new RequestAndResponseOperation down the pipeline, configured to send a twin reported properties patch, with the patch serialized as a JSON string"
    )
    def test_request_and_response_op(self, mocker, stage, op):
        stage.run_op(op)

        assert stage.send_op_down.call_count == 1
        sent_op = stage.send_op_down.call_args[0][0]
        assert isinstance(sent_op, pipeline_ops_base.RequestAndResponseOperation)
        assert sent_op.request_type == "twin"
        assert sent_op.method == "PATCH"
        assert sent_op.resource_location == "/properties/reported/"
        assert sent_op.request_body == json.dumps(op.patch)
@pytest.mark.describe(
    "TwinRequestResponseStage - .run_op() -- Called with other arbitrary operation"
)
class TestTwinRequestResponseStageRunOpWithArbitraryOperation(
    StageRunOpTestBase, TwinRequestResponseStageTestConfig
):
    @pytest.fixture
    def op(self, arbitrary_op):
        # Any operation the stage does not specifically handle.
        return arbitrary_op

    @pytest.mark.it("Sends the operation down the pipeline")
    def test_sends_op_down(self, mocker, stage, op):
        stage.run_op(op)
        # Unrecognized ops pass straight through, exactly once and unchanged.
        stage.send_op_down.assert_called_once_with(op)
# TODO: Provide a more accurate set of status codes for tests
@pytest.mark.describe(
"TwinRequestResponseStage - OCCURRENCE: RequestAndResponseOperation created from GetTwinOperation is completed"
)
class TestTwinRequestResponseStageWhenRequestAndResponseCreatedFromGetTwinOperationCompleted(
TwinRequestResponseStageTestConfig
):
@pytest.fixture
def get_twin_op(self, mocker):
return pipeline_ops_iothub.GetTwinOperation(callback=mocker.MagicMock())
@pytest.fixture
def stage(self, mocker, cls_type, init_kwargs, get_twin_op):
stage = cls_type(**init_kwargs)
stage.send_op_down = mocker.MagicMock()
stage.send_event_up = mocker.MagicMock()
# Run the GetTwinOperation
stage.run_op(get_twin_op)
return stage
@pytest.fixture
def request_and_response_op(self, stage):
assert stage.send_op_down.call_count == 1
op = stage.send_op_down.call_args[0][0]
assert isinstance(op, pipeline_ops_base.RequestAndResponseOperation)
# reset the stage mock for convenience
stage.send_op_down.reset_mock()
return op
@pytest.mark.it(
"Completes the GetTwinOperation unsuccessfully, with the error from the RequestAndResponseOperation, if the RequestAndResponseOperation is completed unsuccessfully"
)
| |
sizenotcolor=False, z2=None, colorbar=False, reversecolorbar=False)
for key in defaults:
if (not kw.has_key(key)):
kw[key] = defaults[key]
map = kw.pop('map')
zmin = kw.pop('zmin')
zmax = kw.pop('zmax')
z2min = kw.pop('z2min')
z2max = kw.pop('z2max')
sizenotcolor = kw.pop('sizenotcolor')
z2 = kw.pop('z2')
colorbar = kw.pop('colorbar')
reversecolorbar = kw.pop('reversecolorbar')
try:
cmap = eval('py.cm.'+map)
except:
print "Colormap %s not found -- exiting!" % map
return -1
if not sizenotcolor:
if zmin is None:
zmin = z.min()
if zmax is None:
zmax = z.max()
zscale = cmap( ((z-zmin) / (zmax-zmin) * cmap.N).astype(int))
if z2 is not None:
if z2min is None:
z2min = z2.min()
if zmax is None:
z2max = z2.max()
z2scale = z2.copy()
z2scale[z2scale>=z2max] = z2max
z2scale[z2scale<=z2min] = z2min
plotlist = []
if z2 is None:
zipparam = zip(x, y, zscale)
else:
zipparam = zip(x, y, zscale, z2scale)
print kw
if colorbar:
fig = py.figure()
axs = [py.subplot(111, position=[.12, .12, .7, .8])]
else:
fig = py.gcf()
axs = [py.gca()]
for thisparam in zipparam:
if z2 is None:
xx,yy,param = thisparam
else:
xx,yy,param,size = thisparam
if sizenotcolor:
kw['ms'] = param
else:
kw['color'] = param
if z2 is not None: kw['ms'] = size
plotlist.append(py.plot([xx],[yy],**kw))
if colorbar:
axs.append(py.subplot(111, position=[.84, .12, .07, .8]))
ztemp = np.linspace(zmin, zmax, 101)
py.imshow(np.tile(ztemp, (2,1)).T, cmap=cmap, aspect='auto')
axs[-1].set_ylim([0,int(ztemp.size)-1])
zticks = np.interp(axs[-1].get_yticks(), np.arange(ztemp.size), ztemp)
if reversecolorbar: axs[-1].set_ylim(axs[-1].get_ylim()[::-1])
axs[-1].set_yticklabels(['%i' % el for el in zticks])
axs[-1].set_xticklabels([])
axs[-1].get_yaxis().set_ticks_position('right')
#axs[-1].set_xlabel('$T_{eff}$ [K]', fontsize=fs*1.1)
return fig, axs, plotlist
def hist2d(x,y, bins=None):
    """Compute 2-d histogram data for specified bins.
    :INPUT:
      x
      y
    :OPTIONAL INPUT:
      bins: a two-tuple containing one of the following:
        (nx,ny) -- tuple, number of bins in each direction
        (xlims, ylims) -- tuple of sequences (x- and y-bin edges)
    :OUTPUT:
      A 3-tuple consisting of:
        xbins -- the centers of the x bins
        ybins -- the centers of the y bins
        hist -- The 2D histogram array
    :SEE ALSO: :func:`numpy.histogram2d`
    :REQUIREMENTS: :doc:`numpy`
    :NOTES:
      Values lying exactly on a bin edge (including the data min/max when
      limits are auto-generated) are excluded by the strict comparisons
      below.  This matches the original behavior and differs from
      :func:`numpy.histogram2d`.
    """
    # 2010-02-25 17:26 IJC: Created from my old Hess Diagram fcn.
    # 2010-03-04 13:59 IJC: Fixed a typo -- now actually returns bin centers
    from numpy import array, zeros, isfinite, linspace

    x = array(x).ravel()
    y = array(y).ravel()
    # FIX: filter non-finite values *jointly*.  Filtering x and y
    # independently (as before) desynchronized the paired arrays whenever
    # only one coordinate of a point was NaN/inf.
    finite = isfinite(x) * isfinite(y)
    x = x[finite]
    y = y[finite]

    # FIX: "is None" instead of "== None" (unreliable for array inputs),
    # and print() so the function also parses under Python 3.
    if bins is None:
        bins = [20, 20]
    if hasattr(bins, '__iter__'):
        if len(bins) < 2:
            # FIX: message now matches the actual requirement (len >= 2).
            print("bins must have len>=2")
            return -1
    else:
        print("bins must have len>=2")
        return -1

    # X bins: either explicit edge sequence, or a count with auto limits.
    if hasattr(bins[0], '__iter__'):  # sequence of limits
        nx = len(bins[0]) - 1
        xlims = bins[0]
    else:
        nx = bins[0]
        xlims = linspace(x.min(), x.max(), nx + 1)
    # Y bins, same convention.
    if hasattr(bins[1], '__iter__'):  # sequence of limits
        ny = len(bins[1]) - 1
        ylims = bins[1]
    else:
        ny = bins[1]
        ylims = linspace(y.min(), y.max(), ny + 1)

    xcen = zeros(nx, float)
    ycen = zeros(ny, float)
    hist = zeros((ny, nx), int)
    # Count points falling strictly inside each (x, y) cell.
    for ii in range(nx):
        xindex = (x > xlims[ii]) * (x < xlims[ii + 1])
        xcen[ii] = 0.5 * (xlims[ii] + xlims[ii + 1])
        for jj in range(ny):
            ycen[jj] = 0.5 * (ylims[jj] + ylims[jj + 1])
            yindex = (y > ylims[jj]) * (y < ylims[jj + 1])
            index = xindex * yindex
            hist[jj, ii] = index.sum()
    return (xcen, ycen, hist)
def plotcorrs(params, labs=None, tit=None, xrot=0, yrot=0, cmap=None,figsize=None,plotregion=[.1,.1,.8,.8], n=6, nbins=None, clim=None, docontour=False, contourcolor='k', newfig=True, getdist=False, gd_smooth=0.2, plotmid=None, fontsize=12):
""" Plot correlation coefficient matrix in one big, huge, honkin'
figure. Color-code the figure based on the correlation
coefficients between parameters.
:INPUTS:
params -- (M x N array) M instantiations of a set of N parameters.
:OPTIONS:
labs -- (list) labels for each of the N parameters
tit -- (str) title for figure
xrot/yrot -- (float) axis label rotation, in degrees
cmap -- (matplotlib cm) -- colormap for color-coding.
figsize -- (2-list) -- width and height of figure
plotregion -- (4-list) -- (left, bottom, width, height) of plotted region in each figure
n -- (int) -- number of subplots across each figure
nbins : int
Bin the data into this many bins, and show 2D histograms instead of points.
clim : None
Colorscale limits for normalized 2D histograms (where hist.sum() = 1.0)
docontour : bool
Whether to plot contours, or do an 'imshow'
newfig : bool
Whether to generate a new figure, or plot in the current axes.
contourcolor
Color of contour line, if "docontour" is set to a list of confidence intervals.
:REQUIREMENTS: :doc:`pylab`, :doc:`nsdata`
:NOTES:
Based on the concept by <NAME> at U. Central Florida
Beware of plotting two many points, and clogging your system!
"""
# 2010-05-27 09:27 IJC: Created
# 2010-08-26 14:49 IJC: Added test for higher-rank dimensional
# 2010-08-27 09:21 IJC: Moved 'tit' title text lower
# 2011-11-03 12:03 IJMC: Added 'nbins' option
# 2012-03-30 09:04 IJMC: Moved axes labels to upper-right, rather than lower-left.
# 2013-08-19 13:17 IJMC: Added 'docontour' option.
# 2013-10-09 14:59 IJMC: Added 'newfig' option.
# 2015-11-15 19:59 IJMC: Added 'getdist' option -- much prettier!
import pylab as py
import nsdata as ns
import kdestats as kde
n = int(n)
n, m = params.shape
if n>=m:
npts0 = n
nparam = m
else:
npts0 = m
nparam = n
params = params.copy().transpose()
if nbins is not None:
nbins = int(nbins)
hist_bins = [py.linspace(min(params[:,ii]), max(params[:,ii]), nbins+1) for ii in range(nparam)]
hist_cmap = cmap
nind = params.shape[1]
if labs is None:
labs = ['']*nind
if figsize is None:
figsize = [9,9]
nperpage = min(n,nind-1)
nfigs = py.ceil(float(nind-1.)/nperpage).astype(int)
#print "nind, nperpage, nfigs>>",nind, nperpage, nfigs
subx0,suby0, xwid,ywid = plotregion
subdx = xwid/(nperpage) # was nind-1.
subdy = ywid/(nperpage) # was nind-1.
#print "subx0,suby0,subdx,subdy>>",[subx0,suby0,subdx,subdy]
oldaxlinewidth = py.rcParams['axes.linewidth']
if nind>40:
py.rcParams['axes.linewidth'] = 0
figs = []
allsubplots = []
if getdist:
from getdist import plots, MCSamples
from analysis import dumbconf
g = plots.getSubplotPlotter()
gd_labels = labs
samps = MCSamples(samples=params, names=gd_labels, labels=gd_labels)
samps.updateSettings({'contours': [0.683, 0.954, 0.997]})
g.settings.num_plot_contours = 3
samps.smooth_scale_2D = gd_smooth
g.triangle_plot([samps], filled=True, contour_colors=contourcolor, linewidth=2, contour_lws=[2]*len(labs), figsize=figsize)
figs.append(py.gcf())
figs[-1].set_size_inches(figsize)
allsubplots.append(figs[-1].get_axes())
[ax.get_xaxis().get_label().set_fontsize(fontsize) for ax in allsubplots[-1]]
[ax.get_yaxis().get_label().set_fontsize(fontsize) for ax in allsubplots[-1]]
if plotmid is None:
plotmid = np.zeros(len(gd_labels), float)
for ii, key in enumerate(gd_labels):
plotmid[ii] = np.median(params[:,ii])
else:
plotmid = np.array(plotmid, copy=False)
hivals = np.array([dumbconf(vec, .84134, type='upper')[0] for vec in params.T]) - plotmid
lovals = plotmid - np.array([dumbconf(vec, .15866, type='upper')[0] for vec in params.T])
#for k in range(len(x_mean)):
# g.add_x_bands(x_mean[k], x_var[k]**0.5, ax=allsubplots[0][k], alpha2=0.25, color=contourcolor, linewidth=2)
maxlab = '%' + str(1+max(map(len, gd_labels))) + 's : '
textparams = [maxlab % el for el in gd_labels]
for ii in range(len(gd_labels)):
textvals = roundvals([plotmid[ii], hivals[ii], lovals[ii]])
textparams[ii] += ' $%s^{+%s}_{-%s}$ ' % tuple(textvals)
if n>3:
newpos = [.62, .62, .35, .35]
else:
newpos = [.62, .7, .35, .27]
newax = py.subplot(111, position=newpos)
out = textfig(['\n']+textparams, ax=newax, fig=figs[-1],fontsize=fontsize*1.2)
else:
# Iterate over figure columns
for kk2 in range(nfigs):
# Iterate over figure rows
for kk1 in range(nfigs):
if newfig:
f=py.figure(nextfig(),figsize)
else:
f = py.gcf()
subplots = []
jj0 = 0
#Iterate over subplot columns:
for jj in range(nperpage*kk2,min(nperpage*(kk2+1),nind)):
# Set the vertical panel offset:
if kk1==kk2: # a figure on the diagonal
ii0 = jj0+1
elif kk1>kk2: # a figure below the diagonal
ii0 = 1
#Iterate over subplots rows:
for ii in range(max(jj+1,nperpage*kk1+1), min(nperpage*(kk1+1)+1,nind)):
#print '(kk2,kk1,jj,jj0,ii,ii0): (%i,%i,%i,%i,%i,%i)'%(kk2,kk1,jj,jj0,ii,ii0), [subx0+subdx*jj0,suby0+subdy*(nperpage-ii0),subdx,subdy]
s = py.axes([subx0+subdx*jj0,suby0+subdy*(nperpage-ii0),subdx,subdy])
param_doesnt_vary = params[:,jj].std()==0 or params[:,ii].std()==0 or \
(py.np.abs(params[:,jj].std()/py.median(params[:,jj])) < 1e-9) or \
(py.np.abs(params[:,ii].std()/py.median(params[:,ii])) < 1e-9)
if nbins is None or param_doesnt_vary:
py.plot(params[:,jj],params[:,ii],',k')
#pdb.set_trace()
else:
#pdb.set_trace()
thishist = py.histogram2d(params[:,jj], params[:,ii], \
bins=[hist_bins[jj], hist_bins[ii]])
if docontour:
xplot = 0.5*(thishist[1][1:] + thishist[1][0:-1])
yplot = 0.5*(thishist[2][1:] + thishist[2][0:-1])
if hasattr(docontour, '__iter__'):
clev = [kde.confmap(1.0*thishist[0]/npts0, thisDC) for thisDC in docontour]
py.contour(xplot, yplot, 1.0*thishist[0].transpose()/npts0, clev, colors=contourcolor, linewidths=2)
else:
py.contourf(xplot, yplot, 1.0*thishist[0].transpose()/npts0, cmap=hist_cmap)
h_axis = py.xlim() + py.ylim()
else:
ns.imshow(1.0*thishist[0].transpose()/npts0, x=thishist[1], y=thishist[2], cmap=hist_cmap)
h_axis = py.xlim() + py.ylim()[::-1]
#pdb.set_trace()
py.axis(h_axis)
if clim is None:
py.clim([0., thishist[0].ravel().max()*1.0/npts0])
else:
py.clim(clim)
if jj0>0: #
s.set_yticklabels('');
else:
#py.ylabel(labs[ii], rotation=yrot)
pass
if newfig: s.set_yticks(s.get_yticks()[1:-1]);
if ii0 == (jj0+1):
s.get_xaxis().set_label_position('top')
py.xlabel(labs[jj])
s.get_yaxis().set_label_position('right')
py.ylabel(labs[ii])#, rotation='horizontal')
s.get_yaxis().get_label().set_rotation(90)
if ii0<(nperpage-1) and ii<(nind-1):
s.set_xticklabels('');
else:
#py.xlabel(labs[jj],rotation=xrot)
s.get_xaxis().set_major_formatter(py.FormatStrFormatter('%01.2f'));
if newfig: s.set_xticks(s.get_xticks()[1:-1]);
if nperpage>10:
s.set_xticklabels('');
s.set_yticklabels('');
if nperpage>50:
s.set_xticks([])
s.set_yticks([])
else:
[obj.set_rotation(90.) for obj in s.get_xticklabels()] ;
if cmap is not None:
s.set_axis_bgcolor(cmap(.3+.7*abs(py.corrcoef(params[:,jj],params[:,ii])[0,1])))
#py.title('(kk2,kk1,jj,jj0,ii,ii0): (%i,%i,%i,%i,%i,%i)'%(kk2,kk1,jj,jj0,ii,ii0))
if nbins is not None and (not param_doesnt_vary):
py.axis(h_axis)
subplots.append(s)
ii0 += | |
<filename>src/app/voltdb/voltdb_src/lib/python/voltcli/utility.py
# This file is part of VoltDB.
# Copyright (C) 2008-2021 VoltDB Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with VoltDB. If not, see <http://www.gnu.org/licenses/>.
# Volt CLI utility functions.
# IMPORTANT: This depends on no other voltcli modules. Please keep it that way.
import sys
import os
import subprocess
import glob
import copy
import inspect
import configparser
import zipfile
import re
import pkgutil
import binascii
import stat
import signal
import textwrap
import string
from voltcli import daemon
#===============================================================================
class Global:
#===============================================================================
    """
    Global data for utilities.
    """
    # Flags toggled via set_verbose()/set_debug()/set_dryrun().
    verbose_enabled = False
    debug_enabled = False
    dryrun_enabled = False
    # Resource path of the package manifest (read by PythonSourceFinder).
    manifest_path = 'MANIFEST'
    # Directory for persisted state; assigned by set_state_directory().
    state_directory = ''
#===============================================================================
def set_dryrun(dryrun):
#===============================================================================
    """
    Enable or disable command dry run (display only/no execution).

    :param dryrun: truthy to display commands without executing them.
    """
    Global.dryrun_enabled = dryrun
#===============================================================================
def set_verbose(verbose):
#===============================================================================
    """
    Enable or disable verbose messages. Increases the number of INFO messages.

    :param verbose: truthy to also emit INFO2 (verbose) messages.
    """
    Global.verbose_enabled = verbose
#===============================================================================
def set_debug(debug):
#===============================================================================
    """
    Enable or disable DEBUG messages. Also enables verbose INFO messages.

    :param debug: truthy to enable debug output. Note that disabling debug
                  does NOT clear a previously-enabled verbose flag.
    """
    Global.debug_enabled = debug
    if debug:
        # Debug output implies verbose output.
        Global.verbose_enabled = True
#===============================================================================
def set_state_directory(directory):
#===============================================================================
    """
    Set (and create, if necessary) the directory used for saving state.

    The path is expanded (``~`` and environment variables) *before* the
    existence check/creation, so the directory created on disk is the same
    one later returned by get_state_directory().  The previous code created
    the unexpanded path but stored the expanded one, which could create a
    directory literally named "~user" or "$VAR".

    Aborts with an error message if the directory cannot be created.
    """
    # FIX: expand first, then test/create the expanded path.
    directory = os.path.expandvars(os.path.expanduser(directory))
    if not os.path.exists(directory):
        try:
            os.makedirs(directory)
        except (OSError, IOError) as e:
            abort('Error creating state directory "%s".' % directory, e)
    Global.state_directory = directory
#===============================================================================
def get_state_directory():
#===============================================================================
    """
    Return and create as needed a path for saving state.

    NOTE(review): despite the docstring, this only returns the stored path;
    creation happens in set_state_directory().  A second, identical
    definition of this function appears later in this module and shadows
    this one at import time -- the duplicate should be removed.
    """
    return Global.state_directory
#===============================================================================
def is_dryrun():
#===============================================================================
    """
    Return True if dry-run is enabled.

    :return: current value of Global.dryrun_enabled.
    """
    return Global.dryrun_enabled
#===============================================================================
def is_verbose():
#===============================================================================
    """
    Return True if verbose messages are enabled.

    :return: current value of Global.verbose_enabled.
    """
    return Global.verbose_enabled
#===============================================================================
def is_debug():
#===============================================================================
    """
    Return True if debug messages are enabled.

    :return: current value of Global.debug_enabled.
    """
    return Global.debug_enabled
#===============================================================================
def get_state_directory():
#===============================================================================
    """
    Return the configured state directory.

    NOTE(review): this duplicates the earlier get_state_directory()
    definition; being later in the module, this one wins at import time.
    One of the two should be deleted.
    """
    return Global.state_directory
#===============================================================================
def display_messages(msgs, f = sys.stdout, tag = None, level = 0):
#===============================================================================
    """
    Low level message display.

    :param msgs:  a string, an exception, or an arbitrarily nested iterable
                  mixing any of these; nesting deepens the indentation.
    :param f:     output stream (default sys.stdout).
    :param tag:   optional severity tag prefixed to every line.
    :param level: starting indentation depth.
    """
    if tag:
        stag = '%s: ' % tag
    else:
        stag = ''
    # Special case to allow a string instead of an iterable.
    try:
        # Raises TypeError if not string
        var = msgs + ' '
        msgs = [msgs]
    except TypeError:
        pass
    sindent = level * ' '
    # Recursively process message list and sub-lists.
    for msg in msgs:
        if msg is not None:
            # Handle exceptions
            if issubclass(msg.__class__, Exception):
                f.write('%s%s%s Exception: %s\n' % (stag, sindent, msg.__class__.__name__, str(msg)))
            else:
                # Handle multi-line strings
                # NOTE(review): is_string() is defined elsewhere in this module.
                if is_string(msg):
                    # If it is a string slice and dice it by linefeeds.
                    for msg2 in msg.split('\n'):
                        f.write('%s%s%s\n' % (stag, sindent, msg2))
                else:
                    # Recursively display an iterable with indentation added.
                    if hasattr(msg, '__iter__'):
                        display_messages(msg, f = f, tag = tag, level = level + 1)
                    else:
                        # Any other object: stringize, then split by line.
                        for msg2 in str(msg).split('\n'):
                            f.write('%s%s%s\n' % (stag, sindent, msg2))
#===============================================================================
def info(*msgs):
#===============================================================================
    """
    Display INFO level messages.

    :param msgs: message arguments; see display_messages() for accepted types.
    """
    display_messages(msgs, tag = 'INFO')
#===============================================================================
def verbose_info(*msgs):
#===============================================================================
    """
    Display INFO2 (verbose) messages, but only when verbose output has
    been enabled via set_verbose() or set_debug().
    """
    if not Global.verbose_enabled:
        return
    display_messages(msgs, tag='INFO2')
#===============================================================================
def debug(*msgs):
#===============================================================================
    """
    Display DEBUG level message(s), but only when debug output has been
    enabled via set_debug().
    """
    if not Global.debug_enabled:
        return
    display_messages(msgs, tag='DEBUG')
#===============================================================================
def warning(*msgs):
#===============================================================================
    """
    Display WARNING level messages.

    :param msgs: message arguments; see display_messages() for accepted types.
    """
    display_messages(msgs, tag = 'WARNING')
#===============================================================================
def error(*msgs):
#===============================================================================
    """
    Display ERROR level messages.

    :param msgs: message arguments; see display_messages() for accepted types.
    """
    display_messages(msgs, tag = 'ERROR')
#===============================================================================
def abort(*msgs, **kwargs):
#===============================================================================
    """
    Display ERROR messages and then exit the process.

    :Keywords:
    return_code: integer result returned to the OS (default=1)

    Any other keyword is reported with a warning and ignored.  The return
    code is forced to 1 when outside the shell-legal 0-255 range.
    """
    # FIX: dropped the unused "keys" local (dead duplicate of this scan).
    bad_keywords = [k for k in kwargs if k != 'return_code']
    if bad_keywords:
        warning('Bad keyword(s) passed to abort(): %s' % ' '.join(bad_keywords))
    return_code = kwargs.get('return_code', 1)
    error(*msgs)
    # Return code must be 0-255 for shell.
    if return_code < 0 or return_code > 255:
        return_code = 1
    sys.exit(return_code)
#===============================================================================
def find_in_path(name):
#===============================================================================
    """
    Search the directories in $PATH for a file with the given name.

    :param name: bare program name to look for.
    :return: full path of the first match, or None when not found.
    """
    # FIX: split on os.pathsep instead of a hard-coded ':' so this also
    # works where the separator differs (e.g. Windows uses ';').  The old
    # code carried an "NB: non-portable" note for exactly this reason.
    # Also: join the path once, and avoid shadowing the builtin dir().
    for directory in os.environ['PATH'].split(os.pathsep):
        candidate = os.path.join(directory, name)
        if os.path.exists(candidate):
            return candidate
    return None
#===============================================================================
def find_programs(*names):
#===============================================================================
    """
    Locate each named program on the system path.

    Aborts with a message listing any program that could not be found;
    otherwise returns a dict mapping each name to its full path.
    """
    paths = dict((name, find_in_path(name)) for name in names)
    missing = [name for name in names if paths[name] is None]
    if missing:
        abort('Required program(s) are not in the path:', missing)
    return paths
#===============================================================================
class PythonSourceFinder(object):
#===============================================================================
    """
    Find and invoke python source files in a set of directories and resource
    subdirectories (for searching in zip packages). Execute all discovered
    source files and pass in the symbols provided.

    A typical usage relies on decorators to mark discoverable functions in user
    code. The decorator is called when the source file is executed which serves
    as an opportunity to keep track of discovered functions.
    """

    class Scan(object):
        # One search location: a plain directory (package is None) or a
        # resource path inside a package.
        def __init__(self, package, path):
            self.package = package
            self.path = path

    def __init__(self):
        self.scan_locs = []   # Scan objects, in search order
        self.manifests = {}   # package name -> list of manifest lines

    def add_path(self, path):
        """Add a plain directory to scan; duplicate paths are ignored."""
        # Use the absolute path to avoid visiting the same directory more than once.
        full_path = os.path.realpath(path)
        for scan_loc in self.scan_locs:
            if scan_loc.path == full_path:
                break
        else:
            self.scan_locs.append(PythonSourceFinder.Scan(None, full_path))

    def add_resource(self, package, path):
        """Add a package-relative resource directory to scan."""
        self.scan_locs.append(PythonSourceFinder.Scan(package, path))

    def search_and_execute(self, **syms):
        """Execute every discovered *.py file, each with a fresh copy of
        `syms` as its global namespace."""
        for scan_loc in self.scan_locs:
            verbose_info('Scanning "%s" for modules to run...' % scan_loc.path)
            if scan_loc.package:
                # Load the manifest as needed so that individual files can be
                # found in package directories. There doesn't seem to be an
                # easy way to search for resource files, e.g. by glob pattern.
                if scan_loc.package not in self.manifests:
                    try:
                        manifest_raw = pkgutil.get_data(scan_loc.package, Global.manifest_path)
                        # FIX: pkgutil.get_data() returns bytes under
                        # Python 3; decode before splitting into lines
                        # (bytes.split('\n') raises TypeError).
                        self.manifests[scan_loc.package] = manifest_raw.decode('utf-8').split('\n')
                    except (IOError, OSError) as e:
                        abort('Failed to load package %s.' % Global.manifest_path, e)
                for path in self.manifests[scan_loc.package]:
                    if os.path.dirname(path) == scan_loc.path and path.endswith('.py'):
                        debug('Executing package module "%s"...' % path)
                        try:
                            code = pkgutil.get_data(scan_loc.package, path)
                        except (IOError, OSError) as e:
                            abort('Failed to load package resource "%s".' % path, e)
                        # Copy so one module's globals can't leak into the next.
                        syms_tmp = copy.copy(syms)
                        exec(code, syms_tmp)
            elif os.path.exists(scan_loc.path):
                for modpath in glob.glob(os.path.join(scan_loc.path, '*.py')):
                    debug('Executing module "%s"...' % modpath)
                    syms_tmp = copy.copy(syms)
                    exec(compile(open(modpath, "rb").read(), modpath, 'exec'), syms_tmp)
#===============================================================================
def normalize_list(items, width, filler = None):
#===============================================================================
    """
    Normalize list to a specified width, truncating or filling as needed.
    Filler data can be supplied by caller. The filler will be copied to each
    added item. None will be used as the filler if none is provided.

    :param items:  sequence to normalize (must not be None).
    :param width:  desired length (must be >= 0).
    :param filler: value copied into each padding slot (default None).
    :return: a tuple of exactly `width` items.
    """
    assert items is not None
    assert width >= 0
    # list() so tuple inputs can be extended below.
    output = list(items[:width])
    if len(output) < width:
        # FIX: pad with a list of filler copies.  The old code did
        # "output += filler * (width - len(output))", which raised
        # TypeError for the documented default (None) and silently added
        # *nothing* for the '' filler used by format_table (concatenating
        # a string onto a list iterates its characters).
        output += [filler] * (width - len(output))
    return tuple(output)
#===============================================================================
def format_table(tuples, caption = None, headings = None, indent = 0, separator = ' '):
#===============================================================================
    """
    Format a table, i.e. tuple list, including an optional caption, optional
    column headings, and rows of data cells. Aligns the headings and data
    cells. Headings and data rows must be iterable. Each data row must provide
    iterable cells. For now it only handles stringized data and right
    alignment. Returns the table-formatted string.
    """
    sindent = ' ' * indent
    output = []
    # Caption comes first, when present.
    if caption:
        output.append('\n%s-- %s --\n' % (sindent, caption))
    # Collect the heading row (if any) followed by all data rows.
    rows = []
    if headings:
        rows.append(headings)
    rows.extend(tuples)
    # Compute the maximum stringized width of every column.
    widths = []
    for row in rows:
        for icolumn, column in enumerate(row):
            width = len(str(column))
            if icolumn >= len(widths):
                widths.append(width)
            else:
                widths[icolumn] = max(widths[icolumn], width)
    # Underline the headings now that widths are known.
    if headings:
        rows.insert(1, ['-' * w for w in widths])
    # One left-justified format string serves every row.
    fmt = '%s%s' % (sindent, separator.join(['%%-%ds' % w for w in widths]))
    for row in rows:
        output.append(fmt % normalize_list(row, len(widths), ''))
    return '\n'.join(output)
#===============================================================================
def format_tables(tuples_list, caption_list = None, heading_list = None, indent = 0):
#===============================================================================
    """
    Format multiple tables, i.e. a list of tuple lists. See format_table() for
    more information.

    :param tuples_list:  list of row lists, one entry per table.
    :param caption_list: optional per-table captions (missing entries -> None).
    :param heading_list: optional per-table heading rows.
    :param indent: left indentation applied to every table.
    :return: all formatted tables joined by blank lines.
    """
    output = []
    for i in range(len(tuples_list)):
        if caption_list is None or i >= len(caption_list):
            caption = None
        else:
            caption = caption_list[i]
        if heading_list is None or i >= len(heading_list):
            heading = None
        else:
            heading = heading_list[i]
        # FIX: format_table()'s parameter is named "headings" -- the old
        # call passed "heading=", raising TypeError whenever this ran.
        s = format_table(tuples_list[i], caption = caption, headings = heading, indent = indent)
        output.append(s)
    return '\n\n'.join(output)
#===============================================================================
def format_volt_table(table, caption = | |
from typing import Any
from jsonschema.exceptions import ValidationError
import urllib.request
import json
import pytest
from mojap_metadata import Metadata
from mojap_metadata.metadata.metadata import (
_parse_and_split,
_get_first_level,
_unpack_complex_data_type,
_table_schema,
_get_type_category_pattern_dict_from_schema,
_schema_url,
)
@pytest.mark.parametrize(
    argnames="attribute,default_value,valid_value,invalid_value",
    argvalues=[
        ("name", "", "test", 0),
        ("description", "", "test", 0),
        ("file_format", "", "test", 0),
        ("sensitive", False, True, 0),
    ],
)
def test_basic_attributes(
    attribute: str, default_value: Any, valid_value: Any, invalid_value: Any
):
    """
    Attributes with default, valid and invalid types are handled as
    expected.
    """
    metadata = Metadata()
    # Fresh instances expose the documented default.
    assert getattr(metadata, attribute) == default_value
    # A type-correct value round-trips.
    setattr(metadata, attribute, valid_value)
    assert getattr(metadata, attribute) == valid_value
    # A wrongly-typed value is rejected by schema validation.
    with pytest.raises(ValidationError):
        setattr(metadata, attribute, invalid_value)
def test_columns_default():
    """A fresh Metadata instance starts with an empty column list."""
    assert Metadata().columns == []
# Malformed column specs: wrong container types, missing name/type info,
# unknown or malformed type strings, and mismatched type_category/type pairs.
@pytest.mark.parametrize(
    argnames="col_input",
    argvalues=[
        "",
        [""],
        [{"name": "test"}],
        [{"type_category": "integer"}],
        [{"type": "null"}],
        [{"name": "test", "type_category": "test"}],
        [{"name": 0, "type_category": "integer"}],
        [{"name": "test", "type_category": 0}],
        [{"name": "test", "type": 0}],
        [{"name": "test", "type": "int7"}],
        [{"name": "test", "type": "kint8"}],
        [{"name": "test", "type": "float8"}],
        [{"name": "test", "type": "decimal"}],
        [{"name": "test", "type": "decimal(0,38)"}],
        [{"name": "test", "type_category": "null", "type": "test"}],
        [{"name": "test", "type_category": "integer", "type": "test"}],
        [{"name": "test", "type": "time32"}],
        [{"name": "test", "type": "time64"}],
        [{"name": "test", "type": "timestamp"}],
        [{"name": "test", "type_category": "timestamp", "type": "timestamp"}],
        [{"name": "test", "type_category": "datetime", "type": "timestamp(s)"}],
    ],
)
def test_columns_validation_error(col_input: Any):
    """Every malformed column spec is rejected by schema validation."""
    metadata = Metadata()
    with pytest.raises(ValidationError):
        metadata.columns = col_input
# Well-formed column specs: every supported type_category, every concrete
# type (with unit/precision parameters where required), matching
# category/type pairs, and nested struct/list types.
@pytest.mark.parametrize(
    argnames="col_input",
    argvalues=[
        [{"name": "test", "type_category": "null"}],
        [{"name": "test", "type_category": "integer"}],
        [{"name": "test", "type_category": "float"}],
        [{"name": "test", "type_category": "string"}],
        [{"name": "test", "type_category": "timestamp"}],
        [{"name": "test", "type_category": "binary"}],
        [{"name": "test", "type_category": "boolean"}],
        [{"name": "test", "type": "int8"}],
        [{"name": "test", "type": "bool"}],
        [{"name": "test", "type": "bool_"}],
        [{"name": "test", "type": "int16"}],
        [{"name": "test", "type": "int32"}],
        [{"name": "test", "type": "int64"}],
        [{"name": "test", "type": "uint8"}],
        [{"name": "test", "type": "uint16"}],
        [{"name": "test", "type": "uint32"}],
        [{"name": "test", "type": "uint64"}],
        [{"name": "test", "type": "float16"}],
        [{"name": "test", "type": "float32"}],
        [{"name": "test", "type": "float64"}],
        [{"name": "test", "type": "decimal128(0,38)"}],
        [{"name": "test", "type": "time32(s)"}],
        [{"name": "test", "type": "time32(ms)"}],
        [{"name": "test", "type": "time64(us)"}],
        [{"name": "test", "type": "time64(ns)"}],
        [{"name": "test", "type": "timestamp(s)"}],
        [{"name": "test", "type": "timestamp(ms)"}],
        [{"name": "test", "type": "timestamp(us)"}],
        [{"name": "test", "type": "timestamp(ns)"}],
        [{"name": "test", "type": "date32"}],
        [{"name": "test", "type": "date64"}],
        [{"name": "test", "type": "string"}],
        [{"name": "test", "type": "large_string"}],
        [{"name": "test", "type": "utf8"}],
        [{"name": "test", "type": "large_utf8"}],
        [{"name": "test", "type": "binary"}],
        [{"name": "test", "type": "binary(128)"}],
        [{"name": "test", "type": "large_binary"}],
        [{"name": "test", "type_category": "null", "type": "null"}],
        [{"name": "test", "type_category": "integer", "type": "int8"}],
        [{"name": "test", "type_category": "float", "type": "float32"}],
        [{"name": "test", "type_category": "string", "type": "string"}],
        [{"name": "test", "type_category": "timestamp", "type": "timestamp(ms)"}],
        [{"name": "test", "type_category": "binary", "type": "binary(128)"}],
        [{"name": "test", "type_category": "binary", "type": "binary"}],
        [{"name": "test", "type_category": "boolean", "type": "bool"}],
        [{"name": "test", "type_category": "boolean", "type": "bool_"}],
        [{"name": "test", "type": "struct<num:int64>"}],
        [{"name": "test", "type": "list<int64>"}],
        [{"name": "test", "type": "list<list_<int64>>"}],
        [{"name": "test", "type": "large_list<int64>"}],
        [{"name": "test", "type": "large_list<large_list<int64>>"}],
        [{"name": "test", "type": "struct<num:int64,newnum:int64>"}],
        [{"name": "test", "type": "struct<num:int64,arr:list_<int64>>"}],
        [{"name": "test", "type": "list_<struct<num:int64,desc:string>>"}],
        [{"name": "test", "type": "struct<num:int64,desc:string>"}],
        [{"name": "test", "type": "list<decimal128(38,0)>"}],
        [{"name": "test", "type": "large_list<decimal128(38,0)>"}],
    ],
)
def test_columns_pass(col_input: Any):
    """Every well-formed column spec is accepted at construction time."""
    Metadata(columns=col_input)
def test_primary_key_and_partitions_attributes():
    # Placeholder: intended to cover the primary_key and partitions
    # attributes, but no assertions have been written yet.
    pass
def test_from_dict():
    """Metadata.from_dict exposes every input key as an attribute, and
    attribute writes are reflected in the underlying _data dict."""
    source = {
        "name": "test",
        "description": "test",
        "file_format": "test",
        "sensitive": False,
        "columns": [{"name": "test", "type": "null"}],
        "primary_key": ["test"],
        "partitions": ["test"],
    }
    meta = Metadata.from_dict(source)
    mismatches = {k: getattr(meta, k) for k, v in source.items() if getattr(meta, k) != v}
    assert not mismatches
    # Setting an attribute must write through to the backing dict.
    meta.name = "bob"
    assert meta._data["name"] == meta.name
def test_preservation_of_underlying_metadata():
    """Round-tripping through Metadata keeps unknown keys and copies input."""
    source = {
        "name": "test",
        "description": "test",
        "file_format": "test",
        "sensitive": False,
        "columns": [{"name": "test", "type": "null"}],
        "primary_key": ["test"],
        "partitions": ["test"],
        "additional-attr": "test",
    }
    meta = Metadata.from_dict(source)
    dumped = meta.to_dict()
    # Every input key (including the unknown "additional-attr") survives.
    assert all(dumped[key] == value for key, value in source.items())
    # The metadata holds its own copy of the input, not a reference to it.
    assert meta._data is not source
    source["columns"] = [{"name": "new_test", "type": "bool_"}]
    assert source != meta.columns
    # Separate instances must not share mutable column state.
    first = Metadata()
    second = Metadata()
    assert first.columns == second.columns
    first.columns.append({"name": "new_test", "type": "bool_"})
    assert first.columns != second.columns
def test_to_dict():
    """to_dict emits the schema URL plus all constructor arguments."""
    fields = dict(
        name="test",
        description="test",
        file_format="test",
        sensitive=False,
        columns=[{"name": "test", "type": "null"}],
        primary_key=["test"],
        partitions=["test"],
    )
    metadata = Metadata(**fields)
    expected = {"$schema": _schema_url}
    expected.update(fields)
    assert metadata.to_dict() == expected
@pytest.mark.parametrize(argnames="writer", argvalues=["json", "yaml"])
def test_to_from_json_yaml(tmpdir, writer):
    """Metadata survives a round trip through to_json/from_json and
    to_yaml/from_yaml.

    Bug fix: the output filename used the literal string "meta.{writer}"
    because the f-string prefix was missing; the file now gets the correct
    .json / .yaml extension for each parametrized writer.
    """
    path_file = tmpdir.mkdir("test_outputs").join(f"meta.{writer}")
    test_dict = {
        "name": "test",
        "description": "test",
        "file_format": "test",
        "sensitive": False,
        "columns": [{"name": "test", "type": "null"}],
        "primary_key": ["test"],
        "partitions": ["test"],
    }
    meta = Metadata.from_dict(test_dict)
    # Write with to_<writer>, read back with from_<writer>, compare content.
    getattr(meta, f"to_{writer}")(str(path_file))
    read_meta = getattr(Metadata, f"from_{writer}")(str(path_file))
    out_dict = read_meta.to_dict()
    for k, v in test_dict.items():
        assert out_dict[k] == v
@pytest.mark.parametrize(
    "t,e",
    [
        ("Don't grab this <Get this stuff> Don't grab this ", "Get this stuff"),
        (
            "struct<a: timestamp[s], b: struct<f1: int32, f2: string>>",
            "a: timestamp[s], b: struct<f1: int32, f2: string>",
        ),
        ("a: timestamp[s], b: struct<f1: int32, f2: string>", "f1: int32, f2: string"),
    ],
)
def test_get_first_level(t, e):
    """_get_first_level returns the contents of the outermost <...> pair."""
    extracted = _get_first_level(t)
    assert extracted == e
@pytest.mark.parametrize(
    "text,char,expected",
    [
        (
            "a: timestamp[s], b: struct<f1: int32, f2: string>",
            ",",
            ["a: timestamp[s]", "b: struct<f1: int32, f2: string>"],
        ),
        (
            'a: timestamp["s", +07:30], b: decimal128(3,5)',
            ",",
            ['a: timestamp["s", +07:30]', "b: decimal128(3,5)"],
        ),
        (
            'a: timestamp["s", +07:30], b: decimal128(3,5)',
            ":",
            ["a", 'timestamp["s", +07:30], b', "decimal128(3,5)"],
        ),
        (
            "k1:list<string>, k2:decimal128(0, 38), k3:struct<a:int64, b:int64>",
            ",",
            ["k1:list<string>", "k2:decimal128(0, 38)", "k3:struct<a:int64, b:int64>"],
        )
    ],
)
def test_parse_and_split(text, char, expected):
    """_parse_and_split splits on `char` only at the top nesting level
    (bracketed/parenthesised separators are left alone)."""
    pieces = _parse_and_split(text, char)
    assert list(pieces) == expected
@pytest.mark.parametrize(
    "data_type,expected",
    [
        ("string", "string"),
        ("struct<num:int64>", {"struct": {"num": "int64"}}),
        ("list_<int64>", {"list_": "int64"}),
        ("list<int64>", {"list": "int64"}),
        ("list<list_<int64>>", {"list": {"list_": "int64"}}),
        ("list_<list_<int64>>", {"list_": {"list_": "int64"}}),
        ("large_list<int64>", {"large_list": "int64"}),
        ("large_list<large_list<int64>>", {"large_list": {"large_list": "int64"}}),
        (
            "struct<num:int64,newnum:int64>",
            {"struct": {"num": "int64", "newnum": "int64"}},
        ),
        (
            "struct<num:int64,arr:list_<int64>>",
            {"struct": {"num": "int64", "arr": {"list_": "int64"}}},
        ),
        (
            "list_<struct<num:int64,desc:string>>",
            {"list_": {"struct": {"num": "int64", "desc": "string"}}},
        ),
        (
            "struct<num:int64,desc:string>",
            {"struct": {"num": "int64", "desc": "string"}},
        ),
        (
            "list_<decimal128(38,0)>",
            {"list_": "decimal128(38,0)"},
        ),
        (
            "struct<a:timestamp[s], b:struct<f1:int32, f2:string, f3:decimal128(3,5)>>",
            {
                "struct": {
                    "a": "timestamp[s]",
                    "b": {
                        "struct": {
                            "f1": "int32",
                            "f2": "string",
                            "f3": "decimal128(3,5)",
                        }
                    },
                }
            },
        ),
    ],
)
def test_unpack_complex_data_type(data_type, expected):
    """The module-level function and the Metadata method both unpack the
    type string into the same nested dict representation."""
    unpacked = _unpack_complex_data_type(data_type)
    assert unpacked == expected
    assert Metadata().unpack_complex_data_type(data_type) == expected
def test_set_col_types_from_type_category():
    """set_col_types_from_type_category fills in every column's "type",
    using either the default lookup (with a warning) or a user callable."""
    categories = [
        "null",
        "integer",
        "float",
        "string",
        "timestamp",
        "binary",
        "boolean",
        "list",
        "struct",
    ]
    test_dict = {
        "name": "test",
        "description": "test",
        "file_format": "test",
        "sensitive": False,
        "columns": [
            {"name": f"test_{cat}", "type_category": cat} for cat in categories
        ],
    }
    meta = Metadata.from_dict(test_dict)
    # Filling in types without an explicit mapper should warn the user.
    with pytest.warns(UserWarning):
        meta.set_col_types_from_type_category()
    for col in meta.columns:
        category = col["name"].replace("test_", "")
        assert col["type"] == meta.default_type_category_lookup.get(category)
    # A custom callable mapping type_category -> type takes precedence.
    custom = {
        "null": "null",
        "integer": "uint8",
        "float": "decimal128(2,5)",
        "string": "large_string",
        "timestamp": "timestamp(us)",
        "binary": "large_binary",
        "boolean": "bool_",
        "list": "large_list<null>",
        "struct": "map_<null>",
    }
    meta2 = Metadata.from_dict(test_dict)
    meta2.set_col_types_from_type_category(lambda col: custom.get(col["type_category"]))
    for col in meta2.columns:
        assert col["type"] == custom.get(col["name"].replace("test_", ""))
def test_spec_matches_public_schema():
    """The packaged table schema must match the published public schema."""
    update_hint = (
        "You will need to update the public schema here: "
        "https://github.com/moj-analytical-services/metadata_schema/"
    )
    schema_url = Metadata()._data["$schema"]
    # Fetch the canonical schema from its public URL and compare.
    with urllib.request.urlopen(schema_url) as response:
        published = json.loads(response.read().decode())
    assert published == _table_schema, update_hint
def test_type_category_mapping():
    """_get_type_category_pattern_dict_from_schema yields the expected
    regex pattern for every type category."""
    actual = _get_type_category_pattern_dict_from_schema()
    expected = {
        "null": r"^null$",
        "integer": r"^u?int(8|16|32|64)$",
        "float": r"^float(16|32|64)$|^decimal128\(\d+,\d+\)$",
        "string": r"^string$|^large_string$|^utf8$|^large_utf8$",
        "timestamp": r"^time32\((s|ms)\)$|^time64\((us|ns)\)$|^date(32|64)$|^timestamp\((s|ms|us|ns)\)$",  # noqa
        "binary": r"^binary(\([0-9]+\))?$|^large_binary$",
        "boolean": r"^bool$|^bool_$",
        "list": r"^large_list<.+>$|^list_<.+>$|^list<.+>$",
        "struct": r"^map_<.+>$|^struct<.+>$",
    }
    assert expected == actual
@pytest.mark.parametrize(
argnames="col_input,expected_cat",
argvalues=[
([{"name": "test", "type": "null"}], "null"),
([{"name": "test", "type": "int8"}], "integer"),
([{"name": "test", "type": "bool_"}], "boolean"),
([{"name": "test", "type": "int16"}], "integer"),
([{"name": "test", "type": "int32"}], "integer"),
([{"name": "test", "type": "int64"}], "integer"),
([{"name": "test", "type": "uint8"}], "integer"),
([{"name": "test", "type": "uint16"}], "integer"),
([{"name": "test", "type": "uint32"}], "integer"),
([{"name": "test", "type": "uint64"}], "integer"),
([{"name": "test", "type": "float16"}], "float"),
([{"name": "test", "type": "float32"}], "float"),
([{"name": "test", "type": "float64"}], "float"),
([{"name": "test", "type": "decimal128(0,38)"}], "float"),
([{"name": "test", "type": "time32(s)"}], "timestamp"),
([{"name": "test", "type": "time32(ms)"}], "timestamp"),
([{"name": "test", "type": "time64(us)"}], "timestamp"),
([{"name": "test", "type": "time64(ns)"}], "timestamp"),
([{"name": "test", "type": "timestamp(s)"}], "timestamp"),
([{"name": "test", "type": "timestamp(ms)"}], "timestamp"),
([{"name": "test", "type": "timestamp(us)"}], "timestamp"),
([{"name": "test", "type": "timestamp(ns)"}], "timestamp"),
([{"name": "test", "type": "date32"}], "timestamp"),
([{"name": "test", "type": "date64"}], "timestamp"),
([{"name": "test", "type": "string"}], "string"),
([{"name": "test", "type": "large_string"}], "string"),
([{"name": "test", "type": "utf8"}], "string"),
([{"name": "test", "type": "large_utf8"}], "string"),
([{"name": "test", "type": "binary"}], "binary"),
([{"name": "test", "type": "binary(128)"}], "binary"),
([{"name": "test", "type": "large_binary"}], "binary"),
([{"name": "test", "type": "struct<num:int64>"}], "struct"),
([{"name": "test", "type": "list_<int64>"}], "list"),
([{"name": "test", "type": "list_<list_<int64>>"}], "list"),
([{"name": "test", "type": "large_list<int64>"}], "list"),
([{"name": "test", "type": "large_list<large_list<int64>>"}], "list"),
([{"name": "test", "type": "struct<num:int64,newnum:int64>"}], "struct"),
([{"name": "test", "type": "struct<num:int64,arr:list_<int64>>"}], "struct"),
([{"name": "test", "type": "list_<struct<num:int64,desc:string>>"}], "list"),
([{"name": "test", "type": "struct<num:int64,desc:string>"}], "struct"),
([{"name": "test", "type": "list_<decimal128(38,0)>"}], "list"),
([{"name": "test", | |
= '''MATCH (n:{}:word)
WITH n.label as label, count(n.label) as c
ORDER BY c DESC, label
with label
LIMIT {}
return label'''.format(c.cypher_safe_name, count)
results = c.execute_cypher(statement)
results = [x['label'] for x in results]
return Response(results)
@action(detail=True, methods=['get'])
def default_subsets(self, request, pk=None):
    """Return the corpus's default subset for one of the known subset
    classes ('syllabics', 'sibilants' or 'stressed_vowels'), serialized
    as a JSON string."""
    if isinstance(request.user, django.contrib.auth.models.AnonymousUser):
        return Response(status=status.HTTP_401_UNAUTHORIZED)
    subset_class = request.GET.get('subset_class', 'syllabics')
    corpus = models.Corpus.objects.get(pk=pk)
    permissions = corpus.user_permissions.filter(user=request.user, can_query=True).all()
    if not len(permissions):
        return Response(status=status.HTTP_401_UNAUTHORIZED)
    valid_classes = ('syllabics', 'sibilants', 'stressed_vowels')
    if subset_class not in valid_classes:
        return Response(
            'Invalid subset class',
            status=status.HTTP_400_BAD_REQUEST)
    # Each valid subset class maps directly onto a corpus attribute.
    subset = getattr(corpus, subset_class)
    return Response(json.dumps(subset))
@action(detail=True, methods=['get'])
def phones(self, request, pk=None):
    """Return every phone in the corpus lexicon, serialized to JSON."""
    if isinstance(request.user, django.contrib.auth.models.AnonymousUser):
        return Response(status=status.HTTP_401_UNAUTHORIZED)
    corpus = self.get_object()
    allowed = corpus.user_permissions.filter(user=request.user, can_query=True).all()
    if not len(allowed):
        return Response(status=status.HTTP_401_UNAUTHORIZED)
    with CorpusContext(corpus.config) as c:
        results = c.query_lexicon(c.lexicon_phone).columns(c.lexicon_phone.label).all()
        return Response(results.to_json())
@action(detail=True, methods=['get'])
def phone_set(self, request, pk=None):
    """Return the corpus's phone inventory as a sorted list of unique
    phone labels.

    Fix: duplicates are now actually removed (via set()), matching the
    original comment's stated intent and the parallel word_set endpoint,
    which already deduplicated with set().
    """
    if isinstance(request.user, django.contrib.auth.models.AnonymousUser):
        return Response(status=status.HTTP_401_UNAUTHORIZED)
    corpus = self.get_object()
    permissions = corpus.user_permissions.filter(user=request.user, can_query=True).all()
    if not len(permissions):
        return Response(status=status.HTTP_401_UNAUTHORIZED)
    with CorpusContext(corpus.config) as c:
        q = c.query_lexicon(c.lexicon_phone).columns(c.lexicon_phone.label.column_name('label'))
        phones = q.all()
        # Remove duplicates, then sort, to get the phone set
        phones = sorted(set(x['label'] for x in phones))
    return Response(phones)
@action(detail=True, methods=['get'])
def word_set(self, request, pk=None):
    """Return the sorted set of unique word labels in the corpus lexicon."""
    if isinstance(request.user, django.contrib.auth.models.AnonymousUser):
        return Response(status=status.HTTP_401_UNAUTHORIZED)
    corpus = self.get_object()
    can_query = corpus.user_permissions.filter(user=request.user, can_query=True).all()
    if not len(can_query):
        return Response(status=status.HTTP_401_UNAUTHORIZED)
    with CorpusContext(corpus.config) as c:
        label_column = c.lexicon_word.label.column_name('label')
        rows = c.query_lexicon(c.lexicon_word).columns(label_column).all()
        # Deduplicate, then sort, to obtain the word set.
        unique_labels = set(row['label'] for row in rows)
    return Response(sorted(unique_labels))
@action(detail=True, methods=['get'])
def hierarchy(self, request, pk=None):
    """Return the serialized annotation hierarchy for the corpus.

    Responds 400 when the corpus database is not running, and marks the
    database record as stopped if Neo4j turns out to be unreachable.
    """
    if isinstance(request.user, django.contrib.auth.models.AnonymousUser):
        return Response(status=status.HTTP_401_UNAUTHORIZED)
    corpus = self.get_object()
    if corpus.database.status != 'R':
        return Response('Database is not running', status=status.HTTP_400_BAD_REQUEST)
    permissions = corpus.user_permissions.filter(user=request.user, can_query=True).all()
    if not len(permissions):
        return Response(status=status.HTTP_401_UNAUTHORIZED)
    try:
        with CorpusContext(corpus.config) as c:
            data = serializers.HierarchySerializer(c.hierarchy).data
    except neo4j_exceptions.ServiceUnavailable:
        # The status flag said running, but Neo4j is unreachable:
        # record the database as stopped and clear the stale PIDs.
        database = corpus.database
        database.status = 'S'
        database.neo4j_pid = None
        database.influxdb_pid = None
        database.save()
        return Response('Database is not running', status=status.HTTP_400_BAD_REQUEST)
    return Response(data)
@action(detail=True, methods=['get'])
def utterance_pitch_track(self, request, pk=None):
    """Analyze and return the pitch (F0) track for one utterance.

    Query parameters: utterance_id (required; responds with a None payload
    when absent), source (default 'praat'), min_pitch / max_pitch in Hz
    (defaults 50 / 500).

    Fixes: compares F0 against None with `is not None` instead of `!=`,
    and drops the intermediate single-key dict that was built and
    immediately unpacked.
    """
    if isinstance(request.user, django.contrib.auth.models.AnonymousUser):
        return Response(status=status.HTTP_401_UNAUTHORIZED)
    corpus = self.get_object()
    permissions = corpus.user_permissions.filter(
        user=request.user, can_query=True, can_view_detail=True).all()
    if not len(permissions):
        return Response(status=status.HTTP_401_UNAUTHORIZED)
    utterance_id = request.query_params.get('utterance_id', None)
    if utterance_id is None:
        return Response(None)
    source = request.query_params.get('source', 'praat')
    # NOTE(review): int() raises ValueError (-> 500) on non-numeric query
    # params; consider responding 400 instead.
    min_pitch = int(request.query_params.get('min_pitch', 50))
    max_pitch = int(request.query_params.get('max_pitch', 500))
    with CorpusContext(corpus.config) as c:
        results = c.analyze_utterance_pitch(
            utterance_id, source=source, min_pitch=min_pitch, max_pitch=max_pitch)
    # Only points with a measured F0 are serialized into the track.
    track = serializers.PitchPointSerializer(
        [x for x in results if x.F0 is not None], many=True).data
    return Response(track)
@action(detail=True, methods=['post'])
def save_utterance_pitch_track(self, request, pk=None):
    """Persist an edited pitch track for an utterance and return the
    update time stamp."""
    if isinstance(request.user, django.contrib.auth.models.AnonymousUser):
        return Response(status=status.HTTP_401_UNAUTHORIZED)
    corpus = self.get_object()
    editors = corpus.user_permissions.filter(
        user=request.user, can_query=True, can_edit=True).all()
    if not len(editors):
        return Response(status=status.HTTP_401_UNAUTHORIZED)
    # Both keys are required in the POST body (KeyError -> 500 otherwise).
    utterance_id = request.data['id']
    track = request.data['track']
    with CorpusContext(corpus.config) as c:
        time_stamp = c.update_utterance_pitch_track(utterance_id, track)
    return Response({'success': True, 'time_stamp': time_stamp})
class SourceChoiceViewSet(viewsets.ViewSet):
    """Lists the entries available in the configured source-data directory."""

    def list(self, request):
        # Anonymous users may not browse the source-data directory.
        if isinstance(request.user, django.contrib.auth.models.AnonymousUser):
            return Response(status=status.HTTP_401_UNAUTHORIZED)
        return Response(os.listdir(settings.SOURCE_DATA_DIRECTORY))
class DiscourseViewSet(viewsets.ViewSet):
    """Read-only endpoints for the discourses of a corpus."""

    def list(self, request, corpus_pk=None):
        """Return the names of all discourses in the corpus."""
        if isinstance(request.user, django.contrib.auth.models.AnonymousUser):
            return Response(status=status.HTTP_401_UNAUTHORIZED)
        corpus = models.Corpus.objects.get(pk=corpus_pk)
        if not len(corpus.user_permissions.filter(user=request.user, can_query=True).all()):
            return Response(status=status.HTTP_401_UNAUTHORIZED)
        with CorpusContext(corpus.config) as c:
            names = c.discourses
        return Response(names)

    @action(detail=False, methods=['get'])
    def properties(self, request, corpus_pk=None):
        """Return discourse grouping properties and their possible values,
        prefixed by the list of discourse names."""
        if isinstance(request.user, django.contrib.auth.models.AnonymousUser):
            return Response(status=status.HTTP_401_UNAUTHORIZED)
        corpus = models.Corpus.objects.get(pk=corpus_pk)
        if not len(corpus.user_permissions.filter(user=request.user, can_query=True).all()):
            return Response(status=status.HTTP_401_UNAUTHORIZED)
        with CorpusContext(corpus.config) as c:
            data = [{'name': 'name', 'options': c.discourses}]
            for prop in c.query_metadata(c.discourse).grouping_factors():
                levels = c.query_metadata(c.discourse).levels(getattr(c.discourse, prop))
                data.append({'name': prop, 'options': levels})
        return Response(data)
class SpeakerViewSet(viewsets.ViewSet):
    """Read-only endpoints for the speakers of a corpus."""

    def list(self, request, corpus_pk=None):
        """Return the names of all speakers in the corpus."""
        if isinstance(request.user, django.contrib.auth.models.AnonymousUser):
            return Response(status=status.HTTP_401_UNAUTHORIZED)
        corpus = models.Corpus.objects.get(pk=corpus_pk)
        if not len(corpus.user_permissions.filter(user=request.user, can_query=True).all()):
            return Response(status=status.HTTP_401_UNAUTHORIZED)
        with CorpusContext(corpus.config) as c:
            names = c.speakers
        return Response(names)

    @action(detail=False, methods=['get'])
    def properties(self, request, corpus_pk=None):
        """Return speaker grouping properties and their possible values,
        prefixed by the list of speaker names."""
        if isinstance(request.user, django.contrib.auth.models.AnonymousUser):
            return Response(status=status.HTTP_401_UNAUTHORIZED)
        corpus = models.Corpus.objects.get(pk=corpus_pk)
        if not len(corpus.user_permissions.filter(user=request.user, can_query=True).all()):
            return Response(status=status.HTTP_401_UNAUTHORIZED)
        with CorpusContext(corpus.config) as c:
            data = [{'name': 'name', 'options': c.speakers}]
            for prop in c.query_metadata(c.speaker).grouping_factors():
                levels = c.query_metadata(c.speaker).levels(getattr(c.speaker, prop))
                data.append({'name': prop, 'options': levels})
        return Response(data)
class SubannotationViewSet(viewsets.ViewSet):
    """CRUD endpoints for subannotations attached to corpus annotations.

    All actions require an authenticated user with can_query permission on
    the corpus; create additionally requires can_annotate, while update and
    destroy additionally require can_edit.
    """

    def create(self, request, corpus_pk=None):
        """Attach a new subannotation to an existing annotation and return
        the serialized subannotation that was just added."""
        if isinstance(request.user, django.contrib.auth.models.AnonymousUser):
            return Response(status=status.HTTP_401_UNAUTHORIZED)
        corpus = models.Corpus.objects.get(pk=corpus_pk)
        permissions = corpus.user_permissions.filter(user=request.user, can_query=True, can_annotate=True).all()
        if not len(permissions):
            return Response(status=status.HTTP_401_UNAUTHORIZED)
        # NOTE(review): pop() mutates request.data in place.
        a_type = request.data.pop('annotation_type')
        a_id = request.data.pop('annotation_id')
        s_type = request.data.pop('subannotation_type')
        data = request.data['subannotation']
        with CorpusContext(corpus.config) as c:
            att = getattr(c, a_type)
            # Look up the single annotation the subannotation belongs to.
            q = c.query_graph(att).filter(getattr(att, 'id') == a_id)
            res = q.all()[0]
            res.add_subannotation(s_type, **data)
            # Re-serialize the annotation and return only the newly added
            # (last) subannotation of the requested type.
            data = serializers.serializer_factory(c.hierarchy, a_type, top_level=True, with_subannotations=True)(
                res).data
            data = data[a_type][s_type][-1]
        return Response(data)

    def update(self, request, pk=None, corpus_pk=None):
        """Update properties of an existing subannotation node via Cypher.

        The node's labels determine its subannotation type, which drives
        type coercion of the submitted values before they are written.
        """
        if isinstance(request.user, django.contrib.auth.models.AnonymousUser):
            return Response(status=status.HTTP_401_UNAUTHORIZED)
        corpus = models.Corpus.objects.get(pk=corpus_pk)
        permissions = corpus.user_permissions.filter(user=request.user, can_query=True, can_edit=True).all()
        if not len(permissions):
            return Response(status=status.HTTP_401_UNAUTHORIZED)
        data = request.data
        s_id = data.pop('id')
        props = []
        # Cypher SET fragment: property name doubles as the parameter name.
        prop_template = 's.%s = {%s}'
        with CorpusContext(corpus.config) as c:
            # Fetch the node to discover its subannotation type label.
            statement = '''MATCH (s:{corpus_name}) WHERE s.id = {{s_id}} RETURN s'''.format(corpus_name=c.cypher_safe_name)
            res = c.execute_cypher(statement, s_id=s_id)
            for r in res:
                for x in r['s'].labels:
                    if x in c.hierarchy.subannotation_properties:
                        s_type = x
            # NOTE(review): s_type stays unbound (NameError below) if no
            # label matches a known subannotation type — confirm intended.
            for k, v in data.items():
                props.append(prop_template % (k, k))
                # Coerce the submitted value to the property's declared type.
                if c.hierarchy.has_subannotation_property(s_type, k):
                    for name, t in c.hierarchy.subannotation_properties[s_type]:
                        if name == k:
                            data[k] = t(v)
            set_props = ',\n'.join(props)
            statement = '''MATCH (s:{corpus_name}) WHERE s.id = {{s_id}}
            SET {set_props}'''.format(corpus_name=c.cypher_safe_name, set_props=set_props)
            c.execute_cypher(statement, s_id=s_id, **data)
        return Response(None)

    def destroy(self, request, pk=None, corpus_pk=None):
        """Delete a subannotation node (and its relationships) by id."""
        if isinstance(request.user, django.contrib.auth.models.AnonymousUser):
            return Response(status=status.HTTP_401_UNAUTHORIZED)
        corpus = models.Corpus.objects.get(pk=corpus_pk)
        permissions = corpus.user_permissions.filter(user=request.user, can_query=True, can_edit=True).all()
        if not len(permissions):
            return Response(status=status.HTTP_401_UNAUTHORIZED)
        with CorpusContext(corpus.config) as c:
            statement = '''MATCH (s:{corpus_name}) WHERE s.id = {{s_id}}
            DETACH DELETE s'''.format(corpus_name=c.cypher_safe_name)
            c.execute_cypher(statement, s_id=pk)
        return Response(None)
class AnnotationViewSet(viewsets.ViewSet):
    """Endpoints operating on individual annotations."""

    @action(detail=True, methods=['get'])
    def sound_file(self, request, pk=None, corpus_pk=None):
        """Stream the WAV audio for the utterance identified by pk.

        Requires can_query and can_listen permission on the corpus.
        """
        if isinstance(request.user, django.contrib.auth.models.AnonymousUser):
            return Response(status=status.HTTP_401_UNAUTHORIZED)
        corpus = models.Corpus.objects.get(pk=corpus_pk)
        listeners = corpus.user_permissions.filter(
            user=request.user, can_query=True, can_listen=True).all()
        if not len(listeners):
            return Response(status=status.HTTP_401_UNAUTHORIZED)
        with CorpusContext(corpus.config) as c:
            fname = c.utterance_sound_file(pk, 'consonant')
        return FileResponse(open(fname, "rb"), content_type='audio/wav')
class EnrichmentViewSet(viewsets.ModelViewSet):
    """Model-backed endpoints for corpus enrichments."""
    # Django model and serializer backing this viewset.
    model = models.Enrichment
    serializer_class = serializers.EnrichmentSerializer

    def get_queryset(self):
        """Restrict the queryset to enrichments of the corpus in the URL."""
        return models.Enrichment.objects.filter(corpus__pk=self.kwargs['corpus_pk'])
def list(self, request, corpus_pk=None):
    """List all enrichments configured for the corpus.

    Requires can_query and can_enrich permission on the corpus.
    """
    if isinstance(request.user, django.contrib.auth.models.AnonymousUser):
        return Response(status=status.HTTP_401_UNAUTHORIZED)
    corpus = models.Corpus.objects.get(pk=corpus_pk)
    allowed = corpus.user_permissions.filter(
        user=request.user, can_query=True, can_enrich=True).all()
    if not len(allowed):
        return Response(status=status.HTTP_401_UNAUTHORIZED)
    queryset = models.Enrichment.objects.filter(corpus=corpus).all()
    serialized = serializers.EnrichmentSerializer(queryset, many=True)
    return Response(serialized.data)
def create(self, request, corpus_pk=None, *args, **kwargs):
log.info("Creating an enrichment.")
if isinstance(request.user, django.contrib.auth.models.AnonymousUser):
return Response(status=status.HTTP_401_UNAUTHORIZED)
corpus = models.Corpus.objects.get(pk=corpus_pk)
permissions = corpus.user_permissions.filter(user=request.user, can_query=True, can_enrich=True).all()
if not len(permissions):
return Response(status=status.HTTP_401_UNAUTHORIZED)
data = request.data
enrich_type = data['enrichment_type']
if enrich_type in ['pitch', 'formants', 'intensity']:
if not data.get('source', ''):
return Response(
'A program to use for this enrichment must be specified.',
status=status.HTTP_400_BAD_REQUEST)
name = 'Encode {} tracks'.format(enrich_type)
elif enrich_type == 'praat_script':
name = 'Enrich praat_script'
elif enrich_type in ['pauses', 'utterances', 'syllables']:
name = 'Encode {}'.format(enrich_type)
# Subset validation
elif enrich_type == 'subset':
label = data.get('subset_label', '')
if not label:
return Response(
'The subset must have a name.',
status=status.HTTP_400_BAD_REQUEST)
if not data.get('annotation_labels', []):
return Response(
'The subset cannot be empty.',
status=status.HTTP_400_BAD_REQUEST)
with CorpusContext(corpus.config) as c:
if c.hierarchy.has_token_subset(data.get('annotation_type', ''), data.get('subset_label', '')) or \
c.hierarchy.has_type_subset(data.get('annotation_type', ''), data.get('subset_label', '')):
return Response(
"The {} subset already exists".format(data.get('subset_label', '')),
status=status.HTTP_400_BAD_REQUEST)
name = 'Encode {} subset'.format(label)
#Stress pattern validation
elif request.data['enrichment_type'] in ['patterned_stress']:
prop = data.get('word_property', '')
if not prop:
return Response(
'There must be a word property.',
status=status.HTTP_400_BAD_REQUEST)
name = 'Encode lexical stress from {}'.format(prop)
# Hierarchical property validation
elif request.data['enrichment_type'] in ['hierarchical_property']:
label = data.get('property_label', '')
if not label:
return Response(
'The hierarchical property must have a name.',
status=status.HTTP_400_BAD_REQUEST)
higher = data.get('higher_annotation', '')
if not higher:
return Response(
'Higher annotation must be specified.',
status=status.HTTP_400_BAD_REQUEST)
lower = data.get('lower_annotation', '')
if not lower:
return Response(
'Lower annotation must be specified.',
status=status.HTTP_400_BAD_REQUEST)
with CorpusContext(corpus.config) as c:
annotation_types = c.hierarchy.highest_to_lowest
if higher not in annotation_types:
return Response(
'Must specify a higher annotation that has been encoded.',
status=status.HTTP_400_BAD_REQUEST)
if lower not in annotation_types:
return Response(
'Must specify a lower annotation that has been encoded.',
status=status.HTTP_400_BAD_REQUEST)
if annotation_types.index(lower) <= annotation_types.index(higher):
return Response(
'The lower annotation level must be lower than the higher annotation level.',
status=status.HTTP_400_BAD_REQUEST)
name = 'Encode {}'.format(label)
elif enrich_type in ['speaker_csv', 'discourse_csv', 'lexicon_csv', 'phone_csv']:
name = 'Enrich {}'.format(enrich_type.split('_')[0])
elif enrich_type in 'importcsv':
name = 'Temp enrichname {}'.format(uuid1())
elif enrich_type in ['relativize_pitch', 'relativize_intensity', 'relativize_formants']:
name = 'Relativize {}'.format(enrich_type.split('_')[1])
elif enrich_type == 'relativize_property':
name = 'Relativize {}'.format(data.get('property_name'))
# Formant validation
elif enrich_type == 'refined_formant_points':
dur_thresh = data.get('duration_threshold', '')
n_iterations = data.get('number_of_iterations', '')
if dur_thresh:
try:
int(dur_thresh)
except ValueError:
return Response(
'The duration threshold must be either blank or an integer.',
status=status.HTTP_400_BAD_REQUEST)
try:
int(n_iterations)
except ValueError:
return Response(
'The number of iterations must be an integer.',
status=status.HTTP_400_BAD_REQUEST)
name = 'Encode point formant values via refinement'
elif enrich_type == 'vot':
stops_label = data.get('stop_label', '')
vot_min = data.get('vot_min', '')
vot_max = data.get('vot_max', '')
| |
# prometheus/fx3/prometheus_fx3.py
#! /usr/bin/python
# -*- coding: UTF-8 -*-
#Distributed under the MIT license.
#Copyright (c) 2013 <NAME> (<EMAIL>)
#Permission is hereby granted, free of charge, to any person obtaining a copy of
#this software and associated documentation files (the "Software"), to deal in
#the Software without restriction, including without limitation the rights to
#use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
#of the Software, and to permit persons to whom the Software is furnished to do
#so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
from array import array as Array
from usb_device import USBDevice
from usb_device import USBDeviceError
import time
import usb
from defines import *
class PrometheusFX3Error(Exception):
    """Error raised for Prometheus FX3 specific failures."""
    pass
class PrometheusFX3(USBDevice):
@staticmethod
def is_connected():
    """Return True when a Prometheus FX3 board is present on the USB bus."""
    device = usb.core.find(idVendor=CYPRESS_VID, idProduct=PROMETHEUS_PID)
    return device is not None
def __init__(self):
    # Identify as a Prometheus FX3 (Cypress VID, Prometheus PID) USB device.
    super(PrometheusFX3, self).__init__(name = "Prometheus FX3", vid = CYPRESS_VID, pid = PROMETHEUS_PID)
    # Active USB configuration; None until set elsewhere.
    self.configuration = None
def read_logger(self):
    """Poll the logger bulk IN endpoint (0x81) for up to 128 bytes.

    Returns silently on timeout (errno 110) or disconnect (errno 5/16);
    the data that is read is currently unused — the forwarding call is
    commented out below.
    """
    print "Read the Logger"
    data = None
    try:
        #data = self.dev.read(0x81, 128, 0, 10)
        data = self.dev.read(0x81, 128, 10)
    except usb.core.USBError, err:
        if err.errno == 110:
            # Timeout: nothing logged right now.
            return
        if err.errno == 5:
            print "Error 5: Device was disconnected"
            return
        if err.errno == 16:
            print "Error 16: Device was disconnected"
            return
        else:
            print "Unknown USB Error: %s" % str(err)
            return
    #print "Read Log: %s" % str(data)
    #self.usb_server.device_to_host_comm(self.name, data[0], data[8:].tostring())
def host_to_device_comm(self, text):
    """Send `text` to the device over bulk OUT endpoint 0x01.

    Errors are reported on stdout and swallowed; timeouts (errno 110)
    return silently.
    """
    print "Host to device comm"
    with self.usb_lock:
        try:
            #self.dev.write(0x01, text, 0, 100)
            self.dev.write(0x01, text, 100)
        except usb.core.USBError, err:
            if err.errno == 110:
                # Timeout: drop the message.
                return
            if err.errno == 5:
                print "Error 5: Device was disconnected"
                return
            if err.errno == 16:
                print "Error 16: Device was disconnected"
                return
def read_mcu_config(self, address = 0xB3, length = 1):
    """Read `length` bytes of MCU configuration via a vendor control
    transfer (bmRequestType 0xC0 = vendor request, device-to-host).

    Raises USBDeviceError on timeout, disconnect, or any other USB error.
    """
    print "Read MCU Config"
    data = None
    with self.usb_lock:
        try:
            data = self.dev.ctrl_transfer(
                bmRequestType = 0xC0, #Vendor request, device-to-host
                bRequest = address, #Vendor-specific request code
                wValue = 0x00,
                wIndex = 0x00,
                data_or_wLength = length,
                timeout = 3000) #Timeout = 3 seconds
        except usb.core.USBError, err:
            if err.errno == 110:
                raise USBDeviceError("Error 110: Timeout")
            if err.errno == 5:
                raise USBDeviceError("Error 5: Device was disconnected")
            if err.errno == 16:
                raise USBDeviceError("Error 16: Device was disconnected")
            else:
                raise USBDeviceError("Unknown USB Device Error: %s" % str(err))
    return data
def write_mcu_config(self, address = 0xB3, data = []):
    """Write MCU configuration bytes via a vendor control transfer
    (bmRequestType 0x40 = vendor request, host-to-device).

    NOTE(review): the mutable default `data=[]` and the rebinding of
    `data` to ctrl_transfer's return value (which is then discarded)
    look unintentional — confirm against callers.
    Raises USBDeviceError on timeout, disconnect, or any other USB error.
    """
    with self.usb_lock:
        try:
            data = self.dev.ctrl_transfer(
                bmRequestType = 0x40, #Vendor request, host-to-device
                bRequest = address, #Vendor-specific request code
                wValue = 0x00,
                wIndex = 0x00,
                data_or_wLength = data)
            #data_or_wLength = data,
            #timeout = 1000) #Timeout = 1 second
        except usb.core.USBError, err:
            if err.errno == 110:
                raise USBDeviceError("Device Timeout set COMM Mode")
            if err.errno == 5:
                raise USBDeviceError("Error 5: Device was disconnected")
            if err.errno == 16:
                raise USBDeviceError("Error 16: Device was disconnected")
            else:
                raise USBDeviceError("Unknown USB Device Error: %s" % str(err))
def set_fpga_comm_mode(self):
    """Switch the FX3 into FPGA communication mode (vendor request 0xB1).

    Raises USBDeviceError on timeout, disconnect, or any other USB error.
    """
    with self.usb_lock:
        try:
            self.dev.ctrl_transfer(
                bmRequestType = 0x40, #Vendor request, host-to-device
                bRequest = 0xB1, #FPGA Comm Mode
                wValue = 0x00,
                wIndex = 0x00,
                data_or_wLength = "",
                timeout = 1000) #Timeout = 1 second
        except usb.core.USBError, err:
            if err.errno == 110:
                raise USBDeviceError("Device Timeout set COMM Mode")
            if err.errno == 5:
                raise USBDeviceError("Device was disconnected")
            if err.errno == 16:
                raise USBDeviceError("Device was disconnected")
            else:
                raise USBDeviceError("Unknown USB Device Error: %s" % str(err))
def ping(self):
    """Send the 16-byte ping command (magic 0x59 0xA4, opcode 0x0000,
    all-zero payload) to the FPGA and print the response."""
    buf = Array('B', [0x59, 0xA4, 0x00, 0x00,
                      0x00, 0x00, 0x00, 0x00,
                      0x00, 0x00, 0x00, 0x00,
                      0x00, 0x00, 0x00, 0x00])
    self.low_write(buf)
    print "FPGA Data Sent"
    data = self.low_read_response()
    print "response: %s" % str(data)
def read(self, address, length):
    """Issue a read command for `length` words at `address` and print the
    response header.

    Command layout: magic bytes 0x59 0xA4 and opcode bytes 0x00 0x02,
    followed by 32-bit big-endian length and address, then 4 zero bytes.
    NOTE(review): the opcode bytes here are identical to write()'s —
    confirm which opcode the FPGA actually expects for reads.
    """
    buf = Array('B', [0x59, 0xA4, 0x00, 0x02])
    #Append the length (32-bit, big-endian)
    buf.append((length >> 24) & 0xFF)
    buf.append((length >> 16) & 0xFF)
    buf.append((length >> 8) & 0xFF)
    buf.append(length & 0xFF)
    #Append the Address (32-bit, big-endian)
    buf.append((address >> 24) & 0xFF)
    buf.append((address >> 16) & 0xFF)
    buf.append((address >> 8) & 0xFF)
    buf.append(address & 0xFF)
    # Four bytes of padding complete the command.
    buf.append(0x00)
    buf.append(0x00)
    buf.append(0x00)
    buf.append(0x00)
    self.low_write(buf)
    data = self.low_read_response()
    print "response: %s" % str(data)
def write(self, address, data):
    """Send a write command carrying `data` (32-bit words) for `address`,
    then read back the device's response.

    Command layout: magic bytes 0x59 0xA4 and opcode bytes 0x00 0x02,
    32-bit big-endian length and address, each data word big-endian,
    then 4 zero bytes of padding.
    NOTE(review): the final print labels `data` (the request payload) as
    "response", and the trailing length extraction re-reads the command
    header rather than the device response — confirm intended behavior.
    """
    buf = Array('B', [0x59, 0xA4, 0x00, 0x02])
    length = len(data)
    #Append the length (32-bit, big-endian)
    buf.append((length >> 24) & 0xFF)
    buf.append((length >> 16) & 0xFF)
    buf.append((length >> 8) & 0xFF)
    buf.append(length & 0xFF)
    #Append the Address (32-bit, big-endian)
    buf.append((address >> 24) & 0xFF)
    buf.append((address >> 16) & 0xFF)
    buf.append((address >> 8) & 0xFF)
    buf.append(address & 0xFF)
    # Append each data word, most significant byte first.
    for d in data:
        buf.append((d >> 24) & 0xFF)
        buf.append((d >> 16) & 0xFF)
        buf.append((d >> 8) & 0xFF)
        buf.append((d ) & 0xFF)
    # Four bytes of padding complete the command.
    buf.append(0x00)
    buf.append(0x00)
    buf.append(0x00)
    buf.append(0x00)
    self.low_write(buf)
    self.low_read_response()
    print "response: %s" % str(data)
    #Extract the length from the read
    #buf = Array('B')
    #buf.append(data)
    length = (buf[4] << 24 | buf[5] << 16 | buf[6] << 8 | buf[7])
    print "length: %d" % length
    self.low_read(length)
def low_write(self, write_buf):
max_size = 512
with self.usb_lock:
while len(write_buf) > max_size:
print "Sending: %d" % count
count += 1
buf = write_buf[:max_size]
write_buf = write_buf[max_size:]
print "Length of buffer: %d" % len(buf)
try:
#self.dev.write(0x02, buf, 0, timeout=3000)
self.dev.write(0x02, buf, timeout=3000)
#self.dev.write(0x02, write_buf, 0, timeout=3000)
except usb.core.USBError, err:
if err.errno == 110:
raise USBDeviceError("Device timed out while attempting to send FPGA Config")
if err.errno == 5:
raise USBDeviceError("Device was disconnected")
if err.errno == 16:
raise USBDeviceError("Device was disconnected")
else:
raise USBDeviceError("Unknown USB Device Error: %s" % str(err))
def low_read(self, length):
data = None
try:
#data = self.dev.read(0x82, length << 2, 0, timeout=3000)
data = self.dev.read(0x82, length << 2, timeout=3000)
except usb.core.USBError, err:
if err.errno == 110:
return
if err.errno == 5:
print "Device was disconnected"
return
if err.errno == 16:
print "Device was disconnected"
return
else:
print "Unknown USB Error: %s" % str(err)
return
return data
def low_read_response(self):
data = None
try:
#data = self.dev.read(0x82, 12, 0, timeout=3000)
data = self.dev.read(0x82, 12, timeout=3000)
except usb.core.USBError, err:
if err.errno == 110:
return
if err.errno == 5:
print "Device was disconnected"
return
if err.errno == 16:
print "Device was disconnected"
return
else:
print "Unknown USB Error: %s" % str(err)
return
return data
def upload_fpga_image(self, bit_buf):
max_size = 512
if self.dev is None:
raise USBDeviceError("Device is None")
bit_buf_length = len(bit_buf)
length_buf = Array('B', [0, 0, 0, 0])
length_buf[3] = (bit_buf_length >> 24) & 0x000000FF
length_buf[2] = (bit_buf_length >> 16) & 0x000000FF
length_buf[1] = (bit_buf_length >> 8) & 0x000000FF
length_buf[0] = (bit_buf_length) & 0x000000FF
print "bit buf packets [3] [2] [1] [0]: %X %X %X %X" % (length_buf[3],
length_buf[2],
length_buf[1],
length_buf[0])
print "Length: (Hex): 0x%08X, (Dec): %d" % (bit_buf_length, bit_buf_length)
with self.usb_lock:
try:
self.dev.ctrl_transfer(
bmRequestType = 0x40, #VRequest, To the devce, Endpoint
bRequest = 0xB2, #FPGA Configuration mode
wValue = 0x00,
wIndex = 0x00,
data_or_wLength = length_buf.tostring(),
timeout = 1000) #Timeout = 1 second
except usb.core.USBError, err:
if err.errno == 110:
raise USBDeviceError("Device Timed Out while attempting to send FPGA Config")
if err.errno == 5:
raise USBDeviceError("Device was disconnected")
if err.errno == 16:
raise USBDeviceError("Device was disconnected")
else:
raise USBDeviceError("Unknown USB Device Error: %s" % str(err))
print "Sleep for a few seconds"
time.sleep(1)
count = 0
with self.usb_lock:
while len(bit_buf) > max_size:
print "Sending: %d" % count
count += 1
buf = bit_buf[:max_size]
bit_buf = bit_buf[max_size:]
print "Length of buffer: %d" % len(buf)
try:
#self.dev.write(0x02, buf, 0, timeout=3000)
self.dev.write(0x02, buf, timeout=3000)
#self.dev.write(0x02, bit_buf, 0, timeout=3000)
except usb.core.USBError, err:
if err.errno == 110:
raise USBDeviceError("Device timed out while attempting to send FPGA Config")
if err.errno == 5:
raise USBDeviceError("Device was disconnected")
if err.errno == 16:
raise USBDeviceError("Device was disconnected")
else:
raise USBDeviceError("Unknown USB Device Error: %s" % str(err))
print "FPGA Data Sent"
if len(bit_buf) > 0:
try:
#self.dev.write(0x02, bit_buf, 0, timeout=3000)
self.dev.write(0x02, bit_buf, timeout=3000)
except usb.core.USBError, err:
if err.errno == 110:
raise USBDeviceError("Device timed out while attempting | |
# Ad-hoc English -> French cleanup of the "JournalLib" column, applied as an
# ORDERED chain of substitutions: later rules run on the output of earlier
# ones (e.g. "ReclasserIFICATIO" below only exists as the product of an
# earlier rule).  Do not reorder.
# NOTE(review): pandas Series.str.replace treated the pattern as a REGEX by
# default before pandas 2.0, so patterns containing "." or "/" may match more
# than the literal text -- confirm the pandas version in use.
gl['JournalLib'] = gl["JournalLib"].str.replace("NET PAY", "SALAIRE NET")
gl['JournalLib'] = gl["JournalLib"].str.replace("CASH ", "ARGENT ")
gl['JournalLib'] = gl["JournalLib"].str.replace("Repayment ", "Repaiement ")
gl['JournalLib'] = gl["JournalLib"].str.replace("Acct. ", "Comptab. ")
gl['JournalLib'] = gl["JournalLib"].str.replace("ACCR ", "ACC ")
gl['JournalLib'] = gl["JournalLib"].str.replace("Accr ", "Acc.")
gl['JournalLib'] = gl["JournalLib"].str.replace("Cash Balance", "Solde de caisse")
gl['JournalLib'] = gl["JournalLib"].str.replace("RECLASS ", "RECLASSEMENT ")
gl['JournalLib'] = gl["JournalLib"].str.replace("VAT FILING ", "Dépôt de TVA ")
gl['JournalLib'] = gl["JournalLib"].str.replace("Needs to be re-booked due", "KI")
gl['JournalLib'] = gl["JournalLib"].str.replace("reclass from", "reclasser de")
gl['JournalLib'] = gl["JournalLib"].str.replace("RECLASS FROM", "reclasser de")
gl['JournalLib'] = gl["JournalLib"].str.replace("PAYROLL", "PAIE")
# NOTE(review): "RECLASS " was already rewritten to "RECLASSEMENT " above, so
# this second "RECLASS " rule can only match text produced by later data --
# likely a dead rule; confirm.
gl['JournalLib'] = gl["JournalLib"].str.replace("RECLASS ", "Reclasser")
gl['JournalLib'] = gl["JournalLib"].str.replace("DEDICTION","DEDUCTION")
gl['JournalLib'] = gl["JournalLib"].str.replace("Cash","Argent ")
gl['JournalLib'] = gl["JournalLib"].str.replace("cash ","argent ")
gl['JournalLib'] = gl["JournalLib"].str.replace("ReclasserIFICATIO","RECLASSEMENT ")
gl['JournalLib'] = gl["JournalLib"].str.replace("ImpôtS ","Impôts ")
gl['JournalLib'] = gl["JournalLib"].str.replace("Working Repas (Employees Only) ","Repas de travail (employés seulement) ")
gl['JournalLib'] = gl["JournalLib"].str.replace("/Banque Frais","/Frais Bancaires")
gl['JournalLib'] = gl["JournalLib"].str.replace("MED. INS.","ASSURANCE MED.")
gl['JournalLib'] = gl["JournalLib"].str.replace("AJE WIRE LOG TRAN","AJE VERSEMENT")
gl['JournalLib'] = gl["JournalLib"].str.replace("JUN'","JUIN'")
gl['JournalLib'] = gl["JournalLib"].str.replace("Deferred Rent18 rue de Lo","Loyer différé 18 Rue de Lo")
gl['JournalLib'] = gl["JournalLib"].str.replace("Facture - Brut'","Facture - Brute")
gl['JournalLib'] = gl["JournalLib"].str.replace("T&E","VD")
# Strip slashes entirely (must run after the "/Banque Frais" rule above).
gl['JournalLib'] = gl["JournalLib"].str.replace("/","")
gl['JournalLib'] = gl["JournalLib"].str.replace("Inv","Facture")
gl['JournalLib'] = gl["JournalLib"].str.replace("2019`","2019")
gl['JournalLib'] = gl["JournalLib"].str.replace("-2014V","")
# Bulk English -> French mappings applied to the "PieceRef" column via
# DataFrame.replace(..., regex=True): every KEY below is interpreted as a
# regular expression matched anywhere in the cell.
# NOTE(review): several keys contain regex metacharacters -- "(", ")", "+",
# "&", "." (e.g. "Working Meals (Employees Only)", "NRE + PROJECT ...") --
# which will NOT match the literal punctuation; they likely need re.escape'd
# patterns or regex=False.  Confirm against real data before changing.
mapping_Valuation1 = {" Valuation on": " Évaluation sur"," Valuation on Reverse":" Évaluation sur Contre Passation",
                      " Reverse Posting":" Contre-Passation d'Ecriture - Conversion de devise sur",
                      " Translation Using":" Conversion de devise sur"}
mapping_AA1 = {"Reclass from": " Reclassification de", "reclass from": " Reclassification de", "ZEE MEDIA":"ZEE MEDIA Campaignes Numériques", "TRAINING CONTRI. ER JANUARY '19":"FORMATION CONTRI. ER JANVIER' 19",
               "TAX FEES":"Taxes","SOCIAL SECURITY: URSSAF":"SÉCURITÉ SOCIALE: URSSAF","SOCIAL SECURITY: TRAINING CONTRIBUTIONS":"SÉCURITÉ SOCIALE: CONTRIBUTIONS À LA FORMATION",
               "SOCIAL SECURITY: APPRENTICESHIP CONTRIBU":"SÉCURITÉ SOCIALE: CONTRIBUTION À L’APPRENTISSAGE","RSM":"SERVICES DE PAIE RSM EF18","RSA":"SERVICES DE PAIE RSA OCT-JAN",
               "PRIVATE HEALTH":"SANTÉ PRIVÉE: ASSURANCE MÉDICALE-AXA/","PENSION: PENSION CONTRIBUTIONS - REUNICA":"PENSION: COTISATIONS DE RETRAITE-REUNICA","PENSION: LIFE & DISABILITY INSURANCE - R":"PENSION: ASSURANCE VIE & INVALIDITÉ-R",
               "PENSION JANUARY '19":"PENSION JANVIER '19",
               "ON CALL JANUARY '19":"Disponible Janvier'19",
               "NRE + PROJECT INITIATION FEES":"NRE + FRAIS D’INITIATION AU PROJET (PO 750003","NET PAY JANUARY '19":"Payeante Janvier'19","JANUARY'19":"JANVIER'19",
               "LUNCH VOUCHER- WITHHOLDING":"BON DÉJEUNER-RETENUE","HOLIDAY BONUS ACCRUAL FY18/19":"CUMUL DES PRIMES DE VACANCES EF18/19",
               "GROSS SALARY JANUARY '19":"SALAIRE BRUT JANVIER' 19","EMEA ACCRUAL P8FY19":"P8FY19 D’ACCUMULATION EMEA","COMMISSION RE-ACCRUAL":"COMMISSION RÉ-ACCUMULATION",
               "COMMISSION ACCRUAL":"COMMISSION D’ACCUMULATION","MARCH":"MARS","MAY":"MAI","APRIL":"AVRIL","AUDIT FEES":"HONORAIRES D’AUDIT",
               "UNSUBMITTED_UNPOSTED BOA ACCRUAL":"Accumulation BOA non soumise non exposée","UNASSIGNED CREDITCARD BOA ACCRUAL":"NON ASSIGNÉ CREDITCARD BOA ACCUMULATION ",
               "EMEA ACCRUAL":"ACCUMULATION EMEA","Exhibit Expenses":"Frais d'exposition","Hotel Tax":"Taxe hôtelière","Company Events":"Événements d'entreprise",
               "Public Transport":"Transport public", "Agency Booking Fees":"Frais de réservation d'agence","Working Meals (Employees Only)":"Repas de travail (employés seulement)",
               "Airfare":"Billet d'avion","Office Supplies":"Fournitures de bureau","Tolls":"Péages",
               "write off difference see e-mail attached":"radiation de la différence voir e-mail ci-joint",
               "Manual P/ment and double payment to be deduct":"P/ment manuel et double paiement à déduire","FX DIFFERENCE ON RSU":"DIFFERENCE FX SUR RSU",
               "DEFINED BENEFIT LIABILITY-TRUE UP":"RESPONSABILITÉ À PRESTATIONS DÉTERMINÉES-TRUE UP","EXTRA RELEASE FOR STORAGE REVERSED":"EXTRA LIBERATION POUR STOCKAGE CONTREPASSATION",
               "RECLASS BANK CHARGES TO CORRECT COST CEN":"RECLASSER LES FRAIS BANCAIRES POUR CORRIGER","PAYROLL INCOME TAXES":"IMPÔTS SUR LES SALAIRES",
               "TRAINING TAX TRUE UP":"TAXE DE FORMATION", "FX DIFFERENCE ON STOCK OPTION EXERCISES":"FX DIFFERENCE SUR LES EXERCICES D'OPTIONS STOCK",
               "Airline Frais":"Frais de Transport Aérien","Agency Booking Fees":"Frais de Réservation d'Agence","Computer Supplies":"Fournitures informatiques",
               "AUDIT FEES":"FRAIS D'AUDIT", "HOLIDAY BONUS ACCRUAL ":"ACCUMULATION DE BONUS DE VACANCES","TAX FEES":"FRAIS D'IMPÔT",
               "SOCIAL SECURITY: APPRENTICESHIP CONTRIBU":"SÉCURITÉ SOCIALE: CONTRIBUITION À L’APPRENTISSAGE",
               "SOCIAL SECURITY: TRAINING CONTRIBUTIONS":"SÉCURITÉ SOCIALE: CONTRIBUTIONS À LA FORMATION", "TRAVEL COST":"FRAIS DE VOYAGE", "HOUSING TAX":"TAXE SUR LE LOGEMENT",
               "PAYROLL INCOME TAXES":"IMPÔTS SUR LE REVENU DE LA PAIE","INCOME TAX-PAS":"IMPÔT SUR LE REVENU-PAS", "IC SETTLEMENT":"Règlement Interentreprises",
               "VACATION TAKEN":"VACANCES PRISES", "SOCIAL SECURITY: APPR. CONTR.":"SÉCURITÉ SOCIALE: CONTRIBUTION À L’APPRENTISSAGE",
               "POST OF AVRIL DEC IN CORRECT SIGN":"CORRECTION D'ECRITURE AVRIL DEC"}
# NOTE(review): duplicate dict keys ("TAX FEES", "AUDIT FEES", "Agency
# Booking Fees", "PAYROLL INCOME TAXES", ...) silently keep only the LAST
# value in a Python dict literal -- the earlier translations are dead.
gl = gl.replace({"PieceRef":mapping_Valuation1}, regex=True)
gl = gl.replace({"PieceRef":mapping_AA1}, regex=True)
# Ordered chain of substring substitutions on the "PieceRef" column,
# continuing the translation started by the mapping dicts above.
# NOTE(review): pandas Series.str.replace defaulted to regex=True before
# pandas 2.0, so patterns ending in "." ("Aug.", "Feb.", "AUGUST.") match
# any character in that position -- confirm the pandas version in use.
gl['PieceRef'] = gl["PieceRef"].str.replace('COST-PLUS', 'Revient Majoré')
gl['PieceRef'] = gl["PieceRef"].str.replace('PRITVAE HEALTH: MEDICAL INSURANCE', 'SANTÉ PRIVÉE: ASSURANCE MÉDICALE')
gl['PieceRef'] = gl["PieceRef"].str.replace('MEDICAL INSURANCE', 'ASSURANCE MÉDICALE')
gl['PieceRef'] = gl["PieceRef"].str.replace('UNASSIGNED', 'NON ATTRIBUÉ')
gl['PieceRef'] = gl["PieceRef"].str.replace('Payout', 'Paiement')
gl['PieceRef'] = gl["PieceRef"].str.replace('FRINGE COST', 'COÛT MARGINAL')
gl['PieceRef'] = gl["PieceRef"].str.replace('PROJECT INITIATION', 'LANCEMENT DU PROJET')
gl['PieceRef'] = gl["PieceRef"].str.replace('ACCRUAL', 'ACCUMULATION')
gl['PieceRef'] = gl["PieceRef"].str.replace('CREDITCARD', 'CARTE DE CRÉDIT')
gl['PieceRef'] = gl["PieceRef"].str.replace('ACCR ', 'ACCUM ')
gl['PieceRef'] = gl["PieceRef"].str.replace('VAT ', 'TVA ')
gl['PieceRef'] = gl["PieceRef"].str.replace('SOCIAL SECURITY ', 'SÉCURITÉ SOCIALE')
gl['PieceRef'] = gl["PieceRef"].str.replace('SEPTEMBER', 'SEPT')
gl['PieceRef'] = gl["PieceRef"].str.replace('TAXBACK', 'Reboursement')
gl['PieceRef'] = gl["PieceRef"].str.replace('REPORT', '')
gl['PieceRef'] = gl["PieceRef"].str.replace("Reverse Posting", "Contre Passation d'Ecriture")
gl['PieceRef'] = gl["PieceRef"].str.replace("BASE RENT", "Location Base")
gl['PieceRef'] = gl["PieceRef"].str.replace("Rent ", "Location ")
gl['PieceRef'] = gl["PieceRef"].str.replace("RENT ", "Location ")
gl['PieceRef'] = gl["PieceRef"].str.replace("CLEARING", "compensation ")
gl['PieceRef'] = gl["PieceRef"].str.replace("clearing", "compensation ")
gl['PieceRef'] = gl["PieceRef"].str.replace("BILLING CHARGES", "FRAIS DE FACTURATION ")
gl['PieceRef'] = gl["PieceRef"].str.replace("UNPAID", "NON PAYÉ")
gl['PieceRef'] = gl["PieceRef"].str.replace("PROPERTY TAX", "IMPÔT FONCIER ")
gl['PieceRef'] = gl["PieceRef"].str.replace("Trans. Using", "Conversion sur")
gl['PieceRef'] = gl["PieceRef"].str.replace("SALARIES", "Salaires")
gl['PieceRef'] = gl["PieceRef"].str.replace("Refund", "Remboursement")
gl['PieceRef'] = gl["PieceRef"].str.replace("REFUND", "Remboursement")
gl['PieceRef'] = gl["PieceRef"].str.replace("no invoice", "pas de facture")
gl['PieceRef'] = gl["PieceRef"].str.replace("COST-PLUS SERVICE REVENUE", "Revenus de service Revient Majoré")
gl['PieceRef'] = gl["PieceRef"].str.replace("SETTLEMENT", "RÈGLEMENT ")
gl['PieceRef'] = gl["PieceRef"].str.replace("PURCHASE", "ACHAT")
gl['PieceRef'] = gl["PieceRef"].str.replace("NON-CP SETTLE", "RÈGLEMENT NON-CP")
gl['PieceRef'] = gl["PieceRef"].str.replace("PAID ", " Payé ")
gl['PieceRef'] = gl["PieceRef"].str.replace("FEES ", "Frais")
# Month names, mixed-case first, then upper-case variants.
gl['PieceRef'] = gl["PieceRef"].str.replace("January", "Janvier")
gl['PieceRef'] = gl["PieceRef"].str.replace("February", "Février")
gl['PieceRef'] = gl["PieceRef"].str.replace("March", "Mars")
gl['PieceRef'] = gl["PieceRef"].str.replace("April", "Avril")
gl['PieceRef'] = gl["PieceRef"].str.replace("May", "Mai")
gl['PieceRef'] = gl["PieceRef"].str.replace("June", "Juin")
gl['PieceRef'] = gl["PieceRef"].str.replace("July", "Juillet")
gl['PieceRef'] = gl["PieceRef"].str.replace("September", "Septembre")
gl['PieceRef'] = gl["PieceRef"].str.replace("Aug.", "Août")
gl['PieceRef'] = gl["PieceRef"].str.replace("JANUARY", "Janvier")
gl['PieceRef'] = gl["PieceRef"].str.replace("FEBRUARY", "Février")
gl['PieceRef'] = gl["PieceRef"].str.replace("MARCH", "Mars")
gl['PieceRef'] = gl["PieceRef"].str.replace("APRIL", "Avril")
gl['PieceRef'] = gl["PieceRef"].str.replace("MAY", "Mai")
gl['PieceRef'] = gl["PieceRef"].str.replace("JUNE", "Juin")
gl['PieceRef'] = gl["PieceRef"].str.replace("JULY", "Juillet")
# NOTE(review): "SEPTEMBER" was already rewritten to "SEPT" earlier in this
# chain, so this rule is a no-op; likewise the repeated "Aug." rules below.
gl['PieceRef'] = gl["PieceRef"].str.replace("SEPTEMBER", "Septembre")
gl['PieceRef'] = gl["PieceRef"].str.replace("AUGUST.", "Août")
gl['PieceRef'] = gl["PieceRef"].str.replace("NOVEMBER.", "Novembre")
gl['PieceRef'] = gl["PieceRef"].str.replace("DECEMBER.", "Décembre")
gl['PieceRef'] = gl["PieceRef"].str.replace("December", "Décembre")
gl['PieceRef'] = gl["PieceRef"].str.replace("Feb.", "Fév.")
gl['PieceRef'] = gl["PieceRef"].str.replace("Mar.", "Mars")
gl['PieceRef'] = gl["PieceRef"].str.replace("Apr.", "Avril")
gl['PieceRef'] = gl["PieceRef"].str.replace("Aug.", "Août")
gl['PieceRef'] = gl["PieceRef"].str.replace("Aug.", "Août")
gl['PieceRef'] = gl["PieceRef"].str.replace("Reverse ", "Contre-passation ")
gl['PieceRef'] = gl["PieceRef"].str.replace("INTEREST CHARGE", "CHARGE D'INTÉRÊT")
gl['PieceRef'] = gl["PieceRef"].str.replace("-SICK LEAVE PAY", "-Paiement congé maladie")
gl['PieceRef'] = gl["PieceRef"].str.replace("RECLASSEMENTIFICATION", "RECLASSIFICATION")
gl['PieceRef'] = gl["PieceRef"].str.replace("INSTALMENT", "VERSEMENT")
gl['PieceRef'] = gl["PieceRef"].str.replace("FIRST", "1ere")
gl['PieceRef'] = gl["PieceRef"].str.replace("FINE LATE PAY.", "Amende pour retard de paiement")
gl['PieceRef'] = gl["PieceRef"].str.replace("-PATERNITY PAY", "Indemnités de paternité")
gl['PieceRef'] = gl["PieceRef"].str.replace("SOCIAL SECURITY:", "SÉCURITÉ SOCIALE:")
gl['PieceRef'] = gl["PieceRef"].str.replace("Trip from", "Voyage de:")
gl['PieceRef'] = gl["PieceRef"].str.replace(" To ", " à")
gl['PieceRef'] = gl["PieceRef"].str.replace("Shipping", "Livraison")
gl['PieceRef'] = gl["PieceRef"].str.replace("VOXEET INTEGRATION COSTS", "COÛTS D'INTÉGRATION DE VOXEET")
gl['PieceRef'] = gl["PieceRef"].str.replace("INCOME TAX", "IMPÔT SUR LE REVENU")
gl['PieceRef'] = gl["PieceRef"].str.replace('Rideshare', 'Covoiturage')
gl['PieceRef'] = gl["PieceRef"].str.replace('Travel Meals', 'Repas de Travail')
gl['PieceRef'] = gl["PieceRef"].str.replace('Fees', 'Frais')
gl['PieceRef'] = gl["PieceRef"].str.replace('Phone', 'Téléphone')
gl['PieceRef'] = gl["PieceRef"].str.replace("Books", "Abonnements")
# NOTE(review): "Subcriptions" -> "Location Base" looks like a copy/paste
# error (compare the "BASE RENT" rule); the French for subscriptions is
# "Abonnements".  Cannot change runtime strings in a doc-only pass.
gl['PieceRef'] = gl["PieceRef"].str.replace("Subcriptions", "Location Base")
gl['PieceRef'] = gl["PieceRef"].str.replace("Meals", "Repas")
gl['PieceRef'] = gl["PieceRef"].str.replace("Entertainment", "divertissement ")
gl['PieceRef'] = gl["PieceRef"].str.replace("Third Party", "tiers ")
# NOTE(review): "Frais d0 Formation" contains a typo ("d0" for "de").
gl['PieceRef'] = gl["PieceRef"].str.replace("Training Fees", "Frais d0 Formation")
gl['PieceRef'] = gl["PieceRef"].str.replace("Conferences/Tradeshows Registratio", "Conférences/Tradeshows Enregistrement")
gl['PieceRef'] = gl["PieceRef"].str.replace("FOR", "POUR")
gl['PieceRef'] = gl["PieceRef"].str.replace("ROUNDING", "ARRONDISSEMENT")
gl['PieceRef'] = gl["PieceRef"].str.replace("STORAGE", "STOCKAGE")
gl['PieceRef'] = gl["PieceRef"].str.replace("VACATION ACCURAL", "Vacances Accumulées")
gl['PieceRef'] = gl["PieceRef"].str.replace("RECEIVABLE ", "Recevables")
gl['PieceRef'] = gl["PieceRef"].str.replace("AFTER PAYOUT ", "APRÈS PAIEMENT")
gl['PieceRef'] = gl["PieceRef"].str.replace("CLEAN UP ", "APUREMENT")
gl['PieceRef'] = gl["PieceRef"].str.replace("EMPLOYEE TRAVEL INSUR ", "ASSURANCE DE VOYAGE DES EMPLOYÉS")
gl['PieceRef'] = gl["PieceRef"].str.replace("CORRECTION OF", "CORRECTION DE")
gl['PieceRef'] = gl["PieceRef"].str.replace("TAXES PAYROLL", "IMPÔTS SUR LA MASSE SALARIALE")
gl['PieceRef'] = gl["PieceRef"].str.replace("ACCOUNT", "COMPTE")
gl['PieceRef'] = gl["PieceRef"].str.replace("TAX", "Impôt")
gl['PieceRef'] = gl["PieceRef"].str.replace("life disab", "Incapacité de vie")
# NOTE(review): dead rule -- "TAX" was just rewritten to "Impôt" above, so
# "HOUSING TAX" can no longer occur at this point in the chain.
gl['PieceRef'] = gl["PieceRef"].str.replace("HOUSING TAX","TAXE D'HABITATION")
gl['PieceRef'] = gl["PieceRef"].str.replace("GROSS SALARY","SALAIRE BRUT")
gl['PieceRef'] = gl["PieceRef"].str.replace("Cleaning Services","Nettoyage")
gl['PieceRef'] = gl["PieceRef"].str.replace("Freight","Fret")
gl['PieceRef'] = gl["PieceRef"].str.replace("Membership","adhésion")
gl['PieceRef'] = gl["PieceRef"].str.replace("Air cooling Maintenance","Entretien de refroidissement de l'air")
gl['PieceRef'] = gl["PieceRef"].str.replace("Power on Demand Platform","Plateforme d'energie à la demande")
gl['PieceRef'] = gl["PieceRef"].str.replace("Sanitaire room installation"," Installation de la salle sanitaire")
gl['PieceRef'] = gl["PieceRef"].str.replace("subscription","abonnement")
gl['PieceRef'] = gl["PieceRef"].str.replace("Coffee supplies "," Fournitures de café")
gl['PieceRef'] = gl["PieceRef"].str.replace("Duty and Tax ","Devoir et fiscalité")
gl['PieceRef'] = gl["PieceRef"].str.replace("Electricity ","Electricité ")
gl['PieceRef'] = gl["PieceRef"].str.replace("Lunch vouchers ","Bons déjeuner")
gl['PieceRef'] = gl["PieceRef"].str.replace("Security monitoring","Surveillance de la sécurité")
gl['PieceRef'] = gl["PieceRef"].str.replace("Water", "L'EAU")
gl['PieceRef'] = gl["PieceRef"].str.replace("Statutory Audit", "Audit statutaire")
gl['PieceRef'] = gl["PieceRef"].str.replace(" Meeting room screen installation", "Installation de l'écran de la salle de réunion")
# NOTE(review): the next two "Water" rules are redundant duplicates of the
# one above (harmless no-ops).
gl['PieceRef'] = gl["PieceRef"].str.replace("Water", "L'EAU")
gl['PieceRef'] = gl["PieceRef"].str.replace("Water", "L'EAU")
gl['PieceRef'] = gl["PieceRef"].str.replace("Tax Credit FY 2016", "Crédit d'impôt Exercice 2016")
gl['PieceRef'] = gl["PieceRef"].str.replace("Bank of America Merill Lynch-T&E statement","Déclaration de Merill Lynch")
gl['PieceRef'] = gl["PieceRef"].str.replace("English Translation", "Traduction anglaise")
gl['PieceRef'] = gl["PieceRef"].str.replace("Office Rent", "Location de Bureau")
gl['PieceRef'] = gl["PieceRef"].str.replace("Annual Electrical Verification", "Vérification électrique annuelle ")
gl['PieceRef'] = gl["PieceRef"].str.replace("Health costs ", "Coûts santé")
gl['PieceRef'] = gl["PieceRef"].str.replace("Unlimited-receipt and policy audit", "Vérification illimitée des reçus et audites")
gl['PieceRef'] = gl["PieceRef"].str.replace("Water fountain | |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A deep MNIST classifier using convolutional layers.
See extensive documentation at
https://www.tensorflow.org/get_started/mnist/pros
"""
# Disable linter warnings to maintain consistency with tutorial.
# pylint: disable=invalid-name
# pylint: disable=g-bad-import-order
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import os
import time
import warnings
from sklearn.model_selection import train_test_split
from ops import bn
LEARNING_RATE = 1e-5
from matplotlib.legend_handler import HandlerLine2D
import argparse
import pickle
warnings.filterwarnings("ignore", category=RuntimeWarning)
import numpy as np
import tensorflow as tf
from sklearn.utils import shuffle
from utils import load_mnist
FLAGS = None
np.random.seed(517)
CONFIDENCE_THRESHOLD = 0.98
# losses
def one_hot_encoder(data):
    """Convert a vector of class indices into an (N, 10) one-hot float matrix."""
    indices = data.astype(np.int32)
    # Row i of the identity matrix is exactly the one-hot vector for class i.
    return np.eye(10)[indices]
def conv2d(x, W):
    """Apply a 2-D convolution of filter W over x with unit stride and SAME padding."""
    unit_stride = [1, 1, 1, 1]
    return tf.nn.conv2d(x, W, unit_stride, 'SAME')
def max_pool_2x2(x):
    """Halve both spatial dimensions of x via non-overlapping 2x2 max pooling."""
    window = [1, 2, 2, 1]
    return tf.nn.max_pool(x, window, window, 'SAME')
def weight_variable(shape):
    """Create a trainable weight Variable of `shape`, truncated-normal init (stddev 0.1)."""
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))
def bias_variable(shape):
    """Create a trainable bias Variable of `shape`, initialized to the constant 0.1."""
    return tf.Variable(tf.constant(0.1, shape=shape))
def variable_summaries(var, name):
    """Attach mean/stddev/max/min scalar summaries and a histogram of `var`
    for TensorBoard visualization, grouped under name scope `name`.

    Fix: removed the redundant trailing `pass` statement (dead code).
    """
    with tf.name_scope(name):
        mean = tf.reduce_mean(var)
        tf.summary.scalar('mean', mean)
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.summary.scalar('stddev', stddev)
        tf.summary.scalar('max', tf.reduce_max(var))
        tf.summary.scalar('min', tf.reduce_min(var))
        tf.summary.histogram('histogram', var)
class CNNClassifier():
    """TF1 convolutional classifier: 2x(5x5 conv + 2x2 maxpool) -> FC(1024) -> FC(10).

    Trains on (data_X, data_y) and evaluates on (test_X, test_y).  Weights are
    pickled to "<classifier_name>_classifier.pkl"; the constructor first tries
    to reload them and falls back to fresh initialization.
    """
    def __init__(self, classifier_name, pkl_fname=None, data_X=None, data_y=None, test_X=None, test_y=None, save_model=False, seed=88):
        # Bookkeeping and hyper-parameters.
        self.seed = seed
        self.is_save_model = save_model
        self.num_epochs = 50
        self.classifier_name = classifier_name
        self.log_dir = 'logs/{}/'.format(classifier_name)
        self.batch_size = 64
        self.dropout_prob = 0.7  # keep_prob fed to dropout during training
        self.save_to = classifier_name + "_classifier.pkl"
        self.lamb = 1e-3  # L2 regularization strength
        self.c_dim = 1  # input channels
        self.accuracy_list = []
        self.loss_list = []
        self.confidence_list = []
        self.IMAGE_WIDTH = 28
        self.IMAGE_HEIGHT = 28
        self.fname = pkl_fname
        # pkl_label_path = "{}{}/edited_labels_{}.pkl".format(dir, dir_results, pkl_fname)
        # pkl_path = "{}{}/edited_training_set_{}.pkl".format(dir, dir_results, pkl_fname)
        self.set_log_dir("{}_".format(pkl_fname))
        # if load_from_pkl:
        #     self.data_X = pickle.load(open(pkl_path, 'rb'))
        #     self.data_y = pickle.load(open(pkl_label_path, 'rb'))
        self.train_X = data_X
        self.train_y = data_y
        self.test_X = test_X
        self.test_y = test_y
        # if self.classifier_name == 'mnist' or self.classifier_name == 'fashion-mnist':
        #     # mnist = input_data.read_data_sets('../data/mnist', one_hot=True)
        #     self.train_X, self.train_y = load_mnist(self.classifier_name)
        #     self.train_X, self.test_X, self.train_y, self.test_y = train_test_split(data_X, data_y, test_size=0.1, random_state=10)
        # elif self.classifier_name == "cifar10":
        #     self.IMAGE_WIDTH = 32
        #     self.IMAGE_HEIGHT = 32
        #     self.c_dim = 3
        #     self.data_X, self.data_y, self.test_images, self.test_labels = get_train_test_data()
        #     self.test_images = self.test_images.reshape(-1, 1024)
        # # get number of batches for a single epoch
        # self.num_batches = len(self.data_X) // self.batch_size
        # init_variables try to load from pickle:
        try:
            self.load_model()
        # NOTE(review): bare except silently falls back to fresh weights on
        # ANY error (e.g. a corrupt pickle), not just a missing file --
        # consider narrowing to (IOError, OSError).
        except:
            # Model params
            self.W_conv1 = weight_variable([5, 5, self.c_dim, 32])
            self.b_conv1 = bias_variable([32])
            self.W_conv2 = weight_variable([5, 5, 32, 64])
            self.b_conv2 = bias_variable([64])
            self.W_fc1 = weight_variable([int(self.IMAGE_HEIGHT // 4) * int(self.IMAGE_HEIGHT // 4) * 64, 1024])
            self.b_fc1 = bias_variable([1024])
            self.W_fc2 = weight_variable([1024, 10])
            self.b_fc2 = bias_variable([10])
        self._create_model()

    def set_log_dir(self, log_dir_name):
        """Point log output at logs/<log_dir_name>."""
        self.log_dir = "logs/{}".format(log_dir_name)

    def _deepcnn(self, x, keep_prob):
        """Build the forward graph for flat input x; returns the (N, 10) logits."""
        x_image = tf.reshape(x, [-1, self.IMAGE_WIDTH, self.IMAGE_HEIGHT, self.c_dim])
        h_conv1 = tf.nn.relu(conv2d(x_image, self.W_conv1) + self.b_conv1)
        h_pool1 = max_pool_2x2(h_conv1)
        h_conv2 = tf.nn.relu(conv2d(h_pool1, self.W_conv2) + self.b_conv2)
        h_pool2 = max_pool_2x2(h_conv2)
        # Two pooling layers shrink each spatial dim by 4.
        h_pool2_flat = tf.reshape(h_pool2, [-1, int(self.IMAGE_HEIGHT / 4) * int(self.IMAGE_HEIGHT / 4) * 64])
        h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, self.W_fc1) + self.b_fc1)
        h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
        y_conv = tf.matmul(h_fc1_drop, self.W_fc2) + self.b_fc2
        # summary
        # variable_summaries(self.W_conv1, 'W_conv1')
        # variable_summaries(self.W_conv2, 'W_conv2')
        # variable_summaries(self.b_conv1, 'b_conv1')
        # variable_summaries(self.b_conv2, 'b_conv2')
        # variable_summaries(self.W_fc1, 'W_fc1')
        # variable_summaries(self.W_fc2, 'W_fc2')
        # variable_summaries(self.b_fc1, 'b_fc1')
        # variable_summaries(self.b_fc2, 'b_fc2')
        return y_conv

    def _create_model(self):
        """Build placeholders, loss, optimizer, metrics, and start a session."""
        self.x = tf.placeholder(tf.float32, [None, self.IMAGE_HEIGHT * self.IMAGE_WIDTH], name="data")
        self.y_ = tf.placeholder(tf.float32, [None, 10], name="labels")
        self.keep_prob = tf.placeholder(tf.float32, name="dropout")
        # Build the graph for the deep net
        self.y_conv = self._deepcnn(self.x, self.keep_prob)
        # loss
        cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(labels=self.y_, logits=self.y_conv)
        # NOTE(review): W_conv1 appears TWICE in this L2 term and W_conv2 not
        # at all -- looks like a copy/paste bug; confirm before changing,
        # since fixing it alters training behavior.
        self.l2_regularization = self.lamb * tf.nn.l2_loss(self.W_conv1) + self.lamb * tf.nn.l2_loss(self.W_conv1) + self.lamb * tf.nn.l2_loss(
            self.W_fc1) + self.lamb * tf.nn.l2_loss(self.W_fc2)
        cross_entropy = tf.reduce_mean(cross_entropy)
        # Expose the UN-regularized loss for reporting; optimize the regularized one.
        self.cross_entropy = cross_entropy
        cross_entropy += self.l2_regularization
        # tf.summary.scalar('cross_entropy', cross_entropy)
        self.train_step = tf.train.AdamOptimizer(LEARNING_RATE).minimize(cross_entropy)
        correct_prediction = tf.equal(tf.argmax(self.y_conv, 1), tf.argmax(self.y_, 1))
        correct_prediction = tf.cast(correct_prediction, tf.float32)
        self.accuracy = tf.reduce_mean(correct_prediction)
        # tf.summary.scalar('accuracy', self.accuracy)
        # self.confidence = tf.cast(tf.reduce_mean(tf.reduce_max(tf.nn.softmax(self.y_conv), axis=-1), axis=0), tf.float32)
        # Per-example max softmax probability (shape (N,)).
        self.confidence = tf.cast(tf.reduce_max(tf.nn.softmax(self.y_conv), axis=-1), tf.float32)
        # tf.summary.scalar('confidence', self.confidence)
        self.argmax = tf.argmax(self.y_conv, 1)
        # graph_location = self.log_dir + 'train'
        # graph_location_test = self.log_dir + 'test'
        # self.merged = tf.summary.merge_all()
        # print('Saving graph to: %s' % graph_location)
        # self.train_writer = tf.summary.FileWriter(graph_location)
        # self.test_writer = tf.summary.FileWriter(graph_location_test)
        self.sess = tf.Session()
        self.sess.run(tf.global_variables_initializer())
        # self.train_writer.add_graph(self.sess.graph)
        # self.test_writer.add_graph(self.sess.graph)

    def train(self, confidence_in_train=False, confidence_thresh=0.9):
        """Run mini-batch training for num_epochs; returns the accuracy history.

        When `confidence_in_train` is False (the default), batches are only
        trained on when the model's confidence exceeds `confidence_thresh`.
        """
        use_confidence = not confidence_in_train
        start_batch_id = 0  # int(1000 / self.batch_size)
        self.num_batches = min(len(self.train_X) // self.batch_size, 4000)
        if self.fname is not None:
            print("START TRAINING:{}".format(self.fname))
        start_time = time.time()
        for epoch in range(self.num_epochs):
            for i in range(start_batch_id, self.num_batches):
                X_batch = self.train_X[i * self.batch_size:(i + 1) * self.batch_size].reshape(-1, self.IMAGE_WIDTH * self.IMAGE_HEIGHT)
                y_batch = self.train_y[i * self.batch_size:(i + 1) * self.batch_size]
                # plt.title(y_batch[0])
                # plt.imshow(X_batch[0].reshape(28, 28))
                # plt.show()
                # NOTE(review): this parses as (i % self.num_batches) - 1 == 0,
                # i.e. the eval branch fires when i % num_batches == 1; the
                # intent was likely i % (self.num_batches - 1) == 0 -- confirm.
                if i % self.num_batches - 1 == 0:
                    self.test_y, self.test_X = shuffle(self.test_y, self.test_X, random_state=self.seed)
                    accuracy, confidence, loss = self.test(self.test_X.reshape(-1, 784), self.test_y.reshape(-1, 10), epoch * i)
                    # summary, _ = self.sess.run([self.merged, self.train_step],
                    #                            feed_dict={self.x: X_batch, self.y_: y_batch, self.keep_prob: 1.})
                    # self.train_writer.add_summary(summary, i)
                    _ = self.sess.run([self.train_step], feed_dict={self.x: X_batch, self.y_: y_batch, self.keep_prob: self.dropout_prob})
                    print('epoch{}: step{}/{}'.format(epoch, i, self.num_batches))
                    print("time: %4.4f" % (time.time() - start_time))
                    print('accuracy:{}, mean_confidence:{}, loss:{}'.format(accuracy, np.mean(confidence), loss))
                    self.accuracy_list.append(accuracy)
                else:
                    if not use_confidence:
                        self.train_step.run(session=self.sess, feed_dict={self.x: X_batch, self.y_: y_batch, self.keep_prob: self.dropout_prob})
                    else:
                        # Only train on the examples the model is already confident about.
                        accuracy, confidence, loss = self.test(X_batch, y_batch, epoch * i)
                        high_confidence_threshold_indices = confidence >= confidence_thresh
                        if len(high_confidence_threshold_indices[high_confidence_threshold_indices]) > 0:
                            _ = self.sess.run([self.train_step], feed_dict={self.x: X_batch[high_confidence_threshold_indices], self.y_: y_batch[high_confidence_threshold_indices],
                                                                            self.keep_prob: self.dropout_prob})
                        else:
                            print("skipping confidence low max_confidence ={}".format(np.max(confidence)))
        if self.is_save_model:
            self.save_model()
        # self.plot_train_test_loss("accuracy", self.accuracy_list)
        return self.accuracy_list
        # self.plot_train_test_loss("confidence", self.confidence_list)
        # self.plot_train_test_loss("loss", self.loss_list)

    def test(self, test_batch, test_labels, counter=0, is_arg_max=False):
        """Evaluate a batch (dropout disabled).

        Returns (accuracy, per-example confidence, loss) and additionally the
        predicted class indices when `is_arg_max` is True.
        """
        if is_arg_max:
            accuracy, confidence, loss, arg_max = self.sess.run([self.accuracy, self.confidence, self.cross_entropy, self.argmax],
                                                                feed_dict={self.x: test_batch, self.y_: test_labels, self.keep_prob: 1.})
            print("argmax:{}".format(arg_max))
            # self.test_writer.add_summary(summary, counter)
            print('step {}: accuracy:{}, mean_confidence:{}, loss:{}'.format(counter, accuracy, np.mean(confidence), loss))
            return accuracy, confidence, loss, arg_max
        else:
            accuracy, confidence, loss = self.sess.run([self.accuracy, self.confidence, self.cross_entropy],
                                                       feed_dict={self.x: test_batch.reshape(-1, 784), self.y_: test_labels, self.keep_prob: 1.})
            # self.test_writer.add_summary(summary, counter)
            # print('step {}: accuracy:{}, confidence:{}, loss:{}'.format(counter, accuracy, confidence, loss))
            return accuracy, confidence, loss

    def save_model(self):
        """Pickle the eight weight/bias tensors (as numpy arrays) to self.save_to."""
        # Save the model for a pickle
        pickle.dump([self.sess.run(self.W_conv1), self.sess.run(self.b_conv1), self.sess.run(self.W_conv2), self.sess.run(self.b_conv2), self.sess.run(self.W_fc1),
                     self.sess.run(self.b_fc1), self.sess.run(self.W_fc2), self.sess.run(self.b_fc2)], open(self.save_to, 'wb'))
        print("Model has been saved!")

    def load_model(self):
        """Rebuild the weight Variables from the pickle written by save_model().

        The pickle order must match save_model(): W_conv1, b_conv1, W_conv2,
        b_conv2, W_fc1, b_fc1, W_fc2, b_fc2.
        """
        model = pickle.load(open(self.save_to, 'rb'))
        self.W_conv1 = tf.Variable(tf.constant(model[0]))
        self.b_conv1 = tf.Variable(tf.constant(model[1]))
        self.W_conv2 = tf.Variable(tf.constant(model[2]))
        self.b_conv2 = tf.Variable(tf.constant(model[3]))
        self.W_fc1 = tf.Variable(tf.constant(model[4]))
        self.b_fc1 = tf.Variable(tf.constant(model[5]))
        self.W_fc2 = tf.Variable(tf.constant(model[6]))
        self.b_fc2 = tf.Variable(tf.constant(model[7]))
        print("\nmodel has been loaded from {}\n".format(self.save_to))

    def plot_train_test_loss(self, name_of_measure, array, color="b", marker="P"):
        """Plot a metric curve, pickle the raw values, and save the figure as PNG."""
        # NOTE(review): plt.Figure() constructs a detached Figure object that
        # is immediately discarded; plt.figure() was probably intended.
        plt.Figure()
        plt.title('{} {} score'.format(self.fname, name_of_measure), fontsize=18)
        x_range = np.linspace(1, len(array) - 1, len(array))
        measure, = plt.plot(x_range, array, color=color, marker=marker, label=name_of_measure, linewidth=2)
        plt.legend(handler_map={measure: HandlerLine2D(numpoints=1)})
        plt.legend(bbox_to_anchor=(1.05, 1), loc=0, borderaxespad=0.)
        plt.yscale('linear')
        plt.xlabel('Epoch')
        plt.ylabel('Score')
        plt.grid()
        plt.show()
        name_figure = "classifier_results_seed_{}/classifier_MMinfoGAN_{}_{}".format(self.seed, self.fname, name_of_measure)
        pickle.dump(array, open("{}.pkl".format(name_figure), 'wb'))
        plt.savefig(name_figure + ".png")
        plt.close()

    def save_and_plot_results_cv(self, name_of_measure, array, color="b", marker="P"):
        """Pickle a 2-D cross-validation metric array and plot its per-epoch mean."""
        name_figure = "classifier_results_seed_{}/classifier_MMinfoGAN_{}_{}".format(self.seed, self.fname, name_of_measure)
        pickle.dump(array, open("{}.pkl".format(name_figure), 'wb'))
        # NOTE(review): plt.Figure() here too -- see plot_train_test_loss.
        plt.Figure()
        plt.title('{} {} score'.format(self.fname, name_of_measure), fontsize=18)
        mean = np.mean(array, axis=0)
        x_range = np.linspace(1, len(mean) - 1, len(mean))
        measure, = plt.plot(x_range, mean, color=color, marker=marker, label=name_of_measure, linewidth=2)
        plt.legend(handler_map={measure: HandlerLine2D(numpoints=1)})
        plt.legend(bbox_to_anchor=(1.05, 1), loc=0, borderaxespad=0.)
        plt.yscale('linear')
        plt.xlabel('Epoch')
        plt.ylabel('Score')
        plt.grid()
        plt.show()
        plt.savefig(name_figure + ".png")
        plt.close()
        print("pkl has been sSaved! ", name_figure)
def parse_args():
    """Build and parse the command-line arguments.

    Bug fix: argparse's ``type=bool`` is broken for flags — ``bool(s)`` is True
    for ANY non-empty string (including "False"), and the original string
    defaults ``"False"``/``"True"`` were likewise both truthy, so
    ``use_confidence`` could never be disabled.  A small converter parses the
    textual value, and the defaults are real booleans matching the originally
    intended values (use_confidence=False, train_model=True, preprocess=False).
    """
    desc = "Tensorflow implementation of GAN collections"

    def str2bool(value):
        # Accept common spellings of "true" on the command line; everything
        # else (including "False", "0", "no") parses as False.
        return str(value).strip().lower() in ('true', '1', 'yes', 'y', 't')

    parser = argparse.ArgumentParser(description=desc)
    parser.add_argument('--dir_name', type=str, default='')
    parser.add_argument('--preprocess', type=str2bool, default=False)
    parser.add_argument('--fname', type=str, default='fashion-mnist_MultivariateGaussianSampler')
    parser.add_argument('--original', type=str, default="mnist")
    parser.add_argument('--use_confidence', type=str2bool, default=False)
    parser.add_argument('--confidence_thresh', type=float, default=0.9)
    parser.add_argument('--train_model', type=str2bool, default=True)
    parser.add_argument('--seed', type=int, default=88)
    return parser.parse_args()
# def preprocess_data(dir, pkl_fname, original_dataset_name='mnist', batch_size=64, dir_results="classifier_results_seed_{}".format(SEED)):
# # mapping only once need to edit the condition
# if not os.path.exists(dir_results):
# os.makedirs(dir_results)
# pkl_label_path = "{}{}/generated_labels_{}.pkl".format(dir, dir_results, pkl_fname)
# pkl_path = "{}{}/generated_training_set_{}.pkl".format(dir, dir_results, pkl_fname)
# data_X = pickle.load(open(pkl_path, 'rb'))
# data_y = pickle.load(open(pkl_label_path, 'rb'))
#
# data_X = np.asarray([y for x in data_X for y in x]).reshape(-1, 28, 28)
#
# data_y = np.asarray(data_y, dtype=np.int32).flatten()
#
# data_y_categorical = data_y
# # data_y = one_hot_encoder(data_y)
# pretraind = CNNClassifier(original_dataset_name, original_dataset_name=original_dataset_name)
# # indices = np.argwhere(data_y == 1)
# # low_confidence_indices = []
# for current_label in range(10):
# mask = data_y == current_label # (indices[:, 1] == current_label)
# limit = min(len(data_X) // 10, 10000)
# # confident = False
# # offset = 0
# # while not confident:
# # small_data_X = data_X[np.where(mask == True)][offset:limit+offset]
# data_X_for_current_label = data_X[np.where(mask == True)]
# dummy_labels = one_hot_encoder(np.random.randint(0, 10, size=(limit))) # no meaning for the labels
# _, confidence, _, arg_max = pretraind.test(data_X_for_current_label[:limit].reshape(-1, 784), dummy_labels.reshape(-1, 10), is_arg_max=True)
# argwhere = np.argwhere(confidence < CONFIDENCE_THRESHOLD).flatten()
# # confidence_threshold_idx = confidence > CONFIDENCE_THRESHOLD #min(CONFIDENCE_THRESHOLD, np.max(confidence) - 0.001)
# # offset+=50
# # if np.count_nonzero(confidence_threshold_idx) > 30:
# # confident=True
#
# # arg_max = arg_max[confidence_threshold_idx]
# print(str(len(arg_max)) + " were taken")
#
# # low_confidence_indices.extend(argwhere)
#
# new_label = np.bincount(arg_max).argmax()
# print("Assinging:{}".format(new_label))
# # plt.title("old_label=" + str(current_label) + "new_label=" + str(new_label))
# # plt.imshow(data_X_for_current_label[0].reshape(28, 28))
# # plt.show()
# data_y_categorical[mask] = new_label
# print(np.bincount(arg_max))
# # if len(low_confidence_indices) > 0:
# # low_confidence_indices = np.asarray(low_confidence_indices)
# # mask_not_take = np.ones_like(low_confidence_indices,dtype=bool) #np.ones_like(a,dtype=bool)
# # mask_not_take[low_confidence_indices] = False
# # data_y_categorical= data_y_categorical[~low_confidence_indices]
# # data_X = data_X[~mask_not_take]
# data_y = one_hot_encoder(data_y_categorical)
# # data_X, data_y = shuffle(data_X, data_y, random_state=0)
# pickle.dump(data_y, open("{}{}/edited_labels_{}.pkl".format(dir, dir_results, pkl_fname), 'wb'))
# pickle.dump(data_X, open("{}{}/edited_training_set_{}.pkl".format(dir, dir_results, pkl_fname), 'wb'))
def main():
# parse arguments
args = parse_args()
if args is None:
exit()
train_classifier_for_generated_data = args.train_model
fname = args.fname
dir = args.dir_name
original_dataset_name = args.original
do_preprocess = args.preprocess
confidence_in_train = args.use_confidence
confidence_thresh = args.confidence_thresh
seed = args.seed
dir_results = 'classifier_results_seed_{}'.format(seed)
pkl_label_path = "{}{}/edited_labels_{}.pkl".format(dir, dir_results, fname)
pkl_path = "{}{}/edited_training_set_{}.pkl".format(dir, dir_results, fname)
data_X = pickle.load(open(pkl_path, 'rb'))
data_y = pickle.load(open(pkl_label_path, 'rb'))
data_X_real, data_y_real = load_mnist(original_dataset_name)
X_train_real, X_test_real, y_train_real, y_test_real = train_test_split(data_X_real, data_y_real, test_size=0.2, random_state=seed)
# print("X_train_real={}, data_X={}, y_test_real={}, y_test={}".format(len(X_train_real), len(data_X), len(y_test_real), len(data_y)))
# len_dataX = min(len(X_train_real), len(data_X))
# data_X = data_X[:len_dataX]
# data_y = data_y[:len_dataX]
# X_train = np.append(data_X, X_train_real).reshape(-1, 784)
# y_train = np.append(data_y, y_train_real.reshape(-1, 10)).reshape(-1, 10)
# X_test | |
<reponame>HiKapok/DAN
# Copyright 2018 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import math
import tensorflow as tf
import numpy as np
from tensorflow.contrib.image.python.ops import image_ops
from utility import custom_op
def areas(gt_bboxes):
    """Pixel area of each box in an [N, 4] (ymin, xmin, ymax, xmax) tensor."""
    with tf.name_scope('bboxes_areas', [gt_bboxes]):
        ymin, xmin, ymax, xmax = tf.split(gt_bboxes, 4, axis=1)
        # +1 because coordinates are inclusive pixel indices.
        height = ymax - ymin + 1.
        width = xmax - xmin + 1.
        return width * height
def intersection(gt_bboxes, default_bboxes):
    """Pairwise intersection areas between gt_bboxes (rows) and default_bboxes (cols)."""
    with tf.name_scope('bboxes_intersection', [gt_bboxes, default_bboxes]):
        # Column vectors, shape [num_gt, 1].
        ymin, xmin, ymax, xmax = tf.split(gt_bboxes, 4, axis=1)
        # Row vectors, shape [1, num_default] — broadcasting against the
        # columns above yields the full [num_gt, num_default] grid.
        gt_ymin, gt_xmin, gt_ymax, gt_xmax = [
            tf.transpose(coord, perm=[1, 0])
            for coord in tf.split(default_bboxes, 4, axis=1)
        ]
        # Clamp at zero so disjoint boxes contribute no area.
        overlap_h = tf.maximum(tf.minimum(ymax, gt_ymax) - tf.maximum(ymin, gt_ymin) + 1., 0.)
        overlap_w = tf.maximum(tf.minimum(xmax, gt_xmax) - tf.maximum(xmin, gt_xmin) + 1., 0.)
        return overlap_h * overlap_w
def iou_matrix(gt_bboxes, default_bboxes):
    """IoU between every gt box and every default box; 0 where the union is empty."""
    with tf.name_scope('iou_matrix', [gt_bboxes, default_bboxes]):
        inter_vol = intersection(gt_bboxes, default_bboxes)
        # union = A + B - A∩B, broadcast to the full matrix.
        union_vol = (areas(gt_bboxes)
                     + tf.transpose(areas(default_bboxes), perm=[1, 0])
                     - inter_vol)
        # Guard union == 0 so degenerate pairs give 0 instead of NaN.
        return tf.where(tf.equal(union_vol, 0.0),
                        tf.zeros_like(inter_vol),
                        tf.truediv(inter_vol, union_vol))
def do_dual_max_match(overlap_matrix, low_thres, high_thres, ignore_between=True, gt_max_first=True):
    '''Dual-direction anchor/ground-truth matching on the transposed overlap matrix
    (num_anchors x num_gt layout), which may be faster due to cache friendliness.

    Args:
        overlap_matrix: num_anchors * num_gt overlap (IoU) scores
        low_thres: anchors whose best overlap is below this are negatives
        high_thres: anchors whose best overlap is below this (but >= low_thres)
            fall in the "between" band
        ignore_between: if True, the between band is ignored and only the low
            band is negative; if False the roles are swapped
        gt_max_first: if True, each ground truth's best anchor wins even when
            the anchor already has a positive match from its own side
    Returns:
        (match_indices, selected_scores): per-anchor gt index (-1 negative,
        -2 ignored) and the overlap score of the chosen pairing.
    '''
    with tf.name_scope('dual_max_match', [overlap_matrix]):
        # first match from anchors' side: each anchor picks its best gt
        anchors_to_gt = tf.argmax(overlap_matrix, axis=1)
        # the matching degree (the best overlap value per anchor)
        match_values = tf.reduce_max(overlap_matrix, axis=1)

        #positive_mask = tf.greater(match_values, high_thres)
        less_mask = tf.less(match_values, low_thres)
        between_mask = tf.logical_and(tf.less(match_values, high_thres), tf.greater_equal(match_values, low_thres))
        # Which band is "negative" vs "ignored" is controlled by ignore_between.
        negative_mask = less_mask if ignore_between else between_mask
        ignore_mask = between_mask if ignore_between else less_mask
        # comment following two lines
        # over_pos_mask = tf.greater(match_values, 0.7)
        # ignore_mask = tf.logical_or(ignore_mask, over_pos_mask)
        # fill all negative positions with -1, all ignore positions is -2
        match_indices = tf.where(negative_mask, -1 * tf.ones_like(anchors_to_gt), anchors_to_gt)
        match_indices = tf.where(ignore_mask, -2 * tf.ones_like(match_indices), match_indices)

        # negtive values has no effect in tf.one_hot, that means all zeros along that axis
        # so all positive match positions in anchors_to_gt_mask is 1, all others are 0
        anchors_to_gt_mask = tf.one_hot(tf.clip_by_value(match_indices, -1, tf.cast(tf.shape(overlap_matrix)[1], tf.int64)),
                                        tf.shape(overlap_matrix)[1], on_value=1, off_value=0, axis=1, dtype=tf.int32)
        # match from ground truth's side: each gt picks its best anchor
        gt_to_anchors = tf.argmax(overlap_matrix, axis=0)
        gt_to_anchors_overlap = tf.reduce_max(overlap_matrix, axis=0, keepdims=True)

        #gt_to_anchors = tf.Print(gt_to_anchors, [tf.equal(overlap_matrix, gt_to_anchors_overlap)], message='gt_to_anchors_indices:', summarize=100)
        # the max match from ground truth's side has higher priority
        left_gt_to_anchors_mask = tf.equal(overlap_matrix, gt_to_anchors_overlap)#tf.one_hot(gt_to_anchors, tf.shape(overlap_matrix)[0], on_value=True, off_value=False, axis=0, dtype=tf.bool)
        if not gt_max_first:
            # the max match from anchors' side has higher priority
            # use match result from ground truth's side only when the the matching degree from anchors' side is lower than position threshold
            # NOTE(review): uses the older `keep_dims` kwarg here but `keepdims`
            # above — both spellings must be valid in the pinned TF version; confirm.
            left_gt_to_anchors_mask = tf.logical_and(tf.reduce_max(anchors_to_gt_mask, axis=0, keep_dims=True) < 1, left_gt_to_anchors_mask)
        # can not use left_gt_to_anchors_mask here, because there are many ground truthes match to one anchor, we should pick the highest one even when we are merging matching from ground truth side
        left_gt_to_anchors_mask = tf.to_int64(left_gt_to_anchors_mask)
        left_gt_to_anchors_scores = overlap_matrix * tf.to_float(left_gt_to_anchors_mask)
        # merge matching results from ground truth's side with the original matching results from anchors' side
        # then select all the overlap score of those matching pairs
        selected_scores = tf.gather_nd(overlap_matrix, tf.stack([tf.range(tf.cast(tf.shape(overlap_matrix)[0], tf.int64)),
                                                                tf.where(tf.reduce_max(left_gt_to_anchors_mask, axis=1) > 0,
                                                                        tf.argmax(left_gt_to_anchors_scores, axis=1),
                                                                        anchors_to_gt)], axis=1))
        # return the matching results for both foreground anchors and background anchors, also with overlap scores
        return tf.where(tf.reduce_max(left_gt_to_anchors_mask, axis=1) > 0,
                        tf.argmax(left_gt_to_anchors_scores, axis=1),
                        match_indices), selected_scores
# def save_anchors(bboxes, labels, anchors_point):
# if not hasattr(save_image_with_bbox, "counter"):
# save_image_with_bbox.counter = 0 # it doesn't exist yet, so initialize it
# save_image_with_bbox.counter += 1
# np.save('./debug/bboxes_{}.npy'.format(save_image_with_bbox.counter), np.copy(bboxes))
# np.save('./debug/labels_{}.npy'.format(save_image_with_bbox.counter), np.copy(labels))
# np.save('./debug/anchors_{}.npy'.format(save_image_with_bbox.counter), np.copy(anchors_point))
# return save_image_with_bbox.counter
class AnchorEncoder(object):
def __init__(self, positive_threshold, ignore_threshold, prior_scaling):
super(AnchorEncoder, self).__init__()
self._all_anchors = None
self._positive_threshold = positive_threshold
self._ignore_threshold = ignore_threshold
self._prior_scaling = prior_scaling
def center2point(self, center_y, center_x, height, width):
with tf.name_scope('center2point'):
return center_y - (height - 1.) / 2., center_x - (width - 1.) / 2., center_y + (height - 1.) / 2., center_x + (width - 1.) / 2.,
def point2center(self, ymin, xmin, ymax, xmax):
with tf.name_scope('point2center'):
height, width = (ymax - ymin + 1.), (xmax - xmin + 1.)
return (ymin + ymax) / 2., (xmin + xmax) / 2., height, width
def get_anchors_width_height(self, anchor_scale, extra_anchor_scale, anchor_ratio, name=None):
'''get_anchors_width_height
Given scales and ratios, generate anchors along depth (you should use absolute scale in the input image)
Args:
anchor_scale: base scale of the window size used to transform anchors, each scale should have every ratio in 'anchor_ratio'
extra_anchor_scale: base scale of the window size used to transform anchors, each scale should have ratio of 1:1
anchor_ratio: all ratios of anchors for each scale in 'anchor_scale'
'''
with tf.name_scope(name, 'get_anchors_width_height'):
all_num_anchors_depth = len(anchor_scale) * len(anchor_ratio) + len(extra_anchor_scale)
list_h_on_image = []
list_w_on_image = []
# for square anchors
for _, scale in enumerate(extra_anchor_scale):
list_h_on_image.append(scale)
list_w_on_image.append(scale)
# for other aspect ratio anchors
for scale_index, scale in enumerate(anchor_scale):
for ratio_index, ratio in enumerate(anchor_ratio):
list_h_on_image.append(scale / math.sqrt(ratio))
list_w_on_image.append(scale * math.sqrt(ratio))
# shape info:
# y_on_image, x_on_image: layers_shapes[0] * layers_shapes[1]
# h_on_image, w_on_image: num_anchors_along_depth
return tf.constant(list_h_on_image, dtype=tf.float32), tf.constant(list_w_on_image, dtype=tf.float32), all_num_anchors_depth
    def generate_anchors_by_offset(self, anchors_height, anchors_width, anchor_depth, image_shape, layer_shape, feat_stride, offset=0.5, name=None):
        '''generate_anchors_by_offset
        Given anchor width and height, generate tiled anchors across the 'layer_shape'
        Args:
            anchors_height, anchors_width, anchor_depth: generate by the above function 'get_anchors_width_height'
            image_shape: the input image size, since we will generate anchors in absolute coordinates, [height, width]
            layer_shape: the size of layer on which we will tile the anchors, [height, width]
            feat_stride: the strides from input image to the layer on which we will generate anchors
            offset: the offset (height offset and width offset) in in the feature map when we tile anchors, should be either single scalar or a list of scalar
        Returns:
            anchors_ymin, anchors_xmin, anchors_ymax, anchors_xmax, each reshaped
            to [num_spatial_positions, anchor_depth] in absolute image coordinates.
        '''
        with tf.name_scope(name, 'generate_anchors'):
            # image_height/image_width are computed but unused below; kept as-is.
            image_height, image_width, feat_stride = tf.to_float(image_shape[0]), tf.to_float(image_shape[1]), tf.to_float(feat_stride)

            # Grid of feature-map cell indices; meshgrid(x, y) gives x varying
            # along columns and y along rows.
            x_on_layer, y_on_layer = tf.meshgrid(tf.range(layer_shape[1]), tf.range(layer_shape[0]))

            # offset may be one scalar (same for both axes) or a (h, w) pair.
            if isinstance(offset, list) or isinstance(offset, tuple):
                tf.logging.info('{}: Using seperate offset: height: {}, width: {}.'.format(name, offset[0], offset[1]))
                offset_h = offset[0]
                offset_w = offset[1]
            else:
                offset_h = offset
                offset_w = offset

            # Project cell centers back to absolute input-image coordinates.
            y_on_image = (tf.to_float(y_on_layer) + offset_h) * feat_stride
            x_on_image = (tf.to_float(x_on_layer) + offset_w) * feat_stride

            # Broadcast [H, W, 1] centers against [anchor_depth] sizes to get
            # [H, W, anchor_depth] corner coordinates.
            anchors_ymin, anchors_xmin, anchors_ymax, anchors_xmax = self.center2point(tf.expand_dims(y_on_image, axis=-1),
                                                                                    tf.expand_dims(x_on_image, axis=-1),
                                                                                    anchors_height, anchors_width)

            # Flatten the spatial grid: one row per position, one column per depth.
            anchors_ymin = tf.reshape(anchors_ymin, [-1, anchor_depth])
            anchors_xmin = tf.reshape(anchors_xmin, [-1, anchor_depth])
            anchors_ymax = tf.reshape(anchors_ymax, [-1, anchor_depth])
            anchors_xmax = tf.reshape(anchors_xmax, [-1, anchor_depth])

            return anchors_ymin, anchors_xmin, anchors_ymax, anchors_xmax
def get_anchors_count(self, anchors_depth, layer_shape, name=None):
'''get_anchors_count
Return the total anchors on specific layer
Args:
anchor_depth: generate by the above function 'get_anchors_width_height'
layer_shape: the size of layer on which we will tile the anchors, [height, width]
'''
with tf.name_scope(name, 'get_anchors_count'):
all_num_anchors_spatial = layer_shape[0] * layer_shape[1]
all_num_anchors = all_num_anchors_spatial * anchors_depth
return all_num_anchors_spatial, all_num_anchors
def get_all_anchors(self, image_shape, anchors_height, anchors_width, anchors_depth, anchors_offsets, layer_shapes, feat_strides, allowed_borders, should_clips, name=None):
'''get_all_anchors
Return the all anchors from all layers
Args:
image_shape: the input image size, since we will generate anchors in absolute coordinates, [height, width]
anchors_height: list, each of which is generated by the above function 'get_anchors_width_height'
anchors_width: list, each of which is generated by the above function 'get_anchors_width_height'
anchors_depth: list, each of which is generated by the above function 'get_anchors_width_height'
anchors_offsets: list, each of which will be used by 'generate_anchors_by_offset'
layer_shapes: list, each of which will be used by 'generate_anchors_by_offset'
feat_strides: list, each of which will be used by 'generate_anchors_by_offset'
allowed_borders: list, each of which is the border margin to clip border anchors for each layer
should_clips: list, each of which indicate that if we should clip anchors to image border for each layer
'''
with tf.name_scope(name, 'get_all_anchors'):
image_height, image_width = tf.to_float(image_shape[0]), tf.to_float(image_shape[1])
anchors_ymin = []
anchors_xmin = []
anchors_ymax = []
anchors_xmax = []
anchor_allowed_borders = []
for ind, anchor_depth in enumerate(anchors_depth):
with tf.name_scope('generate_anchors_{}'.format(ind)):
_anchors_ymin, _anchors_xmin, _anchors_ymax, _anchors_xmax = self.generate_anchors_by_offset(anchors_height[ind], anchors_width[ind], anchor_depth, image_shape, layer_shapes[ind], feat_strides[ind], offset=anchors_offsets[ind])
if should_clips[ind]:
_anchors_ymin = tf.clip_by_value(_anchors_ymin, 0., image_height | |
T)
I_C = bsr_matrix( array([[ 0.]]), blocksize=(1,1))
I_F = bsr_matrix( array([[ 1.]]), blocksize=(1,1))
P_I = bsr_matrix( array([[ 0.]]), blocksize=(1,1) )
assert_equal(array([ ]), params['Cpts'])
assert_equal(array([0]), params['Fpts'])
assert_equal(I_C.indptr, params['I_C'].indptr)
assert_equal(I_C.indices, params['I_C'].indices)
assert_equal(I_C.data, params['I_C'].data)
assert_equal(I_F.indptr, params['I_F'].indptr)
assert_equal(I_F.indices, params['I_F'].indices)
assert_equal(I_F.data, params['I_F'].data)
assert_equal(P_I.indptr, params['P_I'].indptr)
assert_equal(P_I.indices, params['P_I'].indices)
assert_equal(P_I.data, params['P_I'].data)
##
# 2x2
A = csr_matrix(array([[1., 1.],[1., 1.]]))
Cpts = array([0])
AggOp = csr_matrix(array([[1.], [1.]]))
T = AggOp.copy().tobsr()
params = get_Cpt_params(A, Cpts, AggOp, T)
I_C = bsr_matrix( array([[1., 0.],[0., 0.]]), blocksize=(1,1))
I_F = bsr_matrix( array([[0., 0.],[0., 1.]]), blocksize=(1,1))
P_I = bsr_matrix( array([[1.], [0.]]), blocksize=(1,1) )
assert_equal(array([0]), params['Cpts'])
assert_equal(array([1]), params['Fpts'])
assert_equal(I_C.indptr, params['I_C'].indptr)
assert_equal(I_C.indices, params['I_C'].indices)
assert_equal(I_C.data, params['I_C'].data)
assert_equal(I_F.indptr, params['I_F'].indptr)
assert_equal(I_F.indices, params['I_F'].indices)
assert_equal(I_F.data, params['I_F'].data)
assert_equal(P_I.indptr, params['P_I'].indptr)
assert_equal(P_I.indices, params['P_I'].indices)
assert_equal(P_I.data, params['P_I'].data)
##
Cpts = array([0,1])
AggOp = csr_matrix(array([[1.,0], [0.,1.]]))
T = AggOp.copy().tobsr()
params = get_Cpt_params(A, Cpts, AggOp, T)
I_C = bsr_matrix( array([[1., 0.],[0., 1.]]), blocksize=(1,1))
I_F = bsr_matrix( array([[0., 0.],[0., 0.]]), blocksize=(1,1))
P_I = bsr_matrix( array([[1., 0.], [0., 1.]]), blocksize=(1,1) )
assert_equal(array([0,1]), params['Cpts'])
assert_equal(array([ ]), params['Fpts'])
assert_equal(I_C.indptr, params['I_C'].indptr)
assert_equal(I_C.indices, params['I_C'].indices)
assert_equal(I_C.data, params['I_C'].data)
assert_equal(I_F.indptr, params['I_F'].indptr)
assert_equal(I_F.indices, params['I_F'].indices)
assert_equal(I_F.data, params['I_F'].data)
assert_equal(P_I.indptr, params['P_I'].indptr)
assert_equal(P_I.indices, params['P_I'].indices)
assert_equal(P_I.data, params['P_I'].data)
##
Cpts = array([ ])
AggOp = csr_matrix(array([[0.], [0.]]))
T = AggOp.copy().tobsr()
params = get_Cpt_params(A, Cpts, AggOp, T)
I_C = bsr_matrix( array([[0., 0.],[0., 0.]]), blocksize=(1,1))
I_F = bsr_matrix( array([[1., 0.],[0., 1.]]), blocksize=(1,1))
P_I = bsr_matrix( array([[ 0.], [0. ]]), blocksize=(1,1) )
assert_equal(array([ ]), params['Cpts'])
assert_equal(array([0,1]), params['Fpts'])
assert_equal(I_C.indptr, params['I_C'].indptr)
assert_equal(I_C.indices, params['I_C'].indices)
assert_equal(I_C.data, params['I_C'].data)
assert_equal(I_F.indptr, params['I_F'].indptr)
assert_equal(I_F.indices, params['I_F'].indices)
assert_equal(I_F.data, params['I_F'].data)
assert_equal(P_I.indptr, params['P_I'].indptr)
assert_equal(P_I.indices, params['P_I'].indices)
assert_equal(P_I.data, params['P_I'].data)
##
A = A.tobsr( blocksize=(2,2) )
Cpts = array([0])
AggOp = csr_matrix(array([[1.]]) )
T = bsr_matrix(array([[1., 1.], [1., 2.]]), blocksize=(2,2))
params = get_Cpt_params(A, Cpts, AggOp, T)
I_C = bsr_matrix( array([[1., 0.],[0., 1.]]), blocksize=(2,2))
I_F = bsr_matrix( array([[0., 0.],[0., 0.]]), blocksize=(2,2))
P_I = bsr_matrix( array([[1., 0.],[0., 1.]]), blocksize=(2,2))
assert_equal(array([0,1]), params['Cpts'])
assert_equal(array([ ]), params['Fpts'])
assert_equal(I_C.indptr, params['I_C'].indptr)
assert_equal(I_C.indices, params['I_C'].indices)
assert_equal(I_C.data, params['I_C'].data)
assert_equal(I_F.indptr, params['I_F'].indptr)
assert_equal(I_F.indices, params['I_F'].indices)
assert_equal(I_F.data, params['I_F'].data)
assert_equal(P_I.indptr, params['P_I'].indptr)
assert_equal(P_I.indices, params['P_I'].indices)
assert_equal(P_I.data, params['P_I'].data)
##
Cpts = array([ ])
AggOp = csr_matrix(array([[1.]]) )
T = bsr_matrix(array([[1., 1.], [1., 2.]]), blocksize=(2,2))
params = get_Cpt_params(A, Cpts, AggOp, T)
I_C = bsr_matrix( array([[0., 0.],[0., 0.]]), blocksize=(2,2))
I_F = bsr_matrix( array([[1., 0.],[0., 1.]]), blocksize=(2,2))
P_I = bsr_matrix( array([[0., 0.],[0., 0.]]), blocksize=(2,2))
assert_equal(array([ ]), params['Cpts'])
assert_equal(array([0,1]), params['Fpts'])
assert_equal(I_C.indptr, params['I_C'].indptr)
assert_equal(I_C.indices, params['I_C'].indices)
assert_equal(I_C.data, params['I_C'].data)
assert_equal(I_F.indptr, params['I_F'].indptr)
assert_equal(I_F.indices, params['I_F'].indices)
assert_equal(I_F.data, params['I_F'].data)
assert_equal(P_I.indptr, params['P_I'].indptr)
assert_equal(P_I.indices, params['P_I'].indices)
assert_equal(P_I.data, params['P_I'].data)
##
# Begin more "realistic" tests
A = poisson((10,), format='csr')
Cpts = array([3, 7])
AggOp = ([[ 1., 0.],
[ 1., 0.],
[ 1., 0.],
[ 1., 0.],
[ 1., 0.],
[ 0., 1.],
[ 0., 1.],
[ 0., 1.],
[ 0., 1.],
[ 0., 1.]])
AggOp = csr_matrix(AggOp)
T = AggOp.copy().tobsr()
##
# CSR Test
params = get_Cpt_params(A, Cpts, AggOp, T)
I_C = bsr_matrix(( array([[[ 1.]], [[ 1.]]]),
array([3, 7]), array([0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2]) ),
shape=(10,10) )
I_F = bsr_matrix((
array([[[ 1.]], [[ 1.]], [[ 1.]], [[ 1.]], [[ 1.]], [[ 1.]], [[ 1.]], [[ 1.]]]),
array([0, 1, 2, 4, 5, 6, 8, 9]),
array([0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8]) ),
shape=(10,10) )
P_I = matrix([[ 0., 0.],
[ 0., 0.],
[ 0., 0.],
[ 1., 0.],
[ 0., 0.],
[ 0., 0.],
[ 0., 0.],
[ 0., 1.],
[ 0., 0.],
[ 0., 0.]])
P_I = bsr_matrix(P_I, blocksize=(1,1))
Fpts = array([0,1,2,4,5,6,8,9])
assert_equal(Cpts, params['Cpts'])
assert_equal(Fpts, params['Fpts'])
assert_equal(I_C.indptr, params['I_C'].indptr)
assert_equal(I_C.indices, params['I_C'].indices)
assert_equal(I_C.data, params['I_C'].data)
assert_equal(I_F.indptr, params['I_F'].indptr)
assert_equal(I_F.indices, params['I_F'].indices)
assert_equal(I_F.data, params['I_F'].data)
assert_equal(P_I.indptr, params['P_I'].indptr)
assert_equal(P_I.indices, params['P_I'].indices)
assert_equal(P_I.data, params['P_I'].data)
##
# BSR Test
A = A.tobsr(blocksize=(2,2))
Cpts = array([1, 3])
AggOp = ([[ 1., 0.],
[ 1., 0.],
[ 1., 0.],
[ 0., 1.],
[ 0., 1.]])
AggOp = csr_matrix(AggOp)
T = hstack((T.todense(), T.todense()))[:,[0,2,1,3]]
T = bsr_matrix(T, blocksize=(2,2))
params = get_Cpt_params(A, Cpts, AggOp, T)
I_C = bsr_matrix(( array([ [[ 1., 0.],[ 0., 1.]],
[[ 1., 0.],[ 0., 1.]]]),
array([1, 3]),
array([0, 0, 1, 1, 2, 2]) ),
shape=(10,10) )
I_F = bsr_matrix((
array([[[ 1., 0.],[ 0., 1.]],
[[ 1., 0.],[ 0., 1.]],
[[ 1., 0.],[ 0., 1.]]]),
array([0, 2, 4]),
array([0, 1, 1, 2, 2, 3]) ),
shape=(10,10) )
P_I = matrix([[ 0., 0., 0., 0.],
[ 0., 0., 0., 0.],
[ 1., 0., 0., 0.],
[ 0., 1., 0., 0.],
[ 0., 0., 0., 0.],
[ 0., 0., 0., 0.],
[ 0., 0., 1., 0.],
[ 0., 0., 0., 1.],
[ 0., 0., 0., 0.],
[ 0., 0., 0., 0.]])
P_I = bsr_matrix(P_I, blocksize=(2,2))
Fpts = array([0, 1, 4, 5, 8, 9])
Cpts = array([2, 3, 6, 7])
assert_equal(Cpts, params['Cpts'])
assert_equal(Fpts, params['Fpts'])
assert_equal(I_C.indptr, params['I_C'].indptr)
assert_equal(I_C.indices, params['I_C'].indices)
assert_equal(I_C.data, params['I_C'].data)
assert_equal(I_F.indptr, params['I_F'].indptr)
assert_equal(I_F.indices, params['I_F'].indices)
assert_equal(I_F.data, params['I_F'].data)
assert_equal(P_I.indptr, params['P_I'].indptr)
assert_equal(P_I.indices, params['P_I'].indices)
assert_equal(P_I.data, params['P_I'].data)
    def test_compute_BtBinv(self):
        """Check compute_BtBinv on trivially sized dense/BSR cases with hand-computed answers."""
        ##
        # Trivially sized tests
        # 1x1x1: B = [1] -> (B^T B)^-1 = 1
        T = matrix([[ 1.]])
        T = bsr_matrix(T, blocksize=(1,1))
        B = array([[1.]])
        BtBinv = compute_BtBinv(B, T)
        answer = array([ [[ 1. ]] ])
        assert_array_almost_equal(BtBinv, answer)
        ##
        # Zero near-nullspace vector: result stays 0 (no inversion blow-up).
        T = matrix([[ 1.]])
        T = bsr_matrix(T, blocksize=(1,1))
        B = array([[0.]])
        BtBinv = compute_BtBinv(B, T)
        answer = array([ [[ 0. ]] ])
        assert_array_almost_equal(BtBinv, answer)
        ##
        # B = [0.5] -> (0.25)^-1 = 4
        T = matrix([[ 1.]])
        T = bsr_matrix(T, blocksize=(1,1))
        B = array([[0.5]])
        BtBinv = compute_BtBinv(B, T)
        answer = array([ [[ 4. ]] ])
        assert_array_almost_equal(BtBinv, answer)
        ##
        # 2x1x1: one block per row when blocksize is (1,1)
        T = matrix([[ 1.,0.], [1.,1.]])
        T = bsr_matrix(T, blocksize=(1,1))
        B = array([[1.], [1.] ])
        BtBinv = compute_BtBinv(B, T)
        answer = array([[[ 1. ]], [[ 0.5]]])
        assert_array_almost_equal(BtBinv, answer)
        ##
        T = matrix([[ 1.,0.], [1.,1.]])
        T = bsr_matrix(T, blocksize=(1,1))
        B = array([[0.], [1.] ])
        BtBinv = compute_BtBinv(B, T)
        answer = array([[[ 0. ]], [[ 1.]]])
        assert_array_almost_equal(BtBinv, answer)
        ##
        # Same T but with blocksize (2,2): both rows collapse into one block.
        T = matrix([[ 1.,0.], [1.,1.]])
        T = bsr_matrix(T, blocksize=(2,2))
        B = array([[0.], [2.] ])
        BtBinv = compute_BtBinv(B, T)
        answer = array([[[ 0.25 ]]])
        assert_array_almost_equal(BtBinv, answer)
        ##
        # Sparsity pattern of T restricts which rows of B enter each block.
        T = matrix([[ 1., 0.], [ 1., 0.],
                    [ 0., .5], [ 0., .25]])
        T = bsr_matrix(T, blocksize=(1,1))
        B = array([[1.],[2.]])
        BtBinv = compute_BtBinv(B, T)
        answer = array([[[ 1. ]], [[ 1. ]],
                        [[ 0.25]], [[ 0.25]]])
        assert_array_almost_equal(BtBinv, answer)
        ##
        # Two near-nullspace vectors: each block is a (pseudo)inverted 2x2 Gram matrix.
        T = matrix([[ 1., 0.], [ 0., .25]])
        T = bsr_matrix(T, blocksize=(1,1))
        B = array([[1., 1.],[2., 1.]])
        BtBinv = compute_BtBinv(B, T)
        answer = array([[[ 0.25, 0.25], [ 0.25, 0.25]],
                        [[ 0.16, 0.08], [ 0.08, 0.04]]])
        assert_array_almost_equal(BtBinv, answer)
        ##
        T = matrix([[ 1., 0.], [ 0., .25]])
        T = bsr_matrix(T, blocksize=(2,2))
        B = array([[1., 1.],[1., 1.]])
        BtBinv = compute_BtBinv(B, T)
        answer = array([[[ 0.125, 0.125],
                        [ 0.125, 0.125]]])
        assert_array_almost_equal(BtBinv, answer)
        ##
        # Simple BSR test: 2x2 blocks, two near-nullspace vectors
        T = matrix([[ 1. , 1. , 0. , 0. ],
                    [ 1. , 1. , 0. , 0. ],
                    [ 0. , 0. , 0.5 , 0.5 ],
                    [ 0. , 0. , 0.25, 0.25]])
        T = bsr_matrix(T, blocksize=(2,2))
        B = array([[1., 1.],[1., 2.],[1., 1.],[1., 3.]])
        BtBinv = compute_BtBinv(B, T)
        answer = array([[[ 5. , -3. ], [-3. , 2. ]],
                        [[ 2.5, -1. ], [-1. , 0.5]]])
        assert_array_almost_equal(BtBinv, answer)
def test_eliminate_diag_dom_nodes(self):
##
# Simple CSR test
from pyamg.gallery import poisson
A = poisson( (4,), format='csr' )
C = eliminate_diag_dom_nodes(A, A.copy(), 1.1)
answer = array([[ 1., 0., 0., 0.],
[ 0., 2., -1., 0.],
[ 0., -1., 2., 0.],
[ 0., 0., 0., 1.]])
assert_array_almost_equal(C.todense(), answer)
##
# Simple BSR test
from pyamg.gallery import poisson
A = poisson( (6,), format='csr' )
A.data[3] = 5.0
A = A.tobsr( (2,2) )
C = poisson( (3,), format='csr' )
C = eliminate_diag_dom_nodes(A, C, 1.1)
answer = array([[ 1., 0., 0.],
[ 0., 2., -1.],
[ 0., -1., 2.]])
assert_array_almost_equal(C.todense(), answer)
class TestComplexUtils(TestCase):
def test_diag_sparse(self):
#check sparse -> array
A = matrix([[-4-4.0j]])
assert_equal(diag_sparse(csr_matrix(A)),[-4-4.0j])
A = matrix([[1,0,-5],[-2,5-2.0j,0]])
assert_equal(diag_sparse(csr_matrix(A)),[1,5-2.0j])
#check array -> sparse
A = matrix([[-4+1.0j]])
assert_equal(diag_sparse(array([-4+1.0j])).todense(),csr_matrix(A).todense())
A = matrix([[1,0],[0,5-2.0j]])
assert_equal(diag_sparse(array([1,5-2.0j])).todense(),csr_matrix(A).todense())
def test_symmetric_rescaling(self):
cases = []
A = array([ [ 5.5+1.0j, 3.5, 4.8 ],
[ 2. , 9.9, 0.5-2.0j],
[ 6.5, 2.6, 5.7+1.0j]])
A = csr_matrix( A )
cases.append(A)
P = diag_sparse([1,0,1.0j])
| |
open('stru_mutation.txt', 'w') as s:
with open(file_pdb, 'rU') as raw:
for i in raw:
i = i.split()
with open('mutation_id.txt') as mu:
for m in mu:
m = m.split()
if i[0] == m[0]:
try:
if m[3] == 'Error:':
with open('stru_mutation.txt', 'a') as s:
s.write("input file contains error position for" +m[2]+ "protein"+'\n')
continue
if int(i[2]) <= int(m[3]) and int(i[3]) >= int(m[3]):
take = m[0]+'\t'+m[1]+'\t'+m[2]+'\t'+i[1]+'\t'+i[2]+'\t'+m[3]+'\t'+i[3]+'\t'+i[4]+'\t'+'UniProt'
if take > str(0):
with open('stru_mutation.txt', 'a+') as s:
summary = open('summary.txt', 'a+')
s.write(take+'\n')
summary.write(m[0]+'\t'+m[2]+'\t'+m[3]+'\t'+i[4]+'\t'+i[1]+'\t'+'UniProt'+'\n')
except IndexError:
pass
#\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\
#/////////////////// Annotated PTMs data from other resources than UniProt (know to play role in PPI and cross-talk) /////////////////////////
#/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#To get the mutational effects on PPI, PTM-based crosstalk, and protein domains, we need to run the following data files from one local dictionary; the data
#was retrieved from PTMcode 2.0 and PTMfunc for this reason. To run your own list against this program, all you need to do is change the file name in
#the test variable and you are good to go. The output contains the p-value of the GO terms affected by the mutations and also step-wise protein output data
#to interpret your experiment."""
#This framework contains the advanced stage of mapping, where the same code can be used for mapping to the different
#PTM types present at an interface and/or involved in PPI.
def interface(file1, mutation):
    """PTM present at the interface of two proteins and known to play role in interaction (Beltrao et al. Cell 2012)

    Scans `file1` (PTMfunc interface data) for rows whose protein/position match
    the `mutation` file; matches are written to 'interface_mutation.txt' and,
    when the protein has a yeastID mapping, appended to 'summary.txt'.
    """
    # Performance fix: the original re-opened and re-read both the mutation
    # file and yeastID.txt for EVERY line of file1 (nested-loop file I/O).
    # Their contents are static, so load them once up front.
    with open(mutation) as mu:
        mutation_rows = [m.split() for m in mu]
    with open('yeastID.txt') as id_file:
        id_rows = [di.split() for di in id_file]
    # Resource fix: one write handle each instead of leaking a handle per
    # write; 'r' instead of the removed-in-3.11 'rU' mode.
    with open('interface_mutation.txt', 'w') as out, open('summary.txt', 'a+') as summary:
        with open(file1, 'r') as f:
            for l in f:
                line = l.split()
                if len(line) > 5:
                    # Columns: [0]=id, [1]=protein, [2]=position, [3]=residue/PTM
                    take = [line[1], line[2], line[3], line[5]]
                    for m in mutation_rows:
                        if m[0] == take[1] and m[1] == take[2]:
                            take2 = take[0]+'\t'+take[1]+'\t'+take[2]+'\t'+take[3]+'\t'+'PTMfunc'
                            for di in id_rows:
                                if len(di) > 2 and di[2] == take[1]:
                                    summary.write(di[0]+'\t'+di[2]+'\t'+take[2]+'\t'+take[3]+'\t'+'Interface'+'\t'+'PTMfunc'+'\n')
                            # NOTE(review): lexicographic guard kept from the
                            # original (`take2 > str(0)`); it is effectively
                            # always true for real identifiers.
                            if take2 > str(0):
                                out.write(take2+'\n')
def ppi(file1, mutation):
    """PTMs at protein-protein interaction sites (PTMfunc; Beltrao et al. Cell 2012).

    For each interaction row in *file1* (>= 8 columns), report mutations from
    *mutation* that hit either interaction partner (column 1 or column 6) at
    the modified position (column 3). Matches go to ppi_mutation.txt and,
    when ID-mappable via yeastID.txt, to summary.txt.
    """
    def _summarize(protein_take):
        # Append an ID-mapped summary row for every yeastID entry matching the protein.
        fi = protein_take.split()
        with open('yeastID.txt') as ids:
            for di in ids:
                di = di.split()
                if len(di) > 2 and di[2] == fi[0]:
                    # context manager so the append handle is closed (was leaked)
                    with open('summary.txt', 'a+') as summary:
                        summary.write(di[0]+'\t'+di[2]+'\t'+fi[2]+'\t'+'\t'+'PPI'+'\t'+'PTMfunc'+'\n')
    with open('ppi_mutation.txt', 'w') as out:
        with open(file1, 'r') as f:  # 'rU' mode was removed in Python 3.11
            for ls in f:
                line = ls.split()
                with open(mutation) as mu:
                    for m in mu:
                        m = m.split()
                        if len(line) > 7:
                            # first interaction partner (column 1)
                            if m[0] == line[1] and m[1] == line[3]:
                                take = line[1]+'\t'+line[2]+'\t'+line[3]+'\t'+'PTMfunc'
                                _summarize(take)
                                if take > str(0):  # preserved guard from the original
                                    out.write(take+'\n')
                                continue
                            # second interaction partner (column 6)
                            if m[0] == line[6] and m[1] == line[3]:
                                take2 = line[6]+'\t'+line[2]+'\t'+line[3]+'\t'+'PTMfunc'
                                _summarize(take2)
                                if take2 > str(0):
                                    out.write(take2+'\n')
def withinPro(file2, mutation):
    """PTMs (predicted) involved in crosstalk within a given protein in
    baker's yeast (PTMcode 2.0; Minguez et al. 2012).

    Rows of *file2* need >= 20 columns; a mutation matches when it hits the
    protein (col 16) at either crosstalk position (col 17 or col 19).
    Matches go to within_protein.txt and ID-mapped rows to summary.txt.
    """
    def _summarize(row):
        # Append an ID-mapped summary row for every yeastID entry matching the protein.
        fi = row.split()
        with open('yeastID.txt') as ids:
            for di in ids:
                di = di.split()
                if len(di) > 2 and di[2] == fi[1]:
                    # context manager so the append handle is closed (was leaked)
                    with open('summary.txt', 'a+') as summary:
                        summary.write(di[0]+'\t'+di[2]+'\t'+fi[3]+'\t'+fi[2]+'\t'+'WithinProtein'+'\t'+'PTMcode'+'\n')
    # output handle renamed from 'file1' to avoid the confusing pseudo-parameter name
    with open('within_protein.txt', 'w') as out:
        with open(file2, 'r') as f:  # 'rU' mode was removed in Python 3.11
            for l in f:
                line = l.split()
                if len(line) > 19:
                    take = line[15]+'\t'+line[16]+'\t'+line[3]+'\t'+line[17]+'\t'+line[7]+'\t'+line[19]
                    take = take.split()
                    with open(mutation, 'r') as mu:
                        for m in mu:
                            m = m.split()
                            if m[0] == take[1] and m[1] == take[3]:
                                take2 = take[0]+'\t'+take[1]+'\t'+take[2]+'\t'+take[3]+'\t'+'PTMcode'
                                _summarize(take2)
                                if take2 > str(0):  # preserved guard from the original
                                    out.write(take2+'\n')
                                continue
                            if m[0] == take[1] and m[1] == take[5]:
                                take3 = take[0]+'\t'+take[1]+'\t'+take[4]+'\t'+take[5]+'\t'+'PTMcode'
                                _summarize(take3)
                                if take3 > str(0):
                                    out.write(take3+'\n')
def betweenPro(fileb, mutation):
    """PTMs (predicted) involved in crosstalk between different proteins in
    baker's yeast (PTMcode 2.0; Minguez et al. 2012).

    A mutation matches when it hits either partner protein (col 16 / col 18)
    at its crosstalk position (col 19 / col 21). Matches go to
    ptm_between_proteins.txt; ID-mapped rows go to summary.txt.
    """
    def _summarize(row):
        # Append an ID-mapped summary row for every yeastID entry matching the protein.
        fi = row.split()
        with open('yeastID.txt') as ids:
            for di in ids:
                di = di.split()
                if len(di) > 2 and di[2] == fi[0]:
                    # context manager so the append handle is closed (was leaked)
                    with open('summary.txt', 'a+') as summary:
                        summary.write(di[0]+'\t'+di[2]+'\t'+fi[2]+'\t'+fi[3]+'\t'+'BetweenProteins'+'\t'+'PTMcode'+'\n')
    # output handle renamed from 'file1' to avoid the confusing pseudo-parameter name
    with open('ptm_between_proteins.txt', 'w') as out:
        with open(fileb, 'r') as f:  # 'rU' mode was removed in Python 3.11
            for l in f:
                line = l.split()
                # was `> 20`, but line[21] is read below: a 21-column row
                # raised IndexError and aborted the whole run
                if len(line) > 21:
                    take = line[16]+'\t'+line[18]+'\t'+line[15]+'\t'+line[17]+'\t'+line[19]+'\t'+line[21]+'\t'+line[4]+'\t'+line[8]
                    take = take.split()
                    with open(mutation, 'r') as mu:
                        for m in mu:
                            m = m.split()
                            if m[0] == take[0] and m[1] == take[4]:
                                take2 = take[0]+'\t'+take[2]+'\t'+take[4]+'\t'+take[6]+'\t'+'PTMcode'
                                _summarize(take2)
                                if take2 > str(0):  # preserved guard from the original
                                    out.write(take2+'\n')
                                continue
                            if m[0] == take[1] and m[1] == take[5]:
                                take3 = take[1]+'\t'+take[3]+'\t'+take[5]+'\t'+take[7]+'\t'+'PTMcode'
                                _summarize(take3)
                                if take3 > str(0):
                                    out.write(take3+'\n')
def hotspot(fileh, mutation):
    """PTM-containing motifs in close proximity, i.e. hotspots
    (PTMfunc; Beltrao et al. Cell 2012).

    A mutation matches when it hits the hotspot protein (col 2) at the
    hotspot position (col 3). Matches go to hotspot.txt; ID-mapped rows
    go to summary.txt.
    """
    # output handle renamed: the original shadowed the function name 'hotspot'
    with open('hotspot.txt', 'w') as out:
        with open(fileh, 'r') as f:  # 'rU' mode was removed in Python 3.11
            for l in f:
                line = l.split()
                with open(mutation, 'r') as mu:
                    for m in mu:
                        m = m.split()
                        if len(line) > 6:
                            if m[0] == line[2] and m[1] == line[3]:
                                take = line[1]+'\t'+line[2]+'\t'+line[3]+'\t'+line[5]+'\t'+line[6]+'\t'+'PTMfunc'
                                fi = take.split()
                                with open('yeastID.txt') as ids:
                                    for di in ids:
                                        di = di.split()
                                        if len(di) > 2 and di[2] == fi[1]:
                                            # context manager so the append handle is closed (was leaked)
                                            with open('summary.txt', 'a+') as summary:
                                                summary.write(di[0]+'\t'+di[2]+'\t'+fi[2]+'\t'+fi[3]+'\t'+'HotSpot'+'\t'+'PTMFunc'+'\n')
                                if take > str(0):  # preserved guard from the original
                                    out.write(take+'\n')
def sum_file_map():
    """Write final_report.txt: the lines of summary.txt de-duplicated,
    keeping the first occurrence of each line in its original order."""
    with open('final_report.txt', 'w') as report:
        with open('summary.txt') as source:
            seen = set()
            for row in source:
                if row in seen:
                    continue
                seen.add(row)
                report.write(row)
def resc():
    """Extract the bundled PTMcode/PTMfunc reference tables from the ymap
    package resources into the current working directory.

    Every resource is optional: an IOError (missing or unreadable resource)
    skips just that file, matching the best-effort behaviour of the rest of
    the pipeline.
    """
    base = "/data/PTMcode+PTMfunc_data/"

    def _copy_text(fname):
        # Decode one packaged text resource and write it locally.
        try:
            text = resource_stream("ymap", base + fname).read().decode()
            with open(fname, 'w') as handle:
                handle.write(text + '\n')
        except IOError:
            pass

    for fname in ('3DID_aceksites_interfaceRes_sc.txt',
                  '3DID_phosphosites_interfaceRes_sc.txt',
                  '3DID_ubisites_interfaceRessc_sc.txt',
                  'SC_acet_interactions.txt'):
        _copy_text(fname)
    # The between-proteins table ships zipped: copy the raw bytes, then unzip.
    try:
        blob = resource_stream("ymap", base + "sc_btw_proteins.txt.zip").read()
        with open('sc_btw_proteins.txt.zip', 'wb') as handle:
            handle.write(blob)
    except IOError:
        pass
    try:
        zipfile.ZipFile('sc_btw_proteins.txt.zip', 'r').extractall()
    except IOError:
        pass
    for fname in ('SC_psites_interactions_sc.txt',
                  'SC_ubi_interactions_sc.txt',
                  'sc_within_proteins.txt',
                  'schotspot_updated.txt'):
        _copy_text(fname)
    return
#////////////////////////////////////////////////////////////////////////////////////////////////////////////
# USAGE (Optional)
#------------------------------------------------------------------------------------------------------------
#This usage strategy is optional; a user can use the code above in any convenient way as
#required by the experimental settings and data interpretation (see README for proper use)
#////////////////////////////////////////////////////////////////////////////////////////////////////////////
# Shared mapper instance and working directory used by the convenience
# wrapper functions below.
c = YGtPM()
wd = os.getcwd()
def data():
    """Download and prepare every data file required by the ymap mapping
    functions.

    Each step is best-effort: an IOError leaves that resource missing and
    the pipeline continues. Returns a human-readable timing message.
    """
    start_time = time.time()

    def _step(func, *args):
        # Run one preparation step, tolerating missing inputs (IOError only,
        # matching the original per-step try/except blocks).
        try:
            return func(*args)
        except IOError:
            pass

    _step(resc)
    _step(c.pTMdata)
    _step(c.clean, 'uniprot_mod_raw.txt')
    _step(c.iD)
    _step(c.pmap, 'yeastID.txt', 'PTMs.txt')
    _step(c.dclean, 'uniprot_mod_raw.txt')
    _step(c.d_map, 'yeastID.txt', 'domains.txt')
    _step(c.ab, 'uniprot_mod_raw.txt')
    _step(c.id, 'bact.txt', 'yeast_id.txt')
    _step(c.bioGrid)
    _step(c.pdb_c, 'uniprot_mod_raw.txt')
    _step(c.gff)
    _step(c.frmt, 'gff.txt')
    _step(c.id_map, 'yeastID.txt', 'frmt.txt')
    _step(c.nucleotide)
    _step(c.n_map, 'yeastID.txt', 'nucleotide.txt')

    data_dir = wd + '/' + 'data' + '/' + 'PTMcode+PTMfunc_data'

    def _unzip():
        zipfile.ZipFile(data_dir + '/' + 'sc_btw_proteins.txt.zip', 'r').extractall()

    _step(_unzip)
    # Copy the pre-packaged PTMcode/PTMfunc tables into the working directory.
    # Copies are independent, so one missing file does not skip the rest.
    for fname in ('3DID_aceksites_interfaceRes_sc.txt',
                  '3DID_phosphosites_interfaceRes_sc.txt',
                  '3DID_ubisites_interfaceRessc_sc.txt',
                  'SC_acet_interactions.txt',
                  'SC_psites_interactions_sc.txt',
                  'SC_ubi_interactions_sc.txt',
                  'sc_within_proteins.txt',
                  'schotspot_updated.txt',
                  'sc_btw_proteins.txt'):
        _step(shutil.copy2, data_dir + '/' + fname, wd)
    return "All required data downloaded in %s seconds" % (time.time() - start_time)
#\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\
#//////////////////////////////// Following two codes are used for return the mutations at proteins level \\\\\\\\\\\\\\\\\\
#///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
def mutation_types_file():
    """Compute mutation types and amino-acid changes where the reference and
    mutant bases are known, by running mutation_file() on the default inputs.

    A missing input file (IOError) is non-fatal. Returns a status message.
    """
    # (removed an unused start_time = time.time() local)
    try:
        mutation_file("mutated_proteins.txt", 'd_id_map.txt')
    except IOError:
        pass
    return "Mutations with mutations types are available to map on functional entities"
#//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#////////////////////////////////// Following series of codes will | |
from functools import cached_property
from warnings import warn
import esda
import geopandas as gpd
import scikitplot as skplt
from sklearn.metrics import silhouette_samples
from ..visualize.mapping import plot_timeseries
from .dynamics import predict_markov_labels as _predict_markov_labels
from .incs import lincs_from_gdf
class ModelResults:
"""Storage for clustering and regionalization results.
Attributes
----------
df: pandas.DataFrame
data used to estimate the model
columns: list
subset of variables in the dataframe used to fit the model
W: libpysal.weights.W
libpysal spatial weights matrix used in model
labels: array-like
`cluster` or `region` label assigned to each observation
instance: instance of model class used to generate neighborhood labels.
fitted model instance, e.g sklearn.cluster.AgglomerativeClustering object
or other model class used to estimate class labels
name : str
name of model
temporal_index : str, optional
which column on the dataframe defines time and or sequencing of the
long-form data. Default is "year"
unit_index : str, optional
which column on the long-form dataframe identifies the stable units
over time. In a wide-form dataset, this would be the unique index
"""
def __init__(
self,
df,
columns,
labels,
instance,
W,
name,
unit_index,
temporal_index,
scaler,
pooling,
):
"""Initialize a new ModelResults instance.
Parameters
----------
df: array-like
data of the cluster
columns: list-like
columns used to compute model
W: libpysal.weights.W
libpysal spatial weights matrix used in model
labels: array-like
labels of each column
instance: AgglomerativeCluserting object, or other model specific object type
how many clusters model was computed with
name: str
name of the model
"""
self.columns = columns
self.df = df
self.W = W
self.instance = instance
self.labels = labels
if self.W is None:
self.model_type = "aspatial"
else:
self.model_type = "spatial"
self.name = name
self.unit_index = unit_index
self.temporal_index = temporal_index
self.scaler = scaler
self.pooling = pooling
@cached_property
def lincs(self):
"""Calculate Local Indicators of Neighborhood Change (LINC) scores for each unit.
Returns
-------
geopandas.GeoDataFrame
geodataframe with linc values available under the `linc` column
"""
assert (
self.model_type != "spatial"
), "The Local Index of Neighborhood Change (LINC) measure is only valid for models where labels are pooled across time periods"
df = self.df.copy()
df = df.dropna(subset=self.columns)
lincs = lincs_from_gdf(
self.df,
unit_index=self.unit_index,
temporal_index=self.temporal_index,
cluster_col=self.name,
periods="all",
)
return lincs
    @cached_property
    def silhouette_scores(self):
        """Calculate silhouette scores for each unit.

        Returns
        -------
        geopandas.GeoDataFrame
            geodataframe with silhouette values available under the `silhouette_score` column
        """
        # Work on a NaN-filtered copy so scaling does not mutate self.df.
        df = self.df.copy()
        df = df.dropna(subset=self.columns)
        time_idx = self.temporal_index
        if self.scaler:
            if self.pooling in ["fixed", "unique"]:
                # if fixed (or unique), scale within each time period
                for time in df[time_idx].unique():
                    df.loc[
                        df[time_idx] == time, self.columns
                    ] = self.scaler.fit_transform(
                        df.loc[df[time_idx] == time, self.columns].values
                    )
            elif self.pooling == "pooled":
                # if pooled, scale the whole series at once
                # NOTE(review): this passes df.values (every column, including
                # non-model ones) rather than df[self.columns].values -- confirm.
                df.loc[:, self.columns] = self.scaler.fit_transform(df.values)
        return gpd.GeoDataFrame(
            {
                "silhouette_score": silhouette_samples(
                    df[self.columns].values, df[self.name]
                ),
                self.unit_index: df[self.unit_index],
                self.temporal_index: df[self.temporal_index],
            },
            # geometry/index come from the unfiltered self.df
            index=self.df.index,
            geometry=self.df.geometry,
            crs=self.df.crs,
        )
    @cached_property
    def nearest_label(self):
        """Calculate next-best cluster labels for each unit.

        Returns
        -------
        geopandas.GeoDataFrame
            geodataframe with next-best label assignments available under the `nearest_label` column
        """
        df = self.df.copy()
        df = df.dropna(subset=self.columns)
        return gpd.GeoDataFrame(
            {
                # NOTE(review): the scores are computed on the unfiltered
                # self.df while the index columns come from the NaN-filtered
                # df -- lengths diverge whenever self.columns contain NaNs;
                # confirm this is intended.
                "nearest_label": esda.silhouettes.nearest_label(
                    self.df[self.columns].values, self.labels
                ),
                self.unit_index: df[self.unit_index],
                self.temporal_index: df[self.temporal_index],
            },
            index=self.df.index,
            geometry=self.df.geometry,
            crs=self.df.crs,
        )
    @cached_property
    def boundary_silhouette(self):
        """Calculate boundary silhouette scores for each unit.

        Returns
        -------
        geopandas.GeoDataFrame
            geodataframe with boundary silhouette scores available under the `boundary_silhouette` column
        """
        df = self.df.copy()
        df = df.dropna(subset=self.columns)
        # Boundary silhouettes need a spatial weights matrix.
        assert self.model_type == "spatial", (
            "Model is aspatial (lacks a W object), but has been passed to a spatial diagnostic."
            " Try aspatial diagnostics like nearest_label() or sil_scores()"
        )
        time_idx = self.temporal_index
        if self.scaler:
            if self.pooling in ["fixed", "unique"]:
                # if fixed (or unique), scale within each time period
                for time in df[time_idx].unique():
                    df.loc[
                        df[time_idx] == time, self.columns
                    ] = self.scaler.fit_transform(
                        df.loc[df[time_idx] == time, self.columns].values
                    )
            elif self.pooling == "pooled":
                # if pooled, scale the whole series at once
                # NOTE(review): scales df.values (all columns), not
                # df[self.columns].values -- confirm intended.
                df.loc[:, self.columns] = self.scaler.fit_transform(df.values)
        return gpd.GeoDataFrame(
            {
                # NOTE(review): scores are computed on the unscaled self.df;
                # the scaling above only touches the local copy -- confirm.
                "boundary_silhouette": esda.boundary_silhouette(
                    self.df[self.columns].values, self.labels, self.W
                ),
                self.unit_index: df[self.unit_index],
                self.temporal_index: df[self.temporal_index],
            },
            index=self.df.index,
            geometry=self.df.geometry,
            crs=self.df.crs,
        )
@cached_property
def path_silhouette(self):
"""Calculate path silhouette scores for each unit.
Returns
-------
geopandas.GeoDataFrame
geodataframe with path-silhouette scores available under the `path_silhouette` column
"""
df = self.df.copy()
df = df.dropna(subset=self.columns)
time_idx = self.temporal_index
if self.scaler:
if self.pooling in ["fixed", "unique"]:
# if fixed (or unique), scale within each time period
for time in df[time_idx].unique():
df.loc[
df[time_idx] == time, self.columns
] = self.scaler.fit_transform(
df.loc[df[time_idx] == time, self.columns].values
)
elif self.pooling == "pooled":
# if pooled, scale the whole series at once
df.loc[:, self.columns] = self.scaler.fit_transform(df.values)
assert self.model_type == "spatial", (
"Model is aspatial(lacks a W object), but has been passed to a spatial diagnostic."
" Try aspatial diagnostics like nearest_label() or sil_scores()"
)
return gpd.GeoDataFrame(
{
"path_silhouette": esda.path_silhouette(
self.df[self.columns].values, self.labels, self.W
),
self.unit_index: df[self.temporal_index],
self.temporal_index: df[self.temporal_index],
},
index=self.df.index,
geometry=self.df.geometry,
crs=self.df.crs,
)
    def plot_silhouette(self, metric="euclidean", title="Silhouette Score"):
        """Create a diagnostic plot of silhouette scores using scikit-plot.

        Parameters
        ----------
        metric : str, optional
            metric used to calculate distance. Accepts any string
            used with sklearn.metrics.pairwise
        title : str, optional
            title passed to the matplotlib figure. Defaults to "Silhouette Score"

        Returns
        -------
        matplotlib.Figure
            silhouette plot created by scikit-plot.
        """
        # Scale a copy of the data the same way the model saw it.
        df = self.df.copy()
        time_idx = self.temporal_index
        if self.scaler:
            if self.pooling in ["fixed", "unique"]:
                # if fixed (or unique), scale within each time period
                for time in df[time_idx].unique():
                    df.loc[
                        df[time_idx] == time, self.columns
                    ] = self.scaler.fit_transform(
                        df.loc[df[time_idx] == time, self.columns].values
                    )
            elif self.pooling == "pooled":
                # if pooled, scale the whole series at once
                # NOTE(review): passes df.values (all columns) to the scaler,
                # unlike the per-period branch -- confirm intended.
                df.loc[:, self.columns] = self.scaler.fit_transform(df.values)
        fig = skplt.metrics.plot_silhouette(
            df[self.columns].values, self.labels, metric=metric, title=title
        )
        return fig
def plot_silhouette_map(
self,
time_periods="all",
ctxmap="default",
figsize=None,
nrows=None,
ncols=None,
save_fig=None,
alpha=0.5,
cmap="bwr",
scheme="quantiles",
k=8,
title="Silhouette Score",
dpi=500,
plot_kwargs=None,
web_mercator=True,
):
"""Plot the silhouette scores for each unit as a [series of] choropleth map(s).
Parameters
----------
scheme : string,optional
matplotlib scheme to be used
default is 'quantiles'
k : int, optional
number of bins to graph. k may be ignored
or unnecessary for some schemes, like headtailbreaks, maxp, and maximum_breaks
Default is 6.
cmap : string, optional
matplotlib colormap used to shade polygons
title : string, optional
title of figure.
dpi : int, optional
dpi of the saved image if save_fig=True
default is 500
web_mercator : bool, optional
whether to reproject the data into web mercator (epsg 3857)
prior to plotting. Defaults to True
ncols : int, optional
number of columns in the figure
if passing ncols, nrows must also be passed
default is None
nrows : int, optional
number of rows in the figure
if passing nrows, ncols must also be passed
default is None
figsize : tuple, optional
the desired size of the matplotlib figure
save_fig : str, optional
path to save figure if desired.
ctxmap : contextily map provider, optional
contextily basemap. Set to False for no basemap.
Default is Stamen.TonerLite
alpha : int (optional)
Transparency parameter passed to matplotlib
Returns
-------
matplotlib.Axes
"""
if not plot_kwargs:
plot_kwargs = dict()
df = self.silhouette_scores.copy()
if time_periods == "all":
time_periods = df[self.temporal_index].unique()
ax = plot_timeseries(
df,
"silhouette_score",
time_subset=time_periods,
alpha=alpha,
legend=True,
cmap=cmap,
scheme=scheme,
k=k,
figsize=figsize,
ncols=ncols,
nrows=nrows,
temporal_index=self.temporal_index,
ctxmap=ctxmap,
title=title,
web_mercator=web_mercator,
dpi=dpi,
save_fig=save_fig,
**plot_kwargs,
)
return ax
def plot_next_best_label(
self,
time_periods="all",
ctxmap="default",
figsize=None,
nrows=None,
ncols=None,
save_fig=None,
alpha=0.5,
cmap="set1",
title="Next-Best Label",
dpi=500,
plot_kwargs=None,
web_mercator=True,
):
"""Plot the next-best cluster label for each unit as a choropleth map.
Parameters
----------
cmap : string, optional
matplotlib colormap used to shade polygons
title : string, optional
title of figure.
dpi : int, optional
dpi of the saved image if save_fig=True
default is 500
web_mercator : bool, optional
whether to reproject the data into web mercator (epsg 3857)
prior to plotting. Defaults to True
ncols : int, optional
number of columns in the figure
if passing ncols, nrows must also be passed
default is None
nrows : int, optional
number of rows in the figure
if passing nrows, ncols must also be passed
default is None
figsize : tuple, optional
the desired size of the matplotlib figure
save_fig : str, optional
path to save figure if desired.
ctxmap : contextily map provider, optional
contextily basemap. Set to False for no basemap.
| |
len(conf_scores)-1):
c_mask = conf_scores[cl] > score_thresh
if c_mask.sum() == 0: continue
scores = conf_scores[cl][c_mask]
l_mask = c_mask.unsqueeze(1).expand_as(box_coords)
boxes = box_coords[l_mask].view(-1, 4)
ids, count = nms(boxes.data, scores, nms_overlap, 50)
ids = ids[:count]
out1.append(scores[ids])
out2.append(boxes.data[ids])
cc.append([cl]*count)
# return(out1,out2)
if len(cc)> 0:
clas = np.concatenate(cc)
prs = torch.cat(out1).cpu().numpy()
bbox = ((torch.cat(out2).cpu()*self.image_size[0]).long()).numpy()
# bb = [bb_hw(o) for o in bbox.reshape(-1,4)]
bb = [[o[1],o[0],o[3],o[2]] for o in bbox.reshape(-1,4)]
return (bb, ['bg' if c == len(self.class_names) else self.class_names[c] for c in clas])
else:
return ([],[])
class CustomSSDObjectDetection_LPR(TransferNetworkImg):
    def __init__(self,
                 model_name='resnet50',
                 model_type='obj_detection',
                 lr=0.02,
                 one_cycle_factor=0.5,
                 criterion=nn.NLLLoss(),
                 optimizer_name='Adam',
                 dropout_p=0.45,
                 pretrained=True,
                 device=None,
                 best_accuracy=0.,
                 best_validation_loss=None,
                 best_model_file='best_custom_ssd.pth',
                 chkpoint_file='cusstom_ssd_chkpoint_file.pth',
                 head={'num_outputs': 10,
                       'layers': [],
                       'model_type': 'classifier'
                       },
                 pre_trained_back=None,
                 add_extra=True,
                 class_names=None,
                 num_classes=None,
                 image_size=(224, 224)):
        """Build an SSD-style license-plate detector on a transfer-learning backbone.

        First initializes the backbone via the parent (with set_params/set_head
        deferred), then creates anchors + the custom SSD head, attaches the head,
        and finally sets the model parameters with ssd_loss as the criterion.
        """
        # Defer parameter/head setup: anchors and the SSD head must exist first.
        super().__init__(model_name=model_name,
                         model_type=model_type,
                         lr=lr,
                         one_cycle_factor=one_cycle_factor,
                         criterion=criterion,
                         optimizer_name=optimizer_name,
                         dropout_p=dropout_p,
                         pretrained=pretrained,
                         device=device,
                         best_accuracy=best_accuracy,
                         best_validation_loss=best_validation_loss,
                         best_model_file=best_model_file,
                         chkpoint_file=chkpoint_file,
                         head=head,
                         add_extra=add_extra,
                         set_params=False,
                         class_names=class_names,
                         num_classes=num_classes,
                         set_head=False)
        self.obj = True  # marks this network as an object detector
        self.image_size = image_size
        # self.grid_ceil is presumably set by the parent __init__ -- TODO confirm
        grids = get_grids(image_size, ceil=self.grid_ceil)
        print('Grids: {}'.format(grids))
        self.set_up_object_detection(anc_grids=grids, anc_zooms=[0.7, 1., 1.3],
                                     anc_ratios=[(1., 1.), (1., 0.5), (0.5, 1.)],
                                     num_classes=num_classes, drop_out=dropout_p)
        self.set_model_head(model_name=model_name, head=self.custom_head, pre_trained_back=pre_trained_back)
        # Now that the head exists, configure training with the SSD loss.
        super(CustomSSDObjectDetection_LPR, self).set_model_params(
            criterion=self.ssd_loss,
            optimizer_name=optimizer_name,
            lr=lr,
            one_cycle_factor=one_cycle_factor,
            dropout_p=dropout_p,
            model_name=model_name,
            model_type=model_type,
            best_accuracy=best_accuracy,
            best_validation_loss=best_validation_loss,
            best_model_file=best_model_file,
            chkpoint_file=chkpoint_file,
            head=head,
            class_names=class_names,
            num_classes=num_classes
        )
        self.model = self.model.to(device)
def set_model_head(self,
model_name = 'resnet50',
head = {'num_outputs':10,
'layers':[],
'class_names': None,
'model_type':'classifier'
},
pre_trained_back = None,
criterion = nn.NLLLoss(),
adaptive = True,
dropout_p = 0.45,
device = None):
# models_meta = {
# 'resnet': {'head_id': -2, 'adaptive_head': [DAI_AvgPool,Flatten()],'normal_head': [nn.AvgPool2d(7,1),Flatten()]},
# 'densenet': {'head_id': -1,'adaptive_head': [nn.ReLU(inplace=True),DAI_AvgPool,Flatten()]
# ,'normal_head': [nn.ReLU(inplace=True),nn.AvgPool2d(7,1),Flatten()]}
# }
models_meta = {
'resnet34': {'conv_channels':512,'head_id': -2,'clip_func':resnet_obj_clip,'adaptive_head': [DAI_AvgPool],
'normal_head': [nn.AvgPool2d(7,1)]},
'resnet50': {'conv_channels':2048,'head_id': -2,'clip_func':resnet_obj_clip,'adaptive_head': [DAI_AvgPool],
'normal_head': [nn.AvgPool2d(7,1)]},
'densenet': {'conv_channels':1024,'head_id': -1,'clip_func':densenet_obj_clip,'adaptive_head': [nn.ReLU(inplace=True),DAI_AvgPool]
,'normal_head': [nn.ReLU(inplace=True),nn.AvgPool2d(7,1)]}
}
# name = ''.join([x for x in model_name.lower() if x.isalpha()])
name = model_name.lower()
meta = models_meta[name]
if pre_trained_back:
model = pre_trained_back
else:
modules = list(self.model.children())
# l = meta['clip_func'](modules,meta['head_id'])
l = modules[:meta['head_id']]
if self.dream_model:
# for layer in self.dream_model.children():
# if(type(layer).__name__) == 'Conv2d':
# layer.in_channels,layer.out_channels = meta['conv_channels'],meta['conv_channels']
# if(type(layer).__name__) == 'BatchNorm2d':
# layer.num_features = meta['conv_channels']
# self.dream_model[0].in_channels = meta['conv_channels']
# self.dream_model[-4].out_channels = meta['conv_channels']
# self.dream_model[-3].num_features = meta['conv_channels']
l+=self.dream_model
# if type(head).__name__ != 'dict':
model = nn.Sequential(*l)
for layer in head.children():
if(type(layer).__name__) == 'StdConv':
conv_module = layer
break
# temp_conv = head.sconv0.conv
conv_layer = conv_module.conv
temp_args = [conv_layer.out_channels,conv_layer.kernel_size,conv_layer.stride,conv_layer.padding]
temp_args.insert(0,meta['conv_channels'])
conv_layer = nn.Conv2d(*temp_args)
conv_module.conv = conv_layer
# print(head)
# model.add_module('adaptive_avg_pool',DAI_AvgPool)
model.add_module('custom_head',head)
# else:
# head['criterion'] = criterion
# self.num_outputs = head['num_outputs']
# fc = modules[-1]
# in_features = fc.in_features
# fc = FC(
# num_inputs = in_features,
# num_outputs = head['num_outputs'],
# layers = head['layers'],
# model_type = head['model_type'],
# output_non_linearity = head['output_non_linearity'],
# dropout_p = dropout_p,
# criterion = head['criterion'],
# optimizer_name = None,
# device = device
# )
# if adaptive:
# l += meta['adaptive_head']
# else:
# l += meta['normal_head']
# model = nn.Sequential(*l)
# model.add_module('fc',fc)
self.model = model
self.head = head
# if type(head).__name__ == 'dict':
# print('{}: setting head: inputs: {} hidden:{} outputs: {}'.format(model_name,
# in_features,
# head['layers'],
# head['num_outputs']))
# else:
print('{}: setting head: {}'.format(model_name,type(head).__name__))
    def set_up_object_detection(self, anc_grids, anc_zooms, anc_ratios, num_classes, num_colr=12, drop_out=0.5):
        """Create the anchor boxes, the multi-scale SSD head and the focal loss.

        anc_grids/anc_zooms/anc_ratios define the anchor layout; num_colr is
        the size of the color palette used when drawing detections.
        """
        # print('Would you like to give your own values for anchor_grids, anchor_zooms,and anchor_ratios? The default values are: {}, {} and {}'
        #       .format(anc_grids,anc_zooms,anc_ratios))
        # print('If so, you may call the function "set_up_object_detection" with your own paramteres.')
        cmap = get_cmap(num_colr)
        self.colr_list = [cmap(float(x)) for x in range(num_colr)]
        self.num_colr = num_colr
        self.create_anchors(anc_grids, anc_zooms, anc_ratios)
        # one output head per grid scale; -4. is the classifier bias init
        self.custom_head = CustomSSD_MultiHead(len(anc_grids), self.k, num_classes, drop_out, -4.)
        self.loss_f = FocalLoss(num_classes, device=self.device)
    def create_anchors(self, anc_grids, anc_zooms, anc_ratios):
        """Build the anchor-box tensors for every grid scale.

        Sets self.anchors (center/size), self.anchor_cnr (corner format),
        self.grid_sizes (per-anchor cell size) and self.k (anchors per cell).
        All coordinates are normalized to [0, 1].
        """
        # every zoom/aspect-ratio combination is applied at every grid cell
        anchor_scales = [(anz*i, anz*j) for anz in anc_zooms for (i, j) in anc_ratios]
        k = len(anchor_scales)  # anchors per grid cell
        # center offset of the first cell: half a cell width per grid
        anc_offsets = [1/(o*2) for o in anc_grids]
        anc_x = np.concatenate([np.repeat(np.linspace(ao, 1-ao, ag), ag)
                                for ao, ag in zip(anc_offsets, anc_grids)])
        anc_y = np.concatenate([np.tile(np.linspace(ao, 1-ao, ag), ag)
                                for ao, ag in zip(anc_offsets, anc_grids)])
        anc_ctrs = np.repeat(np.stack([anc_x, anc_y], axis=1), k, axis=0)
        anc_sizes = np.concatenate([np.array([[o/ag, p/ag] for i in range(ag*ag) for o, p in anchor_scales])
                                    for ag in anc_grids])
        grid_sizes = torch.tensor(np.concatenate([np.array(
                                  [1/ag for i in range(ag*ag) for o, p in anchor_scales])
                                  for ag in anc_grids])).float().unsqueeze(1).to(self.device)
        anchors = torch.tensor(np.concatenate([anc_ctrs, anc_sizes], axis=1)).float().to(self.device)
        anchor_cnr = hw2corners(anchors[:, :2], anchors[:, 2:])
        self.anchors, self.anchor_cnr, self.grid_sizes, self.k = anchors, anchor_cnr, grid_sizes, k
# def draw_im(im, ann, cats):
# ax = img_grid(im, figsize=(16,8))
# for b,c in ann:
# b = bb_hw(b)
# draw_rect(ax, b)
# draw_text(ax, b[:2], cats[c], sz=16)
# def draw_idx(i):
# im_a = trn_anno[i]
# # im = open_image(IMG_PATH/trn_fns[i])
# im = Image.open(IMG_PATH/trn_fns[i]).convert('RGB')
# draw_im(im, im_a)
def intersect(self,box_a, box_b):
max_xy = torch.min(box_a[:, None, 2:], box_b[None, :, 2:])
min_xy = torch.max(box_a[:, None, :2], box_b[None, :, :2])
inter = torch.clamp((max_xy - min_xy), min=0)
return inter[:, :, 0] * inter[:, :, 1]
def box_sz(self,b): return ((b[:, 2]-b[:, 0]) * (b[:, 3]-b[:, 1]))
def jaccard(self,box_a, box_b):
inter = self.intersect(box_a, box_b)
union = self.box_sz(box_a).unsqueeze(1) + self.box_sz(box_b).unsqueeze(0) - inter
return inter / union
    def get_y(self, bbox, clas):
        """Normalize target boxes to [0,1] and drop zero-area padding rows."""
        bbox = bbox.view(-1, 4)/self.image_size[0]
        # keep rows whose width (corner format: col 2 minus col 0) is positive
        bb_keep = ((bbox[:, 2]-bbox[:, 0]) > 0).nonzero()[:, 0]
        return bbox[bb_keep], clas[bb_keep]
    def actn_to_bb(self, actn):
        """Map raw head activations to anchor-relative corner boxes in [0,1] coords."""
        actn_bbs = torch.tanh(actn)  # squash offsets into (-1, 1)
        # print(self.grid_sizes.size())
        # print(self.anchors[:,:2].size())
        # center may shift at most half a grid cell; size is a multiplicative tweak
        actn_centers = (actn_bbs[:, :2]/2 * self.grid_sizes) + self.anchors[:, :2]
        actn_hw = (actn_bbs[:, 2:]/2+1) * self.anchors[:, 2:]
        return hw2corners(actn_centers, actn_hw)
    def map_to_ground_truth(self, overlaps, print_it=False):
        """Assign each anchor a ground-truth box from an (n_gt, n_anchor) IoU matrix.

        Returns per-anchor best overlap and gt index; every gt box is forced
        onto its best-matching anchor by giving that pair an overlap of 1.99.
        """
        prior_overlap, prior_idx = overlaps.max(1)  # best anchor per gt box
        # if print_it: print(prior_overlap)
        gt_overlap, gt_idx = overlaps.max(0)  # best gt box per anchor
        gt_overlap[prior_idx] = 1.99  # force-match each gt to its best anchor
        for i, o in enumerate(prior_idx):
            gt_idx[o] = i
        return gt_overlap, gt_idx
    def ssd_1_loss(self, b_c, b_bb, bbox, clas, print_it=False):
        """SSD loss for one image: L1 localization + focal classification.

        b_c / b_bb are the head's class and box activations; bbox / clas are
        the (padded) ground-truth boxes and labels.
        """
        # corner-format anchors (recomputed here rather than using self.anchor_cnr)
        anchor_cnr = hw2corners(self.anchors[:, :2], self.anchors[:, 2:])
        bbox, clas = self.get_y(bbox, clas)  # drop padding, normalize to [0,1]
        a_ic = self.actn_to_bb(b_bb)
        overlaps = self.jaccard(bbox.data, anchor_cnr.data)
        gt_overlap, gt_idx = self.map_to_ground_truth(overlaps, print_it)
        gt_clas = clas[gt_idx]
        pos = gt_overlap > 0.4  # anchors matched to a gt box
        pos_idx = torch.nonzero(pos)[:, 0]
        # NOTE(review): `1-pos` assumes a byte mask (torch < 1.2); on newer
        # torch the comparison yields a bool tensor and this raises -- the
        # modern equivalent is `~pos`. TODO confirm the targeted torch version.
        gt_clas[1-pos] = len(self.class_names)  # unmatched anchors -> background class
        gt_bbox = bbox[gt_idx]
        loc_loss = ((a_ic[pos_idx] - gt_bbox[pos_idx]).abs()).mean()
        clas_loss = self.loss_f(b_c, gt_clas)
        return loc_loss, clas_loss
    def ssd_loss(self, pred, targ, print_it=False):
        """Total SSD loss over a batch: sum of per-image localization + class losses."""
        lcs, lls = 0., 0.
        for b_c, b_bb, bbox, clas in zip(*pred, *targ):
            loc_loss, clas_loss = self.ssd_1_loss(b_c, b_bb, bbox, clas)
            lls += loc_loss
            lcs += clas_loss
        # NOTE(review): `.data[0]` is the torch<=0.3 idiom; on modern torch
        # this raises (use `.item()`). TODO confirm the targeted torch version.
        if print_it: print(f'loc: {lls.data[0]}, clas: {lcs.data[0]}')
        return lls+lcs
    def set_loss(self, loss):
        """Replace the classification loss module used by ssd_1_loss."""
        self.loss_f = loss
# def dai_plot_results(self,thresh,loader,model):
# dai_x,dai_y = next(iter(loader))
# dai_x = dai_x.to(self.device)
# dai_y = [torch.tensor(l).to(self.device) for l in dai_y]
# dai_batch = model(dai_x)
# dai_b_clas,dai_b_bb = dai_batch
# dai_x = dai_x.cpu()
# dai_y = [torch.tensor(l).cpu() for l in dai_y]
# fig, axes = plt.subplots(3, 4, figsize=(16, 12))
# for idx,ax in enumerate(axes.flat):
# ima = dai.denorm_img(dai_x[idx])
# bbox,clas = self.get_y(dai_y[0][idx], dai_y[1][idx])
# a_ic = self.actn_to_bb(dai_b_bb[idx])
# clas_pr, clas_ids = dai_b_clas[idx].max(1)
# clas_pr = clas_pr.sigmoid()
# self.show_objects(ax, ima, a_ic, clas_ids, clas_pr, clas_pr.max().data[0]*thresh)
# plt.tight_layout()
def batch_loss(self,model,loader,crit):
data_batch = next(iter(loader))
dai_x,dai_y = data_batch[0],data_batch[1]
dai_x = dai_x.to(self.device)
dai_y = [torch.tensor(l).to(self.device) if type(l).__name__ == 'Tensor' else l.to(device) for l in dai_y]
dai_batch = model(dai_x)
return crit(dai_batch,dai_y)
    def show_objects(self, ax, y, num_plate, ima, bbox, clas, prs=None, thresh=0.4, ocr_net=None, ocr_dp=None):
        """Scale normalized boxes to pixels, convert tensors to numpy, and
        delegate drawing to show_objects_."""
        return self.show_objects_(ax, y, num_plate, ima, ((bbox*self.image_size[0]).long()).numpy(),
                                  (clas).numpy(), (prs).numpy() if prs is not None else None, thresh, ocr_net=ocr_net, ocr_dp=ocr_dp)
def show_objects_(self, ax, y, num_plate, im, bbox, clas=None, prs=None, thresh=0.3,ocr_net=None,ocr_dp=None):
# ocr_net = data_processing.load_obj('/home/farhan/hamza/Object_Detection/best_lpr_chars_net.pkl')
# ocr_dp = data_processing.load_obj('/home/farhan/hamza/lpr_chars/DP_lpr_chars.pkl')
bb = [bb_hw(o) for o in bbox.reshape(-1,4)]
# print(bb)
if prs is None: prs = [None]*len(bb)
if clas is None: clas = [None]*len(bb)
ax = img_grid(im, ax=ax)
for i,(b,c,pr) in enumerate(zip(bb, clas, prs)):
if((b[2]>0) and (pr is None or pr > thresh)):
draw_rect(ax, b, color=self.colr_list[i % self.num_colr])
txt = f'{i}: '
if c is not None: txt += ('bg' if c==len(self.class_names) else self.class_names[c])
if pr is not None: txt += f' {pr:.2f}'
draw_text(ax, b[:2], txt, color=self.colr_list[i % self.num_colr])
resize_w = 512
resize_h = 512
im_resized = cv2.resize(im,(resize_w,resize_h))
im_r,im_c = im.shape[0],im.shape[1]
row_scale = resize_h/im_r
col_scale = resize_w/im_c
b = hw_bb(b)
b[0] = int(np.round(b[0]*col_scale))
b[1] = int(np.round(b[1]*row_scale))
b[2] = int(np.round(b[2]*col_scale))
b[3] = int(np.round(b[3]*row_scale))
b = bb_hw(b)
margin = 12
if b[1] >= 0 and b[0] >= 0:
try:
if b[1] == 0 or b[0] == 0:
im2 = im_resized[b[1]:b[1]+b[3],b[0]:b[0]+b[2]]
else:
im2 = im_resized[b[1]-margin:b[1]+b[3]+margin,b[0]-margin:b[0]+b[2]+margin]
plt.imsave('carlp.png',im2)
except:
im2 = im_resized[b[1]:b[1]+b[3],b[0]:b[0]+b[2]]
plt.imsave('carlp.png',im2)
# print(im2.shape)
chars = get_lp_chars('carlp.png',size = 150,char_width = | |
<reponame>Mynti207/TimeDB
from timeseries import TimeSeries
import numpy as np
from scipy.stats import norm
from tsdb import *
import time
def identity(x):
    """Return *x* unchanged; conversion for schema fields needing no coercion."""
    # PEP 8: a named def is preferred over assigning a lambda.
    return x


# Database schema: maps each field name to its conversion function and an
# index flag (None = not indexed, 1 = indexed).
schema = {
  'pk': {'convert': identity, 'index': None},
  'ts': {'convert': identity, 'index': None},
  'order': {'convert': int, 'index': 1},
  'blarg': {'convert': int, 'index': 1},
  'useless': {'convert': identity, 'index': None},
  'mean': {'convert': float, 'index': 1},
  'std': {'convert': float, 'index': 1},
  'vp': {'convert': bool, 'index': 1},
  'deleted': {'convert': bool, 'index': 1}
}
def tsmaker(m, s, j):
    '''
    Helper function: randomly generates a time series for testing.

    Parameters
    ----------
    m : float
        Mean value for generating time series data
    s : float
        Standard deviation value for generating time series data
    j : float
        Quantifies the "jitter" to add to the time series data

    Returns
    -------
    A time series and associated meta data.
    '''
    # randomly drawn metadata fields
    meta = {
        'order': int(np.random.choice(
            [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5])),
        'blarg': int(np.random.choice([1, 2])),
        'vp': False,  # vantage point indicator starts out negative
    }
    # gaussian bump sampled on [0, 1) plus random jitter
    t = np.arange(0.0, 1.0, 0.01)
    v = norm.pdf(t, m, s) + j * np.random.randn(100)
    return meta, TimeSeries(t, v)
def test_server():
########################################
#
# set up
#
########################################
# initialize database
db = DictDB(schema, 'pk')
# initialize server
server = TSDBServer(db)
assert server.db == db
assert server.port == 9999
# initialize protocol
protocol = TSDBProtocol(server)
assert protocol.server == server
# parameters for testing
num_ts = 25
num_vps = 5
########################################
#
# create dummy data for testing
#
########################################
# a manageable number of test time series
mus = np.random.uniform(low=0.0, high=1.0, size=num_ts)
sigs = np.random.uniform(low=0.05, high=0.4, size=num_ts)
jits = np.random.uniform(low=0.05, high=0.2, size=num_ts)
# initialize dictionaries for time series and their metadata
tsdict = {}
metadict = {}
# fill dictionaries with randomly generated entries for database
for i, m, s, j in zip(range(num_ts), mus, sigs, jits):
meta, tsrs = tsmaker(m, s, j) # generate data
pk = "ts-{}".format(i) # generate primary key
tsdict[pk] = tsrs # store time series data
metadict[pk] = meta # store metadata
# for testing later on
ts_keys = sorted(tsdict.keys())
########################################
#
# for all tests below:
# - package the operation
# - test that this is packaged as expected
# - run the operation
# - unpack the results of running the operation
# - test that the return values are as expected
#
########################################
########################################
#
# test time series insert
#
########################################
for k in tsdict:
# package the operation
op = {'op': 'insert_ts', 'pk': k, 'ts': tsdict[k]}
# test that this is packaged as expected
assert op == TSDBOp_InsertTS(k, tsdict[k])
# run operation
result = protocol._insert_ts(op)
# unpack results
status, payload = result['status'], result['payload']
# test that return values are as expected
assert status == TSDBStatus.OK
assert payload is None
idx = np.random.choice(list(tsdict.keys()))
# try to insert a duplicate primary key
op = {'op': 'insert_ts', 'pk': idx, 'ts': tsdict[idx]}
# test that this is packaged as expected
assert op == TSDBOp_InsertTS(idx, tsdict[idx])
# run operation
result = protocol._insert_ts(op)
# unpack results
status, payload = result['status'], result['payload']
# test that return values are as expected
assert status == TSDBStatus.INVALID_KEY
assert payload is None
########################################
#
# test time series deletion
#
########################################
idx = np.random.choice(list(tsdict.keys()))
# delete a valid time series
# package the operation
op = {'op': 'delete_ts', 'pk': idx}
# test that this is packaged as expected
assert op == TSDBOp_DeleteTS(idx)
# run operation
result = protocol._delete_ts(op)
# unpack results
status, payload = result['status'], result['payload']
# test that return values are as expected
assert status == TSDBStatus.OK
assert payload is None
# check that it isn't present any more
# package the operation
op = {'op': 'select', 'md': {'pk': idx}, 'fields': None,
'additional': None}
# test that this is packaged as expected
assert op == TSDBOp_Select({'pk': idx}, None, None)
# run operation
result = protocol._select(op)
# unpack results
status, payload = result['status'], result['payload']
# test that return values are as expected
assert status == TSDBStatus.OK
assert len(payload) == 0
# add it back in
# package the operation
op = {'op': 'insert_ts', 'pk': idx, 'ts': tsdict[idx]}
# test that this is packaged as expected
assert op == TSDBOp_InsertTS(idx, tsdict[idx])
# run operation
result = protocol._insert_ts(op)
# unpack results
status, payload = result['status'], result['payload']
# test that return values are as expected
assert status == TSDBStatus.OK
assert payload is None
# check that it's present now
# package the operation
op = {'op': 'select', 'md': {'pk': idx}, 'fields': None,
'additional': None}
# test that this is packaged as expected
assert op == TSDBOp_Select({'pk': idx}, None, None)
# run operation
result = protocol._select(op)
# unpack results
status, payload = result['status'], result['payload']
# test that return values are as expected
assert status == TSDBStatus.OK
assert len(payload) == 1
# delete an invalid time series
# package the operation
op = {'op': 'delete_ts', 'pk': 'mistake'}
# test that this is packaged as expected
assert op == TSDBOp_DeleteTS('mistake')
# run operation
result = protocol._delete_ts(op)
# unpack results
status, payload = result['status'], result['payload']
# test that return values are as expected
assert status == TSDBStatus.INVALID_KEY
assert payload is None
########################################
#
# test metadata upsert
#
########################################
for k in metadict:
# package the operation
op = {'op': 'upsert_meta', 'pk': k, 'md': metadict[k]}
# test that this is packaged as expected
assert op == TSDBOp_UpsertMeta(k, metadict[k])
# run operation
result = protocol._upsert_meta(op)
# unpack results
status, payload = result['status'], result['payload']
# test that return values are as expected
assert status == TSDBStatus.OK
assert payload is None
########################################
#
# test select operations
#
########################################
# select all database entries; no metadata fields
# package the operation
op = {'op': 'select', 'md': {}, 'fields': None, 'additional': None}
# test that this is packaged as expected
assert op == TSDBOp_Select({}, None, None)
# run operation
result = protocol._select(op)
# unpack results
status, payload = result['status'], result['payload']
# test that return values are as expected
assert status == TSDBStatus.OK
if len(payload) > 0:
assert list(payload[list(payload.keys())[0]].keys()) == []
assert sorted(payload.keys()) == ts_keys
# select all database entries; no metadata fields; sort by primary key
# package the operation
op = {'op': 'select', 'md': {}, 'fields': None,
'additional': {'sort_by': '+pk'}}
# test that this is packaged as expected
assert op == TSDBOp_Select({}, None, {'sort_by': '+pk'})
# run operation
result = protocol._select(op)
# unpack results
status, payload = result['status'], result['payload']
# test that return values are as expected
assert status == TSDBStatus.OK
if len(payload) > 0:
assert list(payload[list(payload.keys())[0]].keys()) == []
assert list(payload.keys()) == ts_keys
# select all database entries; all metadata fields
# package the operation
op = {'op': 'select', 'md': {}, 'fields': [], 'additional': None}
# test that this is packaged as expected
assert op == TSDBOp_Select({}, [], None)
# run operation
result = protocol._select(op)
# unpack results
status, payload = result['status'], result['payload']
# test that return values are as expected
assert status == TSDBStatus.OK
if len(payload) > 0:
assert (sorted(list(payload[list(payload.keys())[0]].keys())) ==
['blarg', 'order', 'pk', 'vp'])
assert sorted(payload.keys()) == ts_keys
# select all database entries; all invalid metadata fields
# package the operation
op = {'op': 'select', 'md': {}, 'fields': ['wrong', 'oops'],
'additional': None}
# test that this is packaged as expected
assert op == TSDBOp_Select({}, ['wrong', 'oops'], None)
# run operation
result = protocol._select(op)
# unpack results
status, payload = result['status'], result['payload']
# test that return values are as expected
assert status == TSDBStatus.OK
if len(payload) > 0:
assert sorted(list(payload[list(payload.keys())[0]].keys())) == []
assert sorted(payload.keys()) == ts_keys
# select all database entries; some invalid metadata fields
# package the operation
op = {'op': 'select', 'md': {}, 'fields': ['not_there', 'blarg'],
'additional': None}
# test that this is packaged as expected
assert op == TSDBOp_Select({}, ['not_there', 'blarg'], None)
# run operation
result = protocol._select(op)
# unpack results
status, payload = result['status'], result['payload']
# test that return | |
: [u'g'] ,
u'㗛' : [u'x'] ,
u'譤' : [u'j'] ,
u'惩' : [u'c'] ,
u'巫' : [u'w'] ,
u'汶' : [u'm', u'w'] ,
u'裹' : [u'g'] ,
u'䗻' : [u'j'] ,
u'㰁' : [u'l'] ,
u'钆' : [u'q', u'g'] ,
u'冈' : [u'g'] ,
u'欏' : [u'l'] ,
u'禘' : [u'd'] ,
u'㪚' : [u's'] ,
u'錟' : [u'y', u'x', u't'] ,
u'䰡' : [u'c'] ,
u'憨' : [u'h'] ,
u'琱' : [u'd'] ,
u'覸' : [u'x', u'j'] ,
u'䪺' : [u'g'] ,
u'鱁' : [u'z'] ,
u'奃' : [u'd'] ,
u'狊' : [u'j'] ,
u'葑' : [u'f'] ,
u'䅓' : [u'q', u'j'] ,
u'髚' : [u'q'] ,
u'埜' : [u'y'] ,
u'楣' : [u'm'] ,
u'苪' : [u'b'] ,
u'翬' : [u'h'] ,
u'酳' : [u'y'] ,
u'創' : [u'c'] ,
u'柼' : [u'y'] ,
u'㪃' : [u'k'] ,
u'圆' : [u'y'] ,
u'玉' : [u'y'] ,
u'谌' : [u'c'] ,
u'编' : [u'b'] ,
u'鮙' : [u't'] ,
u'朦' : [u'm'] ,
u'莩' : [u'p', u'f'] ,
u'㶭' : [u'y', u'j'] ,
u'嘰' : [u'j'] ,
u'犳' : [u'c'] ,
u'輶' : [u'y'] ,
u'繀' : [u's'] ,
u'髃' : [u'y'] ,
u'晐' : [u'g'] ,
u'苓' : [u'l'] ,
u'㳗' : [u'c'] ,
u'奚' : [u'x'] ,
u'痝' : [u'm'] ,
u'蹠' : [u'z'] ,
u'䅪' : [u't'] ,
u'鷭' : [u'f'] ,
u'䳷' : [u'c'] ,
u'楺' : [u'r'] ,
u'藽' : [u'c'] ,
u'㤅' : [u'a'] ,
u'墄' : [u'q'] ,
u'醊' : [u'c', u'z'] ,
u'䂔' : [u'x'] ,
u'阛' : [u'h'] ,
u'䤥' : [u'g'] ,
u'梤' : [u'f'] ,
u'㠯' : [u'y'] ,
u'宮' : [u'g'] ,
u'焵' : [u'g'] ,
u'邴' : [u'b'] ,
u'䎾' : [u'g'] ,
u'饅' : [u'm'] ,
u'䡏' : [u'h'] ,
u'毎' : [u'm'] ,
u'腕' : [u'w'] ,
u'㭙' : [u's', u'z'] ,
u'嫘' : [u'l'] ,
u'灟' : [u'z'] ,
u'鏞' : [u'y'] ,
u'䋨' : [u'f'] ,
u'顯' : [u'x'] ,
u'㗲' : [u'h', u'x'] ,
u'䭹' : [u'a'] ,
u'櫸' : [u'j'] ,
u'聿' : [u'y'] ,
u'錈' : [u'j'] ,
u'倊' : [u'z'] ,
u'沍' : [u'h'] ,
u'砚' : [u'y'] ,
u'钝' : [u'd'] ,
u'㔜' : [u'b'] ,
u'冟' : [u's'] ,
u'怪' : [u'g'] ,
u'崬' : [u'd'] ,
u'禯' : [u'n'] ,
u'蠺' : [u'c'] ,
u'䔼' : [u's'] ,
u'憿' : [u'j'] ,
u'廁' : [u'c'] ,
u'觏' : [u'g'] ,
u'䛑' : [u'm'] ,
u'镜' : [u'j'] ,
u'剞' : [u'j'] ,
u'满' : [u'm'] ,
u'穮' : [u'b'] ,
u'雱' : [u'p'] ,
u'㝰' : [u'm'] ,
u'右' : [u'y'] ,
u'找' : [u'z'] ,
u'往' : [u'w'] ,
u'甇' : [u'y'] ,
u'誎' : [u's'] ,
u'䞐' : [u'c', u's'] ,
u'鴗' : [u'l'] ,
u'娙' : [u'x'] ,
u'澠' : [u's', u'm'] ,
u'蔧' : [u'h', u's'] ,
u'㼫' : [u'h'] ,
u'咲' : [u'x'] ,
u'樹' : [u's'] ,
u'糂' : [u's'] ,
u'㧄' : [u'q'] ,
u'鉉' : [u'x'] ,
u'佋' : [u's'] ,
u'擒' : [u'q'] ,
u'睛' : [u'j'] ,
u'㑝' : [u'l'] ,
u'賢' : [u'x'] ,
u'䧤' : [u'p'] ,
u'齫' : [u'k'] ,
u'屭' : [u'x'] ,
u'燴' : [u'h'] ,
u'蝻' : [u'a', u'n'] ,
u'䑽' : [u'd', u't'] ,
u'誇' : [u'k'] ,
u'甎' : [u'z'] ,
u'妑' : [u'p'] ,
u'梛' : [u'n'] ,
u'䴞' : [u'd'] ,
u'㞥' : [u'c'] ,
u'阤' : [u'y', u'z', u't'] ,
u'䂫' : [u'h'] ,
u'覱' : [u'z'] ,
u'琸' : [u'z'] ,
u'墻' : [u'q'] ,
u'柅' : [u'n'] ,
u'䱈' : [u'q', u'y'] ,
u'㛏' : [u'q', u'o'] ,
u'镎' : [u'n'] ,
u'翕' : [u'x'] ,
u'裛' : [u'y'] ,
u'獢' : [u'x'] ,
u'埥' : [u'c'] ,
u'曯' : [u'c'] ,
u'䭲' : [u'y', u'n'] ,
u'鑸' : [u'l'] ,
u'绿' : [u'l'] ,
u'䐃' : [u'j'] ,
u'贉' : [u't', u'd'] ,
u'犌' : [u'j'] ,
u'尓' : [u'e'] ,
u'欝' : [u'y'] ,
u'䪜' : [u'c'] ,
u'鎢' : [u'w'] ,
u'㨧' : [u'b'] ,
u'谳' : [u'y'] ,
u'熶' : [u'c'] ,
u'嬽' : [u'y'] ,
u'橇' : [u'q', u'c'] ,
u'䧆' : [u'q', u'h', u'k'] ,
u'鋌' : [u't', u'd'] ,
u'譝' : [u's'] ,
u'烠' : [u'h'] ,
u'婧' : [u'j'] ,
u'㿪' : [u'h'] ,
u'楱' : [u'c', u'z'] ,
u'䣰' : [u'y', u'j'] ,
u'釶' : [u's'] ,
u'㡻' : [u'l'] ,
u'薃' : [u'h'] ,
u'戂' : [u'm'] ,
u'䚅' : [u'l'] ,
u'鶓' : [u'm'] ,
u'稒' : [u'g'] ,
u'底' : [u'd'] ,
u'掗' : [u'y'] ,
u'鄠' : [u'h'] ,
u'刢' : [u'l'] ,
u'箧' : [u'q'] ,
u'漴' : [u'c'] ,
u'厷' : [u'g'] ,
u'艂' : [u'f'] ,
u'泉' : [u'q'] ,
u'驒' : [u't'] ,
u'彔' : [u'l'] ,
u'菗' : [u'c'] ,
u'鯧' : [u'c'] ,
u'硦' : [u'l'] ,
u'峩' : [u'e'] ,
u'㵨' : [u'p', u'b'] ,
u'懫' : [u'z'] ,
u'轴' : [u'z'] ,
u'偶' : [u'o'] ,
u'移' : [u'y', u'c'] ,
u'㫽' : [u'l'] ,
u'消' : [u'x'] ,
u'鈍' : [u'd'] ,
u'圏' : [u'q'] ,
u'肖' : [u'x'] ,
u'䖘' : [u't'] ,
u'瀡' : [u's'] ,
u'㔣' : [u'l'] ,
u'颦' : [u'p'] ,
u'嶨' : [u'x'] ,
u'蜯' : [u'b'] ,
u'䠱' : [u's', u'z'] ,
u'皺' : [u'z'] ,
u'鼿' : [u'w'] ,
u'敃' : [u'm'] ,
u'跈' : [u'j', u'n'] ,
u'今' : [u'j'] ,
u'絓' : [u'g'] ,
u'遡' : [u's'] ,
u'啣' : [u'x'] ,
u'䏬' : [u'm'] ,
u'湵' : [u'y'] ,
u'雺' : [u'm'] ,
u'导' : [u'd'] ,
u'欆' : [u's'] ,
u'侉' : [u'k'] ,
u'钏' : [u'c'] ,
u'㨐' : [u'p', u'b'] ,
u'䌖' : [u'j'] ,
u'蠜' : [u'f'] ,
u'皣' : [u'y'] ,
u'嬦' : [u'c'] ,
u'亳' : [u'b'] ,
u'鞹' : [u'k'] ,
u'㔺' : [u's'] ,
u'䉀' : [u's'] ,
u'譆' : [u'x'] ,
u'燍' : [u's'] ,
u'婐' : [u'e', u'w'] ,
u'敚' : [u'd'] ,
u'䧝' : [u'y', u'z', u'd'] ,
u'難' : [u'n'] ,
u'㑤' : [u'm'] ,
u'絪' : [u'y'] ,
u'詰' : [u'j'] ,
u'烷' : [u'w'] ,
u'啺' : [u't'] ,
u'茁' : [u'z'] ,
u'撄' : [u'y'] ,
u'刋' : [u'q', u'k'] ,
u'㞎' : [u'p', u'b', u'f'] ,
u'鬑' : [u'l'] ,
u'粔' : [u'j'] ,
u'薚' : [u't'] ,
u'甥' : [u's'] ,
u'咤' : [u'c', u'z'] ,
u'舫' : [u'f'] ,
u'鶪' : [u'j'] ,
u'枮' : [u'x'] ,
u'䴵' : [u'b', u'z'] ,
u'騻' : [u's'] ,
u'羾' : [u'g'] ,
u'蓄' : [u'x'] ,
u'瑏' : [u'c'] ,
u'城' : [u'c'] ,
u'鳔' : [u'b'] ,
u'曘' : [u'r'] ,
u'䱟' : [u'j'] ,
u'镥' : [u'l'] ,
u'绨' : [u't'] ,
u'蟮' : [u's'] ,
u'睹' : [u'd'] ,
u'囸' : [u'r'] ,
u'鎋' : [u'x'] ,
u'氊' : [u'z'] ,
u'傍' : [u'p', u'b'] ,
u'蜘' : [u'z'] ,
u'䐚' : [u'j'] ,
u'涟' : [u'l'] ,
u'鼨' : [u'z', u't'] ,
u'尪' : [u'w'] ,
u'肭' : [u'n'] ,
u'愬' : [u's'] ,
u'䖯' : [u'k', u'g'] ,
u'颽' : [u'k'] ,
u'礼' : [u'l'] ,
u'㸾' : [u'r'] ,
u'拁' : [u'j'] ,
u'豊' : [u'l', u'f'] ,
u'兌' : [u'd'] ,
u'竑' : [u'h'] ,
u'㿓' : [u'j'] ,
u'跟' : [u'g'] ,
u'湞' : [u'c', u'z'] ,
u'勡' : [u'p'] ,
u'腬' : [u'r'] ,
u'䙮' : [u'g'] ,
u'濳' : [u'q'] ,
u'饼' : [u'b'] ,
u'幾' : [u'j'] ,
u'掀' : [u'x'] ,
u'萅' : [u'c'] ,
u'箐' : [u'q', u'j'] ,
u'㢒' : [u'c'] ,
u'鰕' : [u'x'] ,
u'昙' : [u't'] ,
u'躞' : [u'x'] ,
u'厠' : [u'c'] ,
u'縩' : [u'c'] ,
u'梲' : [u'z'] ,
u'鄷' : [u'f'] ,
u'嘹' : [u'l'] ,
u'菀' : [u'y', u'w'] ,
u'䃂' : [u'g'] ,
u'獋' : [u'h'] ,
u'壒' : [u'a'] ,
u'虙' : [u'f'] ,
u'䭛' : [u'd'] ,
u'痤' : [u'c'] ,
u'鹩' : [u'l'] ,
u'恭' : [u'g'] ,
u'裲' : [u'l'] ,
u'硽' : [u'y'] ,
u'㵿' : [u'x'] ,
u'䖁' : [u'y'] ,
u'躇' : [u'c'] ,
u'焎' : [u'x'] ,
u'嶑' : [u'x'] ,
u'沛' : [u'p'] ,
u'䤞' : [u'y'] ,
u'鈤' : [u'r'] ,
u'䒫' : [u'd'] ,
u'趱' : [u'z'] ,
u'瀸' : [u'j'] ,
u'岻' : [u'c'] ,
u'揅' : [u'y'] ,
u'䡈' : [u'j'] ,
u'酎' : [u'z'] ,
u'篕' : [u'h'] ,
u'賛' : [u'z'] ,
u'睢' : [u's'] ,
u'句' : [u'j', u'g'] ,
u'拯' : [u'z'] ,
u'佲' : [u'm'] ,
u'選' : [u'x', u's'] ,
u'竿' : [u'g'] ,
u'䀃' : [u't'] ,
u'褉' : [u'x'] ,
u'皌' : [u'm'] ,
u'堓' : [u'a'] ,
u'漝' : [u'x'] ,
u'亜' : [u'y'] ,
u'鞢' : [u'x'] ,
u'㸧' : [u'y', u'k'] ,
u'䜭' : [u'j'] ,
u'蠳' : [u'y'] ,
u'疶' : [u'x'] ,
u'弽' : [u's'] ,
u'湇' : [u'q'] ,
u'雌' : [u'c'] ,
u'㵑' : [u'h'] ,
u'䙗' : [u't'] ,
u'轝' : [u'y'] ,
u'瓠' : [u'h', u'g'] ,
u'幧' : [u'q'] ,
u'㯪' : [u'l'] ,
u'浱' : [u'c'] ,
u'䳰' : [u'b'] ,
u'闶' : [u'k'] ,
u'㱻' : [u'l'] ,
u'膃' : [u'w'] ,
u'昂' : [u'a'] ,
u'䊅' : [u'm'] ,
u'馓' : [u's'] ,
u'縒' : [u'c'] ,
u'媕' : [u'a'] ,
u'㬔' : [u'h'] ,
u'林' : [u'l'] ,
u'锠' : [u'c'] ,
u'嘢' : [u'y'] ,
u'羧' : [u's', u'z'] ,
u'㢩' : [u'd'] ,
u'誵' : [u'x'] ,
u'欴' : [u'l'] ,
u'垷' : [u'x'] ,
u'虂' : [u'l'] ,
u'䍄' : [u'd'] ,
u'棉' : [u'm'] ,
u'㗋' : [u'h'] ,
u'鹒' : [u'g'] ,
u'孔' : [u'k'] ,
u'蟗' : [u'q'] ,
u'摖' : [u'q'] ,
u'䃙' : [u'l'] ,
u'籦' : [u'z'] ,
u'壩' : [u'b'] ,
u'㥨' : [u'c', u's', u'w'] ,
u'旫' : [u't'] ,
u'譴' : [u'q'] ,
u'呶' : [u'n'] ,
u'緻' : [u'z'] ,
u'㻽' : [u's'] ,
u'榈' : [u'l'] ,
u'阍' : [u'h'] ,
u'匏' : [u'p'] ,
u'蒖' : | |
= nnn / nu_c
# define a mask for integration
m = (log(x) > self.__start) & (log(x) < self.__end)
# and for the maximum extension
m &= rrr <= self._r0
result = np.full(x.shape, 1e-80)
# synchrotron function
result[m] = exp(self.log_xF(log(x[m])))
# electron spectrum
# dN / dV d gamma
result[m] *= self._n_el(exp(ggg[m]), rrr[m], **self._parameters)
# integrate over gamma
if integration_mode == 'romb':
result = romb(result * exp(ggg), dx=np.diff(log_g)[0], axis=g_axis)
else:
result = simps(result * exp(ggg), ggg, axis=g_axis)
# pre factors: sqrt(3) * e^3 / mc^2 with B in G, see e.g. B&G 4.44
# this has then units Fr^3 s^2 B g-1 cm-2
# When you use Fr G s^2 / (cm g) = 1 you get
# units Fr^2 / cm and with Fr = cm^3/2 g^1/2 s^-1
# this becomes g cm^2 s^2 = erg = erg / Hz / s.
# The pre factor is then consistent with 18.36 in Longair Vol.2
# since he calculates in W and for B in Tesla.
result *= ((c.e.esu**3.) / (c.m_e.cgs * c.c.cgs**2.) * sqrt(3.)).value
# this is equal to 2.344355730864404e-22
# average over all pitch angles gives 2/3
result *= self._B(rr, **self._parameters) * sqrt(2.0/3.0)
# Together with electron spectrum, this has now units
# erg / Hz / s / cm^3, i.e. is the Volume emissivity
# Since emission is assumed to be isotropic, divide by 4 pi
# to get volume emissivity per solid angle
result /= 4. * pi
# returns value in unites erg/s/Hz/cm^3/sr
return result
    def interp_sync_init(self, r_min, r_max,
                         gmin=None,
                         gmax=None,
                         nu_steps=100,
                         g_steps=129,
                         r_steps=80,
                         integration_mode='simps'):
        """
        Initialize 2D interpolation of synchrotron emissivity j_nu in erg/s/Hz/cm^3/sr
        over frequency nu and radius for given electron spectrum in log - log space.
        Sets the self._j_sync_interp function pointer.
        Parameters
        ----------
        r_min: float
            minimum radius for interpolation
        r_max: float
            maximum radius for interpolation
        gmin: float or None
            minimum lorentz factor, passed through to j_sync
        gmax: float or None
            maximum lorentz factor, passed through to j_sync
        nu_steps: int
            number of interpolation grid points in frequency
        g_steps: int
            number of integration steps for gamma, passed through to j_sync
        r_steps: int
            number of interpolation grid points in radius
        integration_mode: str
            integration scheme used by j_sync ('simps' or 'romb')
        """
        # frequency grid points are logarithmic, radial grid points are linear
        log_nu_intp, log_nu_intp_steps = np.linspace(np.log(self._nu_sync_min),
                                                     np.log(self._nu_sync_max),
                                                     nu_steps, retstep=True)
        r_intp, r_intp_steps = np.linspace(r_min, r_max, r_steps, retstep=True)
        log_nn, rr = np.meshgrid(log_nu_intp, r_intp, indexing='ij')
        # evaluate the synchrotron emissivity on the full (log nu, r) grid
        j_sync = self.j_sync(np.exp(log_nn), rr,
                             gmin=gmin,
                             gmax=gmax,
                             g_steps=g_steps,
                             integration_mode=integration_mode)
        # interpolate log(j_sync) linearly (k=1 / kx=ky=1) over (log nu, r)
        if self._use_fast_interp:
            self._j_sync_interp_object = fast_interp2d([log_nu_intp[0], r_intp[0]],
                                                       [log_nu_intp[-1], r_intp[-1]],
                                                       [log_nu_intp_steps, r_intp_steps],
                                                       log(j_sync),
                                                       k=1,
                                                       p=[False, False],
                                                       c=[True, True],
                                                       e=[0, 0]
                                                       )
        else:
            self._j_sync_interp_object = RectBivariateSpline(log_nu_intp, r_intp, log(j_sync), kx=1, ky=1, s=0)
        # expose a uniform call signature regardless of the interpolation backend
        if self._use_fast_interp:
            self._j_sync_interp = lambda log_nu, r: self._j_sync_interp_object(log_nu, r)
        else:
            self._j_sync_interp = lambda log_nu, r: self._j_sync_interp_object(log_nu, r, grid=False)
def j_dust_nebula(self, nu, r):
"""
Return volume emissivity of grey body j_nu erg/s/cm^3/Hz/sr,
Parameters
----------
nu: array like
array with frequencies in Hz
r: array-like
distance from the nebula center in cm
Returns
-------
array with grey body flux in erg/s/cm^3/Hz/sr
"""
if len(nu.shape) == len(r.shape) == 1:
nn, rr = np.meshgrid(nu, r, indexing='ij')
elif len(nu.shape) > 1 and np.all(np.equal(nu.shape, r.shape)):
nn = nu
rr = r
else:
raise ValueError("nu and theta have inconsistent shapes")
# Get the emissivity
t0 = time.time()
result = self._j_dust(rr, nu, **self._parameters)
t1 = time.time()
self._logger.debug(f"Dust calculation in j_dust_nebula function took {t1 - t0:.3f}s")
# results in emissivity erg / s / Hz / cm^3 / sr
return result
def j_grey_body(self, nu, r):
"""
Return volume emissivity of grey body j_nu erg/s/cm^3/Hz/sr,
assumes dust component to scale as radial gaussian from nebula center
Deprecated, only kept for checks!
Parameters
----------
nu: array like
array with frequencies in Hz
r: array-like
distance from the nebula center in cm
Returns
-------
array with grey body flux in erg/s/cm^2/Hz/sr
"""
# TODO: remove
if len(nu.shape) == len(r.shape) == 1:
nn, rr = np.meshgrid(nu, r, indexing='ij')
elif len(nu.shape) > 1 and np.all(np.equal(nu.shape, r.shape)):
nn = nu
rr = r
else:
raise ValueError("nu and theta have inconsistent shapes")
# photons dens of black body in photons/eV/cm^3
t0 = time.time()
if self._use_fast_interp:
result = np.zeros(nn.flatten().size)
black_body_nb(result, nn.flatten() / eV2Hz, self._parameters['dust_T'])
result = result.reshape(nn.shape)
else:
result = black_body(nn / eV2Hz, self._parameters['dust_T'])
t1 = time.time()
self._logger.debug(f"Black body calculation in grey body function took {t1 - t0:.3f}s")
# change to dens in photon / Hz / cm^3, dn / d nu = dn / de * de / d nu = dn / de * h
result *= c.h.to('eV s').value
# multiply with energy to get energy density per Hz
result *= nn * c.h.to('erg s').value
# multiply with c / 4 pi to get energy flux in erg / s / cm^2 / Hz
result *= c.c.cgs.value / 4. / pi
# convert to dust luminosity in erg / s / Hz (4 pi cancels out when assuming isotropic emission)
result *= self._d * self._d * 4. * np.pi
# multiply with spatial dependence, norm is in units of 1/cm^3
result *= self._parameters['dust_norm']
# multiply with gaussian extension
sigma = tan(self._parameters['dust_extension'] * arcmin2rad) * self._d
t2 = time.time()
if self._dust_radial_dependence == 'gauss':
result *= np.exp(-rr ** 2. / 2. / sigma ** 2.)
elif self._dust_radial_dependence == 'shell':
rmin = tan(self._parameters['min_dust_extension'] * arcmin2rad) * self._d
mask = (rr <= sigma) & (rr >= rmin)
# divide by dust volume, i.e., normalization parameter dust_norm is unit less in this case
volume = 4. / 3. * np.pi * (sigma - rmin) ** 3.
result /= volume
result[~mask] = 0.
t3 = time.time()
self._logger.debug(f"extension calculation in grey body function took {t3 - t2:.3f}s")
# assume isotropic emission
result /= 4. * np.pi
# results in emissivity erg / s / Hz / cm^3 / sr
return result
def phot_dens(self, eps, r, r1_steps=33):
"""
Calculate photon number density of Crab nebula according to Hillas et al. (1998)
for the synchrotron and / or dust compoment
Parameters
----------
eps: array-like
n-dim array with energy of photons, in eV
r: array-like
angular offset from nebula center in deg
r1_steps: int
number of steps in radius for integration
Returns
-------
m x n-dim array with photon densities in photons / eV / cm^3
Notes
-----
See https://arxiv.org/pdf/1008.4524.pdf Eq. (A3)
"""
t0 = time.time()
r_max = np.max([r.max(), 1. * self._r0])
r_min = np.min([r.min(), 1e-5])
ee, xx, y, yy = self._get_integration_arrays(eps, r, r1_steps, r_max, r_min)
# photon emissivity
#j_nu = np.full(ee.shape, 1e-10, dtype=np.float32)
j_nu = np.full(ee.shape, 1e-80, dtype=np.float64)
t1 = time.time()
if self._ic_sync:
# for KC model: emissitivity is 0 for r < r0,
# change r1 integration array to accomodate for this
if 'r_shock' in self._parameters:
_, xx, y, yy = self._get_integration_arrays(eps, r, r1_steps, r_max, self._parameters['r_shock'])
# initialize synchrotron interpolation
if self._j_sync_interp is None:
self.interp_sync_init(r_min=r_min, r_max=r_max, r_steps=r1_steps)
# mask for frequencies
m = (log(ee * eV2Hz) > log(self._nu_sync_min)) & \
(log(ee * eV2Hz) < log(self._nu_sync_max))
# get synchrotron volume emissivity in units of erg/s/cm^3/Hz/sr
# from log-log interpolation
if self._use_fast_interp:
j_nu[m] = np.exp(self._j_sync_interp(
log(ee[m] * eV2Hz).flatten(),
(r_max * yy[m]).flatten()
#log(r_max * yy[m]).flatten()
)).reshape(ee[m].shape)
else:
j_nu[m] = np.exp(self._j_sync_interp(log(ee[m] * eV2Hz),
#log(r_max * yy[m]),
r_max * yy[m],
))
t2 = time.time()
# conversion to photon emissivity in photons/s/cm^3/eV/sr
# Now in units of photons/s/Hz/cm^3/sr
j_nu[m] /= ee[m] * eV2erg
# convert in units of photons/eV/cm^3/s/sr
j_nu[m] *= eV2Hz
# assume isotropic emissivity
# and convert to photons/eV/cm^3/s
j_nu[m] *= 4. * np.pi
# seed photon density at distance r now calculated
# through integration over r1, see Eq. 15
# in Atoyan & Aharonian
kernel = kernel_r(yy, xx)
kernel *= j_nu
# seed photon density in photons/eV/cm^3
self._logger.debug(kernel.shape)
self._logger.debug(f"Integrating using {self._integration_mode}")
if self._integration_mode == 'romb' or not len(yy.shape) == 5:
phot_dens = romb(kernel * yy, dx=np.diff(log(y))[0], axis=-1)
else:
phot_dens = self._integrate_5d(kernel * yy, log(yy))
phot_dens *= 0.5 * r_max / c.c.cgs.value
t3 = time.time()
self._logger.debug("phot_dens: time for interpolation of Sync: {0:.3f}s,"
" time for integration of SSC component {1:.3f}s, "
" time for filling arrays {2:.3f}s ".format(t2-t1, t3 - t2, t1 - t0))
| |
<reponame>rnk/llvm-premerge-checks<filename>scripts/patch_diff.py<gh_stars>0
#!/usr/bin/env python3
# Copyright 2019 Google LLC
#
# Licensed under the the Apache License v2.0 with LLVM Exceptions (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import datetime
import logging
import os
import re
import subprocess
import sys
from typing import List, Optional, Tuple, Dict
import backoff
from buildkite_utils import annotate, feedback_url, upload_file
import git
from phabricator import Phabricator
"""URL of upstream LLVM repository."""
LLVM_GITHUB_URL = 'ssh://git@github.com/llvm/llvm-project'
FORK_REMOTE_URL = 'ssh://git@github.com/llvm-premerge-tests/llvm-project'
"""How far back the script searches in the git history to find Revisions that
have already landed. """
APPLIED_SCAN_LIMIT = datetime.timedelta(days=90)
class ApplyPatch:
"""Apply a diff from Phabricator on local working copy.
This script is a rewrite of `arc patch` to accommodate for dependencies
that have already landed, but could not be identified by `arc patch`.
For a given diff_id, this class will get the dependencies listed on Phabricator.
For each dependency D it will check the diff history:
- if D has already landed, skip it.
- If D has not landed, it will download the patch for D and try to apply it locally.
Once this class has applied all dependencies, it will apply the original diff.
This script must be called from the root folder of a local checkout of
https://github.com/llvm/llvm-project or given a path to clone into.
"""
    def __init__(self, path: str, diff_id: int, token: str, url: str, git_hash: str,
                 phid: str, push_branch: bool = False):
        """Set up Phabricator access and a local llvm-project checkout.

        Clones the fork into *path* if it does not exist yet, points the
        'origin' remote at the fork, and changes the working directory to the
        checkout.

        Parameters
        ----------
        path: str
            local path of the llvm-project checkout (cloned if missing).
        diff_id: int
            Phabricator diff to apply.
        token: str
            Phabricator conduit API token.
        url: str
            Phabricator host URL ('/api/' is appended if missing).
        git_hash: str
            base git revision, or 'auto' to resolve from the diff chain.
        phid: str
            Phabricator PHID of the build.
        push_branch: bool
            if True, push the resulting branch to origin.
        """
        self.push_branch = push_branch  # type: bool
        self.conduit_token = token  # type: Optional[str]
        self.host = url  # type: Optional[str]
        self.diff_id = diff_id  # type: int
        self.phid = phid  # type: str
        # conduit endpoints live under /api/ on the Phabricator host
        if not self.host.endswith('/api/'):
            self.host += '/api/'
        self.phab = self.create_phab()
        self.base_revision = git_hash  # type: str
        self.branch_base_hexsha = ''
        self.apply_diff_counter = 0
        # remember where we were started; os.chdir below moves into the checkout
        self.build_dir = os.getcwd()
        self.revision_id = ''
        if not os.path.isdir(path):
            logging.info(f'{path} does not exist, cloning repository...')
            self.repo = git.Repo.clone_from(FORK_REMOTE_URL, path)
        else:
            logging.info('repository exist, will reuse')
            self.repo = git.Repo(path)  # type: git.Repo
        # make sure 'origin' points at the fork even for a pre-existing checkout
        self.repo.remote('origin').set_url(FORK_REMOTE_URL)
        os.chdir(path)
        logging.info(f'working dir {os.getcwd()}')
@property
def branch_name(self):
"""Name used for the git branch."""
return f'phab-diff-{self.diff_id}'
    def run(self):
        """try to apply the patch from phabricator

        Resolves the diff's dependency chain, picks a base commit, creates a
        branch and applies each missing diff in order.

        Returns
        -------
        int
            0 on success, 1 on any failure (also reported via annotate()).
        """
        try:
            diff = self.get_diff(self.diff_id)
            revision = self.get_revision(diff.revisionID)
            url = f"https://reviews.llvm.org/D{revision['id']}?id={diff['id']}"
            annotate(f"Patching changes [{url}]({url})", style='info', context='patch_diff')
            self.reset_repository()
            self.revision_id = revision['id']
            # dependencies come newest-first from Phabricator; apply oldest-first
            dependencies = self.get_dependencies(revision)
            dependencies.reverse()  # Now revisions will be from oldest to newest.
            missing, landed = self.classify_revisions(dependencies)
            if len(dependencies) > 0:
                logging.info('This diff depends on: {}'.format(revision_list_to_str(dependencies)))
                logging.info(' Already landed: {}'.format(revision_list_to_str(landed)))
                logging.info(' Will be applied: {}'.format(revision_list_to_str(missing)))
            # build the ordered application plan: missing dependencies, then this diff
            plan = []
            for r in missing:
                d = self.get_diff(r['diffs'][0])
                plan.append((r, d))
            plan.append((revision, diff))
            logging.info('Planning to apply in order:')
            for (r, d) in plan:
                logging.info(f"https://reviews.llvm.org/D{r['id']}?id={d['id']}")
            # Pick the newest known commit as a base for patches.
            base_commit = None
            for (r, d) in plan:
                c = self.find_commit(d['sourceControlBaseRevision'])
                if c is None:
                    logging.warning(f"D{r['id']}#{d['id']} commit {d['sourceControlBaseRevision']} does not exist")
                    continue
                if base_commit is None:
                    logging.info(f"D{r['id']}#{d['id']} commit {c.hexsha} exists")
                    base_commit = c
                elif c.committed_datetime > base_commit.committed_datetime:
                    logging.info(f"D{r['id']}#{d['id']} commit {c.hexsha} has a later commit date then"
                                 f"{base_commit.hexsha}")
                    base_commit = c
            # an explicit base revision from the command line overrides resolution
            if self.base_revision != 'auto':
                logging.info(f'Base revision "{self.base_revision}" is set by command argument. Will use '
                             f'instead of resolved "{base_commit}"')
                base_commit = self.find_commit(self.base_revision)
            if base_commit is None:
                # last resort: base the branch on current main
                base_commit = self.repo.heads['main'].commit
                annotate(f"Cannot find a base git revision. Will use current HEAD.",
                         style='warning', context='patch_diff')
            self.create_branch(base_commit)
            for (r, d) in plan:
                if not self.apply_diff(d, r):
                    return 1
            if self.push_branch:
                self.repo.git.push('--force', 'origin', self.branch_name)
                annotate(f"Created branch [{self.branch_name}]"
                         f"(https://github.com/llvm-premerge-tests/llvm-project/tree/{self.branch_name}).\n\n"
                         f"To checkout locally, run in your copy of llvm-project directory:\n\n"
                         "```shell\n"
                         "git remote add premerge <EMAIL>:llvm-premerge-tests/llvm-project.git #first time\n"
                         f"git fetch premerge {self.branch_name}\n"
                         f"git checkout -b {self.branch_name} --track premerge/{self.branch_name}\n"
                         "```",
                         style='success',
                         context='patch_diff')
                logging.info('Branch {} has been pushed'.format(self.branch_name))
            return 0
        except Exception as e:
            annotate(f":bk-status-failed: Unexpected error. Consider [creating a bug]({feedback_url()}).",
                     style='error', context='patch_diff')
            logging.error(f'exception: {e}')
            return 1
@backoff.on_exception(backoff.expo, Exception, max_tries=5, logger='', factor=3)
def reset_repository(self):
    """Update local git repo and origin.

    As origin is disjoint from upstream, it needs to be updated by this script.
    """
    logging.info('Syncing local, origin and upstream...')
    if 'upstream' not in self.repo.remotes:
        self.repo.create_remote('upstream', url=LLVM_GITHUB_URL)
    self.repo.remotes.upstream.fetch()
    # Drop local modifications and untracked files left by previous runs.
    self.repo.git.clean('-ffxdq')
    self.repo.git.reset('--hard')
    self.repo.git.fetch('--all')
    # Fresh clone: create a local 'main' that tracks origin's main.
    if self.find_commit('main') is None:
        origin = self.repo.remotes.origin
        self.repo.create_head('main', origin.refs.main)
        self.repo.heads.main.set_tracking_branch(origin.refs.main)
    self.repo.heads.main.checkout()
    self.repo.git.pull('origin', 'main')
    self.repo.git.pull('upstream', 'main')
    # Mirror upstream's main back to origin only when pushing is enabled.
    if self.push_branch:
        self.repo.git.push('origin', 'main')
@backoff.on_exception(backoff.expo, Exception, max_tries=5, logger='', factor=3)
def find_commit(self, rev):
    """Resolve *rev* to a git Commit object.

    :param rev: any revision spec git understands (sha, branch, tag, ...).
    :return: the Commit, or None when the revision cannot be resolved.
    """
    try:
        return self.repo.commit(rev)
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt /
        # SystemExit. Only ordinary lookup failures (BadName, ValueError,
        # ...) should be treated as "commit not found".
        return None
@backoff.on_exception(backoff.expo, Exception, max_tries=5, logger='', factor=3)
def create_branch(self, base_commit: git.Commit):
    """(Re)create the working branch at *base_commit* and check it out."""
    repo = self.repo
    # Drop a stale branch left over from a previous run, if any.
    if self.branch_name in repo.heads:
        repo.delete_head('--force', self.branch_name)
    logging.info(f'creating branch {self.branch_name} at {base_commit.hexsha}')
    branch = repo.create_head(self.branch_name, base_commit.hexsha)
    repo.head.reference = branch
    repo.head.reset(index=True, working_tree=True)
    base_hexsha = repo.head.commit.hexsha
    self.branch_base_hexsha = base_hexsha
    logging.info(f'Base branch revision is {base_hexsha}')
    annotate(f"Branch {self.branch_name} base revision is `{self.branch_base_hexsha}`.",
             style='info', context='patch_diff')
@backoff.on_exception(backoff.expo, Exception, max_tries=5, logger='', factor=3)
def commit(self, revision: Dict, diff: Dict):
    """Commit the current state and annotates with the revision info.

    :param revision: Phabricator revision dict (title / review id).
    :param diff: Phabricator diff dict (author name / email).
    """
    self.repo.git.add('-A')
    # Read author info with a fallback instead of setdefault(): the old
    # code silently wrote the 'unknown' fallbacks back into the caller's
    # *diff* dict as a side effect.
    author = git.Actor(name=diff.get('authorName', 'unknown'),
                       email=diff.get('authorEmail', 'unknown'))
    message = (f"{revision['title']}\n\n"
               f"Automated commit created by applying diff {self.diff_id}\n"
               f"\n"
               f"Phabricator-ID: {self.phid}\n"
               f"Review-ID: {diff_to_str(revision['id'])}\n")
    self.repo.index.commit(message=message, author=author)
@backoff.on_exception(backoff.expo, Exception, max_tries=5, logger='', factor=3)
def create_phab(self):
    """Create a Phabricator conduit client from the configured token and host."""
    phab = Phabricator(token=self.conduit_token, host=self.host)
    # Fetches the conduit method definitions so API calls can be made.
    phab.update_interfaces()
    return phab
@backoff.on_exception(backoff.expo, Exception, max_tries=5, logger='', factor=3)
def get_diff(self, diff_id: int):
    """Get a diff from Phabricator based on its diff id."""
    return self.phab.differential.getdiff(diff_id=diff_id)
@backoff.on_exception(backoff.expo, Exception, max_tries=5, logger='', factor=3)
def get_revision(self, revision_id: int):
    """Get a revision from Phabricator based on its revision id."""
    # query() returns a list; with a single id the first element is the one.
    return self.phab.differential.query(ids=[revision_id])[0]
@backoff.on_exception(backoff.expo, Exception, max_tries=5, logger='', factor=3)
def get_revisions(self, *, phids: List[str] = None):
    """Get a list of revisions from Phabricator based on their PH-IDs.

    :param phids: list of PHIDs; an empty list short-circuits to [].
    :raises ValueError: when called without a phids list at all.
    """
    if phids is None:
        # Programming error, distinct from a legitimately empty query.
        # (Old message referenced a stale name '_get_revisions'.)
        raise ValueError('get_revisions: phids is None')
    if not phids:
        # Handle an empty query locally. Otherwise the connection
        # will time out.
        return []
    return self.phab.differential.query(phids=phids)
def get_dependencies(self, revision: Dict) -> List[Dict]:
    """Recursively resolves dependencies of the given revision.

    They are listed in reverse chronological order - from most recent to
    least recent.
    """
    dependency_ids = revision['auxiliary']['phabricator:depends-on']
    collected: List[Dict] = []
    for dep in self.get_revisions(phids=dependency_ids):
        collected.append(dep)
        collected.extend(self.get_dependencies(dep))
    return collected
def apply_diff(self, diff: Dict, revision: Dict) -> bool:
    """Download and apply a diff to the local working copy.

    The raw patch is saved and uploaded as a build artifact so the
    annotation below can link to it.

    :return: True when the patch applied and was committed, False otherwise.
    """
    logging.info(f"Applying {diff['id']} for revision {revision['id']}...")
    patch = self.get_raw_diff(str(diff['id']))
    self.apply_diff_counter += 1
    patch_file = f"{self.apply_diff_counter}_{diff['id']}.patch"
    with open(os.path.join(self.build_dir, patch_file), 'wt') as f:
        f.write(patch)
    # For annotate to properly link this file it must exist before the upload.
    upload_file(self.build_dir, patch_file)
    logging.debug(f'raw patch:\n{patch}')
    # Run git with an argument list instead of `shell=True` and a command
    # string: no shell involved, same behavior, no word-splitting pitfalls.
    proc = subprocess.run(['git', 'apply', '-'], input=patch, text=True,
                          stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    if proc.returncode != 0:
        logging.info(proc.stdout)
        logging.error(proc.stderr)
        message = f":bk-status-failed: Failed to apply [{patch_file}](artifact://{patch_file}).\n\n"
        if self.revision_id != revision['id']:
            message += f"**Attention! D{revision['id']} is one of the dependencies of the target " \
                       f"revision D{self.revision_id}.**\n\n"
        message += (f"No testing is possible because we couldn't apply the patch.\n\n"
                    f"---\n\n"
                    '### Troubleshooting\n\n'
                    'More information is available in the log of of *create branch* step. '
                    f"All patches applied are available as *Artifacts*.\n\n"
                    f":bulb: The patch may not apply if it includes only the most recent of "
                    f"multiple local commits. Try to upload a patch with\n"
                    f"```shell\n"
                    f"arc diff `git merge-base HEAD origin` --update D{revision['id']}\n"
                    f"```\n\n"
                    f"to include all local changes.\n\n"
                    '---\n\n'
                    f"If this case could have been handled better, please [create a bug]({feedback_url()}).")
        annotate(message,
                 style='error',
                 context='patch_diff')
        return False
    self.commit(revision, diff)
    return True
@backoff.on_exception(backoff.expo, Exception, max_tries=5, logger='', factor=3)
def get_raw_diff(self, diff_id: str) -> str:
    """Fetch the raw unified-diff text for a Phabricator diff id."""
    return self.phab.differential.getrawdiff(diffID=diff_id).response
def get_landed_revisions(self):
    """Get list of landed revisions from current git branch.

    Yields the 'D12345'-style review ids found in commit messages, newest
    first, limited to commits younger than APPLIED_SCAN_LIMIT.
    """
    diff_regex = re.compile(
        r'^Differential Revision: https://reviews\.llvm\.org/(.*)$',
        re.MULTILINE)
    age_limit = datetime.datetime.now() - APPLIED_SCAN_LIMIT
    # FIXME: use revision that created the branch
    rev = 'main' if self.base_revision == 'auto' else self.base_revision
    earliest_commit = None
    for commit in self.repo.iter_commits(rev):
        if datetime.datetime.fromtimestamp(commit.committed_date) < age_limit:
            break
        earliest_commit = commit
        match = diff_regex.search(commit.message)
        if match is not None:
            yield match.group(1)
    if earliest_commit is not None:
        logging.info(f'Earliest analyzed commit in history {earliest_commit.hexsha}, '
                     f'{earliest_commit.committed_datetime}')
def classify_revisions(self, revisions: List[Dict]) -> Tuple[List[Dict], List[Dict]]:
"""Check which of the dependencies have already landed on the current branch."""
landed_deps = []
missing_deps = []
for d in revisions:
| |
= self.context.ldap_session.search
res = search(baseDN=dn)[0]
return ensure_text(res[1][self._key_attr][0])
except ldap.NO_SUCH_OBJECT:
raise KeyError(dn)
@override
@property
def ids(self):
    """All principal ids reachable through this container."""
    return [pid for pid in self]
@default
@locktree
def __delitem__(self, key):
    # Remove the principal both from the LDAP tree (via its node context)
    # and from the local principal cache.
    principal = self[key]
    context = principal.context
    del context.parent[context.name]
    del self.storage[key]
@default
@locktree
def __getitem__(self, key):
    """Return principal by id, creating and caching it on first access."""
    key = ensure_text(key)
    try:
        return self.storage[key]
    except KeyError:
        criteria = {self._key_attr: key}
        attrlist = ['rdn', self._key_attr]
        res = self.context.search(criteria=criteria, attrlist=attrlist)
        if not res:
            raise KeyError(key)
        if len(res) > 1:  # pragma: no cover
            msg = u'More than one principal with id "{0}" found.'
            logger.warning(msg.format(key))
        prdn = res[0][1]['rdn']
        # Entries scheduled for deletion still show up in LDAP search
        # results; treat them as already gone.
        if prdn in self.context._deleted_children:
            raise KeyError(key)
        dn = res[0][0]
        # Path of the entry relative to the container's DN (explode_dn
        # lists the leaf RDN first, hence the reversed() walk below).
        path = explode_dn(dn)[:len(self.context.DN.split(',')) * -1]
        context = self.context
        for rdn in reversed(path):
            context = context[rdn]
        principal = self.principal_factory(
            context,
            attraliaser=self.principal_attraliaser
        )
        principal.__name__ = key
        principal.__parent__ = self
        # Cache for subsequent lookups.
        self.storage[key] = principal
        return principal
@default
@locktree
def __iter__(self):
    """Yield principal ids: LDAP results minus pending deletions, plus
    not-yet-persisted additions."""
    attrlist = ['rdn', self._key_attr]
    for entry in self.context.batched_search(attrlist=attrlist):
        if entry[1]['rdn'] in self.context._deleted_children:
            continue
        yield ensure_text(entry[1][self._key_attr][0])
    for child_name in self.context._added_children:
        yield self.context[child_name].attrs[self._key_attr]
@default
@locktree
def __setitem__(self, name, value):
    """Register a new principal; refuses duplicates and foreign types."""
    if not isinstance(value, self.principal_factory):
        raise ValueError(u"Given value not instance of '{0}'".format(
            self.principal_factory.__name__
        ))
    # XXX: check if there is valid user context
    try:
        self[name]
    except KeyError:
        pass
    else:
        # Lookup succeeded, so the id is taken.
        raise KeyError(
            u"Principal with id '{0}' already exists.".format(name)
        )
    value.__name__ = name
    value.__parent__ = self
    self.storage[name] = value
@default
@property
def changed(self):
    # Delegates to the underlying LDAP node: True when there are
    # unpersisted modifications.
    return self.context.changed
@default
@locktree
def invalidate(self, key=None):
    """Invalidate LDAPPrincipals.

    Without *key*, flush the whole context and the principal cache; with
    *key*, drop only that cached principal (unknown keys are ignored).
    """
    if key is None:
        self.context.invalidate()
        self.storage.clear()
        return
    try:
        cached = self.storage[key]
        cached.context.parent.invalidate(cached.context.name)
        del self.storage[key]
    except KeyError:
        pass
@default
@locktree
def __call__(self):
    # Persist all pending changes by calling the underlying LDAP node.
    self.context()
@default
def _alias_dict(self, dct):
    """Translate raw LDAP attribute names in *dct* to their aliased names.

    Attributes without an alias mapping are dropped, matching the
    original behavior.
    """
    ret = dict()
    # One dict lookup per alias instead of the former nested O(n*m) scan
    # over all attraliaser entries times all dct entries.
    for key, val in six.iteritems(self.principal_attraliaser):
        if val in dct:
            ret[key] = dct[val]
    return ret
@default
def _unalias_list(self, lst):
    """Translate aliased attribute names in *lst* back to raw LDAP names."""
    return [self.principal_attraliaser.unalias(name) for name in lst]
@default
def _unalias_dict(self, dct):
    """Translate the keys of *dct* from aliased to raw LDAP names."""
    if dct is None:
        return None
    unalias = self.principal_attraliaser.unalias
    return {unalias(key): val for key, val in six.iteritems(dct)}
@default
def raw_search(self, criteria=None, attrlist=None,
               exact_match=False, or_search=False, or_keys=None,
               or_values=None, page_size=None, cookie=None):
    """Run a single (optionally paged) LDAP search.

    Criteria/attribute names are given in aliased form and translated to
    raw LDAP names before querying; result attributes are translated back.
    Without *attrlist* a list of principal ids is returned; with it, a list
    of ``(principal_id, attrs)`` tuples limited to the requested
    attributes. When paging is active, ``(results, cookie)`` is returned.
    """
    # Always fetch the key attribute so results can be keyed by it.
    search_attrlist = [self._key_attr]
    if attrlist is not None and self._key_attr not in attrlist:
        search_attrlist += attrlist
    try:
        results = self.context.search(
            criteria=self._unalias_dict(criteria),
            attrlist=self._unalias_list(search_attrlist),
            exact_match=exact_match,
            or_search=or_search,
            or_keys=or_keys,
            or_values=or_values,
            page_size=page_size,
            cookie=cookie
        )
    except ldap.NO_SUCH_OBJECT:  # pragma: no cover
        logger.debug("LDAPPrincipals.raw_search: ldap.NO_SUCH_OBJECT")
        return []
    # Paged searches return (results, cookie) instead of a bare list.
    if isinstance(results, tuple):
        results, cookie = results
    if attrlist is not None:
        _results = list()
        for _, att in results:
            try:
                principal_id = att[self._key_attr][0]
            except (KeyError, IndexError):
                # Entry lacks the key attribute; skip it.
                continue
            aliased = self._alias_dict(att)
            # Strip attributes the caller did not request.
            for key in list(aliased.keys()):
                if key not in attrlist:
                    del aliased[key]
            _results.append((principal_id, aliased))
        results = _results
    else:
        results = [att[self._key_attr][0] for _, att in results]
    if cookie is not None:
        return results, cookie
    return results
@default
def search(self, criteria=None, attrlist=None,
           exact_match=False, or_search=False):
    """Exhaust a paged raw_search and return the combined results."""
    collected = []
    cookie = ''
    while True:
        page, cookie = self.raw_search(
            criteria=criteria,
            attrlist=attrlist,
            exact_match=exact_match,
            or_search=or_search,
            page_size=self.context.ldap_session._props.page_size,
            cookie=cookie
        )
        collected.extend(page)
        # An empty cookie signals the final page.
        if not cookie:
            break
    return collected
@default
@locktree
def create(self, pid, **kw):
    """Create and return a new principal with id *pid*.

    Keyword arguments become principal attributes. The principal is
    registered in this container first and only then hooked up to the
    LDAP tree, so a duplicate id fails before touching LDAP.
    """
    # XXX: mechanism for defining a target container if scope is SUBTREE
    # create principal with LDAPNode as context
    context = LDAPNode()
    principal = self.principal_factory(
        context,
        attraliaser=self.principal_attraliaser
    )
    # ensure id on attributes
    kw['id'] = pid
    # avoid overwriting key attribute if given in kw
    if self._key_attr in kw:
        del kw[self._key_attr]
    # set additional attributes on principal
    for k, v in kw.items():
        principal.attrs[k] = v
    # set principal to self
    self[pid] = principal
    # if setting principal has been successful, hook up principal context
    # to ldap tree
    rdn = u'{0}={1}'.format(
        self._rdn_attr,
        principal.context.attrs[self._rdn_attr]
    )
    self.context[rdn] = context
    # return newly created principal
    return self[pid]
def calculate_expired(expiresUnit, expires):
    """Return bool whether expired.

    '99999', '-1' and falsy values mean "never expires".
    """
    if not expires or expires in ('99999', '-1'):
        return False
    # check expiration timestamp
    threshold = int(expires)
    # XXX: maybe configurable?
    # shadow account specific
    # if self.expiresAttr == 'shadowExpire':
    #     threshold += int(user.attrs.get('shadowInactive', '0'))
    now = time.time()
    if expiresUnit == EXPIRATION_DAYS:
        # number of days since epoch
        now /= 86400
    return now >= threshold
class LDAPUsers(LDAPPrincipals, UgmUsers):
    """Users container backed by LDAP.

    Adds login-based lookup, authentication (including account expiration
    handling) and password management on top of LDAPPrincipals.
    """

    principal_factory = default(User)

    @override
    @locktree
    def __delitem__(self, key):
        """Delete a user, cleaning up group memberships and roles first."""
        user = self[key]
        try:
            groups = user.groups
        except AttributeError:
            groups = list()
        for group in groups:
            del group[user.name]
        parent = self.parent
        # Only revoke roles when a roles backend is configured.
        if parent and parent.rcfg is not None:
            for role in user.roles:
                user.remove_role(role)
        context = user.context
        del context.parent[context.name]
        del self.storage[key]

    @default
    def id_for_login(self, login):
        """Map a login name to the principal id; fall back to the login."""
        criteria = {self._login_attr: login}
        attrlist = [self._key_attr]
        res = self.context.search(criteria=criteria, attrlist=attrlist)
        if not res:
            return ensure_text(login)
        if len(res) > 1:  # pragma: no cover
            msg = u'More than one principal with login "{0}" found.'
            logger.warning(msg.format(login))
        return ensure_text(res[0][1][self._key_attr][0])

    @default
    @debug
    def authenticate(self, login=None, pw=None, id=None):
        """Authenticate a user.

        :return: the user id on success, False on failure, or
            ACCOUNT_EXPIRED when the account has expired.
        """
        if id is not None:
            # bbb. deprecated usage
            login = id
        user_id = self.id_for_login(login)
        criteria = {self._key_attr: user_id}
        attrlist = ['dn']
        if self.expiresAttr:
            attrlist.append(self.expiresAttr)
        try:
            res = self.context.search(criteria=criteria, attrlist=attrlist)
        except ldap.NO_SUCH_OBJECT:  # pragma: no cover
            return False
        if not res:
            return False
        if len(res) > 1:  # pragma: no cover
            msg = u'More than one principal with login "{0}" found.'
            logger.warning(msg.format(user_id))
        if self.expiresAttr:
            expires = res[0][1].get(self.expiresAttr)
            expires = expires and expires[0] or None
            try:
                expired = calculate_expired(self.expiresUnit, expires)
            except ValueError:
                # unknown expires field data
                msg = (
                    u"Account expiration flag for user '{0}' "
                    u"contains unknown data"
                )
                # Bugfix: log the login actually used. The old code
                # formatted the deprecated ``id`` argument, which is None
                # unless the deprecated signature was used.
                logger.error(msg.format(login))
                return False
            if expired:
                return ACCOUNT_EXPIRED
        user_dn = res[0][1]['dn']
        session = self.context.ldap_session
        authenticated = session.authenticate(user_dn, pw)
        return authenticated and user_id or False

    @default
    @debug
    def passwd(self, id, oldpw, newpw):
        """Change a user's password; refresh samba hashes when present.

        :raises KeyError: when no principal matches *id*.
        """
        user_id = self.id_for_login(id)
        criteria = {self._key_attr: user_id}
        attrlist = ['dn']
        if self.expiresAttr:
            attrlist.append(self.expiresAttr)
        res = self.context.search(criteria=criteria, attrlist=attrlist)
        if not res:
            raise KeyError(id)
        if len(res) > 1:  # pragma: no cover
            msg = u'More than one principal with login "{0}" found.'
            logger.warning(msg.format(user_id))
        user_dn = res[0][1]['dn']
        self.context.ldap_session.passwd(user_dn, oldpw, newpw)
        object_classes = self.context.child_defaults['objectClass']
        user_node = self[user_id].context
        user_node.attrs.load()
        # Samba stores its own password hashes; keep them in sync.
        if 'sambaSamAccount' in object_classes:
            user_node.attrs['sambaNTPassword'] = sambaNTPassword(newpw)
            user_node.attrs['sambaLMPassword'] = sambaLMPassword(newpw)
            user_node()
@plumbing(
    LDAPUsers,
    NodeChildValidate,
    Nodespaces,
    Adopt,
    Attributes,
    Nodify,
)
class Users(object):
    """Public users node: LDAPUsers behavior assembled with node plumbing parts."""
    pass
def member_format(object_classes):
    """Return the member storage format for the first known object class."""
    for object_class, entry in MEMBER_LOOKUP_BY_CLASS.items():
        if object_class in object_classes:
            return entry['format']
    raise Exception(
        u"Can not lookup member format for object-classes: {0}".format(
            object_classes,
        )
    )
def member_attribute(object_classes):
    """Return the member attribute name for the first known object class."""
    for object_class, entry in MEMBER_LOOKUP_BY_CLASS.items():
        if object_class in object_classes:
            return entry['attribute']
    raise Exception(
        u"Can not lookup member attribute for object-classes: {0}".format(
            object_classes,
        )
    )
class LDAPGroupsMapping(LDAPPrincipals, UgmGroups):
    """Base behavior for group containers.

    Derives the member storage format and attribute from the configured
    LDAP object classes.
    """

    @default
    @property
    def _member_format(self):
        # FORMAT_DN or FORMAT_UID, depending on the object classes.
        return member_format(self.context.child_defaults['objectClass'])

    @default
    @property
    def _member_attribute(self):
        # e.g. 'member', 'uniqueMember' or 'memberUid'.
        return member_attribute(self.context.child_defaults['objectClass'])

    @plumb
    def __init__(_next, self, props, cfg):
        # Ensure the member attribute passes through the attribute mapping.
        mem_attr = member_attribute(cfg.objectClasses)
        cfg.attrmap[mem_attr] = mem_attr
        _next(self, props, cfg)

    @plumb
    def __setitem__(_next, self, key, value):
        # XXX: kick this, dummy member should be created by default value
        # callback
        # Group object classes require at least one member; seed a
        # placeholder in the configured format when none is set yet.
        if self._member_attribute not in value.attrs:
            value.attrs[self._member_attribute] = []
            if self._member_format is FORMAT_UID:
                value.attrs[self._member_attribute].insert(0, 'nobody')
            else:
                value.attrs[self._member_attribute].insert(0, 'cn=nobody')
        _next(self, key, value)
class LDAPGroups(LDAPGroupsMapping):
    """Groups container producing Group principals."""

    principal_factory = default(Group)

    @override
    @locktree
    def __delitem__(self, key):
        """Delete a group; revoke its roles first when roles are configured."""
        key = ensure_text(key)
        group = self[key]
        parent = self.parent
        if parent and parent.rcfg is not None:
            for role in group.roles:
                group.remove_role(role)
        node = group.context
        del node.parent[node.name]
        del self.storage[key]
@plumbing(
    LDAPGroups,
    NodeChildValidate,
    Nodespaces,
    Adopt,
    Attributes,
    Nodify,
)
class Groups(object):
    """Public groups node: LDAPGroups behavior assembled with node plumbing parts."""
    pass
class LDAPRole(LDAPGroupMapping, AliasedPrincipal):
@default
def related_principals(self, key):
    """Return the groups container for 'group:' prefixed keys, else users."""
    ugm = self.parent.parent
    return ugm.groups if key.startswith('group:') else ugm.users
@default
@property
def existing_member_ids(self):
    """Ids of all users plus all groups (group ids prefixed with 'group:')."""
    ugm = self.parent.parent
    ids = list(ugm.users)
    ids.extend('group:{}'.format(gid) for gid in ugm.groups)
    return ids
@default
def translate_ids(self, members):
    """Translate member DNs to principal ids when members are stored as DNs.

    Unknown DNs are dropped; user ids come before 'group:' prefixed ids.
    """
    if self._member_format != FORMAT_DN:
        return members
    ugm = self.parent.parent
    user_members = []
    for dn in members:
        try:
            user_members.append(ugm.users.idbydn(dn, True))
        except KeyError:
            pass
    group_members = []
    for dn in members:
        try:
            group_members.append('group:{}'.format(ugm.groups.idbydn(dn, True)))
        except KeyError:
            pass
    return user_members + group_members
@default
def translate_key(self, key):
    """Translate a role member key to its storage form (DN or UID)."""
    if self._member_format == FORMAT_UID:
        return key
    if self._member_format != FORMAT_DN:
        return None
    if key.startswith('group:'):
        principals = self.parent.parent.groups
        key = key[6:]
    else:
        principals = self.parent.parent.users
    # make sure principal is loaded
    return principals[key].context.DN
@override
@locktree
def __getitem__(self, key):
    """Return the principal behind a role member key."""
    key = ensure_text(key)
    if key not in self:
        raise KeyError(key)
    principals = self.related_principals(key)
    lookup = key[6:] if key.startswith('group:') else key
    return principals[lookup]
@override
@locktree
def __delitem__(self, key):
key = ensure_text(key)
if key not in self:
raise KeyError(key)
principals = self.related_principals(key)
if | |
# repository: alisaifee/coredis
from __future__ import annotations
import asyncio
import functools
import inspect
import textwrap
from abc import ABCMeta
from ssl import SSLContext
from typing import TYPE_CHECKING, Any, cast, overload
from deprecated.sphinx import versionadded
from packaging.version import Version
from coredis._utils import b, clusterdown_wrapper, first_key, nativestr
from coredis.cache import AbstractCache
from coredis.commands._key_spec import KeySpec
from coredis.commands.constants import CommandName
from coredis.commands.core import CoreCommands
from coredis.commands.function import Library
from coredis.commands.monitor import Monitor
from coredis.commands.pubsub import ClusterPubSub, PubSub, ShardedPubSub
from coredis.commands.script import Script
from coredis.commands.sentinel import SentinelCommands
from coredis.connection import Connection, RedisSSLContext, UnixDomainSocketConnection
from coredis.exceptions import (
AskError,
BusyLoadingError,
ClusterDownError,
ClusterError,
ClusterRoutingError,
ConnectionError,
MovedError,
RedisClusterException,
ResponseError,
TimeoutError,
TryAgainError,
WatchError,
)
from coredis.lock import Lock, LuaLock
from coredis.nodemanager import Node, NodeFlag
from coredis.pool import ClusterConnectionPool, ConnectionPool
from coredis.response._callbacks import NoopCallback
from coredis.response.types import ScoredMember
from coredis.typing import (
AnyStr,
AsyncGenerator,
AsyncIterator,
Awaitable,
Callable,
Coroutine,
Dict,
Generator,
Generic,
Iterable,
Iterator,
KeyT,
Literal,
Optional,
Parameters,
ParamSpec,
ResponseType,
StringT,
Tuple,
Type,
TypeVar,
ValueT,
add_runtime_checks,
)
P = ParamSpec("P")
R = TypeVar("R")
if TYPE_CHECKING:
import coredis.pipeline
class ClusterMeta(ABCMeta):
    """Metaclass for cluster clients.

    For every method produced by the command decorator it records cluster
    routing/splitting flags, registers multi-node result callbacks, marks
    read-only commands on the pool, optionally wraps the method with
    runtime type checks, and appends a cluster note to its docstring.
    """

    ROUTING_FLAGS: Dict[bytes, NodeFlag]
    SPLIT_FLAGS: Dict[bytes, NodeFlag]
    RESULT_CALLBACKS: Dict[str, Callable[..., ResponseType]]
    NODE_FLAG_DOC_MAPPING = {
        NodeFlag.PRIMARIES: "all primaries",
        NodeFlag.REPLICAS: "all replicas",
        NodeFlag.RANDOM: "a random node",
        NodeFlag.ALL: "all nodes",
        NodeFlag.SLOT_ID: "a node selected by :paramref:`slot`",
    }

    def __new__(
        cls, name: str, bases: Tuple[type, ...], namespace: Dict[str, object]
    ) -> ClusterMeta:
        kls = super().__new__(cls, name, bases, namespace)
        methods = dict(k for k in inspect.getmembers(kls) if inspect.isfunction(k[1]))
        for name, method in methods.items():
            doc_addition = ""
            # Only command methods carry the decorator-attached metadata.
            if cmd := getattr(method, "__coredis_command", None):
                if cmd.cluster.route:
                    kls.ROUTING_FLAGS[cmd.command] = cmd.cluster.route
                    aggregate_note = ""
                    if cmd.cluster.multi_node:
                        if cmd.cluster.combine:
                            aggregate_note = "and the results will be aggregated"
                        else:
                            aggregate_note = (
                                "and a mapping of nodes to results will be returned"
                            )
                    doc_addition = f"""
.. admonition:: Cluster note

   The command will be run on **{cls.NODE_FLAG_DOC_MAPPING[cmd.cluster.route]}** {aggregate_note}
"""
                elif cmd.cluster.split and cmd.cluster.combine:
                    kls.SPLIT_FLAGS[cmd.command] = cmd.cluster.split
                    doc_addition = f"""
.. admonition:: Cluster note

   If :paramref:`RedisCluster.non_atomic_cross_slot` is set the
   command will be run on **{cls.NODE_FLAG_DOC_MAPPING[cmd.cluster.split]}**
   by distributing the keys to the appropriate nodes and the results aggregated
"""
                if cmd.cluster.multi_node:
                    kls.RESULT_CALLBACKS[cmd.command] = cmd.cluster.combine
                if cmd.readonly:
                    ConnectionPool.READONLY_COMMANDS.add(cmd.command)
                if (wrapped := add_runtime_checks(method)) != method:
                    setattr(kls, name, wrapped)
                    method = wrapped
                # Append the cluster note to the docstring exactly once.
                if doc_addition and not hasattr(method, "__cluster_docs"):

                    def __w(func: Callable[P, Awaitable[R]]) -> Callable[P, Awaitable[R]]:
                        @functools.wraps(func)
                        async def _w(*a: P.args, **k: P.kwargs) -> R:
                            return await func(*a, **k)

                        _w.__doc__ = f"""{textwrap.dedent(method.__doc__ or "")}
{doc_addition}
"""
                        return _w

                    wrapped = __w(method)
                    setattr(wrapped, "__cluster_docs", doc_addition)
                    setattr(kls, name, wrapped)
        return kls
class RedisMeta(ABCMeta):
    """Metaclass that wraps command methods with runtime type checks."""

    def __new__(
        cls, name: str, bases: Tuple[type, ...], namespace: Dict[str, object]
    ) -> RedisMeta:
        kls = super().__new__(cls, name, bases, namespace)
        for attr_name, method in inspect.getmembers(kls, inspect.isfunction):
            if not hasattr(method, "__coredis_command"):
                continue
            wrapped = add_runtime_checks(method)
            if wrapped != method:
                setattr(kls, attr_name, wrapped)
        return kls
RedisConnectionT = TypeVar("RedisConnectionT", bound="RedisConnection")
class RedisConnection:
    """Shared connection-handling base for Redis clients.

    Builds (or adopts) a :class:`ConnectionPool` and tracks encoding,
    response decoding, protocol version and server version information.
    """

    # Encoding used for the connection (pool settings take precedence).
    encoding: str
    # Whether responses are decoded to str.
    decode_responses: bool
    connection_pool: ConnectionPool
    # RESP protocol version; only 2 and 3 exist.
    protocol_version: Literal[2, 3]

    def __init__(
        self,
        host: Optional[str] = "localhost",
        port: Optional[int] = 6379,
        db: int = 0,
        username: Optional[str] = None,
        password: Optional[str] = None,
        stream_timeout: Optional[int] = None,
        connect_timeout: Optional[int] = None,
        connection_pool: Optional[ConnectionPool] = None,
        unix_socket_path: Optional[str] = None,
        encoding: str = "utf-8",
        decode_responses: bool = False,
        ssl: bool = False,
        ssl_context: Optional[SSLContext] = None,
        ssl_keyfile: Optional[str] = None,
        ssl_certfile: Optional[str] = None,
        ssl_cert_reqs: Optional[str] = None,
        ssl_ca_certs: Optional[str] = None,
        max_connections: Optional[int] = None,
        retry_on_timeout: bool = False,
        max_idle_time: float = 0,
        idle_check_interval: float = 1,
        client_name: Optional[str] = None,
        protocol_version: Literal[2, 3] = 2,
        verify_version: bool = True,
        **kwargs: Any,
    ):
        # Without an explicit pool, assemble one from the scalar arguments.
        if not connection_pool:
            kwargs = {
                "db": db,
                "username": username,
                "password": password,
                "encoding": encoding,
                "stream_timeout": stream_timeout,
                "connect_timeout": connect_timeout,
                "max_connections": max_connections,
                "retry_on_timeout": retry_on_timeout,
                "decode_responses": decode_responses,
                "max_idle_time": max_idle_time,
                "idle_check_interval": idle_check_interval,
                "client_name": client_name,
                "protocol_version": protocol_version,
            }
            # based on input, setup appropriate connection args
            if unix_socket_path is not None:
                kwargs.update(
                    {
                        "path": unix_socket_path,
                        "connection_class": UnixDomainSocketConnection,
                    }
                )
            else:
                # TCP specific options
                kwargs.update({"host": host, "port": port})
                # An explicit ssl_context wins over the individual ssl_* args.
                if ssl_context is not None:
                    kwargs["ssl_context"] = ssl_context
                elif ssl:
                    ssl_context = RedisSSLContext(
                        ssl_keyfile, ssl_certfile, ssl_cert_reqs, ssl_ca_certs
                    ).get()
                    kwargs["ssl_context"] = ssl_context
            connection_pool = ConnectionPool(**kwargs)
        self.connection_pool = connection_pool
        # Pool-level settings take precedence over constructor arguments.
        self.encoding = str(connection_pool.connection_kwargs.get("encoding", encoding))
        self.decode_responses = bool(
            connection_pool.connection_kwargs.get("decode_responses", decode_responses)
        )
        connection_protocol_version = (
            connection_pool.connection_kwargs.get("protocol_version")
            or protocol_version
        )
        assert connection_protocol_version in {
            2,
            3,
        }, "Protocol version can only be one of {2,3}"
        self.protocol_version = connection_protocol_version
        # Populated lazily via ensure_server_version().
        self.server_version: Optional[Version] = None
        self.verify_version = verify_version

    async def parse_response(
        self,
        connection: Connection,
        *,
        decode: Optional[ValueT] = None,
        **_: Any,
    ) -> ResponseType:
        """
        Parses a response from the Redis server

        :meta private:
        """
        return await connection.read_response(
            decode=decode if decode is None else bool(decode)
        )

    async def initialize(self: RedisConnectionT) -> RedisConnectionT:
        # Idempotent pool initialization; returns self for awaiting chains.
        await self.connection_pool.initialize()
        return self

    def __await__(self) -> Generator[Any, None, RedisConnection]:
        # Allows `client = await Redis(...)` style construction.
        return self.initialize().__await__()

    def __repr__(self) -> str:
        return f"{type(self).__name__}<{repr(self.connection_pool)}>"

    def ensure_server_version(self, version: Optional[str]) -> None:
        # Records the first server version seen and errors out if it
        # changes afterwards (disabled via verify_version).
        if not self.verify_version:
            return
        if not version:
            return
        if not self.server_version and version:
            self.server_version = Version(nativestr(version))
        elif str(self.server_version) != nativestr(version):
            raise Exception(
                f"Server version changed from {self.server_version} to {version}"
            )
class AbstractRedis(
    Generic[AnyStr],
    CoreCommands[AnyStr],
    SentinelCommands[AnyStr],
):
    """
    Async Redis client
    """

    server_version: Optional[Version]
    protocol_version: Literal[2, 3]
    cache: Optional[AbstractCache]

    async def scan_iter(
        self,
        match: Optional[StringT] = None,
        count: Optional[int] = None,
        type_: Optional[StringT] = None,
    ) -> AsyncIterator[AnyStr]:
        """
        Make an iterator using the SCAN command so that the client doesn't
        need to remember the cursor position.
        """
        # None forces the first iteration; the server returns cursor 0 when
        # the scan is complete.
        cursor = None
        while cursor != 0:
            cursor, data = await self.scan(
                cursor=cursor, match=match, count=count, type_=type_
            )
            for item in data:
                yield item

    async def sscan_iter(
        self,
        key: KeyT,
        match: Optional[StringT] = None,
        count: Optional[int] = None,
    ) -> AsyncIterator[AnyStr]:
        """
        Make an iterator using the SSCAN command so that the client doesn't
        need to remember the cursor position.
        """
        cursor = None
        while cursor != 0:
            cursor, data = await self.sscan(
                key, cursor=cursor, match=match, count=count
            )
            for item in data:
                yield item

    async def hscan_iter(
        self,
        key: KeyT,
        match: Optional[StringT] = None,
        count: Optional[int] = None,
    ) -> AsyncGenerator[Tuple[AnyStr, AnyStr], None]:
        """
        Make an iterator using the HSCAN command so that the client doesn't
        need to remember the cursor position.
        """
        cursor = None
        while cursor != 0:
            cursor, data = await self.hscan(
                key, cursor=cursor, match=match, count=count
            )
            # HSCAN returns a mapping; yield (field, value) pairs.
            for item in data.items():
                yield item

    async def zscan_iter(
        self,
        key: KeyT,
        match: Optional[StringT] = None,
        count: Optional[int] = None,
    ) -> AsyncIterator[ScoredMember]:
        """
        Make an iterator using the ZSCAN command so that the client doesn't
        need to remember the cursor position.
        """
        cursor = None
        while cursor != 0:
            cursor, data = await self.zscan(
                key,
                cursor=cursor,
                match=match,
                count=count,
            )
            for item in data:
                yield item

    def register_script(self, script: ValueT) -> Script[AnyStr]:
        """
        Registers a Lua :paramref:`script`

        :return: A :class:`coredis.commands.script.Script` instance that is
         callable and hides the complexity of dealing with scripts, keys, and
         shas.
        """
        return Script[AnyStr](self, script)  # type: ignore

    @versionadded(version="3.1.0")
    async def register_library(
        self, name: StringT, code: StringT, replace: bool = False
    ) -> Library[AnyStr]:
        """
        Register a new library

        :param name: name of the library
        :param code: raw code for the library
        :param replace: Whether to replace the library when intializing. If ``False``
         an exception will be raised if the library was already loaded in the target
         redis instance.
        """
        return await Library[AnyStr](self, name=name, code=code, replace=replace)

    @versionadded(version="3.1.0")
    async def get_library(self, name: StringT) -> Library[AnyStr]:
        """
        Fetch a pre registered library

        :param name: name of the library
        """
        return await Library[AnyStr](self, name)
RedisT = TypeVar("RedisT", bound="Redis[Any]")
RedisStringT = TypeVar("RedisStringT", bound="Redis[str]")
RedisBytesT = TypeVar("RedisBytesT", bound="Redis[bytes]")
RedisClusterT = TypeVar("RedisClusterT", bound="RedisCluster[Any]")
RedisClusterStringT = TypeVar("RedisClusterStringT", bound="RedisCluster[str]")
RedisClusterBytesT = TypeVar("RedisClusterBytesT", bound="RedisCluster[bytes]")
class Redis(
AbstractRedis[AnyStr],
Generic[AnyStr],
RedisConnection,
metaclass=RedisMeta,
):
"""
Redis client
"""
@overload
def __init__(
self: Redis[bytes],
host: Optional[str] = ...,
port: Optional[int] = ...,
db: int = ...,
*,
username: Optional[str] = ...,
password: Optional[str] = ...,
stream_timeout: Optional[int] = ...,
connect_timeout: Optional[int] = ...,
connection_pool: Optional[ConnectionPool] = ...,
unix_socket_path: Optional[str] = ...,
encoding: str = ...,
decode_responses: Literal[False] = ...,
ssl: bool = ...,
ssl_context: Optional[SSLContext] = ...,
ssl_keyfile: Optional[str] = ...,
ssl_certfile: Optional[str] = ...,
ssl_cert_reqs: Optional[str] = ...,
ssl_ca_certs: Optional[str] = ...,
max_connections: Optional[int] = ...,
retry_on_timeout: bool = ...,
max_idle_time: float = ...,
idle_check_interval: float = ...,
client_name: Optional[str] = ...,
protocol_version: Literal[2, 3] = ...,
verify_version: bool = ...,
cache: Optional[AbstractCache] = ...,
**_: | |
# file: data/corpus/raw_to_formated_script/raw_to_formated.py
# <NAME>
# <EMAIL>
# Developed for my internship at the LIS lab during my 4th year in the Computer Science Engineer program at Polytech Marseille
# Feel free to use this code as you wish as long as you quote me as author
# There is a notebook version of this script in the folder 'notebook' which is easier to read, but it is in french
# ----------------------------
# | PURPOSE |
# ----------------------------
# Purpose of this script is to convert a file in Semeval 2010 format to the format used by Mo Yu as input to FCM (Gormley, Yu 2015)
# FCM publication : https://www.cs.cmu.edu/~mgormley/papers/gormley+yu+dredze.emnlp.2015.pdf_
# Input format :
#
# 4 "A misty <e1>ridge</e1> uprises from the <e2>surge</e2>."
# Other
#
# Output format :
#
# Other 2 2 ridge 6 6 surge
# A DT 0 0 0 misty JJ B-adj.all 0 0 ridge NN B-noun.object 0 0 uprises NNS I-noun.object 0 1 from IN 0 0 1 the DT 0 0 0 surge NN B-noun.event 0 0 . . 0 0 0
# 1 B-noun.object 0
# 1 B-noun.event 0
# Input format is pretty simple, first line is a sentence with 2 entities between quotes (not necessarily) and an index, second line is the relation type
# Ouput format contains 4 lines, first line is the relation type, index of begining for 1rst entity, index of ending for 1rst entity and same for 2nd entity
# second line is the sentence tagged with the SST (SuperSenseTagger), note that the last feature is not generated by the SST but by the present script
# third line is the size of the 1rst entity plus two of its tags
# fourth line is the same for 2nd entity
# Once this format is generated, it can be directly used by the FCM
# FCM implementation in C++ : https://github.com/Gorov/FCM_nips_workshop
# ----------------------------
# | USAGE |
# ----------------------------
# Using python 3 (I have not tested with version 2)
# Open a terminal, go to 'raw_to_formated_script' folder and just use :
# python raw_to_formated.py <input file>
# Example : python raw_to_formated.py semeval2010_train
# Note: The input file HAS to be in the 'data/corpus/raw' folder, and the output will have the same name and be located in the 'formated' folder
# Of course the file has to be in the 2010 Semeval format for the script to work
# Setup
import sys
import copy
import numpy as np
import subprocess
import networkx as nx
import spacy
from spacy import displacy
nlp = spacy.load('en_core_web_sm')
file_input = '../raw/'+str(sys.argv[1])
file_output = '../formated/'+str(sys.argv[1])
# Corpus checking function
# checks that each sentence contains 2 entities
def check_corpus(file_path):
file = open(file_path, "r")
lines = file.readlines()
corpus_ok = True
for i in range(0, len(lines), 3):
if('<e1>' not in lines[i] or '<e2>' not in lines[i] or '</e2>' not in lines[i] or '</e1>' not in lines[i]):
print("There is a problem with the corpus at the line ",i+1)
corpus_ok = False
if(corpus_ok):
print("The corpus seems ok")
print("\n\nChecking corpus integrity...")
# Shortest Dependency Path between 2 words function
# Input : sentence and the two words for which we want dep path
# Ouput : a tab containing words on the dep path
def shortest_dependency_path(doc, e1=None, e2=None):
edges = []
shortest_path = []
for token in doc:
for child in token.children:
edges.append(('{0}'.format(token),
'{0}'.format(child)))
graph = nx.Graph(edges)
try:
if(e1 in graph.nodes and e2 in graph.nodes):
shortest_path = nx.shortest_path(graph, source=e1, target=e2)
except nx.NetworkXNoPath:
shortest_path = []
return shortest_path
# Here starts the file processing
# Processing inconvenient words like 'a.m', '100,000' ..., spacing out entities and deleting sentence indexes
# Cheking corpus
check_corpus(file_input)
file = open(file_input, "r")
lines = file.readlines()
# Note: This preprocessing is necessary because the SST tagger is a little dumber than the one used by Mo Yu
# And I could not find it
print("Processing inconvenient words like 'a.m', 'U.S' ...")
for i in range(0, len(lines), 3): # All 3 lines ... so each sentence
line_split = lines[i].split()
for j in range(len(line_split)):
if(line_split[j].find(',') > 0): # if there's a comma inside a word
line_split[j] = line_split[j].replace(',','')# we delete it
if(line_split[j].find('.') > 0):
line_split[j] = line_split[j].replace('.','')
if(line_split[j] == '.' and j != len(line_split)-2):
line_split[j] = line_split[j].replace('.','')
lines[i] = ' '.join(line_split) + '\n' # rebuild the line from the split
# Spacing out entities for split to recognize them
# Deleting external quotes
print("Spacing out entities and deleting useless quotes")
i = 0
while i < len(lines):
#lines[i] = lines[i].replace('"', '') # Old version
line_split = lines[i].split()
for j in range(len(line_split)):
if(j == 1 and '"' in line_split[j]):
line_split[j] = line_split[j].replace('"','')
if(j == len(line_split)-1 and '"' in line_split[j]):
line_split[j] = line_split[j].replace('"','')
lines[i] = ' '.join(line_split) + '\n'
lines[i] = lines[i].replace('<e1>', '<e1> ')
lines[i] = lines[i].replace('<e2>', '<e2> ')
lines[i] = lines[i].replace('</e1>', ' </e1>')
lines[i] = lines[i].replace('</e2>', ' </e2>')
i+=1
# Deleting indexes
for i in range(0, len(lines), 3):
line_split = lines[i].split()
if(len(lines[i]) > 0):
del line_split[0]
lines[i] = ' '.join(line_split) + '\n'
i+=1
# Extracting first line of Yu format and adding cleaned sentence
lines_res_temp_1 = copy.deepcopy(lines) # will contain the first temporary result
i = 0
for i in range(0, len(lines), 3): # All 3 lines ... so each sentence
line_split = lines[i].split()
first_line = lines[i+1].replace('\n', '') + ' ' # first we put the relation in first_line
tabulation_fin = False # Bolean not to tabulate after e2
j = 0
while j < len(line_split):
# Extracting indexes and deleting entity tags
if('<e' in line_split[j]):
tabulation_fin = not tabulation_fin
del line_split[j]
first_line += str(j) + ' ' # j = entity begining index
k = j # k is used to go over entity starting at j
ent = ''
while "</e" not in line_split[k]:
ent += line_split[k] # adding the word in the entity to ent
if("</e" not in line_split[k+1]): # if the entity is not over we put a space
ent += ' '
k+=1
if(tabulation_fin == True): # if it's the first entity we tabulate
ent += ' '
elif('</e' in line_split[j]):
del line_split[j]
j-=1
first_line += str(j) + ' ' + ent # j = end entity index
# Building res_temp_1 (containing blocks of 2 lines)
lines_res_temp_1[i] = first_line + '\n'
lines_res_temp_1[i+1] = ' '.join(line_split) + '\n'
j+=1
# Processing file that will go through SST
file = open("sst/DATA/to_sst.txt", "w")
i = 0
file.write(" ") # tabulate once, otherwise the tagger will 'eat' the first word of the file
# Only the sentences will go through SST, the first line will be used later
for i in range(1, len(lines_res_temp_1)+1, 3):
file.write(lines_res_temp_1[i])
file.write('\n')
file.write('\n')
file.close()
# Command to launch SST, see its README for more details
command = ['sst', 'multitag-line', './DATA/to_sst.txt', '0', '0', 'DATA/GAZ/gazlistall_minussemcor',
'./MODELS/WSJPOSc_base_20', 'DATA/WSJPOSc.TAGSET',
'./MODELS/SEM07_base_12', './DATA/WNSS_07.TAGSET',
'./MODELS/WSJc_base_20', './DATA/WSJ.TAGSET',
'./MODELS/CONLL03_base_15', './DATA/CONLL03.TAGSET',
'>', './DATA/res_sst.tags', '&', 'clean.bat']
print('We call the SST tagger\n')
process = subprocess.Popen(command, cwd="sst", shell=True, stdout=subprocess.PIPE)
process.wait() # Waiting for the end of SST tagging, results in res_sst.tags
print("End of SST tagging")
# Generating last tag for each word (whether it's on the dep path between entities or not)
# Deleting lemmas
# reading annotated sentences
res_sst = open("sst/DATA/res_sst.tags", "r")
lines_res_sst = res_sst.readlines()
res_sst.close()
# Generating a dependencies graph for each sentence, and finding the shortest path between e1 and e2
# Then checking for each word of the sentence if it's in this path
print("Generating dependency path feature")
for i in range(0, len(lines_res_sst), 3):
line_split_res_sst = lines_res_sst[i].split() # Line to modify (adding the tag for dep path)
line_split_res_temp_1 = lines_res_temp_1[i].split() # Sentence without annotation
# finding e1, its size and starting index
e1_size = int(line_split_res_temp_1[2]) - int(line_split_res_temp_1[1])+1
e1_start = int(line_split_res_temp_1[1])
e1 = lines_res_temp_1[i+1].split()[e1_start]
# Same for e2
e2_size = int(line_split_res_temp_1[4+e1_size]) - int(line_split_res_temp_1[3+e1_size])+1
e2_start = int(line_split_res_temp_1[3+e1_size])
e2 = lines_res_temp_1[i+1].split()[e2_start]
# Loading the sentence to be processed with NetworkX
doc = nlp(lines_res_temp_1[i+1])
dep_path = shortest_dependency_path(doc, e1, e2)
# For each word...
for j in range(5, len(line_split_res_sst), 6):
# if it's on the dep path between e1 and e2...
if(line_split_res_sst[j-5] in dep_path
and line_split_res_sst[j-5] != e1
and line_split_res_sst[j-5] != e2):
# putting it's last tag to 1
line_split_res_sst[j] = '1'
else:
line_split_res_sst[j] = '0'
lines_res_sst[i] = ' '.join(line_split_res_sst) + '\n'
# Deleting lemmas
print("Deleting lemmas")
i = 0
for i in range(0, len(lines_res_sst), 3): # All 3 lines ... so each sentence
line_split = lines_res_sst[i].split()
j = 2
while j < len(line_split):
del line_split[j] # Deleting lemma
j+=5 # going to the next one
lines_res_sst[i] = ' '.join(line_split) + '\n'
# Replacing the sentence by its annotated version in temporary result 1, and adding the last 2 lines
print("Finalizing...")
file = open(file_output, "w")
# In the final result
for i in | |
from datetime import datetime
from decimal import Decimal
from django.conf import settings
import pytz
from django.core.exceptions import ValidationError
from django.core.validators import MinValueValidator
from django.db import models
from guardian.shortcuts import get_objects_for_user
from associations.models import Association
from venues.models import Venue
from itertools import chain
from users.models import User
def validate_barcode(value):
"""
Validate a barcode.
Checks if the barcode is all digits, of the required length and if the checksum is correct
:param value: the value to validate
:return: None, raises ValidationError if a check fails
"""
if value is None:
return
if not value.isdigit():
raise ValidationError("A barcode must consist of only digits")
if len(value) == 8:
value = "000000" + value
elif len(value) == 13:
value = "0" + value
else:
raise ValidationError("A barcode must be either 8 or 13 integers long")
counter = 0
for index, digit in enumerate(value[: len(value) - 1]):
if index % 2 == 0:
counter += int(digit) * 3
else:
counter += int(digit)
if (10 - (counter % 10)) % 10 != int(value[len(value) - 1]):
raise ValidationError("The checksum of the barcode is not correct")
def get_default_start_time_shift():
"""
Get the default start time of a Shift object.
:return: the default start time of a shift
"""
timezone = pytz.timezone(settings.TIME_ZONE)
return timezone.localize(datetime.now()).replace(hour=12, minute=15, second=0, microsecond=0)
def get_default_end_time_shift():
"""
Get the default end time of a Shift object.
:return: the default end time of a shift
"""
timezone = pytz.timezone(settings.TIME_ZONE)
return timezone.localize(datetime.now()).replace(hour=13, minute=15, second=0, microsecond=0)
class OrderVenue(models.Model):
"""Venues where Shifts can be created."""
venue = models.OneToOneField(
Venue,
on_delete=models.CASCADE,
primary_key=True,
)
class Meta:
"""Meta class for OrderVenue."""
ordering = ["venue__name"]
permissions = [
("can_order_in_venue", "Can order products during shifts in this venue"),
("can_manage_shift_in_venue", "Can manage shifts in this venue"),
]
def __str__(self):
"""Representation by venue."""
return str(self.venue)
def get_users_with_shift_admin_perms(self):
"""Get users with permissions to manage shifts in this venue."""
users = []
for user in User.objects.all():
if self in get_objects_for_user(
user, "orders.can_manage_shift_in_venue", accept_global_perms=True, with_superuser=True
):
users.append(user)
return users
def get_users_with_shift_admin_perms_queryset(self):
"""Get users with permissions to manage shifts in this venue as queryset."""
users_ids = []
for user in User.objects.all():
if self in get_objects_for_user(
user, "orders.can_manage_shift_in_venue", accept_global_perms=True, with_superuser=True
):
users_ids.append(user.pk)
return User.objects.filter(pk__in=users_ids)
def get_users_with_order_perms(self):
"""Get users with permissions to manage shifts in this venue."""
users = []
for user in User.objects.all():
if self in get_objects_for_user(
user, "orders.can_order_in_venue", accept_global_perms=True, with_superuser=True
):
users.append(user)
return users
def get_users_with_order_perms_queryset(self):
"""Get users with permissions to manage shifts in this venue as queryset."""
users_ids = []
for user in User.objects.all():
if self in get_objects_for_user(
user, "orders.can_order_in_venue", accept_global_perms=True, with_superuser=True
):
users_ids.append(user.pk)
return User.objects.filter(pk__in=users_ids)
class Product(models.Model):
"""Products that can be ordered."""
name = models.CharField(max_length=50, unique=True)
icon = models.CharField(
max_length=20,
default="",
blank=True,
help_text=("Font-awesome icon name that is used for quick display of the product type."),
)
available = models.BooleanField(default=True)
available_at = models.ManyToManyField(OrderVenue)
current_price = models.DecimalField(
max_digits=6, decimal_places=2, validators=[MinValueValidator(Decimal("0.00"))]
)
orderable = models.BooleanField(
default=True,
help_text="Whether or not this product should appear on the order page.",
)
ignore_shift_restrictions = models.BooleanField(
default=False,
help_text="Whether or not this product should ignore the maximum orders per shift" " restriction.",
)
max_allowed_per_shift = models.PositiveSmallIntegerField(
verbose_name="Max. allowed orders per shift",
default=2,
null=True,
blank=True,
help_text="The maximum amount a single user can order this product in a single shift. Note that shifts are "
"bound to the venue. Empty means no limit.",
)
barcode = models.CharField(
max_length=13,
default=None,
null=True,
blank=True,
unique=True,
help_text="Either an EAN-8 or EAN-13 barcode.",
validators=[validate_barcode],
)
class Meta:
"""Meta class."""
ordering = ["-available", "name"]
def __str__(self):
"""
Convert a Product object to string.
:return: the name of the Product object
"""
return self.name
def to_json(self):
"""
Convert this object to JSON.
:return: a to JSON convertable dictionary of properties.
"""
return {
"name": self.name,
"icon": self.icon,
"price": self.current_price,
"max_per_shift": self.max_allowed_per_shift,
"available": self.available,
"id": self.pk,
"ignore_shift_restriction": self.ignore_shift_restrictions,
}
def user_can_order_amount(self, user, shift, amount=1):
"""
Test if a user can order the specified amount of this Product in a specific shift.
:param user: the user
:param shift: the shift
:param amount: the amount that the user wants to order
:return: True if the already ordered amount of this Product plus the amount specified in the amount parameter
is lower than the max_allowed_per_shift variable, False otherwise
"""
if self.max_allowed_per_shift is not None:
user_order_amount_product = Order.objects.filter(user=user, shift=shift, product=self).count()
return user_order_amount_product + amount <= self.max_allowed_per_shift
return True
def user_max_order_amount(self, user, shift):
"""
Get the maximum amount a user can still order for this product.
:param user: the user
:param shift: the shift on which to order the product
:return: None if the user can order unlimited of the product, the maximum allowed to still order otherwise
"""
if self.max_allowed_per_shift is not None:
if not user.is_authenticated:
return 0 # Non logged-in users can never order items
user_order_amount_product = Order.objects.filter(user=user, shift=shift, product=self).count()
return max(0, self.max_allowed_per_shift - user_order_amount_product)
else:
return None
def active_venue_validator(value):
"""Filter to only allow shifts for active venues."""
if OrderVenue.objects.get(pk=value).venue.active:
return True
else:
raise ValidationError("This venue is not active.")
class Shift(models.Model):
"""Shifts in which orders can be placed."""
DATE_FORMAT = "%Y-%m-%d"
TIME_FORMAT = "%H:%M"
HUMAN_DATE_FORMAT = "%a. %-d %b. %Y"
venue = models.ForeignKey(
OrderVenue, related_name="shifts", on_delete=models.PROTECT, validators=[active_venue_validator]
)
start_date = models.DateTimeField(default=get_default_start_time_shift)
end_date = models.DateTimeField(default=get_default_end_time_shift)
can_order = models.BooleanField(
verbose_name="Orders allowed",
default=False,
help_text=(
"If checked, people can order within the given time frame. If not checked,"
" ordering will not be possible, even in the given time frame."
),
)
finalized = models.BooleanField(
verbose_name="Shift Finalized",
default=False,
help_text="If checked, shift is finalized and no alterations on the shift can be made anymore.",
)
max_orders_per_user = models.PositiveSmallIntegerField(
verbose_name="Max. number of orders per user",
default=2,
null=True,
blank=True,
help_text="The maximum amount of products a single user can order in this shift. Empty means no limit.",
)
max_orders_total = models.PositiveSmallIntegerField(
verbose_name="Max. total number of orders",
default=50,
null=True,
blank=True,
help_text="The maximum amount of products that can be ordered during this shift in total. Empty means no "
"limit.",
)
assignees = models.ManyToManyField(User)
class Meta:
"""Meta class."""
ordering = ["start_date", "end_date"]
def __str__(self):
"""
Convert this object to string.
:return: this object in string format
"""
return f"{self.venue} {self.date}"
def save(self, *args, **kwargs):
"""Save a Shift."""
self._clean()
try:
old_instance = Shift.objects.get(id=self.id)
except Shift.DoesNotExist:
old_instance = None
if old_instance is not None and not old_instance.finalized and self.finalized:
# Shift was not finalized yet but will be made finalized now
self._make_finalized()
return super(Shift, self).save(*args, **kwargs)
@property
def orders_sorted_staff_first(self):
"""
Get the orders of this shift with the staff orders first.
:return: a chain object with all the orders of this shift.
"""
staff_users = self.venue.get_users_with_shift_admin_perms()
ordered_staff_orders = Order.objects.filter(shift=self, user__in=staff_users).order_by("created")
ordered_normal_orders = Order.objects.filter(shift=self).exclude(user__in=staff_users).order_by("created")
ordered_orders = chain(ordered_staff_orders, ordered_normal_orders)
return list(ordered_orders)
@property
def orders_ordered_type_only(self):
"""
Get the orders with type Ordered of this shift.
:return: a chain object with the ordered orders of this shift.
"""
staff_users = self.venue.get_users_with_shift_admin_perms()
ordered_staff_orders = Order.objects.filter(
shift=self, user__in=staff_users, type=Order.TYPE_ORDERED
).order_by("created")
ordered_normal_orders = (
Order.objects.filter(shift=self, type=Order.TYPE_ORDERED).exclude(user__in=staff_users).order_by("created")
)
ordered_orders = chain(ordered_staff_orders, ordered_normal_orders)
return list(ordered_orders)
@property
def products_open(self):
"""
Get a list with all products and amounts that are not ready.
:return: a list of products with a amount object variable indicating the products and amounts that are not
ready for this shift
"""
distinct_ordered_items = Product.objects.filter(order__shift_id=self, order__ready=False).distinct()
for item in distinct_ordered_items:
item.amount = Order.objects.filter(product=item, ready=False, shift=self).count()
return distinct_ordered_items
@property
def products_closed(self):
"""
Get a list with all products and amounts that are ready.
:return: a list of products with a amount object variable indicating the products and amounts that are ready
for this shift
"""
distinct_ordered_items = Product.objects.filter(order__shift_id=self, order__ready=True).distinct()
for item in distinct_ordered_items:
item.amount = Order.objects.filter(product=item, ready=True, shift=self).count()
return distinct_ordered_items
@property
def products_total(self):
"""
Get a list with all products and amounts.
:return: a list of products with a amount object variable indicating the products and amounts for this shift
"""
distinct_ordered_items = Product.objects.filter(order__shift_id=self).distinct()
for item in distinct_ordered_items:
item.amount = Order.objects.filter(product=item, shift=self).count()
return distinct_ordered_items
@property
def number_of_orders(self):
"""
Get the total number of orders in this shift.
:return: the total number of orders in this shift
"""
return Order.objects.filter(shift=self).count()
@property
def max_orders_total_string(self):
"""
Get the maximum amount of orders in string | |
self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
if nodeName_ == 'Date':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'Date')
value_ = self.gds_validate_string(value_, node, 'Date')
self.Date = value_
self.Date_nsprefix_ = child_.prefix
elif nodeName_ == 'Time':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'Time')
value_ = self.gds_validate_string(value_, node, 'Time')
self.Time = value_
self.Time_nsprefix_ = child_.prefix
elif nodeName_ == 'TransactionCharge':
obj_ = TransactionChargeType.factory(parent_object_=self)
obj_.build(child_, gds_collector_=gds_collector_)
self.TransactionCharge = obj_
obj_.original_tagname_ = 'TransactionCharge'
# end class TransactionInfoType
class ProductType(GeneratedsSuper):
__hash__ = GeneratedsSuper.__hash__
subclass = None
superclass = None
def __init__(self, ProductName=None, ProductDescription=None, ProductCountryCodeOfOrigin=None, TariffInfo=None, Quantity=None, UnitPrice=None, Weight=None, TariffCodeAlert=None, gds_collector_=None, **kwargs_):
self.gds_collector_ = gds_collector_
self.gds_elementtree_node_ = None
self.original_tagname_ = None
self.parent_object_ = kwargs_.get('parent_object_')
self.ns_prefix_ = None
self.ProductName = ProductName
self.ProductName_nsprefix_ = None
self.ProductDescription = ProductDescription
self.ProductDescription_nsprefix_ = None
self.ProductCountryCodeOfOrigin = ProductCountryCodeOfOrigin
self.ProductCountryCodeOfOrigin_nsprefix_ = None
self.TariffInfo = TariffInfo
self.TariffInfo_nsprefix_ = None
self.Quantity = Quantity
self.Quantity_nsprefix_ = None
self.UnitPrice = UnitPrice
self.UnitPrice_nsprefix_ = None
self.Weight = Weight
self.Weight_nsprefix_ = None
self.TariffCodeAlert = TariffCodeAlert
self.TariffCodeAlert_nsprefix_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, ProductType)
if subclass is not None:
return subclass(*args_, **kwargs_)
if ProductType.subclass:
return ProductType.subclass(*args_, **kwargs_)
else:
return ProductType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ns_prefix_(self):
return self.ns_prefix_
def set_ns_prefix_(self, ns_prefix):
self.ns_prefix_ = ns_prefix
def get_ProductName(self):
return self.ProductName
def set_ProductName(self, ProductName):
self.ProductName = ProductName
def get_ProductDescription(self):
return self.ProductDescription
def set_ProductDescription(self, ProductDescription):
self.ProductDescription = ProductDescription
def get_ProductCountryCodeOfOrigin(self):
return self.ProductCountryCodeOfOrigin
def set_ProductCountryCodeOfOrigin(self, ProductCountryCodeOfOrigin):
self.ProductCountryCodeOfOrigin = ProductCountryCodeOfOrigin
def get_TariffInfo(self):
return self.TariffInfo
def set_TariffInfo(self, TariffInfo):
self.TariffInfo = TariffInfo
def get_Quantity(self):
return self.Quantity
def set_Quantity(self, Quantity):
self.Quantity = Quantity
def get_UnitPrice(self):
return self.UnitPrice
def set_UnitPrice(self, UnitPrice):
self.UnitPrice = UnitPrice
def get_Weight(self):
return self.Weight
def set_Weight(self, Weight):
self.Weight = Weight
def get_TariffCodeAlert(self):
return self.TariffCodeAlert
def set_TariffCodeAlert(self, TariffCodeAlert):
self.TariffCodeAlert = TariffCodeAlert
def hasContent_(self):
if (
self.ProductName is not None or
self.ProductDescription is not None or
self.ProductCountryCodeOfOrigin is not None or
self.TariffInfo is not None or
self.Quantity is not None or
self.UnitPrice is not None or
self.Weight is not None or
self.TariffCodeAlert is not None
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='ProductType', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('ProductType')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None and name_ == 'ProductType':
name_ = self.original_tagname_
if UseCapturedNS_ and self.ns_prefix_:
namespaceprefix_ = self.ns_prefix_ + ':'
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='ProductType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='ProductType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='ProductType'):
pass
def exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='ProductType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.ProductName is not None:
namespaceprefix_ = self.ProductName_nsprefix_ + ':' if (UseCapturedNS_ and self.ProductName_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sProductName>%s</%sProductName>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.ProductName), input_name='ProductName')), namespaceprefix_ , eol_))
if self.ProductDescription is not None:
namespaceprefix_ = self.ProductDescription_nsprefix_ + ':' if (UseCapturedNS_ and self.ProductDescription_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sProductDescription>%s</%sProductDescription>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.ProductDescription), input_name='ProductDescription')), namespaceprefix_ , eol_))
if self.ProductCountryCodeOfOrigin is not None:
namespaceprefix_ = self.ProductCountryCodeOfOrigin_nsprefix_ + ':' if (UseCapturedNS_ and self.ProductCountryCodeOfOrigin_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sProductCountryCodeOfOrigin>%s</%sProductCountryCodeOfOrigin>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.ProductCountryCodeOfOrigin), input_name='ProductCountryCodeOfOrigin')), namespaceprefix_ , eol_))
if self.TariffInfo is not None:
namespaceprefix_ = self.TariffInfo_nsprefix_ + ':' if (UseCapturedNS_ and self.TariffInfo_nsprefix_) else ''
self.TariffInfo.export(outfile, level, namespaceprefix_, namespacedef_='', name_='TariffInfo', pretty_print=pretty_print)
if self.Quantity is not None:
namespaceprefix_ = self.Quantity_nsprefix_ + ':' if (UseCapturedNS_ and self.Quantity_nsprefix_) else ''
self.Quantity.export(outfile, level, namespaceprefix_, namespacedef_='', name_='Quantity', pretty_print=pretty_print)
if self.UnitPrice is not None:
namespaceprefix_ = self.UnitPrice_nsprefix_ + ':' if (UseCapturedNS_ and self.UnitPrice_nsprefix_) else ''
self.UnitPrice.export(outfile, level, namespaceprefix_, namespacedef_='', name_='UnitPrice', pretty_print=pretty_print)
if self.Weight is not None:
namespaceprefix_ = self.Weight_nsprefix_ + ':' if (UseCapturedNS_ and self.Weight_nsprefix_) else ''
self.Weight.export(outfile, level, namespaceprefix_, namespacedef_='', name_='Weight', pretty_print=pretty_print)
if self.TariffCodeAlert is not None:
namespaceprefix_ = self.TariffCodeAlert_nsprefix_ + ':' if (UseCapturedNS_ and self.TariffCodeAlert_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sTariffCodeAlert>%s</%sTariffCodeAlert>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.TariffCodeAlert), input_name='TariffCodeAlert')), namespaceprefix_ , eol_))
def build(self, node, gds_collector_=None):
self.gds_collector_ = gds_collector_
if SaveElementTreeNode:
self.gds_elementtree_node_ = node
already_processed = set()
self.ns_prefix_ = node.prefix
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
if nodeName_ == 'ProductName':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'ProductName')
value_ = self.gds_validate_string(value_, node, 'ProductName')
self.ProductName = value_
self.ProductName_nsprefix_ = child_.prefix
elif nodeName_ == 'ProductDescription':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'ProductDescription')
value_ = self.gds_validate_string(value_, node, 'ProductDescription')
self.ProductDescription = value_
self.ProductDescription_nsprefix_ = child_.prefix
elif nodeName_ == 'ProductCountryCodeOfOrigin':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'ProductCountryCodeOfOrigin')
value_ = self.gds_validate_string(value_, node, 'ProductCountryCodeOfOrigin')
self.ProductCountryCodeOfOrigin = value_
self.ProductCountryCodeOfOrigin_nsprefix_ = child_.prefix
elif nodeName_ == 'TariffInfo':
obj_ = TariffInfoType.factory(parent_object_=self)
obj_.build(child_, gds_collector_=gds_collector_)
self.TariffInfo = obj_
obj_.original_tagname_ = 'TariffInfo'
elif nodeName_ == 'Quantity':
obj_ = ValueWithUnitsType.factory(parent_object_=self)
obj_.build(child_, gds_collector_=gds_collector_)
self.Quantity = obj_
obj_.original_tagname_ = 'Quantity'
elif nodeName_ == 'UnitPrice':
obj_ = ChargesType.factory(parent_object_=self)
obj_.build(child_, gds_collector_=gds_collector_)
self.UnitPrice = obj_
obj_.original_tagname_ = 'UnitPrice'
elif nodeName_ == 'Weight':
obj_ = ValueWithUnitsType.factory(parent_object_=self)
obj_.build(child_, gds_collector_=gds_collector_)
self.Weight = obj_
obj_.original_tagname_ = 'Weight'
elif nodeName_ == 'TariffCodeAlert':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'TariffCodeAlert')
value_ = self.gds_validate_string(value_, node, 'TariffCodeAlert')
self.TariffCodeAlert = value_
self.TariffCodeAlert_nsprefix_ = child_.prefix
# end class ProductType
class ProductResultType(GeneratedsSuper):
__hash__ = GeneratedsSuper.__hash__
subclass = None
superclass = None
def __init__(self, TariffCode=None, Question=None, gds_collector_=None, **kwargs_):
self.gds_collector_ = gds_collector_
self.gds_elementtree_node_ = None
self.original_tagname_ = None
self.parent_object_ = kwargs_.get('parent_object_')
self.ns_prefix_ = None
self.TariffCode = TariffCode
self.TariffCode_nsprefix_ = None
if Question is None:
self.Question = []
else:
self.Question = Question
self.Question_nsprefix_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, ProductResultType)
if subclass is not None:
return subclass(*args_, **kwargs_)
if ProductResultType.subclass:
return ProductResultType.subclass(*args_, **kwargs_)
else:
return ProductResultType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ns_prefix_(self):
return self.ns_prefix_
def set_ns_prefix_(self, ns_prefix):
self.ns_prefix_ = ns_prefix
def get_TariffCode(self):
return self.TariffCode
def set_TariffCode(self, TariffCode):
self.TariffCode = TariffCode
def get_Question(self):
return self.Question
def set_Question(self, Question):
self.Question = Question
def add_Question(self, value):
self.Question.append(value)
def insert_Question_at(self, index, value):
self.Question.insert(index, value)
def replace_Question_at(self, index, value):
self.Question[index] = value
def hasContent_(self):
if (
self.TariffCode is not None or
self.Question
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='ProductResultType', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('ProductResultType')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None and name_ == 'ProductResultType':
name_ = self.original_tagname_
if UseCapturedNS_ and self.ns_prefix_:
namespaceprefix_ = self.ns_prefix_ + ':'
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='ProductResultType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='ProductResultType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='ProductResultType'):
pass
    def exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='ProductResultType', fromsubclass_=False, pretty_print=True):
        """Write the child elements (TariffCode first, then each Question)."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.TariffCode is not None:
            namespaceprefix_ = self.TariffCode_nsprefix_ + ':' if (UseCapturedNS_ and self.TariffCode_nsprefix_) else ''
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sTariffCode>%s</%sTariffCode>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.TariffCode), input_name='TariffCode')), namespaceprefix_ , eol_))
        for Question_ in self.Question:
            namespaceprefix_ = self.Question_nsprefix_ + ':' if (UseCapturedNS_ and self.Question_nsprefix_) else ''
            Question_.export(outfile, level, namespaceprefix_, namespacedef_='', name_='Question', pretty_print=pretty_print)
    def build(self, node, gds_collector_=None):
        """Populate this object from an ElementTree *node*; returns self."""
        self.gds_collector_ = gds_collector_
        if SaveElementTreeNode:
            self.gds_elementtree_node_ = node
        already_processed = set()
        self.ns_prefix_ = node.prefix
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            # Strip any namespace qualifier from the tag before dispatching.
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        # ProductResultType declares no XML attributes, so nothing to parse.
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
        """Parse one child element of *node* into the matching member."""
        if nodeName_ == 'TariffCode':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'TariffCode')
            value_ = self.gds_validate_string(value_, node, 'TariffCode')
            self.TariffCode = value_
            self.TariffCode_nsprefix_ = child_.prefix
        elif nodeName_ == 'Question':
            # Questions are complex elements: recursively build them.
            obj_ = QuestionType.factory(parent_object_=self)
            obj_.build(child_, gds_collector_=gds_collector_)
            self.Question.append(obj_)
            obj_.original_tagname_ = 'Question'
# end class ProductResultType
class ProductAnswerType(GeneratedsSuper):
__hash__ = GeneratedsSuper.__hash__
subclass = None
superclass = | |
failure_function[matched_len]
matched_len = matched_len + 1
if matched_len == len(pat):
matches_done += 1
matched_len = 0
if matches_done == max_repl:
break
cumulative_offset += match_len_change * matches_done
output_offsets[length] = cumulative_offset
output_buffer = np.empty(cumulative_offset, dtype=np.uint8)
output_pos = 0
for row_idx in range(length):
if has_nulls and not _check_valid_row(row_idx, valid_bits, valid_offset):
continue
matched_len = 0
matches_done = 0
write_idx = offsets[row_idx]
for read_idx in range(offsets[row_idx], offsets[row_idx + 1]):
# A modified version of utils.kmp.append_to_kmp_matching
while matched_len > -1 and pat[matched_len] != data[read_idx]:
matched_len = failure_function[matched_len]
matched_len = matched_len + 1
if read_idx - write_idx == len(pat):
output_buffer[output_pos] = data[write_idx]
output_pos += 1
write_idx += 1
if matched_len == len(pat):
matched_len = 0
if matches_done != max_repl:
matches_done += 1
write_idx = read_idx + 1
for char in repl:
output_buffer[output_pos] = char
output_pos += 1
while write_idx < offsets[row_idx + 1]:
output_buffer[output_pos] = data[write_idx]
output_pos += 1
write_idx += 1
return output_offsets, output_buffer
@apply_per_chunk
def _text_replace_case_sensitive(
    data: pa.Array, pat: str, repl: str, max_repl: int
) -> pa.Array:
    """
    Replace occurrences of ``pat`` with ``repl`` in the Series/Index with some other string. For every
    row, only the first ``max_repl`` replacements will be performed. If ``max_repl = -1`` we consider that
    we have no limit for the number of replacements.
    This implementation does basic byte-by-byte comparison and is independent
    of any locales or encodings.
    """
    # Convert to UTF-8 bytes
    pat_bytes: bytes = pat.encode()
    repl_bytes: bytes = repl.encode()
    offsets_buffer, data_buffer = _extract_string_buffers(data)
    if data.null_count == 0:
        # Empty view acts as a "no validity bitmap" sentinel for the kernels.
        valid_buffer = np.empty(0, dtype=np.uint8)
    else:
        valid_buffer = _buffer_to_view(data.buffers()[0])
    if len(pat) > 0:
        output_t = _text_replace_case_sensitive_numba(
            len(data),
            valid_buffer,
            data.offset,
            offsets_buffer,
            data_buffer,
            pat_bytes,
            repl_bytes,
            max_repl,
        )
    else:
        # Empty pattern is delegated to a dedicated kernel.
        output_t = _text_replace_case_sensitive_empty_pattern(
            len(data),
            valid_buffer,
            data.offset,
            offsets_buffer,
            data_buffer,
            repl_bytes,
            max_repl,
        )
    output_offsets, output_buffer = output_t
    if data.null_count == 0:
        output_valid = None
    else:
        # Re-align the input validity bitmap so the output array has offset 0.
        output_valid = data.buffers()[0].slice(data.offset // 8)
        if data.offset % 8 != 0:
            output_valid = shift_unaligned_bitmap(
                output_valid, data.offset % 8, len(data)
            )
    buffers = [output_valid, pa.py_buffer(output_offsets), pa.py_buffer(output_buffer)]
    return pa.Array.from_buffers(pa.string(), len(data), buffers, data.null_count)
@apply_per_chunk
def _text_strip(data: pa.Array, to_strip) -> pa.Array:
    """
    Strip the characters of ``to_strip`` from start and end of each element in the data.
    """
    if len(data) == 0:
        return data
    offsets, data_buffer = _extract_string_buffers(data)
    # NOTE(review): data.buffers()[0] can be None (no nulls); _do_strip treats
    # a None bitmap as "all rows valid".
    valid_buffer = data.buffers()[0]
    valid_offset = data.offset
    # Stripping can only shrink rows, so the input size is an upper bound.
    builder = StringArrayBuilder(max(len(data_buffer), len(data)))
    _do_strip(
        valid_buffer,
        valid_offset,
        offsets,
        data_buffer,
        len(data),
        to_strip,
        inout_builder=builder,
    )
    result_array = finalize_string_array(builder, pa.string())
    return result_array
@njit
def _utf8_chr4(arr):
    # Decode a 4-byte UTF-8 sequence (payload bits 3+6+6+6) into a str.
    code_point = np.int32((arr[0] & 0x7) << 18)
    code_point |= np.int32((arr[1] & 0x3F) << 12)
    code_point |= np.int32((arr[2] & 0x3F) << 6)
    code_point |= np.int32(arr[3] & 0x3F)
    return chr(code_point)
@njit
def _utf8_chr3(arr):
    # Decode a 3-byte UTF-8 sequence (payload bits 4+6+6) into a str.
    code_point = np.int32(arr[0] & 0xF) << 12
    code_point |= np.int32((arr[1] & 0x3F) << 6)
    code_point |= np.int32(arr[2] & 0x3F)
    return chr(code_point)
@njit
def _utf8_chr2(arr):
    # Decode a 2-byte UTF-8 sequence (payload bits 5+6) into a str.
    code_point = np.int32(arr[0] & 0x1F) << 6
    code_point |= np.int32(arr[1] & 0x3F)
    return chr(code_point)
@njit
def _extract_striped_string(last_offset, offset, data_buffer, to_strip):
    """Return the bytes of data_buffer[last_offset:offset] with leading and
    trailing characters contained in ``to_strip`` removed (UTF-8 aware)."""
    if last_offset < offset:
        # Advance start_offset past leading strip characters, decoding one
        # UTF-8 code point (1-4 bytes) at a time.
        start_offset = last_offset
        while start_offset < offset:
            if (data_buffer[start_offset] & 0x80) == 0:
                # 1-byte (ASCII) code point.
                if chr(data_buffer[start_offset]) in to_strip:
                    start_offset += 1
                else:
                    break
            # for utf-8 encoding, see: https://en.wikipedia.org/wiki/UTF-8
            elif (
                (data_buffer[start_offset] & 0xF8) == 0xF0
                and start_offset + 3 < offset
                and _utf8_chr4(data_buffer[start_offset : start_offset + 4]) in to_strip
            ):
                start_offset += 4
            elif (
                (data_buffer[start_offset] & 0xF0) == 0xE0
                and start_offset + 2 < offset
                and _utf8_chr3(data_buffer[start_offset : start_offset + 3]) in to_strip
            ):
                start_offset += 3
            elif (
                (data_buffer[start_offset] & 0xE0) == 0xC0
                and start_offset + 1 < offset
                and _utf8_chr2(data_buffer[start_offset : start_offset + 2]) in to_strip
            ):
                start_offset += 2
            else:
                break
        # Retreat end_offset past trailing strip characters by inspecting the
        # lead byte of the last code point before end_offset.
        end_offset = offset
        while end_offset > start_offset:
            if (data_buffer[end_offset - 1] & 0x80) == 0:
                if chr(data_buffer[end_offset - 1]) in to_strip:
                    end_offset -= 1
                else:
                    break
            elif (
                end_offset > start_offset + 3
                and (data_buffer[end_offset - 4] & 0xF8) == 0xF0
                and _utf8_chr4(data_buffer[end_offset - 4 : end_offset]) in to_strip
            ):
                end_offset -= 4
            elif (
                end_offset > start_offset + 2
                and (data_buffer[end_offset - 3] & 0xF0) == 0xE0
                and _utf8_chr3(data_buffer[end_offset - 3 : end_offset]) in to_strip
            ):
                end_offset -= 3
            elif (
                end_offset > start_offset + 1
                and (data_buffer[end_offset - 2] & 0xE0) == 0xC0
                and _utf8_chr2(data_buffer[end_offset - 2 : end_offset]) in to_strip
            ):
                end_offset -= 2
            else:
                break
        stripped_str = data_buffer[start_offset:end_offset]
    else:
        # Empty input row -> empty output slice.
        stripped_str = data_buffer[0:0]
    return stripped_str
@njit
def _do_strip(
    valid_buffer, valid_offset, offsets, data_buffer, len_data, to_strip, inout_builder
):
    """Strip ``to_strip`` characters from both ends of every row and append
    the results (or nulls for invalid rows) to ``inout_builder``."""
    previous_offset = offsets[0]
    for idx in range(len_data):
        current_offset = offsets[1 + idx]
        # Read bit (idx + valid_offset) of the validity bitmap; all rows are
        # valid when no bitmap was supplied.
        valid = (
            bool(
                valid_buffer[(idx + valid_offset) // 8]
                & (1 << ((idx + valid_offset) % 8))
            )
            if valid_buffer is not None
            else True
        )
        if valid:
            current_str = _extract_striped_string(
                previous_offset, current_offset, data_buffer, to_strip
            )
            inout_builder.append_value(current_str, len(current_str))
        else:
            inout_builder.append_null()
        previous_offset = current_offset
@njit
def _startswith(sa, needle, na, offset, out):
    # For every row of the string accessor `sa`, write into out[offset + i]:
    # `na` for null rows, 1 if the row starts with `needle`, else 0.
    for row in range(sa.size):
        if sa.isnull(row):
            out[offset + row] = na
            continue
        if sa.byte_length(row) < needle.length:
            out[offset + row] = 0
            continue
        result = 1
        for k in range(needle.length):
            if sa.get_byte(row, k) != needle.get_byte(k):
                result = 0
                break
        out[offset + row] = result
@njit
def _endswith(sa, needle, na, offset, out):
    # For every row of the string accessor `sa`, write into out[offset + i]:
    # `na` for null rows, 1 if the row ends with `needle`, else 0.
    for row in range(sa.size):
        if sa.isnull(row):
            out[offset + row] = na
            continue
        n_bytes = sa.byte_length(row)
        m = needle.length
        if n_bytes < m:
            out[offset + row] = 0
            continue
        result = 1
        tail_start = n_bytes - m
        for k in range(m):
            if sa.get_byte(row, tail_start + k) != needle.get_byte(k):
                result = 0
                break
        out[offset + row] = result
@apply_per_chunk
def _slice_handle_chunk(pa_arr, start, end, step):
    """Slice each string according to the (start, end, step) inputs."""
    offsets, data = _extract_string_buffers(pa_arr)
    valid = _buffer_to_view(pa_arr.buffers()[0])
    if step == 0:
        raise ValueError("step cannot be zero.")
    # Dispatch to the most specialized kernel that can handle the inputs.
    if start >= 0 and (end is None or end >= 0) and step >= 1:
        if step == 1:
            res = _slice_pos_inputs_nostep(
                offsets, data, valid, pa_arr.offset, start, end
            )
        else:
            res = _slice_pos_inputs_step(
                offsets, data, valid, pa_arr.offset, start, end, step
            )
    else:
        # Negative indices or a negative step fall back to the generic kernel.
        res = _slice_generic(offsets, data, valid, pa_arr.offset, start, end, step)
    return finalize_string_array(res, pa.string())
@njit
def get_utf8_size(first_byte: int):
    """Return the byte length (1-4) of a UTF-8 sequence given its lead byte."""
    if first_byte >= 0b11110000:
        return 4
    if first_byte >= 0b11100000:
        return 3
    if first_byte >= 0b10000000:
        return 2
    return 1
@njit
def _slice_pos_inputs_nostep(
    offsets, data, valid_bits, valid_offset, start: int, end: int
) -> StringArrayBuilder:
    """
    start, end >= 0
    step == 1
    """
    builder = StringArrayBuilder(len(offsets) - 1)
    for i in prange(len(offsets) - 1):
        # Null check against the validity bitmap (empty bitmap == all valid).
        if len(valid_bits) > 0:
            byte_offset = (i + valid_offset) // 8
            bit_offset = (i + valid_offset) % 8
            mask = np.uint8(1 << bit_offset)
            valid = valid_bits[byte_offset] & mask
            if not valid:
                builder.append_null()
                continue
        str_len_bytes = offsets[i + 1] - offsets[i]
        # Walk code point by code point to map the character indices `start`
        # and `end` onto byte positions within the row.
        char_idx = 0
        byte_idx = 0
        while char_idx < start and byte_idx < str_len_bytes:
            char_idx += 1
            byte_idx += get_utf8_size(data[offsets[i] + byte_idx])
        start_byte = offsets[i] + byte_idx
        while (end is None or char_idx < end) and byte_idx < str_len_bytes:
            char_idx += 1
            byte_idx += get_utf8_size(data[offsets[i] + byte_idx])
        end_byte = offsets[i] + byte_idx
        builder.append_value(data[start_byte:end_byte], end_byte - start_byte)
    return builder
@njit
def _slice_pos_inputs_step(
    offsets, data, valid_bits, valid_offset, start: int, end: int, step: int
) -> StringArrayBuilder:
    """
    start, end >= 0
    step > 1
    """
    builder = StringArrayBuilder(len(offsets) - 1)
    for i in prange(len(offsets) - 1):
        # Null check against the validity bitmap (empty bitmap == all valid).
        if len(valid_bits) > 0:
            byte_offset = (i + valid_offset) // 8
            bit_offset = (i + valid_offset) % 8
            mask = np.uint8(1 << bit_offset)
            valid = valid_bits[byte_offset] & mask
            if not valid:
                builder.append_null()
                continue
        str_len_bytes = offsets[i + 1] - offsets[i]
        char_idx = 0
        byte_idx = 0
        # Skip the first `start` code points.
        while char_idx < start and byte_idx < str_len_bytes:
            char_idx += 1
            byte_idx += get_utf8_size(data[offsets[i] + byte_idx])
        # Keep every step-th code point until `end` (or end of row);
        # to_skip == 0 marks a code point that should be copied.
        to_skip = 0
        include_bytes: List[bytes] = []
        while (end is None or char_idx < end) and byte_idx < str_len_bytes:
            char_size = get_utf8_size(data[offsets[i] + byte_idx])
            if not to_skip:
                include_bytes.extend(
                    data[offsets[i] + byte_idx : offsets[i] + byte_idx + char_size]
                )
                to_skip = step
            char_idx += 1
            byte_idx += char_size
            to_skip -= 1
        builder.append_value(include_bytes, len(include_bytes))
    return builder
@njit
def _slice_generic(
offsets, data, valid_bits, valid_offset, start: int, end: int, step: int
) -> StringArrayBuilder:
builder = StringArrayBuilder(len(offsets) - 1)
for i in prange(len(offsets) - 1):
if len(valid_bits) > 0:
byte_offset = (i + valid_offset) // 8
bit_offset = (i + valid_offset) % 8
mask = np.uint8(1 << bit_offset)
valid = valid_bits[byte_offset] & mask
if not valid:
builder.append_null()
continue
str_len_bytes = offsets[i + 1] - offsets[i]
char_bytes: List[bytes] = []
byte_idx | |
# <gh_stars>0  (extraction artifact, kept as a comment so the module parses)
# Maps Windows API typedef names to the underlying C types used when
# rendering the function signatures in the `api` table below.
apitypes = {
    'DWORD': 'unsigned int',
    'HANDLE': 'unsigned int',
}
api = {
# libc
'plt___libc_start_main':( 'int', None, 'cdecl', '*.__libc_start_main', (('int', 'main'), ('int', 'argc'), ('int', 'argv')) ),
'plt_main_entry':( 'int', None, 'stdcall', '*.main_entry', (('int', None), ('int', None), ('int', None)) ),
'plt__cicos':( 'int', None, 'cdecl', '*._CIcos', () ),
'plt__cilog':( 'int', None, 'cdecl', '*._CIlog', () ),
'plt__cipow':( 'int', None, 'cdecl', '*._CIpow', () ),
'plt__cisin':( 'int', None, 'cdecl', '*._CIsin', () ),
'plt__cisqrt':( 'int', None, 'cdecl', '*._CIsqrt', () ),
'plt___isascii':( 'int', None, 'cdecl', '*.__isascii', (('int', None),) ),
'plt___iscsym':( 'int', None, 'cdecl', '*.__iscsym', (('int', None),) ),
'plt___iscsymf':( 'int', None, 'cdecl', '*.__iscsymf', (('int', None),) ),
'plt___toascii':( 'int', None, 'cdecl', '*.__toascii', (('int', None),) ),
'plt__alldiv':( 'int', None, 'stdcall', '*._alldiv', (('int', None), ('int', None), ('int', None), ('int', None)) ),
'plt__alldvrm':( 'int', None, 'stdcall', '*._alldvrm', (('int', None), ('int', None), ('int', None), ('int', None)) ),
'plt__allmul':( 'int', None, 'stdcall', '*._allmul', (('int', None), ('int', None), ('int', None), ('int', None)) ),
'plt__alloca_probe':( 'int', None, 'cdecl', '*._alloca_probe', () ),
'plt__allrem':( 'int', None, 'stdcall', '*._allrem', (('int', None), ('int', None), ('int', None), ('int', None)) ),
'plt__allshl':( 'int', None, 'cdecl', '*._allshl', () ),
'plt__allshr':( 'int', None, 'cdecl', '*._allshr', () ),
'plt__atoi64':( 'int', None, 'cdecl', '*._atoi64', (('int', None),) ),
'plt__aulldiv':( 'int', None, 'stdcall', '*._aulldiv', (('int', None), ('int', None), ('int', None), ('int', None)) ),
'plt__aulldvrm':( 'int', None, 'stdcall', '*._aulldvrm', (('int', None), ('int', None), ('int', None), ('int', None)) ),
'plt__aullrem':( 'int', None, 'stdcall', '*._aullrem', (('int', None), ('int', None), ('int', None), ('int', None)) ),
'plt__aullshr':( 'int', None, 'cdecl', '*._aullshr', () ),
'plt__chkstk':( 'int', None, 'cdecl', '*._chkstk', () ),
'plt__fltused':( 'int', None, 'cdecl', '*._fltused', () ),
'plt__ftol':( 'int', None, 'cdecl', '*._ftol', () ),
'plt__i64toa':( 'int', None, 'msfastcall', '*._i64toa', (('int', None), ('int', None), ('int', None), ('int', None), ('int', None), ('int', None)) ),
'plt__i64tow':( 'int', None, 'msfastcall', '*._i64tow', (('int', None), ('int', None), ('int', None), ('int', None), ('int', None), ('int', None)) ),
'plt__itoa':( 'int', None, 'cdecl', '*._itoa', (('int', None), ('void *', 'obj'), ('int', None)) ),
'plt__itow':( 'int', None, 'cdecl', '*._itow', (('int', None), ('void *', 'obj'), ('int', None)) ),
'plt__lfind':( 'int', None, 'cdecl', '*._lfind', (('int', None), ('int', None), ('int', None), ('int', None), ('void *', 'funcptr')) ),
'plt__ltoa':( 'int', None, 'cdecl', '*._ltoa', (('int', None), ('void *', 'obj'), ('int', None)) ),
'plt__ltow':( 'int', None, 'cdecl', '*._ltow', (('int', None), ('void *', 'obj'), ('int', None)) ),
'plt__memccpy':( 'int', None, 'cdecl', '*._memccpy', (('int', None), ('int', None), ('int', None), ('int', None)) ),
'plt__memicmp':( 'int', None, 'cdecl', '*._memicmp', (('int', None), ('int', None), ('int', None)) ),
'plt__snprintf':( 'int', None, 'cdecl', '*._snprintf', (('int', None), ('int', None), ('int', None)) ),
'plt__snwprintf':( 'int', None, 'cdecl', '*._snwprintf', (('void *', 'buffer'), ('int', 'count'), ('void *', 'fmt')) ),
'plt__splitpath':( 'int', None, 'cdecl', '*._splitpath', (('int', None), ('int', None), ('int', None), ('int', None), ('int', None)) ),
'plt__strcmpi':( 'int', None, 'cdecl', '*._strcmpi', (('int', None), ('int', None)) ),
'plt__stricmp':( 'int', None, 'cdecl', '*._stricmp', (('int', None), ('int', None)) ),
'plt__strlwr':( 'int', None, 'cdecl', '*._strlwr', (('int', None),) ),
'plt__strnicmp':( 'int', None, 'cdecl', '*._strnicmp', (('int', None), ('int', None), ('int', None)) ),
'plt__strupr':( 'int', None, 'cdecl', '*._strupr', (('int', None),) ),
'plt__tolower':( 'int', None, 'cdecl', '*._tolower', (('int', None),) ),
'plt__toupper':( 'int', None, 'cdecl', '*._toupper', (('int', None),) ),
'plt__ui64toa':( 'int', None, 'msfastcall', '*._ui64toa', (('int', None), ('int', None), ('int', None), ('int', None), ('int', None), ('int', None)) ),
'plt__ui64tow':( 'int', None, 'msfastcall', '*._ui64tow', (('int', None), ('int', None), ('int', None), ('int', None), ('int', None), ('int', None)) ),
'plt__ultoa':( 'int', None, 'cdecl', '*._ultoa', (('int', None), ('void *', 'obj'), ('int', None)) ),
'plt__ultow':( 'int', None, 'cdecl', '*._ultow', (('int', None), ('void *', 'obj'), ('int', None)) ),
'plt__vsnprintf':( 'int', None, 'cdecl', '*._vsnprintf', (('void *', 'ptr'), ('int', None), ('int', None), ('int', None)) ),
'plt__vsnwprintf':( 'int', None, 'cdecl', '*._vsnwprintf', (('int', None), ('int', None), ('int', None), ('int', None)) ),
'plt__wcsicmp':( 'int', None, 'cdecl', '*._wcsicmp', (('int', None), ('int', None)) ),
'plt__wcslwr':( 'int', None, 'cdecl', '*._wcslwr', (('int', None),) ),
'plt__wcsnicmp':( 'int', None, 'cdecl', '*._wcsnicmp', (('void *', 'ptr'), ('int', None), ('int', None)) ),
'plt__wcsupr':( 'int', None, 'cdecl', '*._wcsupr', (('int', None),) ),
'plt__wtoi':( 'int', None, 'cdecl', '*._wtoi', (('int', None),) ),
'plt__wtoi64':( 'int', None, 'cdecl', '*._wtoi64', (('int', None),) ),
'plt__wtol':( 'int', None, 'cdecl', '*._wtol', (('int', None),) ),
'plt_abs':( 'int', None, 'cdecl', '*.abs', (('int', None),) ),
'plt_atan':( 'int', None, 'cdecl', '*.atan', () ),
'plt_atoi':( 'int', None, 'cdecl', '*.atoi', (('int', None),) ),
'plt_atol':( 'int', None, 'cdecl', '*.atol', (('int', None),) ),
'plt_bsearch':( 'int', None, 'cdecl', '*.bsearch', (('void *', 'ptr'), ('int', None), ('int', None), ('int', None), ('void *', 'funcptr')) ),
'plt_ceil':( 'int', None, 'cdecl', '*.ceil', () ),
'plt_cos':( 'int', None, 'cdecl', '*.cos', () ),
'plt_fabs':( 'int', None, 'cdecl', '*.fabs', (('int', None), ('int', None)) ),
'plt_floor':( 'int', None, 'cdecl', '*.floor', () ),
'plt_isalnum':( 'int', None, 'cdecl', '*.isalnum', (('int', None),) ),
'plt_isalpha':( 'int', None, 'cdecl', '*.isalpha', (('int', None),) ),
'plt_iscntrl':( 'int', None, 'cdecl', '*.iscntrl', (('int', None),) ),
'plt_isdigit':( 'int', None, 'cdecl', '*.isdigit', (('int', None),) ),
'plt_isgraph':( 'int', None, 'cdecl', '*.isgraph', (('int', None),) ),
'plt_islower':( 'int', None, 'cdecl', '*.islower', (('int', None),) ),
'plt_isprint':( 'int', None, 'cdecl', '*.isprint', (('int', None),) ),
'plt_ispunct':( 'int', None, 'cdecl', '*.ispunct', (('int', None),) ),
'plt_isspace':( 'int', None, 'cdecl', '*.isspace', (('int', None),) ),
'plt_isupper':( 'int', None, 'cdecl', '*.isupper', (('int', None),) ),
'plt_iswalpha':( 'int', None, 'cdecl', '*.iswalpha', (('int', None),) ),
'plt_iswctype':( 'int', None, 'cdecl', '*.iswctype', (('int', None), ('int', None)) ),
'plt_iswdigit':( 'int', None, 'cdecl', '*.iswdigit', (('int', None),) ),
'plt_iswlower':( 'int', None, 'cdecl', '*.iswlower', (('int', None),) ),
'plt_iswspace':( 'int', None, 'cdecl', '*.iswspace', (('int', None),) ),
'plt_iswxdigit':( 'int', None, 'cdecl', '*.iswxdigit', (('int', None),) ),
'plt_isxdigit':( 'int', None, 'cdecl', '*.isxdigit', (('int', None),) ),
'plt_labs':( 'int', None, 'cdecl', '*.labs', (('int', None),) ),
'plt_log':( 'int', None, 'cdecl', '*.log', () ),
'plt_mbstowcs':( 'int', None, 'cdecl', '*.mbstowcs', (('void *', 'ptr'), ('int', None), ('int', None)) ),
'plt_memchr':( 'int', None, 'cdecl', '*.memchr', (('int', None), ('int', None), ('int', None)) ),
'plt_memcmp':( 'int', None, 'cdecl', '*.memcmp', (('int', None), ('int', None), ('int', None)) ),
'plt_memcpy':( 'int', None, 'cdecl', '*.memcpy', (('int', None), ('int', None), ('int', None)) ),
'plt_memmove':( 'int', None, 'cdecl', '*.memmove', (('void *', 'dst'), ('void *', 'src'), ('int', 'count')) ),
'plt_memset':( 'int', None, 'cdecl', '*.memset', (('int', None), ('int', None), ('int', None)) ),
'plt_pow':( 'int', None, 'cdecl', '*.pow', () ),
'plt_qsort':( 'int', None, 'stdcall', '*.qsort', ( ('void *', 'funcptr'), ('int', None), ('int', None), ('void *', 'funcptr'), ('int', None), ('int', None), ('int', None), ('int', None), ('int', None), ('int', None), ('int', None), ('int', None), ('int', None), ('int', None), ('int', None), ('int', None), ('int', None), ('int', None), ('int', None), ('int', None), ('int', None), ('int', None), ('int', None), ('int', None), ('int', None), ('int', None), ('int', None), ('int', None), ('int', None), ('int', None), ('int', None), ('int', None), ('int', None), ('int', None), ('int', None), ('int', None), ('int', None), ('int', None), ('int', None), ('int', None)) ),
'plt_sin':( 'int', None, 'cdecl', '*.sin', () ),
'plt_sprintf':( 'int', None, 'cdecl', '*.sprintf', (('void *', 'ptr'), ('int', None)) ),
'plt_sqrt':( 'int', None, 'cdecl', '*.sqrt', () ),
'plt_sscanf':( 'int', None, 'cdecl', '*.sscanf', (('int', None), ('int', None)) ),
'plt_strcat':( 'int', None, 'cdecl', '*.strcat', (('int', None), ('int', None)) ),
'plt_strchr':( 'int', None, 'cdecl', '*.strchr', (('int', None), ('int', None)) ),
'plt_strcmp':( 'int', None, 'cdecl', '*.strcmp', (('int', None), ('int', None)) ),
'plt_strcpy':( 'int', None, 'cdecl', '*.strcpy', (('int', None), ('int', None)) ),
'plt_strcspn':( 'int', None, 'cdecl', '*.strcspn', (('int', None), ('int', None)) ),
'plt_strlen':( 'int', None, 'cdecl', '*.strlen', (('int', None),) ),
'plt_strncat':( 'int', None, 'cdecl', '*.strncat', (('int', None), ('int', None), ('int', None)) ),
'plt_strncmp':( 'int', None, 'cdecl', '*.strncmp', (('int', None), ('int', None), ('int', None)) ),
'plt_strncpy':( 'int', None, 'cdecl', '*.strncpy', (('int', None), ('int', None), ('int', None)) ),
'plt_strpbrk':( 'int', None, 'cdecl', '*.strpbrk', (('int', None), ('int', None)) ),
'plt_strrchr':( 'int', None, 'cdecl', '*.strrchr', (('int', None), ('int', None)) ),
'plt_strspn':( 'int', None, 'cdecl', '*.strspn', (('int', None), ('int', None)) ),
'plt_strstr':( 'int', None, 'cdecl', '*.strstr', (('int', None), ('int', None)) ),
'plt_strtol':( 'int', None, 'cdecl', '*.strtol', (('int', None), ('int', None), ('int', None)) ),
'plt_strtoul':( 'int', None, 'cdecl', '*.strtoul', (('int', None), ('int', None), ('int', None)) ),
'plt_swprintf':( 'int', None, 'cdecl', '*.swprintf', (('wchar *', 'ptr'), ('int', None)) ),
'plt_tan':( 'int', None, 'cdecl', '*.tan', () ),
'plt_tolower':( 'int', None, 'cdecl', '*.tolower', (('int', None),) ),
'plt_toupper':( 'int', None, 'cdecl', '*.toupper', (('int', None),) | |
# <filename>test/test_writers_readers.py  (extraction artifact, kept as a comment so the module parses)
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import tensorflow as tf
import tempfile
import shutil
import zlib
import unittest
import copy
########### Debug printing ###########
import pprint
# Module-level pretty printer used by myprint() below.
pp_xfawedfssa = pprint.PrettyPrinter(indent=2)
def myprint(x):
    """Debug helper: plain ``print`` for strings, pretty-print anything else.

    Uses isinstance() rather than ``type(x) == str`` so str subclasses are
    also printed plainly instead of being pretty-printed.
    """
    if isinstance(x, str):
        print(x)
    else:
        pp_xfawedfssa.pprint(x)
#######################################
# Graph related stuff
graph_nets_available = True
try:
from graph_nets import graphs
from graph_nets import utils_tf
from graph_nets import utils_np
import networkx as nx
except ImportError:
print("Unable to use graph_nets")
graph_nets_available = False
# Our module
import tf_data_loader as tfdataloader
def myhash(x):
    # Deterministic cross-process hash (builtin hash() is salted per run):
    # stringify the value and checksum its UTF-8 bytes.
    encoded = str(x).encode('utf-8')
    return zlib.adler32(encoded)
############### Data Generators ###############
class BasicDataGenerator(tfdataloader.DataGenerator):
  """Minimal test generator: one dense 3x3x3 float32 tensor plus an int label."""

  def __init__(self):
    feature_specs = [
        tfdataloader.TensorFeature(
            key='tensor',
            shape=[3,3,3],
            description='A tensor feature',
            dtype='float32',
        ),
        tfdataloader.IntFeature(
            key='class',
            description='A integer feature',
        ),
    ]
    super(BasicDataGenerator, self).__init__(feature_specs)
    # Last generated sample is kept for inspection by callers.
    self.out_val = None

  def gen_sample(self, name, index):
    """Generate one sample, deterministic in (name, index)."""
    # Seed from (name, index) so repeated calls reproduce the same sample.
    seed = abs(index * myhash(name)) % (2**32 - 1)
    np.random.seed(seed)
    label = index  # np.random.randint(0,3)
    sample = np.random.randn(3,3,3) + label
    self.out_val = {
        'tensor': sample.astype('float32'),
        'class': label,
    }
    return self.out_val
# Module-level generator instance shared by the tests below.
basic_generator = BasicDataGenerator()
class AllFeaturesDataGenerator(tfdataloader.DataGenerator):
  """Test generator exercising every feature type declared below: dense
  tensor, int, var-len int list, var-len float, and sparse tensor."""
  def __init__(self):
    # Fixed geometry of the generated samples.
    self.tensor_shape = [4, 4, 4]
    self.num_auxilary_feats = 16
    features_list = [
      tfdataloader.TensorFeature(
          key='tensor',
          shape=self.tensor_shape,
          description='A tensor feature',
          dtype='float32',
      ),
      tfdataloader.IntFeature(
          key='class',
          dtype='int64',
          description='A integer feature',
      ),
      tfdataloader.VarLenIntListFeature(
          key='aux_classes',
          dtype='int32',
          description='A variable length integer feature',
      ),
      tfdataloader.VarLenFloatFeature(
          key='aux_classes_feat',
          shape=[None, self.num_auxilary_feats],
          description='A variable length float feature',
      ),
      tfdataloader.SparseTensorFeature(
          key='outlier_labels',
          shape=self.tensor_shape,
          description='A sparse tensor feature',
      ),
    ]
    super(AllFeaturesDataGenerator, self).__init__(features_list)
  def gen_sample(self, name, index):
    """Generate one sample, deterministic in (name, index)."""
    # Pick a random seed value
    seed = abs(index * myhash(name)) % (2**32 - 1)
    np.random.seed(seed)
    tshape = self.tensor_shape
    ashape = self.num_auxilary_feats
    # Generate data
    class_val = np.random.randint(0, 3)
    nc = np.random.randint(2, 6)
    aux_classes = np.random.randint(0, 2, nc)
    aux_classes_feat = np.random.randn(nc, ashape) + aux_classes.reshape(-1, 1)
    outliers = np.random.binomial(1, 1./16., tshape)
    # NOTE(review): this loop keeps at most ONE outlier cell; if the intent
    # was "at least one outlier", the condition should be `< 1` — confirm.
    while np.sum(outliers) > 1:
      outliers = np.random.binomial(1, 1./16., tshape)
    tensor = (np.random.randn(*tshape) + outliers) + class_val
    outlier_labels = tfdataloader.np_dense_to_sparse(outliers.astype('float32'))
    return {
      'tensor': tensor.astype('float32'),
      'class': class_val,
      'aux_classes': aux_classes,
      'aux_classes_feat': aux_classes_feat.astype('float32'),
      'outlier_labels': outlier_labels,
    }
# Module-level generator instance shared by the tests below.
all_features_generator = AllFeaturesDataGenerator()
if graph_nets_available:
  class GraphDataGenerator(tfdataloader.DataGenerator):
    """Test generator producing a random directed graph (graph_nets data
    dict) plus dense and sparse copies of its padded adjacency matrix."""
    def __init__(self):
      # Graph-size and feature-dimension parameters for the random graphs.
      self.max_nodes = 8
      self.edge_prob = 0.2
      self.node_feature_size=8
      self.edge_feature_size=2
      self.global_feature_size=4
      features_list = [
        tfdataloader.GraphFeature(
            key='output_graph',
            node_feature_size=self.node_feature_size,
            edge_feature_size=self.edge_feature_size,
            global_feature_size=self.global_feature_size,
            description='A graph feature',
            dtype='float32',
        ),
        tfdataloader.TensorFeature(
            key='adj_mat_dense',
            shape=[self.max_nodes, self.max_nodes],
            description='A tensor feature',
            dtype='float32',
        ),
        tfdataloader.SparseTensorFeature(
            key='adj_mat_sparse',
            shape=[self.max_nodes, self.max_nodes],
            description='A sparse tensor feature',
        ),
      ]
      super(GraphDataGenerator, self).__init__(features_list)
      self.out_val = None
    def gen_sample(self, name, index):
      """Generate one random-graph sample, deterministic in (name, index)."""
      seed = abs(index * myhash(name)) % (2**32 - 1)
      np.random.seed(seed)
      # Pose graph and related objects
      num_nodes = np.random.randint(2,self.max_nodes)
      # Generate Erdos Renyi Graph
      AdjMat = np.random.binomial(1, self.edge_prob, (num_nodes, num_nodes))
      # Build sparse graph representation with random node/edge/global features.
      # NOTE(review): nx.from_numpy_matrix was removed in networkx 3.0; newer
      # versions need nx.from_numpy_array instead — confirm pinned version.
      G_nx = nx.from_numpy_matrix(AdjMat, create_using=nx.DiGraph)
      node_attrs = { i : np.random.randn(
                          self.node_feature_size
                        ).astype(np.float32)
                     for i in range(len(G_nx)) }
      edges_attrs = { (i, j) : np.random.randn(
                          self.edge_feature_size
                        ).astype(np.float32)
                      for (i, j) in G_nx.edges }
      nx.set_node_attributes(G_nx, node_attrs, 'features')
      nx.set_edge_attributes(G_nx, edges_attrs, 'features')
      G = utils_np.networkx_to_data_dict(G_nx)
      G['globals'] = np.random.randn(
                        self.global_feature_size
                     ).astype('float32')
      # Build dense adjacency, zero-padded to max_nodes x max_nodes.
      adj_mat_dense = np.zeros((self.max_nodes, self.max_nodes))
      adj_mat_dense[:num_nodes, :num_nodes] = AdjMat
      out_dict = {
        'output_graph': G,
        'adj_mat_dense' : adj_mat_dense,
        'adj_mat_sparse' : tfdataloader.np_dense_to_sparse(adj_mat_dense),
      }
      return out_dict
  graph_generator = GraphDataGenerator()
############### Base Classes ###############
class ReadTestCase(unittest.TestCase):
def mySetUp(self, generator):
# Create a temporary directory for data saving before each test
self.test_dir = tempfile.mkdtemp()
self.config = tf.ConfigProto()
self.config.gpu_options.allow_growth = True
self.generator = generator
self.data_writer = tfdataloader.DataWriter(self.test_dir,
self.generator,
verbose=False)
self.data_reader = tfdataloader.DataReader(self.test_dir)
# Switchers
self.equal_switcher = {
tfdataloader.SparseTensorFeature :
lambda f, v0, v1: self.sparseTensorValEqual(f, v0, v1),
tfdataloader.VarLenIntListFeature :
lambda f, v0, v1: self.varLenIntListEqual(f, v0, v1),
tfdataloader.VarLenFloatFeature:
lambda f, v0, v1: self.varLenFloatValEqual(f, v0, v1),
}
self.select_switcher = {
tfdataloader.SparseTensorFeature :
lambda v, i: self.sparseTensorSelect(v, i),
}
def tearDown(self):
# Remove the directory after each test
shutil.rmtree(self.test_dir)
# Not all features require the same equality
def sparseTensorValToTuple(self, sptensor):
indices = [
x.reshape(-1)
for x in np.split(sptensor.indices, sptensor.indices.shape[-1], axis=-1)
]
values = sptensor.values
ttensor = (indices, values)
return ttensor
def isEqualSampleVals(self, feat, val0, val1):
eqOp_default = lambda f, v0, v1: self.standardEqual(v0, v1)
eqOp = self.equal_switcher.get(type(feat), eqOp_default)
return eqOp(feat, val0, val1)
def select(self, feat, val, idx):
select_default = lambda v, i: v[i]
eqOp = self.select_switcher.get(type(feat), select_default)
return eqOp(val, idx)
# TODO: Should this go into the classes themselves?
def standardEqual(self, val0, val1):
return np.array_equiv(np.squeeze(val0), np.squeeze(val1))
def varLenFloatValEqual(self, feat, val0, val1):
if len(val0.shape) > 1 and val0.shape[0] == 1 \
and len(val0.shape) == len(val1.shape) + 1:
val0 = val0[0]
elif len(val1.shape) > 1 and val1.shape[0] == 1 \
and len(val1.shape) == len(val0.shape) + 1:
val1 = val1[0]
if len(val0.shape) != len(val1.shape):
return False
x = tuple(slice(None,min(s0,s1)) for s0, s1 in zip(val0.shape, val1.shape))
val0_ = val0[x]
val1_ = val1[x]
if not np.allclose(val0_, val1_):
return False
x = tuple(slice(min(s0,s1),None) for s0, s1 in zip(val0.shape, val1.shape))
val0_zeros = val0[x]
val1_zeros = val1[x]
if not np.allclose(val0_zeros,0) or not np.allclose(val1_zeros, 0):
return False
return True
def varLenIntListEqual(self, feat, val0, val1):
if not isinstance(val0, np.ndarray) or not isinstance(val1, np.ndarray):
return False
while len(val0.shape) > 1 and val0.shape[0] == 1:
val0 = val0[0]
while len(val1.shape) > 1 and val1.shape[0] == 1:
val1 = val1[0]
if len(val0.shape) != len(val1.shape) or len(val1.shape) != 1:
return False
l = min(len(val0), len(val1))
val0_ = val0[:l]
val1_ = val1[:l]
return np.allclose(val0_, val1_)
def sparseTensorValEqual(self, feat, val0, val1):
if isinstance(val0, tf.SparseTensorValue):
val0 = self.sparseTensorValToTuple(val0)
if isinstance(val1, tf.SparseTensorValue):
val1 = self.sparseTensorValToTuple(val1)
# Now treat equally
indices0, indices1 = val0[0], val1[0]
values0, values1 = val0[1], val1[1]
if len(values0) == 0 and len(values1) == 0:
return True
if len(indices0) > 1 and np.allclose(indices0[0],0) \
and len(indices0) == len(indices1) + 1:
indices0 = indices0[1:]
elif len(indices1) > 1 and np.allclose(indices1[0],0) \
and len(indices1) == len(indices0) + 1:
indices1 = indices1[1:]
if len(indices0) != len(indices1):
return False
if len(indices0[0]) != len(indices1[0]):
return False
if values0.shape != values1.shape:
return False
for ind0, ind1 in zip(indices0, indices1):
if not np.array_equiv(ind0,ind1):
return False
equal_val = np.array_equiv(values0, values1)
return equal_val
def sparseTensorSelect(self, val, idx):
    """Select the entries of a sparse value whose leading index equals ``idx``
    (e.g. one batch element), dropping that leading dimension.

    ``val`` is either a ``tf.SparseTensorValue`` or an ``(indices, values)``
    tuple.  Returns ``(indices, values)`` where ``indices`` is a tuple with
    one array per remaining dimension.
    """
    if isinstance(val, tf.SparseTensorValue):
        inds = val.indices
        vals = val.values
    elif isinstance(val, tuple):
        inds, vals = val
    else:
        # Fails the enclosing test on an unsupported input type.
        self.assertTrue(False, 'Invalid Sparse type for selection')
    # One output index list per remaining (non-selected) dimension.
    inds_new = [[] for i in range(len(inds[0]) - 1)]
    vals_new = []
    for i in range(len(vals)):
        # Keep entries whose leading index matches the requested idx.
        if inds[i][0] == idx:
            vals_new.append(vals[i])
            for j in range(len(inds[0]) - 1):
                inds_new[j].append(inds[i][j + 1])
    return tuple([np.array(x) for x in inds_new]), np.array(vals_new)
############### TFRecord Tests ###############
# Writer Tests
class DataTFRecordWriterBasicTest(unittest.TestCase):
    """Checks that tfdataloader.DataWriter emits config and shard files."""

    def mySetUp(self, generator):
        """Create a fresh temp directory and a DataWriter over *generator*."""
        # Create a temporary directory for data saving before each test
        self.test_dir = tempfile.mkdtemp()
        self.data_writer = tfdataloader.DataWriter(self.test_dir,
                                                   generator,
                                                   verbose=False)

    def setUp(self):
        self.mySetUp(basic_generator)

    def tearDown(self):
        # Remove the directory after each test
        shutil.rmtree(self.test_dir)

    def test_config_write(self):
        """The writer must emit a parseable config.yaml next to the data."""
        path = os.path.join(self.test_dir, 'config.yaml')
        self.assertTrue(os.path.exists(path))
        load_passed = True
        try:
            import yaml
            with open(path, 'r') as f:
                # safe_load: yaml.load without an explicit Loader is
                # deprecated (PyYAML >= 5.1) and unsafe on arbitrary input.
                config = yaml.safe_load(f)
        except Exception:
            load_passed = False
        self.assertTrue(load_passed)

    def test_basic_write(self):
        """A one-sample dataset produces a single shard file."""
        name = 'test_basic_write'
        self.data_writer.create_dataset(name, 1)
        path = os.path.join(self.test_dir, name, '000.tfrecords')
        self.assertTrue(os.path.exists(path))

    # TODO: Change this behavior? (https://stackoverflow.com/questions/52191167/optimal-size-of-a-tfrecord-file)
    def test_large_write(self):
        """Exceeding MAX_IDX samples must roll over into a second shard."""
        name = 'test_large_write'
        self.data_writer.create_dataset(name, self.data_writer.MAX_IDX + 1)
        path0 = os.path.join(self.test_dir, name, '000.tfrecords')
        self.assertTrue(os.path.exists(path0))
        path1 = os.path.join(self.test_dir, name, '001.tfrecords')
        self.assertTrue(os.path.exists(path1))
class DataTFRecordWriterAllFeaturesTest(DataTFRecordWriterBasicTest):
    """Re-runs the writer tests with a generator covering all feature types."""
    def setUp(self):
        self.mySetUp(all_features_generator)
# Graph-structured samples need the optional graph_nets dependency, so the
# graph writer tests are only registered when it is importable.
if graph_nets_available:
    class DataTFRecordWriterGraphTest(DataTFRecordWriterBasicTest):
        """Re-runs the writer tests with the graph-sample generator."""
        def setUp(self):
            self.mySetUp(graph_generator)
# Reader tests
class DataTFRecordReaderBasicTest(ReadTestCase):
def setUp(self):
    """Prepare the test fixture over the basic sample generator."""
    # mySetUp is inherited from ReadTestCase; presumably it builds both
    # self.data_writer and self.data_reader (the tests below use both) --
    # TODO confirm against ReadTestCase.
    self.mySetUp(basic_generator)
def test_features(self):
    """Every feature spec the writer declares must round-trip to the reader."""
    reader_feats = self.data_reader.features
    for name, expected in self.data_writer.features.items():
        self.assertEqual(reader_feats[name], expected)
def test_basic_read(self):
    """Write one sample, read it back, and compare it field-by-field."""
    name = 'test_basic_read'
    num_total = 1
    batch_size = 1
    self.data_writer.create_dataset(name, num_total)
    # Reference sample regenerated straight from the generator (index 0).
    mysample = self.generator.gen_sample(name, 0)
    sample = self.data_reader.get_standard_batch(name, batch_size, shuffle_data=False)
    # Every key the reader yields must be a feature the writer declared.
    for k in sorted(list(sample.keys())):
        key_valid = k in self.data_writer.features
        self.assertTrue(key_valid, '{} not valid'.format(k))
    with tf.Session(config=copy.deepcopy(self.config)) as sess:
        sample_ = sess.run(sample)
        self.assertEqual(sample_.keys(), mysample.keys())
        for k in sorted(list(sample_.keys())):
            feat = self.data_reader.features[k]
            # isEqualSampleVals presumably dispatches on the feature type
            # (dense / varlen / sparse) -- TODO confirm in ReadTestCase.
            equal_val = self.isEqualSampleVals(feat, sample_[k], mysample[k])
            self.assertTrue(equal_val, '{} not equal'.format(k))
def test_multi_read(self):
    """Write several samples and read them back one by one, in order."""
    name = 'test_multi_read'
    num_total = 10
    batch_size = 1
    self.data_writer.create_dataset(name, num_total)
    sample = self.data_reader.get_standard_batch(name, batch_size, shuffle_data=False)
    # Every key the reader yields must be a feature the writer declared.
    for k in sorted(list(sample.keys())):
        key_valid = k in self.data_writer.features
        self.assertTrue(key_valid, '{} not valid'.format(k))
    with tf.Session(config=copy.deepcopy(self.config)) as sess:
        for b in range(num_total):
            # shuffle_data=False, so batch b must match generated sample b.
            sample_ = sess.run(sample)
            mysample = self.generator.gen_sample(name, b)
            self.assertEqual(sample_.keys(), mysample.keys())
            for k in sorted(list(sample_.keys())):
                feat = self.data_reader.features[k]
                equal_val = self.isEqualSampleVals(feat, sample_[k], mysample[k])
                self.assertTrue(equal_val, '{} not equal'.format(k))
def test_batch_read(self):
name = 'test_batch_read'
num_total = 10
batch_size = 5
num_batches = num_total // batch_size
self.data_writer.create_dataset(name, num_total)
sample = self.data_reader.get_standard_batch(name, batch_size, shuffle_data=False)
feats = self.data_reader.features
for k in sorted(list(sample.keys())):
key_valid = k in self.data_writer.features
self.assertTrue(key_valid, '{} not valid'.format(k))
with tf.Session(config=copy.deepcopy(self.config)) as sess:
for b in | |
/home/ubuntu/setup/benchmarking.csv && touch /home/ubuntu/setup/benchmarking.log")
# wait_and_log(stdout, stderr)
"""
time.sleep(experiment_config['duration']/2)
logger.info("Shutting down the raft leader")
# Fabric_Network.shutdown_raft_nonleader(blockchain_config, blockchain_ssh_clients, blockchain_scp_clients, logger)
Fabric_Network.stop_node(experiment_handler.dapp_handler.blockchain_handler, 1, 0)
time.sleep(experiment_config['duration'] / 2)
"""
logger.info(f"Waiting until all requests have been sent")
status_flags = wait_till_done(client_config, [client_ssh_clients[index] for index in client_indices], [client_config['pub_ips'][index] for index in client_indices],
np.ceil(max_time / 60) * 60 + 10, 10, "/home/ubuntu/setup/benchmarking.csv", False,
max_time, logger)
if False in status_flags:
return False
else:
logger.info("Finished with sending the requests")
return True
def get_benchmarking_data(experiment_handler, experiment_config, test_config):
    """
    Pulls the benchmarking data (transaction csvs, resource / ping / network /
    io / per-cpu logs) from the client and blockchain instances via scp, then
    deletes the remote log files and kills the remote measurement daemons.
    :param experiment_handler: holds the configs, logger and ssh/scp clients
    :param experiment_config: currently unused; kept for a uniform interface
    :param test_config: per-test settings ('exp_dir', 'freq', 'rep')
    :return: None (files are written under test_config['exp_dir'])
    """
    blockchain_config = experiment_handler.blockchain_formation_config
    client_config = experiment_handler.client_config
    logger = experiment_handler.logger
    blockchain_ssh_clients = experiment_handler.dapp_handler.blockchain_handler.ssh_clients
    blockchain_scp_clients = experiment_handler.dapp_handler.blockchain_handler.scp_clients
    client_ssh_clients = experiment_handler.dapp_handler.client_handler.ssh_clients
    client_scp_clients = experiment_handler.dapp_handler.client_handler.scp_clients
    logger.info("Getting the measurement data")
    path = test_config['exp_dir']
    freq = test_config['freq']
    rep = test_config['rep']
    # logger.info("Getting csvs and logs for evaluation from clients and nodes")
    # For acapy only the coordinator clients drive load; otherwise every client does.
    if client_config["blockchain_type"] == "acapy":
        client_indices = client_config["coordinator_indices"]
    else:
        client_indices = range(0, len(client_ssh_clients))
    for _, client in enumerate(client_indices):
        # Transaction data, client log and resource usage for this client.
        client_scp_clients[client].get(f"/home/ubuntu/setup/benchmarking.csv", path + f"/data/freq{freq}_client{client}_tx_data{rep}.csv")
        client_scp_clients[client].get(f"/home/ubuntu/setup/benchmarking.log", path + f"/logs/freq{freq}_client{client}_tx_data{rep}.log")
        client_scp_clients[client].get(f"/home/ubuntu/resources_measurement.log", path + f"/data/freq{freq}_client{client}_resources{rep}.csv")
        # Ping data only exists when there are blockchain nodes to ping.
        if len(blockchain_ssh_clients) > 0:
            client_scp_clients[client].get(f"/home/ubuntu/ping.log", path + f"/data/freq{freq}_client{client}_ping{rep}.csv")
        client_scp_clients[client].get(f"/home/ubuntu/io.log", path + f"/data/freq{freq}_client{client}_io{rep}.csv")
        client_scp_clients[client].get(f"/home/ubuntu/network.log", path + f"/data/freq{freq}_client{client}_network{rep}.csv")
        client_scp_clients[client].get(f"/home/ubuntu/single_cpus.log", path + f"/data/freq{freq}_client{client}_single_cpus{rep}.csv")
        # Strip mpstat headers / 'all' summary rows from the per-cpu log.
        os.system(f"awk 'NF>1' {path}/data/freq{freq}_client{client}_single_cpus{rep}.csv | awk '!/CPU/' | awk '!/all/' > {path}/data/freq{freq}_client{client}_single_cpus_clean{rep}.csv")
        # Clean up the remote logs and stop the measurement daemons.
        stdin, stdout, stderr = client_ssh_clients[client].exec_command("rm /home/ubuntu/setup/benchmarking.csv /home/ubuntu/setup/benchmarking.log /home/ubuntu/ping.log /home/ubuntu/resources_measurement.log /home/ubuntu/io.log /home/ubuntu/network.log /home/ubuntu/single_cpus.log")
        stdout.readlines()
        stdin, stdout, stderr = client_ssh_clients[client].exec_command(
            "for pid in $(pidof iostat); do kill -9 $pid; done && for pid in $(pidof vmstat); do kill -9 $pid; done && for pid in $(pidof ping); do kill -9 $pid; done && for pid in $(pidof ifstat); do kill -9 $pid; done && for pid in $(pidof mpstat); do kill -9 $pid; done")
        stdout.readlines()
    # Determine which node roles exist for this blockchain flavor.
    types = ['node']
    if blockchain_config['blockchain_type'] == "vendia":
        types = []
    if blockchain_config['blockchain_type'] == "fabric":
        if blockchain_config['fabric_settings']['orderer_type'].upper() == "KAFKA":
            if 'internal_orderer' in blockchain_config['fabric_settings'] and blockchain_config['fabric_settings']['internal_orderer'] == 1:
                types = ['peer', 'zookeeper', 'kafka']
            else:
                types = ['peer', 'orderer', 'zookeeper', 'kafka']
        else:
            if 'internal_orderer' in blockchain_config['fabric_settings'] and blockchain_config['fabric_settings']['internal_orderer'] == 1:
                types = ['peer']
            else:
                types = ['peer', 'orderer']
        if blockchain_config['fabric_settings']['database'] == "CouchDB" and (('external' in blockchain_config['fabric_settings'] and blockchain_config['fabric_settings']['external'] == 1) or ('external_database' in blockchain_config['fabric_settings'] and blockchain_config['fabric_settings']['external_database'] == 1)):
            types.append('db')
    # NOTE(review): `type` shadows the builtin here (kept as-is).
    for type in types:
        type_indices = blockchain_config[f'{type}_indices']
        # For acapy the "blockchain" nodes are agents living on client hosts.
        if client_config["blockchain_type"] == "acapy":
            blockchain_ssh_clients = [client_ssh_clients[i] for i in client_config["agent_indices"]]
            blockchain_scp_clients = [client_scp_clients[i] for i in client_config["agent_indices"]]
            type_indices = [0]
        # print(blockchain_scp_clients)
        # print(type_indices)
        for node, index in enumerate(type_indices):
            # Same measurement set as for the clients, per blockchain node.
            blockchain_scp_clients[index].get(f"/home/ubuntu/resources_measurement.log", path + f"/data/freq{freq}_{type}{node}_resources{rep}.csv")
            blockchain_scp_clients[index].get(f"/home/ubuntu/ping.log", path + f"/data/freq{freq}_{type}{node}_ping{rep}.csv")
            blockchain_scp_clients[index].get(f"/home/ubuntu/network.log", path + f"/data/freq{freq}_{type}{node}_network{rep}.csv")
            blockchain_scp_clients[index].get(f"/home/ubuntu/io.log", path + f"/data/freq{freq}_{type}{node}_io{rep}.csv")
            blockchain_scp_clients[index].get(f"/home/ubuntu/single_cpus.log", path + f"/data/freq{freq}_{type}{node}_single_cpus{rep}.csv")
            os.system(f"awk 'NF>1' {path}/data/freq{freq}_{type}{node}_single_cpus{rep}.csv | awk '!/CPU/' | awk '!/all/' > {path}/data/freq{freq}_{type}{node}_single_cpus_clean{rep}.csv")
            stdin, stdout, stderr = blockchain_ssh_clients[index].exec_command("rm /home/ubuntu/resources_measurement.log /home/ubuntu/ping.log /home/ubuntu/network.log /home/ubuntu/io.log /home/ubuntu/single_cpus.log")
            stdout.readlines()
            stdin, stdout, stderr = blockchain_ssh_clients[index].exec_command(
                "for pid in $(pidof iostat); do kill -9 $pid; done && for pid in $(pidof vmstat); do kill -9 $pid; done && for pid in $(pidof ping); do kill -9 $pid; done && for pid in $(pidof ifstat); do kill -9 $pid; done && for pid in $(pidof mpstat); do kill -9 $pid; done")
            stdout.readlines()
    # logger.info("All Measurements should now be pulled successfully")
    if blockchain_config['blockchain_type'] == "fabric":
        type = "peer"
    else:
        type = "node"
    # Storage growth measurement: df snapshot before the run already exists on
    # the node; take the "after" snapshot now and pull both.
    if client_config["blockchain_type"] == "acapy":
        pass
    else:
        for node, index in enumerate(blockchain_config['node_indices']):
            stdin, stdout, stderr = blockchain_ssh_clients[index].exec_command(f"df > /home/ubuntu/df_after.log")
            wait_and_log(stdout, stderr)
            blockchain_scp_clients[index].get(f"/home/ubuntu/df_before.log", path + f"/data/freq{freq}_{type}{node}_df_before{rep}.csv")
            blockchain_scp_clients[index].get(f"/home/ubuntu/df_after.log", path + f"/data/freq{freq}_{type}{node}_df_after{rep}.csv")
            stdin, stdout, stderr = blockchain_ssh_clients[index].exec_command("rm /home/ubuntu/df_before.log /home/ubuntu/df_after.log")
            wait_and_log(stdout, stderr)
def evaluate_benchmarking_test(experiment_handler, experiment_config, test_config, demo, plot):
"""
Evaluates a finished benchmark test
:param blockchain_config:
:param client_config:
:param experiment_config:
:param test_config:
:param logger:
:param blockchain_ssh_clients:
:param blockchain_scp_clients:
:param client_ssh_clients:
:param client_scp_clients:
:return:
"""
blockchain_config = experiment_handler.blockchain_formation_config
client_config = experiment_handler.client_config
logger = experiment_handler.logger
path = test_config['exp_dir']
freq = test_config['freq']
rep = test_config['rep']
# print(test_config)
logger.info("Evaluating the data")
measurements = {}
# the data containing the cpu utilization of all the blockchain nodes
measurements['blockchain_cpu_data'] = {}
# the data containing the cpu utilization of all the clients
measurements['client_cpu_data'] = []
# the data containing the network speed (ping) among all the blockchain nodes
measurements['blockchain_ping_data'] = {}
# the data containing the network speed (ping) between the client and their respective blockchain nodes
measurements['client_ping_data'] = []
# the data cotaining the upload and download rate
measurements['blockchain_network_data'] = {}
measurements['client_network_data'] = {}
# the data containing disk writing and reading
measurements['blockchain_io_data'] = {}
measurements['client_io_data'] = {}
# the data containing the cpus usage of the most used core
measurements['blockchain_single_cpu_data'] = {}
measurements['client_single_cpu_data'] = {}
# the data containing the total energy consumption
measurements['blockchain_energy_consumption_data'] = {}
measurements['client_energy_consumption_data'] = {}
# the data containing the total storage required
measurements['blockchain_storage_data'] = {}
measurements['success_rates'] = []
success_rates = []
tx_data = []
tx_data_all = []
if client_config["blockchain_type"] == "acapy":
client_indices = client_config["coordinator_indices"]
else:
client_indices = range(0, len(client_config['priv_ips']))
# print(client_indices)
for i, client in enumerate(client_indices):
if i % 4 != 3:
pass
# continue
else:
pass
# Reading tx_data on client {client} and removing invalid responses")
try:
# print(f"looking for /data/freq{freq}_client{client}_tx_data{rep}.csv")
# print(path + f"/data/freq{freq}_client{client}_tx_data{rep}.csv")
data = readCSV(path + f"/data/freq{freq}_client{client}_tx_data{rep}.csv", None)
try:
data = check_data(data, 3, logger, f"freq{freq}_client{client}_tx_data{rep}.csv")
except Exception as e:
print("Exception ")
print(data)
continue
data_all = data
data = data[np.where(data[:, 2] != -1)]
if demo == False:
success_rate = len(data[:, 0]) / (test_config['frequency'] * experiment_config['duration'])
success_rates.append(success_rate)
if (success_rate < 0 and (test_config['frequency'] * experiment_config['duration']) > 2):
logger.info(f"Too little valid responses on client {client}:")
logger.info(f"Expected length: {round(test_config['frequency'] * experiment_config['duration'], 1)}, actual length: {len(data[:, 0])} - repeating...")
raise BlockchainNotRespondingError()
except Exception as e:
# pass
logger.exception(e)
# logger.info(f"No response from blockchain on client {client}")
# raise BlockchainNotRespondingError()
data = []
data_all = []
# Putting everything together in tx_datas
if tx_data == []:
tx_data = data
else:
try:
tx_data = np.concatenate((tx_data, data), axis=0)
except Exception as e:
pass
# logger.exception(e)
# raise Exception("Error when concatenating tx data")
if tx_data_all == []:
tx_data_all = data_all
else:
try:
tx_data_all = np.concatenate((tx_data_all, data_all), axis=0)
except Exception as e:
pass
# logger.exception(e)
# raise Exception("Error when concatenating tx data all")
cut_before = 0.1
cut_after = 0.1
tx_data_print = tx_data
tx_data_all_print = tx_data_all
tx_data = tx_data[tx_data[:, 0].argsort()]
tx_data_print = tx_data_print[tx_data_print[:, 0].argsort()]
tx_data[:, 0] = tx_data[:, 1] - tx_data[:, 2]
tx_data_all[:, 0] = tx_data_all[:, 1] - tx_data_all[:, 2]
tx_data_print[:, 0] = tx_data_print[:, 1] - tx_data_print[:, 2]
tx_data_all_print[:, 0] = tx_data_all_print[:, 1] - tx_data_all_print[:, 2]
tx_data = tx_data[:, 0:2]
tx_data_all = tx_data_all[:, 0:2]
tx_data_print = tx_data_print[:, 0:2]
tx_data_all_print = tx_data_all_print[:, 0:2]
# print("tx_data")
# print(tx_data)
# print("\ntx_data_all")
# print(tx_data_all)
base = min(tx_data_print[:, 0])
up = max(tx_data_print[:, 0])
# print(f"base: {base}")
base_all = min(tx_data_all_print[:, 0])
up_all = max(tx_data_all_print[:, 0])
# print(f"base_all: {base_all}")
tx_data[:, :] = tx_data[:, :] - base
tx_data_all[:, :] = tx_data_all[:, :] - base_all
tx_data_print[:, :] = tx_data_print[:, :] - base
tx_data_all_print[:, :] = tx_data_all_print[:, :] # - base_all
# print("tx_data: ")
# print(tx_data)
# print("\ntx_data_all")
# print(tx_data_all)
try:
# if demo == False and effectivity < experiment_config['success_bound']:
if demo == False and test_config['frequency'] * experiment_config['duration'] / len(success_rates) > 5 and min(success_rates) == 0:
logger.info(f"Too little valid responses")
# logger.info(f"Success rates: {success_rates}")
logger.info(f"Expected length: {round(test_config['frequency'] * experiment_config['duration'], 1)}, actual length: {len(data[:, 0])} - repeating...")
raise BlockchainNotRespondingError()
except Exception as e:
# logger.exception(e)
if demo == False:
logger.info(f"Success rates: {success_rates}")
raise BlockchainNotRespondingError()
tx_data_send = tx_data[tx_data[:, 0].argsort()]
n = len(tx_data_send[:, 0])
# print(f"n: {n}")
# print("tx_data_send")
# print(tx_data_send)
# tx_data_send = tx_data_send[list(range(round(cut_before * n), round((1 - cut_after) * n))), :]
# print(np.where((tx_data_send[:, 0] > cut_before * (up - base)) & (tx_data_send[:, 0] < (1 - cut_after) * (up - base))))
tx_data_send = tx_data_send[np.where((tx_data_send[:, 0] > cut_before * (up - base)) & (tx_data_send[:, 0] < (1 - cut_after) * (up - base))), :][0]
# print(tx_data_send)
# print(np.arange(n * cut_before, n * cut_before + tx_data_send.shape[1]))
tx_data_send[:, 1] = np.arange(n * cut_before, n * cut_before + len(tx_data_send[:, 0]))
# print("After")
# print(tx_data_send)
effectivity = len(tx_data_send[:, 0]) / (float(test_config['freq']) * experiment_config['duration'] * (1 - cut_before - cut_after))
print(effectivity)
| |
<gh_stars>10-100
import matplotlib.pyplot as plt
from collections import defaultdict
import seaborn as sns
# import pandas as pd
from sklearn.decomposition import TruncatedSVD
import sklearn.manifold
import numpy as np
import math
import wordcloud
from .nlp import create_tfidf
from .clean import clean_attributes, get_affiliations_doc
# Apply seaborn's 'paper' plotting context to all figures in this module.
sns.set('paper')
#-----------------------------------------------------------------------
# Statistics plotting functions
#-----------------------------------------------------------------------
def top_k(mapping, k=10):
    """Return the *k* keys of *mapping* with the largest values, descending."""
    ascending = sorted(mapping, key=mapping.get)
    ascending.reverse()
    return ascending[:k]
def prepare_plot(w=1, h=None):
    """Set the default figure size to (w, h) inches and return (figure, axis).

    When *h* is omitted the figure is square.
    """
    if h is None:
        h = w
    plt.rcParams['figure.figsize'] = [w, h]
    fig = plt.figure()
    return fig, plt.gca()
# Publications per aggregation type
def plot_statistic(fun, docset, x=None, ax=None, x_label="", count=None, vertical=False, title=None):
    """Plot counts of some per-document attribute as a bar plot.

    If ``x`` is None, all keys with a count > 0 are plotted.  If ``x`` is a
    list, the counts of exactly those elements are included.  If ``x`` is an
    integer, the ``x`` keys with the highest counts are plotted.

    :param fun: Callable mapping a document to an iterable of keys to count
        (ignored when ``count`` is given).
    :param docset: The ``DocumentSet`` whose ``docs`` are counted.
    :param x: Key selector (None, list of keys, or an integer top-k limit).
    :param ax: Axis to draw on; defaults to the current axis.
    :param x_label: Label for the count axis.
    :param count: Pre-computed ``{key: count}`` mapping; skips the counting
        pass (for data unrelated to documents).
    :param vertical: Draw vertical bars instead of horizontal ones.
    :param title: Optional plot title.
    """
    if ax is None:
        ax = plt.gca()
    # Use given count dict if we are plotting something
    # unrelated to documents and the counting has already been performed.
    if count is None:
        count = defaultdict(int)
        for d in docset.docs:
            for key in fun(d):
                if key:
                    count[str(key)] += 1
    if title is not None:
        plt.title(title)
    # `type(...) is` keeps the original strict dispatch (a bool would not be
    # treated as an int) while fixing the un-idiomatic `type(x) == type([])`.
    if type(x) is list:
        keys = x
    elif type(x) is int:
        keys = top_k(count, x)
    else:
        keys = list(count.keys())
    if not vertical:
        # barh draws the first element at the bottom, so reverse the order.
        keys = keys[::-1]
        ax.set_xlabel(x_label)
        ax.barh(keys,
                [count[str(a)] for a in keys],
                tick_label=[str(key)[:50] for key in keys])
    else:
        ax.set_ylabel(x_label)
        ax.bar(keys,
               [count[str(a)] for a in keys],
               tick_label=[str(key)[:50] for key in keys])
def merge_author_affiliation(doc):
    """Return the set of ``"author, affiliation"`` strings for *doc*.

    Authors without affiliation data are paired with ``'Unknown'``.  Returns
    an empty list when the document has no author information at all.
    """
    if doc.authors is None:
        return []
    combined = []
    for author in doc.authors:
        if author.affiliations is None:
            combined.append(author.name + ', Unknown')
        else:
            for affiliation in author.affiliations:
                combined.append(author.name + ', ' + affiliation.name)
    return set(combined)
def plot_year_histogram(docset, ax=None):
    """Plot a histogram of the number of documents published per year.

    :param docset: The `DocumentSet`.
    :param ax: The axis on which to plot the histogram, defaults to current axis.
    """
    # Find the range of publication years actually present in the set so the
    # x axis runs contiguously from the first to the last year.
    per_year = defaultdict(int)
    for doc in docset.docs:
        if doc.year is not None:
            per_year[doc.year] += 1
    first_year = min(per_year.keys())
    last_year = max(per_year.keys())
    years = list(range(first_year, last_year + 1))
    plot_statistic(lambda p: [p.year], docset=docset, x=years, ax=ax,
                   x_label="No. publications", vertical=True,
                   title='Publications per year')
def plot_author_histogram(docset, top_k=20, ax=None):
    """Plot how many documents each author published.

    Best-effort: one author may appear under several spellings of the same
    name (e.g., "<NAME>", "<NAME>", or "<NAME>"), which are counted
    separately.

    :param docset: The `DocumentSet`.
    :param top_k: Limit results to the top k authors.
    :param ax: The axis on which to plot, defaults to the current axis.
    """
    def author_names(doc):
        return set(a.name for a in doc.authors or [])
    plot_statistic(author_names, docset=docset, x=top_k, ax=ax,
                   x_label="No. publications",
                   title="Publications per author")
def plot_author_affiliation_histogram(docset, top_k=30, ax=None):
    """Plot publications per author+affiliation combination.

    Combining author and affiliation reduces the number of collisions between
    different persons who happen to share a name.

    :param docset: The `DocumentSet`.
    :param top_k: Limit results to the top k combinations.
    :param ax: The axis on which to plot, defaults to the current axis.
    """
    plot_statistic(merge_author_affiliation, docset=docset, x=top_k, ax=ax,
                   x_label="No. publications",
                   title="Publications per author+affiliation")
def plot_number_authors_histogram(docset, ax=None):
    """Plot a histogram of the number of (distinct) author names per document.

    :param docset: The `DocumentSet`.
    :param ax: The axis on which to plot, defaults to the current axis.
    """
    def author_count(doc):
        return [len(set(a.name for a in doc.authors or []))]
    # Fixed buckets covering 1..25 authors.
    buckets = list(range(1, 26))
    plot_statistic(author_count, docset=docset, x=buckets, ax=ax,
                   x_label="No. publications", vertical=True,
                   title="Histogram for number of authors")
def plot_source_type_histogram(docset, ax=None):
    """Plot publications per source type (journal, conference proceedings, ...).

    :param docset: The `DocumentSet`.
    :param ax: The axis on which to plot, defaults to the current axis.
    """
    def source_type(doc):
        return [doc.source_type]
    plot_statistic(source_type, docset=docset, ax=ax,
                   x_label="No. publications",
                   title="Publication per source type")
def plot_source_histogram(docset, top_k=10, ax=None, filename="translations_sources.yml", clean=True):
    """Plot publications per source (venue).

    Best-effort: one venue may appear under several spellings.  When *clean*
    is set, `clean_attributes` runs the interactive merge flow first and
    persists the decisions in *filename*.

    :param docset: The `DocumentSet`.
    :param top_k: Limit results to the top k sources.
    :param ax: The axis on which to plot, defaults to the current axis.
    :param filename: File holding (previous) source cleaning decisions.
    :param clean: Set to False to skip the cleaning process.
    """
    if clean:
        clean_attributes(plot_source_histogram, docset, top_k, ax, filename,
                         cleaning_type='sources')
    else:
        plot_statistic(lambda p: [p.source], docset=docset, x=top_k, ax=ax,
                       x_label="No. publications",
                       title="Publications per source")
def plot_affiliation_histogram(docset, top_k=10, ax=None, filename="translations_affiliations.yml", clean=True):
    """Plot publications per affiliation.

    Best-effort: one affiliation may appear under several spellings (e.g.,
    "University of Amsterdam", "Universiteit van Amsterdam", or "UvA").
    When *clean* is set, `clean_attributes` lets the user merge potential
    duplicates and persists the decisions in *filename*.

    :param docset: The `DocumentSet`.
    :param top_k: Limit results to the top k institutes.
    :param ax: The axis on which to plot, defaults to the current axis.
    :param filename: File holding (previous) affiliation cleaning decisions.
    :param clean: Set to False to skip the cleaning process.
    """
    if clean:
        clean_attributes(plot_affiliation_histogram, docset, top_k, ax, filename,
                         cleaning_type='affiliations')
    else:
        plot_statistic(lambda p: get_affiliations_doc(p), docset=docset, x=top_k, ax=ax,
                       x_label="No. publications",
                       title="Publications per affiliation")
def plot_country_histogram(docset, top_k=10, ax=None):
    """Plot publications per country of author affiliation.

    The country is taken from the affiliation records and is not always
    available.

    :param docset: The `DocumentSet`.
    :param top_k: Limit results to the top k countries.
    :param ax: The axis on which to plot, defaults to the current axis.
    """
    def countries(doc):
        return get_affiliations_doc(doc, attribute='country')
    plot_statistic(countries, docset=docset, x=top_k, ax=ax,
                   x_label="No. publications",
                   title="Publications per country of affiliations")
def plot_affiliation_type_histogram(docset, x=10, ax=None):
    """Plot publications per type of affiliation (research institute,
    academic institute, company, ...).

    :param docset: The `DocumentSet`.
    :param x: Limit results to the top x entries.
    :param ax: The axis on which to plot, defaults to the current axis.
    """
    def affiliation_types(doc):
        return get_affiliations_doc(doc, attribute='affiliation_type')
    plot_statistic(affiliation_types, docset=docset, x=x, ax=ax,
                   x_label="No. publications",
                   title="Publications per type of affiliation")
def plot_language_histogram(docset, ax=None):
    """Plot publications per source language (English, German, Chinese, ...).

    :param docset: The `DocumentSet`.
    :param ax: The axis on which to plot, defaults to the current axis.
    """
    plot_statistic(lambda doc: [doc.language], docset=docset, ax=ax,
                   x_label="No. publications",
                   title="Publications per source language")
def plot_words_histogram(freqs, dic, top_k=25, ax=None):
    """Plot a histogram of word frequencies across the documents.

    :param freqs: Per-document lists of ``(word_id, frequency)`` pairs.
    :param dic: Mapping from word id to the word itself.
    :param top_k: Limit results to the top k entries.
    :param ax: The axis on which to plot, defaults to the current axis.
    """
    # Sum each word's frequency over all documents, keyed by its text.
    totals = defaultdict(int)
    for doc_freq in freqs:
        for word, freq in doc_freq:
            totals[str(dic[word])] += freq
    plot_statistic(None, docset=None, ax=ax, x_label="No. occurences",
                   x=top_k, count=totals)
def plot_bigram_histogram(freqs, dic, top_k=25, ax=None):
    """Plot a histogram of bigram frequencies across the documents.

    :param freqs: Per-document lists of ``(bigram_id, frequency)`` pairs.
    :param dic: Mapping from bigram id to the bigram text.
    :param top_k: Limit results to the top k entries.
    :param ax: The axis on which to plot, defaults to the current axis.
    """
    all_freqs = []
    for doc_freq in freqs:
        all_freqs += doc_freq
    # Sum each bigram's frequency over all documents, keyed by its text.
    count = defaultdict(int)
    for word, freq in all_freqs:
        count[str(dic[word])] += freq
    # Fix: the bars show occurrence counts, not publication counts, so use the
    # same axis label as plot_words_histogram ("No. occurences" was evidently
    # intended; "No. publications" was a copy-paste from the document plots).
    plot_statistic(None, docset=None, ax=ax, x_label="No. occurences",
                   x=top_k, count=count)
#-----------------------------------------------------------------------
# Wordcloud plotting functions
#-----------------------------------------------------------------------
def plot_topic_clouds(model, cols=3, fig=None, **kwargs):
    r"""Plot the word distributions of a topic model, one word cloud per
    topic, laid out on a grid with *cols* clouds per row.

    :param model: The `TopicModel`.
    :param cols: Number of columns (word clouds per row).
    :param fig: The figure on which to plot the results, defaults to the
        current figure.
    :param \**kwargs: Additional parameters passed to `plot_topic_cloud`.
    """
    if fig is None:
        plt.clf()
        fig = plt.gcf()
    rows = math.ceil(model.num_topics / float(cols))
    for idx in range(model.num_topics):
        axis = fig.add_subplot(rows, cols, idx + 1)
        plot_topic_cloud(model, idx, ax=axis, **kwargs)
def plot_topic_cloud(model, topicid, ax=None, **kwargs):
"""Plot the word distributions of a single topic from a topic model.
:param model: The `TopicModel`.
:param topicid: The topic index within the topic model.
:param ax: The axis on which to plot the histogram, defaults to current axis.
:param \**kwargs: Additional parameters passed to `generate_topic_cloud`.
"""
if ax is None: | |
= """
</cfdi:Conceptos>
<cfdi:Impuestos TotalImpuestosTrasladados="{TotalImpuestosTrasladados}">
<cfdi:Traslados>""".format(**locals())
cfdi += cfdi_conceptos
cfdi += cfdi_traslados
cfdi_complemento = ""
cfdi_emisor = ""
cfdi_receptor = ""
cfdi_header = """
</cfdi:Traslados>
</cfdi:Impuestos>
</cfdi:Comprobante>
""".format(**locals())
if c.comercio_exterior == 1:
EDireccion = frappe.get_doc("Address", c.customer_address)
ECalle = re.findall("[^0-9]+", EDireccion.address_line1)[0].replace('#', '')
ENumeroExterior = re.findall("\d+", EDireccion.address_line1)[0]
EColonia = EDireccion.county
EEstado = EDireccion.clave_estado
ECp = EDireccion.pincode
#########################################
#Letras del pais UNIDECODE Origen
pais = frappe.get_doc("CFDI Clave Estado", EDireccion.clave_estado)
EPais = pais.pais
if c.comercio_exterior == 1:
cfdi_complemento = """
<cfdi:Complemento>
<cce11:ComercioExterior Version="1.1" TipoOperacion="2" ClaveDePedimento="{c.clave_pedimento}" CertificadoOrigen="0" Incoterm="{c.incoterm}" Subdivision="0" TipoCambioUSD="{TipoCambio}" TotalUSD="{Total}" xmlns:cce11="http://www.sat.gob.mx/ComercioExterior11" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.sat.gob.mx/ComercioExterior11 http://www.sat.gob.mx/sitio_internet/cfd/ComercioExterior11/ComercioExterior11.xsd">""".format(**locals())
cfdi_emisor = """
<cce11:Emisor>
<cce11:Domicilio Calle="López Cotilla" NumeroExterior="13" Localidad="12" Municipio="101" Estado="JAL" Pais="MEX" CodigoPostal="{company.lugar_expedicion}"/>
</cce11:Emisor>
""".format(**locals())
cfdi_receptor = """
<cce11:Receptor>
<cce11:Domicilio Calle="{ECalle}" NumeroExterior="{ENumeroExterior}" Colonia="{EColonia}" Estado="{EEstado}" Pais="{EPais}" CodigoPostal="{ECp}"/>
</cce11:Receptor>
""".format(**locals())
cfdi_header = """
</cce11:Mercancias>
</cce11:ComercioExterior>
</cfdi:Complemento>
</cfdi:Comprobante>
""".format(**locals())
cfdi += cfdi_complemento
cfdi += cfdi_emisor
cfdi += cfdi_receptor
cfdi += cfdi_mercancias
cfdi += cfdi_header
frappe.errprint(cfdi)
return cfdi
@frappe.whitelist()
def cancel_by_uuid_sales_invoice(url, token, uuid, docname, rfc):
    """
    Cancels a stamped (timbrada) Sales Invoice CFDI at the PAC by its UUID.
    On success the invoice's cfdi_status is set to 'Cancelado'; on error the
    PAC's message (and detail, when present) is shown to the user.
    :param url: base URL of the PAC web service
    :param token: bearer token for the PAC API
    :param uuid: fiscal UUID (folio fiscal) of the CFDI to cancel
    :param docname: name of the Sales Invoice document in Frappe
    :param rfc: RFC (tax id) of the issuing company
    :return: raw response body from the PAC
    """
    c = frappe.get_doc("Sales Invoice", docname)
    headers = {
        'Authorization': "bearer " + token,
        'Content-Type': "application/json"
    }
    response = requests.request("POST", url + "/cfdi33/cancel/" + rfc + "/" + uuid, headers=headers)
    # Parse the JSON body once instead of re-parsing it on every access.
    body = response.json()
    if body.get('status') == 'error':
        if body.get('messageDetail'):
            frappe.msgprint(body.get('message') + ". <b>Detalle del Error: </b>" + body.get('messageDetail'), "ERROR DE SERVIDOR (PAC) ")
        else:
            frappe.msgprint(body.get('message'), "ERROR DE SERVIDOR")
    else:
        frappe.db.set_value("Sales Invoice", c.name, 'cfdi_status', 'Cancelado')
        frappe.msgprint(str(c.name) + " Cancelada Exitosamente")
    return response.text
#############################################
#Comienza Carta Porte
@frappe.whitelist()
def carta_porte_timbrado(url, token, docname, version, b64=False):
    """Stamp (timbrar) a Carta Porte CFDI for a Delivery Trip via the PAC.

    Builds the CFDI XML for *docname*, POSTs it to the PAC at *url* as a
    hand-rolled multipart/form-data request authenticated with *token*,
    and on success saves the stamped XML and QR image to the site's
    public files and records the stamping metadata on the Delivery Trip.

    Parameters
    ----------
    url : str
        Base URL of the PAC web service.
    token : str
        Bearer token for the PAC API.
    docname : str
        Name of the Delivery Trip document.
    version : str
        API issue endpoint version segment.
    b64 : bool, optional
        Unused; kept for interface compatibility with callers.

    Returns
    -------
    list | None
        ``[status, html_message, uuid, xml]`` on success; ``None`` when
        the PAC reports an error (the error is shown to the user).
    """
    xml = carta_porte_timbrado_xml(docname)
    frappe.errprint(xml)
    # Random multipart boundary; the PAC expects the XML in a form field
    # literally named "xml".
    lst = [random.choice(string.ascii_letters + string.digits) for n in range(30)]
    boundary = "".join(lst)
    payload = "--" + boundary + "\r\nContent-Type: text/xml\r\nContent-Transfer-Encoding: binary\r\nContent-Disposition: " \
        "form-data; name=\"xml\"; filename=\"xml\"\r\n\r\n" + str(xml) + "\r\n--" + boundary + "-- "
    headers = {
        'Authorization': "bearer " + token,
        'Content-Type': "multipart/form-data; boundary=\"" + boundary + "\""
    }
    response = requests.request("POST", url + "/cfdi33/issue/" + version + "/", data=payload.encode('utf-8'), headers=headers)
    # Parse the JSON body once; the original re-parsed it on every access.
    body = response.json()
    frappe.errprint(body)
    if body.get('status') == 'error':
        if body.get('messageDetail'):
            frappe.msgprint((body.get('message')) + ". <b>Detalle del Error: </b>" + (body.get('messageDetail')), "ERROR DE SERVIDOR (PAC) ")
        else:
            frappe.msgprint((body.get('message')), "ERROR DE SERVIDOR")
    else:
        # Success: persist the returned CFDI artifacts on the Delivery Trip.
        c = frappe.get_doc("Delivery Trip", docname)
        data = body.get('data')
        uuid = data.get('uuid')
        cfdi_recibido = data.get('cfdi')
        fechaxml = str(c.creation)
        basename = c.name + "_" + fechaxml[0:10]
        dest = '/home/frappe/frappe-bench/sites/' + frappe.local.site + '/public/files/' + basename
        # Context managers so the files are closed even if a write fails.
        with open(dest + '.xml', "w+") as f:
            f.write(cfdi_recibido)
        save_url("/files/" + basename + ".xml", basename + ".xml", "Delivery Trip", c.name, "Home/Attachments", 0)
        qr = data.get('qrCode')
        with open(dest + ".png", "wb") as png:
            png.write(base64.b64decode(qr))
        frappe.db.set_value("Delivery Trip", c.name, 'qr', "/files/" + basename + ".png")
        frappe.db.set_value("Delivery Trip", c.name, 'cfdi_status', 'Timbrado')
        frappe.db.set_value("Delivery Trip", c.name, 'sellocfd', data.get('selloCFDI'))
        frappe.db.set_value("Delivery Trip", c.name, 'cadenaoriginalsat', data.get('cadenaOriginalSAT'))
        frappe.db.set_value("Delivery Trip", c.name, 'fechatimbrado', data.get('fechaTimbrado'))
        frappe.db.set_value("Delivery Trip", c.name, 'uuid', uuid)
        frappe.db.set_value("Delivery Trip", c.name, 'nocertificadosat', data.get('noCertificadoSAT'))
        frappe.db.set_value("Delivery Trip", c.name, 'sellosat', data.get('selloSAT'))
        # BUG FIX: the download link used to point at "<uuid>.xml", but the
        # file above is saved as "<name>_<date>.xml"; link to the real file.
        mensaje = "TIMBRADO EXITOSO . <a class= 'alert-info' href='https://" + frappe.local.site + "/files/" + basename + ".xml' download> Descarga XML </a>"
        frappe.msgprint(mensaje)
        return ["TIMBRADO EXITOSO!", mensaje, uuid, xml]
def carta_porte_timbrado_xml(docname):
c = frappe.get_doc("Delivery Trip", docname)
TranspInternac = ""
if c.transporte_internacional == 1:
TranspInternac = 'Si'
else:
TranspInternac = 'No'
company = frappe.get_doc("Configuracion CFDI", c.company)
fecha_actual = (c.creation).isoformat()[0:19]
fecha_salida = (c.departure_time).isoformat()[0:19]
serie = c.naming_series.replace('-','')
folio = c.name.replace(serie,'')
FormaPago = c.forma_de_pago
if c.tipo_de_comprobante == "I":
SubTotal = '%.2f' % c.precio_traslado
Total = round(c.precio_traslado * 1.16, 2)
else:
SubTotal = 0
Total = 0
TipoDeComprobante = c.tipo_de_comprobante
MetodoPago = c.metodo_de_pago
LugarExpedicion = company.lugar_expedicion
Currency = c.currency
if Currency == 'MXN':
TipoCambio = 1
else:
TipoCambio = '%.4f' % c.conversion_rate
rfc_emisor = company.rfc_emisor
nombre_emisor = company.nombre_emisor.replace('&','&').replace('á','a').replace('é','e').replace('í','i').replace('ó','o').replace('ú','u').replace('À','a').replace('É','e').replace('Í','i').replace('Ó','o').replace('Ú','u').replace('@',' ')
regimen_fiscal = company.regimen_fiscal
uso_cfdi = c.uso_cfdi
##########################################
#Datos de Direccion de Origen
ODireccion = frappe.get_doc("Address", c.driver_address)
OCalle = re.findall("[^0-9]+", ODireccion.address_line1)[0].replace('#', '')
ONumeroExterior = re.findall("\d+", ODireccion.address_line1)[0]
#########################################
#Letras del pais UNIDECODE Origen
OClave_estado = ODireccion.clave_estado
InfOClave_estado = frappe.get_doc("CFDI Clave Estado", OClave_estado)
OPais = InfOClave_estado.pais
articulo_claveDT = c.unidad_pesocp
suma_distancia = 0
##########################################
#Datos de Direccion de destinatario
for dest in c.delivery_stops:
UCliente = dest.customer
cliente = frappe.get_doc("Customer", UCliente)
nombre_receptor = UCliente.replace('&','&').replace('á','a').replace('é','e').replace('í','i').replace('ó','o').replace('ú','u').replace('À','a').replace('É','e').replace('Í','i').replace('Ó','o').replace('Ú','u').replace('@',' ')
tax_id = cliente.tax_id.replace('&','&').replace('á','a').replace('é','e').replace('í','i').replace('ó','o').replace('ú','u').replace('À','a').replace('É','e').replace('Í','i').replace('Ó','o').replace('Ú','u').replace('@',' ').replace('Ü', 'U')
rfc_receptor = cliente.tax_id
Fecha_llegada = (dest.estimated_arrival).isoformat()[0:19]
UDireccion = frappe.get_doc("Address", dest.address)
UCalle = re.findall("[^0-9]+", UDireccion.address_line1)[0].replace('#', '')
UNumeroExterior = re.findall("\d+", UDireccion.address_line1)[0]
#########################################
#Letras del pais UNIDECODE Origen
UClave_estado = UDireccion.clave_estado
InfUClave_estado = frappe.get_doc("CFDI Clave Estado", UClave_estado)
UPais = InfUClave_estado.pais
UCodigo_postal = UDireccion.pincode
##########################################
distancia = round(dest.distance, 2)
suma_distancia += round(distancia, 2)
##########################################
#Obtener informacion de Notra de Entrega
DN = frappe.get_doc("Delivery Note", dest.delivery_note)
cant = len(DN.items)
PesoBrutoTotal = DN.total_net_weight
cfdi_ubicacion_destino = """
<cartaporte20:Ubicacion TipoUbicacion="Destino" RFCRemitenteDestinatario="{rfc_receptor}" FechaHoraSalidaLlegada="{Fecha_llegada}" DistanciaRecorrida="{distancia}">
<cartaporte20:Domicilio Calle="{UCalle}" NumeroExterior="{UNumeroExterior}" Estado="{UClave_estado}" Pais="{UPais}" CodigoPostal="{UCodigo_postal}" />
</cartaporte20:Ubicacion>
""".format(**locals())
##########################################
#Obtener informacion de articulos en Notra de Entrega
tipo = []
tasa = []
cantidad = []
cfdi_items = ""
cfdi_traslados = ""
for articulos_nota in DN.items:
articulo_qty = articulos_nota.qty
articulo_peso = articulos_nota.total_weight
row = str(articulos_nota.idx)
NumTotalMercancias = len(row)
##########################################
#Obtener informacion del articulo en general
informacion_articulo = frappe.get_doc("Item", articulos_nota.item_code)
articulo_cps = informacion_articulo.clave_producto
articulo_cu = informacion_articulo.clave_unidad
articulo_claveUP = informacion_articulo.unidad_pesocp
material_peligroso = informacion_articulo.material_peligroso
articulo_descripcion = informacion_articulo.description
articulos_mercancias_header = """ </cartaporte20:Ubicaciones>
<cartaporte20:Mercancias PesoBrutoTotal="{PesoBrutoTotal}" UnidadPeso="{articulo_claveDT}" NumTotalMercancias="{NumTotalMercancias}" >""".format(**locals())
articulos_mercancias = """
<cartaporte20:Mercancia BienesTransp="{articulo_cps}" Descripcion="{articulo_descripcion}" Cantidad="{articulo_qty}" ClaveUnidad="{articulo_claveUP}" PesoEnKg="{articulo_peso}">
</cartaporte20:Mercancia>""".format(**locals())
NoIdentificacion = articulos_nota.item_code.replace('"','').replace('&','&')
ClaveProdServ = informacion_articulo.clave_producto
ClaveUnidad = informacion_articulo.clave_unidad
Cantidad = articulos_nota.qty
Unidad = articulos_nota.stock_uom
ValorUnitario = '%.2f' % c.precio_traslado
Importe = '%.2f' % c.precio_traslado
idx = articulos_nota.idx
Descripcion = articulos_nota.item_name.replace('"','').replace('&','&')
TrasladosBase= '%.2f' % c.precio_traslado
TasaOCuota = .01 * float(informacion_articulo.tasa)
ImpuestosTrasladosTasaOCuota='%.6f' % TasaOCuota
Importetax= '%.2f' % (TasaOCuota * (float(c.precio_traslado)))
Tasa = 'Tasa'
if informacion_articulo.tipo_de_impuesto == 'IVA':
Impuesto = '002'
tipo.append(Impuesto)
tasa.append(ImpuestosTrasladosTasaOCuota)
cantidad.append(Importetax)
frappe.errprint(Importetax)
cfdi_items += """
<cfdi:Concepto ClaveProdServ="78101800" NoIdentificacion="01" Cantidad="1" ClaveUnidad="E48" Unidad="SERVICIO" Descripcion="FLETE" ValorUnitario="{ValorUnitario}" Importe="{Importe}">
<cfdi:Impuestos>
<cfdi:Traslados>
<cfdi:Traslado Base="{TrasladosBase}" Impuesto="{Impuesto}" TipoFactor="{Tasa}" TasaOCuota="{ImpuestosTrasladosTasaOCuota}" Importe="{Importetax}"/>
</cfdi:Traslados>
</cfdi:Impuestos>
</cfdi:Concepto>""".format(**locals())
elif informacion_articulo.tipo_de_impuesto == "SIN IVA":
Impuesto="002"
tipo.append(Impuesto)
tasa.append(ImpuestosTrasladosTasaOCuota)
cantidad.append(Importetax)
frappe.errprint(Importetax)
cfdi_items += """
<cfdi:Concepto ClaveProdServ="78101800" NoIdentificacion="01" Cantidad="1" ClaveUnidad="E48" Unidad="SERVICIO" Descripcion="FLETE" ValorUnitario="{ValorUnitario}" Importe="{Importe}">
<cfdi:Impuestos>
<cfdi:Traslados>
<cfdi:Traslado Base="{TrasladosBase}" Impuesto="{Impuesto}" TipoFactor="{Tasa}" TasaOCuota="{ImpuestosTrasladosTasaOCuota}" Importe="{Importetax}"/>
</cfdi:Traslados>
</cfdi:Impuestos>
</cfdi:Concepto>""".format(**locals())
elif informacion_articulo.tipo_de_impuesto == "IEPS":
Impuesto="003"
tipo.append(Impuesto)
tasa.append(ImpuestosTrasladosTasaOCuota)
cantidad.append(Importetax)
frappe.errprint(Importetax)
cfdi_items += """
<cfdi:Concepto ClaveProdServ="78101800" NoIdentificacion="01" Cantidad="1" ClaveUnidad="E48" Unidad="SERVICIO" Descripcion="FLETE" ValorUnitario="{ValorUnitario}" Importe="{Importe}">
<cfdi:Impuestos>
<cfdi:Traslados>
<cfdi:Traslado Base="{TrasladosBase}" Impuesto="{Impuesto}" TipoFactor="{Tasa}" TasaOCuota="{ImpuestosTrasladosTasaOCuota}" Importe="{Importetax}"/>
</cfdi:Traslados>
</cfdi:Impuestos>
</cfdi:Concepto>""".format(**locals())
elif informacion_articulo.tipo_de_impuesto == "EXENTO":
TrasladosBase1= articulos_nota.net_amount
TrasladosBase= '%.2f' % (TrasladosBase1)
Impuesto="002"
ImpuestosTrasladosTasaOCuota="0.000000"
Importetax= "0.00"
Tasa = 'Exento'
tipo.append(Impuesto)
tasa.append(ImpuestosTrasladosTasaOCuota)
cantidad.append(Importetax)
frappe.errprint(Importetax)
cfdi_items += """
<cfdi:Concepto ClaveProdServ="78101800" NoIdentificacion="01" Cantidad="1" ClaveUnidad="E48" Unidad="SERVICIO" Descripcion="FLETE" ValorUnitario="{ValorUnitario}" Importe="{Importe}">
<cfdi:Impuestos>
<cfdi:Traslados>
<cfdi:Traslado Base="{TrasladosBase}" Impuesto="{Impuesto}" TipoFactor="{Tasa}"/>
</cfdi:Traslados>
</cfdi:Impuestos>
</cfdi:Concepto>""".format(**locals())
cTipo = collections.Counter(tipo)
cTasa = collections.Counter(tasa)
total_impuesto = 0
TotalImpuestosTrasladados = 0.00
for w, val1 in cTipo.items():
for y, val2 in cTasa.items():
suma =0
for z in range(0,cant):
if (tasa[z] == y) and (tipo[z] == w):
suma1 = suma+float(cantidad[z])
suma = round(suma1, 2)
b = y
t = w
total_impuesto = total_impuesto+suma
TotalImpuestosTrasladados = suma
if(suma>0):
cfdi_traslados += """
<cfdi:Traslado Impuesto="{t}" TipoFactor="{Tasa}" TasaOCuota="{b}" Importe="{suma}"/>""".format(**locals())
else:
cfdi_traslados += """
<cfdi:Traslado Impuesto="{t}" TipoFactor="{Tasa}" TasaOCuota="{b}" Importe="{suma}"/>""".format(**locals())
# Total = round(SubTotal + TotalImpuestosTrasladados, 2)
##########################################
#Si es auto transporte AutotransporteFederal
if c.via == '01':
#Obtener datos de Vehiculo
vehicle = frappe.get_doc("Vehicle", c.vehicle)
PermSCT = vehicle.tipo_permiso
NumPermisoSCT = vehicle.numero_permiso
NombreAseg = vehicle.insurance_company
NumPolizaSeguro = vehicle.policy_no
ConfigVehicular = vehicle.configuracion_vehicular
AnioModeloVM = vehicle.model
PlacaVM = c.vehicle.replace("-","")
##########################################
#Obtener datos de Operador
operador = frappe.get_doc("Driver", c.driver)
RFCOperador = operador.rfc
NumLicencia = operador.license_number
NombreOperador = operador.full_name
#Obtener datos de Direccion de Operador
DO = frappe.get_doc("Address", c.driver_address)
DOCalle = re.findall("[^0-9]+", DO.address_line1)[0].replace('#', '')
DONumeroExterior = re.findall("\d+", DO.address_line1)[0]
#########################################
#Letras del pais UNIDECODE Origen
InfDOClave_estado = frappe.get_doc("CFDI Clave Estado", UClave_estado)
DOPais = InfDOClave_estado.pais
DOClave_estado = DO.clave_estado
DOCodigo_postal = DO.pincode
if c.tipo_de_comprobante == "I":
cfdi = """<?xml version="1.0" encoding="UTF-8"?>
<cfdi:Comprobante xsi:schemaLocation="http://www.sat.gob.mx/cfd/3 http://www.sat.gob.mx/sitio_internet/cfd/3/cfdv33.xsd http://www.sat.gob.mx/CartaPorte20 http://www.sat.gob.mx/sitio_internet/cfd/CartaPorte/CartaPorte20.xsd"
xmlns:cartaporte20="http://www.sat.gob.mx/CartaPorte20" Version="3.3" Serie="{serie}" Folio="{folio}" Fecha="{fecha_actual}" Sello="" FormaPago="{FormaPago}" NoCertificado=""
xmlns:cfdi="http://www.sat.gob.mx/cfd/3"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" Certificado="" CondicionesDePago="CONTADO" SubTotal="{SubTotal}" Moneda="{Currency}" TipoCambio = "{TipoCambio}" Total="{Total}" TipoDeComprobante="{TipoDeComprobante}" MetodoPago="{MetodoPago}" LugarExpedicion="{LugarExpedicion}">""".format(**locals())
else:
cfdi = """<?xml version="1.0" encoding="UTF-8"?>
<cfdi:Comprobante xsi:schemaLocation="http://www.sat.gob.mx/cfd/3 http://www.sat.gob.mx/sitio_internet/cfd/3/cfdv33.xsd http://www.sat.gob.mx/CartaPorte20 http://www.sat.gob.mx/sitio_internet/cfd/CartaPorte/CartaPorte20.xsd"
xmlns:cartaporte20="http://www.sat.gob.mx/CartaPorte20" Version="3.3" Serie="{serie}" Folio="{folio}" Fecha="{fecha_actual}" Sello="" NoCertificado=""
xmlns:cfdi="http://www.sat.gob.mx/cfd/3"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" Certificado="" SubTotal="{SubTotal}" Moneda="XXX" Total="{Total}" TipoDeComprobante="{TipoDeComprobante}" LugarExpedicion="{LugarExpedicion}">""".format(**locals())
cfdi+= """
<cfdi:Emisor Rfc="{rfc_emisor}" Nombre="{nombre_emisor}" RegimenFiscal="{regimen_fiscal}"/>
<cfdi:Receptor Rfc="{tax_id}" Nombre="{nombre_receptor}" UsoCFDI="{uso_cfdi}"/>
<cfdi:Conceptos>""".format(**locals())
if c.tipo_de_comprobante == "I":
cfdi += cfdi_items
else:
cfdi += """
<cfdi:Concepto ClaveProdServ="78101800" NoIdentificacion="01" Cantidad="1" ClaveUnidad="E48" Unidad="SERVICIO" Descripcion="FLETE" ValorUnitario="{ValorUnitario}" Importe="{Importe}" />
</cfdi:Conceptos>
""".format(**locals())
cfdi_conceptos = """
</cfdi:Conceptos>
<cfdi:Impuestos TotalImpuestosTrasladados="{TotalImpuestosTrasladados}">
<cfdi:Traslados>""".format(**locals())
if c.tipo_de_comprobante == "I":
cfdi | |
all plots
self.list_ids.append(image_data.id)
self.list_all.append(image_data)
# Update model
self.modelImages._model_data.append(image_data.model_style)
self.modelImages.layoutChanged.emit()
def __updateImagesDataStyle(self):
    """
    Style info changed in the images model; copy each model entry's style
    back onto the matching image data object, then redraw everything.
    """
    for entry in self.modelImages._model_data:
        position = self.list_ids.index(entry['id'])
        self.list_all[position].model_style = entry
    self.refreshAllPlots()
def __updateImagesDataDelete(self, row, plt_id):
    """
    An image plot was deleted (likely from within the model); drop the
    matching data entry, detach its colorbar if one exists, and redraw.
    """
    where = self.list_ids.index(plt_id)
    del self.list_ids[where]
    removed = self.list_all.pop(where)
    cbar_obj = removed.cbar['obj']
    if cbar_obj is not None:
        cbar_obj.remove()
    self.refreshAllPlots()
def __bar(self, x, y, bottom=0, width_factor=1.0, use_real_width=False,
          label=None, meta=None, x_label=None, y_label=None, **kwargs):
    """
    MPL-like plotting functionality

    Note
    ----
    Unlike MPL bar, this method uses centered data. Thus, x is the center \
    position of the bar

    Parameters
    ----------
    x : ndarray (1D)
        X-axis data (center of bars)
    y : ndarray (1D, for now)
        Y-axis data (height)
    bottom : float (for now)
        Baseline of bars
    width_factor : float
        If length of y>1, fraction of space between bars taken up by bar \
        (e.g. 1.0 leads to bars that touch). If y is a single-value OR \
        use_real_width is True, is the width of the bar.
    use_real_width : bool, optional (default=False)
        If True, width_factor is the real width (in x-units)
    label : str
        Label of plot
    meta : dict, optional
        Metadata stored with the plot (default: a new empty dict)
    x_label : str
        X-axis label (units)
    y_label : str
        Y-axis label (units)
    kwargs : dict
        Other parameters sent directly to mpl-plot
    """
    # BUG FIX: the default used to be `meta={}`; a mutable default is
    # shared across calls, so metadata written to one bar leaked into
    # every later bar. Create a fresh dict per call instead.
    if meta is None:
        meta = {}
    # Temporary plot-data
    bar_data = _DataBar()
    bar_data.x = x
    bar_data.y = y
    bar_data.bottom = bottom
    bar_data.label = label
    bar_data.meta = meta
    bar_data.id = _time.time()
    bar_data.style_dict['width_factor'] = width_factor
    # Decide whether y describes one bar or many: plain scalars and
    # length-1 arrays/sequences count as single-valued.
    _multi_value = None
    if isinstance(y, (int, float)):
        _multi_value = False
    if isinstance(y, _np.ndarray):
        _multi_value = y.size != 1
    if isinstance(y, (list, tuple)):
        _multi_value = len(y) != 1
    if _multi_value and not use_real_width:
        # Distance between bars
        bar_data._gap = _np.abs(x[1] - x[0])
        # Width of a bar is a fraction of the gap
        bar_data._width = bar_data._gap * bar_data.style_dict['width_factor']
    else:
        # Single-valued (or caller gave a real width): no gap
        bar_data._gap = None
        bar_data._width = width_factor
    # MPL-bar uses left-edge rather than center
    bar_data._left = bar_data.x - bar_data._width / 2
    # Plot outputs a list of patch objects
    bar_data.mplobj = self.mpl_widget.ax.bar(bar_data._left, y,
                                             bottom=bar_data.bottom,
                                             width=bar_data._width,
                                             label=label, **kwargs)
    self.mpl_widget.ax.legend(loc='best')
    # If labels are provided, update the global data and the lineEdits
    if x_label is not None or y_label is not None:
        self.updateAllLabels(x_label=x_label, y_label=y_label)
    self.mpl_widget.fig.tight_layout()
    self.axisAspect()
    self.mpl_widget.draw()
    # Since the plot was not fed style-info (unless kwargs were used)
    # we rely on the mpl stylesheet to setup color, linewidth, etc.
    # Thus, we plot, then retrieve what the style info was
    bar_data.retrieve_style_from_bar(bar_data.mplobj[0])
    # Append this specific plot data to our list of all plots
    self.list_ids.append(bar_data.id)
    self.list_all.append(bar_data)
    # Update model
    self.modelBars._model_data.append(bar_data.model_style)
    self.modelBars.layoutChanged.emit()
    # Note: New in MPL2, edgecolor is RGBA with A defaulting to 0
    # (ie transparent, which Sciplot does not currently support).
    self.refreshAllPlots()
def __hist(self, data, bins=10, label=None, meta=None, x_label=None,
           y_label='Counts', **kwargs):
    """
    MPL-like histogram plotting

    Parameters
    ----------
    data : ndarray (1D, for now)
        Data to histogram
    bins : int
        Number of histogram bins
    label : str
        Label of plot
    meta : dict, optional
        Metadata forwarded to the bar plot (default: a new empty dict)
    x_label : str
        X-axis label (units)
    y_label : str
        Y-axis label (units)
    kwargs : dict
        Other parameters sent directly to mpl-plot
    """
    # BUG FIX: the default used to be `meta={}`; a mutable default dict is
    # shared across calls, so metadata could leak between histograms.
    if meta is None:
        meta = {}
    counts, lefts = _np.histogram(data, bins=bins)
    # numpy returns bin *edges*; self.bar wants bar *centers*, so shift
    # each left edge by half the (uniform) bin width.
    gap = _np.abs(lefts[1] - lefts[0])
    offset = gap / 2
    self.bar(lefts[:-1] + offset, counts, width_factor=1.0, label=label,
             x_label=x_label, y_label=y_label, meta=meta, **kwargs)
def __updateBarsDataStyle(self):
    """
    Style info changed in the bars model; copy each model entry's style
    back onto the matching bar data object, then redraw everything.
    """
    for entry in self.modelBars._model_data:
        position = self.list_ids.index(entry['id'])
        self.list_all[position].model_style = entry
    self.refreshAllPlots()
def __updateBarsDataDelete(self, row, plt_id):
    """
    A bar plot was deleted (likely from within the model); drop the
    matching bookkeeping entries and redraw everything.
    """
    where = self.list_ids.index(plt_id)
    del self.list_ids[where]
    del self.list_all[where]
    self.refreshAllPlots()
def axisAspect(self):
    """
    Apply the aspect-ratio selection from the combo box to the axes,
    then refresh layout, parameter widgets, and the canvas.
    """
    chosen = self.ui.comboBoxAspect.currentText()
    self.mpl_widget.ax.set_aspect(chosen)
    self.mpl_widget.fig.tight_layout()
    self.updateAxisParameters()
    self.updateFigureParameters()
    self.mpl_widget.draw()
def axisScaling(self):
    """
    Apply the axis-scaling selection from the combo box to the axes,
    then refresh layout, parameter widgets, and the canvas.
    """
    chosen = self.ui.comboBoxAxisScaling.currentText()
    self.mpl_widget.ax.axis(chosen)
    self.mpl_widget.fig.tight_layout()
    self.updateAxisParameters()
    self.updateFigureParameters()
    self.mpl_widget.draw()
def axisVisible(self):
    """
    Turn the axis on or off according to the visibility checkbox,
    then refresh layout, parameter widgets, and the canvas.
    """
    mode = 'on' if self.ui.checkBoxAxisVisible.isChecked() else 'off'
    self.mpl_widget.ax.axis(mode)
    self.mpl_widget.fig.tight_layout()
    self.updateAxisParameters()
    self.updateFigureParameters()
    self.mpl_widget.draw()
def axisLimits(self):
    """
    Apply a new axis limit taken from whichever limit lineEdit emitted
    the signal, then refresh layout, parameter widgets, and the canvas.
    """
    # Map each limit widget to the axis() keyword it controls.
    limit_keyword = {self.ui.lineEditXLimMin: 'xmin',
                     self.ui.lineEditXLimMax: 'xmax',
                     self.ui.lineEditYLimMin: 'ymin',
                     self.ui.lineEditYLimMax: 'ymax'}
    source = self.sender()
    keyword = limit_keyword.get(source)
    if keyword is not None:
        self.mpl_widget.ax.axis(**{keyword: float(source.text())})
    self.mpl_widget.fig.tight_layout()
    self.updateAxisParameters()
    self.updateFigureParameters()
    self.mpl_widget.draw()
def updateAxisParameters(self):
    """
    Query the current axis state and push it into the matching widgets
    (visibility checkbox and the four limit lineEdits).
    """
    self.ui.checkBoxAxisVisible.setChecked(self.mpl_widget.ax.axison)
    limits = self.mpl_widget.ax.axis()  # (xmin, xmax, ymin, ymax)
    widgets = (self.ui.lineEditXLimMin, self.ui.lineEditXLimMax,
               self.ui.lineEditYLimMin, self.ui.lineEditYLimMax)
    for widget, value in zip(widgets, limits):
        widget.setText(str(round(value, 5)))
def updateFigureParameters(self):
    """
    Query the current figure DPI / save-DPI / size and push them into the
    matching spin boxes.
    """
    fig_dpi = self.mpl_widget.fig.get_dpi()
    save_dpi = _mpl.rcParams['savefig.dpi']
    # BUG FIX: this used to be `save_dpi is 'figure'` -- identity
    # comparison with a string literal is not guaranteed to succeed even
    # when the values are equal (and emits SyntaxWarning on modern
    # CPython). Use equality.
    if save_dpi == 'figure':
        # rcParams' sentinel meaning "use the figure's own DPI"
        save_dpi = fig_dpi
    fig_size = self.mpl_widget.fig.get_size_inches()
    self.ui.spinBoxFigureDPI.setValue(fig_dpi)
    self.ui.spinBoxFigureSavedDPI.setValue(save_dpi)
    self.ui.spinBoxFigSizeWidth.setValue(fig_size[0])
    self.ui.spinBoxFigSizeHeight.setValue(fig_size[1])
def defaultView(self):
    """
    Set default and Home view to the current one.
    """
    # Older matplotlib toolbars keep private view/position stacks; newer
    # versions removed these attributes. Catch only AttributeError
    # instead of a bare `except:` so real errors are not swallowed.
    try:
        self.mpl_widget.toolbar._views.clear()
        self.mpl_widget.toolbar._positions.clear()
    except AttributeError:
        pass
    self.mpl_widget.toolbar.update()
def clearAllBars(self):
    """
    Remove every bar plot: reset the bars model and drop each bar's
    bookkeeping data.
    """
    try:
        self.modelBars._model_data = []
        # list_bar_ids returns a fresh list, so it is safe to iterate
        # while clearID mutates the backing lists.
        for bar_id in self.list_bar_ids:
            self.clearID(bar_id)
        self.modelBars.layoutChanged.emit()
    except Exception:
        # Best-effort cleanup (matches the other clear* methods), but no
        # longer a bare `except:` that would trap KeyboardInterrupt too.
        print('Error in clearAllBars')
def clearID(self, clear_id):
    """Remove the plot whose id is *clear_id* from both bookkeeping lists."""
    position = self.list_ids.index(clear_id)
    del self.list_ids[position]
    del self.list_all[position]
def clearAll(self):
    """
    Clear all plots and graphs and images
    """
    # Each model category is cleared in its own try-block so that a
    # failure in one does not prevent clearing the others (best-effort).
    try:
        self.modelLine._model_data = []
        self.modelLine.layoutChanged.emit()
    except:
        print('Error in clear all of plots/lines')
    try:
        self.modelBars._model_data = []
        self.modelBars.layoutChanged.emit()
    except:
        print('Error in clear all of bars')
    try:
        # Need to iterate as to check for colorbar existance
        for num, model_data in enumerate(self.modelImages._model_data):
            idx_to_remove = self.list_ids.index(model_data['id'])
            self.list_ids.pop(idx_to_remove)
            popd = self.list_all.pop(idx_to_remove)
            if popd.cbar['obj'] is not None:
                # Detach the image's colorbar from the figure before dropping it
                popd.cbar['obj'].remove()
        self.modelImages._model_data = []
        self.modelImages.layoutChanged.emit()
    except:
        print('Error in clear all of images')
    try:
        self.modelFillBetween._model_data = []
        self.modelFillBetween.layoutChanged.emit()
    except:
        print('Error in clear all of fill-betweens')
    try:
        # Reset the master bookkeeping lists last, after the per-category
        # cleanup above has had a chance to use them.
        self.list_ids = []
        self.list_all = []
    except:
        print('Error in clear all')
    finally:
        # Always redraw and notify listeners, even if some step failed.
        self.refreshAllPlots()
        self.all_cleared.emit(id(self))
def export_bars_csv(self):
    """Prompt for a filename and dump every bar plot's data as CSV."""
    ret = _QFileDialog.getSaveFileName(filter="Comma-Separated Values (*.csv);;All Files (*.*)")
    if not ret[0]:
        # User cancelled the dialog
        return
    with open(ret[0], 'w') as f:
        for bar in self.list_bar_objs:
            f.write('{}\n'.format(bar.label))
            f.write('left,')
            bar._left.tofile(f, sep=',')
            f.write('\nx,')
            bar.x.tofile(f, sep=',')
            f.write('\ny,')
            bar.y.tofile(f, sep=',')
            f.write('\n\n')
def export_lines_csv(self):
    """Prompt for a filename and dump every line plot's data as CSV."""
    ret = _QFileDialog.getSaveFileName(filter="Comma-Separated Values (*.csv);;All Files (*.*)")
    if not ret[0]:
        # User cancelled the dialog
        return
    with open(ret[0], 'w') as f:
        for line in self.list_line_objs:
            f.write('{}\n'.format(line.label))
            f.write('x,')
            line.x.tofile(f, sep=',')
            f.write('\ny,')
            line.y.tofile(f, sep=',')
            f.write('\n\n')
def export_fillbetweens_csv(self):
    """Prompt for a filename and dump every fill-between's data as CSV."""
    ret = _QFileDialog.getSaveFileName(filter="Comma-Separated Values (*.csv);;All Files (*.*)")
    if not ret[0]:
        # User cancelled the dialog
        return
    with open(ret[0], 'w') as f:
        for fb in self.list_fillbetween_objs:
            f.write('{}\n'.format(fb.label))
            f.write('x,')
            fb.x.tofile(f, sep=',')
            f.write('\ny_low,')
            fb.y_low.tofile(f, sep=',')
            f.write('\ny_high,')
            fb.y_high.tofile(f, sep=',')
            f.write('\n\n')
@property
def list_line_objs(self):
    """All tracked line-plot data objects."""
    return [entry for entry in self.list_all if isinstance(entry, _DataLine)]

@property
def list_line_ids(self):
    """Ids of all tracked line plots."""
    return [entry.id for entry in self.list_line_objs]

@property
def list_bar_objs(self):
    """All tracked bar-plot data objects."""
    return [entry for entry in self.list_all if isinstance(entry, _DataBar)]

@property
def list_bar_ids(self):
    """Ids of all tracked bar plots."""
    return [entry.id for entry in self.list_bar_objs]

@property
def list_fillbetween_objs(self):
    """All tracked fill-between data objects."""
    return [entry for entry in self.list_all if isinstance(entry, _DataFillBetween)]

@property
def list_fillbetween_ids(self):
    """Ids of all tracked fill-betweens."""
    return [entry.id for entry in self.list_fillbetween_objs]

@property
def list_image_objs(self):
    """All tracked image data objects."""
    return [entry for entry in self.list_all if isinstance(entry, _DataImages)]

@property
def list_image_ids(self):
    """Ids of all tracked images."""
    return [entry.id for entry in self.list_image_objs]

@property
def n_lines(self):
    """Number of line plots currently tracked."""
    return len(self.list_line_objs)

@property
def n_bars(self):
    """Number of bar plots currently tracked."""
    return len(self.list_bar_objs)

@property
def n_fillbetweens(self):
    """Number of fill-between plots currently tracked."""
    return len(self.list_fillbetween_objs)

@property
def n_images(self):
    """Number of images currently tracked."""
    return len(self.list_image_objs)
def figureDPI(self):
    """Ask the user for a new on-screen figure DPI and apply it."""
    current = self.mpl_widget.fig.get_dpi()
    new_dpi, accepted = _QInputDialog.getInt(self, "New Figure DPI", "DPI:", current, 10, 100000, 25)
    if not accepted:
        return
    self.mpl_widget.fig.set_dpi(new_dpi)
    self.mpl_widget.updateGeometry()
    self.mpl_widget.draw()
    self.updateFigureParameters()
def figureSaveDPI(self):
    """Ask the user for a new save-to-file DPI and store it in rcParams."""
    current = _mpl.rcParams['savefig.dpi']
    if not isinstance(current, int):
        # rcParam may hold the string 'figure'; seed the dialog with the
        # figure's own DPI in that case.
        current = self.mpl_widget.fig.get_dpi()
    new_dpi, accepted = _QInputDialog.getInt(self, "New DPI for Saved Figures", "DPI:", current, 10, 100000, 25)
    if accepted:
        _mpl.rcParams['savefig.dpi'] = new_dpi
        self.updateFigureParameters()
def figureSizeDisplay(self):
curr_size = self.mpl_widget.fig.get_size_inches()
new_size, okPressed = DualEntry.getDualEntries(curr_size[0], curr_size[1], input_type=float, text="Figure Size (W x H inches)", parent=self)
if okPressed:
self.mpl_widget.updateGeometry()
self.mpl_widget.fig.set_size_inches(new_size[0], | |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests _symbolic_operator.py."""
import copy
import numpy
import unittest
from openfermion.ops._symbolic_operator import SymbolicOperator
class DummyOperator1(SymbolicOperator):
    """Subclass of SymbolicOperator created for testing purposes."""
    # Fermion-like ladder actions: 1 rendered as '^' (raising),
    # 0 rendered as '' (lowering).
    actions = (1, 0)
    action_strings = ('^', '')
    # Action string follows the index (e.g. '3^'), and operators on
    # different indices do NOT commute.
    action_before_index = False
    different_indices_commute = False
class DummyOperator2(SymbolicOperator):
    """Subclass of SymbolicOperator created for testing purposes."""
    # Pauli-like actions rendered verbatim as 'X', 'Y', 'Z'.
    actions = ('X', 'Y', 'Z')
    action_strings = ('X', 'Y', 'Z')
    # Action string precedes the index (e.g. 'X3'), and operators on
    # different indices commute.
    action_before_index = True
    different_indices_commute = True
class SymbolicOperatorTest1(unittest.TestCase):
    """Test the subclass DummyOperator1."""

    def test_init_defaults(self):
        # A default-constructed operator carries no terms at all.
        op = DummyOperator1()
        self.assertEqual(len(op.terms), 0)
def test_init_tuple_real_coefficient(self):
    # Tuple term + plain float coefficient.
    term = ((0, 1), (5, 0), (6, 1))
    coeff = 0.5
    op = DummyOperator1(term, coeff)
    self.assertEqual(len(op.terms), 1)
    self.assertEqual(op.terms[tuple(term)], coeff)

def test_init_tuple_complex_coefficient(self):
    # Tuple term + complex coefficient.
    term = ((0, 1), (5, 0), (6, 1))
    coeff = 0.6j
    op = DummyOperator1(term, coeff)
    self.assertEqual(len(op.terms), 1)
    self.assertEqual(op.terms[tuple(term)], coeff)

def test_init_tuple_npfloat64_coefficient(self):
    # Tuple term + numpy float64 coefficient.
    term = ((0, 1), (5, 0), (6, 1))
    coeff = numpy.float64(2.303)
    op = DummyOperator1(term, coeff)
    self.assertEqual(len(op.terms), 1)
    self.assertEqual(op.terms[tuple(term)], coeff)

def test_init_tuple_npcomplex128_coefficient(self):
    # Tuple term + numpy complex128 coefficient.
    term = ((0, 1), (5, 0), (6, 1))
    coeff = numpy.complex128(-1.123j + 43.7)
    op = DummyOperator1(term, coeff)
    self.assertEqual(len(op.terms), 1)
    self.assertEqual(op.terms[tuple(term)], coeff)

def test_init_list_real_coefficient(self):
    # List term + plain float coefficient.
    term = [(0, 1), (5, 0), (6, 1)]
    coeff = 1. / 3
    op = DummyOperator1(term, coeff)
    self.assertEqual(len(op.terms), 1)
    self.assertEqual(op.terms[tuple(term)], coeff)

def test_init_list_complex_coefficient(self):
    # List term + complex coefficient.
    term = [(0, 1), (5, 0), (6, 1)]
    coeff = 2j / 3.
    op = DummyOperator1(term, coeff)
    self.assertEqual(len(op.terms), 1)
    self.assertEqual(op.terms[tuple(term)], coeff)

def test_init_list_npfloat64_coefficient(self):
    # List term + numpy float64 coefficient.
    term = [(0, 1), (5, 0), (6, 1)]
    coeff = numpy.float64(2.3037)
    op = DummyOperator1(term, coeff)
    self.assertEqual(len(op.terms), 1)
    self.assertEqual(op.terms[tuple(term)], coeff)

def test_init_list_npcomplex128_coefficient(self):
    # List term + numpy complex128 coefficient.
    term = [(0, 1), (5, 0), (6, 1)]
    coeff = numpy.complex128(-1.1237j + 43.37)
    op = DummyOperator1(term, coeff)
    self.assertEqual(len(op.terms), 1)
    self.assertEqual(op.terms[tuple(term)], coeff)
def test_identity_is_multiplicative_identity(self):
    # identity() must act as a two-sided multiplicative identity.
    ident = DummyOperator1.identity()
    op_a = DummyOperator1(((0, 1), (5, 0), (6, 1)), 0.6j)
    op_b = DummyOperator1(((0, 0), (5, 0), (6, 1)), 0.3j)
    op_sum = op_a + op_b
    self.assertTrue(op_a.isclose(ident * op_a))
    self.assertTrue(op_a.isclose(op_a * ident))
    self.assertTrue(op_b.isclose(ident * op_b))
    self.assertTrue(op_b.isclose(op_b * ident))
    self.assertTrue(op_sum.isclose(ident * op_sum))
    self.assertTrue(op_sum.isclose(op_sum * ident))
    ident *= op_sum
    self.assertTrue(op_sum.isclose(ident))
    self.assertFalse(op_a.isclose(ident))
    # identity() must hand back a fresh instance every call.
    self.assertFalse(DummyOperator1.identity().isclose(ident))
def test_zero_is_additive_identity(self):
    # zero() must act as a two-sided additive identity.
    zero = DummyOperator1.zero()
    op_a = DummyOperator1(((0, 1), (5, 0), (6, 1)), 0.6j)
    op_b = DummyOperator1(((0, 0), (5, 0), (6, 1)), 0.3j)
    op_sum = op_a + op_b
    self.assertTrue(op_a.isclose(zero + op_a))
    self.assertTrue(op_a.isclose(op_a + zero))
    self.assertTrue(op_b.isclose(zero + op_b))
    self.assertTrue(op_b.isclose(op_b + zero))
    self.assertTrue(op_sum.isclose(zero + op_sum))
    self.assertTrue(op_sum.isclose(op_sum + zero))
    zero += op_sum
    self.assertTrue(op_sum.isclose(zero))
    self.assertFalse(op_a.isclose(zero))
    # zero() must hand back a fresh instance every call.
    self.assertFalse(DummyOperator1.zero().isclose(zero))
def test_zero_is_multiplicative_nil(self):
    zero = DummyOperator1.zero()
    ident = DummyOperator1.identity()
    a = DummyOperator1(((0, 1), (5, 0), (6, 1)), 0.6j)
    b = DummyOperator1(((0, 0), (5, 0), (6, 1)), 0.3j)
    # zero annihilates anything it multiplies
    for factor in (ident, a, b, a + b):
        self.assertTrue(zero.isclose(zero * factor))
def test_init_str(self):
    # String constructor: '^' marks a raised (creation) operator.
    op = DummyOperator1('0^ 5 12^', -1.)
    expected = ((0, 1), (5, 0), (12, 1))
    self.assertIn(expected, op.terms)
    self.assertEqual(op.terms[expected], -1.0)
def test_init_long_str(self):
    # Multi-term string with coefficients, whitespace and an identity term.
    op = DummyOperator1(
        '(-2.0+3.0j) [0^ 1] +\n\n -1.0[ 2^ 3 ] - []', -1.)
    expected = (DummyOperator1('0^ 1', complex(2., -3.))
                + DummyOperator1('2^ 3', 1.)
                + DummyOperator1('', 1.))
    self.assertEqual(len((op - expected).terms), 0)
    # str() output must parse back to an equal operator
    reparsed = DummyOperator1(str(op))
    self.assertEqual(len((op - reparsed).terms), 0)
def test_merges_multiple_whitespace(self):
    # A whitespace-only string parses to the identity term.
    self.assertEqual(DummyOperator1(' \n ').terms, {(): 1})
def test_init_str_identity(self):
    # The empty string yields the identity term ().
    self.assertIn((), DummyOperator1('').terms)
def test_init_bad_term(self):
    # A bare integer is not a valid term specification.
    self.assertRaises(ValueError, DummyOperator1, 2)
def test_init_bad_coefficient(self):
    # Coefficients must be numeric, not strings.
    self.assertRaises(ValueError, DummyOperator1, '0^', "0.5")
def test_init_bad_action_str(self):
    # '-' is not a valid action marker in string form.
    self.assertRaises(ValueError, DummyOperator1, '0-')
def test_init_bad_action_tuple(self):
    # Tuple actions must be 0 or 1; 2 is invalid.
    self.assertRaises(ValueError, DummyOperator1, ((0, 2),))
def test_init_bad_tuple(self):
    # Factors must be 2-tuples, not 3-tuples.
    self.assertRaises(ValueError, DummyOperator1, ((0, 1, 1),))
def test_init_bad_str(self):
    # A lone '^' with no mode index is invalid.
    self.assertRaises(ValueError, DummyOperator1, '^')
def test_init_bad_mode_num(self):
    # Negative mode indices are rejected in string form.
    self.assertRaises(ValueError, DummyOperator1, '-1^')
def test_init_invalid_tensor_factor(self):
    # Negative mode indices are rejected in tuple form too.
    self.assertRaises(ValueError, DummyOperator1, ((-2, 1), (1, 0)))
def test_DummyOperator1(self):
    # Constructing with coefficient 3. equals scaling the identity term by 3.
    scaled = DummyOperator1((), 3.)
    self.assertTrue(scaled.isclose(DummyOperator1(()) * 3.))
def test_isclose_abs_tol(self):
    # abs_tol=0.1 accepts a 0.05 gap and rejects a 0.11 gap,
    # for both real and purely imaginary coefficients.
    for unit in (1., 1j):
        a = DummyOperator1('0^', -1. * unit)
        b = DummyOperator1('0^', -1.05 * unit)
        c = DummyOperator1('0^', -1.11 * unit)
        self.assertTrue(a.isclose(b, rel_tol=1e-14, abs_tol=0.1))
        self.assertFalse(a.isclose(c, rel_tol=1e-14, abs_tol=0.1))
def test_isclose_rel_tol(self):
    small = DummyOperator1('0', 1)
    large = DummyOperator1('0', 2)
    self.assertTrue(small.isclose(large, rel_tol=2.5, abs_tol=0.1))
    # the relative comparison must be symmetric in its operands
    self.assertTrue(small.isclose(large, rel_tol=1, abs_tol=0.1))
    self.assertTrue(large.isclose(small, rel_tol=1, abs_tol=0.1))
def test_isclose_zero_terms(self):
    # An operator scaled to zero compares close to explicit zero operators.
    zeroed = DummyOperator1('1^ 0', -1j) * 0
    self.assertTrue(zeroed.isclose(DummyOperator1((), 0.0),
                                   rel_tol=1e-12, abs_tol=1e-12))
    self.assertTrue(DummyOperator1().isclose(
        zeroed, rel_tol=1e-12, abs_tol=1e-12))
def test_isclose_different_terms(self):
    # Operators with different terms are "close" only when the coefficient
    # magnitude fits inside abs_tol; the check is symmetric.
    a = DummyOperator1(((1, 0),), -0.1j)
    b = DummyOperator1(((1, 1),), -0.1j)
    for first, second in ((a, b), (b, a)):
        self.assertTrue(first.isclose(second, rel_tol=1e-12, abs_tol=0.2))
        self.assertFalse(first.isclose(second, rel_tol=1e-12, abs_tol=0.05))
def test_isclose_different_num_terms(self):
    # Differing term counts must break closeness in both directions.
    two_terms = DummyOperator1(((1, 0),), -0.1j)
    two_terms += DummyOperator1(((1, 1),), -0.1j)
    one_term = DummyOperator1(((1, 0),), -0.1j)
    self.assertFalse(one_term.isclose(two_terms, rel_tol=1e-12, abs_tol=0.05))
    self.assertFalse(two_terms.isclose(one_term, rel_tol=1e-12, abs_tol=0.05))
def test_imul_inplace(self):
    # *= must mutate the existing object rather than rebinding a new one.
    op = DummyOperator1("1^")
    original_id = id(op)
    op *= 3.
    self.assertEqual(id(op), original_id)
    self.assertEqual(op.terms[((1, 1),)], 3.)
def test_imul_scalar_real(self):
    # In-place scaling by a real scalar updates the coefficient.
    term = ((1, 0), (2, 1))
    op = DummyOperator1(term)
    op *= 0.5
    self.assertEqual(op.terms[term], 0.5)
def test_imul_scalar_complex(self):
    # In-place scaling by a complex scalar updates the coefficient.
    term = ((1, 0), (2, 1))
    op = DummyOperator1(term)
    op *= 0.6j
    self.assertEqual(op.terms[term], 0.6j)
def test_imul_scalar_npfloat64(self):
    # In-place scaling by a numpy float64 works like a Python float.
    term = ((1, 0), (2, 1))
    scale = numpy.float64(2.303)
    op = DummyOperator1(term)
    op *= scale
    self.assertEqual(op.terms[term], scale)
def test_imul_scalar_npcomplex128(self):
    # In-place scaling by a numpy complex128 works like a Python complex.
    term = ((1, 0), (2, 1))
    scale = numpy.complex128(-1.123j + 1.7911)
    op = DummyOperator1(term)
    op *= scale
    self.assertEqual(op.terms[term], scale)
def test_imul_fermion_op(self):
    '''In-place operator product concatenates terms and multiplies coefficients.'''
    op1 = DummyOperator1(((0, 1), (3, 0), (8, 1), (8, 0), (11, 1)), 3.j)
    op2 = DummyOperator1(((1, 1), (3, 1), (8, 0)), 0.5)
    op1 *= op2
    # BUG FIX: was `1.j * 3.0j * 0.5` (a stray 1.j factor) and was never
    # asserted; the product coefficient is simply 3.j * 0.5.
    correct_coefficient = 3.j * 0.5
    correct_term = ((0, 1), (3, 0), (8, 1), (8, 0), (11, 1),
                    (1, 1), (3, 1), (8, 0))
    self.assertEqual(len(op1.terms), 1)
    self.assertIn(correct_term, op1.terms)
    self.assertEqual(op1.terms[correct_term], correct_coefficient)
def test_imul_fermion_op_2(self):
    '''Chained in-place products: op3 *= op4, then op4 *= (updated) op3.'''
    op3 = DummyOperator1(((1, 1), (0, 0)), -1j)
    op4 = DummyOperator1(((1, 0), (0, 1), (2, 1)), -1.5)
    op3 *= op4
    op4 *= op3
    self.assertIn(((1, 1), (0, 0), (1, 0), (0, 1), (2, 1)), op3.terms)
    self.assertEqual(op3.terms[((1, 1), (0, 0), (1, 0), (0, 1), (2, 1))],
                     1.5j)
    # BUG FIX: op4 was multiplied but never verified. Its term is its original
    # factors followed by op3's updated factors; coefficient -1.5 * 1.5j.
    op4_term = ((1, 0), (0, 1), (2, 1),
                (1, 1), (0, 0), (1, 0), (0, 1), (2, 1))
    self.assertIn(op4_term, op4.terms)
    self.assertEqual(op4.terms[op4_term], -1.5 * 1.5j)
def test_imul_fermion_op_duplicate_term(self):
    # ('1 2 3' + '1 2' + '1') * ('3' + '2 3') yields the term 1 2 3 twice,
    # so its coefficient accumulates to 2.
    left = DummyOperator1('1 2 3')
    left += DummyOperator1('1 2')
    left += DummyOperator1('1')
    right = DummyOperator1('3')
    right += DummyOperator1('2 3')
    left *= right
    self.assertAlmostEqual(left.terms[((1, 0), (2, 0), (3, 0))], 2.)
def test_imul_bidir(self):
    # Multiply both ways in place; the second product uses the updated lhs.
    lhs = DummyOperator1(((1, 1), (0, 0)), -1j)
    rhs = DummyOperator1(((1, 1), (0, 1), (2, 1)), -1.5)
    lhs *= rhs
    rhs *= lhs
    lhs_term = ((1, 1), (0, 0), (1, 1), (0, 1), (2, 1))
    self.assertIn(lhs_term, lhs.terms)
    self.assertEqual(lhs.terms[lhs_term], 1.5j)
    rhs_term = ((1, 1), (0, 1), (2, 1)) + lhs_term
    self.assertIn(rhs_term, rhs.terms)
    self.assertEqual(rhs.terms[rhs_term], -2.25j)
def test_imul_bad_multiplier(self):
    # In-place multiplication by a string must raise TypeError.
    operand = DummyOperator1(((1, 1), (0, 1)), -1j)
    with self.assertRaises(TypeError):
        operand *= "1"
def test_mul_by_scalarzero(self):
    # Multiplying by scalar zero keeps the term but zeroes its coefficient.
    zeroed = DummyOperator1(((1, 1), (0, 1)), -1j) * 0
    self.assertNotIn(((0, 1), (1, 1)), zeroed.terms)
    self.assertIn(((1, 1), (0, 1)), zeroed.terms)
    self.assertEqual(zeroed.terms[((1, 1), (0, 1))], 0.0)
def test_mul_bad_multiplier(self):
    # Out-of-place multiplication by a string must raise TypeError.
    operand = DummyOperator1(((1, 1), (0, 1)), -1j)
    with self.assertRaises(TypeError):
        operand = operand * "0.5"
def test_mul_out_of_place(self):
    left = DummyOperator1(((0, 1), (3, 1), (3, 0), (11, 1)), 3.j)
    right = DummyOperator1(((1, 1), (3, 1), (8, 0)), 0.5)
    product = left * right
    expected_term = ((0, 1), (3, 1), (3, 0), (11, 1),
                     (1, 1), (3, 1), (8, 0))
    # out-of-place multiplication must leave both factors untouched
    self.assertTrue(left.isclose(DummyOperator1(
        ((0, 1), (3, 1), (3, 0), (11, 1)), 3.j)))
    self.assertTrue(right.isclose(DummyOperator1(((1, 1), (3, 1), (8, 0)),
                                                 0.5)))
    self.assertTrue(product.isclose(DummyOperator1(expected_term,
                                                   3.0j * 0.5)))
def test_mul_npfloat64(self):
    # Right-multiplying by a numpy float64 scales the coefficient.
    op = DummyOperator1(((1, 0), (3, 1)), 0.5)
    product = op * numpy.float64(0.5)
    self.assertTrue(product.isclose(DummyOperator1(((1, 0), (3, 1)),
                                                   0.5 * 0.5)))
def test_mul_multiple_terms(self):
    # Squaring a two-term operator expands to all four cross products.
    op = DummyOperator1(((1, 0), (8, 1)), 0.5)
    op += DummyOperator1(((1, 1), (9, 1)), 1.4j)
    squared = op * op
    expected = DummyOperator1(((1, 0), (8, 1), (1, 0), (8, 1)), 0.5 ** 2)
    expected += (DummyOperator1(((1, 0), (8, 1), (1, 1), (9, 1)), 0.7j)
                 + DummyOperator1(((1, 1), (9, 1), (1, 0), (8, 1)), 0.7j))
    expected += DummyOperator1(((1, 1), (9, 1), (1, 1), (9, 1)), 1.4j ** 2)
    self.assertTrue(squared.isclose(expected))
def test_rmul_scalar_real(self):
    # Left and right scalar multiplication must agree.
    op = DummyOperator1(((1, 1), (3, 0), (8, 1)), 0.5)
    scale = 0.5
    self.assertTrue((op * scale).isclose(scale * op))
def test_rmul_scalar_complex(self):
op | |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/16_agent.ipynb (unless otherwise specified).
__all__ = ['Agent', 'PredictiveAgent', 'BaselineAgent', 'CriticAgent', 'GenerativeAgent', 'SupervisedCB', 'Rollback',
'RetrainRollback', 'ResetAndRetrain', 'MetricResetAndRetrain', 'SaveAgentWeights']
# Cell
from ..imports import *
from ..core import *
from ..torch_imports import *
from ..torch_core import *
from .callback import *
# Cell
class Agent(Callback):
    '''
    Agent - class for bundling a model, loss function, and dataset

    Inputs:

    - `model nn.Module`: model

    - `loss_function Callable`: loss function for supervised training. Should
    function as `loss = loss_function(model_output, y)`

    - `dataset Base_Dataset`: dataset

    - `opt_kwargs Optional[dict]`: keyword arguments passed to `optim.Adam`
    (default None; avoids the shared mutable-default pitfall of `{}`)

    - `clip float`: gradient clipping

    - `name str`: agent name
    '''
    def __init__(self, model, loss_function, dataset, opt_kwargs=None, clip=1., name='agent'):
        super().__init__(name=name, order=2)
        self.model = model
        to_device(self.model)
        self.loss_function = loss_function
        self.dataset = dataset
        self.opt = self.get_opt(self.model.parameters(), **(opt_kwargs or {}))
        self.clip = clip
        self.training = True
        self.compute_outputs = True

    def get_opt(self, parameters, **optim_kwargs):
        'Build the optimizer (Adam); override in subclasses for other optimizers'
        return optim.Adam(parameters, **optim_kwargs)

    def before_compute_reward(self):
        '''
        uses self.dataset to convert the environment's current samples into
        tensors, stashing `x`, `y` and the batch size on the batch state
        '''
        env = self.environment
        batch_state = env.batch_state
        sequences = batch_state.samples
        batch_ds = self.dataset.new(sequences)
        batch = batch_ds.collate_function([batch_ds[i] for i in range(len(batch_ds))])
        batch = to_device(batch)
        bs = len(batch_ds)
        x,y = batch
        batch_state.x = x
        batch_state.y = y
        batch_state.bs = bs

    def zero_grad(self):
        'Clear accumulated gradients'
        self.opt.zero_grad()

    def before_step(self):
        # clip gradients only while in training mode
        if self.training:
            nn.utils.clip_grad_norm_(self.model.parameters(), self.clip)

    def step(self):
        if self.training:
            self.opt.step()

    def one_batch(self, batch, fp16=False):
        '''
        Run a single (x, y) batch through the model and return the loss.
        With `fp16=True` the forward pass runs under autocast.
        '''
        batch = to_device(batch)
        x,y = batch
        if not isinstance(x, (list, tuple)):
            x = [x]
        if fp16:
            with torch.cuda.amp.autocast():
                output = self.model(*x)
                loss = self.loss_function(output, y)
        else:
            output = self.model(*x)
            loss = self.loss_function(output, y)
        return loss

    def train_supervised(self, bs, epochs, lr, percent_valid=0.05,
                         silent=False, fp16=False, save_every=None, opt_kwargs=None):
        '''
        train_supervised - trains on data in `self.dataset`

        Inputs:

        - `bs int`: batch size

        - `epochs int`: number of epochs

        - `lr float`: learning rate passed to `optim.lr_scheduler.OneCycleLR`

        - `percent_valid float`: validation set percentage

        - `silent bool`: if training losses should be printed

        - `fp16 bool`: if FP16 training should be used

        - `save_every Optional[int]`: If an integer is given, model
        weights are saved every `save_every` batches to
        `{self.name}_weights_{batch}.pt`

        - `opt_kwargs Optional[dict]`: keyword arguments passed to optimizer
        '''
        total_batches = 0
        if fp16:
            scaler = torch.cuda.amp.GradScaler()
        train_ds, valid_ds = self.dataset.split(percent_valid)
        # drop the last batch if it would contain a single sample
        # (e.g. batchnorm cannot run on a batch of one)
        if len(train_ds)%bs==1:
            train_dl = train_ds.dataloader(bs, shuffle=True, drop_last=True)
        else:
            train_dl = train_ds.dataloader(bs, shuffle=True)
        valid_dl = valid_ds.dataloader(bs)
        opt = optim.Adam(self.model.parameters(), lr=lr, **(opt_kwargs or {}))
        scheduler = optim.lr_scheduler.OneCycleLR(opt, max_lr=lr,
                            steps_per_epoch=len(train_dl), epochs=epochs)
        if silent:
            mb = range(epochs)
        else:
            mb = master_bar(range(epochs))
            # BUG FIX: header row was written unconditionally, which crashed
            # when `silent=True` made `mb` a plain range
            mb.write(['Epoch', 'Train Loss', 'Valid Loss', 'Time'], table=True)
        for epoch in mb:
            start = time.time()
            train_losses = []
            if silent:
                batch_iter = iter(train_dl)
            else:
                batch_iter = progress_bar(train_dl, parent=mb)
            for batch in batch_iter:
                loss = self.one_batch(batch, fp16=fp16)
                opt.zero_grad()
                if fp16:
                    scaler.scale(loss).backward()
                    scaler.step(opt)
                    scaler.update()
                else:
                    loss.backward()
                    opt.step()
                scheduler.step()
                train_losses.append(loss.detach().cpu())
                if not silent:
                    mb.child.comment = f"{train_losses[-1]:.5f}"
                total_batches += 1
                if (save_every is not None) and (total_batches%save_every==0):
                    self.save_weights(f'{self.name}_weights_{total_batches}.pt')
            with torch.no_grad():
                self.model.eval()
                valid_losses = []
                if len(valid_ds)>0:
                    if silent:
                        batch_iter = iter(valid_dl)
                    else:
                        batch_iter = progress_bar(valid_dl, parent=mb)
                    for batch in batch_iter:
                        loss = self.one_batch(batch)
                        valid_losses.append(loss.detach().cpu())
                        if not silent:
                            mb.child.comment = f"{valid_losses[-1]:.5f}"
                else:
                    valid_losses = [torch.tensor(0.)]
                self.model.train()
            train_loss = smooth_batches(train_losses)
            valid_loss = smooth_batches(valid_losses)
            end = time.time() - start
            if not silent:
                # BUG FIX: report the smoothed epoch losses; previously the
                # smoothed values were computed but the raw last-batch losses
                # were printed instead
                mb.write([epoch, f'{train_loss:.5f}',
                          f'{valid_loss:.5f}', f'{format_time(end)}'], table=True)

    def update_dataset(self, dataset):
        'Replace the agent dataset'
        self.dataset = dataset

    def update_dataset_from_inputs(self, *dataset_inputs):
        'Build a new dataset from raw inputs and install it'
        dataset = self.dataset.new(*dataset_inputs)
        self.update_dataset(dataset)

    def load_state_dict(self, state_dict):
        self.model.load_state_dict(state_dict)

    def load_weights(self, filename):
        'Load model weights from `filename`, mapping to the model device'
        state_dict = torch.load(filename, map_location=get_model_device(self.model))
        self.load_state_dict(state_dict)

    def save_weights(self, filename):
        'Save model weights (state dict only) to `filename`'
        state_dict = self.model.state_dict()
        torch.save(state_dict, filename)

    def save(self, filename):
        'Pickle the whole agent to `filename`'
        torch.save(self, filename)
# Cell
class PredictiveAgent(Agent):
    '''
    PredictiveAgent - Agent class for predictive models

    Inputs:

    - `model nn.Module`: model

    - `loss_function Callable`: loss function for supervised training. Should
    function as `loss = loss_function(model_output, y)`

    - `dataset Base_Dataset`: dataset

    - `opt_kwargs dict`: dictionary of keyword arguments passed to `optim.Adam`

    - `clip float`: gradient clipping

    - `name str`: agent name
    '''
    def predict_tensor(self, x):
        'Run the model on `x` (a single tensor or a list/tuple of tensors)'
        inputs = x if isinstance(x, (list, tuple)) else [x]
        return self.model(*inputs)

    def predict_data(self, data):
        'Collate `data` into one batch (with dummy targets) and predict it'
        ds = self.dataset.new(data, [0 for _ in data])
        batch = to_device(ds.collate_function([ds[i] for i in range(len(ds))]))
        x, _ = batch
        return self.predict_tensor(x)

    def predict_data_batch(self, data, bs):
        'Predict `data` in batches of size `bs` and concatenate the outputs'
        ds = self.dataset.new(data, [0 for _ in data])
        outputs = []
        for batch in ds.dataloader(bs, shuffle=False):
            xb, _ = batch
            xb = to_device(xb)
            if not isinstance(xb, (list, tuple)):
                xb = [xb]
            outputs.append(self.model(*xb))
        return torch.cat(outputs)
# Cell
class BaselineAgent(Agent):
    '''
    BaselineAgent - agent for a model with a baseline model

    Inputs:

    - `model nn.Module`: model

    - `loss_function Callable`: loss function for supervised training. Should
    function as `loss = loss_function(model_output, y)`

    - `dataset Base_Dataset`: dataset

    - `base_update float`: update fraction for the baseline model. Updates
    the base model following `base_model = base_update*base_model + (1-base_update)*model`

    - `base_update_iter int`: update frequency for baseline model

    - `base_model bool`: if False, baseline model will not be created

    - `opt_kwargs Optional[dict]`: keyword arguments passed to `optim.Adam`

    - `clip float`: gradient clipping

    - `name str`: agent name
    '''
    def __init__(self, model, loss_function, dataset, base_update=0.99,
                 base_update_iter=10, base_model=True, opt_kwargs=None,
                 clip=1., name='baseline_agent'):
        # `opt_kwargs or {}` keeps compatibility with Agent while avoiding a
        # shared mutable default
        super().__init__(model, loss_function, dataset, opt_kwargs or {}, clip, name)
        self.set_models(base_model)
        self.base_update = base_update
        self.base_update_iter = base_update_iter

    def after_batch(self):
        'Blend current weights into the baseline every `base_update_iter` iterations'
        log = self.environment.log
        iterations = log.iterations
        if iterations%self.base_update_iter == 0 and iterations>0:
            self.update_base_model()

    def set_models(self, base_model):
        '''
        Install the baseline: `True` deep-copies the current model, anything
        else (e.g. False, or an explicit module) is stored as-is
        '''
        if base_model==True:
            self.base_model = copy.deepcopy(self.model)
        else:
            self.base_model = base_model
        # BUG FIX: bare `except:` also swallowed KeyboardInterrupt/SystemExit;
        # narrow to Exception. Best-effort because base_model may not be a module.
        try:
            to_device(self.base_model)
        except Exception:
            pass

    def base_to_model(self):
        '''
        copies weights from `model` into `base_model`
        '''
        if type(self.base_model)==type(self.model):
            self.base_model.load_state_dict(self.model.state_dict())

    def model_to_base(self):
        '''
        copies weights from `base_model` into `model`
        '''
        if type(self.base_model)==type(self.model):
            self.model.load_state_dict(self.base_model.state_dict())

    def update_base_model(self):
        '''
        updates baseline model weights
        '''
        if type(self.base_model)==type(self.model):
            if self.base_update < 1:
                merge_models(self.base_model, self.model, alpha=self.base_update)

    def save_weights(self, filename):
        'Save both model and baseline state dicts to `filename`'
        state_dict = {}
        state_dict['model'] = self.model.state_dict()
        if isinstance(self.base_model, nn.Module):
            state_dict['base_model'] = self.base_model.state_dict()
        else:
            state_dict['base_model'] = None
        torch.save(state_dict, filename)

    def load_state_dict(self, state_dict):
        'Load from a dict holding `model` and (optionally) `base_model` states'
        self.model.load_state_dict(state_dict['model'])
        if isinstance(self.base_model, nn.Module):
            self.base_model.load_state_dict(state_dict['base_model'])

    def load_weights(self, filename):
        state_dict = torch.load(filename, map_location=get_model_device(self.model))
        self.load_state_dict(state_dict)
# Cell
class CriticAgent(BaselineAgent):
    '''
    CriticAgent - baseline agent for critic models

    Inputs:

    - `model nn.Module`: model

    - `loss_function Callable`: loss function for supervised training. Should
    function as `loss = loss_function(model_output, y)`

    - `dataset Base_Dataset`: dataset

    - `base_update float`: update fraction for the baseline model. Updates
    the base model following `base_model = base_update*base_model + (1-base_update)*model`

    - `base_update_iter int`: update frequency for baseline model

    - `base_model bool`: if False, baseline model will not be created

    - `opt_kwargs dict`: dictionary of keyword arguments passed to `optim.Adam`

    - `clip float`: gradient clipping

    - `name str`: agent name
    '''
    def predict_tensor(self, x, baseline=False):
        '''
        Run `model` (or `base_model` when `baseline=True`) on `x`.
        Returns None if `baseline=True` and no baseline module exists.
        '''
        # BUG FIX: accept tuples as well as lists, consistent with
        # Agent.one_batch and PredictiveAgent.predict_tensor (was type(x)==list,
        # which wrapped a tuple of inputs into a single argument)
        if not isinstance(x, (list, tuple)):
            x = [x]
        if baseline:
            if isinstance(self.base_model, nn.Module):
                output = self.base_model(*x)
            else:
                output = None
        else:
            output = self.model(*x)
        return output

    def predict_data(self, data):
        'Collate `data` into one batch (with dummy targets) and predict it'
        ds = self.dataset.new(data, [0 for i in data])
        batch = ds.collate_function([ds[i] for i in range(len(ds))])
        batch = to_device(batch)
        x,y = batch
        return self.predict_tensor(x)

    def get_model_outputs(self):
        '''
        Score the current batch with the live model (with grad) and the
        baseline model (no grad), stashing both on the batch state
        '''
        if self.compute_outputs:
            env = self.environment
            batch_state = env.batch_state
            x = batch_state.x
            y = batch_state.y
            preds = self.predict_tensor(x, baseline=False)
            batch_state.model_output = preds
            with torch.no_grad():
                base_preds = self.predict_tensor(x, baseline=True)
            batch_state.base_output = base_preds
# Cell
class GenerativeAgent(BaselineAgent):
'''
GenerativeAgent - baseline agent for generative models
Inputs:
- `model nn.Module`: model
- `vocab Vocab`: vocabulary
- `loss_function Callable`: loss function for supervised training. Should
function as `loss = loss_function(model_output, y)`
- `dataset Base_Dataset`: dataset
- `base_update float`: update fraction for the baseline model. Updates
the base model following `base_model = base_update*base_model + (1-base_update)*model`
- `base_update_iter int`: update frequency for baseline model
- `base_model bool`: if False, baseline model will not be created
- `opt_kwargs dict`: dictionary of keyword arguments passed to `optim.Adam`
- `clip float`: gradient clipping
- `name str`: agent name
'''
def __init__(self, model, vocab, loss_function, dataset,
             base_update=0.99, base_update_iter=10, base_model=True,
             opt_kwargs={}, clip=1., name='generative_agent'):
    'Generative agent: a BaselineAgent plus a `vocab` used to decode samples'
    super().__init__(model, loss_function, dataset,
                     base_update=base_update, base_update_iter=base_update_iter,
                     base_model=base_model, opt_kwargs=opt_kwargs,
                     clip=clip, name=name)
    self.vocab = vocab
def reconstruct(self, preds):
    'Decode sampled token tensors back into sequences via the vocab'
    tokens = preds.detach().cpu()
    return maybe_parallel(self.vocab.reconstruct, list(tokens))
def sample_and_reconstruct(self, bs, sl, **sample_kwargs):
    'Sample `bs` sequences of length `sl` without gradients and decode them'
    sampled, _ = self.model.sample_no_grad(bs, sl, **sample_kwargs)
    return self.reconstruct(sampled)
def batch_sample_and_reconstruct(self, n_samples, sample_bs, sl, **sample_kwargs):
    'Sample roughly `n_samples` sequences in chunks of `sample_bs` and decode them'
    decoded = []
    for _ in range(n_samples // sample_bs):
        decoded += self.sample_and_reconstruct(sample_bs, sl, **sample_kwargs)
    return decoded
def before_compute_reward(self):
env = self.environment
batch_state = env.batch_state
sequences = batch_state.samples
batch_ds = self.dataset.new(sequences)
batch = batch_ds.collate_function([batch_ds[i] for i in range(len(batch_ds))])
| |
density, g_local, g_local_FWHM)
data_error += ' %3d %6.3s %6.3f %6.3f %6.3f %6.3f %6.3f %6.3s %6.3s %6.3s %6.3s %6.3s %6.3f %6.3f\n'\
%(index+1, "NA", Eww_self_error, Eww_error, Esw_error, dTStrans_error, dTSorient_error, "NA", "NA", "NA", "NA", "NA", g_local_half_maximum*volume, g_local_FWHM)
if return_it:
returner = OrderedDict()
returner.update({'n_avg':water_position_n_average, \
'dens':water_position_dens, \
'dH':water_positions_delta_H, \
'TdS':water_positions_delta_TS, \
'dG':water_positions_delta_G, \
'dEww':water_positions_Eww, \
'dEww_self':water_positions_Eww_self, \
'dEsw':water_positions_Esw, \
'dTStra':water_positions_dTStrans, \
'dTSor':water_positions_dTSorient, \
})
return returner
print ("Writing output files...")
o = open('%s_gist_water_analysis.dat' %output, "w")
o.write(header+data)
o.close()
o = open('%s_gist_water_analysis_errorestimation.dat' %output, "w")
o.write(header+data_error)
o.close()
write_pdb(water_positions_delta_H ,'%s_gist_water_analysis_dH' %output, dict_indexing)
write_pdb(water_positions_delta_TS ,'%s_gist_water_analysis_TdS' %output, dict_indexing)
write_pdb(water_positions_delta_G ,'%s_gist_water_analysis_dG' %output, dict_indexing)
write_pdb(water_positions_Eww ,'%s_gist_water_analysis_Eww' %output, dict_indexing)
write_pdb(water_positions_Esw ,'%s_gist_water_analysis_Esw' %output, dict_indexing)
write_pdb(water_positions_dTStrans ,'%s_gist_water_analysis_dTStr' %output, dict_indexing)
write_pdb(water_positions_dTSorient,'%s_gist_water_analysis_dTSor' %output, dict_indexing)
print ("Done")
def integrate_finite_spheres(bins, coordinate_data, sphere_origin, radius, radius_start=0.0, grid_data={}, is_self_grid=False, is_volume_field=False, FWHM=False):
    """Integrate a gridded scalar field over a spherical shell.

    The voxel values in `grid_data` (dict keyed by integer grid indices) are
    placed on a regular grid, linearly interpolated, and integrated in
    spherical coordinates around `sphere_origin` from `radius_start` to
    `radius` with nested trapezoidal rules (phi, then theta, then r).

    Returns a 3-tuple `(integral, half_maximum, FWHM_radius)`; the last two
    entries are None unless `FWHM=True`.
    NOTE(review): when `FWHM=True` but the half-maximum search never
    triggers, the function falls off the end and implicitly returns None —
    confirm callers handle that case.
    NOTE(review): `np.array(map(...))` below yields a numeric array only on
    Python 2, where `map` returns a list; Python 3 would need `list(map(...))`.
    NOTE(review): the mutable default `grid_data={}` is only read here, but
    remains a shared-default hazard if ever mutated.
    """
    #if is_self_grid, grid_data must be dict[x,y,z] = {dict[x,y,z]:float(value)}
    #we need a bounding box...
    # self_volume = 0.0
    # if is_self_grid:
    #     self_volume = integrate_finite_spheres(bins, coordinate_data, sphere_origin, radius, radius_start, {}, is_self_grid=False,is_volume_field=True)
    sampling_grid_size = 0.1
    # grid origin and box edge vectors in real space
    o = np.array(coordinate_data[0,0,0])
    xdim = np.array(coordinate_data[bins[0]-1,0,0]) - o
    ydim = np.array(coordinate_data[0,bins[1]-1,0]) - o
    zdim = np.array(coordinate_data[0,0,bins[2]-1]) - o
    # sphere bounding box in grid indices, padded by 10 points, clamped to grid
    bounding_min = np.array(sphere_origin)-radius - o
    bounding_max = np.array(sphere_origin)+radius - o
    min_x = 0 if int(bounding_min[0]/xdim[0]*bins[0])-10 < 0 else int(bounding_min[0]/xdim[0]*bins[0])-10
    min_y = 0 if int(bounding_min[1]/ydim[1]*bins[1])-10 < 0 else int(bounding_min[1]/ydim[1]*bins[1])-10
    min_z = 0 if int(bounding_min[2]/zdim[2]*bins[2])-10 < 0 else int(bounding_min[2]/zdim[2]*bins[2])-10
    max_x = bins[0] if int(bounding_max[0]/xdim[0]*bins[0])+10 > bins[0] else int(bounding_max[0]/xdim[0]*bins[0])+10
    max_y = bins[1] if int(bounding_max[1]/ydim[1]*bins[1])+10 > bins[1] else int(bounding_max[1]/ydim[1]*bins[1])+10
    max_z = bins[2] if int(bounding_max[2]/zdim[2]*bins[2])+10 > bins[2] else int(bounding_max[2]/zdim[2]*bins[2])+10
    sampling_rate = int(radius/sampling_grid_size)+1
    #Then we build the sampling grid...
    xx = np.array(map(lambda x: coordinate_data[x,0,0][0], range(min_x, max_x)))
    yy = np.array(map(lambda y: coordinate_data[0,y,0][1], range(min_y, max_y)))
    zz = np.array(map(lambda z: coordinate_data[0,0,z][2], range(min_z, max_z)))
    def grid_the_values(x,y,z):
        # value of the field at integer voxel (x, y, z)
        if is_volume_field:
            # volume integration: constant field of 1
            return 1.0
        if is_self_grid and (x,y,z) in grid_data:
            # each occupied voxel holds its own sub-grid; integrate it recursively
            return integrate_finite_spheres(bins, coordinate_data, sphere_origin, radius, grid_data=grid_data[x,y,z], is_self_grid=False)
        if not (x,y,z) in grid_data:
            return 0.0
        return grid_data[x,y,z]
    grid_data_vectorized = np.vectorize(grid_the_values)
    data = grid_data_vectorized(*np.meshgrid(np.array(range(min_x,max_x),dtype=float), np.array(range(min_y,max_y), dtype=float), \
                                np.array(range(min_z,max_z),dtype=float), indexing='ij', sparse=True))
    value_field_interpolation_linear = scipy.interpolate.RegularGridInterpolator((xx,yy,zz), data, bounds_error=False, fill_value=None)
    def sphere_to_xyz (r, theta, phi):
        # integrand in spherical coordinates: f(x,y,z) * r^2 * sin(theta)
        x, y, z = r * np.sin(theta) * np.cos(phi) + sphere_origin[0], r * np.sin(theta) * np.sin(phi) + sphere_origin[1], r * np.cos(theta) + sphere_origin[2]
        return value_field_interpolation_linear((x,y,z)) * r**2 * np.sin(theta)
    phi_sampling = np.linspace(0.0, 2*np.pi, sampling_rate)
    theta_sampling = np.linspace(0.0, np.pi, sampling_rate)
    r_sampling = np.linspace(radius_start, radius, sampling_rate)
    sampled_data = sphere_to_xyz(*np.meshgrid(r_sampling, theta_sampling, phi_sampling, indexing='ij', sparse=True))
    # nested trapezoidal integration: innermost over phi, then theta, then r
    Integral_r = np.zeros(sampling_rate)
    Integral_theta = np.zeros(sampling_rate)
    for r in range(sampling_rate):
        for theta in range(sampling_rate):
            Integral_theta[theta]=np.trapz(sampled_data[r,theta,:], phi_sampling)
        Integral_r[r]=np.trapz(Integral_theta, theta_sampling)
    result = np.trapz(Integral_r, r_sampling)
    #calculate FWHM
    if not FWHM:
        return (result, None, None)
    # re-sample out to a radius covering the padded bounding box and walk the
    # shell-averaged radial profile to find where it falls to half maximum
    big_radius = 0.5*np.sqrt(3.0)*(max_x-min_x)*(xdim[0]/bins[0])
    sampling_rate = int(big_radius/sampling_grid_size)+1
    phi_sampling = np.linspace(0.0, 2*np.pi, sampling_rate)
    theta_sampling = np.linspace(0.0, np.pi, sampling_rate)
    r_sampling = np.linspace(0.0, big_radius, sampling_rate)
    sampled_data = sphere_to_xyz(*np.meshgrid(r_sampling, theta_sampling, phi_sampling, indexing='ij', sparse=True))
    Integral_r = np.zeros(sampling_rate)
    Integral_theta = np.zeros(sampling_rate)
    for r in range(sampling_rate):
        for theta in range(sampling_rate):
            Integral_theta[theta]=np.trapz(sampled_data[r,theta,:], phi_sampling)
        Integral_r[r]=np.trapz(Integral_theta, theta_sampling)
        if r>3:
            # shell-averaged density of the two most recent shells
            Integral_last=np.trapz(Integral_r[r-2:r], r_sampling[r-2:r]) / (4.0/3.0 * np.pi * (((r-1)*sampling_grid_size)**3-((r-2)*sampling_grid_size)**3))
            Integral_new =np.trapz(Integral_r[r-1:r+1], r_sampling[r-1:r+1]) / (4.0/3.0 * np.pi * ((r*sampling_grid_size)**3-((r-1)*sampling_grid_size)**3))
            # once the radial profile flattens out, take the plateau as baseline
            if -0.1 < (Integral_new-Integral_last)/sampling_grid_size < 0.1:
                maximum = np.trapz(Integral_r[1:3], r_sampling[1:3]) / (4.0/3.0 * np.pi * ((2*sampling_grid_size)**3-(sampling_grid_size)**3))
                half_maximum = maximum - 0.5*(maximum-Integral_new)
                for r1 in range(1, sampling_rate):
                    # NOTE(review): `r_sampling[r1:r+2]` mixes loop indices r1 and
                    # r — possibly intended as r1+2; confirm before changing
                    value = np.trapz(Integral_r[r1:r1+2], r_sampling[r1:r+2]) / (4.0/3.0 * np.pi * (((r1+1)*sampling_grid_size)**3-(r1*sampling_grid_size)**3))
                    if value<half_maximum:
                        return (result, half_maximum, (r1+0.5)*sampling_grid_size)
    #via gauss quadrature, not stable...
    #return scipy.integrate.tplquad(sphere_to_xyz, 0, radius, lambda x: 0.0, lambda x: np.pi, lambda x,y: 0.0, lambda x,y: 2*np.pi)[0]
def density_vs_time (traj, prmtop, h_site, site_radius, name, np, start_init, stop_init, skip):
    """Track per-hydration-site water occupancy over a trajectory, then run an
    R/bio3d Spearman-correlation analysis on the resulting time series.

    `np` is the number of worker processes (NOTE: it shadows the usual numpy
    alias inside this function). With np > 1 the function re-launches this
    script (`path2script`, a module-level global) once per worker via
    subprocess and merges their `.tmp_*.dat` outputs; with np == 1 it computes
    occupancies directly via `density_vs_time_compute` and terminates the
    process (this branch is the subprocess worker path).

    NOTE(review): the `length / np * i` and `len(temp) / h_site_count`
    arithmetic relies on Python 2 integer division; under Python 3 these
    become floats and the indexing would break.
    """
    site_occupancy = OrderedDict()
    if np > 1:
        # parent path: split the frame range across `np` worker subprocesses
        O_coord = MDAnalysis.Universe(h_site).coord
        h_site_count = len(O_coord)
        MDtraj = MDAnalysis.Universe(prmtop, traj)
        # resolve a possibly-negative stop index to an absolute frame number
        stop_init = MDtraj.trajectory[stop_init].frame
        del O_coord, MDtraj #saving some memory...
        length = stop_init - start_init
        processes = []
        for i in range(np):
            start = start_init + length / np * i
            stop = start_init + length / np * (i + 1)
            p = subprocess.Popen([sys.executable, path2script, '-j', 'dens_v_t', '-pa', '%s' %prmtop, '-tj', '%s' %traj, '-hi', '%s' %h_site, '-hr', '%s' %site_radius, '-np', '1', '-o', '%s' %name, '-start', '%s' %str(start), '-stop', '%s' %str(stop), '-skip', '%s' %str(skip)], shell = False)
            processes.append(p)
        for i in range(np):
            # wait for worker i, then parse and delete its temp file
            processes[i].wait()
            f = open(".tmp_%s.dat" %str(start_init + length / np * i), "r").read()
            temp = f.replace('[', '').replace(']', '').replace(',', '').rstrip().split()
            os.remove(".tmp_%s.dat" %str(start_init + length / np * i))
            #temp = processes[i].communicate()[0].replace('[', '').replace(']', '').replace(',', '').rstrip().split()
            for j in range(len(temp) / h_site_count):
                site_occupancy[j + i * len(temp) / h_site_count ] = temp [(j * h_site_count) : ((j + 1) * h_site_count)]
    else: #only executed for the subprocess worker invocation (since np = 1)
        #traj: path to trajectory
        #prmtop: path to parameter file
        #hydration-site: path to hydration-sites as pdb format
        O_coord = MDAnalysis.Universe(h_site).coord
        MDtraj = MDAnalysis.Universe(prmtop, traj)
        density_vs_time_compute (MDtraj, O_coord, site_radius, start_init, stop_init, skip)
        # worker writes its results to .tmp_<start>.dat and terminates here
        exit()
    # serialize the merged occupancy table: one line per time chunk
    data = ''
    for i in site_occupancy.keys():
        for j in site_occupancy[i]:
            data = data + str(j) + ' '
        data = data + '\n'
    o = open('density_vs_time_%s.dat' %name, "w")
    o.write(data)
    o.flush()
    o.close()
    # generate and run the R/bio3d correlation script on the occupancy matrix
    R_input = '''library(bio3d)
pdb_ref <- read.pdb("%s")
ca.inds_ref <- atom.select(pdb_ref, resno=c(1:%s))
data <- read.table("./density_vs_time_%s.dat")
hcor <- cor(data, method="spearman")
view.dccm(hcor, pdb_ref$xyz[ca.inds_ref$xyz], outprefix="%s_corr")
''' %(h_site, str(h_site_count), name, name)
    o = open('density_vs_time_%s.R' %name, "w")
    o.write(R_input)
    o.close()
    os.system("R CMD BATCH density_vs_time_%s.R" %name)
#coord = hydration_site coordinates [[x1,y1,z1], [x2,y2,z2], ...]
def density_vs_time_compute (MDtraj, O_coord, site_radius, start, stop, skip):
    """Count waters within `site_radius` of each hydration site for every
    frame in [start:stop:skip], appending each frame's occupancy list to
    `.tmp_<start>.dat`.

    Always returns None; results are communicated through the temp file.
    """
    #site_occupancy = {frame#:[occupancy1, occupancy2, ...} The ordering of the occupancies should be exactly the same as in coord.
    site_occupancy = OrderedDict()
    frame_occupancy_init = []
    o = open(".tmp_%s.dat" %str(start), "a")
    for site_count in range(len(O_coord)):
        frame_occupancy_init.append(0)
    for ts in MDtraj.trajectory[start:stop:skip]:
        # NOTE(review): this aliases (not copies) frame_occupancy_init, so one
        # list is reused across frames; safe only because every element is
        # overwritten below before being written out.
        frame_occupancy = frame_occupancy_init
        for j, site in enumerate(O_coord):
            wat_around_site = MDtraj.selectAtoms("resname WAT and name O and point %s %s %s %s" %(str(site[0]), str(site[1]), str(site[2]), str(site_radius)))
            frame_occupancy[j] = len(wat_around_site)
        o.write(str(frame_occupancy))
        o.write(', ')
        #site_occupancy[ts.frame] = frame_occupancy
    o.close()
    return None
    #return site_occupancy
#return site_occupancy
def calc_dist (vector1, vector2):
    'Euclidean distance between the first three components of two points.'
    squared = sum((vector1[axis] - vector2[axis]) ** 2 for axis in range(3))
    return math.sqrt(squared)
def calc_mean (vector1, vector2):
    'Component-wise midpoint of the first three components of two points.'
    return [(vector1[axis] + vector2[axis]) / 2.0 for axis in range(3)]
def calc_sd (dict_init):
    """Normalize every map value by the standard deviation of the positive values.

    `dict_init` maps grid keys to numeric strings (real or fractional
    coordinates). The population standard deviation is computed over values
    > 0 only; all values (including non-positive ones) are then divided by it.
    Mutates `dict_init` in place and returns it (the original code's
    `dict_new` was an alias of the input, so in-place mutation is preserved).
    Exits the program if the standard deviation is zero.
    """
    # BUG FIX: Python-2-only `print` statements replaced with the call form,
    # which works on both Python 2 and 3 and matches the style used elsewhere
    # in this file (e.g. `print ("Writing output files...")`).
    scores = [float(value) for value in dict_init.values() if float(value) > 0.0]
    sd = np.sqrt(np.var(scores))
    print(len(scores))
    if sd == 0.0:
        print("Standard deviation is zero. Check the map.")
        exit()
    print("Standard deviation for all non-zero values is %5.3f." % sd)
    for key, value in dict_init.items():
        dict_init[key] = str(float(value) / sd)
    return dict_init
def frac2real (dict_init, origin, dim, n, alpha, beta, gamma):
    '''Convert map keys from fractional grid indices to real-space (X, Y, Z)
    coordinates for a (possibly non-orthogonal) cell; values are carried over.
    Cell angles alpha/beta/gamma are in degrees.'''
    dict_real = OrderedDict()
    cos_a = ( math.cos(math.pi*beta/180) * math.cos(math.pi*gamma/180) - math.cos(math.pi*alpha/180) ) / ( math.sin(math.pi*beta/180) * math.sin(math.pi*gamma/180))
    sin_a = math.sqrt(1.0 - cos_a**2)
    for frac_key, value in dict_init.items():
        # rows of the (upper-triangular-free) fractional-to-cartesian matrix
        real_x = origin[0] + dim[0] / n[0] * frac_key[0] + dim[1] / n[1] * math.cos(math.pi*gamma/180) * frac_key[1] + dim[2] / n[2] * math.cos(math.pi*beta/180) * frac_key[2]
        real_y = origin[1] + 0 + dim[1] / n[1] * math.sin(math.pi*gamma/180) * frac_key[1] - dim[2] / n[2] * math.sin(math.pi*beta/180) * cos_a * frac_key[2]
        real_z = origin[2] + 0 + 0 + dim[2] / n[2] * math.sin(math.pi*beta/180) * sin_a * frac_key[2]
        dict_real[real_x, real_y, real_z] = value
    return dict_real
def real_cut (map_data_old, map_data, output):
bins_old = map_data_old['bins']
n_old = map_data_old['n']
dim_old = map_data_old['dim']
X_bin_size_old = float(dim_old[0]/n_old[0])
Y_bin_size_old = float(dim_old[1]/n_old[1])
Z_bin_size_old = float(dim_old[2]/n_old[2])
old_map_frac = map_data_old['map']
bins = map_data['bins']
n = map_data['n']
dim = map_data['dim']
origin = map_data['origin']
alpha = map_data['alpha']
beta = map_data['beta']
gamma = map_data['gamma']
cos_a = ( math.cos(math.pi*beta/180) * math.cos(math.pi*gamma/180) - math.cos(math.pi*alpha/180) ) / ( math.sin(math.pi*beta/180) * math.sin(math.pi*gamma/180))
sin_a = math.sqrt(1.0 - cos_a**2)
crd_real = OrderedDict()
#Find out which values are inside the new box and write them into a new map in real space
for x in range(0,bins[0]):
for y in range(0,bins[1]):
for z in range(0,bins[2]):
X_crd = origin[0] + dim[0] / n[0] * x + dim[1] / n[1] * math.cos(math.pi*gamma/180) * y + dim[2] / n[2] * math.cos(math.pi*beta/180) * z
Y_crd = origin[1] + 0 + dim[1] / n[1] * math.sin(math.pi*gamma/180) * y - dim[2] / n[2] * math.sin(math.pi*beta/180) * cos_a * z
Z_crd = origin[2] + 0 + 0 + dim[2] / n[2] * math.sin(math.pi*beta/180) * sin_a * z
box_checker, basis_vectors = is_in_box([X_crd, Y_crd, Z_crd], map_data_old, True)
if box_checker:
X_vector_frac = int(basis_vectors[0] * bins_old[0])
X_vector_frac_deviation = | |
- 1. # to network input -1..1 format
# Get visibility map from depth if needed
if self.use_custom_visibility:
visibility_map_input = ClassicMapping.is_visible_from_depth(depth, self.local_map_shape, sim=self.params.sim, zoom_factor=self.brain_requirements.transform_window_scaler,
fix_habitat_depth=self.params.fix_habitat_depth)
visibility_map_input = visibility_map_input[:, :, None].astype(np.float32)
assert np.all(visibility_map_input <= 1.)
else:
visibility_map_input = np.zeros(self.visibility_input.shape[2:], dtype=np.float32)
# # Map prediction only, using known pose
# last_global_map_input = np.zeros(self.max_map_size + (self.map_ch, ), np.float32)
# last_global_map_input[:map_shape[0], :map_shape[1]] = self.global_map_logodds
# true_map_input = np.zeros(self.max_map_size + (1, ), np.uint8)
# true_map_input[:global_map_label.shape[0], :global_map_label.shape[1]] = global_map_label
#
# feed_dict = {
# self.images_input: images, self.xy_input: true_xy, self.yaw_input: np.array((true_yaw, )),
# self.global_map_input: last_global_map_input,
# self.true_map_input: true_map_input,
# }
# if self.visibility_input is not None:
# visibility_map_input = ClassicMapping.is_visible_from_depth(depth, self.local_map_shape, sim=self.params.sim, zoom_factor=self.brain_requirements.transform_window_scaler)
# visibility_map_input = visibility_map_input[:, :, None].astype(np.uint8)
# feed_dict[self.visibility_input] = visibility_map_input
#
# mapping_output = self.run_inference(feed_dict)
# global_map_logodds = np.array(mapping_output.global_map_logodds[0, -1]) # squeeze batch and traj
# global_map_logodds = global_map_logodds[:map_shape[0], :map_shape[1]]
# self.global_map_logodds = global_map_logodds
time_prepare = time.time() - time_last
time_last = time.time()
# SLAM prediction
if self.step_i == 0:
# For the first step we dont do pose update, but we need to obtain local maps and image features
self.image_traj = [images.copy()]
# Get local maps for first
feed_dict = {
self.new_images_input: images[None, None],
self.visibility_input: visibility_map_input[None, None],
}
# TODO we should predict global map as well with a single local map added to it
new_local_maps, new_visibility_maps, new_image_features = self.sess.run([self.inference_outputs['new_local_maps'], self.inference_outputs['new_visibility_maps'], self.inference_outputs['new_image_features']], feed_dict=feed_dict)
self.local_map_traj = [new_local_maps[0, 0]]
self.visibility_traj = [new_visibility_maps[0, 0]]
self.image_features_traj = [new_image_features[0, 0]]
slam_outputs = None
# Transform predictions
global_map_true_partial = None
assert self.global_map_logodds.shape[-1] == 1
global_map_pred = ClassicMapping.inverse_logodds(self.global_map_logodds)
slam_xy = np.mean(self.particle_xy_list[-1], axis=0)
slam_yaw = np.mean(self.particle_yaw_list[-1], axis=0)
slam_mean_xy = slam_xy
slam_mean_yaw = slam_yaw
slam_mean2_xy = slam_xy
slam_mean2_yaw = slam_yaw
slam_ml_xy = slam_xy
slam_ml_yaw = slam_yaw
slam_traj_xy = None
slam_traj_yaw = None
else:
assert len(self.action_traj) > 0
assert len(self.particle_xy_list) == len(self.action_traj)
assert self.visibility_traj[-1].dtype == np.float32
assert np.all(self.visibility_traj[-1] <= 1.)
inference_trajlen = self.params.inference_trajlen
self.image_traj.append(images.copy())
self.true_xy_traj.append(true_xy)
self.true_yaw_traj.append(true_yaw)
new_action = np.array((self.action_traj[-1], ), np.int32)[None]
new_rel_xy, new_rel_yaw = actions_from_trajectory(
np.stack([self.true_xy_traj[-2], self.true_xy_traj[-1]], axis=0), np.stack([self.true_yaw_traj[-2], self.true_yaw_traj[-1]], axis=0))
# Pick best segment of the trajectory based on how much viewing areas overlap
current_trajlen = len(self.particle_xy_list) + 1
assert len(self.true_xy_traj) == current_trajlen and len(self.image_features_traj) == current_trajlen - 1
if self.params.slam_use_best_steps:
mean_traj_xy, mean_traj_yaw = ClassicMapping.mean_particle_traj(
np.array(self.particle_xy_list), np.array(self.particle_yaw_list), self.particle_logit_acc_list[-1][None, :, None])
mean_traj_xy, mean_traj_yaw = ClassicMapping.propage_trajectory_with_action(mean_traj_xy, mean_traj_yaw, self.action_traj[-1])
segment_steps = ClassicMapping.get_steps_with_largest_overlapping_view(
mean_traj_xy, mean_traj_yaw, segment_len=inference_trajlen, view_distance=30*self.brain_requirements.transform_window_scaler)
else:
segment_steps = np.arange(max(current_trajlen-inference_trajlen, 0), current_trajlen)
assert segment_steps.ndim == 1
past_particle_xy = np.stack(self.particle_xy_list, axis=0)
past_particle_yaw = np.stack(self.particle_yaw_list, axis=0)
true_xy_seg = np.stack([self.true_xy_traj[i] for i in segment_steps], axis=0) + self.true_xy_offset[None]
true_yaw_seg = np.stack([self.true_yaw_traj[i] for i in segment_steps], axis=0)
past_image_features_seg = np.stack([self.image_features_traj[i] for i in segment_steps[:-1]], axis=0)
past_local_maps = np.stack(self.local_map_traj, axis=0)
past_visibility = np.stack(self.visibility_traj, axis=0)
feed_dict = {
self.inference_timesteps_input: segment_steps[None],
self.new_images_input: images[None, None],
self.last_images_input: self.image_traj[-2][None, None],
self.visibility_input: visibility_map_input[None, None],
self.past_local_maps_input: past_local_maps[None],
self.past_visibility_input: past_visibility[None],
self.past_needed_image_features_input: past_image_features_seg[None],
self.global_map_shape_input: np.array(map_shape[:2], np.int32),
# global_map_input: global_map,
# self.images_input: images_seg[None], # always input both images and global map, only one will be connected
self.true_xy_input: true_xy_seg[None], # used for global to local transition and loss
self.true_yaw_input: true_yaw_seg[None],
# self.visibility_input: visibility_seg[None],
# self.particle_xy_input: particle_xy_seg[None],
# self.particle_yaw_input: particle_yaw_seg[None],
self.particle_xy_input: past_particle_xy[None],
self.particle_yaw_input: past_particle_yaw[None],
self.new_action_input: new_action[None],
self.new_rel_xy_input: new_rel_xy[None],
self.new_rel_yaw_input: new_rel_yaw[None],
self.last_step_particle_logits_input: self.particle_logit_acc_list[-1][None],
}
slam_outputs = self.run_inference(feed_dict, need_map=need_map)
# Deal with resampling
self.particle_xy_list = [particle[slam_outputs.particle_indices[0]] for particle in self.particle_xy_list]
self.particle_yaw_list = [particle[slam_outputs.particle_indices[0]] for particle in self.particle_yaw_list]
self.particle_logit_acc_list = [particle[slam_outputs.particle_indices[0]] for particle in self.particle_logit_acc_list]
# Store new particles
self.particle_xy_list.append(slam_outputs.particle_xy_t[0])
self.particle_yaw_list.append(slam_outputs.particle_yaw_t[0])
self.particle_logit_acc_list.append(slam_outputs.particle_logits_acc[0])
if FAKE_INPUT_FOR_SPEED_TEST:
self.particle_xy_list[-1] = self.particle_xy_list[-1] * 0 + true_xy[None] + self.true_xy_offset[None]
# Store local map prediction
self.local_map_traj.append(slam_outputs.new_local_maps[0, 0])
self.visibility_traj.append(slam_outputs.new_visibility_maps[0, 0])
self.image_features_traj.append(slam_outputs.new_image_features[0, 0])
print (self.image_features_traj[-1].shape)
# Store losses. only meaningful if true state was input
self.xy_loss_list.append(slam_outputs.loss_xy_all[0])
self.yaw_loss_list.append(slam_outputs.loss_yaw_all[0])
# Update map
if need_map:
global_map_logodds = np.array(slam_outputs.global_map_logodds[0]) # squeeze batch and traj
# if global_map_logodds.shape != self.global_map_logodds.shape:
# raise ValueError("Unexpected global map shape output from slam net.")
if not self.fixed_map_size:
global_map_logodds = global_map_logodds[:map_shape[0], :map_shape[1]]
self.global_map_logodds = global_map_logodds
# Transform predictions
global_map_true_partial = None
assert self.global_map_logodds.shape[-1] == 1
global_map_pred = ClassicMapping.inverse_logodds(self.global_map_logodds)
slam_mean_xy = slam_outputs.mean_xy[0, -1]
slam_mean_yaw = slam_outputs.mean_yaw[0, -1]
slam_mean2_xy = slam_outputs.mean2_xy[0, -1]
slam_mean2_yaw = slam_outputs.mean2_yaw[0, -1]
slam_ml_xy = slam_outputs.ml_xy[0, -1]
slam_ml_yaw = slam_outputs.ml_yaw[0, -1]
slam_traj_xy = slam_outputs.xy[0, :] # the one used for mapping
slam_traj_yaw = slam_outputs.yaw[0, :] # the one used for mapping
slam_xy = slam_outputs.xy[0, -1] # the one used for mapping
slam_yaw = slam_outputs.yaw[0, -1]
# TODO should separate reassemble the map for the whole trajectory for the mean particle trajectory
# do NOT use most likely particle, its meaningless after resampling. Density is what matters.
# need to implement reasonable sequential averaging of yaws..
# Compute mean separately here
if self.params.brain == 'habslambrain_v1' and USE_ASSERTS:
mean_xy_from_np, mean_yaw_from_np = ClassicMapping.mean_particle_traj(self.particle_xy_list[-1], self.particle_yaw_list[-1], self.particle_logit_acc_list[-1][:, None])
xy_diff = np.abs(mean_xy_from_np - slam_mean_xy)
yaw_diff = np.abs(mean_yaw_from_np - slam_mean_yaw)
yaw_diff = (yaw_diff + np.pi) % (2 * np.pi) - np.pi
if not np.all(xy_diff < 1.) or not np.all(yaw_diff < np.deg2rad(10.)):
raise ValueError("SLAM mean and numpy mean dont match. Mean difference: %s vs %s | %s vs. %s" % (
str(mean_xy_from_np), str(slam_mean_xy), str(mean_yaw_from_np), str(slam_mean_yaw)))
# Pose source
if self.pose_source == 'true':
xy = true_xy + self.true_xy_offset
yaw = true_yaw
traj_xy = np.array(self.true_xy_traj) + self.true_xy_offset[None]
traj_yaw = np.array(self.true_yaw_traj)
assert slam_traj_xy is None or traj_xy.shape[0] == slam_traj_xy.shape[0]
elif self.pose_source in ["slam-truestart", "slam"]:
xy = slam_xy
yaw = slam_yaw
traj_xy = slam_traj_xy
traj_yaw = slam_traj_yaw
# TODO weighted mean of particles
else:
raise NotImplementedError
self.xy = xy
self.yaw = yaw
# Verify true pose
if USE_ASSERTS and self.params.agent_pose_source == 'true':
assert np.all(np.isclose(traj_xy[:, None], np.array(self.particle_xy_list), atol=1e-3))
assert np.all(np.isclose(traj_yaw[:, None], np.array(self.particle_yaw_list), atol=1e-3))
# last_action = self.action_traj[-1]
# if last_action == 1:
# nominal_xy = traj_xy[-2] + rotate_2d(np.array([5., 0.], np.float32), traj_yaw[-2])
# else:
# nominal_xy = traj_xy[-2]
# move_error = np.linalg.norm(xy - nominal_xy)
# move_amount = np.linalg.norm(xy - traj_xy[-2])
# print ("Act %d. Moved %f. Error %f"%(last_action, move_amount, move_error))
# if move_error > 3.:
# pdb.set_trace()
local_map_label = None
# local_map_label = slam_outputs.local_map_label[0, 0, :, :, 0]
# local_map_pred = slam_outputs.combined_local_map_pred[0, 0, :, :, 0]
ang_vel = yaw - self.prev_yaw
ang_vel = (ang_vel + np.pi) % (2*np.pi) - np.pi
target_dist = np.linalg.norm(self.target_xy - xy)
true_target_dist = np.linalg.norm(true_target_xy - true_xy)
xy_error, yaw_error = self.pose_error(slam_xy, slam_yaw, true_xy, true_yaw)
mean_xy_error, mean_yaw_error = self.pose_error(slam_mean_xy, slam_mean_yaw, true_xy, true_yaw)
mean2_xy_error, _ = self.pose_error(slam_mean2_xy, slam_mean2_yaw, true_xy, true_yaw)
ml_xy_error, _ = self.pose_error(slam_ml_xy, slam_ml_yaw, true_xy, true_yaw)
self.distance_history.append(target_dist)
if self.pose_source != 'slam' and not FAKE_INPUT_FOR_SPEED_TEST:
assert np.abs(np.sqrt(self.xy_loss_list[-1]) - xy_error) < 2. # one is before resampling, other is after
# Detect collision
is_colliding = False
if self.step_i > 2 and self.action_traj[-1] == 1 and self.recover_step_i == 0: # moved forward
last_step_len = np.linalg.norm(traj_xy[-2] - traj_xy[-1], axis=0)
if last_step_len < COLLISION_DISTANCE_THRESHOLD:
is_colliding = True
self.collision_timesteps.append(self.step_i)
self.num_collisions += 1
if self.recover_step_i >= len(self.recover_policy):
self.recover_step_i = 0 # done with recovery
dist_hist = np.array(self.distance_history[-self.GIVE_UP_NO_PROGRESS_STEPS:])
time_slam = time.time() - time_last
time_last = time.time()
should_give_up = False
# Modify state if its out of bounds, or give up if goal is out of bounds
if (np.any(self.target_xy_for_planning < TARGET_MAP_MARGIN)
or np.any(self.target_xy_for_planning + TARGET_MAP_MARGIN >= np.array(self.max_map_size))):
should_give_up = True
if USE_ASSERTS and self.fixed_map_size:
raise ValueError("Target is outside of map area -- this should not happen for fixed size map.")
elif (np.any(self.xy < 0) or np.any(self.xy >= np.array(self.max_map_size))):
print ("State is outside of map area -- this can happen for fixed size map because its cropped before the slam update.")
if self.fixed_map_size:
new_xy = np.clip(xy, [0., 0.], np.array(self.max_map_size, np.float32) - 0.001)
print ("moving state.. %s --> %s"%(str(xy), str(new_xy)))
xy = new_xy
self.xy = new_xy
else:
print ("Giving up")
should_give_up = True
# Check for time and distance limits
try:
for time_thres, dist_thres in self.GIVE_UP_STEP_AND_DISTANCE:
if self.step_i >= time_thres and target_dist >= dist_thres:
should_give_up = True
break
except Exception as e:
print ("Exception " + str(e))
# Give up if no progress for too long wallclock time
try:
mins_since_ep_start = (time.time() - self.episode_t) / 60
reduction_since_beginning = self.distance_history[0] - self.distance_history[-1]
for time_thres, reduct_thres in self.GIVE_UP_TIME_AND_REDUCTION:
if mins_since_ep_start >= time_thres and reduction_since_beginning < reduct_thres:
print ("Give up because of wallclock time and reduction t=%f reduct=%f"%(mins_since_ep_start, reduction_since_beginning))
should_give_up | |
'''
BSD 3-Clause License
Copyright (c) 2021, <NAME>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
import geopandas as gpd
import pandas as pd
from .grids import (
GPS_to_grid,
area_to_params,
grid_to_centre,
grid_to_polygon
)
from .coordinates import getdistance
def clean_same(data, col=['VehicleNum', 'Time', 'Lng', 'Lat']):
    '''
    Delete the data with the same information as the data before and
    after to reduce the amount of data. For example, if several consecutive
    data of an individual have the same information except for the time,
    only the first and last two data can be kept

    Parameters
    -------
    data : DataFrame
        Data
    col : List
        The column name, in the order of ['VehicleNum', 'Time', 'Lng',
        'Lat']. It will sort by time, and then determine the information
        of other columns besides the time. Extra columns after the first
        four are also compared.

    Returns
    -------
    data1 : DataFrame
        Cleaned data
    '''
    [VehicleNum, Time, Lng, Lat] = col[:4]
    extra = col[4:]
    data1 = data.copy()
    data1 = data1.drop_duplicates(subset=[VehicleNum, Time])
    data1 = data1.sort_values(by=[VehicleNum, Time])
    # Count, per row, how many compared columns match both the previous
    # and the next row. A row is redundant when all of them match.
    data1['issame'] = 0
    for i in [VehicleNum, Lng, Lat]+extra:
        data1['issame'] += (data1[i].shift() == data1[i]
                            ) & (data1[i].shift(-1) == data1[i])
    # ~ is the supported way to invert a boolean mask; unary - on a bool
    # Series raises TypeError with modern numpy/pandas.
    data1 = data1[~(data1['issame'] == len([VehicleNum, Lng, Lat]+extra))]
    data1 = data1.drop('issame', axis=1)
    return data1
def clean_drift(data, col=['VehicleNum', 'Time', 'Lng', 'Lat'],
                speedlimit=80, dislimit=1000):
    '''
    Delete the drift data. The select principle is that: if the speed of a
    trajectory point is larger than the speed limit with before and after
    points, while the speed between the before and after data is less than
    the speedlimit. The same three-point test is applied with distances
    against dislimit. The time column in the input data is calculated more
    efficiently if it is in datetime format.

    Parameters
    -------
    data : DataFrame
        Data
    col : List
        Column names, in the order of ['VehicleNum', 'Time', 'Lng', 'Lat']
    speedlimit : number
        Speed limitation (km/h); pass a falsy value to skip the speed test
    dislimit : number
        Distance limitation (m); pass a falsy value to skip the distance
        test

    Returns
    -------
    data1 : DataFrame
        Cleaned data
    '''
    [VehicleNum, Time, Lng, Lat] = col
    data1 = data.copy()
    data1 = data1.drop_duplicates(subset=[VehicleNum, Time])
    data1[Time+'_dt'] = pd.to_datetime(data1[Time])
    data1 = data1.sort_values(by=[VehicleNum, Time])
    # Neighbour values for the three-point (previous/current/next) test.
    for i in [VehicleNum, Lng, Lat, Time+'_dt']:
        data1[i+'_pre'] = data1[i].shift()
        data1[i+'_next'] = data1[i].shift(-1)
    data1['dis_pre'] = getdistance(
        data1[Lng],
        data1[Lat],
        data1[Lng+'_pre'],
        data1[Lat+'_pre'])
    data1['dis_next'] = getdistance(
        data1[Lng],
        data1[Lat],
        data1[Lng+'_next'],
        data1[Lat+'_next'])
    data1['dis_prenext'] = getdistance(
        data1[Lng+'_pre'],
        data1[Lat+'_pre'],
        data1[Lng+'_next'],
        data1[Lat+'_next'])
    data1['timegap_pre'] = data1[Time+'_dt'] - data1[Time+'_dt_pre']
    data1['timegap_next'] = data1[Time+'_dt_next'] - data1[Time+'_dt']
    data1['timegap_prenext'] = data1[Time+'_dt_next'] - data1[Time+'_dt_pre']
    # Speeds in km/h: metres / seconds * 3.6.
    data1['speed_pre'] = data1['dis_pre'] / \
        data1['timegap_pre'].dt.total_seconds()*3.6
    data1['speed_next'] = data1['dis_next'] / \
        data1['timegap_next'].dt.total_seconds()*3.6
    data1['speed_prenext'] = data1['dis_prenext'] / \
        data1['timegap_prenext'].dt.total_seconds()*3.6
    # ~ is the supported way to invert a boolean mask; unary - on a bool
    # Series raises TypeError with modern numpy/pandas. The neighbour rows
    # must belong to the same vehicle for the drift test to apply.
    if speedlimit:
        data1 = data1[
            ~((data1[VehicleNum+'_pre'] == data1[VehicleNum]) &
              (data1[VehicleNum+'_next'] == data1[VehicleNum]) &
              (data1['speed_pre'] > speedlimit) &
              (data1['speed_next'] > speedlimit) &
              (data1['speed_prenext'] < speedlimit))]
    if dislimit:
        data1 = data1[
            ~((data1[VehicleNum+'_pre'] == data1[VehicleNum]) &
              (data1[VehicleNum+'_next'] == data1[VehicleNum]) &
              (data1['dis_pre'] > dislimit) &
              (data1['dis_next'] > dislimit) &
              (data1['dis_prenext'] < dislimit))]
    # Drop all helper columns by restoring the caller's column set.
    data1 = data1[data.columns]
    return data1
def clean_outofbounds(data, bounds, col=['Lng', 'Lat']):
    '''
    Exclude the data that fall outside a rectangular study area.

    Parameters
    -------
    data : DataFrame
        Data
    bounds : List
        Latitude and longitude of the lower left and upper right of
        the study area, in the order of [lon1, lat1, lon2, lat2]
    col : List
        Column name of longitude and latitude

    Returns
    -------
    data1 : DataFrame
        Data within the scope of the study
    '''
    lon1, lat1, lon2, lat2 = bounds
    # Validate the box before filtering: corners must be ordered and the
    # coordinates must be geographically plausible.
    corners_swapped = (lon1 > lon2) | (lat1 > lat2)
    out_of_range = (abs(lat1) > 90) | (abs(lon1) > 180) | \
        (abs(lat2) > 90) | (abs(lon2) > 180)
    if corners_swapped | out_of_range:
        raise Exception(
            'Bounds error. The input bounds should be in the order \
of [lon1,lat1,lon2,lat2]. (lon1,lat1) is the lower left corner and \
(lon2,lat2) is the upper right corner.')
    Lng, Lat = col
    data1 = data.copy()
    # Strict inequalities: points exactly on the border are dropped.
    inside = (data1[Lng] > lon1) & (data1[Lng] < lon2) & \
        (data1[Lat] > lat1) & (data1[Lat] < lat2)
    data1 = data1[inside]
    return data1
def clean_outofshape(data, shape, col=['Lng', 'Lat'], accuracy=500):
    '''
    Input the GeoDataFrame of the study area and exclude the data beyond
    the study area

    Parameters
    -------
    data : DataFrame
        Data
    shape : GeoDataFrame
        The GeoDataFrame of the study area
    col : List
        Column name of longitude and latitude
    accuracy : number
        The size of grid. The principle is to do the data gridding first
        and then do the data cleaning. The smaller the size is, the higher
        accuracy it has

    Returns
    -------
    data1 : DataFrame
        Data within the scope of the study
    '''
    Lng, Lat = col
    # Dissolve the study area into a single geometry and grid its bbox.
    study_area = shape.unary_union
    params = area_to_params(study_area.bounds, accuracy)
    data1 = data.copy()
    data1['LONCOL'], data1['LATCOL'] = GPS_to_grid(
        data1[Lng], data1[Lat], params)
    # Test each distinct grid cell (not each point) against the area:
    # far cheaper than point-in-polygon on every record.
    grids = data1[['LONCOL', 'LATCOL']].drop_duplicates()
    grids['geometry'] = grid_to_polygon(
        [grids['LONCOL'], grids['LATCOL']], params)
    grids = gpd.GeoDataFrame(grids)
    grids = grids[grids.intersects(study_area)]
    # Keep only the records whose grid cell touches the study area.
    data1 = pd.merge(data1, grids[['LONCOL', 'LATCOL']]).drop(
        ['LONCOL', 'LATCOL'], axis=1)
    return data1
def clean_traj(data, col=['uid', 'str_time', 'lon', 'lat'], tripgap=1800,
               disgap=50000, speedlimit=80):
    '''
    A combo for trajectory data cleaning, including defining the the time
    length threshold considered as a new trip, and the distance threshold
    considered as a new trip

    Parameters
    -------
    data : DataFrame
        Trajectory data
    col : List
        Column names, in the order of ['uid', 'str_time', 'lon', 'lat']
    tripgap : number
        The time length threshold considered as a new trip
    disgap : number
        The distance threshold considered as a new trip
    speedlimit : number
        Speed limit

    Returns
    -------
    data1 : DataFrame
        Cleaned data
    '''
    uid, timecol, lon, lat = col
    # Work on a copy so the caller's DataFrame is not mutated in place
    # (the original converted the time column in the caller's frame).
    data = data.copy()
    data[timecol] = pd.to_datetime(data[timecol])
    data = data.sort_values(by=[uid, timecol])
    cols = []
    for i in data.columns:
        if i not in [uid, timecol, lon, lat]:
            cols.append(i)
    data = clean_same(data, col=[uid, timecol, lon, lat]+cols)
    data = clean_drift(data, col=[uid, timecol, lon, lat],
                       speedlimit=speedlimit)
    # Split into trips by time gap, then by distance gap.
    data = id_reindex(data, uid, timecol=timecol, timegap=tripgap)
    data = data.rename(columns={uid+'_new': 'tripid'})
    data = id_reindex_disgap(
        data, col=['tripid', lon, lat], disgap=disgap, suffix='')
    # Compute each trip's total length from consecutive point distances.
    data1 = data.copy()
    data1['lon1'] = data1[lon].shift(-1)
    data1['lat1'] = data1[lat].shift(-1)
    data1['tripid1'] = data1['tripid'].shift(-1)
    data1 = data1[data1['tripid'] == data1['tripid1']]
    data1['dis'] = getdistance(
        data1[lon], data1[lat], data1['lon1'], data1['lat1'])
    a = data1.groupby(['tripid'])['dis'].sum()
    # Keep only trips at least 50 m long. ~ is the supported boolean
    # inverter; unary - on a bool Series raises with modern numpy/pandas.
    a = a[~(a < 50)].reset_index()['tripid']
    data = pd.merge(data, a)
    # Re-number the trips now that the short ones are gone.
    data = data.drop('tripid', axis=1)
    data = id_reindex(data, uid, timecol=timecol, timegap=tripgap)
    data = data.rename(columns={uid+'_new': 'tripid'})
    data = id_reindex_disgap(
        data, col=['tripid', lon, lat], disgap=disgap, suffix='')
    return data
def dataagg(data, shape, col=['Lng', 'Lat', 'count'], accuracy=500):
'''
Aggregate data to traffic zone
Parameters
-------
data : DataFrame
The origin DataFrame
shape : GeoDataFrame
The shape of the traffic zone
col : List
You can either choose to input two columns, i.e., [‘Lng’,’Lat’], or
to input three columns, i.e., [‘Lng’,’Lat’,’count’]”, where count
means the points count
accuracy : number
The idea is to first implement data gridding and then the aggregation.
Here, the grid size will be determined. The less the size is, the
higher the accuracy will have.
Returns
-------
aggresult : GeoDataFrame
Traffic zone. The count column is the output result
data1 : DataFrame
The zone-matched data
'''
if len(col) == 2:
Lng, Lat = col
aggcol = None
else:
Lng, Lat, aggcol = col
shape['index'] = range(len(shape))
shape_unary = shape.unary_union
bounds = shape_unary.bounds
params = area_to_params(bounds, accuracy)
data1 = data.copy()
data1['LONCOL'], data1['LATCOL'] | |
import sys
import os
import unittest
import shutil
from io import StringIO
sys.path.append(".")
from mock_gff3 import Create_generator
import annogesiclib.merge_manual as mm
class TestGensRNAOutput(unittest.TestCase):
def setUp(self):
self.example = Example()
self.test_folder = "test_folder"
if (not os.path.exists(self.test_folder)):
os.mkdir(self.test_folder)
def tearDown(self):
if os.path.exists(self.test_folder):
shutil.rmtree(self.test_folder)
def test_get_primary_locus_tag(self):
out = mm.get_primary_locus_tag(self.example.tsss[-1])
self.assertDictEqual(out[0], {"type": "Primary", "utr": 25,
"locus": "AAA_00004"})
    def test_detect_coverage(self):
        """detect_coverage should report the coverage of both TSSs from the
        wiggle track; the expected 100 and 200 match the coverage step at
        positions 3 (400-300) and 4 (600-400) — presumably the rise at the
        TSS position."""
        # Single track on strain "aaa" with monotonically rising coverage.
        wigs = {"aaa": {"track_1": [{"pos": 1, "coverage": 200},
                                    {"pos": 2, "coverage": 300},
                                    {"pos": 3, "coverage": 400},
                                    {"pos": 4, "coverage": 600},
                                    {"pos": 5, "coverage": 650}]}}
        tss_dict = {"seq_id": "aaa", "source": "Refseq",
                    "feature": "TSS", "start": 3,
                    "end": 3, "phase": ".", "strand": "+", "score": "."}
        attributes_tss = {"ID": "CDS0", "Name": "CDS_0", "type": "Primary",
                          "associated_gene": "AAA_00001",
                          "UTR_length": "Primary_25"}
        ref_dict = {"seq_id": "aaa", "source": "Refseq",
                    "feature": "TSS", "start": 4,
                    "end": 4, "phase": ".", "strand": "+", "score": "."}
        attributes_ref = {"ID": "CDS1", "Name": "CDS_1", "type": "Primary",
                          "associated_gene": "AAA_00002",
                          "UTR_length": "Primary_40"}
        tss = Create_generator(tss_dict, attributes_tss, "gff")
        ref = Create_generator(ref_dict, attributes_ref, "gff")
        tss_cover, ref_cover = mm.detect_coverage(wigs, tss, ref)
        self.assertEqual(tss_cover, 100)
        self.assertEqual(ref_cover, 200)
def test_fix_attributes(self):
tss_dict = {"seq_id": "aaa", "source": "Refseq",
"feature": "TSS", "start": 3,
"end": 3, "phase": ".", "strand": "+", "score": "."}
attributes_tss = {"ID": "CDS0", "Name": "CDS_0", "type": "Primary",
"associated_gene": "AAA_00001",
"utr_length": "Primary_25"}
tss = Create_generator(tss_dict, attributes_tss, "gff")
tss_entry = {"locus": "AAA_00001"}
mm.fix_attributes(tss, tss_entry)
self.assertEqual(tss.attributes["type"], "Secondary")
    def test_del_repeat(self):
        """del_repeat should merge duplicated type labels of each TSS in
        place (e.g. 'Primary,Primary' -> 'Primary'); the second TSS ends up
        as 'Antisense,Primary' — apparently reordered as well."""
        tss_dict = [{"seq_id": "aaa", "source": "Refseq",
                     "feature": "TSS", "start": 3,
                     "end": 3, "phase": ".", "strand": "+", "score": "."},
                    {"seq_id": "aaa", "source": "Refseq",
                     "feature": "TSS", "start": 5,
                     "end": 5, "phase": ".", "strand": "+", "score": "."}]
        attributes_tss = [{"ID": "CDS0", "Name": "CDS_0",
                           "type": "Primary,Primary",
                           "associated_gene": "AAA_00001,AAA_00002",
                           "utr_length": "Primary_25,Primary_200"},
                          {"ID": "CDS1", "Name": "CDS_1",
                           "type": "Primary,Antisense",
                           "associated_gene": "AAA_00003,AAA_00004",
                           "utr_length": "Primary_25,Antisense_NA"}]
        tsss = []
        for index in range(0, 2):
            tsss.append(Create_generator(
                tss_dict[index], attributes_tss[index], "gff"))
        # del_repeat mutates the generators' attributes in place.
        mm.del_repeat(tsss)
        self.assertEqual(tsss[0].attributes["type"], "Primary")
        self.assertEqual(tsss[1].attributes["type"], "Antisense,Primary")
    def test_fix_primary_type(self):
        """When two TSSs are both Primary for the same gene (AAA_00001),
        fix_primary_type should keep only one as Primary and demote the
        other to Secondary — presumably decided from the wiggle coverage;
        confirm against mm.detect_coverage."""
        wigs = {"aaa": {"track_1": [{"pos": 1, "coverage": 200},
                                    {"pos": 2, "coverage": 300},
                                    {"pos": 3, "coverage": 400},
                                    {"pos": 4, "coverage": 600},
                                    {"pos": 5, "coverage": 650},
                                    {"pos": 6, "coverage": 655}]}}
        tss_dict = [{"seq_id": "aaa", "source": "Refseq",
                     "feature": "TSS", "start": 3,
                     "end": 3, "phase": ".", "strand": "+", "score": "."},
                    {"seq_id": "aaa", "source": "Refseq",
                     "feature": "TSS", "start": 5,
                     "end": 5, "phase": ".", "strand": "+", "score": "."}]
        # Both TSSs claim Primary for AAA_00001 — the conflict under test.
        attributes_tss = [{"ID": "CDS0", "Name": "CDS_0",
                           "type": "Primary,Primary",
                           "associated_gene": "AAA_00001,AAA_00002",
                           "utr_length": "Primary_25,Primary_200"},
                          {"ID": "CDS1", "Name": "CDS_1",
                           "type": "Primary,Antisense",
                           "associated_gene": "AAA_00001,AAA_00004",
                           "utr_length": "Primary_27,Antisense_NA"}]
        tsss = []
        for index in range(0, 2):
            tsss.append(Create_generator(
                tss_dict[index], attributes_tss[index], "gff"))
        mm.fix_primary_type(tsss, wigs, "test")
        self.assertEqual(tsss[0].attributes["type"], "Primary")
        self.assertEqual(tsss[1].attributes["type"], "Antisense,Secondary")
    def test_remove_primary(self):
        """remove_primary should strip the Primary annotation together with
        its UTR length and associated gene from a multi-type TSS entry,
        leaving only the Internal annotation."""
        tss_dict = {"seq_id": "aaa", "source": "Refseq",
                    "feature": "TSS", "start": 3,
                    "end": 3, "phase": ".", "strand": "+", "score": "."}
        attributes_tss = {"ID": "CDS0", "Name": "CDS_0",
                          "type": "Primary,Internal",
                          "associated_gene": "AAA_00001,AAA_00004",
                          "utr_length": "Primary_25,Internal_NA"}
        tss = Create_generator(tss_dict, attributes_tss, "gff")
        # tss_entry mirrors (attribute string, parsed attribute dict).
        tss_entry = [tss.attribute_string, {
            "utr_length": "Primary_25,Internal_NA", "type": "Primary,Internal",
            "associated_gene": "AAA_00001,AAA_00004"}]
        tss_output = mm.remove_primary(tss, tss_entry)
        self.assertEqual(tss_output[0],
            'utr_length=Internal_NA;associated_gene=AAA_00004;type=Internal;Name=TSS_3+')
        self.assertDictEqual(tss_output[1], {
            'associated_gene': 'AAA_00004', 'type': 'Internal',
            'Name': 'TSS_3+', 'utr_length': 'Internal_NA'})
    def test_import_to_tss(self):
        """import_to_tss should recompute the Primary UTR length from the
        CDS position (10 - 3 = 7) and rebuild the attribute string and
        parsed dict of the entry."""
        tss_dict = {"seq_id": "aaa", "source": "Refseq",
                    "feature": "TSS", "start": 3,
                    "end": 3, "phase": ".", "strand": "+", "score": "."}
        attributes_tss = {"ID": "CDS0", "Name": "CDS_0",
                          "type": "Primary,Internal",
                          "associated_gene": "AAA_00001,AAA_00004",
                          "utr_length": "Primary_25,Internal_NA"}
        tss = Create_generator(tss_dict, attributes_tss, "gff")
        # tss_entry mirrors (attribute string, parsed attribute dict).
        tss_entry = [tss.attribute_string, {
            "utr_length": "Primary_25,Internal_NA", "type": "Primary,Internal",
            "associated_gene": "AAA_00001,AAA_00004"}]
        tss_type = "Primary"
        cds_pos = 10
        locus_tag = "AAA_00001"
        output = mm.import_to_tss(tss_type, cds_pos, tss, locus_tag, tss_entry)
        self.assertEqual(output[0],
            'utr_length=Primary_7,Internal_NA;associated_gene=AAA_00001,AAA_00004;type=Primary,Internal;Name=TSS_3+')
        self.assertDictEqual(output[1], {
            'Name': 'TSS_3+', 'utr_length': 'Primary_7,Internal_NA',
            'type': 'Primary,Internal', 'associated_gene': 'AAA_00001,AAA_00004'})
    def test_same_strand_tss_gene(self):
        """For a downstream gene on the same strand, the TSS entry should be
        rewritten as Primary for that gene with the UTR length recomputed
        from the gap to the gene start (6 - 3 = 3)."""
        tss_dict = {"seq_id": "aaa", "source": "Refseq",
                    "feature": "TSS", "start": 3,
                    "end": 3, "phase": ".", "strand": "+", "score": "."}
        attributes_tss = {"ID": "TSS0", "Name": "TSS_0",
                          "type": "Primary,Internal",
                          "associated_gene": "AAA_00001,AAA_00004",
                          "utr_length": "Primary_25,Internal_NA"}
        tss = Create_generator(tss_dict, attributes_tss, "gff")
        # tss_entry mirrors (attribute string, parsed attribute dict).
        tss_entry = [tss.attribute_string, {
            "utr_length": "Primary_25,Internal_NA", "type": "Primary,Internal",
            "associated_gene": "AAA_00001,AAA_00004"}]
        # Bookkeeping passed by reference and updated by the function
        # under test — NOTE(review): exact semantics not visible here.
        anti_ends = {"forward": 1, "reverse": -1}
        gene_ends = {"forward": -1, "reverse": -1}
        gff_dict = {"seq_id": "aaa", "source": "Refseq",
                    "feature": "CDS", "start": 6,
                    "end": 12, "phase": ".", "strand": "+", "score": "."}
        attributes_gff = {"ID": "CDS0", "Name": "CDS_0",
                          "locus_tag": "AAA_00001"}
        gene = Create_generator(gff_dict, attributes_gff, "gff")
        checks = {"orphan": False, "int_anti": False}
        output = mm.same_strand_tss_gene(gene, tss, anti_ends, gene_ends,
                                         checks, tss_entry)
        self.assertEqual(output[0],
            'utr_length=Primary_3,Internal_NA;associated_gene=AAA_00001,AAA_00004;type=Primary,Internal;Name=TSS_3+')
        self.assertDictEqual(output[1], {
            'Name': 'TSS_3+', 'utr_length': 'Primary_3,Internal_NA',
            'type': 'Primary,Internal', 'associated_gene': 'AAA_00001,AAA_00004'})
    def test_diff_strand_tss_gene(self):
        """A nearby gene on the opposite strand should add an Antisense
        annotation (gene AAA_00005) to the TSS entry alongside the existing
        Primary one."""
        tss_dict = {"seq_id": "aaa", "source": "Refseq",
                    "feature": "TSS", "start": 3,
                    "end": 3, "phase": ".", "strand": "+", "score": "."}
        attributes_tss = {"ID": "TSS0", "Name": "TSS_0",
                          "type": "Primary,Internal",
                          "associated_gene": "AAA_00001,AAA_00004",
                          "utr_length": "Primary_25,Internal_NA"}
        tss = Create_generator(tss_dict, attributes_tss, "gff")
        # tss_entry mirrors (attribute string, parsed attribute dict).
        tss_entry = [tss.attribute_string, {
            "utr_length": "Primary_25", "type": "Primary",
            "associated_gene": "AAA_00001"}]
        # Bookkeeping passed by reference and updated by the function
        # under test — NOTE(review): exact semantics not visible here.
        anti_ends = {"forward": 1, "reverse": -1}
        gene_ends = {"forward": -1, "reverse": -1}
        gff_dict = {"seq_id": "aaa", "source": "Refseq",
                    "feature": "CDS", "start": 6,
                    "end": 12, "phase": ".", "strand": "-", "score": "."}
        attributes_gff = {"ID": "CDS0", "Name": "CDS_0", "locus_tag": "AAA_00005"}
        gene = Create_generator(gff_dict, attributes_gff, "gff")
        checks = {"orphan": False, "int_anti": False}
        output = mm.diff_strand_tss_gene(gene, tss, anti_ends, gene_ends,
                                         checks, tss_entry)
        self.assertEqual(output[0],
            'utr_length=Primary_25,Antisense_NA;associated_gene=AAA_00001,AAA_00005;type=Primary,Antisense;Name=TSS_3+')
        self.assertDictEqual(output[1], {
            'Name': 'TSS_3+', 'utr_length': 'Primary_25,Antisense_NA',
            'type': 'Primary,Antisense', 'associated_gene': 'AAA_00001,AAA_00005'})
def test_compare_tss_gene(self):
    """Compare a single TSS against the example gene annotation."""
    tss_core = dict(seq_id="aaa", source="Refseq", feature="TSS",
                    start=3, end=3, phase=".", strand="+", score=".")
    tss_attrs = dict(ID="TSS0", Name="TSS_0",
                     type="Primary,Internal",
                     associated_gene="AAA_00001,AAA_00004",
                     UTR_length="Primary_25,Internal_NA")
    tss = Create_generator(tss_core, tss_attrs, "gff")
    output = mm.compare_tss_gene(tss, self.example.genes)
    self.assertEqual(
        output[0],
        'utr_length=Primary_3;associated_gene=AAA_00001;'
        'type=Primary;Name=TSS_3+')
    self.assertDictEqual(output[1], {
        'Name': 'TSS_3+', 'utr_length': 'Primary_3',
        'type': 'Primary', 'associated_gene': 'AAA_00001'})
def test_check_overlap(self):
    """check_overlap: manual vs predicted TSSs at identical coordinates.

    Builds a manual TSS and a predicted TSS at the same position (7)
    plus an upstream TSS at position 3, then exercises both branches
    of mm.check_overlap and checks the returned tuple.
    """
    # Manual TSS at position 7.
    tss_m_dict = {"seq_id": "aaa", "source": "Refseq",
                  "feature": "TSS", "start": 7,
                  "end": 7, "phase": ".", "strand": "+", "score": "."}
    attributes_tss_m = {"ID": "TSS0", "Name": "TSS_0",
                        "type": "Primary,Internal",
                        "associated_gene": "AAA_00001,AAA_00004",
                        "UTR_length": "Primary_25,Internal_NA"}
    tss_m = Create_generator(tss_m_dict, attributes_tss_m, "gff")
    # Predicted TSS at the same position -> overlaps the manual one.
    tss_p_dict = {"seq_id": "aaa", "source": "Refseq",
                  "feature": "TSS", "start": 7,
                  "end": 7, "phase": ".", "strand": "+", "score": "."}
    attributes_tss_p = {"ID": "TSS0", "Name": "TSS_0",
                        "type": "Primary,Internal",
                        "associated_gene": "AAA_00001,AAA_00004",
                        "UTR_length": "Primary_25,Internal_NA"}
    tss_p = Create_generator(tss_p_dict, attributes_tss_p, "gff")
    # Preceding TSS at position 3, used as the comparison point.
    tss_pre_dict = {"seq_id": "aaa", "source": "Refseq",
                    "feature": "TSS", "start": 3,
                    "end": 3, "phase": ".", "strand": "+", "score": "."}
    attributes_tss_pre = {"ID": "TSS0", "Name": "TSS_0",
                          "type": "Primary,Internal",
                          "associated_gene": "AAA_00001,AAA_00004",
                          "UTR_length": "Primary_25,Internal_NA"}
    tss_pre = Create_generator(tss_pre_dict, attributes_tss_pre, "gff")
    # Counters and collections that check_overlap mutates in place.
    nums = {"tss_p": 0, "tss_m": 0, "tss": 0}
    tsss = {"tsss_p":[], "tsss_m": [], "merge": []}
    num_strain = {"aaa": {"overlap": 0, "tsspredator": 0, "manual": 0}}
    overlap_num = 0
    # NOTE(review): first/fourth arguments appear to toggle which TSS set
    # is treated as primary and the fallback position — confirm against
    # mm.check_overlap.
    output = mm.check_overlap(True, tss_pre, nums, False, num_strain,
                              overlap_num, tss_m, tss_p, tsss, 1000,
                              self.example.genes, self.example.genes)
    self.assertEqual(output, (False, 3, 1))
    output = mm.check_overlap(False, tss_pre, nums, 100, num_strain,
                              overlap_num, tss_m, tss_p, tsss, 1000,
                              self.example.genes, self.example.genes)
    self.assertEqual(output, (False, 1000, 0))
def test_intersection(self):
    """Intersect the manual and predicted TSS sets from the example data."""
    counters = {"tss_p": 0, "tss_m": 0, "tss": 0}
    collections_ = {"tsss_m": [], "tsss_p": [], "merge": []}
    # Reset the "print" flag on every example TSS before intersecting.
    for entry in self.example.tsss:
        entry.attributes["print"] = False
    for entry in self.example.tsss2:
        entry.attributes["print"] = False
    collections_["tsss_m"] = self.example.tsss
    collections_["tsss_p"] = self.example.tsss2
    overlap_num, num_strain = mm.intersection(
        collections_, 3, counters, {"aaa": 1000},
        self.example.genes, self.example.genes, "test")
    self.assertEqual(overlap_num, 2)
    self.assertDictEqual(num_strain, {
        'aaa': {'tsspredator': 1, 'overlap': 2, 'manual': 1}})
class Example(object):
tss_dict = [{"seq_id": "aaa", "source": "Refseq",
"feature": "TSS", "start": 3,
"end": 3, "phase": ".", "strand": "+", "score": "."},
{"seq_id": "aaa", "source": "Refseq",
"feature": "TSS", "start": 16,
"end": 16, "phase": ".", "strand": "-", "score": "."},
{"seq_id": "aaa", "source": "Refseq",
"feature": "TSS", "start": 54,
"end": 54, "phase": ".", "strand": "+", "score": "."}]
attributes_tss = [{"ID": "CDS0", "Name": "CDS_0", "type": "Primary",
"associated_gene": "AAA_00001",
"utr_length": "Primary_25"},
{"ID": "CDS1", "Name": "CDS_1", "type": "Internal",
"associated_gene": "AAA_00002",
"utr_length": "Internal_NA"},
{"ID": "CDS2", "Name": "CDS_2",
"type": "Primary,Antisense",
"associated_gene": "AAA_00004,AAA_00006",
"utr_length": "Primary_25,Internal_NA"}]
tss2_dict = [
{"seq_id": "aaa", "source": "Refseq", "feature": "TSS", "start": 3,
"end": 3, "phase": ".", "strand": "+", "score": "."},
{"seq_id": "aaa", "source": "Refseq", "feature": "TSS", "start": 18,
"end": 18, "phase": ".", "strand": "-", "score": "."},
{"seq_id": "aaa", "source": "Refseq", "feature": "TSS", "start": 23,
"end": 23, "phase": ".", "strand": "+", "score": "."}]
attributes_tss2 = [{"ID": "CDS0", "Name": "CDS_0", "type": "Primary",
"associated_gene": "AAA_00001",
"utr_length": "Primary_25"},
{"ID": "CDS1", "Name": "CDS_1", "type": "Internal",
"associated_gene": "AAA_00002",
"utr_length": "Internal_NA"},
{"ID": "CDS2", "Name": "CDS_2",
"type": "Primary,Antisense",
"associated_gene": "AAA_00004,AAA_00006",
"utr_length": "Primary_25,Internal_NA"}]
gff_dict = [{"start": 6, "end": 15, "phase": ".",
"strand": "+", "seq_id": "aaa", "score": ".",
"source": "Refseq", "feature": "gene"},
{"start": 1258, "end": 2234, "phase": ".",
"strand": "+", "seq_id": "aaa", "score": ".",
"source": "Refseq", "feature": "gene"},
{"start": 3544, "end": 6517, "phase": ".",
"strand": "-", "seq_id": "aaa", "score": ".",
"source": "Refseq", "feature": "gene"}]
attributes_gff = [
{"ID": "gene0", "Name": "gene_0", "locus_tag": "AAA_00001"},
{"ID": "gene0", "Name": "gene_1", "locus_tag": "AAA_00002"},
{"ID": "gene1", "Name": "gene_2", "locus_tag": "AAA_00003"}]
tsss = []
tsss2 = []
genes = []
for index in range(0, 3):
tsss.append(Create_generator(
| |
mut=m.mut,
masked=m.masked,
)
mdel_num += 1
upd_mut_list.append(new_m)
ex_stat_[m.exon] = "M"
continue
elif frame_pres and fp_ex_len_cond:
# exon del is frame-preserving and short
masked_m = mask_mut(m) # mask this mutation then
upd_mut_list.append(masked_m)
ex_stat_[m.exon] = "mD" # mD -> minor deletion
else:
# just add the rest of mutations
upd_mut_list.append(m)
# return filtered exon Del/Mis list + updated exon status
return upd_mut_list, ex_stat_
def infer_big_indel_thresholds(ex_lens):
    """For each exon define the big-indel threshold.

    Exons up to BIG_EXON_THR bp get the standard BIG_INDEL_SIZE threshold;
    for longer exons an indel must span ~20% of the exon length to be
    called inactivating.
    """
    if ex_lens is None:
        # no exon length data -> no thresholds to infer
        return {}
    return {
        exon_num: BIG_INDEL_SIZE if exon_len <= BIG_EXON_THR
        else int(exon_len / 5)
        for exon_num, exon_len in ex_lens.items()
    }
def get_exon_pairs(exon_stat):
    """Get pairs of I exons separated by Deleted exons.

    Example: for statuses X-I-D-D-I-I-D-I-I-M-D-I (index 0 is a
    placeholder) the result is [(1, 4), (5, 7)]; the pair (8, 11) is
    dropped because a Missing exon interrupts the run of deletions.
    """
    result = []
    open_start = None  # index of the intact exon that opened a pair
    for idx in range(1, len(exon_stat)):
        status = exon_stat[idx]
        if status == "I":
            if open_start is not None:
                # an intact exon closes the currently open pair
                result.append((open_start, idx))
                open_start = None
        elif status == "M":
            # a missing exon interrupts the run: drop the pending pair
            open_start = None
        elif status in ("D", "mD"):
            # deleted (or masked-deleted) exon: open a pair only if it
            # directly follows an intact exon
            if open_start is None and exon_stat[idx - 1] == "I":
                open_start = idx - 1
    return result
def detect_split_stops(codon_table, gene, q_name, exon_stat):
    """Considering all exon deletions find all split stop codons.

    A stop codon may be "split" across a run of deleted exons: its first
    bases come from the split codon at the end of the intact exon
    upstream of the deletion and the rest from the split codon at the
    start of the intact exon downstream.  Each such stop is reported as
    a STOP mutation assigned to the downstream exon.
    """
    # we need to get pairs of intact (not deleted or missing) exons
    # between what there is a row of Deleted exons (but not missing)
    # if there is a missing exon in between -> do not make any conclusions
    # split stop codons may occur between these pairs
    i_exon_pairs = get_exon_pairs(exon_stat)
    if len(i_exon_pairs) == 0:
        # no such pairs -> no way to detect split stop
        return []  # return nothing
    mut_num = 1
    muts = []
    for pair in i_exon_pairs:
        first_exon = pair[0]
        second_exon = pair[1]
        # in codon table numbers are 0-based, so correct
        c_f_exon = first_exon - 1
        c_s_exon = second_exon - 1
        # get split codon for first exon
        # there are two exons, N and M, between them - deleted guys
        # exon_NX -------- Xexon_M
        # X marks split codons that potentially contain stop
        # to take last codon of N, if it's split, I should take 0'st codon for exon N+1
        # if it's split -> it goes to N + 1 exon with "split" != 0 field
        # for exon M -> just take the 0'st codon
        try:
            f_ex_split = [c for c in codon_table if c["t_exon_num"] == c_f_exon + 1][0]
            s_ex_split = [c for c in codon_table if c["t_exon_num"] == c_s_exon][0]
        except IndexError:
            # case of ultrashort exons (1-2bp)
            # better to avoid any conclusions
            continue
        if f_ex_split["split_"] == 0 or s_ex_split["split_"] == 0:
            # one of those codons is complete -> split stop is impossible
            continue
        # cut corresponding seq
        # "split_" is presumably the number of bases of this codon that
        # belong to the previous exon — TODO confirm against parse_cesar_out
        f_ex_seq = f_ex_split["que_codon"][: f_ex_split["split_"]]
        s_ex_seq = s_ex_split["que_codon"][s_ex_split["split_"] :]
        split_codon_seq = f_ex_seq + s_ex_seq
        # scan the joined sequence triplet-by-triplet for stop codons
        split_triplets = parts(split_codon_seq, 3)
        stops_in = [x for x in split_triplets if x in STOPS]
        if len(stops_in) == 0:
            # no stops on split
            continue
        # there are stop codons!
        mut_ = stops_in[0]
        # ex_num = second_exon if s_part_len > f_part_len else first_exon
        mut_id = f"SP_STOP_{mut_num}"
        mut_num += 1
        mut = Mutation(
            gene=gene,
            chain=q_name,
            exon=second_exon,
            position=0,
            mclass=STOP,
            mut=mut_,
            masked=False,
            mut_id=mut_id,
        )
        muts.append(mut)
    return muts
def get_out_of_borders_prop(codon_table, miss_exons):
    """Compute the proportion of out-of-chain-borders bases.

    Returns the fraction of codon-table entries whose exon is listed in
    miss_exons; 0.0 for an empty codon table.
    """
    total_codons = len(codon_table)
    if total_codons == 0:
        # empty table -> avoid division by zero
        return 0.0
    # count codons belonging to each missing exon
    missing_codons = sum(
        sum(1 for codon in codon_table if codon["t_exon_num"] == exon)
        for exon in miss_exons
    )
    return missing_codons / total_codons
def inact_mut_check(
cesar_data,
u12_introns=None,
v=False,
gene="None",
ex_prop=None,
ref_ss=None,
sec_codons=None,
no_fpi=False,
):
"""Detect inactivating mutations in the CESAR output."""
# read cesar output
# note that CESAR accepts only one set of reference exons but
# also it can process multiple query sequences
# so we need check them one-by-one
cesar_fractions = read_cesar_out(cesar_data)
# parse U12 data
u12_introns_data = parse_u12_opt(gene, u12_introns)
if ref_ss:
# non-canonical splice sites in the reference
# process them as U12 splice sites -> the same logic
u12_introns_data = u12_introns_data.union(ref_ss)
# for each projection (CESAR unit) we extract 6
# features + list of inactivating mutations (if they exist)
# initiate lists/ dicts for them
p_intact_ignore_M = {}
p_intact_intact_M = {}
middle_80_intact = {}
middle_80_present = {}
i_codons_prop = {}
out_of_b_vals = {}
mutations = []
ex_prop_provided = ex_prop is not None
for cesar_fraction in cesar_fractions:
# analyze cesar fractions one-by-one
fraction_mutations = [] # save inact mutations for this fraction here
# cesar_fraction: (query_name, ref_sequence, query_sequence)
q_name = cesar_fraction[0] # need to distinguish with other queries
# if called by TOGA: q_name is numeric (basically just chainID)
q_name_d_key = int(q_name) if q_name.lstrip("-").isdigit() else q_name
ref = cesar_fraction[1]
query = cesar_fraction[2]
if v:
eprint(
f"Detecting inactivating mutations for query: {q_name} ({q_name_d_key})"
)
eprint(
f"Types of q_name: {type(q_name)}/ of q_name_d_key: {type(q_name_d_key)}"
)
# parse additional information provided by CESAR wrapper
# chain_to_exon_to_properties = (chain_exon_class, chain_exon_gap, pIDs, pBl, chain_missed)
if ex_prop is None:
# if not provided: create empty dicts
exon_class = {}
exon_gap = {}
exon_pid = {}
exon_blosum = {}
missing_exons = {}
ex_inc = {}
ex_lens = {}
else:
# if provided: extract data related to this query
exon_class = ex_prop[0].get(q_name_d_key, {})
exon_gap = ex_prop[1].get(q_name_d_key, {})
exon_pid = ex_prop[2].get(q_name_d_key, {})
exon_blosum = ex_prop[3].get(q_name_d_key, {})
missing_exons = ex_prop[4].get(q_name_d_key, set())
ex_inc = ex_prop[5].get(q_name_d_key, {})
ex_lens = ex_prop[6]
# if not exon_class:
# err_msg = f"Cannot find CESAR wrapper features for query {q_name}"
# raise ValueError(err_msg)
# now we extract inactivation mutations
# then add them to fraction_mutations list
# extract splice site mutations
sps_mutations = analyse_splice_sites(
ref, query, gene, q_name, u12_introns_data, v=v
)
fraction_mutations.extend(sps_mutations)
# create codon table to extract other mutations
# codon table: list of objects, describing a codon
# such as sequence in reference and query, exon number and so on
codon_table = parse_cesar_out(ref, query)
# next loop -> for deleted/missed exons
if ex_prop: # if extra data provided by CESAR wrapper we can classify exons
exon_del_miss_, exon_stat_ = classify_exons(
gene,
q_name,
codon_table,
exon_class,
exon_gap,
exon_pid,
exon_blosum,
missing_exons,
ex_inc,
v=v,
)
# get lists of deleted/missing exons
# also find "safe" exon deletions: if they are in-frame
# or a series of exon deletions in a row is frame-preserving
exon_del_miss, exon_stat = find_safe_ex_dels(
exon_del_miss_, exon_stat_, ex_lens, no_fpi=no_fpi
)
fraction_mutations.extend(exon_del_miss) # also add this
else:
# we don't have extra exons data
# will just skip this part
exon_stat = None
pass
# big indels may be classified as inactivating mutations
# but the bigger the exon: the bigger an indel should be
# define the thresholds
big_indel_thrs = infer_big_indel_thresholds(ex_lens)
# scan reading frame (codon table) for the rest on inact mutations
inact_muts = scan_rf(
codon_table,
gene,
q_name,
exon_stat=exon_stat,
v=v,
big_indel_thrs=big_indel_thrs,
sec_codons=sec_codons,
no_fpi=no_fpi,
)
# save this data
fraction_mutations.extend(inact_muts)
# get a list of split stop codons: stop codons that appear after exon deletions
# such as:
# GCAAACGCAGCt-------------[DELETED EXON]-------agTCCCATTTCCAACTGATC
# exon deletion raises an inframe stop codon: t + ag
split_stop_codons = detect_split_stops(codon_table, gene, q_name, exon_stat)
fraction_mutations.extend(split_stop_codons)
# detect compensated frameshifts
compensations = detect_compensations(inact_muts, codon_table)
fraction_mutations.extend(compensations)
# also mask compensated frameshifts:
| |
index):
"""Loads 1 image from dataset, returns img, original hw, resized hw
:param self: self
:param index: index
:return: img, original hw, resized hw
"""
img = self.imgs[index]
if img is None: # not cached images
path = self.img_files[index]
img = cv2.imread(path) # BGR
if img is None:
raise AssertionError(f'Image Not Found {path}')
orig_h, orig_w = img.shape[:2]
size_ratio = self.img_size / max(orig_h, orig_w) # resize image to img_size
if size_ratio != 1: # always resize down, only resize up if training with augmentation
interp = cv2.INTER_AREA if size_ratio < 1 and self.augment else cv2.INTER_LINEAR
img = cv2.resize(img, (int(orig_w * size_ratio), int(orig_h * size_ratio)), interpolation=interp)
return img, (orig_h, orig_w), img.shape[:2]
return self.imgs[index], self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized
def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
    """Augment colorspace: random HSV jitter applied to img in place.

    :param img: BGR image (uint8), modified in place
    :param hgain: Hue gain
    :param sgain: Saturation gain
    :param vgain: Value gain
    :return: None
    """
    # one random multiplier per channel, centered around 1
    gains = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1
    hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
    dtype = img.dtype  # uint8
    base = np.arange(0, 256, dtype=np.int16)
    # per-channel lookup tables; hue wraps at 180 (OpenCV hue range)
    lut_hue = ((base * gains[0]) % 180).astype(dtype)
    lut_sat = np.clip(base * gains[1], 0, 255).astype(dtype)
    lut_val = np.clip(base * gains[2], 0, 255).astype(dtype)
    remapped = cv2.merge(
        (cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))
    ).astype(dtype)
    # convert back to BGR, writing directly into img (no return needed)
    cv2.cvtColor(remapped, cv2.COLOR_HSV2BGR, dst=img)
def load_mosaic(self, index):
    """Loads images in a mosaic

    Builds one 2*img_size square canvas from 4 images (the requested
    index plus 3 random ones), pastes each into a quadrant around a
    random center, shifts the labels accordingly, then applies
    random_perspective augmentation.

    :param self: self (dataset; provides imgs, labels, img_size, hyp, ...)
    :param index: index
    :return: img4, labels4
    """
    labels4 = []
    img_size = self.img_size
    # random mosaic center inside the central region of the 2x canvas
    # NOTE(review): values are unpacked as (y_center, x_center) but later
    # used as x/y respectively — works only because the canvas is square;
    # confirm intended order.
    y_center, x_center = [int(random.uniform(-x, 2 * img_size + x)) for x in self.mosaic_border]  # mosaic center x, y
    indices = [index] + [self.indices[random.randint(0, self.num_img - 1)] for _ in range(3)]  # 3 additional img indice
    for i, idx in enumerate(indices):
        img, _, (height, width) = load_image(self, idx)
        # place img in img4
        if i == 0:  # top left
            # allocate the 2x canvas filled with gray (114) on first image
            img4 = np.full((img_size * 2, img_size * 2, img.shape[2]), 114, dtype=np.uint8)  # base image with 4 tiles
            x1a, y1a, x2a, y2a = max(x_center - width, 0), max(y_center - height, 0), x_center, y_center
            x1b, y1b, x2b, y2b = width - (x2a - x1a), height - (y2a - y1a), width, height  # xmin, ymin, xmax, ymax
        elif i == 1:  # top right
            x1a, y1a, x2a, y2a = x_center, max(y_center - height, 0), min(x_center + width, img_size * 2), y_center
            x1b, y1b, x2b, y2b = 0, height - (y2a - y1a), min(width, x2a - x1a), height
        elif i == 2:  # bottom left
            x1a, y1a, x2a, y2a = max(x_center - width, 0), y_center, x_center, min(img_size * 2, y_center + height)
            x1b, y1b, x2b, y2b = width - (x2a - x1a), 0, width, min(y2a - y1a, height)
        elif i == 3:  # bottom right
            x1a, y1a, x2a, y2a = x_center, y_center, min(x_center + width, img_size * 2), \
                min(img_size * 2, y_center + height)
            x1b, y1b, x2b, y2b = 0, 0, min(width, x2a - x1a), min(y2a - y1a, height)
        # paste the source crop (b coords) into the canvas (a coords)
        img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b]  # img4[ymin:ymax, xmin:xmax]
        padw = x1a - x1b
        padh = y1a - y1b
        # Labels: shift this image's labels into canvas coordinates
        label = self.labels[idx]
        label4 = label.copy()
        if label.size > 0:  # Normalized xywh to pixel xyxy format
            label4[:, 1] = width * (label[:, 1] - label[:, 3] / 2) + padw
            label4[:, 2] = height * (label[:, 2] - label[:, 4] / 2) + padh
            label4[:, 3] = width * (label[:, 1] + label[:, 3] / 2) + padw
            label4[:, 4] = height * (label[:, 2] + label[:, 4] / 2) + padh
        labels4.append(label4)
    # Concat/clip labels
    if labels4:
        labels4 = np.concatenate(labels4, 0)
        np.clip(labels4[:, 1:], 0, 2 * img_size, out=labels4[:, 1:])  # use with random_perspective
    # Augment: warp canvas + labels, cropping back to img_size via border
    img4, labels4 = random_perspective(img4, labels4,
                                       degrees=self.hyp['degrees'],
                                       translate=self.hyp['translate'],
                                       scale=self.hyp['scale'],
                                       shear=self.hyp['shear'],
                                       perspective=self.hyp['perspective'],
                                       border=self.mosaic_border)  # border to remove
    return img4, labels4
def random_perspective(img, targets=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0, border=(0, 0)):
    """random perspective

    torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))

    :param img: image
    :param targets: [cls, xyxy]
    :param degrees: range of degrees to select from
    :param translate: tuple of maximum absolute fraction for horizontal and vertical translations
    :param scale: max relative scale jitter; scale factor drawn from [1 - scale, 1 + scale]
    :param shear: range of degrees to select from
    :param perspective: perspective transformation
    :param border: border
    :return: img, targets
    """
    # output size after removing the border (negative border crops)
    height = img.shape[0] + border[0] * 2  # shape(h,w,c)
    width = img.shape[1] + border[1] * 2
    # Center: move image center to the origin before rotating/shearing
    center = np.eye(3)
    center[0, 2] = -img.shape[1] / 2  # x translation (pixels)
    center[1, 2] = -img.shape[0] / 2  # y translation (pixels)
    # Perspective
    perspectives = np.eye(3)
    perspectives[2, 0] = random.uniform(-perspective, perspective)  # x perspective (about y)
    perspectives[2, 1] = random.uniform(-perspective, perspective)  # y perspective (about x)
    # Rotation and Scale
    rotation = np.eye(3)
    r_angle = random.uniform(-degrees, degrees)
    r_scale = random.uniform(1 - scale, 1 + scale)
    rotation[:2] = cv2.getRotationMatrix2D(angle=r_angle, center=(0, 0), scale=r_scale)
    # Shear
    shears = np.eye(3)
    shears[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # x shear (deg)
    shears[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # y shear (deg)
    # Translation (recenters the image plus random jitter)
    translation = np.eye(3)
    translation[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width  # x translation (pixels)
    translation[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height  # y translation (pixels)
    # Combined rotation matrix
    matrix = translation @ shears @ rotation @ perspectives @ center  # order of operations (right to left) is IMPORTANT
    if (border[0] != 0) or (border[1] != 0) or (matrix != np.eye(3)).any():  # image changed
        if perspective:
            img = cv2.warpPerspective(img, matrix, dsize=(width, height), borderValue=(114, 114, 114))
        else:  # affine
            # affine warp uses only the top 2 rows of the 3x3 matrix
            img = cv2.warpAffine(img, matrix[:2], dsize=(width, height), borderValue=(114, 114, 114))
    # Transform label coordinates
    num_targets = len(targets)
    if num_targets:
        # warp points: all 4 corners of each box through the same matrix
        box_xy = np.ones((num_targets * 4, 3))
        box_xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(num_targets * 4, 2)  # x1y1, x2y2, x1y2, x2y1
        box_xy = box_xy @ matrix.T  # transform
        if perspective:
            box_xy = (box_xy[:, :2] / box_xy[:, 2:3]).reshape(num_targets, 8)  # rescale
        else:  # affine
            box_xy = box_xy[:, :2].reshape(num_targets, 8)
        # create new boxes: axis-aligned bounding box of the warped corners
        box_x = box_xy[:, [0, 2, 4, 6]]
        box_y = box_xy[:, [1, 3, 5, 7]]
        box_xy = np.concatenate((box_x.min(1), box_y.min(1), box_x.max(1), box_y.max(1))).reshape(4, num_targets).T
        # clip boxes
        box_xy[:, [0, 2]] = box_xy[:, [0, 2]].clip(0, width)
        box_xy[:, [1, 3]] = box_xy[:, [1, 3]].clip(0, height)
        # filter candidates: drop boxes made degenerate by the warp
        filter_candidate = _box_candidates(box1=targets[:, 1:5].T * r_scale, box2=box_xy.T)
        targets = targets[filter_candidate]
        targets[:, 1:5] = box_xy[filter_candidate]
    return img, targets
def _box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1): # box1(4,n), box2(4,n)
"""
Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio
"""
box1_w, box1_h = box1[2] - box1[0], box1[3] - box1[1]
box2_w, box2_h = box2[2] - box2[0], box2[3] - box2[1]
aspect_ratio = np.maximum(box2_w / (box2_h + 1e-16), box2_h / (box2_w + 1e-16))
return (box2_w > wh_thr) & (box2_h > wh_thr) & (box2_w * box2_h / (box1_w * box1_h + 1e-16) > area_thr) & \
(aspect_ratio < ar_thr)
def _letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scalefill=False, scaleup=True):
"""
Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232
"""
shape = img.shape[:2] # current shape [height, width]
if isinstance(new_shape, int):
new_shape = (new_shape, new_shape)
# Scale ratio (new / old)
scale_ratio = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
if not scaleup: # only scale down, do not scale up (for better test mAP)
scale_ratio = min(scale_ratio, 1.0)
# Compute padding
ratio = scale_ratio, scale_ratio # width, height ratios
new_unpad = int(round(shape[1] * scale_ratio)), int(round(shape[0] * scale_ratio))
padding_w, padding_h = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding
if auto: # minimum rectangle
padding_w, padding_h = np.mod(padding_w, 32), np.mod(padding_h, 32) # wh padding
elif scalefill: # stretch
padding_w, padding_h = 0.0, 0.0
new_unpad = (new_shape[1], new_shape[0])
ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios
padding_w /= 2 # divide padding into 2 sides
padding_h /= 2
if shape[::-1] != new_unpad: # resize
img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
top, bottom = int(round(padding_h - 0.1)), int(round(padding_h + 0.1))
left, right = int(round(padding_w - 0.1)), int(round(padding_w + 0.1))
img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border
return img, | |
from __future__ import unicode_literals
from builtins import bytes, dict, list, int, float, str
import json
import re
import traceback
# Library version string.
version = '0.7.6'
# JSON-RPC wire type names recognized by this library.
json_types = ['int', 'bool', 'float', 'string', 'array', 'hash', 'base64']
def json2py(json_type):
    """Map a JSON-RPC type name to the Python type name, or None if unknown."""
    type_map = {
        'bool': 'bool',
        'int': 'int',
        'float': 'float',
        'string': 'str',
        'array': 'list',
        'hash': 'dict',
        'base64': 'str',
    }
    return type_map.get(json_type)
def isstring(value):
    """Return True if value is a text string (str, or unicode on Python 2)."""
    typename = type(value).__name__
    return typename == 'str' or typename == 'unicode'
class JsonRpcError(Exception):
    """
    Generic JSON-RPC error class

    Every exception that may be serialized and returned to the caller over
    JSON-RPC must derive from this class; all other exceptions are
    suppressed for security reasons.

    Example:
        The JSON representation of this error looks like this::

            {"name": "JsonRpcError", "message": "Your error message"}
    """

    def __init__(self, msg):
        """
        Constructor

        Args:
            msg (str): Error message
        """
        self.msg = msg
        # wire name defaults to the concrete exception class name
        self.name = type(self).__name__

    def __str__(self):
        return '%s: %s' % (self.name, self.msg)

    def to_dict(self):
        """
        Convert the error to a dictionary

        Returns:
            dict: Dictionary representing this error
        """
        return {'name': self.name, 'message': self.msg}
class JsonRpcInvalidRequest(JsonRpcError):
    """
    JSON-RPC error class for invalid requests

    Example:
        The JSON representation of this error looks like this::

            {"name": "InvalidRequest", "message": "Your error message"}
    """

    def __init__(self, msg):
        """
        Constructor

        Args:
            msg (str): Error message
        """
        JsonRpcError.__init__(self, msg)
        # the wire name is fixed, regardless of the concrete class name
        self.name = 'InvalidRequest'
class JsonRpcParamError(JsonRpcInvalidRequest):
    """
    JSON-RPC error class for calls with the wrong number of parameters.

    Example:
        The JSON representation of this error looks like this::

            {"name": "ParamError", "message": "Expected [expected_count] parameters for '[function_name]' but got [real_count]""}
    """

    def __init__(self, function_name, expected_count, real_count):
        """
        Constructor

        Args:
            function_name (str): Name of the function the user tried to call
            expected_count (int): Expected number of parameters
            real_count (int): Number of parameters actually received
        """
        self.msg = "Expected %d parameters for '%s' but got %d" % (
            expected_count, function_name, real_count)
        self.name = 'ParamError'
class JsonRpcTypeError(JsonRpcInvalidRequest):
    """
    Generic JSON-RPC error class for requests whose parameters have an
    invalid type.

    Example:
        The JSON representation of this error looks like this::

            {"name": "TypeError", "message": "Your error message"}
    """

    def __init__(self, msg):
        """
        Constructor

        Args:
            msg (str): Error message
        """
        self.msg = msg
        self.name = 'TypeError'
class JsonRpcParamTypeError(JsonRpcInvalidRequest):
    """
    JSON-RPC error class for requests with parameters of invalid type

    Example:
        The JSON representation of this error looks like this::

            {"name": "TypeError", "message": "[function_name]: Expected value of type '[expected_type]' for parameter '[param_name]' but got value of type '[real_type]'"}
    """
    # Fixed: the docstring example previously swapped [param_name] and
    # [expected_type] relative to the actual message format built below.

    def __init__(self, function_name, param_name, expected_type, real_type):
        """
        Constructor

        Args:
            function_name (str): Name of the function the user tried to call
            param_name (str): Name of the parameter
            expected_type (str): Name of the type this parameter expects
            real_type (str): Type of the value the user actually passed
        """
        self.function_name = function_name
        self.param_name = param_name
        self.expected_type = expected_type
        self.real_type = real_type
        self.msg = "%s: Expected value of type '%s' for parameter '%s' but got value of type '%s'" % (function_name, expected_type, param_name, real_type)
        self.name = 'TypeError'
class JsonRpcInternalError(JsonRpcError):
    """
    JSON-RPC error class for internal errors

    Used when error details must stay hidden from the user for security
    reasons (e.g. an exception was raised that does not derive from
    JsonRpcError).

    Example:
        The JSON representation of this error looks like this::

            {"name": "InternalError", "message": "Your error message"}
    """

    def __init__(self, msg):
        """
        Constructor

        Args:
            msg (str): Error message
        """
        self.msg = msg
        self.name = 'InternalError'
class InvalidEnumValueError(Exception):
    """Raised when a value is not a valid member of the expected enum type."""

    def __init__(self, name, expected_type, value):
        # parameter name, enum type name, and the offending value
        self.name, self.expected_type, self.value = name, expected_type, value
class InvalidEnumTypeError(Exception):
    """Raised when a value of a non-enum-compatible type is given for an enum."""

    def __init__(self, name, real_type):
        # parameter name and the type actually received
        self.name, self.real_type = name, real_type
class InvalidNamedHashError(Exception):
    """Raised when a value does not conform to the expected named hash type."""

    def __init__(self, name, expected_type, real_type):
        # parameter name, expected named-hash type, and actual type
        self.name, self.expected_type, self.real_type = (
            name, expected_type, real_type)
class InvalidPrimitiveTypeError(Exception):
    """Raised when a value does not match the expected primitive JSON type."""

    def __init__(self, name, expected_type, real_type):
        # parameter name, expected primitive type, and actual type
        self.name, self.expected_type, self.real_type = (
            name, expected_type, real_type)
class JsonEnumType(object):
    """
    Self-describing enum type: an ordered list of named integers.

    Supports translating between integer values and string names, and
    validating that a given value is a member of the enum.
    """

    def __init__(self, name, description, start=0):
        """
        Constructor

        Args:
            name (str): Name of the enum type, has to start with an upper-case letter
            description (str): Description of this enum type
            start (int): enum integer values are assigned starting from this value

        Raises:
            ValueError: If name doesn't start with an upper-case letter
        """
        if not name[0].isupper():
            raise ValueError("The Name of a custom type has to start with an upper-case letter")
        self.startvalue = start
        self.nextvalue = start  # integer assigned to the next added value
        self.name = name
        self.typ = 'enum'
        self.description = description
        self.values = []  # list of {'name', 'description', 'intvalue'} dicts

    def validate(self, value):
        """
        Check if a string or integer value is a valid value for this enum

        Args:
            value (int|str): Value to check

        Returns:
            bool: True if value is valid, False if not
        """
        return self.resolve_to_intvalue(value) is not None

    def add_value(self, name, description):
        """
        Add a new value to the enum; integer values are assigned sequentially.

        Args:
            name (str): String name of the value
            description (str): Description of this value
        """
        self.values.append({
            'name': name,
            'description': description,
            'intvalue': self.nextvalue,
        })
        self.nextvalue += 1

    def resolve_name(self, name):
        """
        Resolves a string name to its integer value

        Args:
            name (str): String name of a value

        Returns:
            int: Integer value of name on success
            None: If name is not a valid value for this enum

        Raises:
            ValueError: If name is not a string
        """
        if not isstring(name):
            raise ValueError("'name' must be a string but is '%s'" % (type(name).__name__))
        return next(
            (entry['intvalue'] for entry in self.values
             if entry['name'] == name),
            None)

    def resolve_intvalue(self, intvalue):
        """
        Resolves an integer value to its string name

        Args:
            intvalue (int): Integer value to resolve

        Returns:
            str: Name of intvalue on success
            None: If intvalue is not a valid value for this enum

        Raises:
            ValueError: If intvalue is not an integer
        """
        if type(intvalue).__name__ != 'int':
            raise ValueError("'intvalue' must be of type 'int'")
        return next(
            (entry['name'] for entry in self.values
             if entry['intvalue'] == intvalue),
            None)

    def resolve_to_name(self, value):
        """
        Resolves an integer value or string name to the corresponding string name

        Args:
            value (int|str): Integer value or string name

        Returns:
            str: String name if value is a valid enum value
            None: If value is not a valid enum value

        Raises:
            ValueError: If value is neither integer nor string
        """
        if isstring(value):
            # echo the name back only if it is an actual member
            if self.resolve_name(value) is not None:
                return value
            return None
        if type(value).__name__ == 'int':
            return self.resolve_intvalue(value)
        raise ValueError("'value' must be either string or int")

    def resolve_to_intvalue(self, value):
        """
        Resolves an integer value or string name to the corresponding integer value

        Args:
            value (int|str): Integer value or string name

        Returns:
            int: Integer value if value is a valid enum value
            None: If value is not a valid enum value

        Raises:
            ValueError: If value is neither integer nor string
        """
        if isstring(value):
            return self.resolve_name(value)
        if type(value).__name__ == 'int':
            # values are assigned sequentially, so a range check suffices
            if self.startvalue <= value < self.nextvalue:
                return value
            return None
        raise ValueError("'value' must be either string or int")

    def to_dict(self):
        """
        Convert the enum to a dictionary

        Returns:
            dict: Dictionary representing this enum type
        """
        return {
            'name': self.name,
            'type': self.typ,
            'description': self.description,
            'values': self.values,
        }
class JsonHashType(object):
"""
Self-describing named hashes
A named hash is a hash with a description of its members and their types
"""
def __init__(self, name, description):
"""
Constructor
Args:
name (str): Name of the hash type to be defined. Must start with an upper-case letter
description (str): Description of this named hash type
Raises:
ValueError: If name does not start with an upper-case letter
"""
if not name[0].isupper():
raise ValueError("The Name of a custom type has to start with an upper-case letter")
self.name = name
self.typ = 'hash'
self.description = description
self.fields = []
self.fields_dict = {}
self.fieldnames = []
def add_field(self, name, typ, description):
"""
Add a new field to the named hash
Args:
name (str): Name of the field
typ (str): Type of the field (JSON type or enum or named hash)
description (str): | |
"""
Модуль леса
"""
import math
from pygame.draw import *
from random import *
from apple import Apple
from house import House
from tree import Tree
from village import Village
from stick import Stick
class Forest(object):
"""
Описывает лес
"""
    def __init__(self, game):
        """
        Parameters
        game - the game object
        """
        # Logic
        self.logical_dict: dict = {None: None}  # Logical dict is built in forest.setup()
        # Physics
        self.borders_dict = None  # Border dictionaries are built in forest.setup()
        # Horizontal distance between the forest borders in [m], set in forest.setup()
        self.borders_distance_x = None
        # Vertical distance between the forest borders in [m], set in forest.setup()
        self.borders_distance_y = None
        self.draw_distance_max = None  # Maximum draw distance in [m], set in forest.setup()
        self.temperature_passive: float = 265  # Temperature of the empty forest in [K]
        self.scale: int = 35  # Scale in [px/m]
        # Objects
        self.apples_list: list = []  # List of apples
        self.campfires_list: list = []  # List of campfires
        self.game = game
        self.houses_list: list = []  # List of houses
        self.trees_list: list = []  # List of trees
        self.villages_amount: int = 1  # Number of villages
        self.villages_list: list = []  # List of villages
        self.sticks_list: list = []  # List of sticks
        self.border_color: tuple = (185, 250, 250)  # Border colour
        self.border_width: int = 1  # Border width in [px]
        self.color: tuple = (193, 86, 217)  # Forest colour
        # Drawing callbacks per hero status; 'crafts'/'inventory' draw nothing
        self.graphical_dict: dict = {'walk': [self.draw_borders,
                                              self.draw_apples,
                                              self.draw_campfires,
                                              self.draw_houses,
                                              self.draw_sticks,
                                              self.draw_villages,
                                              game.hero.manage_graphics,
                                              self.draw_trees],
                                     'act': [self.draw_borders,
                                             self.draw_apples,
                                             self.draw_campfires,
                                             self.draw_houses,
                                             self.draw_sticks,
                                             self.draw_villages,
                                             game.hero.manage_graphics,
                                             self.draw_trees],
                                     'crafts': [None],
                                     'inventory': [None]}
# --- Инициализация ---
def count_max_distance(self):
"""
Вычисляет размеры экрана в [м]
"""
down_border_dict: dict = self.borders_dict['down'] # Словарь нижней границы
left_border_dict: dict = self.borders_dict['left'] # Словарь левой границы
right_border_dict: dict = self.borders_dict['right'] # Словарь правой границы
up_border_dict: dict = self.borders_dict['up'] # Словарь верхней границы
x_left: float = left_border_dict['value'] # Координата x левой границы в [м]
x_right: float = right_border_dict['value'] # Координата x правой границы в [м]
y_down: float = down_border_dict['value'] # Координата y нижней границы в [м]
y_up: float = up_border_dict['value'] # Координата y верхней границы в [м]
self.borders_distance_x: float = x_right - x_left # Расстояние между границами вдоль оси x в [м]
self.borders_distance_y: float = y_down - y_up # Расстояние между границами по y в [м]
def count_draw_distance(self):
"""
Вычисляет расстояние прорисовки в [м]
"""
screen_height: int = self.game.graphic_engine.screen.get_height() # Высота экрана в [px]
screen_width: int = self.game.graphic_engine.screen.get_width() # Ширина экрана в [px]
# Максимальное расстояние на экране в [px]
max_screen_distance: float = math.sqrt(screen_height ** 2 + screen_width ** 2)
max_distance: float = max_screen_distance / self.scale # Максимальное расстояние до видимого объекта в [м]
self.draw_distance_max: int = math.ceil(max_distance) # Максимальное расстояние прорисовки в []
def create_borders(self):
"""
Создаёт словарь границ
"""
down_border_dict: dict = {'coordinate': 'y',
'value': 50} # Словарь нижней границы леса
left_border_dict: dict = {'coordinate': 'x',
'value': -50} # Словаь левой границы леса
right_border_dict: dict = {'coordinate': 'x',
'value': 50} # Словарь правой границы леса
up_border_dict: dict = {'coordinate': 'y',
'value': -50} # Словарь верхней границы леса
self.borders_dict: dict = {'down': down_border_dict, # Словарь границ
'left': left_border_dict,
'right': right_border_dict,
'up': up_border_dict}
    def generate_apples(self):
        """
        Create the apples, scattered uniformly inside the forest borders.
        """
        apples_amount: int = 64  # Number of apples
        for apple_number in range(apples_amount):
            # x coordinate of the apple in [m]
            x_apple: float = random() * self.borders_distance_x + self.borders_dict['left']['value']
            # y coordinate of the apple in [m]
            y_apple: float = random() * self.borders_distance_y + self.borders_dict['up']['value']
            apple = Apple(self, x_apple, y_apple)  # Apple object
            self.apples_list.append(apple)
    def generate_houses(self):
        """
        Create the houses.

        Makes up to houses_amount placement attempts; a candidate that lands
        closer than distance_min to an already-placed house is discarded, so
        fewer houses than houses_amount may actually be created.
        """
        # Physics
        houses_amount: int = 18  # Number of house placement attempts
        distance_min: float = 5  # Minimum distance between houses in [m]
        for house_number in range(houses_amount):
            generated_too_close: bool = False  # Flag: candidate too close to an existing house
            # Physical x coordinate of the house in [m]
            house_x: float = random() * self.borders_distance_x + self.borders_dict['left']['value']
            # Physical y coordinate of the house in [m]
            house_y: float = random() * self.borders_distance_y + self.borders_dict['up']['value']
            for other in self.houses_list:
                distance: float = self.game.physical_engine.get_physical_distance(house_x, house_y, other.physical_x,
                                                                                  other.physical_y)
                if distance < distance_min:  # If the houses were generated too close to each other
                    generated_too_close: bool = True  # Remember that
            if not generated_too_close:  # If the candidate is far enough from every house
                house = House(self, house_x, house_y)  # House object
                house.setup()
                self.houses_list.append(house)
    def generate_trees(self):
        """
        Create the trees.

        Candidate positions too close to the spawn point, to a house, or to a
        village are rejected, so fewer than trees_amount trees may be created.
        The resulting list is sorted by physical y coordinate (draw order).
        """
        # Objects
        physical_engine = self.game.physical_engine  # Physical engine object
        trees_amount: int = 1250  # Maximum number of trees
        min_distance_from_center: float = 5  # Minimum distance from a tree to the spawn point in [m]
        # draw_allowed: bool = True  # Drawing permission flag
        for tree_number in range(trees_amount):
            generation_allowed: bool = True  # Generation flag
            # Physical x coordinate of the tree in [m]
            tree_physical_x: float = random() * self.borders_distance_x + self.borders_dict['left']['value']
            # Physical y coordinate of the tree in [m]
            tree_physical_y: float = random() * self.borders_distance_y + self.borders_dict['up']['value']
            if math.sqrt(tree_physical_x ** 2 + tree_physical_y ** 2) >= min_distance_from_center:
                for house in self.houses_list:
                    # Distance from the tree to the house in [m]
                    distance: float = physical_engine.get_physical_distance(tree_physical_x, tree_physical_y,
                                                                            house.physical_x, house.physical_y)
                    if distance < house.safe_radius:
                        generation_allowed: bool = False  # Too close to a house
                for village in self.villages_list:
                    # Distance from the tree to the village in [m]
                    distance: float = physical_engine.get_physical_distance(tree_physical_x, tree_physical_y,
                                                                            village.physical_x, village.physical_y)
                    if distance < village.safe_radius:
                        generation_allowed: bool = False  # Too close to a village
                if generation_allowed:
                    tree = Tree(self, tree_physical_x, tree_physical_y)  # Tree object
                    tree.setup()
                    self.trees_list.append(tree)
        self.trees_list.sort(key=lambda sort_tree: sort_tree.physical_y)
    def generate_villages(self, villages_amount: int):
        """
        Create the villages.

        villages_amount - number of villages to generate

        A village that would land within 25 [m] of the spawn point is
        re-rolled by a recursive call with villages_amount=1.
        """
        for village_number in range(villages_amount):
            # Physical x coordinate of the village in [m]
            village_physical_x: float = random() * self.borders_distance_x + self.borders_dict['left']['value']
            # Physical y coordinate of the village in [m]
            village_physical_y: float = random() * self.borders_distance_y + self.borders_dict['up']['value']
            if math.sqrt(village_physical_x ** 2 + village_physical_y ** 2) < 25:  # If the village is near spawn
                self.generate_villages(1)  # Re-generate 1 village from scratch
            else:
                village = Village(self, village_physical_x, village_physical_y)  # Village object
                self.villages_list.append(village)
    def generate_sticks(self):
        """
        Create the sticks, scattered uniformly inside the forest borders.
        """
        sticks_amount: int = 125  # Number of sticks
        for stick_number in range(sticks_amount):
            # x coordinate of the stick in [m]
            stick_x: float = random() * self.borders_distance_x + self.borders_dict['left']['value']
            # y coordinate of the stick in [m]
            stick_y: float = random() * self.borders_distance_y + self.borders_dict['up']['value']
            stick = Stick(self, stick_x, stick_y)  # Stick object
            self.sticks_list.append(stick)
    def set_logical_dict(self):
        """
        Create the dictionary mapping the hero's status to environment actions.
        """
        self.logical_dict: dict = {'walk': [None],  # Logic callbacks per hero status
                                   'act': [self.manage_apples_logic,
                                           self.manage_campfires_logic,
                                           self.manage_houses_logic,
                                           self.manage_villages_logic,
                                           self.manage_sticks_logic],
                                   'crafts': [self.game.hero.inventory.process],
                                   'inventory': [self.game.hero.inventory.process],
                                   'exit': [None]}
    def setup(self):
        """
        Actions performed when the forest is created.

        Order matters: borders must exist before the border distances are
        computed, and houses/villages must exist before trees, because tree
        generation avoids their safe radii.
        """
        self.set_logical_dict()
        self.create_borders()
        self.count_max_distance()
        self.count_draw_distance()
        self.generate_apples()
        self.generate_houses()
        self.generate_villages(self.villages_amount)
        self.generate_trees()
        self.generate_sticks()
# --- Логика ---
def convert_horizontal_m_to_px(self, coordinate_m: float):
"""
Преобразует [м] в [px] по горизонтали
coordinate_m - координата объекта в [м]
"""
distance_m: float = coordinate_m - self.game.hero.x # Расстояние от героя до объекта в [м]
distance_raw: float = distance_m * self.scale # Расстояние в [px]
distance_px: int = round(distance_raw) # Округлённое расстояние в [px]
# Координата объекта в [px]
distance_px_cooked: int = distance_px + self.game.graphic_engine.screen.get_width() // 2
return distance_px_cooked
def convert_vertical_m_to_px(self, coordinate_m: float):
"""
Преобразует [м] в [px] по вертикали
coordinate_m - координата объекта в [м]
"""
distance_m: float = coordinate_m - self.game.hero.y # Расстояние от героя до объекта в [м]
distance_raw: float = distance_m * self.scale # Расстояние в [px]
distance_px: int = round(distance_raw) # Округлённое расстояние в [px]
# Координата объекта в [px]
distance_px_cooked: int = distance_px + self.game.graphic_engine.screen.get_height() // 2
return distance_px_cooked
def manage_apples_logic(self):
"""
Обрабатывает действия героя над яблоками
"""
# Движки
physical_engine = self.game.physical_engine
# Объекты
close_apple = physical_engine.find_close_object(self.apples_list) # Близкое к герою яблоко
if close_apple is not None: # Если существует близкое к герою яблоко
close_apple.manage_logic()
def manage_campfires_logic(self):
"""
Обрабатывает действия героя над кострами
"""
# Движки
physical_engine = self.game.physical_engine
# Объекты
close_campfire = physical_engine.find_close_object(self.campfires_list) # Близкий к герою костёр
if close_campfire is not None: # Если рядом с героем есть костёр
close_campfire.manage_logic()
    def manage_houses_logic(self):
        """
        Handle hero actions on houses.

        Delegates to every house lying within the hero's action radius.
        """
        for house in self.houses_list:
            # Components of the physical distance to the house in [m]
            # NOTE(review): calculate_distance_to_point is defined elsewhere in
            # this class (not visible here); presumably returns a (dx, dy) pair
            # relative to the hero — confirm.
            distance_list: list = self.calculate_distance_to_point(house.physical_x, house.physical_y)
            # Physical distance to the house in [m]
            distance: float = math.sqrt(distance_list[0] ** 2 + distance_list[1] ** 2)
            if distance <= self.game.hero.action_radius:
                house.manage_logic()
def manage_villages_logic(self):
"""
Обрабатывает действия героя над деревнями
"""
# | |
# Global Sinks:
delCStores_SINKS[scenario][Q10][O3[1]]={}
for exp in FactExps:
#print(exp)
delCStores_SINKS[scenario][Q10][O3[1]][exp] = pd.DataFrame(
{ sink: pd.Series({gcm:COMPS[exp][scenario][Q10][O3[1]][sink][gcm].sum()
for gcm in GCMs } ) for sink in StoreSinks } )
            # Append to full uncertainty lists:
delCStores_SINKS_unc[scenario][exp].append(delCStores_SINKS[scenario][Q10][O3[1]][exp])
delCStores_mitSINKS_unc[scenario][exp].append( delCStores_SINKS[scenario][Q10][O3[1]][exp]
-delCStores_SINKS[scenario][Q10][O3[1]]['CTL'] )
# Land uptake on land points
LULUC_mit_landpts = { gcm: COMPS['LULUC_opt'][scenario][Q10][O3[1]]['Land'][gcm]
- COMPS['CTL'][scenario][Q10][O3[1]]['Land'][gcm] for gcm in GCMs}
CCS_mit_landpts = {gcm: COMPS['LULUC_opt'][scenario][Q10][O3[1]]['CCS'][gcm]
- COMPS['CTL'][scenario][Q10][O3[1]]['CCS'][gcm] for gcm in GCMs}
Total_Land_C_med = ( np.median( [LULUC_mit_landpts[gcm] for gcm in GCMs], axis=0 )
+ np.median( [CCS_mit_landpts[gcm] for gcm in GCMs], axis=0 ) )
plot_data = np.ma.masked_array( (Total_Land_C_med/AREA_1D)[grindex], mask = grindex.mask )/kg_to_Gt
PTs.plot_map(plot_data,lons_2d,lats_2d,
DATA_RANGE=[0.1,10.],
COLOURS=['#f6e8c3','#dfc27d','#d8b365','#bf812d','#8c510a'],INTERPOLATE_COLOURS=True,
TickLEVELS=[0.1,2,4,6,8,10],NLEVELS=50,SET_OVER='#8c510a',SET_UNDER='#f5f5f5',
PLOT_TITLE='Total Natural Land and BECCS carbon uptake by 2100',
CBAR_LABEL='kgC m$^{2}$',FONTSIZES=[12,12,14,18],RESOLUTION='c', MAP_TYPE='Mesh',
FILE_PLOT=PLOT_DIR+'Maps/TotalLULUC_Cuptake_Map_'+scenario+'_'+O3[1]+'_'+Q10+'.png',
iCLOSE='Y' )
        # Regional Breakdown of the mitigation options
for iregion in range(REGION_dict['Nregions']):
region =REGION_dict['Name'][iregion]
region_anthFrac=REGION_dict['AnthroFraction'][iregion]
#region_map_index = REGION_dict['Index'][iregion] #Not required as stored in oreder of map index
region_mask=REGIONS_1D==(iregion+1)
if region=='Global': region_mask[:]=True
if region=='International Transportation': region_mask[:]=False
regional_CH4_mit = DF['CH4_mit']*region_anthFrac
regional_Land_mit = pd.Series({gcm:LULUC_mit_landpts[gcm][region_mask].sum() for gcm in GCMs} )
regional_CCS_mit = pd.Series({gcm:CCS_mit_landpts[gcm][region_mask].sum() for gcm in GCMs } )
regional_LULUC_mit = regional_CCS_mit+regional_Land_mit
regional_Linear_mit = regional_CH4_mit+regional_LULUC_mit
DFreg = pd.concat([regional_CH4_mit,regional_Land_mit,regional_CCS_mit,
regional_LULUC_mit,regional_Linear_mit], axis=1)
DFreg.columns = regional_DF_columns
delCStores_regions[scenario][region][Q10][O3[1]] = DFreg
delCStores_regions_unc[scenario][region].append(
delCStores_regions[scenario][region][Q10][O3[1]] )
outfreg.write('%15s: '%(region))
DFreg.describe()[5:6].to_csv(outfreg,header=False,float_format='%10.2f',index=False)
#print(region, DFreg.describe()[5:6])
# compend Global Carbon Stores, full uncertainty to DataFrame and output to csv
delCStores_unc[scenario] = pd.concat(scen_list)
outfunc.write(scenario+': \n')
outfunc.write('Global: \n')
delCStores_unc[scenario].describe().to_csv(outfunc,float_format='%10.2f')
# Compend the global carbon stores by pool to DataFrame and output to csv
delCStores_SINKS_unc[scenario] = pd.concat( { EXP: pd.concat(delCStores_SINKS_unc[scenario][EXP])
for EXP in FactExps }, axis=1)
delCStores_mitSINKS_unc[scenario] = pd.concat( { EXP: pd.concat(delCStores_mitSINKS_unc[scenario][EXP])
for EXP in FactExps }, axis=1)
outfsink.write(scenario+': \n')
outfsink.write('Total Sink: \n')
for EXP in FactExps:
outfsink.write('%16s'%(EXP+': \n'))
delCStores_SINKS_unc[scenario][EXP].describe().to_csv(outfsink,float_format='%10.2f')
outfsink.write('Mitigation Potential: \n')
for EXP in FactExps:
outfsink.write('%16s'%(EXP+': \n'))
delCStores_mitSINKS_unc[scenario][EXP].describe().to_csv(outfsink,float_format='%10.2f')
# Compend the regional breakdown into DataFrame:
delCStores_regions_unc[scenario] = pd.concat({ region: pd.concat(delCStores_regions_unc[scenario][region])
for region in REGION_dict['Name'] }, axis=1 )
# Output regional breakdown to csv file
outfuncreg.write(scenario+': \nMedian of GCMs:\n')
outfuncreg.write('%25s '%('Region,')+nREGcolumns*'%9s,' % tuple(regional_DF_columns)+'\n')
for region in REGION_dict['Name']:
outfuncreg.write('%25s '%(region+','))
delCStores_regions_unc[scenario][region].describe()[5:6].to_csv(outfuncreg,float_format='%10.2f',
header=False,index=False)
outfuncreg.write('\n\n\n25% of GCMs: \n')
outfuncreg.write('%25s '%('Region,')+nREGcolumns*'%9s,' % tuple(regional_DF_columns)+'\n')
for region in REGION_dict['Name']:
outfuncreg.write('%25s '%(region+','))
delCStores_regions_unc[scenario][region].describe()[4:5].to_csv(outfuncreg,float_format='%10.2f',
header=False,index=False)
outfuncreg.write('\n\n\n75% of GCMs: \n')
outfuncreg.write('%25s '%('Region,')+nREGcolumns*'%9s,' % tuple(regional_DF_columns)+'\n')
for region in REGION_dict['Name']:
outfuncreg.write('%25s '%(region+','))
delCStores_regions_unc[scenario][region].describe()[6:7].to_csv(outfuncreg,float_format='%10.2f',
header=False,index=False)
outfunc.close()
outfuncreg.close()
outfsink.close()
outf.close()
outfreg.close()
#ipdb.set_trace()
if True:
Atmoscolor = '#ffff99'
Oceancolor = '#a6cee3'
CCScolor = '#e6ab02'
Landcolor = '#1b9e77'
totLULUCcolor='#66a61e'
CH4color = '#7570b3'
totcolor = '#d95f02'
totcolor_lin = '#a6761d'
CTLcolor = '#666666'
bnwcolor = '#e7298a'
###################################################################################################
# Used for Figure 9 in ESD paper (BECCS scale factors = 1 & 3)
# 2 Bar plots:
# 1a - AFFEB = Delta C stock from Present Day
# 1b - Mitigation potential = AFFEB - Control_AFFEB
# Open fig with 2 wide rows for plotting
if True:
# Open figure
fig,axes = plt.subplots(ncols=1,nrows=2,figsize=[8,6])
fig.subplots_adjust(top=0.94,left=0.07,right=0.9998)
# Y limits for plots, manually selected
AFFEB_limit = [-200,800]
MP_limit = [0,350]
# Plot variables, colours and labels
plotvar_list_tots = ['CTL_tot', 'CH4_tot', 'LULUC_opt_tot', 'Coupled_opt_tot', 'Linear_opt_tot'] #
plotvar_list_mits = ['CTL_mit', 'CH4_mit', 'LULUC_opt_mit', 'Coupled_opt_mit', 'Linear_opt_mit'] #
plotvar_cols = [CTLcolor,CH4color,totLULUCcolor,totcolor,totcolor_lin]#
legend_names = ['Control', 'CH$_4$ Mitigation', 'Land Based Mitigation', 'Coupled', 'Linear'] #
plotvar_list_pools= ['CTL', 'CH4', 'LULUC_opt', 'Coupled_opt', 'Linear_opt' ]
pool_list = ['Atmos', 'Ocean', 'Land', 'CCS']
poollabellist= ['Atmosphere', 'Ocean', 'Land', 'BECCS']
pool_colours = [Atmoscolor,Oceancolor,Landcolor,CCScolor]
npools = len(pool_list)
# Bar spacing options
scenario_space = 0.8 # how much space all the bars should take up for a scenario
nbars = len(plotvar_list_tots) # Number of bars per scenario (i.e. Veg, Soil, Amos)
bar_fraction=0.75 # fraction of bar space to fill with bar, 1 means bars touch
bar_space = (scenario_space/nbars) # total space for a bar
bar_width = (bar_space)*(bar_fraction) # width of bar
bar_gap = (bar_space)*(1.-bar_fraction) # gap between bars
bar_positions = [ -(scenario_space/2.)+(i*bar_space)+(bar_gap/2.) for i in range(nbars) ]
pool_bar_frac = 0.2 # fraction of bar taken up by pool breakdown
pool_bar_width = pool_bar_frac *bar_width # fraction of bar taken up by pool breakdown
pool_pos = bar_width-pool_bar_width
pool_scat_inc = (pool_bar_width/npools)
scatter_pos = pool_pos*0.5 # position of the scatter points for total bar
# Loop and plot
for iscenario in range(nSCENARIOs):
scenario=SCENARIOs[iscenario]
scen_cen_pos = iscenario+0.5
tot_bar_list=[] # Append the bar objects to list for legend
mit_bar_list=[] # Append the bar objects to list for legend
pool_bar_list=[] # Append the bar objects to list for legend
for ibar in range(nbars):
totbar = plotvar_list_tots[ibar]
mitbar = plotvar_list_mits[ibar]
xpos = scen_cen_pos + bar_positions[ibar]
barcolour = plotvar_cols[ibar]
# Plot AFFEB on the first axis:
tot_plotdata = delCStores_unc[scenario][totbar]
tot_bar_list.append(axes[0].bar(xpos,np.median(tot_plotdata),color=barcolour,width=bar_width,))
axes[0].plot([xpos+scatter_pos for i in range(len(tot_plotdata))],tot_plotdata,c='k',ls='',marker='.')
# Plot MP on the scedond axis:
mit_plotdata = delCStores_unc[scenario][mitbar]
mit_bar_list.append(axes[1].bar(xpos,np.median(mit_plotdata),color=barcolour,width=bar_width,))
axes[1].plot([xpos+scatter_pos for i in range(len(mit_plotdata))],mit_plotdata,c='k',ls='',marker='.')
# Overplot pools on the axes:
poolbar = plotvar_list_pools[ibar]
for poolDF,ax in zip([delCStores_SINKS_unc,delCStores_mitSINKS_unc],axes):
plotDF = poolDF[scenario][poolbar]
negative_min,positive_max = 0,0 # vars for stacking bars
if len(pool_bar_list)<npools+1:
pool_bar_list=[] # Append the bar objects to list for legend
else:
pool_bar_list=bar_list[:npools+1]
for ipool in range(npools):
pool = pool_list[ipool]
colour = pool_colours[ipool]
plotdata = plotDF[pool]
median = np.median(plotdata)
if median>0:
pool_bar_list.append(ax.bar(xpos+pool_pos, median,
bottom=positive_max,color=colour,width=pool_bar_width,))
positive_max += median
elif median<0:
pool_bar_list.append(ax.bar(xpos+pool_pos, median,
bottom=negative_min,color=colour,width=pool_bar_width,))
negative_min += median
# Axis options
for iax in range(len(axes)):
ax=axes[iax]
ax.set_xlim([0,nSCENARIOs])
ax.set_xticks(np.arange(0.5,nSCENARIOs,1.))
ax.set_xticklabels(SCENARIO_names,fontsize=14)
ax.tick_params(axis='y',labelsize=14)
ax.plot([0,nSCENARIOs],[0,0],c='k',lw=1)
# labels:
ax.text(0.01,1.05,'('+ALPHABET[iax+Alpha_Offset]+')',
transform=ax.transAxes, fontsize=18, fontweight='bold')
# AFFEB axis options:
axes[0].set_ylabel('AFFEB 2015-2100 (GtC)',fontsize=15)
axes[0].set_ylim(AFFEB_limit)
# MP axis options
axes[1].set_ylim(MP_limit)
axes[1].set_ylabel('Mitigation Potential (GtC)',fontsize=15)
# Legends
if Alpha_Offset == 0:
fig.legend(tot_bar_list,legend_names, loc='upper right', ncol=nbars+1,
fontsize=12, columnspacing=1.5, handlelength=1, handletextpad = 0.5 )
else:
fig.legend(pool_bar_list,poollabellist, loc='upper right', ncol=npools+1,
fontsize=12, columnspacing=1.5, handlelength=1, handletextpad = 0.5 )
#Save figs:
fig.savefig(PLOT_DIR+'BarCharts/Figure_1.png',bbox_inches='tight') # Store as png
fig.savefig(PLOT_DIR+'BarCharts/Figure_1.eps',bbox_inches='tight') # Store as eps
fig.savefig(PLOT_DIR+'BarCharts/Figure_1.pdf',bbox_inches='tight') # Store as pdf
#plt.show()
plt.close()
#ipdb.set_trace()
############################################################################################################
###################################################################################################
# Figure for synthesis paper.
# 2 Bar plots:
# 1a - AFFEB = Delta C stock from Present Day
# 1b - Mitigation potential = AFFEB - Control_AFFEB
# Open fig with 2 wide rows for plotting
if True:
# Open figure
fig,axes = plt.subplots(ncols=1,nrows=2,figsize=[8,6])
fig.subplots_adjust(top=0.95,left=0.07,right=0.92)
# Plot variables, colours and labels
plotvar_list_tots = ['CTL_tot', 'CH4_tot', 'LULUC_opt_tot', 'Coupled_opt_tot', 'Linear_opt_tot'] #
plotvar_list_mits = ['CTL_mit', 'CH4_mit', 'LULUC_opt_mit', 'Coupled_opt_mit', 'Linear_opt_mit'] #
plotvar_cols = [CTLcolor,CH4color,totLULUCcolor,totcolor,totcolor_lin]#
legend_names = ['Control', 'CH$_4$ Mitigaiton', 'Land Based Mitigation', 'Coupled', 'Linear'] #
plotvar_list_pools= ['CTL', 'CH4', 'LULUC_opt', 'Coupled_opt', 'Linear_opt' ]
pool_list = ['Atmos', 'Ocean', 'Land', 'CCS']
poollabellist= ['Atmosphere', 'Ocean', 'Land', 'BECCS']
pool_colours = [Atmoscolor,Oceancolor,Landcolor,CCScolor]
npools = len(pool_list)
# Bar spacing options
scenario_space = 0.9 # how much space all the bars should take up for a scenario
nbars = len(plotvar_list_tots) # Number of bars per scenario (i.e. Veg, Soil, Amos)
bar_fraction=0.8 # fraction of bar space to fill with bar, 1 means bars touch
bar_space = (scenario_space/nbars) # total space for a bar
bar_width = (bar_space)*(bar_fraction) # width of bar
bar_gap = (bar_space)*(1.-bar_fraction) # gap between bars
bar_positions = [ -(scenario_space/2.)+(i*bar_space)+(bar_gap/2.) for i in range(nbars) ]
pool_bar_frac = 0.4 # fraction of bar taken up by pool breakdown
pool_bar_width = pool_bar_frac *bar_width # fraction of bar taken up by pool breakdown
pool_pos = bar_width-pool_bar_width
pool_scat_inc = (pool_bar_width/npools)
scatter_pos = pool_pos*0.5 # position of the scatter points for total bar
# Loop and plot
for iscenario in range(nSCENARIOs):
scenario=SCENARIOs[iscenario]
scen_cen_pos = iscenario+0.5
tot_bar_list=[] # Append the bar objects to list for legend
mit_bar_list=[] # Append the bar objects to list for legend
pool_bar_list=[] # Append the bar objects to list for legend
for ibar in range(nbars):
totbar = plotvar_list_tots[ibar]
mitbar = plotvar_list_mits[ibar]
xpos = scen_cen_pos + bar_positions[ibar]
barcolour = plotvar_cols[ibar]
# Plot AFFEB on the first axis:
tot_plotdata = delCStores_unc[scenario][totbar]
tot_bar_list.append(axes[0].bar(xpos,np.median(tot_plotdata),color=barcolour,width=bar_width,))
axes[0].plot([xpos+scatter_pos for i in range(len(tot_plotdata))],tot_plotdata,c='k',ls='',marker='.')
# Plot MP on the scedond axis:
mit_plotdata = delCStores_unc[scenario][mitbar]
mit_bar_list.append(axes[1].bar(xpos,np.median(mit_plotdata),color=barcolour,width=bar_width,))
axes[1].plot([xpos+scatter_pos for i in range(len(mit_plotdata))],mit_plotdata,c='k',ls='',marker='.')
# Overplot pools on the axes:
poolbar = plotvar_list_pools[ibar]
for poolDF,ax in zip([delCStores_SINKS_unc,delCStores_mitSINKS_unc],axes):
plotDF = poolDF[scenario][poolbar]
negative_min,positive_max = 0,0 # vars for stacking bars
if len(pool_bar_list)<npools+1:
pool_bar_list=[] # Append the bar objects to list for legend
else:
pool_bar_list=bar_list[:npools+1]
for ipool in range(npools):
pool = pool_list[ipool]
colour = pool_colours[ipool]
plotdata = plotDF[pool]
median = np.median(plotdata)
if median>0:
pool_bar_list.append(ax.bar(xpos+pool_pos, | |
<gh_stars>1-10
"""
BSD 3-Clause License
Copyright (c) 2019, <NAME>, Aalto University, Finland
All rights reserved.
Edited: 2019, <NAME>, Aalto University, Finland - for CES cooperative firewalling solution.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import logging
import time
import pprint
import asyncio
import H2HTransaction
import host
from helpers_n_wrappers import container3
from helpers_n_wrappers import utils3
KEY_RGW = 'KEY_RGW'
KEY_RGW_FQDN = 'KEY_RGW_FQDN'
KEY_RGW_PRIVATE_IP = 'KEY_RGW_PRIVATE_IP'
KEY_RGW_PUBLIC_IP = 'KEY_RGW_PUBLIC_IP'
KEY_RGW_3TUPLE = 'KEY_RGW_3TUPLE'
KEY_RGW_5TUPLE = 'KEY_RGW_5TUPLE'
# Keys for indexing CETP-based connections
KEY_MAP_CETP_CONN = 1
KEY_MAP_LOCAL_FQDN = 2 # Indexes host connections against FQDN of local host
KEY_MAP_REMOTE_FQDN = 3 # Indexes host connections against FQDN of remote host in another CES node
KEY_MAP_LOCAL_HOST = 4 # Indexes host connections against local host's IP
KEY_MAP_CETP_PRIVATE_NW = 5 # Indexes host connections against (lip, lpip) pair
KEY_MAP_REMOTE_CESID = 6 # Indexes host connections against remote CESID
KEY_MAP_CES_FQDN = 7 # Indexes all host connections across two CES nodes, as pair of the (local and remote host) FQDN
KEY_MAP_LOCAL_FQDNs = 8 # Indexes all host connections within same CES node, as pair of the (local and remote host) FQDN
KEY_MAP_HOST_FQDNs = 9 # Indexes all host connections using local and remote FQDNs
KEY_MAP_CES_TO_CES = 10 # Indexes host connection against an (SST, DST) pair
KEY_MAP_RCESID_C2C = 11 # Indexes C2C connection against a remote CESID
# Global variable for Data-plane connections
DP_CONN_cookie = 0
# Setting default log levels (for Connection objects)
LOGLEVEL_CETP_DPConnection_Template = logging.DEBUG
LOGLEVEL_H2HConnection = logging.INFO
LOGLEVEL_LocalConnection = logging.DEBUG
class ConnectionTable(container3.Container):
    """ Container indexing the data-plane connection state objects. """

    def __init__(self, name='ConnectionTable'):
        """ Initialize as a Container """
        super().__init__(name)

    def _update_set(self, s):
        # Iterate over a snapshot: removing a node mutates the container.
        for node in set(s):
            if node.hasexpired():
                self.remove(node)

    def update_all_rgw(self):
        """ Purge expired RealmGateway connections, if any are indexed. """
        conn_set = self.lookup(KEY_RGW, update=False, check_expire=False)
        if conn_set is not None:
            self._update_set(conn_set)

    def get_all_rgw(self, update=True):
        """ Return the RealmGateway connections, optionally purging expired ones first. """
        conn_set = self.lookup(KEY_RGW, update=False, check_expire=False)
        if conn_set is None:
            return []
        if update:
            self._update_set(conn_set)
        return conn_set

    def stats(self, key):
        """ Return the number of entries indexed under *key* (0 if none). """
        data = self.lookup(key, update=False, check_expire=False)
        return 0 if data is None else len(data)
class ConnectionLegacy(container3.ContainerNode):
TIMEOUT = 2.0
def __init__(self, name='ConnectionLegacy', **kwargs):
""" Initialize as a ContainerNode.
@param name: A description of the object.
@type name: String
@param private_ip: Private IPv4 address.
@type private_ip: String
@param private_port: Private port number.
@type private_port: Integer
@param outbound_ip: Outbound IPv4 address.
@type outbound_ip: String
@param outbound_port: Outbound port number.
@type outbound_port: Integer
@param remote_ip: Remote IPv4 address.
@type remote_ip: String
@param remote_port: Remote port number.
@type remote_port: Integer
@param protocol: Protocol number.
@type protocol: Integer
@param fqdn: Allocating FQDN.
@type fqdn: String
@param dns_resolver: IPv4 address of the DNS server.
@type dns_resolver: String
@param dns_host: IPv4 address of the DNS client.
@type dns_host: String
@param timeout: Time to live (sec).
@type timeout: Integer or float
"""
super().__init__(name)
# Set default values
self.autobind = True
self._autobind_flag = False
self.dns_bind = False
# Set attributes
utils3.set_attributes(self, override=True, **kwargs)
# Set default values of unset attributes
attrlist_zero = ['private_ip', 'private_port', 'outbound_ip', 'outbound_port',
'remote_ip', 'remote_port', 'protocol', 'loose_packet']
attrlist_none = ['fqdn', 'dns_resolver', 'dns_host', 'host_fqdn', 'timeout']
utils3.set_default_attributes(self, attrlist_zero, 0)
utils3.set_default_attributes(self, attrlist_none, None)
# Set default timeout if not overriden
if not self.timeout:
self.timeout = ConnectionLegacy.TIMEOUT
# Take creation timestamp
self.timestamp_zero = time.time()
## Override timeout ##
#self.timeout = 600.0
######################
self.timestamp_eol = self.timestamp_zero + self.timeout
self._build_lookupkeys()
def _build_lookupkeys(self):
# Build set of lookupkeys
self._built_lookupkeys = []
# Basic indexing
self._built_lookupkeys.append((KEY_RGW, False))
# Host FQDN based indexing
self._built_lookupkeys.append(((KEY_RGW_FQDN, self.host_fqdn), False))
# Private IP-based indexing
#self._built_lookupkeys.append(((KEY_RGW_PRIVATE_IP, self.private_ip), False))
# Outbound IP-based indexing
self._built_lookupkeys.append(((KEY_RGW_PUBLIC_IP, self.outbound_ip), False))
## The type of unique key come determined by the parameters available
if not self.remote_ip and not self.remote_port:
# 3-tuple semi-fledged based indexing
self._built_lookupkeys.append(((KEY_RGW_3TUPLE, self.outbound_ip, self.outbound_port, self.protocol), True))
else:
# 5-tuple full-fledged based indexing
self._built_lookupkeys.append(((KEY_RGW_5TUPLE, self.outbound_ip, self.outbound_port, self.remote_ip, self.remote_port, self.protocol), True))
def lookupkeys(self):
""" Return the lookup keys """
# Return an iterable (key, isunique)
return self._built_lookupkeys
def hasexpired(self):
""" Return True if the timeout has expired """
return time.time() > self.timestamp_eol
    def post_processing(self, connection_table, remote_ip, remote_port):
        """ Return True if no further actions are required.

        @param connection_table: Table holding this connection's lookup keys;
            updated in place when the connection is bound to a 5-tuple.
        @param remote_ip: Observed remote endpoint IP address.
        @param remote_port: Observed remote endpoint transport port.
        """
        # TODO: I think the case of loose_packet < 0 does not work as standard DNAT (permanent hole) because of the autobind flag?
        # This is the normal case for incoming connections via RealmGateway
        if self.loose_packet == 0:
            return True
        # This is a special case for opening a hole in the NAT temporarily
        elif self.loose_packet > 0:
            # Consume loose packet token
            self.loose_packet -= 1
        # This is a special case for opening a hole in the NAT permanently
        elif self.loose_packet < 0:
            pass
        # First packet after creation binds the connection to the observed
        # remote endpoint (only once, guarded by _autobind_flag)
        if self.autobind and not self._autobind_flag:
            self._logger.info('Binding connection / {}'.format(self))
            # Bind connection to 5-tuple match
            self.remote_ip, self.remote_port = remote_ip, remote_port
            # Replace the 3-tuple unique key with the now-complete 5-tuple key
            self._built_lookupkeys = [(KEY_RGW, False),
                                      ((KEY_RGW_FQDN, self.host_fqdn), False),
                                      ((KEY_RGW_PUBLIC_IP, self.outbound_ip), False),
                                      ((KEY_RGW_5TUPLE, self.outbound_ip, self.outbound_port, self.remote_ip, self.remote_port, self.protocol), True)]
            # Update keys in connection table
            connection_table.updatekeys(self)
            # Set autobind flag to True
            self._autobind_flag = True
        # Falsy return: caller must keep processing this connection
        return False
@property
def age(self):
return time.time() - self.timestamp_zero
def __repr__(self):
ret = ''
ret += '({})'.format(self.host_fqdn)
ret += ' [{}]'.format(self.protocol)
if self.private_port:
ret += ' {}:{} <- {}:{}'.format(self.private_ip, self.private_port, self.outbound_ip, self.outbound_port)
else:
ret += ' {} <- {}'.format(self.private_ip, self.outbound_ip)
if self.remote_ip:
ret += ' <=> {}:{}'.format(self.remote_ip, self.remote_port)
ret += ' ({} sec)'.format(self.timeout)
if self.fqdn:
ret += ' | FQDN {}'.format(self.fqdn)
if self.dns_resolver:
ret += ' | DNS {} <- {}'.format(self.dns_resolver, self.dns_host)
if self.loose_packet:
ret += ' / bucket={}'.format(self.loose_packet)
if not self.autobind:
ret += ' / autobind={}'.format(self.autobind)
return ret
class CETP_DPConnection_Template(container3.ContainerNode):
    """Negotiated CES-to-CES dataplane connection parameters."""

    def __init__(self, payloadID_table, l_cesid, r_cesid, lrlocs, rrlocs, lpayloads, rpayloads, name="C2CConnectionTemplate"):
        """
        Initialize a CETP_DPConnection_Template object.

        @param payloadID_table: Table of (remote CES-ID, payload type) entries, cleaned up on delete()
        @param l_cesid: Local CES-ID
        @param r_cesid: Remote CES-ID
        @param lrlocs: List of dataplane connection RLOCs of local CES -- Each RLOC represented as [(int:order, int:preference, int:addrtype, str:addrvalue)]
        @param rrlocs: List of dataplane connection RLOCs of remote CES -- Each RLOC represented as [(int:order, int:preference, int:addrtype, str:addrvalue)]
        @param lpayloads: List of negotiated dataplane payloads of local CES -- Each payload represented as [(str:type, int:preference, int:tunnel_id_out)]
        @param rpayloads: List of negotiated dataplane payloads of remote CES -- Each payload represented as [(str:type, int:preference, int:tunnel_id_in)]
        @param name: Node name used for container registration and logging
        """
        super().__init__(name)
        self.payloadID_table = payloadID_table
        self.l_cesid = l_cesid
        self.r_cesid = r_cesid
        self.lrlocs = lrlocs
        self.rrlocs = rrlocs
        self.lpayloads = lpayloads
        self.rpayloads = rpayloads
        # Pick the preferred RLOC / payload out of the negotiated lists
        self._select_conn_params()
        self.connectiontype = "CONNECTION_C2C"
        self._logger = logging.getLogger(name)
        self._logger.setLevel(LOGLEVEL_CETP_DPConnection_Template)
        self._build_lookupkeys()

    def _select_conn_params(self):
        """Extract the preferred RLOC addresses and (type, tunnel_id) payloads.

        The first element of each negotiated list is taken as the most
        preferred entry.
        """
        preferred_lrloc = self.lrlocs[0]
        preferred_rrloc = self.rrlocs[0]
        preferred_lpayload = self.lpayloads[0]
        preferred_rpayload = self.rpayloads[0]
        # Index 3 of an RLOC tuple is the address value; from a payload
        # (type, preference, tunnel_id) keep only (type, tunnel_id)
        self.lrloc = preferred_lrloc[3]
        self.rrloc = preferred_rrloc[3]
        self.lpayload = (preferred_lpayload[0], preferred_lpayload[2])
        self.rpayload = (preferred_rpayload[0], preferred_rpayload[2])

    def get_rlocs(self):
        """Return the (local, remote) preferred RLOC address values."""
        return (self.lrloc, self.rrloc)

    def get_payloads(self):
        """Return the (local, remote) preferred (type, tunnel_id) payloads."""
        return (self.lpayload, self.rpayload)

    def _build_lookupkeys(self):
        """Build the lookup keys: a single unique key per remote CES-ID."""
        self._built_lookupkeys = [((KEY_MAP_RCESID_C2C, self.r_cesid), True)]

    def lookupkeys(self):
        """Return an iterable of (key, isunique) tuples."""
        return self._built_lookupkeys

    def delete(self):
        """Deregister the payload identifiers negotiated with the remote CES."""
        self._logger.debug("Deleting a '{}' connection!".format(self.connectiontype))
        for payload in self.lpayloads:
            payload_type = payload[0]  # Payload type
            node = self.payloadID_table.lookup((self.r_cesid, payload_type))
            if node is not None:
                self.payloadID_table.remove(node)
"""
# Manage the DP identifiers, negotiated with a remote | |
0x19, 0x01, 0x81,
0x42, 0x42, 0x00, 0x82, 0x8c, 0x09, 0x11, 0x20,
0x12, 0x04, 0x20, 0x42, 0x49, 0x82, 0x04, 0xa0,
0x42, 0x40, 0x22, 0x01, 0x40, 0x04, 0x2d, 0x24,
0x80, 0x01, 0x11, 0x82, 0x42, 0x12, 0x20, 0x2c,
0x44, 0x32, 0x82, 0x8c, 0x08, 0x30, 0x44, 0x10,
0x04, 0xdf, 0x16, 0x0e, 0x6f, 0x62, 0xd2, 0x22,
0xa2, 0x32, 0x2b, 0x11, 0x12, 0x29, 0x01, 0x2c,
0x51, 0x22, 0x29, 0xf8, 0x12, 0x13, 0x3f, 0x11,
0xb1, 0x81, 0xe9, 0x99, 0xd9, 0x88, 0xf8, 0x88,
0xc1, 0x4e, 0x48, 0x42, 0x1d, 0x49, 0x15, 0xc1,
0x41, 0x95, 0x59, 0x88, 0x4e, 0x48, 0x8f, 0xc4,
0xd4, 0x44, 0xb4, 0x44, 0xe6, 0x42, 0x22, 0xc2,
0x24, 0x80, 0x52, 0x44, 0x45, 0xe4, 0x42, 0xf2,
0x26, 0x26, 0x2d, 0x22, 0x2a, 0xb3, 0x12, 0xa1,
0x81, 0x2b, 0x98, 0x82, 0x2c, 0x59, 0x22, 0x29,
0xf8, 0x12, 0x13, 0x3f, 0x11, 0xb1, 0x81, 0xe9,
0x99, 0xd9, 0x88, 0xb8, 0x88, 0xec, 0x84, 0x24,
0xc4, 0x48, 0x80, 0x54, 0x88, 0x85, 0xe8, 0x84,
0xd4, 0xd3, 0x05, 0x6f, 0x22, 0x72, 0x12, 0xa2,
0x12, 0x1a, 0x31, 0x12, 0x80, 0x01, 0x25, 0x12,
0xc3, 0x13, 0x1f, 0x11, 0x91, 0x89, 0x9e, 0x88,
0x87, 0x94, 0x8b, 0x48, 0x4a, 0x64, 0x84, 0x14,
0x1d, 0x41, 0x50, 0x81, 0x85, 0x98, 0x44, 0x8f,
0x44, 0x74, 0x24, 0xb4, 0x44, 0xa2, 0x22, 0x26,
0x04, 0x28, 0x40, 0x54, 0x64, 0x28, 0x6f, 0x22,
0x72, 0x12, 0xa2, 0x12, 0x9a, 0xb1, 0x12, 0xa8,
0x88, 0x8a, 0x01, 0x2d, 0x82, 0x31, 0x3c, 0xf1,
0x11, 0x11, 0x99, 0xe8, 0x89, 0x78, 0x48, 0xb9,
0x88, 0xa4, 0x44, 0x46, 0x08, 0x48, 0x40, 0x58,
0x88, 0x49, 0xf4, 0x41, 0xed, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xff, 0xe4, 0x0f, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xf0, 0x4f, 0xfe, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0xff, 0xe4, 0x0f, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0xf0, 0x4f, 0xfe, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xe4, 0x0f,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x4f, 0xfe,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xe4,
0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x4f,
0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff,
0xe4, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0,
0x4f, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xff, 0xe4, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xf0, 0x4f, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xff, 0xe4, 0x0f, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xf0, 0x4f, 0xfe, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xff, 0xe4, 0x0f, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xf0, 0x4f, 0xfe, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xff, 0xe4, 0x0f, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xf0, 0x4f, 0xfe, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0xff, 0xe4, 0x0f, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0xf0, 0x4f, 0xfe, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xe4, 0x0f,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x4f, 0xfe,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xe4,
0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x4f,
0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff,
0xe4, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0,
0x4f, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xff, 0xe4, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xf0, 0x4f, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xff, 0xe4, 0x0f, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xf0, 0x4f, 0xfe, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xff, 0xe4, 0x0f, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xf0, 0x4f, 0xfe, | |
# Copyright 2019 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
# This file is automatically generated by mkgrokdump and should not
# be modified manually.
# List of known V8 instance types.
# yapf: disable
INSTANCE_TYPES = {
0: "INTERNALIZED_STRING_TYPE",
2: "EXTERNAL_INTERNALIZED_STRING_TYPE",
8: "ONE_BYTE_INTERNALIZED_STRING_TYPE",
10: "EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE",
18: "UNCACHED_EXTERNAL_INTERNALIZED_STRING_TYPE",
26: "UNCACHED_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE",
32: "STRING_TYPE",
33: "CONS_STRING_TYPE",
34: "EXTERNAL_STRING_TYPE",
35: "SLICED_STRING_TYPE",
37: "THIN_STRING_TYPE",
40: "ONE_BYTE_STRING_TYPE",
41: "CONS_ONE_BYTE_STRING_TYPE",
42: "EXTERNAL_ONE_BYTE_STRING_TYPE",
43: "SLICED_ONE_BYTE_STRING_TYPE",
45: "THIN_ONE_BYTE_STRING_TYPE",
50: "UNCACHED_EXTERNAL_STRING_TYPE",
58: "UNCACHED_EXTERNAL_ONE_BYTE_STRING_TYPE",
96: "SHARED_STRING_TYPE",
101: "SHARED_THIN_STRING_TYPE",
104: "SHARED_ONE_BYTE_STRING_TYPE",
109: "SHARED_THIN_ONE_BYTE_STRING_TYPE",
128: "SYMBOL_TYPE",
129: "BIG_INT_BASE_TYPE",
130: "HEAP_NUMBER_TYPE",
131: "ODDBALL_TYPE",
132: "PROMISE_FULFILL_REACTION_JOB_TASK_TYPE",
133: "PROMISE_REJECT_REACTION_JOB_TASK_TYPE",
134: "CALLABLE_TASK_TYPE",
135: "CALLBACK_TASK_TYPE",
136: "PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE",
137: "LOAD_HANDLER_TYPE",
138: "STORE_HANDLER_TYPE",
139: "FUNCTION_TEMPLATE_INFO_TYPE",
140: "OBJECT_TEMPLATE_INFO_TYPE",
141: "ACCESS_CHECK_INFO_TYPE",
142: "ACCESSOR_INFO_TYPE",
143: "ACCESSOR_PAIR_TYPE",
144: "ALIASED_ARGUMENTS_ENTRY_TYPE",
145: "ALLOCATION_MEMENTO_TYPE",
146: "ALLOCATION_SITE_TYPE",
147: "ARRAY_BOILERPLATE_DESCRIPTION_TYPE",
148: "ASM_WASM_DATA_TYPE",
149: "ASYNC_GENERATOR_REQUEST_TYPE",
150: "BREAK_POINT_TYPE",
151: "BREAK_POINT_INFO_TYPE",
152: "CACHED_TEMPLATE_OBJECT_TYPE",
153: "CALL_HANDLER_INFO_TYPE",
154: "CALL_SITE_INFO_TYPE",
155: "CLASS_POSITIONS_TYPE",
156: "DEBUG_INFO_TYPE",
157: "ENUM_CACHE_TYPE",
158: "ERROR_STACK_DATA_TYPE",
159: "FEEDBACK_CELL_TYPE",
160: "FUNCTION_TEMPLATE_RARE_DATA_TYPE",
161: "INTERCEPTOR_INFO_TYPE",
162: "INTERPRETER_DATA_TYPE",
163: "MODULE_REQUEST_TYPE",
164: "PROMISE_CAPABILITY_TYPE",
165: "PROMISE_ON_STACK_TYPE",
166: "PROMISE_REACTION_TYPE",
167: "PROPERTY_DESCRIPTOR_OBJECT_TYPE",
168: "PROTOTYPE_INFO_TYPE",
169: "REG_EXP_BOILERPLATE_DESCRIPTION_TYPE",
170: "SCRIPT_TYPE",
171: "SCRIPT_OR_MODULE_TYPE",
172: "SOURCE_TEXT_MODULE_INFO_ENTRY_TYPE",
173: "STACK_FRAME_INFO_TYPE",
174: "TEMPLATE_OBJECT_DESCRIPTION_TYPE",
175: "TUPLE2_TYPE",
176: "WASM_CONTINUATION_OBJECT_TYPE",
177: "WASM_EXCEPTION_TAG_TYPE",
178: "WASM_INDIRECT_FUNCTION_TABLE_TYPE",
179: "FIXED_ARRAY_TYPE",
180: "HASH_TABLE_TYPE",
181: "EPHEMERON_HASH_TABLE_TYPE",
182: "GLOBAL_DICTIONARY_TYPE",
183: "NAME_DICTIONARY_TYPE",
184: "NAME_TO_INDEX_HASH_TABLE_TYPE",
185: "NUMBER_DICTIONARY_TYPE",
186: "ORDERED_HASH_MAP_TYPE",
187: "ORDERED_HASH_SET_TYPE",
188: "ORDERED_NAME_DICTIONARY_TYPE",
189: "REGISTERED_SYMBOL_TABLE_TYPE",
190: "SIMPLE_NUMBER_DICTIONARY_TYPE",
191: "CLOSURE_FEEDBACK_CELL_ARRAY_TYPE",
192: "OBJECT_BOILERPLATE_DESCRIPTION_TYPE",
193: "SCRIPT_CONTEXT_TABLE_TYPE",
194: "BYTE_ARRAY_TYPE",
195: "BYTECODE_ARRAY_TYPE",
196: "FIXED_DOUBLE_ARRAY_TYPE",
197: "INTERNAL_CLASS_WITH_SMI_ELEMENTS_TYPE",
198: "SLOPPY_ARGUMENTS_ELEMENTS_TYPE",
199: "TURBOFAN_BITSET_TYPE_TYPE",
200: "TURBOFAN_HEAP_CONSTANT_TYPE_TYPE",
201: "TURBOFAN_OTHER_NUMBER_CONSTANT_TYPE_TYPE",
202: "TURBOFAN_RANGE_TYPE_TYPE",
203: "TURBOFAN_UNION_TYPE_TYPE",
204: "FOREIGN_TYPE",
205: "WASM_INTERNAL_FUNCTION_TYPE",
206: "WASM_TYPE_INFO_TYPE",
207: "AWAIT_CONTEXT_TYPE",
208: "BLOCK_CONTEXT_TYPE",
209: "CATCH_CONTEXT_TYPE",
210: "DEBUG_EVALUATE_CONTEXT_TYPE",
211: "EVAL_CONTEXT_TYPE",
212: "FUNCTION_CONTEXT_TYPE",
213: "MODULE_CONTEXT_TYPE",
214: "NATIVE_CONTEXT_TYPE",
215: "SCRIPT_CONTEXT_TYPE",
216: "WITH_CONTEXT_TYPE",
217: "UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE",
218: "UNCOMPILED_DATA_WITH_PREPARSE_DATA_AND_JOB_TYPE",
219: "UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE",
220: "UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_WITH_JOB_TYPE",
221: "WASM_FUNCTION_DATA_TYPE",
222: "WASM_CAPI_FUNCTION_DATA_TYPE",
223: "WASM_EXPORTED_FUNCTION_DATA_TYPE",
224: "WASM_JS_FUNCTION_DATA_TYPE",
225: "EXPORTED_SUB_CLASS_BASE_TYPE",
226: "EXPORTED_SUB_CLASS_TYPE",
227: "EXPORTED_SUB_CLASS2_TYPE",
228: "SMALL_ORDERED_HASH_MAP_TYPE",
229: "SMALL_ORDERED_HASH_SET_TYPE",
230: "SMALL_ORDERED_NAME_DICTIONARY_TYPE",
231: "ABSTRACT_INTERNAL_CLASS_SUBCLASS1_TYPE",
232: "ABSTRACT_INTERNAL_CLASS_SUBCLASS2_TYPE",
233: "DESCRIPTOR_ARRAY_TYPE",
234: "STRONG_DESCRIPTOR_ARRAY_TYPE",
235: "SOURCE_TEXT_MODULE_TYPE",
236: "SYNTHETIC_MODULE_TYPE",
237: "WEAK_FIXED_ARRAY_TYPE",
238: "TRANSITION_ARRAY_TYPE",
239: "CELL_TYPE",
240: "CODE_TYPE",
241: "CODE_DATA_CONTAINER_TYPE",
242: "COVERAGE_INFO_TYPE",
243: "EMBEDDER_DATA_ARRAY_TYPE",
244: "FEEDBACK_METADATA_TYPE",
245: "FEEDBACK_VECTOR_TYPE",
246: "FILLER_TYPE",
247: "FREE_SPACE_TYPE",
248: "INTERNAL_CLASS_TYPE",
249: "INTERNAL_CLASS_WITH_STRUCT_ELEMENTS_TYPE",
250: "MAP_TYPE",
251: "MEGA_DOM_HANDLER_TYPE",
252: "ON_HEAP_BASIC_BLOCK_PROFILER_DATA_TYPE",
253: "PREPARSE_DATA_TYPE",
254: "PROPERTY_ARRAY_TYPE",
255: "PROPERTY_CELL_TYPE",
256: "SCOPE_INFO_TYPE",
257: "SHARED_FUNCTION_INFO_TYPE",
258: "SMI_BOX_TYPE",
259: "SMI_PAIR_TYPE",
260: "SORT_STATE_TYPE",
261: "SWISS_NAME_DICTIONARY_TYPE",
262: "WASM_API_FUNCTION_REF_TYPE",
263: "WASM_ON_FULFILLED_DATA_TYPE",
264: "WEAK_ARRAY_LIST_TYPE",
265: "WEAK_CELL_TYPE",
266: "WASM_ARRAY_TYPE",
267: "WASM_STRUCT_TYPE",
268: "JS_PROXY_TYPE",
1057: "JS_OBJECT_TYPE",
269: "JS_GLOBAL_OBJECT_TYPE",
270: "JS_GLOBAL_PROXY_TYPE",
271: "JS_MODULE_NAMESPACE_TYPE",
1040: "JS_SPECIAL_API_OBJECT_TYPE",
1041: "JS_PRIMITIVE_WRAPPER_TYPE",
1058: "JS_API_OBJECT_TYPE",
2058: "JS_LAST_DUMMY_API_OBJECT_TYPE",
2059: "JS_DATA_VIEW_TYPE",
2060: "JS_TYPED_ARRAY_TYPE",
2061: "JS_ARRAY_BUFFER_TYPE",
2062: "JS_PROMISE_TYPE",
2063: "JS_BOUND_FUNCTION_TYPE",
2064: "JS_WRAPPED_FUNCTION_TYPE",
2065: "JS_FUNCTION_TYPE",
2066: "BIGINT64_TYPED_ARRAY_CONSTRUCTOR_TYPE",
2067: "BIGUINT64_TYPED_ARRAY_CONSTRUCTOR_TYPE",
2068: "FLOAT32_TYPED_ARRAY_CONSTRUCTOR_TYPE",
2069: "FLOAT64_TYPED_ARRAY_CONSTRUCTOR_TYPE",
2070: "INT16_TYPED_ARRAY_CONSTRUCTOR_TYPE",
2071: "INT32_TYPED_ARRAY_CONSTRUCTOR_TYPE",
2072: "INT8_TYPED_ARRAY_CONSTRUCTOR_TYPE",
2073: "UINT16_TYPED_ARRAY_CONSTRUCTOR_TYPE",
2074: "UINT32_TYPED_ARRAY_CONSTRUCTOR_TYPE",
2075: "UINT8_CLAMPED_TYPED_ARRAY_CONSTRUCTOR_TYPE",
2076: "UINT8_TYPED_ARRAY_CONSTRUCTOR_TYPE",
2077: "JS_ARRAY_CONSTRUCTOR_TYPE",
2078: "JS_PROMISE_CONSTRUCTOR_TYPE",
2079: "JS_REG_EXP_CONSTRUCTOR_TYPE",
2080: "JS_CLASS_CONSTRUCTOR_TYPE",
2081: "JS_ARRAY_ITERATOR_PROTOTYPE_TYPE",
2082: "JS_ITERATOR_PROTOTYPE_TYPE",
2083: "JS_MAP_ITERATOR_PROTOTYPE_TYPE",
2084: "JS_OBJECT_PROTOTYPE_TYPE",
2085: "JS_PROMISE_PROTOTYPE_TYPE",
2086: "JS_REG_EXP_PROTOTYPE_TYPE",
2087: "JS_SET_ITERATOR_PROTOTYPE_TYPE",
2088: "JS_SET_PROTOTYPE_TYPE",
2089: "JS_STRING_ITERATOR_PROTOTYPE_TYPE",
2090: "JS_TYPED_ARRAY_PROTOTYPE_TYPE",
2091: "JS_MAP_KEY_ITERATOR_TYPE",
2092: "JS_MAP_KEY_VALUE_ITERATOR_TYPE",
2093: "JS_MAP_VALUE_ITERATOR_TYPE",
2094: "JS_SET_KEY_VALUE_ITERATOR_TYPE",
2095: "JS_SET_VALUE_ITERATOR_TYPE",
2096: "JS_GENERATOR_OBJECT_TYPE",
2097: "JS_ASYNC_FUNCTION_OBJECT_TYPE",
2098: "JS_ASYNC_GENERATOR_OBJECT_TYPE",
2099: "JS_MAP_TYPE",
2100: "JS_SET_TYPE",
2101: "JS_WEAK_MAP_TYPE",
2102: "JS_WEAK_SET_TYPE",
2103: "JS_ARGUMENTS_OBJECT_TYPE",
2104: "JS_ARRAY_TYPE",
2105: "JS_ARRAY_ITERATOR_TYPE",
2106: "JS_ASYNC_FROM_SYNC_ITERATOR_TYPE",
2107: "JS_COLLATOR_TYPE",
2108: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
2109: "JS_DATE_TYPE",
2110: "JS_DATE_TIME_FORMAT_TYPE",
2111: "JS_DISPLAY_NAMES_TYPE",
2112: "JS_ERROR_TYPE",
2113: "JS_EXTERNAL_OBJECT_TYPE",
2114: "JS_FINALIZATION_REGISTRY_TYPE",
2115: "JS_LIST_FORMAT_TYPE",
2116: "JS_LOCALE_TYPE",
2117: "JS_MESSAGE_OBJECT_TYPE",
2118: "JS_NUMBER_FORMAT_TYPE",
2119: "JS_PLURAL_RULES_TYPE",
2120: "JS_REG_EXP_TYPE",
2121: "JS_REG_EXP_STRING_ITERATOR_TYPE",
2122: "JS_RELATIVE_TIME_FORMAT_TYPE",
2123: "JS_SEGMENT_ITERATOR_TYPE",
2124: "JS_SEGMENTER_TYPE",
2125: "JS_SEGMENTS_TYPE",
2126: "JS_SHADOW_REALM_TYPE",
2127: "JS_SHARED_STRUCT_TYPE",
2128: "JS_STRING_ITERATOR_TYPE",
2129: "JS_TEMPORAL_CALENDAR_TYPE",
2130: "JS_TEMPORAL_DURATION_TYPE",
2131: "JS_TEMPORAL_INSTANT_TYPE",
2132: "JS_TEMPORAL_PLAIN_DATE_TYPE",
2133: "JS_TEMPORAL_PLAIN_DATE_TIME_TYPE",
2134: "JS_TEMPORAL_PLAIN_MONTH_DAY_TYPE",
2135: "JS_TEMPORAL_PLAIN_TIME_TYPE",
2136: "JS_TEMPORAL_PLAIN_YEAR_MONTH_TYPE",
2137: "JS_TEMPORAL_TIME_ZONE_TYPE",
2138: "JS_TEMPORAL_ZONED_DATE_TIME_TYPE",
2139: "JS_V8_BREAK_ITERATOR_TYPE",
2140: "JS_WEAK_REF_TYPE",
2141: "WASM_GLOBAL_OBJECT_TYPE",
2142: "WASM_INSTANCE_OBJECT_TYPE",
2143: "WASM_MEMORY_OBJECT_TYPE",
2144: "WASM_MODULE_OBJECT_TYPE",
2145: "WASM_SUSPENDER_OBJECT_TYPE",
2146: "WASM_TABLE_OBJECT_TYPE",
2147: "WASM_TAG_OBJECT_TYPE",
2148: "WASM_VALUE_OBJECT_TYPE",
}
# List of known V8 maps.
KNOWN_MAPS = {
("read_only_space", 0x02151): (250, "MetaMap"),
("read_only_space", 0x02179): (131, "NullMap"),
("read_only_space", 0x021a1): (234, "StrongDescriptorArrayMap"),
("read_only_space", 0x021c9): (264, "WeakArrayListMap"),
("read_only_space", 0x0220d): (157, "EnumCacheMap"),
("read_only_space", 0x02241): (179, "FixedArrayMap"),
("read_only_space", 0x0228d): (8, "OneByteInternalizedStringMap"),
("read_only_space", 0x022d9): (247, "FreeSpaceMap"),
("read_only_space", 0x02301): (246, "OnePointerFillerMap"),
("read_only_space", 0x02329): (246, "TwoPointerFillerMap"),
("read_only_space", 0x02351): (131, "UninitializedMap"),
("read_only_space", 0x023c9): (131, "UndefinedMap"),
("read_only_space", 0x0240d): (130, "HeapNumberMap"),
("read_only_space", 0x02441): (131, "TheHoleMap"),
("read_only_space", 0x024a1): (131, "BooleanMap"),
("read_only_space", 0x02545): (194, "ByteArrayMap"),
("read_only_space", 0x0256d): (179, "FixedCOWArrayMap"),
("read_only_space", 0x02595): (180, "HashTableMap"),
("read_only_space", 0x025bd): (128, "SymbolMap"),
("read_only_space", 0x025e5): (40, "OneByteStringMap"),
("read_only_space", 0x0260d): (256, "ScopeInfoMap"),
("read_only_space", 0x02635): (257, "SharedFunctionInfoMap"),
("read_only_space", 0x0265d): (240, "CodeMap"),
("read_only_space", 0x02685): (239, "CellMap"),
("read_only_space", 0x026ad): (255, "GlobalPropertyCellMap"),
("read_only_space", 0x026d5): (204, "ForeignMap"),
("read_only_space", 0x026fd): (238, "TransitionArrayMap"),
("read_only_space", 0x02725): (45, "ThinOneByteStringMap"),
("read_only_space", 0x0274d): (245, "FeedbackVectorMap"),
("read_only_space", 0x02785): (131, "ArgumentsMarkerMap"),
("read_only_space", 0x027e5): (131, "ExceptionMap"),
("read_only_space", 0x02841): (131, "TerminationExceptionMap"),
("read_only_space", 0x028a9): (131, "OptimizedOutMap"),
("read_only_space", 0x02909): (131, "StaleRegisterMap"),
("read_only_space", 0x02969): (193, "ScriptContextTableMap"),
("read_only_space", 0x02991): (191, "ClosureFeedbackCellArrayMap"),
("read_only_space", 0x029b9): (244, "FeedbackMetadataArrayMap"),
("read_only_space", 0x029e1): (179, "ArrayListMap"),
("read_only_space", 0x02a09): (129, "BigIntMap"),
("read_only_space", 0x02a31): (192, "ObjectBoilerplateDescriptionMap"),
("read_only_space", 0x02a59): (195, "BytecodeArrayMap"),
("read_only_space", 0x02a81): (241, "CodeDataContainerMap"),
("read_only_space", 0x02aa9): (242, "CoverageInfoMap"),
("read_only_space", 0x02ad1): (196, "FixedDoubleArrayMap"),
("read_only_space", 0x02af9): (182, "GlobalDictionaryMap"),
("read_only_space", 0x02b21): (159, "ManyClosuresCellMap"),
("read_only_space", 0x02b49): (251, "MegaDomHandlerMap"),
("read_only_space", 0x02b71): (179, "ModuleInfoMap"),
("read_only_space", 0x02b99): (183, "NameDictionaryMap"),
("read_only_space", 0x02bc1): (159, "NoClosuresCellMap"),
("read_only_space", 0x02be9): (185, "NumberDictionaryMap"),
("read_only_space", 0x02c11): (159, "OneClosureCellMap"),
("read_only_space", 0x02c39): (186, "OrderedHashMapMap"),
("read_only_space", 0x02c61): (187, "OrderedHashSetMap"),
("read_only_space", 0x02c89): (184, "NameToIndexHashTableMap"),
("read_only_space", 0x02cb1): (189, "RegisteredSymbolTableMap"),
("read_only_space", 0x02cd9): (188, "OrderedNameDictionaryMap"),
("read_only_space", 0x02d01): (253, "PreparseDataMap"),
("read_only_space", 0x02d29): (254, "PropertyArrayMap"),
("read_only_space", 0x02d51): (153, "SideEffectCallHandlerInfoMap"),
("read_only_space", 0x02d79): (153, "SideEffectFreeCallHandlerInfoMap"),
("read_only_space", 0x02da1): (153, "NextCallSideEffectFreeCallHandlerInfoMap"),
("read_only_space", 0x02dc9): (190, "SimpleNumberDictionaryMap"),
("read_only_space", 0x02df1): (228, "SmallOrderedHashMapMap"),
("read_only_space", 0x02e19): (229, "SmallOrderedHashSetMap"),
("read_only_space", 0x02e41): (230, "SmallOrderedNameDictionaryMap"),
("read_only_space", 0x02e69): (235, "SourceTextModuleMap"),
("read_only_space", 0x02e91): (261, "SwissNameDictionaryMap"),
("read_only_space", 0x02eb9): (236, "SyntheticModuleMap"),
("read_only_space", 0x02ee1): (262, "WasmApiFunctionRefMap"),
("read_only_space", 0x02f09): (222, "WasmCapiFunctionDataMap"),
("read_only_space", 0x02f31): (223, "WasmExportedFunctionDataMap"),
("read_only_space", 0x02f59): (205, "WasmInternalFunctionMap"),
("read_only_space", 0x02f81): (224, "WasmJSFunctionDataMap"),
("read_only_space", 0x02fa9): (263, "WasmOnFulfilledDataMap"),
("read_only_space", 0x02fd1): (206, "WasmTypeInfoMap"),
("read_only_space", 0x02ff9): (237, "WeakFixedArrayMap"),
("read_only_space", 0x03021): (181, "EphemeronHashTableMap"),
("read_only_space", 0x03049): (243, "EmbedderDataArrayMap"),
("read_only_space", 0x03071): (265, "WeakCellMap"),
("read_only_space", 0x03099): (32, "StringMap"),
("read_only_space", 0x030c1): (41, "ConsOneByteStringMap"),
("read_only_space", 0x030e9): (33, "ConsStringMap"),
("read_only_space", 0x03111): (37, "ThinStringMap"),
("read_only_space", 0x03139): (35, "SlicedStringMap"),
("read_only_space", 0x03161): (43, "SlicedOneByteStringMap"),
("read_only_space", 0x03189): (34, "ExternalStringMap"),
("read_only_space", 0x031b1): (42, "ExternalOneByteStringMap"),
("read_only_space", 0x031d9): (50, "UncachedExternalStringMap"),
("read_only_space", 0x03201): (0, "InternalizedStringMap"),
("read_only_space", 0x03229): (2, "ExternalInternalizedStringMap"),
("read_only_space", 0x03251): (10, "ExternalOneByteInternalizedStringMap"),
("read_only_space", 0x03279): (18, "UncachedExternalInternalizedStringMap"),
("read_only_space", 0x032a1): (26, "UncachedExternalOneByteInternalizedStringMap"),
("read_only_space", 0x032c9): (58, "UncachedExternalOneByteStringMap"),
("read_only_space", 0x032f1): (104, "SharedOneByteStringMap"),
("read_only_space", 0x03319): (96, "SharedStringMap"),
("read_only_space", 0x03341): (109, "SharedThinOneByteStringMap"),
("read_only_space", 0x03369): (101, "SharedThinStringMap"),
("read_only_space", 0x03391): (96, "TwoByteSeqStringMigrationSentinelMap"),
("read_only_space", 0x033b9): (104, "OneByteSeqStringMigrationSentinelMap"),
("read_only_space", 0x033e1): (131, "SelfReferenceMarkerMap"),
("read_only_space", 0x03409): (131, "BasicBlockCountersMarkerMap"),
("read_only_space", 0x0344d): (147, "ArrayBoilerplateDescriptionMap"),
("read_only_space", 0x0354d): (161, "InterceptorInfoMap"),
("read_only_space", 0x0601d): (132, "PromiseFulfillReactionJobTaskMap"),
("read_only_space", 0x06045): (133, "PromiseRejectReactionJobTaskMap"),
("read_only_space", 0x0606d): (134, "CallableTaskMap"),
("read_only_space", 0x06095): (135, "CallbackTaskMap"),
("read_only_space", 0x060bd): (136, "PromiseResolveThenableJobTaskMap"),
("read_only_space", 0x060e5): (139, "FunctionTemplateInfoMap"),
("read_only_space", 0x0610d): (140, "ObjectTemplateInfoMap"),
("read_only_space", 0x06135): (141, "AccessCheckInfoMap"),
("read_only_space", 0x0615d): (142, "AccessorInfoMap"),
("read_only_space", 0x06185): (143, "AccessorPairMap"),
("read_only_space", 0x061ad): (144, "AliasedArgumentsEntryMap"),
("read_only_space", 0x061d5): (145, "AllocationMementoMap"),
("read_only_space", 0x061fd): (148, "AsmWasmDataMap"),
("read_only_space", 0x06225): (149, "AsyncGeneratorRequestMap"),
("read_only_space", 0x0624d): (150, "BreakPointMap"),
("read_only_space", 0x06275): (151, "BreakPointInfoMap"),
("read_only_space", 0x0629d): (152, "CachedTemplateObjectMap"),
("read_only_space", 0x062c5): (154, "CallSiteInfoMap"),
("read_only_space", 0x062ed): (155, "ClassPositionsMap"),
("read_only_space", 0x06315): (156, "DebugInfoMap"),
("read_only_space", 0x0633d): (158, "ErrorStackDataMap"),
("read_only_space", 0x06365): (160, "FunctionTemplateRareDataMap"),
("read_only_space", 0x0638d): (162, "InterpreterDataMap"),
("read_only_space", 0x063b5): (163, "ModuleRequestMap"),
("read_only_space", 0x063dd): (164, "PromiseCapabilityMap"),
("read_only_space", 0x06405): (165, "PromiseOnStackMap"),
("read_only_space", 0x0642d): (166, "PromiseReactionMap"),
("read_only_space", 0x06455): (167, "PropertyDescriptorObjectMap"),
("read_only_space", 0x0647d): (168, "PrototypeInfoMap"),
("read_only_space", 0x064a5): (169, "RegExpBoilerplateDescriptionMap"),
("read_only_space", 0x064cd): (170, "ScriptMap"),
("read_only_space", 0x064f5): (171, "ScriptOrModuleMap"),
("read_only_space", 0x0651d): (172, "SourceTextModuleInfoEntryMap"),
("read_only_space", 0x06545): (173, "StackFrameInfoMap"),
("read_only_space", 0x0656d): (174, "TemplateObjectDescriptionMap"),
("read_only_space", 0x06595): (175, "Tuple2Map"),
("read_only_space", 0x065bd): (176, "WasmContinuationObjectMap"),
("read_only_space", 0x065e5): (177, "WasmExceptionTagMap"),
("read_only_space", 0x0660d): (178, "WasmIndirectFunctionTableMap"),
("read_only_space", 0x06635): (198, "SloppyArgumentsElementsMap"),
("read_only_space", 0x0665d): (233, "DescriptorArrayMap"),
("read_only_space", 0x06685): (219, "UncompiledDataWithoutPreparseDataMap"),
("read_only_space", 0x066ad): (217, "UncompiledDataWithPreparseDataMap"),
("read_only_space", 0x066d5): (220, "UncompiledDataWithoutPreparseDataWithJobMap"),
("read_only_space", 0x066fd): (218, "UncompiledDataWithPreparseDataAndJobMap"),
("read_only_space", 0x06725): (252, "OnHeapBasicBlockProfilerDataMap"),
("read_only_space", 0x0674d): (199, "TurbofanBitsetTypeMap"),
("read_only_space", 0x06775): (203, "TurbofanUnionTypeMap"),
("read_only_space", 0x0679d): (202, "TurbofanRangeTypeMap"),
("read_only_space", 0x067c5): (200, "TurbofanHeapConstantTypeMap"),
("read_only_space", 0x067ed): (201, "TurbofanOtherNumberConstantTypeMap"),
("read_only_space", 0x06815): (248, "InternalClassMap"),
("read_only_space", 0x0683d): (259, "SmiPairMap"),
("read_only_space", 0x06865): (258, "SmiBoxMap"),
("read_only_space", 0x0688d): (225, "ExportedSubClassBaseMap"),
("read_only_space", 0x068b5): (226, "ExportedSubClassMap"),
("read_only_space", 0x068dd): (231, "AbstractInternalClassSubclass1Map"),
("read_only_space", 0x06905): (232, "AbstractInternalClassSubclass2Map"),
("read_only_space", 0x0692d): (197, "InternalClassWithSmiElementsMap"),
("read_only_space", 0x06955): (249, "InternalClassWithStructElementsMap"),
("read_only_space", 0x0697d): (227, "ExportedSubClass2Map"),
("read_only_space", 0x069a5): (260, "SortStateMap"),
("read_only_space", 0x069cd): (146, "AllocationSiteWithWeakNextMap"),
("read_only_space", 0x069f5): (146, "AllocationSiteWithoutWeakNextMap"),
("read_only_space", 0x06a1d): (137, "LoadHandler1Map"),
("read_only_space", 0x06a45): (137, "LoadHandler2Map"),
("read_only_space", 0x06a6d): (137, "LoadHandler3Map"),
("read_only_space", 0x06a95): (138, "StoreHandler0Map"),
("read_only_space", 0x06abd): (138, "StoreHandler1Map"),
("read_only_space", 0x06ae5): (138, | |
conv.events.store(EV_PROTOCOL_RESPONSE, ACCESS_TOKEN_RESPONSE_1)
chk = CheckIdTokenNonce()
kwargs = {}
chk._kwargs = kwargs
res = chk._func(conv)
assert chk._status == ERROR
def test_check_query_part():
    """CheckQueryPart passes when the response carries the queried item."""
    conv = setup_conv()['conv']
    # The check needs a stored AuthorizationRequest ...
    conv.events.store(EV_PROTOCOL_REQUEST,
                      AuthorizationRequest(response_type=['code'], state='some'))
    # ... and an AuthorizationResponse that contains the queried 'foo' part.
    conv.events.store(EV_PROTOCOL_RESPONSE,
                      AuthorizationResponse(code='12345678', foo='bar',
                                            state='some'))
    check = CheckQueryPart()
    check._kwargs = {'foo': 'bar'}
    check._func(conv)
    assert check._status == OK
def test_check_query_part_not():
    """CheckQueryPart flags ERROR when the queried item is absent."""
    conv = setup_conv()['conv']
    conv.events.store(EV_PROTOCOL_REQUEST,
                      AuthorizationRequest(response_type=['code'], state='some'))
    # Response deliberately lacks the 'foo' part the check looks for.
    conv.events.store(EV_PROTOCOL_RESPONSE,
                      AuthorizationResponse(code='12345678', state='some'))
    check = CheckQueryPart()
    check._kwargs = {'foo': 'bar'}
    check._func(conv)
    assert check._status == ERROR
def test_check_request_parameter_supported_support():
    """OK when provider info advertises request_parameter_supported=True."""
    conv = setup_conv()['conv']
    conv.entity.provider_info = ProviderConfigurationResponse(
        jwks_uri='http://example.com/jwks.json',
        request_parameter_supported=True)
    check = CheckRequestParameterSupported()
    check._kwargs = {}
    check._func(conv)
    assert check._status == OK
def test_check_request_parameter_supported_not_support():
    """CRITICAL when provider info does not claim request parameter support."""
    conv = setup_conv()['conv']
    # No request_parameter_supported entry at all.
    conv.entity.provider_info = ProviderConfigurationResponse(
        jwks_uri='http://example.com/jwks.json')
    check = CheckRequestParameterSupported()
    check._kwargs = {}
    check._func(conv)
    assert check._status == CRITICAL
def test_check_request_uri_parameter_supported_support():
    """OK when provider info advertises request_uri_parameter_supported=True."""
    conv = setup_conv()['conv']
    conv.entity.provider_info = ProviderConfigurationResponse(
        jwks_uri='http://example.com/jwks.json',
        request_uri_parameter_supported=True)
    check = CheckRequestURIParameterSupported()
    check._kwargs = {}
    check._func(conv)
    assert check._status == OK
def test_check_request_uri_parameter_supported_missing():
    """A missing request_uri_parameter_supported value counts as supported."""
    conv = setup_conv()['conv']
    provider_info = ProviderConfigurationResponse(
        jwks_uri='http://example.com/jwks.json')
    # The message class inserts a default value; strip it so the entry is
    # genuinely absent.
    del provider_info['request_uri_parameter_supported']
    conv.entity.provider_info = provider_info
    check = CheckRequestURIParameterSupported()
    check._kwargs = {}
    check._func(conv)
    assert check._status == OK
def test_check_request_uri_parameter_supported_not_support():
    """CRITICAL when request_uri_parameter_supported is explicitly False."""
    conv = setup_conv()['conv']
    conv.entity.provider_info = ProviderConfigurationResponse(
        jwks_uri='http://example.com/jwks.json',
        request_uri_parameter_supported=False)
    check = CheckRequestURIParameterSupported()
    check._kwargs = {}
    check._func(conv)
    assert check._status == CRITICAL
def test_claims_check():
    """ClaimsCheck is OK when the required id_token claims are present."""
    conv = setup_conv()['conv']
    # Fixture token response — presumably its id_token carries auth_time.
    conv.events.store(EV_PROTOCOL_RESPONSE, ACCESS_TOKEN_RESPONSE_1)
    check = ClaimsCheck()
    check._kwargs = {'required': True, 'id_token': ['auth_time']}
    check._func(conv)
    assert check._status == OK
def test_claims_check_missing():
    """ClaimsCheck flags ERROR when a required id_token claim is missing."""
    conv = setup_conv()['conv']
    # Fixture token response — presumably its id_token lacks auth_time.
    conv.events.store(EV_PROTOCOL_RESPONSE, ACCESS_TOKEN_RESPONSE_2)
    check = ClaimsCheck()
    check._kwargs = {'required': True, 'id_token': ['auth_time']}
    check._func(conv)
    assert check._status == ERROR
def test_different_sub():
    """CheckUserID is OK when the two stored id_tokens carry different subs."""
    conv = setup_conv()['conv']

    def token_response(aud, sub):
        # Minimal AccessTokenResponse wrapping an id_token with given aud/sub.
        return AccessTokenResponse(
            access_token="<KEY>",
            expires_in=7200,
            token_type="Bearer",
            id_token={
                "aud": [aud],
                "exp": 1493066674,
                "iat": 1493059474,
                "iss": "https://guarded-cliffs-8635.herokuapp.com",
                "nonce": "WZ3PuYEnGxcM6ddf",
                "sub": sub,
            })

    # One public and one pairwise subject identifier.
    conv.events.store(EV_PROTOCOL_RESPONSE, token_response("one", "public sub"))
    conv.events.store(EV_PROTOCOL_RESPONSE,
                      token_response("other", "pairwise sub"))
    check = CheckUserID()
    check._kwargs = {}
    check._func(conv)
    assert check._status == OK
def test_different_sub_same():
    """CheckUserID flags ERROR when both id_tokens carry the same sub."""
    conv = setup_conv()['conv']

    def token_response(aud, sub):
        # Minimal AccessTokenResponse wrapping an id_token with given aud/sub.
        return AccessTokenResponse(
            access_token="<KEY>",
            expires_in=7200,
            token_type="Bearer",
            id_token={
                "aud": [aud],
                "exp": 1493066674,
                "iat": 1493059474,
                "iss": "https://guarded-cliffs-8635.herokuapp.com",
                "nonce": "WZ3PuYEnGxcM6ddf",
                "sub": sub,
            })

    # Both responses reuse the same subject identifier.
    conv.events.store(EV_PROTOCOL_RESPONSE,
                      token_response("one", "pairwise sub"))
    conv.events.store(EV_PROTOCOL_RESPONSE,
                      token_response("other", "pairwise sub"))
    check = CheckUserID()
    check._kwargs = {}
    check._func(conv)
    assert check._status == ERROR
def test_encrypted_userinfo():
    """CheckEncryptedUserInfo is OK when the userinfo came back as a JWE."""
    conv = setup_conv()['conv']
    # Encrypt a minimal UserInfo with the test RSA encryption keys.
    userinfo = OpenIDSchema(**{'sub': 'an identifier'})
    enc_keys = [k.key for k in
                KEYJAR.keys_by_alg_and_usage('', alg="RSA1_5", usage='enc')]
    jwe_token = userinfo.to_jwe({'rsa': enc_keys}, alg="RSA1_5",
                                enc="A128CBC-HS256")
    conv.events.store(EV_RESPONSE, jwe_token)
    # Decrypt it again and store the parsed message, keeping the JWE header
    # around so the check can inspect the encryption parameters.
    key_reps = keyitems2keyreps({'rsa': enc_keys})
    decrypter = factory(jwe_token)
    plaintext = decrypter.decrypt(jwe_token, key_reps)
    parsed = OpenIDSchema().from_json(as_unicode(plaintext))
    parsed.jwe_header = decrypter.jwt.headers
    conv.events.store(EV_PROTOCOL_RESPONSE, parsed)
    check = CheckEncryptedUserInfo()
    check._kwargs = {}
    check._func(conv)
    assert check._status == OK
def test_is_idtoken_signed():
    """IsIDTokenSigned accepts an id_token whose JWS header names an alg."""
    conv = setup_conv()['conv']
    response = AccessTokenResponse(
        access_token="<KEY>",
        expires_in=7200,
        token_type="Bearer",
        id_token={
            "aud": ["one"],
            "exp": 1493066674,
            "iat": 1493059474,
            "iss": "https://guarded-cliffs-8635.herokuapp.com",
            "nonce": "WZ3PuYEnGxcM6ddf",
            "sub": "pairwise sub"
        })
    # Shortcut: fake a verified IdToken carrying a signing header.
    id_token = IdToken(**response['id_token'])
    id_token.jws_header = {'alg': 'RSA'}
    response['id_token'] = id_token
    conv.events.store(EV_PROTOCOL_RESPONSE, response)
    check = IsIDTokenSigned()
    check._kwargs = {}
    check._func(conv)
    assert check._status == OK
def test_is_idtoken_signed_alg_none():
    """alg=none still yields OK, but with an explanatory message."""
    conv = setup_conv()['conv']
    response = AccessTokenResponse(
        access_token="<KEY>",
        expires_in=7200,
        token_type="Bearer",
        id_token={
            "aud": ["one"],
            "exp": 1493066674,
            "iat": 1493059474,
            "iss": "https://guarded-cliffs-8635.herokuapp.com",
            "nonce": "WZ3PuYEnGxcM6ddf",
            "sub": "pairwise sub"
        })
    # Shortcut: fake a verified IdToken with an unsigned (alg=none) header.
    id_token = IdToken(**response['id_token'])
    id_token.jws_header = {'alg': 'none'}
    response['id_token'] = id_token
    conv.events.store(EV_PROTOCOL_RESPONSE, response)
    check = IsIDTokenSigned()
    check._kwargs = {}
    check._func(conv)
    assert check._status == OK
    assert check._message == 'ID Token signed using alg=none'
def test_multiple_sign_on():
    """MultipleSignOn is OK when the two id_tokens have distinct auth_time."""
    conv = setup_conv()['conv']

    def token_response(auth_time):
        # Identical tokens except for the authentication time.
        return AccessTokenResponse(
            access_token="<KEY>",
            expires_in=7200,
            token_type="Bearer",
            id_token={
                "aud": ["one"],
                "exp": 1493066674,
                "iat": 1493059474,
                "iss": "https://guarded-cliffs-8635.herokuapp.com",
                "nonce": "WZ3PuYEnGxcM6ddf",
                "sub": "pairwise sub",
                "auth_time": auth_time,
            })

    conv.events.store(EV_PROTOCOL_RESPONSE, token_response(1493059470))
    conv.events.store(EV_PROTOCOL_RESPONSE, token_response(1493059480))
    check = MultipleSignOn()
    check._kwargs = {}
    check._func(conv)
    assert check._status == OK
def test_multiple_sign_on_same():
    """MultipleSignOn flags ERROR when both id_tokens share one auth_time."""
    conv = setup_conv()['conv']

    def token_response(auth_time):
        # Identical tokens except for the authentication time.
        return AccessTokenResponse(
            access_token="<KEY>",
            expires_in=7200,
            token_type="Bearer",
            id_token={
                "aud": ["one"],
                "exp": 1493066674,
                "iat": 1493059474,
                "iss": "https://guarded-cliffs-8635.herokuapp.com",
                "nonce": "WZ3PuYEnGxcM6ddf",
                "sub": "pairwise sub",
                "auth_time": auth_time,
            })

    # Same auth_time twice: no new sign-on happened.
    conv.events.store(EV_PROTOCOL_RESPONSE, token_response(1493059470))
    conv.events.store(EV_PROTOCOL_RESPONSE, token_response(1493059470))
    check = MultipleSignOn()
    check._kwargs = {}
    check._func(conv)
    assert check._status == ERROR
def test_new_encryption_keys():
    """NewEncryptionKeys is OK when the second bundle holds a fresh RSA key."""
    conv = setup_conv()['conv']
    # Two bundles, each built from an independently generated key.
    first = KeyBundle([{"kty": "rsa", "key": RSA.generate(2048),
                        "use": "enc"}])
    second = KeyBundle([{"kty": "rsa", "key": RSA.generate(2048),
                         "use": "enc"}])
    conv.keybundle = [first, second]
    check = NewEncryptionKeys()
    check._kwargs = {}
    check._func(conv)
    assert check._status == OK
def test_new_encryption_keys_same():
    """NewEncryptionKeys warns when the second bundle reuses the same key."""
    conv = setup_conv()['conv']
    # Both bundles wrap one and the same RSA key.
    shared_key = RSA.generate(2048)
    first = KeyBundle([{"kty": "rsa", "key": shared_key, "use": "enc"}])
    second = KeyBundle([{"kty": "rsa", "key": shared_key, "use": "enc"}])
    conv.keybundle = [first, second]
    check = NewEncryptionKeys()
    check._kwargs = {}
    check._func(conv)
    assert check._status == WARNING
def test_new_signing_keys():
    """NewSigningKeys is OK when the second bundle holds a fresh RSA key."""
    conv = setup_conv()['conv']
    # Two bundles, each built from an independently generated signing key.
    first = KeyBundle([{"kty": "rsa", "key": RSA.generate(2048),
                        "use": "sig"}])
    second = KeyBundle([{"kty": "rsa", "key": RSA.generate(2048),
                         "use": "sig"}])
    conv.keybundle = [first, second]
    check = NewSigningKeys()
    check._kwargs = {}
    check._func(conv)
    assert check._status == OK
def test_new_signing_keys_same():
    """NewSigningKeys warns when the second bundle reuses the same key."""
    conv = setup_conv()['conv']
    # Both bundles wrap one and the same RSA signing key.
    shared_key = RSA.generate(2048)
    first = KeyBundle([{"kty": "rsa", "key": shared_key, "use": "sig"}])
    second = KeyBundle([{"kty": "rsa", "key": shared_key, "use": "sig"}])
    conv.keybundle = [first, second]
    check = NewSigningKeys()
    check._kwargs = {}
    check._func(conv)
    assert check._status == WARNING
def test_providerinfo_has_jwks_uri():
"""
arg=None
"""
_info = setup_conv()
conv = _info['conv']
pcr = {
'jwks_uri': 'http://example.com/jwks.json'
}
conv.entity.provider_info = ProviderConfigurationResponse(**pcr)
chk = CheckHasJwksURI()
kwargs = {}
chk._kwargs = kwargs
res = chk._func(conv)
assert | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import itertools
import os, shutil
"""Used for process original data."""
class Seq:
    """One named biological sequence plus its ordinal number in the input."""

    def __init__(self, name, seq, no):
        self.name = name
        self.no = no
        # Sequences are normalised to upper case; length is taken from the
        # raw input string.
        self.seq = seq.upper()
        self.length = len(seq)

    def __str__(self):
        """Render name, ordinal, length and the sequence on two lines."""
        return "%s\tNo:%s\tlength:%s\n%s" % (
            self.name, str(self.no), str(self.length), self.seq)
def is_under_alphabet(s, alphabet):
    """Check that every character of `s` belongs to `alphabet`.

    :param s: the string to examine.
    :param alphabet: allowed characters.
    :return: True when the whole string is legal, otherwise the first
        offending character.
    """
    offender = next((ch for ch in s if ch not in alphabet), None)
    return True if offender is None else offender
def is_fasta(seq):
    """Validate a single Seq record as legal FASTA.

    Rejects a record with no name, a '>' inside the name, or an empty
    sequence; complaints go to stderr.

    :param seq: Seq object.
    :return: True when the record is acceptable, else False.
    """
    if not seq.name:
        # Show the offending record before complaining.
        print(seq)
        sys.stderr.write('Error, sequence ' + str(seq.no) + ' has no sequence name.')
        return False
    if '>' in seq.name:
        sys.stderr.write('Error, sequence ' + str(seq.no) + ' name has > character.')
        return False
    if seq.length == 0:
        sys.stderr.write('Error, sequence ' + str(seq.no) + ' is null.')
        return False
    return True
def read_fasta(f):
    """Read a FASTA file into a list of Seq objects.

    :param f: HANDLE to input. e.g. sys.stdin, or open(<file>)
    :return: list of Seq objects; exits via sys.exit(0) when a record
        fails is_fasta validation.
    """
    name, seq = '', ''
    count = 0
    seq_list = []
    lines = f.readlines()
    for line in lines:
        if not line:
            break
        if '>' == line[0]:
            # Flush the previously collected record (nothing precedes the
            # very first header: count == 0 and seq == '').
            if 0 != count or (0 == count and seq != ''):
                if is_fasta(Seq(name, seq, count)):
                    seq_list.append(Seq(name, seq, count))
                else:
                    sys.exit(0)
            seq = ''
            name = line[1:].strip()
            count += 1
        else:
            seq += line.strip()
            # BUGFIX: the original also did `count += 1` here, so every
            # sequence line inflated the record counter and Seq.no no
            # longer matched the record's ordinal (read_fasta_yield, the
            # sibling implementation, only counts headers).
    # Flush the final record.
    if is_fasta(Seq(name, seq, count)):
        seq_list.append(Seq(name, seq, count))
    else:
        sys.exit(0)
    return seq_list
def read_fasta_yield(f):
    """Lazily parse FASTA records from a handle, yielding Seq objects.

    Exits via sys.exit(0) when a record fails is_fasta validation.

    :param f: HANDLE to input. e.g. sys.stdin, or open(<file>)
    """
    name, seq = '', ''
    count = 0
    # readline() returns '' only at EOF, which terminates the iterator.
    for line in iter(f.readline, ''):
        if line.startswith('>'):
            # Emit the record collected so far (nothing before the first
            # header: count == 0 and seq == '').
            if count != 0 or seq != '':
                if not is_fasta(Seq(name, seq, count)):
                    sys.exit(0)
                yield Seq(name, seq, count)
            seq = ''
            name = line[1:].strip()
            count += 1
        else:
            seq += line.strip()
    # Emit the final record.
    if not is_fasta(Seq(name, seq, count)):
        sys.exit(0)
    yield Seq(name, seq, count)
def read_fasta_check_dna(f, alphabet):
    """Read a FASTA file and check every sequence against `alphabet`.

    :param f: HANDLE to input. e.g. sys.stdin, or open(<file>)
    :param alphabet: string of allowed characters.
    :return: list of Seq objects; exits with a message on the first
        illegal character.
    """
    seq_list = []
    for e in read_fasta_yield(f):
        res = is_under_alphabet(e.seq, alphabet)
        # BUGFIX: is_under_alphabet returns True when legal, otherwise the
        # offending CHARACTER — which is also truthy.  The original's bare
        # `if res:` therefore accepted every sequence and never reached the
        # error branch.  Test identity with True, exactly as the sibling
        # get_sequence_check_dna does.
        if res is True:
            seq_list.append(e)
        else:
            error_info = 'Sorry, sequence ' + str(e.no) \
                         + ' has character ' + str(res) + '.(The character must be ' + alphabet + ').'
            sys.exit(error_info)
    return seq_list
def get_sequence_check_dna(f, alphabet):
    """Read a FASTA handle and return the raw sequence strings.

    Exits with a message as soon as any sequence contains a character
    outside `alphabet`.

    :param f: HANDLE to input. e.g. sys.stdin, or open(<file>)
    :return: list of sequence strings.
    """
    sequence_list = []
    for record in read_fasta_yield(f):
        verdict = is_under_alphabet(record.seq, alphabet)
        if verdict is True:
            sequence_list.append(record.seq)
        else:
            # verdict holds the offending character.
            sys.exit('Error, sequence ' + str(record.no)
                     + ' has character ' + str(verdict) + '.(The character must be ' + alphabet + ').')
    return sequence_list
def is_sequence_list(sequence_list, alphabet):
    """Upper-case every sequence and verify it against `alphabet`.

    :return: the upper-cased list, or False on the first illegal
        character (a complaint is written to stderr).
    """
    checked = []
    for position, item in enumerate(sequence_list, start=1):
        item = item.upper()
        verdict = is_under_alphabet(item, alphabet)
        if verdict is not True:
            # verdict holds the offending character.
            sys.stderr.write('Sorry, sequence ' + str(position)
                             + ' has illegal character ' + str(verdict)
                             + '.(The character must be A, C, G or T)')
            return False
        checked.append(item)
    return checked
def get_data(input_data, alphabet, desc=False):
    """Fetch checked sequence data from a file handle or a list.

    :param input_data: file-like object or list of strings.
    :param alphabet: allowed characters.
    :param desc: when True (file input only) return Seq objects instead
        of bare sequence strings.
    :return: sequence data, or terminate the process on bad input.
    """
    if hasattr(input_data, 'read'):
        # File-like input: pick the representation the caller asked for.
        reader = get_sequence_check_dna if desc is False else read_fasta_check_dna
        return reader(input_data, alphabet)
    if isinstance(input_data, list):
        checked = is_sequence_list(input_data, alphabet)
        if checked is False:
            sys.exit(0)
        return checked
    sys.exit('Sorry, the parameter in get_data method must be list or file type.')
"""Some basic function for generate feature vector."""
def frequency(tol_str, tar_str):
    """Count (possibly overlapping) occurrences of tar_str in tol_str.

    :param tol_str: mother string.
    :param tar_str: substring.
    :return: number of start positions where tar_str matches.
    """
    # The original scan-with-backtrack never matched an empty target.
    if not tar_str:
        return 0
    width = len(tar_str)
    return sum(1 for start in range(len(tol_str) - width + 1)
               if tol_str.startswith(tar_str, start))
def write_libsvm(vector_list, label_list, write_file):
    """Write labelled vectors to disk in libSVM sparse format.

    Exits with a message on empty or mismatched inputs.
    """
    if not vector_list:
        sys.exit("The vector is none.")
    if not label_list:
        sys.exit("The label is none.")
    if len(vector_list) != len(label_list):
        sys.exit("The length of vector and label is different.")
    with open(write_file, 'w') as f:
        for label, vec in zip(label_list, vector_list):
            # "<label> 1:<v1> 2:<v2> ..." — indices are 1-based.
            fields = [str(label)]
            fields.extend(str(col + 1) + ':' + str(val)
                          for col, val in enumerate(vec))
            f.write(' '.join(fields))
            f.write('\n')
def write_tab(_vecs, write_file):
    """Write the vectors into disk, one tab-separated row per vector."""
    with open(write_file, 'w') as f:
        for vec in _vecs:
            # First value unprefixed, the rest tab-prefixed (vectors are
            # expected to be non-empty, as in the original).
            f.write(str(vec[0]))
            for val in vec[1:]:
                f.write('\t' + str(val))
            f.write('\n')
def write_csv(_vecs, write_file):
    """Write the vectors into disk in csv format."""
    import csv
    # BUGFIX: the original opened the file in 'wb'; under Python 3 (which
    # this module targets — print() calls, list(zip(...))) csv.writer
    # emits str and writing fails.  The csv docs require text mode with
    # newline='' for writer output.
    with open(write_file, 'w', newline='') as csvfile:
        spamwriter = csv.writer(csvfile, delimiter=',',
                                quotechar='|', quoting=csv.QUOTE_MINIMAL)
        for vec in _vecs:
            spamwriter.writerow(vec)
def convert_phyche_index_to_dict(phyche_index, alphabet):
    """Convert a physicochemical index from a list of rows to a k-mer dict.

    Each row must hold exactly 4**k values; the i-th column of the table
    becomes the value list of the i-th k-mer.
    """
    len_index_value = len(phyche_index[0])
    # Derive k from the row width; it must be an exact power of 4.
    k = 0
    for candidate in range(1, 10):
        if len_index_value < 4 ** candidate:
            sys.exit("Sorry, the number of each index value is must be 4^k.")
        if len_index_value == 4 ** candidate:
            k = candidate
            break
    kmer_list = make_kmer_list(k, alphabet)
    # Transpose the table: one column of values per k-mer.
    columns = list(zip(*phyche_index))
    return {kmer: list(col) for kmer, col in zip(kmer_list, columns)}
def make_kmer_list(k, alphabet):
    """Return every k-mer over `alphabet`, in itertools.product order.

    :param k: k-mer length, an int > 0.
    :param alphabet: string of letters.
    :raises TypeError, ValueError: re-raised from itertools.product after
        printing a diagnostic.
    """
    try:
        return ["".join(e) for e in itertools.product(alphabet, repeat=k)]
    except TypeError:
        # BUGFIX (messages only): "inter" -> "integer".
        print("TypeError: k must be an integer and larger than 0, alphabet must be a string.")
        raise TypeError
    except ValueError:
        # BUGFIX (messages only): this branch was mislabeled "TypeError:".
        print("ValueError: k must be an integer and larger than 0")
        raise ValueError
def standard_deviation(value_list):
    """Return the sample standard deviation (n - 1 denominator)."""
    from math import sqrt
    n = len(value_list)
    mean = sum(value_list) * 1.0 / n
    variance = sum((v - mean) ** 2 for v in value_list) * 1.0 / (n - 1)
    return sqrt(variance)
def normalize_index(phyche_index, alphabet, is_convert_dict=False):
    """Normalize each physicochemical index row to zero mean / unit sd.

    Values are rounded to 2 decimals.  When `is_convert_dict` is True the
    normalized table is returned as a k-mer dict; otherwise the table is
    printed (as in the original) and returned as a list of rows.
    """
    normalized = []
    for row in phyche_index:
        mean = sum(row) * 1.0 / len(row)
        sd = standard_deviation(row)
        normalized.append([round((v - mean) / sd, 2) for v in row])
    if is_convert_dict is True:
        return convert_phyche_index_to_dict(normalized, alphabet)
    print(normalized)
    return normalized
def read_k(alphabet, _method, k):
    """Resolve the k value for a given alphabet and method.

    Protein is fixed at 1 and RNA at 2; for DNA the method tables in
    `const` decide, and PseKNC passes the caller's k through.  Returns
    None (after printing an error) for unknown DNA methods.
    """
    import const
    if alphabet == 'Protein':
        return 1
    if alphabet == 'RNA':
        return 2
    # DNA: dispatch on the method.
    if _method in const.K_2_DNA_METHODS:
        return 2
    if _method in const.K_3_DNA_METHODS:
        return 3
    if _method == 'PseKNC':
        return k
    print("Error in read_k.")
def check_args(args, filename):
"""Check pse and acc method args."""
import const
if 'w' in args:
if args.w < 0 or args.w > 1:
print("Error: The value of w must be no less than 0 and no larger than 1.")
return False
if 'method' in args:
if args.alphabet == 'DNA' and args.method not in const.METHODS_DNA:
if filename == const.ACC_FILENAME:
print("Error: the DNA method parameter can only be " + str(const.METHODS_DNA_ACC))
if filename == const.PSE_FILENAME:
print("Error: the DNA method parameter can only be " + str(const.METHODS_DNA_PSE))
else:
print("Error: the DNA method parameter error.")
return False
elif args.alphabet == 'RNA' and args.method not in const.METHODS_RNA:
if filename == const.ACC_FILENAME:
print("Error: the RNA method parameter can only be " + str(const.METHODS_RNA_ACC))
if filename == const.PSE_FILENAME:
print("Error: the RNA method parameter can only be " + str(const.METHODS_RNA_PSE))
else:
print("Error: the RNA method parameter error.")
return False
elif args.alphabet == 'Protein' and args.method not in const.METHODS_PROTEIN:
if filename == const.ACC_FILENAME:
print("Error: the protein method parameter can only be " + str(const.METHODS_PROTEIN_ACC))
if filename == | |
"b_bot")
ps = geometry_msgs.msg.PoseStamped()
ps.header.frame_id = set_name + "tray_2_screw_m" + str(screw_size) + "_" + str(screw_number)
if set_name == "set_1_":
self.go_to_named_pose("feeder_pick_ready", "c_bot")
ps.pose.orientation = geometry_msgs.msg.Quaternion(*tf_conversions.transformations.quaternion_from_euler(pi/2, 0, 0))
elif set_name == "set_2_":
self.go_to_named_pose("feeder_pick_ready", "c_bot")
ps.pose.orientation = geometry_msgs.msg.Quaternion(*tf_conversions.transformations.quaternion_from_euler(pi/4, 0, 0))
elif set_name == "set_3_":
self.go_to_named_pose("screw_place_ready_near_b_bot", "c_bot")
ps.pose.orientation = geometry_msgs.msg.Quaternion(*tf_conversions.transformations.quaternion_from_euler(0, 0, 0))
self.do_place_action("c_bot", ps, tool_name="screw_tool", screw_size = 4)
return
def assembly_calibration_initial_plates(self):
    """Calibrate the initial poses of plates 3 and 2 with c_bot.

    For each plate: hover above its reference frame, descend, close the
    gripper, then wait for the operator before opening and moving on.
    """
    rospy.loginfo("============ Going to above plate 2 and 3 with c_bot. ============")
    self.go_to_named_pose("home", "c_bot")

    # --- Plate 3 (frame at the top corner of the big plate) ---
    pickup_plate_3 = geometry_msgs.msg.PoseStamped()
    pickup_plate_3.header.frame_id = "initial_assy_part_03_pulley_ridge_bottom"  # The top corner of the big plate
    pickup_plate_3.pose.orientation = geometry_msgs.msg.Quaternion(*tf.transformations.quaternion_from_euler(0, pi/2, -pi/2))
    pickup_plate_3.pose.position.x = 0.0025
    pickup_plate_3.pose.position.z = .05    # hover first
    self.go_to_pose_goal("c_bot", pickup_plate_3, speed=.5, end_effector_link="", move_lin = True)
    pickup_plate_3.pose.position.z = -0.03  # descend onto the plate
    self.go_to_pose_goal("c_bot", pickup_plate_3, speed=.5, end_effector_link="", move_lin = True)
    self.send_gripper_command("c_bot", "close")
    # BUGFIX (message only): the prompt said "go to plate 3", but the next
    # motion after Enter goes to plate 2.
    rospy.loginfo("============ Press `Enter` to open gripper, go to plate 2 and close gripper")
    raw_input()
    self.send_gripper_command("c_bot", "open")
    rospy.sleep(2.0)
    pickup_plate_3.pose.position.z = .05    # retreat
    self.go_to_pose_goal("c_bot", pickup_plate_3, speed=.5, end_effector_link="", move_lin = True)

    # --- Plate 2 (frame at the top corner of the motor plate) ---
    pickup_plate_2 = copy.deepcopy(pickup_plate_3)
    pickup_plate_2.header.frame_id = "initial_assy_part_02_back_hole"  # The top corner of the motor plate
    pickup_plate_2.pose.orientation = geometry_msgs.msg.Quaternion(*tf.transformations.quaternion_from_euler(0, pi/2, pi/2))
    self.go_to_pose_goal("c_bot", pickup_plate_2, speed=.5, end_effector_link="", move_lin = True)
    pickup_plate_2.pose.position.z = -.03   # descend onto the plate
    self.go_to_pose_goal("c_bot", pickup_plate_2, speed=.5, end_effector_link="", move_lin = True)
    self.send_gripper_command("c_bot", "close")
    # BUGFIX (message only): this prompt also said "go to plate 3"; after
    # Enter the gripper opens and the robot goes home.
    rospy.loginfo("============ Press `Enter` to open gripper and go home")
    raw_input()
    self.send_gripper_command("c_bot", "open")
    rospy.sleep(2.0)
    pickup_plate_2.pose.position.z = .05    # retreat
    self.go_to_pose_goal("c_bot", pickup_plate_2, speed=.5, end_effector_link="", move_lin = True)
    self.go_to_named_pose("home", "c_bot")
    return
def screw_tool_tests(self):
    """Interactive pose check of the M4 screw tool on b_bot.

    Moves the tool tip to a pose near workspace_center at low speed,
    paced by operator key presses, then returns to screw_ready.
    """
    rospy.loginfo("============ Calibrating screw_tool M4 with b_bot. ============")
    self.go_to_named_pose("screw_ready", "b_bot")
    poses = []  # NOTE(review): never used in this method
    ps = geometry_msgs.msg.PoseStamped()
    ps.header.frame_id = "workspace_center"
    # NOTE(review): orientation/position set here are overwritten below
    # before any motion command uses them (dead stores).
    ps.pose.orientation = geometry_msgs.msg.Quaternion(*tf_conversions.transformations.quaternion_from_euler(0, pi, 0))
    ps.pose.position.x = .0
    ps.pose.position.y = .0
    ps.pose.position.z = .05
    rospy.loginfo("============ Press enter to hold tool vertically. ============")
    i = raw_input()
    # The pose that is actually commanded.
    ps.pose.orientation = geometry_msgs.msg.Quaternion(*tf_conversions.transformations.quaternion_from_euler(0, pi/2, -pi/2))
    ps.pose.position.x = -.01
    ps.pose.position.y = .0
    ps.pose.position.z = .05
    self.publish_marker(ps, "pose")
    # Move b_bot so the screw-tool tip link reaches the target, very slowly.
    self.groups["b_bot"].set_pose_target(ps, end_effector_link="b_bot_screw_tool_m4_tip_link")
    self.groups["b_bot"].set_max_velocity_scaling_factor(.05)
    self.groups["b_bot"].go()
    self.groups["b_bot"].stop()
    self.groups["b_bot"].clear_pose_targets()
    rospy.loginfo("============ Press enter to go home. ============")
    raw_input()
    self.go_to_named_pose("screw_ready", "b_bot")
    return
def screw_holder_tests(self, robot_name="b_bot"):
    """Cycle through the three screw-tool holder frames (m4, m3, m6).

    :param robot_name: "b_bot" (gets a joint-space pre-pose first) or
        "c_bot".
    """
    rospy.loginfo("============ Going to screw tool holder with " + robot_name + ". ============")
    if robot_name == "b_bot":
        # Joint-space pre-pose before the Cartesian holder poses.
        self.groups["b_bot"].set_joint_value_target([-30.0 * pi/180.0, -48 * pi/180.0, 96 * pi/180.0,
                                                     -50 * pi/180.0, -27 * pi/180.0, -180 * pi/180.0])
        self.groups["b_bot"].set_max_velocity_scaling_factor(.2)
        self.groups["b_bot"].go(wait=True)
        self.groups["b_bot"].stop()
    poses = []
    pose0 = geometry_msgs.msg.PoseStamped()
    pose0.header.frame_id = "screw_tool_m4_helper_link"
    # Robot-specific offset/orientation relative to each holder frame.
    if robot_name == "b_bot":
        pose0.pose.orientation = geometry_msgs.msg.Quaternion(*tf_conversions.transformations.quaternion_from_euler(0, 0, 0))
        pose0.pose.position.x -= .03
        pose0.pose.position.z = .017
    elif robot_name == "c_bot":
        pose0.pose.orientation = geometry_msgs.msg.Quaternion(*tf_conversions.transformations.quaternion_from_euler(0, pi/2, pi))
        pose0.pose.position.z = .02
    # Same relative pose at each of the three holder frames.
    for i in range(3):
        poses.append(copy.deepcopy(pose0))
    poses[1].header.frame_id = "screw_tool_m3_helper_link"
    poses[2].header.frame_id = "screw_tool_m6_helper_link"
    self.cycle_through_calibration_poses(poses, robot_name, speed=0.3, go_home=False)
    return
def idler_pin_holder_calibration(self, robot_name="b_bot"):
    """Calibrate the retainer (idler) pin holder pick pose.

    Parks the other robots, approaches the holder, then pauses for the
    operator to confirm gripper close/open at the pick pose.

    :param robot_name: "a_bot" or "b_bot"; c_bot cannot reach the holder.
    """
    # BUGFIX (message only): the original said "screw tool holder" —
    # copy-pasted from screw_holder_tests; this routine targets the
    # retainer pin holder.
    rospy.loginfo("============ Going to retainer pin holder with " + robot_name + ". ============")
    if robot_name == "b_bot":
        self.go_to_named_pose("back", "a_bot")
    elif robot_name == "a_bot":
        self.go_to_named_pose("back", "b_bot")
        self.go_to_named_pose("back", "c_bot")
    elif robot_name == "c_bot":
        rospy.loginfo("c_bot cannot reach.")
        return
    pick_pose = geometry_msgs.msg.PoseStamped()
    pick_pose.header.frame_id = "retainer_pin_holder_link"
    pick_pose.pose.orientation = geometry_msgs.msg.Quaternion(*tf_conversions.transformations.quaternion_from_euler(0, pi/2, 0))
    # pick_pose.pose.position.z = .001 #up/down
    approach_pose = copy.deepcopy(pick_pose)
    approach_pose.pose.position.z = .03  # hover above the holder first
    self.go_to_named_pose("home", robot_name)
    self.send_gripper_command(robot_name, "open")
    self.go_to_pose_goal(robot_name, approach_pose, speed=.5, move_lin = True)
    self.go_to_pose_goal(robot_name, pick_pose, speed=.1, move_lin = True)
    rospy.loginfo("Press enter to close the gripper")
    raw_input()
    self.send_gripper_command(robot_name, "close")
    rospy.loginfo("Press enter to open and go back up")
    raw_input()
    # Open fully, then to .06, then open again (kept from the original;
    # presumably works around gripper driver behavior — confirm).
    self.send_gripper_command(robot_name, "open")
    self.send_gripper_command(robot_name, .06)
    self.send_gripper_command(robot_name, "open")
    self.go_to_pose_goal(robot_name, approach_pose, speed=.5, move_lin = True)
    self.go_to_named_pose("home", robot_name)
    return
def screw_tool_test_assembly(self, robot_name = "b_bot", tool_name="_screw_tool_m4_tip_link"):
    """Move the mounted M4 screw tool to the four base-plate corners.

    :param robot_name: robot carrying the tool.
    :param tool_name: suffix of the end-effector link to command.
    """
    rospy.loginfo("============ Moving the screw tool m4 to the four corners of the base plate ============")
    rospy.loginfo("============ The screw tool m4 has to be carried by the robot! ============")
    # Park the other robot out of the way.
    if robot_name=="b_bot":
        self.go_to_named_pose("back", "c_bot")
    elif robot_name=="c_bot":
        self.go_to_named_pose("back", "b_bot")
    self.go_to_named_pose("screw_ready", robot_name)
    poses = []
    pose0 = geometry_msgs.msg.PoseStamped()
    pose0.header.frame_id = "assembled_assy_part_01_corner_2"
    # Robot-specific orientation; x backs off from the corner frame.
    if robot_name=="b_bot":
        pose0.pose.orientation = geometry_msgs.msg.Quaternion(*tf_conversions.transformations.quaternion_from_euler(pi/4, 0, 0))
        pose0.pose.position.x -= .02
    elif robot_name=="c_bot":
        pose0.pose.orientation = geometry_msgs.msg.Quaternion(*tf_conversions.transformations.quaternion_from_euler(pi/2, 0, 0))
        pose0.pose.position.x -= .02
    # One waypoint per corner, visited 2 -> 3 -> 4 -> 1.
    for i in range(4):
        poses.append(copy.deepcopy(pose0))
    poses[1].header.frame_id = "assembled_assy_part_01_corner_3"
    poses[2].header.frame_id = "assembled_assy_part_01_corner_4"
    poses[3].header.frame_id = "assembled_assy_part_01_corner_1"
    end_effector_link=robot_name+ tool_name
    self.cycle_through_calibration_poses(poses, robot_name, speed=0.3, go_home=False, move_lin=True, end_effector_link=end_effector_link)
    return
def tray_screw_calibration(self, robot_name = "b_bot", end_effector_link="", task="assembly", set_number=1):
    """Visit a set of screw positions in tray 2 for calibration.

    :param robot_name: robot to move ("a_bot", "b_bot" or "c_bot").
    :param end_effector_link: optional tool tip link; also selects the
        starting named pose.
    :param task: "assembly" or "kitting"; selects the approach
        orientation and whether the "set_N_" frame prefix is stripped.
    :param set_number: which tray set's frames to target.
    """
    rospy.loginfo("============ Moving " + robot_name + " " + end_effector_link + " to the screws in the tray ============")
    # Park the other robot out of the way.
    if robot_name=="b_bot":
        self.go_to_named_pose("back", "c_bot")
    elif robot_name=="c_bot":
        self.go_to_named_pose("back", "b_bot")
    # Starting pose depends on whether a tool is mounted.
    if end_effector_link=="":
        self.go_to_named_pose("home", robot_name)
    elif "screw" in end_effector_link:
        self.go_to_named_pose("screw_ready", robot_name)
    elif "suction" in end_effector_link:
        self.go_to_named_pose("screw_ready", robot_name)
    poses = []
    pose0 = geometry_msgs.msg.PoseStamped()
    pose0.header.frame_id = "set_" + str(set_number) + "_tray_2_screw_m4_1"
    pose0.pose.position.x = -0.007
    # Approach orientation per robot (and per task for a_bot/b_bot).
    if robot_name=="b_bot" or robot_name=="a_bot":
        pose0.pose.orientation = geometry_msgs.msg.Quaternion(*tf_conversions.transformations.quaternion_from_euler(-pi/2, 0, 0))
        if task=="kitting":
            pose0.pose.orientation = geometry_msgs.msg.Quaternion(*tf_conversions.transformations.quaternion_from_euler(-pi/2, 0, 0))
        if task=="assembly":
            pose0.pose.orientation = geometry_msgs.msg.Quaternion(*tf_conversions.transformations.quaternion_from_euler(-pi*11/12, 0, 0))
    elif robot_name=="c_bot":
        if set_number == 1:
            pose0.pose.orientation = geometry_msgs.msg.Quaternion(*tf_conversions.transformations.quaternion_from_euler(pi/2, 0, 0))
        elif set_number == 2:
            pose0.pose.orientation = geometry_msgs.msg.Quaternion(*tf_conversions.transformations.quaternion_from_euler(pi/4, 0, 0))
        elif set_number == 3:
            pose0.pose.orientation = geometry_msgs.msg.Quaternion(*tf_conversions.transformations.quaternion_from_euler(0, 0, 0))
    # Eight waypoints over different screw frames.  NOTE(review): poses[1]
    # keeps the default m4_1 frame — presumably an intentional second
    # visit after the far approach of poses[0]; confirm.
    for i in range(8):
        poses.append(copy.deepcopy(pose0))
    poses[0].pose.position.x = -.05
    poses[2].header.frame_id = "set_" + str(set_number) + "_tray_2_screw_m4_4"
    poses[3].header.frame_id = "set_" + str(set_number) + "_tray_2_screw_m4_7"
    poses[4].header.frame_id = "set_" + str(set_number) + "_tray_2_screw_m3_1"
    poses[5].header.frame_id = "set_" + str(set_number) + "_tray_2_screw_m3_4"
    poses[6].header.frame_id = "set_" + str(set_number) + "_tray_2_screw_m4_3"
    poses[7].header.frame_id = "set_" + str(set_number) + "_tray_2_screw_m3_6"
    if task=="assembly":
        for pose in poses:
            pose.header.frame_id = pose.header.frame_id[6:] # Removes "set_1_"
            rospy.loginfo("Shortened tray frame to: " + pose.header.frame_id)
    self.cycle_through_calibration_poses(poses, robot_name, speed=0.3, go_home=False, move_lin=True, end_effector_link=end_effector_link)
    return
def screw_pickup_test(self, robot_name = "b_bot"):
    """Publish the marker for an m4 screw pick pose.

    The M4 screw tool must be mounted on the robot.  The actual pick
    action is currently unreachable (see NOTE below).
    """
    rospy.loginfo("============ Picking up an m4 screw with the tool ============")
    rospy.loginfo("============ The screw tool m4 has to be carried by the robot! ============")
    # Park the other robot out of the way.
    if robot_name=="b_bot":
        self.go_to_named_pose("back", "c_bot")
    elif robot_name=="c_bot":
        self.go_to_named_pose("back", "b_bot")
    self.go_to_named_pose("screw_ready", robot_name)
    if robot_name=="b_bot":
        self.go_to_named_pose("screw_pick_ready", robot_name)
    pose0 = geometry_msgs.msg.PoseStamped()
    pose0.header.frame_id = "tray_2_screw_m4_1"
    # Robot-specific approach orientation and x offset.
    if robot_name=="b_bot":
        pose0.pose.orientation = geometry_msgs.msg.Quaternion(*tf_conversions.transformations.quaternion_from_euler(-pi*11/12, 0, 0))
        pose0.pose.position.x = -.01
    elif robot_name=="c_bot":
        pose0.pose.orientation = geometry_msgs.msg.Quaternion(*tf_conversions.transformations.quaternion_from_euler(-pi/2, 0, 0))
        pose0.pose.position.x = -.01
    self.publish_marker(pose0, "pose")
    print("published marker")
    # NOTE(review): this early return makes the do_pick_action call below
    # unreachable — looks like a debugging leftover; confirm before
    # removing it.
    return
    self.do_pick_action(robot_name, pose0, screw_size = 4, z_axis_rotation = 0.0, use_complex_planning = True, tool_name = "screw_tool")
    return
def screw_action_test(self, robot_name = "b_bot"):
    """Debug routine: drive *robot_name* (carrying the m4 screw tool plus a
    screw) to an assembly screw hole, run the fastening action, and retreat.
    """
    rospy.loginfo("============ Screwing in one of the plate screws with the tool using the action ============")
    rospy.loginfo("============ The screw tool m4 and a screw have to be carried by the robot! ============")
    # Park the robot that is not being used so the workspace is clear.
    if robot_name=="b_bot":
        self.go_to_named_pose("back", "c_bot")
    elif robot_name=="c_bot":
        self.go_to_named_pose("back", "b_bot")
    self.go_to_named_pose("screw_ready", robot_name)
    # Each robot has its own staging pose for screwing.
    if robot_name=="b_bot":
        self.go_to_named_pose("screw_plate_ready", robot_name)
    elif robot_name=="c_bot":
        self.go_to_named_pose("screw_ready_high", robot_name)
    # Target hole and tool orientation differ per robot.
    pose0 = geometry_msgs.msg.PoseStamped()
    if robot_name=="b_bot":
        pose0.header.frame_id = "assembled_assy_part_03_bottom_screw_hole_aligner_1"
        pose0.pose.orientation = geometry_msgs.msg.Quaternion(*tf_conversions.transformations.quaternion_from_euler(-pi/4, 0, 0))
        pose0.pose.position.x = -.01
    elif robot_name=="c_bot":
        pose0.header.frame_id = "assembled_assy_part_11_screw_head_2"
        pose0.pose.orientation = geometry_msgs.msg.Quaternion(*tf_conversions.transformations.quaternion_from_euler(pi, 0, 0))
        # Start retreated along x; the move below approaches the hole.
        pose0.pose.position.x = -.08
    # Approach linearly, with the screw-tool tip as the end effector.
    self.go_to_pose_goal(robot_name, pose0,speed=.3,end_effector_link=robot_name + "_screw_tool_m4_tip_link", move_lin = True)
    pose0.pose.position.x = -.01
    self.do_screw_action(robot_name, pose0, screw_size = 4, screw_height = .02)
    # Retreat after screwing.
    if robot_name=="b_bot":
        self.go_to_named_pose("screw_plate_ready", robot_name)
    elif robot_name=="c_bot":
        pose0.pose.position.x = -.08
        self.go_to_pose_goal(robot_name, pose0,speed=.3,end_effector_link=robot_name + "_screw_tool_m4_tip_link", move_lin = True)
    return
def screw_feeder_calibration(self, robot_name = "c_bot"):
    """Debug routine: cycle the m4 screw-tool tip over the m3 and m4 screw
    feeder outlets to verify the feeder frame calibration.

    Collisions are temporarily disabled so the tool may approach/touch the
    outlets; they are re-enabled before returning.
    """
    rospy.loginfo("============ Moving the screw tool m4 to the screw feeder ============")
    rospy.loginfo("============ The screw tool m4 has to be carried by the robot! ============")
    # Park the other robots so the workspace is clear.
    if robot_name=="c_bot":
        self.go_to_named_pose("back", "b_bot")
        self.go_to_named_pose("back", "a_bot")
    self.go_to_named_pose("feeder_pick_ready", robot_name)
    # Turn to the right (joint-space move so the arm faces the feeders)
    self.groups["c_bot"].set_joint_value_target([0, -2.0980, 1.3992, -1.6153, -1.5712, -3.1401])
    self.groups["c_bot"].set_max_velocity_scaling_factor(1.0)
    self.groups["c_bot"].go(wait=True)
    self.groups["c_bot"].stop()
    poses = []
    pose0 = geometry_msgs.msg.PoseStamped()
    self.toggle_collisions(collisions_on=False)
    pose0.header.frame_id = "m3_feeder_outlet_link"
    if robot_name=="c_bot":
        pose0.pose.orientation = geometry_msgs.msg.Quaternion(*tf_conversions.transformations.quaternion_from_euler(0, 0, 0))
        pose0.pose.position.x = -.03
    # Three poses per feeder: retreated, at outlet, retreated again.
    for i in range(6):
        poses.append(copy.deepcopy(pose0))
    poses[1].pose.position.x = 0
    poses[3].header.frame_id = "m4_feeder_outlet_link"
    poses[4].header.frame_id = "m4_feeder_outlet_link"
    poses[4].pose.position.x = 0
    poses[5].header.frame_id = "m4_feeder_outlet_link"
    self.cycle_through_calibration_poses(poses, robot_name, speed=0.3, go_home=False, move_lin=True, end_effector_link=robot_name + "_screw_tool_m4_tip_link")
    self.toggle_collisions(collisions_on=True)
    return
def screw_feeder_pick_test(self, robot_name = "c_bot", screw_size = 4):
    """Debug routine: pick one screw of *screw_size* from its feeder outlet.

    NOTE(review): the joint target and the ``do_pick_action`` call hard-code
    "c_bot" even though *robot_name* is a parameter — confirm this routine is
    only ever meant for c_bot (the log message suggests so).
    """
    rospy.loginfo("============ Picking a screw from a feeder ============")
    rospy.loginfo("============ The screw tool has to be carried by c_bot! ============")
    # Park the other robots so the workspace is clear.
    if robot_name=="c_bot":
        self.go_to_named_pose("back", "b_bot")
        self.go_to_named_pose("back", "a_bot")
    self.go_to_named_pose("feeder_pick_ready", robot_name)
    # Turn to the right
    self.groups["c_bot"].set_joint_value_target([0, -2.0980, 1.3992, -1.6153, -1.5712, -3.1401])
    self.groups["c_bot"].set_max_velocity_scaling_factor(1.0)
    self.groups["c_bot"].go(wait=True)
    self.groups["c_bot"].stop()
    # Feeder outlet frame is derived from the screw size (m3/m4/...).
    ps = geometry_msgs.msg.PoseStamped()
    ps.header.frame_id = "m" + str(screw_size) + "_feeder_outlet_link"
    if robot_name=="c_bot":
        ps.pose.orientation = geometry_msgs.msg.Quaternion(*tf_conversions.transformations.quaternion_from_euler(0, 0, 0))
    self.do_pick_action("c_bot", ps, screw_size=screw_size, tool_name="screw_tool")
    return
def tray_partition_calibration(self, robot_name="b_bot", end_effector_link="", task="assembly", set_number=1, tray_number=1):
rospy.loginfo("============ Calibrating trays. ============")
rospy.loginfo(robot_name + " end effector should be 2 cm above tray partition.")
rospy.loginfo("set number: " + str(set_number) + ", tray_number: " + str(tray_number))
if robot_name=="1_bot":
self.go_to_named_pose("back", "b_bot")
self.go_to_named_pose("back", "c_bot")
elif robot_name=="b_bot":
self.go_to_named_pose("back", "c_bot")
self.go_to_named_pose("back", "a_bot")
elif robot_name=="c_bot":
self.go_to_named_pose("back", "a_bot")
self.go_to_named_pose("back", "b_bot")
if end_effector_link=="":
self.go_to_named_pose("home", robot_name)
elif "screw" in end_effector_link:
self.go_to_named_pose("screw_ready", robot_name)
if robot_name=="b_bot":
self.go_to_named_pose("screw_pick_ready", robot_name)
elif "suction" in end_effector_link:
self.go_to_named_pose("screw_ready", robot_name)
poses = []
pose0 = geometry_msgs.msg.PoseStamped()
if tray_number == 2:
pose0.header.frame_id = "set_" + str(set_number) + "_tray_2_partition_1"
pose0.pose.orientation = geometry_msgs.msg.Quaternion(*tf_conversions.transformations.quaternion_from_euler(0, pi/2, 0))
if "suction" in end_effector_link or "screw" in end_effector_link:
if robot_name == "b_bot" and task=="kitting":
| |
# Strip out just about everything from the style
st = re.sub('fill-opacity:[0-9\.]+','',st)
st = re.sub('stroke-opacity:[\d.]+;','',st)
st = re.sub('stroke-width:[^;]+;','',st)
st = re.sub('stroke:[^;]+;','',st)
st = re.sub('fill:[^;]+;','',st)
st = re.sub('stroke-dasharray:[^;]+;','',st)
st = re.sub('stroke-dashoffset:[^;]+;','',st)
st = re.sub('marker-start:url\([^\)]+\);','',st)
st = re.sub('marker-end:url\([^\)]+\);','',st)
hasStartMarker = False
if 'hasStartMarker' in q.attrib:
hasStartMarker = True
hasEndMarker = False
if 'hasEndMarker' in q.attrib:
hasEndMarker = True
# If null fluxValue, mark the line as absent from the map data
if not isinstance(fluxValue, float):
st = st + 'stroke:#444444;fill:none;stroke-width:0.5;stroke-dasharray:0.625,1.25;stroke-opacity:0.5;'
q.set('style', st)
continue
# Save this style for later, so we can use it for start and end marker paths.
capSt = st
# Parse the path description
parsedPath = parse_path(pathD)
# Find the length of the original path, by manually parsing the path description attribute,
# and building rough estimates at each complex curve.
# You'd think there'd be an easier way to do this, right?
pLength = parsedPath.length()
if fluxValue == 0:
flatSt = st + 'stroke:#000000;fill:none;stroke-width:0.5;stroke-opacity:0.5;'
if hasStartMarker:
flatSt = flatSt + 'marker-start:url(#fluxInMarker);'
if hasEndMarker:
flatSt = flatSt + 'marker-end:url(#fluxOutMarker);'
q.set('style', flatSt)
wStart = 1
wEnd = 1
newPOffset = 0.25
maxMagnitude = 1
# Report what we're doing
self.logStrings.append("Building paths for " + name + " fluxValue 0")
else:
# First we convert the target flux fluxValue into a stroke width
scaledFlux = self.fluxToArrowScaler(abs(fluxValue))
# Then we use that to find the start width and the end width
wStart = min(2.0,utils.old_div(scaledFlux,2))
wEnd = max(scaledFlux,0.5)
# Report what we're doing
self.logStrings.append("Building paths for " + name + " fluxValue " + str(fluxValue) + " wStart " + str(wStart) + " wEnd " + str(wEnd))
# Rebuild the path as the definition for a solid shape
newPOffset = utils.old_div(wEnd,2)
newPStart = utils.old_div(wStart,wEnd)
newPEnd = 1
if fluxValue < 0:
newPStart = 1
newPEnd = utils.old_div(wStart,wEnd)
offsetPath = parsedPath.simpleOffestPathToShape(newPOffset, 0-newPOffset, newPStart, newPEnd)
# Embed it back in the original SVG path element
q.set('d', offsetPath.getPathFragment())
q.set('style', st + 'stroke:none;fill-opacity:1;fill:#000000;')
maxMagnitude = max(newPStart, newPEnd)
prependedPathsList = []
appendedPathsList = []
if transcriptomicsPresent is True:
newSt = st + "stroke:none;fill-opacity:1;fill:"+transcriptomicsHaloColor+";"
newOffsetPath = parsedPath.simpleOffestPathToShape(0, 0-((newPOffset*1.6)+1.1), maxMagnitude, maxMagnitude)
pNew = etree.Element("{}path", d=newOffsetPath.getPathFragment(), id=origID + "-tomicsB", style=newSt)
prependedPathsList.append(pNew)
if proteomicsPresent is True:
newSt = st + "stroke:none;fill-opacity:1;fill:"+proteomicsHaloColor+";"
newOffsetPath = parsedPath.simpleOffestPathToShape((newPOffset*1.6)+1.1, 0, maxMagnitude, maxMagnitude)
pNew = etree.Element("{}path", d=newOffsetPath.getPathFragment(), id=origID + "-pomicsB", style=newSt)
prependedPathsList.append(pNew)
# For the paths that will host the start and end markers, we want a dasharray
# that will effectively render the path invisible, so we can stick the paths
# (and therefore the markers) on top without obscuring the other paths.
# This allows us to control the size of the markers independent of everything else.
# TODO: Replace this with one or two basic near-zero length lines that reproduce the needed angles,
# to simplify the geometry.
capSt = capSt + 'stroke-dasharray:1.0,{1:5.3f};'.format(1,pLength+1)
capSt = capSt + 'stroke-dashoffset:1.5;stroke-opacity:1;stroke:#000000;fill:none;'
if hasStartMarker:
if (fluxValue < 0):
sw = (wEnd * self.arrowScaleFactor) + 0.4
else:
sw = (wStart * self.arrowScaleFactor) + 0.4
startCapSt = capSt + 'stroke-width:{0:5.3f};'.format(sw)
newID = origID + "-startcap"
pNew = etree.Element("{}path", d=pathD, id=newID, style=startCapSt + 'marker-start:url(#fluxInMarker);')
appendedPathsList.append(pNew)
if (fluxValue < 0):
if transcriptomicsPresent is True:
newID = origID + "-startcaptran"
pNew = etree.Element("{}path", d=pathD, id=newID, style=startCapSt + 'marker-start:url(#fluxInTranMarker);')
prependedPathsList.append(pNew)
if proteomicsPresent is True:
newID = origID + "-startcapprot"
pNew = etree.Element("{}path", d=pathD, id=newID, style=startCapSt + 'marker-start:url(#fluxInProtMarker);')
prependedPathsList.append(pNew)
if hasEndMarker:
if (fluxValue < 0):
sw = (wStart * self.arrowScaleFactor) + 0.4
else:
sw = (wEnd * self.arrowScaleFactor) + 0.4
endCapSt = capSt + 'stroke-width:{0:5.3f};'.format(sw)
newID = origID + "-endcap"
pNew = etree.Element("{}path", d=pathD, id=newID, style=endCapSt + 'marker-end:url(#fluxOutMarker);')
appendedPathsList.append(pNew)
if (fluxValue > 0):
if transcriptomicsPresent is True:
newID = origID + "-endcaptran"
pNew = etree.Element("{}path", d=pathD, id=newID, style=endCapSt + 'marker-end:url(#fluxOutTranMarker);')
prependedPathsList.append(pNew)
if proteomicsPresent is True:
newID = origID + "-endcapprot"
pNew = etree.Element("{}path", d=pathD, id=newID, style=endCapSt + 'marker-end:url(#fluxOutProtMarker);')
prependedPathsList.append(pNew)
for i in prependedPathsList:
endToPrependFrom.addprevious(i)
endToPrependFrom = i
endToAddFrom = q
for i in appendedPathsList:
endToAddFrom.addnext(i)
endToAddFrom = i
def _changeFluxText(self, name, fluxValue):
    """Private method, changes text labels.

    Rewrites the value/std-range tspans of every label registered for
    reaction *name*. *fluxValue* may be a ``core.rangedNumber`` (lo/best/hi),
    a float, the string ``'?'`` (reaction absent: value blanked, label
    dimmed), or any other string, used verbatim.
    """
    labels = None
    try:
        labels = self.textDict[name]
    except KeyError:
        self.logStrings.append("Unable to find text label " + name + " to change content")
        self.errorCount += 1
        return
    for label in labels:
        # Normalize the existing style so new properties can be appended.
        if 'style' in label.attrib:
            st = label.attrib['style']
            if not st.endswith(';'):
                st = st + ';'
        else:
            st = ';'
        # Collect the <tspan> children; by convention [0] is the name,
        # [1] the value, [2] the std-range (presumably — see slices below).
        parts = []
        for s in label.iterchildren():
            if not hasattr(s, 'tag'):
                continue
            if re.search(r"\}tspan$", s.tag):
                parts.append(s)
        if not parts[0:1]:
            # If there isn't even a first item, give up
            continue
        # Formatting values
        stdInsert = ''
        fill = 1
        if isinstance(fluxValue, core.rangedNumber):
            if fluxValue.lo == 0 and fluxValue.hi == 0 and fluxValue.best == 0:
                #fill = 0.5
                txtInsert = '(0.0)'
            else:
                txtInsert = '({0:5.3f})'.format(fluxValue.best)
                if abs(fluxValue.hi-fluxValue.lo) != 0:
                    fLow = '{0:5.2f}'.format(fluxValue.lo) # Float formatting introduces unnecessary spaces that
                    fHigh = '{0:5.2f}'.format(fluxValue.hi) # interfere with spacing when the value is positive
                    stdInsert = '(' + fLow.strip() + '/' + fHigh.strip() + ')'
        elif isinstance(fluxValue, float):
            if fluxValue == 0:
                txtInsert = '(0.0)'
                #fill = 0.5
            else:
                txtInsert = '({0:4.3G})'.format(fluxValue)
        elif fluxValue == '?':
            # Reaction not present in the map data: blank value, dim label.
            txtInsert = ''
            fill = 0.2
        else:
            txtInsert = fluxValue
        # Replace any previous fill-opacity with the one chosen above.
        st = re.sub('fill-opacity:[0-9\.]+','',st)
        st = st + 'fill-opacity:{0:5.2f};'.format(fill)
        label.set('style', st)
        # Updating svg with new text
        if parts[1:2]: # No need to catch an "IndexError" with this.
            parts[1].text = txtInsert
        if parts[2:3]:
            parts[2].text = stdInsert
def changeTitle(self,title):
    "Changes title for file"
    # NOTE(review): intentionally(?) disabled — the assignment below is
    # commented out, so this method is currently a no-op and *title* is
    # ignored. Confirm whether title support was meant to be restored.
    #self.svg[self.titleLoc] = title
def decorateMap(self, rNetwork, fluxDict):
    """Redraw every arrow and label from flux values, protein measurements,
    and gene transcription data.

    All known reactions are first marked absent ('?'); reactions present in
    *fluxDict* are then decorated with their value and omics flags.
    """
    # Pass 1: flag every reaction as absent; pass 2 overwrites the subset
    # that actually has a flux value.
    for reaction_name in self.textDict:
        self._decorateReactionPath(reaction_name, '?', None, None)
        self._changeFluxText(reaction_name, '?')
    reactions = rNetwork.reactionList.getReactionDictionary()
    for flux_name, flux_value in fluxDict.items():
        if flux_name in reactions:
            has_transcriptomics = reactions[flux_name].getSignificantTranscriptomicsValue()
            has_proteomics = reactions[flux_name].getSignificantProteomicsValue()
        else:
            has_transcriptomics = False
            has_proteomics = False
        self._decorateReactionPath(flux_name, flux_value, has_transcriptomics, has_proteomics)
        self._changeFluxText(flux_name, flux_value)
def findorphans(self, fluxDict):
    """Record debug info about mismatches between the map and the flux data:
    ``orphanfluxes`` are fluxDict entries with no map path, ``orphanpaths``
    are map paths with no fluxDict entry.
    """
    known_paths = set(self.pathDict)
    known_fluxes = set(fluxDict)
    self.orphanfluxes = known_fluxes - known_paths
    self.orphanpaths = known_paths - known_fluxes
# Class for drawing flux maps
# TODO: Delete this when all maps are actualized
class FluxMapOLD(object):
"class representing a flux map"
def __init__(self,basefile,strokewidth_multiplier=3,max_strokewidth=5,min_strokewidth=0.2):
    """Load *basefile* with svgfig and index its flux paths and text labels.

    Builds ``pathdict`` (reaction id -> svg indices of its path elements,
    cofactor/misc paths filtered out) and ``textdict`` (label name -> list of
    (rate, stds) svg index tuples, metabolite/numeric labels filtered out).

    NOTE(review): if no element with 'title' in its id is found, ``titleLoc``
    is never bound and the final assignment raises UnboundLocalError —
    presumably all base maps contain a title element; confirm.
    """
    import svgfig
    svg = svgfig.load(basefile)
    # build path and text dicts used to reference fluxes in the svg
    rawPathList = []
    rawTextDict = {}
    for index,item in svg:
        # Title info
        try:
            attr = item.attr
            try:
                id = attr['id']
                if 'title' in id:
                    titleLoc = (index,0,0)
            except KeyError:
                pass
        except AttributeError:
            pass
        # The rest
        try:
            # ungrouped paths
            if item.t == "path" and len(index) == 1:
                rawPathList.append((item.attr['id'],[index]))
            # grouped paths
            # if item type is group and the ID isn't a generic "gNNNN"
            if item.t == "g" and not item.attr['id'][1:].isnumeric():
                # if every subitem in the group is a path
                pathtest = []
                for s in item.sub:
                    pathtest.append(s.t == "path")
                if all(pathtest):
                    rawPathList.append((item.attr['id'],[(index[0],u) for u in range(len(item.sub))]))
            # get text entry names and numbers
            if item.t == "text":
                # set up a dict lookup by name, store top-level index as the dict contents
                # index,0,0 : the name
                # index,1,0 : the rate number
                name = svg[index,0,0].strip()
                rate = (index[0],1,0)
                stds = (index[0],2,0)
                if name in rawTextDict:
                    rawTextDict[name].append((rate,stds))
                else:
                    rawTextDict[name]=[(rate,stds)]
        # svgfig silently handles IndexError exception if svg[index,1,0] does not exist
        # NOTE(review): bare except also hides unrelated errors (KeyError etc.)
        except:
            pass
    # clean misc paths and cofactors out of the path list
    cleanPathList = []
    badTerms = ["path","atpc","adpc","coac","co2c","nadc","nadhc","nadpc","nadphc"]
    for entry in rawPathList:
        flagBad = False
        for bad in badTerms:
            if bad in entry[0]:
                flagBad = True
                break
        if not flagBad:
            cleanPathList.append(entry)
    pathdict = dict(cleanPathList)
    # clean metabolite labels and pure numeric labels out of the text list
    cleanTextDict = {}
    badTerms = ["[c]","[e]"]
    for entry in rawTextDict:
        flagBad = False
        for bad in badTerms:
            if bad in entry:
                flagBad = True
                break
        #if not flagBad and entry[0].isnumeric() == False:
        if not flagBad and entry.isnumeric() == False:
            cleanTextDict[entry] = rawTextDict[entry]
    textdict = cleanTextDict
    #textdict = dict(cleanTextDict)
    # assign attributes
    self.basefile = basefile
    self.basesvg = svg
    self.svg = svg
    self.pathdict = pathdict
    self.textdict = textdict
    self.strokewidth_multiplier = strokewidth_multiplier
    self.max_strokewidth = max_strokewidth
    self.min_strokewidth = min_strokewidth
    self.titleLoc = titleLoc
def changestrokewidths(self,strokewidth_multiplier=3,max_strokewidth=5,min_strokewidth=0.2):
    """Reconfigure the stroke-width scaling used when drawing arrows,
    overriding the values chosen at initialization."""
    (self.strokewidth_multiplier,
     self.max_strokewidth,
     self.min_strokewidth) = (strokewidth_multiplier, max_strokewidth, min_strokewidth)
def resetsvg(self):
    """Resets svg to basesvg (discards all decoration applied so far)."""
    self.svg = self.basesvg
def writesvg(self,outfile):
    """Writes the current (possibly decorated) svg to *outfile*."""
    self.svg.save(outfile)
def _changearrowwidth(self,name,value):
"private method, changes arrow widths"
# Only use best value for arrow widths
if value.__class__.__name__ == 'rangedNumber':
value = value.best
# If null value, make it zero
if not isinstance(value, float):
value = 0
if value == 0:
for q in self.pathdict[name]:
self.svg[q].attr['style'] = re.sub('stroke-width:[\d.]+','stroke-width:1.0',self.svg[q].attr['style'])
self.svg[q].attr['style'] = re.sub('stroke-dasharray:none','stroke-dasharray:0.625, 1.25',self.svg[q].attr['style'])
self.svg[q].attr['style'] = re.sub('marker-end:url\(.*\)','marker-end:url(#TriangleOutS)',self.svg[q].attr['style'])
else:
for q in self.pathdict[name]:
# Changing line width
finalvalue = min(self.strokewidth_multiplier*abs(value),self.max_strokewidth);
finalvalue = max(finalvalue,self.min_strokewidth);
insert = 'stroke-width:{0:5.3f}'.format(finalvalue)
self.svg[q].attr['style'] = re.sub('stroke-width:[\d.]+',insert,self.svg[q].attr['style'])
self.svg[q].attr['style'] = re.sub('stroke-dasharray:[^;]','stroke-dasharray:none',self.svg[q].attr['style'])
#Arrow point correction
if 10*abs(value) >= 3:
self.svg[q].attr['style'] = re.sub('marker-end:url\(.*\)','marker-end:url(#TriangleOutS)',self.svg[q].attr['style'])
elif 10*abs(value) >= 0.8 and 10*abs(value) < 3:
self.svg[q].attr['style'] = re.sub('marker-end:url\(.*\)','marker-end:url(#TriangleOutM)',self.svg[q].attr['style'])
else:
self.svg[q].attr['style'] = re.sub('marker-end:url\(.*\)','marker-end:url(#TriangleOutL)',self.svg[q].attr['style'])
def _changetextlabel(self,name,value):
"private method, changes text labels"
for group in self.textdict[name]:
| |
If no source points or polygons can be associated to a target polygon (e.g.
no intersection), the created destination layer will be empty.
Data Model is built upon OGR Implementation of ESRI Shapefile
* one src DataSource (named 'src') holding source polygons or points
* one trg DataSource (named 'trg') holding target polygons
* one dst DataSource (named 'dst') holding intersection polygons/points
related to target polygons with attached index and weights fields
By using OGR there are no restrictions for the used source grids.
Warning
-------
Writing shapefiles with the wrong locale settings can have impact on the
type of the decimal. If problem arise use LC_NUMERIC=C in your environment.
Parameters
----------
src : sequence of source points (shape Nx2) or polygons (shape NxMx2) or
ESRI Shapefile filename containing source points/polygons or
DataSource object
trg : sequence of target polygons (shape Nx2, num vertices x 2) or
ESRI Shapefile filename containing target polygons or DataSource object
Keyword arguments
-----------------
buf : float
(same unit as coordinates)
Points/Polygons will be considered inside the target if they are
contained in the buffer.
srs : object
OGR.SpatialReference
will be used for DataSource object.
src and trg data have to be in the same srs-format
silent : bool
If True no ProgressBar is shown. Defaults to False.
Examples
--------
See \
:ref:`/notebooks/zonalstats/wradlib_zonalstats_classes.ipynb#ZonalData`.
"""
def __init__(self, src, trg=None, buf=0.0, srs=None, **kwargs):
    """Set up src/trg DataSources and compute the dst intersection layer.

    If *trg* is None, *src* is treated as a filename holding a complete
    previous dump (src, trg and dst layers) and is loaded instead.
    """
    self._buffer = buf
    self._srs = srs
    # consumed here so it is not forwarded to DataSource(**kwargs)
    silent = kwargs.pop("silent", False)
    if trg is None:
        # try to read complete dump (src, trg, dst)
        self.load_vector(src)
    else:
        # accept either ready-made DataSource objects or raw input
        if isinstance(src, DataSource):
            self.src = src
        else:
            self.src = DataSource(src, name="src", srs=srs, **kwargs)
        if isinstance(trg, DataSource):
            self.trg = trg
        else:
            self.trg = DataSource(trg, name="trg", srs=srs, **kwargs)
        # dst holds the src/trg intersections with index/weight attributes
        self.dst = DataSource(name="dst")
        self.dst.ds = self._create_dst_datasource(silent)
        self.dst._create_spatial_index()
        self.dst._create_table_index("trg_index")
    self._count_intersections = self.dst.ds.GetLayer().GetFeatureCount()
@property
def count_intersections(self):
    """Returns number of intersections in the dst layer (feature count)."""
    return self._count_intersections

@property
def srs(self):
    """Returns the OGR SpatialReference object, or None if not set."""
    return self._srs
@property
def isecs(self):
    """Returns intersections for every target polygon.

    Returns
    -------
    array : :class:`numpy:numpy.ndarray`
        of Nx2 point coordinate arrays, one entry per target feature
    """
    target_layer = self.trg.ds.GetLayerByName("trg")
    per_target = []
    for feature_idx in range(target_layer.GetFeatureCount()):
        per_target.append(self._get_intersection(idx=feature_idx))
    return np.array(per_target)
def get_isec(self, idx):
    """Returns intersections for a single target polygon.

    Thin wrapper around :meth:`_get_intersection`.

    Parameters
    ----------
    idx : int
        index of target polygon
    Returns
    -------
    array : :class:`numpy:numpy.ndarray`
        of Nx2 point coordinate arrays
    """
    return self._get_intersection(idx=idx)
def get_source_index(self, idx):
    """Returns source indices referring to target polygon *idx*.

    Parameters
    ----------
    idx : int
        index of target polygon
    Returns
    -------
    array : :class:`numpy:numpy.ndarray`
        indices
    """
    attributes = self.dst.get_attributes(["src_index"], filt=("trg_index", idx))
    return np.array(attributes[0])
def _create_dst_datasource(self, silent):
    """Create destination target gdal.Dataset

    Intersects the trg layer with the src layer (optionally buffering the
    targets first) and copies the result, attributed with src_/trg_ index
    fields, into a /vsimem-backed ESRI Shapefile dataset.

    Parameters
    ----------
    silent : bool
        if True (or on Windows), no terminal progress callback is used

    Returns
    -------
    ds_out : object
        gdal.Dataset object (the /vsimem shapefile, not the intermediate
        in-memory dataset)
    """
    progress = None if (silent or isWindows) else gdal.TermProgress
    # create mem-mapped temp file dataset
    tmpfile = tempfile.NamedTemporaryFile(mode="w+b").name
    ds_out = io.gdal.gdal_create_dataset(
        "ESRI Shapefile", os.path.join("/vsimem", tmpfile), gdal_type=gdal.OF_VECTOR
    )
    # create intermediate mem dataset
    ds_mem = io.gdal.gdal_create_dataset("Memory", "out", gdal_type=gdal.OF_VECTOR)
    # get src geometry layer; clear any leftover spatial filter first
    src_lyr = self.src.ds.GetLayerByName("src")
    src_lyr.ResetReading()
    src_lyr.SetSpatialFilter(None)
    geom_type = src_lyr.GetGeomType()
    # get trg geometry layer
    trg_lyr = self.trg.ds.GetLayerByName("trg")
    trg_lyr.ResetReading()
    trg_lyr.SetSpatialFilter(None)
    # buffer handling (time consuming): grow every target by self._buffer
    if self._buffer > 0:
        for i in range(trg_lyr.GetFeatureCount()):
            feat = trg_lyr.GetFeature(i)
            feat.SetGeometryDirectly(feat.GetGeometryRef().Buffer(self._buffer))
            trg_lyr.SetFeature(feat)
        # reset target layer
        trg_lyr.ResetReading()
    # create tmp dest layer
    self.tmp_lyr = georef.vector.ogr_create_layer(
        ds_mem, "dst", srs=self._srs, geom_type=geom_type
    )
    # attribute prefixes let us recover which side each field came from
    trg_lyr.Intersection(
        src_lyr,
        self.tmp_lyr,
        options=[
            "SKIP_FAILURES=YES",
            "INPUT_PREFIX=trg_",
            "METHOD_PREFIX=src_",
            "PROMOTE_TO_MULTI=YES",
            "USE_PREPARED_GEOMETRIES=YES",
            "PRETEST_CONTAINMENT=YES",
        ],
        callback=progress,
    )
    georef.vector.ogr_copy_layer(ds_mem, 0, ds_out)
    return ds_out
def dump_vector(self, filename, driver="ESRI Shapefile", remove=True):
    """Output source/target grid points/polygons to ESRI_Shapefile

    target layer features are attributed with source index and weight

    Parameters
    ----------
    filename : string
        path to shape-filename
    driver : string
        OGR Vector Driver String, defaults to 'ESRI Shapefile'
    remove : bool
        if True, existing file will be removed before creation
    """
    # Only the first write may wipe an existing file; the following two
    # add their layers to the file created by the first.
    layers = ((self.src, remove), (self.trg, False), (self.dst, False))
    for datasource, wipe in layers:
        datasource.dump_vector(filename, driver, remove=wipe)
def load_vector(self, filename):
    """Load source/target grid points/polygons into in-memory Shapefile

    Re-creates the src, trg and dst DataSources from a previous dump and
    takes the spatial reference from the src layer.

    Parameters
    ----------
    filename : string
        path to vector file
    """
    for layer_name in ("src", "trg", "dst"):
        setattr(self, layer_name, DataSource(filename, name=layer_name, source=layer_name))
    # get spatial reference object
    self._srs = self.src.ds.GetLayer().GetSpatialRef()
def _get_idx_weights(self):
    """Retrieve index and weight from dst DataSource

    Abstract hook: subclasses define how per-target source indices and
    weights are derived (e.g. by area for polygons, uniformly for points).
    """
    raise NotImplementedError
def _get_intersection(self, trg=None, idx=None, buf=0.0):
    """Just a toy function if you want to inspect the intersection
    points/polygons of an arbitrary target or a target by index.

    Parameters
    ----------
    trg : ogr.Geometry or array-like, optional
        target polygon (Nx2 vertex array accepted)
    idx : int, optional
        index of target polygon in the 'trg' layer; takes precedence
        over *trg*
    buf : float
        buffer distance applied to the target before intersecting

    Returns
    -------
    intersecs
        dst features filtered by geometry (when *idx* is None) or by the
        'trg_index' attribute

    Raises
    ------
    TypeError
        if neither *trg* nor *idx* is usable
    """
    # TODO: kwargs necessary?
    # check whether idx is given
    if idx is not None:
        if self.trg:
            try:
                lyr = self.trg.ds.GetLayerByName("trg")
                feat = lyr.GetFeature(idx)
                trg = feat.GetGeometryRef()
            except Exception:
                raise TypeError("No target polygon found at index {0}".format(idx))
        else:
            raise TypeError("No target polygons found in object!")
    # check for trg
    if trg is None:
        raise TypeError("Either *trg* or *idx* keywords must be given!")
    # check for geometry; isinstance (unlike the previous exact type()
    # comparison) also accepts ogr.Geometry subclasses
    if not isinstance(trg, ogr.Geometry):
        trg = georef.vector.numpy_to_ogr(trg, "Polygon")
    # apply Buffer value
    trg = trg.Buffer(buf)
    if idx is None:
        intersecs = self.dst.get_data_by_geom(trg)
    else:
        intersecs = self.dst.get_data_by_att("trg_index", idx)
    return intersecs
class ZonalDataPoly(ZonalDataBase):
    """ZonalData object for source polygons

    Parameters
    ----------
    src : sequence of source polygons (shape NxMx2) or
        ESRI Shapefile filename containing source polygons
    trg : sequence of target polygons (shape Nx2, num vertices x 2) or
        ESRI Shapefile filename containing target polygons

    Keyword Arguments
    -----------------
    buf : float
        (same unit as coordinates)
        Polygons will be considered inside the target if they are contained
        in the buffer.
    srs : object
        OGR.SpatialReference
        will be used for DataSource object.
        src and trg data have to be in the same srs-format

    Examples
    --------
    See \
    :ref:`/notebooks/zonalstats/wradlib_zonalstats_classes.ipynb#ZonalData`.
    """
    def _get_idx_weights(self):
        """Retrieve index and weight from dst DataSource

        Iterates over all trg DataSource Polygons; the weight of each source
        polygon is the area of its intersection with the target (the "Area"
        property of the dst features).

        Returns
        -------
        ret : tuple
            (index, weight) arrays
        """
        trg = self.trg.ds.GetLayer()
        cnt = trg.GetFeatureCount()
        # ret[0] collects per-target index arrays, ret[1] per-target weights
        ret = [[] for _ in range(2)]
        for index in range(cnt):
            arr, w = self.dst.get_attrs_and_props(
                attrs=["src_index"], props=["Area"], filt=("trg_index", index)
            )
            # append the area list so indices and weights are zipped below
            arr.append(w[0])
            for i, l in enumerate(arr):
                ret[i].append(np.array(l))
        return tuple(ret)
class ZonalDataPoint(ZonalDataBase):
    """ZonalData object for source points

    Parameters
    ----------
    src : sequence of source points (shape Nx2) or
        ESRI Shapefile filename containing source points
    trg : sequence of target polygons (shape Nx2, num vertices x 2) or
        ESRI Shapefile filename containing target polygons

    Keyword Arguments
    -----------------
    buf : float
        (same unit as coordinates)
        Points will be considered inside the target if they are contained
        in the buffer.
    srs : object
        OGR.SpatialReference
        will be used for DataSource object.
        src and trg data have to be in the same srs-format

    Examples
    --------
    See \
    :ref:`/notebooks/zonalstats/wradlib_zonalstats_classes.ipynb#ZonalData`.
    """
    def _get_idx_weights(self):
        """Retrieve index and weight from dst DataSource

        Iterates over all trg DataSource Polygons; every source point of a
        target gets an equal weight of 1/N.

        Returns
        -------
        ret : tuple
            (index, weight) arrays
        """
        trg = self.trg.ds.GetLayer()
        cnt = trg.GetFeatureCount()
        ret = [[] for _ in range(2)]
        for index in range(cnt):
            arr = self.dst.get_attributes(["src_index"], filt=("trg_index", index))
            # BUGFIX: guard targets with no associated source point;
            # 1.0 / len(arr[0]) previously raised ZeroDivisionError there.
            npoints = len(arr[0])
            weights = [1.0 / npoints] * npoints if npoints else []
            arr.append(weights)
            for i, l in enumerate(arr):
                ret[i].append(np.array(l))
        return tuple(ret)
class ZonalStatsBase(object):
"""Base class for all 2-dimensional zonal statistics.
The base class for computing 2-dimensional zonal statistics for target
polygons from source points or polygons as built up with ZonalDataBase
and derived classes. Provides the basic design for all other classes.
If no source points or polygons can be associated to a target polygon (e.g.
no intersection), the zonal statistic for that target will be NaN.
Parameters
----------
src : object | string
ZonalDataPoly object or filename pointing to ZonalDataPoly ESRI
shapefile containing necessary ZonalData
ZonalData is available as ``zdata``-property inside class instance.
Examples
--------
See \
:ref:`/notebooks/zonalstats/wradlib_zonalstats_classes.ipynb#ZonalStats`.
"""
def __init__(self, src=None, ix=None, w=None):
    """Initialize from a ZonalDataBase object *src*, or directly from
    per-target index (*ix*) and weight (*w*) arrays when *src* is None.
    """
    self._ix = None
    self._w = None
    if src is not None:
        if isinstance(src, ZonalDataBase):
            # refuse empty zonal data: every statistic would be undefined
            if not src.count_intersections:
                raise ValueError(
                    "No intersections found in destination "
                    "layer of ZonalDataBase object."
                )
            self._zdata = src
        else:
            raise TypeError("Parameter mismatch in calling ZonalDataBase")
        self.ix, self.w = self._check_ix_w(*self.zdata._get_idx_weights())
    else:
        self._zdata = None
        self.ix, self.w = self._check_ix_w(ix, w)
# TODO: check which properties are really needed
@property
def zdata(self):
    """ZonalData object this statistics instance was built from (or None)."""
    return self._zdata

@zdata.setter
def zdata(self, value):
    self._zdata = value

@property
def ix(self):
    """Per-target arrays of source indices."""
    return self._ix

@ix.setter
def ix(self, value):
    self._ix = value

@property
def w(self):
    """Per-target arrays of source weights."""
    return self._w
@w.setter
| |
# QDTModel.py (gh_stars: 1-10)
from __future__ import division
from scipy import stats, std
import numpy
import warnings
from DecisionMaking.Configuration import ConfigurationError
from DecisionMaking.Constants import *
from DecisionMaking.QModel import QState
from pprint import pprint
"""
Class to represent a q-state in a Decision Tree MDP model.
"""
class QStateDT(QState):
def __init__(self, action, qvalue=0):
super(QStateDT, self).__init__(action, qvalue)
self.incr_measurements = []
self.decr_measurements = []
"""
Returns the recorded transitions that increased the q-value.
"""
def get_incr_measurements(self):
return self.incr_measurements
"""
Returns the recoreded transitions that decreased the q-value.
"""
def get_decr_measurements(self):
return self.decr_measurements
"""
Stores a transition that increased the q-value.
"""
def store_incr_measurement(self, measurement):
self.incr_measurements.append(measurement)
"""
Stores a transition that decreased the q-value.
"""
def store_decr_measurement(self, measurement):
self.decr_measurements.append(measurement)
"""
String representation for a Q-state
"""
def __str__(self):
meas_str = "\tIncr: %d, Decr: %d" % (len(self.incr_measurements),
len(self.decr_measurements))
#meas_str += "\nIncr:"
#for i in self.incr_measurements:
# meas_str += "\n" + str(i)
#meas_str += "\nDecr:"
#for d in self.decr_measurements:
# meas_str += "\n" + str(d)
return super(QStateDT, self).__str__() + meas_str
def __repr__(self):
return str(self)
"""
A leaf node in the decision tree, and one of the states of the MDP.
"""
class LeafNode(object):
    """A leaf of the decision tree; doubles as one state of the MDP.

    Holds one q-state per legal (action name, action value) pair and
    caches the value of the best q-state in ``self.value``.
    """

    def __init__(self, parent, model, actions, qvalues=None):
        self.parent = parent
        self.actions = actions
        self.initial_qvalues = qvalues
        self.model = model
        self.value = 0
        self.qstates = []
        # One q-state per (name, value) action pair, seeded either with 0
        # or with the caller-provided q-value for that action.
        for name, values in actions.items():
            for value in values:
                action = (name, value)
                seed = 0 if qvalues is None else qvalues[action]
                self.qstates.append(QStateDT(action, seed))
        self.update_value()

    def set_all_qvalues(self, qvalue):
        """Set every q-state's q-value to the given value."""
        for qstate in self.qstates:
            qstate.set_qvalue(qvalue)

    def is_leaf(self):
        """This node is a leaf."""
        return True

    def split(self, param, limits, qvalues=None):
        """Replace this leaf with a decision node splitting on ``param``.

        The current q-values are carried over to every new child leaf and
        the model's MDP state list is updated accordingly.
        Returns the freshly created leaf states.
        """
        if qvalues is None:
            qvalues = {qs.get_action(): qs.get_qvalue() for qs in self.get_qstates()}
        # drop this leaf from the model before it is replaced
        self.model.remove_state(self)
        # build the decision node that takes this leaf's place in the tree
        replacement = DecisionNode(self.parent, self.model, param, limits,
                                   self.actions, qvalues)
        fresh_leaves = replacement.get_leaves()
        self.model.add_states(fresh_leaves)
        self.parent.replace_node(self, replacement)
        return fresh_leaves

    def get_optimal_action(self):
        """Return the action whose q-state has the highest q-value."""
        best_action, best_value = None, float("-inf")
        for qstate in self.qstates:
            if qstate.get_qvalue() > best_value:
                best_value = qstate.get_qvalue()
                best_action = qstate.get_action()
        return best_action

    def get_legal_actions(self):
        """Return every action available from this state."""
        return [qstate.get_action() for qstate in self.qstates]

    def get_leaves(self):
        """The only leaf in this subtree is the node itself."""
        return [self]

    def get_state(self, measurements):
        """A leaf matches any measurements; resolve to itself."""
        return self

    def get_qstates(self):
        """Return the q-states for every action from this state."""
        return self.qstates

    def get_qstate(self, action):
        """Return the q-state for ``action`` (None when unknown)."""
        for qstate in self.qstates:
            if qstate.get_action() == action:
                return qstate
        return None

    def get_value(self):
        """Return the cached state value."""
        return self.value

    def update_value(self):
        """Refresh the state value from the best q-state."""
        self.value = max(qstate.get_qvalue() for qstate in self.qstates)

    def __str__(self):
        return "Q-Model State"

    def __repr__(self):
        return str(self)

    def print_detailed(self):
        """Print the node followed by each of its q-states."""
        print(self)
        for qstate in self.get_qstates():
            print(qstate)
"""
A decision node in the decision tree. This will only hold references to other nodes
and does not represent a state of the MDP.
"""
class DecisionNode(object):
    """An internal decision-tree node that routes measurements to children.

    It only holds references to other nodes and is not itself a state
    of the MDP.
    """

    def __init__(self, parent, model, parameter, limits, actions, initial_qvalues):
        self.parent = parent
        self.parameter = parameter
        self.limits = limits
        self.model = model
        # len(limits) thresholds partition the parameter axis into
        # len(limits) + 1 intervals, one child leaf per interval.
        self.children = [LeafNode(self, model, actions, initial_qvalues)
                         for _ in range(len(limits) + 1)]

    def is_leaf(self):
        """A decision node is never a leaf."""
        return False

    def replace_node(self, old_node, new_node):
        """Swap ``old_node`` for ``new_node`` among the children.

        Happens when a child leaf splits into a decision node.
        """
        for idx, child in enumerate(self.children):
            if child is old_node:
                self.children[idx] = new_node
                return
        raise InternalError("Tried to replace a node that did not exist")

    def split(self, param, limits):
        """Split every child; only used while building the initial tree."""
        for child in self.children:
            child.split(param, limits)

    def get_leaves(self):
        """Collect the leaves of every child subtree."""
        leaves = []
        for child in self.children:
            leaves.extend(child.get_leaves())
        return leaves

    def get_state(self, measurements):
        """Route the measurements to the child covering their interval."""
        if self.parameter not in measurements:
            raise ParameterError("Missing measurement: " + self.parameter)
        reading = measurements[self.parameter]
        for idx, threshold in enumerate(self.limits):
            if reading < threshold:
                return self.children[idx].get_state(measurements)
        return self.children[-1].get_state(measurements)
"""
Class that represents a Q-Learning model with a decision tree state structure.
"""
class QDTModel:
"""
Creates a model from a given configuration dict
"""
def __init__(self, conf):
    """
    Creates a model from a given configuration dict.

    Raises ConfigurationError if any required field is missing.
    """
    # validate the configuration before reading any field
    required_fields = [INITIAL_PARAMETERS, PARAMETERS, ACTIONS, DISCOUNT, LEARNING_RATE,
                       INITIAL_QVALUES, SPLIT_ERROR, MIN_MEASUREMENTS]
    for f in required_fields:
        if not f in conf:
            raise ConfigurationError("%s not provided in the configuration" % f)
    self.discount = conf[DISCOUNT]
    self.learning_rate = conf[LEARNING_RATE]
    self.parameters = list(conf[PARAMETERS])
    # at least one measurement is always required
    self.min_measurements = max(conf[MIN_MEASUREMENTS], 1)
    self.split_error = conf[SPLIT_ERROR]
    # the model serves as both the parent node and the model of the root leaf
    self.root = LeafNode(self, self, conf[ACTIONS])
    # NOTE(review): set_all_qvalues does not refresh root.value (it stays at
    # the construction-time value) — confirm a later update recomputes it
    self.root.set_all_qvalues(conf[INITIAL_QVALUES])
    self.current_state = None
    self.current_meas = None
    self.update_qvalues = True
    self.reuse_meas = False
    self.states = [self.root]
    self.transition_data = []
    self.splits = {}
    # create all the initial decision nodes of the model
    # NOTE: each split replaces self.root and rewrites self.states through
    # the remove_state/add_states callbacks, so statement order matters here
    parameters = self._get_parameters(conf[INITIAL_PARAMETERS])
    for name, limits in parameters.items():
        self.root.split(name, limits)
    # initialize the split counters
    self.allow_splitting = True
    for p in self.parameters:
        self.splits[p] = 0
    # initialize the reverse transition indexes and priorities for prioritized
    # sweeping; sized only after all initial splits so they match self.states
    self.reverse_transitions = []
    self.priorities = [0] * len(self.states)
    for i in range(len(self.states)):
        self.reverse_transitions.append({})
"""
Configure the defined limits or values for the initial parameters
so that they can be used by a decision node.
"""
def _get_parameters(self, parameters):
    """
    Normalize the initial parameter configuration into split limits
    usable by decision nodes.

    Discrete value lists become the midpoints between consecutive
    values; continuous limit lists keep only their inner limits.
    Raises ConfigurationError on malformed entries.
    """
    prepared = {}
    for name, spec in parameters.items():
        if VALUES in spec:
            # discrete values: split at the midpoint of each consecutive pair
            values = spec[VALUES]
            if not isinstance(values, list):
                raise ConfigurationError("Provided values for %s must be in a list" % name)
            if len(values) <= 1:
                raise ConfigurationError("At least two values must be provided for " + name)
            prepared[name] = [(lo + hi) / 2 for lo, hi in zip(values, values[1:])]
        elif LIMITS in spec:
            # continuous range: drop the outermost limits, keep the inner ones
            limits = spec[LIMITS]
            if not isinstance(limits, list):
                raise ConfigurationError("Provided limits for %s must be in a list" % name)
            if len(limits) <= 2:
                raise ConfigurationError("At least three limits must be provided for " + name)
            prepared[name] = limits[1:-1]
        else:
            raise ConfigurationError("Values or limits must be provided for parameter " + name)
    return prepared
"""
Replaces the root node with the given decision node.
This should happen when the root node splits.
"""
def replace_node(self, old_node, new_node):
if not old_node is self.root:
raise InternalError("Tried to replace the root node with a different initial node")
self.root = new_node
"""
Initializes the current state based on the given measurements
"""
def set_state(self, measurements):
self.current_meas = measurements
self.current_state = self.root.get_state(measurements)
"""
Allow updates to q-values
"""
def set_update_qvalues(self, update=True):
self.update_qvalues = update
"""
Removes the state with the given state_num from the model
"""
def remove_state(self, state):
state_num = None
for i in range(len(self.states)):
if self.states[i] is state:
state_num = i
if state_num is None:
raise InternalError("Tried to remove a state that did not exist")
del self.states[state_num]
"""
Stores the given transition data to be used later on for retraining
"""
def store_transition_data(self, data):
self.transition_data += data
"""
Adds new states to the model
"""
def add_states(self, states):
self.states += states
"""
Suggest the optimal action to take from the current state
"""
def suggest_action(self):
if self.current_state is None:
raise StateNotSetError()
return self.current_state.get_optimal_action()
"""
Returns all the legal actions from the current_state
"""
def get_legal_actions(self):
if self.current_state is None:
raise StateNotSetError()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.