id stringlengths 3 8 | content stringlengths 100 981k |
|---|---|
430485 | import time
# Tail-follow (`tail -f`-style) loop: echo every new line appended to the
# Django log file, forever.  NOTE(review): does not handle log rotation —
# if the file is replaced, this keeps reading the old inode.
log_filename = '/code/log/django.log'
# log_filename = 'log/django.log'
with open(log_filename) as file:
while 1:
# Remember the current offset so we can rewind when only a partial
# line (or nothing) is available yet.
where = file.tell()
line = file.readline()
if not line:
# No complete new line: wait, then retry from the same offset.
time.sleep(1)
file.seek(where)
else:
print(line) # already has newline
|
430491 | import secrets
from typing import Tuple
from urllib.parse import quote_plus
import aiohttp
from quart import Blueprint, g
from quart import jsonify as json
from quart import redirect, request, session
# Blueprint implementing the Discord OAuth2 login flow for the web frontend.
auth = Blueprint("auth", __name__)
# NOTE(review): API v6 on the discordapp.com domain is deprecated upstream;
# confirm before changing, since every endpoint below is built from this.
API_BASE = "https://discordapp.com/api/v6"
def redirect_url() -> Tuple[str, str]:
    """Build the Discord OAuth2 authorization URL.

    Returns a ``(state, url)`` pair.  The state is a random hex token the
    caller stores in the session so the callback can detect CSRF.
    """
    state = secrets.token_hex(64)
    oauth = g.bot.config.oauth
    query = (
        f"client_id={oauth.client_id}"
        f"&redirect_uri={quote_plus(oauth.redirect_uri)}"
        "&response_type=code"
        "&scope=identify"
        f"&state={state}"
    )
    return state, f"https://discordapp.com/oauth2/authorize?{query}"
async def fetch_user(bearer: str) -> dict:
    """Fetch the Discord user owning *bearer* via the ``/users/@me`` endpoint."""
    auth_header = {"Authorization": f"Bearer {bearer}"}
    async with aiohttp.ClientSession(headers=auth_header, raise_for_status=True) as http:
        response = await http.get(f"{API_BASE}/users/@me")
        return await response.json()
async def fetch_access_token(code: str, *, refresh: bool = False) -> str:
    """Exchange an OAuth2 authorization code (or refresh token) for an access token.

    :param code: The authorization code, or the refresh token when ``refresh=True``.
    :param refresh: Use the refresh-token grant instead of the code grant.
    :return: The newly issued access token.
    """
    ENDPOINT = f"{API_BASE}/oauth2/token"
    data = {
        "client_id": str(g.bot.config.oauth.client_id),
        "client_secret": g.bot.config.oauth.client_secret,
        "redirect_uri": g.bot.config.oauth.redirect_uri,
    }
    if refresh:
        # BUGFIX: the refresh flow must use grant_type "refresh_token";
        # the original always sent "authorization_code", which the token
        # endpoint rejects when a refresh_token parameter is supplied.
        data["grant_type"] = "refresh_token"
        data["refresh_token"] = code
    else:
        data["grant_type"] = "authorization_code"
        data["code"] = code
    headers = {"Content-Type": "application/x-www-form-urlencoded"}
    async with aiohttp.ClientSession(raise_for_status=True) as sess:
        resp = await sess.post(ENDPOINT, data=data, headers=headers)
        payload = await resp.json()
        return payload["access_token"]
@auth.route("/redirect")
async def auth_redirect():
    """OAuth2 callback: validate state, trade the code for a token, log in."""
    # CSRF check — the state must match what /login stored in the session.
    if request.args.get("state") != session.get("oauth_state"):
        return "invalid state", 401
    code = request.args.get("code")
    if code is None:
        return "no code", 400
    token = await fetch_access_token(code)
    session["token"] = token
    session["user"] = await fetch_user(token)
    return redirect("/guilds")
@auth.route("/logout")
def auth_logout():
    """Drop the OAuth token and cached user from the session, then go home."""
    # pop() with a default instead of del: "user" may be absent even when
    # "token" is present (e.g. a partially-written session), and the
    # original `del session["user"]` would raise KeyError and 500.
    session.pop("token", None)
    session.pop("user", None)
    return redirect("/")
@auth.route("/login")
def auth_login():
    """Start the OAuth2 flow by redirecting the browser to Discord."""
    state, authorize_url = redirect_url()
    session["oauth_state"] = state  # checked later by /redirect
    return redirect(authorize_url)
@auth.route("/profile")
async def auth_profile():
    """Return the cached Discord user as JSON, or JSON null when logged out."""
    if "token" not in session:
        return json(None)
    return json(session["user"])
|
430547 | from ._expquad_gauss import _kernel_mean_expquad_gauss, _kernel_variance_expquad_gauss
from ._expquad_lebesgue import (
_kernel_mean_expquad_lebesgue,
_kernel_variance_expquad_lebesgue,
)
from ._kernel_embedding import KernelEmbedding
|
430554 | from river import compose, preprocessing
def test_left_is_pipeline():
group_1 = compose.Select("a", "b")
group_2 = compose.Select("x", "y") | preprocessing.OneHotEncoder(sparse=True)
product = group_1 + group_2 + group_1 * group_2
assert product.transform_one(dict(a=1, b=2, x=4, y=4, z=5)) == {
"a*y_4": 1,
"a*x_4": 1,
"b*y_4": 2,
"b*x_4": 2,
"y_4": 1,
"x_4": 1,
"a": 1,
"b": 2,
}
def test_right_is_pipeline():
    """Cross a selector|one-hot pipeline with a plain selector."""
    piped = compose.Select("a", "b") | preprocessing.OneHotEncoder(sparse=True)
    plain = compose.Select("x", "y")
    union = piped + plain + piped * plain
    observed = union.transform_one({"a": 1, "b": 2, "x": 4, "y": 4, "z": 5})
    expected = {
        "a_1*x": 4,
        "a_1*y": 4,
        "b_2*x": 4,
        "b_2*y": 4,
        "x": 4,
        "y": 4,
        "a_1": 1,
        "b_2": 1,
    }
    assert observed == expected
def test_both_are_pipelines():
    """Cross two selector|one-hot pipelines with each other."""
    left = compose.Select("a", "b") | preprocessing.OneHotEncoder(sparse=True)
    right = compose.Select("x", "y") | preprocessing.OneHotEncoder(sparse=True)
    union = left + right + left * right
    observed = union.transform_one({"a": 1, "b": 2, "x": 4, "y": 4, "z": 5})
    expected = {
        "b_2*x_4": 1,
        "b_2*y_4": 1,
        "a_1*x_4": 1,
        "a_1*y_4": 1,
        "x_4": 1,
        "y_4": 1,
        "b_2": 1,
        "a_1": 1,
    }
    assert observed == expected
def test_renaming():
    """Renamer maps known keys and passes unknown keys through untouched."""
    renamer = compose.Renamer({"a": "z", "b": "y", "c": "x"})
    out = renamer.transform_one({"a": 1, "b": 2, "d": 3})
    assert out == {"z": 1, "y": 2, "d": 3}
def test_prefixing():
    """Prefixer prepends the given string to every feature name."""
    prefixer = compose.Prefixer("x_")
    out = prefixer.transform_one({"a": 1, "b": 2, "d": 3})
    assert out == {"x_a": 1, "x_b": 2, "x_d": 3}
def test_suffixing():
    """Suffixer appends the given string to every feature name."""
    suffixer = compose.Suffixer("_x")
    out = suffixer.transform_one({"a": 1, "b": 2, "d": 3})
    assert out == {"a_x": 1, "b_x": 2, "d_x": 3}
|
430571 | from typing import Any
import torch
import numpy as np
def herd_random(x: np.ndarray, y: np.ndarray, t: np.ndarray, z: Any, nb_per_class: int) -> tuple:
    """Randomly sample up to `nb_per_class` examples per class for rehearsal.

    :param x: Input data (images, paths, etc.)
    :param y: Labels of the data.
    :param t: Task ids of the data.
    :param z: Extra info, here unused.
    :param nb_per_class: Number of samples to herd per class.
    :return: The sampled data (x, y, t).
    """
    indexes = []
    for class_id in np.unique(y):
        class_indexes = np.where(y == class_id)[0]
        # A class may hold fewer samples than requested; never sample with
        # replacement so each kept example is unique.
        size = min(nb_per_class, len(class_indexes))
        indexes.append(np.random.choice(class_indexes, size=size, replace=False))
    indexes = np.concatenate(indexes)
    # Fixed defect: return annotation claimed a single np.ndarray, but the
    # function has always returned a 3-tuple of arrays.
    return x[indexes], y[indexes], t[indexes]
def herd_closest_to_cluster(
    x: np.ndarray,
    y: np.ndarray,
    t: np.ndarray,
    features: np.ndarray,
    nb_per_class: int
) -> tuple:
    """Herd the samples whose features are closest to their class mean.

    :param x: Input data (images, paths, etc.)
    :param y: Labels of the data.
    :param t: Task ids of the data.
    :param features: Features of shape (nb_samples, nb_dim).
    :param nb_per_class: Number of samples to herd per class.
    :return: The sampled data (x, y, t).
    :raises ValueError: If `features` is not 2-dimensional.
    """
    if len(features.shape) != 2:
        raise ValueError(f"Expected features to have 2 dimensions, not {len(features.shape)}d.")
    indexes = []
    for class_id in np.unique(y):
        class_indexes = np.where(y == class_id)[0]
        class_features = features[class_indexes]
        # BUGFIX: average over the *sample* axis (axis=0) to get the
        # (1, nb_dim) class centroid.  The original used axis=1, which
        # averaged each sample's feature dimensions and compared samples
        # against per-sample scalars instead of the class mean.
        class_mean = np.mean(class_features, axis=0, keepdims=True)
        dist_to_mean = np.linalg.norm(class_mean - class_features, axis=1)
        # Keep the nb_per_class samples nearest to the centroid.
        tmp_indexes = dist_to_mean.argsort()[:nb_per_class]
        indexes.append(class_indexes[tmp_indexes])
    indexes = np.concatenate(indexes)
    return x[indexes], y[indexes], t[indexes]
def herd_closest_to_barycenter(
x: np.ndarray,
y: np.ndarray,
t: np.ndarray,
features: np.ndarray,
nb_per_class: int
) -> np.ndarray:
"""Herd the samples whose features is the closest to their moving barycenter.
Reference:
* iCaRL: Incremental Classifier and Representation Learning
<NAME>, <NAME>, <NAME>, <NAME>
CVPR 2017
:param x: Input data (images, paths, etc.)
:param y: Labels of the data.
:param t: Task ids of the data.
:param features: Features of shape (nb_samples, nb_dim).
:param nb_per_class: Number of samples to herd per class.
:return: The sampled data x, y, t.
"""
if len(features.shape) != 2:
raise ValueError(f"Expected features to have 2 dimensions, not {len(features.shape)}d.")
indexes = []
for class_id in np.unique(y):
class_indexes = np.where(y == class_id)[0]
class_features = features[class_indexes]
# Work with column vectors; L2-normalize each sample's feature vector
# (epsilon guards against a zero-norm feature).
D = class_features.T
D = D / (np.linalg.norm(D, axis=0) + 1e-8)
# mu is the class barycenter in the normalized feature space.
mu = np.mean(D, axis=1)
# herding_matrix[i] records the (1-based) step at which sample i was
# picked; 0 means "not picked yet".
herding_matrix = np.zeros((class_features.shape[0],))
w_t = mu
iter_herding, iter_herding_eff = 0, 0
# Greedy iCaRL herding: repeatedly pick the sample most aligned with the
# residual direction w_t, until enough samples are chosen or the 1000
# iteration safety cap is hit.
while not (
np.sum(herding_matrix != 0) == min(nb_per_class, class_features.shape[0])
) and iter_herding_eff < 1000:
tmp_t = np.dot(w_t, D)
ind_max = np.argmax(tmp_t)
iter_herding_eff += 1
if herding_matrix[ind_max] == 0:
herding_matrix[ind_max] = 1 + iter_herding
iter_herding += 1
# Move the residual toward the barycenter, away from the chosen sample.
w_t = w_t + mu - D[:, ind_max]
# Unpicked samples get a large sentinel rank so they sort last.
herding_matrix[np.where(herding_matrix == 0)[0]] = 10000
tmp_indexes = herding_matrix.argsort()[:nb_per_class]
indexes.append(class_indexes[tmp_indexes])
indexes = np.concatenate(indexes)
# NOTE(review): returns a 3-tuple despite the np.ndarray annotation.
return x[indexes], y[indexes], t[indexes]
|
430610 | import subprocess,os,sys
import tmp as sprint
# NOTE(review): this whole snippet is Python 2 (`print` statements,
# `except Exception, e`) and its indentation was lost in extraction;
# comments only are added here, code is untouched.
# Prints the SPRINT banner.
def main():
#import subprocess,os,sys
print ''
print "##############################################################################################"
print ''
print " SPRINT: SNP-free RNA editing Identification Toolkit"
print ""
print " http://sprint.tianlab.cn/SPRINT/"
print ""
print " Please contact <EMAIL> when questions arise."
print ""
print "##############################################################################################"
# Prints the usage/help text for `sprint main` and exits the process.
def help_doc():
print ""
print " Attention:"
print ""
print " Before using 'sprint main', please use 'sprint prepare' to build mapping index."
print ""
print " Usage:"
print ""
print " sprint main [options] reference_genome(.fa) output_path bwa_path samtools_path"
print ""
print " options:"
print " -1 read1(.fq) # Required !!!"
print " -2 read2(.fq) # Optional"
print " -rp repeat_file # Optional, you can download it from http://sprint.software/SPRINT/dbrep/"
print " -ss INT # when input is strand-specific sequencing data, please clarify the direction of read1. [0 for antisense; 1 for sense] (default is 0)"
#print " -b INT # the format of read file [0: fq, 1: bam] (default is 0)"
print " -c INT # Remove the fist INT bp of each read (default is 0)"
print " -p INT # Mapping CPU (default is 1)"
print " -cd INT # The distance cutoff of SNV duplets (default is 200)"
print " -csad1 INT # Regular - [-rp is required] cluster size - Alu - AD >=1 (default is 3)"
print " -csad2 INT # Regular - [-rp is required] cluster size - Alu - AD >=2 (default is 2)"
print " -csnar INT # Regular - [-rp is required] cluster size - nonAlu Repeat - AD >=1 (default is 5)"
print " -csnr INT # Regular - [-rp is required] cluster size - nonRepeat - AD >=1 (default is 7)"
print " -csrg INT # Regular - [without -rp] cluster size - AD >=1 (default is 5)"
print " -csahp INT # Hyper - [-rp is required] cluster size - Alu - AD >=1 (default is 5)"
print " -csnarhp INT # Hyper - [-rp is required] cluster size - nonAlu Repeat - AD >=1 (default is 5)"
print " -csnrhp INT # Hyper - [-rp is required] cluster size - nonRepeat - AD >=1 (default is 5)"
print " -cshp INT # Hyper - [without -rp] cluster size - AD >=1 (default is 5)"
print ""
print " Example:"
print ""
print " sprint main -rp hg19_repeat.txt -c 6 -p 6 -1 read1.fq -2 read2.fq hg19.fa output ./bwa-0.7.12/bwa ./samtools-1.2/samtools"
print ""
print " Notes: Default protocol of strand-specific RNA-seq is dUTP (read1: '-'; read2: '+')"
print ""
#print sys.argv[0]
sys.exit(0)
# No arguments at all: show help and exit.
if len(sys.argv)<2:
#print sys.argv[0]
help_doc()
# Default values for every tunable parameter (see help_doc for meanings).
read_format=0
cutbp=0
cluster_distance=200
cluster_size_alu_ad1 = 3
cluster_size_alu_ad2 = 2
cluster_size_nalurp = 5
cluster_size_nrp = 7
cluster_size_rg = 5
cluster_size_hp = 5
cluster_size_alu_hp = 5
cluster_size_nalurp_hp = 5
cluster_size_nrp_hp = 5
strand_specify=0
mapcpu = 1
var_limit=20
poly_limit=10
rm_multi=0
paired_end=False
repeat=False
# `options` collects the argv indices consumed by flags so the leftover
# indices can later be treated as the 4 positional arguments.
options=[]
read2=''
read1=''
#print sys.argv
# Hand-rolled option parser: each flag consumes the following argv entry;
# a malformed value prints an error and shows the help text.
i=1
while i< len(sys.argv):
#if sys.argv[i]=='-b':
# try:
# read_format=int(sys.argv[i+1])
# options.append(i)
# options.append(i+1)
# except Exception, e:
# print 'options error!'
# help_doc()
if sys.argv[i]=='-1':
try:
read1=sys.argv[i+1]
options.append(i)
options.append(i+1)
except Exception, e:
print 'options error!'
help_doc()
elif sys.argv[i]=='-2':
paired_end=True
try:
read2=sys.argv[i+1]
options.append(i)
options.append(i+1)
except Exception, e:
print 'options error!'
help_doc()
exit()
elif sys.argv[i]=='-rp':
try:
repeat=sys.argv[i+1]
options.append(i)
options.append(i+1)
except Exception, e:
print 'options error!'
help_doc()
exit()
elif sys.argv[i]=='-ss':
try:
strand_specify=int(sys.argv[i+1])
options.append(i)
options.append(i+1)
except Exception, e:
print 'options error!'
help_doc()
exit()
elif sys.argv[i]=='-c':
try:
cutbp=int(sys.argv[i+1])
options.append(i)
options.append(i+1)
except Exception, e:
print 'options error!'
help_doc()
exit()
elif sys.argv[i]=='-p':
try:
mapcpu=int(sys.argv[i+1])
options.append(i)
options.append(i+1)
except Exception, e:
print 'options error!'
help_doc()
exit()
elif sys.argv[i]=='-cd':
try:
cluster_distance=int(sys.argv[i+1])
options.append(i)
options.append(i+1)
except Exception, e:
print 'options error!'
help_doc()
exit()
elif sys.argv[i]=='-csad1':
try:
cluster_size_alu_ad1=int(sys.argv[i+1])
options.append(i)
options.append(i+1)
except Exception, e:
print 'options error!'
help_doc()
exit()
elif sys.argv[i]=='-csad2':
try:
cluster_size_alu_ad2=int(sys.argv[i+1])
options.append(i)
options.append(i+1)
except Exception, e:
print 'options error!'
help_doc()
exit()
elif sys.argv[i]=='-csnar':
try:
cluster_size_nalurp=int(sys.argv[i+1])
options.append(i)
options.append(i+1)
except Exception, e:
print 'options error!'
help_doc()
exit()
elif sys.argv[i]=='-csnr':
try:
cluster_size_nrp=int(sys.argv[i+1])
options.append(i)
options.append(i+1)
except Exception, e:
print 'options error!'
help_doc()
exit()
elif sys.argv[i]=='-csrg':
try:
cluster_size_rg=int(sys.argv[i+1])
options.append(i)
options.append(i+1)
except Exception, e:
print 'options error!'
help_doc()
exit()
elif sys.argv[i]=='-cshp':
try:
cluster_size_hp=int(sys.argv[i+1])
options.append(i)
options.append(i+1)
except Exception, e:
print 'options error!'
help_doc()
exit()
elif sys.argv[i]=='-csahp':
try:
cluster_size_alu_hp=int(sys.argv[i+1])
options.append(i)
options.append(i+1)
except Exception, e:
print 'options error!'
help_doc()
exit()
elif sys.argv[i]=='-csnarhp':
try:
cluster_size_nalurp_hp=int(sys.argv[i+1])
options.append(i)
options.append(i+1)
except Exception, e:
print 'options error!'
help_doc()
exit()
elif sys.argv[i]=='-csnrhp':
try:
cluster_size_nrp_hp=int(sys.argv[i+1])
options.append(i)
options.append(i+1)
except Exception, e:
print 'options error!'
help_doc()
exit()
i += 1
# Remaining (non-flag) argv indices are the 4 positional arguments:
# reference genome, output dir, bwa binary, samtools binary.
all_argv=[]
i=1
while i< len(sys.argv):
if i not in options:
all_argv.append(i)
i=i+1
if len(all_argv)!=4 or read1=='':
help_doc()
exit()
refgenome=sys.argv[all_argv[0]]
output=sys.argv[all_argv[1]]+'/'
tmp=output+'/tmp/'
bwa=sys.argv[all_argv[2]]
samtools=sys.argv[all_argv[3]]
if os.path.exists(output)==False:
os.mkdir(output)
if os.path.exists(tmp)==False:
os.mkdir(tmp)
# Record the full command line in tmp/PARAMETER.txt for reproducibility.
frc=open(tmp+'PARAMETER.txt','w')
frc.write(sys.argv[0])
for one in sys.argv[1:]:
frc.write(' '+one)
frc.write('\n')
frc.close()
# Map fastq reads with bwa (aln + samse) and produce tmp/<TAG>/all.bam and
# <TAG>_all.sam; paired-end reads are mapped separately, sorted and merged.
# Shells out to bwa/samtools via subprocess and removes intermediates as it goes.
def fq2sam(TAG,paired_end,read1,read2,tmp,refgenome,bwa,samtools,mapcpu, read_format):
if paired_end==True:
# Split the CPU budget between the two mates mapped in parallel.
mapcpu=max([int(int(mapcpu)/2.0),1])
ori_tmp=tmp
tmp=tmp+'/'+TAG+'/'
if os.path.exists(tmp)==False:
os.mkdir(tmp)
step1_1=subprocess.Popen(bwa+' aln -t '+str(mapcpu)+' '+refgenome+' '+read1+' > '+tmp+'read1.sai',shell=True)
if paired_end==True:
step1_2=subprocess.Popen(bwa+' aln -t '+str(mapcpu)+' '+refgenome+' '+read2+' > '+tmp+'read2.sai',shell=True)
step1_1.wait()
if paired_end==True:
step1_2.wait()
step1_3=subprocess.Popen(bwa+' samse -n4 '+refgenome+' '+tmp+'read1.sai '+read1+' > '+tmp+'name_read1.sam',shell=True)
if paired_end==True:
step1_4=subprocess.Popen(bwa+' samse -n4 '+refgenome+' '+tmp+'read2.sai '+read2+' > '+tmp+'name_read2.sam',shell=True)
step1_3.wait()
if paired_end==True:
step1_4.wait()
if os.path.exists(tmp+'name_read1.sam'):
if os.path.exists(tmp+'read1.sai'):
os.remove(tmp+'read1.sai')
if os.path.exists(ori_tmp+'cut_read1.fastq'):
os.remove(ori_tmp+'cut_read1.fastq')
if os.path.exists(tmp+'name_read2.sam'):
if os.path.exists(tmp+'read2.sai'):
os.remove(tmp+'read2.sai')
if os.path.exists(ori_tmp+'cut_read2.fastq'):
os.remove(ori_tmp+'cut_read2.fastq')
step1_7=subprocess.Popen(samtools+' view -bS '+tmp+'name_read1.sam >'+tmp+'name_read1.bam',shell=True)
if paired_end==True:
step1_8=subprocess.Popen(samtools+' view -bS '+tmp+'name_read2.sam >'+tmp+'name_read2.bam',shell=True)
step1_7.wait()
if paired_end==True:
step1_8.wait()
if paired_end==True:
# NOTE(review): `samtools sort in.bam out_prefix` is the pre-1.3
# samtools CLI; newer samtools needs `-o out.bam`.
step1_9=subprocess.Popen(samtools+' sort '+tmp+'name_read1.bam '+tmp+'name_read1_sorted',shell=True)
step1_10=subprocess.Popen(samtools+' sort '+tmp+'name_read2.bam '+tmp+'name_read2_sorted',shell=True)
step1_9.wait()
step1_10.wait()
step1_11=subprocess.Popen(samtools+' merge -f '+tmp+'all.bam '+tmp+'name_read1_sorted.bam '+tmp+'name_read2_sorted.bam',shell=True)
step1_11.wait()
if os.path.exists(tmp+'all.bam'):
if os.path.exists(tmp+'name_read1.sam'):
os.remove(tmp+'name_read1.sam')
if os.path.exists(tmp+'name_read1.bam'):
os.remove(tmp+'name_read1.bam')
if os.path.exists(tmp+'name_read1_sorted.bam'):
os.remove(tmp+'name_read1_sorted.bam')
if os.path.exists(tmp+'name_read2.sam'):
os.remove(tmp+'name_read2.sam')
if os.path.exists(tmp+'name_read2.bam'):
os.remove(tmp+'name_read2.bam')
if os.path.exists(tmp+'name_read2_sorted.bam'):
os.remove(tmp+'name_read2_sorted.bam')
else:
step1_9=subprocess.Popen(samtools+' sort '+tmp+'name_read1.bam '+tmp+'all',shell=True)
step1_9.wait()
if os.path.exists(tmp+'all.bam'):
if os.path.exists(tmp+'name_read1.sam'):
os.remove(tmp+'name_read1.sam')
if os.path.exists(tmp+'name_read1.bam'):
os.remove(tmp+'name_read1.bam')
step2_2=subprocess.Popen(samtools+' view -h -o '+tmp+'all.sam '+tmp+'all.bam',shell=True)
step2_2.wait()
subprocess.Popen('cp '+tmp+'./all.sam '+ori_tmp+'/'+TAG+'_all.sam',shell=True).wait()
if os.path.exists(tmp+'all.sam'):
os.remove(tmp+'all.sam')
#try:
# Pipeline driver (the `if 1==1:` stands in for a disabled try block).
if 1==1:
print 'preprocessing...'
# read_format != 0 means input is BAM: convert to fastq first.
if read_format !=0:
subprocess.Popen(samtools+' view -h -o '+tmp+'read1.sam '+read1,shell=True).wait()
sprint.sam2fq(tmp+'read1.sam', tmp+'read1.fq')
read1=tmp+'read1.fq'
sprint.cut(read1,tmp+'cut_read1.fastq',cutbp,'read1')
if paired_end==True:
subprocess.Popen(samtools+' view -h -o '+tmp+'read2.sam '+read2,shell=True).wait()
sprint.sam2fq(tmp+'read2.sam', tmp+'read2.fq')
read2=tmp+'read2.fq'
sprint.cut(read2,tmp+'cut_read2.fastq',cutbp,'read2')
else:
# Strand-specific handling: when read1 is sense (-ss 1), swap the
# read1/read2 tags passed to sprint.cut.
if strand_specify==0:
sprint.cut(read1,tmp+'cut_read1.fastq',cutbp,'read1')
if paired_end==True:
sprint.cut(read2,tmp+'cut_read2.fastq',cutbp,'read2')
else:
sprint.cut(read1,tmp+'cut_read1.fastq',cutbp,'read2')
if paired_end==True:
sprint.cut(read2,tmp+'cut_read2.fastq',cutbp,'read1')
sprint.get_baseq_cutoff(read1,tmp+'baseq.cutoff')
print 'mapping...'
# Stage 1: map trimmed reads to the genome; collect unmapped reads (-f4).
TAG='genome'
fq2sam(TAG,paired_end,tmp+'cut_read1.fastq',tmp+'cut_read2.fastq',tmp,refgenome,bwa,samtools,mapcpu,read_format)
subprocess.Popen(samtools+' view -f4 '+tmp+'/'+TAG+'/all.bam > '+tmp+'/'+TAG+'_unmapped.sam',shell=True).wait()
sprint.umsam2fq(tmp+'/'+TAG+'_unmapped.sam',tmp+'/'+TAG+'_unmapped.fq')
# Stage 2 (optional): remap genome-unmapped reads to the transcriptome.
if os.path.exists(refgenome+'.trans.fa'):
TAG='transcript'
fq2sam(TAG,False,tmp+'/genome_unmapped.fq',read2,tmp,refgenome+'.trans.fa',bwa,samtools,mapcpu,read_format)
subprocess.Popen(samtools+' view -f4 '+tmp+'/'+TAG+'/all.bam > '+tmp+'/'+TAG+'_unmapped.sam',shell=True).wait()
sprint.umsam2fq(tmp+'/'+TAG+'_unmapped.sam',tmp+'/regular_unmapped.fq')
sprint.maskfq(tmp+'/regular_unmapped.fq','A','G')
else:
sprint.umsam2fq(tmp+'/'+TAG+'_unmapped.sam',tmp+'/regular_unmapped.fq')
sprint.maskfq(tmp+'/regular_unmapped.fq','A','G')
# Stage 3: hyper-editing pass — map A->G masked reads against the
# AG- and TC-masked genomes (and transcriptomes when available).
TAG='genome_mskAG'
fq2sam(TAG,False,tmp+'/regular_unmapped_A_to_G.fq',read2,tmp,refgenome+'.mskAG.fa',bwa,samtools,mapcpu,read_format)
subprocess.Popen(samtools+' view -f4 '+tmp+'/'+TAG+'/all.bam > '+tmp+'/'+TAG+'_unmapped.sam',shell=True).wait()
sprint.umsam2fq(tmp+'/'+TAG+'_unmapped.sam',tmp+'/'+TAG+'_unmapped.fq')
TAG='genome_mskTC'
fq2sam(TAG,False,tmp+'/regular_unmapped_A_to_G.fq',read2,tmp,refgenome+'.mskTC.fa',bwa,samtools,mapcpu,read_format)
subprocess.Popen(samtools+' view -f4 '+tmp+'/'+TAG+'/all.bam > '+tmp+'/'+TAG+'_unmapped.sam',shell=True).wait()
sprint.umsam2fq(tmp+'/'+TAG+'_unmapped.sam',tmp+'/'+TAG+'_unmapped.fq')
if os.path.exists(refgenome+'.trans.fa'):
TAG='transcript_mskAG'
fq2sam(TAG,False,tmp+'/genome_mskAG_unmapped.fq',read2,tmp,refgenome+'.trans.fa.mskAG.fa',bwa,samtools,mapcpu,read_format)
TAG='transcript_mskTC'
fq2sam(TAG,False,tmp+'/genome_mskTC_unmapped.fq',read2,tmp,refgenome+'.trans.fa.mskTC.fa',bwa,samtools,mapcpu,read_format)
# Clean up intermediate fastq/sam files once the masked mapping succeeded.
if os.path.exists(tmp+'genome_mskAG_unmapped.sam'):
if os.path.exists(tmp+'cut_read1.fastq'):
os.remove(tmp+'cut_read1.fastq')
if os.path.exists(tmp+'cut_read2.fastq'):
os.remove(tmp+'cut_read2.fastq')
if os.path.exists(tmp+'genome_mskAG_unmapped.fq'):
os.remove(tmp+'genome_mskAG_unmapped.fq')
if os.path.exists(tmp+'genome_mskAG_unmapped.sam'):
os.remove(tmp+'genome_mskAG_unmapped.sam')
if os.path.exists(tmp+'genome_mskTC_unmapped.fq'):
os.remove(tmp+'genome_mskTC_unmapped.fq')
if os.path.exists(tmp+'genome_mskTC_unmapped.sam'):
os.remove(tmp+'genome_mskTC_unmapped.sam')
if os.path.exists(tmp+'genome_unmapped.fq'):
os.remove(tmp+'genome_unmapped.fq')
if os.path.exists(tmp+'genome_unmapped.sam'):
os.remove(tmp+'genome_unmapped.sam')
if os.path.exists(tmp+'transcript_unmapped_A_to_G.fq'):
os.remove(tmp+'transcript_unmapped_A_to_G.fq')
if os.path.exists(tmp+'transcript_unmapped.fq'):
os.remove(tmp+'transcript_unmapped.fq')
if os.path.exists(tmp+'transcript_unmapped.sam'):
os.remove(tmp+'transcript_unmapped.sam')
if os.path.exists(tmp+'regular_unmapped.fq'):
os.remove(tmp+'regular_unmapped.fq')
if os.path.exists(tmp+'regular_unmapped_A_to_G.fq'):
os.remove(tmp+'regular_unmapped_A_to_G.fq')
# Convert SAM alignments to SPRINT's internal .zz format (recovering masked
# bases first for the hyper-editing alignments), then deduplicate.
if os.path.exists(refgenome+'.trans.fa'):
sprint.recover_sam(tmp+'transcript_mskAG_all.sam',tmp+'transcript_mskAG_all.sam.rcv', var_limit, poly_limit, rm_multi)
sprint.sam2zz(tmp+'transcript_mskAG_all.sam.rcv',refgenome+'.trans.fa',tmp+'transcript_mskAG_all.zz')
sprint.recover_sam(tmp+'transcript_mskTC_all.sam',tmp+'transcript_mskTC_all.sam.rcv', var_limit, poly_limit, rm_multi)
sprint.sam2zz(tmp+'transcript_mskTC_all.sam.rcv',refgenome+'.trans.fa',tmp+'transcript_mskTC_all.zz')
sprint.sam2zz(tmp+'transcript_all.sam',refgenome+'.trans.fa',tmp+'transcript_all.zz')
if os.path.exists(tmp+'transcript_mskAG_all.sam.rcv'):
os.remove(tmp+'transcript_mskAG_all.sam.rcv')
if os.path.exists(tmp+'transcript_mskAG_all.sam'):
os.remove(tmp+'transcript_mskAG_all.sam')
if os.path.exists(tmp+'transcript_mskTC_all.sam.rcv'):
os.remove(tmp+'transcript_mskTC_all.sam.rcv')
if os.path.exists(tmp+'transcript_mskTC_all.sam'):
os.remove(tmp+'transcript_mskTC_all.sam')
if os.path.exists(tmp+'transcript_all.sam'):
os.remove(tmp+'transcript_all.sam')
sprint.recover_sam(tmp+'genome_mskAG_all.sam',tmp+'genome_mskAG_all.sam.rcv', var_limit, poly_limit, rm_multi)
sprint.sam2zz(tmp+'genome_mskAG_all.sam.rcv',refgenome,tmp+'genome_mskAG_all.zz')
sprint.recover_sam(tmp+'genome_mskTC_all.sam',tmp+'genome_mskTC_all.sam.rcv', var_limit, poly_limit, rm_multi)
sprint.sam2zz(tmp+'genome_mskTC_all.sam.rcv',refgenome,tmp+'genome_mskTC_all.zz')
sprint.sam2zz(tmp+'genome_all.sam',refgenome,tmp+'genome_all.zz')
if os.path.exists(tmp+'genome_mskAG_all.sam.rcv'):
os.remove(tmp+'genome_mskAG_all.sam.rcv')
if os.path.exists(tmp+'genome_mskAG_all.sam'):
os.remove(tmp+'genome_mskAG_all.sam')
if os.path.exists(tmp+'genome_mskTC_all.sam.rcv'):
os.remove(tmp+'genome_mskTC_all.sam.rcv')
if os.path.exists(tmp+'genome_mskTC_all.sam'):
os.remove(tmp+'genome_mskTC_all.sam')
if os.path.exists(tmp+'genome_all.sam'):
os.remove(tmp+'genome_all.sam')
# Deduplicate reads in every .zz file.
if os.path.exists(refgenome+'.trans.fa'):
sprint.dedup(tmp+'transcript_mskAG_all.zz',tmp+'transcript_mskAG_all.zz.dedup')
sprint.dedup(tmp+'transcript_mskTC_all.zz',tmp+'transcript_mskTC_all.zz.dedup')
sprint.dedup(tmp+'transcript_all.zz',tmp+'transcript_all.zz.dedup')
sprint.dedup(tmp+'genome_mskAG_all.zz',tmp+'genome_mskAG_all.zz.dedup')
sprint.dedup(tmp+'genome_mskTC_all.zz',tmp+'genome_mskTC_all.zz.dedup')
sprint.dedup(tmp+'genome_all.zz',tmp+'genome_all.zz.dedup')
if os.path.exists(tmp+'transcript_mskAG_all.zz'):
os.remove(tmp+'transcript_mskAG_all.zz')
if os.path.exists(tmp+'transcript_mskTC_all.zz'):
os.remove(tmp+'transcript_mskTC_all.zz')
if os.path.exists(tmp+'transcript_all.zz'):
os.remove(tmp+'transcript_all.zz')
if os.path.exists(tmp+'genome_mskAG_all.zz'):
os.remove(tmp+'genome_mskAG_all.zz')
if os.path.exists(tmp+'genome_mskTC_all.zz'):
os.remove(tmp+'genome_mskTC_all.zz')
if os.path.exists(tmp+'genome_all.zz'):
os.remove(tmp+'genome_all.zz')
print 'identifying SNVs...'
# Call SNVs from the deduplicated .zz files; transcript coordinates are
# lifted back to genome coordinates via the .loc file, then merged with
# the genome-based calls.
if os.path.exists(refgenome+'.trans.fa'):
sprint.mask_zz2snv(tmp+'transcript_mskAG_all.zz.dedup',tmp+'transcript_mskAG_all.zz.dedup.snv',tmp+'baseq.cutoff')
sprint.mask_zz2snv(tmp+'transcript_mskTC_all.zz.dedup',tmp+'transcript_mskTC_all.zz.dedup.snv',tmp+'baseq.cutoff')
sprint.mask_zz2snv(tmp+'transcript_all.zz.dedup',tmp+'transcript_all.zz.dedup.snv',tmp+'baseq.cutoff')
sprint.tzz2gzz(refgenome+'.trans.fa.loc', tmp+'transcript_mskAG_all.zz.dedup', tmp+'transcript_mskAG_all.zz.dedup.genome.zz')
sprint.tzz2gzz(refgenome+'.trans.fa.loc', tmp+'transcript_mskTC_all.zz.dedup', tmp+'transcript_mskTC_all.zz.dedup.genome.zz')
sprint.tzz2gzz(refgenome+'.trans.fa.loc', tmp+'transcript_all.zz.dedup', tmp+'transcript_all.zz.dedup.genome.zz')
sprint.mask_zz2snv(tmp+'genome_mskAG_all.zz.dedup',tmp+'genome_mskAG_all.zz.dedup.snv',tmp+'baseq.cutoff')
sprint.mask_zz2snv(tmp+'genome_mskTC_all.zz.dedup',tmp+'genome_mskTC_all.zz.dedup.snv',tmp+'baseq.cutoff')
sprint.mask_zz2snv(tmp+'genome_all.zz.dedup',tmp+'genome_all.zz.dedup.snv',tmp+'baseq.cutoff')
if os.path.exists(refgenome+'.trans.fa'):
subprocess.Popen('cat '+tmp+'/genome_mskAG_all.zz.dedup '+tmp+'/genome_mskTC_all.zz.dedup '+tmp+'/genome_all.zz.dedup '+tmp+'/transcript_mskAG_all.zz.dedup.genome.zz '+tmp+'/transcript_mskTC_all.zz.dedup.genome.zz '+tmp+'/transcript_all.zz.dedup.genome.zz '+' > '+tmp+'/all_combined.zz',shell=True).wait()
sprint.sort_zz(tmp+'/all_combined.zz', tmp+'/all_combined.zz.sorted')
else:
subprocess.Popen('cat '+tmp+'/genome_mskAG_all.zz.dedup '+tmp+'/genome_mskTC_all.zz.dedup '+tmp+'/genome_all.zz.dedup '+' > '+tmp+'/all_combined.zz',shell=True).wait()
sprint.sort_zz(tmp+'/all_combined.zz', tmp+'/all_combined.zz.sorted')
if os.path.exists(refgenome+'.trans.fa'):
sprint.transcript_locator(tmp+'transcript_mskAG_all.zz.dedup.snv',refgenome+'.trans.fa.loc', tmp+'transcript_mskAG_all.zz.dedup.snv.genome.snv')
sprint.transcript_locator(tmp+'transcript_mskTC_all.zz.dedup.snv',refgenome+'.trans.fa.loc', tmp+'transcript_mskTC_all.zz.dedup.snv.genome.snv')
sprint.transcript_locator(tmp+'transcript_all.zz.dedup.snv',refgenome+'.trans.fa.loc', tmp+'transcript_all.zz.dedup.snv.genome.snv')
sprint.transcript_sort(tmp+'transcript_all.zz.dedup.snv.genome.snv',tmp+'transcript_all.zz.dedup.snv.genome.snv.sort')
sprint.transcript_sort(tmp+'transcript_mskTC_all.zz.dedup.snv.genome.snv',tmp+'transcript_mskTC_all.zz.dedup.snv.genome.snv.sort')
sprint.transcript_sort(tmp+'transcript_mskAG_all.zz.dedup.snv.genome.snv',tmp+'transcript_mskAG_all.zz.dedup.snv.genome.snv.sort')
sprint.snv_or(tmp+'transcript_all.zz.dedup.snv.genome.snv.sort',tmp+'genome_all.zz.dedup.snv',tmp+'regular.snv')
sprint.snv_or(tmp+'transcript_mskTC_all.zz.dedup.snv.genome.snv.sort',tmp+'genome_mskTC_all.zz.dedup.snv', tmp+'hyper_mskTC.snv')
sprint.snv_or(tmp+'transcript_mskAG_all.zz.dedup.snv.genome.snv.sort',tmp+'genome_mskAG_all.zz.dedup.snv', tmp+'hyper_mskAG.snv')
else:
subprocess.Popen('cp '+tmp+'/genome_all.zz.dedup.snv '+tmp+'/regular.snv',shell=True).wait()
subprocess.Popen('cp '+tmp+'/genome_mskTC_all.zz.dedup.snv '+tmp+'/hyper_mskTC.snv',shell=True).wait()
subprocess.Popen('cp '+tmp+'/genome_mskAG_all.zz.dedup.snv '+tmp+'/hyper_mskAG.snv',shell=True).wait()
print 'identifying RESs...'
# Cluster SNVs into RNA editing sites (RES).  With a repeat annotation
# (-rp), SNVs are split into Alu / non-Alu-repeat / non-repeat groups with
# per-group cluster-size thresholds; otherwise a single threshold is used.
if repeat !=False:
sprint.annotate(tmp+'regular.snv',repeat,tmp+'regular.snv.anno')
sprint.seperate(tmp+'regular.snv.anno',tmp+'regular.snv.anno.alu',tmp+'regular.snv.anno.nalurp',tmp+'regular.snv.anno.nrp','Alu')
sprint.get_snv_with_ad(tmp+'regular.snv.anno.alu',tmp+'regular.snv.anno.alu.ad2',2)
sprint.snv_cluster(tmp+'regular.snv.anno.alu',tmp+'regular_alu.res.ad1', cluster_distance, cluster_size_alu_ad1)
sprint.snv_cluster(tmp+'regular.snv.anno.alu.ad2',tmp+'regular_alu.res.ad2', cluster_distance, cluster_size_alu_ad2)
sprint.bed_or(tmp+'regular_alu.res.ad1',tmp+'regular_alu.res.ad2',tmp+'regular_alu.res')
sprint.snv_cluster(tmp+'regular.snv.anno.nalurp',tmp+'regular_nalurp.res', cluster_distance, cluster_size_nalurp)
sprint.snv_cluster(tmp+'regular.snv.anno.nrp',tmp+'regular_nrp.res', cluster_distance, cluster_size_nrp)
sprint.combine_res(tmp+'regular_alu.res',tmp+'regular_nalurp.res',tmp+'regular_nrp.res',tmp+'regular_split.res')
cluster_size_regular_max=max([cluster_size_alu_ad1,cluster_size_alu_ad2,cluster_size_nalurp,cluster_size_nrp])
sprint.combine_res(tmp+'regular.snv.anno.alu',tmp+'regular.snv.anno.nalurp',tmp+'regular.snv.anno.nrp',tmp+'regular.snv.anno.rmsrp')
sprint.snv_cluster(tmp+'regular.snv.anno.rmsrp',tmp+'regular_overall.res', cluster_distance, cluster_size_regular_max)
sprint.res_or(tmp+'regular_split.res',tmp+'regular_overall.res',tmp+'regular.res')
sprint.annotate(tmp+'hyper_mskTC.snv',repeat,tmp+'hyper_mskTC.snv.anno')
sprint.seperate(tmp+'hyper_mskTC.snv.anno',tmp+'hyper_mskTC.snv.anno.alu',tmp+'hyper_mskTC.snv.anno.nalurp',tmp+'hyper_mskTC.snv.anno.nrp','Alu')
sprint.snv_cluster(tmp+'hyper_mskTC.snv.anno.alu',tmp+'hyper_mskTC_alu.res', cluster_distance, cluster_size_alu_hp)
sprint.snv_cluster(tmp+'hyper_mskTC.snv.anno.nalurp',tmp+'hyper_mskTC_nalurp.res', cluster_distance, cluster_size_nalurp_hp)
sprint.snv_cluster(tmp+'hyper_mskTC.snv.anno.nrp',tmp+'hyper_mskTC_nrp.res', cluster_distance, cluster_size_nrp_hp)
sprint.combine_res(tmp+'hyper_mskTC_alu.res',tmp+'hyper_mskTC_nalurp.res',tmp+'hyper_mskTC_nrp.res',tmp+'hyper_mskTC_split.res')
cluster_size_hyper_max=max([cluster_size_alu_hp,cluster_size_nalurp_hp,cluster_size_nrp_hp])
sprint.combine_res(tmp+'hyper_mskTC.snv.anno.alu',tmp+'hyper_mskTC.snv.anno.nalurp',tmp+'hyper_mskTC.snv.anno.nrp',tmp+'hyper_mskTC.snv.anno.rmsrp')
sprint.snv_cluster(tmp+'hyper_mskTC.snv.anno.rmsrp',tmp+'hyper_mskTC_overall.res', cluster_distance, cluster_size_hyper_max)
sprint.res_or(tmp+'hyper_mskTC_split.res',tmp+'hyper_mskTC_overall.res',tmp+'hyper_mskTC.res')
sprint.annotate(tmp+'hyper_mskAG.snv',repeat,tmp+'hyper_mskAG.snv.anno')
sprint.seperate(tmp+'hyper_mskAG.snv.anno',tmp+'hyper_mskAG.snv.anno.alu',tmp+'hyper_mskAG.snv.anno.nalurp',tmp+'hyper_mskAG.snv.anno.nrp','Alu')
sprint.snv_cluster(tmp+'hyper_mskAG.snv.anno.alu',tmp+'hyper_mskAG_alu.res', cluster_distance, cluster_size_alu_hp)
sprint.snv_cluster(tmp+'hyper_mskAG.snv.anno.nalurp',tmp+'hyper_mskAG_nalurp.res', cluster_distance, cluster_size_nalurp_hp)
sprint.snv_cluster(tmp+'hyper_mskAG.snv.anno.nrp',tmp+'hyper_mskAG_nrp.res', cluster_distance, cluster_size_nrp_hp)
sprint.combine_res(tmp+'hyper_mskAG_alu.res',tmp+'hyper_mskAG_nalurp.res',tmp+'hyper_mskAG_nrp.res',tmp+'hyper_mskAG_split.res')
cluster_size_hyper_max=max([cluster_size_alu_hp,cluster_size_nalurp_hp,cluster_size_nrp_hp])
sprint.combine_res(tmp+'hyper_mskAG.snv.anno.alu',tmp+'hyper_mskAG.snv.anno.nalurp',tmp+'hyper_mskAG.snv.anno.nrp',tmp+'hyper_mskAG.snv.anno.rmsrp')
sprint.snv_cluster(tmp+'hyper_mskAG.snv.anno.rmsrp',tmp+'hyper_mskAG_overall.res', cluster_distance, cluster_size_hyper_max)
sprint.res_or(tmp+'hyper_mskAG_split.res',tmp+'hyper_mskAG_overall.res',tmp+'hyper_mskAG.res')
sprint.snv_or(tmp+'hyper_mskTC.res',tmp+'hyper_mskAG.res',tmp+'hyper.res')
else:
sprint.snv_cluster(tmp+'regular.snv',tmp+'regular.res_tmp',cluster_distance,cluster_size_rg)
sprint.o2b(tmp+'regular.res_tmp',tmp+'regular.res')
sprint.snv_cluster(tmp+'hyper_mskTC.snv',tmp+'hyper_mskTC.res',cluster_distance,cluster_size_hp)
sprint.snv_cluster(tmp+'hyper_mskAG.snv',tmp+'hyper_mskAG.res',cluster_distance,cluster_size_hp)
sprint.snv_or(tmp+'hyper_mskTC.res',tmp+'hyper_mskAG.res',tmp+'hyper.res')
# Best-effort cleanup of annotation intermediates.
try:
subprocess.Popen('rm -rf '+tmp+'/*.anno.*',shell=True).wait()
except Exception, e:
pass
'''
if repeat !=False:
sprint.get_res(tmp+'regular_alu.res',tmp+'regular_nalurp.res',tmp+'regular_nrp.res', tmp+'hyper.res', tmp+'SPRINT_identified')
sprint.bed_sort(tmp+'SPRINT_identified_A_to_I_regular.res',tmp+'SPRINT_identified_A_to_I_regular.res_sort')
sprint.bed_sort(tmp+'SPRINT_identified_A_to_I_hyper.res',tmp+'SPRINT_identified_A_to_I_hyper.res_sort')
sprint.bed_sort(tmp+'SPRINT_identified_C_to_U.res',tmp+'SPRINT_identified_C_to_U.res_sort')
sprint.o2b(tmp+'SPRINT_identified_A_to_I_regular.res_sort',output+'SPRINT_identified_A_to_I_regular.res')
sprint.o2b(tmp+'SPRINT_identified_A_to_I_hyper.res_sort',output+'SPRINT_identified_A_to_I_hyper.res')
sprint.o2b(tmp+'SPRINT_identified_C_to_U.res_sort',output+'SPRINT_identified_C_to_U.res')
sprint.snv_or(tmp+'SPRINT_identified_A_to_I_hyper.res',tmp+'SPRINT_identified_A_to_I_regular.res',output+'SPRINT_identified_A_to_I_all.res')
'''
# Attach read-depth (AD:DP) to each result set and write the final
# header-prefixed .res files into the output directory.
# subprocess.Popen('cp '+tmp+'/regular.res '+output+'/SPRINT_identified_regular.res',shell=True).wait()
sprint.get_depth( tmp+'/all_combined.zz.sorted' , tmp+'/regular.res', tmp+'/regular.res.depth')
subprocess.Popen('echo "#Chrom\tStart(0base)\tEnd(1base)\tType\tSupporting_reads\tStrand\tAD:DP" | cat - '+tmp +'/regular.res.depth > '+output+'/SPRINT_identified_regular.res',shell=True).wait()
# subprocess.Popen('cp '+tmp+'/hyper.res '+output+'/SPRINT_identified_hyper.res',shell=True).wait()
sprint.get_depth( tmp+'/all_combined.zz.sorted' , tmp+'/hyper.res', tmp+'/hyper.res.depth')
subprocess.Popen('echo "#Chrom\tStart(0base)\tEnd(1base)\tType\tSupporting_reads\tStrand\tAD:DP" | cat - '+tmp +'/hyper.res.depth > '+output+'/SPRINT_identified_hyper.res',shell=True).wait()
subprocess.Popen('cp '+tmp+'/PARAMETER.txt '+output+'/PARAMETER.txt',shell=True).wait()
#subprocess.Popen('grep "AG" '+tmp+'/hyper.res | grep "+" > '+tmp+'/hyper_AG+.res',shell=True).wait()
#subprocess.Popen('grep "TC" '+tmp+'/hyper.res | grep "-" > '+tmp+'/hyper_TC-.res',shell=True).wait()
#sprint.snv_or(tmp+'/hyper_AG+.res',tmp+'/hyper_TC-.res',tmp+'/hyper_AG.res')
#sprint.snv_or(tmp+'/regular.res',tmp+'/hyper_AG.res',tmp+'/all.res')
sprint.snv_or(tmp+'/regular.res',tmp+'/hyper.res',tmp+'/all.res')
sprint.get_depth( tmp+'/all_combined.zz.sorted' , tmp+'/all.res', tmp+'/all.res.depth')
subprocess.Popen('echo "#Chrom\tStart(0base)\tEnd(1base)\tType\tSupporting_reads\tStrand\tAD:DP" | cat - '+tmp +'/all.res.depth > '+output+'/SPRINT_identified_all.res',shell=True).wait()
print 'finished !'
sys.exit(0)
# NOTE(review): this trailing try block is a no-op (`pass`); the error
# reporting below is unreachable as written.
try:
pass
except Exception,e:
print ''
print 'ERROR!'
print ''
print e
print ''
help_doc()
#if __name__=='__main__':
# main()
|
430623 | import numpy as np
import torch
from torch.autograd import Variable
import matplotlib.pyplot as plt
from matplotlib.image import imread
#from dataset_loader import *
import skimage
#load parameters theta from file
def load_theta_npy(path, num_stages):
    """Load stage parameters from *path* and return them as a
    float32 array of shape (num_stages, -1)."""
    raw = np.load(path)
    return np.reshape(raw, (num_stages, -1)).astype("float32")
#load parameters for the gamma transformation
#the parameters are particular for the given data, and taken from
#the MSR demosaicing dataset
def init_colortransformation_gamma():
    """Load the fitted gamma parameters and the color transformation
    matrix from disk and bundle them with the scaling constants.

    The parameter files are specific to the MSR demosaicing dataset.
    """
    gamma = np.load('gammaparams.npy').astype('float32')
    color_mtx = np.load('colortrans.npy').astype('float32')
    # Add two leading singleton dims so the matrix broadcasts/contracts
    # against batched (1, 3, H, W) images.
    color_mtx = color_mtx[np.newaxis, np.newaxis, ...]
    return {
        'UINT8': 255.0,
        'UINT16': 65535.0,
        'corr_const': 15.0,
        'gammaparams': gamma,
        'colortrans_mtx': color_mtx,
    }
# compute the gamma function
# we fitted a function according to the given gamma mapping in the
# Microsoft demosaicing data set
def _f_gamma(img, param_dict):
params = param_dict['gammaparams']
UINT8 = param_dict['UINT8']
UINT16 = param_dict['UINT16']
return UINT8*(((1 + params[0]) * \
np.power(UINT16*(img/UINT8), 1.0/params[1]) - \
params[0] +
params[2]*(UINT16*(img/UINT8)))/UINT16)
# apply the color transformation matrix
def _f_color_t(img, param_dict):
return np.tensordot(param_dict['colortrans_mtx'], img, axes=([1,2],[0,1]))
# apply the black level correction constant
def _f_corr(img, param_dict):
return img - param_dict['UINT8'] * \
(param_dict['corr_const']/param_dict['UINT16'])
# wrapper for the conversion from linear to sRGB space with given parameters
def apply_colortransformation_gamma(img, param_dict):
    """Convert *img* from linear to sRGB-like space: color matrix,
    then the fitted gamma curve (positive values only), then the
    black-level correction. Input must be in [0, 255]."""
    assert img.min() >= 0 and img.max() <= 255
    transformed = _f_color_t(img, param_dict)
    # Gamma is only defined for positive intensities; leave the rest as-is.
    gamma_corrected = np.where(transformed > 0.0, _f_gamma(transformed, param_dict), transformed)
    return _f_corr(gamma_corrected, param_dict)
'''
if __name__ == '__main__':
demosaic_dataset = MSRDemosaicDataset(root_dir='data/MSR-Demosaicing/Dataset_LINEAR_without_noise/bayer_panasonic/',
selection_file='data/MSR-Demosaicing/Dataset_LINEAR_without_noise/bayer_panasonic/validation.txt')
img = demosaic_dataset[0]['image_gt']
img = skimage.img_as_ubyte(img)
img = swapimdims_HW3_3HW(img)
img_linear = img.copy()
print(img.shape, img.min(), img.max())
srgb_params = init_colortransformation_gamma()
result_rgb = apply_colortransformation_gamma(np.expand_dims(img,0), srgb_params)
result_rgb = np.clip(result_rgb[0], 0, 255)
print(result_rgb.shape, result_rgb.min(), result_rgb.max())
fig = plt.figure()
plt.subplot(221)
plt.imshow(swapimdims_3HW_HW3(img_linear))
plt.subplot(222)
plt.imshow(swapimdims_3HW_HW3(result_rgb).astype('uint8'))
#plt.show()
#x = Variable(torch.Tensor(img))
#srgb = from_linear(x).data.numpy().astype('uint8')
#b = plt.imshow(srgb.astype('uint8'), interpolation=None)
plt.show()
'''
|
430669 | from typing import Any, Dict
from uuid import UUID
from eventsourcing.application import Application
from eventsourcing.examples.aggregate5.domainmodel import Dog
class DogSchool(Application):
    """Application service for Dog aggregates: registration,
    trick-adding and plain-dict queries."""

    is_snapshotting_enabled = True

    def register_dog(self, name: str) -> UUID:
        """Create a new Dog aggregate, persist its registration event
        and return the new aggregate's id."""
        registered = Dog.register(name)
        self.save(registered)
        return registered.originator_id

    def add_trick(self, dog_id: UUID, trick: str) -> None:
        """Append a trick to an existing dog and persist the event."""
        aggregate = self.repository.get(dog_id, projector_func=Dog.projector)
        self.save(aggregate.add_trick(trick))

    def get_dog(self, dog_id: UUID) -> Dict[str, Any]:
        """Return the dog's current state as a plain dict."""
        aggregate = self.repository.get(dog_id, projector_func=Dog.projector)
        return {"name": aggregate.name, "tricks": aggregate.tricks}
|
430696 | import pymel.core as pm
from pulse.buildItems import BuildAction, BuildActionError
class DisplayLayerAction(BuildAction):
    """Build action that puts self.objects into a display layer named
    self.name, creating the layer if it does not already exist, then
    applies the configured display attributes to it."""

    def validate(self):
        # Truthiness check instead of `len(...)`: also rejects None
        # cleanly instead of raising TypeError on len(None).
        if not self.name:
            raise BuildActionError('name cannot be empty')

    def run(self):
        matches = pm.ls(self.name)
        if matches and isinstance(matches[0], pm.nt.DisplayLayer):
            # Layer already exists: reuse it and replace its membership.
            layer = matches[0]
            pm.editDisplayLayerMembers(layer, self.objects)
        else:
            layer = pm.createDisplayLayer(self.objects, n=self.name)
        # Apply the configured display attributes to the layer.
        layer.visibility.set(self.visible)
        layer.displayType.set(self.displayType)
        layer.shading.set(self.shading)
        layer.texturing.set(self.texturing)
        layer.playback.set(self.playback)
|
430732 | from selenium import webdriver
from selenium.webdriver import ActionChains
from os import path
#NOTE: this demo uses images under images subfolder to find by name.
# Be sure to configure AutoPyDriverServer to use that folder for images by name
# NOTE(review): Python 2 demo script (print statements); drives a remote
# AutoPy WebDriver endpoint to perform an image-recognition drag & drop.
driver = webdriver.Remote( command_executor='http://127.0.0.1:4723/wd/hub', desired_capabilities={'browserName':'AutoPy','imageRecognitionToleranceValue':0.0})
print "Desired Capabilities returned by server:\n"
print driver.desired_capabilities
print ""
# Example opening the test drag & drop webpage for Windows:
#driver.execute_script("start http://html5demos.com/drag")
# or
#driver.execute_script("start http://jqueryui.com/resources/demos/droppable/default.html")
# except that it hangs the script until you close calculator
# maybe different on Linux/Mac if you launch as background process but not sure. Try at your own risk.
# so, we assume the test webpage is open, in default state, & w/ focus so can start testing at this point
# AutoPy has no APIs for handling windows
# example for http://html5demos.com/drag
# Elements are located by screenshot image name, not DOM selectors.
src = driver.find_element_by_name('drag_src_html5.png')
target = driver.find_element_by_name('drop_target_html5.png')
actions = ActionChains(driver)
actions.drag_and_drop(src,target).perform()
# or http://jqueryui.com/resources/demos/droppable/default.html
#src = driver.find_element_by_name('drag_src.png')
#target = driver.find_element_by_name('drop_target.png')
#actions = ActionChains(driver)
#actions.drag_and_drop(src,target).perform()
# result check for http://html5demos.com/drag
if len(driver.find_elements_by_name('drag_src_html5.png')) == 0: # as drag src disappeared into drop target
    # or http://jqueryui.com/resources/demos/droppable/default.html
    #if driver.find_element_by_name('dnd_result.png').is_displayed():
    print 'Drag & drop succeeded.\n'
else:
    # On failure, save a screenshot next to the script for diagnosis.
    screenshot = path.join(path.curdir,'failed_drag_and_drop.png')
    driver.get_screenshot_as_file(screenshot)
    print 'Drag & drop failed. See screenshot for actual result: %s.\n' % screenshot
# AutoPy may fail below due to problems trying to find the drag & drop result,
# may need to tweak AutoPy tolerance values. Also actual drag & drop operation
# may not be perfect in placement location each time, so actual result may not
# match the saved result image
driver.quit()
430743 | import math
from .conllu_wrapper import parse_conllu, serialize_conllu, parse_odin, conllu_to_odin, parsed_tacred_json
from .converter import Convert, get_conversion_names as inner_get_conversion_names, init_conversions
from spacy.language import Language
from .spacy_wrapper import parse_spacy_sent, enhance_to_spacy_doc
def convert_bart_conllu(conllu_text, enhance_ud=True, enhanced_plus_plus=True, enhanced_extra=True, preserve_comments=False, conv_iterations=math.inf, remove_eud_info=False, remove_extra_info=False, remove_node_adding_conversions=False, remove_unc=False, query_mode=False, funcs_to_cancel=None, ud_version=1):
    """Run the pyBART conversions on a CoNLL-U string and serialize the
    converted sentences back to CoNLL-U text."""
    parsed, all_comments = parse_conllu(conllu_text)
    con = Convert(parsed, enhance_ud, enhanced_plus_plus, enhanced_extra, conv_iterations, remove_eud_info, remove_extra_info, remove_node_adding_conversions, remove_unc, query_mode, funcs_to_cancel, ud_version)
    converted, _ = con()
    return serialize_conllu(converted, all_comments, remove_eud_info, remove_extra_info, preserve_comments)
def _convert_bart_odin_sent(doc, enhance_ud, enhanced_plus_plus, enhanced_extra, conv_iterations, remove_eud_info, remove_extra_info, remove_node_adding_conversions, remove_unc, query_mode, funcs_to_cancel, ud_version):
    """Convert a single Odin document: parse, run conversions, and map the
    converted sentences back into the Odin JSON structure."""
    sents = parse_odin(doc)
    con = Convert(sents, enhance_ud, enhanced_plus_plus, enhanced_extra, conv_iterations, remove_eud_info, remove_extra_info, remove_node_adding_conversions, remove_unc, query_mode, funcs_to_cancel, ud_version)
    converted_sents, _ = con()
    return conllu_to_odin(converted_sents, doc, remove_eud_info, remove_extra_info)
def convert_bart_odin(odin_json, enhance_ud=True, enhanced_plus_plus=True, enhanced_extra=True, conv_iterations=math.inf, remove_eud_info=False, remove_extra_info=False, remove_node_adding_conversions=False, remove_unc=False, query_mode=False, funcs_to_cancel=None, ud_version=1):
    """Convert an Odin JSON payload in place. Handles both the multi-document
    form ({"documents": {...}}) and a single bare document."""
    if "documents" in odin_json:
        # Multi-document payload: convert each document independently.
        for doc_key, doc in odin_json["documents"].items():
            odin_json["documents"][doc_key] = _convert_bart_odin_sent(doc, enhance_ud, enhanced_plus_plus, enhanced_extra, conv_iterations, remove_eud_info, remove_extra_info, remove_node_adding_conversions, remove_unc, query_mode, funcs_to_cancel, ud_version)
    else:
        odin_json = _convert_bart_odin_sent(odin_json, enhance_ud, enhanced_plus_plus, enhanced_extra, conv_iterations, remove_eud_info, remove_extra_info, remove_node_adding_conversions, remove_unc, query_mode, funcs_to_cancel, ud_version)
    return odin_json
def convert_bart_tacred(tacred_json, enhance_ud=True, enhanced_plus_plus=True, enhanced_extra=True, conv_iterations=math.inf, remove_eud_info=False, remove_extra_info=False, remove_node_adding_conversions=False, remove_unc=False, query_mode=False, funcs_to_cancel=None, ud_version=1):
    """Convert a TACRED-format JSON payload and return the converted sentences
    (no back-serialization is performed)."""
    sents = parsed_tacred_json(tacred_json)
    con = Convert(sents, enhance_ud, enhanced_plus_plus, enhanced_extra, conv_iterations, remove_eud_info, remove_extra_info, remove_node_adding_conversions, remove_unc, query_mode, funcs_to_cancel, ud_version)
    converted_sents, _ = con()
    return converted_sents
def convert_spacy_doc(doc, enhance_ud=True, enhanced_plus_plus=True, enhanced_extra=True, conv_iterations=math.inf, remove_eud_info=False, remove_extra_info=False, remove_node_adding_conversions=False, remove_unc=False, query_mode=False, funcs_to_cancel=None, ud_version=1, one_time_initialized_conversions=None):
    """Convert a spaCy Doc sentence-by-sentence, write the enhanced graph back
    onto the Doc, and return (converted sentences, number of conversions done)."""
    parsed_doc = [parse_spacy_sent(sent) for sent in doc.sents]
    con = Convert(parsed_doc, enhance_ud, enhanced_plus_plus, enhanced_extra, conv_iterations, remove_eud_info, remove_extra_info, remove_node_adding_conversions, remove_unc, query_mode, funcs_to_cancel, ud_version, one_time_initialized_conversions)
    converted, convs_done = con()
    # Side effect: annotates `doc` with the converted enhanced dependencies.
    enhance_to_spacy_doc(doc, converted, remove_eud_info, remove_extra_info)
    return converted, convs_done
class Converter:
    """Callable spaCy pipeline component wrapping convert_spacy_doc with a
    fixed configuration; conversions are initialized once at construction."""
    def __init__(self, enhance_ud=True, enhanced_plus_plus=True, enhanced_extra=True, conv_iterations=math.inf, remove_eud_info=False, remove_extra_info=False, remove_node_adding_conversions=False, remove_unc=False, query_mode=False, funcs_to_cancel=None, ud_version=1):
        # Stored positionally; order must match convert_spacy_doc's signature.
        self.config = (enhance_ud, enhanced_plus_plus, enhanced_extra, conv_iterations, remove_eud_info, remove_extra_info, remove_node_adding_conversions, remove_unc, query_mode, funcs_to_cancel, ud_version)
        # make conversions and (more importantly) constraint initialization, a one timer.
        self.conversions = init_conversions(remove_node_adding_conversions, ud_version)
    def __call__(self, doc):
        """Run the conversions on `doc` and return it (pipeline contract);
        results are kept on the instance for later retrieval."""
        converted_sents, convs_done = convert_spacy_doc(doc, *self.config, self.conversions)
        self._converted_sents = converted_sents
        self._convs_done = convs_done
        return doc
    def get_converted_sents(self):
        # Converted sentences from the most recent __call__.
        return self._converted_sents
    def get_max_convs(self):
        # Number of conversions performed in the most recent __call__.
        return self._convs_done
def get_conversion_names():
    """Expose the available conversion names from the converter module."""
    return inner_get_conversion_names()
@Language.factory(
    "pybart_spacy_pipe",
    default_config={"enhance_ud": True, "enhanced_plus_plus": True, "enhanced_extra": True, "conv_iterations": math.inf, "remove_eud_info": False, "remove_extra_info": False, "remove_node_adding_conversions": False, "remove_unc": False, "query_mode": False, "funcs_to_cancel": None, "ud_version": 1},
)
def create_pybart_spacy_pipe(nlp, name, enhance_ud, enhanced_plus_plus, enhanced_extra, conv_iterations, remove_eud_info, remove_extra_info, remove_node_adding_conversions, remove_unc, query_mode, funcs_to_cancel, ud_version):
    """spaCy factory: build a Converter component from the pipe config."""
    return Converter(enhance_ud, enhanced_plus_plus, enhanced_extra, conv_iterations, remove_eud_info, remove_extra_info, remove_node_adding_conversions, remove_unc, query_mode, funcs_to_cancel, ud_version)
|
430754 | from abc import abstractmethod
import numpy as np
import pandas as pd
from mizarlabs.static import EVENT_END_TIME
from mizarlabs.transformers.utils import check_missing_columns
from mizarlabs.transformers.utils import convert_to_timestamp
from numba import jit
from numba import prange
from scipy.stats import norm
from sklearn.base import BaseEstimator
from sklearn.base import TransformerMixin
# Default column names shared by the bet sizing transformers below.
BET_SIZE = "bet_size"
PROBABILITY = "prob"
PREDICTION = "pred"
SIDE = "side"
class BetSizingBase(BaseEstimator, TransformerMixin):
    """
    Base class for bet sizing transformers
    """
    def transform(self, X: pd.DataFrame) -> pd.Series:
        # Delegate to the subclass, then enforce the bet-size contract:
        # every value must lie in [-1, 1].
        bet_sizing_signal = self._transform(X)
        assert (bet_sizing_signal >= -1).all(), "The bet size signal should be >= -1"
        assert (bet_sizing_signal <= 1).all(), "The bet size signal should be <= 1"
        return bet_sizing_signal
    @abstractmethod
    def _transform(self, X: pd.DataFrame) -> pd.Series:
        # Subclasses compute the raw bet sizing signal here.
        pass
class BetSizingFromProbabilities(BetSizingBase):
    """
    Calculate the bet size using the predicted probability.
    :param num_classes: Number of labeled classes
    :type num_classes: int
    :param average_active: Whether we need to apply the average
                           active to the bet sizing signal
    :type average_active: bool, optional
    :param meta_labeling: Whether the bet sizing is calculated from a
                          metalabeling signal
    :type meta_labeling: bool, optional
    :param discretise: Whether the output needs to be discretised
    :type discretise: bool, optional
    :param step_size: The step size of the discretisation
    :type step_size: int, optional
    :param probability_column_name: The column name of the probabilities
    :type probability_column_name: str, optional
    :param prediction_column_name: The column name of the predictions
    :type prediction_column_name: str, optional
    :param side_column_name: The column name of the side of the 'simpler'
                             metalabeling model
    :type side_column_name: str, optional
    :param event_end_time_column_name: The column name of the event end time
    :rtype event_end_time_column_name: str, optional
    """
    def __init__(
        self,
        num_classes: int,
        average_active: bool = False,
        meta_labeling: bool = False,
        discretise: bool = False,
        step_size: float = None,
        probability_column_name: str = PROBABILITY,
        prediction_column_name: str = PREDICTION,
        side_column_name: str = SIDE,
        event_end_time_column_name: str = EVENT_END_TIME,
        bet_size_column_name: str = BET_SIZE,
    ):
        self._side_column_name = side_column_name
        self._metalabeling = meta_labeling
        self._average_active = average_active
        self._step_size = step_size
        self._num_classes = num_classes
        self._probability_column_name = probability_column_name
        self._prediction_column_name = prediction_column_name
        self._event_end_time_column_name = event_end_time_column_name
        self._discretise = discretise
        self._bet_size_column_name = bet_size_column_name
        if self._discretise:
            # Discretisation requires a valid step size in (0, 1).
            assert self._discretise and self._step_size, (
                "When discretise is activated, step size should be "
                "set with value between 0 and 1"
            )
            assert (
                0 < self._step_size < 1
            ), "The step size should be greater than zero and less than 1"
    def _transform(self, X: pd.DataFrame) -> pd.Series:
        check_missing_columns(
            X, [self._probability_column_name, self._prediction_column_name]
        )
        # generate signals from multinomial classification (one-vs-rest, OvR)
        # z-statistic of prob vs the uniform baseline 1/num_classes.
        test_statistic_z = (
            X[self._probability_column_name] - 1.0 / self._num_classes
        ) / (
            X[self._probability_column_name] * (1.0 - X[self._probability_column_name])
        ) ** 0.5
        # signal=side*size; 2*CDF(z)-1 maps z onto (-1, 1).
        bet_sizing_signal = X[self._prediction_column_name] * (
            2 * norm.cdf(test_statistic_z) - 1
        )
        if self._metalabeling:
            assert set(X[self._side_column_name].unique()).issubset(
                {1, -1, 0}
            ), "The side should be 1, -1 or 0"
            check_missing_columns(X, [self._side_column_name])
            # Multiply by the primary model's side; side 0 zeroes the bet.
            bet_sizing_signal *= X.loc[bet_sizing_signal.index, self._side_column_name]
        if self._average_active:
            bet_sizing_signal_with_barrier = bet_sizing_signal.to_frame(BET_SIZE).join(
                X[[self._event_end_time_column_name]], how="left"
            )
            bet_sizing_signal = avg_active_signals(
                bet_sizing_signal_with_barrier,
                self._event_end_time_column_name,
                self._bet_size_column_name,
            )
        if self._discretise:
            bet_sizing_signal = discretise_signal(bet_sizing_signal, self._step_size)
        # NOTE(review): returns the magnitude only (sign discarded) — the
        # base-class range check then effectively enforces [0, 1]. Confirm
        # downstream consumers expect unsigned sizes.
        return bet_sizing_signal.abs()
def avg_active_signals(
    signals: pd.DataFrame,
    event_end_time_column_name: str = EVENT_END_TIME,
    bet_size_column_name: str = BET_SIZE,
) -> pd.Series:
    """
    Average the bet sizes of all concurrently not closed positions
    (e.i. no barrier has been hit yet)
    :param signals: Signal from which the active average is calculated
    :rtype signals: pd.DataFrame
    :param event_end_time_column_name: the name of the event end time
    :type event_end_time_column_name: str, optional
    :param bet_size_column_name: the name of the bet size column
    :type bet_size_column_name: str, optional
    :return: The active average signal
    :rtype: pd.DataFrame
    """
    # compute the average bet size among those active
    # time points were bet size change (either one starts or one ends)
    active_bet_size_time_indices = set(
        signals[event_end_time_column_name].dropna().values
    )
    active_bet_size_time_indices = active_bet_size_time_indices.union(
        signals.index.values
    )
    # Sorted union of all start/end times = every point the average can change.
    active_bet_size_time_indices = list(active_bet_size_time_indices)
    active_bet_size_time_indices.sort()
    active_bet_size_time_indices = np.array(active_bet_size_time_indices)
    # Timestamps are converted to integers for the numba-compiled kernel.
    avg_active_bet_size_list = _get_avg_active_signals(
        signals.loc[:, bet_size_column_name].values,
        convert_to_timestamp(active_bet_size_time_indices),
        convert_to_timestamp(signals.index.values),
        convert_to_timestamp(signals[event_end_time_column_name].values),
    )
    avg_active_bet_size = pd.Series(
        avg_active_bet_size_list, index=active_bet_size_time_indices, dtype=float
    )
    return avg_active_bet_size
@jit(parallel=True, nopython=True)
def _get_avg_active_signals(
    bet_size_signal: np.ndarray,
    active_bet_size_time_indices: np.ndarray,
    signal_timestamp_index: np.ndarray,
    expiration_barrier_timestamp: np.ndarray,
) -> np.ndarray:
    """
    Calculate the average active bet signal from the overlapping bets
    :param bet_size_signal: The bet size signal not averaged by active signals
    :type bet_size_signal: np.ndarray
    :param active_bet_size_time_indices: The timestamps when at least one
                                         signal is active
    :type active_bet_size_time_indices: np.ndarray
    :param signal_timestamp_index: The timestamps of the signal bet signal
    :type signal_timestamp_index: np.ndarray
    :param expiration_barrier_timestamp: The timestamps of the expiration
                                         barriers
    :type expiration_barrier_timestamp: np.ndarray
    :return: The average active bet size
    :rtype: np.ndarray
    """
    # init the average active bet sizes array with zeros
    avg_active_bet_size = np.zeros_like(active_bet_size_time_indices, dtype=np.float64)
    # prange: iterations are independent, so numba may parallelize them.
    for i in prange(len(active_bet_size_time_indices)):
        active_bet_size_time = active_bet_size_time_indices[i]
        # mask that finds where the bet signals are overlapping
        # (started at or before t, and not yet expired; a negative
        # expiration timestamp encodes a missing/NaT barrier).
        mask = np.less_equal(
            signal_timestamp_index, active_bet_size_time
        ) * np.logical_or(
            np.less(active_bet_size_time, expiration_barrier_timestamp),
            np.less(expiration_barrier_timestamp, 0),
        )
        # select the active bet sizes signals and calculates the mean
        active_bets_timestamps = signal_timestamp_index[mask]
        if len(active_bets_timestamps) > 0:
            avg_active_bet_size[i] = np.mean(bet_size_signal[mask])
    return avg_active_bet_size
def discretise_signal(signal: pd.Series, step_size: float) -> pd.Series:
    """
    Discretise the bet size signal based on the step size given.
    :param signal: Signal to discretise
    :type signal: pd.Series
    :param step_size: the step size to use for the discretisation
    :type step_size: float
    :return: Discretised signal
    :rtype: pd.Series
    """
    assert 0 < step_size < 1, "The step size should be between 0 and 1"
    # Snap each value to the nearest multiple of step_size; the final
    # round(3) removes floating-point noise from the divide/multiply.
    discretised_signal = ((signal / step_size).round() * step_size).round(3)
    # Clip to the valid bet-size range [-1, 1]. (The original comment
    # claimed flooring "to 0", but the code floored to -1; clip makes
    # the intent explicit in one idiomatic call.)
    return discretised_signal.clip(lower=-1, upper=1)
|
430784 | import ptypes
from ptypes import ptype,parray,pstruct,pint,pstr,dyn,pbinary
ptypes.setbyteorder(ptypes.config.byteorder.littleendian)
### header markers
# Marker base classes; concrete types call getparent() against these to
# locate the enclosing file / header scope.
class ElfXX_File(ptype.boundary): pass
class ElfXX_Header(ptype.boundary): pass
class ElfXX_Ehdr(ElfXX_Header): pass
### base
class uchar(pint.uint8_t): pass
class ElfXX_BaseOff(ptype.rpointer_t):
    '''Always an offset relative to base of file.'''
    _object_ = ptype.undefined
    def _baseobject_(self):
        # Offsets resolve relative to the enclosing ElfXX_File container.
        return self.getparent(ElfXX_File)
class ElfXX_BaseAddr(ptype.opointer_t):
    '''Always a virtual address relative to base of file.'''
    @classmethod
    def typename(cls):
        return cls.__name__
    def classname(self):
        # Render as "TypeName<pointed-to type>"; falls back through a few
        # ways of naming the target since _object_ may be a class, an
        # instance, or a callable returning either.
        try: object = self._object_() if callable(self._object_) else self._object_
        except Exception: object = self._object_
        try: type = object.classname() if ptypes.isinstance(object) else object.typename()
        except Exception: pass
        else: return "{:s}<{:s}>".format(self.typename(), type)
        type = object.__name__
        return "{:s}<{:s}>".format(self.typename(), type)
    def _calculate_(self, offset):
        # Addresses are rebased onto the file's load offset.
        base = self.getparent(ElfXX_File)
        return base.getoffset() + offset
class ElfXX_Off(ElfXX_BaseAddr):
    '''Always an offset that will be converted to an address when its in memory.'''
    _object_ = ptype.undefined
    def _calculate_(self, offset):
        base = self.getparent(ElfXX_File)
        try:
            # When backed by memory, translate the file offset into its
            # mapped address via the program header that contains it.
            if isinstance(self.source, ptypes.provider.memorybase):
                p = self.getparent(ElfXX_Ehdr)
                phentries = p['e_phoff'].d.li
                ph = phentries.byoffset(offset)
                return base.getoffset() + ph.getaddressbyoffset(offset)
        except ptypes.error.ItemNotFoundError:
            # No containing program header: fall through to the raw offset.
            pass
        return base.getoffset() + offset
class ElfXX_VAddr(ElfXX_BaseAddr):
    '''Always a virtual address that will be converted to an offset when its a file.'''
    _object_ = ptype.undefined
    def _calculate_(self, address):
        base = self.getparent(ElfXX_File)
        try:
            # When backed by a file, translate the virtual address into a
            # file offset via the program header that maps it.
            if isinstance(self.source, ptypes.provider.fileobj):
                p = self.getparent(ElfXX_Ehdr)
                phentries = p['e_phoff'].d.li
                ph = phentries.byaddress(address)
                return base.getoffset() + ph.getoffsetbyaddress(address)
        except ptypes.error.ItemNotFoundError:
            # No containing program header: fall through to the raw address.
            pass
        return base.getoffset() + address
class ElfXX_Addr(ptype.pointer_t):
    '''Just a regular address.'''
    _object_ = ptype.undefined
class ULEB128(pbinary.terminatedarray):
    '''Unsigned LEB128 variable-length integer: 7 value bits per septet,
    high bit set on every septet except the last.'''
    class septet(pbinary.struct):
        _fields_ = [
            (1, 'more'),
            (7, 'value'),
        ]
    _object_ = septet
    def isTerminator(self, value):
        # The final septet is the one with its continuation bit clear.
        return not bool(value['more'])
    def int(self): return self.get()
    def get(self):
        # Septets are stored least-significant first, so fold in reverse.
        res = 0
        for n in reversed(self):
            res = (res << 7) | n['value']
        return res
    def set(self, value):
        # NOTE(review): value == 0 produces an empty result list, making
        # result[-1] raise IndexError — confirm callers never set zero.
        result, mask = [], pow(2, 7) - 1
        while value > 0:
            item = self.new(self.septet).set((1, value & mask))
            result.append(item)
            value //= pow(2, 7)
        # Clear the continuation bit on the last septet.
        result[-1].set(more=0)
        self.value[:] = result[:]
        return self
    def summary(self):
        res = self.int()
        return "{:s} : {:d} : ({:#x}, {:d})".format(self.__element__(), res, res, 7*len(self))
### elf32
# 32-bit instantiations of the generic ElfXX_* pointer/offset types.
class Elf32_BaseOff(ElfXX_BaseOff):
    _value_ = pint.uint32_t
class Elf32_BaseAddr(ElfXX_BaseAddr):
    _value_ = pint.uint32_t
class Elf32_Off(ElfXX_Off):
    _value_ = pint.uint32_t
class Elf32_Addr(ElfXX_Addr):
    _value_ = pint.uint32_t
class Elf32_VAddr(ElfXX_VAddr):
    _value_ = pint.uint32_t
class Elf32_Half(pint.uint16_t): pass
class Elf32_Sword(pint.int32_t): pass
class Elf32_Word(pint.uint32_t): pass
### elf64
# 64-bit instantiations; Half/Word/Sword keep their 32-bit widths per the
# ELF64 spec, with Xword/Sxword as the new 64-bit integer types.
class Elf64_BaseOff(ElfXX_BaseOff):
    _value_ = pint.uint64_t
class Elf64_BaseAddr(ElfXX_BaseAddr):
    _value_ = pint.uint64_t
class Elf64_Off(ElfXX_Off):
    _value_ = pint.uint64_t
class Elf64_Addr(ElfXX_Addr):
    _value_ = pint.uint64_t
class Elf64_VAddr(ElfXX_VAddr):
    _value_ = pint.uint64_t
class Elf64_Half(Elf32_Half): pass
class Elf64_Word(Elf32_Word): pass
class Elf64_Sword(Elf32_Sword): pass
class Elf64_Xword(pint.uint64_t): pass
class Elf64_Sxword(pint.int64_t): pass
class padstring(pstr.string):
    '''Fixed-size string padded with spaces to the blocksize.'''
    def set(self, string):
        # Bug fix: the original formatted an undefined name `padding`,
        # raising NameError on every call; pad the stringified value
        # (`res`) to the blocksize instead.
        res, bs = "{!s}".format(string), self.blocksize()
        return super(padstring, self).set("{:{:d}s}".format(res, bs))
    def str(self):
        # Strip the trailing padding when reading the value back.
        res = super(padstring, self).str()
        return res.rstrip()
class stringinteger(padstring):
    '''Integer stored as a left-justified, space-padded decimal string.'''
    def set(self, integer):
        res, bs = "{!s}".format(integer), self.blocksize()
        # NOTE(review): super(padstring, self) deliberately(?) skips
        # padstring.set and calls pstr.string.set directly, since the
        # value is already padded here — confirm this bypass is intended.
        return super(padstring, self).set("{:<{:d}s}".format(res, bs))
    def int(self):
        res = super(padstring, self).str()
        return int(res.rstrip())
class octalinteger(padstring):
    '''Integer stored as a space-padded octal string (tar-style fields).'''
    def set(self, integer):
        res, bs = "{!s}".format(integer), self.blocksize()
        # Same pstr.string.set bypass as stringinteger above.
        return super(padstring, self).set("{: {:d}s}".format(res, bs))
    def int(self):
        # Parse the stored text as base-8.
        res = super(padstring, self).str()
        return int(res.rstrip(), 8)
|
430788 | import pandas as pd
import numpy as np
import torch
import matplotlib.pyplot as plt
import os
import sys
sys.path.append(os.path.join("..", ".."))
from torchid.ssfitter import NeuralStateSpaceSimulator
from torchid.ssmodels import CartPoleStateSpaceModel
from torchid.util import get_sequential_batch_idx
if __name__ == '__main__':
    # Evaluate a trained cart-pole state-space model on a validation set by
    # measuring seq_len-step open-loop simulation error, then plot results.
    seq_len = 512 # simulation sequence length - we evaluate performance in terms of seq_len-step simulation error
    dataset_filename = "pendulum_data_oloop_val.csv"
    model_filename = "model_SS_64step_noise.pkl"
    # Column names in the dataset
    COL_T = ['time']
    COL_Y = ['p_meas', 'theta_meas']
    COL_X = ['p', 'v', 'theta', 'omega']
    COL_U = ['u']
    # Load dataset
    df_X = pd.read_csv(os.path.join("data", dataset_filename), sep=",")
    time_data = np.array(df_X[COL_T], dtype=np.float32)
    y = np.array(df_X[COL_Y], dtype=np.float32)
    x = np.array(df_X[COL_X], dtype=np.float32)
    u = np.array(df_X[COL_U], dtype=np.float32)
    x0_torch = torch.from_numpy(x[0, :])
    N = np.shape(y)[0]
    # Sample time, assumed uniform across the dataset.
    Ts = time_data[1] - time_data[0]
    # Load model and parameters
    ss_model = CartPoleStateSpaceModel(Ts, init_small=True)
    nn_solution = NeuralStateSpaceSimulator(ss_model)
    nn_solution.ss_model.load_state_dict(torch.load(os.path.join("models", model_filename)))
    # Build validation data
    t_val_start = 0
    t_val_end = time_data[-1]
    idx_val_start = int(t_val_start//Ts)
    idx_val_end = int(t_val_end//Ts)
    u_val = u[idx_val_start:idx_val_end]
    x_val = x[idx_val_start:idx_val_end]
    y_val = y[idx_val_start:idx_val_end]
    time_val = time_data[idx_val_start:idx_val_end]
    # Predict batch data: simulate seq_len steps from each batch's initial state.
    batch_start, batch_idx = get_sequential_batch_idx(y_val.shape[0], seq_len)
    batch_time = torch.tensor(time_val[batch_idx])
    batch_x0 = torch.tensor(x_val[batch_start])
    batch_u = torch.tensor(u_val[batch_idx])
    batch_x = torch.tensor(x_val[batch_idx])
    batch_x_pred = nn_solution.f_sim_multistep(batch_x0, batch_u)
    # Plot data: measured (blue) vs simulated (red) position and angle, plus input.
    batch_x_pred_np = np.array(batch_x_pred.detach())
    batch_time_np = np.array(batch_time.detach()).squeeze()
    fig, ax = plt.subplots(3, 1, sharex=True)
    ax[0].plot(time_val, x_val[:, 0], 'b')
    ax[0].plot(batch_time_np.T, batch_x_pred_np[:, :, 0].T, 'r')
    ax[0].grid(True)
    ax[1].plot(time_val, x_val[:,2], 'b')
    ax[1].plot(batch_time_np.T, batch_x_pred_np[:, :, 2].T, 'r')
    ax[1].grid(True)
    ax[2].plot(time_val, u_val, label='Input')
    ax[2].grid(True)
|
430796 | import os
import nltk
import pandas as pd
from nltk.stem.lancaster import LancasterStemmer
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.model_selection import train_test_split as tts
from sklearn.preprocessing import LabelEncoder as LE
from sklearn.svm import SVC
from vectorizers.factory import get_vectoriser
class FaqEngine:
    """FAQ answering engine: optionally classifies a query into a topic
    class with an SVM, then picks the most cosine-similar question
    within that class and returns its stored answer."""
    def __init__(self, faqslist, type='tfidf'):
        # NOTE: parameter name `type` shadows the builtin but is part of
        # the public keyword interface, so it is kept as-is.
        self.faqslist = faqslist
        self.stemmer = LancasterStemmer()
        self.le = LE()
        self.classifier = None
        self.build_model(type)
    def cleanup(self, sentence):
        """Tokenize and stem *sentence*, returning the joined result."""
        word_tok = nltk.word_tokenize(sentence)
        stemmed_words = [self.stemmer.stem(w) for w in word_tok]
        return ' '.join(stemmed_words)
    def build_model(self, type):
        """Vectorize all FAQ questions and, when possible, fit an SVM
        that maps a query to its topic class."""
        self.vectorizer = get_vectoriser(type) # TfidfVectorizer(min_df=1, stop_words='english')
        dataframeslist = [pd.read_csv(csvfile).dropna() for csvfile in self.faqslist]
        self.data = pd.concat(dataframeslist, ignore_index=True)
        self.questions = self.data['Question'].values
        questions_cleaned = []
        for question in self.questions:
            questions_cleaned.append(self.cleanup(question))
        X = self.vectorizer.vectorize(questions_cleaned)
        # Under following cases, we dont do classification
        # 'Class' column abscent
        # 'Class' column has same values
        if 'Class' not in list(self.data.columns):
            return
        y = self.data['Class'].values.tolist()
        if len(set(y)) < 2: # 0 or 1
            return
        y = self.le.fit_transform(y)
        trainx, testx, trainy, testy = tts(X, y, test_size=.25, random_state=42)
        self.classifier = SVC(kernel='linear')
        self.classifier.fit(trainx, trainy)
        # print("SVC:", self.model.score(testx, testy))
    def query(self, usr):
        """Return the stored answer for the FAQ question most similar to
        *usr*, or an apology string on any failure."""
        # print("User typed : " + usr)
        try:
            cleaned_usr = self.cleanup(usr)
            t_usr_array = self.vectorizer.query(cleaned_usr)
            if self.classifier:
                # Narrow the candidate set to the predicted topic class.
                prediction = self.classifier.predict(t_usr_array)[0]
                class_ = self.le.inverse_transform([prediction])[0]
                # print("Class " + class_)
                questionset = self.data[self.data['Class'] == class_]
            else:
                questionset = self.data
            # threshold = 0.7
            cos_sims = []
            for question in questionset['Question']:
                cleaned_question = self.cleanup(question)
                question_arr = self.vectorizer.query(cleaned_question)
                sims = cosine_similarity(question_arr, t_usr_array)
                # if sims > threshold:
                cos_sims.append(sims)
            # print("scores " + str(cos_sims))
            if len(cos_sims) > 0:
                # Answer of the highest-similarity question wins.
                ind = cos_sims.index(max(cos_sims))
                # print(ind)
                # print(questionset.index[ind])
                return self.data['Answer'][questionset.index[ind]]
        except Exception as e:
            # Broad catch keeps the bot responsive; the error is printed
            # and a fallback message is returned.
            print(e)
        return "Could not follow your question [" + usr + "], Try again"
if __name__ == "__main__":
    # Smoke test: build the engine from the bundled CSVs and answer "Hi".
    base_path = os.path.join(os.path.dirname(os.path.abspath( __file__ )),"data")
    faqslist = [os.path.join(base_path,"Greetings.csv"), os.path.join(base_path,"GST FAQs 2.csv")]
    faqmodel = FaqEngine(faqslist, 'tfidf')
    response = faqmodel.query("Hi")
    print(response)
|
430872 | from oi import util
def test_qsplit():
    """Check util.split against quoted and unquoted inputs."""
    # Rewritten from a yield-style generator test (support removed in
    # pytest 4.0) to plain assertions so it runs under modern pytest.
    tests = [
        ('one', ['one']),
        (' one ', ['one']),
        (' " one " ', ['one']),
        ('one two', ['one', 'two']),
        ('one two', ['one', 'two']),
        ('one "two three" four', ['one', 'two three', 'four']),
        ('1 2 "3 4" 5 6 "7 8"', ['1', '2', '3 4', '5', '6', '7 8'])
    ]
    for text, expected in tests:
        assert util.split(text) == expected
|
430911 | import os
import pytest
import types
from pnlp.piop import write_json, write_file
from pnlp.piop import Reader, read_file, read_lines, read_json, read_yaml, read_csv
from pnlp.piop import check_dir
DATA_PATH = os.path.join('tests', 'piop_data')
@pytest.fixture(params=['*.md', '*.txt', '*.data', 'f*.*', '*c.*'])
def get_Reader_path_match_res(request):
res = []
reader = Reader(request.param)
for line in reader(DATA_PATH):
res.append(line)
return res
def test_Reader_path_match(get_Reader_path_match_res):
assert len(get_Reader_path_match_res) == 9
assert get_Reader_path_match_res[0].lid == 0
assert get_Reader_path_match_res[-1].lid == 2
def test_Reader_file():
res = []
reader = Reader()
for line in reader(os.path.join(DATA_PATH, 'a.md')):
res.append(line)
assert len(res) == 3
assert res[0].text == 'line 1 in a.'
def test_Reader_gen_files():
paths = Reader.gen_files(DATA_PATH, '*.md')
assert isinstance(paths, types.GeneratorType) == True
assert len(list(paths)) == 3
def test_Reader_gen_files_with_regex():
paths = Reader.gen_files(DATA_PATH, "(md)|(txt)", True)
assert isinstance(paths, types.GeneratorType) == True
assert len(list(paths)) == 6
def test_Reader_gen_articles():
    """Each txt file becomes exactly one article."""
    articles = Reader.gen_articles(Reader.gen_files(DATA_PATH, '*.txt'))
    assert isinstance(articles, types.GeneratorType)
    assert len(list(articles)) == 3
def test_Reader_gen_flines():
    """Flattening the article generators yields all 9 txt lines."""
    file_paths = Reader.gen_files(DATA_PATH, '*.txt')
    articles = Reader.gen_articles(file_paths)
    flat_lines = Reader.gen_flines(articles)
    assert isinstance(flat_lines, types.GeneratorType)
    assert len(list(flat_lines)) == 9
def test_Reader_gen_plines():
    """gen_plines lazily yields the 3 lines of b.txt."""
    produced = Reader.gen_plines(os.path.join(DATA_PATH, 'b.txt'))
    assert isinstance(produced, types.GeneratorType)
    assert len(list(produced)) == 3
@pytest.fixture
def get_read_data():
    """Path of the small three-line sample file shared by the read_* tests."""
    return os.path.join(DATA_PATH, 'c.data')
def test_read_file(get_read_data):
    """read_file returns the whole file as one newline-joined str."""
    content = read_file(get_read_data)
    assert type(content) == str
    assert content == 'line 1 in c.\nline 2 in c.\nline 3 in c.'
def test_read_lines(get_read_data):
    """read_lines splits the file into a list of its lines."""
    lines = read_lines(get_read_data)
    assert type(lines) == list
    assert lines == ['line 1 in c.', 'line 2 in c.', 'line 3 in c.']
def test_read_json():
    """read_json parses the sample file (with CJK text) into a dict."""
    expected = {
        "json1": "this is line 1",
        "json2": "这是第二行。"
    }
    parsed = read_json(os.path.join(DATA_PATH, 'json.json'))
    assert type(parsed) == dict
    assert parsed == expected
def test_read_yaml():
    """read_yaml parses the sample yaml into a dict of greeting lists."""
    expected = {'元旦': ['新年快乐', '元旦快乐', '节日快乐'],
                '周末': ['周末快乐!', '周末愉快!']}
    parsed = read_yaml(os.path.join(DATA_PATH, 'yaml.yaml'))
    assert type(parsed) == dict
    assert parsed == expected
def test_read_csv():
    """read_csv returns all rows, header included, as lists of strings."""
    rows = read_csv(os.path.join(DATA_PATH, 'csv.csv'))
    assert type(rows) == list
    assert rows == [['id', 'title'], ['1', 'title1'], ['2', 'title2']]
def test_write_json():
    """write_json should serialize a dict containing CJK text without raising."""
    payload = {"outjson1": "this is line 1.",
               "outjson2": "这是第二行。"}
    write_json(os.path.join(DATA_PATH, 'outjson.json'),
               payload, indent=4, ensure_ascii=False)
def test_write_file():
    """write_file should write a list of mixed-language lines without raising."""
    lines = ['line 1 of outfile.', '这是 outfile 的第二行。']
    write_file(os.path.join(DATA_PATH, 'outfile.file'), lines)
def test_check_dir():
    """check_dir returns None when the directory already exists."""
    assert check_dir(DATA_PATH) is None
if __name__ == '__main__':
    # Bug fix: the original printed ROOT_PATH and IODATA_PATH, neither of
    # which is defined anywhere in this module, so running the file as a
    # script raised NameError. Print the actual fixture directory instead.
    print(DATA_PATH)
print(IODATA_PATH)
|
430919 | from django.contrib import admin
from .models import Debtor, Invoice
# Expose the billing models in the Django admin site.
for model in (Debtor, Invoice):
    admin.site.register(model)
430932 | from setuptools import setup
# Read the long description from the repository README. The explicit
# encoding fixes a latent bug: without it, open() uses the platform
# default codec and a UTF-8 README can fail to decode on e.g. Windows.
with open("../README.md", 'r', encoding='utf-8') as f:
    long_description = f.read()

setup(
    name='tensor_classification',
    version='0.0.1',
    description='Code for tensor classification',
    license="MIT",
    long_description=long_description,
    # Tell PyPI the README is Markdown so it renders instead of showing raw text.
    long_description_content_type='text/markdown',
    author='<NAME> and <NAME>',
    author_email='<EMAIL>',
    url="https://github.com/laurafroelich/tensor_classification",
    packages=['tensor_classification']  # same as name
)
|
430951 | import torch
import torch.nn as nn
import numpy as np
from .base import Learner
from .aggregator import SSARAggregator, FrameStackPreprocessor
from surreal.model.ddpg_net import DDPGModel
from surreal.session import BASE_LEARNER_CONFIG, ConfigError
import surreal.utils as U
import torchx as tx
class DDPGLearner(Learner):
    '''
    DDPGLearner: subclass of Learner that contains DDPG algorithm logic
    Attributes:
        gpu_option: 'cpu' if not using GPU, 'cuda:all' otherwise
        model: instance of DDPGModel from surreal.model.ddpg_net
        model_target: instance of DDPGModel, used as a reference policy
            for Bellman updates
        use_action_regularization: boolean flag -- regularization method based on
            https://arxiv.org/pdf/1802.09477.pdf
        use_double_critic: boolean flag -- overestimation bias correction based on
            https://arxiv.org/pdf/1802.09477.pdf
        [actor/critic]_optim: Adam Optimizer for policy and baseline network
        aggregator: experience aggregator used to batch experiences from
            a list of experiences into a format usable by the model.
            For available aggregators, see surreal.learner.aggregator
        target_update_type: 'hard' update sets the weights of model_target equal to model
            after target_update_interval steps, whereas 'soft' update moves the parameters
            of model_target towards model after every step
        total_learn_time, forward_time, etc: timers that measure average time spent in
            each operation of the learner. These timers will be reported in tensorboard.
    important member functions:
        private methods:
        _optimize: function that makes policy and value function updates
        public methods:
        learn: method to perform optimization and send to tensorplex for log
        module_dict: returns the corresponding parameters
        preprocess: this function is called in learner/main prior to learn(),
            This operation occurs in a separate thread, meaning that conversion
            from numpy arrays to gpu tensors can occur asynchronously to gpu
            processing operations in learn().
    Arguments:
        learner_config, env_config, session_config: experiment setup configurations. An example set of configs
            can be found at surreal/main/ddpg_configs.py. Note that the surreal/env/make_env function adds attributes
            env_config.action_spec and env_config.obs_spec, which are required for this init method to function
            properly.
    '''

    def __init__(self, learner_config, env_config, session_config):
        super().__init__(learner_config, env_config, session_config)
        self.current_iteration = 0
        # load multiple optimization instances onto a single gpu
        self.batch_size = self.learner_config.replay.batch_size
        self.discount_factor = self.learner_config.algo.gamma
        self.n_step = self.learner_config.algo.n_step
        self.is_pixel_input = self.env_config.pixel_input
        self.use_layernorm = self.learner_config.model.use_layernorm
        self.use_double_critic = self.learner_config.algo.network.use_double_critic
        self.use_action_regularization = self.learner_config.algo.network.use_action_regularization
        self.frame_stack_concatenate_on_env = self.env_config.frame_stack_concatenate_on_env
        self.log.info('Initializing DDPG learner')
        self._num_gpus = session_config.learner.num_gpus
        if not torch.cuda.is_available():
            self.gpu_ids = 'cpu'
            self.log.info('Using CPU')
        else:
            self.gpu_ids = 'cuda:all'
            self.log.info('Using GPU')
            self.log.info('cudnn version: {}'.format(torch.backends.cudnn.version()))
            torch.backends.cudnn.benchmark = True
            # NOTE(review): forces single-GPU operation on CUDA regardless of
            # the configured session_config.learner.num_gpus read above.
            self._num_gpus = 1
        with tx.device_scope(self.gpu_ids):
            self._target_update_init()

            # Optional gradient clipping for each network, configured separately.
            self.clip_actor_gradient = self.learner_config.algo.network.clip_actor_gradient
            if self.clip_actor_gradient:
                self.actor_gradient_clip_value = self.learner_config.algo.network.actor_gradient_value_clip
                self.log.info('Clipping actor gradient at {}'.format(self.actor_gradient_clip_value))
            self.clip_critic_gradient = self.learner_config.algo.network.clip_critic_gradient
            if self.clip_critic_gradient:
                self.critic_gradient_clip_value = self.learner_config.algo.network.critic_gradient_value_clip
                self.log.info('Clipping critic gradient at {}'.format(self.critic_gradient_clip_value))

            self.action_dim = self.env_config.action_spec.dim[0]

            # Online network and its Bellman-target copy, built identically.
            self.model = DDPGModel(
                obs_spec=self.env_config.obs_spec,
                action_dim=self.action_dim,
                use_layernorm=self.use_layernorm,
                actor_fc_hidden_sizes=self.learner_config.model.actor_fc_hidden_sizes,
                critic_fc_hidden_sizes=self.learner_config.model.critic_fc_hidden_sizes,
                conv_out_channels=self.learner_config.model.conv_spec.out_channels,
                conv_kernel_sizes=self.learner_config.model.conv_spec.kernel_sizes,
                conv_strides=self.learner_config.model.conv_spec.strides,
                conv_hidden_dim=self.learner_config.model.conv_spec.hidden_output_dim,
            )
            self.model_target = DDPGModel(
                obs_spec=self.env_config.obs_spec,
                action_dim=self.action_dim,
                use_layernorm=self.use_layernorm,
                actor_fc_hidden_sizes=self.learner_config.model.actor_fc_hidden_sizes,
                critic_fc_hidden_sizes=self.learner_config.model.critic_fc_hidden_sizes,
                conv_out_channels=self.learner_config.model.conv_spec.out_channels,
                conv_kernel_sizes=self.learner_config.model.conv_spec.kernel_sizes,
                conv_strides=self.learner_config.model.conv_spec.strides,
                conv_hidden_dim=self.learner_config.model.conv_spec.hidden_output_dim,
            )
            if self.use_double_critic:
                # Second critic (TD3-style) plus its own target; critic_only
                # skips the actor head.
                self.model2 = DDPGModel(
                    obs_spec=self.env_config.obs_spec,
                    action_dim=self.action_dim,
                    use_layernorm=self.use_layernorm,
                    actor_fc_hidden_sizes=self.learner_config.model.actor_fc_hidden_sizes,
                    critic_fc_hidden_sizes=self.learner_config.model.critic_fc_hidden_sizes,
                    conv_out_channels=self.learner_config.model.conv_spec.out_channels,
                    conv_kernel_sizes=self.learner_config.model.conv_spec.kernel_sizes,
                    conv_strides=self.learner_config.model.conv_spec.strides,
                    conv_hidden_dim=self.learner_config.model.conv_spec.hidden_output_dim,
                    critic_only=True,
                )
                self.model_target2 = DDPGModel(
                    obs_spec=self.env_config.obs_spec,
                    action_dim=self.action_dim,
                    use_layernorm=self.use_layernorm,
                    actor_fc_hidden_sizes=self.learner_config.model.actor_fc_hidden_sizes,
                    critic_fc_hidden_sizes=self.learner_config.model.critic_fc_hidden_sizes,
                    conv_out_channels=self.learner_config.model.conv_spec.out_channels,
                    conv_kernel_sizes=self.learner_config.model.conv_spec.kernel_sizes,
                    conv_strides=self.learner_config.model.conv_spec.strides,
                    conv_hidden_dim=self.learner_config.model.conv_spec.hidden_output_dim,
                    critic_only=True,
                )

            self.critic_criterion = nn.MSELoss()

            self.log.info('Using Adam for critic with learning rate {}'.format(self.learner_config.algo.network.lr_critic))
            self.critic_optim = torch.optim.Adam(
                self.model.get_critic_parameters(),
                lr=self.learner_config.algo.network.lr_critic,
                weight_decay=self.learner_config.algo.network.critic_regularization  # Weight regularization term
            )
            self.log.info('Using Adam for actor with learning rate {}'.format(self.learner_config.algo.network.lr_actor))
            self.actor_optim = torch.optim.Adam(
                self.model.get_actor_parameters(),
                lr=self.learner_config.algo.network.lr_actor,
                weight_decay=self.learner_config.algo.network.actor_regularization  # Weight regularization term
            )
            if self.use_double_critic:
                self.log.info('Using Adam for critic with learning rate {}'.format(self.learner_config.algo.network.lr_critic))
                self.critic_optim2 = torch.optim.Adam(
                    self.model2.get_critic_parameters(),
                    lr=self.learner_config.algo.network.lr_critic,
                    weight_decay=self.learner_config.algo.network.critic_regularization  # Weight regularization term
                )

            self.log.info('Using {}-step bootstrapped return'.format(self.learner_config.algo.n_step))
            self.frame_stack_preprocess = FrameStackPreprocessor(self.env_config.frame_stacks)
            self.aggregator = SSARAggregator(self.env_config.obs_spec, self.env_config.action_spec)

            # Start the target networks as exact copies of the online networks.
            self.model_target.actor.hard_update(self.model.actor)
            self.model_target.critic.hard_update(self.model.critic)
            if self.use_double_critic:
                self.model_target2.critic.hard_update(self.model2.critic)

            # Timers reported to tensorboard (see class docstring).
            self.total_learn_time = U.TimeRecorder()
            self.forward_time = U.TimeRecorder()
            self.critic_update_time = U.TimeRecorder()
            self.actor_update_time = U.TimeRecorder()

    # override
    def preprocess(self, batch):
        '''
        Override for learner/base/preprocess. Before learn() is called, preprocess() takes the batch and converts
        the numpy arrays to pytorch tensors. Note that this operation will transfer the data to gpu if a gpu is used.
        Arguments:
            batch: a batch of numpy arrays from the replay memory
        '''
        # Convert all numpy arrays to pytorch tensors, and transfers to gpu if applicable
        with tx.device_scope(self.gpu_ids):
            obs, actions, rewards, obs_next, done = (
                batch['obs'],
                batch['actions'],
                batch['rewards'],
                batch['obs_next'],
                batch['dones']
            )
            device_name = 'cpu'
            if self._num_gpus > 0:
                device_name = 'cuda'
            # Pixel observations are stored as uint8 in the replay and
            # converted to float here; other modalities go straight to float32.
            for modality in obs:
                for key in obs[modality]:
                    if modality == 'pixel':
                        obs[modality][key] = (torch.tensor(obs[modality][key], dtype=torch.uint8)
                                              .to(torch.device(device_name))).float().detach()
                    else:
                        obs[modality][key] = (torch.tensor(obs[modality][key], dtype=torch.float32)
                                              .to(torch.device(device_name))).detach()
            for modality in obs_next:
                for key in obs_next[modality]:
                    if modality == 'pixel':
                        obs_next[modality][key] = (torch.tensor(obs_next[modality][key], dtype=torch.uint8)
                                                   .to(torch.device(device_name))).float().detach()
                    else:
                        obs_next[modality][key] = (torch.tensor(obs_next[modality][key], dtype=torch.float32)
                                                   .to(torch.device(device_name))).detach()
            actions = torch.tensor(actions, dtype=torch.float32).to(torch.device(device_name))
            rewards = torch.tensor(rewards, dtype=torch.float32).to(torch.device(device_name))
            done = torch.tensor(done, dtype=torch.float32).to(torch.device(device_name))
            (
                batch['obs'],
                batch['actions'],
                batch['rewards'],
                batch['obs_next'],
                batch['dones']
            ) = (
                obs,
                actions,
                rewards,
                obs_next,
                done
            )
            return batch

    def _optimize(self, obs, actions, rewards, obs_next, done):
        '''
        Note that while the replay contains uint8, the
        aggregator returns float32 tensors
        Arguments:
            obs: an observation from the minibatch, often represented as s_n in literature. Dimensionality: (N, C) for
                low dimensional inputs, (N, C, H, W) for pixel inputs
            actions: actions taken given observations obs, often represented as a_n in literature.
                Dimensionality: (N, A), where A is the dimensionality of a single action
            rewards: rewards received after action is taken. Dimensionality: N
            obs_next: an observation from the minibatch, often represented as s_{n+1} in literature
            done: 1 if obs_next is terminal, 0 otherwise. Dimensionality: N
        '''
        with tx.device_scope(self.gpu_ids):
            with self.forward_time.time():
                # Actions are expected to be pre-normalized to [-1, 1].
                assert actions.max().item() <= 1.0
                assert actions.min().item() >= -1.0

                # estimate rewards using the next state: r + argmax_a Q'(s_{t+1}, u'(a))
                model_policy, next_Q_target = self.model_target.forward(obs_next)
                if self.use_action_regularization:
                    # https://github.com/sfujim/TD3/blob/master/TD3.py -- action regularization
                    # NOTE(review): the noised/clamped model_policy is only fed
                    # to the second target critic below; the first target Q was
                    # already computed without it.
                    policy_noise = 0.2
                    noise_clip = 0.5
                    batch_size = self.batch_size
                    noise = np.clip(np.random.normal(0, policy_noise, size=(batch_size, self.action_dim)), -noise_clip,
                                    noise_clip)
                    device_name = 'cpu'
                    if self._num_gpus > 0:
                        device_name = 'cuda'
                    model_policy += torch.tensor(noise, dtype=torch.float32).to(device_name).detach()
                    model_policy = model_policy.clamp(-1, 1).to(device_name)
                y = rewards + pow(self.discount_factor, self.n_step) * next_Q_target * (1.0 - done)
                if self.use_double_critic:
                    # Clipped double-Q: take the element-wise minimum of both targets.
                    _, next_Q_target2 = self.model_target2.forward(obs_next, action=model_policy)
                    y2 = rewards + pow(self.discount_factor, self.n_step) * next_Q_target2 * (1.0 - done)
                    y = torch.min(y, y2)
                y = y.detach()  # targets are constants w.r.t. the critic update

                # compute Q(s_t, a_t)
                perception = self.model.forward_perception(obs)
                y_policy = self.model.forward_critic(
                    perception,
                    actions.detach()
                )
                y_policy2 = None
                if self.use_double_critic:
                    perception2 = self.model2.forward_perception(obs)
                    y_policy2 = self.model2.forward_critic(
                        perception2,
                        actions.detach()
                    )

            # critic update
            with self.critic_update_time.time():
                self.model.critic.zero_grad()
                if self.is_pixel_input:
                    self.model.perception.zero_grad()
                critic_loss = self.critic_criterion(y_policy, y)
                critic_loss.backward()
                if self.clip_critic_gradient:
                    self.model.critic.clip_grad_value(self.critic_gradient_clip_value)
                self.critic_optim.step()

                if self.use_double_critic:
                    self.model2.critic.zero_grad()
                    if self.is_pixel_input:
                        self.model2.perception.zero_grad()
                    # NOTE(review): critic_loss is rebound here, so the
                    # 'critic_loss' logged below reflects the second critic.
                    critic_loss = self.critic_criterion(y_policy2, y)
                    critic_loss.backward()
                    if self.clip_critic_gradient:
                        self.model2.critic.clip_grad_value(self.critic_gradient_clip_value)
                    self.critic_optim2.step()

            # actor update
            with self.actor_update_time.time():
                self.model.actor.zero_grad()
                # Maximize Q(s, pi(s)); perception is detached so the actor
                # loss does not backprop into the conv stack here.
                actor_loss = -self.model.forward_critic(
                    perception.detach(),
                    self.model.forward_actor(perception.detach())
                )
                actor_loss = actor_loss.mean()
                actor_loss.backward()
                if self.clip_actor_gradient:
                    self.model.actor.clip_grad_value(self.actor_gradient_clip_value)
                self.actor_optim.step()

            tensorplex_update_dict = {
                'actor_loss': actor_loss.item(),
                'critic_loss': critic_loss.item(),
                'action_norm': actions.norm(2, 1).mean().item(),
                'rewards': rewards.mean().item(),
                'Q_target': y.mean().item(),
                'Q_policy': y_policy.mean().item(),
                'performance/forward_time': self.forward_time.avg,
                'performance/critic_update_time': self.critic_update_time.avg,
                'performance/actor_update_time': self.actor_update_time.avg,
            }
            if self.use_double_critic:
                tensorplex_update_dict['Q_policy2'] = y_policy2.mean().item()

            # (possibly) update target networks
            self._target_update()
            return tensorplex_update_dict

    def learn(self, batch):
        '''
        Performs a gradient descent step on 'batch'
        Arguments:
            batch: a minibatch sampled from the replay memory, after preprocessing steps such as transfer to pytorch
                tensors and aggregation step
        '''
        self.current_iteration += 1
        with self.total_learn_time.time():
            tensorplex_update_dict = self._optimize(
                batch.obs,
                batch.actions,
                batch.rewards,
                batch.obs_next,
                batch.dones
            )
            tensorplex_update_dict['performance/total_learn_time'] = self.total_learn_time.avg
            self.tensorplex.add_scalars(tensorplex_update_dict, global_step=self.current_iteration)
        self.periodic_checkpoint(
            global_steps=self.current_iteration,
            score=None,
        )

    def module_dict(self):
        # Parameters published to agents: only the online model is shared.
        return {
            'ddpg': self.model,
        }

    def checkpoint_attributes(self):
        # Attributes persisted by the periodic checkpointer.
        return [
            'current_iteration',
            'model', 'model_target'
        ]

    def _target_update_init(self):
        '''Read target-update config and validate the update type.'''
        target_update_config = self.learner_config.algo.network.target_update
        self.target_update_type = target_update_config.type
        if self.target_update_type == 'soft':
            self.target_update_tau = target_update_config.tau
            self.log.info('Using soft target update with tau = {}'.format(self.target_update_tau))
        elif self.target_update_type == 'hard':
            self.target_update_counter = 0
            self.target_update_interval = target_update_config.interval
            self.log.info('Using hard target update every {} steps'.format(self.target_update_interval))
        else:
            raise ConfigError('Unsupported ddpg update type: {}'.format(target_update_config.type))

    def _target_update(self):
        '''
        Perform update on target model networks. This update is either 'soft', meaning the target model drifts towards
        the current model at a rate tau, or 'hard', meaning the target model performs a hard copy operation on the
        current model every target_update_interval steps.
        '''
        if self.target_update_type == 'soft':
            self.model_target.actor.soft_update(self.model.actor, self.target_update_tau)
            self.model_target.critic.soft_update(self.model.critic, self.target_update_tau)
            if self.use_double_critic:
                self.model_target2.critic.soft_update(self.model2.critic, self.target_update_tau)
                if self.is_pixel_input:
                    self.model_target2.perception.soft_update(self.model2.perception, self.target_update_tau)
            if self.is_pixel_input:
                self.model_target.perception.soft_update(self.model.perception, self.target_update_tau)
        elif self.target_update_type == 'hard':
            self.target_update_counter += 1
            if self.target_update_counter % self.target_update_interval == 0:
                self.model_target.actor.hard_update(self.model.actor)
                self.model_target.critic.hard_update(self.model.critic)
                if self.use_double_critic:
                    self.model_target2.critic.hard_update(self.model2.critic)
                    if self.is_pixel_input:
                        self.model_target2.perception.hard_update(self.model2.perception)
                if self.is_pixel_input:
                    self.model_target.perception.hard_update(self.model.perception)

    # override
    def _prefetcher_preprocess(self, batch):
        '''
        If frame_stack_preprocess is not set, each experience in the replay will be stored as a list of frames, as
        opposed to a single numpy array. We must condense them into a single numpy array as that is what the
        aggregator expects.
        '''
        if not self.frame_stack_concatenate_on_env:
            batch = self.frame_stack_preprocess.preprocess_list(batch)
        batch = self.aggregator.aggregate(batch)
        return batch
|
431029 | from django.db import models
# Create your models here.
class Model_example(models.Model):
    """Minimal example model: a subject line plus free-form content."""
    # Explicit integer primary key (Django would add an 'id' AutoField
    # implicitly anyway; kept explicit here).
    id = models.AutoField(primary_key=True)
    subject = models.CharField(max_length=100)  # short title, max 100 chars
    content = models.TextField()  # unbounded body text
    created_at = models.DateTimeField(auto_now_add=True)  # set once on insert

    def __str__(self):
        # Human-readable representation (admin lists, shell, etc.).
        return self.subject
|
431047 | import csv
import sqlite3
from path import Path
# this script sorts the images in one folder using the output of the NIMA scripts
# you need to have a csv file containing the results (RESULTS_CSV)
# and a collection in lightroom containing the images of the folder you
# analyzed

# Absolute path of the Lightroom catalog database (a SQLite file).
DEFAULT_CATALOG = r's:\Pictures\Lightroom\Lightroom Catalog.lrcat'
# Name of the Lightroom collection whose ordering will be rewritten.
LIGHTROOM_COLLECTION_NAME = 'expert_A'
# CSV produced by the NIMA evaluation scripts (ranked image list).
RESULTS_CSV = r's:\projects\neural-image-assessment\results.csv'
def open_catalog():
    """Connect to the Lightroom catalog and return a cursor with named rows."""
    connection = sqlite3.connect(DEFAULT_CATALOG)
    cursor = connection.cursor()
    # sqlite3.Row lets queries access columns by name, e.g. row['genealogy'].
    cursor.row_factory = sqlite3.Row
    return cursor

# Module-level cursor shared by all queries below.
DB = open_catalog()
def get_collection_id(collection_name):
    """Return {id_local: {'name': qualified_name}} for catalog collections
    whose name matches *collection_name*.

    Collection sets (groups) are tracked separately so nested collections
    get 'parent/child' style qualified names.
    """
    # Parameterized query instead of string interpolation: avoids SQL
    # injection / quoting bugs if the collection name contains a quote.
    collections_query = DB.execute(
        'select * from AgLibraryCollection where name=?',
        (collection_name,))
    collection_sets = {}
    collections = {}
    # NOTE(review): because the query filters by name, parent sets are only
    # visible when their name also matches; the collection_sets lookups
    # below can KeyError otherwise. Behavior preserved from the original.
    for cxn in collections_query:
        if cxn['creationId'] == 'com.adobe.ag.library.group':
            path = cxn['genealogy']
            cmps = path.split('/')
            if len(cmps) == 2:
                # root level collection/set
                collection_sets[path] = cxn['name']
            else:
                parent_path = "/".join(cmps[:-1])
                parent_name = collection_sets[parent_path]
                current_name = parent_name + "/" + cxn['name']
                collection_sets[path] = current_name
        elif cxn['creationId'] == 'com.adobe.ag.library.collection':
            path = cxn['genealogy']
            cmps = path.split('/')
            parent_path = "/".join(cmps[:-1])
            if parent_path:
                parent_name = collection_sets[parent_path]
                current_name = parent_name + "/" + cxn['name']
            else:
                current_name = cxn['name']
            collection_id = cxn['id_local']
            collections[collection_id] = {'name': current_name}
    return collections
def get_new_file_order(csv_file):
    """Map each result-CSV image basename to its fractional ranking position."""
    with open(csv_file, 'r', encoding="utf-8") as csvfile:
        reader = csv.reader(csvfile, delimiter=';', lineterminator='\n')
        next(reader)  # discard the header row
        names = [str(Path(row[0]).basename()) for row in reader]
    # Lightroom stores ordering as fractional positions, hence the +0.01.
    return dict((name, float(rank) + 0.01) for rank, name in enumerate(names))
def changeOrder(collection_name=LIGHTROOM_COLLECTION_NAME,
                csv_file=RESULTS_CSV):
    """Rewrite a Lightroom collection's ordering to follow the NIMA CSV.

    Images present in the CSV get their CSV rank as position; images not
    in the CSV are appended after them.
    """
    collections = get_collection_id(collection_name)
    assert (len(collections) == 1)
    # Renamed from 'id' to avoid shadowing the builtin.
    collection_id = list(collections.keys())[0]
    # Parameterized query (was string-formatted SQL).
    query = """
    select t1.id_local,t1.positionInCollection,t3.originalFilename
    from AgLibraryCollectionImage t1
    inner join Adobe_images t2
    ON t1.image=t2.id_local
    inner join AgLibraryFile t3
    ON t2.rootFile=t3.id_local
    where t1.collection=?
    """
    results = DB.execute(query, (collection_id,))
    id_local_to_fn = {}
    for row in results:
        id_local_to_fn[row['id_local']] = row['originalFilename']
    files = get_new_file_order(csv_file)
    # Images missing from the CSV go after all ranked ones.
    next_pos = len(files) + 0.01
    update = []
    for id_local, fn in id_local_to_fn.items():
        if fn in files:
            update.append((id_local, files[fn]))
        else:
            update.append((id_local, next_pos))
            next_pos += 1.0
    update_query = """
    UPDATE AgLibraryCollectionImage SET positionInCollection=?
    WHERE id_local=?
    """
    for id_local, pos in update:
        # Parameterized UPDATE (was str.format into the SQL text).
        DB.execute(update_query, (pos, id_local))
        print(update_query, (pos, id_local))
    # Save (commit) the changes
    DB.connection.commit()
    DB.connection.close()

changeOrder()
|
431059 | from pyapproxmc import Counter
def minimal_test():
    """One wide clause over vars 1..99: expect count (512, 90)."""
    approx = Counter(seed=2157, epsilon=0.8, delta=0.2)
    approx.add_clause(list(range(1,100)))
    assert approx.count() == (512, 90)
def sampling_set_test():
    """Projecting onto vars 1..49 shrinks the count to (64, 43)."""
    projected = Counter(seed=2157, epsilon=0.8, delta=0.2,
                        sampling_set=list(range(1,50)))
    projected.add_clause(range(1,100))
    assert projected.count() == (64, 43)
def real_example_test():
    """Count a real DIMACS instance projected onto vars 1..20."""
    approx = Counter(seed=120, epsilon=0.8, delta=0.2,
                     sampling_set=list(range(1,21)))
    with open("test_1.cnf") as test_cnf:
        # Pop sampling set and metadata lines
        for clause_line in test_cnf.readlines()[2:]:
            # Each DIMACS clause line ends in a terminating 0 -> drop it.
            literals = [int(tok) for tok in clause_line.split()[:-1]]
            approx.add_clause(literals)
    assert approx.count() == (64,14)
if __name__ == '__main__':
    # Allow running the checks directly without a test runner.
    minimal_test()
    sampling_set_test()
    real_example_test()
|
431081 | import torch
from torch.utils.data import Dataset
class LatentsDataset(Dataset):
    """Dataset over precomputed latent codes, optionally paired with a
    per-sample transform target taken from transforms[index][3]."""

    def __init__(self, latents, opts, transforms=None):
        self.latents = latents
        self.transforms = transforms
        self.opts = opts

    def __len__(self):
        # One sample per row of the latents tensor.
        return self.latents.shape[0]

    def __getitem__(self, index):
        if self.transforms is None:
            return self.latents[index]
        # Element 3 of each transforms entry is a numpy array; return it
        # as a float tensor alongside the latent code.
        target = torch.from_numpy(self.transforms[index][3]).float()
        return self.latents[index], target
|
431114 | from insights.tests import context_wrap
from insights.parsers.httpd_log import HttpdSSLErrorLog, HttpdErrorLog
from insights.parsers.httpd_log import HttpdSSLAccessLog, HttpdAccessLog
from datetime import datetime
# Two SSL access-log entries (one 403, one 200) from the same client IP.
SSL_ACCESS_LOG = """
10.68.5.20 - - [29/Mar/2017:05:57:21 -0400] "GET / HTTP/1.1" 403 202 "-" "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.110 Safari/537.36"
10.68.5.20 - - [29/Mar/2017:05:59:38 -0400] "GET / HTTP/1.1" 200 84 "-" "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.110 Safari/537.36"
""".strip()
# Nine plain access-log entries from one client, including a 404 favicon hit.
ACCESS_LOG = """
10.68.5.20 - - [29/Mar/2017:05:57:21 -0400] "GET / HTTP/1.1" 403 202 "-" "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.110 Safari/537.36"
10.68.5.20 - - [29/Mar/2017:05:58:54 -0400] "GET / HTTP/1.1" 403 202 "-" "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.110 Safari/537.36"
10.68.5.20 - - [29/Mar/2017:05:59:38 -0400] "GET / HTTP/1.1" 200 84 "-" "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.110 Safari/537.36"
10.68.5.20 - - [29/Mar/2017:05:59:41 -0400] "GET / HTTP/1.1" 304 - "-" "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.110 Safari/537.36"
10.68.5.20 - - [29/Mar/2017:05:59:43 -0400] "GET / HTTP/1.1" 304 - "-" "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.110 Safari/537.36"
10.68.5.20 - - [29/Mar/2017:05:59:44 -0400] "GET / HTTP/1.1" 304 - "-" "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.110 Safari/537.36"
10.68.5.20 - - [29/Mar/2017:06:01:13 -0400] "GET / HTTP/1.1" 304 - "-" "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.110 Safari/537.36"
10.68.5.20 - - [29/Mar/2017:06:01:17 -0400] "GET / HTTP/1.1" 304 - "-" "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.110 Safari/537.36"
10.68.5.20 - - [29/Mar/2017:21:47:54 -0400] "GET /favicon.ico HTTP/1.1" 404 209 "http://10.66.208.208/" "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.110 Safari/537.36"
""".strip()
# Error log containing one un-timestamped continuation-style line (AH00558).
ERROR_LOG = """
[Tue Mar 28 03:56:00.804140 2017] [core:notice] [pid 4343:tid 139992054929536] AH00094: Command line: '/usr/sbin/httpd -D FOREGROUND'
[Tue Mar 28 03:56:38.610607 2017] [mpm_worker:notice] [pid 4343:tid 139992054929536] AH00296: caught SIGWINCH, shutting down gracefully
[Tue Mar 28 03:56:39.737815 2017] [suexec:notice] [pid 4471:tid 140592082000000] AH01232: suEXEC mechanism enabled (wrapper: /usr/sbin/suexec)
AH00558: httpd: Could not reliably determine the server's fully qualified domain name, using fe80::21a:4aff:fe01:160. Set the 'ServerName' directive globally to suppress this message
[Tue Mar 28 03:56:39.771605 2017] [auth_digest:notice] [pid 4471:tid 140592082000000] AH01757: generating secret for digest authentication ...
[Tue Mar 28 03:56:39.772272 2017] [lbmethod_heartbeat:notice] [pid 4471:tid 140592082000000] AH02282: No slotmem from mod_heartmonitor
[Tue Mar 28 03:56:39.772364 2017] [ssl:warn] [pid 4471:tid 140592082000000] AH01873: Init: Session Cache is not configured [hint: SSLSessionCache]
[Tue Mar 28 03:56:39.775833 2017] [mpm_worker:notice] [pid 4471:tid 140592082000000] AH00292: Apache/2.4.6 (Red Hat Enterprise Linux) OpenSSL/1.0.1e-fips configured -- resuming normal operations
""".strip()
# httpd24 software-collection error log (ModSecurity startup banner lines).
HTTPD24_ERROR_LOG = """
[Fri Mar 29 01:42:23.497294 2019] [suexec:notice] [pid 1967] AH01232: suEXEC mechanism enabled (wrapper: /opt/rh/jbcs-httpd24/root/usr/sbin/suexec)
[Fri Mar 29 01:42:23.498726 2019] [:notice] [pid 1967] ModSecurity for Apache/2.9.1 (http://www.modsecurity.org/) configured.
[Fri Mar 29 01:45:23.498736 2019] [:notice] [pid 1967] ModSecurity: APR compiled version="1.6.3"; loaded version="1.6.3-31"
[Fri Mar 29 01:45:23.498743 2019] [:notice] [pid 1967] ModSecurity: PCRE compiled version="8.32 "; loaded version="8.32 2012-11-30"
[Fri Mar 29 01:45:23.498745 2019] [:notice] [pid 1967] ModSecurity: LUA compiled version="Lua 5.1"
[Fri Mar 29 01:47:23.498747 2019] [:notice] [pid 1967] ModSecurity: LIBXML compiled version="2.9.1"
[Fri Mar 29 01:47:23.498749 2019] [:notice] [pid 1967] ModSecurity: Status engine is currently disabled, enable it by set SecStatusEngine to On.
"""
# JBCS httpd24 error log with repeated http2 warnings.
JBCS_HTTPD24_ERROR_LOG = """
[Wed Apr 03 03:52:39.014686 2019] [core:notice] [pid 4499] SELinux policy enabled; httpd running as context system_u:system_r:httpd_t:s0
[Wed Apr 03 03:54:39.016900 2019] [suexec:notice] [pid 4499] AH01232: suEXEC mechanism enabled (wrapper: /opt/rh/httpd24/root/usr/sbin/suexec)
[Wed Apr 03 03:55:39.038125 2019] [http2:warn] [pid 4499] AH10034: The mpm module (prefork.c) is not supported by mod_http2. The mpm determines how things are processed in your server. HTTP/2 has more demands in this regard and the currently selected mpm will just not do. This is an advisory warning. Your server will continue to work, but the HTTP/2 protocol will be inactive.
[Wed Apr 03 03:56:39.038140 2019] [http2:warn] [pid 4499] AH02951: mod_ssl does not seem to be enabled
[Wed Apr 03 03:57:39.038835 2019] [lbmethod_heartbeat:notice] [pid 4499] AH02282: No slotmem from mod_heartmonitor
"""
# SSL error log; shares its first lines with ERROR_LOG above.
SSL_ERROR_LOG = """
[Tue Mar 28 03:56:00.804140 2017] [core:notice] [pid 4343:tid 139992054929536] AH00094: Command line: '/usr/sbin/httpd -D FOREGROUND'
[Tue Mar 28 03:56:38.610607 2017] [mpm_worker:notice] [pid 4343:tid 139992054929536] AH00296: caught SIGWINCH, shutting down gracefully
[Tue Mar 28 03:56:39.737815 2017] [suexec:notice] [pid 4471:tid 140592082000000] AH01232: suEXEC mechanism enabled (wrapper: /usr/sbin/suexec)
AH00558: httpd: Could not reliably determine the server's fully qualified domain name, using fe80::21a:4aff:fe01:160. Set the 'ServerName' directive globally to suppress this message
""".strip()
def test_ssl_access_log():
    """Two SSL access entries for the client; one after 05:59."""
    parsed = HttpdSSLAccessLog(context_wrap(SSL_ACCESS_LOG))
    assert len(parsed.get("10.68.5.20")) == 2
    cutoff = datetime(2017, 3, 29, 5, 59, 0)
    assert len(list(parsed.get_after(cutoff))) == 1
def test_access_log():
    """Nine entries overall; the favicon request is searchable; three after 06:00."""
    parsed = HttpdAccessLog(context_wrap(ACCESS_LOG))
    assert "favicon.ico" in parsed
    assert len(parsed.get("10.68.5.20")) == 9
    cutoff = datetime(2017, 3, 29, 6, 0, 0)
    assert len(list(parsed.get_after(cutoff))) == 3
def test_ssl_error_log():
    """SSL error log parses, including the un-timestamped AH00558 line."""
    parsed = HttpdSSLErrorLog(context_wrap(SSL_ERROR_LOG))
    assert "AH00558" in parsed
    assert len(parsed.get("mpm_worker:notice")) == 1
    # Includes continuation line
    cutoff = datetime(2017, 3, 28, 3, 56, 39)
    assert len(list(parsed.get_after(cutoff))) == 2
def test_error_log():
    """Full error log: two mpm_worker notices, AH00558 present, six after cutoff."""
    parsed = HttpdErrorLog(context_wrap(ERROR_LOG))
    assert "AH00558" in parsed
    assert len(parsed.get("mpm_worker:notice")) == 2
    # Includes continuation line
    cutoff = datetime(2017, 3, 28, 3, 56, 39)
    assert len(list(parsed.get_after(cutoff))) == 6
def test_httpd24_error_log():
    """httpd24 log: one suexec notice, ModSecurity lines searchable."""
    parsed = HttpdErrorLog(context_wrap(HTTPD24_ERROR_LOG))
    assert "ModSecurity" in parsed
    assert len(parsed.get("suexec:notice")) == 1
    # Includes continuation line
    cutoff = datetime(2019, 3, 29, 1, 45, 23)
    assert len(list(parsed.get_after(cutoff))) == 5
def test_jbcs_httpd24_error_log():
    """JBCS log: two http2 warnings, suEXEC searchable, four after cutoff."""
    parsed = HttpdErrorLog(context_wrap(JBCS_HTTPD24_ERROR_LOG))
    assert "suEXEC" in parsed
    assert len(parsed.get("http2:warn")) == 2
    # Includes continuation line
    cutoff = datetime(2019, 4, 3, 3, 54, 39)
    assert len(list(parsed.get_after(cutoff))) == 4
|
431158 | import spacy
# from spacy.lang.en.stop_words import STOP_WORDS # Imports Default List of Stop Words
# STOP_WORDS.add("ORION") # This adds
# Load the custom stop-word list, one term per line.
with open("watchfilter.txt", "r") as handle:
    extra_stopwords = handle.read().splitlines()

# Initialise the spaCy language model and parse an example listing title.
nlp = spacy.load('en_core_web_sm')
doc = nlp(u"Good Condition Jaeger LeCoultre Master Ultra Thin 145.2.79.S Diamond 18K Rose Gold")

# Mark every custom term as a stop word in the model's vocabulary.
for term in extra_stopwords:
    nlp.vocab[term].is_stop = True

# Drop stop-word tokens and print what remains of the title.
words_filtered = [token for token in doc if not token.is_stop]
print(*words_filtered, sep=" ")
|
431177 | from .exportacquisitioncsv import export_acquisition_csv
from .mcdfolder2imcfolder import mcdfolder_to_imcfolder
from .ome2analysis import omefile_2_analysisfolder, omefolder_to_analysisfolder
from .ome2histocat import (
omefile_to_histocatfolder,
omefile_to_tifffolder,
omefolder_to_histocatfolder,
)
from .v1tov2 import v1_to_v2
|
431189 | from copy import deepcopy
import numpy as np
from const import *
from data_loader import to_tensor, to_numpy
class TreeNode(object):
    """A node of the MCTS search tree, holding visit statistics and a prior."""

    def __init__(self,
                 action=None,
                 props=None,
                 parent=None):
        self.parent = parent
        self.action = action
        self.children = []
        self.N = 0      # visit count
        self.Q = .0     # mean action value
        self.W = .0     # total action value
        self.P = props  # prior probability

    def is_leaf(self):
        """True while this node has not yet been expanded."""
        return not self.children

    def select_child(self):
        """Return the child with the highest UCT score."""
        scores = np.asarray([child.uct() for child in self.children])
        return self.children[np.argmax(scores)]

    def uct(self):
        """Upper-confidence bound used during tree traversal."""
        exploration = self.P * CPUCT * (np.sqrt(self.parent.N) / (1 + self.N))
        return self.Q + exploration

    def expand_node(self, props):
        """Create one child per action whose prior probability is positive."""
        children = []
        for action, prior in enumerate(props):
            if prior > 0.:
                children.append(TreeNode(action=action, props=prior, parent=self))
        self.children = children

    def backup(self, v):
        """Fold one simulation result into this node's running statistics."""
        self.N += 1
        self.W += v
        self.Q = self.W / self.N
class MonteCarloTreeSearch(object):
    """AlphaZero-style MCTS driven by a policy/value network.

    NOTE(review): `borad` is a typo for `board` throughout; kept as-is since it is
    part of the method signature and internal state.
    """
    def __init__(self, net,
                 ms_num=MCTSSIMNUM):
        # net: callable returning (value, log-prob policy) for a state tensor.
        self.net = net
        # ms_num: number of simulations to run per move.
        self.ms_num = ms_num
    def search(self, borad, node, temperature=.001):
        """Run `ms_num` simulations from `node` and pick a move.

        Returns (pi, child) where pi is the temperature-scaled visit distribution
        and child is the selected child node.
        """
        self.borad = borad
        self.root = node
        for _ in range(self.ms_num):
            node = self.root
            borad = self.borad.clone()
            # Selection: descend to a leaf, applying each chosen action to the clone.
            while not node.is_leaf():
                node = node.select_child()
                borad.move(node.action)
                borad.trigger()
            # Be careful - the network evaluates from the opponent's perspective here.
            value, props = self.net(
                to_tensor(borad.gen_state(), unsqueeze=True))
            value = to_numpy(value, USECUDA)[0]
            props = np.exp(to_numpy(props, USECUDA))
            # add dirichlet noise for root node
            if node.parent is None:
                props = self.dirichlet_noise(props)
            # Mask invalid moves, then renormalize the policy.
            props[borad.invalid_moves] = 0.
            total_p = np.sum(props)
            if total_p > 0:
                props /= total_p
            # winner, draw or continue
            if borad.is_draw():
                value = 0.
            else:
                done = borad.is_game_over(player=borad.last_player)
                if done:
                    value = -1.
                else:
                    node.expand_node(props)
            # Backup: propagate the (sign-alternating) value up to the root.
            while node is not None:
                value = -value
                node.backup(value)
                node = node.parent
        # Convert root visit counts into an action distribution.
        action_times = np.zeros(borad.size**2)
        for child in self.root.children:
            action_times[child.action] = child.N
        action, pi = self.decision(action_times, temperature)
        for child in self.root.children:
            if child.action == action:
                return pi, child
    @staticmethod
    def dirichlet_noise(props, eps=DLEPS, alpha=DLALPHA):
        """Mix Dirichlet noise into the root prior to encourage exploration."""
        return (1 - eps) * props + eps * np.random.dirichlet(np.full(len(props), alpha))
    @staticmethod
    def decision(pi, temperature):
        """Sample an action from visit counts sharpened by `temperature`."""
        pi = (1.0 / temperature) * np.log(pi + 1e-10)
        pi = np.exp(pi - np.max(pi))
        pi /= np.sum(pi)
        action = np.random.choice(len(pi), p=pi)
        return action, pi
|
431225 | from __future__ import annotations
from enums.embedtype import EmbedType
from pydantic import BaseModel
from ..color import Color
class EmbedFooter(BaseModel):
    """Footer section of an embed (icon plus text)."""
    icon_url: str
    proxy_icon_url: str
    text: str
class EmbedField(BaseModel):
    """A single name/value field in an embed; `inline` controls layout."""
    inline: bool
    name: str
    value: str
class EmbedThumbnail(BaseModel):
    """Thumbnail image of an embed, with proxied URL and pixel dimensions."""
    url: str
    proxy_url: str
    height: int
    width: int
class EmbedVideo(BaseModel):
    """Video attached to an embed, with proxied URL and pixel dimensions."""
    url: str
    proxy_url: str
    # Discord documents video height/width as integers; `str` here was
    # inconsistent with the sibling EmbedImage/EmbedThumbnail models.
    height: int
    width: int
class EmbedImage(BaseModel):
    """Main image of an embed, with proxied URL and pixel dimensions."""
    url: str
    proxy_url: str
    height: int
    width: int
class EmbedAuthor(BaseModel):
    """Author line of an embed (name, link, icon)."""
    name: str
    url: str
    icon_url: str
    proxy_icon_url: str
class EmbedProvider(BaseModel):
    """Provider of externally-sourced embed content."""
    name: str
    url: str
class EmbedPayload(BaseModel):
    """Full embed object as sent/received over the API.

    NOTE(review): every field is annotated as required; if the wire format makes
    some of these optional, they should become Optional — verify against the API.
    """
    title: str
    type: EmbedType
    description: str
    url: str
    timestamp: str
    color: Color
    footer: EmbedFooter
    image: EmbedImage
    thumbnail: EmbedThumbnail
    video: EmbedVideo
    provider: EmbedProvider
    author: EmbedAuthor
    fields: list[EmbedField]
|
# Normalization maps uint8 pixels into roughly [-1, 1]; images stay BGR.
# (Removed a stray "431258 | " paste artifact that broke the first statement.)
img_norm_cfg = dict(
    mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=False)
crop_size = (288, 960)
# KITTI config
kitti_train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', sparse=True),
    dict(
        type='ColorJitter',
        asymmetric_prob=0.0,
        brightness=0.4,
        contrast=0.4,
        saturation=0.4,
        hue=0.5 / 3.14),
    dict(type='Erase', prob=0.5, bounds=[50, 100], max_num=3),
    dict(
        type='SpacialTransform',
        spacial_prob=0.8,
        stretch_prob=0.8,
        crop_size=crop_size,
        min_scale=-0.2,
        max_scale=0.4,
        max_stretch=0.2),
    dict(type='RandomCrop', crop_size=crop_size),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='DefaultFormatBundle'),
    dict(
        type='Collect',
        keys=['imgs', 'flow_gt', 'valid'],
        meta_keys=[
            'filename1', 'filename2', 'ori_filename1', 'ori_filename2',
            'filename_flow', 'ori_filename_flow', 'ori_shape', 'img_shape',
            'erase_bounds', 'erase_num', 'scale_factor'
        ])
]
kitti_train = dict(
    type='KITTI2015',
    data_root='data/kitti2015',
    pipeline=kitti_train_pipeline,
    test_mode=False)
kitti_test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', sparse=True),
    dict(type='InputPad', exponent=3),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='TestFormatBundle'),
    dict(
        type='Collect',
        keys=['imgs'],
        meta_keys=[
            'flow_gt', 'valid', 'filename1', 'filename2', 'ori_filename1',
            'ori_filename2', 'ori_shape', 'img_shape', 'img_norm_cfg',
            'scale_factor', 'pad_shape', 'pad'
        ])
]
# The same split serves as both validation and test set.
kitti2015_val_test = dict(
    type='KITTI2015',
    data_root='data/kitti2015',
    pipeline=kitti_test_pipeline,
    test_mode=True)
data = dict(
    train_dataloader=dict(
        samples_per_gpu=2,
        workers_per_gpu=5,
        drop_last=True,
        shuffle=True,
        persistent_workers=True),
    val_dataloader=dict(
        samples_per_gpu=1,
        workers_per_gpu=5,
        shuffle=False,
        persistent_workers=True),
    test_dataloader=dict(samples_per_gpu=1, workers_per_gpu=2, shuffle=False),
    train=kitti_train,
    val=kitti2015_val_test,
    test=kitti2015_val_test)
|
# YOLO training configuration.
# (Removed a stray "431306 | " paste artifact and a duplicate
# YOLO['pretrain_model'] assignment that set the same value twice.)
YOLO = dict()
# Datasets Parameter
YOLO['classes'] = ["aeroplane", "bicycle", "bird", "boat", "bottle"]
YOLO['datasets_path'] = '/home/tshzzz/disk_m2/jinnan_round2/datasets/round1_train/'
YOLO['anno_path'] = '/home/tshzzz/disk_m2/jinnan_round2/datasets/round1_train/jinnan_round1_train.json'
YOLO['val_path'] = '/home/tshzzz/disk_m2/jinnan_round2/datasets/round1_train/jinnan_round1_val.json'
YOLO['pretrain_model'] = './darknet53.conv.74'
YOLO['class_num'] = len(YOLO['classes'])
# Training Parameter
YOLO['save_dir'] = './608/'
YOLO['epochs'] = 80
YOLO['epochs_start'] = 0
YOLO['steps'] = [50,60]  # LR decay epochs
YOLO['batch_size'] = 10
YOLO['start_lr'] = 1e-3
YOLO['image_size'] = [608, 608]
YOLO['featmap_size'] = [[19, 19]]
# Anchor box (w, h) pairs in feature-map units, one list per feature map.
YOLO['anchor'] = [[[2.8523827,2.4452496 ],
                   [1.3892268,1.8958333 ],
                   [1.6490009,0.95596665],
                   [0.7680278,1.3883946 ],
                   [0.5605738,0.69167805]]]
|
431341 | import inspect
import re
import time
from types import FunctionType
from typing import List
from dataframe_sql.tests.pandas_sql_functionality_test import * # noqa
# Tests excluded from the timing comparison (setup / negative-path tests, not queries).
DONT_TEST = [
    test_add_remove_temp_table, # noqa
    test_for_valid_query, # noqa
    test_for_non_existent_table, # noqa
]
# Matches one leading indent level (a tab or four spaces); `code` captures the rest.
INDENT_REGEX = re.compile(r"(\t|\s{4})(?P<code>.*)")
def get_pandas_tests():
    """Collect the star-imported test functions eligible for benchmarking.

    Scans module globals for functions whose name contains "test" and that are
    not listed in DONT_TEST.
    """
    global_dict = globals()
    test_list = []
    for global_key in global_dict:
        global_var = global_dict[global_key]
        if (
            isinstance(global_var, FunctionType)
            and "test" in global_var.__name__
            and global_var not in DONT_TEST
        ):
            test_list.append(global_var)
    return test_list
def get_function_code(function_object: FunctionType) -> List[str]:
    """
    Return the function code only for the function (not def name or docstring)
    :param function_object:
    :return:
    """
    source = inspect.getsource(function_object)
    docstring = function_object.__doc__
    # Strip the docstring as it appears in the source, then drop the `def` line.
    without_docstring = source.replace(f'"""{docstring}"""', "")
    return without_docstring.split("\n")[1:]
def fix_code_indent(function_code: List[str]):
    """
    Return code indented back one to account for the def indent
    :param function_code:
    :return:
    """
    for index, line in enumerate(function_code):
        matched = INDENT_REGEX.match(line)
        if matched:
            function_code[index] = matched.group("code")
def remove_assertion(function_code: List[str]):
    """
    Remove assertion lines
    :param function_code:
    :return:
    """
    assertion_line = "tm.assert_frame_equal(pandas_frame, my_frame)"
    for index, line in enumerate(function_code):
        if line == assertion_line:
            function_code[index] = ""
def find_end_paren(function_code: str, start: int):
    """
    Find the end location given a starting parenthesis location
    :param function_code:
    :param start:
    :return:
    """
    open_parens = []
    for offset, char in enumerate(function_code[start:]):
        if char == "(":
            open_parens.append(char)
        elif char == ")":
            open_parens.pop()
        # Depth is checked every character, exactly as callers rely on:
        # if `start` does not point at '(', this returns `start` immediately.
        if not open_parens:
            return offset + start
def split_into_pandas_and_dataframe_sql(function_code: str):
    """
    Returns the code split into the half using dataframe_sql and the half using
    the direct pandas api
    :param function_code:
    :return:
    """
    marker = "my_frame = query"
    sql_start = function_code.find(marker)
    # The query(...) call's opening paren sits right after the marker text.
    first_paren = sql_start + len(marker)
    sql_end = find_end_paren(function_code, first_paren) + 1
    dataframe_sql_code = function_code[sql_start:sql_end]
    pandas_code = function_code[sql_end:]
    return dataframe_sql_code, pandas_code
def timeit(function: FunctionType):
    """
    Wrapper for measuring time based performance.

    Note: the wrapped function's return value is discarded; the wrapper
    returns the elapsed time in seconds instead.
    :param function:
    :return:
    """
    from functools import wraps

    @wraps(function)  # preserve __name__/__doc__ of the wrapped function
    def timed(*args, **kw):
        ts = time.time()
        result = function(*args, **kw)  # noqa
        te = time.time()
        total_time = te - ts
        print(f"func: {function.__name__} took {total_time}")
        return total_time

    return timed
def test_performance(dataframe_sql_code: str, pandas_code: str):
    """Time both code variants with `timeit` and print the difference."""
    @timeit
    def dataframe_sql_time():
        exec(dataframe_sql_code)  # noqa

    @timeit
    def pandas_code_time():
        exec(pandas_code)  # noqa

    time_diff = dataframe_sql_time() - pandas_code_time()
    print(f"Time difference was {time_diff}\n")
if __name__ == "__main__":
    register_env_tables()  # noqa
    tests = get_pandas_tests()
    # NOTE(review): only tests[3:] are benchmarked — presumably the first three
    # are setup-like; confirm the slice is intentional.
    for test in tests[3:]:
        print(f"######### {test.__name__} #########")
        # Extract, re-indent, and strip assertions from the test body, then
        # split it into the dataframe_sql half and the pandas half.
        code = get_function_code(test)
        fix_code_indent(code)
        remove_assertion(code)
        code = list(filter(lambda x: x, code))
        code_string = "\n".join(code)
        code = split_into_pandas_and_dataframe_sql(code_string)
        try:
            test_performance(*code)
        except Exception as err:
            # Dump both halves to aid debugging, then re-raise.
            print("Code Failed")
            print("#####################")
            print("#### Your Code ####")
            print(code[0])
            print("#### Pandas Code ####")
            print(code[1])
            print("#####################")
            raise err
    remove_env_tables()  # noqa
|
431348 | from data import payloads
def breakline():
    """Print a horizontal separator line to stdout."""
    separator = "------------------------------------------------------------"
    print(separator)
def payloadReplace(llist, lvar, rvar):
    """Return a new list with every occurrence of `lvar` replaced by `rvar` in each entry."""
    return [item.replace(lvar, rvar) for item in llist]
431351 | from django import forms
from ... import models
from wazimap_ng.general.admin.forms import HistoryAdminForm
class IndicatorAdminForm(HistoryAdminForm):
    """Admin form for Indicator that narrows group/universe choices for new objects."""
    groups = forms.ChoiceField(required=True)
    class Meta:
        model = models.Indicator
        fields = '__all__'
    def __init__(self, *args, **kwargs):
        """Populate choice fields only when creating a new indicator."""
        super().__init__(*args, **kwargs)
        if not self.instance.id:
            # NOTE(review): group_choices / universe_queryset are not defined in
            # this class — presumably provided by HistoryAdminForm or a mixin; verify.
            self.fields['groups'].choices = self.group_choices
            self.fields['universe'].queryset = self.universe_queryset
    def clean(self,*args,**kwargs):
        """Normalize the single selected group into the list the model expects."""
        cleaned_data = super().clean(*args,**kwargs)
        cleaned_data['groups'] = [cleaned_data.get("groups")]
        return cleaned_data
431356 | from django.core.management.base import BaseCommand, CommandError
from register.models import RegistrationCenter
class Command(BaseCommand):
    help = """
    Set the reg_open parameter for 1 or more subconstituencies.
    Open all centers:
        set_reg_open --all
    Close all centers:
        set_reg_open --false --all
    Open centers in subcon 5:
        set_reg_open 5
    Close centers in subcons 2 and 8:
        set_reg_open --false 2 8
    Note: --all cannot be provided along with subcons. They are mutually exclusive
    """
    def add_arguments(self, parser):
        """Register positional subcon ids plus the --all / --false flags."""
        parser.add_argument('subcon_id', nargs='*', type=int)
        parser.add_argument(
            '--all',
            action='store_true',
            dest='all',
            default=False,
            help='Set for all centers')
        parser.add_argument(
            '--false',
            action='store_false',
            dest='reg_open',
            default=True,  # default reg_open to True
            help='Set reg_open to False')
    def handle(self, *args, **options):
        """Bulk-update reg_open on the selected centers and report the count."""
        reg_open = options['reg_open']
        subcon_list = options['subcon_id']
        # --all and explicit subcon ids are mutually exclusive; exactly one is required.
        if options['all'] and subcon_list:
            raise CommandError('Choose either --all or provide a list of subcons, not both.')
        if options['all']:
            centers = RegistrationCenter.objects.all()
        elif subcon_list:
            centers = RegistrationCenter.objects.filter(subconstituency_id__in=subcon_list)
        else:
            raise CommandError('Neither --all nor subcon_ids were provided.')
        updated = centers.update(reg_open=reg_open)
        self.stdout.write('%d centers updated to reg_open=%s.' % (updated, reg_open))
|
431384 | import os
from datetime import timedelta
from airflow import DAG
from airflow.contrib.operators.emr_add_steps_operator import EmrAddStepsOperator
from airflow.contrib.operators.emr_create_job_flow_operator import EmrCreateJobFlowOperator
from airflow.contrib.sensors.emr_step_sensor import EmrStepSensor
from airflow.models import Variable
from airflow.utils.dates import days_ago
# ************** AIRFLOW VARIABLES **************
# These are read from the Airflow metastore at parse time.
bootstrap_bucket = Variable.get('bootstrap_bucket')
emr_ec2_key_pair = Variable.get('emr_ec2_key_pair')
job_flow_role = Variable.get('job_flow_role')
logs_bucket = Variable.get('logs_bucket')
release_label = Variable.get('release_label')
service_role = Variable.get('service_role')
work_bucket = Variable.get('work_bucket')
# ***********************************************
# DAG id mirrors this file's name (without .py).
DAG_ID = os.path.basename(__file__).replace('.py', '')
DEFAULT_ARGS = {
    'owner': 'airflow',
    'depends_on_past': False,
    'email': ["{{ dag_run.conf['airflow_email'] }}"],
    # NOTE(review): Airflow expects booleans for email_on_failure/email_on_retry;
    # these templated string lists look suspect — confirm they resolve as intended.
    'email_on_failure': ["{{ dag_run.conf['email_on_failure'] }}"],
    'email_on_retry': ["{{ dag_run.conf['email_on_retry'] }}"],
}
# Single EMR step: spark-submit the bakery-sales PySpark job in cluster mode on YARN.
SPARK_STEPS = [
    {
        'Name': 'Bakery Sales',
        'ActionOnFailure': 'CONTINUE',
        'HadoopJarStep': {
            'Jar': 'command-runner.jar',
            'Args': [
                'spark-submit',
                '--deploy-mode',
                'cluster',
                '--master',
                'yarn',
                '--conf',
                'spark.yarn.submit.waitAppCompletion=true',
                's3a://{{ var.value.work_bucket }}/analyze/bakery_sales_ssm.py'
            ]
        }
    }
]
# EMR cluster definition; "{{ var.value.* }}" placeholders are rendered by Airflow.
JOB_FLOW_OVERRIDES = {
    'Name': 'demo-cluster-airflow',
    'ReleaseLabel': '{{ var.value.release_label }}',
    'LogUri': 's3n://{{ var.value.logs_bucket }}',
    'Applications': [
        {
            'Name': 'Spark'
        },
    ],
    'Instances': {
        # Both fleets request spot capacity only (TargetSpotCapacity, no on-demand).
        'InstanceFleets': [
            {
                'Name': 'MASTER',
                'InstanceFleetType': 'MASTER',
                'TargetSpotCapacity': 1,
                'InstanceTypeConfigs': [
                    {
                        'InstanceType': 'm5.xlarge',
                    },
                ]
            },
            {
                'Name': 'CORE',
                'InstanceFleetType': 'CORE',
                'TargetSpotCapacity': 2,
                'InstanceTypeConfigs': [
                    {
                        'InstanceType': 'r5.xlarge',
                    },
                ],
            },
        ],
        # Transient cluster: terminates once the submitted steps finish.
        'KeepJobFlowAliveWhenNoSteps': False,
        'TerminationProtected': False,
        'Ec2KeyName': '{{ var.value.emr_ec2_key_pair }}',
    },
    'BootstrapActions': [
        {
            'Name': 'string',
            'ScriptBootstrapAction': {
                'Path': 's3://{{ var.value.bootstrap_bucket }}/bootstrap_actions.sh',
            }
        },
    ],
    'Configurations': [
        {
            # Use the AWS Glue Data Catalog as the Hive metastore.
            'Classification': 'spark-hive-site',
            'Properties': {
                'hive.metastore.client.factory.class': 'com.amazonaws.glue.catalog.metastore.AWSGlueDataCatalogHiveClientFactory'
            }
        }
    ],
    'VisibleToAllUsers': True,
    'JobFlowRole': '{{ var.value.job_flow_role }}',
    'ServiceRole': '{{ var.value.service_role }}',
    'EbsRootVolumeSize': 32,
    'StepConcurrencyLevel': 1,
    'Tags': [
        {
            'Key': 'Environment',
            'Value': 'Development'
        },
        {
            'Key': 'Name',
            'Value': 'Airflow EMR Demo Project'
        },
        {
            'Key': 'Owner',
            'Value': 'Data Analytics Team'
        }
    ]
}
# Manually-triggered DAG (schedule_interval=None): create cluster -> add step -> wait.
with DAG(
        dag_id=DAG_ID,
        description='Analyze Bakery Sales with Amazon EMR',
        default_args=DEFAULT_ARGS,
        dagrun_timeout=timedelta(hours=2),
        start_date=days_ago(1),
        schedule_interval=None,
        tags=['emr', 'spark', 'pyspark']
) as dag:
    cluster_creator = EmrCreateJobFlowOperator(
        task_id='create_job_flow',
        job_flow_overrides=JOB_FLOW_OVERRIDES
    )
    # Step ids returned by add_steps are pulled from XCom by the sensor below.
    step_adder = EmrAddStepsOperator(
        task_id='add_steps',
        job_flow_id="{{ task_instance.xcom_pull(task_ids='create_job_flow', key='return_value') }}",
        aws_conn_id='aws_default',
        steps=SPARK_STEPS
    )
    step_checker = EmrStepSensor(
        task_id='watch_step',
        job_flow_id="{{ task_instance.xcom_pull('create_job_flow', key='return_value') }}",
        step_id="{{ task_instance.xcom_pull(task_ids='add_steps', key='return_value')[0] }}",
        aws_conn_id='aws_default'
    )
    cluster_creator >> step_adder >> step_checker
|
431420 | from .combine_slices import combine_slices, sort_by_slice_position
from .exceptions import DicomImportException
from .version import __version__
# Public API of the package.
__all__ = [
    'combine_slices',
    'sort_by_slice_position',
    'DicomImportException',
    '__version__'
]
|
431535 | import os
from typing import Callable, List
import pickle
import multiprocessing as mp
from functools import partial
from .._logger import progress_bar
# Only the pickle helpers are part of the module's public API.
__all__ = [
    'load_pickle',
    'save_pickle',
]
def load_pickle(path: str):
    """
    Load pickle from path.

    Args:
        path (str): Path to the pickle file.

    Raises:
        IOError: Path does not exist (raised as FileNotFoundError, a subclass,
            so existing callers catching IOError keep working).

    Returns:
        [any]: File saved in a pickle.
    """
    if not os.path.exists(path):
        raise FileNotFoundError(f'{path} does not exist!')
    with open(path, "rb") as f:
        data = pickle.load(f)
    return data
def save_pickle(data, path: str):
    """
    Save data to pickle, creating parent directories as needed.

    Args:
        data (any): Data to be saved.
        path (str): Filepath.
    """
    directory = os.path.dirname(path)
    # os.makedirs('') raises FileNotFoundError for a bare filename, so only
    # create directories when the path actually contains one.
    if directory:
        os.makedirs(directory, exist_ok=True)
    with open(path, "wb") as f:
        pickle.dump(data, f)
def remove_extension(path: str) -> str:
    """Return filename with the (last) extension removed."""
    if '.' not in path:
        return path
    return path.rsplit('.', 1)[0]
def remove_images(image_dir: str) -> None:
    """
    Remove all images in the image folder
    Args:
        image_dir (str): Directory to be removed
    """
    # Deletion is parallelized via multiprocess_map; the directory itself is
    # NOT removed, only the files directly inside it.
    paths = [x.path for x in os.scandir(image_dir)]
    if len(paths) > 0:
        multiprocess_map(func=remove, lst=paths, total=len(paths),
                         desc='Removing images')
def remove(path: str) -> None:
    """Delete `path` if it exists; silently do nothing when it is already gone.

    EAFP form avoids the check-then-delete race of the original
    `os.path.exists` guard when workers delete files concurrently.
    """
    try:
        os.remove(path)
    except FileNotFoundError:
        pass
def flatten(l):
    """Flatten list of lists; return `l` unchanged if any element is not a list."""
    if any(not isinstance(element, list) for element in l):
        return l
    return [item for sublist in l for item in sublist]
def format_seconds(n: int) -> str:
    """Format seconds into pretty string format."""
    days, remainder = divmod(n, 24 * 3600)
    hours, remainder = divmod(remainder, 3600)
    minutes, seconds = divmod(remainder, 60)
    days, hours = int(days), int(hours)
    minutes, seconds = int(minutes), int(seconds)
    if days > 0:
        return f'{days}d{hours}h:{minutes}m:{seconds}s'
    if hours > 0:
        return f'{hours}h:{minutes}m:{seconds}s'
    return f'{minutes}m:{seconds}s'
def multiprocess_map(
        func: Callable,
        lst: list,
        processes: int = None,
        func_args: dict = None,
        **kwargs
):
    """Map function to a iterable and process with multiple processes.

    Args:
        func: Function applied to each element of ``lst``.
        lst: Iterable of inputs.
        processes: Worker count; defaults to ``os.cpu_count() - 1`` (at least 1).
        func_args: Extra keyword arguments bound to ``func``
            (default changed from a shared mutable ``{}`` to ``None``).
        **kwargs: Forwarded to ``progress_bar`` (``total``, ``desc``, ...).

    Returns:
        list: Results in input order.
    """
    results = []
    if processes is None:
        # Leave one core free, but never request a zero-worker pool.
        processes = max(1, os.cpu_count() - 1)
    if kwargs.get('total') is None:
        try:
            kwargs['total'] = len(lst)
        except TypeError:  # generators and other unsized iterables
            pass
    func = partial(func, **(func_args or {}))
    with mp.Pool(processes=processes) as p:
        if kwargs.get('desc') is None or kwargs.get('desc') == "":
            loop = p.imap(func, lst)
        else:
            loop = progress_bar(p.imap(func, lst), **kwargs)
        for result in loop:
            results.append(result)
    return results
|
431559 | import argparse
import json
import os
from stix2.v20 import Bundle
import requests
def save_bundle(bundle, path):
    """helper function to write a STIX bundle to file"""
    # Announce overwrite vs. fresh write before serializing.
    print(f"{'overwriting' if os.path.exists(path) else 'writing'} {path}... ", end="", flush=True)
    with open(path, "w", encoding="utf-8") as outfile:
        # fp_serialize streams the bundle as pretty-printed, key-sorted JSON.
        bundle.fp_serialize(outfile, indent=4, sort_keys=True, ensure_ascii=False)
    print("done!")
def substitute(attack_bundle, controls_bundle, mappings_bundle, allow_unmapped=False):
    """substitute the controls bundle and mappings bundle for the mitigations in attack_bundle.
    attack_bundle, controls_bundle and mappings_bundle are of type stix2.Bundle
    allow_unmapped, if true, allows controls in the output bundle if they don't have mappings to ATT&CK techniques
    Returns a new bundle resembling attack_bundle but with mitigations and mitigates relationships
    from controls_bundle and mappings_bundle
    """
    def keep(sdo):
        # Drop ATT&CK mitigations and their "mitigates" relationships.
        if sdo["type"] == "course-of-action":
            return False
        if sdo["type"] == "relationship" and sdo["relationship_type"] == "mitigates":
            return False
        return True

    out_objects = [sdo for sdo in attack_bundle.objects if keep(sdo)]
    if allow_unmapped:  # add all controls
        out_objects += controls_bundle.objects
    else:  # add only controls which have associated mappings
        mapped_ids = {mapping["source_ref"] for mapping in mappings_bundle.objects}
        out_objects += [sdo for sdo in controls_bundle.objects if sdo["id"] in mapped_ids]
    # add mappings
    out_objects += mappings_bundle.objects
    return Bundle(*out_objects, allow_custom=True)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="substitute the mitigations in ATT&CK with a controls framework")
    parser.add_argument("-controls",
                        dest="controls",
                        help="filepath to the stix bundle representing the control framework",
                        default=os.path.join("..", "frameworks", "ATT&CK-v9.0", "nist800-53-r5",
                                             "stix", "nist800-53-r5-controls.json"))
    parser.add_argument("-mappings",
                        dest="mappings",
                        help="filepath to the stix bundle mapping the controls to ATT&CK",
                        default=os.path.join("..", "frameworks", "ATT&CK-v9.0", "nist800-53-r5",
                                             "stix", "nist800-53-r5-mappings.json"))
    parser.add_argument("-domain",
                        choices=["enterprise-attack", "mobile-attack", "pre-attack"],
                        help="the domain of ATT&CK to substitute",
                        default="enterprise-attack")
    parser.add_argument("-version",
                        dest="version",
                        help="which ATT&CK version to use",
                        default="v9.0")
    parser.add_argument("--allow-unmapped",
                        dest="allow_unmapped",
                        action="store_true",
                        help="if flag is present, output bundle will include controls that don't map to techniques. "
                             "By default only controls that have technique mappings will be included",
                        default=False)
    parser.add_argument("-output",
                        help="filepath to write the output stix bundle to",
                        default=os.path.join("..", "frameworks", "ATT&CK-v9.0", "nist800-53-r5",
                                             "stix", "nist800-53-r5-enterprise-attack.json"))
    args = parser.parse_args()
    # Defaults are v9.0 paths; rewrite them in place for any other version.
    if args.version != "v9.0":
        args.controls = args.controls.replace("ATT&CK-v9.0", f"ATT&CK-{args.version}")
        args.mappings = args.mappings.replace("ATT&CK-v9.0", f"ATT&CK-{args.version}")
        args.output = args.output.replace("ATT&CK-v9.0", f"ATT&CK-{args.version}")
    # Fetch the requested ATT&CK release directly from the MITRE CTI repo.
    print("downloading ATT&CK data... ", end="", flush=True)
    url = f"https://raw.githubusercontent.com/mitre/cti/ATT%26CK-{args.version}/{args.domain}/{args.domain}.json"
    attack_data = Bundle(
        requests.get(url, verify=True).json()["objects"],
        allow_custom=True
    )
    print("done")
    print("loading controls framework... ", end="", flush=True)
    with open(args.controls, "r") as f:
        controls = Bundle(json.load(f)["objects"], allow_custom=True)
    print("done")
    print("loading mappings... ", end="", flush=True)
    with open(args.mappings, "r") as f:
        mappings = Bundle(json.load(f)["objects"])
    print("done")
    print("substituting... ", end="", flush=True)
    out_bundle = substitute(attack_data, controls, mappings, args.allow_unmapped)
    print("done")
    save_bundle(out_bundle, args.output)
|
431581 | import os
import urllib.request
import librosa
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchlibrosa.augmentation import SpecAugmentation
from torchlibrosa.stft import Spectrogram, LogmelFilterBank
from rainforest.config import SAMPLE_RATE, N_FFT, N_MELS, FMIN, FMAX, HOP_LENGTH, N_SPECIES, CHUNK_SIZE
# Public GCS location of the pre-trained checkpoint, downloaded on first use.
MODEL_URI = 'https://storage.googleapis.com/birds-external-data/temp_model_GPU2.pth'
def init_layer(layer):
    """Initialize a Linear or Convolutional layer. """
    nn.init.xavier_uniform_(layer.weight)
    bias = getattr(layer, 'bias', None)
    if bias is not None:
        bias.data.fill_(0.)
def init_bn(bn):
    """Initialize a Batchnorm layer. """
    nn.init.zeros_(bn.bias)
    nn.init.ones_(bn.weight)
class ConvBlock(nn.Module):
    """Two 3x3 conv + batch-norm + ReLU layers followed by a configurable pooling step."""

    def __init__(self, in_channels, out_channels):
        super(ConvBlock, self).__init__()
        conv_opts = dict(kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
        self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, **conv_opts)
        self.conv2 = nn.Conv2d(in_channels=out_channels, out_channels=out_channels, **conv_opts)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.bn2 = nn.BatchNorm2d(out_channels)
        self.init_weight()

    def init_weight(self):
        """Xavier-initialize the convolutions and reset both batch norms."""
        for conv in (self.conv1, self.conv2):
            init_layer(conv)
        for bn in (self.bn1, self.bn2):
            init_bn(bn)

    def forward(self, input, pool_size=(2, 2), pool_type='avg'):
        """Apply conv->bn->relu twice, then pool by `pool_type` ('max'/'avg'/'avg+max')."""
        x = F.relu_(self.bn1(self.conv1(input)))
        x = F.relu_(self.bn2(self.conv2(x)))
        if pool_type == 'max':
            return F.max_pool2d(x, kernel_size=pool_size)
        if pool_type == 'avg':
            return F.avg_pool2d(x, kernel_size=pool_size)
        if pool_type == 'avg+max':
            return (F.avg_pool2d(x, kernel_size=pool_size)
                    + F.max_pool2d(x, kernel_size=pool_size))
        raise Exception('Incorrect argument!')
class Cnn14(nn.Module):
    """PANNs CNN14 audio tagger: log-mel front end, six conv blocks, sigmoid head."""
    def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin,
                 fmax, classes_num):
        super(Cnn14, self).__init__()
        window = 'hann'
        center = True
        pad_mode = 'reflect'
        ref = 1.0
        amin = 1e-10
        top_db = None
        # Identity normalization by default; overridden externally if needed.
        self.dataset_mean = 0.
        self.dataset_std = 1.
        # Spectrogram extractor
        self.spectrogram_extractor = Spectrogram(n_fft=window_size, hop_length=hop_size,
                                                 win_length=window_size, window=window, center=center,
                                                 pad_mode=pad_mode,
                                                 freeze_parameters=True)
        # Logmel feature extractor
        self.logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=window_size,
                                                 n_mels=mel_bins, fmin=fmin, fmax=fmax, ref=ref, amin=amin,
                                                 top_db=top_db,
                                                 freeze_parameters=True)
        # Spec augmenter (active only in training mode)
        self.spec_augmenter = SpecAugmentation(time_drop_width=32, time_stripes_num=2,
                                               freq_drop_width=8, freq_stripes_num=2)
        self.bn0 = nn.BatchNorm2d(64)
        self.conv_block1 = ConvBlock(in_channels=1, out_channels=64)
        self.conv_block2 = ConvBlock(in_channels=64, out_channels=128)
        self.conv_block3 = ConvBlock(in_channels=128, out_channels=256)
        self.conv_block4 = ConvBlock(in_channels=256, out_channels=512)
        self.conv_block5 = ConvBlock(in_channels=512, out_channels=1024)
        self.conv_block6 = ConvBlock(in_channels=1024, out_channels=2048)
        self.fc1 = nn.Linear(2048, 2048, bias=True)
        self.fc_audioset = nn.Linear(2048, classes_num, bias=True)
        self.init_weight()
    def init_weight(self):
        """Initialize the normalization layer and the two fully-connected heads."""
        init_bn(self.bn0)
        init_layer(self.fc1)
        init_layer(self.fc_audioset)
    def forward(self, input):
        """
        Input: (batch_size, data_length)"""
        x = self.spectrogram_extractor(input)   # (batch_size, 1, time_steps, freq_bins)
        x = self.logmel_extractor(x)    # (batch_size, 1, time_steps, mel_bins)
        x = (x - self.dataset_mean) / self.dataset_std
        # Normalize across the mel axis by transposing it into the channel slot.
        x = x.transpose(1, 3)
        x = self.bn0(x)
        x = x.transpose(1, 3)
        x = self.conv_block1(x, pool_size=(2, 2), pool_type='avg')
        x = F.dropout(x, p=0.2, training=self.training)
        x = self.conv_block2(x, pool_size=(2, 2), pool_type='avg')
        x = F.dropout(x, p=0.2, training=self.training)
        x = self.conv_block3(x, pool_size=(2, 2), pool_type='avg')
        x = F.dropout(x, p=0.2, training=self.training)
        x = self.conv_block4(x, pool_size=(2, 2), pool_type='avg')
        x = F.dropout(x, p=0.2, training=self.training)
        x = self.conv_block5(x, pool_size=(2, 2), pool_type='avg')
        x = F.dropout(x, p=0.2, training=self.training)
        x = self.conv_block6(x, pool_size=(1, 1), pool_type='avg')
        x = F.dropout(x, p=0.2, training=self.training)
        # Global pooling: mean over frequency, then max+mean over time.
        x = torch.mean(x, dim=3)
        (x1, _) = torch.max(x, dim=2)
        x2 = torch.mean(x, dim=2)
        x = x1 + x2
        x = F.dropout(x, p=0.5, training=self.training)
        x = F.relu_(self.fc1(x))
        embedding = F.dropout(x, p=0.5, training=self.training)
        clipwise_output = torch.sigmoid(self.fc_audioset(x))
        output_dict = {'clipwise_output': clipwise_output, 'embedding': embedding}
        return output_dict
def my_cnn14():
    """Build a Cnn14 with AudioSet hyper-parameters, then re-head and re-front it
    for this project's species count and STFT/mel settings from rainforest.config."""
    _model_config = {
        'sample_rate': 32000,
        'window_size': 1024,
        'hop_size': 320,
        'mel_bins': 64,
        'fmin': 50,
        'fmax': 14000,
        'classes_num': 527
    }
    model = Cnn14(**_model_config)
    # Replace the 527-class AudioSet head with an N_SPECIES head.
    model.fc_audioset = nn.Linear(2048, N_SPECIES, bias=True)
    init_layer(model.fc_audioset)
    # Swap the front end to match the project's feature-extraction settings.
    model.spectrogram_extractor = Spectrogram(
        n_fft=N_FFT, hop_length=HOP_LENGTH, win_length=N_FFT
    )
    model.logmel_extractor = LogmelFilterBank(
        sr=SAMPLE_RATE, n_fft=N_FFT, n_mels=N_MELS, fmin=FMIN, fmax=FMAX,
    )
    # bn0 must match the new mel-bin count.
    model.bn0 = nn.BatchNorm2d(N_MELS)
    init_bn(model.bn0)
    return model
def load_pretrained_model():
    """Download (if needed) and load the fine-tuned checkpoint; return the model in eval mode."""
    # map_location lets a GPU-trained checkpoint load on CPU-only machines.
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model = my_cnn14()
    model_path = './data/temp_model_GPU2.pth'
    if not os.path.exists(model_path):
        print('Downloading model')
        urllib.request.urlretrieve(MODEL_URI, model_path)
    model.load_state_dict(torch.load(model_path, map_location=device))
    _ = model.eval()
    return model
def read_audio_fast(path, duration=30):
    """Load up to `duration` seconds of audio, downmix to mono, and resample to SAMPLE_RATE."""
    # NOTE(review): librosa.core.audio.__audioread_load is a private, name-mangled
    # API and has been removed/changed in newer librosa releases — pin the librosa
    # version or switch to librosa.load.
    clip, sr_native = librosa.core.audio.__audioread_load(
        path, offset=0.0, duration=duration, dtype=np.float32)
    clip = librosa.to_mono(clip)
    if sr_native > 0:
        # kaiser_fast trades a little quality for much faster resampling.
        clip = librosa.resample(clip, sr_native, SAMPLE_RATE, res_type='kaiser_fast')
    return clip
def get_model_predictions_for_clip(y, model):
    """Slide a CHUNK_SIZE-second window over `y` (1-second hop), run the model on the
    batch of chunks, and return a per-start-second DataFrame of smoothed predictions.

    Args:
        y: 1-D waveform sampled at SAMPLE_RATE.
        model: Cnn14-style module returning {'clipwise_output': ...}.

    Returns:
        pd.DataFrame indexed by start_second with one column per species,
        smoothed with a trailing rolling mean of window 3.
    """
    duration = y.shape[0] // SAMPLE_RATE
    batch = []
    start_seconds = []
    for start in range(0, duration - CHUNK_SIZE + 1, 1):
        end = start + CHUNK_SIZE
        start_seconds.append(start)
        chunk = y[start * SAMPLE_RATE: end * SAMPLE_RATE]
        if len(chunk) != CHUNK_SIZE * SAMPLE_RATE:
            print(chunk.shape)
            break
        batch.append(chunk)
    batch = np.asarray(batch)
    tensors = torch.from_numpy(batch)
    with torch.no_grad():
        preds = model(tensors)['clipwise_output']
    test_preds = preds.cpu().numpy()
    pred_df = pd.DataFrame(test_preds, columns=range(N_SPECIES))
    pred_df['start_second'] = start_seconds
    # Smooth predictions: pad two forward-filled rows so the trailing rolling
    # mean covers the last start seconds too.
    # (DataFrame.append was removed in pandas 2.0; use pd.concat. Likewise
    # fillna(method='ffill') is deprecated in favor of .ffill().)
    padding = pd.DataFrame([
        [np.nan] * N_SPECIES + [pred_df.start_second.max() + 1],
        [np.nan] * N_SPECIES + [pred_df.start_second.max() + 2],
    ], columns=pred_df.columns)
    pred_df = pd.concat([pred_df, padding], ignore_index=True).ffill()
    pred_df = pred_df.set_index('start_second').rolling(window=3, min_periods=1).mean()
    return pred_df
|
431602 | from typing import Optional
from sqlalchemy import Column
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy.orm import column_property
from sqlalchemy.orm import deferred
from sqlalchemy.orm import registry
from sqlalchemy.orm import Session
from sqlalchemy.orm import synonym
from sqlalchemy.sql.functions import func
from sqlalchemy.sql.sqltypes import Text
# Mypy-plugin test fixture: the "# EXPECTED..." comments below are assertions
# consumed by the SQLAlchemy type-checking test harness — do not edit them.
reg: registry = registry()
@reg.mapped
class User:
    __tablename__ = "user"
    id = Column(Integer(), primary_key=True)
    name = Column(String)
    # this gets inferred
    big_col = deferred(Column(Text))
    # this gets inferred
    explicit_col = column_property(Column(Integer))
    # EXPECTED: Can't infer type from ORM mapped expression assigned to attribute 'lower_name'; # noqa
    lower_name = column_property(func.lower(name))
    # EXPECTED: Can't infer type from ORM mapped expression assigned to attribute 'syn_name'; # noqa
    syn_name = synonym("name")
    # this uses our type
    lower_name_exp: str = column_property(func.lower(name))
    # this uses our type
    syn_name_exp: Optional[str] = synonym("name")
# Usage checks: exercise the inferred attribute types; EXPECTED_MYPY comments
# are matched against mypy's output by the test harness — do not edit them.
s = Session()
u1: Optional[User] = s.get(User, 5)
assert u1
q1: Optional[str] = u1.big_col
q2: Optional[int] = u1.explicit_col
# EXPECTED_MYPY: Incompatible types in assignment (expression has type "str", variable has type "int") # noqa
x: int = u1.lower_name_exp
# EXPECTED_MYPY: Incompatible types in assignment (expression has type "Optional[str]", variable has type "int") # noqa
y: int = u1.syn_name_exp
|
431613 | import discord
from discord.ext import commands
from random import randint, sample, shuffle, choice
import os
import json
from typing import List, Dict, Optional, Any, Union
import asyncio
from extra import utils
from extra.slothclasses.player import Player
from extra.minigames.connect_four import ConnectFour
from extra.minigames.blackjack.blackjack import BlackJack
from extra.minigames.view import MoveObjectGameView, TicTacToeView, FlagsGameView
# Minigame cogs merged into the Games class below via multiple inheritance.
minigames_cogs: List[commands.Cog] = [
    ConnectFour, BlackJack
]
class Games(*minigames_cogs):
""" A category for a minigames. """
    def __init__(self, client) -> None:
        """ Class init method: stores the bot client and initializes every inherited minigame cog. """
        # Initiates all inherited cogs
        for minigame_cog in minigames_cogs:
            minigame_cog.__init__(self, client)
        self.client = client
    @commands.Cog.listener()
    async def on_ready(self) -> None:
        """ Tells when the Games cog is ready to go. """
        print("Game cog is online!")
    @commands.command()
    @Player.poisoned()
    @commands.cooldown(1, 600, commands.BucketType.user)
    async def destiny(self, ctx) -> None:
        """ Plays the Destiny game (interactive move-the-object board in an embed). """
        member = ctx.author
        embed = discord.Embed(
            title="__Destiny__",
            color=discord.Color.blue(),
            timestamp=ctx.message.created_at
        )
        view: discord.ui.View = MoveObjectGameView(ctx, member)
        # Render the board as one multi-line string inside the embed description.
        square = await view.make_game_square(update=True)
        square = '\n'.join(map(lambda r: ''.join(r), square))
        embed.description = square
        msg = await ctx.send(embed=embed, view=view)
        await view.wait()
        # On timeout, mark the embed and refund the 10-minute cooldown.
        if view.status == 'Timeout':
            embed.title += ' (Timeout)'
            embed.color = discord.Color.red()
            ctx.command.reset_cooldown(ctx)
            await msg.edit(embed=embed)
@commands.command(aliases=["ttt", "jogo_da_idosa", "jdi", "jogo_da_velha", "#"])
@Player.poisoned()
@commands.cooldown(1, 60, commands.BucketType.user)
async def tic_tac_toe(self, ctx, *, member: discord.Member = None) -> None:
""" Plays Tic Tac Toe.
:param member: The opponent. """
author: discord.Member = ctx.author
if not member:
ctx.command.reset_cooldown(ctx)
return await ctx.send(f"**Please, inform a member to play against, {author.mention}!**")
if author.id == member.id:
ctx.command.reset_cooldown(ctx)
return await ctx.send(f"**You cannot play with yourself, {author.mention}! <:sheesh:872621063987679243>**")
if member.bot:
ctx.command.reset_cooldown(ctx)
return await ctx.send(f"**You cannot play against a bot, {author.mention}! 🤖**")
embed: discord.Embed = discord.Embed(
title="__Tic Tac Toe__",
color=member.color,
timestamp=ctx.message.created_at
)
embed.set_author(name=author, icon_url=author.display_avatar)
embed.set_footer(text=member, icon_url=member.display_avatar)
view: discord.ui.View = TicTacToeView(self.client, player=author, opponent=member)
embed.add_field(name="__Players__:", value=f"{author.mention} = ❌ | {member.mention} = ⭕", inline=False)
embed.add_field(name="__Turn__:", value=f"Now it's {view.turn_member.mention}'s turn!")
msg = await ctx.send(embed=embed, view=view)
await view.wait()
await asyncio.sleep(0.3)
if view.state is False:
embed.color = discord.Color.brand_red()
embed.set_field_at(1, name="__Timeout__", value="The game has timeouted!")
await msg.edit(embed=embed)
@commands.command(aliases=['flag', 'flag_game', 'flags'])
@Player.poisoned()
@commands.cooldown(1, 60, commands.BucketType.user)
async def flag_quiz(self, ctx) -> None:
""" Plays Country Flags Quiz"""
json_flags = json.load(open("extra/random/json/flag_game.json"))
# Selects twenty unique flags
flags = [json_flags[number] for number in sample(range(0, len(json_flags)), 20)]
await self.generate_flag_game(ctx=ctx, points=0, round=0, flags=flags)
async def generate_flag_game(self, ctx: commands.Context, message: Optional[discord.Message] = None, points: int = 0, round: int = 0, flags: List[Any] = None, timeout_count: int = 0):
# Open JSON file
json_flags = json.load(open("extra/random/json/flag_game.json", encoding='utf-8'))
# Creates the name options
countries_options = []
# Gets three random countries name
while len(countries_options) != 3:
name = json_flags[randint(0, len(json_flags) - 1)]['name']
if name + '0' not in countries_options and name != flags[round]['name']:
countries_options.append(name + '0')
countries_options.append(flags[round]['name'] + '1')
shuffle(countries_options)
# Game embed
embed = discord.Embed(
title='__Guess the flag__',
description= f"\u200b\n**🪙 Points: {points}**",
colour=1,
)
embed.set_image(url=f"https://flagcdn.com/224x168/{flags[round]['code']}.png")
embed.set_author(name=ctx.author, icon_url=ctx.author.display_avatar)
embed.set_footer(text=f"Round {round + 1} of 20")
# Creates the buttons
view = FlagsGameView(ctx=ctx, client=self.client, countries_names=countries_options, flags=flags, points=points, round=round, timeout_count=timeout_count)
if message:
await message.edit('\u200b', embed=embed, view=view)
else:
message = await ctx.send('\u200b', embed=embed, view=view)
await view.wait()
# Timeout
if not view.used:
view.stop()
# Shows the correct button
correct_button = [button for button in view.children if button.label == flags[round]['name']]
correct_button[0].style = discord.ButtonStyle.primary
embed.description = f"\n** 🪙 Points: {points}**\n\n**🔺 Timeout!**"
await message.edit('\u200b', embed=embed, view=view)
await asyncio.sleep(1)
if view.timeout_count == 3:
return await self.end_flag_game(ctx=ctx, message=message, member=ctx.message.author, points=points)
else:
view.timeout_count += 1
if round >= 19:
return await self.end_flag_game(ctx=ctx, message=message, member=ctx.message.author, points=points)
await self.generate_flag_game(ctx=ctx, message=message, points=points, round=round + 1, flags=flags, timeout_count=view.timeout_count)
else:
view.timeout_count = 0
async def end_flag_game(self, ctx: commands.Context, message: discord.Message, member: discord.Member, points: int):
# Generates the end game embed
embed = discord.Embed(
title='__Guess the flag__',
description= f"✅ Correct Answers: {points}/20.",
)
embed.set_author(name=member, icon_url=member.display_avatar)
await message.edit('\u200b', embed=embed, view=None)
#=== Flag games settings ===#
@commands.command(hidden=True)
@commands.is_owner()
async def check_flags(self, ctx) -> None:
""" Shows all flags and their names. This command is used to check the link of the images. """
json_flags = json.load(open("extra/random/json/flag_game.json", encoding='utf-8'))
for flag in json_flags:
embed = discord.Embed()
embed.set_image(url=flag['link'] + '.png')
await ctx.send(flag['name'], embed=embed)
@commands.command(aliases=['lotto'])
@Player.poisoned()
async def lottery(self, ctx, g1 = None, g2 = None, g3 = None):
""" Enter the lottery and see if you win!
:param g1: Guess 1.
:param g2: Guess 2.
:param g3: Guess 3.
* Cost: 1łł.
* Prize: 500łł """
author = ctx.author
await ctx.message.delete()
if not g1:
return await ctx.send("**You informed 0 guesses, 3 guesses are needed!**", delete_after=3)
elif not g2:
return await ctx.send("**You informed 1 guess, 3 guesses are needed!**", delete_after=3)
elif not g3:
return await ctx.send("**You informed 2 guesses, 3 guesses are needed!**", delete_after=3)
try:
g1 = int(g1)
g2 = int(g2)
g3 = int(g3)
except ValueError:
return await ctx.send("**All guesses must be integers!**", delete_after=3)
for n in [g1, g2, g3]:
if n <= 0 or n > 5:
return await ctx.send(f"**Each number must be between 1-5!**", delete_after=3)
SlothCurrency = self.client.get_cog('SlothCurrency')
# Check if user is not on cooldown
user_secs = await SlothCurrency.get_user_currency(author.id)
current_ts = await utils.get_timestamp()
if not user_secs:
await SlothCurrency.insert_user_currency(author.id, current_ts - 61)
user_secs = await SlothCurrency.get_user_currency(author.id)
if user_secs[0][1] >= 1:
await SlothCurrency.update_user_money(author.id, -1)
else:
return await ctx.send(f"**You need 1łł to play the lottery, {author.mention}!**")
if user_secs[0][6]:
sub_time = current_ts - user_secs[0][6]
if sub_time >= 1200:
await SlothCurrency.update_user_lotto_ts(author.id, current_ts)
else:
m, s = divmod(1200 - int(sub_time), 60)
h, m = divmod(m, 60)
if h > 0:
return await ctx.send(f"**You're on cooldown, try again in {h:d} hours, {m:02d} minutes and {s:02d} seconds.**", delete_after=5)
elif m > 0:
return await ctx.send(
f"**You're on cooldown, try again in {m:02d} minutes and {s:02d} seconds.**",
delete_after=5)
else:
return await ctx.send(
f"**You're on cooldown, try again in {s:02d} seconds.**",
delete_after=5)
else:
await SlothCurrency.update_user_lotto_ts(author.id, current_ts)
author = author
numbers = []
for x in range(3):
numbers.append(randint(1, 5))
string_numbers = [str(i) for i in numbers]
if g1 == numbers[0] and g2 == numbers[1] and g3 == numbers[2]:
await ctx.send(f'**{author.mention} You won! Congratulations on winning the lottery with the numbers ({g1}, {g2}, {g3})!🍃+500łł!**')
if not await SlothCurrency.get_user_currency(author.id):
await SlothCurrency.insert_user_currency(author.id, current_ts - 61)
await SlothCurrency.update_user_money(author.id, 500)
else:
await ctx.send(
f"**{author.mention}, better luck next time... You guessed {g1}, {g2}, {g3}...\nThe numbers were:** `{', '.join(string_numbers)}`")
@commands.command(aliases=['dice'])
@Player.poisoned()
async def roll(self, ctx, sides = None):
""" Roll a dice with the number of faces given.
:param sides: The number of faces to roll. """
await ctx.message.delete()
if not sides:
sides = 6
try:
sides = int(sides)
except ValueError:
sides = 6
if sides > 1000000000000 or sides < 0:
return await ctx.send("**Enter a valid integer value**", delete_after=3)
embed = discord.Embed(color=ctx.author.color, title=f":game_die: **YOU GOT:** **{randint(1, sides)}** :game_die: `(1 - {sides})`",
timestamp=ctx.message.created_at)
embed.set_footer(text=f"Rolled by {ctx.author}", icon_url=ctx.author.display_avatar)
await ctx.send(embed=embed)
@commands.command(aliases=['flip_coin', 'flipcoin', 'coinflip', 'cf', 'fc'])
@Player.poisoned()
@commands.cooldown(1, 10, commands.BucketType.user)
async def coin_flip(self, ctx, bet: int = None, side: str = None) -> None:
""" Command for flipping a coin.
:param bet: The amount of money you want to bet.
:param side: The side you wanna bet on. (Heads/Tail)
* Minimum bet: 50łł """
member: discord.Member = ctx.author
if not bet:
ctx.command.reset_cooldown(ctx)
return await ctx.reply("**Please, inform how much you wanna bet!**")
minimum_bet: int = 50
bet_limit: int = 5000
if bet > bet_limit:
ctx.command.reset_cooldown(ctx)
return await ctx.reply(f"**You cannot bet more than {bet_limit}łł at a time, {member.mention}!**")
if not side:
ctx.command.reset_cooldown(ctx)
return await ctx.reply("**Please, inform the side you wanna bet on!**")
SlothCurrency = self.client.get_cog('SlothCurrency')
user_currency: List[int] = await SlothCurrency.get_user_currency(member.id)
if not user_currency:
return await ctx.reply("**You don't even have a Profile!**")
if user_currency[0][1] < bet:
return await ctx.reply(f"**You don't have `{bet} leaves` to bet!**")
if bet < minimum_bet:
ctx.command.reset_cooldown(ctx)
return await ctx.send(f"**The minimum bet is `{minimum_bet} leaves`!**")
side_options: Dict[str, List[str]] = {
'Tail': {'aliases': ['t', 'tail', 'tails'], 'image': 'https://upload.wikimedia.org/wikipedia/commons/thumb/a/a1/2021_Native_American_%241_Coin_Reverse.png/220px-2021_Native_American_%241_Coin_Reverse.png'},
'Heads': {'aliases': ['h', 'head', 'heads'], 'image': 'https://upload.wikimedia.org/wikipedia/en/f/fe/Sacagawea_dollar_obverse.png'}
}
if side.lower() not in side_options['Tail']['aliases'] + side_options['Heads']['aliases']:
ctx.command.reset_cooldown(ctx)
return await ctx.reply("**Please, inform a valid side!**")
side = 'Tail' if side.lower() in side_options['Tail']['aliases'] else 'Heads'
coin_var: str = choice(['Tail', 'Heads'])
win_var: str = 'won' if side.lower() == coin_var.lower() else 'lost'
# Makes the embed
embed: discord.Embed = discord.Embed(
description = f"It's **{coin_var}**",
)
embed.add_field(name=f"Amount {win_var}", value=f"{bet} leaves", inline=False)
if win_var == 'won':
embed.color=discord.Color.green()
embed.add_field(name="New balance", value=f"{user_currency[0][1]+bet} leaves")
await SlothCurrency.update_user_money(member.id, bet)
else:
embed.color=discord.Color.dark_red()
embed.add_field(name="New balance", value=f"{user_currency[0][1]-bet} leaves")
await SlothCurrency.update_user_money(member.id, -bet)
embed.set_author(name=f"You've {win_var}!", icon_url=member.display_avatar)
embed.set_thumbnail(url=side_options[coin_var]['image'])
embed.set_footer(text=f"Command by {member}")
await ctx.reply(embed=embed)
@commands.command()
@Player.poisoned()
@commands.cooldown(1, 25, commands.BucketType.user)
async def slots(self, ctx, bet: int = None) -> None:
""" Command for playing Slots.
:param bet: The amount you wanna bet.
* Minimum bet: 50łł """
author: discord.Member = ctx.author
if not bet:
ctx.command.reset_cooldown(ctx)
return await ctx.reply(f"**Please inform how much you wanna bet, {author.mention}**")
minimum_bet: int = 50
bet_limit: int = 5000
if bet > bet_limit:
ctx.command.reset_cooldown(ctx)
return await ctx.reply(f"**You cannot bet more than {bet_limit}łł at a time, {author.mention}!**")
SlothCurrency = self.client.get_cog('SlothCurrency')
user_currency = await SlothCurrency.get_user_currency(author.id)
if not user_currency:
view = discord.ui.View()
view.add_item(discord.ui.Button(style=5, label="Create Account", emoji="🦥", url="https://thelanguagesloth.com/profile/update"))
return await ctx.reply(
embed=discord.Embed(description=f"**{author.mention}, you don't have an account yet. Click [here](https://thelanguagesloth.com/profile/update) to create one, or in the button below!**"),
view=view)
try:
bet = int(bet)
except ValueError:
ctx.command.reset_cooldown(ctx)
return await ctx.reply(f"**Please, inform a valid bet value, {author.mention}!**")
if bet > user_currency[0][1]:
ctx.command.reset_cooldown(ctx)
return await ctx.reply(f"**You don't have {bet} to bet, {author.mention}!**")
if bet < minimum_bet:
ctx.command.reset_cooldown(ctx)
return await ctx.send(f"**The minimum bet is `{minimum_bet} leaves`!**")
if bet < 0:
ctx.command.reset_cooldown(ctx)
return await ctx.reply(f"**You must inform a positive amount to bet, {author.mention}**")
# slots = ['bus', 'train', 'horse', 'heart', 'monkey', 'cow', 'parrot', 'leaves', 'money_mouth']
slots: List[Dict[str, Union[str, int, float]]] = [
{"emoji": ':bus:', "multiplier": 2},
{"emoji": ':train:', "multiplier": 2},
{"emoji": ':heart:', "multiplier": 4},
{"emoji": ':monkey:', "multiplier": 2},
{"emoji": ':cow:', "multiplier": 2},
{"emoji": ':money_mouth:', "multiplier": 3},
]
slot1 = slots[randint(0, 5)]
slot2 = slots[randint(0, 5)]
slot3 = slots[randint(0, 5)]
slotOutput = '| {} | {} | {} |\n'.format(slot1["emoji"], slot2["emoji"], slot3["emoji"])
ok = discord.Embed(title="__Slots Machine__", color=discord.Color(0xFFEC))
ok.set_footer(text=f"Bet from {author}", icon_url=author.display_avatar)
rolling_emoji: str = '<a:slots_emoji:903335419725373490>'
ok.add_field(name='Rolling...', value='| {} | {} | {} |\n'.format(rolling_emoji, rolling_emoji, rolling_emoji))
msg = await ctx.send(embed=ok)
await asyncio.sleep(0.8)
ok.set_field_at(0, name='Rolling...', value='| {} | {} | {} |\n'.format(slot1["emoji"], rolling_emoji, rolling_emoji))
await msg.edit(embed=ok)
await asyncio.sleep(0.8)
ok.set_field_at(0, name='Rolling...', value='| {} | {} | {} |\n'.format(slot1["emoji"], slot2["emoji"], rolling_emoji))
await msg.edit(embed=ok)
await asyncio.sleep(0.8)
money_won: int = bet * slot1['multiplier']
# User won with 3
won = discord.Embed(title = "Slots Machine", color = discord.Color(0xFFEC))
won.add_field(name="{}\nWon".format(slotOutput), value=f'You won {money_won} leaves')
won.set_footer(text=f"Bet from {author}", icon_url=author.display_avatar)
# User broke even with 2
be = discord.Embed(title = "Slots Machine", color = discord.Color(0xFFEC))
be.add_field(name="{}\nBroke even".format(slotOutput), value=f'You broke even, so you keep your {bet} leaves')
be.set_footer(text=f"Bet from {author}", icon_url=author.display_avatar)
# User lost
lost = discord.Embed(title = "Slots Machine", color = discord.Color(0xFFEC))
lost.add_field(name="{}\nLost".format(slotOutput), value=f'You lost {bet} leaves')
lost.set_footer(text=f"Bet from {author}", icon_url=author.display_avatar)
if slot1["emoji"] == slot2["emoji"] == slot3["emoji"]:
await SlothCurrency.update_user_money(ctx.author.id, money_won)
return await msg.edit(embed=won)
elif slot1["emoji"] == slot2["emoji"] or slot2["emoji"] == slot3["emoji"]:
return await msg.edit(embed=be)
else:
await SlothCurrency.update_user_money(ctx.author.id, -bet)
return await msg.edit(embed=lost)
def setup(client) -> None:
    """Entry point used by discord.py to register the Games cog."""
    games_cog = Games(client)
    client.add_cog(games_cog)
|
431629 | import unittest
try:
from unittest import mock
except ImportError:
import mock
from betamax.adapter import BetamaxAdapter
from requests.adapters import HTTPAdapter
class TestBetamaxAdapter(unittest.TestCase):
    """Unit tests for BetamaxAdapter construction and cassette handling."""

    def setUp(self):
        # Wrap a mocked HTTP adapter so no real requests machinery is needed.
        http_adapter = mock.Mock()
        self.adapters_dict = {'http://': http_adapter}
        self.adapter = BetamaxAdapter(old_adapters=self.adapters_dict)

    def tearDown(self):
        self.adapter.eject_cassette()

    def test_has_http_adapter(self):
        # Fixed typo in the method name ("adatper" -> "adapter") so the
        # intent is clear and test filters by name work as expected.
        assert self.adapter.http_adapter is not None
        assert isinstance(self.adapter.http_adapter, HTTPAdapter)

    def test_empty_initial_state(self):
        # A freshly built adapter has no cassette loaded.
        assert self.adapter.cassette is None
        assert self.adapter.cassette_name is None
        assert self.adapter.serialize is None

    def test_load_cassette(self):
        filename = 'test'
        self.adapter.load_cassette(filename, 'json', {
            'record': 'none',
            'cassette_library_dir': 'tests/cassettes/'
        })
        assert self.adapter.cassette is not None
        assert self.adapter.cassette_name == filename
|
431713 | from builtins import complex
import pytest
from json_to_models.dynamic_typing import DUnion, StringLiteral, get_hash_string
# Parametrized cases: (*args to pass to DUnion) | expected resulting MetaData.
test_dunion = [
    pytest.param(
        [int, int],
        DUnion(int),
        id="unique_types"
    ),
    pytest.param(
        [int, DUnion(int)],
        DUnion(int),
        id="nested_union_&_merge"
    ),
    pytest.param(
        [str, DUnion(int, DUnion(float, complex))],
        DUnion(int, float, complex, str),
        id="complex_merge"
    ),
    pytest.param(
        [str, StringLiteral({'a'})],
        DUnion(str),
        id="str_literal_to_string"
    ),
    pytest.param(
        [StringLiteral({'b'}), StringLiteral({'a'})],
        DUnion(StringLiteral({'a', 'b'})),
        id="str_literal_merge"
    ),
    # 100 distinct literals should collapse to plain `str`.
    pytest.param(
        [StringLiteral({str(i)}) for i in range(100)],
        DUnion(str),
        id="str_literal_too_much"
    ),
]
@pytest.mark.parametrize("value,expected", test_dunion)
def test_dunion_creation(value, expected):
    """DUnion(*value) must normalize to the expected metadata."""
    assert DUnion(*value) == expected
def test_hash_string():
    """Hash strings must distinguish dicts by both keys and value types,
    and a DUnion's hash must change when its members change."""
    samples = ({'a': int}, {'b': int}, {'a': float})
    assert len(set(map(get_hash_string, samples))) == 3
    union = DUnion(str, float)
    h1 = union.to_hash_string()
    union.replace(complex, index=0)
    h2 = union.to_hash_string()
    assert h1 != h2, f"{h1}, {h2}"
|
431798 | import torch
import torch.nn as nn
"""
@article{vandeven2019three,
title={Three scenarios for continual learning},
author={<NAME>, <NAME> and <NAME>},
journal={arXiv preprint arXiv:1904.07734},
year={2019}
}
@article{vandeven2018generative,
title={Generative replay with feedback connections as a general strategy for continual learning},
author={<NAME>, <NAME> and <NAME>},
journal={arXiv preprint arXiv:1809.10635},
year={2018}
}
"""
class AutoEncoder(nn.Module):
    """Convolutional VAE used as a generative-replay model.

    Encoder: 3 strided convs -> flatten -> (q_mean, q_logvar).
    Decoder: linear projection -> 3 transposed convs -> Sigmoid.
    NOTE(review): `hidden_dim` is accepted but never used, and the
    `kernel_num` argument is overwritten with 512 below — confirm intent.
    """

    def __init__(self, kernel_num, in_channel=1, img_sz=32, hidden_dim=256, z_size=100, bn = False):
        super(AutoEncoder, self).__init__()
        self.BN = bn
        # Flattened input size, used to normalise the variational loss.
        self.in_dim = in_channel*img_sz*img_sz
        self.image_size = img_sz
        self.channel_num = in_channel
        self.kernel_num = kernel_num
        self.z_size = z_size
        # -weigths of different components of the loss function
        self.lamda_rcl = 1.
        self.lamda_vl = 1.
        # Training related components that should be set before training
        # -criterion for reconstruction
        self.recon_criterion = None
        self.encoder = nn.Sequential(
            self._conv(in_channel, 64),
            self._conv(64, 128),
            self._conv(128, 512),
        )
        self.decoder = nn.Sequential(
            self._deconv(512, 256),
            self._deconv(256, 64),
            self._deconv(64, in_channel, ReLU=False),
            nn.Sigmoid()
        )
        # Three stride-2 convs shrink the spatial size by a factor of 8.
        self.feature_size = img_sz // 8
        self.kernel_num = 512
        self.feature_volume = self.kernel_num * (self.feature_size ** 2)
        # q
        self.q_mean = self._linear(self.feature_volume, z_size, relu=False)
        self.q_logvar = self._linear(self.feature_volume, z_size, relu=False)
        # projection
        self.project = self._linear(z_size, self.feature_volume, relu=False)

    def reparameterize(self, mu, logvar):
        '''Perform "reparametrization trick" to make these stochastic variables differentiable.'''
        std = logvar.mul(0.5).exp_()
        eps = std.new(std.size()).normal_()
        return eps.mul(std).add_(mu)

    def forward(self, x):
        # encode (forward), reparameterize and decode (backward)
        # At eval time the mean is used deterministically instead of sampling.
        mu, logvar, hE = self.encode(x)
        z = self.reparameterize(mu, logvar) if self.training else mu
        x_recon = self.decode(z)
        return (x_recon, mu, logvar, z)

    def sample(self, size):
        """Draw `size` images from the prior. NOTE(review): assumes CUDA is available."""
        # set model to eval()-mode
        mode = self.training
        self.eval()
        # sample z
        z = torch.randn(size, self.z_size)
        z = z.cuda()
        with torch.no_grad():
            X = self.decode(z)
        # set model back to its initial mode
        self.train(mode=mode)
        # return samples as [batch_size]x[channels]x[image_size]x[image_size] tensor, plus classes-labels
        return X

    def loss_function(self, recon_x, x, dw, mu=None, logvar=None):
        """Return (reconstruction loss, variational loss); `dw` are per-sample data weights."""
        batch_size = x.size(0)
        ###-----Reconstruction loss-----###
        reconL = (self.recon_criterion(input=recon_x.view(batch_size, -1), target=x.view(batch_size, -1))).mean(dim=1)
        reconL = torch.mean(reconL * dw)
        ###-----Variational loss-----###
        if logvar is not None:
            #---- see Appendix B from: Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014 ----#
            variatL = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp(), dim=1).mean()
            # -normalise by same number of elements as in reconstruction
            variatL /= self.in_dim
            # --> because self.recon_criterion averages over batch-size but also over all pixels/elements in recon!!
        else:
            variatL = torch.tensor(0.)
            variatL = variatL.cuda()
        # Return a tuple of the calculated losses
        return reconL, variatL

    def train_batch(self, x, data_weights, allowed_predictions):
        '''Train model for one batch ([x],[y]), possibly supplemented with replayed data ([x_],[y_]).
        [x]  <tensor> batch of inputs (could be None, in which case only 'replayed' data is used)

        NOTE(review): `allowed_predictions` is unused here; `self.optimizer`
        and `self.recon_criterion` must have been set externally.'''
        # Set model to training-mode
        self.train()
        ##--(1)-- CURRENT DATA --##
        # Run the model
        recon_batch, mu, logvar, z = self.forward(x)
        # Calculate all losses
        reconL, variatL = self.loss_function(recon_x=recon_batch, x=x, dw = data_weights, mu=mu, logvar=logvar)
        # Weigh losses as requested
        loss_total = self.lamda_rcl*reconL + self.lamda_vl*variatL
        # perform update
        self.optimizer.zero_grad()
        loss_total.backward()
        self.optimizer.step()
        return loss_total.detach()

    def decode(self, z):
        '''Pass latent variable activations through feedback connections, to give reconstructed image [image_recon].'''
        z_projected = self.project(z).view(
            -1, self.kernel_num,
            self.feature_size,
            self.feature_size,
        )
        return self.decoder(z_projected)

    def encode(self, x):
        '''Pass input through feed-forward connections, to get [hE], [z_mean] and [z_logvar].'''
        # encode x
        encoded = self.encoder(x)
        # sample latent code z from q given x.
        z_mean, z_logvar = self.q(encoded)
        return z_mean, z_logvar, encoded

    def q(self, encoded):
        # Flatten conv features and map to the posterior parameters.
        unrolled = encoded.view(-1, self.feature_volume)
        return self.q_mean(unrolled), self.q_logvar(unrolled)

    def _conv(self, channel_size, kernel_num, kernel_size_=4, stride_=2):
        # Stride-2 conv halving spatial size; BatchNorm optional via self.BN.
        if self.BN:
            return nn.Sequential(
                nn.Conv2d(
                    channel_size, kernel_num,
                    kernel_size=kernel_size_, stride=stride_, padding=1,
                ),
                nn.BatchNorm2d(kernel_num),
                nn.ReLU(),
            )
        else:
            return nn.Sequential(
                nn.Conv2d(
                    channel_size, kernel_num,
                    kernel_size=kernel_size_, stride=stride_, padding=1,
                ),
                nn.ReLU(),
            )

    def _deconv(self, channel_num, kernel_num,ReLU=True, kernel_size_=4, stride_=2):
        # Stride-2 transposed conv doubling spatial size; ReLU/BN optional.
        if ReLU:
            if self.BN:
                return nn.Sequential(
                    nn.ConvTranspose2d(
                        channel_num, kernel_num,
                        kernel_size=kernel_size_, stride=stride_, padding=1,
                    ),
                    nn.BatchNorm2d(kernel_num),
                    nn.ReLU(),
                )
            else:
                return nn.Sequential(
                    nn.ConvTranspose2d(
                        channel_num, kernel_num,
                        kernel_size=kernel_size_, stride=stride_, padding=1,
                    ),
                    nn.ReLU(),
                )
        else:
            if self.BN:
                return nn.Sequential(
                    nn.ConvTranspose2d(
                        channel_num, kernel_num,
                        kernel_size=kernel_size_, stride=stride_, padding=1,
                    ),
                    nn.BatchNorm2d(kernel_num),
                )
            else:
                return nn.Sequential(
                    nn.ConvTranspose2d(
                        channel_num, kernel_num,
                        kernel_size=kernel_size_, stride=stride_, padding=1,
                    ),
                )

    def _linear(self, in_size, out_size, relu=True):
        # Linear layer, optionally followed by ReLU.
        return nn.Sequential(
            nn.Linear(in_size, out_size),
            nn.ReLU(),
        ) if relu else nn.Linear(in_size, out_size)
def CIFAR_GEN(bn = False):
    """Build a CIFAR-sized AutoEncoder (3x32x32 inputs, 1024-dim latent).

    :param bn: whether to use BatchNorm in the conv/deconv stacks.
               Fix: this flag was previously accepted but never forwarded,
               so ``CIFAR_GEN(bn=True)`` silently built a no-BN model.
    """
    return AutoEncoder(in_channel=3, img_sz=32, kernel_num=512, z_size=1024, bn=bn)
|
431829 | from DemoApp import App
import wx
#from gui import skin as skincore
from gui.uberwidgets.simplemenu import SimpleMenu,SimpleMenuItem
from gui.uberwidgets.UberButton import UberButton
class Frame(wx.Frame):
    """Manual test frame: an UberButton that pops a SimpleMenu when clicked."""

    def __init__(self):
        wx.Frame.__init__(self,None,title='Simple Menu Test')
        self.panel=wx.Panel(self)
        self.panel.Sizer=wx.BoxSizer(wx.VERTICAL)
        # Menu with a separator (the id=-1 item) between Test3 and Test4.
        menu=SimpleMenu(self, skinkey='simplemenu',maxheight=10,width=100)
        items=[
            SimpleMenuItem('Test1'),#,self.DifferentMethodTest),
            SimpleMenuItem('Test2'),
            SimpleMenuItem('Test3'),
            SimpleMenuItem(id=-1),
            SimpleMenuItem('Test4')
        ]
        menu.SetItems(items)
        # Button configuration; the commented alternatives are kept for
        # quick manual experimentation with other sizes/types/icons.
        skin='button'
        size=None#(100,100)#
        type='menu'#None#'toggle'#
        #menu=None#self.menu#
        icon=None#wx.Bitmap('../../../res/skins/default/statusicons/mobile.png',wx.BITMAP_TYPE_PNG)#wx.Bitmap('../../res/skins/default/tinydigsby.png',wx.BITMAP_TYPE_PNG)
        self.smb1=UberButton(self.panel,wx.NewId(),"SMB",skin,icon=icon,style=wx.HORIZONTAL,size=size,type=type,menu=menu)
        self.panel.Sizer.Add(self.smb1)
def Go():
    """Build the demo frame and make it visible."""
    frame = Frame()
    frame.Show(True)
# Run the demo app when executed directly.
if __name__=='__main__':
    a = App( Go )
    a.MainLoop()
431853 | from zentral.utils.apps import ZentralAppConfig
class ZentralAccountsAppConfig(ZentralAppConfig):
    """Django AppConfig for the Zentral accounts app."""
    name = "accounts"
    verbose_name = "Zentral accounts app"
    # Models this app manages permissions for.
    permission_models = ("user",)
|
431911 | import os.path as osp
from PIL import Image
from torch.utils.data import Dataset
from torchvision import transforms
import os
import torch
import random
import numpy as np
class MiniImageNet(Dataset):
    """miniImageNet dataset returning a multi-scale pyramid of patches per image.

    For each image, patches are cropped on NxN grids (one grid size per entry
    of `patch_list`), each enlarged by `patch_ratio`, then stacked into a
    single tensor of shape [sum(n*n), C, 84, 84].
    """

    def __init__(self, setname, args):
        IMAGE_PATH = os.path.join(args.data_dir, 'miniimagenet/images')
        SPLIT_PATH = os.path.join(args.data_dir, 'miniimagenet/split')
        csv_path = osp.join(SPLIT_PATH, setname + '.csv')
        # Skip the CSV header row.
        # NOTE(review): the file handle from open() is never closed.
        lines = [x.strip() for x in open(csv_path, 'r').readlines()][1:]
        data = []
        label = []
        lb = -1
        self.setname=setname
        self.wnids = []
        # Fall back to defaults when the args namespace lacks these fields.
        if 'patch_list' not in vars(args).keys():
            self.patch_list=[2,3]
            print('do not assign num_patch , set default:',self.patch_list)
        else:
            self.patch_list=args.patch_list
        if 'patch_ratio' not in vars(args).keys():
            self.patch_ratio = 2
            print('do not assign patch_ratio, set as default:',self.patch_ratio)
        else:
            self.patch_ratio=args.patch_ratio
        # Assign consecutive integer labels in order of first appearance.
        for l in lines:
            name, wnid = l.split(',')
            path = osp.join(IMAGE_PATH, name)
            if wnid not in self.wnids:
                self.wnids.append(wnid)
                lb += 1
            data.append(path)
            label.append(lb)
        self.data = data#data path of all data
        self.label = label #label of all data
        self.num_class = len(set(label))
        # Eval splits use deterministic transforms; train adds random flips.
        if setname=='val' or setname=='test':
            image_size = 84
            self.transform = transforms.Compose([
                transforms.Resize([image_size,image_size]),
                transforms.ToTensor(),
                transforms.Normalize(np.array([x / 255.0 for x in [125.3, 123.0, 113.9]]),
                                     np.array([x / 255.0 for x in [63.0, 62.1, 66.7]])) ])
        elif setname=='train':
            image_size = 84
            self.transform = transforms.Compose([
                transforms.Resize([image_size,image_size]),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                transforms.Normalize(np.array([x / 255.0 for x in [125.3, 123.0, 113.9]]),
                                     np.array([x / 255.0 for x in [63.0, 62.1, 66.7]]))
            ])
        else:
            raise ValueError('no such set')

    def __len__(self):
        return len(self.data)

    def get_grid_location(self,size, ratio, num_grid):
        '''
        :param size: size of the height/width
        :param ratio: generate grid size/ even divided grid size
        :param num_grid: number of grid
        :return: a list containing the coordinate of the grid
        '''
        raw_grid_size = int(size / num_grid)
        enlarged_grid_size = int(size / num_grid * ratio)
        center_location = raw_grid_size // 2
        location_list = []
        # Each grid cell keeps its center but is enlarged by `ratio`,
        # clamped to the image bounds.
        for i in range(num_grid):
            location_list.append((max(0, center_location - enlarged_grid_size // 2),
                                  min(size, center_location + enlarged_grid_size // 2)))
            center_location = center_location + raw_grid_size
        return location_list

    def get_pyramid(self,img,num_patch):
        '''Crop an num_patch x num_patch grid of (transformed) patches from img.'''
        # Train uses a random enlargement ratio in [1, 3); eval is fixed.
        if self.setname == 'val' or self.setname == 'test':
            num_grid=num_patch
            grid_ratio=self.patch_ratio
        elif self.setname=='train':
            num_grid=num_patch
            grid_ratio=1+2*random.random()
        else:
            raise ValueError('Unkown set')
        w, h = img.size
        grid_locations_w=self.get_grid_location(w,grid_ratio,num_grid)
        grid_locations_h=self.get_grid_location(h,grid_ratio,num_grid)
        patches_list=[]
        # Row-major order: outer loop over rows (height), inner over columns.
        for i in range(num_grid):
            for j in range(num_grid):
                patch_location_w=grid_locations_w[j]
                patch_location_h=grid_locations_h[i]
                left_up_corner_w=patch_location_w[0]
                left_up_corner_h=patch_location_h[0]
                right_down_cornet_w=patch_location_w[1]
                right_down_cornet_h = patch_location_h[1]
                patch=img.crop((left_up_corner_w,left_up_corner_h,right_down_cornet_w,right_down_cornet_h))
                patch=self.transform(patch)
                patches_list.append(patch)
        return patches_list

    def __getitem__(self, i):# return the ith data in the set.
        path, label = self.data[i], self.label[i]
        image=Image.open(path).convert('RGB')
        patch_list=[]
        # Concatenate the pyramids for every configured grid size.
        for num_patch in self.patch_list:
            patches=self.get_pyramid(image,num_patch)
            patch_list.extend(patches)
        patch_list=torch.stack(patch_list,dim=0)
        return patch_list, label
# No standalone behavior; this module is meant to be imported.
if __name__ == '__main__':
    pass
431914 | import argparse
import hashlib
from functools import partial
import aioredis
async def redis_create_connection(connection_string: str):
    """Open an aioredis pool and make sure the RediSearch 's3_index' exists.

    An 'Index already exists' error is treated as success; any other
    failure from FT.CREATE is re-raised.
    """
    connection = await aioredis.create_redis_pool(connection_string)
    try:
        await connection.execute(
            "FT.CREATE",
            "s3_index",
            "SCHEMA",
            "bucket",
            "TEXT",
            "filename",
            "TEXT",
            "content",
            "TEXT",
            "WEIGHT",
            "5.0"
        )
    except Exception as exc:
        if "Index already exists" not in str(exc):
            raise
    return connection
async def redis_add_document(connection,
                             bucket_name: str,
                             object_path: str,
                             content: bytes):
    """Index one S3 object in the 's3_index' RediSearch index.

    The document key is the SHA-512 of bucket+path; duplicate documents
    are silently ignored, other insertion failures are printed.
    """
    doc_key = hashlib.sha512(
        f"{bucket_name}{object_path}".encode("utf-8")
    ).hexdigest()
    try:
        await connection.execute(
            "FT.ADD",
            "s3_index",
            doc_key,
            "1.0",
            "FIELDS",
            "bucket",
            bucket_name,
            "filename",
            object_path,
            "content",
            content
        )
    except Exception as exc:
        message = str(exc)
        if "Document already exists" not in message:
            print(f" !> Insertion error: {message}")
# Public API of this module.
__all__ = ("redis_add_document", "redis_create_connection")
|
431916 | from smol_evm.constants import MAX_UINT256
from smol_evm.context import ExecutionContext, Calldata
from smol_evm.opcodes import CALLDATALOAD, CALLDATASIZE
from shared import with_stack_contents, with_calldata
import pytest
@pytest.fixture
def calldata() -> Calldata:
    """Fresh, empty Calldata instance for each test."""
    return Calldata()
@pytest.fixture
def context() -> ExecutionContext:
    """Fresh ExecutionContext (empty stack and calldata) for each test."""
    return ExecutionContext()
def test_simple_calldataload(context):
    """CALLDATALOAD at offset 0 reads 32 calldata bytes as one big-endian word."""
    ctx = with_calldata(context, range(32))
    ctx.stack.push(0)
    CALLDATALOAD.execute(ctx)
    word = ctx.stack.pop()
    assert word == 0x0102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f
def test_empty_calldataload(context):
    """With no calldata, CALLDATALOAD pushes a zero word."""
    context.stack.push(0)
    CALLDATALOAD.execute(context)
    top = context.stack.pop()
    assert top == 0
def test_calldataload_uint256_overflow(context):
    """Loading at the maximal 256-bit offset must yield zero, not wrap around."""
    # bytecode equivalent:
    # 7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff35
    context.stack.push(MAX_UINT256)
    CALLDATALOAD.execute(context)
    assert context.stack.pop() == 0
def test_calldatasize_empty(context):
    """CALLDATASIZE on an empty context pushes 0."""
    CALLDATASIZE.execute(context)
    size = context.stack.pop()
    assert size == 0
def test_calldatasize_one(context):
    """CALLDATASIZE reports a single-byte calldata as size 1."""
    ctx = with_calldata(context, [0])
    CALLDATASIZE.execute(ctx)
    assert context.stack.pop() == 1
|
431920 | import numpy as np
from brancher.standard_variables import DirichletVariable, GeometricVariable, Chi2Variable, \
GumbelVariable, HalfCauchyVariable, HalfNormalVariable, NegativeBinomialVariable, PoissonVariable, StudentTVariable, UniformVariable, BernoulliVariable
## Distributions and samples ##
# Smoke test: draw 2 samples from each standard variable type.
a = DirichletVariable(concentration=np.ones((10, 10)), name="a")
print(a.get_sample(2))
b = Chi2Variable(3, "b")
print(b.get_sample(2))
c = GeometricVariable(logits=0, name="c")
print(c.get_sample(2))
d = GumbelVariable(loc=0, scale=1, name="d")
print(d.get_sample(2))
e = HalfCauchyVariable(scale=1, name="e")
print(e.get_sample(2))
# NOTE(review): "f" duplicates "e" (both HalfCauchy with scale=1);
# possibly a different distribution was intended here — confirm.
f = HalfCauchyVariable(scale=1, name="f")
print(f.get_sample(2))
g = HalfNormalVariable(scale=1, name="g")
print(g.get_sample(2))
h = NegativeBinomialVariable(1, logits=0, name="h")
print(h.get_sample(2))
i = PoissonVariable(1, name="i")
print(i.get_sample(2))
j = StudentTVariable(1, 0, 1, name="j")
print(j.get_sample(2))
l = UniformVariable(1, 2, name="l")
print(l.get_sample(2))
m = BernoulliVariable(probs=0.5, name="m")
print(m.get_sample(2))
## Moments ##
print("Moments :", m.distribution.get_moments(0.1, 5, **{"probs": 0.1}))
## Taylor ##
#print("Taylor :", m.distribution.get_log_p_taylor_expansion(0, 5, **{"probs": 0.1}))
|
431925 | import os
from datetime import datetime
from typing import Any, Optional
def load_file(
    file_path: Optional[str] = None, mode: str = "r", loader: Any = None
) -> Any:
    """
    Safely load a file.

    :param file_path: The path to the file to load.
    :type file_path: Optional[str]
    :param mode: The mode to use when opening the file., defaults to "r"
    :type mode: str, optional
    :param loader: Optional callable applied to the open file handle
        (e.g. ``json.load``); when omitted the raw contents are returned.
    :type loader: Any, optional
    :return: The file contents, or None if the path is missing or not a str.
    :rtype: Any
    """
    if not isinstance(file_path, str):
        return None
    try:
        # EAFP: open directly instead of exists()+open() to avoid the
        # check-then-use race (and one extra stat call).
        with open(file_path, mode) as file:
            return loader(file) if loader else file.read()
    except FileNotFoundError:
        return None
def save_file(
    file_path: Optional[str] = None,
    content: Any = None,
    mode: str = "w",
    encoding: str = "utf-8",
    newline: str = "\n",
    writer: Any = None,
) -> None:
    """
    Save a file.

    :param file_path: The path to the file to save.
    :type file_path: Optional[str]
    :param content: The content to save.
    :type content: Any
    :param mode: The mode to use when opening the file., defaults to "w"
    :type mode: str, optional
    :param encoding: The encoding to use when writing the file, defaults to "utf-8"
    :type encoding: str, optional
    :param newline: The newline character to use when writing the file, defaults to "\n"
    :type newline: str, optional
    :param writer: Optional callable ``writer(content, file)`` used instead of
        ``file.write(content)`` (e.g. ``json.dump``).
    :type writer: Any, optional
    """
    if not isinstance(file_path, str):
        return None
    # Fix: the old check was `mode == "wb"`, so other binary modes ("ab",
    # "r+b", ...) crashed because binary files reject encoding/newline kwargs.
    open_kwargs = {} if "b" in mode else {"encoding": encoding, "newline": newline}
    with open(file_path, mode, **open_kwargs) as file:
        if writer:
            writer(content, file)
        else:
            file.write(content)
def create_timestamps_path(
    directory: str,
    file_name: str,
    timestamp: Optional[datetime] = None,
    dry_run: bool = False,
) -> str:
    """Return ``directory/<day-Month-Year>/<H-M-S-micro>/file_name``.

    Uses the current time when *timestamp* is omitted; the intermediate
    directories are created unless *dry_run* is set.
    """
    if timestamp is None:
        timestamp = datetime.now()
    day_part = timestamp.strftime("%d-%B-%Y")
    time_part = timestamp.strftime("%H-%M-%S-%f")
    dir_path = os.path.join(directory, day_part, time_part)
    if not dry_run:
        os.makedirs(dir_path, exist_ok=True)
    return os.path.join(dir_path, file_name)
|
431946 | import tensorflow as tf
import numpy as np
from .NeuronLayer import *
from .DenseLayer import *
#inverse softplus transformation
def softplus_inverse(x):
    """Inverse of softplus, i.e. log(exp(x) - 1), in a numerically stable form."""
    correction = np.log(-np.expm1(-x))
    return x + correction
#radial basis function expansion
class RBFLayer(NeuronLayer):
    """Expands scalar distances D into K radial-basis-function features.

    Centers and widths are trainable TF variables; a softplus transform keeps
    both strictly positive, and a smooth polynomial cutoff forces the response
    to zero beyond `cutoff`.
    """

    def __str__(self):
        return "radial_basis_function_layer"+super().__str__()

    def __init__(self, K, cutoff, scope=None, dtype=tf.float32):
        """
        K: number of radial basis functions.
        cutoff: distance beyond which the layer's output is forced to zero.
        scope: TF variable scope under which the trainable parameters live.
        """
        super().__init__(1, K, None)
        self._K = K
        self._cutoff = cutoff
        with tf.variable_scope(scope):
            #initialize centers
            # Centers are placed linearly in exp(-D) space over [exp(-cutoff), 1],
            # then mapped through the inverse softplus so that the softplus
            # applied below recovers positive center values.
            centers = softplus_inverse(np.linspace(1.0,np.exp(-cutoff),K))
            self._centers = tf.nn.softplus(tf.Variable(np.asarray(centers), name="centers", dtype=dtype))
            tf.summary.histogram("rbf_centers", self.centers)
            #initialize widths (inverse softplus transformation is applied, such that softplus can be used to guarantee positive values)
            widths = [softplus_inverse((0.5/((1.0-np.exp(-cutoff))/K))**2)]*K
            self._widths = tf.nn.softplus(tf.Variable(np.asarray(widths), name="widths", dtype=dtype))
            tf.summary.histogram("rbf_widths", self.widths)

    @property
    def K(self):
        # Number of basis functions.
        return self._K

    @property
    def cutoff(self):
        # Cutoff distance beyond which the output is zero.
        return self._cutoff

    @property
    def centers(self):
        # Positive (softplus-transformed) RBF centers in exp(-D) space.
        return self._centers

    @property
    def widths(self):
        # Positive (softplus-transformed) RBF widths.
        return self._widths

    #cutoff function that ensures a smooth cutoff
    def cutoff_fn(self, D):
        """Quintic smoothstep: 1 at D=0, 0 for D >= cutoff (1 - 6x^5 + 15x^4 - 10x^3)."""
        x = D/self.cutoff
        x3 = x**3
        x4 = x3*x
        x5 = x4*x
        return tf.where(x < 1, 1 - 6*x5 + 15*x4 - 10*x3, tf.zeros_like(x))

    def __call__(self, D):
        """Return the K RBF features for distances D (a trailing axis of size K is added)."""
        D = tf.expand_dims(D, -1) #necessary for proper broadcasting behaviour
        # Gaussian bumps in exp(-D) space, windowed by the smooth cutoff.
        rbf = self.cutoff_fn(D)*tf.exp(-self.widths*(tf.exp(-D)-self.centers)**2)
        return rbf
|
431958 | from typing import Optional
from pydantic import BaseModel, validator
from pydantic.fields import ModelField
from app.model.recursive_data import RecursiveDataModel, PotentialDataModelKeysMixin
class InvalidEligiblityError(ValueError):
    """Raised whenever an eligibility declaration fails its check."""
def declarations_must_be_set_yes(v):
    """Return *v* if it equals 'yes'; otherwise raise InvalidEligiblityError."""
    if v == 'yes':
        return v
    raise InvalidEligiblityError
def declarations_must_be_set_no(v):
    """Return *v* if it equals 'no'; otherwise raise InvalidEligiblityError."""
    if v == 'no':
        return v
    raise InvalidEligiblityError
class MarriedEligibilityData(BaseModel, PotentialDataModelKeysMixin):
    """Entry step data: the user declared marital status 'married'."""
    # Raw answer from the marital-status question.
    marital_status_eligibility: str

    @validator('marital_status_eligibility')
    def must_be_married(cls, v):
        # Fix: `v not in 'married'` was a substring test, so any substring of
        # 'married' (e.g. 'arr' or the empty string) passed; require equality.
        if v != 'married':
            raise ValueError
        return v
class WidowedEligibilityData(BaseModel, PotentialDataModelKeysMixin):
    """Entry step data: the user declared marital status 'widowed'."""
    # Raw answer from the marital-status question.
    marital_status_eligibility: str

    @validator('marital_status_eligibility')
    def must_be_widowed(cls, v):
        # Fix: `v not in 'widowed'` was a substring test, so any substring of
        # 'widowed' (e.g. 'dow' or the empty string) passed; require equality.
        if v != 'widowed':
            raise ValueError
        return v
class SingleEligibilityData(BaseModel, PotentialDataModelKeysMixin):
    """Entry step data: the user declared marital status 'single'."""
    # Raw answer from the marital-status question.
    marital_status_eligibility: str

    @validator('marital_status_eligibility')
    def must_be_single(cls, v):
        # Fix: `v not in 'single'` was a substring test, so any substring of
        # 'single' (e.g. 'sin' or the empty string) passed; require equality.
        if v != 'single':
            raise ValueError
        return v
class DivorcedEligibilityData(BaseModel, PotentialDataModelKeysMixin):
    """Entry step data: the user declared marital status 'divorced'."""
    # Raw answer from the marital-status question.
    marital_status_eligibility: str

    @validator('marital_status_eligibility')
    def must_be_divorced(cls, v):
        # Fix: `v not in 'divorced'` was a substring test, so any substring of
        # 'divorced' (e.g. 'div' or the empty string) passed; require equality.
        if v != 'divorced':
            raise ValueError
        return v
class SeparatedEligibilityData(RecursiveDataModel):
    """Step data: a married user who has been separated since last year ('yes')."""
    # Link to the previous step of the eligibility chain.
    is_married: Optional[MarriedEligibilityData]
    separated_since_last_year_eligibility: str

    @validator('separated_since_last_year_eligibility')
    def separated_couple_must_be_separated_since_last_year(cls, v):
        return declarations_must_be_set_yes(v)

    @validator('is_married', always=True, check_fields=False)
    def one_previous_field_has_to_be_set(cls, v, values):
        # NOTE(review): zero-arg `super()` inside a pydantic validator relies on
        # the implicit __class__ cell; `cls` is forwarded explicitly, so the
        # parent hook on RecursiveDataModel presumably expects (cls, v, values)
        # — confirm against RecursiveDataModel. Same pattern in all subclasses.
        return super().one_previous_field_has_to_be_set(cls, v, values)

class NotSeparatedEligibilityData(RecursiveDataModel):
    """Step data: a married user who is NOT separated since last year ('no')."""
    is_married: Optional[MarriedEligibilityData]
    separated_since_last_year_eligibility: str

    @validator('separated_since_last_year_eligibility')
    def married_couples_are_not_separated_since_last_year(cls, v):
        return declarations_must_be_set_no(v)

    @validator('is_married', always=True, check_fields=False)
    def one_previous_field_has_to_be_set(cls, v, values):
        return super().one_previous_field_has_to_be_set(cls, v, values)

class SeparatedLivedTogetherEligibilityData(RecursiveDataModel):
    """Step data: a separated couple that still lived together ('yes')."""
    is_separated: Optional[SeparatedEligibilityData]
    separated_lived_together_eligibility: str

    @validator('separated_lived_together_eligibility')
    def separated_couple_must_have_lived_together(cls, v):
        return declarations_must_be_set_yes(v)

    @validator('is_separated', always=True, check_fields=False)
    def one_previous_field_has_to_be_set(cls, v, values):
        return super().one_previous_field_has_to_be_set(cls, v, values)

class SeparatedNotLivedTogetherEligibilityData(RecursiveDataModel):
    """Step data: a separated couple that did NOT live together ('no')."""
    is_separated: Optional[SeparatedEligibilityData]
    separated_lived_together_eligibility: str

    @validator('separated_lived_together_eligibility')
    def married_couples_must_not_have_lived_together(cls, v):
        return declarations_must_be_set_no(v)

    @validator('is_separated', always=True, check_fields=False)
    def one_previous_field_has_to_be_set(cls, v, values):
        return super().one_previous_field_has_to_be_set(cls, v, values)

class SeparatedJointTaxesEligibilityData(RecursiveDataModel):
    """Step data: a separated cohabiting couple filing joint taxes ('yes')."""
    separated_lived_together: Optional[SeparatedLivedTogetherEligibilityData]
    separated_joint_taxes_eligibility: str

    @validator('separated_joint_taxes_eligibility')
    def separated_couple_must_do_joint_taxes(cls, v):
        return declarations_must_be_set_yes(v)

    @validator('separated_lived_together', always=True, check_fields=False)
    def one_previous_field_has_to_be_set(cls, v, values):
        return super().one_previous_field_has_to_be_set(cls, v, values)

class SeparatedNoJointTaxesEligibilityData(RecursiveDataModel):
    """Step data: a separated cohabiting couple NOT filing joint taxes ('no')."""
    separated_lived_together: Optional[SeparatedLivedTogetherEligibilityData]
    separated_joint_taxes_eligibility: str

    @validator('separated_joint_taxes_eligibility')
    def married_couples_must_not_do_joint_taxes(cls, v):
        return declarations_must_be_set_no(v)

    @validator('separated_lived_together', always=True, check_fields=False)
    def one_previous_field_has_to_be_set(cls, v, values):
        return super().one_previous_field_has_to_be_set(cls, v, values)

class MarriedJointTaxesEligibilityData(RecursiveDataModel):
    """Step data: a non-separated married couple filing joint taxes ('yes')."""
    not_separated: Optional[NotSeparatedEligibilityData]
    joint_taxes_eligibility: str

    @validator('joint_taxes_eligibility')
    def married_couples_must_do_joint_taxes(cls, v):
        return declarations_must_be_set_yes(v)

    @validator('not_separated', always=True, check_fields=False)
    def one_previous_field_has_to_be_set(cls, v, values):
        return super().one_previous_field_has_to_be_set(cls, v, values)
class AlimonyMarriedEligibilityData(RecursiveDataModel):
    """Step data (married branch): user neither receives nor pays alimony ('no')."""
    married_joint_taxes: Optional[MarriedJointTaxesEligibilityData]
    separated_joint_taxes: Optional[SeparatedJointTaxesEligibilityData]
    alimony_eligibility: str

    @validator('alimony_eligibility')
    def do_not_receive_or_pay_alimony(cls, v):
        return declarations_must_be_set_no(v)

    @validator('separated_joint_taxes', always=True, check_fields=False)
    def one_previous_field_has_to_be_set(cls, v, values):
        return super().one_previous_field_has_to_be_set(cls, v, values)

class UserANoElsterAccountEligibilityData(RecursiveDataModel):
    """Step data: user A has no Elster account ('no')."""
    alimony: Optional[AlimonyMarriedEligibilityData]
    user_a_has_elster_account_eligibility: str

    @validator('user_a_has_elster_account_eligibility')
    def must_not_have_elster_account(cls, v):
        return declarations_must_be_set_no(v)

    @validator('alimony', always=True, check_fields=False)
    def one_previous_field_has_to_be_set(cls, v, values):
        return super().one_previous_field_has_to_be_set(cls, v, values)

class UserAElsterAccountEligibilityData(RecursiveDataModel):
    """Step data: user A does have an Elster account ('yes')."""
    alimony: Optional[AlimonyMarriedEligibilityData]
    user_a_has_elster_account_eligibility: str

    @validator('user_a_has_elster_account_eligibility')
    def has_elster_account(cls, v):
        return declarations_must_be_set_yes(v)

    @validator('alimony', always=True, check_fields=False)
    def one_previous_field_has_to_be_set(cls, v, values):
        return super().one_previous_field_has_to_be_set(cls, v, values)

class UserBNoElsterAccountEligibilityData(RecursiveDataModel):
    """Step data: user B has no Elster account ('no')."""
    user_a_has_elster_account: Optional[UserAElsterAccountEligibilityData]
    user_b_has_elster_account_eligibility: str

    @validator('user_b_has_elster_account_eligibility')
    def user_b_must_not_have_elster_account(cls, v):
        return declarations_must_be_set_no(v)

    @validator('user_a_has_elster_account', always=True, check_fields=False)
    def one_previous_field_has_to_be_set(cls, v, values):
        return super().one_previous_field_has_to_be_set(cls, v, values)

class UserBElsterAccountEligibilityData(RecursiveDataModel):
    """Step data: user B does have an Elster account ('yes')."""
    user_a_has_elster_account: Optional[UserAElsterAccountEligibilityData]
    user_b_has_elster_account_eligibility: str

    @validator('user_b_has_elster_account_eligibility')
    def user_b_must_have_elster_account(cls, v):
        return declarations_must_be_set_yes(v)

    @validator('user_a_has_elster_account', always=True, check_fields=False)
    def one_previous_field_has_to_be_set(cls, v, values):
        return super().one_previous_field_has_to_be_set(cls, v, values)

class DivorcedJointTaxesEligibilityData(RecursiveDataModel):
    """Step data: a divorced user filing separate taxes ('no')."""
    familienstand: Optional[DivorcedEligibilityData]
    joint_taxes_eligibility: str

    @validator('joint_taxes_eligibility')
    # NOTE(review): the `values` parameter is unused here, unlike the sibling
    # validators; harmless but possibly a leftover — confirm.
    def divorced_couples_must_do_separate_taxes(cls, v, values):
        return declarations_must_be_set_no(v)

    @validator('familienstand', always=True, check_fields=False)
    def one_previous_field_has_to_be_set(cls, v, values):
        return super().one_previous_field_has_to_be_set(cls, v, values)
class AlimonyEligibilityData(RecursiveDataModel):
    """Step data (non-married branches): user neither receives nor pays alimony ('no')."""
    # Exactly one of these previous-step links is expected to be present.
    is_widowed: Optional[WidowedEligibilityData]
    is_single: Optional[SingleEligibilityData]
    divorced_joint_taxes: Optional[DivorcedJointTaxesEligibilityData]
    no_separated_lived_together: Optional[SeparatedNotLivedTogetherEligibilityData]
    no_separated_joint_taxes: Optional[SeparatedNoJointTaxesEligibilityData]
    alimony_eligibility: str

    @validator('alimony_eligibility')
    def do_not_receive_or_pay_alimony(cls, v):
        return declarations_must_be_set_no(v)

    @validator('no_separated_joint_taxes', always=True, check_fields=False)
    def one_previous_field_has_to_be_set(cls, v, values):
        return super().one_previous_field_has_to_be_set(cls, v, values)

class SingleUserNoElsterAccountEligibilityData(RecursiveDataModel):
    """Step data (single-user flow): user A has no Elster account ('no')."""
    no_alimony: Optional[AlimonyEligibilityData]
    user_a_has_elster_account_eligibility: str

    @validator('user_a_has_elster_account_eligibility')
    def must_not_have_elster_account(cls, v):
        return declarations_must_be_set_no(v)

    @validator('no_alimony', always=True, check_fields=False)
    def one_previous_field_has_to_be_set(cls, v, values):
        return super().one_previous_field_has_to_be_set(cls, v, values)

class SingleUserElsterAccountEligibilityData(RecursiveDataModel):
    """Step data (single-user flow): user A does have an Elster account ('yes')."""
    no_alimony: Optional[AlimonyEligibilityData]
    user_a_has_elster_account_eligibility: str

    @validator('user_a_has_elster_account_eligibility')
    def must_have_elster_account(cls, v):
        return declarations_must_be_set_yes(v)

    @validator('no_alimony', always=True, check_fields=False)
    def one_previous_field_has_to_be_set(cls, v, values):
        return super().one_previous_field_has_to_be_set(cls, v, values)

class PensionEligibilityData(RecursiveDataModel):
    """Step data: user receives a pension ('yes'); merges all Elster-account branches."""
    single_user_a_has_elster_account: Optional[SingleUserElsterAccountEligibilityData]
    single_user_has_no_elster_account: Optional[SingleUserNoElsterAccountEligibilityData]
    user_a_has_no_elster_account: Optional[UserANoElsterAccountEligibilityData]
    user_b_has_no_elster_account: Optional[UserBNoElsterAccountEligibilityData]
    user_b_has_elster_account: Optional[UserBElsterAccountEligibilityData]
    pension_eligibility: str

    @validator('pension_eligibility')
    def has_to_get_pension(cls, v):
        return declarations_must_be_set_yes(v)

    @validator('user_b_has_elster_account', always=True, check_fields=False)
    def one_previous_field_has_to_be_set(cls, v, values):
        return super().one_previous_field_has_to_be_set(cls, v, values)

class InvestmentIncomeEligibilityData(RecursiveDataModel):
    """Step data: user has investment income ('yes')."""
    has_pension: Optional[PensionEligibilityData]
    investment_income_eligibility: str

    @validator('investment_income_eligibility')
    # NOTE(review): validator name says "pension" but it checks investment
    # income — presumably a copy/paste leftover; renaming may affect pydantic
    # validator registration, so left untouched.
    def has_to_get_pension(cls, v):
        return declarations_must_be_set_yes(v)

    @validator('has_pension', always=True, check_fields=False)
    def one_previous_field_has_to_be_set(cls, v, values):
        return super().one_previous_field_has_to_be_set(cls, v, values)

class MinimalInvestmentIncome(RecursiveDataModel):
    """Step data: investment income is only minimal ('yes')."""
    has_investment_income: Optional[InvestmentIncomeEligibilityData]
    minimal_investment_income_eligibility: str

    @validator('minimal_investment_income_eligibility')
    def has_only_minimal_invesment_income(cls, v):
        return declarations_must_be_set_yes(v)

    @validator('has_investment_income', always=True, check_fields=False)
    def one_previous_field_has_to_be_set(cls, v, values):
        return super().one_previous_field_has_to_be_set(cls, v, values)

class MoreThanMinimalInvestmentIncome(RecursiveDataModel):
    """Step data: investment income exceeds the minimal threshold ('no')."""
    has_investment_income: Optional[InvestmentIncomeEligibilityData]
    minimal_investment_income_eligibility: str

    @validator('minimal_investment_income_eligibility')
    def has_more_than_minimal_investment_income(cls, v):
        return declarations_must_be_set_no(v)

    @validator('has_investment_income', always=True, check_fields=False)
    def one_previous_field_has_to_be_set(cls, v, values):
        return super().one_previous_field_has_to_be_set(cls, v, values)

class NoTaxedInvestmentIncome(RecursiveDataModel):
    """Step data: investment income was already taxed at source ('yes').

    NOTE(review): the class name suggests "no taxed income" but the validator
    requires 'yes' — confirm the intended semantics against the flow.
    """
    has_more_than_minimal_inv_income: Optional[MoreThanMinimalInvestmentIncome]
    taxed_investment_income_eligibility: str

    @validator('taxed_investment_income_eligibility')
    def has_to_have_taxed_investment_income(cls, v):
        return declarations_must_be_set_yes(v)

    @validator('has_more_than_minimal_inv_income', always=True, check_fields=False)
    def one_previous_field_has_to_be_set(cls, v, values):
        return super().one_previous_field_has_to_be_set(cls, v, values)

class CheaperCheckEligibilityData(RecursiveDataModel):
    """Step data: user does NOT request the 'cheaper check' ('no')."""
    has_taxed_investment_income: Optional[NoTaxedInvestmentIncome]
    cheaper_check_eligibility: str

    @validator('cheaper_check_eligibility')
    def has_to_want_no_cheaper_check(cls, v):
        return declarations_must_be_set_no(v)

    @validator('has_taxed_investment_income', always=True, check_fields=False)
    def one_previous_field_has_to_be_set(cls, v, values):
        return super().one_previous_field_has_to_be_set(cls, v, values)
class NoInvestmentIncomeEligibilityData(RecursiveDataModel):
    """Step data: user has no investment income ('no')."""
    has_pension: Optional[PensionEligibilityData]
    investment_income_eligibility: str

    @validator('investment_income_eligibility')
    def has_no_investment_income(cls, v):
        return declarations_must_be_set_no(v)

    @validator('has_pension', always=True, check_fields=False)
    def one_previous_field_has_to_be_set(cls, v, values):
        return super().one_previous_field_has_to_be_set(cls, v, values)

class NoEmploymentIncomeEligibilityData(RecursiveDataModel):
    """Step data: user has no employment income ('no')."""
    only_taxed_inv_income: Optional[MinimalInvestmentIncome]
    wants_no_cheaper_check: Optional[CheaperCheckEligibilityData]
    has_no_investment_income: Optional[NoInvestmentIncomeEligibilityData]
    employment_income_eligibility: str

    @validator('employment_income_eligibility')
    def has_no_employment_income(cls, v):
        return declarations_must_be_set_no(v)

    @validator('has_no_investment_income', always=True, check_fields=False)
    def one_previous_field_has_to_be_set(cls, v, values):
        return super().one_previous_field_has_to_be_set(cls, v, values)

class EmploymentIncomeEligibilityData(RecursiveDataModel):
    """Step data: user does have employment income ('yes')."""
    wants_no_cheaper_check: Optional[CheaperCheckEligibilityData]
    has_no_investment_income: Optional[NoInvestmentIncomeEligibilityData]
    only_taxed_inv_income: Optional[MinimalInvestmentIncome]
    employment_income_eligibility: str

    @validator('employment_income_eligibility')
    def has_employment_income(cls, v):
        return declarations_must_be_set_yes(v)

    @validator('only_taxed_inv_income', always=True, check_fields=False)
    def one_previous_field_has_to_be_set(cls, v, values):
        return super().one_previous_field_has_to_be_set(cls, v, values)

class MarginalEmploymentEligibilityData(RecursiveDataModel):
    """Step data: employment income is only marginal ('yes')."""
    has_other_empl_income: Optional[EmploymentIncomeEligibilityData]
    marginal_employment_eligibility: str

    @validator('marginal_employment_eligibility')
    # NOTE(review): validator name mentions investment income but validates the
    # marginal-employment answer — likely a copy/paste leftover; left as is.
    def has_only_taxed_investment_income(cls, v):
        return declarations_must_be_set_yes(v)

    @validator('has_other_empl_income', always=True, check_fields=False)
    def one_previous_field_has_to_be_set(cls, v, values):
        return super().one_previous_field_has_to_be_set(cls, v, values)

class OtherIncomeEligibilityData(RecursiveDataModel):
    """Step data: user has no other income ('no')."""
    no_employment_income: Optional[NoEmploymentIncomeEligibilityData]
    only_marginal_empl_income: Optional[MarginalEmploymentEligibilityData]
    other_income_eligibility: str

    @validator('other_income_eligibility')
    def has_only_taxed_investment_income(cls, v):
        return declarations_must_be_set_no(v)

    @validator('only_marginal_empl_income', always=True, check_fields=False)
    def one_previous_field_has_to_be_set(cls, v, values):
        return super().one_previous_field_has_to_be_set(cls, v, values)
class ForeignCountrySuccessEligibility(RecursiveDataModel):
    """
    This is the only point where we have additional fields of previous steps on a step model.
    That's because the ForeignCountry step is the last step of the flow and needs to decide which result page is
    displayed: 'success' or 'maybe'.
    """
    has_no_other_income: Optional[OtherIncomeEligibilityData]
    foreign_country_eligibility: str
    user_a_has_elster_account_eligibility: str
    user_b_has_elster_account_eligibility: Optional[str]

    @validator('user_b_has_elster_account_eligibility', always=True)
    def users_must_not_all_have_elster_accounts(cls, v, values):
        """Require at least one user without an Elster account."""
        user_a_has_elster_account = values.get('user_a_has_elster_account_eligibility')
        user_b_has_elster_account = v
        # One person case
        if not user_b_has_elster_account:
            declarations_must_be_set_no(user_a_has_elster_account)
        else:
            # Two person case: it suffices that one of the two answered 'no'.
            try:
                declarations_must_be_set_no(user_a_has_elster_account)
            except InvalidEligiblityError:
                # Fix: was a bare `except:` which swallowed *every* exception
                # (including KeyboardInterrupt/SystemExit); catch only the
                # validation error raised by declarations_must_be_set_no.
                declarations_must_be_set_no(user_b_has_elster_account)
        return user_b_has_elster_account

    @validator('foreign_country_eligibility')
    def has_only_taxed_investment_income(cls, v):
        # 'success' requires the foreign-country answer to be 'no'.
        return declarations_must_be_set_no(v)

    @validator('has_no_other_income', always=True, check_fields=False)
    def one_previous_field_has_to_be_set(cls, v, values):
        return super().one_previous_field_has_to_be_set(cls, v, values)
class ForeignCountryMaybeEligibility(RecursiveDataModel):
    """
    This is the only point where we have additional fields of previous steps on a step model.
    That's because the ForeignCountry step is the last step of the flow and needs to decide which result page is
    displayed: 'success' or 'maybe'.
    """
    has_no_other_income: Optional[OtherIncomeEligibilityData]
    foreign_country_eligibility: str
    user_a_has_elster_account_eligibility: str
    user_b_has_elster_account_eligibility: Optional[str]

    @validator('foreign_country_eligibility')
    # NOTE(review): validator name mentions investment income but validates the
    # foreign-country answer — likely a copy/paste leftover; left as is.
    def has_only_taxed_investment_income(cls, v):
        return declarations_must_be_set_no(v)

    @validator('user_a_has_elster_account_eligibility')
    def has_user_a_elster_account_eligibility(cls,v):
        # 'maybe' branch: user A answered 'yes' (has an Elster account).
        return declarations_must_be_set_yes(v)

    @validator('user_b_has_elster_account_eligibility')
    def has_user_b_elster_account_eligibility(cls,v):
        # 'maybe' branch: user B (when present) also answered 'yes'.
        return declarations_must_be_set_yes(v)

    @validator('has_no_other_income', always=True, check_fields=False)
    def one_previous_field_has_to_be_set(cls, v, values):
        return super().one_previous_field_has_to_be_set(cls, v, values)
|
# Re-export the DAC base interface unconditionally.
from qupulse.hardware.dacs.dac_base import *
# The Alazar backend is optional; skip it silently when its import fails
# (e.g. its driver dependency is not installed).
try:
    from qupulse.hardware.dacs.alazar import *
except ImportError:
    pass
|
431967 | import numpy as np
import os.path as osp
from torch_geometric.utils.num_nodes import maybe_num_nodes
import torch
import unittest
from pd_mesh_net.nn import DualPrimalEdgePooling
from pd_mesh_net.utils import create_graphs, create_dual_primal_batch
current_dir = osp.dirname(__file__)
class TestDualEdgePooling(unittest.TestCase):
    def test_large_simple_mesh_config_A_no_output_self_loops(self):
        """Config-A mesh, pooled graph without output self-loops.

        Exercises the three pooling criteria (edge count, edge fraction and
        attention threshold) for 1..3 attention heads and for both
        attention-coefficient orderings.
        """
        # In all cases, we aim at pooling the following pairs of primal edges,
        # out of the 21 in the mesh:
        # - 0->10 / 10->0;
        # - 6->7 / 7->6;
        # - 7->11 / 11->7;
        # - 10->11 / 11->10;
        # - 1->5 / 5->1;
        # - 2->3 / 3->2;
        # - 3->8 / 8->3;
        # - 4->13 / 13->4.
        # All the three experiments are repeated by considering once pooling
        # based on decreasing attention coefficients and in the other pooling
        # based on increasing attention coefficient (cf.
        # `pd_mesh_net.nn.pool.DualPrimalEdgePooling`).
        for use_decreasing_attention_coefficient in [True, False]:
            # Test also with more than one attention head.
            for num_heads in range(1, 4):
                # Test with number of primal edges to keep.
                self.__test_large_simple_mesh_config_A_no_output_self_loops(
                    num_primal_edges_to_keep=21 - 8,
                    use_decreasing_attention_coefficient=
                    use_decreasing_attention_coefficient,
                    num_heads=num_heads)
                # Test with fraction of primal edges to keep. Pooling the top-8
                # out of the 21 primal-edge pairs corresponds to keeping a
                # fraction of the primal edges around (21 - 8) / 21 = 0.6190...
                # Since the pooling layer internally finds the number of primal
                # edges to pool as
                # floor((1 - fraction_primal_edges_to_keep) * num_edges) =
                # floor((1 - fraction_primal_edges_to_keep) * 21) = 8, one needs
                # to have:
                # 8 <= (1 - fraction_primal_edges_to_keep) * 21 < 9;
                # <=> -13 <= -21* fraction_primal_edges_to_keep < -12;
                # <=> 12 / 21 < fraction_primal_edges_to_keep <= 13/21;
                # <=> 0.5714... < fraction_primal_edges_to_keep <= 0.6190...;
                # e.g., 0.5715 < fraction_primal_edges_to_keep < 0.6190.
                self.__test_large_simple_mesh_config_A_no_output_self_loops(
                    fraction_primal_edges_to_keep=0.619,
                    use_decreasing_attention_coefficient=
                    use_decreasing_attention_coefficient,
                    num_heads=num_heads)
                # Test with minimal attention coefficient.
                self.__test_large_simple_mesh_config_A_no_output_self_loops(
                    primal_att_coeff_threshold=0.5,
                    use_decreasing_attention_coefficient=
                    use_decreasing_attention_coefficient,
                    num_heads=num_heads)

    def test_large_simple_mesh_config_A_no_output_self_loops_nonconsecutive(
            self):
        """Config-A mesh, no output self-loops, non-consecutive edge indices."""
        # Repeat the experiment by considering once pooling based on decreasing
        # attention coefficients and in the other pooling based on increasing
        # attention coefficient (cf. `pd_mesh_net.nn.pool.DualPrimalEdgePooling`).
        for use_decreasing_attention_coefficient in [True, False]:
            # Test also with more than one attention head.
            for num_heads in range(1, 4):
                self.__test_config_A_no_output_self_loops_nonconsecutive(
                    use_decreasing_attention_coefficient=
                    use_decreasing_attention_coefficient,
                    num_heads=num_heads)
    def test_large_simple_mesh_config_A_with_output_self_loops_nonconsecutive(
            self):
        """Config-A mesh, output self-loops kept, non-consecutive edge indices."""
        # Repeat the experiment by considering once pooling based on decreasing
        # attention coefficients and in the other pooling based on increasing
        # attention coefficient (cf.
        # `pd_mesh_net.nn.pool.DualPrimalEdgePooling`).
        for use_decreasing_attention_coefficient in [True, False]:
            # Test also with more than one attention head.
            for num_heads in range(1, 4):
                self.__test_config_A_with_output_self_loops_nonconsecutive(
                    use_decreasing_attention_coefficient=
                    use_decreasing_attention_coefficient,
                    num_heads=num_heads)

    def test_large_simple_mesh_config_A_with_output_self_loops(self):
        """Config-A mesh, pooled graph with output self-loops.

        Exercises the three pooling criteria (edge count, edge fraction and
        attention threshold) for 1..3 attention heads and for both
        attention-coefficient orderings.
        """
        # In all cases, we aim at pooling the following pairs of primal edges,
        # out of the 21 in the mesh:
        # - 0->10 / 10->0;
        # - 6->7 / 7->6;
        # - 7->11 / 11->7;
        # - 10->11 / 11->10;
        # - 1->5 / 5->1;
        # - 2->3 / 3->2;
        # - 3->8 / 8->3;
        # - 4->13 / 13->4.
        # All the three experiments are repeated by considering once pooling
        # based on decreasing attention coefficients and in the other pooling
        # based on increasing attention coefficient (cf.
        # `pd_mesh_net.nn.pool.DualPrimalEdgePooling`).
        for use_decreasing_attention_coefficient in [True, False]:
            # Test also with more than one attention head.
            for num_heads in range(1, 4):
                # Test with number of primal edges to keep.
                self.__test_large_simple_mesh_config_A_with_output_self_loops(
                    num_primal_edges_to_keep=21 - 8,
                    use_decreasing_attention_coefficient=
                    use_decreasing_attention_coefficient,
                    num_heads=num_heads)
                # Test with fraction of primal edges to keep. Pooling the top-8
                # out of the 21 primal-edge pairs corresponds to keeping a
                # fraction of the primal edges around (21 - 8) / 21 = 0.6190...
                # Since the pooling layer internally finds the number of primal
                # edges to pool as
                # floor((1 - fraction_primal_edges_to_keep) * num_edges) =
                # floor((1 - fraction_primal_edges_to_keep) * 21) = 8, one needs
                # to have:
                # 8 <= (1 - fraction_primal_edges_to_keep) * 21 < 9;
                # <=> -13 <= -21* fraction_primal_edges_to_keep < -12;
                # <=> 12 / 21 < fraction_primal_edges_to_keep <= 13/21;
                # <=> 0.5714... < fraction_primal_edges_to_keep <= 0.6190...;
                # e.g., 0.5715 < fraction_primal_edges_to_keep < 0.6190.
                self.__test_large_simple_mesh_config_A_with_output_self_loops(
                    fraction_primal_edges_to_keep=0.619,
                    use_decreasing_attention_coefficient=
                    use_decreasing_attention_coefficient,
                    num_heads=num_heads)
                # Test with minimal attention coefficient.
                self.__test_large_simple_mesh_config_A_with_output_self_loops(
                    primal_att_coeff_threshold=0.5,
                    use_decreasing_attention_coefficient=
                    use_decreasing_attention_coefficient,
                    num_heads=num_heads)
    def test_large_simple_mesh_config_B_with_output_self_loops(self):
        """Config-B mesh, pooled graph with output self-loops.

        Exercises the three pooling criteria (edge count, edge fraction and
        attention threshold) for 1..3 attention heads and for both
        attention-coefficient orderings.
        """
        # In all cases, we aim at pooling the following pairs of primal edges,
        # out of the 21 in the mesh:
        # - 0->10 / 10->0;
        # - 6->7 / 7->6;
        # - 7->11 / 11->7;
        # - 10->11 / 11->10;
        # - 1->5 / 5->1;
        # - 2->3 / 3->2;
        # - 3->8 / 8->3;
        # - 4->13 / 13->4.
        # All the three experiments are repeated by considering once pooling
        # based on decreasing attention coefficients and in the other pooling
        # based on increasing attention coefficient (cf.
        # `pd_mesh_net.nn.pool.DualPrimalEdgePooling`).
        for use_decreasing_attention_coefficient in [True, False]:
            # Test also with more than one attention head.
            for num_heads in range(1, 4):
                # Test with number of primal edges to keep.
                self.__test_large_simple_mesh_config_B_with_output_self_loops(
                    num_primal_edges_to_keep=21 - 8,
                    use_decreasing_attention_coefficient=
                    use_decreasing_attention_coefficient,
                    num_heads=num_heads)
                # Test with fraction of primal edges to keep. Pooling the top-8
                # out of the 21 primal-edge pairs corresponds to keeping a
                # fraction of the primal edges around (21 - 8) / 21 = 0.6190...
                # Since the pooling layer internally finds the number of primal
                # edges to pool as
                # floor((1 - fraction_primal_edges_to_keep) * num_edges) =
                # floor((1 - fraction_primal_edges_to_keep) * 21) = 8, one needs
                # to have:
                # 8 <= (1 - fraction_primal_edges_to_keep) * 21 < 9;
                # <=> -13 <= -21* fraction_primal_edges_to_keep < -12;
                # <=> 12 / 21 < fraction_primal_edges_to_keep <= 13/21;
                # <=> 0.5714... < fraction_primal_edges_to_keep <= 0.6190...;
                # e.g., 0.5715 < fraction_primal_edges_to_keep < 0.6190.
                self.__test_large_simple_mesh_config_B_with_output_self_loops(
                    fraction_primal_edges_to_keep=0.619,
                    use_decreasing_attention_coefficient=
                    use_decreasing_attention_coefficient,
                    num_heads=num_heads)
                # Test with minimal attention coefficient.
                self.__test_large_simple_mesh_config_B_with_output_self_loops(
                    primal_att_coeff_threshold=0.5,
                    use_decreasing_attention_coefficient=
                    use_decreasing_attention_coefficient,
                    num_heads=num_heads)

    def test_large_simple_mesh_config_B_with_output_self_loops_nonconsecutive(
            self):
        """Config-B mesh, output self-loops kept, non-consecutive edge indices."""
        # Repeat the experiment by considering once pooling based on decreasing
        # attention coefficients and in the other pooling based on increasing
        # attention coefficient (cf.
        # `pd_mesh_net.nn.pool.DualPrimalEdgePooling`).
        for use_decreasing_attention_coefficient in [True, False]:
            # Test also with more than one attention head.
            for num_heads in range(1, 4):
                self.__test_config_B_with_output_self_loops_nonconsecutive(
                    use_decreasing_attention_coefficient=
                    use_decreasing_attention_coefficient,
                    num_heads=num_heads)
    def test_large_simple_mesh_config_C_with_output_self_loops(self):
        """Config-C mesh, pooled graph with output self-loops.

        Exercises the three pooling criteria (edge count, edge fraction and
        attention threshold) for 1..3 attention heads and for both
        attention-coefficient orderings.
        """
        # In all cases, we aim at pooling the following pairs of primal edges,
        # out of the 21 in the mesh:
        # - 0->10 / 10->0;
        # - 6->7 / 7->6;
        # - 7->11 / 11->7;
        # - 10->11 / 11->10;
        # - 1->5 / 5->1;
        # - 2->3 / 3->2;
        # - 3->8 / 8->3;
        # - 4->13 / 13->4.
        # All the three experiments are repeated by considering once pooling
        # based on decreasing attention coefficients and in the other pooling
        # based on increasing attention coefficient (cf.
        # `pd_mesh_net.nn.pool.DualPrimalEdgePooling`).
        for use_decreasing_attention_coefficient in [True, False]:
            # Test also with more than one attention head.
            for num_heads in range(1, 4):
                # Test with number of primal edges to keep.
                self.__test_large_simple_mesh_config_C_with_output_self_loops(
                    num_primal_edges_to_keep=21 - 8,
                    use_decreasing_attention_coefficient=
                    use_decreasing_attention_coefficient,
                    num_heads=num_heads)
                # Test with fraction of primal edges to keep. Pooling the top-8
                # out of the 21 primal-edge pairs corresponds to keeping a
                # fraction of the primal edges around (21 - 8) / 21 = 0.6190...
                # Since the pooling layer internally finds the number of primal
                # edges to pool as
                # floor((1 - fraction_primal_edges_to_keep) * num_edges) =
                # floor((1 - fraction_primal_edges_to_keep) * 21) = 8, one needs
                # to have:
                # 8 <= (1 - fraction_primal_edges_to_keep) * 21 < 9;
                # <=> -13 <= -21* fraction_primal_edges_to_keep < -12;
                # <=> 12 / 21 < fraction_primal_edges_to_keep <= 13/21;
                # <=> 0.5714... < fraction_primal_edges_to_keep <= 0.6190...;
                # e.g., 0.5715 < fraction_primal_edges_to_keep < 0.6190.
                self.__test_large_simple_mesh_config_C_with_output_self_loops(
                    fraction_primal_edges_to_keep=0.619,
                    use_decreasing_attention_coefficient=
                    use_decreasing_attention_coefficient,
                    num_heads=num_heads)
                # Test with minimal attention coefficient.
                self.__test_large_simple_mesh_config_C_with_output_self_loops(
                    primal_att_coeff_threshold=0.5,
                    use_decreasing_attention_coefficient=
                    use_decreasing_attention_coefficient,
                    num_heads=num_heads)

    def test_large_simple_mesh_config_C_with_output_self_loops_nonconsecutive(
            self):
        """Config-C mesh, output self-loops kept, non-consecutive edge indices."""
        # Repeat the experiment by considering once pooling based on decreasing
        # attention coefficients and in the other pooling based on increasing
        # attention coefficient (cf.
        # `pd_mesh_net.nn.pool.DualPrimalEdgePooling`).
        for use_decreasing_attention_coefficient in [True, False]:
            # Test also with more than one attention head.
            for num_heads in range(1, 4):
                self.__test_config_C_with_output_self_loops_nonconsecutive(
                    use_decreasing_attention_coefficient=
                    use_decreasing_attention_coefficient,
                    num_heads=num_heads)
    def __test_large_simple_mesh_config_A_no_output_self_loops(
            self,
            num_primal_edges_to_keep=None,
            fraction_primal_edges_to_keep=None,
            primal_att_coeff_threshold=None,
            use_decreasing_attention_coefficient=True,
            num_heads=1):
        """Tests edge pooling on the large simple mesh, dual configuration A
        (single dual nodes, undirected dual edges), without self-loops in the
        output dual graph.

        Attention coefficients are assigned by hand so that a known set of
        eight primal-edge pairs gets pooled; the resulting primal/dual graphs,
        node features and old-to-new node mappings are then compared against
        the manually-derived ground truth (cf.
        `../../common_data/simple_mesh_large_pool_1.png`).

        Args:
            num_primal_edges_to_keep: Forwarded to `DualPrimalEdgePooling`;
                presumably the number of primal-edge pairs to keep, if not
                None.
            fraction_primal_edges_to_keep: Forwarded to
                `DualPrimalEdgePooling`; presumably the fraction of
                primal-edge pairs to keep, if not None.
            primal_att_coeff_threshold: Forwarded to `DualPrimalEdgePooling`;
                also used here as the threshold around which the manual
                attention coefficients are generated (0.5 is used when None).
            use_decreasing_attention_coefficient: If True, the coefficients of
                the edges that should be pooled are raised above the
                threshold; otherwise the coefficients of all the other edges
                are raised instead, so that the same edges get pooled when
                pooling is based on increasing coefficients (cf.
                `pd_mesh_net.nn.pool.DualPrimalEdgePooling`).
            num_heads: Number of attention heads for which coefficients are
                generated.

        Returns:
            None.
        """
        # - Dual-graph configuration A.
        single_dual_nodes = True
        undirected_dual_edges = True
        graph_creator = create_graphs.GraphCreator(
            mesh_filename=osp.join(current_dir,
                                   '../../common_data/simple_mesh_large.ply'),
            single_dual_nodes=single_dual_nodes,
            undirected_dual_edges=undirected_dual_edges,
            primal_features_from_dual_features=False)
        primal_graph, dual_graph = graph_creator.create_graphs()
        petdni = graph_creator.primal_edge_to_dual_node_idx
        (primal_graph_batch, dual_graph_batch,
         petdni_batch) = create_dual_primal_batch(
             primal_graphs_list=[primal_graph],
             dual_graphs_list=[dual_graph],
             primal_edge_to_dual_node_idx_list=[petdni])
        # Primal graph.
        num_primal_edges = primal_graph_batch.num_edges
        num_primal_nodes = maybe_num_nodes(primal_graph_batch.edge_index)
        self.assertEqual(num_primal_edges, 42)
        self.assertEqual(num_primal_nodes, 14)
        # - Check existence of primal edges.
        for edge in [(0, 1), (0, 7), (0, 10), (1, 2), (1, 5), (2, 3), (2, 9),
                     (3, 4), (3, 8), (4, 5), (4, 13), (5, 6), (6, 7), (6, 12),
                     (7, 11), (8, 9), (8, 13), (9, 10), (10, 11), (11, 12),
                     (12, 13)]:
            self.assertEqual(petdni_batch[edge], petdni_batch[edge[::-1]])
        # - Set the features of each primal node randomly.
        dim_primal_features = primal_graph_batch.num_node_features
        for primal_feature in primal_graph_batch.x:
            primal_feature[:] = torch.rand(dim_primal_features,
                                           dtype=torch.float)
        # Dual graph.
        num_dual_edges = dual_graph_batch.num_edges
        num_dual_nodes = maybe_num_nodes(dual_graph_batch.edge_index)
        # - Since the mesh is watertight, the medial graph of the triangulation
        #   is 4-regular, hence each node in the dual graph has 4 incoming edges
        #   and 4 outgoing edges. However, since there are no self-loops in the
        #   dual graph, each incoming edge for a certain dual node is also an
        #   outgoing edge for another dual node, and the total number of
        #   (directed) edges in the dual graph is 4 times the number of dual
        #   nodes.
        self.assertEqual(num_dual_edges, num_dual_nodes * 4)
        self.assertEqual(num_dual_nodes, num_primal_edges // 2)
        # - Set the features of each dual node randomly.
        dim_dual_features = dual_graph_batch.num_node_features
        for dual_feature in dual_graph_batch.x:
            dual_feature[:] = torch.rand(dim_dual_features,
                                         dtype=torch.float) * 3
        # Randomly shuffle the primal edge-index matrix, so that the test does
        # not depend on the edge ordering produced by the graph creator.
        permutation = np.random.permutation(num_primal_edges)
        primal_graph_batch.edge_index = (
            primal_graph_batch.edge_index[:, permutation])
        # Set the attention coefficients manually, so as to pool the following
        # primal edges:
        # - 0->10 / 10->0;
        # - 6->7 / 7->6;
        # - 7->11 / 11->7;
        # - 10->11 / 11->10;
        # - 1->5 / 5->1;
        # - 2->3 / 3->2;
        # - 3->8 / 8->3;
        # - 4->13 / 13->4.
        # (cf. file `../../common_data/simple_mesh_large_pool_1.png`)
        if (primal_att_coeff_threshold is not None):
            attention_threshold = primal_att_coeff_threshold
        else:
            # Default threshold used to generate the coefficients.
            attention_threshold = 0.5
        primal_attention_coeffs = torch.rand(
            [num_primal_edges, num_heads],
            dtype=torch.float) * attention_threshold
        if (use_decreasing_attention_coefficient):
            for edge_idx, primal_edge in enumerate(
                    primal_graph_batch.edge_index.t().tolist()):
                if (sorted(primal_edge) in [[0, 10], [6, 7], [7, 11], [10, 11],
                                            [1, 5], [2, 3], [3, 8], [4, 13]]):
                    primal_attention_coeffs[edge_idx] += (1 -
                                                          attention_threshold)
                elif (primal_edge == [1, 2]):
                    # Further test: set \alpha_{2, 1} = 0.7 > 0.5, but
                    # \alpha_{1, 2} = 0.2, so that
                    # (\alpha_{1, 2} + \alpha_{2, 1}) / 2 = 0.45 < 0.5, and the
                    # edges 1->2 / 2->1 do not get pooled.
                    primal_attention_coeffs[edge_idx] = 0.2
                elif (primal_edge == [2, 1]):
                    primal_attention_coeffs[edge_idx] = 0.7
        else:
            for edge_idx, primal_edge in enumerate(
                    primal_graph_batch.edge_index.t().tolist()):
                if (sorted(primal_edge) not in [[0, 10], [6, 7], [7, 11],
                                                [10, 11], [1, 5], [2, 3],
                                                [3, 8], [4, 13], [1, 2]]):
                    primal_attention_coeffs[edge_idx] += (1 -
                                                          attention_threshold)
                elif (primal_edge == [1, 2]):
                    # Further test: set \alpha_{1, 2} = 0.4 < 0.5, but
                    # \alpha_{2, 1} = 0.7, so that
                    # (\alpha_{1, 2} + \alpha_{2, 1}) / 2 = 0.55 > 0.5, and the
                    # edges 1->2 / 2->1 do not get pooled.
                    primal_attention_coeffs[edge_idx] = 0.4
                elif (primal_edge == [2, 1]):
                    primal_attention_coeffs[edge_idx] = 0.7
        # Create a single dual-primal edge-pooling layer.
        pool = DualPrimalEdgePooling(
            self_loops_in_output_dual_graph=False,
            single_dual_nodes=single_dual_nodes,
            undirected_dual_edges=undirected_dual_edges,
            num_primal_edges_to_keep=num_primal_edges_to_keep,
            fraction_primal_edges_to_keep=fraction_primal_edges_to_keep,
            primal_att_coeff_threshold=primal_att_coeff_threshold,
            use_decreasing_attention_coefficient=
            use_decreasing_attention_coefficient,
            return_old_dual_node_to_new_dual_node=True)
        # Perform primal-edge pooling.
        (new_primal_graph_batch, new_dual_graph_batch, new_petdni_batch,
         pooling_log) = pool(primal_graph_batch=primal_graph_batch,
                             dual_graph_batch=dual_graph_batch,
                             primal_edge_to_dual_node_idx_batch=petdni_batch,
                             primal_attention_coeffs=primal_attention_coeffs)
        # Tests on the new primal graph.
        num_new_primal_nodes = maybe_num_nodes(
            new_primal_graph_batch.edge_index)
        num_new_primal_edges = new_primal_graph_batch.num_edges
        self.assertEqual(num_new_primal_nodes, 6)
        # - Check correspondence of the old primal nodes with the new primal
        #   nodes (i.e., node clusters).
        old_primal_node_to_new_one = pooling_log.old_primal_node_to_new_one
        for old_primal_node in range(num_primal_nodes):
            if (old_primal_node in [0, 6, 7, 10, 11]):
                self.assertEqual(old_primal_node_to_new_one[old_primal_node], 0)
            elif (old_primal_node in [1, 5]):
                self.assertEqual(old_primal_node_to_new_one[old_primal_node], 1)
            elif (old_primal_node in [4, 13]):
                self.assertEqual(old_primal_node_to_new_one[old_primal_node], 2)
            elif (old_primal_node in [2, 3, 8]):
                self.assertEqual(old_primal_node_to_new_one[old_primal_node], 3)
            elif (old_primal_node == 9):
                self.assertEqual(old_primal_node_to_new_one[old_primal_node], 4)
            elif (old_primal_node == 12):
                self.assertEqual(old_primal_node_to_new_one[old_primal_node], 5)
        # - Check that the features of each new primal node correspond to the
        #   average of the features of the primal nodes merged together into
        #   that node.
        for new_primal_node in range(num_new_primal_nodes):
            old_primal_nodes_per_new_primal_node = [[0, 6, 7, 10, 11], [1, 5],
                                                    [4, 13], [2, 3, 8], 9, 12]
            old_primal_nodes = old_primal_nodes_per_new_primal_node[
                new_primal_node]
            self.assertAlmostEqual(
                new_primal_graph_batch.x[new_primal_node, 0].item(),
                primal_graph_batch.x[old_primal_nodes, 0].mean().item(), 5)
        # - Check the edges between the new primal nodes, which should be the
        #   following:
        #   - 0->1 / 1->0;
        #   - 0->4 / 4->0;
        #   - 0->5 / 5->0;
        #   - 1->2 / 2->1;
        #   - 1->3 / 3->1;
        #   - 2->3 / 3->2;
        #   - 2->5 / 5->2;
        #   - 3->4 / 4->3.
        self.assertEqual(num_new_primal_edges, 16)
        new_primal_edge_index_list = new_primal_graph_batch.edge_index.t(
        ).tolist()
        for new_primal_edge in [[0, 1], [0, 4], [0, 5], [1, 2], [1, 3], [2, 3],
                                [2, 5], [3, 4]]:
            self.assertTrue(new_primal_edge in new_primal_edge_index_list)
            self.assertTrue(new_primal_edge[::-1] in new_primal_edge_index_list)
            # Check that opposite primal edges are associated to the same dual
            # node.
            self.assertEqual(new_petdni_batch[tuple(new_primal_edge)],
                             new_petdni_batch[tuple(new_primal_edge[::-1])])
        # Tests on the new dual graph.
        num_new_dual_nodes = maybe_num_nodes(new_dual_graph_batch.edge_index)
        num_new_dual_edges = new_dual_graph_batch.num_edges
        self.assertEqual(num_new_dual_nodes, num_new_primal_edges // 2)
        # - Check that in case the border between two new face clusters is made
        #   of multiple edges of the original mesh, the dual feature associated
        #   to the new primal edge is the average of the dual features
        #   associated with the 'multiple edges of the original mesh'. This
        #   happens between new primal nodes 0--1, 0--5, 2--3 and 3--4.
        idx_new_dual_node = new_petdni_batch[(0, 1)]
        idx_old_dual_node_1 = petdni_batch[(0, 1)]
        idx_old_dual_node_2 = petdni_batch[(5, 6)]
        self.assertAlmostEqual(
            new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
            dual_graph_batch.x[[idx_old_dual_node_1, idx_old_dual_node_2],
                               0].mean().item(), 5)
        idx_new_dual_node = new_petdni_batch[(0, 5)]
        idx_old_dual_node_1 = petdni_batch[(6, 12)]
        idx_old_dual_node_2 = petdni_batch[(11, 12)]
        self.assertAlmostEqual(
            new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
            dual_graph_batch.x[[idx_old_dual_node_1, idx_old_dual_node_2],
                               0].mean().item(), 5)
        idx_new_dual_node = new_petdni_batch[(2, 3)]
        idx_old_dual_node_1 = petdni_batch[(3, 4)]
        idx_old_dual_node_2 = petdni_batch[(8, 13)]
        self.assertAlmostEqual(
            new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
            dual_graph_batch.x[[idx_old_dual_node_1, idx_old_dual_node_2],
                               0].mean().item(), 5)
        idx_new_dual_node = new_petdni_batch[(3, 4)]
        idx_old_dual_node_1 = petdni_batch[(2, 9)]
        idx_old_dual_node_2 = petdni_batch[(8, 9)]
        self.assertAlmostEqual(
            new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
            dual_graph_batch.x[[idx_old_dual_node_1, idx_old_dual_node_2],
                               0].mean().item(), 5)
        # - For all other cases, check that the dual feature associated to the
        #   new primal edge is the dual feature associated with edge of the
        #   original mesh that is now between the new primal nodes.
        new_dual_nodes = [(0, 4), (1, 2), (1, 3), (2, 5)]
        old_dual_nodes = [(9, 10), (4, 5), (1, 2), (12, 13)]
        for new_dual_node, old_dual_node in zip(new_dual_nodes, old_dual_nodes):
            idx_new_dual_node = new_petdni_batch[new_dual_node]
            idx_old_dual_node = petdni_batch[old_dual_node]
            self.assertAlmostEqual(
                new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
                dual_graph_batch.x[idx_old_dual_node, 0].item(), 5)
        # - Check that the mapping between old and new dual nodes is correct.
        old_dual_node_to_new_one = pooling_log.old_dual_node_to_new_one
        self.assertEqual(len(old_dual_node_to_new_one), num_dual_nodes)
        old_dual_nodes_index_with_corresponding_new_one = [
            petdni_batch[primal_edge]
            for primal_edge in [(0, 1), (1, 2), (2, 9), (3, 4), (4, 5), (
                5, 6), (6, 12), (8, 9), (8, 13), (9, 10), (11, 12), (12, 13)]
        ]
        corresponding_new_dual_nodes = [
            new_petdni_batch[primal_edge]
            for primal_edge in [(0, 1), (1, 3), (3, 4), (2, 3), (1, 2), (
                0, 1), (0, 5), (3, 4), (2, 3), (0, 4), (0, 5), (2, 5)]
        ]
        for dual_node_idx in range(num_dual_nodes):
            if (dual_node_idx in old_dual_nodes_index_with_corresponding_new_one
               ):
                # - The old dual node has a corresponding new dual node.
                self.assertEqual(
                    old_dual_node_to_new_one[dual_node_idx],
                    corresponding_new_dual_nodes[
                        old_dual_nodes_index_with_corresponding_new_one.index(
                            dual_node_idx)])
            else:
                # - The old dual node has no corresponding new dual node.
                self.assertEqual(old_dual_node_to_new_one[dual_node_idx], -1)
        # - Check the edges between the new dual nodes, which should be the
        #   following (with dual nodes indicated by the corresponding primal
        #   nodes as a set):
        #   - {0, 1} -> {0, 4};
        #   - {0, 1} -> {0, 5};
        #   - {0, 1} -> {1, 2};
        #   - {0, 1} -> {1, 3};
        #   - {0, 4} -> {0, 1};
        #   - {0, 4} -> {0, 5};
        #   - {0, 4} -> {3, 4};
        #   - {0, 5} -> {0, 1};
        #   - {0, 5} -> {0, 4};
        #   - {0, 5} -> {2, 5};
        #   - {1, 2} -> {0, 1};
        #   - {1, 2} -> {1, 3};
        #   - {1, 2} -> {2, 3};
        #   - {1, 2} -> {2, 5};
        #   - {1, 3} -> {0, 1};
        #   - {1, 3} -> {1, 2};
        #   - {1, 3} -> {2, 3};
        #   - {1, 3} -> {3, 4};
        #   - {2, 3} -> {1, 2};
        #   - {2, 3} -> {2, 5};
        #   - {2, 3} -> {1, 3};
        #   - {2, 3} -> {3, 4};
        #   - {2, 5} -> {1, 2};
        #   - {2, 5} -> {2, 3};
        #   - {2, 5} -> {0, 5};
        #   - {3, 4} -> {1, 3};
        #   - {3, 4} -> {2, 3};
        #   - {3, 4} -> {0, 4}.
        self.assertEqual(num_new_dual_edges, 28)
        new_dual_edge_index_list = new_dual_graph_batch.edge_index.t().tolist()
        dual_node_1 = (0, 1)
        other_dual_nodes = [(0, 4), (0, 5), (1, 2), (1, 3)]
        for other_dual_node in other_dual_nodes:
            self.assertTrue([
                new_petdni_batch[dual_node_1], new_petdni_batch[other_dual_node]
            ] in new_dual_edge_index_list)
        dual_node_1 = (0, 4)
        other_dual_nodes = [(0, 1), (0, 5), (3, 4)]
        for other_dual_node in other_dual_nodes:
            self.assertTrue([
                new_petdni_batch[dual_node_1], new_petdni_batch[other_dual_node]
            ] in new_dual_edge_index_list)
        dual_node_1 = (0, 5)
        other_dual_nodes = [(0, 1), (0, 4), (2, 5)]
        for other_dual_node in other_dual_nodes:
            self.assertTrue([
                new_petdni_batch[dual_node_1], new_petdni_batch[other_dual_node]
            ] in new_dual_edge_index_list)
        dual_node_1 = (1, 2)
        other_dual_nodes = [(0, 1), (1, 3), (2, 3), (2, 5)]
        for other_dual_node in other_dual_nodes:
            self.assertTrue([
                new_petdni_batch[dual_node_1], new_petdni_batch[other_dual_node]
            ] in new_dual_edge_index_list)
        dual_node_1 = (1, 3)
        other_dual_nodes = [(0, 1), (1, 2), (2, 3), (3, 4)]
        for other_dual_node in other_dual_nodes:
            self.assertTrue([
                new_petdni_batch[dual_node_1], new_petdni_batch[other_dual_node]
            ] in new_dual_edge_index_list)
        dual_node_1 = (2, 3)
        other_dual_nodes = [(1, 2), (2, 5), (1, 3), (3, 4)]
        for other_dual_node in other_dual_nodes:
            self.assertTrue([
                new_petdni_batch[dual_node_1], new_petdni_batch[other_dual_node]
            ] in new_dual_edge_index_list)
        dual_node_1 = (2, 5)
        other_dual_nodes = [(1, 2), (2, 3), (0, 5)]
        for other_dual_node in other_dual_nodes:
            self.assertTrue([
                new_petdni_batch[dual_node_1], new_petdni_batch[other_dual_node]
            ] in new_dual_edge_index_list)
        dual_node_1 = (3, 4)
        other_dual_nodes = [(1, 3), (2, 3), (0, 4)]
        for other_dual_node in other_dual_nodes:
            self.assertTrue([
                new_petdni_batch[dual_node_1], new_petdni_batch[other_dual_node]
            ] in new_dual_edge_index_list)
    def __test_large_simple_mesh_config_A_with_output_self_loops(
            self,
            num_primal_edges_to_keep=None,
            fraction_primal_edges_to_keep=None,
            primal_att_coeff_threshold=None,
            use_decreasing_attention_coefficient=True,
            num_heads=1):
        """Tests edge pooling on the large simple mesh, dual configuration A
        (single dual nodes, undirected dual edges), with self-loops in the
        output dual graph.

        Attention coefficients are assigned by hand so that a known set of
        eight primal-edge pairs gets pooled; the resulting primal/dual graphs,
        node features and old-to-new node mappings are then compared against
        the manually-derived ground truth (cf.
        `../../common_data/simple_mesh_large_pool_1.png`). In addition to the
        regular dual edges, one self-loop per new dual node is expected in the
        output dual graph.

        Args:
            num_primal_edges_to_keep: Forwarded to `DualPrimalEdgePooling`;
                presumably the number of primal-edge pairs to keep, if not
                None.
            fraction_primal_edges_to_keep: Forwarded to
                `DualPrimalEdgePooling`; presumably the fraction of
                primal-edge pairs to keep, if not None.
            primal_att_coeff_threshold: Forwarded to `DualPrimalEdgePooling`;
                also used here as the threshold around which the manual
                attention coefficients are generated (0.5 is used when None).
            use_decreasing_attention_coefficient: If True, the coefficients of
                the edges that should be pooled are raised above the
                threshold; otherwise the coefficients of all the other edges
                are raised instead, so that the same edges get pooled when
                pooling is based on increasing coefficients (cf.
                `pd_mesh_net.nn.pool.DualPrimalEdgePooling`).
            num_heads: Number of attention heads for which coefficients are
                generated.

        Returns:
            None.
        """
        # - Dual-graph configuration A.
        single_dual_nodes = True
        undirected_dual_edges = True
        graph_creator = create_graphs.GraphCreator(
            mesh_filename=osp.join(current_dir,
                                   '../../common_data/simple_mesh_large.ply'),
            single_dual_nodes=single_dual_nodes,
            undirected_dual_edges=undirected_dual_edges,
            primal_features_from_dual_features=False)
        primal_graph, dual_graph = graph_creator.create_graphs()
        petdni = graph_creator.primal_edge_to_dual_node_idx
        (primal_graph_batch, dual_graph_batch,
         petdni_batch) = create_dual_primal_batch(
             primal_graphs_list=[primal_graph],
             dual_graphs_list=[dual_graph],
             primal_edge_to_dual_node_idx_list=[petdni])
        # Primal graph.
        num_primal_edges = primal_graph_batch.num_edges
        num_primal_nodes = maybe_num_nodes(primal_graph_batch.edge_index)
        self.assertEqual(num_primal_edges, 42)
        self.assertEqual(num_primal_nodes, 14)
        # - Check existence of primal edges.
        for edge in [(0, 1), (0, 7), (0, 10), (1, 2), (1, 5), (2, 3), (2, 9),
                     (3, 4), (3, 8), (4, 5), (4, 13), (5, 6), (6, 7), (6, 12),
                     (7, 11), (8, 9), (8, 13), (9, 10), (10, 11), (11, 12),
                     (12, 13)]:
            self.assertEqual(petdni_batch[edge], petdni_batch[edge[::-1]])
        # - Set the features of each primal node randomly.
        dim_primal_features = primal_graph_batch.num_node_features
        for primal_feature in primal_graph_batch.x:
            primal_feature[:] = torch.rand(dim_primal_features,
                                           dtype=torch.float)
        # Dual graph.
        num_dual_edges = dual_graph_batch.num_edges
        num_dual_nodes = maybe_num_nodes(dual_graph_batch.edge_index)
        # - Since the mesh is watertight, the medial graph of the triangulation
        #   is 4-regular, hence each node in the dual graph has 4 incoming edges
        #   and 4 outgoing edges. However, since there are no self-loops in the
        #   dual graph, each incoming edge for a certain dual node is also an
        #   outgoing edge for another dual node, and the total number of
        #   (directed) edges in the dual graph is 4 times the number of dual
        #   nodes.
        self.assertEqual(num_dual_edges, num_dual_nodes * 4)
        self.assertEqual(num_dual_nodes, num_primal_edges // 2)
        # - Set the features of each dual node randomly.
        dim_dual_features = dual_graph_batch.num_node_features
        for dual_feature in dual_graph_batch.x:
            dual_feature[:] = torch.rand(dim_dual_features,
                                         dtype=torch.float) * 3
        # Randomly shuffle the primal edge-index matrix, so that the test does
        # not depend on the edge ordering produced by the graph creator.
        permutation = np.random.permutation(num_primal_edges)
        primal_graph_batch.edge_index = (
            primal_graph_batch.edge_index[:, permutation])
        # Set the attention coefficients manually, so as to pool the following
        # primal edges:
        # - 0->10 / 10->0;
        # - 6->7 / 7->6;
        # - 7->11 / 11->7;
        # - 10->11 / 11->10;
        # - 1->5 / 5->1;
        # - 2->3 / 3->2;
        # - 3->8 / 8->3;
        # - 4->13 / 13->4.
        # (cf. file `../../common_data/simple_mesh_large_pool_1.png`)
        if (primal_att_coeff_threshold is not None):
            attention_threshold = primal_att_coeff_threshold
        else:
            # Default threshold used to generate the coefficients.
            attention_threshold = 0.5
        primal_attention_coeffs = torch.rand(
            [num_primal_edges, num_heads],
            dtype=torch.float) * attention_threshold
        if (use_decreasing_attention_coefficient):
            for edge_idx, primal_edge in enumerate(
                    primal_graph_batch.edge_index.t().tolist()):
                if (sorted(primal_edge) in [[0, 10], [6, 7], [7, 11], [10, 11],
                                            [1, 5], [2, 3], [3, 8], [4, 13]]):
                    primal_attention_coeffs[edge_idx] += (1 -
                                                          attention_threshold)
                elif (primal_edge == [1, 2]):
                    # Further test: set \alpha_{2, 1} = 0.7 > 0.5, but
                    # \alpha_{1, 2} = 0.2, so that
                    # (\alpha_{1, 2} + \alpha_{2, 1}) / 2 = 0.45 < 0.5, and the
                    # edges 1->2 / 2->1 do not get pooled.
                    primal_attention_coeffs[edge_idx] = 0.2
                elif (primal_edge == [2, 1]):
                    primal_attention_coeffs[edge_idx] = 0.7
        else:
            for edge_idx, primal_edge in enumerate(
                    primal_graph_batch.edge_index.t().tolist()):
                if (sorted(primal_edge) not in [[0, 10], [6, 7], [7, 11],
                                                [10, 11], [1, 5], [2, 3],
                                                [3, 8], [4, 13], [1, 2]]):
                    primal_attention_coeffs[edge_idx] += (1 -
                                                          attention_threshold)
                elif (primal_edge == [1, 2]):
                    # Further test: set \alpha_{1, 2} = 0.4 < 0.5, but
                    # \alpha_{2, 1} = 0.7, so that
                    # (\alpha_{1, 2} + \alpha_{2, 1}) / 2 = 0.55 > 0.5, and the
                    # edges 1->2 / 2->1 do not get pooled.
                    primal_attention_coeffs[edge_idx] = 0.4
                elif (primal_edge == [2, 1]):
                    primal_attention_coeffs[edge_idx] = 0.7
        # Create a single dual-primal edge-pooling layer.
        pool = DualPrimalEdgePooling(
            self_loops_in_output_dual_graph=True,
            single_dual_nodes=single_dual_nodes,
            undirected_dual_edges=undirected_dual_edges,
            num_primal_edges_to_keep=num_primal_edges_to_keep,
            fraction_primal_edges_to_keep=fraction_primal_edges_to_keep,
            primal_att_coeff_threshold=primal_att_coeff_threshold,
            use_decreasing_attention_coefficient=
            use_decreasing_attention_coefficient,
            return_old_dual_node_to_new_dual_node=True)
        # Perform primal-edge pooling.
        (new_primal_graph_batch, new_dual_graph_batch, new_petdni_batch,
         pooling_log) = pool(primal_graph_batch=primal_graph_batch,
                             dual_graph_batch=dual_graph_batch,
                             primal_edge_to_dual_node_idx_batch=petdni_batch,
                             primal_attention_coeffs=primal_attention_coeffs)
        # Tests on the new primal graph.
        num_new_primal_nodes = maybe_num_nodes(
            new_primal_graph_batch.edge_index)
        num_new_primal_edges = new_primal_graph_batch.num_edges
        self.assertEqual(num_new_primal_nodes, 6)
        # - Check correspondence of the old primal nodes with the new primal
        #   nodes (i.e., node clusters).
        old_primal_node_to_new_one = pooling_log.old_primal_node_to_new_one
        for old_primal_node in range(num_primal_nodes):
            if (old_primal_node in [0, 6, 7, 10, 11]):
                self.assertEqual(old_primal_node_to_new_one[old_primal_node], 0)
            elif (old_primal_node in [1, 5]):
                self.assertEqual(old_primal_node_to_new_one[old_primal_node], 1)
            elif (old_primal_node in [4, 13]):
                self.assertEqual(old_primal_node_to_new_one[old_primal_node], 2)
            elif (old_primal_node in [2, 3, 8]):
                self.assertEqual(old_primal_node_to_new_one[old_primal_node], 3)
            elif (old_primal_node == 9):
                self.assertEqual(old_primal_node_to_new_one[old_primal_node], 4)
            elif (old_primal_node == 12):
                self.assertEqual(old_primal_node_to_new_one[old_primal_node], 5)
        # - Check that the features of each new primal node correspond to the
        #   average of the features of the primal nodes merged together into
        #   that node.
        for new_primal_node in range(num_new_primal_nodes):
            old_primal_nodes_per_new_primal_node = [[0, 6, 7, 10, 11], [1, 5],
                                                    [4, 13], [2, 3, 8], 9, 12]
            old_primal_nodes = old_primal_nodes_per_new_primal_node[
                new_primal_node]
            self.assertAlmostEqual(
                new_primal_graph_batch.x[new_primal_node, 0].item(),
                primal_graph_batch.x[old_primal_nodes, 0].mean().item(), 5)
        # - Check the edges between the new primal nodes, which should be the
        #   following:
        #   - 0->1 / 1->0;
        #   - 0->4 / 4->0;
        #   - 0->5 / 5->0;
        #   - 1->2 / 2->1;
        #   - 1->3 / 3->1;
        #   - 2->3 / 3->2;
        #   - 2->5 / 5->2;
        #   - 3->4 / 4->3.
        self.assertEqual(num_new_primal_edges, 16)
        new_primal_edge_index_list = new_primal_graph_batch.edge_index.t(
        ).tolist()
        for new_primal_edge in [[0, 1], [0, 4], [0, 5], [1, 2], [1, 3], [2, 3],
                                [2, 5], [3, 4]]:
            self.assertTrue(new_primal_edge in new_primal_edge_index_list)
            self.assertTrue(new_primal_edge[::-1] in new_primal_edge_index_list)
            # Check that opposite primal edges are associated to the same dual
            # node.
            self.assertEqual(new_petdni_batch[tuple(new_primal_edge)],
                             new_petdni_batch[tuple(new_primal_edge[::-1])])
        # Tests on the new dual graph.
        num_new_dual_nodes = maybe_num_nodes(new_dual_graph_batch.edge_index)
        num_new_dual_edges = new_dual_graph_batch.num_edges
        self.assertEqual(num_new_dual_nodes, num_new_primal_edges // 2)
        # - Check that in case the border between two new face clusters is made
        #   of multiple edges of the original mesh, the dual feature associated
        #   to the new primal edge is the average of the dual features
        #   associated with the 'multiple edges of the original mesh'. This
        #   happens between new primal nodes 0--1, 0--5, 2--3 and 3--4.
        idx_new_dual_node = new_petdni_batch[(0, 1)]
        idx_old_dual_node_1 = petdni_batch[(0, 1)]
        idx_old_dual_node_2 = petdni_batch[(5, 6)]
        self.assertAlmostEqual(
            new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
            dual_graph_batch.x[[idx_old_dual_node_1, idx_old_dual_node_2],
                               0].mean().item(), 5)
        idx_new_dual_node = new_petdni_batch[(0, 5)]
        idx_old_dual_node_1 = petdni_batch[(6, 12)]
        idx_old_dual_node_2 = petdni_batch[(11, 12)]
        self.assertAlmostEqual(
            new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
            dual_graph_batch.x[[idx_old_dual_node_1, idx_old_dual_node_2],
                               0].mean().item(), 5)
        idx_new_dual_node = new_petdni_batch[(2, 3)]
        idx_old_dual_node_1 = petdni_batch[(3, 4)]
        idx_old_dual_node_2 = petdni_batch[(8, 13)]
        self.assertAlmostEqual(
            new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
            dual_graph_batch.x[[idx_old_dual_node_1, idx_old_dual_node_2],
                               0].mean().item(), 5)
        idx_new_dual_node = new_petdni_batch[(3, 4)]
        idx_old_dual_node_1 = petdni_batch[(2, 9)]
        idx_old_dual_node_2 = petdni_batch[(8, 9)]
        self.assertAlmostEqual(
            new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
            dual_graph_batch.x[[idx_old_dual_node_1, idx_old_dual_node_2],
                               0].mean().item(), 5)
        # - For all other cases, check that the dual feature associated to the
        #   new primal edge is the dual feature associated with edge of the
        #   original mesh that is now between the new primal nodes.
        new_dual_nodes = [(0, 4), (1, 2), (1, 3), (2, 5)]
        old_dual_nodes = [(9, 10), (4, 5), (1, 2), (12, 13)]
        for new_dual_node, old_dual_node in zip(new_dual_nodes, old_dual_nodes):
            idx_new_dual_node = new_petdni_batch[new_dual_node]
            idx_old_dual_node = petdni_batch[old_dual_node]
            self.assertAlmostEqual(
                new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
                dual_graph_batch.x[idx_old_dual_node, 0].item(), 5)
        # - Check that the mapping between old and new dual nodes is correct.
        old_dual_node_to_new_one = pooling_log.old_dual_node_to_new_one
        self.assertEqual(len(old_dual_node_to_new_one), num_dual_nodes)
        old_dual_nodes_index_with_corresponding_new_one = [
            petdni_batch[primal_edge]
            for primal_edge in [(0, 1), (1, 2), (2, 9), (3, 4), (4, 5), (
                5, 6), (6, 12), (8, 9), (8, 13), (9, 10), (11, 12), (12, 13)]
        ]
        corresponding_new_dual_nodes = [
            new_petdni_batch[primal_edge]
            for primal_edge in [(0, 1), (1, 3), (3, 4), (2, 3), (1, 2), (
                0, 1), (0, 5), (3, 4), (2, 3), (0, 4), (0, 5), (2, 5)]
        ]
        for dual_node_idx in range(num_dual_nodes):
            if (dual_node_idx in old_dual_nodes_index_with_corresponding_new_one
               ):
                # - The old dual node has a corresponding new dual node.
                self.assertEqual(
                    old_dual_node_to_new_one[dual_node_idx],
                    corresponding_new_dual_nodes[
                        old_dual_nodes_index_with_corresponding_new_one.index(
                            dual_node_idx)])
            else:
                # - The old dual node has no corresponding new dual node.
                self.assertEqual(old_dual_node_to_new_one[dual_node_idx], -1)
        # - Check the edges between the new dual nodes, which should be the
        #   following (with dual nodes indicated by the corresponding primal
        #   nodes as a set), plus the self-loops:
        #   - {0, 1} -> {0, 4};
        #   - {0, 1} -> {0, 5};
        #   - {0, 1} -> {1, 2};
        #   - {0, 1} -> {1, 3};
        #   - {0, 4} -> {0, 1};
        #   - {0, 4} -> {0, 5};
        #   - {0, 4} -> {3, 4};
        #   - {0, 5} -> {0, 1};
        #   - {0, 5} -> {0, 4};
        #   - {0, 5} -> {2, 5};
        #   - {1, 2} -> {0, 1};
        #   - {1, 2} -> {1, 3};
        #   - {1, 2} -> {2, 3};
        #   - {1, 2} -> {2, 5};
        #   - {1, 3} -> {0, 1};
        #   - {1, 3} -> {1, 2};
        #   - {1, 3} -> {2, 3};
        #   - {1, 3} -> {3, 4};
        #   - {2, 3} -> {1, 2};
        #   - {2, 3} -> {2, 5};
        #   - {2, 3} -> {1, 3};
        #   - {2, 3} -> {3, 4};
        #   - {2, 5} -> {1, 2};
        #   - {2, 5} -> {2, 3};
        #   - {2, 5} -> {0, 5};
        #   - {3, 4} -> {1, 3};
        #   - {3, 4} -> {2, 3};
        #   - {3, 4} -> {0, 4}.
        self.assertEqual(num_new_dual_edges, 28 + num_new_dual_nodes)
        new_dual_edge_index_list = new_dual_graph_batch.edge_index.t().tolist()
        dual_node_1 = (0, 1)
        other_dual_nodes = [(0, 4), (0, 5), (1, 2), (1, 3)]
        for other_dual_node in other_dual_nodes:
            self.assertTrue([
                new_petdni_batch[dual_node_1], new_petdni_batch[other_dual_node]
            ] in new_dual_edge_index_list)
        # Self-loop.
        self.assertTrue(
            [new_petdni_batch[dual_node_1], new_petdni_batch[dual_node_1]
            ] in new_dual_edge_index_list)
        dual_node_1 = (0, 4)
        other_dual_nodes = [(0, 1), (0, 5), (3, 4)]
        for other_dual_node in other_dual_nodes:
            self.assertTrue([
                new_petdni_batch[dual_node_1], new_petdni_batch[other_dual_node]
            ] in new_dual_edge_index_list)
        # Self-loop.
        self.assertTrue(
            [new_petdni_batch[dual_node_1], new_petdni_batch[dual_node_1]
            ] in new_dual_edge_index_list)
        dual_node_1 = (0, 5)
        other_dual_nodes = [(0, 1), (0, 4), (2, 5)]
        for other_dual_node in other_dual_nodes:
            self.assertTrue([
                new_petdni_batch[dual_node_1], new_petdni_batch[other_dual_node]
            ] in new_dual_edge_index_list)
        # Self-loop.
        self.assertTrue(
            [new_petdni_batch[dual_node_1], new_petdni_batch[dual_node_1]
            ] in new_dual_edge_index_list)
        dual_node_1 = (1, 2)
        other_dual_nodes = [(0, 1), (1, 3), (2, 3), (2, 5)]
        for other_dual_node in other_dual_nodes:
            self.assertTrue([
                new_petdni_batch[dual_node_1], new_petdni_batch[other_dual_node]
            ] in new_dual_edge_index_list)
        # Self-loop.
        self.assertTrue(
            [new_petdni_batch[dual_node_1], new_petdni_batch[dual_node_1]
            ] in new_dual_edge_index_list)
        dual_node_1 = (1, 3)
        other_dual_nodes = [(0, 1), (1, 2), (2, 3), (3, 4)]
        for other_dual_node in other_dual_nodes:
            self.assertTrue([
                new_petdni_batch[dual_node_1], new_petdni_batch[other_dual_node]
            ] in new_dual_edge_index_list)
        # Self-loop.
        self.assertTrue(
            [new_petdni_batch[dual_node_1], new_petdni_batch[dual_node_1]
            ] in new_dual_edge_index_list)
        dual_node_1 = (2, 3)
        other_dual_nodes = [(1, 2), (2, 5), (1, 3), (3, 4)]
        for other_dual_node in other_dual_nodes:
            self.assertTrue([
                new_petdni_batch[dual_node_1], new_petdni_batch[other_dual_node]
            ] in new_dual_edge_index_list)
        # Self-loop.
        self.assertTrue(
            [new_petdni_batch[dual_node_1], new_petdni_batch[dual_node_1]
            ] in new_dual_edge_index_list)
        dual_node_1 = (2, 5)
        other_dual_nodes = [(1, 2), (2, 3), (0, 5)]
        for other_dual_node in other_dual_nodes:
            self.assertTrue([
                new_petdni_batch[dual_node_1], new_petdni_batch[other_dual_node]
            ] in new_dual_edge_index_list)
        # Self-loop.
        self.assertTrue(
            [new_petdni_batch[dual_node_1], new_petdni_batch[dual_node_1]
            ] in new_dual_edge_index_list)
        dual_node_1 = (3, 4)
        other_dual_nodes = [(1, 3), (2, 3), (0, 4)]
        for other_dual_node in other_dual_nodes:
            self.assertTrue([
                new_petdni_batch[dual_node_1], new_petdni_batch[other_dual_node]
            ] in new_dual_edge_index_list)
        # Self-loop.
        self.assertTrue(
            [new_petdni_batch[dual_node_1], new_petdni_batch[dual_node_1]
            ] in new_dual_edge_index_list)
def __test_large_simple_mesh_config_B_with_output_self_loops(
        self,
        num_primal_edges_to_keep=None,
        fraction_primal_edges_to_keep=None,
        primal_att_coeff_threshold=None,
        use_decreasing_attention_coefficient=True,
        num_heads=1):
        """Tests edge pooling on the large simple mesh with dual-graph
        configuration B (double dual nodes, undirected dual edges), with
        self-loops added to the output dual graph.

        Attention coefficients are set manually so that a known set of
        primal edges gets pooled; the test then checks the resulting
        primal/dual graphs: node clustering, averaged node features,
        edge connectivity, the old-to-new dual-node mapping, and the
        presence of self-loops in the new dual graph.

        Args:
            num_primal_edges_to_keep: Optional absolute number of primal
                edges to keep; forwarded to the pooling layer.
            fraction_primal_edges_to_keep: Optional fraction of primal
                edges to keep; forwarded to the pooling layer.
            primal_att_coeff_threshold: Optional attention-coefficient
                threshold; when None, 0.5 is used to craft coefficients.
            use_decreasing_attention_coefficient: If True, edges with
                high coefficients get pooled; otherwise the convention is
                inverted (cf. the two coefficient-assignment branches).
            num_heads: Number of attention heads used to shape the
                manually-crafted coefficient tensor.

        Returns:
            None.
        """
        # - Dual-graph configuration B.
        single_dual_nodes = False
        undirected_dual_edges = True
        graph_creator = create_graphs.GraphCreator(
            mesh_filename=osp.join(current_dir,
                                   '../../common_data/simple_mesh_large.ply'),
            single_dual_nodes=single_dual_nodes,
            undirected_dual_edges=undirected_dual_edges,
            primal_features_from_dual_features=False)
        primal_graph, dual_graph = graph_creator.create_graphs()
        petdni = graph_creator.primal_edge_to_dual_node_idx
        (primal_graph_batch, dual_graph_batch,
         petdni_batch) = create_dual_primal_batch(
             primal_graphs_list=[primal_graph],
             dual_graphs_list=[dual_graph],
             primal_edge_to_dual_node_idx_list=[petdni])
        # Primal graph.
        num_primal_edges = primal_graph_batch.num_edges
        num_primal_nodes = maybe_num_nodes(primal_graph_batch.edge_index)
        self.assertEqual(num_primal_edges, 42)
        self.assertEqual(num_primal_nodes, 14)
        # - Check existence of primal edges.
        for edge in [(0, 1), (0, 7), (0, 10), (1, 2), (1, 5), (2, 3), (2, 9),
                     (3, 4), (3, 8), (4, 5), (4, 13), (5, 6), (6, 7), (6, 12),
                     (7, 11), (8, 9), (8, 13), (9, 10), (10, 11), (11, 12),
                     (12, 13)]:
            # Configuration B has double dual nodes.
            self.assertNotEqual(petdni_batch[edge], petdni_batch[edge[::-1]])
        # - Set the features of each primal node randomly.
        dim_primal_features = primal_graph_batch.num_node_features
        for primal_feature in primal_graph_batch.x:
            primal_feature[:] = torch.rand(dim_primal_features,
                                           dtype=torch.float)
        # Dual graph.
        num_dual_edges = dual_graph_batch.num_edges
        num_dual_nodes = maybe_num_nodes(dual_graph_batch.edge_index)
        # - Since the mesh is watertight, the medial graph of the triangulation
        #   is 4-regular, hence each node in the dual graph has 4 incoming edges
        #   and 4 outgoing edges. However, since there are no self-loops in the
        #   dual graph, each incoming edge for a certain dual node is also an
        #   outgoing edge for another dual node, and the total number of
        #   (directed) edges in the dual graph is 4 times the number of dual
        #   nodes.
        self.assertEqual(num_dual_edges, num_dual_nodes * 4)
        self.assertEqual(num_dual_nodes, num_primal_edges)
        # - Set the features of each dual node randomly.
        dim_dual_features = dual_graph_batch.num_node_features
        for dual_feature in dual_graph_batch.x:
            dual_feature[:] = torch.rand(dim_dual_features,
                                         dtype=torch.float) * 3
        # Randomly shuffle the primal edge-index matrix.
        permutation = np.random.permutation(num_primal_edges)
        primal_graph_batch.edge_index = (
            primal_graph_batch.edge_index[:, permutation])
        # Set the attention coefficients manually, so as to pool the following
        # primal edges:
        # - 0->10 / 10->0;
        # - 6->7 / 7->6;
        # - 7->11 / 11->7;
        # - 10->11 / 11->10;
        # - 1->5 / 5->1;
        # - 2->3 / 3->2;
        # - 3->8 / 8->3;
        # - 4->13 / 13->4.
        # (cf. file `../../common_data/simple_mesh_large_pool_1.png`)
        if (primal_att_coeff_threshold is not None):
            attention_threshold = primal_att_coeff_threshold
        else:
            attention_threshold = 0.5
        primal_attention_coeffs = torch.rand(
            [num_primal_edges, num_heads],
            dtype=torch.float) * attention_threshold
        if (use_decreasing_attention_coefficient):
            for edge_idx, primal_edge in enumerate(
                    primal_graph_batch.edge_index.t().tolist()):
                if (sorted(primal_edge) in [[0, 10], [6, 7], [7, 11], [10, 11],
                                            [1, 5], [2, 3], [3, 8], [4, 13]]):
                    primal_attention_coeffs[edge_idx] += (1 -
                                                          attention_threshold)
                elif (primal_edge == [1, 2]):
                    # Further test: set \alpha_{2, 1} = 0.7 > 0.5, but
                    # \alpha_{1, 2} = 0.2, so that
                    # (\alpha_{1, 2} + \alpha_{2, 1}) / 2 = 0.45 < 0.5, and the
                    # edges 1->2 / 2->1 do not get pooled.
                    primal_attention_coeffs[edge_idx] = 0.2
                elif (primal_edge == [2, 1]):
                    primal_attention_coeffs[edge_idx] = 0.7
        else:
            for edge_idx, primal_edge in enumerate(
                    primal_graph_batch.edge_index.t().tolist()):
                if (sorted(primal_edge) not in [[0, 10], [6, 7], [7, 11],
                                                [10, 11], [1, 5], [2, 3],
                                                [3, 8], [4, 13], [1, 2]]):
                    primal_attention_coeffs[edge_idx] += (1 -
                                                          attention_threshold)
                elif (primal_edge == [1, 2]):
                    # Further test: set \alpha_{1, 2} = 0.4 < 0.5, but
                    # \alpha_{2, 1} = 0.7, so that
                    # (\alpha_{1, 2} + \alpha_{2, 1}) / 2 = 0.55 > 0.5, and the
                    # edges 1->2 / 2->1 do not get pooled.
                    primal_attention_coeffs[edge_idx] = 0.4
                elif (primal_edge == [2, 1]):
                    primal_attention_coeffs[edge_idx] = 0.7
        # Create a single dual-primal edge-pooling layer.
        pool = DualPrimalEdgePooling(
            self_loops_in_output_dual_graph=True,
            single_dual_nodes=single_dual_nodes,
            undirected_dual_edges=undirected_dual_edges,
            num_primal_edges_to_keep=num_primal_edges_to_keep,
            fraction_primal_edges_to_keep=fraction_primal_edges_to_keep,
            primal_att_coeff_threshold=primal_att_coeff_threshold,
            use_decreasing_attention_coefficient=
            use_decreasing_attention_coefficient,
            return_old_dual_node_to_new_dual_node=True)
        # Perform primal-edge pooling.
        (new_primal_graph_batch, new_dual_graph_batch, new_petdni_batch,
         pooling_log) = pool(primal_graph_batch=primal_graph_batch,
                             dual_graph_batch=dual_graph_batch,
                             primal_edge_to_dual_node_idx_batch=petdni_batch,
                             primal_attention_coeffs=primal_attention_coeffs)
        # Tests on the new primal graph.
        num_new_primal_nodes = maybe_num_nodes(
            new_primal_graph_batch.edge_index)
        num_new_primal_edges = new_primal_graph_batch.num_edges
        self.assertEqual(num_new_primal_nodes, 6)
        # - Check correspondence of the old primal nodes with the new primal
        #   nodes (i.e., node clusters).
        old_primal_node_to_new_one = pooling_log.old_primal_node_to_new_one
        for old_primal_node in range(num_primal_nodes):
            if (old_primal_node in [0, 6, 7, 10, 11]):
                self.assertEqual(old_primal_node_to_new_one[old_primal_node], 0)
            elif (old_primal_node in [1, 5]):
                self.assertEqual(old_primal_node_to_new_one[old_primal_node], 1)
            elif (old_primal_node in [4, 13]):
                self.assertEqual(old_primal_node_to_new_one[old_primal_node], 2)
            elif (old_primal_node in [2, 3, 8]):
                self.assertEqual(old_primal_node_to_new_one[old_primal_node], 3)
            elif (old_primal_node == 9):
                self.assertEqual(old_primal_node_to_new_one[old_primal_node], 4)
            elif (old_primal_node == 12):
                self.assertEqual(old_primal_node_to_new_one[old_primal_node], 5)
        # - Check that the features of each new primal node correspond to the
        #   average of the features of the primal nodes merged together into
        #   that node.
        for new_primal_node in range(num_new_primal_nodes):
            old_primal_nodes_per_new_primal_node = [[0, 6, 7, 10, 11], [1, 5],
                                                    [4, 13], [2, 3, 8], 9, 12]
            old_primal_nodes = old_primal_nodes_per_new_primal_node[
                new_primal_node]
            self.assertAlmostEqual(
                new_primal_graph_batch.x[new_primal_node, 0].item(),
                primal_graph_batch.x[old_primal_nodes, 0].mean().item(), 5)
        # - Check the edges between the new primal nodes, which should be the
        #   following:
        #   - 0->1 / 1->0;
        #   - 0->4 / 4->0;
        #   - 0->5 / 5->0;
        #   - 1->2 / 2->1;
        #   - 1->3 / 3->1;
        #   - 2->3 / 3->2;
        #   - 2->5 / 5->2;
        #   - 3->4 / 4->3.
        self.assertEqual(num_new_primal_edges, 16)
        new_primal_edge_index_list = new_primal_graph_batch.edge_index.t(
        ).tolist()
        for new_primal_edge in [[0, 1], [0, 4], [0, 5], [1, 2], [1, 3], [2, 3],
                                [2, 5], [3, 4]]:
            self.assertTrue(new_primal_edge in new_primal_edge_index_list)
            self.assertTrue(new_primal_edge[::-1] in new_primal_edge_index_list)
            # Check that opposite primal edges are not associated to the same
            # dual node (configuration with double dual nodes).
            self.assertNotEqual(new_petdni_batch[tuple(new_primal_edge)],
                                new_petdni_batch[tuple(new_primal_edge[::-1])])
        # Tests on the new dual graph.
        num_new_dual_nodes = maybe_num_nodes(new_dual_graph_batch.edge_index)
        num_new_dual_edges = new_dual_graph_batch.num_edges
        self.assertEqual(num_new_dual_nodes, num_new_primal_edges)
        # - Check that in case the border between two new face clusters is made
        #   of multiple edges of the original mesh, the dual feature associated
        #   to the new primal edge is the average of the dual features
        #   associated with the 'multiple edges of the original mesh'. This
        #   happens between new primal nodes 0--1, 0--5, 2--3 and 3--4, in both
        #   directions.
        # - New (directed) primal edge 0->1 corresponds to old (directed)
        #   primal edges 0->1 and 6->5.
        idx_new_dual_node = new_petdni_batch[(0, 1)]
        idx_old_dual_node_1 = petdni_batch[(0, 1)]
        idx_old_dual_node_2 = petdni_batch[(6, 5)]
        self.assertAlmostEqual(
            new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
            dual_graph_batch.x[[idx_old_dual_node_1, idx_old_dual_node_2],
                               0].mean().item(), 5)
        # - New (directed) primal edge 1->0 corresponds to old (directed)
        #   primal edges 1->0 and 5->6.
        idx_new_dual_node = new_petdni_batch[(1, 0)]
        idx_old_dual_node_1 = petdni_batch[(1, 0)]
        idx_old_dual_node_2 = petdni_batch[(5, 6)]
        self.assertAlmostEqual(
            new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
            dual_graph_batch.x[[idx_old_dual_node_1, idx_old_dual_node_2],
                               0].mean().item(), 5)
        # - New (directed) primal edge 0->5 corresponds to old (directed)
        #   primal edges 6->12 and 11->12.
        idx_new_dual_node = new_petdni_batch[(0, 5)]
        idx_old_dual_node_1 = petdni_batch[(6, 12)]
        idx_old_dual_node_2 = petdni_batch[(11, 12)]
        # NOTE: this assertion was previously missing, leaving the indices
        # above unused (they were overwritten by the 5->0 case below).
        self.assertAlmostEqual(
            new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
            dual_graph_batch.x[[idx_old_dual_node_1, idx_old_dual_node_2],
                               0].mean().item(), 5)
        # - New (directed) primal edge 5->0 corresponds to old (directed)
        #   primal edges 12->6 and 12->11.
        idx_new_dual_node = new_petdni_batch[(5, 0)]
        idx_old_dual_node_1 = petdni_batch[(12, 6)]
        idx_old_dual_node_2 = petdni_batch[(12, 11)]
        self.assertAlmostEqual(
            new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
            dual_graph_batch.x[[idx_old_dual_node_1, idx_old_dual_node_2],
                               0].mean().item(), 5)
        # - New (directed) primal edge 2->3 corresponds to old (directed)
        #   primal edges 4->3 and 13->8.
        idx_new_dual_node = new_petdni_batch[(2, 3)]
        idx_old_dual_node_1 = petdni_batch[(4, 3)]
        idx_old_dual_node_2 = petdni_batch[(13, 8)]
        self.assertAlmostEqual(
            new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
            dual_graph_batch.x[[idx_old_dual_node_1, idx_old_dual_node_2],
                               0].mean().item(), 5)
        # - New (directed) primal edge 3->2 corresponds to old (directed)
        #   primal edges 3->4 and 8->13.
        idx_new_dual_node = new_petdni_batch[(3, 2)]
        idx_old_dual_node_1 = petdni_batch[(3, 4)]
        idx_old_dual_node_2 = petdni_batch[(8, 13)]
        self.assertAlmostEqual(
            new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
            dual_graph_batch.x[[idx_old_dual_node_1, idx_old_dual_node_2],
                               0].mean().item(), 5)
        # - New (directed) primal edge 3->4 corresponds to old (directed)
        #   primal edges 2->9 and 8->9.
        idx_new_dual_node = new_petdni_batch[(3, 4)]
        idx_old_dual_node_1 = petdni_batch[(2, 9)]
        idx_old_dual_node_2 = petdni_batch[(8, 9)]
        self.assertAlmostEqual(
            new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
            dual_graph_batch.x[[idx_old_dual_node_1, idx_old_dual_node_2],
                               0].mean().item(), 5)
        # - New (directed) primal edge 4->3 corresponds to old (directed)
        #   primal edges 9->2 and 9->8.
        idx_new_dual_node = new_petdni_batch[(4, 3)]
        idx_old_dual_node_1 = petdni_batch[(9, 2)]
        idx_old_dual_node_2 = petdni_batch[(9, 8)]
        self.assertAlmostEqual(
            new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
            dual_graph_batch.x[[idx_old_dual_node_1, idx_old_dual_node_2],
                               0].mean().item(), 5)
        # - For all other cases, check that the dual feature associated to the
        #   new primal edge is the dual feature associated with edge of the
        #   original mesh that is now between the new primal nodes.
        new_dual_nodes = [(0, 4), (1, 2), (1, 3), (2, 5)]
        old_dual_nodes = [(10, 9), (5, 4), (1, 2), (13, 12)]
        for new_dual_node, old_dual_node in zip(new_dual_nodes, old_dual_nodes):
            # 'Forward' edge.
            idx_new_dual_node = new_petdni_batch[new_dual_node]
            idx_old_dual_node = petdni_batch[old_dual_node]
            self.assertAlmostEqual(
                new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
                dual_graph_batch.x[idx_old_dual_node, 0].item(), 5)
            # 'Backward' edge.
            idx_new_dual_node = new_petdni_batch[new_dual_node[::-1]]
            idx_old_dual_node = petdni_batch[old_dual_node[::-1]]
            self.assertAlmostEqual(
                new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
                dual_graph_batch.x[idx_old_dual_node, 0].item(), 5)
        # - Check that the mapping between old and new dual nodes is correct.
        old_dual_node_to_new_one = pooling_log.old_dual_node_to_new_one
        self.assertEqual(len(old_dual_node_to_new_one), num_dual_nodes)
        old_dual_nodes_index_with_corresponding_new_one = [
            petdni_batch[primal_edge]
            for primal_edge in [(0, 1), (1, 2), (2, 9), (3, 4), (4, 5), (
                5, 6), (6, 12), (8, 9), (8, 13), (9, 10), (11, 12), (12, 13)]
        ] + [
            petdni_batch[primal_edge[::-1]]
            for primal_edge in [(0, 1), (1, 2), (2, 9), (3, 4), (4, 5), (
                5, 6), (6, 12), (8, 9), (8, 13), (9, 10), (11, 12), (12, 13)]
        ]
        corresponding_new_dual_nodes = [
            new_petdni_batch[primal_edge]
            for primal_edge in [(0, 1), (1, 3), (3, 4), (3, 2), (2, 1), (1, 0),
                                (0, 5), (3, 4), (3, 2), (4, 0), (0, 5), (5, 2)]
        ] + [
            new_petdni_batch[primal_edge[::-1]]
            for primal_edge in [(0, 1), (1, 3), (3, 4), (3, 2), (2, 1), (1, 0),
                                (0, 5), (3, 4), (3, 2), (4, 0), (0, 5), (5, 2)]
        ]
        for dual_node_idx in range(num_dual_nodes):
            if (dual_node_idx in old_dual_nodes_index_with_corresponding_new_one
               ):
                # - The old dual node has a corresponding new dual node.
                self.assertEqual(
                    old_dual_node_to_new_one[dual_node_idx],
                    corresponding_new_dual_nodes[
                        old_dual_nodes_index_with_corresponding_new_one.index(
                            dual_node_idx)])
            else:
                # - The old dual node has no corresponding new dual node.
                self.assertEqual(old_dual_node_to_new_one[dual_node_idx], -1)
        # - Check the edges between the new dual nodes, which should be the
        #   following (with dual nodes indicated by the corresponding primal
        #   nodes as a set), plus the self-loops:
        #   - (0->1) -> (4->0);
        #   - (0->1) -> (5->0);
        #   - (0->1) -> (1->2);
        #   - (0->1) -> (1->3);
        #   - (1->0) -> (0->4);
        #   - (1->0) -> (0->5);
        #   - (1->0) -> (2->1);
        #   - (1->0) -> (3->1);
        #   - (0->4) -> (1->0);
        #   - (0->4) -> (5->0);
        #   - (0->4) -> (4->3);
        #   - (4->0) -> (0->1);
        #   - (4->0) -> (0->5);
        #   - (4->0) -> (3->4);
        #   - (0->5) -> (1->0);
        #   - (0->5) -> (4->0);
        #   - (0->5) -> (5->2);
        #   - (5->0) -> (0->1);
        #   - (5->0) -> (0->4);
        #   - (5->0) -> (2->5);
        #   - (1->2) -> (0->1);
        #   - (1->2) -> (3->1);
        #   - (1->2) -> (2->3);
        #   - (1->2) -> (2->5);
        #   - (2->1) -> (1->0);
        #   - (2->1) -> (1->3);
        #   - (2->1) -> (3->2);
        #   - (2->1) -> (5->2);
        #   - (1->3) -> (0->1);
        #   - (1->3) -> (2->1);
        #   - (1->3) -> (3->2);
        #   - (1->3) -> (3->4);
        #   - (3->1) -> (1->0);
        #   - (3->1) -> (1->2);
        #   - (3->1) -> (2->3);
        #   - (3->1) -> (4->3);
        #   - (2->3) -> (1->2);
        #   - (2->3) -> (5->2);
        #   - (2->3) -> (3->1);
        #   - (2->3) -> (3->4);
        #   - (3->2) -> (2->1);
        #   - (3->2) -> (2->5);
        #   - (3->2) -> (1->3);
        #   - (3->2) -> (4->3);
        #   - (2->5) -> (1->2);
        #   - (2->5) -> (3->2);
        #   - (2->5) -> (5->0);
        #   - (5->2) -> (2->1);
        #   - (5->2) -> (2->3);
        #   - (5->2) -> (0->5);
        #   - (3->4) -> (1->3);
        #   - (3->4) -> (2->3);
        #   - (3->4) -> (4->0);
        #   - (4->3) -> (3->1);
        #   - (4->3) -> (3->2);
        #   - (4->3) -> (0->4).
        self.assertEqual(num_new_dual_edges, 56 + num_new_dual_nodes)
        new_dual_edge_index_list = new_dual_graph_batch.edge_index.t().tolist()
        dual_node_to_neighbors = {
            (0, 1): [(4, 0), (5, 0), (1, 2), (1, 3)],
            (0, 4): [(1, 0), (5, 0), (4, 3)],
            (0, 5): [(1, 0), (4, 0), (5, 2)],
            (1, 2): [(0, 1), (3, 1), (2, 3), (2, 5)],
            (1, 3): [(0, 1), (2, 1), (3, 2), (3, 4)],
            (2, 3): [(1, 2), (5, 2), (3, 1), (3, 4)],
            (2, 5): [(1, 2), (3, 2), (5, 0)],
            (3, 4): [(1, 3), (2, 3), (4, 0)]
        }
        for new_dual_node, other_dual_nodes in dual_node_to_neighbors.items():
            for other_dual_node in other_dual_nodes:
                self.assertTrue([
                    new_petdni_batch[new_dual_node],
                    new_petdni_batch[other_dual_node]
                ] in new_dual_edge_index_list)
                # 'Opposite' dual node.
                self.assertTrue([
                    new_petdni_batch[new_dual_node[::-1]], new_petdni_batch[
                        other_dual_node[::-1]]
                ] in new_dual_edge_index_list)
            # Self-loop.
            self.assertTrue([
                new_petdni_batch[new_dual_node], new_petdni_batch[new_dual_node]
            ] in new_dual_edge_index_list)
            # Self-loop of 'opposite' dual node.
            self.assertTrue([
                new_petdni_batch[new_dual_node[::-1]], new_petdni_batch[
                    new_dual_node[::-1]]
            ] in new_dual_edge_index_list)
def __test_large_simple_mesh_config_C_with_output_self_loops(
        self,
        num_primal_edges_to_keep=None,
        fraction_primal_edges_to_keep=None,
        primal_att_coeff_threshold=None,
        use_decreasing_attention_coefficient=True,
        num_heads=1):
        """Tests edge pooling on the large simple mesh with dual-graph
        configuration C (double dual nodes, directed dual edges), with
        self-loops added to the output dual graph.

        Attention coefficients are set manually so that a known set of
        primal edges gets pooled; the test then checks the resulting
        primal/dual graphs: node clustering, averaged node features,
        edge connectivity, the old-to-new dual-node mapping, and the
        presence of self-loops in the new dual graph.

        Args:
            num_primal_edges_to_keep: Optional absolute number of primal
                edges to keep; forwarded to the pooling layer.
            fraction_primal_edges_to_keep: Optional fraction of primal
                edges to keep; forwarded to the pooling layer.
            primal_att_coeff_threshold: Optional attention-coefficient
                threshold; when None, 0.5 is used to craft coefficients.
            use_decreasing_attention_coefficient: If True, edges with
                high coefficients get pooled; otherwise the convention is
                inverted (cf. the two coefficient-assignment branches).
            num_heads: Number of attention heads used to shape the
                manually-crafted coefficient tensor.

        Returns:
            None.
        """
        # - Dual-graph configuration C.
        single_dual_nodes = False
        undirected_dual_edges = False
        graph_creator = create_graphs.GraphCreator(
            mesh_filename=osp.join(current_dir,
                                   '../../common_data/simple_mesh_large.ply'),
            single_dual_nodes=single_dual_nodes,
            undirected_dual_edges=undirected_dual_edges,
            primal_features_from_dual_features=False)
        primal_graph, dual_graph = graph_creator.create_graphs()
        petdni = graph_creator.primal_edge_to_dual_node_idx
        (primal_graph_batch, dual_graph_batch,
         petdni_batch) = create_dual_primal_batch(
             primal_graphs_list=[primal_graph],
             dual_graphs_list=[dual_graph],
             primal_edge_to_dual_node_idx_list=[petdni])
        # Primal graph.
        num_primal_edges = primal_graph_batch.num_edges
        num_primal_nodes = maybe_num_nodes(primal_graph_batch.edge_index)
        self.assertEqual(num_primal_edges, 42)
        self.assertEqual(num_primal_nodes, 14)
        # - Check existence of primal edges.
        for edge in [(0, 1), (0, 7), (0, 10), (1, 2), (1, 5), (2, 3), (2, 9),
                     (3, 4), (3, 8), (4, 5), (4, 13), (5, 6), (6, 7), (6, 12),
                     (7, 11), (8, 9), (8, 13), (9, 10), (10, 11), (11, 12),
                     (12, 13)]:
            # Configuration C has double dual nodes.
            self.assertNotEqual(petdni_batch[edge], petdni_batch[edge[::-1]])
        # - Set the features of each primal node randomly.
        dim_primal_features = primal_graph_batch.num_node_features
        for primal_feature in primal_graph_batch.x:
            primal_feature[:] = torch.rand(dim_primal_features,
                                           dtype=torch.float)
        # Dual graph.
        num_dual_edges = dual_graph_batch.num_edges
        num_dual_nodes = maybe_num_nodes(dual_graph_batch.edge_index)
        # - Since the mesh is watertight, the medial graph of the triangulation
        #   is 4-regular, but by definition of dual-graph configuration C each
        #   node in the dual graph has 2 incoming edges and 2 outgoing edges.
        #   However, since there are no self-loops in the dual graph, each
        #   incoming edge for a certain dual node is also an outgoing edge for
        #   another dual node, and the total number of (directed) edges in the
        #   dual graph is 2 times the number of dual nodes.
        self.assertEqual(num_dual_edges, num_dual_nodes * 2)
        self.assertEqual(num_dual_nodes, num_primal_edges)
        # - Set the features of each dual node randomly.
        dim_dual_features = dual_graph_batch.num_node_features
        for dual_feature in dual_graph_batch.x:
            dual_feature[:] = torch.rand(dim_dual_features,
                                         dtype=torch.float) * 3
        # Randomly shuffle the primal edge-index matrix.
        permutation = np.random.permutation(num_primal_edges)
        primal_graph_batch.edge_index = (
            primal_graph_batch.edge_index[:, permutation])
        # Set the attention coefficients manually, so as to pool the following
        # primal edges:
        # - 0->10 / 10->0;
        # - 6->7 / 7->6;
        # - 7->11 / 11->7;
        # - 10->11 / 11->10;
        # - 1->5 / 5->1;
        # - 2->3 / 3->2;
        # - 3->8 / 8->3;
        # - 4->13 / 13->4.
        # (cf. file `../../common_data/simple_mesh_large_pool_1.png`)
        if (primal_att_coeff_threshold is not None):
            attention_threshold = primal_att_coeff_threshold
        else:
            attention_threshold = 0.5
        primal_attention_coeffs = torch.rand(
            [num_primal_edges, num_heads],
            dtype=torch.float) * attention_threshold
        if (use_decreasing_attention_coefficient):
            for edge_idx, primal_edge in enumerate(
                    primal_graph_batch.edge_index.t().tolist()):
                if (sorted(primal_edge) in [[0, 10], [6, 7], [7, 11], [10, 11],
                                            [1, 5], [2, 3], [3, 8], [4, 13]]):
                    primal_attention_coeffs[edge_idx] += (1 -
                                                          attention_threshold)
                elif (primal_edge == [1, 2]):
                    # Further test: set \alpha_{2, 1} = 0.7 > 0.5, but
                    # \alpha_{1, 2} = 0.2, so that
                    # (\alpha_{1, 2} + \alpha_{2, 1}) / 2 = 0.45 < 0.5, and the
                    # edges 1->2 / 2->1 do not get pooled.
                    primal_attention_coeffs[edge_idx] = 0.2
                elif (primal_edge == [2, 1]):
                    primal_attention_coeffs[edge_idx] = 0.7
        else:
            for edge_idx, primal_edge in enumerate(
                    primal_graph_batch.edge_index.t().tolist()):
                if (sorted(primal_edge) not in [[0, 10], [6, 7], [7, 11],
                                                [10, 11], [1, 5], [2, 3],
                                                [3, 8], [4, 13], [1, 2]]):
                    primal_attention_coeffs[edge_idx] += (1 -
                                                          attention_threshold)
                elif (primal_edge == [1, 2]):
                    # Further test: set \alpha_{1, 2} = 0.4 < 0.5, but
                    # \alpha_{2, 1} = 0.7, so that
                    # (\alpha_{1, 2} + \alpha_{2, 1}) / 2 = 0.55 > 0.5, and the
                    # edges 1->2 / 2->1 do not get pooled.
                    primal_attention_coeffs[edge_idx] = 0.4
                elif (primal_edge == [2, 1]):
                    primal_attention_coeffs[edge_idx] = 0.7
        # Create a single dual-primal edge-pooling layer.
        pool = DualPrimalEdgePooling(
            self_loops_in_output_dual_graph=True,
            single_dual_nodes=single_dual_nodes,
            undirected_dual_edges=undirected_dual_edges,
            num_primal_edges_to_keep=num_primal_edges_to_keep,
            fraction_primal_edges_to_keep=fraction_primal_edges_to_keep,
            primal_att_coeff_threshold=primal_att_coeff_threshold,
            use_decreasing_attention_coefficient=
            use_decreasing_attention_coefficient,
            return_old_dual_node_to_new_dual_node=True)
        # Perform primal-edge pooling.
        (new_primal_graph_batch, new_dual_graph_batch, new_petdni_batch,
         pooling_log) = pool(primal_graph_batch=primal_graph_batch,
                             dual_graph_batch=dual_graph_batch,
                             primal_edge_to_dual_node_idx_batch=petdni_batch,
                             primal_attention_coeffs=primal_attention_coeffs)
        # Tests on the new primal graph.
        num_new_primal_nodes = maybe_num_nodes(
            new_primal_graph_batch.edge_index)
        num_new_primal_edges = new_primal_graph_batch.num_edges
        self.assertEqual(num_new_primal_nodes, 6)
        # - Check correspondence of the old primal nodes with the new primal
        #   nodes (i.e., node clusters).
        old_primal_node_to_new_one = pooling_log.old_primal_node_to_new_one
        for old_primal_node in range(num_primal_nodes):
            if (old_primal_node in [0, 6, 7, 10, 11]):
                self.assertEqual(old_primal_node_to_new_one[old_primal_node], 0)
            elif (old_primal_node in [1, 5]):
                self.assertEqual(old_primal_node_to_new_one[old_primal_node], 1)
            elif (old_primal_node in [4, 13]):
                self.assertEqual(old_primal_node_to_new_one[old_primal_node], 2)
            elif (old_primal_node in [2, 3, 8]):
                self.assertEqual(old_primal_node_to_new_one[old_primal_node], 3)
            elif (old_primal_node == 9):
                self.assertEqual(old_primal_node_to_new_one[old_primal_node], 4)
            elif (old_primal_node == 12):
                self.assertEqual(old_primal_node_to_new_one[old_primal_node], 5)
        # - Check that the features of each new primal node correspond to the
        #   average of the features of the primal nodes merged together into
        #   that node.
        for new_primal_node in range(num_new_primal_nodes):
            old_primal_nodes_per_new_primal_node = [[0, 6, 7, 10, 11], [1, 5],
                                                    [4, 13], [2, 3, 8], 9, 12]
            old_primal_nodes = old_primal_nodes_per_new_primal_node[
                new_primal_node]
            self.assertAlmostEqual(
                new_primal_graph_batch.x[new_primal_node, 0].item(),
                primal_graph_batch.x[old_primal_nodes, 0].mean().item(), 5)
        # - Check the edges between the new primal nodes, which should be the
        #   following:
        #   - 0->1 / 1->0;
        #   - 0->4 / 4->0;
        #   - 0->5 / 5->0;
        #   - 1->2 / 2->1;
        #   - 1->3 / 3->1;
        #   - 2->3 / 3->2;
        #   - 2->5 / 5->2;
        #   - 3->4 / 4->3.
        self.assertEqual(num_new_primal_edges, 16)
        new_primal_edge_index_list = new_primal_graph_batch.edge_index.t(
        ).tolist()
        for new_primal_edge in [[0, 1], [0, 4], [0, 5], [1, 2], [1, 3], [2, 3],
                                [2, 5], [3, 4]]:
            self.assertTrue(new_primal_edge in new_primal_edge_index_list)
            self.assertTrue(new_primal_edge[::-1] in new_primal_edge_index_list)
            # Check that opposite primal edges are not associated to the same
            # dual node (configuration with double dual nodes).
            self.assertNotEqual(new_petdni_batch[tuple(new_primal_edge)],
                                new_petdni_batch[tuple(new_primal_edge[::-1])])
        # Tests on the new dual graph.
        num_new_dual_nodes = maybe_num_nodes(new_dual_graph_batch.edge_index)
        num_new_dual_edges = new_dual_graph_batch.num_edges
        self.assertEqual(num_new_dual_nodes, num_new_primal_edges)
        # - Check that in case the border between two new face clusters is made
        #   of multiple edges of the original mesh, the dual feature associated
        #   to the new primal edge is the average of the dual features
        #   associated with the 'multiple edges of the original mesh'. This
        #   happens between new primal nodes 0--1, 0--5, 2--3 and 3--4, in both
        #   directions.
        # - New (directed) primal edge 0->1 corresponds to old (directed)
        #   primal edges 0->1 and 6->5.
        idx_new_dual_node = new_petdni_batch[(0, 1)]
        idx_old_dual_node_1 = petdni_batch[(0, 1)]
        idx_old_dual_node_2 = petdni_batch[(6, 5)]
        self.assertAlmostEqual(
            new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
            dual_graph_batch.x[[idx_old_dual_node_1, idx_old_dual_node_2],
                               0].mean().item(), 5)
        # - New (directed) primal edge 1->0 corresponds to old (directed)
        #   primal edges 1->0 and 5->6.
        idx_new_dual_node = new_petdni_batch[(1, 0)]
        idx_old_dual_node_1 = petdni_batch[(1, 0)]
        idx_old_dual_node_2 = petdni_batch[(5, 6)]
        self.assertAlmostEqual(
            new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
            dual_graph_batch.x[[idx_old_dual_node_1, idx_old_dual_node_2],
                               0].mean().item(), 5)
        # - New (directed) primal edge 0->5 corresponds to old (directed)
        #   primal edges 6->12 and 11->12.
        idx_new_dual_node = new_petdni_batch[(0, 5)]
        idx_old_dual_node_1 = petdni_batch[(6, 12)]
        idx_old_dual_node_2 = petdni_batch[(11, 12)]
        # NOTE: this assertion was previously missing, leaving the indices
        # above unused (they were overwritten by the 5->0 case below).
        self.assertAlmostEqual(
            new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
            dual_graph_batch.x[[idx_old_dual_node_1, idx_old_dual_node_2],
                               0].mean().item(), 5)
        # - New (directed) primal edge 5->0 corresponds to old (directed)
        #   primal edges 12->6 and 12->11.
        idx_new_dual_node = new_petdni_batch[(5, 0)]
        idx_old_dual_node_1 = petdni_batch[(12, 6)]
        idx_old_dual_node_2 = petdni_batch[(12, 11)]
        self.assertAlmostEqual(
            new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
            dual_graph_batch.x[[idx_old_dual_node_1, idx_old_dual_node_2],
                               0].mean().item(), 5)
        # - New (directed) primal edge 2->3 corresponds to old (directed)
        #   primal edges 4->3 and 13->8.
        idx_new_dual_node = new_petdni_batch[(2, 3)]
        idx_old_dual_node_1 = petdni_batch[(4, 3)]
        idx_old_dual_node_2 = petdni_batch[(13, 8)]
        self.assertAlmostEqual(
            new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
            dual_graph_batch.x[[idx_old_dual_node_1, idx_old_dual_node_2],
                               0].mean().item(), 5)
        # - New (directed) primal edge 3->2 corresponds to old (directed)
        #   primal edges 3->4 and 8->13.
        idx_new_dual_node = new_petdni_batch[(3, 2)]
        idx_old_dual_node_1 = petdni_batch[(3, 4)]
        idx_old_dual_node_2 = petdni_batch[(8, 13)]
        self.assertAlmostEqual(
            new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
            dual_graph_batch.x[[idx_old_dual_node_1, idx_old_dual_node_2],
                               0].mean().item(), 5)
        # - New (directed) primal edge 3->4 corresponds to old (directed)
        #   primal edges 2->9 and 8->9.
        idx_new_dual_node = new_petdni_batch[(3, 4)]
        idx_old_dual_node_1 = petdni_batch[(2, 9)]
        idx_old_dual_node_2 = petdni_batch[(8, 9)]
        self.assertAlmostEqual(
            new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
            dual_graph_batch.x[[idx_old_dual_node_1, idx_old_dual_node_2],
                               0].mean().item(), 5)
        # - New (directed) primal edge 4->3 corresponds to old (directed)
        #   primal edges 9->2 and 9->8.
        idx_new_dual_node = new_petdni_batch[(4, 3)]
        idx_old_dual_node_1 = petdni_batch[(9, 2)]
        idx_old_dual_node_2 = petdni_batch[(9, 8)]
        self.assertAlmostEqual(
            new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
            dual_graph_batch.x[[idx_old_dual_node_1, idx_old_dual_node_2],
                               0].mean().item(), 5)
        # - For all other cases, check that the dual feature associated to the
        #   new primal edge is the dual feature associated with edge of the
        #   original mesh that is now between the new primal nodes.
        new_dual_nodes = [(0, 4), (1, 2), (1, 3), (2, 5)]
        old_dual_nodes = [(10, 9), (5, 4), (1, 2), (13, 12)]
        for new_dual_node, old_dual_node in zip(new_dual_nodes, old_dual_nodes):
            # 'Forward' edge.
            idx_new_dual_node = new_petdni_batch[new_dual_node]
            idx_old_dual_node = petdni_batch[old_dual_node]
            self.assertAlmostEqual(
                new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
                dual_graph_batch.x[idx_old_dual_node, 0].item(), 5)
            # 'Backward' edge.
            idx_new_dual_node = new_petdni_batch[new_dual_node[::-1]]
            idx_old_dual_node = petdni_batch[old_dual_node[::-1]]
            self.assertAlmostEqual(
                new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
                dual_graph_batch.x[idx_old_dual_node, 0].item(), 5)
        # - Check that the mapping between old and new dual nodes is correct.
        old_dual_node_to_new_one = pooling_log.old_dual_node_to_new_one
        self.assertEqual(len(old_dual_node_to_new_one), num_dual_nodes)
        old_dual_nodes_index_with_corresponding_new_one = [
            petdni_batch[primal_edge]
            for primal_edge in [(0, 1), (1, 2), (2, 9), (3, 4), (4, 5), (
                5, 6), (6, 12), (8, 9), (8, 13), (9, 10), (11, 12), (12, 13)]
        ] + [
            petdni_batch[primal_edge[::-1]]
            for primal_edge in [(0, 1), (1, 2), (2, 9), (3, 4), (4, 5), (
                5, 6), (6, 12), (8, 9), (8, 13), (9, 10), (11, 12), (12, 13)]
        ]
        corresponding_new_dual_nodes = [
            new_petdni_batch[primal_edge]
            for primal_edge in [(0, 1), (1, 3), (3, 4), (3, 2), (2, 1), (1, 0),
                                (0, 5), (3, 4), (3, 2), (4, 0), (0, 5), (5, 2)]
        ] + [
            new_petdni_batch[primal_edge[::-1]]
            for primal_edge in [(0, 1), (1, 3), (3, 4), (3, 2), (2, 1), (1, 0),
                                (0, 5), (3, 4), (3, 2), (4, 0), (0, 5), (5, 2)]
        ]
        for dual_node_idx in range(num_dual_nodes):
            if (dual_node_idx in old_dual_nodes_index_with_corresponding_new_one
               ):
                # - The old dual node has a corresponding new dual node.
                self.assertEqual(
                    old_dual_node_to_new_one[dual_node_idx],
                    corresponding_new_dual_nodes[
                        old_dual_nodes_index_with_corresponding_new_one.index(
                            dual_node_idx)])
            else:
                # - The old dual node has no corresponding new dual node.
                self.assertEqual(old_dual_node_to_new_one[dual_node_idx], -1)
        # - Check the edges between the new dual nodes, which should be the
        #   following (with dual nodes indicated by the corresponding primal
        #   nodes as a set), plus the self-loops:
        #   - (0->1) -> (1->2);
        #   - (0->1) -> (1->3);
        #   - (1->0) -> (0->4);
        #   - (1->0) -> (0->5);
        #   - (0->4) -> (4->3);
        #   - (4->0) -> (0->1);
        #   - (4->0) -> (0->5);
        #   - (0->5) -> (5->2);
        #   - (5->0) -> (0->1);
        #   - (5->0) -> (0->4);
        #   - (1->2) -> (2->3);
        #   - (1->2) -> (2->5);
        #   - (2->1) -> (1->0);
        #   - (2->1) -> (1->3);
        #   - (1->3) -> (3->2);
        #   - (1->3) -> (3->4);
        #   - (3->1) -> (1->0);
        #   - (3->1) -> (1->2);
        #   - (2->3) -> (3->1);
        #   - (2->3) -> (3->4);
        #   - (3->2) -> (2->1);
        #   - (3->2) -> (2->5);
        #   - (2->5) -> (5->0);
        #   - (5->2) -> (2->1);
        #   - (5->2) -> (2->3);
        #   - (3->4) -> (4->0);
        #   - (4->3) -> (3->1);
        #   - (4->3) -> (3->2);
        self.assertEqual(num_new_dual_edges, 28 + num_new_dual_nodes)
        new_dual_edge_index_list = new_dual_graph_batch.edge_index.t().tolist()
        dual_node_to_neighbors = {
            (0, 1): [(1, 2), (1, 3)],
            (1, 0): [(0, 4), (0, 5)],
            (0, 4): [(4, 3)],
            (4, 0): [(0, 1), (0, 5)],
            (0, 5): [(5, 2)],
            (5, 0): [(0, 1), (0, 4)],
            (1, 2): [(2, 3), (2, 5)],
            (2, 1): [(1, 0), (1, 3)],
            (1, 3): [(3, 2), (3, 4)],
            (3, 1): [(1, 0), (1, 2)],
            (2, 3): [(3, 1), (3, 4)],
            (3, 2): [(2, 1), (2, 5)],
            (2, 5): [(5, 0)],
            (5, 2): [(2, 1), (2, 3)],
            (3, 4): [(4, 0)],
            (4, 3): [(3, 1), (3, 2)]
        }
        for new_dual_node, other_dual_nodes in dual_node_to_neighbors.items():
            for other_dual_node in other_dual_nodes:
                self.assertTrue([
                    new_petdni_batch[new_dual_node],
                    new_petdni_batch[other_dual_node]
                ] in new_dual_edge_index_list)
            # Self-loop.
            self.assertTrue([
                new_petdni_batch[new_dual_node], new_petdni_batch[new_dual_node]
            ] in new_dual_edge_index_list)
# * Allow only non-consecutive edges.
def __test_config_A_no_output_self_loops_nonconsecutive(
self, use_decreasing_attention_coefficient=True, num_heads=1):
# - Dual-graph configuration A.
single_dual_nodes = True
undirected_dual_edges = True
graph_creator = create_graphs.GraphCreator(
mesh_filename=osp.join(current_dir,
'../../common_data/simple_mesh_large.ply'),
single_dual_nodes=single_dual_nodes,
undirected_dual_edges=undirected_dual_edges,
primal_features_from_dual_features=False)
primal_graph, dual_graph = graph_creator.create_graphs()
petdni = graph_creator.primal_edge_to_dual_node_idx
(primal_graph_batch, dual_graph_batch,
petdni_batch) = create_dual_primal_batch(
primal_graphs_list=[primal_graph],
dual_graphs_list=[dual_graph],
primal_edge_to_dual_node_idx_list=[petdni])
# Primal graph.
num_primal_edges = primal_graph_batch.num_edges
num_primal_nodes = maybe_num_nodes(primal_graph_batch.edge_index)
self.assertEqual(num_primal_edges, 42)
self.assertEqual(num_primal_nodes, 14)
# - Check existence of primal edges.
for edge in [(0, 1), (0, 7), (0, 10), (1, 2), (1, 5), (2, 3), (2, 9),
(3, 4), (3, 8), (4, 5), (4, 13), (5, 6), (6, 7), (6, 12),
(7, 11), (8, 9), (8, 13), (9, 10), (10, 11), (11, 12),
(12, 13)]:
self.assertEqual(petdni_batch[edge], petdni_batch[edge[::-1]])
# - Set the features of each primal node randomly.
dim_primal_features = primal_graph_batch.num_node_features
for primal_feature in primal_graph_batch.x:
primal_feature[:] = torch.rand(dim_primal_features,
dtype=torch.float)
# Dual graph.
num_dual_edges = dual_graph_batch.num_edges
num_dual_nodes = maybe_num_nodes(dual_graph_batch.edge_index)
# - Since the mesh is watertight, the medial graph of the triangulation
# is 4-regular, hence each node in the dual graph has 4 incoming edges
# and 4 outgoing edges. However, since there are no self-loops in the
# dual graph, each incoming edge for a certain dual node is also an
# outgoing edge for another dual node, and the total number of
# (directed) edges in the dual graph is 4 times the number of dual
# nodes.
self.assertEqual(num_dual_edges, num_dual_nodes * 4)
self.assertEqual(num_dual_nodes, num_primal_edges // 2)
# - Set the features of each dual node randomly.
dim_dual_features = dual_graph_batch.num_node_features
for dual_feature in dual_graph_batch.x:
dual_feature[:] = torch.rand(dim_dual_features,
dtype=torch.float) * 3
# Randomly shuffle the primal edge-index matrix.
permutation = np.random.permutation(num_primal_edges)
primal_graph_batch.edge_index = (
primal_graph_batch.edge_index[:, permutation])
# Set the attention coefficients manually, so that the primal edges have
# associated attention coefficients in this order:
# - 4->13 / 13->4;
# - 10->11 / 11->10;
# - 0->10 / 10->0 [not pooled, because 10->11 / 11->10 was pooled];
# - 2->3 / 3->2;
# - 3->8 / 8->3 [not pooled, because 2->3 / 3->2 was pooled];
# - 6->7 / 7->6;
# - 1->5 / 5->1;
# - 7->11 / 11->7 [not pooled, because 10->11 / 11->10 and 6->7 / 7->6
# were pooled];
# - 1->2 / 2->1 [not pooled, because 2->3 / 3->2 and 1->5 / 5->1 were
# pooled];
# - 8->9 / 9->8;
# - ... [other edges that are not pooled]
# (cf. file `../../common_data/simple_mesh_large_pool_2.png`)
attention_threshold = 0.5
edges_to_pool = [[8, 9], [1, 2], [7, 11], [1, 5], [6, 7], [3, 8],
[2, 3], [0, 10], [10, 11], [4, 13]]
if (use_decreasing_attention_coefficient):
primal_attention_coeffs = torch.rand(
[num_primal_edges, num_heads],
dtype=torch.float) * attention_threshold
for edge_idx, primal_edge in enumerate(
primal_graph_batch.edge_index.t().tolist()):
if (sorted(primal_edge) in edges_to_pool):
pooling_idx = edges_to_pool.index(sorted(primal_edge))
primal_attention_coeffs[edge_idx] = attention_threshold + (
1 - attention_threshold) * (
float(pooling_idx) / len(edges_to_pool) +
torch.rand([num_heads], dtype=torch.float) * 1. /
len(edges_to_pool))
else:
primal_attention_coeffs = attention_threshold + torch.rand(
[num_primal_edges, num_heads],
dtype=torch.float) * (1 - attention_threshold)
for edge_idx, primal_edge in enumerate(
primal_graph_batch.edge_index.t().tolist()):
if (sorted(primal_edge) in edges_to_pool):
pooling_idx = edges_to_pool.index(sorted(primal_edge))
primal_attention_coeffs[edge_idx] = (
attention_threshold - attention_threshold *
(float(pooling_idx) / len(edges_to_pool) +
torch.rand([num_heads], dtype=torch.float) * 1. /
len(edges_to_pool)))
# Create a single dual-primal edge-pooling layer.
pool = DualPrimalEdgePooling(
self_loops_in_output_dual_graph=False,
single_dual_nodes=single_dual_nodes,
undirected_dual_edges=undirected_dual_edges,
num_primal_edges_to_keep=15,
use_decreasing_attention_coefficient=
use_decreasing_attention_coefficient,
allow_pooling_consecutive_edges=False,
return_old_dual_node_to_new_dual_node=True)
# Perform primal-edge pooling.
(new_primal_graph_batch, new_dual_graph_batch, new_petdni_batch,
pooling_log) = pool(primal_graph_batch=primal_graph_batch,
dual_graph_batch=dual_graph_batch,
primal_edge_to_dual_node_idx_batch=petdni_batch,
primal_attention_coeffs=primal_attention_coeffs)
# Tests on the new primal graph.
num_new_primal_nodes = maybe_num_nodes(
new_primal_graph_batch.edge_index)
num_new_primal_edges = new_primal_graph_batch.num_edges
self.assertEqual(num_new_primal_nodes, 8)
# - Check correspondence of the old primal nodes with the new primal
# nodes (i.e., node clusters).
old_primal_node_to_new_one = pooling_log.old_primal_node_to_new_one
for old_primal_node in range(num_primal_nodes):
if (old_primal_node in [0]):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 0)
elif (old_primal_node in [1, 5]):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 1)
elif (old_primal_node in [2, 3]):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 2)
elif (old_primal_node in [4, 13]):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 3)
elif (old_primal_node in [6, 7]):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 4)
elif (old_primal_node in [8, 9]):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 5)
elif (old_primal_node in [10, 11]):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 6)
elif (old_primal_node == 12):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 7)
# - Check that the features of each new primal node correspond to the
# average of the features of the primal nodes merged together into
# that node.
for new_primal_node in range(num_new_primal_nodes):
old_primal_nodes_per_new_primal_node = [
0, [1, 5], [2, 3], [4, 13], [6, 7], [8, 9], [10, 11], 12
]
old_primal_nodes = old_primal_nodes_per_new_primal_node[
new_primal_node]
self.assertAlmostEqual(
new_primal_graph_batch.x[new_primal_node, 0].item(),
primal_graph_batch.x[old_primal_nodes, 0].mean().item(), 5)
# - Check the edges between the new primal nodes, which should be the
# following:
# - 0->1 / 1->0;
# - 0->4 / 4->0;
# - 0->6 / 6->0;
# - 1->2 / 2->1;
# - 1->3 / 3->1;
# - 1->4 / 4->1;
# - 2->3 / 3->2;
# - 2->5 / 5->2;
# - 3->5 / 5->3;
# - 3->7 / 7->3;
# - 4->6 / 6->4;
# - 4->7 / 7->4;
# - 5->6 / 6->5;
# - 6->7 / 7->6.
self.assertEqual(num_new_primal_edges, 28)
new_primal_edge_index_list = new_primal_graph_batch.edge_index.t(
).tolist()
for new_primal_edge in [[0, 1], [0, 4], [0, 6], [1, 2], [1, 3], [1, 4],
[2, 3], [2, 5], [3, 5], [3, 7], [4, 6], [4, 7],
[5, 6], [6, 7]]:
self.assertTrue(new_primal_edge in new_primal_edge_index_list)
self.assertTrue(new_primal_edge[::-1] in new_primal_edge_index_list)
# Check that opposite primal edges are associated to the same dual
# node.
self.assertEqual(new_petdni_batch[tuple(new_primal_edge)],
new_petdni_batch[tuple(new_primal_edge[::-1])])
# Tests on the new dual graph.
num_new_dual_nodes = maybe_num_nodes(new_dual_graph_batch.edge_index)
num_new_dual_edges = new_dual_graph_batch.num_edges
self.assertEqual(num_new_dual_nodes, num_new_primal_edges // 2)
# - Check that in case the border between two new face clusters is made
# of multiple edges of the original mesh, the dual feature associated
# to the new primal edge is the average of the dual features
# associated with the 'multiple edges of the original mesh'. This
# happens between new primal nodes 2--5.
# - New (directed) primal edge 2->5 corresponds to old (directed)
# primal edges 2->9 and 3->8.
idx_new_dual_node = new_petdni_batch[(2, 5)]
idx_old_dual_node_1 = petdni_batch[(2, 9)]
idx_old_dual_node_2 = petdni_batch[(3, 8)]
self.assertAlmostEqual(
new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
dual_graph_batch.x[[idx_old_dual_node_1, idx_old_dual_node_2],
0].mean().item(), 5)
# - For all other cases, check that the dual feature associated to the
# new primal edge is the dual feature associated with edge of the
# original mesh that is now between the new primal nodes.
new_dual_nodes = [(0, 1), (0, 4), (0, 6), (1, 2), (1, 3), (1, 4),
(2, 3), (3, 5), (3, 7), (4, 6), (4, 7), (5, 6),
(6, 7)]
old_dual_nodes = [(0, 1), (0, 7), (0, 10), (1, 2), (4, 5), (5, 6),
(3, 4), (8, 13), (12, 13), (7, 11), (6, 12), (9, 10),
(11, 12)]
for new_dual_node, old_dual_node in zip(new_dual_nodes, old_dual_nodes):
idx_new_dual_node = new_petdni_batch[new_dual_node]
idx_old_dual_node = petdni_batch[old_dual_node]
self.assertAlmostEqual(
new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
dual_graph_batch.x[idx_old_dual_node, 0].item(), 5)
# - Check that the mapping between old and new dual nodes is correct.
old_dual_node_to_new_one = pooling_log.old_dual_node_to_new_one
self.assertEqual(len(old_dual_node_to_new_one), num_dual_nodes)
old_dual_nodes_index_with_corresponding_new_one = [
petdni_batch[primal_edge]
for primal_edge in [(0, 1), (0, 7), (0, 10), (1, 2), (2, 9), (
3, 4), (3, 8), (4,
5), (5,
6), (6,
12), (7,
11), (8,
13), (9,
10), (11,
12), (12, 13)]
]
corresponding_new_dual_nodes = [
new_petdni_batch[primal_edge]
for primal_edge in [(0, 1), (0, 4), (0, 6), (1, 2), (2, 5), (
2, 3), (2, 5), (1, 3), (1, 4), (4, 7), (4,
6), (3,
5), (5,
6), (6,
7), (3,
7)]
]
for dual_node_idx in range(num_dual_nodes):
if (dual_node_idx in old_dual_nodes_index_with_corresponding_new_one
):
# - The old dual node has a corresponding new dual node.
self.assertEqual(
old_dual_node_to_new_one[dual_node_idx],
corresponding_new_dual_nodes[
old_dual_nodes_index_with_corresponding_new_one.index(
dual_node_idx)])
else:
# - The old dual node has no corresponding new dual node.
self.assertEqual(old_dual_node_to_new_one[dual_node_idx], -1)
# - Check the edges between the new dual nodes, which should be the
# following (with dual nodes indicated by the corresponding primal
# nodes as a set):
# - {0, 1} -> {0, 4};
# - {0, 1} -> {0, 6};
# - {0, 1} -> {1, 2};
# - {0, 1} -> {1, 3};
# - {0, 1} -> {1, 4};
# - {0, 4} -> {0, 1};
# - {0, 4} -> {0, 6};
# - {0, 4} -> {1, 4};
# - {0, 4} -> {4, 6};
# - {0, 4} -> {4, 7};
# - {0, 6} -> {0, 1};
# - {0, 6} -> {0, 4};
# - {0, 6} -> {4, 6};
# - {0, 6} -> {5, 6};
# - {0, 6} -> {6, 7};
# - {1, 2} -> {0, 1};
# - {1, 2} -> {1, 3};
# - {1, 2} -> {1, 4};
# - {1, 2} -> {2, 3};
# - {1, 2} -> {2, 5};
# - {1, 3} -> {0, 1};
# - {1, 3} -> {1, 2};
# - {1, 3} -> {1, 4};
# - {1, 3} -> {2, 3};
# - {1, 3} -> {3, 5};
# - {1, 3} -> {3, 7};
# - {1, 4} -> {0, 1};
# - {1, 4} -> {0, 4};
# - {1, 4} -> {1, 2};
# - {1, 4} -> {1, 3};
# - {1, 4} -> {4, 6};
# - {1, 4} -> {4, 7};
# - {2, 3} -> {1, 2};
# - {2, 3} -> {1, 3};
# - {2, 3} -> {2, 5};
# - {2, 3} -> {3, 5};
# - {2, 3} -> {3, 7};
# - {2, 5} -> {1, 2};
# - {2, 5} -> {2, 3};
# - {2, 5} -> {3, 5};
# - {2, 5} -> {5, 6};
# - {3, 5} -> {1, 3};
# - {3, 5} -> {2, 3};
# - {3, 5} -> {2, 5};
# - {3, 5} -> {3, 7};
# - {3, 5} -> {5, 6};
# - {3, 7} -> {1, 3};
# - {3, 7} -> {2, 3};
# - {3, 7} -> {3, 5};
# - {3, 7} -> {4, 7};
# - {3, 7} -> {6, 7};
# - {4, 6} -> {0, 4};
# - {4, 6} -> {0, 6};
# - {4, 6} -> {1, 4};
# - {4, 6} -> {4, 7};
# - {4, 6} -> {5, 6};
# - {4, 6} -> {6, 7};
# - {4, 7} -> {0, 4};
# - {4, 7} -> {1, 4};
# - {4, 7} -> {3, 7};
# - {4, 7} -> {4, 6};
# - {4, 7} -> {6, 7};
# - {5, 6} -> {0, 6};
# - {5, 6} -> {2, 5};
# - {5, 6} -> {3, 5};
# - {5, 6} -> {4, 6};
# - {5, 6} -> {6, 7};
# - {6, 7} -> {0, 6};
# - {6, 7} -> {3, 7};
# - {6, 7} -> {4, 6};
# - {6, 7} -> {4, 7};
# - {6, 7} -> {5, 6}.
self.assertEqual(num_new_dual_edges, 72)
new_dual_edge_index_list = new_dual_graph_batch.edge_index.t().tolist()
dual_node_1 = (0, 1)
other_dual_nodes = [(0, 4), (0, 6), (1, 2), (1, 3), (1, 4)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[other_dual_node], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
dual_node_1 = (0, 4)
other_dual_nodes = [(0, 1), (0, 6), (1, 4), (4, 6), (4, 7)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[other_dual_node], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
dual_node_1 = (0, 6)
other_dual_nodes = [(0, 1), (0, 4), (4, 6), (5, 6), (6, 7)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[other_dual_node], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
dual_node_1 = (1, 2)
other_dual_nodes = [(0, 1), (1, 3), (1, 4), (2, 3), (2, 5)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[other_dual_node], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
dual_node_1 = (1, 3)
other_dual_nodes = [(0, 1), (1, 2), (1, 4), (2, 3), (3, 5), (3, 7)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[other_dual_node], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
dual_node_1 = (1, 4)
other_dual_nodes = [(0, 1), (0, 4), (1, 2), (1, 3), (4, 6), (4, 7)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[other_dual_node], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
dual_node_1 = (2, 3)
other_dual_nodes = [(1, 2), (1, 3), (2, 5), (3, 5), (3, 7)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[other_dual_node], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
dual_node_1 = (2, 5)
other_dual_nodes = [(1, 2), (2, 3), (3, 5), (5, 6)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[other_dual_node], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
dual_node_1 = (3, 5)
other_dual_nodes = [(1, 3), (2, 3), (2, 5), (3, 7), (5, 6)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[other_dual_node], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
dual_node_1 = (3, 7)
other_dual_nodes = [(1, 3), (2, 3), (3, 5), (4, 7), (6, 7)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[other_dual_node], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
dual_node_1 = (4, 6)
other_dual_nodes = [(0, 4), (0, 6), (1, 4), (4, 7), (5, 6), (6, 7)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[other_dual_node], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
dual_node_1 = (4, 7)
other_dual_nodes = [(0, 4), (1, 4), (3, 7), (4, 6), (6, 7)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[other_dual_node], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
dual_node_1 = (5, 6)
other_dual_nodes = [(0, 6), (2, 5), (3, 5), (4, 6), (6, 7)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[other_dual_node], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
dual_node_1 = (6, 7)
other_dual_nodes = [(0, 6), (3, 7), (4, 6), (4, 7), (5, 6)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[other_dual_node], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
def __test_config_A_with_output_self_loops_nonconsecutive(
self, use_decreasing_attention_coefficient=True, num_heads=1):
# - Dual-graph configuration A.
single_dual_nodes = True
undirected_dual_edges = True
graph_creator = create_graphs.GraphCreator(
mesh_filename=osp.join(current_dir,
'../../common_data/simple_mesh_large.ply'),
single_dual_nodes=single_dual_nodes,
undirected_dual_edges=undirected_dual_edges,
primal_features_from_dual_features=False)
primal_graph, dual_graph = graph_creator.create_graphs()
petdni = graph_creator.primal_edge_to_dual_node_idx
(primal_graph_batch, dual_graph_batch,
petdni_batch) = create_dual_primal_batch(
primal_graphs_list=[primal_graph],
dual_graphs_list=[dual_graph],
primal_edge_to_dual_node_idx_list=[petdni])
# Primal graph.
num_primal_edges = primal_graph_batch.num_edges
num_primal_nodes = maybe_num_nodes(primal_graph_batch.edge_index)
self.assertEqual(num_primal_edges, 42)
self.assertEqual(num_primal_nodes, 14)
# - Check existence of primal edges.
for edge in [(0, 1), (0, 7), (0, 10), (1, 2), (1, 5), (2, 3), (2, 9),
(3, 4), (3, 8), (4, 5), (4, 13), (5, 6), (6, 7), (6, 12),
(7, 11), (8, 9), (8, 13), (9, 10), (10, 11), (11, 12),
(12, 13)]:
self.assertEqual(petdni_batch[edge], petdni_batch[edge[::-1]])
# - Set the features of each primal node randomly.
dim_primal_features = primal_graph_batch.num_node_features
for primal_feature in primal_graph_batch.x:
primal_feature[:] = torch.rand(dim_primal_features,
dtype=torch.float)
# Dual graph.
num_dual_edges = dual_graph_batch.num_edges
num_dual_nodes = maybe_num_nodes(dual_graph_batch.edge_index)
# - Since the mesh is watertight, the medial graph of the triangulation
# is 4-regular, hence each node in the dual graph has 4 incoming edges
# and 4 outgoing edges. However, since there are no self-loops in the
# dual graph, each incoming edge for a certain dual node is also an
# outgoing edge for another dual node, and the total number of
# (directed) edges in the dual graph is 4 times the number of dual
# nodes.
self.assertEqual(num_dual_edges, num_dual_nodes * 4)
self.assertEqual(num_dual_nodes, num_primal_edges // 2)
# - Set the features of each dual node randomly.
dim_dual_features = dual_graph_batch.num_node_features
for dual_feature in dual_graph_batch.x:
dual_feature[:] = torch.rand(dim_dual_features,
dtype=torch.float) * 3
# Randomly shuffle the primal edge-index matrix.
permutation = np.random.permutation(num_primal_edges)
primal_graph_batch.edge_index = (
primal_graph_batch.edge_index[:, permutation])
# Set the attention coefficients manually, so that the primal edges have
# associated attention coefficients in this order:
# - 4->13 / 13->4;
# - 10->11 / 11->10;
# - 0->10 / 10->0 [not pooled, because 10->11 / 11->10 was pooled];
# - 2->3 / 3->2;
# - 3->8 / 8->3 [not pooled, because 2->3 / 3->2 was pooled];
# - 6->7 / 7->6;
# - 1->5 / 5->1;
# - 7->11 / 11->7 [not pooled, because 10->11 / 11->10 and 6->7 / 7->6
# were pooled];
# - 1->2 / 2->1 [not pooled, because 2->3 / 3->2 and 1->5 / 5->1 were
# pooled];
# - 8->9 / 9->8;
# - ... [other edges that are not pooled]
# (cf. file `../../common_data/simple_mesh_large_pool_2.png`)
attention_threshold = 0.5
edges_to_pool = [[8, 9], [1, 2], [7, 11], [1, 5], [6, 7], [3, 8],
[2, 3], [0, 10], [10, 11], [4, 13]]
if (use_decreasing_attention_coefficient):
primal_attention_coeffs = torch.rand(
[num_primal_edges, num_heads],
dtype=torch.float) * attention_threshold
for edge_idx, primal_edge in enumerate(
primal_graph_batch.edge_index.t().tolist()):
if (sorted(primal_edge) in edges_to_pool):
pooling_idx = edges_to_pool.index(sorted(primal_edge))
primal_attention_coeffs[edge_idx] = attention_threshold + (
1 - attention_threshold) * (
float(pooling_idx) / len(edges_to_pool) +
torch.rand([num_heads], dtype=torch.float) * 1. /
len(edges_to_pool))
else:
primal_attention_coeffs = attention_threshold + torch.rand(
[num_primal_edges, num_heads],
dtype=torch.float) * (1 - attention_threshold)
for edge_idx, primal_edge in enumerate(
primal_graph_batch.edge_index.t().tolist()):
if (sorted(primal_edge) in edges_to_pool):
pooling_idx = edges_to_pool.index(sorted(primal_edge))
primal_attention_coeffs[edge_idx] = (
attention_threshold - attention_threshold *
(float(pooling_idx) / len(edges_to_pool) +
torch.rand([num_heads], dtype=torch.float) * 1. /
len(edges_to_pool)))
# Create a single dual-primal edge-pooling layer.
pool = DualPrimalEdgePooling(
self_loops_in_output_dual_graph=True,
single_dual_nodes=single_dual_nodes,
undirected_dual_edges=undirected_dual_edges,
num_primal_edges_to_keep=15,
use_decreasing_attention_coefficient=
use_decreasing_attention_coefficient,
allow_pooling_consecutive_edges=False,
return_old_dual_node_to_new_dual_node=True)
# Perform primal-edge pooling.
(new_primal_graph_batch, new_dual_graph_batch, new_petdni_batch,
pooling_log) = pool(primal_graph_batch=primal_graph_batch,
dual_graph_batch=dual_graph_batch,
primal_edge_to_dual_node_idx_batch=petdni_batch,
primal_attention_coeffs=primal_attention_coeffs)
# Tests on the new primal graph.
num_new_primal_nodes = maybe_num_nodes(
new_primal_graph_batch.edge_index)
num_new_primal_edges = new_primal_graph_batch.num_edges
self.assertEqual(num_new_primal_nodes, 8)
# - Check correspondence of the old primal nodes with the new primal
# nodes (i.e., node clusters).
old_primal_node_to_new_one = pooling_log.old_primal_node_to_new_one
for old_primal_node in range(num_primal_nodes):
if (old_primal_node in [0]):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 0)
elif (old_primal_node in [1, 5]):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 1)
elif (old_primal_node in [2, 3]):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 2)
elif (old_primal_node in [4, 13]):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 3)
elif (old_primal_node in [6, 7]):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 4)
elif (old_primal_node in [8, 9]):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 5)
elif (old_primal_node in [10, 11]):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 6)
elif (old_primal_node == 12):
self.assertEqual(old_primal_node_to_new_one[old_primal_node], 7)
# - Check that the features of each new primal node correspond to the
# average of the features of the primal nodes merged together into
# that node.
for new_primal_node in range(num_new_primal_nodes):
old_primal_nodes_per_new_primal_node = [
0, [1, 5], [2, 3], [4, 13], [6, 7], [8, 9], [10, 11], 12
]
old_primal_nodes = old_primal_nodes_per_new_primal_node[
new_primal_node]
self.assertAlmostEqual(
new_primal_graph_batch.x[new_primal_node, 0].item(),
primal_graph_batch.x[old_primal_nodes, 0].mean().item(), 5)
# - Check the edges between the new primal nodes, which should be the
# following:
# - 0->1 / 1->0;
# - 0->4 / 4->0;
# - 0->6 / 6->0;
# - 1->2 / 2->1;
# - 1->3 / 3->1;
# - 1->4 / 4->1;
# - 2->3 / 3->2;
# - 2->5 / 5->2;
# - 3->5 / 5->3;
# - 3->7 / 7->3;
# - 4->6 / 6->4;
# - 4->7 / 7->4;
# - 5->6 / 6->5;
# - 6->7 / 7->6.
self.assertEqual(num_new_primal_edges, 28)
new_primal_edge_index_list = new_primal_graph_batch.edge_index.t(
).tolist()
for new_primal_edge in [[0, 1], [0, 4], [0, 6], [1, 2], [1, 3], [1, 4],
[2, 3], [2, 5], [3, 5], [3, 7], [4, 6], [4, 7],
[5, 6], [6, 7]]:
self.assertTrue(new_primal_edge in new_primal_edge_index_list)
self.assertTrue(new_primal_edge[::-1] in new_primal_edge_index_list)
# Check that opposite primal edges are associated to the same dual
# node.
self.assertEqual(new_petdni_batch[tuple(new_primal_edge)],
new_petdni_batch[tuple(new_primal_edge[::-1])])
# Tests on the new dual graph.
num_new_dual_nodes = maybe_num_nodes(new_dual_graph_batch.edge_index)
num_new_dual_edges = new_dual_graph_batch.num_edges
self.assertEqual(num_new_dual_nodes, num_new_primal_edges // 2)
# - Check that in case the border between two new face clusters is made
# of multiple edges of the original mesh, the dual feature associated
# to the new primal edge is the average of the dual features
# associated with the 'multiple edges of the original mesh'. This
# happens between new primal nodes 2--5.
# - New (directed) primal edge 2->5 corresponds to old (directed)
# primal edges 2->9 and 3->8.
idx_new_dual_node = new_petdni_batch[(2, 5)]
idx_old_dual_node_1 = petdni_batch[(2, 9)]
idx_old_dual_node_2 = petdni_batch[(3, 8)]
self.assertAlmostEqual(
new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
dual_graph_batch.x[[idx_old_dual_node_1, idx_old_dual_node_2],
0].mean().item(), 5)
# - For all other cases, check that the dual feature associated to the
# new primal edge is the dual feature associated with edge of the
# original mesh that is now between the new primal nodes.
new_dual_nodes = [(0, 1), (0, 4), (0, 6), (1, 2), (1, 3), (1, 4),
(2, 3), (3, 5), (3, 7), (4, 6), (4, 7), (5, 6),
(6, 7)]
old_dual_nodes = [(0, 1), (0, 7), (0, 10), (1, 2), (4, 5), (5, 6),
(3, 4), (8, 13), (12, 13), (7, 11), (6, 12), (9, 10),
(11, 12)]
for new_dual_node, old_dual_node in zip(new_dual_nodes, old_dual_nodes):
idx_new_dual_node = new_petdni_batch[new_dual_node]
idx_old_dual_node = petdni_batch[old_dual_node]
self.assertAlmostEqual(
new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
dual_graph_batch.x[idx_old_dual_node, 0].item(), 5)
# - Check that the mapping between old and new dual nodes is correct.
old_dual_node_to_new_one = pooling_log.old_dual_node_to_new_one
self.assertEqual(len(old_dual_node_to_new_one), num_dual_nodes)
old_dual_nodes_index_with_corresponding_new_one = [
petdni_batch[primal_edge]
for primal_edge in [(0, 1), (0, 7), (0, 10), (1, 2), (2, 9), (
3, 4), (3, 8), (4,
5), (5,
6), (6,
12), (7,
11), (8,
13), (9,
10), (11,
12), (12, 13)]
]
corresponding_new_dual_nodes = [
new_petdni_batch[primal_edge]
for primal_edge in [(0, 1), (0, 4), (0, 6), (1, 2), (2, 5), (
2, 3), (2, 5), (1, 3), (1, 4), (4, 7), (4,
6), (3,
5), (5,
6), (6,
7), (3,
7)]
]
for dual_node_idx in range(num_dual_nodes):
if (dual_node_idx in old_dual_nodes_index_with_corresponding_new_one
):
# - The old dual node has a corresponding new dual node.
self.assertEqual(
old_dual_node_to_new_one[dual_node_idx],
corresponding_new_dual_nodes[
old_dual_nodes_index_with_corresponding_new_one.index(
dual_node_idx)])
else:
# - The old dual node has no corresponding new dual node.
self.assertEqual(old_dual_node_to_new_one[dual_node_idx], -1)
# - Check the edges between the new dual nodes, which should be the
# following (with dual nodes indicated by the corresponding primal
# nodes as a set), plus the self-loops:
# - {0, 1} -> {0, 4};
# - {0, 1} -> {0, 6};
# - {0, 1} -> {1, 2};
# - {0, 1} -> {1, 3};
# - {0, 1} -> {1, 4};
# - {0, 4} -> {0, 1};
# - {0, 4} -> {0, 6};
# - {0, 4} -> {1, 4};
# - {0, 4} -> {4, 6};
# - {0, 4} -> {4, 7};
# - {0, 6} -> {0, 1};
# - {0, 6} -> {0, 4};
# - {0, 6} -> {4, 6};
# - {0, 6} -> {5, 6};
# - {0, 6} -> {6, 7};
# - {1, 2} -> {0, 1};
# - {1, 2} -> {1, 3};
# - {1, 2} -> {1, 4};
# - {1, 2} -> {2, 3};
# - {1, 2} -> {2, 5};
# - {1, 3} -> {0, 1};
# - {1, 3} -> {1, 2};
# - {1, 3} -> {1, 4};
# - {1, 3} -> {2, 3};
# - {1, 3} -> {3, 5};
# - {1, 3} -> {3, 7};
# - {1, 4} -> {0, 1};
# - {1, 4} -> {0, 4};
# - {1, 4} -> {1, 2};
# - {1, 4} -> {1, 3};
# - {1, 4} -> {4, 6};
# - {1, 4} -> {4, 7};
# - {2, 3} -> {1, 2};
# - {2, 3} -> {1, 3};
# - {2, 3} -> {2, 5};
# - {2, 3} -> {3, 5};
# - {2, 3} -> {3, 7};
# - {2, 5} -> {1, 2};
# - {2, 5} -> {2, 3};
# - {2, 5} -> {3, 5};
# - {2, 5} -> {5, 6};
# - {3, 5} -> {1, 3};
# - {3, 5} -> {2, 3};
# - {3, 5} -> {2, 5};
# - {3, 5} -> {3, 7};
# - {3, 5} -> {5, 6};
# - {3, 7} -> {1, 3};
# - {3, 7} -> {2, 3};
# - {3, 7} -> {3, 5};
# - {3, 7} -> {4, 7};
# - {3, 7} -> {6, 7};
# - {4, 6} -> {0, 4};
# - {4, 6} -> {0, 6};
# - {4, 6} -> {1, 4};
# - {4, 6} -> {4, 7};
# - {4, 6} -> {5, 6};
# - {4, 6} -> {6, 7};
# - {4, 7} -> {0, 4};
# - {4, 7} -> {1, 4};
# - {4, 7} -> {3, 7};
# - {4, 7} -> {4, 6};
# - {4, 7} -> {6, 7};
# - {5, 6} -> {0, 6};
# - {5, 6} -> {2, 5};
# - {5, 6} -> {3, 5};
# - {5, 6} -> {4, 6};
# - {5, 6} -> {6, 7};
# - {6, 7} -> {0, 6};
# - {6, 7} -> {3, 7};
# - {6, 7} -> {4, 6};
# - {6, 7} -> {4, 7};
# - {6, 7} -> {5, 6}.
self.assertEqual(num_new_dual_edges, 72 + num_new_dual_nodes)
new_dual_edge_index_list = new_dual_graph_batch.edge_index.t().tolist()
dual_node_1 = (0, 1)
other_dual_nodes = [(0, 4), (0, 6), (1, 2), (1, 3), (1, 4)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[other_dual_node], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
# Self-loop.
self.assertTrue(
[new_petdni_batch[dual_node_1], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
dual_node_1 = (0, 4)
other_dual_nodes = [(0, 1), (0, 6), (1, 4), (4, 6), (4, 7)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[other_dual_node], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
# Self-loop.
self.assertTrue(
[new_petdni_batch[dual_node_1], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
dual_node_1 = (0, 6)
other_dual_nodes = [(0, 1), (0, 4), (4, 6), (5, 6), (6, 7)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[other_dual_node], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
# Self-loop.
self.assertTrue(
[new_petdni_batch[dual_node_1], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
dual_node_1 = (1, 2)
other_dual_nodes = [(0, 1), (1, 3), (1, 4), (2, 3), (2, 5)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[other_dual_node], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
# Self-loop.
self.assertTrue(
[new_petdni_batch[dual_node_1], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
dual_node_1 = (1, 3)
other_dual_nodes = [(0, 1), (1, 2), (1, 4), (2, 3), (3, 5), (3, 7)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[other_dual_node], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
# Self-loop.
self.assertTrue(
[new_petdni_batch[dual_node_1], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
dual_node_1 = (1, 4)
other_dual_nodes = [(0, 1), (0, 4), (1, 2), (1, 3), (4, 6), (4, 7)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[other_dual_node], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
# Self-loop.
self.assertTrue(
[new_petdni_batch[dual_node_1], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
dual_node_1 = (2, 3)
other_dual_nodes = [(1, 2), (1, 3), (2, 5), (3, 5), (3, 7)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[other_dual_node], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
# Self-loop.
self.assertTrue(
[new_petdni_batch[dual_node_1], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
dual_node_1 = (2, 5)
other_dual_nodes = [(1, 2), (2, 3), (3, 5), (5, 6)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[other_dual_node], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
# Self-loop.
self.assertTrue(
[new_petdni_batch[dual_node_1], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
dual_node_1 = (3, 5)
other_dual_nodes = [(1, 3), (2, 3), (2, 5), (3, 7), (5, 6)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[other_dual_node], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
# Self-loop.
self.assertTrue(
[new_petdni_batch[dual_node_1], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
dual_node_1 = (3, 7)
other_dual_nodes = [(1, 3), (2, 3), (3, 5), (4, 7), (6, 7)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[other_dual_node], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
# Self-loop.
self.assertTrue(
[new_petdni_batch[dual_node_1], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
dual_node_1 = (4, 6)
other_dual_nodes = [(0, 4), (0, 6), (1, 4), (4, 7), (5, 6), (6, 7)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[other_dual_node], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
# Self-loop.
self.assertTrue(
[new_petdni_batch[dual_node_1], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
dual_node_1 = (4, 7)
other_dual_nodes = [(0, 4), (1, 4), (3, 7), (4, 6), (6, 7)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[other_dual_node], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
# Self-loop.
self.assertTrue(
[new_petdni_batch[dual_node_1], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
dual_node_1 = (5, 6)
other_dual_nodes = [(0, 6), (2, 5), (3, 5), (4, 6), (6, 7)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[other_dual_node], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
# Self-loop.
self.assertTrue(
[new_petdni_batch[dual_node_1], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
dual_node_1 = (6, 7)
other_dual_nodes = [(0, 6), (3, 7), (4, 6), (4, 7), (5, 6)]
for other_dual_node in other_dual_nodes:
self.assertTrue([
new_petdni_batch[other_dual_node], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
# Self-loop.
self.assertTrue(
[new_petdni_batch[dual_node_1], new_petdni_batch[dual_node_1]
] in new_dual_edge_index_list)
    def __test_config_B_with_output_self_loops_nonconsecutive(
        self, use_decreasing_attention_coefficient=True, num_heads=1):
        """Tests edge pooling on dual-graph configuration B (double dual
        nodes, undirected dual edges), with self-loops added to the output
        dual graph and pooling of consecutive primal edges disallowed.

        Args:
            use_decreasing_attention_coefficient (bool): If True, the primal
                edges with the largest attention coefficients are pooled
                first; otherwise those with the smallest. (default: True)
            num_heads (int): Number of attention heads for which random
                attention coefficients are generated. (default: 1)
        """
        # - Dual-graph configuration B.
        single_dual_nodes = False
        undirected_dual_edges = True
        graph_creator = create_graphs.GraphCreator(
            mesh_filename=osp.join(current_dir,
                                   '../../common_data/simple_mesh_large.ply'),
            single_dual_nodes=single_dual_nodes,
            undirected_dual_edges=undirected_dual_edges,
            primal_features_from_dual_features=False)
        primal_graph, dual_graph = graph_creator.create_graphs()
        petdni = graph_creator.primal_edge_to_dual_node_idx
        (primal_graph_batch, dual_graph_batch,
         petdni_batch) = create_dual_primal_batch(
             primal_graphs_list=[primal_graph],
             dual_graphs_list=[dual_graph],
             primal_edge_to_dual_node_idx_list=[petdni])
        # Primal graph.
        num_primal_edges = primal_graph_batch.num_edges
        num_primal_nodes = maybe_num_nodes(primal_graph_batch.edge_index)
        self.assertEqual(num_primal_edges, 42)
        self.assertEqual(num_primal_nodes, 14)
        # - Check existence of primal edges.
        for edge in [(0, 1), (0, 7), (0, 10), (1, 2), (1, 5), (2, 3), (2, 9),
                     (3, 4), (3, 8), (4, 5), (4, 13), (5, 6), (6, 7), (6, 12),
                     (7, 11), (8, 9), (8, 13), (9, 10), (10, 11), (11, 12),
                     (12, 13)]:
            self.assertNotEqual(petdni_batch[edge], petdni_batch[edge[::-1]])
        # - Set the features of each primal node randomly.
        dim_primal_features = primal_graph_batch.num_node_features
        for primal_feature in primal_graph_batch.x:
            primal_feature[:] = torch.rand(dim_primal_features,
                                           dtype=torch.float)
        # Dual graph.
        num_dual_edges = dual_graph_batch.num_edges
        num_dual_nodes = maybe_num_nodes(dual_graph_batch.edge_index)
        # - Since the mesh is watertight, the medial graph of the triangulation
        #   is 4-regular, hence each node in the dual graph has 4 incoming edges
        #   and 4 outgoing edges. However, since there are no self-loops in the
        #   dual graph, each incoming edge for a certain dual node is also an
        #   outgoing edge for another dual node, and the total number of
        #   (directed) edges in the dual graph is 4 times the number of dual
        #   nodes.
        self.assertEqual(num_dual_edges, num_dual_nodes * 4)
        self.assertEqual(num_dual_nodes, num_primal_edges)
        # - Set the features of each dual node randomly.
        dim_dual_features = dual_graph_batch.num_node_features
        for dual_feature in dual_graph_batch.x:
            dual_feature[:] = torch.rand(dim_dual_features,
                                         dtype=torch.float) * 3
        # Randomly shuffle the primal edge-index matrix.
        permutation = np.random.permutation(num_primal_edges)
        primal_graph_batch.edge_index = (
            primal_graph_batch.edge_index[:, permutation])
        # Set the attention coefficients manually, so that the primal edges have
        # associated attention coefficients in this order:
        # - 4->13 / 13->4;
        # - 10->11 / 11->10;
        # - 0->10 / 10->0 [not pooled, because 10->11 / 11->10 was pooled];
        # - 2->3 / 3->2;
        # - 3->8 / 8->3 [not pooled, because 2->3 / 3->2 was pooled];
        # - 6->7 / 7->6;
        # - 1->5 / 5->1;
        # - 7->11 / 11->7 [not pooled, because 10->11 / 11->10 and 6->7 / 7->6
        #   were pooled];
        # - 1->2 / 2->1 [not pooled, because 2->3 / 3->2 and 1->5 / 5->1 were
        #   pooled];
        # - 8->9 / 9->8;
        # - ... [other edges that are not pooled]
        # (cf. file `../../common_data/simple_mesh_large_pool_2.png`)
        attention_threshold = 0.5
        edges_to_pool = [[8, 9], [1, 2], [7, 11], [1, 5], [6, 7], [3, 8],
                         [2, 3], [0, 10], [10, 11], [4, 13]]
        if (use_decreasing_attention_coefficient):
            primal_attention_coeffs = torch.rand(
                [num_primal_edges, num_heads],
                dtype=torch.float) * attention_threshold
            for edge_idx, primal_edge in enumerate(
                    primal_graph_batch.edge_index.t().tolist()):
                if (sorted(primal_edge) in edges_to_pool):
                    pooling_idx = edges_to_pool.index(sorted(primal_edge))
                    primal_attention_coeffs[edge_idx] = attention_threshold + (
                        1 - attention_threshold) * (
                            float(pooling_idx) / len(edges_to_pool) +
                            torch.rand([num_heads], dtype=torch.float) * 1. /
                            len(edges_to_pool))
        else:
            primal_attention_coeffs = attention_threshold + torch.rand(
                [num_primal_edges, num_heads],
                dtype=torch.float) * (1 - attention_threshold)
            for edge_idx, primal_edge in enumerate(
                    primal_graph_batch.edge_index.t().tolist()):
                if (sorted(primal_edge) in edges_to_pool):
                    pooling_idx = edges_to_pool.index(sorted(primal_edge))
                    primal_attention_coeffs[edge_idx] = (
                        attention_threshold - attention_threshold *
                        (float(pooling_idx) / len(edges_to_pool) +
                         torch.rand([num_heads], dtype=torch.float) * 1. /
                         len(edges_to_pool)))
        # Create a single dual-primal edge-pooling layer.
        pool = DualPrimalEdgePooling(
            self_loops_in_output_dual_graph=True,
            single_dual_nodes=single_dual_nodes,
            undirected_dual_edges=undirected_dual_edges,
            num_primal_edges_to_keep=15,
            use_decreasing_attention_coefficient=
            use_decreasing_attention_coefficient,
            allow_pooling_consecutive_edges=False,
            return_old_dual_node_to_new_dual_node=True)
        # Perform primal-edge pooling.
        (new_primal_graph_batch, new_dual_graph_batch, new_petdni_batch,
         pooling_log) = pool(primal_graph_batch=primal_graph_batch,
                             dual_graph_batch=dual_graph_batch,
                             primal_edge_to_dual_node_idx_batch=petdni_batch,
                             primal_attention_coeffs=primal_attention_coeffs)
        # Tests on the new primal graph.
        num_new_primal_nodes = maybe_num_nodes(
            new_primal_graph_batch.edge_index)
        num_new_primal_edges = new_primal_graph_batch.num_edges
        self.assertEqual(num_new_primal_nodes, 8)
        # - Check correspondence of the old primal nodes with the new primal
        #   nodes (i.e., node clusters).
        old_primal_node_to_new_one = pooling_log.old_primal_node_to_new_one
        for old_primal_node in range(num_primal_nodes):
            if (old_primal_node in [0]):
                self.assertEqual(old_primal_node_to_new_one[old_primal_node], 0)
            elif (old_primal_node in [1, 5]):
                self.assertEqual(old_primal_node_to_new_one[old_primal_node], 1)
            elif (old_primal_node in [2, 3]):
                self.assertEqual(old_primal_node_to_new_one[old_primal_node], 2)
            elif (old_primal_node in [4, 13]):
                self.assertEqual(old_primal_node_to_new_one[old_primal_node], 3)
            elif (old_primal_node in [6, 7]):
                self.assertEqual(old_primal_node_to_new_one[old_primal_node], 4)
            elif (old_primal_node in [8, 9]):
                self.assertEqual(old_primal_node_to_new_one[old_primal_node], 5)
            elif (old_primal_node in [10, 11]):
                self.assertEqual(old_primal_node_to_new_one[old_primal_node], 6)
            elif (old_primal_node == 12):
                self.assertEqual(old_primal_node_to_new_one[old_primal_node], 7)
        # - Check that the features of each new primal node correspond to the
        #   average of the features of the primal nodes merged together into
        #   that node.
        for new_primal_node in range(num_new_primal_nodes):
            old_primal_nodes_per_new_primal_node = [
                0, [1, 5], [2, 3], [4, 13], [6, 7], [8, 9], [10, 11], 12
            ]
            old_primal_nodes = old_primal_nodes_per_new_primal_node[
                new_primal_node]
            self.assertAlmostEqual(
                new_primal_graph_batch.x[new_primal_node, 0].item(),
                primal_graph_batch.x[old_primal_nodes, 0].mean().item(), 5)
        # - Check the edges between the new primal nodes, which should be the
        #   following:
        #   - 0->1 / 1->0;
        #   - 0->4 / 4->0;
        #   - 0->6 / 6->0;
        #   - 1->2 / 2->1;
        #   - 1->3 / 3->1;
        #   - 1->4 / 4->1;
        #   - 2->3 / 3->2;
        #   - 2->5 / 5->2;
        #   - 3->5 / 5->3;
        #   - 3->7 / 7->3;
        #   - 4->6 / 6->4;
        #   - 4->7 / 7->4;
        #   - 5->6 / 6->5;
        #   - 6->7 / 7->6.
        self.assertEqual(num_new_primal_edges, 28)
        new_primal_edge_index_list = new_primal_graph_batch.edge_index.t(
        ).tolist()
        for new_primal_edge in [[0, 1], [0, 4], [0, 6], [1, 2], [1, 3], [1, 4],
                                [2, 3], [2, 5], [3, 5], [3, 7], [4, 6], [4, 7],
                                [5, 6], [6, 7]]:
            self.assertTrue(new_primal_edge in new_primal_edge_index_list)
            self.assertTrue(new_primal_edge[::-1] in new_primal_edge_index_list)
            # Check that opposite primal edges are associated to different dual
            # nodes (configuration B uses double dual nodes).
            self.assertNotEqual(new_petdni_batch[tuple(new_primal_edge)],
                                new_petdni_batch[tuple(new_primal_edge[::-1])])
        # Tests on the new dual graph.
        num_new_dual_nodes = maybe_num_nodes(new_dual_graph_batch.edge_index)
        num_new_dual_edges = new_dual_graph_batch.num_edges
        self.assertEqual(num_new_dual_nodes, num_new_primal_edges)
        # - Check that in case the border between two new face clusters is made
        #   of multiple edges of the original mesh, the dual feature associated
        #   to the new primal edge is the average of the dual features
        #   associated with the 'multiple edges of the original mesh'. This
        #   happens between new primal nodes 2--5, in both directions.
        #   - New (directed) primal edge 2->5 corresponds to old (directed)
        #     primal edges 2->9 and 3->8.
        idx_new_dual_node = new_petdni_batch[(2, 5)]
        idx_old_dual_node_1 = petdni_batch[(2, 9)]
        idx_old_dual_node_2 = petdni_batch[(3, 8)]
        self.assertAlmostEqual(
            new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
            dual_graph_batch.x[[idx_old_dual_node_1, idx_old_dual_node_2],
                               0].mean().item(), 5)
        #   - New (directed) primal edge 5->2 corresponds to old (directed)
        #     primal edges 9->2 and 8->3.
        idx_new_dual_node = new_petdni_batch[(5, 2)]
        idx_old_dual_node_1 = petdni_batch[(9, 2)]
        idx_old_dual_node_2 = petdni_batch[(8, 3)]
        self.assertAlmostEqual(
            new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
            dual_graph_batch.x[[idx_old_dual_node_1, idx_old_dual_node_2],
                               0].mean().item(), 5)
        # - For all other cases, check that the dual feature associated to the
        #   new primal edge is the dual feature associated with edge of the
        #   original mesh that is now between the new primal nodes.
        new_dual_nodes = [(0, 1), (0, 4), (0, 6), (1, 2), (1, 3), (1, 4),
                          (2, 3), (3, 5), (3, 7), (4, 6), (4, 7), (5, 6),
                          (6, 7)]
        old_dual_nodes = [(0, 1), (0, 7), (0, 10), (1, 2), (5, 4), (5, 6),
                          (3, 4), (13, 8), (13, 12), (7, 11), (6, 12), (9, 10),
                          (11, 12)]
        for new_dual_node, old_dual_node in zip(new_dual_nodes, old_dual_nodes):
            # 'Forward' edge.
            idx_new_dual_node = new_petdni_batch[new_dual_node]
            idx_old_dual_node = petdni_batch[old_dual_node]
            self.assertAlmostEqual(
                new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
                dual_graph_batch.x[idx_old_dual_node, 0].item(), 5)
            # 'Backward' edge.
            idx_new_dual_node = new_petdni_batch[new_dual_node[::-1]]
            idx_old_dual_node = petdni_batch[old_dual_node[::-1]]
            self.assertAlmostEqual(
                new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
                dual_graph_batch.x[idx_old_dual_node, 0].item(), 5)
        # - Check that the mapping between old and new dual nodes is correct.
        old_dual_node_to_new_one = pooling_log.old_dual_node_to_new_one
        self.assertEqual(len(old_dual_node_to_new_one), num_dual_nodes)
        old_dual_nodes_index_with_corresponding_new_one = [
            petdni_batch[primal_edge]
            for primal_edge in [(0, 1), (0, 7), (0, 10), (1, 2), (2, 9), (3, 4),
                                (3, 8), (4, 5), (5, 6), (6, 12), (7, 11),
                                (8, 13), (9, 10), (11, 12), (12, 13)]
        ] + [
            petdni_batch[primal_edge[::-1]]
            for primal_edge in [(0, 1), (0, 7), (0, 10), (1, 2), (2, 9), (3, 4),
                                (3, 8), (4, 5), (5, 6), (6, 12), (7, 11),
                                (8, 13), (9, 10), (11, 12), (12, 13)]
        ]
        corresponding_new_dual_nodes = [
            new_petdni_batch[primal_edge]
            for primal_edge in [(0, 1), (0, 4), (0, 6), (1, 2), (2, 5), (2, 3),
                                (2, 5), (3, 1), (1, 4), (4, 7), (4, 6), (5, 3),
                                (5, 6), (6, 7), (7, 3)]
        ] + [
            new_petdni_batch[primal_edge[::-1]]
            for primal_edge in [(0, 1), (0, 4), (0, 6), (1, 2), (2, 5), (2, 3),
                                (2, 5), (3, 1), (1, 4), (4, 7), (4, 6), (5, 3),
                                (5, 6), (6, 7), (7, 3)]
        ]
        for dual_node_idx in range(num_dual_nodes):
            if (dual_node_idx in old_dual_nodes_index_with_corresponding_new_one
               ):
                # - The old dual node has a corresponding new dual node.
                self.assertEqual(
                    old_dual_node_to_new_one[dual_node_idx],
                    corresponding_new_dual_nodes[
                        old_dual_nodes_index_with_corresponding_new_one.index(
                            dual_node_idx)])
            else:
                # - The old dual node has no corresponding new dual node.
                self.assertEqual(old_dual_node_to_new_one[dual_node_idx], -1)
        # - Check the edges between the new dual nodes, which should be the
        #   following (with dual nodes indicated by the corresponding primal
        #   nodes as a set), plus the self-loops:
        #   - (0->1) -> (1->2);
        #   - (0->1) -> (1->3);
        #   - (0->1) -> (1->4);
        #   - (0->1) -> (4->0);
        #   - (0->1) -> (6->0);
        #   - (1->0) -> (2->1);
        #   - (1->0) -> (3->1);
        #   - (1->0) -> (4->1);
        #   - (1->0) -> (0->4);
        #   - (1->0) -> (0->6);
        #   - (0->4) -> (4->1);
        #   - (0->4) -> (4->6);
        #   - (0->4) -> (4->7);
        #   - (0->4) -> (1->0);
        #   - (0->4) -> (6->0);
        #   - (4->0) -> (1->4);
        #   - (4->0) -> (6->4);
        #   - (4->0) -> (7->4);
        #   - (4->0) -> (0->1);
        #   - (4->0) -> (0->6);
        #   - (0->6) -> (6->4);
        #   - (0->6) -> (6->5);
        #   - (0->6) -> (6->7);
        #   - (0->6) -> (1->0);
        #   - (0->6) -> (4->0);
        #   - (6->0) -> (4->6);
        #   - (6->0) -> (5->6);
        #   - (6->0) -> (7->6);
        #   - (6->0) -> (0->1);
        #   - (6->0) -> (0->4);
        #   - (1->2) -> (2->3);
        #   - (1->2) -> (2->5);
        #   - (1->2) -> (0->1);
        #   - (1->2) -> (3->1);
        #   - (1->2) -> (4->1);
        #   - (2->1) -> (3->2);
        #   - (2->1) -> (5->2);
        #   - (2->1) -> (1->0);
        #   - (2->1) -> (1->3);
        #   - (2->1) -> (1->4);
        #   - (1->3) -> (3->2);
        #   - (1->3) -> (3->5);
        #   - (1->3) -> (3->7);
        #   - (1->3) -> (0->1);
        #   - (1->3) -> (2->1);
        #   - (1->3) -> (4->1);
        #   - (3->1) -> (2->3);
        #   - (3->1) -> (5->3);
        #   - (3->1) -> (7->3);
        #   - (3->1) -> (1->0);
        #   - (3->1) -> (1->2);
        #   - (3->1) -> (1->4);
        #   - (1->4) -> (4->0);
        #   - (1->4) -> (4->6);
        #   - (1->4) -> (4->7);
        #   - (1->4) -> (0->1);
        #   - (1->4) -> (2->1);
        #   - (1->4) -> (3->1);
        #   - (4->1) -> (0->4);
        #   - (4->1) -> (6->4);
        #   - (4->1) -> (7->4);
        #   - (4->1) -> (1->0);
        #   - (4->1) -> (1->2);
        #   - (4->1) -> (1->3);
        #   - (2->3) -> (3->1);
        #   - (2->3) -> (3->5);
        #   - (2->3) -> (3->7);
        #   - (2->3) -> (1->2);
        #   - (2->3) -> (5->2);
        #   - (3->2) -> (1->3);
        #   - (3->2) -> (5->3);
        #   - (3->2) -> (7->3);
        #   - (3->2) -> (2->1);
        #   - (3->2) -> (2->5);
        #   - (2->5) -> (5->3);
        #   - (2->5) -> (5->6);
        #   - (2->5) -> (1->2);
        #   - (2->5) -> (3->2);
        #   - (5->2) -> (3->5);
        #   - (5->2) -> (6->5);
        #   - (5->2) -> (2->1);
        #   - (5->2) -> (2->3);
        #   - (3->5) -> (5->2);
        #   - (3->5) -> (5->6);
        #   - (3->5) -> (1->3);
        #   - (3->5) -> (2->3);
        #   - (3->5) -> (7->3);
        #   - (5->3) -> (2->5);
        #   - (5->3) -> (6->2);
        #   - (5->3) -> (3->1);
        #   - (5->3) -> (3->2);
        #   - (5->3) -> (3->7);
        #   - (3->7) -> (7->4);
        #   - (3->7) -> (7->6);
        #   - (3->7) -> (1->3);
        #   - (3->7) -> (2->3);
        #   - (3->7) -> (5->3);
        #   - (7->3) -> (4->7);
        #   - (7->3) -> (6->7);
        #   - (7->3) -> (3->1);
        #   - (7->3) -> (3->2);
        #   - (7->3) -> (3->5);
        #   - (4->6) -> (6->0);
        #   - (4->6) -> (6->5);
        #   - (4->6) -> (6->7);
        #   - (4->6) -> (0->4);
        #   - (4->6) -> (1->4);
        #   - (4->6) -> (7->4);
        #   - (6->4) -> (0->6);
        #   - (6->4) -> (5->6);
        #   - (6->4) -> (7->6);
        #   - (6->4) -> (4->0);
        #   - (6->4) -> (4->1);
        #   - (6->4) -> (4->7);
        #   - (4->7) -> (7->3);
        #   - (4->7) -> (7->6);
        #   - (4->7) -> (0->4);
        #   - (4->7) -> (1->4);
        #   - (4->7) -> (6->4);
        #   - (7->4) -> (3->7);
        #   - (7->4) -> (6->7);
        #   - (7->4) -> (4->0);
        #   - (7->4) -> (4->1);
        #   - (7->4) -> (4->6);
        #   - (5->6) -> (6->0);
        #   - (5->6) -> (6->4);
        #   - (5->6) -> (6->7);
        #   - (5->6) -> (2->5);
        #   - (5->6) -> (3->5);
        #   - (6->5) -> (0->6);
        #   - (6->5) -> (4->6);
        #   - (6->5) -> (7->6);
        #   - (6->5) -> (5->2);
        #   - (6->5) -> (5->3);
        #   - (6->7) -> (7->3);
        #   - (6->7) -> (7->4);
        #   - (6->7) -> (0->6);
        #   - (6->7) -> (4->6);
        #   - (6->7) -> (5->6).
        #   - (7->6) -> (3->7);
        #   - (7->6) -> (4->7);
        #   - (7->6) -> (6->0);
        #   - (7->6) -> (6->4);
        #   - (7->6) -> (6->5).
        self.assertEqual(num_new_dual_edges, 144 + num_new_dual_nodes)
        new_dual_edge_index_list = new_dual_graph_batch.edge_index.t().tolist()
        dual_node_to_neighbors = {
            (0, 1): [(1, 2), (1, 3), (1, 4), (4, 0), (6, 0)],
            (0, 4): [(4, 1), (4, 6), (4, 7), (1, 0), (6, 0)],
            (0, 6): [(6, 4), (6, 5), (6, 7), (1, 0), (4, 0)],
            (1, 2): [(2, 3), (2, 5), (0, 1), (3, 1), (4, 1)],
            (1, 3): [(3, 2), (3, 5), (3, 7), (0, 1), (2, 1), (4, 1)],
            (1, 4): [(4, 0), (4, 6), (4, 7), (0, 1), (2, 1), (3, 1)],
            (2, 3): [(3, 1), (3, 5), (3, 7), (1, 2), (5, 2)],
            (2, 5): [(5, 3), (5, 6), (1, 2), (3, 2)],
            (3, 5): [(5, 2), (5, 6), (1, 3), (2, 3), (7, 3)],
            (3, 7): [(7, 4), (7, 6), (1, 3), (2, 3), (5, 3)],
            (4, 6): [(6, 0), (6, 5), (6, 7), (0, 4), (1, 4), (7, 4)],
            (4, 7): [(7, 3), (7, 6), (0, 4), (1, 4), (6, 4)],
            (5, 6): [(6, 0), (6, 4), (6, 7), (2, 5), (3, 5)],
            (6, 7): [(7, 3), (7, 4), (0, 6), (4, 6), (5, 6)]
        }
        for new_dual_node, other_dual_nodes in dual_node_to_neighbors.items():
            for other_dual_node in other_dual_nodes:
                self.assertTrue([
                    new_petdni_batch[new_dual_node],
                    new_petdni_batch[other_dual_node]
                ] in new_dual_edge_index_list)
            # - Self-loop.
            self.assertTrue([
                new_petdni_batch[new_dual_node], new_petdni_batch[new_dual_node]
            ] in new_dual_edge_index_list)
            # 'Opposite' dual node.
            for other_dual_node in other_dual_nodes:
                self.assertTrue([
                    new_petdni_batch[new_dual_node[::-1]], new_petdni_batch[
                        other_dual_node[::-1]]
                ] in new_dual_edge_index_list)
            # - Self-loop of 'opposite' dual node.
            self.assertTrue([
                new_petdni_batch[new_dual_node[::-1]], new_petdni_batch[
                    new_dual_node[::-1]]
            ] in new_dual_edge_index_list)
    def __test_config_C_with_output_self_loops_nonconsecutive(
        self, use_decreasing_attention_coefficient=True, num_heads=1):
        """Tests edge pooling on dual-graph configuration C (double dual
        nodes, directed dual edges), with self-loops added to the output
        dual graph and pooling of consecutive primal edges disallowed.

        Args:
            use_decreasing_attention_coefficient (bool): If True, the primal
                edges with the largest attention coefficients are pooled
                first; otherwise those with the smallest. (default: True)
            num_heads (int): Number of attention heads for which random
                attention coefficients are generated. (default: 1)
        """
        # - Dual-graph configuration C.
        single_dual_nodes = False
        undirected_dual_edges = False
        graph_creator = create_graphs.GraphCreator(
            mesh_filename=osp.join(current_dir,
                                   '../../common_data/simple_mesh_large.ply'),
            single_dual_nodes=single_dual_nodes,
            undirected_dual_edges=undirected_dual_edges,
            primal_features_from_dual_features=False)
        primal_graph, dual_graph = graph_creator.create_graphs()
        petdni = graph_creator.primal_edge_to_dual_node_idx
        (primal_graph_batch, dual_graph_batch,
         petdni_batch) = create_dual_primal_batch(
             primal_graphs_list=[primal_graph],
             dual_graphs_list=[dual_graph],
             primal_edge_to_dual_node_idx_list=[petdni])
        # Primal graph.
        num_primal_edges = primal_graph_batch.num_edges
        num_primal_nodes = maybe_num_nodes(primal_graph_batch.edge_index)
        self.assertEqual(num_primal_edges, 42)
        self.assertEqual(num_primal_nodes, 14)
        # - Check existence of primal edges.
        for edge in [(0, 1), (0, 7), (0, 10), (1, 2), (1, 5), (2, 3), (2, 9),
                     (3, 4), (3, 8), (4, 5), (4, 13), (5, 6), (6, 7), (6, 12),
                     (7, 11), (8, 9), (8, 13), (9, 10), (10, 11), (11, 12),
                     (12, 13)]:
            self.assertNotEqual(petdni_batch[edge], petdni_batch[edge[::-1]])
        # - Set the features of each primal node randomly.
        dim_primal_features = primal_graph_batch.num_node_features
        for primal_feature in primal_graph_batch.x:
            primal_feature[:] = torch.rand(dim_primal_features,
                                           dtype=torch.float)
        # Dual graph.
        num_dual_edges = dual_graph_batch.num_edges
        num_dual_nodes = maybe_num_nodes(dual_graph_batch.edge_index)
        # - Since the mesh is watertight, the medial graph of the triangulation
        #   is 4-regular, but by definition of dual-graph configuration C each
        #   node in the dual graph has 2 incoming edges and 2 outgoing edges.
        #   However, since there are no self-loops in the dual graph, each
        #   incoming edge for a certain dual node is also an outgoing edge for
        #   another dual node, and the total number of (directed) edges in the
        #   dual graph is 2 times the number of dual nodes.
        self.assertEqual(num_dual_edges, num_dual_nodes * 2)
        self.assertEqual(num_dual_nodes, num_primal_edges)
        # - Set the features of each dual node randomly.
        dim_dual_features = dual_graph_batch.num_node_features
        for dual_feature in dual_graph_batch.x:
            dual_feature[:] = torch.rand(dim_dual_features,
                                         dtype=torch.float) * 3
        # Randomly shuffle the primal edge-index matrix.
        permutation = np.random.permutation(num_primal_edges)
        primal_graph_batch.edge_index = (
            primal_graph_batch.edge_index[:, permutation])
        # Set the attention coefficients manually, so that the primal edges have
        # associated attention coefficients in this order:
        # - 4->13 / 13->4;
        # - 10->11 / 11->10;
        # - 0->10 / 10->0 [not pooled, because 10->11 / 11->10 was pooled];
        # - 2->3 / 3->2;
        # - 3->8 / 8->3 [not pooled, because 2->3 / 3->2 was pooled];
        # - 6->7 / 7->6;
        # - 1->5 / 5->1;
        # - 7->11 / 11->7 [not pooled, because 10->11 / 11->10 and 6->7 / 7->6
        #   were pooled];
        # - 1->2 / 2->1 [not pooled, because 2->3 / 3->2 and 1->5 / 5->1 were
        #   pooled];
        # - 8->9 / 9->8;
        # - ... [other edges that are not pooled]
        # (cf. file `../../common_data/simple_mesh_large_pool_2.png`)
        attention_threshold = 0.5
        edges_to_pool = [[8, 9], [1, 2], [7, 11], [1, 5], [6, 7], [3, 8],
                         [2, 3], [0, 10], [10, 11], [4, 13]]
        if (use_decreasing_attention_coefficient):
            primal_attention_coeffs = torch.rand(
                [num_primal_edges, num_heads],
                dtype=torch.float) * attention_threshold
            for edge_idx, primal_edge in enumerate(
                    primal_graph_batch.edge_index.t().tolist()):
                if (sorted(primal_edge) in edges_to_pool):
                    pooling_idx = edges_to_pool.index(sorted(primal_edge))
                    primal_attention_coeffs[edge_idx] = attention_threshold + (
                        1 - attention_threshold) * (
                            float(pooling_idx) / len(edges_to_pool) +
                            torch.rand([num_heads], dtype=torch.float) * 1. /
                            len(edges_to_pool))
        else:
            primal_attention_coeffs = attention_threshold + torch.rand(
                [num_primal_edges, num_heads],
                dtype=torch.float) * (1 - attention_threshold)
            for edge_idx, primal_edge in enumerate(
                    primal_graph_batch.edge_index.t().tolist()):
                if (sorted(primal_edge) in edges_to_pool):
                    pooling_idx = edges_to_pool.index(sorted(primal_edge))
                    primal_attention_coeffs[edge_idx] = (
                        attention_threshold - attention_threshold *
                        (float(pooling_idx) / len(edges_to_pool) +
                         torch.rand([num_heads], dtype=torch.float) * 1. /
                         len(edges_to_pool)))
        # Create a single dual-primal edge-pooling layer.
        pool = DualPrimalEdgePooling(
            self_loops_in_output_dual_graph=True,
            single_dual_nodes=single_dual_nodes,
            undirected_dual_edges=undirected_dual_edges,
            num_primal_edges_to_keep=15,
            use_decreasing_attention_coefficient=
            use_decreasing_attention_coefficient,
            allow_pooling_consecutive_edges=False,
            return_old_dual_node_to_new_dual_node=True)
        # Perform primal-edge pooling.
        (new_primal_graph_batch, new_dual_graph_batch, new_petdni_batch,
         pooling_log) = pool(primal_graph_batch=primal_graph_batch,
                             dual_graph_batch=dual_graph_batch,
                             primal_edge_to_dual_node_idx_batch=petdni_batch,
                             primal_attention_coeffs=primal_attention_coeffs)
        # Tests on the new primal graph.
        num_new_primal_nodes = maybe_num_nodes(
            new_primal_graph_batch.edge_index)
        num_new_primal_edges = new_primal_graph_batch.num_edges
        self.assertEqual(num_new_primal_nodes, 8)
        # - Check correspondence of the old primal nodes with the new primal
        #   nodes (i.e., node clusters).
        old_primal_node_to_new_one = pooling_log.old_primal_node_to_new_one
        for old_primal_node in range(num_primal_nodes):
            if (old_primal_node in [0]):
                self.assertEqual(old_primal_node_to_new_one[old_primal_node], 0)
            elif (old_primal_node in [1, 5]):
                self.assertEqual(old_primal_node_to_new_one[old_primal_node], 1)
            elif (old_primal_node in [2, 3]):
                self.assertEqual(old_primal_node_to_new_one[old_primal_node], 2)
            elif (old_primal_node in [4, 13]):
                self.assertEqual(old_primal_node_to_new_one[old_primal_node], 3)
            elif (old_primal_node in [6, 7]):
                self.assertEqual(old_primal_node_to_new_one[old_primal_node], 4)
            elif (old_primal_node in [8, 9]):
                self.assertEqual(old_primal_node_to_new_one[old_primal_node], 5)
            elif (old_primal_node in [10, 11]):
                self.assertEqual(old_primal_node_to_new_one[old_primal_node], 6)
            elif (old_primal_node == 12):
                self.assertEqual(old_primal_node_to_new_one[old_primal_node], 7)
        # - Check that the features of each new primal node correspond to the
        #   average of the features of the primal nodes merged together into
        #   that node.
        for new_primal_node in range(num_new_primal_nodes):
            old_primal_nodes_per_new_primal_node = [
                0, [1, 5], [2, 3], [4, 13], [6, 7], [8, 9], [10, 11], 12
            ]
            old_primal_nodes = old_primal_nodes_per_new_primal_node[
                new_primal_node]
            self.assertAlmostEqual(
                new_primal_graph_batch.x[new_primal_node, 0].item(),
                primal_graph_batch.x[old_primal_nodes, 0].mean().item(), 5)
        # - Check the edges between the new primal nodes, which should be the
        #   following:
        #   - 0->1 / 1->0;
        #   - 0->4 / 4->0;
        #   - 0->6 / 6->0;
        #   - 1->2 / 2->1;
        #   - 1->3 / 3->1;
        #   - 1->4 / 4->1;
        #   - 2->3 / 3->2;
        #   - 2->5 / 5->2;
        #   - 3->5 / 5->3;
        #   - 3->7 / 7->3;
        #   - 4->6 / 6->4;
        #   - 4->7 / 7->4;
        #   - 5->6 / 6->5;
        #   - 6->7 / 7->6.
        self.assertEqual(num_new_primal_edges, 28)
        new_primal_edge_index_list = new_primal_graph_batch.edge_index.t(
        ).tolist()
        for new_primal_edge in [[0, 1], [0, 4], [0, 6], [1, 2], [1, 3], [1, 4],
                                [2, 3], [2, 5], [3, 5], [3, 7], [4, 6], [4, 7],
                                [5, 6], [6, 7]]:
            self.assertTrue(new_primal_edge in new_primal_edge_index_list)
            self.assertTrue(new_primal_edge[::-1] in new_primal_edge_index_list)
            # Check that opposite primal edges are associated to different dual
            # nodes (configuration C uses double dual nodes).
            self.assertNotEqual(new_petdni_batch[tuple(new_primal_edge)],
                                new_petdni_batch[tuple(new_primal_edge[::-1])])
        # Tests on the new dual graph.
        num_new_dual_nodes = maybe_num_nodes(new_dual_graph_batch.edge_index)
        num_new_dual_edges = new_dual_graph_batch.num_edges
        self.assertEqual(num_new_dual_nodes, num_new_primal_edges)
        # - Check that in case the border between two new face clusters is made
        #   of multiple edges of the original mesh, the dual feature associated
        #   to the new primal edge is the average of the dual features
        #   associated with the 'multiple edges of the original mesh'. This
        #   happens between new primal nodes 2--5, in both directions.
        #   - New (directed) primal edge 2->5 corresponds to old (directed)
        #     primal edges 2->9 and 3->8.
        idx_new_dual_node = new_petdni_batch[(2, 5)]
        idx_old_dual_node_1 = petdni_batch[(2, 9)]
        idx_old_dual_node_2 = petdni_batch[(3, 8)]
        self.assertAlmostEqual(
            new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
            dual_graph_batch.x[[idx_old_dual_node_1, idx_old_dual_node_2],
                               0].mean().item(), 5)
        #   - New (directed) primal edge 5->2 corresponds to old (directed)
        #     primal edges 9->2 and 8->3.
        idx_new_dual_node = new_petdni_batch[(5, 2)]
        idx_old_dual_node_1 = petdni_batch[(9, 2)]
        idx_old_dual_node_2 = petdni_batch[(8, 3)]
        self.assertAlmostEqual(
            new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
            dual_graph_batch.x[[idx_old_dual_node_1, idx_old_dual_node_2],
                               0].mean().item(), 5)
        # - For all other cases, check that the dual feature associated to the
        #   new primal edge is the dual feature associated with edge of the
        #   original mesh that is now between the new primal nodes.
        new_dual_nodes = [(0, 1), (0, 4), (0, 6), (1, 2), (1, 3), (1, 4),
                          (2, 3), (3, 5), (3, 7), (4, 6), (4, 7), (5, 6),
                          (6, 7)]
        old_dual_nodes = [(0, 1), (0, 7), (0, 10), (1, 2), (5, 4), (5, 6),
                          (3, 4), (13, 8), (13, 12), (7, 11), (6, 12), (9, 10),
                          (11, 12)]
        for new_dual_node, old_dual_node in zip(new_dual_nodes, old_dual_nodes):
            # 'Forward' edge.
            idx_new_dual_node = new_petdni_batch[new_dual_node]
            idx_old_dual_node = petdni_batch[old_dual_node]
            self.assertAlmostEqual(
                new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
                dual_graph_batch.x[idx_old_dual_node, 0].item(), 5)
            # 'Backward' edge.
            idx_new_dual_node = new_petdni_batch[new_dual_node[::-1]]
            idx_old_dual_node = petdni_batch[old_dual_node[::-1]]
            self.assertAlmostEqual(
                new_dual_graph_batch.x[idx_new_dual_node, 0].item(),
                dual_graph_batch.x[idx_old_dual_node, 0].item(), 5)
        # - Check that the mapping between old and new dual nodes is correct.
        old_dual_node_to_new_one = pooling_log.old_dual_node_to_new_one
        self.assertEqual(len(old_dual_node_to_new_one), num_dual_nodes)
        old_dual_nodes_index_with_corresponding_new_one = [
            petdni_batch[primal_edge]
            for primal_edge in [(0, 1), (0, 7), (0, 10), (1, 2), (2, 9), (3, 4),
                                (3, 8), (4, 5), (5, 6), (6, 12), (7, 11),
                                (8, 13), (9, 10), (11, 12), (12, 13)]
        ] + [
            petdni_batch[primal_edge[::-1]]
            for primal_edge in [(0, 1), (0, 7), (0, 10), (1, 2), (2, 9), (3, 4),
                                (3, 8), (4, 5), (5, 6), (6, 12), (7, 11),
                                (8, 13), (9, 10), (11, 12), (12, 13)]
        ]
        corresponding_new_dual_nodes = [
            new_petdni_batch[primal_edge]
            for primal_edge in [(0, 1), (0, 4), (0, 6), (1, 2), (2, 5), (2, 3),
                                (2, 5), (3, 1), (1, 4), (4, 7), (4, 6), (5, 3),
                                (5, 6), (6, 7), (7, 3)]
        ] + [
            new_petdni_batch[primal_edge[::-1]]
            for primal_edge in [(0, 1), (0, 4), (0, 6), (1, 2), (2, 5), (2, 3),
                                (2, 5), (3, 1), (1, 4), (4, 7), (4, 6), (5, 3),
                                (5, 6), (6, 7), (7, 3)]
        ]
        for dual_node_idx in range(num_dual_nodes):
            if (dual_node_idx in old_dual_nodes_index_with_corresponding_new_one
               ):
                # - The old dual node has a corresponding new dual node.
                self.assertEqual(
                    old_dual_node_to_new_one[dual_node_idx],
                    corresponding_new_dual_nodes[
                        old_dual_nodes_index_with_corresponding_new_one.index(
                            dual_node_idx)])
            else:
                # - The old dual node has no corresponding new dual node.
                self.assertEqual(old_dual_node_to_new_one[dual_node_idx], -1)
        # - Check the edges between the new dual nodes, which should be the
        #   following (with dual nodes indicated by the corresponding primal
        #   nodes as a set), plus the self-loops:
        #   - (0->1) -> (1->2);
        #   - (0->1) -> (1->3);
        #   - (0->1) -> (1->4);
        #   - (1->0) -> (0->4);
        #   - (1->0) -> (0->6);
        #   - (0->4) -> (4->1);
        #   - (0->4) -> (4->6);
        #   - (0->4) -> (4->7);
        #   - (4->0) -> (0->1);
        #   - (4->0) -> (0->6);
        #   - (0->6) -> (6->4);
        #   - (0->6) -> (6->5);
        #   - (0->6) -> (6->7);
        #   - (6->0) -> (0->1);
        #   - (6->0) -> (0->4);
        #   - (1->2) -> (2->3);
        #   - (1->2) -> (2->5);
        #   - (2->1) -> (1->0);
        #   - (2->1) -> (1->3);
        #   - (2->1) -> (1->4);
        #   - (1->3) -> (3->2);
        #   - (1->3) -> (3->5);
        #   - (1->3) -> (3->7);
        #   - (3->1) -> (1->0);
        #   - (3->1) -> (1->2);
        #   - (3->1) -> (1->4);
        #   - (1->4) -> (4->0);
        #   - (1->4) -> (4->6);
        #   - (1->4) -> (4->7);
        #   - (4->1) -> (1->0);
        #   - (4->1) -> (1->2);
        #   - (4->1) -> (1->3);
        #   - (2->3) -> (3->1);
        #   - (2->3) -> (3->5);
        #   - (2->3) -> (3->7);
        #   - (3->2) -> (2->1);
        #   - (3->2) -> (2->5);
        #   - (2->5) -> (5->3);
        #   - (2->5) -> (5->6);
        #   - (5->2) -> (2->1);
        #   - (5->2) -> (2->3);
        #   - (3->5) -> (5->2);
        #   - (3->5) -> (5->6);
        #   - (5->3) -> (3->1);
        #   - (5->3) -> (3->2);
        #   - (5->3) -> (3->7);
        #   - (3->7) -> (7->4);
        #   - (3->7) -> (7->6);
        #   - (7->3) -> (3->1);
        #   - (7->3) -> (3->2);
        #   - (7->3) -> (3->5);
        #   - (4->6) -> (6->0);
        #   - (4->6) -> (6->5);
        #   - (4->6) -> (6->7);
        #   - (6->4) -> (4->0);
        #   - (6->4) -> (4->1);
        #   - (6->4) -> (4->7);
        #   - (4->7) -> (7->3);
        #   - (4->7) -> (7->6);
        #   - (7->4) -> (4->0);
        #   - (7->4) -> (4->1);
        #   - (7->4) -> (4->6);
        #   - (5->6) -> (6->0);
        #   - (5->6) -> (6->4);
        #   - (5->6) -> (6->7);
        #   - (6->5) -> (5->2);
        #   - (6->5) -> (5->3);
        #   - (6->7) -> (7->3);
        #   - (6->7) -> (7->4);
        #   - (7->6) -> (6->0);
        #   - (7->6) -> (6->4);
        #   - (7->6) -> (6->5).
        self.assertEqual(num_new_dual_edges, 72 + num_new_dual_nodes)
        new_dual_edge_index_list = new_dual_graph_batch.edge_index.t().tolist()
        dual_node_to_neighbors = {
            (0, 1): [(1, 2), (1, 3), (1, 4)],
            (1, 0): [(0, 4), (0, 6)],
            (0, 4): [(4, 1), (4, 6), (4, 7)],
            (4, 0): [(0, 1), (0, 6)],
            (0, 6): [(6, 4), (6, 5), (6, 7)],
            (6, 0): [(0, 1), (0, 4)],
            (1, 2): [(2, 3), (2, 5)],
            (2, 1): [(1, 0), (1, 3), (1, 4)],
            (1, 3): [(3, 2), (3, 5), (3, 7)],
            (3, 1): [(1, 0), (1, 2), (1, 4)],
            (1, 4): [(4, 0), (4, 6), (4, 7)],
            (4, 1): [(1, 0), (1, 2), (1, 3)],
            (2, 3): [(3, 1), (3, 5), (3, 7)],
            (3, 2): [(2, 1), (2, 5)],
            (2, 5): [(5, 3), (5, 6)],
            (5, 2): [(2, 1), (2, 3)],
            (3, 5): [(5, 2), (5, 6)],
            (5, 3): [(3, 1), (3, 2), (3, 7)],
            (3, 7): [(7, 4), (7, 6)],
            (7, 3): [(3, 1), (3, 2), (3, 5)],
            (4, 6): [(6, 0), (6, 5), (6, 7)],
            (6, 4): [(4, 0), (4, 1), (4, 7)],
            (4, 7): [(7, 3), (7, 6)],
            (7, 4): [(4, 0), (4, 1), (4, 6)],
            (5, 6): [(6, 0), (6, 4), (6, 7)],
            (6, 5): [(5, 2), (5, 3)],
            (6, 7): [(7, 3), (7, 4)],
            (7, 6): [(6, 0), (6, 4), (6, 5)]
        }
        for new_dual_node, other_dual_nodes in dual_node_to_neighbors.items():
            for other_dual_node in other_dual_nodes:
                self.assertTrue([
                    new_petdni_batch[new_dual_node],
                    new_petdni_batch[other_dual_node]
                ] in new_dual_edge_index_list)
            # Self-loop.
            self.assertTrue([
                new_petdni_batch[new_dual_node], new_petdni_batch[new_dual_node]
            ] in new_dual_edge_index_list)
import numpy as np
from sklearn.metrics import f1_score, confusion_matrix
"""
Evaluation metrics: F1 score, accuracy and CCC.
Borrowed from https://github.com/wtomin/Multitask-Emotion-Recognition-with-Incomplete-Labels/
"""
# Small stabilizing constant added to denominators to avoid division by zero.
epsilon = 1e-5
def averaged_f1_score(input, target):
    """Compute per-column binary F1 scores and their mean.

    Args:
        input: (N, label_size) array of predicted binary labels.
        target: (N, label_size) array of ground-truth binary labels.

    Returns:
        Tuple of (mean F1 over all columns, list of per-column F1 scores).
    """
    # NOTE(review): `input` is passed as the first (y_true) argument of
    # f1_score -- the order looks swapped, though binary F1 is symmetric
    # under swapping predictions and targets; confirm intended.
    _, label_size = input.shape
    f1s = [
        f1_score(input[:, i], target[:, i], zero_division=True)
        for i in range(label_size)
    ]
    return np.mean(f1s), f1s
def accuracy(input, target):
    """Return the fraction of positions where `input` equals `target`.

    Args:
        input: 1-D array of predicted labels.
        target: 1-D array of ground-truth labels, same length as `input`.

    Returns:
        Scalar accuracy in [0, 1].
    """
    assert len(input.shape) == 1
    # Vectorized mean of the element-wise matches; same value as the previous
    # Python-level `sum(input == target) / input.shape[0]` but runs at C speed.
    return np.mean(input == target)
def averaged_accuracy(x, y):
    """Compute per-column accuracies and their mean.

    Args:
        x: (N, C) array of predictions.
        y: (N, C) array of ground-truth labels.

    Returns:
        Tuple of (mean accuracy over the C columns, list of per-column
        accuracies).
    """
    assert len(x.shape) == 2
    num_columns = x.shape[1]
    accs = [accuracy(x[:, i], y[:, i]) for i in range(num_columns)]
    return np.mean(accs), accs
def CCC_score(x, y):
    """Concordance Correlation Coefficient (CCC) between two 1-D sequences.

    Args:
        x: 1-D array of predictions.
        y: 1-D array of ground-truth values.

    Returns:
        Scalar CCC (in [-1, 1] up to the stabilizing `epsilon` terms).
    """
    x_mean, y_mean = np.mean(x), np.mean(y)
    x_std, y_std = np.std(x), np.std(y)
    centered_x = x - x_mean
    centered_y = y - y_mean
    # Pearson correlation, with the module-level epsilon guarding against a
    # zero-variance denominator.
    rho = np.sum(centered_x * centered_y) / (
        np.sqrt(np.sum(centered_x**2)) * np.sqrt(np.sum(centered_y**2)) +
        epsilon)
    return 2 * rho * x_std * y_std / (
        x_std**2 + y_std**2 + (x_mean - y_mean)**2 + epsilon)
def VA_metric(x, y):
    """CCC for the valence/arousal columns; returns ([ccc_v, ccc_a], mean)."""
    x = np.clip(x, -0.99, 0.99)
    items = [CCC_score(x[:, col], y[:, col]) for col in range(2)]
    return items, sum(items) / 2
def EXPR_metric(x, y):
    """Expression-classification metrics.

    Returns ([macro_f1, accuracy], 0.67*f1 + 0.33*acc, confusion_matrix).
    """
    def _to_labels(arr):
        # Collapse (N, 1) to (N,); otherwise argmax over class scores.
        if len(arr.shape) == 1:
            return arr
        if arr.shape[1] == 1:
            return arr.reshape(-1)
        return np.argmax(arr, axis=-1)

    x = _to_labels(x)
    y = _to_labels(y)
    f1 = f1_score(x, y, average='macro')
    acc = accuracy(x, y)
    matrix = confusion_matrix(x, y)
    return [f1, acc], 0.67 * f1 + 0.33 * acc, matrix
def AU_metric(x, y):
    """Action-unit metrics: ([avg_f1, accuracy], 0.5*f1 + 0.5*acc).

    Predictions are thresholded at 0.5; labels are clipped to {0, 1}.
    """
    preds = (x > 0.5).astype(int)
    labels = y.clip(0, 1).astype(int)
    f1_av, _ = averaged_f1_score(preds, labels)
    acc_av = accuracy(preds.reshape(-1), labels.reshape(-1))
    return [f1_av, acc_av], 0.5 * f1_av + 0.5 * acc_av
432038 | from mongoengine import *
from datetime import datetime
class BaseDocument(Document):
    """Abstract base document adding created/updated timestamps to subclasses."""
    meta = {
        'abstract': True
    }
    # last updated timestamp
    updated_at = DateTimeField(default=datetime.now)
    # timestamp of when entry was created
    created_at = DateTimeField(default=datetime.now)
    def save(self, *args, **kwargs):
        # Refresh updated_at on every save; backfill created_at if missing.
        if not self.created_at:
            self.created_at = datetime.now()
        self.updated_at = datetime.now()
        return super(BaseDocument, self).save(*args, **kwargs)
    def to_dict(self):
        # Serialize via the subclass-provided schema.
        # NOTE(review): `.data` implies marshmallow < 3 — confirm dependency.
        return self.schema().dump(self).data
    def schema(self):
        # Subclasses must return a serializer schema instance.
        raise NotImplementedError
class User(BaseDocument):
    """A platform user: either a freelancer (type 0) or a client (type 1)."""
    def schema(self):
        # TODO: return a serializer schema; to_dict() fails until implemented.
        pass
    full_name = StringField()
    username = StringField(unique=True)
    email = EmailField(unique=True)
    # Fixed invalid placeholder `<PASSWORD>Field()`: store the (hashed)
    # password as a plain string field.
    password = StringField()
    type = IntField(min_value=0, max_value=1)  # 0 for freelancer, 1 for client
    profile_image = StringField()
    gender = IntField(min_value=-1, max_value=1)  # -1 male 0 other 1 female
    bio = StringField(max_length=400)
    # default=list avoids sharing one mutable [] instance across documents.
    tags = ListField(ReferenceField('SemanticTag'), default=list)
    meta = {'collection': 'users'}
class Rating(BaseDocument):
    """A rating left by one user (rater) about another (rated) on a project."""
    def schema(self):
        # TODO: return a serializer schema; to_dict() fails until implemented.
        pass
    project = ReferenceField('Project')
    rated = ReferenceField('User')  # user receiving the rating
    rater = ReferenceField('User')  # user giving the rating
    comment = StringField(max_length=2000, default="")
    value = FloatField(min_value=0, max_value=5)
    meta = {'collection': 'ratings'}
class Portfolio(BaseDocument):
    """A portfolio entry showcasing a user's past work."""
    def schema(self):
        # TODO: return a serializer schema; to_dict() fails until implemented.
        pass
    title = StringField(required=True)
    description = StringField()
    user = ReferenceField('User', required=True)
    date = DateTimeField()
    # default=list avoids sharing one mutable [] instance across documents.
    attachments = ListField(default=list)
    # NOTE(review): `blank`/`null` are Django ORM kwargs, not mongoengine ones —
    # confirm the installed mongoengine version accepts them.
    project_id = StringField(blank=True, null=True)
    tags = ListField(ReferenceField('SemanticTag'), default=list)
    meta = {'collection': 'portfolios'}
class Wallet(BaseDocument):
    """A user's account balance (non-negative)."""
    def schema(self):
        # TODO: return a serializer schema; to_dict() fails until implemented.
        pass
    user = ReferenceField('User')
    balance = FloatField(min_value=0)
    meta = {'collection': 'wallets'}
class Message(BaseDocument):
    """A single direct message between two users."""
    def schema(self):
        # TODO: return a serializer schema; to_dict() fails until implemented.
        pass
    sender = ReferenceField('User', required=True)
    receiver = ReferenceField('User', required=True)
    body = StringField(max_length=2000)
    meta = {'collection': 'messages'}
class Conversation(BaseDocument):
    """A two-party conversation holding references to its messages."""
    def schema(self):
        # TODO: return a serializer schema; to_dict() fails until implemented.
        pass
    user1 = ReferenceField('User', required=True)
    user2 = ReferenceField('User', required=True)
    # default=list avoids sharing one mutable [] instance across documents.
    messages = ListField(ReferenceField('Message'), default=list)
    meta = {'collection': 'conversations'}
|
432052 | import os
import sys
import datetime
from functools import partial
from multiprocessing.dummy import Pool
from subprocess import call
# Read the list of shell commands to run (one per line); close the file
# promptly instead of leaking the handle for the life of the process.
command_file = sys.argv[1]
with open(command_file) as f:
    commands = [line.rstrip() for line in f]
report_step = 32
# multiprocessing.dummy gives a THREAD pool — fine here because each job
# just blocks in a child subprocess.
pool = Pool(report_step)
for idx, return_code in enumerate(pool.imap(partial(call, shell=True), commands)):
    if idx % report_step == 0:
        print('[%s] command %d of %d' % (datetime.datetime.now().time(), idx, len(commands)))
    if return_code != 0:
        print('!! command %d of %d (\"%s\") failed' % (idx, len(commands), commands[idx]))
|
432104 | import subprocess
import os
import optparse
# This script changes test run classpath by unpacking tests.jar -> tests-dir. The goal
# is to launch tests with the same classpath as maven does.
def parse_args():
    """Parse only the leading known options; the java command line after them
    is returned untouched as positional args."""
    parser = optparse.OptionParser()
    # Stop option parsing at the first positional argument.
    parser.disable_interspersed_args()
    for opt_name in ('--jar-binary', '--tests-jar-path'):
        parser.add_option(opt_name)
    return parser.parse_args()
def main():
    """Unpack the tests jar and re-exec java with the unpacked classes
    substituted into the classpath (mirrors maven's test classpath)."""
    opts, args = parse_args()
    # unpack tests jar next to the build root when we can locate it
    try:
        dest = os.path.join(args[args.index('--build-root') + 1], 'test-classes')
    except (ValueError, IndexError):
        # '--build-root' absent (ValueError) or is the last token (IndexError);
        # narrowed from a blanket `except Exception` to avoid masking real bugs.
        dest = os.path.abspath('test-classes')
    os.makedirs(dest)
    subprocess.check_output([opts.jar_binary, 'xf', opts.tests_jar_path], cwd=dest)
    # fix java classpath: point it at the unpacked directory instead of the jar
    i = args.index('-classpath')
    args[i + 1] = args[i + 1].replace(opts.tests_jar_path, dest)
    # run java cmd, replacing the current process
    os.execv(args[0], args)
# Script entry point.
if __name__ == '__main__':
    main()
|
432201 | import io
import logging
import matplotlib.pyplot as plt
import numpy as np
try:
from pygifsicle import optimize
except ImportError:
pass
try:
import imageio
except ImportError:
pass
logger = logging.getLogger(__name__)
def kwargs_log_scale(unique_val, mode="equidistant", base=None):
    """Return keyword arguments for matplotlib's ``set_xscale``/``set_yscale``.

    Parameters
    ----------
    unique_val : np.array
        All unique values that will be plotted on the axis that should be put
        in log scale. Sorted in place as a side effect.
    mode : {"smooth", "equidistant"}, optional
        How to deal with zero, which cannot be represented on a log axis: a
        region close to zero is kept linear ("symlog"). With `equidistant` the
        tick at zero is at the same distance from other ticks as if there were
        no linear region, at the cost of a visible kink where the scale
        switches from linear to log. `smooth` instead varies smoothly between
        linear and log. For examples see
        https://github.com/matplotlib/matplotlib/issues/7008.
    base : int, optional
        Base to use for the log plot. If `None`, automatically inferred from
        the data. If `1`, no log scale is used.
    """
    unique_val.sort()
    # automatically compute base
    if base is None:
        positives = unique_val[unique_val > 0]
        if len(positives) < 2:
            # Not enough positive points to infer a ratio; the previous code
            # crashed here on int(nan). Fall back to a linear scale.
            return dict(value="linear")
        # take avg multiplier between each consecutive elements as base
        # i.e. 2,8,32 would be 4 but 0.1,1,10 would be 10
        diffs = positives[1:] / positives[:-1]
        base = int(diffs.mean().round())
    # if constant diff don't use logscale
    if base == 1 or np.diff(unique_val).var() == 0:
        return dict(value="linear")
    # only need to use symlog if there are negative values (i.e. need some linear region)
    if (unique_val <= 0).any():
        min_nnz = np.abs(unique_val[unique_val != 0]).min()
        if mode == "smooth":
            linscale = np.log(np.e) / np.log(base) * (1 - (1 / base))
        elif mode == "equidistant":
            linscale = 1 - (1 / base)
        else:
            raise ValueError(f"Unkown mode={mode}")
        return {
            "value": "symlog",
            "linthresh": min_nnz,
            "base": base,
            "subs": list(range(base)),
            "linscale": linscale,
        }
    else:
        return {
            "value": "log",
            "base": base,
            "subs": list(range(base)),
        }
|
432221 | from abc import ABC
import json
import inspect
import logging
from hashlib import sha1
import attr
from dateutil import parser as DateTimeParser
from ..utils import SmartJSONEncoder
from ..exceptions import EndpointFactoryException
@attr.s(cmp=False, hash=False)
class AttrSerializable(ABC):
    """
    Provides id, hashing, and serializing funcs via "attrs". Useful for extending
    the generic CRUD endpoint.
    """
    # Cached endpoint id, lazily derived from the SHA1 of the JSON form.
    _epid = attr.ib(default=None, init=False)
    @property
    def epid(self):
        """CRUD endpoint id"""
        if not self._epid:
            self._epid = self.sha1().hexdigest()
        return self._epid
    @epid.setter
    def epid(self, value):
        self._epid = value
    def sha1(self) -> sha1:
        # Hash of the canonical JSON serialization.
        return sha1(self.json().encode("UTF-8"))
    def __hash__(self):
        return hash(self.sha1().digest())
    def __eq__(self, other: "AttrSerializable"):
        # Content equality: equal serialized forms hash the same.
        return hash(self) == hash(other)
    @classmethod
    def register(cls, override=None):
        # Register this class (or an override) for Factory.create lookups.
        cls.Factory.registry[cls.__name__] = override or cls
    def asdict(self):
        # Remove non-init and default variables
        d = attr.asdict(self,
            filter=lambda attr, val: attr.init and (val != attr.default))
        # Strip the leading underscore from private attribute names.
        for k, v in d.items():
            if k.startswith("_"):
                d[k[1:]] = v
                del d[k]
        # Record the concrete type so Factory.create can rebuild the object.
        d['ctype'] = self.__class__.__name__
        self.Factory.registry[self.__class__.__name__] = self.__class__
        return d
    def json(self):
        map = self.asdict()
        data = json.dumps(map, cls=SmartJSONEncoder)
        return data
    class AttrFactory(object):
        """Factory that re-instantiates serialized objects from their dicts."""
        registry = {}
        @classmethod
        def create(cls, **kwargs):
            # NOTE(review): pop() raises KeyError when 'ctype' is absent, so the
            # "No ctype" branch below looks unreachable — confirm intent.
            ctype = kwargs.pop('ctype')
            if not ctype:
                raise EndpointFactoryException("No ctype, cannot instantiate")
            # Anything that has been "asdict" serialized will be registered
            _cls = cls.registry.get(ctype)
            if not _cls:
                # Voodoo for unregistered root objects
                _cls = inspect.stack()[1][0].f_globals.get(ctype)
            if not _cls:
                raise EndpointFactoryException(f"No class {ctype} is registered, cannot instantiate")
            # Re-hydrate datetime strings found in nested mappings.
            for k, v in kwargs.items():
                if hasattr(v, "keys"):
                    for kk, vv in v.items():
                        if "DateTime" in kk:
                            try:
                                v[kk] = DateTimeParser.parse(vv)
                            except:
                                logging.warning(f"Failed to parse dt from {kk}: {vv}")
                                pass
            return _cls(**kwargs)
        def copy(cls, ref: "AttrSerializable"):
            # NOTE(review): missing @classmethod — first arg is named `cls` but
            # binds the Factory INSTANCE when called as Factory.copy(ref);
            # works only because create is a classmethod. Confirm usage.
            kwargs = ref.asdict()
            obj = cls.create(**kwargs)
            return obj
    # Shared factory instance, exposed as a class attribute.
    Factory = AttrFactory()
|
432251 | from rest_framework import permissions
from tacticalrmm.permissions import _has_perm
class ManageClientsPerms(permissions.BasePermission):
    """Reads are open to everyone; writes require the manage-clients perm."""
    def has_permission(self, r, view):
        return r.method == "GET" or _has_perm(r, "can_manage_clients")
class ManageSitesPerms(permissions.BasePermission):
    """Reads are open to everyone; writes require the manage-sites perm."""
    def has_permission(self, r, view):
        return r.method == "GET" or _has_perm(r, "can_manage_sites")
class ManageDeploymentPerms(permissions.BasePermission):
    """Reads are open to everyone; writes require the manage-deployments perm."""
    def has_permission(self, r, view):
        return r.method == "GET" or _has_perm(r, "can_manage_deployments")
|
432261 | from flask import Flask, request, jsonify
import paramiko
import time
import telepot
from librouteros import connect
import MySQLdb as mdb
app = Flask(__name__)
@app.route('/configure', methods=['POST'])
def configure():
    """Provision a freshly-deployed MikroTik router.

    Expects JSON {"ip_router": <ip>}: reads device info over the API, pushes a
    base configuration over SSH, records the device in MySQL and sends a
    Telegram notification. Returns {"status": "ok"}.
    """
    dats = request.get_json()
    ip = dats['ip_router']
    username = 'admin'
    password = ''
    # get info from router
    api = connect(username=username, password=password, host=ip)
    router_board_info = api(cmd="/system/routerboard/print")
    identity_info = api(cmd="/system/identity/print")
    identity = identity_info[0]['name']
    serial_number = router_board_info[0]['serial-number']
    model = router_board_info[0]['model']
    version = router_board_info[0]['upgrade-firmware']
    # connect to router using ssh
    ssh_client = paramiko.SSHClient()
    ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh_client.connect(hostname=ip, username=username, password=password, allow_agent=False, look_for_keys=False)
    config_list = [
        'ip dns set servers=8.8.8.8',
        'ip address add address=192.168.1.1/24 interface=ether2',
        'ip pool add name=dhcp-server ranges=192.168.1.2-192.168.1.254',
        'ip dhcp-server add name=dhcp-server interface=ether2 address-pool=dhcp-server disabled=no',
        'ip dhcp-server network add address=192.168.1.0/24 gateway=192.168.1.1 dns-server=8.8.8.8',
        'ip service disable telnet,ftp,www,api-ssl',
        'ip firewall nat add chain=srcnat out-interface=pppoe-client action=masquerade',
        'ip firewall address-list add address=192.168.1.2-192.168.1.10 list=allowed_to_router',
        'ip firewall address-list add address=10.10.10.1 list=allowed_to_router',
        'ip firewall filter add action=accept chain=input src-address-list=allowed_to_router',
        'ip firewall filter add action=accept chain=input protocol=icmp',
        'ip firewall filter add action=drop chain=input',
        'ip firewall filter add action=drop chain=forward comment="Drop new connections from internet which are not dst-natted" connection-nat-state=!dstnat connection-state=new in-interface=pppoe-client',
        'password old-password="" new-password="<PASSWORD>" confirm-new-password="<PASSWORD>"',
        'user add name=noc password=<PASSWORD> disabled=no group=read',
        'tool bandwidth-server set enabled=no',
        'system clock set time-zone-name=Asia/Jakarta',
        'system ntp client set enabled=yes primary-ntp=192.168.3.11',
        'tool mac-server set allowed-interface-list=none',
        'tool mac-server mac-winbox set allowed-interface-list=none',
        'tool mac-server ping set enabled=no',
        'ip neighbor discovery-settings set discover-interface-list=none',
    ]
    # configure router; always release the SSH session afterwards
    try:
        for config in config_list:
            ssh_client.exec_command(config)
            time.sleep(0.2)
    finally:
        ssh_client.close()
    # add info to the database
    sql_host = 'localhost'
    sql_username = 'root'
    sql_password = '***'
    sql_database = 'ztp_mikrotik'
    sql_conn = mdb.connect(sql_host, sql_username, sql_password, sql_database)
    try:
        cursor = sql_conn.cursor()
        cursor.execute("Use {}".format(sql_database))
        # Parameterized query: device-supplied values (identity, serial, ...)
        # must never be interpolated into the SQL string (SQL injection).
        cursor.execute(
            "INSERT INTO customer (identity, ip_address, serial_number, model, version) "
            "VALUES (%s, %s, %s, %s, %s)",
            (identity, ip, serial_number, model, version))
        sql_conn.commit()
    finally:
        sql_conn.close()
    # send notification to telegram
    telegram_token = '<your_token>'
    chat_id = '<your_chat_id>'
    bot = telepot.Bot(telegram_token)
    bot.sendMessage(chat_id, 'Client Baru UP!\nIdentity: {}\nIP: {}\nSerial Number: {}\nModel: {}\nVersion: {}'.format(identity, ip, serial_number, model, version))
    data = {'status': 'ok'}
    return jsonify(data)
if __name__ == '__main__':
    # Development server; disable debug and bind more narrowly in production.
    app.run(host='0.0.0.0', debug=True)
|
432336 | import numpy as np
import pytest
from sklearn.datasets import make_blobs
from incdbscan import IncrementalDBSCAN
# Shared neighbourhood radius used by the clusterer fixtures and point layouts.
EPS = 1.5
@pytest.fixture
def incdbscan3():
    """IncrementalDBSCAN clusterer requiring 3 points per core neighbourhood."""
    return IncrementalDBSCAN(min_pts=3, eps=EPS)
@pytest.fixture
def incdbscan4():
    """IncrementalDBSCAN clusterer requiring 4 points per core neighbourhood."""
    return IncrementalDBSCAN(min_pts=4, eps=EPS)
@pytest.fixture
def blob_in_middle():
    """Ten gaussian points around the origin (std 0.4), deterministic seed."""
    # pylint: disable=unbalanced-tuple-unpacking
    points, _ = make_blobs(
        n_samples=10,
        centers=[[0, 0]],
        n_features=2,
        cluster_std=0.4,
        random_state=123,
        return_centers=False
    )
    return points
@pytest.fixture
def object_far_away():
    """A single point well outside the central blob's reach."""
    return np.array([[10.0, 10.0]])
@pytest.fixture
def point_at_origin():
    """A single point at the origin."""
    return np.array([[0.0, 0.0]])
@pytest.fixture
def three_points_on_the_left():
    """Three collinear points extending left from the origin, EPS apart."""
    return np.array([[-EPS * k, 0] for k in (1, 2, 3)])
@pytest.fixture
def three_points_on_the_top():
    """Three collinear points extending upward from the origin, EPS apart."""
    return np.array([[0, EPS * k] for k in (1, 2, 3)])
@pytest.fixture
def three_points_at_the_bottom():
    """Three collinear points extending downward from the origin, EPS apart."""
    return np.array([[0, -EPS * k] for k in (1, 2, 3)])
@pytest.fixture
def hourglass_on_the_right():
    """Vertical column at x=EPS with duplicated extreme rows (hourglass)."""
    y_coords = [EPS * 2, EPS * 2, EPS, 0, -EPS, -EPS * 2, -EPS * 2]
    return np.array([[EPS, y] for y in y_coords])
|
432339 | import unittest
import at_checks
import daemons_check
import modem_checks
import python_checks
import sim_checks
import simple_cmd_checks
from plmn.utils import *
from plmn.results import *
if __name__ == '__main__':
    nargs = process_args()
    suite = unittest.TestSuite()
    # Add all regression test-cases to this test-suite.
    loader = unittest.TestLoader()
    regression_cases = [
        python_checks.PythonChecks,
        daemons_check.DaemonChecks,
        modem_checks.ModemChecks,
        at_checks.AtCmdChecks,
        sim_checks.SimChecks,
        simple_cmd_checks.SimpleCmdChecks,
    ]
    for case in regression_cases:
        suite.addTests(loader.loadTestsFromTestCase(case))
    # Run the regression suite.
    unittest.TextTestRunner().run(suite)
    # Print final system state.
    Results.print_results()
|
432350 | from enum import Enum
class triggerConditions(Enum):
    """Enumeration of events that can fire a card's triggered ability.

    Numbering convention (see comments below): values < 1000 refer to the
    card itself; 1000+ are game-wide / any-player events; 2000+ are
    controller-specific variants of the same events.
    """
    # values over 1000 are initiated by the player (e.g. 'WHENEVER a creature etb...')
    # 2000+ are controller specific; 1000+ are game/any-player specific
    # otherwise the conditions refer to the card itself
    # e.g. onDiscard means when THIS card is discarded
    # zone changes
    onPlay = 0
    onDraw = 1
    onDiscard = 2
    onEtB = 3
    onPlayfromHand = 4
    onEnterGrave = 5
    onDeath = 6
    onLeaveBattlefield = 7
    # phase (ALL PLAYERS)
    onUpkeep = 1110
    onMain1 = 1111
    onMain2 = 1112
    onEnterCombat = 1113
    onDeclareAttackers = 1114
    onDeclareBlockers = 1115
    onEndofCombat = 1116
    onEndstep = 1117
    onCleanup = 1118
    # phase (CONTROLLER ONLY)
    onControllerUpkeep = 2110
    onControllerMain1 = 2111
    onControllerMain2 = 2112
    onControllerEnterCombat = 2113
    onControllerDeclareAttackers = 2114
    onControllerDeclareBlockers = 2115
    onControllerEndofCombat = 2116
    onControllerEndstep = 2117
    onControllerCleanup = 2118
    # events
    onUntap = 8
    onTap = 9
    onDealDamageToPlayers = 19
    onDealDamageToCreatures = 20
    onDealDamage = 21
    onTakeDamage = 22
    onAttack = 23
    onBlock = 24
    onCombatDamage = 25
    onCombatDamageToPlayers = 26
    onCombatDamageToCreatures = 27
    onTakeCombatDamage = 28
    # global events
    onLifeLoss = 1000
    onControllerLifeLoss = 2000
    onLifeGain = 1001
    onControllerLifeGain = 2001
    onCounterPutOnPermanent = 1002
    onControllerCounterPutOnPermanent = 2002
    onDrawCard = 1003
    onControllerDrawCard = 2003
    onPlayerDiscard = 1004
    onControllerDiscard = 2004
    onPermanentLtB = 1030
    onRevolt = 2030  # controller permanent leaving battlefield
    onPermanentEtB = 1031
    onControllerPermanentEtB = 2031
    onCreatureEtB = 1032
    onControllerCreatureEtB = 2032
|
432365 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import h5py
import argparse
from tqdm import tqdm
def merge(config):
    """Merge several generated datasets into one hdf5 file plus an id.txt.

    First pass verifies that all source datasets agree on invariant metadata
    (token counts, image sizes, percepts, ...), takes maxima for length-like
    fields and sums the train/test/val counts. Second pass copies every
    example under a new id prefixed with its source-dataset index.

    NOTE(review): `Dataset.value` is removed in h5py >= 3 — this code assumes
    an older h5py; confirm the pinned dependency version.
    """
    dir_name = os.path.join('datasets/', config.dir_name)
    check_path(dir_name)
    f = h5py.File(os.path.join(dir_name, 'data.hdf5'), 'w')
    id_file = open(os.path.join(dir_name, 'id.txt'), 'w')
    # Duplicate paths would double-copy examples under colliding prefixes.
    new_dataset_paths = list(set(config.dataset_paths))
    if len(new_dataset_paths) != len(config.dataset_paths):
        raise ValueError('There is overlap in the dataset paths')
    num_train, num_test, num_val = 0, 0, 0
    h, w, c = None, None, None
    max_demo_length = 0
    max_program_length = 0
    num_program_tokens = None
    num_demo_per_program = 0
    num_test_demo_per_program = 0
    num_action_tokens = None
    percepts = None
    vizdoom_pos_keys = None
    vizdoom_max_init_pos_len = 0
    perception_type = None
    print('data_info checking')
    # Pass 1: validate cross-dataset consistency and accumulate data_info.
    for i, dataset_path in enumerate(config.dataset_paths):
        print('dataset [{}/{}]'.format(i, len(config.dataset_paths)))
        fs = h5py.File(os.path.join(dataset_path, 'data.hdf5'), 'r')
        fs_max_demo_length = fs['data_info']['max_demo_length'].value
        fs_max_program_length = fs['data_info']['max_program_length'].value
        fs_num_program_tokens = fs['data_info']['num_program_tokens'].value
        fs_num_demo_per_program = fs['data_info']['num_demo_per_program'].value
        fs_num_test_demo_per_program = fs['data_info']['num_test_demo_per_program'].value
        fs_num_action_tokens = fs['data_info']['num_action_tokens'].value
        fs_num_train = fs['data_info']['num_train'].value
        fs_num_test = fs['data_info']['num_test'].value
        fs_num_val = fs['data_info']['num_val'].value
        fs_h = fs['data_info']['s_h_h'].value
        fs_w = fs['data_info']['s_h_w'].value
        fs_c = fs['data_info']['s_h_c'].value
        fs_percepts = list(fs['data_info']['percepts'].value)
        fs_vizdoom_pos_keys = list(fs['data_info']['vizdoom_pos_keys'].value)
        fs_vizdoom_max_init_pos_len = fs['data_info']['vizdoom_max_init_pos_len'].value
        fs_perception_type = fs['data_info']['perception_type'].value
        # Length-like fields take the maximum across datasets.
        max_demo_length = max(max_demo_length, fs_max_demo_length)
        max_program_length = max(max_program_length, fs_max_program_length)
        # Token counts must match exactly, otherwise programs are incompatible.
        if num_program_tokens is None: num_program_tokens = fs_num_program_tokens
        elif num_program_tokens != fs_num_program_tokens:
            raise ValueError('program token mismatch: {}'.format(dataset_path))
        num_demo_per_program = max(num_demo_per_program, fs_num_demo_per_program)
        num_test_demo_per_program = max(num_test_demo_per_program,
                                        fs_num_test_demo_per_program)
        if num_action_tokens is None: num_action_tokens = fs_num_action_tokens
        elif num_action_tokens != fs_num_action_tokens:
            raise ValueError('num action token mismatch: {}'.format(dataset_path))
        # Split sizes accumulate.
        num_train += fs_num_train
        num_test += fs_num_test
        num_val += fs_num_val
        # Image geometry must match exactly.
        if h is None: h = fs_h
        elif h != fs_h: raise ValueError('image height mismatch: {}'.format(dataset_path))
        if w is None: w = fs_w
        elif w != fs_w: raise ValueError('image width mismatch: {}'.format(dataset_path))
        if c is None: c = fs_c
        elif c != fs_c: raise ValueError('image channel mismatch: {}'.format(dataset_path))
        if percepts is None: percepts = fs_percepts
        elif percepts != fs_percepts:
            raise ValueError('percepts mismatch: {}'.format(dataset_path))
        if vizdoom_pos_keys is None: vizdoom_pos_keys = fs_vizdoom_pos_keys
        elif vizdoom_pos_keys != fs_vizdoom_pos_keys:
            raise ValueError('vizdoom_pos_keys mismatch: {}'.format(dataset_path))
        vizdoom_max_init_pos_len = max(vizdoom_max_init_pos_len, fs_vizdoom_max_init_pos_len)
        if perception_type is None: perception_type = fs_perception_type
        elif perception_type != fs_perception_type:
            raise ValueError('perception_type mismatch: {}'.format(dataset_path))
        fs.close()
    print('copy data')
    # Pass 2: copy every example; ids are prefixed with the dataset index so
    # identical ids from different source datasets cannot collide.
    for i, dataset_path in enumerate(config.dataset_paths):
        print('dataset [{}/{}]'.format(i, len(config.dataset_paths)))
        fs = h5py.File(os.path.join(dataset_path, 'data.hdf5'), 'r')
        ids = open(os.path.join(dataset_path, 'id.txt'),
                   'r').read().splitlines()
        for id in tqdm(ids):
            new_id = '{}_{}'.format(i, id)
            id_file.write(new_id+'\n')
            grp = f.create_group(new_id)
            for key in fs[id].keys():
                grp[key] = fs[id][key].value
        fs.close()
    # Write the merged data_info group.
    grp = f.create_group('data_info')
    grp['max_demo_length'] = max_demo_length
    grp['max_program_length'] = max_program_length
    grp['num_program_tokens'] = num_program_tokens
    grp['num_demo_per_program'] = num_demo_per_program
    grp['num_test_demo_per_program'] = num_test_demo_per_program
    grp['num_action_tokens'] = num_action_tokens
    grp['num_train'] = num_train
    grp['num_test'] = num_test
    grp['num_val'] = num_val
    grp['s_h_h'] = h
    grp['s_h_w'] = w
    grp['s_h_c'] = c
    grp['percepts'] = percepts
    grp['vizdoom_pos_keys'] = vizdoom_pos_keys
    grp['vizdoom_max_init_pos_len'] = vizdoom_max_init_pos_len
    grp['perception_type'] = perception_type
    f.close()
    id_file.close()
    print('Dataset generated under {} with {}'
          ' samples ({} for training and {} for testing '
          'and {} for val'.format(dir_name, num_train + num_test + num_val,
                                  num_train, num_test, num_val))
def check_path(path):
    """Create *path* (including parents); refuse to reuse an existing one."""
    if os.path.exists(path):
        raise ValueError('Be careful, you are trying to overwrite some dir')
    os.makedirs(path)
def get_args():
    """Build and parse the CLI arguments for the merge script."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # Output directory (created under datasets/) and the datasets to merge.
    parser.add_argument('--dir_name', type=str, default='vizdoom_dataset')
    parser.add_argument('--dataset_paths', nargs='+',
                        help='list of existing dataset paths')
    return parser.parse_args()
if __name__ == '__main__':
    # Parse CLI arguments and merge the listed datasets.
    args = get_args()
    merge(args)
|
def main(request, response):
    """wptserve handler: serve a JS body that installs a rejected-promise
    handler. A CORS allow-all header is added unless ?allow=false (default)."""
    headers = [(b"Content-Type", b"application/javascript")]
    if request.GET.first(b"allow", b"false") != b"false":
        headers.append((b"Access-Control-Allow-Origin", b"*"))
    body = b"""
    function handleRejectedPromise(promise) {
      promise.catch(() => {});
    }
    (function() {
      new Promise(function(resolve, reject) { reject(42); });
    })();
    """
    return headers, body
|
432378 | import os, argparse, glob
import numpy as np
import cv2
from scipy.misc import imread, imsave
from skimage.measure import compare_ssim
import psnr
import fastaniso
import py_utils as utils
# CLI options: dataset root, list file of sub-directories, optional cap on how
# many directories to evaluate, and whether to smooth the estimated matte.
parser = argparse.ArgumentParser()
parser.add_argument('--input_root', default='')
parser.add_argument('--dir_list', default='dir_list.txt')
parser.add_argument('--max_images', default=-1, type=int)
# NOTE(review): default=True with action='store_false' means passing
# --smoothing DISABLES smoothing — confirm the flag name matches that intent.
parser.add_argument('--smoothing', default=True, action='store_false')
args = parser.parse_args()
def setupDirList():
    """Resolve args.dir_list relative to args.input_root and return the
    (possibly truncated) list of test directories."""
    if args.input_root == '':
        raise Exception('Input root not defined')
    print('[Input root]: %s' % (args.input_root))
    print('[Dir list]: %s' % (args.dir_list))
    # NOTE: mutates args.dir_list in place to its absolute path.
    args.dir_list = os.path.join(args.input_root, args.dir_list)
    dir_list = utils.readList(args.dir_list)
    if args.max_images > 0:
        dir_list = dir_list[:args.max_images]
    return dir_list
def loadData(dir_name):
    """Load one test case (input, background, mask, rho, flow) from *dir_name*.

    The directory is located via its single *.flo file; sibling images share
    the same prefix. Also writes a flow-visualisation image as a side effect.
    Returns a dict with float arrays, image size and the file prefix.
    """
    flow_name = glob.glob(os.path.join(dir_name, '*.flo'))[0]
    prefix, _ = os.path.splitext(flow_name)
    in_img = imread(prefix + '_input.jpg').astype(float)
    bg_img = imread(prefix + '_bg.jpg').astype(float)
    # mask and rho are stored as 8-bit images; normalise to [0, 1]
    mask = imread(prefix + '_mask.png').astype(float) / 255
    rho = imread(prefix + '_rho.png').astype(float) / 255
    flow = utils.readFloFile(flow_name).astype(float)
    # side effect: save a color visualisation of the flow next to the inputs
    fcolor = utils.flowToColor(flow)
    imsave(prefix + '_fcolor.jpg', fcolor)
    h, w, c = in_img.shape
    # broadcast single-channel mask/rho to 3 channels for RGB math
    mask = np.expand_dims(mask, 2).repeat(3, 2)
    rho = np.expand_dims(rho, 2).repeat(3, 2)
    return {'in':in_img, 'bg':bg_img, 'mask':mask, 'rho':rho,
            'flow':flow, 'fcolor':fcolor, 'h':h, 'w': w, 'name': prefix}
def renderFinalImg(ref, warped, mask, rho):
    """Composite: attenuated warped image inside the matte, *ref* outside."""
    foreground = (warped * rho) * mask
    background = (1 - mask) * ref
    return foreground + background
def warpImage(ref, flow, grid_x, grid_y):
    """Backward-warp *ref* by *flow* using bilinear remapping.

    NOTE(review): flow channel 0 is added to grid_y and channel 1 to grid_x,
    i.e. flow is assumed (dy, dx)-ordered — confirm against the .flo writer.
    """
    h, w = grid_x.shape
    # absolute sampling coordinates, clamped to the image bounds
    flow_x = np.clip(flow[:,:,1] + grid_x, 0, w-1)
    flow_y = np.clip(flow[:,:,0] + grid_y, 0, h-1)
    flow_x, flow_y = cv2.convertMaps(flow_x.astype(np.float32), flow_y.astype(np.float32), cv2.CV_32FC2)
    warped_img = cv2.remap(ref, flow_x, flow_y, cv2.INTER_LINEAR)
    return warped_img
def computeError(img1, img2):
    """Return (PSNR, SSIM) between two RGB images.

    SSIM is computed on the luma (Y) channel only, after an RGB->BGR->YCrCb
    round-trip through OpenCV.
    """
    img_psnr = psnr.psnr(img1, img2)
    gt_y = cv2.cvtColor(cv2.cvtColor(img1.astype(np.uint8), cv2.COLOR_RGB2BGR), cv2.COLOR_BGR2YCR_CB)[:,:,0]
    pred_y = cv2.cvtColor(cv2.cvtColor(img2.astype(np.uint8), cv2.COLOR_RGB2BGR), cv2.COLOR_BGR2YCR_CB)[:,:,0]
    img_ssim = compare_ssim(gt_y, pred_y, gaussian_weight=True)
    return img_psnr, img_ssim
def smoothingMask(mask):
    """Close small holes in the matte mask, then blur its edges."""
    kernel = np.ones((5, 5), np.uint8)
    closed = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
    return cv2.GaussianBlur(closed, (5, 5), 0)
def smoothingFlow(flow):
    """Apply anisotropic diffusion to both flow channels. Mutates *flow*."""
    for channel in (0, 1):
        flow[:, :, channel] = fastaniso.anisodiff(flow[:, :, channel], niter=9)
    return flow
def smoothingRho(rho, mask):
    """Reset attenuation to 1 outside the matte, then blur. Mutates *rho*."""
    outside_matte = mask < 0.2
    rho[outside_matte] = 1
    return cv2.GaussianBlur(rho, (5, 5), 0)
def smoothingEstimation(data, grid_x, grid_y):
    """Smooth the estimated matte (mask, rho, flow), re-render the composite
    and save the results next to the inputs with a "_smooth" suffix."""
    smooth = {}
    smooth['mask'] = smoothingMask(data['mask'])
    smooth['rho'] = smoothingRho(data['rho'], smooth['mask'])
    smooth['flow'] = smoothingFlow(data['flow'])
    # zero out flow outside the (smoothed) matte
    smooth['flow'][(smooth['mask'] < 0.2)[:,:,0:2]] = 0
    smooth['fcolor'] = utils.flowToColor(smooth['flow'])
    smooth['warped'] = warpImage(data['bg'], smooth['flow'], grid_x, grid_y)
    smooth['final'] = renderFinalImg(data['bg'], smooth['warped'], smooth['mask'], smooth['rho'])
    results = {}
    out = ['mask', 'rho', 'fcolor', 'final']
    for i, name in enumerate(out):
        key = '%s' % (name)
        # mask/rho are in [0, 1]; rescale to 8-bit range for saving
        if name in ['mask', 'rho']:
            results.update({key: smooth[name] * 255})
        else:
            results.update({key: smooth[name]})
    utils.saveResultsSeparate(data['name'] + "_smooth", results)
def evalList(dir_list):
    """Evaluate PSNR/SSIM for every test directory and write a summary Log.

    For each directory: re-render the input from the estimated matte and
    background, compare against the real input (TOM-Net error) and also
    report the background-only error as a baseline.
    """
    print('Total number of directories: %d' % len(dir_list))
    loss = {'psnr': 0, 'ssim': 0, 'psnr_bg': 0, 'ssim_bg': 0}
    for idx, dir_name in enumerate(dir_list):
        data = loadData(os.path.join(args.input_root, dir_name))
        h, w = data['h'], data['w']
        print('[%d/%d] Dir: %s, size %dx%d' % (idx, len(dir_list), dir_name, h, w))
        # Reconstructed Input Image with the estimated matte and background image
        grid_x = np.tile(np.linspace(0, w-1, w), (h, 1)).astype(float)
        grid_y = np.tile(np.linspace(0, h-1, h), (w, 1)).T.astype(float)
        data['warped'] = warpImage(data['bg'], data['flow'], grid_x, grid_y)
        data['final'] = renderFinalImg(data['bg'], data['warped'], data['mask'], data['rho'])
        imsave(data['name'] + '_final.jpg', data['final'])
        # Background Error
        p, s = computeError(data['bg'], data['in'])
        print('\t BG psnr: %f, ssim: %f' % (p, s))
        loss['psnr_bg'] += p; loss['ssim_bg'] += s
        # TOM-Net Error
        p, s = computeError(data['final'], data['in'])
        loss['psnr'] += p; loss['ssim'] += s
        print('\t TOMNet psnr: %f, ssim: %f' % (p, s))
        # Smoothing Environment Matte
        if args.smoothing:
            smoothingEstimation(data, grid_x, grid_y)
    print('******* Finish Testing Dir: %s\nList: %s' % (args.input_root, args.dir_list))
    # NOTE(review): dir_name here is the LAST loop value, so the Log lands in
    # the final test directory — confirm this is intended.
    with open(os.path.join(args.input_root, dir_name, 'Log'), 'w') as f:
        f.write('Input_root: %s\n' % (args.input_root))
        f.write('dir_list: %s\n' % (args.dir_list))
        for k in loss.keys():
            print('[%s]: %f' % (k, loss[k]/len(dir_list)))
            f.write('[%s]: %f\n' % (k, loss[k]/len(dir_list)))
if __name__ == '__main__':
    # Build the directory list from the CLI args, then evaluate all of them.
    dir_list = setupDirList()
    evalList(dir_list)
|
432458 | from .property import LiteralProperty
import packaging.version as pv
import rdflib
class VersionProperty(LiteralProperty):
    """Literal property holding a version string validated via `packaging`."""

    def convert_to_user(self, value):
        """Convert the stored RDF literal to a user-facing string (None if empty)."""
        result = str(value)
        if result == '':
            # special case, empty strings are equivalent to None
            return None
        return result

    def convert_from_user(self, value):
        """Validate *value* as a version string and convert to an RDF literal.

        Raises ValueError (chained to the original cause) for invalid or
        non-string versions.
        """
        # Empty string is equivalent to None
        if value == '':
            value = None
        # None is ok iff upper bound is 1 and lower bound is 0.
        # If upper bound > 1, attribute is a list and None is not a valid list
        # If lower bound > 0, attribute must have a value, so None is unacceptable
        if value is None and self.upper_bound == 1 and self.lower_bound == 0:
            return None
        try:
            version = pv.Version(value)
        except (pv.InvalidVersion, TypeError) as e:
            # Chain the original cause so the underlying error stays visible.
            raise ValueError(e) from e
        return rdflib.Literal(str(version))

    @staticmethod
    def _make_version(major: int, minor: int, micro: int) -> pv.Version:
        """Build a Version from its numeric components."""
        return pv.Version(f'{major}.{minor}.{micro}')

    @staticmethod
    def increment_major(version: str) -> str:
        """Return *version* with the major component incremented by one."""
        old = pv.Version(version)
        new = VersionProperty._make_version(old.major + 1, old.minor, old.micro)
        return str(new)
|
# Pinned software versions shared by the build; *_DEB_VERSION values are the
# Debian package revisions derived from the base versions.
CASSANDRA_VERSION = "3.11.11"
CASSANDRA_DEB_VERSION = CASSANDRA_VERSION + "_all"
DNSMASQ_VERSION = "2.80-1"
DNSMASQ_DEB_VERSION = DNSMASQ_VERSION + "+deb10u1"
ELASTICSEARCH_VERSION = "7.3.1"
ENVOY_VERSION = "1.15.5"
ERLANG_VERSION = "22.3.4.9-1"
ERLANG_DEB_VERSION = ERLANG_VERSION + ""
GERRIT_VERSION = "3.4.0"
GRAFANA_VERSION = "8.1.1"
GRAFANA_DEB_VERSION = GRAFANA_VERSION
JAVA_8_VERSION = "8.0.302"
ZULU_8_VERSION = "8.0.302-1"
JAVA_11_VERSION = "11.0.12"
ZULU_11_VERSION = "11.0.12-1"
GRAAL_VERSION = "21.2.0"
JENKINS_VERSION = "2.289.3"
JENKINS_SWARM_VERSION = "3.27"
KAFKA_VERSION = "2.8.0"
KIBANA_VERSION = "7.3.1"
NODEJS_FOR_KIBANA_VERSION = "10.15.2"
MAVEN_VERSION = "3.8.1"
NEXUS_VERSION = "2.14.20-02"
NGINX_VERSION = "1.21.1-1"
NGINX_DEB_VERSION = NGINX_VERSION + "~buster"
NODEJS_VERSION = "14.17.5"
PHP_VERSION = "7.3.29-1"
PHP_DEB_VERSION = PHP_VERSION + "~deb10u1"
POSTGRESQL_MAJOR_VERSION = "13"
POSTGRESQL_VERSION = POSTGRESQL_MAJOR_VERSION + "." + "4-1"
POSTGRESQL_DEB_VERSION = POSTGRESQL_VERSION + "." + "pgdg100+1"
POSTGIS_MINOR_VERSION = "3"
POSTGIS_VERSION = POSTGIS_MINOR_VERSION + ".1.3"
POSTGIS_CONTAINER_VERSION = POSTGRESQL_VERSION + "-" + POSTGIS_VERSION
POSTGIS_DEB_VERSION = POSTGIS_VERSION + "+dfsg-1~exp1.pgdg100+1+b1"
POSTGIS_POSTGRESQL_DEB_VERSION = POSTGIS_VERSION + "+dfsg-1~exp1.pgdg100+1"
PROMETHEUS_VERSION = "2.27.1"
PROMETHEUS_JMX_JAVAAGENT = "0.16.0"
RABBITMQ_VERSION = "3.8.7"
REDIS_VERSION = "5.0.3-4"
REDIS_DEB_VERSION = REDIS_VERSION + "+deb10u2"
SBT_VERSION = "1.5.3"
TOMCAT9_VERSION = "9.0.31-1"
TOMCAT9_DEB_VERSION = TOMCAT9_VERSION + "~deb10u4"
YARN_VERSION = "1.22.5"
ZIPKIN_VERSION = "2.23.2"
ZOOKEEPER_VERSION = "3.6.3"
JASPERREPORTS_SERVER_VERSION = "6.4.2"
# NOTE(review): "PENATHO" looks like a typo for "PENTAHO"; renaming would
# break every consumer of this constant, so it is left as-is.
PENATHO_DI_VERSION = "7.1.0.0-12"
def _version_shell_script_impl(ctx):
    """Expand the shell-script template, substituting every pinned version.

    Fix: PROMETHEUS_JMX_JAVAAGENT was defined above but missing from the
    substitutions map, so its placeholder was left unexpanded in templates.
    """
    # (.+)=(%\{.+\}) => "$2": $1,
    ctx.actions.expand_template(
        template=ctx.file._template,
        substitutions={
            "%{CASSANDRA_VERSION}": CASSANDRA_VERSION,
            "%{CASSANDRA_DEB_VERSION}": CASSANDRA_DEB_VERSION,
            "%{DNSMASQ_VERSION}": DNSMASQ_VERSION,
            "%{DNSMASQ_DEB_VERSION}": DNSMASQ_DEB_VERSION,
            "%{ELASTICSEARCH_VERSION}": ELASTICSEARCH_VERSION,
            "%{ENVOY_VERSION}": ENVOY_VERSION,
            "%{ERLANG_VERSION}": ERLANG_VERSION,
            "%{ERLANG_DEB_VERSION}": ERLANG_DEB_VERSION,
            "%{GERRIT_VERSION}": GERRIT_VERSION,
            "%{GRAFANA_VERSION}": GRAFANA_VERSION,
            "%{GRAFANA_DEB_VERSION}": GRAFANA_DEB_VERSION,
            "%{JAVA_8_VERSION}": JAVA_8_VERSION,
            "%{ZULU_8_VERSION}": ZULU_8_VERSION,
            "%{JAVA_11_VERSION}": JAVA_11_VERSION,
            "%{ZULU_11_VERSION}": ZULU_11_VERSION,
            "%{GRAAL_VERSION}": GRAAL_VERSION,
            "%{JENKINS_VERSION}": JENKINS_VERSION,
            "%{JENKINS_SWARM_VERSION}": JENKINS_SWARM_VERSION,
            "%{KAFKA_VERSION}": KAFKA_VERSION,
            "%{KIBANA_VERSION}": KIBANA_VERSION,
            "%{NODEJS_FOR_KIBANA_VERSION}": NODEJS_FOR_KIBANA_VERSION,
            "%{MAVEN_VERSION}": MAVEN_VERSION,
            "%{NEXUS_VERSION}": NEXUS_VERSION,
            "%{NGINX_VERSION}": NGINX_VERSION,
            "%{NGINX_DEB_VERSION}": NGINX_DEB_VERSION,
            "%{NODEJS_VERSION}": NODEJS_VERSION,
            "%{PHP_VERSION}": PHP_VERSION,
            "%{PHP_DEB_VERSION}": PHP_DEB_VERSION,
            "%{POSTGRESQL_MAJOR_VERSION}": POSTGRESQL_MAJOR_VERSION,
            "%{POSTGRESQL_VERSION}": POSTGRESQL_VERSION,
            "%{POSTGRESQL_DEB_VERSION}": POSTGRESQL_DEB_VERSION,
            "%{POSTGIS_MINOR_VERSION}": POSTGIS_MINOR_VERSION,
            "%{POSTGIS_VERSION}": POSTGIS_VERSION,
            "%{POSTGIS_CONTAINER_VERSION}": POSTGIS_CONTAINER_VERSION,
            "%{POSTGIS_DEB_VERSION}": POSTGIS_DEB_VERSION,
            "%{POSTGIS_POSTGRESQL_DEB_VERSION}": POSTGIS_POSTGRESQL_DEB_VERSION,
            "%{PROMETHEUS_VERSION}": PROMETHEUS_VERSION,
            "%{PROMETHEUS_JMX_JAVAAGENT}": PROMETHEUS_JMX_JAVAAGENT,
            "%{RABBITMQ_VERSION}": RABBITMQ_VERSION,
            "%{REDIS_VERSION}": REDIS_VERSION,
            "%{REDIS_DEB_VERSION}": REDIS_DEB_VERSION,
            "%{SBT_VERSION}": SBT_VERSION,
            "%{TOMCAT9_VERSION}": TOMCAT9_VERSION,
            "%{TOMCAT9_DEB_VERSION}": TOMCAT9_DEB_VERSION,
            "%{YARN_VERSION}": YARN_VERSION,
            "%{ZIPKIN_VERSION}": ZIPKIN_VERSION,
            "%{ZOOKEEPER_VERSION}": ZOOKEEPER_VERSION,
            "%{JASPERREPORTS_SERVER_VERSION}": JASPERREPORTS_SERVER_VERSION,
            "%{PENATHO_DI_VERSION}": PENATHO_DI_VERSION,
        },
        output=ctx.outputs.script
    )
# Rule producing <name>.sh from the shared template with all versions expanded.
version_shell_script = rule(
    implementation=_version_shell_script_impl,
    attrs={
        "_template": attr.label(
            default=Label("//scripts/versions:template"),
            allow_single_file=True,
        )
    },
    outputs={
        "script": "%{name}.sh"
    },
)
|
432515 | import itertools
import os
import pickle
from soap import logger
from soap.common import timeit
from soap.flopoco.common import (
flopoco_operators, operators_map, we_range, wf_range, wi_range,
flopoco_key, flopoco, xilinx, default_file
)
from soap.semantics import IntegerInterval, ErrorSemantics
INVALID = -1
def _eval_operator(key, dir_name=None):
    """Synthesize the operator described by `key` and return its LUT count.

    NOTE(review): the `dir_name` argument is accepted but immediately
    overwritten by the value returned from `flopoco(key)`, so the
    `dir_name=work_dir_name` passed by `_para_synth` has no effect —
    confirm whether `flopoco` was meant to receive it.
    """
    file_name, dir_name = flopoco(key)
    return xilinx(file_name, dir_name)
@timeit
def _para_synth(key):
    """Synthesize one operator key in a worker process.

    Returns (key, LUT count), or (key, INVALID) when the external
    synthesis tool exits with an error.
    """
    import sh
    syn_dir = 'syn_{}'.format(os.getpid())
    try:
        luts = _eval_operator(key, dir_name=syn_dir)
    except sh.ErrorReturnCode:
        logger.error('Error processing {}'.format(key))
        return key, INVALID
    logger.info('Processed {}, LUTs {}'.format(key, luts))
    return key, luts
# Lazily-created process pool shared by this module.
_pool_ = None
def _pool():
    """Return the module-wide multiprocessing pool, creating it on first use."""
    global _pool_
    if _pool_ is not None:
        return _pool_
    import multiprocessing
    _pool_ = multiprocessing.Pool()
    return _pool_
@timeit
def _batch_synth(we_range, wf_range, existing_results=None):
    """Synthesize every FloPoCo operator over the given width ranges.

    Args:
        we_range: iterable of exponent widths to sweep.
        wf_range: iterable of mantissa widths to sweep.
        existing_results: optional dict of already-synthesized results;
            keys present there are not synthesized again.

    Returns:
        dict mapping flopoco keys to LUT counts (INVALID on failure),
        including all entries of `existing_results`.
    """
    existing_results = existing_results or {}
    logger.info('Generating synthesis schedule...')
    iterator = itertools.product(
        flopoco_operators, we_range, wf_range, wi_range)
    # Deduplicate with a set (the original scanned key_list per key, O(n^2));
    # keep key_list ordered so the schedule stays deterministic.
    seen = set(existing_results)
    key_list = []
    for key in iterator:
        key = flopoco_key(*key)
        if key in seen:
            continue
        seen.add(key)
        key_list.append(key)
    logger.info('Synthesizing...')
    results = _pool().imap_unordered(_para_synth, key_list)
    results_dict = dict(existing_results)
    for key, value in results:
        results_dict[key] = value
    logger.info('Synthesis complete')
    return results_dict
def _load(file_name):
    """Unpickle and return the object stored in `file_name`."""
    with open(file_name, 'rb') as stream:
        return pickle.load(stream)
def _save(file_name, results):
    """Pickle `results` into `file_name`, overwriting any existing file."""
    with open(file_name, 'wb') as stream:
        pickle.dump(results, stream)
def _plot(results):
    """Scatter-plot the (we, wf, LUTs) triples of a results dict in 3D,
    skipping entries marked INVALID (negative)."""
    import matplotlib.pyplot as plt
    from mpl_toolkits.mplot3d import Axes3D
    fig = plt.figure()
    ax = Axes3D(fig)
    points = [(xv, yv, zv)
              for (op, xv, yv), zv in results.items()
              if zv >= 0]
    ax.scatter(*zip(*points))
    plt.show()
class FlopocoMissingImplementationError(Exception):
    """Raised when no synthesis statistics exist for a requested operator."""
_stats = None
def operator_luts(op, datatype, exp=0, prec=0):
    """Return the LUT count for operator `op` at the given format.

    Args:
        op: operator name (a key of `operators_map`).
        datatype: `ErrorSemantics` (floating-point) or `IntegerInterval`.
        exp: exponent width for floats; integer width for integers.
        prec: mantissa width for floats (unused for integers).

    Missing float statistics fall back to slightly wider exponent/mantissa
    widths (pessimistic but valid); raises
    FlopocoMissingImplementationError when nothing usable exists.
    """
    global _stats
    if not _stats:
        # Statistics are loaded lazily from the pickled results file.
        if not os.path.isfile(default_file):
            raise FlopocoMissingImplementationError(
                'No flopoco statistics available, please consider regenerate.')
        _stats = _load(default_file)
    fop = operators_map[op]
    # Cheap structural "operators" have closed-form LUT costs.
    if fop == 'Multiplexer':
        return exp + prec
    if fop == 'OneLUT':
        return 1
    if fop == 'Null':
        return 0
    if isinstance(fop, list):
        # fop is a [float_variant, integer_variant] pair.
        if datatype is ErrorSemantics:
            fop = fop[0]
            we, wf, wi = exp, prec, 0
        elif datatype is IntegerInterval:
            fop = fop[1]
            we, wf, wi = 0, 0, exp
        else:
            raise TypeError('Datatype {} not recognized.'.format(datatype))
    # NOTE(review): a non-list, non-special operators_map entry would leave
    # we/wf/wi unbound below — presumably every entry is one of the forms
    # handled above; confirm against operators_map.
    value = _stats.get(flopoco_key(fop, we, wf, wi), INVALID)
    if value != INVALID:
        return value
    if fop not in flopoco_operators:
        raise FlopocoMissingImplementationError(
            'Operator {} has no statistics'.format(op))
    if wf not in wf_range:
        raise FlopocoMissingImplementationError(
            'Precision {} out of range'.format(wf))
    if we > max(we_range):
        raise FlopocoMissingImplementationError(
            'Exponent width {} out of range'.format(we))
    # BUG FIX: the original tested `datatype == 'int'`, which is never true
    # (datatype is a class — see the `is IntegerInterval` check above), so
    # integer lookups fell through to the float-widening fallback.
    if datatype is IntegerInterval:
        raise FlopocoMissingImplementationError(
            'Failed to get statistics for integer operator {} with width {}'
            .format(op, wi))
    # BUG FIX: the original recursive calls passed five positional arguments
    # to this four-parameter function, so the fallback always raised
    # TypeError instead of retrying with wider widths.
    try:
        return operator_luts(op, datatype, we + 1, wf)
    except FlopocoMissingImplementationError:
        pass
    try:
        return operator_luts(op, datatype, we, wf + 1)
    except FlopocoMissingImplementationError:
        pass
    raise FlopocoMissingImplementationError(
        'Failed to get statistics for operator {} with exponent and mantissa '
        'widths {}, {}'.format(op, we, wf))
def generate():
    """Regenerate the synthesis statistics file, reusing cached results."""
    logger.set_context(level=logger.levels.info)
    previous = _load(default_file)
    _save(default_file, _batch_synth(we_range, wf_range, previous))
|
432533 | from builtins import object
import json
import logging
import ckanext.harvest.model as harvest_model
import mock_static_file_server
from ckan import model
from ckanext.geodatagov.harvesters.base import GeoDataGovWAFHarvester
from factories import HarvestJobObj, WafHarvestSourceObj
from ckan.tests.helpers import reset_db
from ckan.tests.factories import Organization
log = logging.getLogger(__name__)
class TestWafHarvester(object):
    """Integration tests for GeoDataGovWAFHarvester, run against a mock
    static-file HTTP server serving WAF (Web Accessible Folder) fixtures.
    """
    @classmethod
    def setup_class(cls):
        # Start the fixture HTTP server once for the whole test class.
        log.info('Starting mock http server')
        mock_static_file_server.serve()
    @classmethod
    def setup(cls):
        # Fresh CKAN database and owning organization before each test.
        reset_db()
        cls.organization = Organization()
    def run_gather(self, url, source_config):
        """Run the harvester's gather stage for a WAF source at `url`.

        `source_config` is a JSON string; it is stored as the source's
        config and also unpacked into the source factory's keyword args.
        Stores the gathered HarvestObjects on self.harvest_objects and
        returns their ids (or None when nothing was gathered).
        Raises on the first recorded gather error.
        """
        sc = json.loads(source_config)
        source = WafHarvestSourceObj(url=url,
                                     owner_org=self.organization['id'],
                                     config=source_config,
                                     **sc)
        log.info('Created source {}'.format(repr(source)))
        self.job = HarvestJobObj(source=source)
        self.harvester = GeoDataGovWAFHarvester()
        # gather stage
        log.info('GATHERING %s', url)
        obj_ids = self.harvester.gather_stage(self.job)
        log.info('job.gather_errors=%s', self.job.gather_errors)
        if len(self.job.gather_errors) > 0:
            raise Exception(self.job.gather_errors[0])
        log.info('obj_ids=%s', obj_ids)
        if obj_ids is None or len(obj_ids) == 0:
            # nothing to see
            return
        self.harvest_objects = []
        for obj_id in obj_ids:
            harvest_object = harvest_model.HarvestObject.get(obj_id)
            log.info('ho guid=%s', harvest_object.guid)
            log.info('ho content=%s', harvest_object.content)
            self.harvest_objects.append(harvest_object)
        # this is a list of harvestObjects IDs. One for dataset
        return obj_ids
    def run_fetch(self):
        """Run the fetch stage on every gathered harvest object,
        raising on the first recorded error."""
        # fetch stage
        for harvest_object in self.harvest_objects:
            log.info('FETCHING %s' % harvest_object.id)
            result = self.harvester.fetch_stage(harvest_object)
            log.info('ho errors=%s', harvest_object.errors)
            log.info('result 1=%s', result)
            if len(harvest_object.errors) > 0:
                raise Exception(harvest_object.errors[0])
    def run_import(self):
        """Run the import stage on every harvest object and return the
        resulting CKAN Package objects, raising on the first error."""
        # import stage
        datasets = []
        for harvest_object in self.harvest_objects:
            log.info('IMPORTING %s' % harvest_object.id)
            result = self.harvester.import_stage(harvest_object)
            log.info('ho errors 2=%s', harvest_object.errors)
            log.info('result 2=%s', result)
            if len(harvest_object.errors) > 0:
                raise Exception(harvest_object.errors[0])
            log.info('ho pkg id=%s', harvest_object.package_id)
            dataset = model.Package.get(harvest_object.package_id)
            datasets.append(dataset)
            log.info('dataset name=%s', dataset.name)
        return datasets
    def get_datasets_from_waf_gmi_sample(self):
        """ harvest waf-gmi/ folder as waf source """
        url = 'http://127.0.0.1:%s/waf-gmi/index.html' % mock_static_file_server.PORT
        self.config1 = '{"private_datasets": "false"}'
        # Run all three harvest stages end to end.
        self.run_gather(url=url, source_config=self.config1)
        self.run_fetch()
        datasets = self.run_import()
        return datasets
    def test_waf_gmi_datasets_count(self):
        """ Get datasets from waf/ folder as waf source
        and test we have one dataset with the expected name """
        datasets = self.get_datasets_from_waf_gmi_sample()
        assert len(datasets) == 1
    def test_waf_gmi_datasets_privacy(self):
        """ Harvest waf-gmi/ folder as waf source and check the datasets are public"""
        datasets = self.get_datasets_from_waf_gmi_sample()
        for dataset in datasets:
            assert dataset.private is False
    def test_waf_gmi_names(self):
        """ Harvest waf-gmi/ folder as waf source and test we have the names we expect """
        expected_names = [
            '2014-cartographic-boundary-file-new-england-city-and-town-area-for-united-states-1-500000'
        ]
        datasets = self.get_datasets_from_waf_gmi_sample()
        for dataset in datasets:
            assert dataset.name in expected_names
|
432546 | import sys
import numpy as np
from vispy import scene
from .axes import AxesVisual3D
from ..utils import NestedSTTransform
from matplotlib.colors import ColorConverter
from glue.config import settings
rgb = ColorConverter().to_rgb
LIMITS_PROPS = [coord + attribute for coord in 'xyz' for attribute in ['_min', '_max', '_stretch']]
class VispyWidgetHelper(object):
    """Owns the vispy SceneCanvas, camera and 3D axes for a glue viewer and
    keeps them in sync with `viewer_state` through its callback mechanism."""
    def __init__(self, parent=None, viewer_state=None):
        # Prepare Vispy canvas. We set the depth_size to 24 to avoid issues
        # with isosurfaces on MacOS X
        self.canvas = scene.SceneCanvas(keys=None, show=False,
                                        config={'depth_size': 24},
                                        bgcolor=rgb(settings.BACKGROUND_COLOR))
        # Set up a viewbox
        self.view = self.canvas.central_widget.add_view()
        self.view.parent = self.canvas.scene
        # Set whether we are emulating a 3D texture. This needs to be enabled
        # as a workaround on Windows otherwise VisPy crashes.
        self.emulate_texture = (sys.platform == 'win32' and
                                sys.version_info[0] < 3)
        # scene_transform applies the per-axis stretch; limit_transforms maps
        # each data visual's limits into the scene (one transform per visual).
        self.scene_transform = scene.STTransform()
        self.limit_transforms = {}
        fc = rgb(settings.FOREGROUND_COLOR)
        self.axis = AxesVisual3D(axis_color=fc, tick_color=fc, text_color=fc,
                                 tick_width=1, minor_tick_length=2,
                                 major_tick_length=4, axis_width=0,
                                 tick_label_margin=10, axis_label_margin=25,
                                 tick_font_size=6, axis_font_size=8,
                                 view=self.view,
                                 transform=self.scene_transform)
        # Create a turntable camera. For now, this is the only camera type
        # we support, but if we support more in future, we should implement
        # that here
        # Orthographic perspective view as default
        self.view.camera = scene.cameras.TurntableCamera(parent=self.view.scene,
                                                         fov=0., distance=4.0)
        # We need to call render here otherwise we'll later encounter an OpenGL
        # program validation error.
        # self.canvas.render()
        self.viewer_state = viewer_state
        # Older glue-core exposes add_callback('*', ...); newer versions
        # raise TypeError there and provide add_global_callback instead.
        try:
            self.viewer_state.add_callback('*', self._update_from_state, as_kwargs=True)
        except TypeError:  # glue-core >= 0.11
            self.viewer_state.add_global_callback(self._update_from_state)
        self._update_from_state(force=True)
    def _update_appearance_from_settings(self):
        """Re-apply glue's foreground/background colors to canvas and axes."""
        self.canvas.bgcolor = rgb(settings.BACKGROUND_COLOR)
        self.axis.axis_color = rgb(settings.FOREGROUND_COLOR)
        self.axis.tick_color = rgb(settings.FOREGROUND_COLOR)
        self.axis.label_color = rgb(settings.FOREGROUND_COLOR)
    def add_data_visual(self, visual):
        """Add a data visual to the scene, giving it its own limits transform."""
        self.limit_transforms[visual] = NestedSTTransform()
        self._update_limits()
        visual.transform = self.limit_transforms[visual]
        self.view.add(visual)
    def _update_from_state(self, force=False, **props):
        """Dispatch viewer-state changes to the relevant update helpers.

        `props` names the changed state attributes; with force=True every
        group is refreshed unconditionally.
        """
        if force or 'visible_axes' in props:
            self._toggle_axes()
        if force or 'perspective_view' in props:
            self._toggle_perspective()
        if force or any(key in props for key in ('x_att', 'y_att', 'z_att')):
            self._update_attributes()
        if force or any(key in props for key in ('x_stretch', 'y_stretch',
                                                 'z_stretch', 'native_aspect')):
            self._update_stretch()
        if force or any(p in props for p in LIMITS_PROPS) or 'native_aspect' in props:
            self._update_limits()
        self.canvas.update()
    def _toggle_axes(self):
        """Attach or detach the axes visual per viewer_state.visible_axes."""
        if self.viewer_state.visible_axes:
            self.axis.parent = self.view.scene
        else:
            self.axis.parent = None
    def _toggle_perspective(self):
        """Switch between perspective (fov 30) and orthographic (fov 0) view,
        adjusting the axis font sizes to match."""
        if self.viewer_state.perspective_view:
            self.view.camera.fov = 30
            self.axis.tick_font_size = 28
            self.axis.axis_font_size = 35
        else:
            self.view.camera.fov = 0
            self.axis.tick_font_size = 6
            self.axis.axis_font_size = 8
    def _update_attributes(self):
        """Copy the state's x/y/z attribute labels onto the axis labels."""
        if self.viewer_state.x_att is not None:
            self.axis.xlabel = self.viewer_state.x_att.label
        if self.viewer_state.y_att is not None:
            self.axis.ylabel = self.viewer_state.y_att.label
        if self.viewer_state.z_att is not None:
            self.axis.zlabel = self.viewer_state.z_att.label
    def _update_stretch(self):
        """Apply the per-axis stretch times aspect to the scene transform."""
        self.scene_transform.scale = (self.viewer_state.x_stretch * self.viewer_state.aspect[0],
                                      self.viewer_state.y_stretch * self.viewer_state.aspect[1],
                                      self.viewer_state.z_stretch * self.viewer_state.aspect[2])
    def _update_limits(self):
        """Rescale every data visual so the state's [min, max] ranges map onto
        a centered cube (scaled by stretch and aspect), and update the axis
        limits to match. A zero-width range yields an infinite scale."""
        dx = self.viewer_state.x_max - self.viewer_state.x_min
        sx = (np.inf if dx == 0 else 2. / dx *
              self.viewer_state.x_stretch * self.viewer_state.aspect[0])
        dy = self.viewer_state.y_max - self.viewer_state.y_min
        sy = (np.inf if dy == 0 else 2. / dy *
              self.viewer_state.y_stretch * self.viewer_state.aspect[1])
        dz = self.viewer_state.z_max - self.viewer_state.z_min
        sz = (np.inf if dz == 0 else 2. / dz *
              self.viewer_state.z_stretch * self.viewer_state.aspect[2])
        scale = [sx, sy, sz]
        # Translate so the midpoint of each range lands at the origin.
        translate = [-0.5 * (self.viewer_state.x_min + self.viewer_state.x_max) * scale[0],
                     -0.5 * (self.viewer_state.y_min + self.viewer_state.y_max) * scale[1],
                     -0.5 * (self.viewer_state.z_min + self.viewer_state.z_max) * scale[2]]
        for visual in self.limit_transforms:
            self.limit_transforms[visual].scale = scale
            self.limit_transforms[visual].translate = translate
        self.axis.xlim = self.viewer_state.x_min, self.viewer_state.x_max
        self.axis.ylim = self.viewer_state.y_min, self.viewer_state.y_max
        self.axis.zlim = self.viewer_state.z_min, self.viewer_state.z_max
|
432547 | import logging
from datetime import datetime
from pathlib import Path
from bs4 import BeautifulSoup
from .. import utils
from ..cache import Cache
__authors__ = [
"zstumgoren",
"Dilcia19",
"stucka",
]
__tags__ = ["html"]
__source__ = {
"name": "Connecticut Department of Labor",
"url": "https://www.ctdol.state.ct.us/progsupt/bussrvce/warnreports/warnreports.htm",
}
logger = logging.getLogger(__name__)
def scrape(
    data_dir: Path = utils.WARN_DATA_DIR,
    cache_dir: Path = utils.WARN_CACHE_DIR,
) -> Path:
    """
    Scrape data from Connecticut.

    Keyword arguments:
    data_dir -- the Path where the result will be saved (default WARN_DATA_DIR)
    cache_dir -- the Path where results can be cached (default WARN_CACHE_DIR)

    Returns: the Path where the file is written
    """
    cache = Cache(cache_dir)
    current_year = datetime.now().year
    output_rows = []
    # The state posts one page per year, starting in 2015.
    for year in range(2015, current_year + 1):
        url = f"https://www.ctdol.state.ct.us/progsupt/bussrvce/warnreports/warn{year}.htm"
        cache_key = f"ct/{year}.html"
        # Only trust the cache for fully-elapsed years; the current year's
        # page may still receive updates.
        if year < current_year and cache.exists(cache_key):
            html = cache.read(cache_key)
        else:
            response = utils.get_url(url)
            html = response.text
            cache.write(cache_key, html)
        # Parse out the year's table; 2016 uses a different CSS class.
        soup = BeautifulSoup(html, "html.parser")
        css_class = "style15" if year == 2016 else "MsoNormalTable"
        table = soup.find_all("table", css_class)
        output_rows.extend(_scrape_table(table))
    # Tack the headers on top of the accumulated rows.
    header_row = [
        "warn_date",
        "affected_company",
        "layoff_location",
        "number_workers",
        "layoff_date",
        "closing",
        "closing_date",
        "union",
        "union_address",
    ]
    row_list = [header_row] + output_rows
    data_path = data_dir / "ct.csv"
    utils.write_rows_to_csv(data_path, row_list)
    return data_path
def _scrape_table(table) -> list:
    """Scrape the provided table.

    Returns: List of data rows, one list of cell strings per row.
    """
    row_list = []
    # Skip the header row, then normalize each data row.
    for table_row in table[0].find_all("tr")[1:]:
        cells = table_row.find_all("td")
        if len(cells) > 9:
            # The 2016 table nests tags inside some cells; handle separately.
            row_list.append(_problem_cells(cells))
            continue
        if len(cells) < 9:
            # Incomplete row: skip it.
            continue
        # Collapse all internal whitespace in each cell.
        output_row = [" ".join(cell.text.split()) for cell in cells]
        if output_row:
            row_list.append(output_row)
    logger.debug(f"{len(row_list)} rows parsed")
    return row_list
def _problem_cells(table_cells):
    """Deal with problem rows in the 2016 table.

    Some 2016 rows contain nested tags that duplicate a cell's text into an
    extra <td>; drop any cell whose normalized text repeats the previous
    cell's text.
    """
    output_row = []
    previous_cell = None
    # BUG FIX: the original used table_cells.index(table_cell), which returns
    # the FIRST matching cell — rows containing duplicate cells compared
    # against the wrong neighbor (and scanned the list twice per cell).
    for position, table_cell in enumerate(table_cells):
        current_cell = " ".join(table_cell.text.split())
        if position == 0 or current_cell != previous_cell:
            output_row.append(current_cell)
        previous_cell = current_cell
    return output_row
# Allow running this scraper directly from the command line.
if __name__ == "__main__":
    scrape()
|
432549 | import system
def solver(n, node):
    # NOTE(review): this function looks unfinished — `count` is never used
    # or returned, `curNode` is unused, and the inner loop shadows the
    # parameter `n` while doing nothing. Also `import system` above is not
    # a standard module (perhaps `sys` was intended). Confirm the intended
    # algorithm before relying on this code.
    node.sort()
    queue = node[0]  # assumes node[0] is itself a list — TODO confirm
    while queue:
        curNode = queue.pop()  # NOTE(review): never used
        count = 1
        for n in node:  # shadows parameter `n`
            pass
|
432588 | import algopy, numpy
def house(x):
    """ computes the Householder vector v and twice its norm beta
    (v,beta) = house(x)
    Parameters
    ----------
    x: array_like
        len(x) = N
    Returns
    -------
    v: array_like
        len(v) = N
    beta: Float
        two times the 2-norm of v
    Description
    -----------
    computes beta and v to be used in the Householder reflector
    H(v) = 1 - beta dot(v,v.T)
    where v[0] = 1
    such that H(v)x = alpha * e_1
    i.e., H(v)x is a multiple of the first Cartesian basis vector
    """
    # sigma = ||x||_2 (x is an (N,1) column; [0,0] extracts the scalar)
    sigma = algopy.sqrt(algopy.dot(x.T,x))[0,0]
    v = x.copy()
    # Choose the sign that moves v[0] away from zero, avoiding cancellation.
    if x[0] <= 0:
        v[0] -= sigma
    else:
        v[0] += sigma
    # Normalize so v[0] == 1 (the convention stated in the docstring).
    v = v/v[0]
    beta = 2./algopy.dot(v.T,v)[0,0]
    return v, beta
def qr_house(A):
    """ computes QR decomposition using Householder reflections
    (Q,R) = qr_house(A)
    such that
    0 = Q R - A
    0 = dot(Q.T,Q) - eye(M)
    R upper triangular
    Parameters
    ----------
    A: array_like
        shape(A) = (M, N), M >= N
        overwritten on exit
    Returns
    -------
    R: array_like
        strict lower triangular part contains the Householder vectors v
        upper triangular matrix R
    Q: array_like
        orthogonal matrix
    """
    M,N = A.shape
    # Q starts as the identity and accumulates the reflectors H_0 H_1 ...
    Q = algopy.zeros((M,M),dtype=A)
    Q += numpy.eye(M)
    H = algopy.zeros((M,M),dtype=A)
    for n in range(N):
        # Reflector for the current column's subdiagonal part.
        v,beta = house(A[n:,n:n+1])
        # Apply H(v) to the trailing submatrix in place.
        A[n:,n:] -= beta * algopy.dot(v, algopy.dot(v.T,A[n:,n:]))
        # Embed H(v) into the full M x M identity and accumulate into Q.
        H[...] = numpy.eye(M)
        H[n:,n:] -= beta * algopy.dot(v,v.T)
        Q = algopy.dot(Q,H)
    return Q, algopy.triu(A)
def qr_house_basic(A):
    """ computes QR decomposition using Householder reflections
    qr_house_basic and build_Q have the same effect as qr_house
    Parameters
    ----------
    A: array_like
        shape(A) = (M, N), M >= N
        overwritten on exit
    Returns
    -------
    A: array_like
        strict lower triangular part contains the Householder vectors v
        upper triangular matrix R
    betas: array_like
        2-norms of the Householder vectors v
    """
    M,N = A.shape
    beta_list = []
    for n in range(N):
        v,beta = house(A[n:,n:n+1])
        # Apply the reflector to the trailing submatrix in place.
        A[n:,n:] -= beta * algopy.dot(v, algopy.dot(v.T,A[n:,n:]))
        beta_list.append(beta)
        # Store v (minus its leading 1) in the zeroed-out subdiagonal of A,
        # so build_Q can reconstruct the reflectors later.
        if n < M:
            A[n+1:,n] = v[1:,0]
    return A, numpy.asarray(beta_list)
def build_Q(A, betas):
    """ computes orthogonal matrix from output of qr_house_basic
    Parameters
    ----------
    A: array_like
        shape(A) = (M,N)
        upper triangular part contains R
        lower triangular part contains v with v[0] = 1
    betas: array_like
        list of beta
    Returns
    -------
    Q: array_like
        shape(Q) = (M,M)
    """
    M,N = A.shape
    Q = algopy.zeros((M,M),dtype=A)
    Q += numpy.eye(M)
    H = algopy.zeros((M,M),dtype=A)
    for n in range(N):
        # Reconstruct the n-th Householder vector (v[0] == 1 by convention).
        v = A[n:,n:n+1].copy()
        v[0] = 1
        # Embed H(v) in the identity and accumulate Q = H_0 H_1 ... H_{N-1}.
        H[...] = numpy.eye(M)
        H[n:,n:] -= betas[n] * algopy.dot(v,v.T)
        Q = algopy.dot(Q,H)
    return Q
def pb_qr_house(A, Abar, Q, Qbar, R, Rbar):
    """Pullback (reverse-mode derivative) of qr_house — not implemented.

    Parameters
    ----------
    A: array_like, shape (M, N) with M >= N
    Abar: array_like, shape (M, N)
        adjoint of A; would be changed on exit
    Q: array_like, shape (M, M)
    Qbar: array_like, shape (M, M)
        adjoint of Q; would be changed on exit
    R: array_like, shape (M, N)
    Rbar: array_like, shape (M, N)
        adjoint of R; would be changed on exit
    """
    raise NotImplementedError('')
import time
# Problem sizes for the experiments below:
# D = Taylor coefficients, P = directions, (M, N) = matrix shape.
D,P,M,N = 50,1,3,2
# # STEP 1: qr_house_basic + build_Q
# A = numpy.random.random((M,N))
# B,betas = qr_house_basic(A.copy())
# R = algopy.triu(B)
# Q = build_Q(B,betas)
# print algopy.dot(Q.T,Q) - numpy.eye(M)
# print algopy.dot(Q,R) - A
# # STEP 2: qr_house
# Q,R = qr_house(A.copy())
# print algopy.dot(Q.T,Q) - numpy.eye(M)
# print algopy.dot(Q,R) - A
# # STEP 3: qr_full
# Q,R = algopy.qr_full(A.copy())
# print algopy.dot(Q.T,Q) - numpy.eye(M)
# print algopy.dot(Q,R) - A
# data = numpy.random.random((D,P,M,N))
# data = numpy.asarray(data, dtype=numpy.float64)
# A = algopy.UTPM(data)
# # STEP 1: qr_house_basic + build_Q
# print 'QR decomposition based on basic Householder'
# st = time.time()
# B,betas = qr_house_basic(A.copy())
# R = algopy.triu(B)
# Q = build_Q(B,betas)
# # print algopy.dot(Q.T,Q) - numpy.eye(M)
# # print algopy.dot(Q,R) - A
# print 'runtime = ',time.time() - st
# # STEP 2: qr_house
# print 'QR decomposition based on Householder'
# st = time.time()
# Q2,R2 = qr_house(A.copy())
# print algopy.dot(Q2.T,Q2) - numpy.eye(M)
# print algopy.dot(Q2,R2) - A
# print 'runtime = ',time.time() - st
# # STEP 3: qr_full
# print 'QR decomposition based on defining equations'
# st = time.time()
# Q,R = algopy.qr_full(A.copy())
# print algopy.dot(Q.T,Q) - numpy.eye(M)
# print algopy.dot(Q,R) - A
# print 'runtime = ',time.time() - st
# SAVE matrices in .mat format for <NAME> (who uses Matlab...)
# -----------------------------------------------------------------
# import scipy.io
# scipy.io.savemat('matrix_polynomial.mat', {"A_coeffs": A.data,
# "Q_coeffs_house":Q2.data,
# "R_coeffs_house":R2.data,
# "defect_QR_minus_A_house": (algopy.dot(Q2,R2) - A).data,
# "defect_QTQ_minus_Id_house": (algopy.dot(Q2.T,Q2) - numpy.eye(M)).data,
# "Q_coeffs":Q.data,
# "R_coeffs":R.data,
# "defect_QR_minus_A": (algopy.dot(Q,R) - A).data,
# "defect_QTQ_minus_Id": (algopy.dot(Q.T,Q) - numpy.eye(M)).data
# })
import mpmath
mpmath.mp.prec = 200 # increase length of mantissa for the reference run
print(mpmath.mp)
print('QR decomposition based on Householder')
D,P,M,N = 50,1,3,2
# in float64 arithmetic
data = numpy.random.random((D,P,M,N))
data = numpy.asarray(data, dtype=numpy.float64)
A = algopy.UTPM(data)
Q,R = qr_house(A.copy())
# in multiprecision arithmetic
# (multiplying by mpf(1) promotes every entry to an mpmath float)
data2 = numpy.asarray(data)*mpmath.mpf(1)
A2 = algopy.UTPM(data2)
Q2,R2 = qr_house(A2.copy())
print('-'*20)
print(A.data[-1])
print('-'*20)
# relative error of the highest-order Taylor coefficients of Q
print((Q.data[-1] - Q2.data[-1])/Q2.data[-1])
print('-'*20)
# residuals of Q R - A in both arithmetics
print(algopy.dot(Q, R).data[-1] - A.data[-1])
print('-'*20)
print(algopy.dot(Q2, R2).data[-1] - A2.data[-1])
# # print algopy.dot(Q2.T,Q2) - numpy.eye(M)
# # print algopy.dot(Q2,R2) - A
|
432598 | from magma import *
from .logic import DefineInvert, Not
from .fulladder import FullAdder
__all__ = ['DefineAdd']
__all__ += ['DefineSub']
__all__ += ['DefineNegate']
def _AdderName(basename, n, cin, cout):
    """Build a circuit name, e.g. 'Add4_CIN_COUT' for n=4 with both carries."""
    parts = [f"{basename}{n}"]
    if cin:
        parts.append('_CIN')
    if cout:
        parts.append('_COUT')
    return ''.join(parts)
def _AdderArgs(n, cin, cout):
    """Build the magma IO list for an n-bit adder with optional carry in/out."""
    word = Bits[n]
    io = ["I0", In(word), "I1", In(word)]
    if cin:
        io.extend(['CIN', In(Bit)])
    io.extend(["O", Out(word)])
    if cout:
        io.extend(['COUT', Out(Bit)])
    return io
def DefineAdders(name, n, cin, cout, forkargs=[]):
    # NOTE(review): this function appears to be dead/unfinished code — it
    # wires a name `Adders` that is never defined in this scope (NameError
    # if called) and the `name` parameter is unused. DefineAdd below is the
    # working equivalent. Also `forkargs=[]` is a mutable default argument;
    # harmless while unmodified, but worth replacing with None if revived.
    def f(y):
        return FullAdder()
    c = braid( col(f, n), foldargs={"CIN":"COUT"}, forkargs=forkargs)
    wire(Adders.I0, c.I0)
    wire(Adders.I1, c.I1)
    wire(c.O, Adders.O)
    if cin:
        wire(Adders.CIN, c.CIN)
    else:
        wire(0, c.CIN)
    if cout:
        wire(c.COUT, Adders.COUT)
#
# create an n-bit Adder from n FullAdders
#
# I0:In(Bits(n)), I1:In(Bits(n)), CIN:In(Bit), O:Out(Bits(n)), COUT:Out(Bit)
#
# if cin, CIN is added to the circuit
# if cout: COUT is added to the circuit
#
def DefineAdd(n, cin=False, cout=False):
    """Define an n-bit ripple-carry adder built from n FullAdders.

    cin/cout control whether CIN and COUT ports appear on the interface;
    without cin the internal carry-in is tied to 0.
    """
    class _Add(Circuit):
        name = _AdderName('Add', n, cin, cout)
        IO = _AdderArgs(n, cin, cout)
        @classmethod
        def definition(io):
            def f(y):
                return FullAdder()
            # Chain n FullAdders, feeding each COUT into the next CIN.
            add = braid( col(f, n), foldargs={"CIN":"COUT"})
            wire(io.I0, add.I0)
            wire(io.I1, add.I1)
            if cin:
                wire(io.CIN, add.CIN)
            else:
                wire(0, add.CIN)
            if cout:
                wire(add.COUT, io.COUT)
            wire(add.O, io.O)
    return _Add
def DefineSub(n, cin=False, cout=False):
    """Define an n-bit subtractor: O = I0 - I1, computed as I0 + ~I1 + 1
    (two's complement)."""
    class _Sub(Circuit):
        name = _AdderName('Sub', n, cin, cout)
        IO = _AdderArgs(n, cin, cout)
        @classmethod
        def definition(io):
            invert = DefineInvert(n)()
            # The internal adder always takes a carry-in (the "+1").
            add = DefineAdd(n, True, cout)()
            wire(io.I0, add.I0)
            wire(io.I1, invert.I)
            wire(invert.O, add.I1)
            wire(add.O, io.O)
            if cin:
                # Carry-in is inverted: borrow semantics.
                wire( Not()(io.CIN), add.CIN )
            else:
                wire( 1, add.CIN )
            if cout:
                wire(add.COUT, io.COUT)
    return _Sub
def DefineNegate(width):
    """Define a two's-complement negation circuit: O = ~I + 1."""
    T = Bits[width]
    class _Negate(Circuit):
        name = 'Negate{}'.format(width)
        IO = ['I', In(T), 'O', Out(T)]
        @classmethod
        def definition(io):
            invert = DefineInvert(width)()
            add = DefineAdd(width, False, False)()
            # O = (~I) + 1
            wire( add( invert(io.I), (array(1,width))), io.O )
    return _Negate
|
432618 | import i2c_bus, unit
from micropython import const
import ustruct
_ADDR = const(0x27)
_INPUT_REG = const(0x00)
_OUTPUT_REG = const(0x01)
_POLINV_REG = const(0x02)
_CONFIG_REG = const(0x03)
class Ext_io:
    """Driver for an 8-bit I2C I/O expander unit at address 0x27.

    The _CONFIG_REG bits select pin direction (1 = input, 0 = output, per
    the INPUT/OUTPUT constants below); _INPUT_REG/_OUTPUT_REG hold the pin
    levels.
    """
    ALL_OUTPUT = const(0x00)
    ALL_INPUT = const(0xff)
    OUTPUT = const(0x00)
    INPUT = const(0x01)
    def __init__(self, port):
        self.i2c = i2c_bus.get(port)
        self._available()
        # Start with every pin configured as an input.
        self.config = Ext_io.ALL_INPUT
        self.setPortMode(self.config)
    def _available(self):
        # Probe the device; the address is checked twice, presumably as a
        # retry after a failed first probe — confirm intent.
        if self.i2c.is_ready(_ADDR) or self.i2c.is_ready(_ADDR):
            pass
        else:
            raise unit.Unit("Ext IO unit maybe not connect")
    def _get_mode(self):
        # Read the current direction bits from the configuration register.
        return self.i2c.readfrom_mem(_ADDR, _CONFIG_REG, 1)[0]
    def setPortMode(self, mode):
        """Write the whole direction register (one bit per pin)."""
        buf = bytearray(1)
        buf[0] = mode
        self.i2c.writeto_mem(_ADDR, _CONFIG_REG, buf)
    def setPinMode(self, pin, mode):
        """Set the direction of one pin, leaving the other pins unchanged."""
        config = self._get_mode()
        if (config >> pin) & 0x01 != mode:
            config ^= 1 << pin  # flip only the target pin's direction bit
            self.setPortMode(config)
        self.config = config
    def digitReadPort(self):
        """Read all eight input pin levels as one byte."""
        return self.i2c.readfrom_mem(_ADDR, _INPUT_REG, 1)[0]
    def digitWritePort(self, state):
        """Write all eight output pin levels as one byte."""
        buf = bytearray(1)
        buf[0] = state
        self.i2c.writeto_mem(_ADDR, _OUTPUT_REG, buf)
    def digitRead(self, pin):
        """Read one pin; for a pin configured as output, return the value
        currently latched in the output register instead."""
        if (self.config >> pin) & 0x01 != Ext_io.INPUT:
            return (self.i2c.readfrom_mem(_ADDR, _OUTPUT_REG, 1)[0] >> pin) & 0x01
        return (self.digitReadPort() >> pin) & 0x01
    def digitWrite(self, pin, value):
        """Drive one pin, switching it to output mode first if needed.

        Only performs an I2C write when the latched value actually changes.
        """
        if (self.config >> pin) & 0x01 != Ext_io.OUTPUT:
            self.setPinMode(pin, Ext_io.OUTPUT)
        old_value = self.i2c.readfrom_mem(_ADDR, _OUTPUT_REG, 1)[0]
        if (old_value >> pin) & 0x01 != value:
            old_value ^= 1 << pin
            self.digitWritePort(old_value)
    def deinit(self):
        # No resources to release; present for API symmetry with other units.
        pass
432627 | import os.path
import numpy as np
import base64
__all__ = ['data']
class Data(object):
    """Lazy, cached loader for numeric text files under the package's
    data/ directory; index with a file name to get a numpy array."""
    BASE_DIR = os.path.join(os.path.dirname(__file__), 'data')

    def __init__(self):
        # filename -> parsed numpy array
        self._dt = {}

    def __getitem__(self, item):
        try:
            return self._dt[item]
        except KeyError:
            path = os.path.join(self.BASE_DIR, item)
            loaded = np.loadtxt(path)
            self._dt[item] = loaded
            return loaded

# Shared module-level instance.
data = Data()
|
432648 | from django.shortcuts import render, redirect
from . import forms
from .models import District, Upazilla, Union, PersonalInfo
# Create your views here.
def load_upazilla(request):
    """AJAX endpoint: render <option> lists of upazillas and unions for the
    district/upazilla ids supplied in the query string."""
    district_id = request.GET.get('district')
    upazilla_id = request.GET.get('upazilla')
    context = {
        'upazilla': Upazilla.objects.filter(district_id=district_id).order_by('name'),
        'union': Union.objects.filter(upazilla_id=upazilla_id).order_by('name'),
    }
    return render(request, 'others/upazilla_dropdown_list_options.html', context)
def teacher_registration(request):
    """Render and process the multi-form teacher registration page.

    Six forms are presented together; on POST all must validate before the
    PersonalInfo record and its dependent records are saved, after which the
    user is redirected to the employee list.
    """
    form = forms.PersonalInfoForm()
    address_forms = forms.AddressInfoForm()
    education_form = forms.EducationInfoForm()
    training_form = forms.TrainingInfoForm()
    job_form = forms.JobInfoForm()
    experience_form = forms.ExperienceInfoForm()
    if request.method == 'POST':
        form = forms.PersonalInfoForm(request.POST, request.FILES)
        # BUG FIX: the bound address form was previously stored in a
        # differently named variable (`address_form`) while the context
        # rendered the unbound `address_forms`, so address validation errors
        # were never redisplayed. Rebind the variable the context uses.
        address_forms = forms.AddressInfoForm(request.POST)
        education_form = forms.EducationInfoForm(request.POST)
        training_form = forms.TrainingInfoForm(request.POST)
        job_form = forms.JobInfoForm(request.POST)
        experience_form = forms.ExperienceInfoForm(request.POST)
        if form.is_valid() and address_forms.is_valid() and education_form.is_valid() and training_form.is_valid() and job_form.is_valid() and experience_form.is_valid():
            personal_info = form.save()
            # Each dependent record points back at the PersonalInfo row via
            # its link attribute, then is saved.
            for related_form, link_attr in (
                    (address_forms, 'address'),
                    (education_form, 'education'),
                    (training_form, 'training'),
                    (job_form, 'job'),
                    (experience_form, 'experience')):
                related = related_form.save(commit=False)
                setattr(related, link_attr, personal_info)
                related.save()
            return redirect('employee-list')
    context = {
        'form': form,
        'address_forms': address_forms,
        'education_form': education_form,
        'training_form': training_form,
        'job_form': job_form,
        'experience_form': experience_form
    }
    return render(request, 'employee/employee-registration.html', context)
def teacher_list(request):
    """List every registered teacher (all PersonalInfo records)."""
    context = {'teacher': PersonalInfo.objects.all()}
    return render(request, 'employee/employee-list.html', context)
|
432655 | from io import StringIO
from OpenGL.GL import *
import gl
import gx
MATRIX_INDEX_ATTRIBUTE_LOCATION = 0
POSITION_ATTRIBUTE_LOCATION = 1
NORMAL_ATTRIBUTE_LOCATION = 2
BINORMAL_ATTRIBUTE_LOCATION = 3
TANGENT_ATTRIBUTE_LOCATION = 4
COLOR_ATTRIBUTE_LOCATION = 5
TEXCOORD_ATTRIBUTE_LOCATIONS = [6,7,8,9,10,11,12,13]

# Map each GX vertex attribute to its GLSL attribute location.
ATTRIBUTE_LOCATION_TABLE = {
    gx.VA_PTNMTXIDX:MATRIX_INDEX_ATTRIBUTE_LOCATION,
    gx.VA_POS:POSITION_ATTRIBUTE_LOCATION,
    gx.VA_NRM:NORMAL_ATTRIBUTE_LOCATION,
    gx.VA_CLR0:COLOR_ATTRIBUTE_LOCATION,
    gx.VA_CLR1:COLOR_ATTRIBUTE_LOCATION,
    gx.VA_TEX0:TEXCOORD_ATTRIBUTE_LOCATIONS[0],
    # BUG FIX: VA_TEX1 previously mapped to TEXCOORD_ATTRIBUTE_LOCATIONS[2]
    # (copy-paste), colliding with VA_TEX2; it belongs in slot 1.
    gx.VA_TEX1:TEXCOORD_ATTRIBUTE_LOCATIONS[1],
    gx.VA_TEX2:TEXCOORD_ATTRIBUTE_LOCATIONS[2],
    gx.VA_TEX3:TEXCOORD_ATTRIBUTE_LOCATIONS[3],
    gx.VA_TEX4:TEXCOORD_ATTRIBUTE_LOCATIONS[4],
    gx.VA_TEX5:TEXCOORD_ATTRIBUTE_LOCATIONS[5],
    gx.VA_TEX6:TEXCOORD_ATTRIBUTE_LOCATIONS[6],
    gx.VA_TEX7:TEXCOORD_ATTRIBUTE_LOCATIONS[7]}
class MatrixBlock(gl.UniformBlock):
    # Uniform block holding the camera transforms shared by the shaders.
    projection_matrix = gl.mat4
    view_matrix = gl.mat4x3
def convert_material_source(source,index):
    """Return the GLSL expression for a channel's material color source."""
    if source == gx.SRC_VTX:
        return 'color'
    if source == gx.SRC_REG:
        return 'material_color{}'.format(index)
    raise ValueError('invalid material source')
def convert_ambient_source(source,index):
    """Return the GLSL expression for a channel's ambient color source."""
    if source == gx.SRC_VTX:
        return 'color'
    if source == gx.SRC_REG:
        return 'ambient_color{}'.format(index)
    raise ValueError('invalid ambient source')
def write_channel(stream,index,channel):
    """Emit the GLSL assignment for color channel `index` into `stream`."""
    color_source = convert_material_source(channel.color_mode.material_source,index)
    alpha_source = convert_material_source(channel.alpha_mode.material_source,index)
    stream.write('channel{} = vec4('.format(index))
    #XXX Lighting can't be properly implemented as BMD/BDL files doesn't
    # store any light information, but this seems to work pretty well
    if channel.color_mode.light_enable:
        ambient = convert_ambient_source(channel.color_mode.ambient_source,index)
        stream.write('0.5*({}.rgb + vec3(1.0))*'.format(ambient))
        #stream.write('clamp({}.rgb + vec3(0.3),0.0,1.0)*'.format(ambient))
    stream.write('{}.rgb,{}.a);\n'.format(color_source,alpha_source))
def write_identity_texcoord_generator(stream,generator):
    """Emit the GLSL source expression for a texcoord generator whose
    matrix is the identity (no texture matrix applied)."""
    if generator.source == gx.TG_POS:
        stream.write('position.xyz')
    elif generator.source == gx.TG_NRM:
        stream.write('normal')
    elif generator.source == gx.TG_BINRM:
        stream.write('binormal')
    elif generator.source == gx.TG_TANGENT:
        stream.write('tangent')
    elif generator.source in gx.TG_TEX:
        slot = gx.TG_TEX.index(generator.source)
        stream.write('vec3(texcoord{}, 1.0)'.format(slot))
    else:
        # Unknown sources degrade to a constant coordinate.
        stream.write('vec3(1.0)')
def write_matrix_texcoord_generator(stream,generator,texture_matrices):
    """Emit the GLSL expression for a texcoord generator that applies a
    texture matrix (generator.matrix != gx.IDENTITY)."""
    if generator.source == gx.TG_POS:
        source = 'position'
    elif generator.source == gx.TG_NRM:
        source = 'vec4(normal,1.0)'
    elif generator.source == gx.TG_BINRM:
        source = 'vec4(binormal,1.0)'
    elif generator.source == gx.TG_TANGENT:
        source = 'vec4(tangent,1.0)'
    elif generator.source in gx.TG_TEX:
        source_index = gx.TG_TEX.index(generator.source)
        source = 'vec4(texcoord{},1.0,1.0)'.format(source_index)
    else:
        raise ValueError('invalid texture coordinate generator source')
    matrix_index = gx.TEXMTX.index(generator.matrix)
    matrix = texture_matrices[matrix_index]
    #if matrix.shape != generator.function:
    #    raise ValueError() #<-?
    stream.write('texture_matrix{}*'.format(matrix_index))
    # NOTE(review): matrix types 0x06/0x07 and 0x09 transform the source
    # into view space before applying the texture matrix — presumably
    # environment-mapping style matrices; confirm against the BMD/BDL
    # material format documentation.
    if matrix.matrix_type in {0x06,0x07}:
        stream.write('vec4(view_matrix*vec4({}.xyz,0.0),1.0)'.format(source))
    elif matrix.matrix_type == 0x09:
        stream.write('vec4(view_matrix*{},1.0)'.format(source))
    else:
        stream.write(source)
def write_texcoord_generator(stream,index,generator,texture_matrices):
    """Emit the GLSL assignment for generated texture coordinate `index`."""
    stream.write('generated_texcoord{} = '.format(index))
    function = generator.function
    if function in {gx.TG_MTX2x4,gx.TG_MTX3x4}:
        if generator.matrix == gx.IDENTITY:
            write_identity_texcoord_generator(stream,generator)
        else:
            write_matrix_texcoord_generator(stream,generator,texture_matrices)
    elif function in gx.TG_BUMP:
        # Bump generators reuse an earlier generated coordinate; an invalid
        # source or a forward reference falls back to a constant.
        if generator.source in gx.TG_TEXCOORD:
            source_index = gx.TG_TEXCOORD.index(generator.source)
            if source_index < index:
                stream.write('generated_texcoord{}'.format(source_index))
            else:
                stream.write('vec3(1.0)')
        else:
            stream.write('vec3(1.0)')
    elif function == gx.TG_SRTG:
        if generator.source in gx.TG_COLOR:
            source_index = gx.TG_COLOR.index(generator.source)
            stream.write('vec3(channel{}.rg, 1.0)'.format(source_index))
        else:
            stream.write('vec3(1.0)')
    else:
        raise ValueError('invalid texture coordinate generator function')
    stream.write(';\n')
def create_shader_string(material, transformation_type):
    """Generate the GLSL vertex shader source for *material*.

    transformation_type selects how vertex positions are transformed:
    0 = single uniform matrix-table index, 1 = billboard (view translation
    only), 2 = y-billboard (not implemented), 3 = per-vertex matrix index
    attribute. Returns the shader source as a string.
    """
    stream = StringIO()
    stream.write('#version 330\n')
    # Shared uniform blocks: view/projection matrices plus material state.
    stream.write('{}\n'.format(MatrixBlock.glsl_type))
    stream.write('{}\n'.format(material.gl_block.glsl_type))
    if transformation_type == 0:
        stream.write('uniform int matrix_index;\n')
        stream.write('uniform samplerBuffer matrix_table;\n')
        stream.write('#define MATRIX_ROW(i) texelFetch(matrix_table,3*matrix_index + i)\n')
        position = 'view_matrix*vec4(dot(MATRIX_ROW(0),position),dot(MATRIX_ROW(1),position),dot(MATRIX_ROW(2),position),1.0)'
    elif transformation_type == 1:
        # Billboard: only the view translation is applied.
        position = '(position.xyz + view_matrix[3])'
    elif transformation_type == 2:
        raise Exception('y billboard matrix not implemented') #TODO
    elif transformation_type == 3:
        # Skinning: matrix index streamed per-vertex as an attribute.
        stream.write('layout(location={}) in int matrix_index;\n'.format(MATRIX_INDEX_ATTRIBUTE_LOCATION))
        stream.write('uniform samplerBuffer matrix_table;\n')
        stream.write('#define MATRIX_ROW(i) texelFetch(matrix_table,3*matrix_index + i)\n')
        position = 'view_matrix*vec4(dot(MATRIX_ROW(0),position),dot(MATRIX_ROW(1),position),dot(MATRIX_ROW(2),position),1.0)'
    else:
        raise ValueError('invalid matrix type')
    # Work out which vertex attributes the channels/generators actually need.
    use_normal = False
    use_binormal = False
    use_tangent = False
    use_color = False
    use_texcoord = [False]*8
    for i, channel in enumerate(material.enabled_channels):
        if channel.color_mode.material_source == gx.SRC_VTX:
            use_color = True
        if channel.alpha_mode.material_source == gx.SRC_VTX:
            use_color = True
        if channel.color_mode.light_enable and channel.color_mode.ambient_source == gx.SRC_VTX:
            use_color = True
        if channel.alpha_mode.light_enable and channel.alpha_mode.ambient_source == gx.SRC_VTX:
            use_color = True
    for generator in material.enabled_texcoord_generators:
        if generator.function not in {gx.TG_MTX2x4,gx.TG_MTX3x4}:
            continue
        if generator.source == gx.TG_NRM:
            use_normal = True
        elif generator.source == gx.TG_BINRM:
            use_binormal = True
        elif generator.source == gx.TG_TANGENT:
            use_tangent = True
        elif generator.source in gx.TG_TEX:
            source_index = gx.TG_TEX.index(generator.source)
            use_texcoord[source_index] = True
    # Declare only the attributes that are actually used.
    stream.write('layout(location={}) in vec4 position;\n'.format(POSITION_ATTRIBUTE_LOCATION))
    if use_normal:
        stream.write('layout(location={}) in vec3 normal;\n'.format(NORMAL_ATTRIBUTE_LOCATION))
    if use_binormal:
        stream.write('layout(location={}) in vec3 binormal;\n'.format(BINORMAL_ATTRIBUTE_LOCATION))
    if use_tangent:
        stream.write('layout(location={}) in vec3 tangent;\n'.format(TANGENT_ATTRIBUTE_LOCATION))
    if use_color:
        stream.write('layout(location={}) in vec4 color;\n'.format(COLOR_ATTRIBUTE_LOCATION))
    for i in range(8):
        if not use_texcoord[i]: continue
        stream.write('layout(location={}) in vec2 texcoord{};\n'.format(TEXCOORD_ATTRIBUTE_LOCATIONS[i],i))
    # Varyings handed to the fragment stage.
    for i,channel in enumerate(material.enabled_channels):
        stream.write('out vec4 channel{};\n'.format(i))
    for i,generator in enumerate(material.enabled_texcoord_generators):
        stream.write('out vec3 generated_texcoord{};\n'.format(i))
    stream.write('\nvoid main()\n{\n')
    stream.write('gl_Position = projection_matrix*vec4({},1.0);\n'.format(position))
    # NOTE(review): iterates material.channels (not enabled_channels) and pads
    # channels past channel_count with constant white -- confirm the intended
    # relationship between channels, enabled_channels and channel_count.
    for i,channel in enumerate(material.channels):
        if i < material.channel_count:
            write_channel(stream,i,channel)
        else:
            stream.write('const vec4 channel{} = vec4(1.0);\n'.format(i))
    for i,generator in enumerate(material.enabled_texcoord_generators):
        write_texcoord_generator(stream,i,generator,material.texture_matrices)
    stream.write('}\n')
    return stream.getvalue()
|
432705 | import unittest
from conda_env import env
from conda_env.specs.notebook import NotebookSpec
from ..utils import support_file
class TestNotebookSpec(unittest.TestCase):
    """Tests for NotebookSpec's detection of conda environments in notebooks."""
    def test_no_notebook_file(self):
        # A plain YAML file is not a notebook, so the spec must decline it.
        spec = NotebookSpec(support_file('simple.yml'))
        self.assertEqual(spec.can_handle(), False)
    def test_notebook_no_env(self):
        # A notebook without an embedded environment cannot be handled.
        spec = NotebookSpec(support_file('notebook.ipynb'))
        self.assertEqual(spec.can_handle(), False)
    def test_notebook_with_env(self):
        # A notebook with an embedded env is handled and exposes an Environment.
        spec = NotebookSpec(support_file('notebook_with_env.ipynb'))
        self.assertTrue(spec.can_handle())
        self.assertIsInstance(spec.environment, env.Environment)
|
432727 | import torch
import torch.nn as nn
import torchvision.models as backbone_
class EncoderCNN(nn.Module):
    """Variational image encoder.

    Runs the input through a U-Net style down-sampling stack, then maps the
    512-d feature vector to a 128-d Normal posterior with
    sigma = exp(0.5 * log_var).
    """

    def __init__(self, hp=None):
        super(EncoderCNN, self).__init__()
        self.feature = Unet_Encoder(in_channels=3)
        self.fc_mu = nn.Linear(512, 128)
        self.fc_std = nn.Linear(512, 128)

    def forward(self, x):
        feats = self.feature(x)
        mu = self.fc_mu(feats)
        log_var = self.fc_std(feats)
        scale = torch.exp(0.5 * log_var)
        return torch.distributions.Normal(mu, scale)
class DecoderCNN(nn.Module):
    """Image decoder: delegates to a U-Net style up-sampling stack."""

    def __init__(self, hp=None):
        super(DecoderCNN, self).__init__()
        self.model = Unet_Decoder(out_channels=3)

    def forward(self, x):
        reconstruction = self.model(x)
        return reconstruction
class Unet_Encoder(nn.Module):
    """Five stride-2 down-sampling blocks followed by a linear projection.

    The final Linear expects a 256 x 8 x 8 feature map (i.e. 256x256 input
    images), and produces a 512-d vector with dropout applied.
    """

    def __init__(self, in_channels=3):
        super(Unet_Encoder, self).__init__()
        self.down_1 = Unet_DownBlock(in_channels, 32, normalize=False)
        self.down_2 = Unet_DownBlock(32, 64)
        self.down_3 = Unet_DownBlock(64, 128)
        self.down_4 = Unet_DownBlock(128, 256)
        self.down_5 = Unet_DownBlock(256, 256)
        self.linear_encoder = nn.Linear(256 * 8 * 8, 512)
        self.dropout = nn.Dropout(0.5)

    def forward(self, x):
        out = x
        for block in (self.down_1, self.down_2, self.down_3,
                      self.down_4, self.down_5):
            out = block(out)
        out = self.linear_encoder(torch.flatten(out, start_dim=1))
        return self.dropout(out)
class Unet_Decoder(nn.Module):
    """Expand a 128-d code to a 256x8x8 map, then four 2x up-sampling blocks
    and a final ConvTranspose2d + Tanh producing values in [-1, 1]."""

    def __init__(self, out_channels=3):
        super(Unet_Decoder, self).__init__()
        self.linear_1 = nn.Linear(128, 8*8*256)
        self.dropout = nn.Dropout(0.5)
        self.deconv_1 = Unet_UpBlock(256, 256)
        self.deconv_2 = Unet_UpBlock(256, 128)
        self.deconv_3 = Unet_UpBlock(128, 64)
        self.deconv_4 = Unet_UpBlock(64, 32)
        self.final_image = nn.Sequential(
            nn.ConvTranspose2d(32, out_channels, kernel_size=4, stride=2,
                               padding=1),
            nn.Tanh(),
        )

    def forward(self, x):
        out = self.linear_1(x)
        out = out.view(-1, 256, 8, 8)
        out = self.dropout(out)
        for block in (self.deconv_1, self.deconv_2, self.deconv_3, self.deconv_4):
            out = block(out)
        return self.final_image(out)
class Unet_UpBlock(nn.Module):
    """2x up-sampling block: ConvTranspose2d(k4, s2, p1) + InstanceNorm + ReLU."""

    def __init__(self, inner_nc, outer_nc):
        super(Unet_UpBlock, self).__init__()
        self.model = nn.Sequential(
            nn.ConvTranspose2d(inner_nc, outer_nc, 4, 2, 1, bias=True),
            nn.InstanceNorm2d(outer_nc),
            nn.ReLU(inplace=True),
        )

    def forward(self, x):
        return self.model(x)
class Unet_DownBlock(nn.Module):
    """2x down-sampling block: Conv2d(k4, s2, p1), optional InstanceNorm,
    LeakyReLU(0.2)."""

    def __init__(self, inner_nc, outer_nc, normalize=True):
        super(Unet_DownBlock, self).__init__()
        norm = [nn.InstanceNorm2d(outer_nc)] if normalize else []
        self.model = nn.Sequential(
            nn.Conv2d(inner_nc, outer_nc, kernel_size=4, stride=2, padding=1, bias=True),
            *norm,
            nn.LeakyReLU(0.2, True),
        )

    def forward(self, x):
        return self.model(x)
class VGG_encoder(nn.Module):
    """VGG16-based image encoder.

    Extracts convolutional features with a pretrained VGG16 backbone, global
    max-pools them to a 512-d vector, and applies dropout.
    """
    def __init__(self, hp):
        super(VGG_encoder, self).__init__()
        self.feature = backbone_.vgg16(pretrained=True).features
        self.pool_method = nn.AdaptiveMaxPool2d(1)
        self.dropout = nn.Dropout(0.5)

    def forward(self, x):
        # Bug fix: the original called the undefined attribute
        # ``self.backbone`` on the builtin ``input`` instead of using the
        # ``self.feature`` module on the ``x`` argument, so forward() always
        # raised AttributeError.
        x = self.feature(x)
        x = self.pool_method(x).view(-1, 512)
        x = self.dropout(x)
        return x
def weights_init_normal(m):
    """Initialize a module in-place: N(0, 0.02) weights for conv layers
    (zeroing any bias), N(1, 0.02) weights and zero bias for BatchNorm2d;
    other module types are left untouched."""
    name = m.__class__.__name__
    if "Conv" in name:
        torch.nn.init.normal_(m.weight.data, 0.0, 0.02)
        if getattr(m, "bias", None) is not None:
            torch.nn.init.constant_(m.bias.data, 0.0)
    elif "BatchNorm2d" in name:
        torch.nn.init.normal_(m.weight.data, 1.0, 0.02)
        torch.nn.init.constant_(m.bias.data, 0.0)
if __name__ == '__main__':
    # No standalone behavior; this module only defines the model classes.
    pass
432840 | from geth.accounts import parse_geth_accounts
# Sample `geth account list` output (keys redacted) and the account
# values the parser is expected to extract from it.
raw_accounts = b"""Account #0: {<KEY>}
Account #1: {<KEY>}\n"""
accounts = (b"<KEY>", b"0x6f137a71a6f197df2cbbf010dcbd3c444ef5c925")
def test_parsing_accounts_output():
    # parse_geth_accounts should return the addresses as a tuple of bytes.
    assert parse_geth_accounts(raw_accounts) == accounts
|
432884 | import os
from PIL import Image, ImageDraw, ImageFont
from django.conf import settings
def saveTargetImage(letters, pk):
    """Render a 300x300 grayscale 3x3 letter grid and save it as a target image.

    letters: sequence of at least 9 strings; letters[1..8] fill the outer
    cells row by row and letters[0] is drawn white-on-black in the center.
    pk: primary key used to name the output file targets/target_<pk>.png.
    """
    height = 300
    width = 300
    # NOTE(review): PIL's ``size`` is (width, height); the arguments are
    # passed swapped but both are 300, so it has no visible effect.
    image = Image.new(mode='L', size=(height, width), color=255)
    draw = ImageDraw.Draw(image)
    # Vertical grid lines: left edge, two thirds, right edge.
    x = 0
    y_start = 0
    y_end = image.height
    line = ((x, y_start), (x, y_end))
    draw.line(line, fill=128)
    x = image.width / 3
    y_start = 0
    y_end = image.height
    line = ((x, y_start), (x, y_end))
    draw.line(line, fill=128)
    x = x + image.width / 3
    line = ((x, y_start), (x, y_end))
    draw.line(line, fill=128)
    x = image.width - 1
    line = ((x, y_start), (x, y_end))
    draw.line(line, fill=128)
    # Horizontal grid lines: top edge, two thirds, bottom edge.
    y = 0
    x_start = 0
    x_end = image.width
    line = ((x_start, y), (x_end, y))
    draw.line(line, fill=128)
    y = image.height / 3
    x_start = 0
    x_end = image.width
    line = ((x_start, y), (x_end, y))
    draw.line(line, fill=128)
    y = y + image.height / 3
    line = ((x_start, y), (x_end, y))
    draw.line(line, fill=128)
    y = image.height - 1
    line = ((x_start, y), (x_end, y))
    draw.line(line, fill=128)
    # Fill the center cell black; the center letter is drawn in white on it.
    x_start = image.width / 3
    y_start = image.height / 3
    x_end = x_start + image.width / 3
    y_end = y_start + image.height / 3
    area = ((x_start, y_start), (x_end, y_end))
    draw.rectangle(area, fill=0)
    # Font sized to roughly fill a cell.
    font_size = int(image.height / 3 - 8)
    font = ImageFont.truetype(os.path.join(
        settings.STATIC_ROOT, "target/fonts/VeraMono-Bold.ttf"), font_size)
    x_start = 15
    y_start = -4
    x = x_start
    y = y_start
    # NOTE(review): ``w`` is measured once (with the default font, not the
    # TrueType one) and reused as a fixed x-offset for every letter --
    # presumably intentional centering; confirm.
    w, h = draw.textsize(letters[1])
    # Top row: letters[1], letters[2], letters[3].
    xy = (x + w, y,)
    draw.text(xy, letters[1], font=font)
    x = x + image.width / 3
    xy = (x + w, y)
    draw.text(xy, letters[2], font=font)
    x = x + image.width / 3
    xy = (x + w, y)
    draw.text(xy, letters[3], font=font)
    # Middle row: letters[4], letters[0] (white on black center), letters[5].
    x = x_start
    y = y + image.height / 3
    xy = (x + w, y)
    draw.text(xy, letters[4], font=font)
    x = x + image.width / 3
    xy = (x + w, y)
    draw.text(xy, letters[0], fill=255, font=font)
    x = x + image.width / 3
    xy = (x + w, y,)
    draw.text(xy, letters[5], font=font)
    # Bottom row: letters[6], letters[7], letters[8].
    x = x_start
    y = y + image.height / 3
    xy = (x + w, y)
    draw.text(xy, letters[6], font=font)
    x = x + image.width / 3
    xy = (x + w, y)
    draw.text(xy, letters[7], font=font)
    x = x + image.width / 3
    xy = (x + w, y,)
    draw.text(xy, letters[8], font=font)
    # Save under MEDIA_ROOT and make the file world-readable.
    image.save(os.path.join(settings.MEDIA_ROOT,
                            'targets/target_' + str(pk) + '.png'))
    os.chmod(os.path.join(settings.MEDIA_ROOT,
                          'targets/target_' + str(pk) + '.png'), 0o0644)
|
432906 | from invoke import run, task
@task
def clean():
    """Remove the Sphinx documentation build output."""
    run('cd docs && make clean')
@task
def docs():
    """Build the HTML documentation with Sphinx."""
    run('cd docs && make html')
@task
def package():
    """Build sdist and wheel distributions."""
    run('python setup.py sdist')
    run('python setup.py bdist_wheel')
@task
def package_upload():
    """Build and upload sdist and wheel to the default package index."""
    run('python setup.py sdist upload')
    run('python setup.py bdist_wheel upload')
@task
def package_test():
    """Build sdist and wheel against the 'test' index configuration."""
    run('python setup.py sdist -r test')
    run('python setup.py bdist_wheel -r test')
@task
def package_test_upload():
    """Build and upload sdist and wheel to the 'test' index.

    Fix: the @task decorator was missing, so unlike every sibling task in
    this file the function was never exposed on the invoke CLI.
    """
    run('python setup.py sdist upload -r test')
    run('python setup.py bdist_wheel upload -r test')
@task
def test():
    """Run the test suite via tox."""
    run('tox')
|
432922 | import copy
from pybbn.graph.jointree import JoinTreeListener
from pybbn.pptc.initializer import Initializer
from pybbn.pptc.moralizer import Moralizer
from pybbn.pptc.potentialinitializer import PotentialInitializer
from pybbn.pptc.propagator import Propagator
from pybbn.pptc.transformer import Transformer
from pybbn.pptc.triangulator import Triangulator
class InferenceController(JoinTreeListener):
    """
    Inference controller. Builds a join tree from a BBN via the PPTC
    pipeline (moralize -> triangulate -> transform -> initialize ->
    propagate) and re-propagates automatically when evidence changes.
    """
    @staticmethod
    def apply(bbn):
        """
        Sets up the specified BBN for probability propagation in tree clusters (PPTC).
        :param bbn: BBN graph.
        :return: Join tree.
        """
        # Convert each node's CPT into potentials before restructuring.
        PotentialInitializer.init(bbn)
        ug = Moralizer.moralize(bbn)
        cliques = Triangulator.triangulate(ug)
        join_tree = Transformer.transform(cliques)
        # Keep each node's ordered parents for later CPT updates.
        join_tree.parent_info = {node.id: bbn.get_parents_ordered(node.id) for node in bbn.get_nodes()}
        Initializer.initialize(join_tree)
        Propagator.propagate(join_tree)
        # Listen for evidence changes so propagation is re-run automatically.
        join_tree.set_listener(InferenceController())
        return join_tree
    @staticmethod
    def reapply(join_tree, cpts):
        """
        Reapply propagation to join tree with new CPTs. The join tree structure is kept but the BBN node CPTs
        are updated. A new instance/copy of the join tree will be returned.
        :param join_tree: Join tree.
        :param cpts: Dictionary of new CPTs. Keys are id's of nodes and values are new CPTs.
        :return: Join tree.
        """
        # Deep-copy so the caller's tree is left untouched.
        jt = copy.deepcopy(join_tree)
        jt.update_bbn_cpts(cpts)
        # Clear the old listener and evidence before re-running propagation.
        jt.listener = None
        jt.evidences = dict()
        PotentialInitializer.reinit(jt)
        Initializer.initialize(jt)
        Propagator.propagate(jt)
        jt.set_listener(InferenceController())
        return jt
    @staticmethod
    def apply_from_serde(join_tree):
        """
        Applies propagation to join tree from a deserialized join tree.
        :param join_tree: Join tree.
        :return: Join tree (the same one passed in).
        """
        join_tree.listener = None
        join_tree.evidences = dict()
        PotentialInitializer.reinit(join_tree)
        Initializer.initialize(join_tree)
        Propagator.propagate(join_tree)
        join_tree.set_listener(InferenceController())
        return join_tree
    def evidence_retracted(self, join_tree):
        """
        Evidence is retracted.
        :param join_tree: Join tree.
        """
        # Retraction invalidates potentials: re-initialize, then propagate.
        Initializer.initialize(join_tree)
        Propagator.propagate(join_tree)
    def evidence_updated(self, join_tree):
        """
        Evidence is updated.
        :param join_tree: Join tree.
        """
        Propagator.propagate(join_tree)
|
432999 | import asyncio
import asynctnt
def get_push_iterator(connection):
    """Start the server-side push loop and return (call future, push iterator)."""
    fut = connection.call("infinite_push_loop", push_subscribe=True)
    return fut, asynctnt.PushIterator(fut)
async def main():
    """Consume server pushes forever, re-subscribing after reconnects."""
    async with asynctnt.Connection(port=3301) as conn:
        fut, it = get_push_iterator(conn)
        # Track the transport identity so a reconnect (which replaces the
        # transport object) can be detected and the subscription re-created.
        transport_id = id(conn._transport)
        while True:
            current_transport_id = id(conn._transport)
            if current_transport_id != transport_id:
                transport_id = current_transport_id
                fut, it = get_push_iterator(conn)
            try:
                result = await it.__anext__()
                # result = await asyncio.wait_for(it.__anext__(), timeout=10)
                print(result)
            except asyncio.TimeoutError:
                # Only reachable when the wait_for variant above is used.
                print('timeout')
                pass
            except Exception as e:
                # res = await fut
                # print(res)
                print(e)
                return
if __name__ == "__main__":
    # Entry point: drive the push-consuming loop until an error occurs.
    asyncio.run(main())
|
433045 | from pyspark import since, keyword_only, SparkContext
from pyspark.ml.param import Param, Params, TypeConverters
from pyspark.ml.param.shared import HasInputCol, HasOutputCol, HasHandleInvalid
from pyspark.ml.util import JavaMLReadable, JavaMLWritable
from pyspark.ml.wrapper import _jvm
from pyspark.ml.wrapper import JavaEstimator, JavaModel, JavaParams, JavaWrapper
from dirty_cat_spark.utils.java_reader import CustomJavaMLReader
class SimilarityEncoder(JavaEstimator, HasInputCol, HasOutputCol,
                        HasHandleInvalid, JavaMLReadable, JavaMLWritable):
    """Estimator wrapping the JVM-side ``dirty_cat`` SimilarityEncoder,
    which encodes string categories via n-gram string similarity.

    >>> encoder = SimilarityEncoder(inputCol="names", outputCol="encoderNames")
    """
    # Maximum number of reference categories kept in the vocabulary.
    vocabSize = Param(Params._dummy(), "vocabSize", "",
                      typeConverter=TypeConverters.toInt)
    # Character n-gram size used by the similarity measure.
    nGramSize = Param(Params._dummy(), "nGramSize", "",
                      typeConverter=TypeConverters.toInt)
    # Name of the string-similarity measure (e.g. "nGram").
    similarityType = Param(Params._dummy(), "similarityType", "",
                           typeConverter=TypeConverters.toString)
    # Policy for unseen/invalid categories at transform time.
    handleInvalid = Param(Params._dummy(), "handleInvalid", "",
                          typeConverter=TypeConverters.toString)
    # Ordering used when selecting vocabulary entries (e.g. "frequencyDesc").
    stringOrderType = Param(Params._dummy(), "stringOrderType", "",
                            typeConverter=TypeConverters.toString)
    @keyword_only
    def __init__(self, inputCol=None, outputCol=None,
                 nGramSize=3, similarityType="nGram",
                 handleInvalid="keep",
                 stringOrderType="frequencyDesc",
                 vocabSize=100):
        """
        __init__(self, inputCol=None, outputCol=None,
                 nGramSize=3, similarityType="nGram",
                 handleInvalid="keep", stringOrderType="frequencyDesc",
                 vocabSize=100)
        """
        super(SimilarityEncoder, self).__init__()
        self._java_obj = self._new_java_obj(
            "com.rakuten.dirty_cat.feature.SimilarityEncoder", self.uid)
        # NOTE(review): vocabSize is absent from the defaults below (the
        # commented-out line), so getVocabSize() may raise unless vocabSize
        # is set explicitly -- confirm against the JVM-side defaults.
        self._setDefault(nGramSize=3,
                         # vocabSize=100,
                         stringOrderType="frequencyDesc",
                         handleInvalid="keep",
                         similarityType="nGram")
        kwargs = self._input_kwargs
        self.setParams(**kwargs)
    @keyword_only
    def setParams(self, inputCol=None, outputCol=None,
                  nGramSize=3, similarityType="nGram",
                  handleInvalid="keep",
                  stringOrderType="frequencyDesc",
                  vocabSize=100):
        """
        setParams(self, inputCol=None, outputCol=None, nGramSize=3,
                  similarityType="nGram", handleInvalid="keep",
                  stringOrderType="frequencyDesc", vocabSize=100)
        Set the params for the SimilarityEncoder
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)
    def setStringOrderType(self, value):
        return self._set(stringOrderType=value)
    def setSimilarityType(self, value):
        return self._set(similarityType=value)
    def setNGramSize(self, value):
        return self._set(nGramSize=value)
    def setVocabSize(self, value):
        return self._set(vocabSize=value)
    def getStringOrderType(self):
        return self.getOrDefault(self.stringOrderType)
    def getSimilarityType(self):
        return self.getOrDefault(self.similarityType)
    def getNGramSize(self):
        return self.getOrDefault(self.nGramSize)
    def getVocabSize(self):
        return self.getOrDefault(self.vocabSize)
    def _create_model(self, java_model):
        # Wrap the fitted JVM model in its Python companion class.
        return SimilarityEncoderModel(java_model)
class SimilarityEncoderModel(JavaModel, JavaMLReadable, JavaMLWritable):
    """Model fitted by :py:class:`SimilarityEncoder`. """
    @property
    def vocabularyReference(self):
        """
        The fitted vocabulary of reference categories, fetched from the
        underlying JVM model.
        """
        return self._call_java("vocabularyReference")
# # @classmethod
# # def from_vocabularyReference(cls, vocabularyReference, inputCol,
# # outputCol=None, nGramSize=None,
# # similarityType=None, handleInvalid=None,
# # stringOrderType=None, vocabSize=None):
# # """
# # Construct the model directly from an array of label strings,
# # requires an active SparkContext.
# # """
# # sc = SparkContext._active_spark_context
# # java_class = sc._gateway.jvm.java.lang.String
# # jVocabularyReference = SimilarityEncoderModel._new_java_array(
# # vocabularyReference, java_class)
# # model = SimilarityEncoderModel._create_from_java_class(
# # 'dirty_cat.feature.SimilarityEncoderModel', jVocabularyReference)
# # model.setInputCol(inputCol)
# # if outputCol is not None:
# # model.setOutputCol(outputCol)
# # if nGramSize is not None:
# # model.setNGramSize(nGramSize)
# # if similarityType is not None:
# # model.setSimilarityType(similarityType)
# # if handleInvalid is not None:
# # model.setHandleInvalid(handleInvalid)
# # if stringOrderType is not None:
# # model.setStringOrderType(stringOrderType)
# # if vocabSize is not None:
# # model.setVocabSize(vocabSize)
# # return model
# # @staticmethod
# # def _from_java(java_stage):
# # """
# # Given a Java object, create and return a Python wrapper of it.
# # Used for ML persistence.
# # Meta-algorithms such as Pipeline should override this method as a classmethod.
# # """
# # # Generate a default new instance from the stage_name class.
# # py_type =SimilarityEncoderModel
# # if issubclass(py_type, JavaParams):
# # # Load information from java_stage to the instance.
# # py_stage = py_type()
# # py_stage._java_obj = java_stage
# # py_stage._resetUid(java_stage.uid())
# # py_stage._transfer_params_from_java()
# # return py_stage
# # @classmethod
# # def read(cls):
# # """Returns an MLReader instance for this class."""
# # return CustomJavaMLReader(
# # cls, 'dirty_cat.feature.SimilarityEncoderModel')
|
433087 | import logging
from py2neo import Graph
class GraphDao:
    """Data-access object for the fault-repair knowledge graph in Neo4j.

    NOTE: query arguments are interpolated directly into Cypher strings;
    parameterize them if the names can come from untrusted input.
    """

    def __init__(self):
        # Connection to the Neo4j server (host ip obtainable via ifconfig;
        # 7474 is the default HTTP port; the user defaults to neo4j).
        self.g = Graph(
            host="127.0.0.1",
            http_port=7474,
            user="neo4j",
            password="<PASSWORD>")
        # self.num_limit = 20

    def execute_sql(self, sql):
        """Run a Cypher statement and return its rows, or None on failure."""
        answer = None
        try:
            answer = self.g.run(sql).data()
        except Exception:
            # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
            # propagate; failures are logged and None is returned.
            logging.error("execute sql failed, sql: {0}".format(sql))
        return answer

    def get_all__entities(self):
        """Return every node in the graph."""
        sql = 'MATCH (n) return n'
        result = self.execute_sql(sql)
        # Guard against a failed query (execute_sql returns None on error).
        return [i['n'] for i in (result or [])]

    # Fault-repair knowledge graph queries.
    # def get_all_log_entities(self):
    #     sql = 'MATCH (n:log) return n'
    #     result = self.execute_sql(sql)
    #     return [i['n'] for i in result]

    def get_all_log_entities(self):
        """Return all 'log' nodes of the graph."""
        result = self.g.run("match (n:log) return n").data()
        return result

    def get_fault_entity_by_log(self, log_name):
        """Return fault nodes linked to the given log via has_log."""
        sql = 'MATCH (x:fault)-[r:has_log]->(y:log) where y.name = "{0}" return x'.format(
            log_name)
        result = self.execute_sql(sql)
        return [i['x'] for i in (result or [])]

    def get_solutions_by_fault(self, fault_name):
        """Return solution nodes linked to the given fault via has_solution."""
        sql = 'MATCH (x:fault)-[r:has_solution]->(y:solution) where x.name = "{0}" return y'.format(
            fault_name)
        result = self.execute_sql(sql)
        return [i['y'] for i in (result or [])]

    def get_reasons_by_fault(self, fault_name):
        """Return reason nodes linked to the given fault via has_reason."""
        sql = 'MATCH (x:fault)-[r:has_reason]->(y:reason) where x.name = "{0}" return y'.format(
            fault_name)
        result = self.execute_sql(sql)
        return [i['y'] for i in (result or [])]

    def get_solutions_by_reason(self, reason_name):
        """Return solution nodes linked to the given reason via has_solution."""
        sql = 'MATCH (x:reason)-[r:has_solution]->(y:solution) where x.name = "{0}" return y'.format(
            reason_name)
        result = self.execute_sql(sql)
        return [i['y'] for i in (result or [])]
433094 | import os
import uuid
import pytest
import requests.exceptions
from django_webdav_storage import storage
def test_listdir_raises_not_implemented(settings):
    # Without a configured listing backend, listdir() must be unsupported.
    del settings.WEBDAV_LISTING_BACKEND
    webdav_storage = storage.WebDavStorage()
    with pytest.raises(NotImplementedError):
        webdav_storage.listdir('testdir')
def test_listdir_not_found(webdav_storage):
    # Listing a missing directory surfaces the server's HTTP error.
    with pytest.raises(requests.exceptions.HTTPError):
        webdav_storage.listdir('_this_dir_does_not_exist/')
@pytest.mark.parametrize(
    argnames='var_name, expected_items',
    argvalues=[
        ('files', {b'file.img', b'hello.pdf'}),
        ('dirs', {b'hello'}),
    ],
    ids=['files', 'directories']
)
def test_listdir_works(webdav_storage, create_file, settings,
                       var_name, expected_items):
    """listdir() splits a populated directory into (dirs, files)."""
    prefix = '{0}/listdir'.format(uuid.uuid4())
    root = 'test-list/'
    for f in ['file.img', 'hello.pdf', 'hello/image.png', 'hello/text.txt']:
        create_file(root + f, prefix=prefix)
    dirs, files = webdav_storage.listdir(os.path.join(prefix, root))
    # Fix: explicit mapping instead of the fragile locals()[var_name] lookup,
    # which silently depends on local variable names.
    listed = {'dirs': dirs, 'files': files}[var_name]
    assert set(listed) == expected_items
|
433123 | import importlib.resources
import pendulum
import plotman.job
import plotman.plotters.bladebit
import plotman._tests.resources
def test_byte_by_byte_full_load() -> None:
    """Feeding the reference log one byte at a time yields the full parse."""
    # NOTE(review): importlib.resources.read_binary() is deprecated in favor
    # of importlib.resources.files(); fine while supported versions allow it.
    read_bytes = importlib.resources.read_binary(
        package=plotman._tests.resources,
        resource="bladebit.plot.log",
    )
    parser = plotman.plotters.bladebit.Plotter()
    # Single-byte chunks exercise the parser's incremental buffering.
    for byte in (bytes([byte]) for byte in read_bytes):
        parser.update(chunk=byte)
    # Expected values transcribed from the reference bladebit log.
    assert parser.info == plotman.plotters.bladebit.SpecificInfo(
        phase=plotman.job.Phase(major=5, minor=1),
        started_at=pendulum.datetime(2021, 8, 29, 22, 22, 0, tz=None),
        plot_id="1fc7b57baae24da78e3bea44d58ab51f162a3ed4d242bab2fbcc24f6577d88b3",
        threads=88,
        plot_size=32,
        dst_dir="/mnt/tmp/01/manual-transfer/",
        phase1_duration_raw=313.98,
        phase2_duration_raw=44.60,
        phase3_duration_raw=203.26,
        phase4_duration_raw=1.11,
        total_time_raw=582.91,
        filename="plot-k32-2021-08-29-22-22-1fc7b57baae24da78e3bea44d58ab51f162a3ed4d242bab2fbcc24f6577d88b3.plot",
        plot_name="plot-k32-2021-08-29-22-22-1fc7b57baae24da78e3bea44d58ab51f162a3ed4d242bab2fbcc24f6577d88b3",
    )
def test_log_phases() -> None:
    """Each marked log line must advance the parser to the annotated phase."""
    # TODO: CAMPid 0978413087474699698142013249869897439887
    read_bytes = importlib.resources.read_binary(
        package=plotman._tests.resources,
        resource="bladebit.marked",
    )
    parser = plotman.plotters.bladebit.Plotter()
    wrong = []
    for marked_line in read_bytes.splitlines(keepends=True):
        # Each marked line is "<major>:<minor>,<original log line>".
        phase_bytes, _, line_bytes = marked_line.partition(b",")
        major, _, minor = phase_bytes.decode("utf-8").partition(":")
        phase = plotman.job.Phase(major=int(major), minor=int(minor))
        parser.update(chunk=line_bytes)
        if parser.info.phase != phase:  # pragma: nocov
            wrong.append([parser.info.phase, phase, line_bytes.decode("utf-8")])
    # Mismatches are collected first so a failure reports every bad line.
    assert wrong == []
def test_marked_log_matches() -> None:
    """The marked log must be the original log plus phase prefixes only."""
    # TODO: CAMPid 909831931987460871349879878609830987138931700871340870
    marked_bytes = importlib.resources.read_binary(
        package=plotman._tests.resources,
        resource="bladebit.marked",
    )
    log_bytes = importlib.resources.read_binary(
        package=plotman._tests.resources,
        resource="bladebit.plot.log",
    )
    for marked_line, log_line in zip(
        marked_bytes.splitlines(keepends=True), log_bytes.splitlines(keepends=True)
    ):
        # Stripping the "<phase>," prefix must recover the original line.
        _, _, marked_just_line = marked_line.partition(b",")
        assert marked_just_line == log_line
|
433170 | from transformers.modeling_bert import BertLMPredictionHead, BertPreTrainedModel, BertModel
from BERT.lm_finetune.grad_reverse_layer import GradReverseLayerFunction
from BERT.bert_text_dataset import BertTextDataset
from BERT.bert_pos_tagger import BertTokenClassificationDataset
from torch.nn import CrossEntropyLoss
import torch.nn as nn
import torch
class BertIMAPredictionHead(nn.Module):
    """Binary prediction head behind a gradient-reversal layer, used for
    adversarial training of the is-masked-adjective objective."""

    def __init__(self, config):
        super(BertIMAPredictionHead, self).__init__()
        # self.transform = BertPredictionHeadTransform(config)
        self.decoder = nn.Linear(config.hidden_size, 2)
        # alpha scales the reversed gradient; fixed at 1.0 (no annealing).
        self.alpha = 1.

    def forward(self, hidden_states):
        reversed_states = GradReverseLayerFunction.apply(hidden_states, self.alpha)
        return self.decoder(reversed_states)
class BertIMAPreTrainingHeads(nn.Module):
    """Bundles the MLM head with the adversarial adjective-prediction head."""

    def __init__(self, config):
        super(BertIMAPreTrainingHeads, self).__init__()
        self.predictions = BertLMPredictionHead(config)
        self.adj_predictions = BertIMAPredictionHead(config)

    def forward(self, sequence_output, pooled_output):
        # pooled_output is accepted for interface parity but unused here.
        return (self.predictions(sequence_output),
                self.adj_predictions(sequence_output))
class BertForIMAPreTraining(BertPreTrainedModel):
    r"""BERT pre-training model with an MLM head plus an adversarial
    "is-masked-adjective" (IMA) head on top of the encoder.

    **masked_lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
        MLM labels in ``[-1, 0, ..., config.vocab_size]``; positions set to
        ``-1`` are ignored, the loss is only computed over labeled tokens.
    **masked_adj_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
        ``1`` if the masked word is an adjective, ``0`` otherwise.

    Outputs: `Tuple`. When both label tensors are given the tuple starts with
    ``total_loss`` (MLM loss + IMA loss), ``mlm_loss_per_sample`` and
    ``ima_loss_per_sample``; it always contains ``lm_prediction_scores`` of
    shape ``(batch, seq, vocab)`` and ``adj_prediction_scores`` of shape
    ``(batch, seq, 2)``, followed by any hidden states / attentions the
    configuration enables.

    Examples::

        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        model = BertForIMAPreTraining.from_pretrained('bert-base-uncased')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids)
        prediction_scores, adj_prediction_scores = outputs[:2]
    """
    def __init__(self, config):
        super(BertForIMAPreTraining, self).__init__(config)
        self.bert = BertModel(config)
        self.cls = BertIMAPreTrainingHeads(config)
        self.init_weights()
        self.tie_weights()
    def tie_weights(self):
        """ Make sure we are sharing the input and output embeddings.
            Export to TorchScript can't handle parameter sharing so we are cloning them instead.
        """
        self._tie_or_clone_weights(self.cls.predictions.decoder,
                                   self.bert.embeddings.word_embeddings)
    def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None,
                head_mask=None, masked_lm_labels=None, masked_adj_labels=None, pos_tagging_labels=None):
        # pos_tagging_labels is accepted for signature parity with the
        # "w/ control" variant below but is unused here.
        outputs = self.bert(input_ids,
                            attention_mask=attention_mask,
                            token_type_ids=token_type_ids,
                            position_ids=position_ids,
                            head_mask=head_mask)
        sequence_output, pooled_output = outputs[:2]
        lm_prediction_scores, adj_prediction_scores = self.cls(sequence_output, pooled_output)
        outputs = (lm_prediction_scores, adj_prediction_scores,) + outputs[2:]  # add hidden states and attention if they are here
        if masked_lm_labels is not None and masked_adj_labels is not None:
            loss_f = CrossEntropyLoss(ignore_index=BertTextDataset.MLM_IGNORE_LABEL_IDX)
            masked_lm_loss = loss_f(lm_prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
            masked_adj_loss = loss_f(adj_prediction_scores.view(-1, 2), masked_adj_labels.view(-1))
            total_loss = masked_lm_loss + masked_adj_loss
            # Unreduced per-sample losses are also exposed for analysis.
            loss_f_per_sample = CrossEntropyLoss(ignore_index=BertTextDataset.MLM_IGNORE_LABEL_IDX, reduction='none')
            mlm_loss_per_sample = self.calc_loss_per_sample(loss_f_per_sample, lm_prediction_scores, masked_lm_labels, self.config.vocab_size)
            ima_loss_per_sample = self.calc_loss_per_sample(loss_f_per_sample, adj_prediction_scores, masked_adj_labels, 2)
            outputs = (mlm_loss_per_sample, ima_loss_per_sample,) + outputs
            outputs = (total_loss,) + outputs
        return outputs  # (loss), prediction_scores, seq_relationship_score, (hidden_states), (attentions)
    @staticmethod
    def calc_loss_per_sample(loss_f, scores, masked_labels, label_size, ignore_index=BertTextDataset.MLM_IGNORE_LABEL_IDX):
        # Mean loss over each sample's masked positions, individually.
        return torch.stack([loss_f(scores.view(-1, label_size), masked_labels.view(-1))
                           .view_as(masked_labels)[i, :].masked_select(masked_labels[i, :] > ignore_index).mean()
                            for i in range(masked_labels.size(0))])
class BertTokenClassificationHead(nn.Module):
    """Per-token linear classifier over BERT hidden states (POS control head)."""

    def __init__(self, config):
        super(BertTokenClassificationHead, self).__init__()
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, hidden_states):
        logits = self.classifier(hidden_states)
        return logits
class BertIMAwControlPreTrainingHeads(nn.Module):
    """MLM head + adversarial adjective head + POS-tagging control head."""

    def __init__(self, config):
        super(BertIMAwControlPreTrainingHeads, self).__init__()
        self.predictions = BertLMPredictionHead(config)
        self.adj_predictions = BertIMAPredictionHead(config)
        self.pos_tagging = BertTokenClassificationHead(config)

    def forward(self, sequence_output, pooled_output):
        # pooled_output is accepted for interface parity but unused here.
        lm_scores = self.predictions(sequence_output)
        adj_scores = self.adj_predictions(sequence_output)
        pos_scores = self.pos_tagging(sequence_output)
        return lm_scores, adj_scores, pos_scores
class BertForIMAwControlPreTraining(BertPreTrainedModel):
r"""
**masked_lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Labels for computing the masked language modeling loss.
Indices should be in ``[-1, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)
Tokens with indices set to ``-1`` are ignored (masked), the loss is only computed for the tokens with labels
in ``[0, ..., config.vocab_size]``
**masked_adj_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Labels for computing the masked adjective prediction (classification) loss.
Indices should be in ``[0, 1]``.
``0`` indicates masked word is not adjective,
``1`` indicates masked word is adjective.
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**loss**: (`optional`, returned when both ``masked_lm_labels`` and ``next_sentence_label`` are provided) ``torch.FloatTensor`` of shape ``(1,)``:
Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss.
**lm_prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
**adj_relationship_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, 2)``
Prediction scores of the masked adjective predictions (classification) head (scores of True/False before SoftMax).
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertForPreTraining.from_pretrained('bert-base-uncased')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
outputs = model(input_ids)
prediction_scores, seq_relationship_scores = outputs[:2]
"""
    def __init__(self, config):
        # Build the BERT backbone plus the combined pre-training heads
        # (masked-LM, masked-adjective, POS tagging), then initialize and
        # tie the decoder weights to the input embeddings.
        super(BertForIMAwControlPreTraining, self).__init__(config)
        self.bert = BertModel(config)
        self.cls = BertIMAwControlPreTrainingHeads(config)
        self.init_weights()
        self.tie_weights()
    def tie_weights(self):
        """ Make sure we are sharing the input and output embeddings.
            Export to TorchScript can't handle parameter sharing so we are cloning them instead.
        """
        # Ties (or clones, for TorchScript export) the MLM decoder matrix to
        # the input word-embedding matrix so both use the same parameters.
        self._tie_or_clone_weights(self.cls.predictions.decoder,
                                        self.bert.embeddings.word_embeddings)
def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None,
head_mask=None, masked_lm_labels=None, masked_adj_labels=None, pos_tagging_labels=None):
outputs = self.bert(input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask)
sequence_output, pooled_output = outputs[:2]
lm_prediction_scores, adj_prediction_scores, pos_tagging_scores = self.cls(sequence_output, pooled_output)
outputs = (lm_prediction_scores, adj_prediction_scores, pos_tagging_scores,) + outputs[2:] # add hidden states and attention if they are here
total_loss = 0.0
if pos_tagging_labels is not None:
loss_f = CrossEntropyLoss(ignore_index=BertTokenClassificationDataset.POS_IGNORE_LABEL_IDX)
loss_f_per_sample = CrossEntropyLoss(ignore_index=BertTokenClassificationDataset.POS_IGNORE_LABEL_IDX, reduction='none')
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = pos_tagging_scores.view(-1, self.config.num_labels)
active_labels = torch.where(
active_loss, pos_tagging_labels.view(-1), torch.tensor(loss_f.ignore_index).type_as(pos_tagging_labels)
)
pos_tagging_loss = loss_f(active_logits, active_labels)
# pos_tagging_loss_per_sample = BertForIMAPreTraining.calc_loss_per_sample(loss_f_per_sample,
# active_logits,
# active_labels,
# self.config.num_labels)
else:
pos_tagging_loss = loss_f(pos_tagging_scores.view(-1, self.config.num_labels), pos_tagging_labels.view(-1))
pos_tagging_loss_per_sample = BertForIMAPreTraining.calc_loss_per_sample(loss_f_per_sample,
pos_tagging_scores,
pos_tagging_labels,
self.config.num_labels,
BertTokenClassificationDataset.POS_IGNORE_LABEL_IDX)
total_loss += pos_tagging_loss
outputs = (pos_tagging_loss_per_sample,) + outputs
if masked_lm_labels is not None and masked_adj_labels is not None:
loss_f = CrossEntropyLoss(ignore_index=BertTextDataset.MLM_IGNORE_LABEL_IDX)
masked_lm_loss = loss_f(lm_prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
masked_adj_loss = loss_f(adj_prediction_scores.view(-1, 2), masked_adj_labels.view(-1))
total_loss += masked_lm_loss + masked_adj_loss
loss_f_per_sample = CrossEntropyLoss(ignore_index=BertTextDataset.MLM_IGNORE_LABEL_IDX, reduction='none')
mlm_loss_per_sample = BertForIMAPreTraining.calc_loss_per_sample(loss_f_per_sample,
lm_prediction_scores,
masked_lm_labels,
self.config.vocab_size)
ima_loss_per_sample = BertForIMAPreTraining.calc_loss_per_sample(loss_f_per_sample,
adj_prediction_scores,
masked_adj_labels,
2)
outputs = (mlm_loss_per_sample, ima_loss_per_sample,) + outputs
outputs = (total_loss,) + outputs
return outputs # (loss), prediction_scores, seq_relationship_score, (hidden_states), (attentions)
|
433172 | from typing import Callable, List, Optional, Tuple, Union
import numpy as np
from scipy.interpolate import CubicSpline
from .base import _Augmenter, _default_seed
class Drift(_Augmenter):
    """
    Drift the value of time series.

    The augmenter drifts the value of time series from its original values
    randomly and smoothly. The extent of drifting is controlled by the maximal
    drift and the number of drift points.

    Parameters
    ----------
    max_drift : float or tuple, optional
        The maximal amount of drift added to a time series.

        - If float, all series (all channels if `per_channel` is True) are
          drifted with the same maximum.
        - If tuple, the maximal drift added to a time series (a channel if
          `per_channel` is True) is sampled from this interval randomly.

        Default: 0.5.
    n_drift_points : int or list, optional
        The number of time points a new drifting trend is defined in a series.

        - If int, all series (all channels if `per_channel` is True) have the
          same number of drift points.
        - If list, the number of drift points defined in a series (a channel if
          `per_channel` is True) is sampled from this list randomly.

    kind : str, optional
        How the noise is added to the original time series. It must be either
        'additive' or 'multiplicative'. Default: 'additive'.
    per_channel : bool, optional
        Whether to sample independent drifting trends for each channel in a time
        series or to use the same drifting trends for all channels in a time
        series. Default: True.
    normalize : bool, optional
        Whether the drifting trend is added to the normalized time series. If
        True, each channel of a time series is normalized to [0, 1] first.
        Default: True.
    repeats : int, optional
        The number of times a series is augmented. If greater than one, a series
        will be augmented so many times independently. This parameter can also
        be set by operator `*`. Default: 1.
    prob : float, optional
        The probability of a series is augmented. It must be in (0.0, 1.0]. This
        parameter can also be set by operator `@`. Default: 1.0.
    seed : int, optional
        The random seed. Default: None.
    """

    def __init__(
        self,
        max_drift: Union[float, Tuple[float, float]] = 0.5,
        n_drift_points: Union[int, List[int]] = 3,
        kind: str = "additive",
        per_channel: bool = True,
        normalize: bool = True,
        repeats: int = 1,
        prob: float = 1.0,
        seed: Optional[int] = _default_seed,
    ):
        # Each assignment goes through the validating property setter below.
        self.max_drift = max_drift
        self.n_drift_points = n_drift_points
        self.kind = kind
        self.per_channel = per_channel
        self.normalize = normalize
        super().__init__(repeats=repeats, prob=prob, seed=seed)

    @classmethod
    def _get_param_name(cls) -> Tuple[str, ...]:
        """Return the names of the augmenter-specific parameters."""
        return (
            "max_drift",
            "n_drift_points",
            "kind",
            "per_channel",
            "normalize",
        )

    @property
    def max_drift(self) -> Union[float, Tuple[float, float]]:
        return self._max_drift

    @max_drift.setter
    def max_drift(self, v: Union[float, Tuple[float, float]]) -> None:
        # Accept a single non-negative number, or an ordered 2-tuple of
        # non-negative numbers describing a sampling interval.
        MAX_DRIFT_ERROR_MSG = (
            "Parameter `max_drift` must be a non-negative number "
            "or a 2-tuple of non-negative numbers representing an interval. "
        )
        if not isinstance(v, (float, int)):
            if isinstance(v, tuple):
                if len(v) != 2:
                    raise ValueError(MAX_DRIFT_ERROR_MSG)
                if (not isinstance(v[0], (float, int))) or (
                    not isinstance(v[1], (float, int))
                ):
                    raise TypeError(MAX_DRIFT_ERROR_MSG)
                if v[0] > v[1]:
                    raise ValueError(MAX_DRIFT_ERROR_MSG)
                if (v[0] < 0.0) or (v[1] < 0.0):
                    raise ValueError(MAX_DRIFT_ERROR_MSG)
            else:
                raise TypeError(MAX_DRIFT_ERROR_MSG)
        elif v < 0.0:
            raise ValueError(MAX_DRIFT_ERROR_MSG)
        self._max_drift = v

    @property
    def n_drift_points(self) -> Union[int, List[int]]:
        return self._n_drift_points

    @n_drift_points.setter
    def n_drift_points(self, n: Union[int, List[int]]) -> None:
        # Accept a positive int, or a non-empty list of positive ints.
        N_DRIFT_POINTS_ERROR_MSG = (
            "Parameter `n_drift_points` must be a positive integer "
            "or a list of positive integers."
        )
        if not isinstance(n, int):
            if isinstance(n, list):
                if len(n) == 0:
                    raise ValueError(N_DRIFT_POINTS_ERROR_MSG)
                if not all([isinstance(nn, int) for nn in n]):
                    raise TypeError(N_DRIFT_POINTS_ERROR_MSG)
                if not all([nn > 0 for nn in n]):
                    raise ValueError(N_DRIFT_POINTS_ERROR_MSG)
            else:
                raise TypeError(N_DRIFT_POINTS_ERROR_MSG)
        elif n <= 0:
            raise ValueError(N_DRIFT_POINTS_ERROR_MSG)
        self._n_drift_points = n

    @property
    def per_channel(self) -> bool:
        return self._per_channel

    @per_channel.setter
    def per_channel(self, p: bool) -> None:
        if not isinstance(p, bool):
            # BUGFIX: error message said "Paremeter".
            raise TypeError("Parameter `per_channel` must be boolean.")
        self._per_channel = p

    @property
    def normalize(self) -> bool:
        return self._normalize

    @normalize.setter
    def normalize(self, p: bool) -> None:
        if not isinstance(p, bool):
            # BUGFIX: error message said "Paremeter".
            raise TypeError("Parameter `normalize` must be boolean.")
        self._normalize = p

    @property
    def kind(self) -> str:
        return self._kind

    @kind.setter
    def kind(self, k: str) -> None:
        if not isinstance(k, str):
            raise TypeError(
                "Parameter `kind` must be either 'additive' or 'multiplicative'."
            )
        if k not in ("additive", "multiplicative"):
            raise ValueError(
                "Parameter `kind` must be either 'additive' or 'multiplicative'."
            )
        self._kind = k

    def _augment_core(
        self, X: np.ndarray, Y: Optional[np.ndarray]
    ) -> Tuple[np.ndarray, Optional[np.ndarray]]:
        """Add a smooth random drift to X of shape (N, T, C); Y is copied unchanged."""
        N, T, C = X.shape
        rand = np.random.RandomState(self.seed)
        if isinstance(self.n_drift_points, int):
            n_drift_points = set([self.n_drift_points])
        else:
            n_drift_points = set(self.n_drift_points)
        ind = rand.choice(
            len(n_drift_points), N * (C if self.per_channel else 1)
        )  # map series to n_drift_points
        drift = np.zeros((N * (C if self.per_channel else 1), T))
        for i, n in enumerate(n_drift_points):
            if not (ind == i).any():
                continue
            # Random-walk anchors interpolated by a cubic spline give a
            # smooth drift curve per (series, channel).
            anchors = np.cumsum(
                rand.normal(size=((ind == i).sum(), n + 2)), axis=1
            )  # type: np.ndarray
            interpFuncs = CubicSpline(
                np.linspace(0, T, n + 2), anchors, axis=1
            )  # type: Callable
            drift[ind == i, :] = interpFuncs(np.arange(T))
        drift = drift.reshape((N, -1, T)).swapaxes(1, 2)
        # Anchor every curve at zero at t=0 and scale to unit max amplitude.
        drift = drift - drift[:, 0, :].reshape(N, 1, -1)
        drift = drift / abs(drift).max(axis=1, keepdims=True)
        if isinstance(self.max_drift, (float, int)):
            drift = drift * self.max_drift
        else:
            drift = drift * rand.uniform(
                low=self.max_drift[0],
                high=self.max_drift[1],
                size=(N, 1, C if self.per_channel else 1),
            )
        if self.kind == "additive":
            if self.normalize:
                # Scale the drift by each channel's value range.
                X_aug = X + drift * (
                    X.max(axis=1, keepdims=True) - X.min(axis=1, keepdims=True)
                )
            else:
                X_aug = X + drift
        else:
            X_aug = X * (1 + drift)
        if Y is not None:
            Y_aug = Y.copy()
        else:
            Y_aug = None
        return X_aug, Y_aug
|
433179 | import time
import threading
import os
import playsound
import random
import json
# --- Reminder timer state shared between the two threads ---
ftime=0  # accumulated paused duration (seconds) excluded from the countdown
ptime=0  # wall-clock time at which the most recent pause began
start = -1  # wall-clock start of the current reminder cycle; -1 = not started yet
# Reminder interval is read (in minutes) from config.json next to the script.
with open('./config.json') as f:
    config = json.load(f)
TIME = config['time']*60  # reminder interval in seconds
a_files=os.listdir("./Audio_Files/")  # alert sounds; one is picked at random per reminder
def input_func():
    """Command thread: read 1 (resume), 0 (pause) or -1 (exit) from stdin."""
    global ftime, ptime, start
    startevent.set()  # arm the timer thread for its first cycle
    print(f"Welcome to ReminderBot! 0 to Pause and -1 to exit! Ill Remind you every {TIME/60} minutes")
    while True:
        try:
            s = int(input())
        except ValueError:
            # BUGFIX: non-numeric input used to raise and kill this thread;
            # ignore it and keep reading commands instead.
            continue
        if s == 1:
            if not pauseevent.is_set():
                continue  # not paused, nothing to resume
            print("Reminder Resumed! Select 0 to pause and -1 to exit")
            # Fold the paused duration into ftime so the countdown excludes it.
            ftime = ftime + time.time() - ptime
            pauseevent.clear()  # Resume Remindabot
        elif s == 0:
            if pauseevent.is_set():
                continue  # already paused
            ptime = time.time()
            print(f"Reminder Paused! {(ptime-start)/60} mins elapsed (Select 1 to Resume and -1 to exit)")
            pauseevent.set()
        elif s == -1:
            killevent.set()  # tell the timer thread to shut down
            break
    print("Program Exited")
def rem_func():
    """Timer thread: play a random alert sound every TIME seconds, honoring pause/kill."""
    global ftime, start
    while True:
        if killevent.is_set():  # Check if thread should be killed
            break
        # BUGFIX: the original loop had no sleep and busy-waited, pegging a
        # CPU core; a short nap keeps the thread responsive without spinning.
        time.sleep(0.05)
        if pauseevent.is_set():  # Check for Pause Condition
            continue
        if startevent.is_set():  # start (or restart) a timing cycle
            start = time.time()
            startevent.clear()
        # The original guarded this with `if not startevent.is_set()`, which is
        # always true at this point (the event was just cleared above if set).
        if start != -1:
            cur = time.time()
            if cur - start - ftime >= TIME:
                print(f"{TIME/60} mins has elapsed")
                playsound.playsound(f"./Audio_Files/{random.choice(a_files)}", True)
                startevent.set()  # schedule the next cycle
                ftime = 0
# Events coordinating the two threads: pause/resume, shutdown, and (re)start of a timer cycle.
pauseevent = threading.Event()
killevent = threading.Event()
startevent = threading.Event()
# One thread reads user commands; the other watches the clock and plays the alert.
t1 = threading.Thread(target=input_func)
t2 = threading.Thread(target=rem_func)
t1.start()
t2.start() |
433219 | from django.core.mail import EmailMultiAlternatives
from django.conf import settings
from django.http import HttpResponse
from django.template import Context
from django.template.loader import render_to_string, get_template
import json
def sendEmailToken(request, token):
    """Email a login token to ``token.email`` as plain text with an HTML alternative."""
    # BUGFIX: Django >= 1.10 backend templates' render() expects a plain dict;
    # wrapping it in Context() raises TypeError there. A dict also works on
    # 1.8/1.9 backend templates, so pass it directly.
    # NOTE(review): if this project is pinned to a pre-1.8 Django, confirm.
    variables = {
        'request': request,
        'token': token,
    }
    html = get_template('mail/token_html.html').render(variables)
    text = get_template('mail/token_text.html').render(variables)
    msg = EmailMultiAlternatives(
        settings.EMAIL_LOGIN_TITLE,
        text,
        settings.EMAIL_SENDER,
        [token.email])
    msg.attach_alternative(html, "text/html")
    msg.send(fail_silently=False)
def render_json(data_dict):
    """Serialize *data_dict* to JSON and wrap it in an HTTP response."""
    payload = json.dumps(data_dict)
    return HttpResponse(payload, 'application/javascript')
def render_template_json(template, context):
    """Render *template* with *context* and wrap the result in an HTTP response."""
    body = render_to_string(template, context)
    return HttpResponse(body, 'application/javascript')
|
433220 | import sys
import collections
import collections.abc
import random
import string
from io import StringIO
from itertools import tee
def import_class(cls):
    """Import and return the object named by the dotted path *cls*."""
    module_path, _, class_name = cls.rpartition('.')
    module = __import__(module_path, fromlist=[module_path])
    return getattr(module, class_name)
def random_ascii(length):
    """Return a random lowercase hex string of *length* characters.

    Uses the OS entropy source (SystemRandom). The generator is created once
    rather than once per character as the original did.
    """
    rng = random.SystemRandom()
    return ''.join(rng.choice(string.hexdigits) for _ in range(length)).lower()
def format_exception(exception):
    """Format *exception* as ``'TypeName: message'``."""
    return f"{type(exception).__name__}: {exception}"
class OrderedSet(collections.abc.MutableSet):
    """A set that remembers insertion order, backed by a doubly linked list.

    BUGFIX: the base class was ``collections.MutableSet``, an alias removed in
    Python 3.10; the correct home is ``collections.abc.MutableSet``.
    """

    def __init__(self, iterable=None):
        self.end = end = []
        end += [None, end, end]  # sentinel node for doubly linked list
        self.map = {}  # key --> [key, prev, next]
        if iterable is not None:
            self |= iterable

    def __len__(self):
        return len(self.map)

    def __contains__(self, key):
        return key in self.map

    def add(self, key):
        # Append key before the sentinel (i.e. at the logical end).
        if key not in self.map:
            end = self.end
            curr = end[1]
            curr[2] = end[1] = self.map[key] = [key, curr, end]

    def discard(self, key):
        # Unlink the node; no-op if key is absent.
        if key in self.map:
            key, prev, next = self.map.pop(key)
            prev[2] = next
            next[1] = prev

    def __iter__(self):
        # Walk forward from the sentinel.
        end = self.end
        curr = end[2]
        while curr is not end:
            yield curr[0]
            curr = curr[2]

    def __reversed__(self):
        # Walk backward from the sentinel.
        end = self.end
        curr = end[1]
        while curr is not end:
            yield curr[0]
            curr = curr[1]

    def pop(self, last=True):
        """Remove and return the last (or first, if ``last`` is False) element."""
        if not self:
            raise KeyError('set is empty')
        key = self.end[1][0] if last else self.end[2][0]
        self.discard(key)
        return key

    def __repr__(self):
        if not self:
            return '%s()' % (self.__class__.__name__,)
        return '%s(%r)' % (self.__class__.__name__, list(self))

    def __eq__(self, other):
        # Order matters only when comparing two OrderedSets.
        if isinstance(other, OrderedSet):
            return len(self) == len(other) and list(self) == list(other)
        return set(self) == set(other)
class AttrDict(dict):
    """Dict whose items are also readable/writable as attributes."""

    def __init__(self, *args, **kwargs):
        dict.__init__(self, *args, **kwargs)
        # Aliasing __dict__ to the mapping itself makes attribute access
        # and item access share the same storage.
        self.__dict__ = self

    def __hash__(self):
        # Identity-based hash (dicts are otherwise unhashable).
        return hash(id(self))
class CaptureStdout(list):
    """Context manager that captures stdout lines into this list."""

    def __enter__(self):
        self._stdout = sys.stdout
        self._stringio = StringIO()
        sys.stdout = self._stringio
        return self

    def __exit__(self, *args):
        sys.stdout = self._stdout
        self.extend(self._stringio.getvalue().splitlines())
def pairwise(iterable):
    """s -> (s0, s1), (s1, s2), (s2, s3), ..."""
    first, second = tee(iterable)
    next(second, None)  # advance the second iterator by one
    return zip(first, second)
|
class Solution:
    def letterCombinations(self, digits: str) -> list[str]:
        """Return all letter combinations *digits* could represent on a phone
        keypad (LeetCode 17). An empty input yields an empty list.
        """
        # BUGFIX: the annotation was List[str] but typing.List was never
        # imported, so defining this method raised NameError outside the
        # LeetCode harness; the builtin generic list[str] needs no import.
        if not digits:
            return []
        keypad = {
            "2": "abc",
            "3": "def",
            "4": "ghi",
            "5": "jkl",
            "6": "mno",
            "7": "pqrs",
            "8": "tuv",
            "9": "wxyz",
        }
        combos = []
        path = []

        def backtrack(pos):
            # One recursion frame per digit; emit once every digit has a letter.
            if pos == len(digits):
                combos.append("".join(path))
                return
            for letter in keypad[digits[pos]]:
                path.append(letter)
                backtrack(pos + 1)
                path.pop()

        backtrack(0)
        return combos
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.