file_name large_stringlengths 4 140 | prefix large_stringlengths 0 12.1k | suffix large_stringlengths 0 12k | middle large_stringlengths 0 7.51k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
script.js | 50,
rotating:50,
}
window.timeouts = {
}
window.maps = [
map1,
map2,
map3,
map4,
map5
]
window.state = {
paused:true,
gameStart:true,
crashed:false,
completed:false,
mapIndex:0,
}
///GLOBAL METHODS-----------------------------------------
window.killScreen = function(){
let classList = document.getElementById('screen').classList
classList.add('fadeoutslide')
classList.remove('fadeinslide')
state.paused = false
state.gameStart = false
setTimeout(()=>{
document.getElementById('screen').remove()
},600)
}
window.rotationRatio = function(){
let ratio = (rotationAngle%360)/360
if(ratio < 0 ) ratio=Math.abs(ratio + 1)
return ratio
}
window.PausedTimeout = PausedTimeout
window.pauseTimeouts = function(){
for(let timeout in timeouts){
timeouts[timeout].pauseTimeout()
}
}
window.resumeTimeouts = function(){
for(let timeout in timeouts){
timeouts[timeout].resumeTimeout()
}
}
window.destroyTimeouts = function(){
for(let timeout in timeouts){
timeouts[timeout].stopTimeout()
delete timeouts[timeout]
}
}
window.rotationPercentage = function(){
let ratio
(rotationAngle%360)/360 < 0 ? ratio = Math.abs((rotationAngle%360)/360 + 1) : ratio = (rotationAngle%360)/360
if(ratio >= 0.5) ratio = (1 - ratio)
ratio*=4
if(ratio >1) ratio = 1 - (ratio - 1)
return ratio
}
window.handleCrash = function(){
clearIntervals()
screens({
title:'You crashed!',
content:"Play again?",
button:"Continue",
})
state.paused=true
state.crashed=true
return null
}
//keys for multiple key listeners
let activeKeys={}
let crashed = false
//FUNCTIONS-----------------------------------------
function clearIntervals(){
clearInterval(myIntervals.moving)
myIntervals.moving = null
clearInterval(myIntervals.rotating)
myIntervals.rotating = null
deleteKeys()
return null
}
function deleteKeys(){
for(let key in activeKeys){
delete activeKeys[key]
}
}
function handleVictory(){
clearIntervals()
screens({
title:'Goal reached!',
content:"",
button:"Next Level",
})
state.paused=true
state.completed=true
}
var crashListener = setInterval(()=>{
if( checkCrash() ){
handleCrash()
clearTimeout(crashListener)
}
},50)
function checkVictory(){
const point = document.getElementById('victory-point')
//the values should adjust for what percentage to the side the car has rotated
let ratio = rotationPercentage()
//If the car is fully rotated to the side, tbe difference will be 25 pixels less to top, so 25px should be added.
if(
//from bottom to to p
(yPosition + (25*ratio) ) < (point.offsetTop + point.offsetHeight) &&
(yPosition + (25*ratio)) > point.offsetTop &&
(xPosition + 40) > point.offsetLeft &&
xPosition < (point.offsetLeft + point.offsetWidth)
){
return true
}else{
return false
}
}
function checkCrash(){
if(state.paused) return
let ratio = rotationPercentage()
function checkBoundaries(){
if(
(yPosition + (25 * ratio) ) < 0 | //TOP
((yPosition + mycar.offsetHeight) - (25 * ratio) ) > gameArea.offsetHeight | //BOTTOM
(xPosition - (25 * ratio) ) < 0 | //LEFT
((xPosition + mycar.offsetWidth) + (25 * ratio) ) > gameArea.offsetWidth //RIGHT
){
return true
}
}
const calcPoints = {
center(){
return ((mycar.offsetTop + mycar.offsetHeight)/2 + (mycar.offsetLeft + mycar.offsetWidth)/2)
},
topleft(){
return{
x:1,
y:1
}
}
}
function checkForeignObjects(){
let crashed = false
document.querySelectorAll('[crashable="true"]').forEach(crashable=>{
let foreignRatio, foreignRotation;
if(crashable.style.transform){
//this only works because rotateZ is the only transform applied
foreignRotation = parseInt(crashable.style.transform.match(/[0-9]+/));
//this tests if the foreign object is rotated
(foreignRotation%360)/360 < 0 ? foreignRatio = Math.abs((foreignRotation%360)/360 + 1) : foreignRatio = (foreignRotation%360)/360
if(foreignRatio >= 0.5) foreignRatio = (1 - foreignRatio)
foreignRatio*=4
if(foreignRatio >1) foreignRatio = 1 - (foreignRatio - 1)
}else{
foreignRatio = 0
}
//defines boundaries, adjusts for rotation
let top =(crashable.offsetTop + crashable.offsetHeight)
let bottom = crashable.offsetTop
let left = (crashable.offsetLeft+crashable.offsetWidth)
let right = crashable.offsetLeft
let difference = (crashable.offsetHeight - crashable.offsetWidth) /2
//tests the values
if(
(yPosition + (25 * ratio) ) < top - (difference * foreignRatio) && //INTO BOTTOM
((yPosition + mycar.offsetHeight) - (25 * ratio) ) > bottom + (difference * foreignRatio) && //INTO TOP
(xPosition - (25 * ratio) ) < left + (difference * foreignRatio) && //INTO LEFT
((xPosition + mycar.offsetWidth) + (25 * ratio) ) > right - (difference * foreignRatio) //INTO RIGHT
){
crashed = true
}
})
return crashed
}
if( checkBoundaries() | checkForeignObjects() ) return true
}
function move(isForward) |
if( checkVictory() ) return handleVictory()
mycar.style.top=`${yPosition}px`
mycar.style.left=`${xPosition}px`
},myIntervalValues.moving)
}
//EVENT LISTENERS ---------------------------------------------------
window.initListeners = function(){
document.addEventListener('keypress',e=>{
//WHEN YOU PRESS THE SPACEBAR
if(e.keyCode==32){
//PAUSES GAME
if(!state.paused){
screens({
title:'Paused',
content:'Press space to continue.',
})
state.paused = true
clearIntervals()
pauseTimeouts()
}else{
killScreen()
resumeTimeouts()
//ADDITIONAL OPTIONS IF SPACEBAR IS PRESSED
if(state.crashed){
destroyTimeouts()
state.crashed=false
maps[state.mapIndex].reset()
return crashListener = setInterval(()=>{
if( checkCrash() ){
handleCrash()
clearTimeout(crashListener)
}
},50)
}
if(state.completed){
state.completed = false
maps[state.mapIndex].destroy()
state.mapIndex++
return maps[state.mapIndex].init()
}
}
}
})
//WHEN YOU PRESS ANY OTHER KEY
document.addEventListener('keydown',function handleKeyDown(e){
1
//38: top arrow....39 right arrow..... 40 bottom arrow... 37 left arrow
//16: shift, 32: spacebar
activeKeys[e.keyCode]=e.keyCode
// console.log(e.keyCode)
for(let key in activeKeys){
//toggle headlights
if(key==16){
document.querySelectorAll('#my-car .headlight').forEach(element=>{
if(!element.classList.contains('highbeams-in')){
element.classList.add('highbeams-in')
element.classList.remove('highbeams-out')
}
else{
element.classList.remove('highbeams-in')
element.classList.add('highbeams-out')
}
})
}
//move forward
if(key==38&&!myIntervals.moving){
if(state.paused) return
move(true)
}
| {
myIntervals.moving = setInterval(()=>{
if(state.paused) return
let ratio = (rotationAngle%360)/360
if(ratio < 0 ) ratio=Math.abs(ratio + 1)
let ratio2 = (10 * (ratio*4))
if(ratio2 > 20) ratio2 -= 2*(ratio2 - 20)
let ratio3 = (10 * (ratio*4))
if(ratio3 > 10 && ratio3 < 30) ratio3 -= 2*(ratio3 - 10)
else if(ratio3 >= 30) ratio3 -=40
if(isForward){
yPosition -= (10 - ratio2)
xPosition += ratio3
}else{
yPosition += (10 - ratio2)
xPosition -=ratio3
} | identifier_body |
codon_usage.py |
AA_dict = \
collections.defaultdict(
lambda:collections.defaultdict(list))
# dict key: AA -> codon list
AA_map = {
'Phe':['TTT', 'TTC'],
'Leu':['TTA', 'TTG', 'CTT', 'CTC', 'CTA', 'CTG'],
'Ser':['TCT', 'TCC', 'TCA', 'TCG', 'AGT', 'AGC'],
'Tyr':['TAT', 'TAC'],
'STOP':['TAA', 'TAG', 'TGA'],
'Cys':['TGT', 'TGC'],
'Trp':['TGG'],
'Pro':['CCT', 'CCC', 'CCA', 'CCG'],
'His':['CAT', 'CAC'],
'Gln':['CAA', 'CAG'],
'Arg':['CGT', 'CGC', 'CGA', 'CGG', 'AGA', 'AGG'],
'Ile':['ATT', 'ATC', 'ATA'],
'Met':['ATG'],
'Thr':['ACT', 'ACC', 'ACA', 'ACG'],
'Asn':['AAT', 'AAC'],
'Lys':['AAA', 'AAG'],
'Val':['GTT', 'GTC', 'GTA', 'GTG'],
'Ala':['GCT', 'GCC', 'GCA', 'GCG'],
'Asp':['GAT', 'GAC'],
'Glu':['GAA', 'GAG'],
'Gly':['GGT', 'GGC', 'GGA', 'GGG']
}
# list of codon usage for each gene
usage_dict_list = []
# get codon usage information for each gene
for seq in self.seq:
usage_dict_list.append(self.get_usage_dict(seq))
# get the list of codon usage percentage from each gene
for AA in list(AA_map.keys()):
for codon in AA_map[AA]:
# get codon usage information from each gene
for usage_dict in usage_dict_list:
# append codon usage percentage in the gene
AA_dict[AA][codon].append(
usage_dict[AA][0][codon][1])
return AA_dict
def heatmap_SP_LP(sp_AA_dict, lp_AA_dict, label):
# list of Chi-Square test results
AA_chisquare = []
# AA plotting annotation information
AA_text = []
# list of student's t-test results
codon_ttest = []
# codon plotting annotaion information
codon_text = []
i = 0
j = 0
# number of genes analyzed
count_all = 0
# number of genes that show significant results
count_sig = 0
for AA in list(sp_AA_dict.keys()):
# mean values of codon usage for each AA
sp_codon_mean = []
lp_codon_mean = []
for codon in sp_AA_dict[AA]:
# calculate ttest results
p_val = stats.ttest_ind(sp_AA_dict[AA][codon],
lp_AA_dict[AA][codon],
equal_var = False)[1]
# display eight codons in a row
if not i % 8:
codon_ttest.append([])
codon_text.append([])
i += 1
# handle NULL values
if np.isnan(p_val):
codon_ttest[-1].append(0)
codon_text[-1].append(codon + '\n NA')
# save ttest p-values and annotation information
else:
codon_ttest[-1].append(p_val)
codon_text[-1].append(codon + '\n' + str(round(p_val, 2)))
count_all += 1
if p_val < 0.5:
count_sig += 1
sp_codon_mean.append(np.mean(sp_AA_dict[AA][codon]))
lp_codon_mean.append(np.mean(lp_AA_dict[AA][codon]))
# get Chi-Square test results of each AA
p_val = stats.chisquare(np.array([sp_codon_mean, lp_codon_mean]),
axis = None)[1]
# display six AA in a row
if not j % 6:
AA_chisquare.append([])
AA_text.append([])
j += 1
# handle Null values
if np.isnan(p_val):
AA_chisquare[-1].append(0)
AA_text[-1].append(AA + '\n NA')
# save Chi-Square test p-values and annotation information
else:
AA_chisquare[-1].append(p_val)
AA_text[-1].append(AA + '\n' + str(round(p_val, 2)))
# handle empty cells
for n in range(j % 6, 6):
AA_chisquare[-1].append(0)
AA_text[-1].append('')
# get list of AAs that show significant difference between SP and LP groups
AAs = choose_codons(codon_ttest, codon_text)
AA_chisquare = np.array(AA_chisquare)
codon_ttest = np.array(codon_ttest)
AA_text = np.array(AA_text)
codon_text = np.array(codon_text)
print('%d out of %d codon show significant usage difference \
between SP and LP genes (p_value < 0.5)\n' %
(count_sig, count_all))
plot_heatmap(AA_chisquare, AA_text, 'AAs_ChiSquare', label)
plot_heatmap(codon_ttest, codon_text, 'Codons_ttest', label)
return AAs
def plot_heatmap(data, text, cbarlabel, label):
fig, ax = plt.subplots(nrows = 1, ncols = 1, figsize = (10, 5))
im, cbar = heatmap(data, ax, 'YlGn', cbarlabel)
annotate_heatmap(im, text)
fig.tight_layout()
plt.show
plt.savefig(f'../results/{cbarlabel}_{label}.png')
def heatmap(data, ax, cmap, cbarlabel):
if not ax:
ax = plt.gca()
im = ax.imshow(data, cmap)
cbar = ax.figure.colorbar(im, ax=ax)
ax.set_xticks(np.arange(data.shape[1]))
ax.set_yticks(np.arange(data.shape[0]))
ax.set_xticklabels(range(data.shape[1]))
ax.set_yticklabels(range(data.shape[0]))
ax.tick_params(top=False, bottom=True,
labeltop=False, labelbottom=True)
# draw white space between squares
for edge, spine in ax.spines.items():
spine.set_visible(False)
ax.set_xticks(np.arange(data.shape[1] + 1) - 0.5, minor = True)
ax.set_yticks(np.arange(data.shape[0] + 1) - 0.5, minor = True)
ax.grid(which = 'minor', color = 'w', linestyle = '-', linewidth = 3)
ax.tick_params(which = 'minor', bottom = False, left = False)
cbar.ax.set_ylabel(cbarlabel, va = 'top')
return im, cbar
def annotate_heatmap(im, text_label):
textcolors = ['black','white']
data = im.get_array()
# set threshold to decide color
threshold = im.norm(data.max()) / 2
kw = dict(horizontalalignment = 'center',
verticalalignment = 'center')
for i in range(data.shape[0]):
for j in range(data.shape[1]):
kw.update(color = textcolors[im.norm(data[i,j]) > threshold])
im.axes.text(j, i, text_label[i,j], **kw)
def choose_codons(ttest, text):
# dict key: AA -> codon
# only contains AAs with only two codon choices
codon_map = {
'TTT':'Phe', 'TTC':'Phe', 'TAT':'Tyr', 'TAC':'Tyr',
'TGT':'Cys', 'TGC':'Cys', 'CAT':'His', 'CAC':'His',
'CAA':'Gln', 'CAG':'Gln', 'AAT':'Asn', 'AAC':'Asn',
'AAA':'Lys', 'AAG':'Lys', 'GAT':'Asp', 'GAC':'Asp',
'GAA':'Glu', 'GAG':'Glu'}
codon_dict = collections.defaultdict(list)
for i in range(len(ttest)):
for j in range(len(ttest[i])):
if ttest[i][j] < 0.01:
codon = text[i][j][:3]
if codon in codon_map:
codon_dict[codon_map[codon]].append(codon)
| file = io.open('AAs_to_compare.txt', 'w')
file.write('Compare following AAs\n')
# AAs that have only two codon choices and show significant | random_line_split | |
codon_usage.py | gene_seq = ''
# read a gene sequence line
else:
gene_seq += line.strip()
file.close()
print('%s:\n%d genes added\n%d are non-triple\n'%
(filename[:2],count_all, count_non_triple))
return (all_seq, count_all - count_non_triple)
def get_AA(self, codon):
# dict key: codon -> AA
codon_map = {
'TTT':'Phe', 'TTC':'Phe', 'TTA':'Leu', 'TTG':'Leu',
'TCT':'Ser', 'TCC':'Ser', 'TCA':'Ser', 'TCG':'Ser',
'TAT':'Tyr', 'TAC':'Tyr', 'TAA':'STOP', 'TAG':'STOP',
'TGT':'Cys', 'TGC':'Cys', 'TGA':'STOP', 'TGG':'Trp',
'CTT':'Leu', 'CTC':'Leu', 'CTA':'Leu', 'CTG':'Leu',
'CCT':'Pro', 'CCC':'Pro', 'CCA':'Pro', 'CCG':'Pro',
'CAT':'His', 'CAC':'His', 'CAA':'Gln', 'CAG':'Gln',
'CGT':'Arg', 'CGC':'Arg', 'CGA':'Arg', 'CGG':'Arg',
'ATT':'Ile', 'ATC':'Ile', 'ATA':'Ile', 'ATG':'Met',
'ACT':'Thr', 'ACC':'Thr', 'ACA':'Thr', 'ACG':'Thr',
'AAT':'Asn', 'AAC':'Asn', 'AAA':'Lys', 'AAG':'Lys',
'AGT':'Ser', 'AGC':'Ser', 'AGA':'Arg', 'AGG':'Arg',
'GTT':'Val', 'GTC':'Val', 'GTA':'Val', 'GTG':'Val',
'GCT':'Ala', 'GCC':'Ala', 'GCA':'Ala', 'GCG':'Ala',
'GAT':'Asp', 'GAC':'Asp', 'GAA':'Glu', 'GAG':'Glu',
'GGT':'Gly', 'GGC':'Gly', 'GGA':'Gly', 'GGG':'Gly'}
if codon in codon_map:
return codon_map[codon]
else:
return ''
def get_usage_dict(self, seq):
# usage_dict structure:
# dict key: AA -> [
# dict key: codon ->
# [codon_count,
# codon_count/AA_count]
# AA_count
# ]
usage_dict = \
collections.defaultdict(lambda:
[
collections.defaultdict(
lambda: [0, 0]),
0
])
# save AAs usage information
for index in range(0, len(seq), 3):
codon = seq[index:index+3]
AA = self.get_AA(codon)
if AA:
# count how many times the AA appears
usage_dict[AA][1] += 1
# count how many times the codon is used
usage_dict[AA][0][codon][0] += 1
# calculate the codon usage percentage for an AA
for AA in usage_dict:
for codon in usage_dict[AA][0]:
usage_dict[AA][0][codon][1] = \
usage_dict[AA][0][codon][0]/usage_dict[AA][1]
return usage_dict
def get_AA_dict(self):
# AA_dict structure:
# 1st dict key: AA -> 2nd dict key: codon -> a list of codon usage
# percentage of each gene
AA_dict = \
collections.defaultdict(
lambda:collections.defaultdict(list))
# dict key: AA -> codon list
AA_map = {
'Phe':['TTT', 'TTC'],
'Leu':['TTA', 'TTG', 'CTT', 'CTC', 'CTA', 'CTG'],
'Ser':['TCT', 'TCC', 'TCA', 'TCG', 'AGT', 'AGC'],
'Tyr':['TAT', 'TAC'],
'STOP':['TAA', 'TAG', 'TGA'],
'Cys':['TGT', 'TGC'],
'Trp':['TGG'],
'Pro':['CCT', 'CCC', 'CCA', 'CCG'],
'His':['CAT', 'CAC'],
'Gln':['CAA', 'CAG'],
'Arg':['CGT', 'CGC', 'CGA', 'CGG', 'AGA', 'AGG'],
'Ile':['ATT', 'ATC', 'ATA'],
'Met':['ATG'],
'Thr':['ACT', 'ACC', 'ACA', 'ACG'],
'Asn':['AAT', 'AAC'],
'Lys':['AAA', 'AAG'],
'Val':['GTT', 'GTC', 'GTA', 'GTG'],
'Ala':['GCT', 'GCC', 'GCA', 'GCG'],
'Asp':['GAT', 'GAC'],
'Glu':['GAA', 'GAG'],
'Gly':['GGT', 'GGC', 'GGA', 'GGG']
}
# list of codon usage for each gene
usage_dict_list = []
# get codon usage information for each gene
for seq in self.seq:
usage_dict_list.append(self.get_usage_dict(seq))
# get the list of codon usage percentage from each gene
for AA in list(AA_map.keys()):
for codon in AA_map[AA]:
# get codon usage information from each gene
for usage_dict in usage_dict_list:
# append codon usage percentage in the gene
AA_dict[AA][codon].append(
usage_dict[AA][0][codon][1])
return AA_dict
def heatmap_SP_LP(sp_AA_dict, lp_AA_dict, label):
# list of Chi-Square test results
AA_chisquare = []
# AA plotting annotation information
AA_text = []
# list of student's t-test results
codon_ttest = []
# codon plotting annotaion information
codon_text = []
i = 0
j = 0
# number of genes analyzed
count_all = 0
# number of genes that show significant results
count_sig = 0
for AA in list(sp_AA_dict.keys()):
# mean values of codon usage for each AA
sp_codon_mean = []
lp_codon_mean = []
for codon in sp_AA_dict[AA]:
# calculate ttest results
p_val = stats.ttest_ind(sp_AA_dict[AA][codon],
lp_AA_dict[AA][codon],
equal_var = False)[1]
# display eight codons in a row
if not i % 8:
codon_ttest.append([])
codon_text.append([])
i += 1
# handle NULL values
if np.isnan(p_val):
codon_ttest[-1].append(0)
codon_text[-1].append(codon + '\n NA')
# save ttest p-values and annotation information
else:
codon_ttest[-1].append(p_val)
codon_text[-1].append(codon + '\n' + str(round(p_val, 2)))
count_all += 1
if p_val < 0.5:
count_sig += 1
sp_codon_mean.append(np.mean(sp_AA_dict[AA][codon]))
lp_codon_mean.append(np.mean(lp_AA_dict[AA][codon]))
# get Chi-Square test results of each AA
p_val = stats.chisquare(np.array([sp_codon_mean, lp_codon_mean]),
axis = None)[1]
# display six AA in a row
if not j % 6:
AA_chisquare.append([])
AA_text.append([])
j += 1
# handle Null values
if np.isnan(p_val):
AA_chisquare[-1].append(0)
AA_text[-1].append(AA + '\n NA')
# save Chi-Square test | file = io.open(filename)
# list of selected gene sequences, excluded genes that are non-triple
all_seq = []
gene_seq = ''
count_all = 0
count_non_triple = 0
for line in file:
# read a gene information line
if line[0]=='>':
count_all += 1
# if a gene has been read, then append it to all_seq if the
# sequence is triple
if gene_seq!='':
if len(gene_seq)%3:
count_non_triple += 1
else:
all_seq.append(gene_seq)
| identifier_body | |
codon_usage.py | TAG', 'TGA'],
'Cys':['TGT', 'TGC'],
'Trp':['TGG'],
'Pro':['CCT', 'CCC', 'CCA', 'CCG'],
'His':['CAT', 'CAC'],
'Gln':['CAA', 'CAG'],
'Arg':['CGT', 'CGC', 'CGA', 'CGG', 'AGA', 'AGG'],
'Ile':['ATT', 'ATC', 'ATA'],
'Met':['ATG'],
'Thr':['ACT', 'ACC', 'ACA', 'ACG'],
'Asn':['AAT', 'AAC'],
'Lys':['AAA', 'AAG'],
'Val':['GTT', 'GTC', 'GTA', 'GTG'],
'Ala':['GCT', 'GCC', 'GCA', 'GCG'],
'Asp':['GAT', 'GAC'],
'Glu':['GAA', 'GAG'],
'Gly':['GGT', 'GGC', 'GGA', 'GGG']
}
# list of codon usage for each gene
usage_dict_list = []
# get codon usage information for each gene
for seq in self.seq:
usage_dict_list.append(self.get_usage_dict(seq))
# get the list of codon usage percentage from each gene
for AA in list(AA_map.keys()):
for codon in AA_map[AA]:
# get codon usage information from each gene
for usage_dict in usage_dict_list:
# append codon usage percentage in the gene
AA_dict[AA][codon].append(
usage_dict[AA][0][codon][1])
return AA_dict
def heatmap_SP_LP(sp_AA_dict, lp_AA_dict, label):
# list of Chi-Square test results
AA_chisquare = []
# AA plotting annotation information
AA_text = []
# list of student's t-test results
codon_ttest = []
# codon plotting annotaion information
codon_text = []
i = 0
j = 0
# number of genes analyzed
count_all = 0
# number of genes that show significant results
count_sig = 0
for AA in list(sp_AA_dict.keys()):
# mean values of codon usage for each AA
sp_codon_mean = []
lp_codon_mean = []
for codon in sp_AA_dict[AA]:
# calculate ttest results
p_val = stats.ttest_ind(sp_AA_dict[AA][codon],
lp_AA_dict[AA][codon],
equal_var = False)[1]
# display eight codons in a row
if not i % 8:
codon_ttest.append([])
codon_text.append([])
i += 1
# handle NULL values
if np.isnan(p_val):
codon_ttest[-1].append(0)
codon_text[-1].append(codon + '\n NA')
# save ttest p-values and annotation information
else:
codon_ttest[-1].append(p_val)
codon_text[-1].append(codon + '\n' + str(round(p_val, 2)))
count_all += 1
if p_val < 0.5:
count_sig += 1
sp_codon_mean.append(np.mean(sp_AA_dict[AA][codon]))
lp_codon_mean.append(np.mean(lp_AA_dict[AA][codon]))
# get Chi-Square test results of each AA
p_val = stats.chisquare(np.array([sp_codon_mean, lp_codon_mean]),
axis = None)[1]
# display six AA in a row
if not j % 6:
AA_chisquare.append([])
AA_text.append([])
j += 1
# handle Null values
if np.isnan(p_val):
AA_chisquare[-1].append(0)
AA_text[-1].append(AA + '\n NA')
# save Chi-Square test p-values and annotation information
else:
AA_chisquare[-1].append(p_val)
AA_text[-1].append(AA + '\n' + str(round(p_val, 2)))
# handle empty cells
for n in range(j % 6, 6):
AA_chisquare[-1].append(0)
AA_text[-1].append('')
# get list of AAs that show significant difference between SP and LP groups
AAs = choose_codons(codon_ttest, codon_text)
AA_chisquare = np.array(AA_chisquare)
codon_ttest = np.array(codon_ttest)
AA_text = np.array(AA_text)
codon_text = np.array(codon_text)
print('%d out of %d codon show significant usage difference \
between SP and LP genes (p_value < 0.5)\n' %
(count_sig, count_all))
plot_heatmap(AA_chisquare, AA_text, 'AAs_ChiSquare', label)
plot_heatmap(codon_ttest, codon_text, 'Codons_ttest', label)
return AAs
def plot_heatmap(data, text, cbarlabel, label):
fig, ax = plt.subplots(nrows = 1, ncols = 1, figsize = (10, 5))
im, cbar = heatmap(data, ax, 'YlGn', cbarlabel)
annotate_heatmap(im, text)
fig.tight_layout()
plt.show
plt.savefig(f'../results/{cbarlabel}_{label}.png')
def heatmap(data, ax, cmap, cbarlabel):
if not ax:
ax = plt.gca()
im = ax.imshow(data, cmap)
cbar = ax.figure.colorbar(im, ax=ax)
ax.set_xticks(np.arange(data.shape[1]))
ax.set_yticks(np.arange(data.shape[0]))
ax.set_xticklabels(range(data.shape[1]))
ax.set_yticklabels(range(data.shape[0]))
ax.tick_params(top=False, bottom=True,
labeltop=False, labelbottom=True)
# draw white space between squares
for edge, spine in ax.spines.items():
spine.set_visible(False)
ax.set_xticks(np.arange(data.shape[1] + 1) - 0.5, minor = True)
ax.set_yticks(np.arange(data.shape[0] + 1) - 0.5, minor = True)
ax.grid(which = 'minor', color = 'w', linestyle = '-', linewidth = 3)
ax.tick_params(which = 'minor', bottom = False, left = False)
cbar.ax.set_ylabel(cbarlabel, va = 'top')
return im, cbar
def annotate_heatmap(im, text_label):
textcolors = ['black','white']
data = im.get_array()
# set threshold to decide color
threshold = im.norm(data.max()) / 2
kw = dict(horizontalalignment = 'center',
verticalalignment = 'center')
for i in range(data.shape[0]):
for j in range(data.shape[1]):
kw.update(color = textcolors[im.norm(data[i,j]) > threshold])
im.axes.text(j, i, text_label[i,j], **kw)
def choose_codons(ttest, text):
# dict key: AA -> codon
# only contains AAs with only two codon choices
codon_map = {
'TTT':'Phe', 'TTC':'Phe', 'TAT':'Tyr', 'TAC':'Tyr',
'TGT':'Cys', 'TGC':'Cys', 'CAT':'His', 'CAC':'His',
'CAA':'Gln', 'CAG':'Gln', 'AAT':'Asn', 'AAC':'Asn',
'AAA':'Lys', 'AAG':'Lys', 'GAT':'Asp', 'GAC':'Asp',
'GAA':'Glu', 'GAG':'Glu'}
codon_dict = collections.defaultdict(list)
for i in range(len(ttest)):
for j in range(len(ttest[i])):
if ttest[i][j] < 0.01:
codon = text[i][j][:3]
if codon in codon_map:
codon_dict[codon_map[codon]].append(codon)
file = io.open('AAs_to_compare.txt', 'w')
file.write('Compare following AAs\n')
# AAs that have only two codon choices and show significant
# codon usage difference between SP and LP groups
AAs = []
for AA in codon_dict.keys():
AAs.append(AA)
if len(codon_dict[AA]) == 2:
file.write('%s: %s, %s\n' %
(AA, codon_dict[AA][0], codon_dict[AA][1]))
else:
file.write('%s: %s\n' % (AA, codon_dict[AA][0]))
file.close()
return AAs
def | plot_SP_LP | identifier_name | |
codon_usage.py |
# read a gene sequence line
else:
gene_seq += line.strip()
file.close()
print('%s:\n%d genes added\n%d are non-triple\n'%
(filename[:2],count_all, count_non_triple))
return (all_seq, count_all - count_non_triple)
def get_AA(self, codon):
# dict key: codon -> AA
codon_map = {
'TTT':'Phe', 'TTC':'Phe', 'TTA':'Leu', 'TTG':'Leu',
'TCT':'Ser', 'TCC':'Ser', 'TCA':'Ser', 'TCG':'Ser',
'TAT':'Tyr', 'TAC':'Tyr', 'TAA':'STOP', 'TAG':'STOP',
'TGT':'Cys', 'TGC':'Cys', 'TGA':'STOP', 'TGG':'Trp',
'CTT':'Leu', 'CTC':'Leu', 'CTA':'Leu', 'CTG':'Leu',
'CCT':'Pro', 'CCC':'Pro', 'CCA':'Pro', 'CCG':'Pro',
'CAT':'His', 'CAC':'His', 'CAA':'Gln', 'CAG':'Gln',
'CGT':'Arg', 'CGC':'Arg', 'CGA':'Arg', 'CGG':'Arg',
'ATT':'Ile', 'ATC':'Ile', 'ATA':'Ile', 'ATG':'Met',
'ACT':'Thr', 'ACC':'Thr', 'ACA':'Thr', 'ACG':'Thr',
'AAT':'Asn', 'AAC':'Asn', 'AAA':'Lys', 'AAG':'Lys',
'AGT':'Ser', 'AGC':'Ser', 'AGA':'Arg', 'AGG':'Arg',
'GTT':'Val', 'GTC':'Val', 'GTA':'Val', 'GTG':'Val',
'GCT':'Ala', 'GCC':'Ala', 'GCA':'Ala', 'GCG':'Ala',
'GAT':'Asp', 'GAC':'Asp', 'GAA':'Glu', 'GAG':'Glu',
'GGT':'Gly', 'GGC':'Gly', 'GGA':'Gly', 'GGG':'Gly'}
if codon in codon_map:
return codon_map[codon]
else:
return ''
def get_usage_dict(self, seq):
# usage_dict structure:
# dict key: AA -> [
# dict key: codon ->
# [codon_count,
# codon_count/AA_count]
# AA_count
# ]
usage_dict = \
collections.defaultdict(lambda:
[
collections.defaultdict(
lambda: [0, 0]),
0
])
# save AAs usage information
for index in range(0, len(seq), 3):
codon = seq[index:index+3]
AA = self.get_AA(codon)
if AA:
# count how many times the AA appears
usage_dict[AA][1] += 1
# count how many times the codon is used
usage_dict[AA][0][codon][0] += 1
# calculate the codon usage percentage for an AA
for AA in usage_dict:
for codon in usage_dict[AA][0]:
usage_dict[AA][0][codon][1] = \
usage_dict[AA][0][codon][0]/usage_dict[AA][1]
return usage_dict
def get_AA_dict(self):
# AA_dict structure:
# 1st dict key: AA -> 2nd dict key: codon -> a list of codon usage
# percentage of each gene
AA_dict = \
collections.defaultdict(
lambda:collections.defaultdict(list))
# dict key: AA -> codon list
AA_map = {
'Phe':['TTT', 'TTC'],
'Leu':['TTA', 'TTG', 'CTT', 'CTC', 'CTA', 'CTG'],
'Ser':['TCT', 'TCC', 'TCA', 'TCG', 'AGT', 'AGC'],
'Tyr':['TAT', 'TAC'],
'STOP':['TAA', 'TAG', 'TGA'],
'Cys':['TGT', 'TGC'],
'Trp':['TGG'],
'Pro':['CCT', 'CCC', 'CCA', 'CCG'],
'His':['CAT', 'CAC'],
'Gln':['CAA', 'CAG'],
'Arg':['CGT', 'CGC', 'CGA', 'CGG', 'AGA', 'AGG'],
'Ile':['ATT', 'ATC', 'ATA'],
'Met':['ATG'],
'Thr':['ACT', 'ACC', 'ACA', 'ACG'],
'Asn':['AAT', 'AAC'],
'Lys':['AAA', 'AAG'],
'Val':['GTT', 'GTC', 'GTA', 'GTG'],
'Ala':['GCT', 'GCC', 'GCA', 'GCG'],
'Asp':['GAT', 'GAC'],
'Glu':['GAA', 'GAG'],
'Gly':['GGT', 'GGC', 'GGA', 'GGG']
}
# list of codon usage for each gene
usage_dict_list = []
# get codon usage information for each gene
for seq in self.seq:
usage_dict_list.append(self.get_usage_dict(seq))
# get the list of codon usage percentage from each gene
for AA in list(AA_map.keys()):
for codon in AA_map[AA]:
# get codon usage information from each gene
for usage_dict in usage_dict_list:
# append codon usage percentage in the gene
AA_dict[AA][codon].append(
usage_dict[AA][0][codon][1])
return AA_dict
def heatmap_SP_LP(sp_AA_dict, lp_AA_dict, label):
# list of Chi-Square test results
AA_chisquare = []
# AA plotting annotation information
AA_text = []
# list of student's t-test results
codon_ttest = []
# codon plotting annotaion information
codon_text = []
i = 0
j = 0
# number of genes analyzed
count_all = 0
# number of genes that show significant results
count_sig = 0
for AA in list(sp_AA_dict.keys()):
# mean values of codon usage for each AA
sp_codon_mean = []
lp_codon_mean = []
for codon in sp_AA_dict[AA]:
# calculate ttest results
p_val = stats.ttest_ind(sp_AA_dict[AA][codon],
lp_AA_dict[AA][codon],
equal_var = False)[1]
# display eight codons in a row
if not i % 8:
codon_ttest.append([])
codon_text.append([])
i += 1
# handle NULL values
if np.isnan(p_val):
codon_ttest[-1].append(0)
codon_text[-1].append(codon + '\n NA')
# save ttest p-values and annotation information
else:
codon_ttest[-1].append(p_val)
codon_text[-1].append(codon + '\n' + str(round(p_val, 2)))
count_all += 1
if p_val < 0.5:
count_sig += 1
sp_codon_mean.append(np.mean(sp_AA_dict[AA][codon]))
lp_codon_mean.append(np.mean(lp_AA_dict[AA][codon]))
# get Chi-Square test results of each AA
p_val = stats.chisquare(np.array([sp_codon_mean, lp_codon_mean]),
axis = None)[1]
# display six AA in a row
if not j % 6:
AA_chisquare.append([])
AA_text.append([])
j += 1
# handle Null values
if np.isnan(p_val):
AA_chisquare[-1].append(0)
AA_text[-1].append(AA + '\n NA')
# save Chi-Square test p-values and annotation information
else:
AA_chisquare[-1].append(p_val)
AA_text[-1].append(AA + '\n' + str(round(p_val, 2)))
# handle empty cells
for n in range(j % 6, 6):
AA_chisquare[-1 | count_all += 1
# if a gene has been read, then append it to all_seq if the
# sequence is triple
if gene_seq!='':
if len(gene_seq)%3:
count_non_triple += 1
else:
all_seq.append(gene_seq)
gene_seq = '' | conditional_block | |
game.rs | the next non-zero cell to position i
// and retry this entry until line[i] becomes non-zero
if line[i] == 0 {
line[i] = line[j];
line[j] = 0;
continue;
// otherwise, if the current cell and next cell are the same, merge them
} else if line[i] == line[j] {
if line[i] != 0xF { line[i] += 1 };
line[j] = 0;
}
// finally, move to the next (or current, if i was 0) row
i += 1;
}
// put the new row after merging back together into a "merged" row
let result = (line[0] << 0) |
(line[1] << 4) |
(line[2] << 8) |
(line[3] << 12);
// right and down use normal row and result variables.
// for left and up, we create a reverse of the row and result.
let rev_row = (row >> 12) & 0x000F | (row >> 4) & 0x00F0 | (row << 4) & 0x0F00 | (row << 12) & 0xF000;
let rev_res = (result >> 12) & 0x000F | (result >> 4) & 0x00F0 | (result << 4) & 0x0F00 | (result << 12) & 0xF000;
// results are keyed by row / reverse row index.
let row_idx = row as usize;
let rev_idx = rev_row as usize;
right_moves[row_idx] = row ^ result;
left_moves[rev_idx] = rev_row ^ rev_res;
up_moves[rev_idx] = Moves::column_from(rev_row) ^ Moves::column_from(rev_res);
down_moves[row_idx] = Moves::column_from(row) ^ Moves::column_from(result);
};
Moves { left: left_moves, right: right_moves, down: down_moves, up: up_moves, scores: scores }
};
}
/// Struct used to play a single game of 2048.
///
/// `tfe::Game` uses a single `u64` as board value.
/// The board itself is divided into rows (x4 16 bit "row" per "board") which are
/// divided into tiles (4x 4 bit "nybbles" per "row").
///
/// All manipulations are done using bit-shifts and a precomputed table of moves and scores.
/// Every move is stored as four lookups total, one for each row. The result of XOR'ing each row
/// back into the board at the right position is the output board.
pub struct Game { pub board: u64 }
impl Game {
/// Constructs a new `tfe::Game`.
///
/// `Game` stores a board internally as a `u64`.
///
/// # Examples
///
/// Simple example:
///
/// ```
/// use tfe::Game;
///
/// let mut game = Game::new();
/// # println!("{:016x}", game.board);
/// ```
///
/// Accessing board value:
///
/// ```
/// use tfe::Game;
///
/// let mut game = Game::new();
/// println!("{:016x}", game.board);
/// ```
pub fn new() -> Self {
let mut game = Game { board: 0x0000_0000_0000_0000_u64 };
game.board |= Self::spawn_tile(game.board);
game.board |= Self::spawn_tile(game.board);
game
}
/// Like `new` but takes a closure that accepts two parameters and returns
/// a `Direction`. The parameters passed to the closure:
///
/// - `u64`: The current board
/// - `&Vec<Direction>`: A list of attempted moves that had no effect.
/// Gets cleared when a move succeeds.
///
/// # Examples
///
/// Simple example:
///
/// ```
/// use tfe::{Game, Direction};
///
/// let game = Game::play(|_board, failed| Direction::sample_without(failed));
/// ```
///
/// In this example, the variable `game` will have a value of a single `Game` played to
/// completion. A game is over when it has no moves left. This is true when all possible
/// moves return the same resulting board as before the move was executed.
///
/// The `failed: &Vec<Direction>` will contain **at most** 3 items, when the 4th item is added
/// the game ends automatically without calling the closure again.
pub fn play<F: Fn(u64, &Vec<Direction>) -> Direction>(mv: F) -> Self {
let mut game = Self::new();
let mut attempted: Vec<Direction> = Vec::with_capacity(4);
loop {
let mv = mv(game.board, &attempted);
if !attempted.iter().any(|dir| dir == &mv) {
let result_board = Self::execute(game.board, &mv);
if game.board == result_board {
if attempted.len() == 3 { break }
attempted.push(mv);
} else {
game.board = result_board | Self::spawn_tile(result_board);
attempted.clear();
}
}
}
game
}
/// Returns `board` moved in given `direction`.
///
/// - When `Direction::Left`, return board moved left
/// - When `Direction::Right`, return board moved right
/// - When `Direction::Down`, return board moved down
/// - When `Direction::Up`, return board moved up
///
/// # Examples
///
/// Simple example:
///
/// ```
/// use tfe::{Game, Direction};
///
/// let board = 0x0000_0000_0022_1100;
/// let moved = Game::execute(board, &Direction::Left);
///
/// // | 0 | 0 | 0 | 0 | | 0 | 0 | 0 | 0 |
/// // | 0 | 0 | 0 | 0 | => | 0 | 0 | 0 | 0 |
/// // | 0 | 0 | 4 | 4 | | 8 | 0 | 0 | 0 |
/// // | 2 | 2 | 0 | 0 | | 4 | 0 | 0 | 0 |
///
/// assert_eq!(board, 0x0000_0000_0022_1100);
/// assert_eq!(moved, 0x0000_0000_3000_2000);
/// ```
pub fn execute(board: u64, direction: &Direction) -> u64 {
match direction {
Direction::Left => Self::move_left(board),
Direction::Right => Self::move_right(board),
Direction::Down => Self::move_down(board),
Direction::Up => Self::move_up(board)
}
}
/// Returns a transposed board where rows are transformed into columns and vice versa.
///
/// ```
/// use tfe::Game; | ///
/// // | F | E | D | C | | F | B | 7 | 3 |
/// // | B | A | 9 | 8 | => | E | A | 6 | 2 |
/// // | 7 | 6 | 5 | 4 | | D | 9 | 5 | 1 |
/// // | 3 | 2 | 1 | 0 | | C | 8 | 4 | 0 |
///
/// assert_eq!(Game::transpose(0xFEDC_BA98_7654_3210), 0xFB73_EA62_D951_C840);
/// ```
pub fn transpose(board: u64) -> u64 {
let a1 = board & 0xF0F0_0F0F_F0F0_0F0F_u64;
let a2 = board & 0x0000_F0F0_0000_F0F0_u64;
let a3 = board & 0x0F0F_0000_0F0F_0000_u64;
let a = a1 | (a2 << 12) | (a3 >> 12);
let b1 = a & 0xFF00_FF00_00FF_00FF_u64;
let b2 = a & 0x00FF_00FF_0000_0000_u | random_line_split | |
game.rs | the next non-zero cell to position i
// and retry this entry until line[i] becomes non-zero
if line[i] == 0 {
line[i] = line[j];
line[j] = 0;
continue;
// otherwise, if the current cell and next cell are the same, merge them
} else if line[i] == line[j] {
if line[i] != 0xF { line[i] += 1 };
line[j] = 0;
}
// finally, move to the next (or current, if i was 0) row
i += 1;
}
// put the new row after merging back together into a "merged" row
let result = (line[0] << 0) |
(line[1] << 4) |
(line[2] << 8) |
(line[3] << 12);
// right and down use normal row and result variables.
// for left and up, we create a reverse of the row and result.
let rev_row = (row >> 12) & 0x000F | (row >> 4) & 0x00F0 | (row << 4) & 0x0F00 | (row << 12) & 0xF000;
let rev_res = (result >> 12) & 0x000F | (result >> 4) & 0x00F0 | (result << 4) & 0x0F00 | (result << 12) & 0xF000;
// results are keyed by row / reverse row index.
let row_idx = row as usize;
let rev_idx = rev_row as usize;
right_moves[row_idx] = row ^ result;
left_moves[rev_idx] = rev_row ^ rev_res;
up_moves[rev_idx] = Moves::column_from(rev_row) ^ Moves::column_from(rev_res);
down_moves[row_idx] = Moves::column_from(row) ^ Moves::column_from(result);
};
Moves { left: left_moves, right: right_moves, down: down_moves, up: up_moves, scores: scores }
};
}
/// Struct used to play a single game of 2048.
///
/// `tfe::Game` uses a single `u64` as board value.
/// The board itself is divided into rows (x4 16 bit "row" per "board") which are
/// divided into tiles (4x 4 bit "nybbles" per "row").
///
/// All manipulations are done using bit-shifts and a precomputed table of moves and scores.
/// Every move is stored as four lookups total, one for each row. The result of XOR'ing each row
/// back into the board at the right position is the output board.
pub struct | { pub board: u64 }
impl Game {
/// Constructs a new `tfe::Game`.
///
/// `Game` stores a board internally as a `u64`.
///
/// # Examples
///
/// Simple example:
///
/// ```
/// use tfe::Game;
///
/// let mut game = Game::new();
/// # println!("{:016x}", game.board);
/// ```
///
/// Accessing board value:
///
/// ```
/// use tfe::Game;
///
/// let mut game = Game::new();
/// println!("{:016x}", game.board);
/// ```
pub fn new() -> Self {
let mut game = Game { board: 0x0000_0000_0000_0000_u64 };
game.board |= Self::spawn_tile(game.board);
game.board |= Self::spawn_tile(game.board);
game
}
/// Like `new` but takes a closure that accepts two parameters and returns
/// a `Direction`. The parameters passed to the closure:
///
/// - `u64`: The current board
/// - `&Vec<Direction>`: A list of attempted moves that had no effect.
/// Gets cleared when a move succeeds.
///
/// # Examples
///
/// Simple example:
///
/// ```
/// use tfe::{Game, Direction};
///
/// let game = Game::play(|_board, failed| Direction::sample_without(failed));
/// ```
///
/// In this example, the variable `game` will have a value of a single `Game` played to
/// completion. A game is over when it has no moves left. This is true when all possible
/// moves return the same resulting board as before the move was executed.
///
/// The `failed: &Vec<Direction>` will contain **at most** 3 items, when the 4th item is added
/// the game ends automatically without calling the closure again.
pub fn play<F: Fn(u64, &Vec<Direction>) -> Direction>(mv: F) -> Self {
let mut game = Self::new();
let mut attempted: Vec<Direction> = Vec::with_capacity(4);
loop {
let mv = mv(game.board, &attempted);
if !attempted.iter().any(|dir| dir == &mv) {
let result_board = Self::execute(game.board, &mv);
if game.board == result_board {
if attempted.len() == 3 { break }
attempted.push(mv);
} else {
game.board = result_board | Self::spawn_tile(result_board);
attempted.clear();
}
}
}
game
}
/// Returns `board` moved in given `direction`.
///
/// - When `Direction::Left`, return board moved left
/// - When `Direction::Right`, return board moved right
/// - When `Direction::Down`, return board moved down
/// - When `Direction::Up`, return board moved up
///
/// # Examples
///
/// Simple example:
///
/// ```
/// use tfe::{Game, Direction};
///
/// let board = 0x0000_0000_0022_1100;
/// let moved = Game::execute(board, &Direction::Left);
///
/// // | 0 | 0 | 0 | 0 | | 0 | 0 | 0 | 0 |
/// // | 0 | 0 | 0 | 0 | => | 0 | 0 | 0 | 0 |
/// // | 0 | 0 | 4 | 4 | | 8 | 0 | 0 | 0 |
/// // | 2 | 2 | 0 | 0 | | 4 | 0 | 0 | 0 |
///
/// assert_eq!(board, 0x0000_0000_0022_1100);
/// assert_eq!(moved, 0x0000_0000_3000_2000);
/// ```
pub fn execute(board: u64, direction: &Direction) -> u64 {
match direction {
Direction::Left => Self::move_left(board),
Direction::Right => Self::move_right(board),
Direction::Down => Self::move_down(board),
Direction::Up => Self::move_up(board)
}
}
/// Returns a transposed board where rows are transformed into columns and vice versa.
///
/// ```
/// use tfe::Game;
///
/// // | F | E | D | C | | F | B | 7 | 3 |
/// // | B | A | 9 | 8 | => | E | A | 6 | 2 |
/// // | 7 | 6 | 5 | 4 | | D | 9 | 5 | 1 |
/// // | 3 | 2 | 1 | 0 | | C | 8 | 4 | 0 |
///
/// assert_eq!(Game::transpose(0xFEDC_BA98_7654_3210), 0xFB73_EA62_D951_C840);
/// ```
pub fn transpose(board: u64) -> u64 {
let a1 = board & 0xF0F0_0F0F_F0F0_0F0F_u64;
let a2 = board & 0x0000_F0F0_0000_F0F0_u64;
let a3 = board & 0x0F0F_0000_0F0F_0000_u64;
let a = a1 | (a2 << 12) | (a3 >> 12);
let b1 = a & 0xFF00_FF00_00FF_00FF_u64;
let b2 = a & 0x00FF_00FF_0000_0000 | Game | identifier_name |
svg.go | ))
for idx, pt := range poly.Points {
wall.Verts[idx].X = int(pt.X)
wall.Verts[idx].Y = int(pt.Y)
}
return wall
}
func (poly *svgPolygon) generatePoints() {
vals := strings.FieldsFunc(poly.PointsData, func(r rune) bool {
return (r == ' ' || r == '\t' || r == ',')
})
poly.Points = make([]svgVert, len(vals)/2)
for idx, _ := range poly.Points {
x, _ := strconv.ParseFloat(vals[idx*2], 32)
y, _ := strconv.ParseFloat(vals[idx*2+1], 32)
poly.Points[idx].X = x
poly.Points[idx].Y = y
}
}
type svgPathScanner struct {
Path string
Polygons []svgPolygon
CurrentPolygon *svgPolygon
Cursor svgVert
Mode int
S scanner.Scanner
}
func NewPathScanner(path string, mode int) svgPathScanner {
log.Printf("Scanner path: %s\n", path)
sps := svgPathScanner{Path: path, Mode: mode}
return sps
}
func (sps *svgPathScanner) scanTwoInts() (int, int) {
X := sps.scanOneInt()
sps.scanWhitespace()
Y := sps.scanOneInt()
log.Printf("X: %d Y: %d\n", X, Y)
return X, Y
}
func (sps *svgPathScanner) scanWhitespace() {
for r := sps.S.Peek(); r == ' ' || r == ','; r = sps.S.Peek() {
r = sps.S.Next()
}
}
func (sps *svgPathScanner) scanOneInt() int {
r := sps.S.Scan()
sign := 1
if r == '-' {
sps.S.Scan()
sign = -1
}
X, _ := strconv.ParseFloat(sps.S.TokenText(), 32)
return int(X) * sign
}
func (sps svgPathScanner) GeneratePolygons() ([]svgPolygon, error) {
sps.S.Init(strings.NewReader(sps.Path))
sps.S.Mode = scanner.ScanFloats | scanner.ScanChars
tok := sps.S.Scan()
lastTokenText := ""
for tok != scanner.EOF {
tokenText := sps.S.TokenText()
log.Printf("TT: %s LTT:%s\n", tokenText, lastTokenText)
if !sps.handleToken(tokenText) {
log.Printf("Retry\n")
sps.handleToken(lastTokenText)
}
lastTokenText = tokenText
tok = sps.S.Scan()
}
return sps.Polygons, nil
}
func (sps *svgPathScanner) handleToken(cmd string) bool {
log.Printf("Cmd: %s\n", cmd)
switch cmd {
case "M":
poly := svgPolygon{Mode: sps.Mode}
sps.CurrentPolygon = &poly
x, y := sps.scanTwoInts()
sps.Cursor.X, sps.Cursor.Y = float64(x), float64(y)
sps.CurrentPolygon.Points = append(sps.CurrentPolygon.Points, sps.Cursor)
case "m":
poly := svgPolygon{Mode: sps.Mode}
sps.CurrentPolygon = &poly
X, Y := sps.scanTwoInts()
sps.Cursor.X += float64(X)
sps.Cursor.Y += float64(Y)
sps.CurrentPolygon.Points = append(sps.CurrentPolygon.Points, sps.Cursor)
case "L":
X, Y := sps.scanTwoInts()
sps.Cursor.X, sps.Cursor.Y = float64(X), float64(Y)
sps.CurrentPolygon.Points = append(sps.CurrentPolygon.Points, sps.Cursor)
case "l":
X, Y := sps.scanTwoInts()
sps.Cursor.X += float64(X)
sps.Cursor.Y += float64(Y)
sps.CurrentPolygon.Points = append(sps.CurrentPolygon.Points, sps.Cursor)
case "H":
sps.Cursor.X = float64(sps.scanOneInt())
sps.CurrentPolygon.Points = append(sps.CurrentPolygon.Points, sps.Cursor)
case "h":
sps.Cursor.X += float64(sps.scanOneInt())
sps.CurrentPolygon.Points = append(sps.CurrentPolygon.Points, sps.Cursor)
case "V":
sps.Cursor.Y = float64(sps.scanOneInt())
sps.CurrentPolygon.Points = append(sps.CurrentPolygon.Points, sps.Cursor)
case "v":
sps.Cursor.Y += float64(sps.scanOneInt())
sps.CurrentPolygon.Points = append(sps.CurrentPolygon.Points, sps.Cursor)
case "z":
sps.Polygons = append(sps.Polygons, *sps.CurrentPolygon)
sps.CurrentPolygon = nil
case "Z":
sps.Polygons = append(sps.Polygons, *sps.CurrentPolygon)
sps.CurrentPolygon = nil
default:
return false
}
return true
}
func (path *svgPath) generatePath() {
polys, err := NewPathScanner(path.D, path.mode()).GeneratePolygons()
if err != nil {
log.Printf("Error generating polygons: %v", err)
}
log.Printf("Polys: %+v\n", polys)
path.Polygons = polys
}
func (group *SvgGroup) updateTransform() {
var transformRegex = regexp.MustCompile(`([a-zA-Z]+)\((.*)\)`)
if group.Transform != "" {
matches := transformRegex.FindStringSubmatch(group.Transform)
if matches == nil {
log.Printf("Unknown transform: %s\n", group.Transform)
} else if matches[1] == "translate" {
coords := strings.Split(matches[2], ",")
group.translate.X, _ = strconv.ParseFloat(coords[0], 64)
group.translate.Y, _ = strconv.ParseFloat(coords[1], 64)
} else {
log.Printf("Unknown transform: [%s] in %s\n", matches[1], group.Transform)
}
}
}
func (group *SvgGroup) updatePaths() {
for idx := range group.Paths {
group.Paths[idx].generatePath()
group.Paths[idx].Name = group.Name
}
}
func (group *SvgGroup) updatePolygons() {
for idx := range group.Polygons {
group.Polygons[idx].generatePoints()
}
}
func (group *SvgGroup) process() {
group.updateTransform()
group.updatePaths()
group.updatePolygons()
for idx := range group.Groups {
g := &group.Groups[idx]
g.process()
}
}
func (group *SvgGroup) GetPaths() []svgPath {
paths := make([]svgPath, 0)
if strings.HasPrefix(group.Name, "door_open_") || strings.HasPrefix(group.Name, "door_closed_") || strings.HasPrefix(group.Name, "walls") {
for _, p := range group.Paths {
paths = append(paths, p)
}
}
for _, g := range group.Groups {
paths = append(paths, g.GetPaths()...)
}
for idx, _ := range paths {
p := &paths[idx]
for pIdx, _ := range p.Polygons {
poly := &p.Polygons[pIdx]
for vIdx, _ := range poly.Points {
v := &poly.Points[vIdx]
v.X += group.translate.X
v.Y += group.translate.Y
}
}
}
return paths
}
func geosPolygonToPolygon(poly *geos.Geometry) svgPolygon {
shell, err := poly.Shell()
if err != nil {
log.Fatal(fmt.Errorf("Shell creation error: %+v", err))
}
mergedPoly := svgPolygon{}
coords, err := shell.Coords()
if err != nil {
log.Fatal(fmt.Errorf("Coords error: %+v", err))
}
for _, pt := range coords {
mergedPoly.Points = append(mergedPoly.Points, svgVert{pt.X, pt.Y})
}
return mergedPoly
}
func (group *SvgGroup) MergePolygons() {
if len(group.Polygons) > 0 {
var poly *geos.Geometry
for _, p := range group.Polygons {
if len(p.Points) > 2 {
verts := make([]geos.Coord, 0)
for _, v := range p.Points {
verts = append(verts, geos.NewCoord(v.X, v.Y)) | }
verts = append(verts, geos.NewCoord(p.Points[0].X, p.Points[0].Y))
if poly == nil {
newPoly, err := geos.NewPolygon(verts)
if err != nil {
log.Fatal(fmt.Errorf("New poly creation error: %+v", err))
}
poly = newPoly
} else {
uPoly, _ := geos.NewPolygon(verts)
uPolyType, _ := uPoly.Type()
if uPolyType == geos.POLYGON {
union, err := poly.Union(uPoly)
if err != nil {
log.Printf("Skipping poly: Poly union error: %+v %+v", err, uPoly)
} else {
poly = union
}
} else {
log.Printf(" | random_line_split | |
svg.go |
func (poly *svgPolygon) generatePoints() {
vals := strings.FieldsFunc(poly.PointsData, func(r rune) bool {
return (r == ' ' || r == '\t' || r == ',')
})
poly.Points = make([]svgVert, len(vals)/2)
for idx, _ := range poly.Points {
x, _ := strconv.ParseFloat(vals[idx*2], 32)
y, _ := strconv.ParseFloat(vals[idx*2+1], 32)
poly.Points[idx].X = x
poly.Points[idx].Y = y
}
}
type svgPathScanner struct {
Path string
Polygons []svgPolygon
CurrentPolygon *svgPolygon
Cursor svgVert
Mode int
S scanner.Scanner
}
func NewPathScanner(path string, mode int) svgPathScanner {
log.Printf("Scanner path: %s\n", path)
sps := svgPathScanner{Path: path, Mode: mode}
return sps
}
func (sps *svgPathScanner) scanTwoInts() (int, int) {
X := sps.scanOneInt()
sps.scanWhitespace()
Y := sps.scanOneInt()
log.Printf("X: %d Y: %d\n", X, Y)
return X, Y
}
func (sps *svgPathScanner) scanWhitespace() {
for r := sps.S.Peek(); r == ' ' || r == ','; r = sps.S.Peek() {
r = sps.S.Next()
}
}
func (sps *svgPathScanner) scanOneInt() int {
r := sps.S.Scan()
sign := 1
if r == '-' {
sps.S.Scan()
sign = -1
}
X, _ := strconv.ParseFloat(sps.S.TokenText(), 32)
return int(X) * sign
}
func (sps svgPathScanner) GeneratePolygons() ([]svgPolygon, error) {
sps.S.Init(strings.NewReader(sps.Path))
sps.S.Mode = scanner.ScanFloats | scanner.ScanChars
tok := sps.S.Scan()
lastTokenText := ""
for tok != scanner.EOF {
tokenText := sps.S.TokenText()
log.Printf("TT: %s LTT:%s\n", tokenText, lastTokenText)
if !sps.handleToken(tokenText) {
log.Printf("Retry\n")
sps.handleToken(lastTokenText)
}
lastTokenText = tokenText
tok = sps.S.Scan()
}
return sps.Polygons, nil
}
func (sps *svgPathScanner) handleToken(cmd string) bool {
log.Printf("Cmd: %s\n", cmd)
switch cmd {
case "M":
poly := svgPolygon{Mode: sps.Mode}
sps.CurrentPolygon = &poly
x, y := sps.scanTwoInts()
sps.Cursor.X, sps.Cursor.Y = float64(x), float64(y)
sps.CurrentPolygon.Points = append(sps.CurrentPolygon.Points, sps.Cursor)
case "m":
poly := svgPolygon{Mode: sps.Mode}
sps.CurrentPolygon = &poly
X, Y := sps.scanTwoInts()
sps.Cursor.X += float64(X)
sps.Cursor.Y += float64(Y)
sps.CurrentPolygon.Points = append(sps.CurrentPolygon.Points, sps.Cursor)
case "L":
X, Y := sps.scanTwoInts()
sps.Cursor.X, sps.Cursor.Y = float64(X), float64(Y)
sps.CurrentPolygon.Points = append(sps.CurrentPolygon.Points, sps.Cursor)
case "l":
X, Y := sps.scanTwoInts()
sps.Cursor.X += float64(X)
sps.Cursor.Y += float64(Y)
sps.CurrentPolygon.Points = append(sps.CurrentPolygon.Points, sps.Cursor)
case "H":
sps.Cursor.X = float64(sps.scanOneInt())
sps.CurrentPolygon.Points = append(sps.CurrentPolygon.Points, sps.Cursor)
case "h":
sps.Cursor.X += float64(sps.scanOneInt())
sps.CurrentPolygon.Points = append(sps.CurrentPolygon.Points, sps.Cursor)
case "V":
sps.Cursor.Y = float64(sps.scanOneInt())
sps.CurrentPolygon.Points = append(sps.CurrentPolygon.Points, sps.Cursor)
case "v":
sps.Cursor.Y += float64(sps.scanOneInt())
sps.CurrentPolygon.Points = append(sps.CurrentPolygon.Points, sps.Cursor)
case "z":
sps.Polygons = append(sps.Polygons, *sps.CurrentPolygon)
sps.CurrentPolygon = nil
case "Z":
sps.Polygons = append(sps.Polygons, *sps.CurrentPolygon)
sps.CurrentPolygon = nil
default:
return false
}
return true
}
func (path *svgPath) generatePath() {
polys, err := NewPathScanner(path.D, path.mode()).GeneratePolygons()
if err != nil {
log.Printf("Error generating polygons: %v", err)
}
log.Printf("Polys: %+v\n", polys)
path.Polygons = polys
}
func (group *SvgGroup) updateTransform() {
var transformRegex = regexp.MustCompile(`([a-zA-Z]+)\((.*)\)`)
if group.Transform != "" {
matches := transformRegex.FindStringSubmatch(group.Transform)
if matches == nil {
log.Printf("Unknown transform: %s\n", group.Transform)
} else if matches[1] == "translate" {
coords := strings.Split(matches[2], ",")
group.translate.X, _ = strconv.ParseFloat(coords[0], 64)
group.translate.Y, _ = strconv.ParseFloat(coords[1], 64)
} else {
log.Printf("Unknown transform: [%s] in %s\n", matches[1], group.Transform)
}
}
}
func (group *SvgGroup) updatePaths() {
for idx := range group.Paths {
group.Paths[idx].generatePath()
group.Paths[idx].Name = group.Name
}
}
func (group *SvgGroup) updatePolygons() {
for idx := range group.Polygons {
group.Polygons[idx].generatePoints()
}
}
func (group *SvgGroup) process() {
group.updateTransform()
group.updatePaths()
group.updatePolygons()
for idx := range group.Groups {
g := &group.Groups[idx]
g.process()
}
}
func (group *SvgGroup) GetPaths() []svgPath {
paths := make([]svgPath, 0)
if strings.HasPrefix(group.Name, "door_open_") || strings.HasPrefix(group.Name, "door_closed_") || strings.HasPrefix(group.Name, "walls") {
for _, p := range group.Paths {
paths = append(paths, p)
}
}
for _, g := range group.Groups {
paths = append(paths, g.GetPaths()...)
}
for idx, _ := range paths {
p := &paths[idx]
for pIdx, _ := range p.Polygons {
poly := &p.Polygons[pIdx]
for vIdx, _ := range poly.Points {
v := &poly.Points[vIdx]
v.X += group.translate.X
v.Y += group.translate.Y
}
}
}
return paths
}
func geosPolygonToPolygon(poly *geos.Geometry) svgPolygon {
shell, err := poly.Shell()
if err != nil {
log.Fatal(fmt.Errorf("Shell creation error: %+v", err))
}
mergedPoly := svgPolygon{}
coords, err := shell.Coords()
if err != nil {
log.Fatal(fmt.Errorf("Coords error: %+v", err))
}
for _, pt := range coords {
mergedPoly.Points = append(mergedPoly.Points, svgVert{pt.X, pt.Y})
}
return mergedPoly
}
func (group *SvgGroup) MergePolygons() {
if len(group.Polygons) > 0 {
var poly *geos.Geometry
for _, p := range group.Polygons {
if len(p.Points) > 2 {
verts := make([]geos.Coord, 0)
for _, v := range p.Points {
verts = append(verts, geos.NewCoord(v.X, v.Y))
}
verts = append(verts, geos.NewCoord(p.Points[0].X, p.Points[0].Y))
if poly == nil {
newPoly, err := geos.NewPolygon(verts)
if err != nil {
log.Fatal(fmt.Errorf("New poly creation error: %+v", err))
}
poly = newPoly
} else {
uPoly, _ := geos.NewPolygon(verts)
uPolyType, _ := uPoly.Type()
if uPolyType == geos.POLYGON {
union, err := poly.Union(uPoly)
if err != nil {
log.Printf("Skipping poly: Poly union error: %+v %+v", err | {
wall := jsonWedPolygon{Mode: poly.Mode}
wall.Verts = make([]image.Point, len(poly.Points))
for idx, pt := range poly.Points {
wall.Verts[idx].X = int(pt.X)
wall.Verts[idx].Y = int(pt.Y)
}
return wall
} | identifier_body | |
svg.go | ))
for idx, pt := range poly.Points {
wall.Verts[idx].X = int(pt.X)
wall.Verts[idx].Y = int(pt.Y)
}
return wall
}
func (poly *svgPolygon) generatePoints() {
vals := strings.FieldsFunc(poly.PointsData, func(r rune) bool {
return (r == ' ' || r == '\t' || r == ',')
})
poly.Points = make([]svgVert, len(vals)/2)
for idx, _ := range poly.Points {
x, _ := strconv.ParseFloat(vals[idx*2], 32)
y, _ := strconv.ParseFloat(vals[idx*2+1], 32)
poly.Points[idx].X = x
poly.Points[idx].Y = y
}
}
type svgPathScanner struct {
Path string
Polygons []svgPolygon
CurrentPolygon *svgPolygon
Cursor svgVert
Mode int
S scanner.Scanner
}
func NewPathScanner(path string, mode int) svgPathScanner {
log.Printf("Scanner path: %s\n", path)
sps := svgPathScanner{Path: path, Mode: mode}
return sps
}
func (sps *svgPathScanner) scanTwoInts() (int, int) {
X := sps.scanOneInt()
sps.scanWhitespace()
Y := sps.scanOneInt()
log.Printf("X: %d Y: %d\n", X, Y)
return X, Y
}
func (sps *svgPathScanner) scanWhitespace() {
for r := sps.S.Peek(); r == ' ' || r == ','; r = sps.S.Peek() {
r = sps.S.Next()
}
}
func (sps *svgPathScanner) scanOneInt() int {
r := sps.S.Scan()
sign := 1
if r == '-' {
sps.S.Scan()
sign = -1
}
X, _ := strconv.ParseFloat(sps.S.TokenText(), 32)
return int(X) * sign
}
func (sps svgPathScanner) GeneratePolygons() ([]svgPolygon, error) {
sps.S.Init(strings.NewReader(sps.Path))
sps.S.Mode = scanner.ScanFloats | scanner.ScanChars
tok := sps.S.Scan()
lastTokenText := ""
for tok != scanner.EOF {
tokenText := sps.S.TokenText()
log.Printf("TT: %s LTT:%s\n", tokenText, lastTokenText)
if !sps.handleToken(tokenText) {
log.Printf("Retry\n")
sps.handleToken(lastTokenText)
}
lastTokenText = tokenText
tok = sps.S.Scan()
}
return sps.Polygons, nil
}
func (sps *svgPathScanner) handleToken(cmd string) bool {
log.Printf("Cmd: %s\n", cmd)
switch cmd {
case "M":
poly := svgPolygon{Mode: sps.Mode}
sps.CurrentPolygon = &poly
x, y := sps.scanTwoInts()
sps.Cursor.X, sps.Cursor.Y = float64(x), float64(y)
sps.CurrentPolygon.Points = append(sps.CurrentPolygon.Points, sps.Cursor)
case "m":
poly := svgPolygon{Mode: sps.Mode}
sps.CurrentPolygon = &poly
X, Y := sps.scanTwoInts()
sps.Cursor.X += float64(X)
sps.Cursor.Y += float64(Y)
sps.CurrentPolygon.Points = append(sps.CurrentPolygon.Points, sps.Cursor)
case "L":
X, Y := sps.scanTwoInts()
sps.Cursor.X, sps.Cursor.Y = float64(X), float64(Y)
sps.CurrentPolygon.Points = append(sps.CurrentPolygon.Points, sps.Cursor)
case "l":
X, Y := sps.scanTwoInts()
sps.Cursor.X += float64(X)
sps.Cursor.Y += float64(Y)
sps.CurrentPolygon.Points = append(sps.CurrentPolygon.Points, sps.Cursor)
case "H":
sps.Cursor.X = float64(sps.scanOneInt())
sps.CurrentPolygon.Points = append(sps.CurrentPolygon.Points, sps.Cursor)
case "h":
sps.Cursor.X += float64(sps.scanOneInt())
sps.CurrentPolygon.Points = append(sps.CurrentPolygon.Points, sps.Cursor)
case "V":
sps.Cursor.Y = float64(sps.scanOneInt())
sps.CurrentPolygon.Points = append(sps.CurrentPolygon.Points, sps.Cursor)
case "v":
sps.Cursor.Y += float64(sps.scanOneInt())
sps.CurrentPolygon.Points = append(sps.CurrentPolygon.Points, sps.Cursor)
case "z":
sps.Polygons = append(sps.Polygons, *sps.CurrentPolygon)
sps.CurrentPolygon = nil
case "Z":
sps.Polygons = append(sps.Polygons, *sps.CurrentPolygon)
sps.CurrentPolygon = nil
default:
return false
}
return true
}
func (path *svgPath) generatePath() {
polys, err := NewPathScanner(path.D, path.mode()).GeneratePolygons()
if err != nil {
log.Printf("Error generating polygons: %v", err)
}
log.Printf("Polys: %+v\n", polys)
path.Polygons = polys
}
func (group *SvgGroup) updateTransform() {
var transformRegex = regexp.MustCompile(`([a-zA-Z]+)\((.*)\)`)
if group.Transform != "" {
matches := transformRegex.FindStringSubmatch(group.Transform)
if matches == nil {
log.Printf("Unknown transform: %s\n", group.Transform)
} else if matches[1] == "translate" {
coords := strings.Split(matches[2], ",")
group.translate.X, _ = strconv.ParseFloat(coords[0], 64)
group.translate.Y, _ = strconv.ParseFloat(coords[1], 64)
} else {
log.Printf("Unknown transform: [%s] in %s\n", matches[1], group.Transform)
}
}
}
func (group *SvgGroup) updatePaths() {
for idx := range group.Paths {
group.Paths[idx].generatePath()
group.Paths[idx].Name = group.Name
}
}
func (group *SvgGroup) updatePolygons() {
for idx := range group.Polygons {
group.Polygons[idx].generatePoints()
}
}
func (group *SvgGroup) process() {
group.updateTransform()
group.updatePaths()
group.updatePolygons()
for idx := range group.Groups {
g := &group.Groups[idx]
g.process()
}
}
func (group *SvgGroup) GetPaths() []svgPath {
paths := make([]svgPath, 0)
if strings.HasPrefix(group.Name, "door_open_") || strings.HasPrefix(group.Name, "door_closed_") || strings.HasPrefix(group.Name, "walls") {
for _, p := range group.Paths {
paths = append(paths, p)
}
}
for _, g := range group.Groups {
paths = append(paths, g.GetPaths()...)
}
for idx, _ := range paths {
p := &paths[idx]
for pIdx, _ := range p.Polygons {
poly := &p.Polygons[pIdx]
for vIdx, _ := range poly.Points {
v := &poly.Points[vIdx]
v.X += group.translate.X
v.Y += group.translate.Y
}
}
}
return paths
}
func geosPolygonToPolygon(poly *geos.Geometry) svgPolygon {
shell, err := poly.Shell()
if err != nil {
log.Fatal(fmt.Errorf("Shell creation error: %+v", err))
}
mergedPoly := svgPolygon{}
coords, err := shell.Coords()
if err != nil {
log.Fatal(fmt.Errorf("Coords error: %+v", err))
}
for _, pt := range coords {
mergedPoly.Points = append(mergedPoly.Points, svgVert{pt.X, pt.Y})
}
return mergedPoly
}
func (group *SvgGroup) | () {
if len(group.Polygons) > 0 {
var poly *geos.Geometry
for _, p := range group.Polygons {
if len(p.Points) > 2 {
verts := make([]geos.Coord, 0)
for _, v := range p.Points {
verts = append(verts, geos.NewCoord(v.X, v.Y))
}
verts = append(verts, geos.NewCoord(p.Points[0].X, p.Points[0].Y))
if poly == nil {
newPoly, err := geos.NewPolygon(verts)
if err != nil {
log.Fatal(fmt.Errorf("New poly creation error: %+v", err))
}
poly = newPoly
} else {
uPoly, _ := geos.NewPolygon(verts)
uPolyType, _ := uPoly.Type()
if uPolyType == geos.POLYGON {
union, err := poly.Union(uPoly)
if err != nil {
log.Printf("Skipping poly: Poly union error: %+v %+v", err, uPoly)
} else {
poly = union
}
} else {
log.Printf | MergePolygons | identifier_name |
svg.go | for idx, pt := range poly.Points {
wall.Verts[idx].X = int(pt.X)
wall.Verts[idx].Y = int(pt.Y)
}
return wall
}
func (poly *svgPolygon) generatePoints() {
vals := strings.FieldsFunc(poly.PointsData, func(r rune) bool {
return (r == ' ' || r == '\t' || r == ',')
})
poly.Points = make([]svgVert, len(vals)/2)
for idx, _ := range poly.Points {
x, _ := strconv.ParseFloat(vals[idx*2], 32)
y, _ := strconv.ParseFloat(vals[idx*2+1], 32)
poly.Points[idx].X = x
poly.Points[idx].Y = y
}
}
type svgPathScanner struct {
Path string
Polygons []svgPolygon
CurrentPolygon *svgPolygon
Cursor svgVert
Mode int
S scanner.Scanner
}
func NewPathScanner(path string, mode int) svgPathScanner {
log.Printf("Scanner path: %s\n", path)
sps := svgPathScanner{Path: path, Mode: mode}
return sps
}
func (sps *svgPathScanner) scanTwoInts() (int, int) {
X := sps.scanOneInt()
sps.scanWhitespace()
Y := sps.scanOneInt()
log.Printf("X: %d Y: %d\n", X, Y)
return X, Y
}
func (sps *svgPathScanner) scanWhitespace() {
for r := sps.S.Peek(); r == ' ' || r == ','; r = sps.S.Peek() {
r = sps.S.Next()
}
}
func (sps *svgPathScanner) scanOneInt() int {
r := sps.S.Scan()
sign := 1
if r == '-' {
sps.S.Scan()
sign = -1
}
X, _ := strconv.ParseFloat(sps.S.TokenText(), 32)
return int(X) * sign
}
func (sps svgPathScanner) GeneratePolygons() ([]svgPolygon, error) {
sps.S.Init(strings.NewReader(sps.Path))
sps.S.Mode = scanner.ScanFloats | scanner.ScanChars
tok := sps.S.Scan()
lastTokenText := ""
for tok != scanner.EOF {
tokenText := sps.S.TokenText()
log.Printf("TT: %s LTT:%s\n", tokenText, lastTokenText)
if !sps.handleToken(tokenText) {
log.Printf("Retry\n")
sps.handleToken(lastTokenText)
}
lastTokenText = tokenText
tok = sps.S.Scan()
}
return sps.Polygons, nil
}
func (sps *svgPathScanner) handleToken(cmd string) bool {
log.Printf("Cmd: %s\n", cmd)
switch cmd {
case "M":
poly := svgPolygon{Mode: sps.Mode}
sps.CurrentPolygon = &poly
x, y := sps.scanTwoInts()
sps.Cursor.X, sps.Cursor.Y = float64(x), float64(y)
sps.CurrentPolygon.Points = append(sps.CurrentPolygon.Points, sps.Cursor)
case "m":
poly := svgPolygon{Mode: sps.Mode}
sps.CurrentPolygon = &poly
X, Y := sps.scanTwoInts()
sps.Cursor.X += float64(X)
sps.Cursor.Y += float64(Y)
sps.CurrentPolygon.Points = append(sps.CurrentPolygon.Points, sps.Cursor)
case "L":
X, Y := sps.scanTwoInts()
sps.Cursor.X, sps.Cursor.Y = float64(X), float64(Y)
sps.CurrentPolygon.Points = append(sps.CurrentPolygon.Points, sps.Cursor)
case "l":
X, Y := sps.scanTwoInts()
sps.Cursor.X += float64(X)
sps.Cursor.Y += float64(Y)
sps.CurrentPolygon.Points = append(sps.CurrentPolygon.Points, sps.Cursor)
case "H":
sps.Cursor.X = float64(sps.scanOneInt())
sps.CurrentPolygon.Points = append(sps.CurrentPolygon.Points, sps.Cursor)
case "h":
sps.Cursor.X += float64(sps.scanOneInt())
sps.CurrentPolygon.Points = append(sps.CurrentPolygon.Points, sps.Cursor)
case "V":
sps.Cursor.Y = float64(sps.scanOneInt())
sps.CurrentPolygon.Points = append(sps.CurrentPolygon.Points, sps.Cursor)
case "v":
sps.Cursor.Y += float64(sps.scanOneInt())
sps.CurrentPolygon.Points = append(sps.CurrentPolygon.Points, sps.Cursor)
case "z":
sps.Polygons = append(sps.Polygons, *sps.CurrentPolygon)
sps.CurrentPolygon = nil
case "Z":
sps.Polygons = append(sps.Polygons, *sps.CurrentPolygon)
sps.CurrentPolygon = nil
default:
return false
}
return true
}
func (path *svgPath) generatePath() {
polys, err := NewPathScanner(path.D, path.mode()).GeneratePolygons()
if err != nil {
log.Printf("Error generating polygons: %v", err)
}
log.Printf("Polys: %+v\n", polys)
path.Polygons = polys
}
func (group *SvgGroup) updateTransform() {
var transformRegex = regexp.MustCompile(`([a-zA-Z]+)\((.*)\)`)
if group.Transform != "" {
matches := transformRegex.FindStringSubmatch(group.Transform)
if matches == nil {
log.Printf("Unknown transform: %s\n", group.Transform)
} else if matches[1] == "translate" {
coords := strings.Split(matches[2], ",")
group.translate.X, _ = strconv.ParseFloat(coords[0], 64)
group.translate.Y, _ = strconv.ParseFloat(coords[1], 64)
} else {
log.Printf("Unknown transform: [%s] in %s\n", matches[1], group.Transform)
}
}
}
func (group *SvgGroup) updatePaths() {
for idx := range group.Paths {
group.Paths[idx].generatePath()
group.Paths[idx].Name = group.Name
}
}
func (group *SvgGroup) updatePolygons() {
for idx := range group.Polygons {
group.Polygons[idx].generatePoints()
}
}
func (group *SvgGroup) process() {
group.updateTransform()
group.updatePaths()
group.updatePolygons()
for idx := range group.Groups {
g := &group.Groups[idx]
g.process()
}
}
func (group *SvgGroup) GetPaths() []svgPath {
paths := make([]svgPath, 0)
if strings.HasPrefix(group.Name, "door_open_") || strings.HasPrefix(group.Name, "door_closed_") || strings.HasPrefix(group.Name, "walls") {
for _, p := range group.Paths {
paths = append(paths, p)
}
}
for _, g := range group.Groups {
paths = append(paths, g.GetPaths()...)
}
for idx, _ := range paths |
return paths
}
func geosPolygonToPolygon(poly *geos.Geometry) svgPolygon {
shell, err := poly.Shell()
if err != nil {
log.Fatal(fmt.Errorf("Shell creation error: %+v", err))
}
mergedPoly := svgPolygon{}
coords, err := shell.Coords()
if err != nil {
log.Fatal(fmt.Errorf("Coords error: %+v", err))
}
for _, pt := range coords {
mergedPoly.Points = append(mergedPoly.Points, svgVert{pt.X, pt.Y})
}
return mergedPoly
}
func (group *SvgGroup) MergePolygons() {
if len(group.Polygons) > 0 {
var poly *geos.Geometry
for _, p := range group.Polygons {
if len(p.Points) > 2 {
verts := make([]geos.Coord, 0)
for _, v := range p.Points {
verts = append(verts, geos.NewCoord(v.X, v.Y))
}
verts = append(verts, geos.NewCoord(p.Points[0].X, p.Points[0].Y))
if poly == nil {
newPoly, err := geos.NewPolygon(verts)
if err != nil {
log.Fatal(fmt.Errorf("New poly creation error: %+v", err))
}
poly = newPoly
} else {
uPoly, _ := geos.NewPolygon(verts)
uPolyType, _ := uPoly.Type()
if uPolyType == geos.POLYGON {
union, err := poly.Union(uPoly)
if err != nil {
log.Printf("Skipping poly: Poly union error: %+v %+v", err, uPoly)
} else {
poly = union
}
} else {
log.Printf | {
p := &paths[idx]
for pIdx, _ := range p.Polygons {
poly := &p.Polygons[pIdx]
for vIdx, _ := range poly.Points {
v := &poly.Points[vIdx]
v.X += group.translate.X
v.Y += group.translate.Y
}
}
} | conditional_block |
db_feeds.go | !strings.EqualFold(feedData.Author.Email, "") || !strings.EqualFold(feedData.Author.Name, "") {
var authorID int64
if !AuthorExist(db, feedData.Author.Name, feedData.Author.Email) {
authorID, err = AddAuthor(db, feedData.Author.Name, feedData.Author.Email)
if err != nil {
return err
}
} else {
authorID, err = GetAuthorByNameAndEmail(db, feedData.Author.Name, feedData.Author.Email)
if err != nil {
return err
}
}
//Updating the feed author
err = UpdateFeedAuthor(db, feedID, authorID)
if err != nil {
return err
}
}
}
//Add Episodes
for _, episode := range feedData.Items {
var rssHTML string
if len(episode.Description) > len(episode.Content) {
rssHTML = episode.Description
} else {
rssHTML = episode.Content
}
if EpisodeExist(db, episode.Title) {
//TODO: need to check if this works...
continue
//Continue should skipp to the next loop interations
}
episodeID, err := AddEpisode(db, feedID, episode.Link, episode.Title, episode.PublishedParsed, rssHTML)
if err != nil {
return err
}
//Add media content
media, ok := episode.Extensions["media"]
if ok {
content, ok := media["content"]
if ok {
for i := 0; i < len(content); i++ {
var mediaContent string
url, ok := content[i].Attrs["url"]
if ok {
mediaContent += url
itemType, ok := content[i].Attrs["type"]
if ok {
mediaContent = fmt.Sprintf("%s (type: %s)", mediaContent, itemType)
err = UpdateEpisodeMediaContent(db, episodeID, mediaContent)
if err != nil {
return err
}
}
}
}
}
}
//Add author
if episode.Author != nil {
//If both the name and the email is not blank
if !strings.EqualFold(episode.Author.Email, "") || !strings.EqualFold(episode.Author.Name, "") {
var authorID int64
if !AuthorExist(db, episode.Author.Name, episode.Author.Email) {
authorID, err = AddAuthor(db, episode.Author.Name, episode.Author.Email)
if err != nil {
return err
}
} else {
authorID, err = GetAuthorByNameAndEmail(db, episode.Author.Name, episode.Author.Email)
if err != nil {
return err
}
}
//Updating the episode author
err = UpdateEpisodeAuthor(db, episodeID, authorID)
if err != nil {
return err
}
}
}
}
return
}
//LoadFeed -- Loads a feed from the database
func LoadFeed(db *sql.DB, id int64) (feed *Feed, err error) {
var feedData *gofeed.Feed
url, err := GetFeedURL(db, id)
if err != nil {
log.Fatal(err)
}
title, err := GetFeedTitle(db, id)
if err != nil {
log.Fatal(err)
}
if strings.EqualFold(title, "") {
title = url
}
data, err := GetFeedRawData(db, id)
if err != nil |
if !strings.EqualFold(data, "") {
//Need to convert the data to a gofeed object
feedParser := gofeed.NewParser()
feedData, err = feedParser.Parse(strings.NewReader(data))
if err != nil {
return feed, fmt.Errorf("gofeed parser was unable to parse data: %s -- %s", data, err.Error())
}
}
var tags []string
activeTags := AllActiveFeedTags(db, id)
for _, tag := range activeTags {
tags = append(tags, tag)
}
return &Feed{id, url, title, tags, feedData}, nil
}
//GetFeedAuthor -- returns the feed author
func GetFeedAuthor(db *sql.DB, feedID int64) (name, email string, err error) {
stmt := "SELECT authors.name, authors.email FROM feeds INNER JOIN authors ON authors.id = feeds.author_id WHERE feeds.id = $1"
row := db.QueryRow(stmt, feedID)
err = row.Scan(&name, &email)
return
}
//FeedHasAuthor -- returns true is an author id exists and false otherwise
func FeedHasAuthor(db *sql.DB, feedID int64) (result bool) {
var count int64
row := db.QueryRow("SELECT COUNT(author_id) FROM feeds WHERE id = $1", feedID)
err := row.Scan(&count)
if err != nil {
log.Fatal(err)
}
if count > 0 {
result = true
}
return
}
//GetFeedURL -- returnd the feed's url
func GetFeedURL(db *sql.DB, feedID int64) (url string, err error) {
row := db.QueryRow("SELECT uri FROM feeds WHERE id = $1", feedID)
err = row.Scan(&url)
if err != nil {
return url, fmt.Errorf("Error occured while trying to find the url for feed id (%d): %s", feedID, err.Error())
}
return url, nil
}
//GetFeedAuthorID -- returns the feed's author ID
func GetFeedAuthorID(db *sql.DB, feedID int64) (int64, error) {
var authorID int64
row := db.QueryRow("SELECT author_id FROM feeds WHERE id = $1", feedID)
err := row.Scan(&authorID)
if err != nil {
return authorID, fmt.Errorf("Error occured while trying to find the author_id for feed id (%d): %s", feedID, err.Error())
}
return authorID, nil
}
//UpdateFeedAuthor -- Updates the feed's author
func UpdateFeedAuthor(db *sql.DB, feedID, authorID int64) error {
_, err := db.Exec("UPDATE feeds SET author_id = $1 WHERE id = $2", authorID, feedID)
return err
}
//GetFeedRawData -- returns the feed's raw data
func GetFeedRawData(db *sql.DB, feedID int64) (string, error) {
var rawData string
row := db.QueryRow("SELECT raw_data FROM feeds WHERE id = $1", feedID)
err := row.Scan(&rawData)
if err != nil {
return rawData, fmt.Errorf("Error occured while trying to find the raw_data for feed id (%d): %s", feedID, err.Error())
}
return rawData, nil
}
//UpdateFeedRawData -- Updates the feed's raw data
func UpdateFeedRawData(db *sql.DB, feedID int64, rawData string) error {
_, err := db.Exec("UPDATE feeds SET raw_data = $1 WHERE id = $2", rawData, feedID)
return err
}
//GetFeedTitle -- returns the feed title
func GetFeedTitle(db *sql.DB, feedID int64) (string, error) {
var title string
row := db.QueryRow("SELECT title FROM feeds WHERE id = $1", feedID)
err := row.Scan(&title)
if err != nil {
return title, fmt.Errorf("Error occured while trying to find the feed title for id (%d): %s", feedID, err.Error())
}
return title, nil
}
//UpdateFeedTitle -- Updates the feed title
func UpdateFeedTitle(db *sql.DB, feedID int64, title string) error {
_, err := db.Exec("UPDATE feeds SET title = $1 WHERE id = $2", title, feedID)
return err
}
//GetFeedID -- Given a url or title, it returns the feed id
func GetFeedID(db *sql.DB, item string) (int64, error) {
var id int64
row := db.QueryRow("SELECT id FROM feeds WHERE uri = $1 OR title = $2", item, item)
err := row.Scan(&id)
if err != nil {
return id, fmt.Errorf("Error occured while trying to find the feed id for url/title (%s): %s", item, err.Error())
}
return id, nil
}
//AllActiveFeeds -- Returns all active feeds
func AllActiveFeeds(db *sql.DB) map[int64]string {
var result = make(map[int64]string)
rows, err := db.Query("SELECT id, uri FROM feeds WHERE deleted = 0")
if err != nil {
log.Fatalf("Error happened when trying to get all active feeds: %s", err)
}
defer func() {
if err = rows.Close(); err != nil {
log.Fatalf("Error happened while trying to close a row: %s", err.Error())
}
}()
for rows.Next() {
var id int64
var url string
err := rows.Scan(&id, &url)
if err != nil {
log.Fatalf("Error happened while scanning the rows for the all active feeds function: %s", err.Error())
| {
return feed, fmt.Errorf("No data to retrieve: %s", err.Error())
} | conditional_block |
db_feeds.go |
if !strings.EqualFold(episode.Author.Email, "") || !strings.EqualFold(episode.Author.Name, "") {
var authorID int64
if !AuthorExist(db, episode.Author.Name, episode.Author.Email) {
authorID, err = AddAuthor(db, episode.Author.Name, episode.Author.Email)
if err != nil {
return err
}
} else {
authorID, err = GetAuthorByNameAndEmail(db, episode.Author.Name, episode.Author.Email)
if err != nil {
return err
}
}
//Updating the episode author
err = UpdateEpisodeAuthor(db, episodeID, authorID)
if err != nil {
return err
}
}
}
}
return
}
//LoadFeed -- Loads a feed from the database
func LoadFeed(db *sql.DB, id int64) (feed *Feed, err error) {
var feedData *gofeed.Feed
url, err := GetFeedURL(db, id)
if err != nil {
log.Fatal(err)
}
title, err := GetFeedTitle(db, id)
if err != nil {
log.Fatal(err)
}
if strings.EqualFold(title, "") {
title = url
}
data, err := GetFeedRawData(db, id)
if err != nil {
return feed, fmt.Errorf("No data to retrieve: %s", err.Error())
}
if !strings.EqualFold(data, "") {
//Need to convert the data to a gofeed object
feedParser := gofeed.NewParser()
feedData, err = feedParser.Parse(strings.NewReader(data))
if err != nil {
return feed, fmt.Errorf("gofeed parser was unable to parse data: %s -- %s", data, err.Error())
}
}
var tags []string
activeTags := AllActiveFeedTags(db, id)
for _, tag := range activeTags {
tags = append(tags, tag)
}
return &Feed{id, url, title, tags, feedData}, nil
}
//GetFeedAuthor -- returns the feed author
func GetFeedAuthor(db *sql.DB, feedID int64) (name, email string, err error) {
stmt := "SELECT authors.name, authors.email FROM feeds INNER JOIN authors ON authors.id = feeds.author_id WHERE feeds.id = $1"
row := db.QueryRow(stmt, feedID)
err = row.Scan(&name, &email)
return
}
//FeedHasAuthor -- returns true is an author id exists and false otherwise
func FeedHasAuthor(db *sql.DB, feedID int64) (result bool) {
var count int64
row := db.QueryRow("SELECT COUNT(author_id) FROM feeds WHERE id = $1", feedID)
err := row.Scan(&count)
if err != nil {
log.Fatal(err)
}
if count > 0 {
result = true
}
return
}
//GetFeedURL -- returnd the feed's url
func GetFeedURL(db *sql.DB, feedID int64) (url string, err error) {
row := db.QueryRow("SELECT uri FROM feeds WHERE id = $1", feedID)
err = row.Scan(&url)
if err != nil {
return url, fmt.Errorf("Error occured while trying to find the url for feed id (%d): %s", feedID, err.Error())
}
return url, nil
}
//GetFeedAuthorID -- returns the feed's author ID
func GetFeedAuthorID(db *sql.DB, feedID int64) (int64, error) {
var authorID int64
row := db.QueryRow("SELECT author_id FROM feeds WHERE id = $1", feedID)
err := row.Scan(&authorID)
if err != nil {
return authorID, fmt.Errorf("Error occured while trying to find the author_id for feed id (%d): %s", feedID, err.Error())
}
return authorID, nil
}
//UpdateFeedAuthor -- Updates the feed's author
func UpdateFeedAuthor(db *sql.DB, feedID, authorID int64) error {
_, err := db.Exec("UPDATE feeds SET author_id = $1 WHERE id = $2", authorID, feedID)
return err
}
//GetFeedRawData -- returns the feed's raw data
func GetFeedRawData(db *sql.DB, feedID int64) (string, error) {
var rawData string
row := db.QueryRow("SELECT raw_data FROM feeds WHERE id = $1", feedID)
err := row.Scan(&rawData)
if err != nil {
return rawData, fmt.Errorf("Error occured while trying to find the raw_data for feed id (%d): %s", feedID, err.Error())
}
return rawData, nil
}
//UpdateFeedRawData -- Updates the feed's raw data
func UpdateFeedRawData(db *sql.DB, feedID int64, rawData string) error {
_, err := db.Exec("UPDATE feeds SET raw_data = $1 WHERE id = $2", rawData, feedID)
return err
}
//GetFeedTitle -- returns the feed title
func GetFeedTitle(db *sql.DB, feedID int64) (string, error) {
var title string
row := db.QueryRow("SELECT title FROM feeds WHERE id = $1", feedID)
err := row.Scan(&title)
if err != nil {
return title, fmt.Errorf("Error occured while trying to find the feed title for id (%d): %s", feedID, err.Error())
}
return title, nil
}
//UpdateFeedTitle -- Updates the feed title
func UpdateFeedTitle(db *sql.DB, feedID int64, title string) error {
_, err := db.Exec("UPDATE feeds SET title = $1 WHERE id = $2", title, feedID)
return err
}
//GetFeedID -- Given a url or title, it returns the feed id
func GetFeedID(db *sql.DB, item string) (int64, error) {
var id int64
row := db.QueryRow("SELECT id FROM feeds WHERE uri = $1 OR title = $2", item, item)
err := row.Scan(&id)
if err != nil {
return id, fmt.Errorf("Error occured while trying to find the feed id for url/title (%s): %s", item, err.Error())
}
return id, nil
}
//AllActiveFeeds -- Returns all active feeds
func AllActiveFeeds(db *sql.DB) map[int64]string {
var result = make(map[int64]string)
rows, err := db.Query("SELECT id, uri FROM feeds WHERE deleted = 0")
if err != nil {
log.Fatalf("Error happened when trying to get all active feeds: %s", err)
}
defer func() {
if err = rows.Close(); err != nil {
log.Fatalf("Error happened while trying to close a row: %s", err.Error())
}
}()
for rows.Next() {
var id int64
var url string
err := rows.Scan(&id, &url)
if err != nil {
log.Fatalf("Error happened while scanning the rows for the all active feeds function: %s", err.Error())
}
result[id] = url
}
return result
}
//FilterFeeds -- Takes in a list of feeds and compares them with the feeds listed in the Database.
//Returns all the feeds that are listed as active in the database but where not in the list.
func FilterFeeds(db *sql.DB, feeds map[int64]string) map[int64]string {
var result = make(map[int64]string)
allFeeds := AllActiveFeeds(db)
for dbKey, dbValue := range allFeeds {
found := false
for feedKey, feedValue := range feeds {
if dbKey == feedKey && strings.EqualFold(dbValue, feedValue) {
found = true
break
}
}
if !found {
result[dbKey] = dbValue
}
}
return result
}
//DeleteFeed -- Flips the delete flag on for a feed in the database
func DeleteFeed(db *sql.DB, feedID int64) error {
_, err := db.Exec("UPDATE feeds SET deleted = 1 WHERE id = $1", feedID)
return err
}
//UndeleteFeed -- Flips the delete flag off for a feed in the database
func UndeleteFeed(db *sql.DB, feedID int64) error {
_, err := db.Exec("UPDATE feeds SET deleted = 0 WHERE id = $1", feedID)
return err
}
//IsFeedDeleted -- Checks to see if the feed is currently marked as deleted
func IsFeedDeleted(db *sql.DB, feedID int64) bool {
var result bool
var deleted int64
row := db.QueryRow("SELECT deleted FROM feeds WHERE id = $1", feedID)
err := row.Scan(&deleted)
if err != nil {
if err != sql.ErrNoRows {
log.Fatalf("Feed (%d) does not exist: %s", feedID, err.Error())
} else {
log.Fatalf("Error happened while trying check the value of the delete flag for feed (%d): %s", feedID, err.Error())
}
}
if deleted == 1 {
result = true
} else {
result = false
}
return result
}
//FeedURLExist -- Checks to see if a feed exists
func | FeedURLExist | identifier_name | |
db_feeds.go | !strings.EqualFold(feedData.Author.Email, "") || !strings.EqualFold(feedData.Author.Name, "") {
var authorID int64
if !AuthorExist(db, feedData.Author.Name, feedData.Author.Email) {
authorID, err = AddAuthor(db, feedData.Author.Name, feedData.Author.Email)
if err != nil {
return err
}
} else {
authorID, err = GetAuthorByNameAndEmail(db, feedData.Author.Name, feedData.Author.Email)
if err != nil {
return err
}
}
//Updating the feed author
err = UpdateFeedAuthor(db, feedID, authorID)
if err != nil {
return err
}
}
}
//Add Episodes
for _, episode := range feedData.Items {
var rssHTML string
if len(episode.Description) > len(episode.Content) {
rssHTML = episode.Description
} else {
rssHTML = episode.Content
}
if EpisodeExist(db, episode.Title) {
//TODO: need to check if this works...
continue
//Continue should skipp to the next loop interations
}
episodeID, err := AddEpisode(db, feedID, episode.Link, episode.Title, episode.PublishedParsed, rssHTML)
if err != nil {
return err
}
//Add media content
media, ok := episode.Extensions["media"]
if ok {
content, ok := media["content"]
if ok {
for i := 0; i < len(content); i++ {
var mediaContent string
url, ok := content[i].Attrs["url"]
if ok {
mediaContent += url
itemType, ok := content[i].Attrs["type"]
if ok {
mediaContent = fmt.Sprintf("%s (type: %s)", mediaContent, itemType)
err = UpdateEpisodeMediaContent(db, episodeID, mediaContent)
if err != nil {
return err
}
}
}
}
}
}
//Add author
if episode.Author != nil {
//If both the name and the email is not blank
if !strings.EqualFold(episode.Author.Email, "") || !strings.EqualFold(episode.Author.Name, "") {
var authorID int64
if !AuthorExist(db, episode.Author.Name, episode.Author.Email) {
authorID, err = AddAuthor(db, episode.Author.Name, episode.Author.Email)
if err != nil {
return err
}
} else {
authorID, err = GetAuthorByNameAndEmail(db, episode.Author.Name, episode.Author.Email)
if err != nil {
return err
}
}
//Updating the episode author
err = UpdateEpisodeAuthor(db, episodeID, authorID)
if err != nil {
return err
}
}
}
}
return
}
//LoadFeed -- Loads a feed from the database
func LoadFeed(db *sql.DB, id int64) (feed *Feed, err error) |
if !strings.EqualFold(data, "") {
//Need to convert the data to a gofeed object
feedParser := gofeed.NewParser()
feedData, err = feedParser.Parse(strings.NewReader(data))
if err != nil {
return feed, fmt.Errorf("gofeed parser was unable to parse data: %s -- %s", data, err.Error())
}
}
var tags []string
activeTags := AllActiveFeedTags(db, id)
for _, tag := range activeTags {
tags = append(tags, tag)
}
return &Feed{id, url, title, tags, feedData}, nil
}
//GetFeedAuthor -- returns the feed author
func GetFeedAuthor(db *sql.DB, feedID int64) (name, email string, err error) {
stmt := "SELECT authors.name, authors.email FROM feeds INNER JOIN authors ON authors.id = feeds.author_id WHERE feeds.id = $1"
row := db.QueryRow(stmt, feedID)
err = row.Scan(&name, &email)
return
}
//FeedHasAuthor -- returns true is an author id exists and false otherwise
func FeedHasAuthor(db *sql.DB, feedID int64) (result bool) {
var count int64
row := db.QueryRow("SELECT COUNT(author_id) FROM feeds WHERE id = $1", feedID)
err := row.Scan(&count)
if err != nil {
log.Fatal(err)
}
if count > 0 {
result = true
}
return
}
//GetFeedURL -- returnd the feed's url
func GetFeedURL(db *sql.DB, feedID int64) (url string, err error) {
row := db.QueryRow("SELECT uri FROM feeds WHERE id = $1", feedID)
err = row.Scan(&url)
if err != nil {
return url, fmt.Errorf("Error occured while trying to find the url for feed id (%d): %s", feedID, err.Error())
}
return url, nil
}
//GetFeedAuthorID -- returns the feed's author ID
func GetFeedAuthorID(db *sql.DB, feedID int64) (int64, error) {
var authorID int64
row := db.QueryRow("SELECT author_id FROM feeds WHERE id = $1", feedID)
err := row.Scan(&authorID)
if err != nil {
return authorID, fmt.Errorf("Error occured while trying to find the author_id for feed id (%d): %s", feedID, err.Error())
}
return authorID, nil
}
//UpdateFeedAuthor -- Updates the feed's author
func UpdateFeedAuthor(db *sql.DB, feedID, authorID int64) error {
_, err := db.Exec("UPDATE feeds SET author_id = $1 WHERE id = $2", authorID, feedID)
return err
}
//GetFeedRawData -- returns the feed's raw data
func GetFeedRawData(db *sql.DB, feedID int64) (string, error) {
var rawData string
row := db.QueryRow("SELECT raw_data FROM feeds WHERE id = $1", feedID)
err := row.Scan(&rawData)
if err != nil {
return rawData, fmt.Errorf("Error occured while trying to find the raw_data for feed id (%d): %s", feedID, err.Error())
}
return rawData, nil
}
//UpdateFeedRawData -- Updates the feed's raw data
func UpdateFeedRawData(db *sql.DB, feedID int64, rawData string) error {
_, err := db.Exec("UPDATE feeds SET raw_data = $1 WHERE id = $2", rawData, feedID)
return err
}
//GetFeedTitle -- returns the feed title
func GetFeedTitle(db *sql.DB, feedID int64) (string, error) {
var title string
row := db.QueryRow("SELECT title FROM feeds WHERE id = $1", feedID)
err := row.Scan(&title)
if err != nil {
return title, fmt.Errorf("Error occured while trying to find the feed title for id (%d): %s", feedID, err.Error())
}
return title, nil
}
//UpdateFeedTitle -- Updates the feed title
func UpdateFeedTitle(db *sql.DB, feedID int64, title string) error {
_, err := db.Exec("UPDATE feeds SET title = $1 WHERE id = $2", title, feedID)
return err
}
//GetFeedID -- Given a url or title, it returns the feed id
func GetFeedID(db *sql.DB, item string) (int64, error) {
var id int64
row := db.QueryRow("SELECT id FROM feeds WHERE uri = $1 OR title = $2", item, item)
err := row.Scan(&id)
if err != nil {
return id, fmt.Errorf("Error occured while trying to find the feed id for url/title (%s): %s", item, err.Error())
}
return id, nil
}
//AllActiveFeeds -- Returns all active feeds
func AllActiveFeeds(db *sql.DB) map[int64]string {
var result = make(map[int64]string)
rows, err := db.Query("SELECT id, uri FROM feeds WHERE deleted = 0")
if err != nil {
log.Fatalf("Error happened when trying to get all active feeds: %s", err)
}
defer func() {
if err = rows.Close(); err != nil {
log.Fatalf("Error happened while trying to close a row: %s", err.Error())
}
}()
for rows.Next() {
var id int64
var url string
err := rows.Scan(&id, &url)
if err != nil {
log.Fatalf("Error happened while scanning the rows for the all active feeds function: %s", err.Error())
| {
var feedData *gofeed.Feed
url, err := GetFeedURL(db, id)
if err != nil {
log.Fatal(err)
}
title, err := GetFeedTitle(db, id)
if err != nil {
log.Fatal(err)
}
if strings.EqualFold(title, "") {
title = url
}
data, err := GetFeedRawData(db, id)
if err != nil {
return feed, fmt.Errorf("No data to retrieve: %s", err.Error())
} | identifier_body |
db_feeds.go | )
if err != nil {
log.Fatal(err)
}
title, err := GetFeedTitle(db, id)
if err != nil {
log.Fatal(err)
}
if strings.EqualFold(title, "") {
title = url
}
data, err := GetFeedRawData(db, id)
if err != nil {
return feed, fmt.Errorf("No data to retrieve: %s", err.Error())
}
if !strings.EqualFold(data, "") {
//Need to convert the data to a gofeed object
feedParser := gofeed.NewParser()
feedData, err = feedParser.Parse(strings.NewReader(data))
if err != nil {
return feed, fmt.Errorf("gofeed parser was unable to parse data: %s -- %s", data, err.Error())
}
}
var tags []string
activeTags := AllActiveFeedTags(db, id)
for _, tag := range activeTags {
tags = append(tags, tag)
}
return &Feed{id, url, title, tags, feedData}, nil
}
//GetFeedAuthor -- returns the feed author
func GetFeedAuthor(db *sql.DB, feedID int64) (name, email string, err error) {
stmt := "SELECT authors.name, authors.email FROM feeds INNER JOIN authors ON authors.id = feeds.author_id WHERE feeds.id = $1"
row := db.QueryRow(stmt, feedID)
err = row.Scan(&name, &email)
return
}
//FeedHasAuthor -- returns true is an author id exists and false otherwise
func FeedHasAuthor(db *sql.DB, feedID int64) (result bool) {
var count int64
row := db.QueryRow("SELECT COUNT(author_id) FROM feeds WHERE id = $1", feedID)
err := row.Scan(&count)
if err != nil {
log.Fatal(err)
}
if count > 0 {
result = true
}
return
}
//GetFeedURL -- returnd the feed's url
func GetFeedURL(db *sql.DB, feedID int64) (url string, err error) {
row := db.QueryRow("SELECT uri FROM feeds WHERE id = $1", feedID)
err = row.Scan(&url)
if err != nil {
return url, fmt.Errorf("Error occured while trying to find the url for feed id (%d): %s", feedID, err.Error())
}
return url, nil
}
//GetFeedAuthorID -- returns the feed's author ID
func GetFeedAuthorID(db *sql.DB, feedID int64) (int64, error) {
var authorID int64
row := db.QueryRow("SELECT author_id FROM feeds WHERE id = $1", feedID)
err := row.Scan(&authorID)
if err != nil {
return authorID, fmt.Errorf("Error occured while trying to find the author_id for feed id (%d): %s", feedID, err.Error())
}
return authorID, nil
}
//UpdateFeedAuthor -- Updates the feed's author
func UpdateFeedAuthor(db *sql.DB, feedID, authorID int64) error {
_, err := db.Exec("UPDATE feeds SET author_id = $1 WHERE id = $2", authorID, feedID)
return err
}
//GetFeedRawData -- returns the feed's raw data
func GetFeedRawData(db *sql.DB, feedID int64) (string, error) {
var rawData string
row := db.QueryRow("SELECT raw_data FROM feeds WHERE id = $1", feedID)
err := row.Scan(&rawData)
if err != nil {
return rawData, fmt.Errorf("Error occured while trying to find the raw_data for feed id (%d): %s", feedID, err.Error())
}
return rawData, nil
}
//UpdateFeedRawData -- Updates the feed's raw data
func UpdateFeedRawData(db *sql.DB, feedID int64, rawData string) error {
_, err := db.Exec("UPDATE feeds SET raw_data = $1 WHERE id = $2", rawData, feedID)
return err
}
//GetFeedTitle -- returns the feed title
func GetFeedTitle(db *sql.DB, feedID int64) (string, error) {
var title string
row := db.QueryRow("SELECT title FROM feeds WHERE id = $1", feedID)
err := row.Scan(&title)
if err != nil {
return title, fmt.Errorf("Error occured while trying to find the feed title for id (%d): %s", feedID, err.Error())
}
return title, nil
}
//UpdateFeedTitle -- Updates the feed title
func UpdateFeedTitle(db *sql.DB, feedID int64, title string) error {
_, err := db.Exec("UPDATE feeds SET title = $1 WHERE id = $2", title, feedID)
return err
}
//GetFeedID -- Given a url or title, it returns the feed id
func GetFeedID(db *sql.DB, item string) (int64, error) {
var id int64
row := db.QueryRow("SELECT id FROM feeds WHERE uri = $1 OR title = $2", item, item)
err := row.Scan(&id)
if err != nil {
return id, fmt.Errorf("Error occured while trying to find the feed id for url/title (%s): %s", item, err.Error())
}
return id, nil
}
//AllActiveFeeds -- Returns all active feeds
func AllActiveFeeds(db *sql.DB) map[int64]string {
var result = make(map[int64]string)
rows, err := db.Query("SELECT id, uri FROM feeds WHERE deleted = 0")
if err != nil {
log.Fatalf("Error happened when trying to get all active feeds: %s", err)
}
defer func() {
if err = rows.Close(); err != nil {
log.Fatalf("Error happened while trying to close a row: %s", err.Error())
}
}()
for rows.Next() {
var id int64
var url string
err := rows.Scan(&id, &url)
if err != nil {
log.Fatalf("Error happened while scanning the rows for the all active feeds function: %s", err.Error())
}
result[id] = url
}
return result
}
//FilterFeeds -- Takes in a list of feeds and compares them with the feeds listed in the Database.
//Returns all the feeds that are listed as active in the database but where not in the list.
func FilterFeeds(db *sql.DB, feeds map[int64]string) map[int64]string {
var result = make(map[int64]string)
allFeeds := AllActiveFeeds(db)
for dbKey, dbValue := range allFeeds {
found := false
for feedKey, feedValue := range feeds {
if dbKey == feedKey && strings.EqualFold(dbValue, feedValue) {
found = true
break
}
}
if !found {
result[dbKey] = dbValue
}
}
return result
}
//DeleteFeed -- Flips the delete flag on for a feed in the database
func DeleteFeed(db *sql.DB, feedID int64) error {
_, err := db.Exec("UPDATE feeds SET deleted = 1 WHERE id = $1", feedID)
return err
}
//UndeleteFeed -- Flips the delete flag off for a feed in the database
func UndeleteFeed(db *sql.DB, feedID int64) error {
_, err := db.Exec("UPDATE feeds SET deleted = 0 WHERE id = $1", feedID)
return err
}
//IsFeedDeleted -- Checks to see if the feed is currently marked as deleted
func IsFeedDeleted(db *sql.DB, feedID int64) bool {
var result bool
var deleted int64
row := db.QueryRow("SELECT deleted FROM feeds WHERE id = $1", feedID)
err := row.Scan(&deleted)
if err != nil {
if err != sql.ErrNoRows {
log.Fatalf("Feed (%d) does not exist: %s", feedID, err.Error())
} else {
log.Fatalf("Error happened while trying check the value of the delete flag for feed (%d): %s", feedID, err.Error())
}
}
if deleted == 1 {
result = true
} else {
result = false
}
return result
}
//FeedURLExist -- Checks to see if a feed exists
func FeedURLExist(db *sql.DB, url string) bool {
var id int64
var result bool
row := db.QueryRow("SELECT id FROM feeds WHERE uri = $1", url)
err := row.Scan(&id)
if err != nil {
if err != sql.ErrNoRows {
log.Fatalf("Error happened when trying to check if the feed (%s) exists: %s", url, err.Error())
}
} else {
result = true
}
return result
}
//AddFeedURL -- Adds a feed url to the database
func AddFeedURL(db *sql.DB, url string) (int64, error) {
var result int64
feedStmt := "INSERT INTO feeds (uri) VALUES ($1)"
if FeedURLExist(db, url) {
return result, fmt.Errorf("Feed already exists")
}
dbResult, err := db.Exec(feedStmt, url)
if err != nil {
log.Fatal(err)
}
result, err = dbResult.LastInsertId() | if err != nil {
log.Fatal(err) | random_line_split | |
rtmDiskUsage.js | this.filterUsageText.getValue()) {
this.txnFilterDiskUsage = +this.filterUsageText.getValue();
common.WebEnv.Save(this.envKeyUsageLimit, this.txnFilterDiskUsage);
this.frameRefresh();
}
},
specialkeyEvent: function(me, e) {
if (e.getKey() === e.ENTER && me.oldValue !== me.value) {
if (me.value < 0) {
me.setValue(0);
} else if (me.value > 99) {
me.setValue(99);
}
me.oldValue = me.value;
me.fireEvent('blur', me);
}
},
/**
* 모니터링 서버들의 호스트별로 탭 화면을 구성
*/
createTabPanel: function() {
this.tabPanel = Ext.create('Exem.TabPanel', {
layout: 'fit',
width: '100%',
height: 25,
items: [{
title: common.Util.TR('Total'),
itemId: 'total',
layout: 'fit'
}],
listeners: {
scope: this,
tabchange: function(tabpanel, newcard) {
this.loadingMask.show(null, true);
this.activeTabTitle = newcard.title;
this.frameRefresh();
}
}
});
var hostName;
for (var ix = 0, ixLen = this.displayHostList.length; ix < ixLen ; ix++ ) {
hostName = this.displayHostList[ix];
this.tabPanel.add({
layout: 'fit',
title : hostName,
itemId: hostName
});
}
this.tabPanel.setActiveTab(0);
this.activeTabTitle = this.tabPanel.getActiveTab().title;
},
/**
* Grid 생성
*/
createGrid: function () {
this.diskUsageGrid = Ext.create('Exem.BaseGrid', {
layout : 'fit',
usePager : false,
autoScroll : false,
borderVisible: true,
localeType : 'H:i:s',
columnLines : true,
baseGridCls : 'baseGridRTM',
exportFileName: this.title,
useEmptyText: true,
emptyTextMsg: common.Util.TR('No data to display'),
style: {
'overflow-x': 'hidden'
}
});
this.diskUsageGrid.beginAddColumns();
this.diskUsageGrid.addColumn(common.Util.CTR('Host Name'), 'host_name', 80, Grid.String, true, false);
this.diskUsageGrid.addColumn(common.Util.CTR('Mount Name'), 'mount_name', 75, Grid.String, true, false);
this.diskUsageGrid.addColumn(common.Util.CTR('File System'), 'file_system', 95, Grid.String, true, false);
this.diskUsageGrid.addColumn(common.Util.CTR('Usage(%)'), 'usage', 95, Grid.Float, true, false);
this.diskUsageGrid.addColumn(common.Util.CTR('Use Size(MB)'), 'use_size', 95, Grid.Number, true, false);
this.diskUsageGrid.addColumn(common.Util.CTR('Total Size(MB)'), 'total_size', 95, Grid.Number, true, false);
this.diskUsageGrid.endAddColumns();
this.diskUsageGrid.addRenderer('usage', this.gridBarRenderer.bind(this), RendererType.bar);
this.diskUsageGrid._columnsList[3].on({
scope: this,
resize: function() {
this.progressFillWidth = $('#'+this.id+' .progress-bar').width();
if (this.progressFillWidth) {
$('#'+this.id+' .progress-fill-text').css('width', this.progressFillWidth);
}
}
});
this.diskUsageGrid._columnsList[1].minWidth = 150;
this.diskUsageGrid._columnsList[1].flex = 1;
// 필터 설정 후 다른 탭으로 전환하고 설정된 필터를 해제하면 변경 전 탭에서 표시된 데이터가
// 보여지는 이슈로 인해 필터 설정 시 그리드를 새로 고침하도록 수정.
this.diskUsageGrid.pnlExGrid.on('filterchange', function() {
this.diskUsageGrid.clearRows();
this.frameRefresh();
}.bind(this));
},
/**
* 그리드에 보여지는 막대 그래프 설정.
* value, metaData, record, rowIndex, colIndex, store, view
*
* @param {} value
* @param {} metaData
* @param {} record
* @param {} rowIndex
* @param {} colIndex
* @param {} store
* @param {} view
* @return {}
*/
gridBarRenderer: function() {
var htmlStr;
var value = arguments[0];
if (value !== 0) {
if (!this.progressFillWidth) {
this.progressFillWidth = 83;
}
htmlStr =
'<div class="progress-bar" style="border: 0px solid #666; height:13px; width: 100%;position:relative; text-align:center;">'+
'<div class="progress-fill" style="width:' + value + '%;">'+
'<div class="progress-fill-text" style="width:'+this.progressFillWidth+'px">'+value+'%</div>'+
'</div>'+ value + '%' +
'</div>';
} else {
htmlStr = '<div data-qtip="" style="text-align:center;">'+'0%'+'</div>';
}
return htmlStr;
},
/**
* 데이터 새로고침을 중지.
*/
stopRefreshData: function() {
if (this.refreshTimer) {
clearTimeout(this.refreshTimer);
}
},
/**
* 데이터 새로 고침.
* 새로고침 간격 (1분)
*/
frameRefresh: function() {
this.stopRefreshData();
var isDisplayCmp = Comm.RTComm.isEnableRtmView(this.openViewType);
if (isDisplayCmp || this.floatingLayer) {
this.selectDiskUsage();
}
this.refreshTimer = setTimeout(this.frameRefresh.bind(this), PlotChart.time.exMin * 1);
},
/**
* 디스크 사용량 조회
*/
selectDiskUsage: function() {
var hostName = this.activeTabTitle;
if (common.Util.TR('Total') === hostName) {
hostName = '';
for (var ix = 0, ixLen = this.displayHostList.length; ix < ixLen ; ix++ ) {
hostName += (ix === 0? '\'' : ',\'') + this.displayHostList[ix] + '\'';
}
} else {
hostName = '\'' + hostName + '\'';
}
if (Ext.isEmpty(hostName) === true) {
console.debug('%c [Disk Usage] [WARNING] ', 'color:#800000;background-color:gold;font-weight:bold;', 'No Selected Host Name');
return;
}
WS.SQLExec({
sql_file: 'IMXRT_DiskUsage.sql',
bind : [{
name: 'server_type', value: this.serverType, type: SQLBindType.INTEGER
}],
replace_string: [{
name: 'host_name', value: hostName
}]
}, function(aheader, adata) {
this.loadingMask.hide();
if (adata === null || adata === undefined) {
console.debug('%c [Disk Usage] [WARNING] ', 'color:#800000;background-color:gold;font-weight:bold;', aheader.message);
}
if (this.isClosedDockForm === true) {
return;
}
this.drawData(adata);
aheader = null;
adata = null;
}, this);
},
/**
* 디스크 사용량 데이터 표시
*
* @param {object} adata
*/
drawData: function(adata) {
this.diskUsageGrid.clearRows();
if (this.diskUsageGrid.pnlExGrid.headerCt === undefined ||
this.diskUsageGrid.pnlExGrid.headerCt === null) {
return;
}
var isDownHost;
for (var ix = 0, ixLen = adata.rows.length; ix < ixLen; ix++) {
if (+this.txnFilterDiskUsage <= +adata.rows[ix][2]) {
isDownHost = Comm.RTComm.isDownByHostName(adata.rows[ix][5]);
if (isDownHost === true) {
continue;
}
this.diskUsageGrid.addRow([
adata.rows[ix][5], // host name
adat | a.rows[ix][0], // mount name
adata.rows[ix][1], // file system
adata.rows[ix][2], // ratio
Math.trunc(+adata.rows[ix][3]), // used size
Math.trunc(+adata.rows[ix][4]) // tota size
]);
}
}
if (isDownHost === true) {
this.diskUsageGrid.emptyTextMsg = common.Util.TR('Host Down');
} else {
this.diskUsageGrid.emptyTextMsg = common.Util.TR('No data to display');
}
this.diskUsageGrid.showEmptyText();
this.diskUsageGr | conditional_block | |
rtmDiskUsage.js | FilterDiskUsage = +this.filterUsageText.getValue();
common.WebEnv.Save(this.envKeyUsageLimit, this.txnFilterDiskUsage);
this.frameRefresh();
}
},
specialkeyEvent: function(me, e) {
if (e.getKey() === e.ENTER && me.oldValue !== me.value) {
if (me.value < 0) {
me.setValue(0);
} else if (me.value > 99) {
me.setValue(99);
}
me.oldValue = me.value;
me.fireEvent('blur', me);
}
},
/**
* 모니터링 서버들의 호스트별로 탭 화면을 구성
*/
createTabPanel: function() {
this.tabPanel = Ext.create('Exem.TabPanel', {
layout: 'fit',
width: '100%',
height: 25,
items: [{
title: common.Util.TR('Total'),
itemId: 'total',
layout: 'fit'
}],
listeners: {
scope: this,
tabchange: function(tabpanel, newcard) {
this.loadingMask.show(null, true);
this.activeTabTitle = newcard.title;
this.frameRefresh();
}
}
});
var hostName;
for (var ix = 0, ixLen = this.displayHostList.length; ix < ixLen ; ix++ ) {
hostName = this.displayHostList[ix];
this.tabPanel.add({
layout: 'fit',
title : hostName,
itemId: hostName
});
}
this.tabPanel.setActiveTab(0);
this.activeTabTitle = this.tabPanel.getActiveTab().title;
},
/**
* Grid 생성
*/
createGrid: function () {
this.diskUsageGrid = Ext.create('Exem.BaseGrid', {
layout : 'fit',
usePager : false,
autoScroll : false,
borderVisible: true,
localeType : 'H:i:s',
columnLines : true,
baseGridCls : 'baseGridRTM',
exportFileName: this.title,
useEmptyText: true,
emptyTextMsg: common.Util.TR('No data to display'),
style: {
'overflow-x': 'hidden'
}
});
this.diskUsageGrid.beginAddColumns();
this.diskUsageGrid.addColumn(common.Util.CTR('Host Name'), 'host_name', 80, Grid.String, true, false);
this.diskUsageGrid.addColumn(common.Util.CTR('Mount Name'), 'mount_name', 75, Grid.String, true, false);
this.diskUsageGrid.addColumn(common.Util.CTR('File System'), 'file_system', 95, Grid.String, true, false);
this.diskUsageGrid.addColumn(common.Util.CTR('Usage(%)'), 'usage', 95, Grid.Float, true, false);
this.diskUsageGrid.addColumn(common.Util.CTR('Use Size(MB)'), 'use_size', 95, Grid.Number, true, false);
this.diskUsageGrid.addColumn(common.Util.CTR('Total Size(MB)'), 'total_size', 95, Grid.Number, true, false);
this.diskUsageGrid.endAddColumns();
this.diskUsageGrid.addRenderer('usage', this.gridBarRenderer.bind(this), RendererType.bar);
this.diskUsageGrid._columnsList[3].on({
scope: this,
resize: function() {
this.progressFillWidth = $('#'+this.id+' .progress-bar').width();
if (this.progressFillWidth) {
$('#'+this.id+' .progress-fill-text').css('width', this.progressFillWidth);
}
}
});
this.diskUsageGrid._columnsList[1].minWidth = 150;
this.diskUsageGrid._columnsList[1].flex = 1;
// 필터 설정 후 다른 탭으로 전환하고 설정된 필터를 해제하면 변경 전 탭에서 표시된 데이터가
// 보여지는 이슈로 인해 필터 설정 시 그리드를 새로 고침하도록 수정.
this.diskUsageGrid.pnlExGrid.on('filterchange', function() {
this.diskUsageGrid.clearRows();
this.frameRefresh();
}.bind(this));
},
/**
* 그리드에 보여지는 막대 그래프 설정.
* value, metaData, record, rowIndex, colIndex, store, view
*
* @param {} value
* @param {} metaData
* @param {} record
* @param {} rowIndex
* @param {} colIndex
* @param {} store
* @param {} view
* @return {}
*/
gridBarRenderer: function() {
var htmlStr;
var value = arguments[0];
if (value !== 0) {
if (!this.progressFillWidth) {
this.progressFillWidth = 83;
}
htmlStr =
'<div class="progress-bar" style="border: 0px solid #666; height:13px; width: 100%;position:relative; text-align:center;">'+
'<div class="progress-fill" style="width:' + value + '%;">'+
'<div class="progress-fill-text" style="width:'+this.progressFillWidth+'px">'+value+'%</div>'+
'</div>'+ value + '%' +
'</div>';
} else {
htmlStr = '<div data-qtip="" style="text-align:center;">'+'0%'+'</div>';
}
return htmlStr;
},
/**
* 데이터 새로고침을 중지.
*/
stopRefreshData: function() {
if (this.refreshTimer) {
clearTimeout(this.refreshTimer);
}
},
/**
* 데이터 새로 고침.
* 새로고침 간격 (1분)
*/
frameRefresh: function() {
this.stopRefreshData();
var isDisplayCmp = Comm.RTComm.isEnableRtmView(this.openViewType);
if (isDisplayCmp || this.floatingLayer) {
this.selectDiskUsage();
}
this.refreshTimer = setTimeout(this.frameRefresh.bind(this), PlotChart.time.exMin * 1);
},
/**
* 디스크 사용량 조회
*/
selectDiskUsage: function() {
var hostName = this.activeTabTitle;
if (common.Util.TR('Total') === hostName) {
hostName = '';
for (var ix = 0, ixLen = this.displayHostList.length; ix < ixLen ; ix++ ) {
hostName += (ix === 0? '\'' : ',\'') + this.displayHostList[ix] + '\'';
}
} else {
hostName = '\'' + hostName + '\'';
}
if (Ext.isEmpty(hostName) === true) {
console.debug('%c [Disk Usage] [WARNING] ', 'color:#800000;background-color:gold;font-weight:bold;', 'No Selected Host Name');
return;
}
WS.SQLExec({
sql_file: 'IMXRT_DiskUsage.sql',
bind : [{
name: 'server_type', value: this.serverType, type: SQLBindType.INTEGER
}],
replace_string: [{
name: 'host_name', value: hostName
}]
}, function(aheader, adata) {
this.loadingMask.hide();
if (adata === null || adata === undefined) {
console.debug('%c [Disk Usage] [WARNING] ', 'color:#800000;background-color:gold;font-weight:bold;', aheader.message);
}
if (this.isClosedDockForm === true) {
return;
}
this.drawData(adata);
aheader = null;
adata = null;
}, this);
},
/**
* 디스크 사용량 데이터 표시
*
* @param {object} adata
*/
drawData: function(adata) {
this.diskUsageGrid.clearRows();
if (this.diskUsageGrid.pnlExGrid.headerCt === undefined ||
this.diskUsageGrid.pnlExGrid.headerCt === null) {
return;
}
var isDownHost;
for (var ix = 0, ixLen = adata.rows.length; ix < ixLen; ix++) {
if (+this.txnFilterDiskUsage <= +adata.rows[ix][2]) {
isDownHost = Comm.RTComm.isDownByHostName(adata.rows[ix][5]);
if (isDownHost === true) {
continue;
}
this.diskUsageGrid.addRow([
adata.rows[ix][5], // host name
adata.rows[ix][0], // mount name
adata.rows[ix][1], // file system
adata.rows[ix][2], // ratio
Math.trunc(+adata.rows[ix][3]), // used size
Math.trunc(+adata.rows[ix][4]) // tota size
]);
}
}
if (isDownHost === true) {
this.diskUsageGrid.emptyTextMsg = common.Util.TR('Host Down');
} else {
this.diskUsageGrid.emptyTextMsg = common.Util.TR('No data to display');
}
this.diskUsageGrid.showEmptyText();
this.diskUsageGrid.drawGrid();
adata = null;
}
| }); | random_line_split | |
exif-tags.ts | Method",
0x923A : "CIP3DataFile",
0x923B : "CIP3Sheet",
0x923C : "CIP3Side",
0x923F : "StoNits",
0x927C : "MakerNote",
0x9286 : "UserComment",
0x9290 : "SubSecTime",
0x9291 : "SubSecTimeOriginal",
0x9292 : "SubSecTimeDigitized",
0x932F : "MSDocumentText",
0x9330 : "MSPropertySetStorage",
0x9331 : "MSDocumentTextPosition",
0x935C : "ImageSourceData",
0x9C9B : "XPTitle",
0x9C9C : "XPComment",
0x9C9D : "XPAuthor",
0x9C9E : "XPKeywords",
0x9C9F : "XPSubject",
0xA000 : "FlashpixVersion",
0xA001 : "ColorSpace",
0xA002 : "ExifImageWidth",
0xA003 : "ExifImageHeight",
0xA004 : "RelatedSoundFile",
0xA005 : "InteropOffset",
0xA20B : "FlashEnergy",
0xA20C : "SpatialFrequencyResponse",
0xA20D : "Noise",
0xA20E : "FocalPlaneXResolution",
0xA20F : "FocalPlaneYResolution",
0xA210 : "FocalPlaneResolutionUnit",
0xA211 : "ImageNumber",
0xA212 : "SecurityClassification",
0xA213 : "ImageHistory",
0xA214 : "SubjectLocation",
0xA215 : "ExposureIndex",
0xA216 : "TIFF-EPStandardID",
0xA217 : "SensingMethod",
0xA300 : "FileSource",
0xA301 : "SceneType",
0xA302 : "CFAPattern",
0xA401 : "CustomRendered",
0xA402 : "ExposureMode",
0xA403 : "WhiteBalance",
0xA404 : "DigitalZoomRatio",
0xA405 : "FocalLengthIn35mmFormat",
0xA406 : "SceneCaptureType",
0xA407 : "GainControl",
0xA408 : "Contrast",
0xA409 : "Saturation",
0xA40A : "Sharpness",
0xA40B : "DeviceSettingDescription",
0xA40C : "SubjectDistanceRange",
0xA420 : "ImageUniqueID",
0xA430 : "OwnerName",
0xA431 : "SerialNumber",
0xA432 : "LensInfo",
0xA433 : "LensMake",
0xA434 : "LensModel",
0xA435 : "LensSerialNumber",
0xA480 : "GDALMetadata",
0xA481 : "GDALNoData",
0xA500 : "Gamma",
0xAFC0 : "ExpandSoftware",
0xAFC1 : "ExpandLens",
0xAFC2 : "ExpandFilm",
0xAFC3 : "ExpandFilterLens",
0xAFC4 : "ExpandScanner",
0xAFC5 : "ExpandFlashLamp",
0xBC01 : "PixelFormat",
0xBC02 : "Transformation",
0xBC03 : "Uncompressed",
0xBC04 : "ImageType",
0xBC80 : "ImageWidth",
0xBC81 : "ImageHeight",
0xBC82 : "WidthResolution",
0xBC83 : "HeightResolution",
0xBCC0 : "ImageOffset",
0xBCC1 : "ImageByteCount",
0xBCC2 : "AlphaOffset",
0xBCC3 : "AlphaByteCount",
0xBCC4 : "ImageDataDiscard",
0xBCC5 : "AlphaDataDiscard",
0xC427 : "OceScanjobDesc",
0xC428 : "OceApplicationSelector",
0xC429 : "OceIDNumber",
0xC42A : "OceImageLogic",
0xC44F : "Annotations",
0xC4A5 : "PrintIM",
0xC580 : "USPTOOriginalContentType",
0xC612 : "DNGVersion",
0xC613 : "DNGBackwardVersion",
0xC614 : "UniqueCameraModel",
0xC615 : "LocalizedCameraModel",
0xC616 : "CFAPlaneColor",
0xC617 : "CFALayout",
0xC618 : "LinearizationTable",
0xC619 : "BlackLevelRepeatDim",
0xC61A : "BlackLevel",
0xC61B : "BlackLevelDeltaH",
0xC61C : "BlackLevelDeltaV",
0xC61D : "WhiteLevel",
0xC61E : "DefaultScale",
0xC61F : "DefaultCropOrigin",
0xC620 : "DefaultCropSize",
0xC621 : "ColorMatrix1",
0xC622 : "ColorMatrix2",
0xC623 : "CameraCalibration1",
0xC624 : "CameraCalibration2",
0xC625 : "ReductionMatrix1",
0xC626 : "ReductionMatrix2",
0xC627 : "AnalogBalance",
0xC628 : "AsShotNeutral",
0xC629 : "AsShotWhiteXY",
0xC62A : "BaselineExposure",
0xC62B : "BaselineNoise",
0xC62C : "BaselineSharpness",
0xC62D : "BayerGreenSplit",
0xC62E : "LinearResponseLimit",
0xC62F : "CameraSerialNumber",
0xC630 : "DNGLensInfo",
0xC631 : "ChromaBlurRadius",
0xC632 : "AntiAliasStrength",
0xC633 : "ShadowScale",
0xC634 : "DNGPrivateData",
0xC635 : "MakerNoteSafety",
0xC640 : "RawImageSegmentation",
0xC65A : "CalibrationIlluminant1",
0xC65B : "CalibrationIlluminant2",
0xC65C : "BestQualityScale",
0xC65D : "RawDataUniqueID",
0xC660 : "AliasLayerMetadata",
0xC68B : "OriginalRawFileName",
0xC68C : "OriginalRawFileData",
0xC68D : "ActiveArea",
0xC68E : "MaskedAreas",
0xC68F : "AsShotICCProfile",
0xC690 : "AsShotPreProfileMatrix",
0xC691 : "CurrentICCProfile",
0xC692 : "CurrentPreProfileMatrix",
0xC6BF : "ColorimetricReference",
0xC6D2 : "PanasonicTitle",
0xC6D3 : "PanasonicTitle2",
0xC6F3 : "CameraCalibrationSig",
0xC6F4 : "ProfileCalibrationSig",
0xC6F5 : "ProfileIFD",
0xC6F6 : "AsShotProfileName",
0xC6F7 : "NoiseReductionApplied",
0xC6F8 : "ProfileName",
0xC6F9 : "ProfileHueSatMapDims",
0xC6FA : "ProfileHueSatMapData1",
0xC6FB : "ProfileHueSatMapData2",
0xC6FC : "ProfileToneCurve",
0xC6FD : "ProfileEmbedPolicy",
0xC6FE : "ProfileCopyright",
0xC714 : "ForwardMatrix1", | 0xC715 : "ForwardMatrix2", | random_line_split | |
preprocessing_lpba40.py | import numpy as np
import SimpleITK as sitk
import matplotlib.pyplot as plt
import scipy.stats as stats
DEFAULT_CUTOFF = 0.01, 0.99
STANDARD_RANGE = 0, 100
def resample_image(itk_image, out_spacing=(2.0, 2.0, 2.0), is_label=False):
original_spacing = itk_image.GetSpacing()
original_size = itk_image.GetSize()
out_size = [int(np.round(original_size[0] * (original_spacing[0] / out_spacing[0]))),
int(np.round(original_size[1] * (original_spacing[1] / out_spacing[1]))),
int(np.round(original_size[2] * (original_spacing[2] / out_spacing[2])))]
resample = sitk.ResampleImageFilter()
resample.SetOutputSpacing(out_spacing)
resample.SetSize(out_size)
resample.SetOutputDirection(itk_image.GetDirection())
resample.SetOutputOrigin(itk_image.GetOrigin())
resample.SetTransform(sitk.Transform())
resample.SetDefaultPixelValue(itk_image.GetPixelIDValue())
if is_label:
resample.SetInterpolator(sitk.sitkNearestNeighbor)
else:
resample.SetInterpolator(sitk.sitkBSpline)
return resample.Execute(itk_image)
def center_crop(img, size_ratio):
x, y, z = img.shape
size_ratio_x, size_ratio_y, size_ratio_z = size_ratio
size_x = size_ratio_x
size_y = size_ratio_y
size_z = size_ratio_z
if x < size_x or y < size_y and z < size_z:
raise ValueError
x1 = 0
y1 = int((y - size_y) / 2)
z1 = int((z - size_z) / 2)
img_crop = img[x1: x1 + size_x, y1: y1 + size_y, z1: z1 + size_z]
return img_crop
def _get_percentiles(percentiles_cutoff):
quartiles = np.arange(25, 100, 25).tolist()
deciles = np.arange(10, 100, 10).tolist()
all_percentiles = list(percentiles_cutoff) + quartiles + deciles
percentiles = sorted(set(all_percentiles))
return np.array(percentiles)
def _get_average_mapping(percentiles_database):
"""Map the landmarks of the database to the chosen range.
Args:
percentiles_database: Percentiles database over which to perform the
averaging.
"""
# Assuming percentiles_database.shape == (num_data_points, num_percentiles)
pc1 = percentiles_database[:, 0]
pc2 = percentiles_database[:, -1]
s1, s2 = STANDARD_RANGE
slopes = (s2 - s1) / (pc2 - pc1)
slopes = np.nan_to_num(slopes)
intercepts = np.mean(s1 - slopes * pc1)
num_images = len(percentiles_database)
final_map = slopes.dot(percentiles_database) / num_images + intercepts
return final_map
def _standardize_cutoff(cutoff):
"""Standardize the cutoff values given in the configuration.
Computes percentile landmark normalization by default.
"""
cutoff = np.asarray(cutoff)
cutoff[0] = max(0., cutoff[0])
cutoff[1] = min(1., cutoff[1])
cutoff[0] = np.min([cutoff[0], 0.09])
cutoff[1] = np.max([cutoff[1], 0.91])
return cutoff
def normalize(array, landmarks, mask=None, cutoff=None, epsilon=1e-5):
cutoff_ = DEFAULT_CUTOFF if cutoff is None else cutoff
mapping = landmarks
data = array
shape = data.shape
data = data.reshape(-1).astype(np.float32)
if mask is None:
mask = np.ones_like(data, np.bool)
mask = mask.reshape(-1)
range_to_use = [0, 1, 2, 4, 5, 6, 7, 8, 10, 11, 12]
quantiles_cutoff = _standardize_cutoff(cutoff_)
percentiles_cutoff = 100 * np.array(quantiles_cutoff)
percentiles = _get_percentiles(percentiles_cutoff)
percentile_values = np.percentile(data[mask], percentiles)
# Apply linear histogram standardization
range_mapping = mapping[range_to_use]
range_perc = percentile_values[range_to_use]
diff_mapping = np.diff(range_mapping)
diff_perc = np.diff(range_perc)
# Handling the case where two landmarks are the same
# for a given input image. This usually happens when
# image background is not removed from the image.
diff_perc[diff_perc < epsilon] = np.inf
affine_map = np.zeros([2, len(range_to_use) - 1])
# Compute slopes of the linear models
affine_map[0] = diff_mapping / diff_perc
# Compute intercepts of the linear models
affine_map[1] = range_mapping[:-1] - affine_map[0] * range_perc[:-1]
bin_id = np.digitize(data, range_perc[1:-1], right=False)
lin_img = affine_map[0, bin_id]
aff_img = affine_map[1, bin_id]
new_img = lin_img * data + aff_img
new_img = new_img.reshape(shape)
new_img = new_img.astype(np.float32)
return new_img
def calculate_landmarks(image_path='../datasets/LPBA40/LPBA40_rigidly_registered_pairs'):
quantiles_cutoff = DEFAULT_CUTOFF
percentiles_cutoff = 100 * np.array(quantiles_cutoff)
percentiles_database = []
percentiles = _get_percentiles(percentiles_cutoff)
count = 1
for i in list(range(1, 41, 1)):
for j in list(range(1, 41, 1)):
img_path = os.path.join(image_path, 'l{}_to_l{}.hdr'.format(str(i), str(j)))
atlas_path = os.path.join(
image_path.replace('LPBA40_rigidly_registered_pairs', 'LPBA40_rigidly_registered_label_pairs'),
'l{}_to_l{}.hdr'.format(str(i), str(j)))
if os.path.exists(img_path) and os.path.exists(atlas_path):
img_sitk = sitk.ReadImage(str(img_path))
img_np = sitk.GetArrayFromImage(img_sitk).swapaxes(0, 2)
mask = img_np > 0
percentile_values = np.percentile(img_np[mask], percentiles)
percentiles_database.append(percentile_values)
count += 1
else:
raise FileNotFoundError
percentiles_database = np.vstack(percentiles_database)
mapping = _get_average_mapping(percentiles_database)
print(mapping)
np.save('../datasets/LPBA40/mapping.npy', mapping)
return mapping
def histogram_stardardization_resample_center_crop(mapping,
input_path='../datasets/LPBA40/LPBA40_rigidly_registered_pairs',
output_path_hs_small='../datasets/LPBA40/LPBA40_rigidly_registered_pairs_histogram_standardization_small',
output_path_mask='../datasets/LPBA40/LPBA40_rigidly_registered_label_pairs_small'):
if not os.path.exists(str(output_path_hs_small)):
os.makedirs(str(output_path_hs_small))
if not os.path.exists(str(output_path_mask)):
os.makedirs(str(output_path_mask))
for i in list(range(1, 41, 1)):
for j in list(range(1, 41, 1)):
# ~~~~~~~~~~~~~~~ images ~~~~~~~~~~~~~~~
volpath = os.path.join(input_path, 'l{}_to_l{}.nii'.format(str(i), str(j)))
img_sitk = sitk.ReadImage(str(volpath))
img_np = sitk.GetArrayFromImage(img_sitk).swapaxes(0, 2)
mask = img_np > 0
# 1. histogram_stardardization
img_np_hs = normalize(img_np, mapping, mask)
img_sitk_hs = sitk.GetImageFromArray(img_np_hs.swapaxes(0, 2))
img_sitk_hs.SetSpacing(img_sitk.GetSpacing())
img_sitk_hs.SetDirection(img_sitk.GetDirection())
img_sitk_hs.SetOrigin(img_sitk.GetOrigin())
# 2. resample
img_sitk_hs_small = resample_image(img_sitk_hs)
img_np_hs_small = sitk.GetArrayFromImage(img_sitk_hs_small).swapaxes(0, 2)
# 3. center_crop
img_crop = center_crop(img=img_np_hs_small, size_ratio=(80, 106, 80)).swapaxes(0, 2)
new_img = sitk.GetImageFromArray(img_crop)
new_img.SetSpacing(img_sitk_hs_small.GetSpacing())
new_img.SetDirection(img_sitk_hs_small.GetDirection())
new | import os
import subprocess | random_line_split | |
preprocessing_lpba40.py | is_label:
resample.SetInterpolator(sitk.sitkNearestNeighbor)
else:
resample.SetInterpolator(sitk.sitkBSpline)
return resample.Execute(itk_image)
def center_crop(img, size_ratio):
x, y, z = img.shape
size_ratio_x, size_ratio_y, size_ratio_z = size_ratio
size_x = size_ratio_x
size_y = size_ratio_y
size_z = size_ratio_z
if x < size_x or y < size_y and z < size_z:
raise ValueError
x1 = 0
y1 = int((y - size_y) / 2)
z1 = int((z - size_z) / 2)
img_crop = img[x1: x1 + size_x, y1: y1 + size_y, z1: z1 + size_z]
return img_crop
def _get_percentiles(percentiles_cutoff):
quartiles = np.arange(25, 100, 25).tolist()
deciles = np.arange(10, 100, 10).tolist()
all_percentiles = list(percentiles_cutoff) + quartiles + deciles
percentiles = sorted(set(all_percentiles))
return np.array(percentiles)
def _get_average_mapping(percentiles_database):
"""Map the landmarks of the database to the chosen range.
Args:
percentiles_database: Percentiles database over which to perform the
averaging.
"""
# Assuming percentiles_database.shape == (num_data_points, num_percentiles)
pc1 = percentiles_database[:, 0]
pc2 = percentiles_database[:, -1]
s1, s2 = STANDARD_RANGE
slopes = (s2 - s1) / (pc2 - pc1)
slopes = np.nan_to_num(slopes)
intercepts = np.mean(s1 - slopes * pc1)
num_images = len(percentiles_database)
final_map = slopes.dot(percentiles_database) / num_images + intercepts
return final_map
def _standardize_cutoff(cutoff):
"""Standardize the cutoff values given in the configuration.
Computes percentile landmark normalization by default.
"""
cutoff = np.asarray(cutoff)
cutoff[0] = max(0., cutoff[0])
cutoff[1] = min(1., cutoff[1])
cutoff[0] = np.min([cutoff[0], 0.09])
cutoff[1] = np.max([cutoff[1], 0.91])
return cutoff
def normalize(array, landmarks, mask=None, cutoff=None, epsilon=1e-5):
cutoff_ = DEFAULT_CUTOFF if cutoff is None else cutoff
mapping = landmarks
data = array
shape = data.shape
data = data.reshape(-1).astype(np.float32)
if mask is None:
mask = np.ones_like(data, np.bool)
mask = mask.reshape(-1)
range_to_use = [0, 1, 2, 4, 5, 6, 7, 8, 10, 11, 12]
quantiles_cutoff = _standardize_cutoff(cutoff_)
percentiles_cutoff = 100 * np.array(quantiles_cutoff)
percentiles = _get_percentiles(percentiles_cutoff)
percentile_values = np.percentile(data[mask], percentiles)
# Apply linear histogram standardization
range_mapping = mapping[range_to_use]
range_perc = percentile_values[range_to_use]
diff_mapping = np.diff(range_mapping)
diff_perc = np.diff(range_perc)
# Handling the case where two landmarks are the same
# for a given input image. This usually happens when
# image background is not removed from the image.
diff_perc[diff_perc < epsilon] = np.inf
affine_map = np.zeros([2, len(range_to_use) - 1])
# Compute slopes of the linear models
affine_map[0] = diff_mapping / diff_perc
# Compute intercepts of the linear models
affine_map[1] = range_mapping[:-1] - affine_map[0] * range_perc[:-1]
bin_id = np.digitize(data, range_perc[1:-1], right=False)
lin_img = affine_map[0, bin_id]
aff_img = affine_map[1, bin_id]
new_img = lin_img * data + aff_img
new_img = new_img.reshape(shape)
new_img = new_img.astype(np.float32)
return new_img
def calculate_landmarks(image_path='../datasets/LPBA40/LPBA40_rigidly_registered_pairs'):
quantiles_cutoff = DEFAULT_CUTOFF
percentiles_cutoff = 100 * np.array(quantiles_cutoff)
percentiles_database = []
percentiles = _get_percentiles(percentiles_cutoff)
count = 1
for i in list(range(1, 41, 1)):
for j in list(range(1, 41, 1)):
img_path = os.path.join(image_path, 'l{}_to_l{}.hdr'.format(str(i), str(j)))
atlas_path = os.path.join(
image_path.replace('LPBA40_rigidly_registered_pairs', 'LPBA40_rigidly_registered_label_pairs'),
'l{}_to_l{}.hdr'.format(str(i), str(j)))
if os.path.exists(img_path) and os.path.exists(atlas_path):
img_sitk = sitk.ReadImage(str(img_path))
img_np = sitk.GetArrayFromImage(img_sitk).swapaxes(0, 2)
mask = img_np > 0
percentile_values = np.percentile(img_np[mask], percentiles)
percentiles_database.append(percentile_values)
count += 1
else:
raise FileNotFoundError
percentiles_database = np.vstack(percentiles_database)
mapping = _get_average_mapping(percentiles_database)
print(mapping)
np.save('../datasets/LPBA40/mapping.npy', mapping)
return mapping
def histogram_stardardization_resample_center_crop(mapping,
input_path='../datasets/LPBA40/LPBA40_rigidly_registered_pairs',
output_path_hs_small='../datasets/LPBA40/LPBA40_rigidly_registered_pairs_histogram_standardization_small',
output_path_mask='../datasets/LPBA40/LPBA40_rigidly_registered_label_pairs_small'):
if not os.path.exists(str(output_path_hs_small)):
|
if not os.path.exists(str(output_path_mask)):
os.makedirs(str(output_path_mask))
for i in list(range(1, 41, 1)):
for j in list(range(1, 41, 1)):
# ~~~~~~~~~~~~~~~ images ~~~~~~~~~~~~~~~
volpath = os.path.join(input_path, 'l{}_to_l{}.nii'.format(str(i), str(j)))
img_sitk = sitk.ReadImage(str(volpath))
img_np = sitk.GetArrayFromImage(img_sitk).swapaxes(0, 2)
mask = img_np > 0
# 1. histogram_stardardization
img_np_hs = normalize(img_np, mapping, mask)
img_sitk_hs = sitk.GetImageFromArray(img_np_hs.swapaxes(0, 2))
img_sitk_hs.SetSpacing(img_sitk.GetSpacing())
img_sitk_hs.SetDirection(img_sitk.GetDirection())
img_sitk_hs.SetOrigin(img_sitk.GetOrigin())
# 2. resample
img_sitk_hs_small = resample_image(img_sitk_hs)
img_np_hs_small = sitk.GetArrayFromImage(img_sitk_hs_small).swapaxes(0, 2)
# 3. center_crop
img_crop = center_crop(img=img_np_hs_small, size_ratio=(80, 106, 80)).swapaxes(0, 2)
new_img = sitk.GetImageFromArray(img_crop)
new_img.SetSpacing(img_sitk_hs_small.GetSpacing())
new_img.SetDirection(img_sitk_hs_small.GetDirection())
new_img.SetOrigin(img_sitk_hs_small.GetOrigin())
output_path_hs_small_img = os.path.join(output_path_hs_small, 'l{}_to_l{}.nii'.format(str(i), str(j)))
sitk.WriteImage(new_img, str(output_path_hs_small_img))
# ~~~~~~~~~~~~~~~ masks ~~~~~~~~~~~~~~~
atlas_path = volpath.replace('LPBA40_rigidly_registered_pairs', 'LPBA40_rigidly_registered_label_pairs')
atlas = sitk.ReadImage(str(atlas_path))
# 1. resample
atlas_resampled = resample_image(atlas, is_label=True)
atlas_np = sitk.GetArrayFromImage(atlas_resampled).swapaxes(0, 2)
# 2. center_crop
atlas_crop = center_crop(img=atlas_np, size_ratio=(80, 106, 80)).swapaxes(0, 2)
new_atlas = sitk.GetImageFromArray(atlas_crop)
new_atlas.SetSpacing(atlas_resampled.GetSpacing())
new_atlas.SetDirection(atlas_resampled.GetDirection())
new_atlas.SetOrigin(atlas_resampled.GetOrigin())
output | os.makedirs(str(output_path_hs_small)) | conditional_block |
preprocessing_lpba40.py | is_label:
resample.SetInterpolator(sitk.sitkNearestNeighbor)
else:
resample.SetInterpolator(sitk.sitkBSpline)
return resample.Execute(itk_image)
def center_crop(img, size_ratio):
x, y, z = img.shape
size_ratio_x, size_ratio_y, size_ratio_z = size_ratio
size_x = size_ratio_x
size_y = size_ratio_y
size_z = size_ratio_z
if x < size_x or y < size_y and z < size_z:
raise ValueError
x1 = 0
y1 = int((y - size_y) / 2)
z1 = int((z - size_z) / 2)
img_crop = img[x1: x1 + size_x, y1: y1 + size_y, z1: z1 + size_z]
return img_crop
def _get_percentiles(percentiles_cutoff):
|
def _get_average_mapping(percentiles_database):
"""Map the landmarks of the database to the chosen range.
Args:
percentiles_database: Percentiles database over which to perform the
averaging.
"""
# Assuming percentiles_database.shape == (num_data_points, num_percentiles)
pc1 = percentiles_database[:, 0]
pc2 = percentiles_database[:, -1]
s1, s2 = STANDARD_RANGE
slopes = (s2 - s1) / (pc2 - pc1)
slopes = np.nan_to_num(slopes)
intercepts = np.mean(s1 - slopes * pc1)
num_images = len(percentiles_database)
final_map = slopes.dot(percentiles_database) / num_images + intercepts
return final_map
def _standardize_cutoff(cutoff):
"""Standardize the cutoff values given in the configuration.
Computes percentile landmark normalization by default.
"""
cutoff = np.asarray(cutoff)
cutoff[0] = max(0., cutoff[0])
cutoff[1] = min(1., cutoff[1])
cutoff[0] = np.min([cutoff[0], 0.09])
cutoff[1] = np.max([cutoff[1], 0.91])
return cutoff
def normalize(array, landmarks, mask=None, cutoff=None, epsilon=1e-5):
cutoff_ = DEFAULT_CUTOFF if cutoff is None else cutoff
mapping = landmarks
data = array
shape = data.shape
data = data.reshape(-1).astype(np.float32)
if mask is None:
mask = np.ones_like(data, np.bool)
mask = mask.reshape(-1)
range_to_use = [0, 1, 2, 4, 5, 6, 7, 8, 10, 11, 12]
quantiles_cutoff = _standardize_cutoff(cutoff_)
percentiles_cutoff = 100 * np.array(quantiles_cutoff)
percentiles = _get_percentiles(percentiles_cutoff)
percentile_values = np.percentile(data[mask], percentiles)
# Apply linear histogram standardization
range_mapping = mapping[range_to_use]
range_perc = percentile_values[range_to_use]
diff_mapping = np.diff(range_mapping)
diff_perc = np.diff(range_perc)
# Handling the case where two landmarks are the same
# for a given input image. This usually happens when
# image background is not removed from the image.
diff_perc[diff_perc < epsilon] = np.inf
affine_map = np.zeros([2, len(range_to_use) - 1])
# Compute slopes of the linear models
affine_map[0] = diff_mapping / diff_perc
# Compute intercepts of the linear models
affine_map[1] = range_mapping[:-1] - affine_map[0] * range_perc[:-1]
bin_id = np.digitize(data, range_perc[1:-1], right=False)
lin_img = affine_map[0, bin_id]
aff_img = affine_map[1, bin_id]
new_img = lin_img * data + aff_img
new_img = new_img.reshape(shape)
new_img = new_img.astype(np.float32)
return new_img
def calculate_landmarks(image_path='../datasets/LPBA40/LPBA40_rigidly_registered_pairs'):
quantiles_cutoff = DEFAULT_CUTOFF
percentiles_cutoff = 100 * np.array(quantiles_cutoff)
percentiles_database = []
percentiles = _get_percentiles(percentiles_cutoff)
count = 1
for i in list(range(1, 41, 1)):
for j in list(range(1, 41, 1)):
img_path = os.path.join(image_path, 'l{}_to_l{}.hdr'.format(str(i), str(j)))
atlas_path = os.path.join(
image_path.replace('LPBA40_rigidly_registered_pairs', 'LPBA40_rigidly_registered_label_pairs'),
'l{}_to_l{}.hdr'.format(str(i), str(j)))
if os.path.exists(img_path) and os.path.exists(atlas_path):
img_sitk = sitk.ReadImage(str(img_path))
img_np = sitk.GetArrayFromImage(img_sitk).swapaxes(0, 2)
mask = img_np > 0
percentile_values = np.percentile(img_np[mask], percentiles)
percentiles_database.append(percentile_values)
count += 1
else:
raise FileNotFoundError
percentiles_database = np.vstack(percentiles_database)
mapping = _get_average_mapping(percentiles_database)
print(mapping)
np.save('../datasets/LPBA40/mapping.npy', mapping)
return mapping
def histogram_stardardization_resample_center_crop(mapping,
input_path='../datasets/LPBA40/LPBA40_rigidly_registered_pairs',
output_path_hs_small='../datasets/LPBA40/LPBA40_rigidly_registered_pairs_histogram_standardization_small',
output_path_mask='../datasets/LPBA40/LPBA40_rigidly_registered_label_pairs_small'):
if not os.path.exists(str(output_path_hs_small)):
os.makedirs(str(output_path_hs_small))
if not os.path.exists(str(output_path_mask)):
os.makedirs(str(output_path_mask))
for i in list(range(1, 41, 1)):
for j in list(range(1, 41, 1)):
# ~~~~~~~~~~~~~~~ images ~~~~~~~~~~~~~~~
volpath = os.path.join(input_path, 'l{}_to_l{}.nii'.format(str(i), str(j)))
img_sitk = sitk.ReadImage(str(volpath))
img_np = sitk.GetArrayFromImage(img_sitk).swapaxes(0, 2)
mask = img_np > 0
# 1. histogram_stardardization
img_np_hs = normalize(img_np, mapping, mask)
img_sitk_hs = sitk.GetImageFromArray(img_np_hs.swapaxes(0, 2))
img_sitk_hs.SetSpacing(img_sitk.GetSpacing())
img_sitk_hs.SetDirection(img_sitk.GetDirection())
img_sitk_hs.SetOrigin(img_sitk.GetOrigin())
# 2. resample
img_sitk_hs_small = resample_image(img_sitk_hs)
img_np_hs_small = sitk.GetArrayFromImage(img_sitk_hs_small).swapaxes(0, 2)
# 3. center_crop
img_crop = center_crop(img=img_np_hs_small, size_ratio=(80, 106, 80)).swapaxes(0, 2)
new_img = sitk.GetImageFromArray(img_crop)
new_img.SetSpacing(img_sitk_hs_small.GetSpacing())
new_img.SetDirection(img_sitk_hs_small.GetDirection())
new_img.SetOrigin(img_sitk_hs_small.GetOrigin())
output_path_hs_small_img = os.path.join(output_path_hs_small, 'l{}_to_l{}.nii'.format(str(i), str(j)))
sitk.WriteImage(new_img, str(output_path_hs_small_img))
# ~~~~~~~~~~~~~~~ masks ~~~~~~~~~~~~~~~
atlas_path = volpath.replace('LPBA40_rigidly_registered_pairs', 'LPBA40_rigidly_registered_label_pairs')
atlas = sitk.ReadImage(str(atlas_path))
# 1. resample
atlas_resampled = resample_image(atlas, is_label=True)
atlas_np = sitk.GetArrayFromImage(atlas_resampled).swapaxes(0, 2)
# 2. center_crop
atlas_crop = center_crop(img=atlas_np, size_ratio=(80, 106, 80)).swapaxes(0, 2)
new_atlas = sitk.GetImageFromArray(atlas_crop)
new_atlas.SetSpacing(atlas_resampled.GetSpacing())
new_atlas.SetDirection(atlas_resampled.GetDirection())
new_atlas.SetOrigin(atlas_resampled.GetOrigin())
| quartiles = np.arange(25, 100, 25).tolist()
deciles = np.arange(10, 100, 10).tolist()
all_percentiles = list(percentiles_cutoff) + quartiles + deciles
percentiles = sorted(set(all_percentiles))
return np.array(percentiles) | identifier_body |
preprocessing_lpba40.py | is_label:
resample.SetInterpolator(sitk.sitkNearestNeighbor)
else:
resample.SetInterpolator(sitk.sitkBSpline)
return resample.Execute(itk_image)
def center_crop(img, size_ratio):
x, y, z = img.shape
size_ratio_x, size_ratio_y, size_ratio_z = size_ratio
size_x = size_ratio_x
size_y = size_ratio_y
size_z = size_ratio_z
if x < size_x or y < size_y and z < size_z:
raise ValueError
x1 = 0
y1 = int((y - size_y) / 2)
z1 = int((z - size_z) / 2)
img_crop = img[x1: x1 + size_x, y1: y1 + size_y, z1: z1 + size_z]
return img_crop
def _get_percentiles(percentiles_cutoff):
quartiles = np.arange(25, 100, 25).tolist()
deciles = np.arange(10, 100, 10).tolist()
all_percentiles = list(percentiles_cutoff) + quartiles + deciles
percentiles = sorted(set(all_percentiles))
return np.array(percentiles)
def _get_average_mapping(percentiles_database):
"""Map the landmarks of the database to the chosen range.
Args:
percentiles_database: Percentiles database over which to perform the
averaging.
"""
# Assuming percentiles_database.shape == (num_data_points, num_percentiles)
pc1 = percentiles_database[:, 0]
pc2 = percentiles_database[:, -1]
s1, s2 = STANDARD_RANGE
slopes = (s2 - s1) / (pc2 - pc1)
slopes = np.nan_to_num(slopes)
intercepts = np.mean(s1 - slopes * pc1)
num_images = len(percentiles_database)
final_map = slopes.dot(percentiles_database) / num_images + intercepts
return final_map
def _standardize_cutoff(cutoff):
"""Standardize the cutoff values given in the configuration.
Computes percentile landmark normalization by default.
"""
cutoff = np.asarray(cutoff)
cutoff[0] = max(0., cutoff[0])
cutoff[1] = min(1., cutoff[1])
cutoff[0] = np.min([cutoff[0], 0.09])
cutoff[1] = np.max([cutoff[1], 0.91])
return cutoff
def | (array, landmarks, mask=None, cutoff=None, epsilon=1e-5):
cutoff_ = DEFAULT_CUTOFF if cutoff is None else cutoff
mapping = landmarks
data = array
shape = data.shape
data = data.reshape(-1).astype(np.float32)
if mask is None:
mask = np.ones_like(data, np.bool)
mask = mask.reshape(-1)
range_to_use = [0, 1, 2, 4, 5, 6, 7, 8, 10, 11, 12]
quantiles_cutoff = _standardize_cutoff(cutoff_)
percentiles_cutoff = 100 * np.array(quantiles_cutoff)
percentiles = _get_percentiles(percentiles_cutoff)
percentile_values = np.percentile(data[mask], percentiles)
# Apply linear histogram standardization
range_mapping = mapping[range_to_use]
range_perc = percentile_values[range_to_use]
diff_mapping = np.diff(range_mapping)
diff_perc = np.diff(range_perc)
# Handling the case where two landmarks are the same
# for a given input image. This usually happens when
# image background is not removed from the image.
diff_perc[diff_perc < epsilon] = np.inf
affine_map = np.zeros([2, len(range_to_use) - 1])
# Compute slopes of the linear models
affine_map[0] = diff_mapping / diff_perc
# Compute intercepts of the linear models
affine_map[1] = range_mapping[:-1] - affine_map[0] * range_perc[:-1]
bin_id = np.digitize(data, range_perc[1:-1], right=False)
lin_img = affine_map[0, bin_id]
aff_img = affine_map[1, bin_id]
new_img = lin_img * data + aff_img
new_img = new_img.reshape(shape)
new_img = new_img.astype(np.float32)
return new_img
def calculate_landmarks(image_path='../datasets/LPBA40/LPBA40_rigidly_registered_pairs'):
quantiles_cutoff = DEFAULT_CUTOFF
percentiles_cutoff = 100 * np.array(quantiles_cutoff)
percentiles_database = []
percentiles = _get_percentiles(percentiles_cutoff)
count = 1
for i in list(range(1, 41, 1)):
for j in list(range(1, 41, 1)):
img_path = os.path.join(image_path, 'l{}_to_l{}.hdr'.format(str(i), str(j)))
atlas_path = os.path.join(
image_path.replace('LPBA40_rigidly_registered_pairs', 'LPBA40_rigidly_registered_label_pairs'),
'l{}_to_l{}.hdr'.format(str(i), str(j)))
if os.path.exists(img_path) and os.path.exists(atlas_path):
img_sitk = sitk.ReadImage(str(img_path))
img_np = sitk.GetArrayFromImage(img_sitk).swapaxes(0, 2)
mask = img_np > 0
percentile_values = np.percentile(img_np[mask], percentiles)
percentiles_database.append(percentile_values)
count += 1
else:
raise FileNotFoundError
percentiles_database = np.vstack(percentiles_database)
mapping = _get_average_mapping(percentiles_database)
print(mapping)
np.save('../datasets/LPBA40/mapping.npy', mapping)
return mapping
def histogram_stardardization_resample_center_crop(mapping,
input_path='../datasets/LPBA40/LPBA40_rigidly_registered_pairs',
output_path_hs_small='../datasets/LPBA40/LPBA40_rigidly_registered_pairs_histogram_standardization_small',
output_path_mask='../datasets/LPBA40/LPBA40_rigidly_registered_label_pairs_small'):
if not os.path.exists(str(output_path_hs_small)):
os.makedirs(str(output_path_hs_small))
if not os.path.exists(str(output_path_mask)):
os.makedirs(str(output_path_mask))
for i in list(range(1, 41, 1)):
for j in list(range(1, 41, 1)):
# ~~~~~~~~~~~~~~~ images ~~~~~~~~~~~~~~~
volpath = os.path.join(input_path, 'l{}_to_l{}.nii'.format(str(i), str(j)))
img_sitk = sitk.ReadImage(str(volpath))
img_np = sitk.GetArrayFromImage(img_sitk).swapaxes(0, 2)
mask = img_np > 0
# 1. histogram_stardardization
img_np_hs = normalize(img_np, mapping, mask)
img_sitk_hs = sitk.GetImageFromArray(img_np_hs.swapaxes(0, 2))
img_sitk_hs.SetSpacing(img_sitk.GetSpacing())
img_sitk_hs.SetDirection(img_sitk.GetDirection())
img_sitk_hs.SetOrigin(img_sitk.GetOrigin())
# 2. resample
img_sitk_hs_small = resample_image(img_sitk_hs)
img_np_hs_small = sitk.GetArrayFromImage(img_sitk_hs_small).swapaxes(0, 2)
# 3. center_crop
img_crop = center_crop(img=img_np_hs_small, size_ratio=(80, 106, 80)).swapaxes(0, 2)
new_img = sitk.GetImageFromArray(img_crop)
new_img.SetSpacing(img_sitk_hs_small.GetSpacing())
new_img.SetDirection(img_sitk_hs_small.GetDirection())
new_img.SetOrigin(img_sitk_hs_small.GetOrigin())
output_path_hs_small_img = os.path.join(output_path_hs_small, 'l{}_to_l{}.nii'.format(str(i), str(j)))
sitk.WriteImage(new_img, str(output_path_hs_small_img))
# ~~~~~~~~~~~~~~~ masks ~~~~~~~~~~~~~~~
atlas_path = volpath.replace('LPBA40_rigidly_registered_pairs', 'LPBA40_rigidly_registered_label_pairs')
atlas = sitk.ReadImage(str(atlas_path))
# 1. resample
atlas_resampled = resample_image(atlas, is_label=True)
atlas_np = sitk.GetArrayFromImage(atlas_resampled).swapaxes(0, 2)
# 2. center_crop
atlas_crop = center_crop(img=atlas_np, size_ratio=(80, 106, 80)).swapaxes(0, 2)
new_atlas = sitk.GetImageFromArray(atlas_crop)
new_atlas.SetSpacing(atlas_resampled.GetSpacing())
new_atlas.SetDirection(atlas_resampled.GetDirection())
new_atlas.SetOrigin(atlas_resampled.GetOrigin())
output | normalize | identifier_name |
git.rs | DirBuilder, File};
use std::io::{BufWriter, Write};
use std::path::{Path, PathBuf};
use std::process::Command as SystemCommand;
use crate::error::{RLibError, Result};
//-------------------------------------------------------------------------------//
// Enums & Structs
//-------------------------------------------------------------------------------//
/// Struct containing the data needed to perform a fetch/pull from a repo.
#[derive(Debug)]
pub struct GitIntegration {
/// Local Path of the repo.
local_path: PathBuf,
/// URL of the repo.
url: String,
/// Branch to fetch/pull.
branch: String,
/// Remote to fetch/pull from.
remote: String,
}
/// Possible responses we can get from a fetch/pull.
#[derive(Debug)]
pub enum GitResponse {
NewUpdate,
NoUpdate,
NoLocalFiles,
Diverged,
}
//---------------------------------------------------------------------------//
// Enum & Structs Implementations
//---------------------------------------------------------------------------//
impl GitIntegration {
/// This function creates a new GitIntegration struct with data for a git operation.
pub fn new(local_path: &Path, url: &str, branch: &str, remote: &str) -> Self {
Self {
local_path: local_path.to_owned(),
url: url.to_owned(),
branch: branch.to_owned(),
remote: remote.to_owned(),
}
}
/// This function tries to initializes a git repo.
pub fn init(&self) -> Result<Repository> {
Repository::init(&self.local_path).map_err(From::from)
}
/// This function generates a gitignore file for the git repo.
///
/// If it already exists, it'll replace the existing file.
pub fn add_gitignore(&self, contents: &str) -> Result<()> {
let mut file = BufWriter::new(File::create(self.local_path.join(".gitignore"))?);
file.write_all(contents.as_bytes()).map_err(From::from)
}
/// This function switches the branch of a `GitIntegration` to the provided refspec.
pub fn checkout_branch(&self, repo: &Repository, refs: &str) -> Result<()> {
let head = repo.head().unwrap();
let oid = head.target().unwrap();
let commit = repo.find_commit(oid)?;
let branch_name = refs.splitn(3, '/').collect::<Vec<_>>()[2].to_owned();
let _ = repo.branch(&branch_name, &commit, false);
let branch_object = repo.revparse_single(refs)?;
repo.checkout_tree(&branch_object, None)?;
repo.set_head(refs)?;
Ok(())
}
/// This function checks if there is a new update for the current repo.
pub fn check_update(&self) -> Result<GitResponse> {
let mut repo = match Repository::open(&self.local_path) {
Ok(repo) => repo,
// If this fails, it means we either we don´t have the repo downloaded, or we have a folder without the .git folder.
Err(_) => return Ok(GitResponse::NoLocalFiles),
};
// Just in case there are loose changes, stash them.
// Ignore a fail on this, as it's possible we don't have contents to stash.
let current_branch_name = Reference::normalize_name(repo.head()?.name().unwrap(), ReferenceFormat::ALLOW_ONELEVEL)?.to_lowercase();
let master_refname = format!("refs/heads/{}", self.branch);
let signature = Signature::now("RPFM Updater", "-")?;
let stash_id = repo.stash_save(&signature, &format!("Stashed changes before checking for updates from branch {current_branch_name}"), Some(StashFlags::INCLUDE_UNTRACKED));
// In case we're not in master, checkout the master branch.
if current_branch_name != master_refname {
self.checkout_branch(&repo, &master_refname)?;
}
// Fetch the info of the master branch.
repo.find_remote(&self.remote)?.fetch(&[&self.branch], None, None)?;
let analysis = {
let fetch_head = repo.find_reference("FETCH_HEAD")?;
let fetch_commit = repo.reference_to_annotated_commit(&fetch_head)?;
repo.merge_analysis(&[&fetch_commit])?
};
// Reset the repo to his original state after the check
if current_branch_name != master_refname {
self.checkout_branch(&repo, ¤t_branch_name)?;
}
if stash_id.is_ok() {
let _ = repo.stash_pop(0, None);
}
if analysis.0.is_up_to_date() {
Ok(GitResponse::NoUpdate)
}
// If the branch is a fast-forward, or has diverged, ask for an update.
else if analysis.0.is_fast_forward() || analysis.0.is_normal() || analysis.0.is_none() || analysis.0.is_unborn() {
Ok(GitResponse::NewUpdate)
}
// Otherwise, it means the branches diverged. In this case, return a diverged.
else {
Ok(GitResponse::Diverged)
}
}
/// This function downloads the latest revision of the current repository.
pub fn update_repo(&self) -> Result<()> {
let mut new_repo = false;
let mut repo = match Repository::open(&self.local_path) {
Ok(repo) => repo,
Err(_) => {
// If it fails to open, it means either we don't have the .git folder, or we don't have a folder at all.
// In either case, recreate it and redownload the repo. No more steps are needed here.
// On windows, remove the read-only flags before doing anything else, or this will fail.
if cfg!(target_os = "windows") {
let path = self.local_path.to_string_lossy().to_string() + "\\*.*";
let _ = SystemCommand::new("attrib").arg("-r").arg(path).arg("/s").output();
}
let _ = std::fs::remove_dir_all(&self.local_path);
DirBuilder::new().recursive(true).create(&self.local_path)?;
match Repository::clone(&self.url, &self.local_path) {
Ok(repo) => {
new_repo = true;
repo
},
Err(_) => return Err(RLibError::GitErrorDownloadFromRepo(self.url.to_owned())),
}
}
};
// Just in case there are loose changes, stash them.
// Ignore a fail on this, as it's possible we don't have contents to stash.
let current_branch_name = Reference::normalize_name(repo.head()?.name().unwrap(), ReferenceFormat::ALLOW_ONELEVEL)?.to_lowercase();
let master_refname = format!("refs/heads/{}", self.branch);
let signature = Signature::now("RPFM Updater", "-")?;
let stash_id = repo.stash_save(&signature, &format!("Stashed changes before update from branch {current_branch_name}"), Some(StashFlags::INCLUDE_UNTRACKED));
// In case we're not in master, checkout the master branch.
if current_branch_name != master_refname {
self.checkout_branch(&repo, &master_refname)?;
}
// If we just cloned a new repo and changed branches, return.
if new_repo {
return Ok(());
}
// If it worked, now we have to do a pull from master. Sadly, git2-rs does not support pull.
// Instead, we kinda force a fast-forward. Made in StackOverflow.
repo.find_remote(&self.remote)?.fetch(&[&self.branch], None, None)?;
let (analysis, fetch_commit_id) = {
let fetch_head = repo.find_reference("FETCH_HEAD")?;
let fetch_commit = repo.reference_to_annotated_commit(&fetch_head)?;
(repo.merge_analysis(&[&fetch_commit])?, fetch_commit.id())
};
// If we're up to date, nothing more is needed.
if analysis.0.is_up_to_date() {
// Reset the repo to his original state after the check
if current_branch_name != master_refname {
self.checkout_branch(&repo, ¤t_branch_name)?;
}
if stash_id.is_ok() {
let _ = repo.stash_pop(0, None);
}
Err(RLibError::GitErrorNoUpdatesAvailable(self.url.to_owned()))
}
// If we can do a fast-forward, we do it. This is the preferred option.
else if analysis.0.is_fast_forward() {
let mut reference = repo.find_reference(&master_refname)?;
reference.set_target(fetch_commit_id, "Fast-Forward")?;
repo.set_head(&master_refname)?;
repo.checkout_head(Some(CheckoutBuilder::default().force())).map_err(From::from)
}
// If not, we face multiple problems:
// - If there are uncommitted changes: covered by the stash.
// - If we're not in the branch: covered by the branch switch.
// - If the branches diverged: this one... the cleanest way to deal with it should be redownload the repo.
else if analysis.0.is_normal() || analysis.0.is_none() || analysis.0.is_unborn() {
// On windows, remove the read-only flags before doing anything else, or this will fail.
if cfg!(target_os = "windows") {
| let path = self.local_path.to_string_lossy().to_string() + "\\*.*";
let _ = SystemCommand::new("attrib").arg("-r").arg(path).arg("/s").output();
}
| conditional_block | |
git.rs | file is licensed under the MIT license, which can be found here:
// https://github.com/Frodo45127/rpfm/blob/master/LICENSE.
//---------------------------------------------------------------------------//
//! This module contains the code for the limited Git support.
use git2::{Reference, ReferenceFormat, Repository, Signature, StashFlags, build::CheckoutBuilder};
use std::fs::{DirBuilder, File};
use std::io::{BufWriter, Write};
use std::path::{Path, PathBuf};
use std::process::Command as SystemCommand;
use crate::error::{RLibError, Result};
//-------------------------------------------------------------------------------//
// Enums & Structs
//-------------------------------------------------------------------------------//
/// Struct containing the data needed to perform a fetch/pull from a repo.
#[derive(Debug)]
pub struct GitIntegration {
/// Local Path of the repo.
local_path: PathBuf,
/// URL of the repo.
url: String,
/// Branch to fetch/pull.
branch: String,
/// Remote to fetch/pull from.
remote: String,
}
/// Possible responses we can get from a fetch/pull.
#[derive(Debug)]
pub enum GitResponse {
NewUpdate,
NoUpdate,
NoLocalFiles,
Diverged,
}
//---------------------------------------------------------------------------//
// Enum & Structs Implementations
//---------------------------------------------------------------------------//
impl GitIntegration {
/// This function creates a new GitIntegration struct with data for a git operation.
pub fn new(local_path: &Path, url: &str, branch: &str, remote: &str) -> Self {
Self {
local_path: local_path.to_owned(),
url: url.to_owned(),
branch: branch.to_owned(),
remote: remote.to_owned(),
}
}
/// This function tries to initializes a git repo.
pub fn init(&self) -> Result<Repository> {
Repository::init(&self.local_path).map_err(From::from)
}
/// This function generates a gitignore file for the git repo.
///
/// If it already exists, it'll replace the existing file.
pub fn ad | self, contents: &str) -> Result<()> {
let mut file = BufWriter::new(File::create(self.local_path.join(".gitignore"))?);
file.write_all(contents.as_bytes()).map_err(From::from)
}
/// This function switches the branch of a `GitIntegration` to the provided refspec.
pub fn checkout_branch(&self, repo: &Repository, refs: &str) -> Result<()> {
let head = repo.head().unwrap();
let oid = head.target().unwrap();
let commit = repo.find_commit(oid)?;
let branch_name = refs.splitn(3, '/').collect::<Vec<_>>()[2].to_owned();
let _ = repo.branch(&branch_name, &commit, false);
let branch_object = repo.revparse_single(refs)?;
repo.checkout_tree(&branch_object, None)?;
repo.set_head(refs)?;
Ok(())
}
/// This function checks if there is a new update for the current repo.
pub fn check_update(&self) -> Result<GitResponse> {
let mut repo = match Repository::open(&self.local_path) {
Ok(repo) => repo,
// If this fails, it means we either we don´t have the repo downloaded, or we have a folder without the .git folder.
Err(_) => return Ok(GitResponse::NoLocalFiles),
};
// Just in case there are loose changes, stash them.
// Ignore a fail on this, as it's possible we don't have contents to stash.
let current_branch_name = Reference::normalize_name(repo.head()?.name().unwrap(), ReferenceFormat::ALLOW_ONELEVEL)?.to_lowercase();
let master_refname = format!("refs/heads/{}", self.branch);
let signature = Signature::now("RPFM Updater", "-")?;
let stash_id = repo.stash_save(&signature, &format!("Stashed changes before checking for updates from branch {current_branch_name}"), Some(StashFlags::INCLUDE_UNTRACKED));
// In case we're not in master, checkout the master branch.
if current_branch_name != master_refname {
self.checkout_branch(&repo, &master_refname)?;
}
// Fetch the info of the master branch.
repo.find_remote(&self.remote)?.fetch(&[&self.branch], None, None)?;
let analysis = {
let fetch_head = repo.find_reference("FETCH_HEAD")?;
let fetch_commit = repo.reference_to_annotated_commit(&fetch_head)?;
repo.merge_analysis(&[&fetch_commit])?
};
// Reset the repo to his original state after the check
if current_branch_name != master_refname {
self.checkout_branch(&repo, ¤t_branch_name)?;
}
if stash_id.is_ok() {
let _ = repo.stash_pop(0, None);
}
if analysis.0.is_up_to_date() {
Ok(GitResponse::NoUpdate)
}
// If the branch is a fast-forward, or has diverged, ask for an update.
else if analysis.0.is_fast_forward() || analysis.0.is_normal() || analysis.0.is_none() || analysis.0.is_unborn() {
Ok(GitResponse::NewUpdate)
}
// Otherwise, it means the branches diverged. In this case, return a diverged.
else {
Ok(GitResponse::Diverged)
}
}
/// This function downloads the latest revision of the current repository.
pub fn update_repo(&self) -> Result<()> {
let mut new_repo = false;
let mut repo = match Repository::open(&self.local_path) {
Ok(repo) => repo,
Err(_) => {
// If it fails to open, it means either we don't have the .git folder, or we don't have a folder at all.
// In either case, recreate it and redownload the repo. No more steps are needed here.
// On windows, remove the read-only flags before doing anything else, or this will fail.
if cfg!(target_os = "windows") {
let path = self.local_path.to_string_lossy().to_string() + "\\*.*";
let _ = SystemCommand::new("attrib").arg("-r").arg(path).arg("/s").output();
}
let _ = std::fs::remove_dir_all(&self.local_path);
DirBuilder::new().recursive(true).create(&self.local_path)?;
match Repository::clone(&self.url, &self.local_path) {
Ok(repo) => {
new_repo = true;
repo
},
Err(_) => return Err(RLibError::GitErrorDownloadFromRepo(self.url.to_owned())),
}
}
};
// Just in case there are loose changes, stash them.
// Ignore a fail on this, as it's possible we don't have contents to stash.
let current_branch_name = Reference::normalize_name(repo.head()?.name().unwrap(), ReferenceFormat::ALLOW_ONELEVEL)?.to_lowercase();
let master_refname = format!("refs/heads/{}", self.branch);
let signature = Signature::now("RPFM Updater", "-")?;
let stash_id = repo.stash_save(&signature, &format!("Stashed changes before update from branch {current_branch_name}"), Some(StashFlags::INCLUDE_UNTRACKED));
// In case we're not in master, checkout the master branch.
if current_branch_name != master_refname {
self.checkout_branch(&repo, &master_refname)?;
}
// If we just cloned a new repo and changed branches, return.
if new_repo {
return Ok(());
}
// If it worked, now we have to do a pull from master. Sadly, git2-rs does not support pull.
// Instead, we kinda force a fast-forward. Made in StackOverflow.
repo.find_remote(&self.remote)?.fetch(&[&self.branch], None, None)?;
let (analysis, fetch_commit_id) = {
let fetch_head = repo.find_reference("FETCH_HEAD")?;
let fetch_commit = repo.reference_to_annotated_commit(&fetch_head)?;
(repo.merge_analysis(&[&fetch_commit])?, fetch_commit.id())
};
// If we're up to date, nothing more is needed.
if analysis.0.is_up_to_date() {
// Reset the repo to his original state after the check
if current_branch_name != master_refname {
self.checkout_branch(&repo, ¤t_branch_name)?;
}
if stash_id.is_ok() {
let _ = repo.stash_pop(0, None);
}
Err(RLibError::GitErrorNoUpdatesAvailable(self.url.to_owned()))
}
// If we can do a fast-forward, we do it. This is the preferred option.
else if analysis.0.is_fast_forward() {
let mut reference = repo.find_reference(&master_refname)?;
reference.set_target(fetch_commit_id, "Fast-Forward")?;
repo.set_head(&master_refname)?;
repo.checkout_head(Some(CheckoutBuilder::default().force())).map_err(From::from)
}
// If not, we face multiple problems:
// - If there are uncommitted changes: covered by the stash.
// - If we're not in the branch: covered by the branch switch.
// - If the branches diverged: this one... the cleanest way to deal with it should be redownload the repo.
else if analysis.0.is_normal() || analysis.0.is_none() || analysis.0.is_unborn() {
// | d_gitignore(& | identifier_name |
git.rs | file is licensed under the MIT license, which can be found here:
// https://github.com/Frodo45127/rpfm/blob/master/LICENSE.
//---------------------------------------------------------------------------//
//! This module contains the code for the limited Git support.
use git2::{Reference, ReferenceFormat, Repository, Signature, StashFlags, build::CheckoutBuilder};
use std::fs::{DirBuilder, File};
use std::io::{BufWriter, Write};
use std::path::{Path, PathBuf};
use std::process::Command as SystemCommand;
use crate::error::{RLibError, Result};
//-------------------------------------------------------------------------------//
// Enums & Structs
//-------------------------------------------------------------------------------//
/// Struct containing the data needed to perform a fetch/pull from a repo.
#[derive(Debug)]
pub struct GitIntegration {
/// Local Path of the repo.
local_path: PathBuf,
/// URL of the repo.
url: String,
/// Branch to fetch/pull.
branch: String,
/// Remote to fetch/pull from.
remote: String,
}
/// Possible responses we can get from a fetch/pull.
#[derive(Debug)]
pub enum GitResponse {
NewUpdate,
NoUpdate,
NoLocalFiles,
Diverged,
}
//---------------------------------------------------------------------------//
// Enum & Structs Implementations
//---------------------------------------------------------------------------//
impl GitIntegration {
/// This function creates a new GitIntegration struct with data for a git operation.
pub fn new(local_path: &Path, url: &str, branch: &str, remote: &str) -> Self {
Self {
local_path: local_path.to_owned(),
url: url.to_owned(),
branch: branch.to_owned(),
remote: remote.to_owned(),
}
}
/// This function tries to initializes a git repo.
pub fn init(&self) -> Result<Repository> {
Repository::init(&self.local_path).map_err(From::from)
}
/// This function generates a gitignore file for the git repo.
///
/// If it already exists, it'll replace the existing file.
pub fn add_gitignore(&self, contents: &str) -> Result<()> {
let mut file = BufWriter::new(File::create(self.local_path.join(".gitignore"))?);
file.write_all(contents.as_bytes()).map_err(From::from)
}
/// This function switches the branch of a `GitIntegration` to the provided refspec.
pub fn checkout_branch(&self, repo: &Repository, refs: &str) -> Result<()> {
let head = repo.head().unwrap();
let oid = head.target().unwrap();
let commit = repo.find_commit(oid)?;
let branch_name = refs.splitn(3, '/').collect::<Vec<_>>()[2].to_owned();
let _ = repo.branch(&branch_name, &commit, false);
let branch_object = repo.revparse_single(refs)?;
repo.checkout_tree(&branch_object, None)?;
repo.set_head(refs)?;
Ok(())
}
/// This function checks if there is a new update for the current repo.
pub fn check_update(&self) -> Result<GitResponse> { | Err(_) => return Ok(GitResponse::NoLocalFiles),
};
// Just in case there are loose changes, stash them.
// Ignore a fail on this, as it's possible we don't have contents to stash.
let current_branch_name = Reference::normalize_name(repo.head()?.name().unwrap(), ReferenceFormat::ALLOW_ONELEVEL)?.to_lowercase();
let master_refname = format!("refs/heads/{}", self.branch);
let signature = Signature::now("RPFM Updater", "-")?;
let stash_id = repo.stash_save(&signature, &format!("Stashed changes before checking for updates from branch {current_branch_name}"), Some(StashFlags::INCLUDE_UNTRACKED));
// In case we're not in master, checkout the master branch.
if current_branch_name != master_refname {
self.checkout_branch(&repo, &master_refname)?;
}
// Fetch the info of the master branch.
repo.find_remote(&self.remote)?.fetch(&[&self.branch], None, None)?;
let analysis = {
let fetch_head = repo.find_reference("FETCH_HEAD")?;
let fetch_commit = repo.reference_to_annotated_commit(&fetch_head)?;
repo.merge_analysis(&[&fetch_commit])?
};
// Reset the repo to his original state after the check
if current_branch_name != master_refname {
self.checkout_branch(&repo, ¤t_branch_name)?;
}
if stash_id.is_ok() {
let _ = repo.stash_pop(0, None);
}
if analysis.0.is_up_to_date() {
Ok(GitResponse::NoUpdate)
}
// If the branch is a fast-forward, or has diverged, ask for an update.
else if analysis.0.is_fast_forward() || analysis.0.is_normal() || analysis.0.is_none() || analysis.0.is_unborn() {
Ok(GitResponse::NewUpdate)
}
// Otherwise, it means the branches diverged. In this case, return a diverged.
else {
Ok(GitResponse::Diverged)
}
}
/// This function downloads the latest revision of the current repository.
pub fn update_repo(&self) -> Result<()> {
let mut new_repo = false;
let mut repo = match Repository::open(&self.local_path) {
Ok(repo) => repo,
Err(_) => {
// If it fails to open, it means either we don't have the .git folder, or we don't have a folder at all.
// In either case, recreate it and redownload the repo. No more steps are needed here.
// On windows, remove the read-only flags before doing anything else, or this will fail.
if cfg!(target_os = "windows") {
let path = self.local_path.to_string_lossy().to_string() + "\\*.*";
let _ = SystemCommand::new("attrib").arg("-r").arg(path).arg("/s").output();
}
let _ = std::fs::remove_dir_all(&self.local_path);
DirBuilder::new().recursive(true).create(&self.local_path)?;
match Repository::clone(&self.url, &self.local_path) {
Ok(repo) => {
new_repo = true;
repo
},
Err(_) => return Err(RLibError::GitErrorDownloadFromRepo(self.url.to_owned())),
}
}
};
// Just in case there are loose changes, stash them.
// Ignore a fail on this, as it's possible we don't have contents to stash.
let current_branch_name = Reference::normalize_name(repo.head()?.name().unwrap(), ReferenceFormat::ALLOW_ONELEVEL)?.to_lowercase();
let master_refname = format!("refs/heads/{}", self.branch);
let signature = Signature::now("RPFM Updater", "-")?;
let stash_id = repo.stash_save(&signature, &format!("Stashed changes before update from branch {current_branch_name}"), Some(StashFlags::INCLUDE_UNTRACKED));
// In case we're not in master, checkout the master branch.
if current_branch_name != master_refname {
self.checkout_branch(&repo, &master_refname)?;
}
// If we just cloned a new repo and changed branches, return.
if new_repo {
return Ok(());
}
// If it worked, now we have to do a pull from master. Sadly, git2-rs does not support pull.
// Instead, we kinda force a fast-forward. Made in StackOverflow.
repo.find_remote(&self.remote)?.fetch(&[&self.branch], None, None)?;
let (analysis, fetch_commit_id) = {
let fetch_head = repo.find_reference("FETCH_HEAD")?;
let fetch_commit = repo.reference_to_annotated_commit(&fetch_head)?;
(repo.merge_analysis(&[&fetch_commit])?, fetch_commit.id())
};
// If we're up to date, nothing more is needed.
if analysis.0.is_up_to_date() {
// Reset the repo to his original state after the check
if current_branch_name != master_refname {
self.checkout_branch(&repo, ¤t_branch_name)?;
}
if stash_id.is_ok() {
let _ = repo.stash_pop(0, None);
}
Err(RLibError::GitErrorNoUpdatesAvailable(self.url.to_owned()))
}
// If we can do a fast-forward, we do it. This is the preferred option.
else if analysis.0.is_fast_forward() {
let mut reference = repo.find_reference(&master_refname)?;
reference.set_target(fetch_commit_id, "Fast-Forward")?;
repo.set_head(&master_refname)?;
repo.checkout_head(Some(CheckoutBuilder::default().force())).map_err(From::from)
}
// If not, we face multiple problems:
// - If there are uncommitted changes: covered by the stash.
// - If we're not in the branch: covered by the branch switch.
// - If the branches diverged: this one... the cleanest way to deal with it should be redownload the repo.
else if analysis.0.is_normal() || analysis.0.is_none() || analysis.0.is_unborn() {
// On | let mut repo = match Repository::open(&self.local_path) {
Ok(repo) => repo,
// If this fails, it means we either we don´t have the repo downloaded, or we have a folder without the .git folder. | random_line_split |
RPMutils.py | #the number of neurons to use per dimension
NEURONS_PER_DIMENSION = 25
#random seed to use when generating vocabulary vectors
VOCABULARY_SEED = 100
#the minimum confidence value we will require in order to decide we have a match in cleanup memory
MIN_CONFIDENCE = 0.7
#the minimum value we will require to pick either the same or different result
#SAMEDIFF_CHOICE = 0.5
#the minimum score we will require in order to decide we have found a correct rule
CORRECTNESS_THRESHOLD_FIG = 0.8
CORRECTNESS_THRESHOLD_SEQ = 0.7
CORRECTNESS_THRESHOLD_SET = 0.9
#whether or not to add probes when building networks
USE_PROBES = True
#the time (in seconds) for which we present each input
STEP_SIZE = 0.2
#the size (number of base words) of vocabulary to use (we have different versions depending on how many base words are allowed)
VOCAB_SIZE = 80
#true if we are running the controller, false if we are just running the individual modules
RUN_WITH_CONTROLLER = True
#the maximum similarity we will allow when generating a set of vectors
VECTOR_SIMILARITY = 1.0
#if running jobs concurrently, use this to ensure they don't use overlapping data files
JOB_ID = 0
#the mode to run model in
SIMULATION_MODE = SimulationMode.DEFAULT
#whether or not to use cleanup memory
USE_CLEANUP = False
#whether or not to update the cleanup memory after a run
DYNAMIC_MEMORY = False
#the number of threads we want to run with
NUM_THREADS = 0
#whether or not to split n-dimensional populations into n 1-dimensional populations
SPLIT_DIMENSIONS = True
#the threshold to use when detecting same features in figure solver
SAME_THRESHOLD = 1.0
#the threshold to use when detecting different features in figure solver
DIFF_THRESHOLD = 0.9
#the minimum difference required to differentiate between matrix answers
SIMILARITY_THRESHOLD = 0.0
#the folder in which to read/write all files throughout the run
FOLDER_NAME = "test"
#scale on the total number of neurons
NEURON_SCALE = 1.0
#whether or not to do same/diff calculations in neurons
NEURO_SAMEDIFF = True
#whether or not to load rules from file
LOAD_RULES = False
#kill the given percentage of neurons after generation
KILL_NEURONS = 0.0
#returns the appropriate correctness threshold for each module
def correctnessThreshold(module):
if module == "figuresolver":
return CORRECTNESS_THRESHOLD_FIG
if module == "sequencesolver":
return CORRECTNESS_THRESHOLD_SEQ
if module == "setsolver":
return CORRECTNESS_THRESHOLD_SET
#returns a string containing the current value of all the parameters
def getParameterSettings():
keys = getParameterSettings.func_globals.keys()
values = getParameterSettings.func_globals.values()
parms = [[keys[i],values[i]] for i,key in enumerate(keys) if key.isupper()]
return ",".join(["=".join([str(x) for x in pair]) for pair in parms])
#output from origin (used to update cleanup memory)
def cleanupDataFile():
return os.path.join(CURR_LOCATION, "data", FOLDER_NAME, "cleanupoutputdata_" + str(JOB_ID) + ".txt")
#file containing word-vector associations
def vocabFile(d, numwords, seed):
return os.path.join(CURR_LOCATION, "data", FOLDER_NAME, "RPMvocab_" + str(numwords) + "x" + str(d) + "_" + str(seed) + ".txt")
#file containing vectors in cleanup memory
def cleanupFile(d, numwords):
return os.path.join(CURR_LOCATION, "data", FOLDER_NAME, "cleanup_" + str(numwords) + "x" + str(d) + "_" + str(JOB_ID) + ".txt") | #prediction of blank cell from neural module
def hypothesisFile(modulename):
return os.path.join(CURR_LOCATION, "data", FOLDER_NAME, modulename + "_hypothesis_" + str(JOB_ID) + ".txt")
#file to record the rules used to solve a matrix
def ruleFile():
return os.path.join(CURR_LOCATION, "data", FOLDER_NAME, "rules_" + str(JOB_ID) + ".txt")
#vocabulary present in the matrix
def matrixVocabFile():
return os.path.join(CURR_LOCATION, "data", FOLDER_NAME, "matrixvocab_" + str(JOB_ID) + ".txt")
#returns a dxd matrix with the given value along the diagonal
def eye(d, val):
identity = [[0 for x in range(d)] for x in range(d)]
for i in range(d):
identity[i][i] = val
return(identity)
def str2floatlist(str):
return [float(word) for word in str.split()]
def floatlist2str(floatlist):
return " ".join([str(x) for x in floatlist])
#generates a random d-dimensional vector
def genVector(d):
result = [PDFTools.sampleFloat(GaussianPDF()) for i in range(d)]
result = normalize(result)
return result
#creates a function which outputs a random unit vector
def makeInputVector(name, d, randomSeed=None):
vec = []
if randomSeed == None:
randomSeed = long(time.clock()*100000000000000000)
if randomSeed > -1:
PDFTools.setSeed(randomSeed)
length = 0
for i in range(d):
tmp = PDFTools.sampleFloat(GaussianPDF())
vec = vec + [tmp]
length = length + tmp**2
length = math.sqrt(length)
f = []
for i in range(d):
vec[i] = vec[i] / length
f = f + [ConstantFunction(1, vec[i])]
if randomSeed > -1:
PDFTools.setSeed(long(time.clock()*1000000000000000))
print vec
return(FunctionInput(name, f, Units.UNK))
#create function inputs, where each function outputs one of the given vectors
def makeInputVectors(names, vectors):
return [FunctionInput(names[i], [ConstantFunction(1,x) for x in vec], Units.UNK) for i,vec in enumerate(vectors)]
#load vectors from a file and create corresponding output functions
def loadInputVectors(filename):
file = open(filename)
vectors = [str2floatlist(line) for line in file]
file.close()
return makeInputVectors(["vec_" + str(i) for i in range(len(vectors))], vectors)
#an NEF ensemble factory with more evaluation points than normal
class NEFMorePoints(NEFEnsembleFactoryImpl):
def getNumEvalPoints(self, d):
#add shortcut so that it doesn't waste time evaluating a bunch of points when its in direct mode
if SIMULATION_MODE == SimulationMode.DIRECT:
return 1
pointsPerDim = [0, 1000, 2000]
if d < 3:
return(pointsPerDim[d])
else:
return(d*500)
#default ensemble factory used in the model
def defaultEnsembleFactory():
ef=NEFMorePoints()
ef.nodeFactory.tauRC = 0.02
ef.nodeFactory.tauRef = 0.002
ef.nodeFactory.maxRate=IndicatorPDF(200,500)
ef.nodeFactory.intercept=IndicatorPDF(-1, 1)
ef.beQuiet()
return(ef)
#returns all the probes containing name
def findMatchingProbes(probes, name, subname=None):
result = []
for probe in probes:
if name in probe.getTarget().getName() or ((probe.getEnsembleName() != None) and (probe.getEnsembleName().count(name) > 0)):
result = result + [probe]
if subname == None:
return result
else:
return findMatchingProbes(result, subname)
#calculate circular convolution of vec1 and vec2
def cconv(vec1, vec2):
if vec1 == None:
return(vec2)
if vec2 == None:
return(vec1)
d = len(vec1)
result = [0 for i in range(d)]
for i in range(d):
for j in range(d):
result[i] = result[i] + vec1[j] * vec2[(i - j) % d]
return(result)
#calculate vector addition of vec1 and vec2
def vecsum(vec1, vec2):
if vec1 == None:
return(vec2)
if vec2 == None:
return(vec1)
return [x+y for x,y in zip(vec1,vec2)]
#calculate length of vec
def length(vec):
return math.sqrt |
#rule output from neural module
def resultFile(modulename):
return os.path.join(CURR_LOCATION, "data", FOLDER_NAME, modulename + "_result_" + str(JOB_ID) + ".txt")
| random_line_split |
RPMutils.py | #the number of neurons to use per dimension
NEURONS_PER_DIMENSION = 25
#random seed to use when generating vocabulary vectors
VOCABULARY_SEED = 100
#the minimum confidence value we will require in order to decide we have a match in cleanup memory
MIN_CONFIDENCE = 0.7
#the minimum value we will require to pick either the same or different result
#SAMEDIFF_CHOICE = 0.5
#the minimum score we will require in order to decide we have found a correct rule
CORRECTNESS_THRESHOLD_FIG = 0.8
CORRECTNESS_THRESHOLD_SEQ = 0.7
CORRECTNESS_THRESHOLD_SET = 0.9
#whether or not to add probes when building networks
USE_PROBES = True
#the time (in seconds) for which we present each input
STEP_SIZE = 0.2
#the size (number of base words) of vocabulary to use (we have different versions depending on how many base words are allowed)
VOCAB_SIZE = 80
#true if we are running the controller, false if we are just running the individual modules
RUN_WITH_CONTROLLER = True
#the maximum similarity we will allow when generating a set of vectors
VECTOR_SIMILARITY = 1.0
#if running jobs concurrently, use this to ensure they don't use overlapping data files
JOB_ID = 0
#the mode to run model in
SIMULATION_MODE = SimulationMode.DEFAULT
#whether or not to use cleanup memory
USE_CLEANUP = False
#whether or not to update the cleanup memory after a run
DYNAMIC_MEMORY = False
#the number of threads we want to run with
NUM_THREADS = 0
#whether or not to split n-dimensional populations into n 1-dimensional populations
SPLIT_DIMENSIONS = True
#the threshold to use when detecting same features in figure solver
SAME_THRESHOLD = 1.0
#the threshold to use when detecting different features in figure solver
DIFF_THRESHOLD = 0.9
#the minimum difference required to differentiate between matrix answers
SIMILARITY_THRESHOLD = 0.0
#the folder in which to read/write all files throughout the run
FOLDER_NAME = "test"
#scale on the total number of neurons
NEURON_SCALE = 1.0
#whether or not to do same/diff calculations in neurons
NEURO_SAMEDIFF = True
#whether or not to load rules from file
LOAD_RULES = False
#kill the given percentage of neurons after generation
KILL_NEURONS = 0.0
#returns the appropriate correctness threshold for each module
def correctnessThreshold(module):
if module == "figuresolver":
return CORRECTNESS_THRESHOLD_FIG
if module == "sequencesolver":
return CORRECTNESS_THRESHOLD_SEQ
if module == "setsolver":
return CORRECTNESS_THRESHOLD_SET
#returns a string containing the current value of all the parameters
def getParameterSettings():
keys = getParameterSettings.func_globals.keys()
values = getParameterSettings.func_globals.values()
parms = [[keys[i],values[i]] for i,key in enumerate(keys) if key.isupper()]
return ",".join(["=".join([str(x) for x in pair]) for pair in parms])
#output from origin (used to update cleanup memory)
def cleanupDataFile():
return os.path.join(CURR_LOCATION, "data", FOLDER_NAME, "cleanupoutputdata_" + str(JOB_ID) + ".txt")
#file containing word-vector associations
def vocabFile(d, numwords, seed):
return os.path.join(CURR_LOCATION, "data", FOLDER_NAME, "RPMvocab_" + str(numwords) + "x" + str(d) + "_" + str(seed) + ".txt")
#file containing vectors in cleanup memory
def cleanupFile(d, numwords):
return os.path.join(CURR_LOCATION, "data", FOLDER_NAME, "cleanup_" + str(numwords) + "x" + str(d) + "_" + str(JOB_ID) + ".txt")
#rule output from neural module
def resultFile(modulename):
return os.path.join(CURR_LOCATION, "data", FOLDER_NAME, modulename + "_result_" + str(JOB_ID) + ".txt")
#prediction of blank cell from neural module
def hypothesisFile(modulename):
return os.path.join(CURR_LOCATION, "data", FOLDER_NAME, modulename + "_hypothesis_" + str(JOB_ID) + ".txt")
#file to record the rules used to solve a matrix
def ruleFile():
return os.path.join(CURR_LOCATION, "data", FOLDER_NAME, "rules_" + str(JOB_ID) + ".txt")
#vocabulary present in the matrix
def matrixVocabFile():
return os.path.join(CURR_LOCATION, "data", FOLDER_NAME, "matrixvocab_" + str(JOB_ID) + ".txt")
#returns a dxd matrix with the given value along the diagonal
def eye(d, val):
identity = [[0 for x in range(d)] for x in range(d)]
for i in range(d):
identity[i][i] = val
return(identity)
def str2floatlist(str):
return [float(word) for word in str.split()]
def floatlist2str(floatlist):
return " ".join([str(x) for x in floatlist])
#generates a random d-dimensional vector
def genVector(d):
result = [PDFTools.sampleFloat(GaussianPDF()) for i in range(d)]
result = normalize(result)
return result
#creates a function which outputs a random unit vector
def makeInputVector(name, d, randomSeed=None):
vec = []
if randomSeed == None:
randomSeed = long(time.clock()*100000000000000000)
if randomSeed > -1:
PDFTools.setSeed(randomSeed)
length = 0
for i in range(d):
tmp = PDFTools.sampleFloat(GaussianPDF())
vec = vec + [tmp]
length = length + tmp**2
length = math.sqrt(length)
f = []
for i in range(d):
vec[i] = vec[i] / length
f = f + [ConstantFunction(1, vec[i])]
if randomSeed > -1:
PDFTools.setSeed(long(time.clock()*1000000000000000))
print vec
return(FunctionInput(name, f, Units.UNK))
#create function inputs, where each function outputs one of the given vectors
def | (names, vectors):
return [FunctionInput(names[i], [ConstantFunction(1,x) for x in vec], Units.UNK) for i,vec in enumerate(vectors)]
#load vectors from a file and create corresponding output functions
def loadInputVectors(filename):
file = open(filename)
vectors = [str2floatlist(line) for line in file]
file.close()
return makeInputVectors(["vec_" + str(i) for i in range(len(vectors))], vectors)
#an NEF ensemble factory with more evaluation points than normal
class NEFMorePoints(NEFEnsembleFactoryImpl):
def getNumEvalPoints(self, d):
#add shortcut so that it doesn't waste time evaluating a bunch of points when its in direct mode
if SIMULATION_MODE == SimulationMode.DIRECT:
return 1
pointsPerDim = [0, 1000, 2000]
if d < 3:
return(pointsPerDim[d])
else:
return(d*500)
#default ensemble factory used in the model
def defaultEnsembleFactory():
ef=NEFMorePoints()
ef.nodeFactory.tauRC = 0.02
ef.nodeFactory.tauRef = 0.002
ef.nodeFactory.maxRate=IndicatorPDF(200,500)
ef.nodeFactory.intercept=IndicatorPDF(-1, 1)
ef.beQuiet()
return(ef)
#returns all the probes containing name
def findMatchingProbes(probes, name, subname=None):
result = []
for probe in probes:
if name in probe.getTarget().getName() or ((probe.getEnsembleName() != None) and (probe.getEnsembleName().count(name) > 0)):
result = result + [probe]
if subname == None:
return result
else:
return findMatchingProbes(result, subname)
#calculate circular convolution of vec1 and vec2
def cconv(vec1, vec2):
if vec1 == None:
return(vec2)
if vec2 == None:
return(vec1)
d = len(vec1)
result = [0 for i in range(d)]
for i in range(d):
for j in range(d):
result[i] = result[i] + vec1[j] * vec2[(i - j) % d]
return(result)
#calculate vector addition of vec1 and vec2
def vecsum(vec1, vec2):
if vec1 == None:
return(vec2)
if vec2 == None:
return(vec1)
return [x+y for x,y in zip(vec1,vec2)]
#calculate length of vec
def length(vec):
return math.sqrt | makeInputVectors | identifier_name |
RPMutils.py |
NEURONS_PER_DIMENSION = 25
#random seed to use when generating vocabulary vectors
VOCABULARY_SEED = 100
#the minimum confidence value we will require in order to decide we have a match in cleanup memory
MIN_CONFIDENCE = 0.7
#the minimum value we will require to pick either the same or different result
#SAMEDIFF_CHOICE = 0.5
#the minimum score we will require in order to decide we have found a correct rule
CORRECTNESS_THRESHOLD_FIG = 0.8
CORRECTNESS_THRESHOLD_SEQ = 0.7
CORRECTNESS_THRESHOLD_SET = 0.9
#whether or not to add probes when building networks
USE_PROBES = True
#the time (in seconds) for which we present each input
STEP_SIZE = 0.2
#the size (number of base words) of vocabulary to use (we have different versions depending on how many base words are allowed)
VOCAB_SIZE = 80
#true if we are running the controller, false if we are just running the individual modules
RUN_WITH_CONTROLLER = True
#the maximum similarity we will allow when generating a set of vectors
VECTOR_SIMILARITY = 1.0
#if running jobs concurrently, use this to ensure they don't use overlapping data files
JOB_ID = 0
#the mode to run model in
SIMULATION_MODE = SimulationMode.DEFAULT
#whether or not to use cleanup memory
USE_CLEANUP = False
#whether or not to update the cleanup memory after a run
DYNAMIC_MEMORY = False
#the number of threads we want to run with
NUM_THREADS = 0
#whether or not to split n-dimensional populations into n 1-dimensional populations
SPLIT_DIMENSIONS = True
#the threshold to use when detecting same features in figure solver
SAME_THRESHOLD = 1.0
#the threshold to use when detecting different features in figure solver
DIFF_THRESHOLD = 0.9
#the minimum difference required to differentiate between matrix answers
SIMILARITY_THRESHOLD = 0.0
#the folder in which to read/write all files throughout the run
FOLDER_NAME = "test"
#scale on the total number of neurons
NEURON_SCALE = 1.0
#whether or not to do same/diff calculations in neurons
NEURO_SAMEDIFF = True
#whether or not to load rules from file
LOAD_RULES = False
#kill the given percentage of neurons after generation
KILL_NEURONS = 0.0
#returns the appropriate correctness threshold for each module
def correctnessThreshold(module):
if module == "figuresolver":
return CORRECTNESS_THRESHOLD_FIG
if module == "sequencesolver":
return CORRECTNESS_THRESHOLD_SEQ
if module == "setsolver":
return CORRECTNESS_THRESHOLD_SET
#returns a string containing the current value of all the parameters
def getParameterSettings():
keys = getParameterSettings.func_globals.keys()
values = getParameterSettings.func_globals.values()
parms = [[keys[i],values[i]] for i,key in enumerate(keys) if key.isupper()]
return ",".join(["=".join([str(x) for x in pair]) for pair in parms])
#output from origin (used to update cleanup memory)
def cleanupDataFile():
return os.path.join(CURR_LOCATION, "data", FOLDER_NAME, "cleanupoutputdata_" + str(JOB_ID) + ".txt")
#file containing word-vector associations
def vocabFile(d, numwords, seed):
return os.path.join(CURR_LOCATION, "data", FOLDER_NAME, "RPMvocab_" + str(numwords) + "x" + str(d) + "_" + str(seed) + ".txt")
#file containing vectors in cleanup memory
def cleanupFile(d, numwords):
return os.path.join(CURR_LOCATION, "data", FOLDER_NAME, "cleanup_" + str(numwords) + "x" + str(d) + "_" + str(JOB_ID) + ".txt")
#rule output from neural module
def resultFile(modulename):
return os.path.join(CURR_LOCATION, "data", FOLDER_NAME, modulename + "_result_" + str(JOB_ID) + ".txt")
#prediction of blank cell from neural module
def hypothesisFile(modulename):
return os.path.join(CURR_LOCATION, "data", FOLDER_NAME, modulename + "_hypothesis_" + str(JOB_ID) + ".txt")
#file to record the rules used to solve a matrix
def ruleFile():
return os.path.join(CURR_LOCATION, "data", FOLDER_NAME, "rules_" + str(JOB_ID) + ".txt")
#vocabulary present in the matrix
def matrixVocabFile():
return os.path.join(CURR_LOCATION, "data", FOLDER_NAME, "matrixvocab_" + str(JOB_ID) + ".txt")
#returns a dxd matrix with the given value along the diagonal
def eye(d, val):
identity = [[0 for x in range(d)] for x in range(d)]
for i in range(d):
identity[i][i] = val
return(identity)
def str2floatlist(str):
return [float(word) for word in str.split()]
def floatlist2str(floatlist):
return " ".join([str(x) for x in floatlist])
#generates a random d-dimensional vector
def genVector(d):
result = [PDFTools.sampleFloat(GaussianPDF()) for i in range(d)]
result = normalize(result)
return result
#creates a function which outputs a random unit vector
def makeInputVector(name, d, randomSeed=None):
vec = []
if randomSeed == None:
randomSeed = long(time.clock()*100000000000000000)
if randomSeed > -1:
PDFTools.setSeed(randomSeed)
length = 0
for i in range(d):
tmp = PDFTools.sampleFloat(GaussianPDF())
vec = vec + [tmp]
length = length + tmp**2
length = math.sqrt(length)
f = []
for i in range(d):
vec[i] = vec[i] / length
f = f + [ConstantFunction(1, vec[i])]
if randomSeed > -1:
PDFTools.setSeed(long(time.clock()*1000000000000000))
print vec
return(FunctionInput(name, f, Units.UNK))
#create function inputs, where each function outputs one of the given vectors
def makeInputVectors(names, vectors):
return [FunctionInput(names[i], [ConstantFunction(1,x) for x in vec], Units.UNK) for i,vec in enumerate(vectors)]
#load vectors from a file and create corresponding output functions
def loadInputVectors(filename):
file = open(filename)
vectors = [str2floatlist(line) for line in file]
file.close()
return makeInputVectors(["vec_" + str(i) for i in range(len(vectors))], vectors)
#an NEF ensemble factory with more evaluation points than normal
class NEFMorePoints(NEFEnsembleFactoryImpl):
def getNumEvalPoints(self, d):
#add shortcut so that it doesn't waste time evaluating a bunch of points when its in direct mode
if SIMULATION_MODE == SimulationMode.DIRECT:
return 1
pointsPerDim = [0, 1000, 2000]
if d < 3:
return(pointsPerDim[d])
else:
return(d*500)
#default ensemble factory used in the model
def defaultEnsembleFactory():
ef=NEFMorePoints()
ef.nodeFactory.tauRC = 0.02
ef.nodeFactory.tauRef = 0.002
ef.nodeFactory.maxRate=IndicatorPDF(200,500)
ef.nodeFactory.intercept=IndicatorPDF(-1, 1)
ef.beQuiet()
return(ef)
#returns all the probes containing name
def findMatchingProbes(probes, name, subname=None):
result = []
for probe in probes:
if name in probe.getTarget().getName() or ((probe.getEnsembleName() != None) and (probe.getEnsembleName().count(name) > 0)):
result = result + [probe]
if subname == None:
return result
else:
return findMatchingProbes(result, subname)
#calculate circular convolution of vec1 and vec2
def cconv(vec1, vec2):
if vec1 == None:
return(vec2)
if vec2 == None:
return(vec1)
d = len(vec1)
result = [0 for i in range(d)]
for i in range(d):
for j in range(d):
result[i] = result[i] + vec1[j] * vec2[(i - j) % d]
return(result)
#calculate vector addition of vec1 and vec2
def vecsum(vec1, vec2):
if vec1 == None:
return(vec2)
if vec2 == None:
return(vec1)
return [x+y for x,y in zip(vec1,vec2)]
#calculate length of vec
def length(vec):
| return math.sqrt(sum([x**2 for x in vec])) | identifier_body | |
RPMutils.py | 2
#the size (number of base words) of vocabulary to use (we have different versions depending on how many base words are allowed)
VOCAB_SIZE = 80
#true if we are running the controller, false if we are just running the individual modules
RUN_WITH_CONTROLLER = True
#the maximum similarity we will allow when generating a set of vectors
VECTOR_SIMILARITY = 1.0
#if running jobs concurrently, use this to ensure they don't use overlapping data files
JOB_ID = 0
#the mode to run model in
SIMULATION_MODE = SimulationMode.DEFAULT
#whether or not to use cleanup memory
USE_CLEANUP = False
#whether or not to update the cleanup memory after a run
DYNAMIC_MEMORY = False
#the number of threads we want to run with
NUM_THREADS = 0
#whether or not to split n-dimensional populations into n 1-dimensional populations
SPLIT_DIMENSIONS = True
#the threshold to use when detecting same features in figure solver
SAME_THRESHOLD = 1.0
#the threshold to use when detecting different features in figure solver
DIFF_THRESHOLD = 0.9
#the minimum difference required to differentiate between matrix answers
SIMILARITY_THRESHOLD = 0.0
#the folder in which to read/write all files throughout the run
FOLDER_NAME = "test"
#scale on the total number of neurons
NEURON_SCALE = 1.0
#whether or not to do same/diff calculations in neurons
NEURO_SAMEDIFF = True
#whether or not to load rules from file
LOAD_RULES = False
#kill the given percentage of neurons after generation
KILL_NEURONS = 0.0
#returns the appropriate correctness threshold for each module
def correctnessThreshold(module):
if module == "figuresolver":
return CORRECTNESS_THRESHOLD_FIG
if module == "sequencesolver":
return CORRECTNESS_THRESHOLD_SEQ
if module == "setsolver":
return CORRECTNESS_THRESHOLD_SET
#returns a string containing the current value of all the parameters
def getParameterSettings():
keys = getParameterSettings.func_globals.keys()
values = getParameterSettings.func_globals.values()
parms = [[keys[i],values[i]] for i,key in enumerate(keys) if key.isupper()]
return ",".join(["=".join([str(x) for x in pair]) for pair in parms])
#output from origin (used to update cleanup memory)
def cleanupDataFile():
return os.path.join(CURR_LOCATION, "data", FOLDER_NAME, "cleanupoutputdata_" + str(JOB_ID) + ".txt")
#file containing word-vector associations
def vocabFile(d, numwords, seed):
return os.path.join(CURR_LOCATION, "data", FOLDER_NAME, "RPMvocab_" + str(numwords) + "x" + str(d) + "_" + str(seed) + ".txt")
#file containing vectors in cleanup memory
def cleanupFile(d, numwords):
return os.path.join(CURR_LOCATION, "data", FOLDER_NAME, "cleanup_" + str(numwords) + "x" + str(d) + "_" + str(JOB_ID) + ".txt")
#rule output from neural module
def resultFile(modulename):
return os.path.join(CURR_LOCATION, "data", FOLDER_NAME, modulename + "_result_" + str(JOB_ID) + ".txt")
#prediction of blank cell from neural module
def hypothesisFile(modulename):
return os.path.join(CURR_LOCATION, "data", FOLDER_NAME, modulename + "_hypothesis_" + str(JOB_ID) + ".txt")
#file to record the rules used to solve a matrix
def ruleFile():
return os.path.join(CURR_LOCATION, "data", FOLDER_NAME, "rules_" + str(JOB_ID) + ".txt")
#vocabulary present in the matrix
def matrixVocabFile():
return os.path.join(CURR_LOCATION, "data", FOLDER_NAME, "matrixvocab_" + str(JOB_ID) + ".txt")
#returns a dxd matrix with the given value along the diagonal
def eye(d, val):
identity = [[0 for x in range(d)] for x in range(d)]
for i in range(d):
identity[i][i] = val
return(identity)
def str2floatlist(str):
return [float(word) for word in str.split()]
def floatlist2str(floatlist):
return " ".join([str(x) for x in floatlist])
#generates a random d-dimensional vector
def genVector(d):
result = [PDFTools.sampleFloat(GaussianPDF()) for i in range(d)]
result = normalize(result)
return result
#creates a function which outputs a random unit vector
def makeInputVector(name, d, randomSeed=None):
vec = []
if randomSeed == None:
randomSeed = long(time.clock()*100000000000000000)
if randomSeed > -1:
PDFTools.setSeed(randomSeed)
length = 0
for i in range(d):
tmp = PDFTools.sampleFloat(GaussianPDF())
vec = vec + [tmp]
length = length + tmp**2
length = math.sqrt(length)
f = []
for i in range(d):
vec[i] = vec[i] / length
f = f + [ConstantFunction(1, vec[i])]
if randomSeed > -1:
PDFTools.setSeed(long(time.clock()*1000000000000000))
print vec
return(FunctionInput(name, f, Units.UNK))
#create function inputs, where each function outputs one of the given vectors
def makeInputVectors(names, vectors):
return [FunctionInput(names[i], [ConstantFunction(1,x) for x in vec], Units.UNK) for i,vec in enumerate(vectors)]
#load vectors from a file and create corresponding output functions
def loadInputVectors(filename):
file = open(filename)
vectors = [str2floatlist(line) for line in file]
file.close()
return makeInputVectors(["vec_" + str(i) for i in range(len(vectors))], vectors)
#an NEF ensemble factory with more evaluation points than normal
class NEFMorePoints(NEFEnsembleFactoryImpl):
def getNumEvalPoints(self, d):
#add shortcut so that it doesn't waste time evaluating a bunch of points when its in direct mode
if SIMULATION_MODE == SimulationMode.DIRECT:
return 1
pointsPerDim = [0, 1000, 2000]
if d < 3:
return(pointsPerDim[d])
else:
return(d*500)
#default ensemble factory used in the model
def defaultEnsembleFactory():
ef=NEFMorePoints()
ef.nodeFactory.tauRC = 0.02
ef.nodeFactory.tauRef = 0.002
ef.nodeFactory.maxRate=IndicatorPDF(200,500)
ef.nodeFactory.intercept=IndicatorPDF(-1, 1)
ef.beQuiet()
return(ef)
#returns all the probes containing name
def findMatchingProbes(probes, name, subname=None):
result = []
for probe in probes:
if name in probe.getTarget().getName() or ((probe.getEnsembleName() != None) and (probe.getEnsembleName().count(name) > 0)):
result = result + [probe]
if subname == None:
return result
else:
return findMatchingProbes(result, subname)
#calculate circular convolution of vec1 and vec2
def cconv(vec1, vec2):
if vec1 == None:
return(vec2)
if vec2 == None:
return(vec1)
d = len(vec1)
result = [0 for i in range(d)]
for i in range(d):
for j in range(d):
result[i] = result[i] + vec1[j] * vec2[(i - j) % d]
return(result)
#calculate vector addition of vec1 and vec2
def vecsum(vec1, vec2):
if vec1 == None:
return(vec2)
if vec2 == None:
return(vec1)
return [x+y for x,y in zip(vec1,vec2)]
#calculate length of vec
def length(vec):
return math.sqrt(sum([x**2 for x in vec]))
#normalize vec
def normalize(vec):
l = length(vec)
if l == 0:
return vec
return [x/l for x in vec]
#calculate similarity between vec1 and vec2
def similarity(vec1, vec2):
if len(vec1) != len(vec2):
System.err.println("vectors not the same length in RPMutils.similarity(), something is wrong")
System.err.println(str(len(vec1)) + " " + str(len(vec2)))
return sum([x*y for x,y in zip(vec1,vec2)])
def ainv(vec):
newvec = []
for i,val in enumerate(vec):
newvec += [vec[-i % len(vec)]]
return newvec
#calculate mean value of vec
def mean(vec):
if len(vec) == 0:
| return 0.0 | conditional_block | |
runtime.py | _periods * period, but since there is
no delay in the first loop, there will
be num_period + 1 loop iterations.
"""
if period < 0.0:
raise RuntimeError('Specified period is invalid. Must be >= 0.')
if num_period is not None:
if num_period < 0:
raise RuntimeError('Specified num_period is invalid. Must be > 0.')
if not isinstance(num_period, int):
raise ValueError('num_period must be a whole number.')
self._period = period
self._num_loop = num_period
# Add one to ensure:
# total_time == num_loop * period
# because we do not delay the start iteration
if self._num_loop is not None:
self._num_loop += 1
def __iter__(self):
"""Set up a timed loop
Iteration method for timed loop. This iterator can be used in
a for statement to execute the loop periodically.
"""
self._loop_idx = 0
self._target_time = time.time()
return self
def __next__(self):
"""Sleep until next targeted time for loop and update counter
"""
result = self._loop_idx
if self._loop_idx == self._num_loop:
raise StopIteration
if self._loop_idx != 0:
sleep_time = self._target_time - time.time()
if sleep_time > 0:
self.wait(sleep_time)
self._target_time += self._period
self._loop_idx += 1
return result
def wait(self, timeout):
"""Pass-through to time.sleep()
Args:
timeout (float): Target interval for the loop execution in
units of seconds.
"""
time.sleep(timeout)
class PIDTimedLoop(TimedLoop):
def __init__(self, pid, period, num_period=None):
"""Similar to the TimedLoop but stop when subprocess ends
The number of loops executed is one greater than the number of
time intervals requested, and that the first iteration is not
delayed. The total amount of time spanned by the loop is the
product of the two input parameters.
To create an infinite loop, specify num_period is None.
Loop will always terminate when the subprocess pid terminates.
Args:
pid (Popen): Object returned by subprocess.Popen() constructor.
period (float): Target interval for the loop execution in
units of seconds.
num_period (int): Number of time periods spanned by the
loop. The total loop time is
num_periods * period, but since there is
no delay in the first loop, there will
be num_period + 1 loop iterations.
"""
super(PIDTimedLoop, self).__init__(period, num_period)
self._pid = pid
self._is_active = pid.poll() is None
def wait(self, timeout):
"""Wait for timeout seconds or until pid ends
Args:
timeout (float): Target interval for the loop execution in
units of seconds.
Raises:
StopIteration: When last call to wait termintated due to
the process ending
"""
if not self._is_active:
raise StopIteration
try:
self._pid.wait(timeout=timeout)
self._is_active = False
except subprocess.TimeoutExpired:
pass
class Agent:
"""Base class that documents the interfaces required by an agent
Agent objects are used to initialize a Controller object and
define the control algorithm.
"""
def __init__(self):
raise NotImplementedError('Agent is an abstract base class')
def get_signals(self):
"""Get list of read requests
The returned signals will be sampled from the platform by the
Controller and passed into Agent.update().
Returns:
list((str, int, int)): List of request tuples. Each
request comprises a signal name,
domain type, and domain index.
"""
raise NotImplementedError('Agent is an abstract base class')
def get_controls(self):
"""Get list of control requests
The returned controls will be set in the platform by the Controller
based on the return value from Agent.update().
Returns:
list((str, int, int)): List of request tuples. Each
request comprises a control name,
domain type, and domain index.
"""
raise NotImplementedError('Agent is an abstract base class')
def run_begin(self, policy, profile):
"""Called by Controller at the start of each run
The policy for the run is passed through to the agent from the
Controller.run() input. For some agents, the policy may always be
None.
Args:
policy (object): The Agent specific policy provided to the
Controller.run() method.
profile (str): Profile name to associate with the report
"""
raise NotImplementedError('Agent is an abstract base class')
def policy_repr(self, policy):
"""Create a string representation of a policy suitable for printing
"""
return policy.__repr__()
def run_end(self):
"""Called by the Controller at the end of each run
The result of calling the get_report() method after run_end() should
reflect the same report until the next call to run_end(). This report
will document the measurements made between the last calls to
run_begin() and run_end(). Each call to run_end() will follow a
previous call to run_begin(). The run_end() method will be called by
the Controller even if the run resulted in an error that raises an
exception. In this way resources associated with a single run can be
released when the run_end() method is called.
"""
raise NotImplementedError('Agent is an abstract base class')
def update(self, signals):
"""Called periodically by the Controller
The signals that specified by get_signals() will be passed as inputs
to the method by the Controller. The update() method will be called
periodically and the interval is set by the value returned by
Agent.get_period().
Args:
signals (list(float)): Recently read signal values
Returns:
list(float): Control values for next control interval
"""
raise NotImplementedError('Agent is an abstract base class')
def get_period(self):
"""Get the target time interval for the control loop
Returns:
float: Time interval in seconds
"""
raise NotImplementedError('Agent is an abstract base class')
def get_report(self):
"""Summary of all data collected by calls to update()
The report covers the interval of time between the last two calls to
Agent.begin_run() / Agent.end_run(). Until the next call to
Agent.begin_run(), the same report will be returned by this method.
The Controller.run() method will return this report upon completion of
the run.
Returns:
str: Human readable report
"""
raise NotImplementedError('Agent is an abstract base class')
class Controller:
"""Class that supports a runtime control algorithm
"""
def __init__(self, agent, timeout=0):
"""Controller constructor
Args:
agent (Agent): Object that conforms to the Agent class
interface
timeout (float): The agent algorithm will run for the full
duration of the application execution if timeout
is 0. Setting the timeout to a non-zero value
will end the agent algorithm after the specified
period of time or when the application ends,
whichever occurs first.
"""
if not isinstance(agent, Agent):
raise ValueError('agent must be a subclass of Agent.')
if timeout < 0:
raise ValueError('timeout must be >= 0')
self._agent = agent
self._signals = agent.get_signals()
self._controls = agent.get_controls()
self._signals_idx = []
self._controls_idx = []
self._update_period = agent.get_period()
self._num_update = None
if timeout != 0:
self._num_update = math.ceil(timeout / self._update_period)
self._returncode = None
def push_all(self):
self._signals_idx = [pio.push_signal(*ss) for ss in self._signals]
self._controls_idx = [pio.push_control(*cc) for cc in self._controls]
def read_all_signals(self):
"""Sample for all signals pushed with pio
Returns:
list(float): Sampled values for each signal
"""
return [pio.sample(signal_idx)
for signal_idx in self._signals_idx]
def returncode(self):
"""Get the return code of the application process
Returns:
int: Return code of app process
"""
if self._returncode is None:
raise RuntimeError('App process is still running')
return self._returncode
def run(self, argv, policy=None, profile=None):
| """Execute control loop defined by agent
Interfaces with PlatformIO directly through the geopmdpy.pio module.
Args:
argv (list(str)): Arguments for application that is executed
policy (object): Policy for Agent to use during the run
Returns:
str: Human readable report created by agent
"""
sys.stderr.write('<geopmdpy> RUN BEGIN\n')
if profile is None:
profile = ' '.join([shlex.quote(arg) for arg in argv])
try:
pio.save_control()
self.push_all()
pid = subprocess.Popen(argv) | identifier_body | |
runtime.py | 90643310547
"""
def __init__(self, period, num_period=None):
"""Constructor for timed loop object
The number of loops executed is one greater than the number of
time intervals requested, and that the first iteration is not
delayed. The total amount of time spanned by the loop is the
product of the two input parameters.
To create an infinite loop, specify num_period is None.
Args:
period (float): Target interval for the loop execution in
units of seconds.
num_period (int): Number of time periods spanned by the
loop. The total loop time is
num_periods * period, but since there is
no delay in the first loop, there will
be num_period + 1 loop iterations.
"""
if period < 0.0:
raise RuntimeError('Specified period is invalid. Must be >= 0.')
if num_period is not None:
if num_period < 0:
raise RuntimeError('Specified num_period is invalid. Must be > 0.')
if not isinstance(num_period, int):
raise ValueError('num_period must be a whole number.')
self._period = period
self._num_loop = num_period
# Add one to ensure:
# total_time == num_loop * period
# because we do not delay the start iteration
if self._num_loop is not None:
self._num_loop += 1
def __iter__(self):
"""Set up a timed loop
Iteration method for timed loop. This iterator can be used in
a for statement to execute the loop periodically.
"""
self._loop_idx = 0
self._target_time = time.time()
return self
def __next__(self):
"""Sleep until next targeted time for loop and update counter
"""
result = self._loop_idx
if self._loop_idx == self._num_loop:
raise StopIteration
if self._loop_idx != 0:
sleep_time = self._target_time - time.time()
if sleep_time > 0:
self.wait(sleep_time)
self._target_time += self._period
self._loop_idx += 1
return result
def wait(self, timeout):
"""Pass-through to time.sleep()
Args:
timeout (float): Target interval for the loop execution in
units of seconds.
"""
time.sleep(timeout)
class PIDTimedLoop(TimedLoop):
def __init__(self, pid, period, num_period=None):
"""Similar to the TimedLoop but stop when subprocess ends
The number of loops executed is one greater than the number of
time intervals requested, and that the first iteration is not
delayed. The total amount of time spanned by the loop is the
product of the two input parameters.
To create an infinite loop, specify num_period is None.
Loop will always terminate when the subprocess pid terminates.
Args:
pid (Popen): Object returned by subprocess.Popen() constructor.
period (float): Target interval for the loop execution in
units of seconds.
num_period (int): Number of time periods spanned by the
loop. The total loop time is
num_periods * period, but since there is
no delay in the first loop, there will
be num_period + 1 loop iterations.
"""
super(PIDTimedLoop, self).__init__(period, num_period)
self._pid = pid
self._is_active = pid.poll() is None
def wait(self, timeout):
"""Wait for timeout seconds or until pid ends
Args:
timeout (float): Target interval for the loop execution in
units of seconds.
Raises:
StopIteration: When last call to wait termintated due to
the process ending
"""
if not self._is_active:
raise StopIteration
try:
self._pid.wait(timeout=timeout)
self._is_active = False
except subprocess.TimeoutExpired:
pass
class Agent:
"""Base class that documents the interfaces required by an agent
Agent objects are used to initialize a Controller object and
define the control algorithm.
"""
def __init__(self):
raise NotImplementedError('Agent is an abstract base class')
def get_signals(self):
"""Get list of read requests
The returned signals will be sampled from the platform by the
Controller and passed into Agent.update().
Returns:
list((str, int, int)): List of request tuples. Each
request comprises a signal name,
domain type, and domain index.
"""
raise NotImplementedError('Agent is an abstract base class')
def get_controls(self):
"""Get list of control requests
The returned controls will be set in the platform by the Controller
based on the return value from Agent.update().
Returns:
list((str, int, int)): List of request tuples. Each
request comprises a control name,
domain type, and domain index.
"""
raise NotImplementedError('Agent is an abstract base class')
def run_begin(self, policy, profile):
"""Called by Controller at the start of each run
The policy for the run is passed through to the agent from the
Controller.run() input. For some agents, the policy may always be
None.
Args:
policy (object): The Agent specific policy provided to the
Controller.run() method.
profile (str): Profile name to associate with the report
"""
raise NotImplementedError('Agent is an abstract base class')
def policy_repr(self, policy):
"""Create a string representation of a policy suitable for printing
"""
return policy.__repr__()
def run_end(self):
"""Called by the Controller at the end of each run
The result of calling the get_report() method after run_end() should
reflect the same report until the next call to run_end(). This report
will document the measurements made between the last calls to
run_begin() and run_end(). Each call to run_end() will follow a
previous call to run_begin(). The run_end() method will be called by
the Controller even if the run resulted in an error that raises an
exception. In this way resources associated with a single run can be
released when the run_end() method is called.
"""
raise NotImplementedError('Agent is an abstract base class')
def update(self, signals):
"""Called periodically by the Controller
The signals that specified by get_signals() will be passed as inputs
to the method by the Controller. The update() method will be called
periodically and the interval is set by the value returned by
Agent.get_period().
Args:
signals (list(float)): Recently read signal values
Returns:
list(float): Control values for next control interval
"""
raise NotImplementedError('Agent is an abstract base class')
def get_period(self):
"""Get the target time interval for the control loop
Returns:
float: Time interval in seconds
"""
raise NotImplementedError('Agent is an abstract base class')
def get_report(self):
"""Summary of all data collected by calls to update()
The report covers the interval of time between the last two calls to
Agent.begin_run() / Agent.end_run(). Until the next call to
Agent.begin_run(), the same report will be returned by this method.
The Controller.run() method will return this report upon completion of
the run.
Returns:
str: Human readable report
"""
raise NotImplementedError('Agent is an abstract base class')
class Controller:
"""Class that supports a runtime control algorithm
"""
def __init__(self, agent, timeout=0):
"""Controller constructor
Args:
agent (Agent): Object that conforms to the Agent class
interface
timeout (float): The agent algorithm will run for the full
duration of the application execution if timeout
is 0. Setting the timeout to a non-zero value
will end the agent algorithm after the specified
period of time or when the application ends,
whichever occurs first.
"""
if not isinstance(agent, Agent):
raise ValueError('agent must be a subclass of Agent.')
if timeout < 0:
raise ValueError('timeout must be >= 0')
self._agent = agent
self._signals = agent.get_signals()
self._controls = agent.get_controls()
self._signals_idx = []
self._controls_idx = []
self._update_period = agent.get_period()
self._num_update = None
if timeout != 0:
self._num_update = math.ceil(timeout / self._update_period)
self._returncode = None
def push_all(self):
self._signals_idx = [pio.push_signal(*ss) for ss in self._signals]
self._controls_idx = [pio.push_control(*cc) for cc in self._controls]
def read_all_signals(self):
"""Sample for all signals pushed with pio
Returns:
list(float): Sampled values for each signal
"""
return [pio.sample(signal_idx)
for signal_idx in self._signals_idx]
def returncode(self):
"""Get the return code of the application process
Returns:
int: Return code of app process
"""
if self._returncode is None:
raise RuntimeError('App process is still running') | random_line_split | ||
runtime.py | : 0.10126090049743652
2: 0.20174455642700195
3: 0.30123186111450195
4: 0.4010961055755615
5: 0.5020360946655273
6: 0.6011238098144531
7: 0.7011349201202393
8: 0.8020164966583252
9: 0.9015650749206543
10: 1.0021190643310547
"""
def __init__(self, period, num_period=None):
"""Constructor for timed loop object
The number of loops executed is one greater than the number of
time intervals requested, and that the first iteration is not
delayed. The total amount of time spanned by the loop is the
product of the two input parameters.
To create an infinite loop, specify num_period is None.
Args:
period (float): Target interval for the loop execution in
units of seconds.
num_period (int): Number of time periods spanned by the
loop. The total loop time is
num_periods * period, but since there is
no delay in the first loop, there will
be num_period + 1 loop iterations.
"""
if period < 0.0:
raise RuntimeError('Specified period is invalid. Must be >= 0.')
if num_period is not None:
if num_period < 0:
raise RuntimeError('Specified num_period is invalid. Must be > 0.')
if not isinstance(num_period, int):
raise ValueError('num_period must be a whole number.')
self._period = period
self._num_loop = num_period
# Add one to ensure:
# total_time == num_loop * period
# because we do not delay the start iteration
if self._num_loop is not None:
self._num_loop += 1
def __iter__(self):
"""Set up a timed loop
Iteration method for timed loop. This iterator can be used in
a for statement to execute the loop periodically.
"""
self._loop_idx = 0
self._target_time = time.time()
return self
def __next__(self):
"""Sleep until next targeted time for loop and update counter
"""
result = self._loop_idx
if self._loop_idx == self._num_loop:
raise StopIteration
if self._loop_idx != 0:
sleep_time = self._target_time - time.time()
if sleep_time > 0:
self.wait(sleep_time)
self._target_time += self._period
self._loop_idx += 1
return result
def wait(self, timeout):
"""Pass-through to time.sleep()
Args:
timeout (float): Target interval for the loop execution in
units of seconds.
"""
time.sleep(timeout)
class PIDTimedLoop(TimedLoop):
def __init__(self, pid, period, num_period=None):
"""Similar to the TimedLoop but stop when subprocess ends
The number of loops executed is one greater than the number of
time intervals requested, and that the first iteration is not
delayed. The total amount of time spanned by the loop is the
product of the two input parameters.
To create an infinite loop, specify num_period is None.
Loop will always terminate when the subprocess pid terminates.
Args:
pid (Popen): Object returned by subprocess.Popen() constructor.
period (float): Target interval for the loop execution in
units of seconds.
num_period (int): Number of time periods spanned by the
loop. The total loop time is
num_periods * period, but since there is
no delay in the first loop, there will
be num_period + 1 loop iterations.
"""
super(PIDTimedLoop, self).__init__(period, num_period)
self._pid = pid
self._is_active = pid.poll() is None
def wait(self, timeout):
"""Wait for timeout seconds or until pid ends
Args:
timeout (float): Target interval for the loop execution in
units of seconds.
Raises:
StopIteration: When last call to wait termintated due to
the process ending
"""
if not self._is_active:
raise StopIteration
try:
self._pid.wait(timeout=timeout)
self._is_active = False
except subprocess.TimeoutExpired:
pass
class Agent:
"""Base class that documents the interfaces required by an agent
Agent objects are used to initialize a Controller object and
define the control algorithm.
"""
def __init__(self):
raise NotImplementedError('Agent is an abstract base class')
def get_signals(self):
"""Get list of read requests
The returned signals will be sampled from the platform by the
Controller and passed into Agent.update().
Returns:
list((str, int, int)): List of request tuples. Each
request comprises a signal name,
domain type, and domain index.
"""
raise NotImplementedError('Agent is an abstract base class')
def get_controls(self):
"""Get list of control requests
The returned controls will be set in the platform by the Controller
based on the return value from Agent.update().
Returns:
list((str, int, int)): List of request tuples. Each
request comprises a control name,
domain type, and domain index.
"""
raise NotImplementedError('Agent is an abstract base class')
def run_begin(self, policy, profile):
"""Called by Controller at the start of each run
The policy for the run is passed through to the agent from the
Controller.run() input. For some agents, the policy may always be
None.
Args:
policy (object): The Agent specific policy provided to the
Controller.run() method.
profile (str): Profile name to associate with the report
"""
raise NotImplementedError('Agent is an abstract base class')
def policy_repr(self, policy):
"""Create a string representation of a policy suitable for printing
"""
return policy.__repr__()
def run_end(self):
"""Called by the Controller at the end of each run
The result of calling the get_report() method after run_end() should
reflect the same report until the next call to run_end(). This report
will document the measurements made between the last calls to
run_begin() and run_end(). Each call to run_end() will follow a
previous call to run_begin(). The run_end() method will be called by
the Controller even if the run resulted in an error that raises an
exception. In this way resources associated with a single run can be
released when the run_end() method is called.
"""
raise NotImplementedError('Agent is an abstract base class')
def update(self, signals):
"""Called periodically by the Controller
The signals that specified by get_signals() will be passed as inputs
to the method by the Controller. The update() method will be called
periodically and the interval is set by the value returned by
Agent.get_period().
Args:
signals (list(float)): Recently read signal values
Returns:
list(float): Control values for next control interval
"""
raise NotImplementedError('Agent is an abstract base class')
def | (self):
"""Get the target time interval for the control loop
Returns:
float: Time interval in seconds
"""
raise NotImplementedError('Agent is an abstract base class')
def get_report(self):
"""Summary of all data collected by calls to update()
The report covers the interval of time between the last two calls to
Agent.begin_run() / Agent.end_run(). Until the next call to
Agent.begin_run(), the same report will be returned by this method.
The Controller.run() method will return this report upon completion of
the run.
Returns:
str: Human readable report
"""
raise NotImplementedError('Agent is an abstract base class')
class Controller:
"""Class that supports a runtime control algorithm
"""
def __init__(self, agent, timeout=0):
"""Controller constructor
Args:
agent (Agent): Object that conforms to the Agent class
interface
timeout (float): The agent algorithm will run for the full
duration of the application execution if timeout
is 0. Setting the timeout to a non-zero value
will end the agent algorithm after the specified
period of time or when the application ends,
whichever occurs first.
"""
if not isinstance(agent, Agent):
raise ValueError('agent must be a subclass of Agent.')
if timeout < 0:
raise ValueError('timeout must be >= 0')
self._agent = | get_period | identifier_name |
runtime.py | if num_period < 0:
raise RuntimeError('Specified num_period is invalid. Must be > 0.')
if not isinstance(num_period, int):
raise ValueError('num_period must be a whole number.')
self._period = period
self._num_loop = num_period
# Add one to ensure:
# total_time == num_loop * period
# because we do not delay the start iteration
if self._num_loop is not None:
self._num_loop += 1
def __iter__(self):
"""Set up a timed loop
Iteration method for timed loop. This iterator can be used in
a for statement to execute the loop periodically.
"""
self._loop_idx = 0
self._target_time = time.time()
return self
def __next__(self):
"""Sleep until next targeted time for loop and update counter
"""
result = self._loop_idx
if self._loop_idx == self._num_loop:
raise StopIteration
if self._loop_idx != 0:
sleep_time = self._target_time - time.time()
if sleep_time > 0:
self.wait(sleep_time)
self._target_time += self._period
self._loop_idx += 1
return result
def wait(self, timeout):
"""Pass-through to time.sleep()
Args:
timeout (float): Target interval for the loop execution in
units of seconds.
"""
time.sleep(timeout)
class PIDTimedLoop(TimedLoop):
def __init__(self, pid, period, num_period=None):
"""Similar to the TimedLoop but stop when subprocess ends
The number of loops executed is one greater than the number of
time intervals requested, and that the first iteration is not
delayed. The total amount of time spanned by the loop is the
product of the two input parameters.
To create an infinite loop, specify num_period is None.
Loop will always terminate when the subprocess pid terminates.
Args:
pid (Popen): Object returned by subprocess.Popen() constructor.
period (float): Target interval for the loop execution in
units of seconds.
num_period (int): Number of time periods spanned by the
loop. The total loop time is
num_periods * period, but since there is
no delay in the first loop, there will
be num_period + 1 loop iterations.
"""
super(PIDTimedLoop, self).__init__(period, num_period)
self._pid = pid
self._is_active = pid.poll() is None
def wait(self, timeout):
"""Wait for timeout seconds or until pid ends
Args:
timeout (float): Target interval for the loop execution in
units of seconds.
Raises:
StopIteration: When last call to wait termintated due to
the process ending
"""
if not self._is_active:
raise StopIteration
try:
self._pid.wait(timeout=timeout)
self._is_active = False
except subprocess.TimeoutExpired:
pass
class Agent:
"""Base class that documents the interfaces required by an agent
Agent objects are used to initialize a Controller object and
define the control algorithm.
"""
def __init__(self):
raise NotImplementedError('Agent is an abstract base class')
def get_signals(self):
"""Get list of read requests
The returned signals will be sampled from the platform by the
Controller and passed into Agent.update().
Returns:
list((str, int, int)): List of request tuples. Each
request comprises a signal name,
domain type, and domain index.
"""
raise NotImplementedError('Agent is an abstract base class')
def get_controls(self):
"""Get list of control requests
The returned controls will be set in the platform by the Controller
based on the return value from Agent.update().
Returns:
list((str, int, int)): List of request tuples. Each
request comprises a control name,
domain type, and domain index.
"""
raise NotImplementedError('Agent is an abstract base class')
def run_begin(self, policy, profile):
"""Called by Controller at the start of each run
The policy for the run is passed through to the agent from the
Controller.run() input. For some agents, the policy may always be
None.
Args:
policy (object): The Agent specific policy provided to the
Controller.run() method.
profile (str): Profile name to associate with the report
"""
raise NotImplementedError('Agent is an abstract base class')
def policy_repr(self, policy):
"""Create a string representation of a policy suitable for printing
"""
return policy.__repr__()
def run_end(self):
"""Called by the Controller at the end of each run
The result of calling the get_report() method after run_end() should
reflect the same report until the next call to run_end(). This report
will document the measurements made between the last calls to
run_begin() and run_end(). Each call to run_end() will follow a
previous call to run_begin(). The run_end() method will be called by
the Controller even if the run resulted in an error that raises an
exception. In this way resources associated with a single run can be
released when the run_end() method is called.
"""
raise NotImplementedError('Agent is an abstract base class')
def update(self, signals):
"""Called periodically by the Controller
The signals that specified by get_signals() will be passed as inputs
to the method by the Controller. The update() method will be called
periodically and the interval is set by the value returned by
Agent.get_period().
Args:
signals (list(float)): Recently read signal values
Returns:
list(float): Control values for next control interval
"""
raise NotImplementedError('Agent is an abstract base class')
def get_period(self):
"""Get the target time interval for the control loop
Returns:
float: Time interval in seconds
"""
raise NotImplementedError('Agent is an abstract base class')
def get_report(self):
"""Summary of all data collected by calls to update()
The report covers the interval of time between the last two calls to
Agent.begin_run() / Agent.end_run(). Until the next call to
Agent.begin_run(), the same report will be returned by this method.
The Controller.run() method will return this report upon completion of
the run.
Returns:
str: Human readable report
"""
raise NotImplementedError('Agent is an abstract base class')
class Controller:
"""Class that supports a runtime control algorithm
"""
def __init__(self, agent, timeout=0):
"""Controller constructor
Args:
agent (Agent): Object that conforms to the Agent class
interface
timeout (float): The agent algorithm will run for the full
duration of the application execution if timeout
is 0. Setting the timeout to a non-zero value
will end the agent algorithm after the specified
period of time or when the application ends,
whichever occurs first.
"""
if not isinstance(agent, Agent):
raise ValueError('agent must be a subclass of Agent.')
if timeout < 0:
raise ValueError('timeout must be >= 0')
self._agent = agent
self._signals = agent.get_signals()
self._controls = agent.get_controls()
self._signals_idx = []
self._controls_idx = []
self._update_period = agent.get_period()
self._num_update = None
if timeout != 0:
self._num_update = math.ceil(timeout / self._update_period)
self._returncode = None
def push_all(self):
self._signals_idx = [pio.push_signal(*ss) for ss in self._signals]
self._controls_idx = [pio.push_control(*cc) for cc in self._controls]
def read_all_signals(self):
"""Sample for all signals pushed with pio
Returns:
list(float): Sampled values for each signal
"""
return [pio.sample(signal_idx)
for signal_idx in self._signals_idx]
def returncode(self):
"""Get the return code of the application process
Returns:
int: Return code of app process
"""
if self._returncode is None:
raise RuntimeError('App process is still running')
return self._returncode
def run(self, argv, policy=None, profile=None):
"""Execute control loop defined by agent
Interfaces with PlatformIO directly through the geopmdpy.pio module.
Args:
argv (list(str)): Arguments for application that is executed
policy (object): Policy for Agent to use during the run
Returns:
str: Human readable report created by agent
"""
sys.stderr.write('<geopmdpy> RUN BEGIN\n')
if profile is None:
profile = ' '.join([shlex.quote(arg) for arg in argv])
try:
pio.save_control()
self.push_all()
pid = subprocess.Popen(argv)
self._agent.run_begin(policy, profile)
for loop_idx in PIDTimedLoop(pid, self._update_period, self._num_update):
pio.read_batch()
signals = self.read_all_signals()
new_settings = self._agent.update(signals)
if pid.poll() is not None:
| break | conditional_block | |
FutureWork.py |
"NamesS":NamesShort,
"NamesGlobal":NFull,
"NamesGlobalS":["Depth\\ [km]", "Vs\\ [km/s]", "Vp\\ [km/s]", "\\rho\\ [T/m^3]"],
"DataUnits":"[km/s]",
"DataName":"Phase\\ velocity\\ [km/s]",
"DataAxis":"Periods\\ [s]"}
# Defining the forward modelling function
def funcSurf96(model):
import numpy as np
from pysurf96 import surf96
Vp = np.asarray([0.300, 0.750, 1.5]) # Defined again inside the function for parallelization
rho = np.asarray([1.5, 1.9, 2.2]) # Idem
nLayer = 3 # Idem
Frequency = np.logspace(0.1,1.5,50) # Idem
Periods = np.divide(1,Frequency) # Idem
return surf96(thickness=np.append(model[0:nLayer-1], [0]), # The 2 first values of the model are the thicknesses
vp=Vp, # Fixed value for Vp
vs=model[nLayer-1:2*nLayer-1], # The 3 last values of the model are the Vs
rho=rho, # Fixed value for rho
periods=Periods, # Periods at which to compute the model
wave="rayleigh", # Type of wave to simulate
mode=1, # Only compute the fundamental mode
velocity="phase", # Use phase velocity and not group velocity
flat_earth=True) # Local model where the flat-earth hypothesis makes sens
forwardFun = funcSurf96
forward = {"Fun":forwardFun,"Axis":Periods}
# Building the function for conditions (here, just checks that a sampled model is inside the prior)
cond = lambda model: (np.logical_and(np.greater_equal(model,Mins),np.less_equal(model,Maxs))).all()
# Initialize the model parameters for BEL1D
ModelSynthetic = BEL1D.MODELSET(prior=ListPrior,cond=cond,method=method,forwardFun=forward,paramNames=paramNames,nbLayer=nLayer)
return TrueModel1, TrueModel2, Periods, Dataset1, Dataset2, NoiseEstimate, ModelSynthetic
if __name__ == '__main__':
import numpy as np
from pyBEL1D import BEL1D
from pathos import multiprocessing as mp
from pathos import pools as pp
from matplotlib import pyplot as plt
from pysurf96 import surf96 # Code for the forward modelling of dispersion curves
### Parameters for the computation:
RunFixedLayers = False
RunPostPropag = True
ParallelComputing = True
RandomSeed = False
if not(RandomSeed):
np.random.seed(0) # For reproductibilty
from random import seed
seed(0)
if ParallelComputing:
pool = pp.ProcessPool(mp.cpu_count())# Create the parallel pool with at most the number of available CPU cores
ppComp = [True, pool]
else:
ppComp = [False, None] # No parallel computing
'''1) Building a prior with fixed, large number of layers'''
if RunFixedLayers:
### Building the synthetic benchmark:
Kernel = "Data/sNMR/MRS2021.mrsk"
Timing = np.arange(0.005, 0.5, 0.005)
SyntheticBenchmarkSNMR = np.asarray([0.05, 0.05, 0.05, 0.06, 0.07, 0.08, 0.10, 0.12, 0.14, 0.15, 0.05, 0.05, 0.06, 0.07, 0.08, 0.12, 0.16, 0.20, 0.24, 0.25]) # 3-layers model
### Building the prior/forward model class (MODELSET)
InitialModel = BEL1D.MODELSET.sNMR_logLayers(Kernel=Kernel, Timing=Timing, logUniform=False ,nbLayers=10, maxThick=10)
### Computing the model:
DatasetBenchmark = InitialModel.forwardFun["Fun"](SyntheticBenchmarkSNMR)
Noise = np.mean(DatasetBenchmark)/20
print('The noise level is {} nV'.format(Noise))
DatasetBenchmark += np.random.normal(scale=Noise, size=DatasetBenchmark.shape)
## Creating the BEL1D instances and IPR:
Prebel, Postbel, PrebelInit , stats = BEL1D.IPR(MODEL=InitialModel, Dataset=DatasetBenchmark, NoiseEstimate=Noise*1e9, Parallelization=ppComp,
nbModelsBase=10000, nbModelsSample=10000, stats=True, reduceModels=True, Mixing=(lambda x: 1), Graphs=False, saveIters=False, verbose=True)
# Displaying the results:
Postbel.ShowPostModels(TrueModel=SyntheticBenchmarkSNMR, RMSE=True, Parallelization=ppComp)
plt.show()
'''2) Propagating the posterior model from space from close-by points'''
if RunPostPropag:
### Defining the synthetic bechmarks:
TrueModel1, TrueModel2, Periods, Dataset1, Dataset2, NoiseEstimate, ModelSynthetic = buildMODELSET_MASW()
### Creating the firts BEL1D instance:
nbModelsBase = 1000
def MixingFunc(iter:int) -> float:
return 1# Always keeping the same proportion of models as the initial prior (see paper for argumentation).
Prebel1, Postbel1, PrebelInit1, statsCompute1 = BEL1D.IPR(MODEL=ModelSynthetic,Dataset=Dataset1,NoiseEstimate=NoiseEstimate,Parallelization=ppComp,
nbModelsBase=nbModelsBase,nbModelsSample=nbModelsBase,stats=True, Mixing=MixingFunc,
Graphs=False, TrueModel=TrueModel1, verbose=True, nbIterMax=10)
Prebel2, Postbel2, PrebelInit2, statsCompute2 = BEL1D.IPR(MODEL=ModelSynthetic,Dataset=Dataset2,NoiseEstimate=NoiseEstimate,Parallelization=ppComp,
nbModelsBase=nbModelsBase,nbModelsSample=nbModelsBase,stats=True, Mixing=MixingFunc,
Graphs=False, TrueModel=TrueModel1, verbose=True, PriorSampled=PrebelInit1.MODELS, nbIterMax=10)
Postbel1.ShowPostModels(TrueModel=TrueModel1, RMSE=True, Parallelization=ppComp)
fig = plt.gcf()
ax = fig.get_axes()[0]
ax.set_ylim(bottom=0.140, top=0.0)
ax.set_xlim(left=0.0, right=0.8)
ax.set_title('Initial Prior: Model 1', fontsize=16)
plt.tight_layout()
Postbel2.ShowPostModels(TrueModel=TrueModel2, RMSE=True, Parallelization=ppComp)
fig = plt.gcf()
ax = fig.get_axes()[0]
ax.set_ylim(bottom=0.140, top=0.0)
ax.set_xlim(left=0.0, right=0.8)
ax.set_title('Initial Prior: Model 2', fontsize=16)
plt.tight_layout()
### Creating a new instance with mixing of initial prior and posterior 1 form dataset 2:
sharePost = 1/4
ModelsPrior = PrebelInit1.MODELS[:int(PrebelInit1.nbModels*(1-sharePost)),:]
ModelsPosterior = Postbel1.SAMPLES[:int(Postbel1.nbSamples*sharePost),:]
MixedPrior = np.vstack((ModelsPrior, ModelsPosterior))
Prebel2_bis, Postbel2_bis, PrebelInit2_bis, statsCompute2_bis = BEL1D.IPR(MODEL=ModelSynthetic,Dataset=Dataset2,NoiseEstimate=NoiseEstimate,Parallelization=ppComp,
nbModelsBase=nbModelsBase,nbModelsSample=nbModelsBase,stats=True, Mixing=MixingFunc,
Graphs=False, TrueModel=TrueModel1, verbose=True, PriorSampled=MixedPrior, nbIterMax=10)
Postbel2_bis.ShowPostModels(TrueModel=TrueModel2, RMSE=True, Parallelization=ppComp)
fig = plt.gcf()
ax = fig.get_axes()[0]
ax.set_ylim(bottom=0.140, top=0.0)
ax.set_xlim(left=0.0, right=0.8)
ax.set_title('Propagated Posterior: Model 2', fontsize=16)
plt.tight_layout()
plt.show()
if ParallelComputing:
| pool.terminate() | conditional_block | |
FutureWork.py | ():
'''BUILDMODELSET is a function that will build the benchmark model.
It does not take any arguments. '''
# Values for the benchmark model parameters:
TrueModel1 = np.asarray([0.01, 0.05, 0.120, 0.280, 0.600]) # Thickness and Vs for the 3 layers (variable of the problem)
TrueModel2 = np.asarray([0.0125, 0.0525, 0.120, 0.280, 0.600])
Vp = np.asarray([0.300, 0.750, 1.5]) # Vp for the 3 layers
rho = np.asarray([1.5, 1.9, 2.2]) # rho for the 3 layers
nLayer = 3 # Number of layers in the model
Frequency = np.logspace(0.1,1.5,50) # Frequencies at which the signal is simulated
Periods = np.divide(1,Frequency) # Corresponding periods
# Forward modelling using surf96:
Dataset1 = surf96(thickness=np.append(TrueModel1[0:nLayer-1], [0]),vp=Vp,vs=TrueModel1[nLayer-1:2*nLayer-1],rho=rho,periods=Periods,wave="rayleigh",mode=1,velocity="phase",flat_earth=True)
Dataset2 = surf96(thickness=np.append(TrueModel2[0:nLayer-1], [0]),vp=Vp,vs=TrueModel2[nLayer-1:2*nLayer-1],rho=rho,periods=Periods,wave="rayleigh",mode=1,velocity="phase",flat_earth=True)
# Building the noise model (Boaga et al., 2011)
ErrorModelSynth = [0.075, 20]
NoiseEstimate = np.asarray(np.divide(ErrorModelSynth[0]*Dataset1*1000 + np.divide(ErrorModelSynth[1],Frequency),1000)) # Standard deviation for all measurements in km/s
RMSE_Noise = np.sqrt(np.square(NoiseEstimate).mean(axis=-1))
print('The RMSE for the dataset with 1 times the standard deviation is: {} km/s'.format(RMSE_Noise))
# Define the prior model space:
# Find min and max Vp for each layer in the range of Poisson's ratio [0.2, 0.45]:
# For Vp1=0.3, the roots are : 0.183712 and 0.0904534 -> Vs1 = [0.1, 0.18]
# For Vp2=0.75, the roots are : 0.459279 and 0.226134 -> Vs2 = [0.25, 0.45]
# For Vp3=1.5, the roots are : 0.918559 and 0.452267 -> Vs2 = [0.5, 0.9]
prior = np.array([[0.001, 0.03, 0.1, 0.18],[0.01, 0.1, 0.25, 0.45],[0.0, 0.0, 0.5, 0.9]])# Thicknesses min and max, Vs min and max for each layers.
# Defining names of the variables (for graphical outputs).
nParam = 2 # e and Vs
ListPrior = [None] * ((nLayer*nParam)-1)# Half space at bottom
NamesFullUnits = [None] * ((nLayer*nParam)-1)# Half space at bottom
NamesShort = [None] * ((nLayer*nParam)-1)# Half space at bottom
NamesShortUnits = [None] * ((nLayer*nParam)-1)# Half space at bottom
Mins = np.zeros(((nLayer*nParam)-1,))
Maxs = np.zeros(((nLayer*nParam)-1,))
Units = ["\\ [km]", "\\ [km/s]"]
NFull = ["Thickness\\ ","s-Wave\\ velocity\\ "]
NShort = ["th_{", "Vs_{"]
ident = 0
for j in range(nParam):
for i in range(nLayer):
if not((i == nLayer-1) and (j == 0)):# Not the half-space thickness
ListPrior[ident] = stats.uniform(loc=prior[i,j*2],scale=prior[i,j*2+1]-prior[i,j*2])
Mins[ident] = prior[i,j*2]
Maxs[ident] = prior[i,j*2+1]
NamesFullUnits[ident] = NFull[j] + str(i+1) + Units[j]
NamesShortUnits[ident] = NShort[j] + str(i+1) + "}" + Units[j]
NamesShort[ident] = NShort[j] + str(i+1) + "}"
ident += 1
method = "DC"
Periods = np.divide(1,Frequency)
paramNames = {"NamesFU":NamesFullUnits,
"NamesSU":NamesShortUnits,
"NamesS":NamesShort,
"NamesGlobal":NFull,
"NamesGlobalS":["Depth\\ [km]", "Vs\\ [km/s]", "Vp\\ [km/s]", "\\rho\\ [T/m^3]"],
"DataUnits":"[km/s]",
"DataName":"Phase\\ velocity\\ [km/s]",
"DataAxis":"Periods\\ [s]"}
# Defining the forward modelling function
def funcSurf96(model):
import numpy as np
from pysurf96 import surf96
Vp = np.asarray([0.300, 0.750, 1.5]) # Defined again inside the function for parallelization
rho = np.asarray([1.5, 1.9, 2.2]) # Idem
nLayer = 3 # Idem
Frequency = np.logspace(0.1,1.5,50) # Idem
Periods = np.divide(1,Frequency) # Idem
return surf96(thickness=np.append(model[0:nLayer-1], [0]), # The 2 first values of the model are the thicknesses
vp=Vp, # Fixed value for Vp
vs=model[nLayer-1:2*nLayer-1], # The 3 last values of the model are the Vs
rho=rho, # Fixed value for rho
periods=Periods, # Periods at which to compute the model
wave="rayleigh", # Type of wave to simulate
mode=1, # Only compute the fundamental mode
velocity="phase", # Use phase velocity and not group velocity
flat_earth=True) # Local model where the flat-earth hypothesis makes sens
forwardFun = funcSurf96
forward = {"Fun":forwardFun,"Axis":Periods}
# Building the function for conditions (here, just checks that a sampled model is inside the prior)
cond = lambda model: (np.logical_and(np.greater_equal(model,Mins),np.less_equal(model,Maxs))).all()
# Initialize the model parameters for BEL1D
ModelSynthetic = BEL1D.MODELSET(prior=ListPrior,cond=cond,method=method,forwardFun=forward,paramNames=paramNames,nbLayer=nLayer)
return TrueModel1, TrueModel2, Periods, Dataset1, Dataset2, NoiseEstimate, ModelSynthetic
if __name__ == '__main__':
import numpy as np
from pyBEL1D import BEL1D
from pathos import multiprocessing as mp
from pathos import pools as pp
from matplotlib import pyplot as plt
from pysurf96 import surf96 # Code for the forward modelling of dispersion curves
### Parameters for the computation:
RunFixedLayers = False
RunPostPropag = True
ParallelComputing = True
RandomSeed = False
if not(RandomSeed):
np.random.seed(0) # For reproductibilty
from random import seed
seed(0)
if ParallelComputing:
pool = pp.ProcessPool(mp.cpu_count())# Create the parallel pool with at most the number of available CPU cores
ppComp = [True, pool]
else:
ppComp = [False, None] # No parallel computing
'''1) Building a prior with fixed, large number of layers'''
if RunFixedLayers:
### Building the synthetic benchmark:
Kernel = "Data/sNMR/MRS2021.mrsk"
Timing = np.arange(0.005, 0.5, 0.005)
SyntheticBenchmarkSN | buildMODELSET_MASW | identifier_name | |
FutureWork.py | [0]),vp=Vp,vs=TrueModel1[nLayer-1:2*nLayer-1],rho=rho,periods=Periods,wave="rayleigh",mode=1,velocity="phase",flat_earth=True)
Dataset2 = surf96(thickness=np.append(TrueModel2[0:nLayer-1], [0]),vp=Vp,vs=TrueModel2[nLayer-1:2*nLayer-1],rho=rho,periods=Periods,wave="rayleigh",mode=1,velocity="phase",flat_earth=True)
# Building the noise model (Boaga et al., 2011)
ErrorModelSynth = [0.075, 20]
NoiseEstimate = np.asarray(np.divide(ErrorModelSynth[0]*Dataset1*1000 + np.divide(ErrorModelSynth[1],Frequency),1000)) # Standard deviation for all measurements in km/s
RMSE_Noise = np.sqrt(np.square(NoiseEstimate).mean(axis=-1))
print('The RMSE for the dataset with 1 times the standard deviation is: {} km/s'.format(RMSE_Noise))
# Define the prior model space:
# Find min and max Vp for each layer in the range of Poisson's ratio [0.2, 0.45]:
# For Vp1=0.3, the roots are : 0.183712 and 0.0904534 -> Vs1 = [0.1, 0.18]
# For Vp2=0.75, the roots are : 0.459279 and 0.226134 -> Vs2 = [0.25, 0.45]
# For Vp3=1.5, the roots are : 0.918559 and 0.452267 -> Vs2 = [0.5, 0.9]
prior = np.array([[0.001, 0.03, 0.1, 0.18],[0.01, 0.1, 0.25, 0.45],[0.0, 0.0, 0.5, 0.9]])# Thicknesses min and max, Vs min and max for each layers.
# Defining names of the variables (for graphical outputs).
nParam = 2 # e and Vs
ListPrior = [None] * ((nLayer*nParam)-1)# Half space at bottom
NamesFullUnits = [None] * ((nLayer*nParam)-1)# Half space at bottom
NamesShort = [None] * ((nLayer*nParam)-1)# Half space at bottom
NamesShortUnits = [None] * ((nLayer*nParam)-1)# Half space at bottom
Mins = np.zeros(((nLayer*nParam)-1,))
Maxs = np.zeros(((nLayer*nParam)-1,))
Units = ["\\ [km]", "\\ [km/s]"]
NFull = ["Thickness\\ ","s-Wave\\ velocity\\ "]
NShort = ["th_{", "Vs_{"]
ident = 0
for j in range(nParam):
for i in range(nLayer):
if not((i == nLayer-1) and (j == 0)):# Not the half-space thickness
ListPrior[ident] = stats.uniform(loc=prior[i,j*2],scale=prior[i,j*2+1]-prior[i,j*2])
Mins[ident] = prior[i,j*2]
Maxs[ident] = prior[i,j*2+1]
NamesFullUnits[ident] = NFull[j] + str(i+1) + Units[j]
NamesShortUnits[ident] = NShort[j] + str(i+1) + "}" + Units[j]
NamesShort[ident] = NShort[j] + str(i+1) + "}"
ident += 1
method = "DC"
Periods = np.divide(1,Frequency)
paramNames = {"NamesFU":NamesFullUnits,
"NamesSU":NamesShortUnits,
"NamesS":NamesShort,
"NamesGlobal":NFull,
"NamesGlobalS":["Depth\\ [km]", "Vs\\ [km/s]", "Vp\\ [km/s]", "\\rho\\ [T/m^3]"],
"DataUnits":"[km/s]",
"DataName":"Phase\\ velocity\\ [km/s]",
"DataAxis":"Periods\\ [s]"}
# Defining the forward modelling function
def funcSurf96(model):
|
forwardFun = funcSurf96
forward = {"Fun":forwardFun,"Axis":Periods}
# Building the function for conditions (here, just checks that a sampled model is inside the prior)
cond = lambda model: (np.logical_and(np.greater_equal(model,Mins),np.less_equal(model,Maxs))).all()
# Initialize the model parameters for BEL1D
ModelSynthetic = BEL1D.MODELSET(prior=ListPrior,cond=cond,method=method,forwardFun=forward,paramNames=paramNames,nbLayer=nLayer)
return TrueModel1, TrueModel2, Periods, Dataset1, Dataset2, NoiseEstimate, ModelSynthetic
if __name__ == '__main__':
import numpy as np
from pyBEL1D import BEL1D
from pathos import multiprocessing as mp
from pathos import pools as pp
from matplotlib import pyplot as plt
from pysurf96 import surf96 # Code for the forward modelling of dispersion curves
### Parameters for the computation:
RunFixedLayers = False
RunPostPropag = True
ParallelComputing = True
RandomSeed = False
if not(RandomSeed):
np.random.seed(0) # For reproductibilty
from random import seed
seed(0)
if ParallelComputing:
pool = pp.ProcessPool(mp.cpu_count())# Create the parallel pool with at most the number of available CPU cores
ppComp = [True, pool]
else:
ppComp = [False, None] # No parallel computing
'''1) Building a prior with fixed, large number of layers'''
if RunFixedLayers:
### Building the synthetic benchmark:
Kernel = "Data/sNMR/MRS2021.mrsk"
Timing = np.arange(0.005, 0.5, 0.005)
SyntheticBenchmarkSNMR = np.asarray([0.05, 0.05, 0.05, 0.06, 0.07, 0.08, 0.10, 0.12, 0.14, 0.15, 0.05, 0.05, 0.06, 0.07, 0.08, 0.12, 0.16, 0.20, 0.24, 0.25]) # 3-layers model
### Building the prior/forward model class (MODELSET)
InitialModel = BEL1D.MODELSET.sNMR_logLayers(Kernel=Kernel, Timing=Timing, logUniform=False ,nbLayers=10, maxThick=10)
### Computing the model:
DatasetBenchmark = InitialModel.forwardFun["Fun"](SyntheticBenchmarkSNMR)
Noise = np.mean(DatasetBenchmark)/20
print('The noise level is {} nV'.format(Noise))
DatasetBenchmark += np.random.normal(scale=Noise, size=DatasetBenchmark.shape)
## Creating the BEL1D instances and IPR:
Prebel, Postbel, PrebelInit , stats = BEL1D.IPR(MODEL=InitialModel, Dataset=DatasetBenchmark, NoiseEstimate=Noise* | import numpy as np
from pysurf96 import surf96
Vp = np.asarray([0.300, 0.750, 1.5]) # Defined again inside the function for parallelization
rho = np.asarray([1.5, 1.9, 2.2]) # Idem
nLayer = 3 # Idem
Frequency = np.logspace(0.1,1.5,50) # Idem
Periods = np.divide(1,Frequency) # Idem
return surf96(thickness=np.append(model[0:nLayer-1], [0]), # The 2 first values of the model are the thicknesses
vp=Vp, # Fixed value for Vp
vs=model[nLayer-1:2*nLayer-1], # The 3 last values of the model are the Vs
rho=rho, # Fixed value for rho
periods=Periods, # Periods at which to compute the model
wave="rayleigh", # Type of wave to simulate
mode=1, # Only compute the fundamental mode
velocity="phase", # Use phase velocity and not group velocity
flat_earth=True) # Local model where the flat-earth hypothesis makes sens | identifier_body |
FutureWork.py | [0]),vp=Vp,vs=TrueModel1[nLayer-1:2*nLayer-1],rho=rho,periods=Periods,wave="rayleigh",mode=1,velocity="phase",flat_earth=True)
Dataset2 = surf96(thickness=np.append(TrueModel2[0:nLayer-1], [0]),vp=Vp,vs=TrueModel2[nLayer-1:2*nLayer-1],rho=rho,periods=Periods,wave="rayleigh",mode=1,velocity="phase",flat_earth=True)
# Building the noise model (Boaga et al., 2011)
ErrorModelSynth = [0.075, 20]
NoiseEstimate = np.asarray(np.divide(ErrorModelSynth[0]*Dataset1*1000 + np.divide(ErrorModelSynth[1],Frequency),1000)) # Standard deviation for all measurements in km/s
RMSE_Noise = np.sqrt(np.square(NoiseEstimate).mean(axis=-1))
print('The RMSE for the dataset with 1 times the standard deviation is: {} km/s'.format(RMSE_Noise))
# Define the prior model space:
# Find min and max Vp for each layer in the range of Poisson's ratio [0.2, 0.45]:
# For Vp1=0.3, the roots are : 0.183712 and 0.0904534 -> Vs1 = [0.1, 0.18]
# For Vp2=0.75, the roots are : 0.459279 and 0.226134 -> Vs2 = [0.25, 0.45]
# For Vp3=1.5, the roots are : 0.918559 and 0.452267 -> Vs2 = [0.5, 0.9]
prior = np.array([[0.001, 0.03, 0.1, 0.18],[0.01, 0.1, 0.25, 0.45],[0.0, 0.0, 0.5, 0.9]])# Thicknesses min and max, Vs min and max for each layers.
# Defining names of the variables (for graphical outputs).
nParam = 2 # e and Vs
ListPrior = [None] * ((nLayer*nParam)-1)# Half space at bottom
NamesFullUnits = [None] * ((nLayer*nParam)-1)# Half space at bottom
NamesShort = [None] * ((nLayer*nParam)-1)# Half space at bottom
NamesShortUnits = [None] * ((nLayer*nParam)-1)# Half space at bottom
Mins = np.zeros(((nLayer*nParam)-1,))
Maxs = np.zeros(((nLayer*nParam)-1,))
Units = ["\\ [km]", "\\ [km/s]"]
NFull = ["Thickness\\ ","s-Wave\\ velocity\\ "]
NShort = ["th_{", "Vs_{"]
ident = 0
for j in range(nParam):
for i in range(nLayer):
if not((i == nLayer-1) and (j == 0)):# Not the half-space thickness
ListPrior[ident] = stats.uniform(loc=prior[i,j*2],scale=prior[i,j*2+1]-prior[i,j*2])
Mins[ident] = prior[i,j*2]
Maxs[ident] = prior[i,j*2+1]
NamesFullUnits[ident] = NFull[j] + str(i+1) + Units[j]
NamesShortUnits[ident] = NShort[j] + str(i+1) + "}" + Units[j]
NamesShort[ident] = NShort[j] + str(i+1) + "}"
ident += 1
method = "DC"
Periods = np.divide(1,Frequency)
paramNames = {"NamesFU":NamesFullUnits,
"NamesSU":NamesShortUnits,
"NamesS":NamesShort,
"NamesGlobal":NFull,
"NamesGlobalS":["Depth\\ [km]", "Vs\\ [km/s]", "Vp\\ [km/s]", "\\rho\\ [T/m^3]"],
"DataUnits":"[km/s]",
"DataName":"Phase\\ velocity\\ [km/s]",
"DataAxis":"Periods\\ [s]"}
# Defining the forward modelling function
def funcSurf96(model):
import numpy as np
from pysurf96 import surf96
Vp = np.asarray([0.300, 0.750, 1.5]) # Defined again inside the function for parallelization
rho = np.asarray([1.5, 1.9, 2.2]) # Idem
nLayer = 3 # Idem
Frequency = np.logspace(0.1,1.5,50) # Idem
Periods = np.divide(1,Frequency) # Idem
return surf96(thickness=np.append(model[0:nLayer-1], [0]), # The 2 first values of the model are the thicknesses
vp=Vp, # Fixed value for Vp
vs=model[nLayer-1:2*nLayer-1], # The 3 last values of the model are the Vs
rho=rho, # Fixed value for rho
periods=Periods, # Periods at which to compute the model
wave="rayleigh", # Type of wave to simulate
mode=1, # Only compute the fundamental mode
velocity="phase", # Use phase velocity and not group velocity
flat_earth=True) # Local model where the flat-earth hypothesis makes sens
forwardFun = funcSurf96
forward = {"Fun":forwardFun,"Axis":Periods}
# Building the function for conditions (here, just checks that a sampled model is inside the prior)
cond = lambda model: (np.logical_and(np.greater_equal(model,Mins),np.less_equal(model,Maxs))).all()
# Initialize the model parameters for BEL1D
ModelSynthetic = BEL1D.MODELSET(prior=ListPrior,cond=cond,method=method,forwardFun=forward,paramNames=paramNames,nbLayer=nLayer)
return TrueModel1, TrueModel2, Periods, Dataset1, Dataset2, NoiseEstimate, ModelSynthetic
if __name__ == '__main__':
import numpy as np
from pyBEL1D import BEL1D
from pathos import multiprocessing as mp
from pathos import pools as pp
from matplotlib import pyplot as plt
from pysurf96 import surf96 # Code for the forward modelling of dispersion curves
### Parameters for the computation:
RunFixedLayers = False
RunPostPropag = True
ParallelComputing = True
RandomSeed = False
if not(RandomSeed):
np.random.seed(0) # For reproductibilty
from random import seed | if ParallelComputing:
pool = pp.ProcessPool(mp.cpu_count())# Create the parallel pool with at most the number of available CPU cores
ppComp = [True, pool]
else:
ppComp = [False, None] # No parallel computing
'''1) Building a prior with fixed, large number of layers'''
if RunFixedLayers:
### Building the synthetic benchmark:
Kernel = "Data/sNMR/MRS2021.mrsk"
Timing = np.arange(0.005, 0.5, 0.005)
SyntheticBenchmarkSNMR = np.asarray([0.05, 0.05, 0.05, 0.06, 0.07, 0.08, 0.10, 0.12, 0.14, 0.15, 0.05, 0.05, 0.06, 0.07, 0.08, 0.12, 0.16, 0.20, 0.24, 0.25]) # 3-layers model
### Building the prior/forward model class (MODELSET)
InitialModel = BEL1D.MODELSET.sNMR_logLayers(Kernel=Kernel, Timing=Timing, logUniform=False ,nbLayers=10, maxThick=10)
### Computing the model:
DatasetBenchmark = InitialModel.forwardFun["Fun"](SyntheticBenchmarkSNMR)
Noise = np.mean(DatasetBenchmark)/20
print('The noise level is {} nV'.format(Noise))
DatasetBenchmark += np.random.normal(scale=Noise, size=DatasetBenchmark.shape)
## Creating the BEL1D instances and IPR:
Prebel, Postbel, PrebelInit , stats = BEL1D.IPR(MODEL=InitialModel, Dataset=DatasetBenchmark, NoiseEstimate=Noise*1 | seed(0)
| random_line_split |
MultinomialAdversarialNetwork.py | eter import ConfusionMeter
import torch
import torch.nn as nn
import torch.nn.functional as functional
import torch.optim as optim
from torch.utils.data import ConcatDataset, DataLoader
"""
IMPORTANT: for some reason, Model (self.F_s,etc) will not work if inputs are not float32
=> need to convert. Dont know if same thing for target tho?
Also apparently, domain labels retrieved from get_domain_labels cannot be -1?
Output size for C HAS TO BE 2 even if it's a binary classification
"""
def softmax(x):
"""Compute softmax values for each sets of scores in x."""
e_x = np.exp(x - np.max(x, axis=1).reshape(-1, 1))
return e_x / np.sum(e_x, axis=1).reshape(-1, 1)
class MultinomialAdversarialNetwork(TopicModel):
def | (self, k, m, model_params=None, log_params=None):
super().__init__(k,m,model_params,log_params)
def prepare_data(self,d):
"""
Assume d is a dictionary of dataset where d[domain] = another dataset class
Assume labeled domain = train set, unlabeled = test
"""
train_loaders, train_iters = {}, {}
unlabeled_loaders, unlabeled_iters = {}, {}
for domain in opt.domains:
#CONVERT TO FLOAT32
features, target = torch.from_numpy(d[domain].X.todense().astype('float32')), torch.from_numpy(d[domain].y)#.reshape((-1,1))
train = data_utils.TensorDataset(features,target)
train_loaders[domain] = DataLoader(train, opt.batch_size, shuffle = True)
train_iters[domain] = iter(train_loaders[domain])
for domain in opt.unlabeled_domains:
features, target = torch.from_numpy(d[domain].X.todense().astype('float32')), torch.from_numpy(d[domain].y)#.reshape(-1,1))
uset = data_utils.TensorDataset(features,target)
unlabeled_loaders[domain] = DataLoader(uset,opt.batch_size, shuffle = True)
unlabeled_iters[domain] = iter(unlabeled_loaders[domain])
return train_loaders, train_iters, unlabeled_loaders, unlabeled_iters
def fit(self, d, *args, **kwargs):
#minibatches = create_minibatch(X, y, z, batch_size)
#TODO: make this able to fit consecutively
train_loaders, train_iters, unlabeled_loaders, unlabeled_iters = self.prepare_data(d)
#Training
self.F_s = MlpFeatureExtractor(d['train'].X.shape[1], opt.F_hidden_sizes,opt.shared_hidden_size, opt.dropout)
self.F_d = {}
for domain in opt.domains:
self.F_d[domain] = MlpFeatureExtractor(d['train'].X.shape[1], opt.F_hidden_sizes, opt.domain_hidden_size, opt.dropout)
self.C = SentimentClassifier(opt.C_layers, opt.shared_hidden_size + opt.domain_hidden_size, opt.shared_hidden_size + opt.domain_hidden_size, 2,opt.dropout, opt.C_bn)
self.D = DomainClassifier(opt.D_layers, opt.shared_hidden_size, opt.shared_hidden_size,len(opt.all_domains), opt.loss, opt.dropout, opt.D_bn)
# print("try")
# print(opt.device)
self.F_s, self.C, self.D = self.F_s.to(opt.device), self.C.to(opt.device), self.D.to(opt.device)
for f_d in self.F_d.values():
f_d = f_d.to(opt.device)
# print("endtry")
# # optimizers
optimizer = optim.Adam(itertools.chain(*map(list, [self.F_s.parameters() if self.F_s else [], self.C.parameters()] + [f.parameters() for f in self.F_d.values()])), lr=0.0001)
optimizerD = optim.Adam(self.D.parameters(), lr=0.0001)
loss_d_res = []
l_d_res = []
l_c_res = []
for epoch in range(opt.max_epoch):
self.F_s.train()
self.C.train()
self.D.train()
for f in self.F_d.values():
f.train()
# training accuracy
correct, total = defaultdict(int), defaultdict(int)
# D accuracy
d_correct, d_total = 0, 0
# conceptually view 1 epoch as 1 epoch of the first domain
num_iter = len(train_loaders[opt.domains[0]])
for i in range(num_iter):
# D iterations
utils.freeze_net(self.F_s)
map(utils.freeze_net, self.F_d.values())
utils.freeze_net(self.C)
utils.unfreeze_net(self.D)
# optional WGAN n_critic trick
n_critic = opt.n_critic
for _ in range(n_critic):
self.D.zero_grad()
loss_d = {}
# train on both labeled and unlabeled domains
for domain in opt.unlabeled_domains:
# targets not used
d_inputs, _ = utils.endless_get_next_batch(
unlabeled_loaders, unlabeled_iters, domain)
d_inputs = d_inputs.to(opt.device)
d_targets = utils.get_domain_label(opt.loss, domain, len(d_inputs))
shared_feat = self.F_s(d_inputs)
d_outputs = self.D(shared_feat)
# D accuracy
_, pred = torch.max(d_outputs, 1)
d_total += len(d_inputs)
if opt.loss.lower() == 'l2':
_, tgt_indices = torch.max(d_targets, 1)
d_correct += (pred==tgt_indices).sum().item()
l_d = functional.mse_loss(d_outputs, d_targets)
l_d.backward()
else:
d_correct += (pred==d_targets).sum().item()
l_d = functional.nll_loss(d_outputs, d_targets)
l_d.backward()
loss_d[domain] = l_d.item()
optimizerD.step()
# F&C iteration
utils.unfreeze_net(self.F_s)
map(utils.unfreeze_net, self.F_d.values())
utils.unfreeze_net(self.C)
utils.freeze_net(self.D)
#if opt.fix_emb:
# utils.freeze_net(self.F_s.word_emb)
# map(utils.freeze_net, self.F_d.values())
self.F_s.zero_grad()
for f_d in self.F_d.values():
f_d.zero_grad()
self.C.zero_grad()
shared_feats, domain_feats = [], []
for domain in opt.domains:
inputs, targets = utils.endless_get_next_batch(
train_loaders, train_iters, domain)
#target = torch.int64 rn
targets = targets.to(opt.device)
inputs = inputs.to(opt.device)
shared_feat = self.F_s(inputs)
shared_feats.append(shared_feat)
domain_feat = self.F_d[domain](inputs)
domain_feats.append(domain_feat)
features = torch.cat((shared_feat, domain_feat), dim=1)
c_outputs = self.C(features)
#return c_outputs, targets
#DEVICE SIDE TRIGGERED ERROR OCCUR HERE (l_c=...)
l_c = functional.nll_loss(c_outputs, targets)
l_c.backward(retain_graph=True)
# training accuracy
_, pred = torch.max(c_outputs, 1)
total[domain] += targets.size(0)
correct[domain] += (pred == targets).sum().item()
# update F with D gradients on all domains
for domain in opt.unlabeled_domains:
d_inputs, _ = utils.endless_get_next_batch(
unlabeled_loaders, unlabeled_iters, domain)
d_inputs = d_inputs.to(opt.device)
shared_feat = self.F_s(d_inputs)
d_outputs = self.D(shared_feat)
if opt.loss.lower() == 'gr':
d_targets = utils.get_domain_label(opt.loss, domain, len(d_inputs))
l_d = functional.nll_loss(d_outputs, d_targets)
if opt.lambd > 0:
l_d *= -opt.lambd
elif opt.loss.lower() == 'l2':
d_targets = utils.get_random_domain_label(opt.loss, len(d_inputs))
l_d = functional.mse_loss(d_outputs, d_targets)
if opt.lambd > 0:
l_d *= opt.lambd
l_d.backward()
optimizer.step()
# print(loss_d)
# print('l_d loss: {}'.format(l_d.item()))
# print('l_c loss: {}'.format(l_c.item()))
loss_d_res.append(loss_d['test'])
l_d_res.append(l_d.item())
l_c_res.append(l_c.item())
if (epoch + 1) % kwargs["display_step"] == 0:
print(
"Epoch:", "%04d, done" % (epoch + 1) #"cost=", "{:.9f}"#.format(l_d.data[0])
)
return loss_d_res, l_d_res, l_c_res
def transform(self, d, *args, **kwargs):
F_d = self.F_d[opt.domains[0]]
self.F_s.eval()
F_d.eval()
self.C.eval()
_,_,_,it = self.prepare_data(d)
it = it[opt.unlabeled_domains[0]]
correct = 0
total = 0
confusion = ConfusionMeter(opt.num_labels | __init__ | identifier_name |
MultinomialAdversarialNetwork.py | eter import ConfusionMeter
import torch
import torch.nn as nn
import torch.nn.functional as functional
import torch.optim as optim
from torch.utils.data import ConcatDataset, DataLoader
"""
IMPORTANT: for some reason, Model (self.F_s,etc) will not work if inputs are not float32
=> need to convert. Dont know if same thing for target tho?
Also apparently, domain labels retrieved from get_domain_labels cannot be -1?
Output size for C HAS TO BE 2 even if it's a binary classification
"""
def softmax(x):
"""Compute softmax values for each sets of scores in x."""
e_x = np.exp(x - np.max(x, axis=1).reshape(-1, 1))
return e_x / np.sum(e_x, axis=1).reshape(-1, 1)
class MultinomialAdversarialNetwork(TopicModel):
def __init__(self, k, m, model_params=None, log_params=None):
super().__init__(k,m,model_params,log_params)
def prepare_data(self,d):
"""
Assume d is a dictionary of dataset where d[domain] = another dataset class
Assume labeled domain = train set, unlabeled = test
"""
train_loaders, train_iters = {}, {}
unlabeled_loaders, unlabeled_iters = {}, {}
for domain in opt.domains:
#CONVERT TO FLOAT32
features, target = torch.from_numpy(d[domain].X.todense().astype('float32')), torch.from_numpy(d[domain].y)#.reshape((-1,1))
train = data_utils.TensorDataset(features,target)
train_loaders[domain] = DataLoader(train, opt.batch_size, shuffle = True)
train_iters[domain] = iter(train_loaders[domain])
for domain in opt.unlabeled_domains:
features, target = torch.from_numpy(d[domain].X.todense().astype('float32')), torch.from_numpy(d[domain].y)#.reshape(-1,1))
uset = data_utils.TensorDataset(features,target)
unlabeled_loaders[domain] = DataLoader(uset,opt.batch_size, shuffle = True)
unlabeled_iters[domain] = iter(unlabeled_loaders[domain])
return train_loaders, train_iters, unlabeled_loaders, unlabeled_iters
def fit(self, d, *args, **kwargs):
#minibatches = create_minibatch(X, y, z, batch_size)
#TODO: make this able to fit consecutively
| for epoch in range(opt.max_epoch):
self.F_s.train()
self.C.train()
self.D.train()
for f in self.F_d.values():
f.train()
# training accuracy
correct, total = defaultdict(int), defaultdict(int)
# D accuracy
d_correct, d_total = 0, 0
# conceptually view 1 epoch as 1 epoch of the first domain
num_iter = len(train_loaders[opt.domains[0]])
for i in range(num_iter):
# D iterations
utils.freeze_net(self.F_s)
map(utils.freeze_net, self.F_d.values())
utils.freeze_net(self.C)
utils.unfreeze_net(self.D)
# optional WGAN n_critic trick
n_critic = opt.n_critic
for _ in range(n_critic):
self.D.zero_grad()
loss_d = {}
# train on both labeled and unlabeled domains
for domain in opt.unlabeled_domains:
# targets not used
d_inputs, _ = utils.endless_get_next_batch(
unlabeled_loaders, unlabeled_iters, domain)
d_inputs = d_inputs.to(opt.device)
d_targets = utils.get_domain_label(opt.loss, domain, len(d_inputs))
shared_feat = self.F_s(d_inputs)
d_outputs = self.D(shared_feat)
# D accuracy
_, pred = torch.max(d_outputs, 1)
d_total += len(d_inputs)
if opt.loss.lower() == 'l2':
_, tgt_indices = torch.max(d_targets, 1)
d_correct += (pred==tgt_indices).sum().item()
l_d = functional.mse_loss(d_outputs, d_targets)
l_d.backward()
else:
d_correct += (pred==d_targets).sum().item()
l_d = functional.nll_loss(d_outputs, d_targets)
l_d.backward()
loss_d[domain] = l_d.item()
optimizerD.step()
# F&C iteration
utils.unfreeze_net(self.F_s)
map(utils.unfreeze_net, self.F_d.values())
utils.unfreeze_net(self.C)
utils.freeze_net(self.D)
#if opt.fix_emb:
# utils.freeze_net(self.F_s.word_emb)
# map(utils.freeze_net, self.F_d.values())
self.F_s.zero_grad()
for f_d in self.F_d.values():
f_d.zero_grad()
self.C.zero_grad()
shared_feats, domain_feats = [], []
for domain in opt.domains:
inputs, targets = utils.endless_get_next_batch(
train_loaders, train_iters, domain)
#target = torch.int64 rn
targets = targets.to(opt.device)
inputs = inputs.to(opt.device)
shared_feat = self.F_s(inputs)
shared_feats.append(shared_feat)
domain_feat = self.F_d[domain](inputs)
domain_feats.append(domain_feat)
features = torch.cat((shared_feat, domain_feat), dim=1)
c_outputs = self.C(features)
#return c_outputs, targets
#DEVICE SIDE TRIGGERED ERROR OCCUR HERE (l_c=...)
l_c = functional.nll_loss(c_outputs, targets)
l_c.backward(retain_graph=True)
# training accuracy
_, pred = torch.max(c_outputs, 1)
total[domain] += targets.size(0)
correct[domain] += (pred == targets).sum().item()
# update F with D gradients on all domains
for domain in opt.unlabeled_domains:
d_inputs, _ = utils.endless_get_next_batch(
unlabeled_loaders, unlabeled_iters, domain)
d_inputs = d_inputs.to(opt.device)
shared_feat = self.F_s(d_inputs)
d_outputs = self.D(shared_feat)
if opt.loss.lower() == 'gr':
d_targets = utils.get_domain_label(opt.loss, domain, len(d_inputs))
l_d = functional.nll_loss(d_outputs, d_targets)
if opt.lambd > 0:
l_d *= -opt.lambd
elif opt.loss.lower() == 'l2':
d_targets = utils.get_random_domain_label(opt.loss, len(d_inputs))
l_d = functional.mse_loss(d_outputs, d_targets)
if opt.lambd > 0:
l_d *= opt.lambd
l_d.backward()
optimizer.step()
# print(loss_d)
# print('l_d loss: {}'.format(l_d.item()))
# print('l_c loss: {}'.format(l_c.item()))
loss_d_res.append(loss_d['test'])
l_d_res.append(l_d.item())
l_c_res.append(l_c.item())
if (epoch + 1) % kwargs["display_step"] == 0:
print(
"Epoch:", "%04d, done" % (epoch + 1) #"cost=", "{:.9f}"#.format(l_d.data[0])
)
return loss_d_res, l_d_res, l_c_res
def transform(self, d, *args, **kwargs):
F_d = self.F_d[opt.domains[0]]
self.F_s.eval()
F_d.eval()
self.C.eval()
_,_,_,it = self.prepare_data(d)
it = it[opt.unlabeled_domains[0]]
correct = 0
total = 0
confusion = ConfusionMeter(opt.num_labels)
| train_loaders, train_iters, unlabeled_loaders, unlabeled_iters = self.prepare_data(d)
#Training
self.F_s = MlpFeatureExtractor(d['train'].X.shape[1], opt.F_hidden_sizes,opt.shared_hidden_size, opt.dropout)
self.F_d = {}
for domain in opt.domains:
self.F_d[domain] = MlpFeatureExtractor(d['train'].X.shape[1], opt.F_hidden_sizes, opt.domain_hidden_size, opt.dropout)
self.C = SentimentClassifier(opt.C_layers, opt.shared_hidden_size + opt.domain_hidden_size, opt.shared_hidden_size + opt.domain_hidden_size, 2,opt.dropout, opt.C_bn)
self.D = DomainClassifier(opt.D_layers, opt.shared_hidden_size, opt.shared_hidden_size,len(opt.all_domains), opt.loss, opt.dropout, opt.D_bn)
# print("try")
# print(opt.device)
self.F_s, self.C, self.D = self.F_s.to(opt.device), self.C.to(opt.device), self.D.to(opt.device)
for f_d in self.F_d.values():
f_d = f_d.to(opt.device)
# print("endtry")
# # optimizers
optimizer = optim.Adam(itertools.chain(*map(list, [self.F_s.parameters() if self.F_s else [], self.C.parameters()] + [f.parameters() for f in self.F_d.values()])), lr=0.0001)
optimizerD = optim.Adam(self.D.parameters(), lr=0.0001)
loss_d_res = []
l_d_res = []
l_c_res = [] | identifier_body |
MultinomialAdversarialNetwork.py | eter import ConfusionMeter
import torch
import torch.nn as nn
import torch.nn.functional as functional
import torch.optim as optim
from torch.utils.data import ConcatDataset, DataLoader
"""
IMPORTANT: for some reason, Model (self.F_s,etc) will not work if inputs are not float32
=> need to convert. Dont know if same thing for target tho?
Also apparently, domain labels retrieved from get_domain_labels cannot be -1?
Output size for C HAS TO BE 2 even if it's a binary classification
"""
def softmax(x):
"""Compute softmax values for each sets of scores in x."""
e_x = np.exp(x - np.max(x, axis=1).reshape(-1, 1))
return e_x / np.sum(e_x, axis=1).reshape(-1, 1)
class MultinomialAdversarialNetwork(TopicModel):
def __init__(self, k, m, model_params=None, log_params=None):
super().__init__(k,m,model_params,log_params)
def prepare_data(self,d):
"""
Assume d is a dictionary of dataset where d[domain] = another dataset class
Assume labeled domain = train set, unlabeled = test
"""
train_loaders, train_iters = {}, {}
unlabeled_loaders, unlabeled_iters = {}, {}
for domain in opt.domains:
#CONVERT TO FLOAT32
features, target = torch.from_numpy(d[domain].X.todense().astype('float32')), torch.from_numpy(d[domain].y)#.reshape((-1,1))
train = data_utils.TensorDataset(features,target)
train_loaders[domain] = DataLoader(train, opt.batch_size, shuffle = True)
train_iters[domain] = iter(train_loaders[domain])
for domain in opt.unlabeled_domains:
|
return train_loaders, train_iters, unlabeled_loaders, unlabeled_iters
def fit(self, d, *args, **kwargs):
#minibatches = create_minibatch(X, y, z, batch_size)
#TODO: make this able to fit consecutively
train_loaders, train_iters, unlabeled_loaders, unlabeled_iters = self.prepare_data(d)
#Training
self.F_s = MlpFeatureExtractor(d['train'].X.shape[1], opt.F_hidden_sizes,opt.shared_hidden_size, opt.dropout)
self.F_d = {}
for domain in opt.domains:
self.F_d[domain] = MlpFeatureExtractor(d['train'].X.shape[1], opt.F_hidden_sizes, opt.domain_hidden_size, opt.dropout)
self.C = SentimentClassifier(opt.C_layers, opt.shared_hidden_size + opt.domain_hidden_size, opt.shared_hidden_size + opt.domain_hidden_size, 2,opt.dropout, opt.C_bn)
self.D = DomainClassifier(opt.D_layers, opt.shared_hidden_size, opt.shared_hidden_size,len(opt.all_domains), opt.loss, opt.dropout, opt.D_bn)
# print("try")
# print(opt.device)
self.F_s, self.C, self.D = self.F_s.to(opt.device), self.C.to(opt.device), self.D.to(opt.device)
for f_d in self.F_d.values():
f_d = f_d.to(opt.device)
# print("endtry")
# # optimizers
optimizer = optim.Adam(itertools.chain(*map(list, [self.F_s.parameters() if self.F_s else [], self.C.parameters()] + [f.parameters() for f in self.F_d.values()])), lr=0.0001)
optimizerD = optim.Adam(self.D.parameters(), lr=0.0001)
loss_d_res = []
l_d_res = []
l_c_res = []
for epoch in range(opt.max_epoch):
self.F_s.train()
self.C.train()
self.D.train()
for f in self.F_d.values():
f.train()
# training accuracy
correct, total = defaultdict(int), defaultdict(int)
# D accuracy
d_correct, d_total = 0, 0
# conceptually view 1 epoch as 1 epoch of the first domain
num_iter = len(train_loaders[opt.domains[0]])
for i in range(num_iter):
# D iterations
utils.freeze_net(self.F_s)
map(utils.freeze_net, self.F_d.values())
utils.freeze_net(self.C)
utils.unfreeze_net(self.D)
# optional WGAN n_critic trick
n_critic = opt.n_critic
for _ in range(n_critic):
self.D.zero_grad()
loss_d = {}
# train on both labeled and unlabeled domains
for domain in opt.unlabeled_domains:
# targets not used
d_inputs, _ = utils.endless_get_next_batch(
unlabeled_loaders, unlabeled_iters, domain)
d_inputs = d_inputs.to(opt.device)
d_targets = utils.get_domain_label(opt.loss, domain, len(d_inputs))
shared_feat = self.F_s(d_inputs)
d_outputs = self.D(shared_feat)
# D accuracy
_, pred = torch.max(d_outputs, 1)
d_total += len(d_inputs)
if opt.loss.lower() == 'l2':
_, tgt_indices = torch.max(d_targets, 1)
d_correct += (pred==tgt_indices).sum().item()
l_d = functional.mse_loss(d_outputs, d_targets)
l_d.backward()
else:
d_correct += (pred==d_targets).sum().item()
l_d = functional.nll_loss(d_outputs, d_targets)
l_d.backward()
loss_d[domain] = l_d.item()
optimizerD.step()
# F&C iteration
utils.unfreeze_net(self.F_s)
map(utils.unfreeze_net, self.F_d.values())
utils.unfreeze_net(self.C)
utils.freeze_net(self.D)
#if opt.fix_emb:
# utils.freeze_net(self.F_s.word_emb)
# map(utils.freeze_net, self.F_d.values())
self.F_s.zero_grad()
for f_d in self.F_d.values():
f_d.zero_grad()
self.C.zero_grad()
shared_feats, domain_feats = [], []
for domain in opt.domains:
inputs, targets = utils.endless_get_next_batch(
train_loaders, train_iters, domain)
#target = torch.int64 rn
targets = targets.to(opt.device)
inputs = inputs.to(opt.device)
shared_feat = self.F_s(inputs)
shared_feats.append(shared_feat)
domain_feat = self.F_d[domain](inputs)
domain_feats.append(domain_feat)
features = torch.cat((shared_feat, domain_feat), dim=1)
c_outputs = self.C(features)
#return c_outputs, targets
#DEVICE SIDE TRIGGERED ERROR OCCUR HERE (l_c=...)
l_c = functional.nll_loss(c_outputs, targets)
l_c.backward(retain_graph=True)
# training accuracy
_, pred = torch.max(c_outputs, 1)
total[domain] += targets.size(0)
correct[domain] += (pred == targets).sum().item()
# update F with D gradients on all domains
for domain in opt.unlabeled_domains:
d_inputs, _ = utils.endless_get_next_batch(
unlabeled_loaders, unlabeled_iters, domain)
d_inputs = d_inputs.to(opt.device)
shared_feat = self.F_s(d_inputs)
d_outputs = self.D(shared_feat)
if opt.loss.lower() == 'gr':
d_targets = utils.get_domain_label(opt.loss, domain, len(d_inputs))
l_d = functional.nll_loss(d_outputs, d_targets)
if opt.lambd > 0:
l_d *= -opt.lambd
elif opt.loss.lower() == 'l2':
d_targets = utils.get_random_domain_label(opt.loss, len(d_inputs))
l_d = functional.mse_loss(d_outputs, d_targets)
if opt.lambd > 0:
l_d *= opt.lambd
l_d.backward()
optimizer.step()
# print(loss_d)
# print('l_d loss: {}'.format(l_d.item()))
# print('l_c loss: {}'.format(l_c.item()))
loss_d_res.append(loss_d['test'])
l_d_res.append(l_d.item())
l_c_res.append(l_c.item())
if (epoch + 1) % kwargs["display_step"] == 0:
print(
"Epoch:", "%04d, done" % (epoch + 1) #"cost=", "{:.9f}"#.format(l_d.data[0])
)
return loss_d_res, l_d_res, l_c_res
def transform(self, d, *args, **kwargs):
F_d = self.F_d[opt.domains[0]]
self.F_s.eval()
F_d.eval()
self.C.eval()
_,_,_,it = self.prepare_data(d)
it = it[opt.unlabeled_domains[0]]
correct = 0
total = 0
confusion = ConfusionMeter(opt.num_labels | features, target = torch.from_numpy(d[domain].X.todense().astype('float32')), torch.from_numpy(d[domain].y)#.reshape(-1,1))
uset = data_utils.TensorDataset(features,target)
unlabeled_loaders[domain] = DataLoader(uset,opt.batch_size, shuffle = True)
unlabeled_iters[domain] = iter(unlabeled_loaders[domain]) | conditional_block |
MultinomialAdversarialNetwork.py | .meter import ConfusionMeter
import torch
import torch.nn as nn
import torch.nn.functional as functional
import torch.optim as optim
from torch.utils.data import ConcatDataset, DataLoader
"""
IMPORTANT: for some reason, Model (self.F_s,etc) will not work if inputs are not float32
=> need to convert. Dont know if same thing for target tho?
Also apparently, domain labels retrieved from get_domain_labels cannot be -1?
Output size for C HAS TO BE 2 even if it's a binary classification
"""
def softmax(x):
"""Compute softmax values for each sets of scores in x."""
e_x = np.exp(x - np.max(x, axis=1).reshape(-1, 1))
return e_x / np.sum(e_x, axis=1).reshape(-1, 1)
class MultinomialAdversarialNetwork(TopicModel):
def __init__(self, k, m, model_params=None, log_params=None):
super().__init__(k,m,model_params,log_params)
def prepare_data(self,d):
"""
Assume d is a dictionary of dataset where d[domain] = another dataset class
Assume labeled domain = train set, unlabeled = test
"""
train_loaders, train_iters = {}, {}
unlabeled_loaders, unlabeled_iters = {}, {}
for domain in opt.domains:
#CONVERT TO FLOAT32
features, target = torch.from_numpy(d[domain].X.todense().astype('float32')), torch.from_numpy(d[domain].y)#.reshape((-1,1))
train = data_utils.TensorDataset(features,target)
train_loaders[domain] = DataLoader(train, opt.batch_size, shuffle = True)
train_iters[domain] = iter(train_loaders[domain])
for domain in opt.unlabeled_domains:
features, target = torch.from_numpy(d[domain].X.todense().astype('float32')), torch.from_numpy(d[domain].y)#.reshape(-1,1))
uset = data_utils.TensorDataset(features,target)
unlabeled_loaders[domain] = DataLoader(uset,opt.batch_size, shuffle = True)
unlabeled_iters[domain] = iter(unlabeled_loaders[domain])
return train_loaders, train_iters, unlabeled_loaders, unlabeled_iters
def fit(self, d, *args, **kwargs):
#minibatches = create_minibatch(X, y, z, batch_size)
#TODO: make this able to fit consecutively
train_loaders, train_iters, unlabeled_loaders, unlabeled_iters = self.prepare_data(d)
#Training
self.F_s = MlpFeatureExtractor(d['train'].X.shape[1], opt.F_hidden_sizes,opt.shared_hidden_size, opt.dropout)
self.F_d = {}
for domain in opt.domains:
self.F_d[domain] = MlpFeatureExtractor(d['train'].X.shape[1], opt.F_hidden_sizes, opt.domain_hidden_size, opt.dropout)
self.C = SentimentClassifier(opt.C_layers, opt.shared_hidden_size + opt.domain_hidden_size, opt.shared_hidden_size + opt.domain_hidden_size, 2,opt.dropout, opt.C_bn)
self.D = DomainClassifier(opt.D_layers, opt.shared_hidden_size, opt.shared_hidden_size,len(opt.all_domains), opt.loss, opt.dropout, opt.D_bn)
# print("try")
# print(opt.device)
self.F_s, self.C, self.D = self.F_s.to(opt.device), self.C.to(opt.device), self.D.to(opt.device)
for f_d in self.F_d.values():
f_d = f_d.to(opt.device)
# print("endtry")
# # optimizers
optimizer = optim.Adam(itertools.chain(*map(list, [self.F_s.parameters() if self.F_s else [], self.C.parameters()] + [f.parameters() for f in self.F_d.values()])), lr=0.0001)
optimizerD = optim.Adam(self.D.parameters(), lr=0.0001)
loss_d_res = []
l_d_res = []
l_c_res = []
for epoch in range(opt.max_epoch):
self.F_s.train()
self.C.train()
self.D.train()
for f in self.F_d.values():
f.train()
# training accuracy
correct, total = defaultdict(int), defaultdict(int)
# D accuracy
d_correct, d_total = 0, 0
# conceptually view 1 epoch as 1 epoch of the first domain
num_iter = len(train_loaders[opt.domains[0]])
for i in range(num_iter):
# D iterations
utils.freeze_net(self.F_s)
map(utils.freeze_net, self.F_d.values())
utils.freeze_net(self.C)
utils.unfreeze_net(self.D)
# optional WGAN n_critic trick
n_critic = opt.n_critic
for _ in range(n_critic):
self.D.zero_grad()
loss_d = {}
# train on both labeled and unlabeled domains
for domain in opt.unlabeled_domains:
# targets not used
d_inputs, _ = utils.endless_get_next_batch(
unlabeled_loaders, unlabeled_iters, domain)
d_inputs = d_inputs.to(opt.device)
d_targets = utils.get_domain_label(opt.loss, domain, len(d_inputs))
shared_feat = self.F_s(d_inputs)
d_outputs = self.D(shared_feat)
# D accuracy
_, pred = torch.max(d_outputs, 1)
d_total += len(d_inputs)
if opt.loss.lower() == 'l2':
_, tgt_indices = torch.max(d_targets, 1)
d_correct += (pred==tgt_indices).sum().item()
l_d = functional.mse_loss(d_outputs, d_targets)
l_d.backward()
else:
d_correct += (pred==d_targets).sum().item()
l_d = functional.nll_loss(d_outputs, d_targets)
l_d.backward()
loss_d[domain] = l_d.item()
optimizerD.step()
# F&C iteration
utils.unfreeze_net(self.F_s)
map(utils.unfreeze_net, self.F_d.values())
utils.unfreeze_net(self.C)
utils.freeze_net(self.D)
#if opt.fix_emb:
# utils.freeze_net(self.F_s.word_emb)
# map(utils.freeze_net, self.F_d.values())
self.F_s.zero_grad()
for f_d in self.F_d.values():
f_d.zero_grad()
self.C.zero_grad()
shared_feats, domain_feats = [], []
for domain in opt.domains:
inputs, targets = utils.endless_get_next_batch(
train_loaders, train_iters, domain)
#target = torch.int64 rn
targets = targets.to(opt.device)
inputs = inputs.to(opt.device)
shared_feat = self.F_s(inputs)
shared_feats.append(shared_feat)
domain_feat = self.F_d[domain](inputs)
domain_feats.append(domain_feat)
features = torch.cat((shared_feat, domain_feat), dim=1)
c_outputs = self.C(features)
#return c_outputs, targets
#DEVICE SIDE TRIGGERED ERROR OCCUR HERE (l_c=...)
l_c = functional.nll_loss(c_outputs, targets)
l_c.backward(retain_graph=True)
# training accuracy
_, pred = torch.max(c_outputs, 1)
total[domain] += targets.size(0)
correct[domain] += (pred == targets).sum().item()
# update F with D gradients on all domains
for domain in opt.unlabeled_domains:
d_inputs, _ = utils.endless_get_next_batch(
unlabeled_loaders, unlabeled_iters, domain)
d_inputs = d_inputs.to(opt.device)
shared_feat = self.F_s(d_inputs)
d_outputs = self.D(shared_feat)
if opt.loss.lower() == 'gr':
d_targets = utils.get_domain_label(opt.loss, domain, len(d_inputs))
l_d = functional.nll_loss(d_outputs, d_targets)
if opt.lambd > 0:
l_d *= -opt.lambd | elif opt.loss.lower() == 'l2':
d_targets = utils.get_random_domain_label(opt.loss, len(d_inputs))
l_d = functional.mse_loss(d_outputs, d_targets)
if opt.lambd > 0:
l_d *= opt.lambd
l_d.backward()
optimizer.step()
# print(loss_d)
# print('l_d loss: {}'.format(l_d.item()))
# print('l_c loss: {}'.format(l_c.item()))
loss_d_res.append(loss_d['test'])
l_d_res.append(l_d.item())
l_c_res.append(l_c.item())
if (epoch + 1) % kwargs["display_step"] == 0:
print(
"Epoch:", "%04d, done" % (epoch + 1) #"cost=", "{:.9f}"#.format(l_d.data[0])
)
return loss_d_res, l_d_res, l_c_res
def transform(self, d, *args, **kwargs):
F_d = self.F_d[opt.domains[0]]
self.F_s.eval()
F_d.eval()
self.C.eval()
_,_,_,it = self.prepare_data(d)
it = it[opt.unlabeled_domains[0]]
correct = 0
total = 0
confusion = ConfusionMeter(opt.num_labels)
| random_line_split | |
fid_pics.py | 0'
exp4.pz_std = 2.0
exp4.z_dim = 8
exp4.symmetrize = False
exp4.dataset = 'mnist'
exp4.alias = 'mnist_gan_dcgan'
exp4.test_size = 1000
# Exp 5: CelebA with WAE+MMD on 64 dimensional Z space, BEGAN architecture
exp5 = ExpInfo()
exp5.trained_model_path = cluster_celeba_mmd_began_path
exp5.model_id = '157800'
exp5.pz_std = 2.0
exp5.z_dim = 64
exp5.symmetrize = True
exp5.dataset = 'celebA'
exp5.alias = 'celeba_mmd_began'
exp5.test_size = 512
# Exp 6: MNIST with VAE on 8 dimensional Z space, DCGAN architecture
exp6 = ExpInfo()
exp6.trained_model_path = cluster_mnist_vae_path
exp6.model_id = 'final-69000'
exp6.pz_std = 1.0
exp6.z_dim = 8
exp6.symmetrize = False
exp6.dataset = 'mnist'
exp6.alias = 'mnist_vae_dcgan'
exp6.test_size = 1000
# Exp 7: MNIST with VAE on 2 dimensional Z space, DCGAN architecture
exp7 = ExpInfo()
exp7.trained_model_path = cluster_mnist_vae2d_path
exp7.model_id = 'final-69000'
exp7.pz_std = 1.0
exp7.z_dim = 2
exp7.symmetrize = False
exp7.dataset = 'mnist'
exp7.alias = 'mnist_vae_2d_dcgan'
exp7.test_size = 1000
# Exp 8: MNIST with WAE-MMD on 2 dimensional Z space, DCGAN architecture
exp8 = ExpInfo()
exp8.trained_model_path = cluster_mnist_mmd2d_path
exp8.model_id = 'final-69000'
exp8.pz_std = 2.0
exp8.z_dim = 2
exp8.symmetrize = False
exp8.dataset = 'mnist'
exp8.alias = 'mnist_mmd_2d_dcgan'
exp8.test_size = 1000
# Exp 9: CelebA with VAE on 64 dimensional Z space, dcgan architecture
exp9 = ExpInfo()
exp9.trained_model_path = cluster_celeba_vae_path
exp9.model_id = '126240'
exp9.pz_std = 1.0
exp9.z_dim = 64
exp9.symmetrize = True
exp9.dataset = 'celebA'
exp9.alias = 'celeba_vae'
exp9.test_size = 512
if exp_name == 'celeba_mmd_dcgan':
exp = exp1
elif exp_name == 'celeba_gan_dcgan':
exp = exp2
elif exp_name == 'mnist_mmd_dcgan':
exp = exp3
elif exp_name == 'mnist_gan_dcgan':
exp = exp4
elif exp_name == 'celeba_mmd_began':
exp = exp5
elif exp_name == 'mnist_vae':
exp = exp6
elif exp_name == 'mnist_vae_2d':
exp = exp7
elif exp_name == 'mnist_mmd_2d':
exp = exp8
elif exp_name == 'celeba_vae':
exp = exp9
exp_list = [exp]
for exp in exp_list:
output_dir = os.path.join(OUT_DIR, exp.alias)
create_dir(output_dir)
z_dim = exp.z_dim
pz_std = exp.pz_std
dataset = exp.dataset
model_path = exp.trained_model_path
normalyze = exp.symmetrize
if SAVE_REAL_PICS:
pic_dir = os.path.join(output_dir, 'real')
create_dir(pic_dir)
# Saving real pics
opts = {}
opts['dataset'] = dataset
opts['input_normalize_sym'] = normalyze
opts['work_dir'] = output_dir
if exp.dataset == 'celebA':
opts['data_dir'] = CELEBA_DATA_DIR
elif exp.dataset == 'mnist':
opts['data_dir'] = MNIST_DATA_DIR
opts['celebA_crop'] = 'closecrop'
data = DataHandler(opts)
pic_id = 1
if dataset == 'celebA':
shuffled_ids = np.load(os.path.join(model_path, 'shuffled_training_ids'))
test_ids = shuffled_ids[-exp.test_size:]
test_images = data.data
train_ids = shuffled_ids[:-exp.test_size]
train_images = data.data
else:
test_images = data.test_data.X
train_images = data.data.X
train_ids = range(len(train_images))
test_ids = range(len(test_images))
if SAVE_PNG:
for idx in test_ids:
if pic_id % 1000 == 0:
print 'Saved %d/%d' % (pic_id, NUM_PICS)
save_pic(test_images[idx], os.path.join(pic_dir, 'real_image{:05d}.png'.format(pic_id)), exp)
pic_id += 1
if pic_id > NUM_PICS:
break
num_remain = max(NUM_PICS - len(test_ids), 0)
train_size = data.num_points
rand_train_ids = np.random.choice(train_size, num_remain, replace=False)
rand_train_ids = [train_ids[idx] for idx in rand_train_ids]
rand_train_pics = train_images[rand_train_ids]
if SAVE_PNG:
for i in range(num_remain):
if pic_id % 1000 == 0:
print 'Saved %d/%d' % (pic_id, NUM_PICS)
save_pic(rand_train_pics[i], os.path.join(pic_dir, 'real_image{:05d}.png'.format(pic_id)), exp)
pic_id += 1
all_pics = np.vstack([test_images, rand_train_pics])
all_pics = all_pics.astype(np.float)
if len(all_pics) > NUM_PICS:
all_pics = all_pics[:NUM_PICS]
np.random.shuffle(all_pics)
np.save(os.path.join(output_dir, 'real'), all_pics)
if SAVE_FAKE_PICS:
with tf.Session() as sess:
with sess.graph.as_default():
saver = tf.train.import_meta_graph(
os.path.join(model_path, 'checkpoints', model_name_prefix + exp.model_id + '.meta'))
saver.restore(sess, os.path.join(model_path, 'checkpoints', model_name_prefix + exp.model_id))
real_points_ph = tf.get_collection('real_points_ph')[0]
noise_ph = tf.get_collection('noise_ph')[0]
is_training_ph = tf.get_collection('is_training_ph')[0]
decoder = tf.get_collection('decoder')[0]
# Saving random samples
mean = np.zeros(z_dim)
cov = np.identity(z_dim)
noise = pz_std * np.random.multivariate_normal(
mean, cov, NUM_PICS).astype(np.float32)
res = sess.run(decoder, feed_dict={noise_ph: noise, is_training_ph: False})
pic_dir = os.path.join(output_dir, 'fake')
create_dir(pic_dir)
if SAVE_PNG:
for i in range(1, NUM_PICS + 1):
if i % 1000 == 0:
print 'Saved %d/%d' % (i, NUM_PICS)
save_pic(res[i-1], os.path.join(pic_dir, 'fake_image{:05d}.png'.format(i)), exp)
np.save(os.path.join(output_dir, 'fake'), res)
def save_pic(pic, path, exp):
| if len(pic.shape) == 4:
pic = pic[0]
height = pic.shape[0]
width = pic.shape[1]
fig = plt.figure(frameon=False, figsize=(width, height))#, dpi=1)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
if exp.symmetrize:
pic = (pic + 1.) / 2.
if exp.dataset == 'mnist':
pic = pic[:, :, 0]
pic = 1. - pic
if exp.dataset == 'mnist':
ax.imshow(pic, cmap='Greys', interpolation='none')
else:
ax.imshow(pic, interpolation='none')
fig.savefig(path, dpi=1, format='png')
plt.close()
# if exp.dataset == 'mnist': | identifier_body | |
fid_pics.py | _expC_81'
cluster_mnist_mmd2d_path = './mount/GANs/results_mnist_pot_smaller_zdim2_21'
cluster_mnist_gan_path = './mount/GANs/results_mnist_pot_sota_worst2d1'
cluster_mnist_vae_path = './mount/GANs/results_mnist_vae_81'
cluster_mnist_vae2d_path = './mount/GANs/results_mnist_vae_zdim2_21'
cluster_celeba_gan_path = './mount/GANs/results_celeba_pot_worst2d_plateau_gan_jsmod_641'
cluster_celeba_vae_path = './mount/GANs/results_celeba_vae_641'
cluster_celeba_mmd_path = './mount/GANs/results_celeba_pot_worst2d_plateau_mmd_642'
cluster_celeba_mmd_began_path = './mount/GANs/results_celeba_pot_worst2d_plateau_mmd_began_642'
model_name_prefix = 'trained-pot-'
# Exp 1: CelebA with WAE+MMD on 64 dimensional Z space, DCGAN architecture
exp1 = ExpInfo()
exp1.trained_model_path = cluster_celeba_mmd_path
exp1.model_id = '378720'
exp1.pz_std = 2.0
exp1.z_dim = 64
exp1.symmetrize = True
exp1.dataset = 'celebA'
exp1.alias = 'celeba_mmd_dcgan'
exp1.test_size = 512
# Exp 2: CelebA with WAE+GAN on 64 dimensional Z space, DCGAN architecture
exp2 = ExpInfo()
exp2.trained_model_path = cluster_celeba_gan_path
exp2.model_id = '126480'
exp2.pz_std = 2.0
exp2.z_dim = 64
exp2.symmetrize = True
exp2.dataset = 'celebA'
exp2.alias = 'celeba_gan_dcgan'
exp2.test_size = 512
# Exp 3: MNIST with WAE+MMD on 8 dimensional Z space, DCGAN architecture
exp3 = ExpInfo()
exp3.trained_model_path = cluster_mnist_mmd_path
exp3.model_id = '55200'
exp3.pz_std = 1.0
exp3.z_dim = 8
exp3.symmetrize = False
exp3.dataset = 'mnist'
exp3.alias = 'mnist_mmd_dcgan'
exp3.test_size = 1000
# Exp 4: MNIST with WAE+GAN on 8 dimensional Z space, DCGAN architecture
exp4 = ExpInfo()
exp4.trained_model_path = cluster_mnist_gan_path
exp4.model_id = '62100'
exp4.pz_std = 2.0
exp4.z_dim = 8
exp4.symmetrize = False
exp4.dataset = 'mnist'
exp4.alias = 'mnist_gan_dcgan'
exp4.test_size = 1000
# Exp 5: CelebA with WAE+MMD on 64 dimensional Z space, BEGAN architecture
exp5 = ExpInfo()
exp5.trained_model_path = cluster_celeba_mmd_began_path
exp5.model_id = '157800'
exp5.pz_std = 2.0
exp5.z_dim = 64
exp5.symmetrize = True
exp5.dataset = 'celebA'
exp5.alias = 'celeba_mmd_began'
exp5.test_size = 512
# Exp 6: MNIST with VAE on 8 dimensional Z space, DCGAN architecture
exp6 = ExpInfo()
exp6.trained_model_path = cluster_mnist_vae_path
exp6.model_id = 'final-69000'
exp6.pz_std = 1.0
exp6.z_dim = 8
exp6.symmetrize = False
exp6.dataset = 'mnist'
exp6.alias = 'mnist_vae_dcgan'
exp6.test_size = 1000
# Exp 7: MNIST with VAE on 2 dimensional Z space, DCGAN architecture
exp7 = ExpInfo()
exp7.trained_model_path = cluster_mnist_vae2d_path
exp7.model_id = 'final-69000'
exp7.pz_std = 1.0
exp7.z_dim = 2
exp7.symmetrize = False
exp7.dataset = 'mnist'
exp7.alias = 'mnist_vae_2d_dcgan'
exp7.test_size = 1000
# Exp 8: MNIST with WAE-MMD on 2 dimensional Z space, DCGAN architecture
exp8 = ExpInfo()
exp8.trained_model_path = cluster_mnist_mmd2d_path
exp8.model_id = 'final-69000'
exp8.pz_std = 2.0
exp8.z_dim = 2
exp8.symmetrize = False
exp8.dataset = 'mnist'
exp8.alias = 'mnist_mmd_2d_dcgan'
exp8.test_size = 1000
# Exp 9: CelebA with VAE on 64 dimensional Z space, dcgan architecture
exp9 = ExpInfo()
exp9.trained_model_path = cluster_celeba_vae_path
exp9.model_id = '126240'
exp9.pz_std = 1.0
exp9.z_dim = 64
exp9.symmetrize = True
exp9.dataset = 'celebA'
exp9.alias = 'celeba_vae'
exp9.test_size = 512
if exp_name == 'celeba_mmd_dcgan':
exp = exp1
elif exp_name == 'celeba_gan_dcgan':
exp = exp2
elif exp_name == 'mnist_mmd_dcgan':
exp = exp3
elif exp_name == 'mnist_gan_dcgan':
exp = exp4
elif exp_name == 'celeba_mmd_began':
exp = exp5
elif exp_name == 'mnist_vae':
exp = exp6
elif exp_name == 'mnist_vae_2d':
exp = exp7
elif exp_name == 'mnist_mmd_2d':
|
elif exp_name == 'celeba_vae':
exp = exp9
exp_list = [exp]
for exp in exp_list:
output_dir = os.path.join(OUT_DIR, exp.alias)
create_dir(output_dir)
z_dim = exp.z_dim
pz_std = exp.pz_std
dataset = exp.dataset
model_path = exp.trained_model_path
normalyze = exp.symmetrize
if SAVE_REAL_PICS:
pic_dir = os.path.join(output_dir, 'real')
create_dir(pic_dir)
# Saving real pics
opts = {}
opts['dataset'] = dataset
opts['input_normalize_sym'] = normalyze
opts['work_dir'] = output_dir
if exp.dataset == 'celebA':
opts['data_dir'] = CELEBA_DATA_DIR
elif exp.dataset == 'mnist':
opts['data_dir'] = MNIST_DATA_DIR
opts['celebA_crop'] = 'closecrop'
data = DataHandler(opts)
pic_id = 1
if dataset == 'celebA':
shuffled_ids = np.load(os.path.join(model_path, 'shuffled_training_ids'))
test_ids = shuffled_ids[-exp.test_size:]
test_images = data.data
train_ids = shuffled_ids[:-exp.test_size]
train_images = data.data
else:
test_images = data.test_data.X
train_images = data.data.X
train_ids = range(len(train_images))
test_ids = range(len(test_images))
if SAVE_PNG:
for idx in test_ids:
if pic_id % 1000 == 0:
print 'Saved %d/%d' % (pic_id, NUM_PICS)
save_pic(test_images[idx], os.path.join(pic_dir, 'real_image{:05d}.png'.format(pic_id)), exp)
pic_id += 1
if pic_id > NUM_PICS:
break
num_remain = max(NUM_PICS - len(test_ids), 0)
train_size = data.num_points
rand_train_ids = np.random.choice(train_size, num_remain, replace=False)
rand_train_ids = [train_ids[idx] for idx in rand_train_ids | exp = exp8 | conditional_block |
fid_pics.py | self.symmetrize = None
self.dataset = None
self.alias = None
self.test_size = None
def main():
exp_name = sys.argv[-1]
create_dir(OUT_DIR)
exp_names = ['mnist_gan', 'mnist_mmd', 'celeba_gan', 'celeba_mmd']
cluster_mnist_mmd_path = './mount/GANs/results_mnist_pot_sota_worst2d_plateau_mmd_tricks_expC_81'
cluster_mnist_mmd2d_path = './mount/GANs/results_mnist_pot_smaller_zdim2_21'
cluster_mnist_gan_path = './mount/GANs/results_mnist_pot_sota_worst2d1'
cluster_mnist_vae_path = './mount/GANs/results_mnist_vae_81'
cluster_mnist_vae2d_path = './mount/GANs/results_mnist_vae_zdim2_21'
cluster_celeba_gan_path = './mount/GANs/results_celeba_pot_worst2d_plateau_gan_jsmod_641'
cluster_celeba_vae_path = './mount/GANs/results_celeba_vae_641'
cluster_celeba_mmd_path = './mount/GANs/results_celeba_pot_worst2d_plateau_mmd_642'
cluster_celeba_mmd_began_path = './mount/GANs/results_celeba_pot_worst2d_plateau_mmd_began_642'
model_name_prefix = 'trained-pot-'
# Exp 1: CelebA with WAE+MMD on 64 dimensional Z space, DCGAN architecture
exp1 = ExpInfo()
exp1.trained_model_path = cluster_celeba_mmd_path
exp1.model_id = '378720'
exp1.pz_std = 2.0
exp1.z_dim = 64
exp1.symmetrize = True
exp1.dataset = 'celebA'
exp1.alias = 'celeba_mmd_dcgan'
exp1.test_size = 512
# Exp 2: CelebA with WAE+GAN on 64 dimensional Z space, DCGAN architecture
exp2 = ExpInfo()
exp2.trained_model_path = cluster_celeba_gan_path
exp2.model_id = '126480'
exp2.pz_std = 2.0
exp2.z_dim = 64
exp2.symmetrize = True
exp2.dataset = 'celebA'
exp2.alias = 'celeba_gan_dcgan'
exp2.test_size = 512
# Exp 3: MNIST with WAE+MMD on 8 dimensional Z space, DCGAN architecture
exp3 = ExpInfo()
exp3.trained_model_path = cluster_mnist_mmd_path
exp3.model_id = '55200'
exp3.pz_std = 1.0
exp3.z_dim = 8
exp3.symmetrize = False
exp3.dataset = 'mnist'
exp3.alias = 'mnist_mmd_dcgan'
exp3.test_size = 1000
# Exp 4: MNIST with WAE+GAN on 8 dimensional Z space, DCGAN architecture
exp4 = ExpInfo()
exp4.trained_model_path = cluster_mnist_gan_path
exp4.model_id = '62100'
exp4.pz_std = 2.0
exp4.z_dim = 8
exp4.symmetrize = False
exp4.dataset = 'mnist'
exp4.alias = 'mnist_gan_dcgan'
exp4.test_size = 1000
# Exp 5: CelebA with WAE+MMD on 64 dimensional Z space, BEGAN architecture
exp5 = ExpInfo()
exp5.trained_model_path = cluster_celeba_mmd_began_path
exp5.model_id = '157800'
exp5.pz_std = 2.0
exp5.z_dim = 64
exp5.symmetrize = True
exp5.dataset = 'celebA'
exp5.alias = 'celeba_mmd_began'
exp5.test_size = 512
# Exp 6: MNIST with VAE on 8 dimensional Z space, DCGAN architecture
exp6 = ExpInfo()
exp6.trained_model_path = cluster_mnist_vae_path
exp6.model_id = 'final-69000'
exp6.pz_std = 1.0
exp6.z_dim = 8
exp6.symmetrize = False
exp6.dataset = 'mnist'
exp6.alias = 'mnist_vae_dcgan'
exp6.test_size = 1000
# Exp 7: MNIST with VAE on 2 dimensional Z space, DCGAN architecture
exp7 = ExpInfo()
exp7.trained_model_path = cluster_mnist_vae2d_path
exp7.model_id = 'final-69000'
exp7.pz_std = 1.0
exp7.z_dim = 2
exp7.symmetrize = False
exp7.dataset = 'mnist'
exp7.alias = 'mnist_vae_2d_dcgan'
exp7.test_size = 1000
# Exp 8: MNIST with WAE-MMD on 2 dimensional Z space, DCGAN architecture
exp8 = ExpInfo()
exp8.trained_model_path = cluster_mnist_mmd2d_path
exp8.model_id = 'final-69000'
exp8.pz_std = 2.0
exp8.z_dim = 2
exp8.symmetrize = False
exp8.dataset = 'mnist'
exp8.alias = 'mnist_mmd_2d_dcgan'
exp8.test_size = 1000
# Exp 9: CelebA with VAE on 64 dimensional Z space, dcgan architecture
exp9 = ExpInfo()
exp9.trained_model_path = cluster_celeba_vae_path
exp9.model_id = '126240'
exp9.pz_std = 1.0
exp9.z_dim = 64
exp9.symmetrize = True
exp9.dataset = 'celebA'
exp9.alias = 'celeba_vae'
exp9.test_size = 512
if exp_name == 'celeba_mmd_dcgan':
exp = exp1
elif exp_name == 'celeba_gan_dcgan':
exp = exp2
elif exp_name == 'mnist_mmd_dcgan':
exp = exp3
elif exp_name == 'mnist_gan_dcgan':
exp = exp4
elif exp_name == 'celeba_mmd_began':
exp = exp5
elif exp_name == 'mnist_vae':
exp = exp6
elif exp_name == 'mnist_vae_2d':
exp = exp7
elif exp_name == 'mnist_mmd_2d':
exp = exp8
elif exp_name == 'celeba_vae':
exp = exp9
exp_list = [exp]
for exp in exp_list:
output_dir = os.path.join(OUT_DIR, exp.alias)
create_dir(output_dir)
z_dim = exp.z_dim
pz_std = exp.pz_std
dataset = exp.dataset
model_path = exp.trained_model_path
normalyze = exp.symmetrize
if SAVE_REAL_PICS:
pic_dir = os.path.join(output_dir, 'real')
create_dir(pic_dir)
# Saving real pics
opts = {}
opts['dataset'] = dataset
opts['input_normalize_sym'] = normalyze
opts['work_dir'] = output_dir
if exp.dataset == 'celebA':
opts['data_dir'] = CELEBA_DATA_DIR
elif exp.dataset == 'mnist':
opts['data_dir'] = MNIST_DATA_DIR
opts['celebA_crop'] = 'closecrop'
data = DataHandler(opts)
pic_id = 1
if dataset == 'celebA':
shuffled_ids = np.load(os.path.join(model_path, 'shuffled_training_ids'))
test_ids = shuffled_ids[-exp.test_size:]
test_images = data.data
train_ids = shuffled_ids[:-exp.test_size]
train_images = data.data
else:
test_images = data.test_data.X
train_images = data.data.X
train_ids = range(len(train_images))
test_ids = range(len(test_images))
if SAVE_PNG:
for idx in test_ids:
if pic_id % 1000 == 0:
print 'Saved %d/%d' % (pic_id, NUM_PICS)
| random_line_split | ||
fid_pics.py | 800'
exp5.pz_std = 2.0
exp5.z_dim = 64
exp5.symmetrize = True
exp5.dataset = 'celebA'
exp5.alias = 'celeba_mmd_began'
exp5.test_size = 512
# Exp 6: MNIST with VAE on 8 dimensional Z space, DCGAN architecture
exp6 = ExpInfo()
exp6.trained_model_path = cluster_mnist_vae_path
exp6.model_id = 'final-69000'
exp6.pz_std = 1.0
exp6.z_dim = 8
exp6.symmetrize = False
exp6.dataset = 'mnist'
exp6.alias = 'mnist_vae_dcgan'
exp6.test_size = 1000
# Exp 7: MNIST with VAE on 2 dimensional Z space, DCGAN architecture
exp7 = ExpInfo()
exp7.trained_model_path = cluster_mnist_vae2d_path
exp7.model_id = 'final-69000'
exp7.pz_std = 1.0
exp7.z_dim = 2
exp7.symmetrize = False
exp7.dataset = 'mnist'
exp7.alias = 'mnist_vae_2d_dcgan'
exp7.test_size = 1000
# Exp 8: MNIST with WAE-MMD on 2 dimensional Z space, DCGAN architecture
exp8 = ExpInfo()
exp8.trained_model_path = cluster_mnist_mmd2d_path
exp8.model_id = 'final-69000'
exp8.pz_std = 2.0
exp8.z_dim = 2
exp8.symmetrize = False
exp8.dataset = 'mnist'
exp8.alias = 'mnist_mmd_2d_dcgan'
exp8.test_size = 1000
# Exp 9: CelebA with VAE on 64 dimensional Z space, dcgan architecture
exp9 = ExpInfo()
exp9.trained_model_path = cluster_celeba_vae_path
exp9.model_id = '126240'
exp9.pz_std = 1.0
exp9.z_dim = 64
exp9.symmetrize = True
exp9.dataset = 'celebA'
exp9.alias = 'celeba_vae'
exp9.test_size = 512
if exp_name == 'celeba_mmd_dcgan':
exp = exp1
elif exp_name == 'celeba_gan_dcgan':
exp = exp2
elif exp_name == 'mnist_mmd_dcgan':
exp = exp3
elif exp_name == 'mnist_gan_dcgan':
exp = exp4
elif exp_name == 'celeba_mmd_began':
exp = exp5
elif exp_name == 'mnist_vae':
exp = exp6
elif exp_name == 'mnist_vae_2d':
exp = exp7
elif exp_name == 'mnist_mmd_2d':
exp = exp8
elif exp_name == 'celeba_vae':
exp = exp9
exp_list = [exp]
for exp in exp_list:
output_dir = os.path.join(OUT_DIR, exp.alias)
create_dir(output_dir)
z_dim = exp.z_dim
pz_std = exp.pz_std
dataset = exp.dataset
model_path = exp.trained_model_path
normalyze = exp.symmetrize
if SAVE_REAL_PICS:
pic_dir = os.path.join(output_dir, 'real')
create_dir(pic_dir)
# Saving real pics
opts = {}
opts['dataset'] = dataset
opts['input_normalize_sym'] = normalyze
opts['work_dir'] = output_dir
if exp.dataset == 'celebA':
opts['data_dir'] = CELEBA_DATA_DIR
elif exp.dataset == 'mnist':
opts['data_dir'] = MNIST_DATA_DIR
opts['celebA_crop'] = 'closecrop'
data = DataHandler(opts)
pic_id = 1
if dataset == 'celebA':
shuffled_ids = np.load(os.path.join(model_path, 'shuffled_training_ids'))
test_ids = shuffled_ids[-exp.test_size:]
test_images = data.data
train_ids = shuffled_ids[:-exp.test_size]
train_images = data.data
else:
test_images = data.test_data.X
train_images = data.data.X
train_ids = range(len(train_images))
test_ids = range(len(test_images))
if SAVE_PNG:
for idx in test_ids:
if pic_id % 1000 == 0:
print 'Saved %d/%d' % (pic_id, NUM_PICS)
save_pic(test_images[idx], os.path.join(pic_dir, 'real_image{:05d}.png'.format(pic_id)), exp)
pic_id += 1
if pic_id > NUM_PICS:
break
num_remain = max(NUM_PICS - len(test_ids), 0)
train_size = data.num_points
rand_train_ids = np.random.choice(train_size, num_remain, replace=False)
rand_train_ids = [train_ids[idx] for idx in rand_train_ids]
rand_train_pics = train_images[rand_train_ids]
if SAVE_PNG:
for i in range(num_remain):
if pic_id % 1000 == 0:
print 'Saved %d/%d' % (pic_id, NUM_PICS)
save_pic(rand_train_pics[i], os.path.join(pic_dir, 'real_image{:05d}.png'.format(pic_id)), exp)
pic_id += 1
all_pics = np.vstack([test_images, rand_train_pics])
all_pics = all_pics.astype(np.float)
if len(all_pics) > NUM_PICS:
all_pics = all_pics[:NUM_PICS]
np.random.shuffle(all_pics)
np.save(os.path.join(output_dir, 'real'), all_pics)
if SAVE_FAKE_PICS:
with tf.Session() as sess:
with sess.graph.as_default():
saver = tf.train.import_meta_graph(
os.path.join(model_path, 'checkpoints', model_name_prefix + exp.model_id + '.meta'))
saver.restore(sess, os.path.join(model_path, 'checkpoints', model_name_prefix + exp.model_id))
real_points_ph = tf.get_collection('real_points_ph')[0]
noise_ph = tf.get_collection('noise_ph')[0]
is_training_ph = tf.get_collection('is_training_ph')[0]
decoder = tf.get_collection('decoder')[0]
# Saving random samples
mean = np.zeros(z_dim)
cov = np.identity(z_dim)
noise = pz_std * np.random.multivariate_normal(
mean, cov, NUM_PICS).astype(np.float32)
res = sess.run(decoder, feed_dict={noise_ph: noise, is_training_ph: False})
pic_dir = os.path.join(output_dir, 'fake')
create_dir(pic_dir)
if SAVE_PNG:
for i in range(1, NUM_PICS + 1):
if i % 1000 == 0:
print 'Saved %d/%d' % (i, NUM_PICS)
save_pic(res[i-1], os.path.join(pic_dir, 'fake_image{:05d}.png'.format(i)), exp)
np.save(os.path.join(output_dir, 'fake'), res)
def save_pic(pic, path, exp):
if len(pic.shape) == 4:
pic = pic[0]
height = pic.shape[0]
width = pic.shape[1]
fig = plt.figure(frameon=False, figsize=(width, height))#, dpi=1)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
if exp.symmetrize:
pic = (pic + 1.) / 2.
if exp.dataset == 'mnist':
pic = pic[:, :, 0]
pic = 1. - pic
if exp.dataset == 'mnist':
ax.imshow(pic, cmap='Greys', interpolation='none')
else:
ax.imshow(pic, interpolation='none')
fig.savefig(path, dpi=1, format='png')
plt.close()
# if exp.dataset == 'mnist':
# pic = pic[:, :, 0]
# pic = 1. - pic
# ax = plt.imshow(pic, cmap='Greys', interpolation='none')
# else:
# ax = plt.imshow(pic, interpolation='none')
# ax.axes.get_xaxis().set_ticks([])
# ax.axes.get_yaxis().set_ticks([])
# ax.axes.set_xlim([0, width])
# ax.axes.set_ylim([height, 0])
# ax.axes.set_aspect(1)
# fig.savefig(path, format='png')
# plt.close()
def | create_dir | identifier_name | |
server_utils.go | , nil
}
// Prepare bdev storage. Assumes validation has already been performed on server config. Hugepages
// are required for both emulated (AIO devices) and real NVMe bdevs. VFIO and IOMMU are not
// required for emulated NVMe.
func prepBdevStorage(srv *server, iommuEnabled bool) error {
defer srv.logDuration(track("time to prepare bdev storage"))
if srv.cfg.DisableHugepages {
srv.log.Debugf("skip nvme prepare as disable_hugepages: true in config")
return nil
}
bdevCfgs := getBdevCfgsFromSrvCfg(srv.cfg)
// Perform these checks only if non-emulated NVMe is used and user is unprivileged.
if bdevCfgs.HaveRealNVMe() && srv.runningUser.Username != "root" {
if srv.cfg.DisableVFIO {
return FaultVfioDisabled
}
if !iommuEnabled {
return FaultIommuDisabled
}
}
// When requesting to prepare NVMe drives during service start-up, use all addresses
// specified in engine config BdevList parameters as the PCIAllowList and the server
// config BdevExclude parameter as the PCIBlockList.
prepReq := storage.BdevPrepareRequest{
TargetUser: srv.runningUser.Username,
PCIAllowList: strings.Join(bdevCfgs.NVMeBdevs().Devices(), storage.BdevPciAddrSep),
PCIBlockList: strings.Join(srv.cfg.BdevExclude, storage.BdevPciAddrSep),
DisableVFIO: srv.cfg.DisableVFIO,
}
enableVMD := true
if srv.cfg.DisableVMD != nil && *srv.cfg.DisableVMD {
enableVMD = false
}
switch {
case enableVMD && srv.cfg.DisableVFIO:
srv.log.Info("VMD not enabled because VFIO disabled in config")
case enableVMD && !iommuEnabled:
srv.log.Info("VMD not enabled because IOMMU disabled on platform")
case enableVMD && bdevCfgs.HaveEmulatedNVMe():
srv.log.Info("VMD not enabled because emulated NVMe devices found in config")
default:
// If no case above matches, set enable VMD flag in request otherwise leave false.
prepReq.EnableVMD = enableVMD
}
if bdevCfgs.HaveBdevs() {
// The NrHugepages config value is a total for all engines. Distribute allocation
// of hugepages across each engine's numa node (as validation ensures that
// TargetsCount is equal for each engine). Assumes an equal number of engine's per
// numa node.
numaNodes, err := getEngineNUMANodes(srv.log, srv.cfg.Engines)
if err != nil {
return err
}
if len(numaNodes) == 0 {
return errors.New("invalid number of numa nodes detected (0)")
}
// Request a few more hugepages than actually required for each NUMA node
// allocation as some overhead may result in one or two being unavailable.
prepReq.HugePageCount = srv.cfg.NrHugepages / len(numaNodes)
prepReq.HugePageCount += common.ExtraHugePages
prepReq.HugeNodes = strings.Join(numaNodes, ",")
srv.log.Debugf("allocating %d hugepages on each of these numa nodes: %v",
prepReq.HugePageCount, numaNodes)
} else {
if srv.cfg.NrHugepages == 0 {
// If nr_hugepages is unset then set minimum needed for scanning in prepare
// request.
prepReq.HugePageCount = scanMinHugePageCount
} else {
// If nr_hugepages has been set manually but no bdevs in config then
// allocate on numa node 0 (for example if a bigger number of hugepages are
// required in discovery mode for an unusually large number of SSDs).
prepReq.HugePageCount = srv.cfg.NrHugepages
}
srv.log.Debugf("allocating %d hugepages on numa node 0", prepReq.HugePageCount)
}
// Run prepare to bind devices to user-space driver and allocate hugepages.
//
// TODO: should be passing root context into prepare request to
// facilitate cancellation.
if _, err := srv.ctlSvc.NvmePrepare(prepReq); err != nil {
srv.log.Errorf("automatic NVMe prepare failed: %s", err)
}
return nil
}
// scanBdevStorage performs discovery and validates existence of configured NVMe SSDs.
func scanBdevStorage(srv *server) (*storage.BdevScanResponse, error) {
defer srv.logDuration(track("time to scan bdev storage"))
if srv.cfg.DisableHugepages {
srv.log.Debugf("skip nvme scan as hugepages have been disabled in config")
return &storage.BdevScanResponse{}, nil
}
nvmeScanResp, err := srv.ctlSvc.NvmeScan(storage.BdevScanRequest{
DeviceList: getBdevCfgsFromSrvCfg(srv.cfg).Bdevs(),
BypassCache: true, // init cache on first scan
})
if err != nil {
err = errors.Wrap(err, "NVMe Scan Failed")
srv.log.Errorf("%s", err)
return nil, err
}
return nvmeScanResp, nil
}
func setEngineBdevs(engine *EngineInstance, scanResp *storage.BdevScanResponse, lastEngineIdx, lastBdevCount *int) error {
badInput := ""
switch {
case engine == nil:
badInput = "engine"
case scanResp == nil:
badInput = "scanResp"
case lastEngineIdx == nil:
badInput = "lastEngineIdx"
case lastBdevCount == nil:
badInput = "lastBdevCount"
}
if badInput != "" {
return errors.New("nil input param: " + badInput)
}
if err := engine.storage.SetBdevCache(*scanResp); err != nil {
return errors.Wrap(err, "setting engine storage bdev cache")
}
// After engine's bdev cache has been set, the cache will only contain details of bdevs
// identified in the relevant engine config and device addresses will have been verified
// against NVMe scan results. As any VMD endpoint addresses will have been replaced with
// backing device addresses, device counts will reflect the number of physical (as opposed
// to logical) bdevs and engine bdev counts can be accurately compared.
eIdx := engine.Index()
bdevCache := engine.storage.GetBdevCache()
newNrBdevs := len(bdevCache.Controllers)
engine.log.Debugf("last: [index: %d, bdevCount: %d], current: [index: %d, bdevCount: %d]",
*lastEngineIdx, *lastBdevCount, eIdx, newNrBdevs)
// Update last recorded counters if this is the first update or if the number of bdevs is
// unchanged. If bdev count differs between engines, return fault.
switch {
case *lastEngineIdx < 0:
if *lastBdevCount >= 0 {
return errors.New("expecting both lastEngineIdx and lastBdevCount to be unset")
}
*lastEngineIdx = int(eIdx)
*lastBdevCount = newNrBdevs
case *lastBdevCount < 0:
return errors.New("expecting both lastEngineIdx and lastBdevCount to be set")
case newNrBdevs == *lastBdevCount:
*lastEngineIdx = int(eIdx)
default:
return config.FaultConfigBdevCountMismatch(int(eIdx), newNrBdevs, *lastEngineIdx, *lastBdevCount)
}
return nil
}
func setDaosHelperEnvs(cfg *config.Server, setenv func(k, v string) error) error {
if cfg.HelperLogFile != "" {
if err := setenv(pbin.DaosPrivHelperLogFileEnvVar, cfg.HelperLogFile); err != nil {
return errors.Wrap(err, "unable to configure privileged helper logging")
}
}
if cfg.FWHelperLogFile != "" {
if err := setenv(pbin.DaosFWLogFileEnvVar, cfg.FWHelperLogFile); err != nil {
return errors.Wrap(err, "unable to configure privileged firmware helper logging")
}
}
return nil
}
// Minimum recommended number of hugepages has already been calculated and set in config so verify
// we have enough free hugepage memory to satisfy this requirement before setting mem_size and
// hugepage_size parameters for engine.
func updateMemValues(srv *server, engine *EngineInstance, getMemInfo common.GetMemInfoFn) error {
engine.RLock()
ec := engine.runner.GetConfig()
ei := ec.Index
if ec.Storage.Tiers.Bdevs().Len() == 0 | {
srv.log.Debugf("skipping mem check on engine %d, no bdevs", ei)
engine.RUnlock()
return nil
} | conditional_block | |
server_utils.go | }
// Ensure stable ordering of addresses.
sort.Slice(addrs, func(i, j int) bool {
if !isIPv4(addrs[i]) && isIPv4(addrs[j]) {
return false
} else if isIPv4(addrs[i]) && !isIPv4(addrs[j]) {
return true
}
return bytes.Compare(addrs[i], addrs[j]) < 0
})
return &net.TCPAddr{IP: addrs[0], Port: iPort}, nil
}
const scanMinHugePageCount = 128
func getBdevCfgsFromSrvCfg(cfg *config.Server) storage.TierConfigs {
var bdevCfgs storage.TierConfigs
for _, engineCfg := range cfg.Engines {
bdevCfgs = append(bdevCfgs, engineCfg.Storage.Tiers.BdevConfigs()...)
}
return bdevCfgs
}
func cfgGetReplicas(cfg *config.Server, lookup ipLookupFn) ([]*net.TCPAddr, error) {
var dbReplicas []*net.TCPAddr
for _, ap := range cfg.AccessPoints {
apAddr, err := resolveFirstAddr(ap, lookup)
if err != nil {
return nil, config.FaultConfigBadAccessPoints
}
dbReplicas = append(dbReplicas, apAddr)
}
return dbReplicas, nil
}
func cfgGetRaftDir(cfg *config.Server) string {
if len(cfg.Engines) == 0 {
return "" // can't save to SCM
}
if len(cfg.Engines[0].Storage.Tiers.ScmConfigs()) == 0 {
return ""
}
return filepath.Join(cfg.Engines[0].Storage.Tiers.ScmConfigs()[0].Scm.MountPoint, "control_raft")
}
func writeCoreDumpFilter(log logging.Logger, path string, filter uint8) error {
f, err := os.OpenFile(path, os.O_WRONLY, 0644)
if err != nil {
// Work around a testing oddity that seems to be related to launching
// the server via SSH, with the result that the /proc file is unwritable.
if os.IsPermission(err) {
log.Debugf("Unable to write core dump filter to %s: %s", path, err)
return nil
}
return errors.Wrapf(err, "unable to open core dump filter file %s", path)
}
defer f.Close()
_, err = f.WriteString(fmt.Sprintf("0x%x\n", filter))
return err
}
type replicaAddrGetter interface {
ReplicaAddr() (*net.TCPAddr, error)
}
type ctlAddrParams struct {
port int
replicaAddrSrc replicaAddrGetter
lookupHost ipLookupFn
}
func getControlAddr(params ctlAddrParams) (*net.TCPAddr, error) {
ipStr := "0.0.0.0"
if repAddr, err := params.replicaAddrSrc.ReplicaAddr(); err == nil {
ipStr = repAddr.IP.String()
}
ctlAddr, err := resolveFirstAddr(fmt.Sprintf("[%s]:%d", ipStr, params.port), params.lookupHost)
if err != nil {
return nil, errors.Wrap(err, "resolving control address")
}
return ctlAddr, nil
}
func createListener(ctlAddr *net.TCPAddr, listen netListenFn) (net.Listener, error) {
// Create and start listener on management network.
lis, err := listen("tcp4", fmt.Sprintf("0.0.0.0:%d", ctlAddr.Port))
if err != nil {
return nil, errors.Wrap(err, "unable to listen on management interface")
}
return lis, nil
}
// updateFabricEnvars adjusts the engine fabric configuration.
func updateFabricEnvars(log logging.Logger, cfg *engine.Config, fis *hardware.FabricInterfaceSet) error {
// In the case of some providers, mercury uses the interface name
// such as ib0, while OFI uses the device name such as hfi1_0 CaRT and
// Mercury will now support the new OFI_DOMAIN environment variable so
// that we can specify the correct device for each.
if !cfg.HasEnvVar("OFI_DOMAIN") {
fi, err := fis.GetInterfaceOnNetDevice(cfg.Fabric.Interface, cfg.Fabric.Provider)
if err != nil {
return errors.Wrapf(err, "unable to determine device domain for %s", cfg.Fabric.Interface)
}
log.Debugf("setting OFI_DOMAIN=%s for %s", fi.Name, cfg.Fabric.Interface)
envVar := "OFI_DOMAIN=" + fi.Name
cfg.WithEnvVars(envVar)
}
return nil
}
func getFabricNetDevClass(cfg *config.Server, fis *hardware.FabricInterfaceSet) (hardware.NetDevClass, error) {
var netDevClass hardware.NetDevClass
for index, engine := range cfg.Engines {
fi, err := fis.GetInterfaceOnNetDevice(engine.Fabric.Interface, engine.Fabric.Provider)
if err != nil {
return 0, err
}
ndc := fi.DeviceClass
if index == 0 {
netDevClass = ndc
continue
}
if ndc != netDevClass {
return 0, config.FaultConfigInvalidNetDevClass(index, netDevClass,
ndc, engine.Fabric.Interface)
}
}
return netDevClass, nil
}
// Detect the number of engine configs assigned to each NUMA node and return error if engines are
// distributed unevenly across NUMA nodes. Otherwise return sorted list of NUMA nodes in use.
// Configurations where all engines are on a single NUMA node will be allowed.
func getEngineNUMANodes(log logging.Logger, engineCfgs []*engine.Config) ([]string, error) {
nodeMap := make(map[int]int)
for _, ec := range engineCfgs {
nodeMap[int(ec.Storage.NumaNodeIndex)] += 1
}
var lastCount int
nodes := make([]string, 0, len(engineCfgs))
for k, v := range nodeMap {
if lastCount != 0 && v != lastCount {
return nil, FaultEngineNUMAImbalance(nodeMap)
}
lastCount = v
nodes = append(nodes, fmt.Sprintf("%d", k))
}
sort.Strings(nodes)
return nodes, nil
}
// Prepare bdev storage. Assumes validation has already been performed on server config. Hugepages
// are required for both emulated (AIO devices) and real NVMe bdevs. VFIO and IOMMU are not
// required for emulated NVMe.
func prepBdevStorage(srv *server, iommuEnabled bool) error {
defer srv.logDuration(track("time to prepare bdev storage"))
if srv.cfg.DisableHugepages {
srv.log.Debugf("skip nvme prepare as disable_hugepages: true in config")
return nil
}
bdevCfgs := getBdevCfgsFromSrvCfg(srv.cfg)
// Perform these checks only if non-emulated NVMe is used and user is unprivileged.
if bdevCfgs.HaveRealNVMe() && srv.runningUser.Username != "root" {
if srv.cfg.DisableVFIO {
return FaultVfioDisabled
}
if !iommuEnabled {
return FaultIommuDisabled
}
}
// When requesting to prepare NVMe drives during service start-up, use all addresses
// specified in engine config BdevList parameters as the PCIAllowList and the server
// config BdevExclude parameter as the PCIBlockList.
prepReq := storage.BdevPrepareRequest{
TargetUser: srv.runningUser.Username,
PCIAllowList: strings.Join(bdevCfgs.NVMeBdevs().Devices(), storage.BdevPciAddrSep),
PCIBlockList: strings.Join(srv.cfg.BdevExclude, storage.BdevPciAddrSep),
DisableVFIO: srv.cfg.DisableVFIO,
}
enableVMD := true
if srv.cfg.DisableVMD != nil && *srv.cfg.DisableVMD {
enableVMD = false
}
switch {
case enableVMD && srv.cfg.DisableVFIO:
srv.log.Info("VMD not enabled because VFIO disabled in config")
case enableVMD && !iommuEnabled:
srv.log.Info("VMD not enabled because IOMMU disabled on platform")
case enableVMD && bdevCfgs.HaveEmulatedNVMe():
srv.log.Info("VMD not enabled because emulated NVMe devices found in config")
default:
// If no case above matches, set enable | {
host, port, err := net.SplitHostPort(addr)
if err != nil {
return nil, errors.Wrapf(err, "unable to split %q", addr)
}
iPort, err := strconv.Atoi(port)
if err != nil {
return nil, errors.Wrapf(err, "unable to convert %q to int", port)
}
addrs, err := lookup(host)
if err != nil {
return nil, errors.Wrapf(err, "unable to resolve %q", host)
}
if len(addrs) == 0 {
return nil, errors.Errorf("no addresses found for %q", host)
}
isIPv4 := func(ip net.IP) bool {
return ip.To4() != nil | identifier_body | |
server_utils.go | f, err := os.OpenFile(path, os.O_WRONLY, 0644)
if err != nil {
// Work around a testing oddity that seems to be related to launching
// the server via SSH, with the result that the /proc file is unwritable.
if os.IsPermission(err) {
log.Debugf("Unable to write core dump filter to %s: %s", path, err)
return nil
}
return errors.Wrapf(err, "unable to open core dump filter file %s", path)
}
defer f.Close()
_, err = f.WriteString(fmt.Sprintf("0x%x\n", filter))
return err
}
type replicaAddrGetter interface {
ReplicaAddr() (*net.TCPAddr, error)
}
type ctlAddrParams struct {
port int
replicaAddrSrc replicaAddrGetter
lookupHost ipLookupFn
}
func getControlAddr(params ctlAddrParams) (*net.TCPAddr, error) {
ipStr := "0.0.0.0"
if repAddr, err := params.replicaAddrSrc.ReplicaAddr(); err == nil {
ipStr = repAddr.IP.String()
}
ctlAddr, err := resolveFirstAddr(fmt.Sprintf("[%s]:%d", ipStr, params.port), params.lookupHost)
if err != nil {
return nil, errors.Wrap(err, "resolving control address")
}
return ctlAddr, nil
}
func createListener(ctlAddr *net.TCPAddr, listen netListenFn) (net.Listener, error) {
// Create and start listener on management network.
lis, err := listen("tcp4", fmt.Sprintf("0.0.0.0:%d", ctlAddr.Port))
if err != nil {
return nil, errors.Wrap(err, "unable to listen on management interface")
}
return lis, nil
}
// updateFabricEnvars adjusts the engine fabric configuration.
func updateFabricEnvars(log logging.Logger, cfg *engine.Config, fis *hardware.FabricInterfaceSet) error {
// In the case of some providers, mercury uses the interface name
// such as ib0, while OFI uses the device name such as hfi1_0 CaRT and
// Mercury will now support the new OFI_DOMAIN environment variable so
// that we can specify the correct device for each.
if !cfg.HasEnvVar("OFI_DOMAIN") {
fi, err := fis.GetInterfaceOnNetDevice(cfg.Fabric.Interface, cfg.Fabric.Provider)
if err != nil {
return errors.Wrapf(err, "unable to determine device domain for %s", cfg.Fabric.Interface)
}
log.Debugf("setting OFI_DOMAIN=%s for %s", fi.Name, cfg.Fabric.Interface)
envVar := "OFI_DOMAIN=" + fi.Name
cfg.WithEnvVars(envVar)
}
return nil
}
func getFabricNetDevClass(cfg *config.Server, fis *hardware.FabricInterfaceSet) (hardware.NetDevClass, error) {
var netDevClass hardware.NetDevClass
for index, engine := range cfg.Engines {
fi, err := fis.GetInterfaceOnNetDevice(engine.Fabric.Interface, engine.Fabric.Provider)
if err != nil {
return 0, err
}
ndc := fi.DeviceClass
if index == 0 {
netDevClass = ndc
continue
}
if ndc != netDevClass {
return 0, config.FaultConfigInvalidNetDevClass(index, netDevClass,
ndc, engine.Fabric.Interface)
}
}
return netDevClass, nil
}
// Detect the number of engine configs assigned to each NUMA node and return error if engines are
// distributed unevenly across NUMA nodes. Otherwise return sorted list of NUMA nodes in use.
// Configurations where all engines are on a single NUMA node will be allowed.
func getEngineNUMANodes(log logging.Logger, engineCfgs []*engine.Config) ([]string, error) {
nodeMap := make(map[int]int)
for _, ec := range engineCfgs {
nodeMap[int(ec.Storage.NumaNodeIndex)] += 1
}
var lastCount int
nodes := make([]string, 0, len(engineCfgs))
for k, v := range nodeMap {
if lastCount != 0 && v != lastCount {
return nil, FaultEngineNUMAImbalance(nodeMap)
}
lastCount = v
nodes = append(nodes, fmt.Sprintf("%d", k))
}
sort.Strings(nodes)
return nodes, nil
}
// Prepare bdev storage. Assumes validation has already been performed on server config. Hugepages
// are required for both emulated (AIO devices) and real NVMe bdevs. VFIO and IOMMU are not
// required for emulated NVMe.
func prepBdevStorage(srv *server, iommuEnabled bool) error {
defer srv.logDuration(track("time to prepare bdev storage"))
if srv.cfg.DisableHugepages {
srv.log.Debugf("skip nvme prepare as disable_hugepages: true in config")
return nil
}
bdevCfgs := getBdevCfgsFromSrvCfg(srv.cfg)
// Perform these checks only if non-emulated NVMe is used and user is unprivileged.
if bdevCfgs.HaveRealNVMe() && srv.runningUser.Username != "root" {
if srv.cfg.DisableVFIO {
return FaultVfioDisabled
}
if !iommuEnabled {
return FaultIommuDisabled
}
}
// When requesting to prepare NVMe drives during service start-up, use all addresses
// specified in engine config BdevList parameters as the PCIAllowList and the server
// config BdevExclude parameter as the PCIBlockList.
prepReq := storage.BdevPrepareRequest{
TargetUser: srv.runningUser.Username,
PCIAllowList: strings.Join(bdevCfgs.NVMeBdevs().Devices(), storage.BdevPciAddrSep),
PCIBlockList: strings.Join(srv.cfg.BdevExclude, storage.BdevPciAddrSep),
DisableVFIO: srv.cfg.DisableVFIO,
}
enableVMD := true
if srv.cfg.DisableVMD != nil && *srv.cfg.DisableVMD {
enableVMD = false
}
switch {
case enableVMD && srv.cfg.DisableVFIO:
srv.log.Info("VMD not enabled because VFIO disabled in config")
case enableVMD && !iommuEnabled:
srv.log.Info("VMD not enabled because IOMMU disabled on platform")
case enableVMD && bdevCfgs.HaveEmulatedNVMe():
srv.log.Info("VMD not enabled because emulated NVMe devices found in config")
default:
// If no case above matches, set enable VMD flag in request otherwise leave false.
prepReq.EnableVMD = enableVMD
}
if bdevCfgs.HaveBdevs() {
// The NrHugepages config value is a total for all engines. Distribute allocation
// of hugepages across each engine's numa node (as validation ensures that
// TargetsCount is equal for each engine). Assumes an equal number of engine's per
// numa node.
numaNodes, err := getEngineNUMANodes(srv.log, srv.cfg.Engines)
if err != nil {
return err
}
if len(numaNodes) == 0 {
return errors.New("invalid number of numa nodes detected (0)")
}
// Request a few more hugepages than actually required for each NUMA node
// allocation as some overhead may result in one or two being unavailable.
prepReq.HugePageCount = srv.cfg.NrHugepages / len(numaNodes)
prepReq.HugePageCount += common.ExtraHugePages
prepReq.HugeNodes = strings.Join(numaNodes, ",")
srv.log.Debugf("allocating %d hugepages on each of these numa nodes: %v",
prepReq.HugePageCount, numaNodes)
} else {
if srv.cfg.NrHugepages == 0 {
// If nr_hugepages is unset then set minimum needed for scanning in prepare
// request.
prepReq.HugePageCount = scanMinHugePageCount
} else {
// If nr_hugepages has been set manually but no bdevs in config then
// allocate on numa node 0 (for example if a bigger number of hugepages are
// required in discovery mode for an unusually large number of SSDs).
prepReq.HugePageCount = srv.cfg.NrHugepages
}
srv.log.Debugf("allocating %d hugepages on numa node 0", prepReq.HugePageCount)
}
// Run prepare to bind devices to user-space driver and allocate hugepages.
//
// TODO: should be passing root context into prepare request to
// facilitate cancellation.
if _, err := srv.ctlSvc.NvmePrepare(prepReq); err != nil {
srv.log.Errorf | return filepath.Join(cfg.Engines[0].Storage.Tiers.ScmConfigs()[0].Scm.MountPoint, "control_raft")
}
func writeCoreDumpFilter(log logging.Logger, path string, filter uint8) error { | random_line_split | |
server_utils.go | VMD && srv.cfg.DisableVFIO:
srv.log.Info("VMD not enabled because VFIO disabled in config")
case enableVMD && !iommuEnabled:
srv.log.Info("VMD not enabled because IOMMU disabled on platform")
case enableVMD && bdevCfgs.HaveEmulatedNVMe():
srv.log.Info("VMD not enabled because emulated NVMe devices found in config")
default:
// If no case above matches, set enable VMD flag in request otherwise leave false.
prepReq.EnableVMD = enableVMD
}
if bdevCfgs.HaveBdevs() {
// The NrHugepages config value is a total for all engines. Distribute allocation
// of hugepages across each engine's numa node (as validation ensures that
// TargetsCount is equal for each engine). Assumes an equal number of engine's per
// numa node.
numaNodes, err := getEngineNUMANodes(srv.log, srv.cfg.Engines)
if err != nil {
return err
}
if len(numaNodes) == 0 {
return errors.New("invalid number of numa nodes detected (0)")
}
// Request a few more hugepages than actually required for each NUMA node
// allocation as some overhead may result in one or two being unavailable.
prepReq.HugePageCount = srv.cfg.NrHugepages / len(numaNodes)
prepReq.HugePageCount += common.ExtraHugePages
prepReq.HugeNodes = strings.Join(numaNodes, ",")
srv.log.Debugf("allocating %d hugepages on each of these numa nodes: %v",
prepReq.HugePageCount, numaNodes)
} else {
if srv.cfg.NrHugepages == 0 {
// If nr_hugepages is unset then set minimum needed for scanning in prepare
// request.
prepReq.HugePageCount = scanMinHugePageCount
} else {
// If nr_hugepages has been set manually but no bdevs in config then
// allocate on numa node 0 (for example if a bigger number of hugepages are
// required in discovery mode for an unusually large number of SSDs).
prepReq.HugePageCount = srv.cfg.NrHugepages
}
srv.log.Debugf("allocating %d hugepages on numa node 0", prepReq.HugePageCount)
}
// Run prepare to bind devices to user-space driver and allocate hugepages.
//
// TODO: should be passing root context into prepare request to
// facilitate cancellation.
if _, err := srv.ctlSvc.NvmePrepare(prepReq); err != nil {
srv.log.Errorf("automatic NVMe prepare failed: %s", err)
}
return nil
}
// scanBdevStorage performs discovery and validates existence of configured NVMe SSDs.
func scanBdevStorage(srv *server) (*storage.BdevScanResponse, error) {
defer srv.logDuration(track("time to scan bdev storage"))
if srv.cfg.DisableHugepages {
srv.log.Debugf("skip nvme scan as hugepages have been disabled in config")
return &storage.BdevScanResponse{}, nil
}
nvmeScanResp, err := srv.ctlSvc.NvmeScan(storage.BdevScanRequest{
DeviceList: getBdevCfgsFromSrvCfg(srv.cfg).Bdevs(),
BypassCache: true, // init cache on first scan
})
if err != nil {
err = errors.Wrap(err, "NVMe Scan Failed")
srv.log.Errorf("%s", err)
return nil, err
}
return nvmeScanResp, nil
}
func setEngineBdevs(engine *EngineInstance, scanResp *storage.BdevScanResponse, lastEngineIdx, lastBdevCount *int) error {
badInput := ""
switch {
case engine == nil:
badInput = "engine"
case scanResp == nil:
badInput = "scanResp"
case lastEngineIdx == nil:
badInput = "lastEngineIdx"
case lastBdevCount == nil:
badInput = "lastBdevCount"
}
if badInput != "" {
return errors.New("nil input param: " + badInput)
}
if err := engine.storage.SetBdevCache(*scanResp); err != nil {
return errors.Wrap(err, "setting engine storage bdev cache")
}
// After engine's bdev cache has been set, the cache will only contain details of bdevs
// identified in the relevant engine config and device addresses will have been verified
// against NVMe scan results. As any VMD endpoint addresses will have been replaced with
// backing device addresses, device counts will reflect the number of physical (as opposed
// to logical) bdevs and engine bdev counts can be accurately compared.
eIdx := engine.Index()
bdevCache := engine.storage.GetBdevCache()
newNrBdevs := len(bdevCache.Controllers)
engine.log.Debugf("last: [index: %d, bdevCount: %d], current: [index: %d, bdevCount: %d]",
*lastEngineIdx, *lastBdevCount, eIdx, newNrBdevs)
// Update last recorded counters if this is the first update or if the number of bdevs is
// unchanged. If bdev count differs between engines, return fault.
switch {
case *lastEngineIdx < 0:
if *lastBdevCount >= 0 {
return errors.New("expecting both lastEngineIdx and lastBdevCount to be unset")
}
*lastEngineIdx = int(eIdx)
*lastBdevCount = newNrBdevs
case *lastBdevCount < 0:
return errors.New("expecting both lastEngineIdx and lastBdevCount to be set")
case newNrBdevs == *lastBdevCount:
*lastEngineIdx = int(eIdx)
default:
return config.FaultConfigBdevCountMismatch(int(eIdx), newNrBdevs, *lastEngineIdx, *lastBdevCount)
}
return nil
}
func setDaosHelperEnvs(cfg *config.Server, setenv func(k, v string) error) error {
if cfg.HelperLogFile != "" {
if err := setenv(pbin.DaosPrivHelperLogFileEnvVar, cfg.HelperLogFile); err != nil {
return errors.Wrap(err, "unable to configure privileged helper logging")
}
}
if cfg.FWHelperLogFile != "" {
if err := setenv(pbin.DaosFWLogFileEnvVar, cfg.FWHelperLogFile); err != nil {
return errors.Wrap(err, "unable to configure privileged firmware helper logging")
}
}
return nil
}
// Minimum recommended number of hugepages has already been calculated and set in config so verify
// we have enough free hugepage memory to satisfy this requirement before setting mem_size and
// hugepage_size parameters for engine.
func updateMemValues(srv *server, engine *EngineInstance, getMemInfo common.GetMemInfoFn) error {
engine.RLock()
ec := engine.runner.GetConfig()
ei := ec.Index
if ec.Storage.Tiers.Bdevs().Len() == 0 {
srv.log.Debugf("skipping mem check on engine %d, no bdevs", ei)
engine.RUnlock()
return nil
}
engine.RUnlock()
// Retrieve up-to-date hugepage info to check that we got the requested number of hugepages.
mi, err := getMemInfo()
if err != nil {
return err
}
// Calculate mem_size per I/O engine (in MB) from number of hugepages required per engine.
nrPagesRequired := srv.cfg.NrHugepages / len(srv.cfg.Engines)
pageSizeMiB := mi.HugePageSizeKb / humanize.KiByte // kib to mib
memSizeReqMiB := nrPagesRequired * pageSizeMiB
memSizeFreeMiB := mi.HugePagesFree * pageSizeMiB
// Fail if free hugepage mem is not enough to sustain average I/O workload (~1GB).
srv.log.Debugf("Per-engine MemSize:%dMB, HugepageSize:%dMB (meminfo: %+v)", memSizeReqMiB,
pageSizeMiB, *mi)
if memSizeFreeMiB < memSizeReqMiB {
return FaultInsufficientFreeHugePageMem(int(ei), memSizeReqMiB, memSizeFreeMiB,
nrPagesRequired, mi.HugePagesFree)
}
// Set engine mem_size and hugepage_size (MiB) values based on hugepage info.
engine.setMemSize(memSizeReqMiB)
engine.setHugePageSz(pageSizeMiB)
return nil
}
func cleanEngineHugePages(srv *server) error {
req := storage.BdevPrepareRequest{
CleanHugePagesOnly: true,
}
msg := "cleanup hugepages via bdev backend"
resp, err := srv.ctlSvc.NvmePrepare(req)
if err != nil {
return errors.Wrap(err, msg)
}
srv.log.Debugf("%s: %d removed", msg, resp.NrHugePagesRemoved)
return nil
}
func | registerEngineEventCallbacks | identifier_name | |
path_test.go | testutil.Setup(t, packageDir)
t.Logf("Adding File: %s", inventoryFilePath)
tf.WriteFile(t, inventoryFilePath, inventoryConfigMap)
t.Logf("Adding File: %s", secondInventoryFilePath)
tf.WriteFile(t, secondInventoryFilePath, secondInventoryConfigMap)
t.Logf("Adding File: %s", podAFilePath)
tf.WriteFile(t, podAFilePath, podA)
t.Logf("Adding File: %s", podBFilePath)
tf.WriteFile(t, podBFilePath, podB)
return tf
}
var inventoryConfigMap = []byte(`
apiVersion: v1
kind: ConfigMap
metadata:
namespace: test-namespace
name: inventory
labels:
cli-utils.sigs.k8s.io/inventory-id: test-inventory
`)
var secondInventoryConfigMap = []byte(`
apiVersion: v1
kind: ConfigMap
metadata:
namespace: test-namespace
name: inventory-2
labels:
cli-utils.sigs.k8s.io/inventory-id: test-inventory
`)
var podA = []byte(`
apiVersion: v1
kind: Pod
metadata:
name: pod-a
namespace: test-namespace
labels:
name: test-pod-label
spec:
containers:
- name: kubernetes-pause
image: registry.k8s.io/pause:2.0
`)
var podB = []byte(`
apiVersion: v1
kind: Pod
metadata:
name: pod-b
namespace: test-namespace
labels:
name: test-pod-label
spec:
containers:
- name: kubernetes-pause
image: registry.k8s.io/pause:2.0
`)
func buildMultiResourceConfig(configs ...[]byte) []byte {
r := []byte{}
for i, config := range configs {
if i > 0 {
r = append(r, []byte(configSeparator)...)
}
r = append(r, config...)
}
return r
}
func TestProcessPaths(t *testing.T) {
tf := setupTestFilesystem(t)
defer tf.Clean()
trueVal := true
testCases := map[string]struct {
paths []string
expectedFileNameFlags genericclioptions.FileNameFlags
errFromDemandOneDirectory string
}{
"empty slice means reading from StdIn": {
paths: []string{},
expectedFileNameFlags: genericclioptions.FileNameFlags{
Filenames: &[]string{"-"},
},
},
"single file in slice is error; must be directory": {
paths: []string{podAFilePath},
expectedFileNameFlags: genericclioptions.FileNameFlags{
Filenames: nil,
Recursive: nil,
},
errFromDemandOneDirectory: "argument 'test-pkg-dir/pod-a.yaml' is not but must be a directory",
},
"single dir in slice": {
paths: []string{tf.GetRootDir()},
expectedFileNameFlags: genericclioptions.FileNameFlags{
Filenames: &[]string{tf.GetRootDir()},
Recursive: &trueVal,
},
},
"multiple arguments is an error": {
paths: []string{podAFilePath, podBFilePath},
expectedFileNameFlags: genericclioptions.FileNameFlags{
Filenames: nil,
Recursive: nil,
},
errFromDemandOneDirectory: "specify exactly one directory path argument; rejecting [test-pkg-dir/pod-a.yaml test-pkg-dir/pod-b.yaml]",
},
}
for tn, tc := range testCases {
t.Run(tn, func(t *testing.T) {
fileNameFlags, err := DemandOneDirectory(tc.paths)
assert.Equal(t, tc.expectedFileNameFlags, fileNameFlags)
if err != nil && err.Error() != tc.errFromDemandOneDirectory |
})
}
}
func TestFilterInputFile(t *testing.T) {
tf := testutil.Setup(t)
defer tf.Clean()
testCases := map[string]struct {
configObjects [][]byte
expectedObjects [][]byte
}{
"Empty config objects writes empty file": {
configObjects: [][]byte{},
expectedObjects: [][]byte{},
},
"Only inventory obj writes empty file": {
configObjects: [][]byte{inventoryConfigMap},
expectedObjects: [][]byte{},
},
"Only pods writes both pods": {
configObjects: [][]byte{podA, podB},
expectedObjects: [][]byte{podA, podB},
},
"Basic case of inventory obj and two pods": {
configObjects: [][]byte{inventoryConfigMap, podA, podB},
expectedObjects: [][]byte{podA, podB},
},
"Basic case of inventory obj and two pods in different order": {
configObjects: [][]byte{podB, inventoryConfigMap, podA},
expectedObjects: [][]byte{podB, podA},
},
}
for tn, tc := range testCases {
t.Run(tn, func(t *testing.T) {
// Build a single file of multiple resource configs, and
// call the tested function FilterInputFile. This writes
// the passed file to the test filesystem, filtering
// the inventory object if it exists in the passed file.
in := buildMultiResourceConfig(tc.configObjects...)
err := FilterInputFile(bytes.NewReader(in), tf.GetRootDir())
if err != nil {
t.Fatalf("Unexpected error in FilterInputFile: %s", err)
}
// Retrieve the files from the test filesystem.
actualFiles, err := os.ReadDir(tf.GetRootDir())
if err != nil {
t.Fatalf("Error reading test filesystem directory: %s", err)
}
// Since we remove the generated file for each test, there should
// not be more than one file in the test filesystem.
if len(actualFiles) > 1 {
t.Fatalf("Wrong number of files (%d) in dir: %s", len(actualFiles), tf.GetRootDir())
}
// If there is a generated file, then read it into actualStr.
actualStr := ""
if len(actualFiles) != 0 {
actualFilename := actualFiles[0].Name()
defer os.Remove(actualFilename)
actual, err := os.ReadFile(actualFilename)
if err != nil {
t.Fatalf("Error reading created file (%s): %s", actualFilename, err)
}
actualStr = strings.TrimSpace(string(actual))
}
// Build the expected string from the expectedObjects. This expected
// string should not have the inventory object config in it.
expected := buildMultiResourceConfig(tc.expectedObjects...)
expectedStr := strings.TrimSpace(string(expected))
if expectedStr != actualStr {
t.Errorf("Expected file contents (%s) not equal to actual file contents (%s)",
expectedStr, actualStr)
}
})
}
}
func TestExpandDir(t *testing.T) {
tf := setupTestFilesystem(t)
defer tf.Clean()
testCases := map[string]struct {
packageDirPath string
expandedInventory string
expandedPaths []string
isError bool
}{
"empty path is error": {
packageDirPath: "",
isError: true,
},
"path that is not dir is error": {
packageDirPath: "fakedir1",
isError: true,
},
"root package dir excludes inventory object": {
packageDirPath: tf.GetRootDir(),
expandedInventory: "inventory.yaml",
expandedPaths: []string{
"pod-a.yaml",
"pod-b.yaml",
},
isError: false,
},
}
for tn, tc := range testCases {
t.Run(tn, func(t *testing.T) {
actualInventory, actualPaths, err := ExpandDir(tc.packageDirPath)
if tc.isError {
if err == nil {
t.Fatalf("expected error but received none")
}
return
}
if err != nil {
t.Fatalf("received unexpected error %#v", err)
return
}
actualFilename := filepath.Base(actualInventory)
if tc.expandedInventory != actualFilename {
t.Errorf("expected inventory template filepath (%s), got (%s)", tc.expandedInventory, actualFilename)
}
if len(tc.expandedPaths) != len(actualPaths) {
t.Errorf("expected (%d) resource filepaths, got (%d)", len(tc.expandedPaths), len(actualPaths))
}
for _, expectedPath := range tc.expandedPaths {
found := false
for _, actualPath := range actualPaths {
actualFilename := filepath.Base(actualPath)
if expectedPath == actualFilename {
found = true
break
}
}
if !found {
t.Errorf("expected filename (%s) not found", expectedPath)
}
}
})
}
}
func TestExpandDirErrors(t *testing.T) {
tf := setupTestFilesystem(t)
defer tf | {
assert.Equal(t, err.Error(), tc.errFromDemandOneDirectory)
} | conditional_block |
path_test.go | testutil.Setup(t, packageDir)
t.Logf("Adding File: %s", inventoryFilePath)
tf.WriteFile(t, inventoryFilePath, inventoryConfigMap)
t.Logf("Adding File: %s", secondInventoryFilePath)
tf.WriteFile(t, secondInventoryFilePath, secondInventoryConfigMap)
t.Logf("Adding File: %s", podAFilePath)
tf.WriteFile(t, podAFilePath, podA)
t.Logf("Adding File: %s", podBFilePath)
tf.WriteFile(t, podBFilePath, podB)
return tf
}
var inventoryConfigMap = []byte(`
apiVersion: v1
kind: ConfigMap
metadata:
namespace: test-namespace
name: inventory
labels:
cli-utils.sigs.k8s.io/inventory-id: test-inventory
`)
var secondInventoryConfigMap = []byte(`
apiVersion: v1
kind: ConfigMap
metadata:
namespace: test-namespace
name: inventory-2
labels:
cli-utils.sigs.k8s.io/inventory-id: test-inventory
`)
var podA = []byte(`
apiVersion: v1
kind: Pod
metadata:
name: pod-a
namespace: test-namespace
labels:
name: test-pod-label
spec:
containers:
- name: kubernetes-pause
image: registry.k8s.io/pause:2.0
`)
var podB = []byte(`
apiVersion: v1
kind: Pod
metadata:
name: pod-b
namespace: test-namespace
labels:
name: test-pod-label
spec:
containers:
- name: kubernetes-pause
image: registry.k8s.io/pause:2.0
`)
func | (configs ...[]byte) []byte {
r := []byte{}
for i, config := range configs {
if i > 0 {
r = append(r, []byte(configSeparator)...)
}
r = append(r, config...)
}
return r
}
func TestProcessPaths(t *testing.T) {
tf := setupTestFilesystem(t)
defer tf.Clean()
trueVal := true
testCases := map[string]struct {
paths []string
expectedFileNameFlags genericclioptions.FileNameFlags
errFromDemandOneDirectory string
}{
"empty slice means reading from StdIn": {
paths: []string{},
expectedFileNameFlags: genericclioptions.FileNameFlags{
Filenames: &[]string{"-"},
},
},
"single file in slice is error; must be directory": {
paths: []string{podAFilePath},
expectedFileNameFlags: genericclioptions.FileNameFlags{
Filenames: nil,
Recursive: nil,
},
errFromDemandOneDirectory: "argument 'test-pkg-dir/pod-a.yaml' is not but must be a directory",
},
"single dir in slice": {
paths: []string{tf.GetRootDir()},
expectedFileNameFlags: genericclioptions.FileNameFlags{
Filenames: &[]string{tf.GetRootDir()},
Recursive: &trueVal,
},
},
"multiple arguments is an error": {
paths: []string{podAFilePath, podBFilePath},
expectedFileNameFlags: genericclioptions.FileNameFlags{
Filenames: nil,
Recursive: nil,
},
errFromDemandOneDirectory: "specify exactly one directory path argument; rejecting [test-pkg-dir/pod-a.yaml test-pkg-dir/pod-b.yaml]",
},
}
for tn, tc := range testCases {
t.Run(tn, func(t *testing.T) {
fileNameFlags, err := DemandOneDirectory(tc.paths)
assert.Equal(t, tc.expectedFileNameFlags, fileNameFlags)
if err != nil && err.Error() != tc.errFromDemandOneDirectory {
assert.Equal(t, err.Error(), tc.errFromDemandOneDirectory)
}
})
}
}
func TestFilterInputFile(t *testing.T) {
tf := testutil.Setup(t)
defer tf.Clean()
testCases := map[string]struct {
configObjects [][]byte
expectedObjects [][]byte
}{
"Empty config objects writes empty file": {
configObjects: [][]byte{},
expectedObjects: [][]byte{},
},
"Only inventory obj writes empty file": {
configObjects: [][]byte{inventoryConfigMap},
expectedObjects: [][]byte{},
},
"Only pods writes both pods": {
configObjects: [][]byte{podA, podB},
expectedObjects: [][]byte{podA, podB},
},
"Basic case of inventory obj and two pods": {
configObjects: [][]byte{inventoryConfigMap, podA, podB},
expectedObjects: [][]byte{podA, podB},
},
"Basic case of inventory obj and two pods in different order": {
configObjects: [][]byte{podB, inventoryConfigMap, podA},
expectedObjects: [][]byte{podB, podA},
},
}
for tn, tc := range testCases {
t.Run(tn, func(t *testing.T) {
// Build a single file of multiple resource configs, and
// call the tested function FilterInputFile. This writes
// the passed file to the test filesystem, filtering
// the inventory object if it exists in the passed file.
in := buildMultiResourceConfig(tc.configObjects...)
err := FilterInputFile(bytes.NewReader(in), tf.GetRootDir())
if err != nil {
t.Fatalf("Unexpected error in FilterInputFile: %s", err)
}
// Retrieve the files from the test filesystem.
actualFiles, err := os.ReadDir(tf.GetRootDir())
if err != nil {
t.Fatalf("Error reading test filesystem directory: %s", err)
}
// Since we remove the generated file for each test, there should
// not be more than one file in the test filesystem.
if len(actualFiles) > 1 {
t.Fatalf("Wrong number of files (%d) in dir: %s", len(actualFiles), tf.GetRootDir())
}
// If there is a generated file, then read it into actualStr.
actualStr := ""
if len(actualFiles) != 0 {
actualFilename := actualFiles[0].Name()
defer os.Remove(actualFilename)
actual, err := os.ReadFile(actualFilename)
if err != nil {
t.Fatalf("Error reading created file (%s): %s", actualFilename, err)
}
actualStr = strings.TrimSpace(string(actual))
}
// Build the expected string from the expectedObjects. This expected
// string should not have the inventory object config in it.
expected := buildMultiResourceConfig(tc.expectedObjects...)
expectedStr := strings.TrimSpace(string(expected))
if expectedStr != actualStr {
t.Errorf("Expected file contents (%s) not equal to actual file contents (%s)",
expectedStr, actualStr)
}
})
}
}
func TestExpandDir(t *testing.T) {
tf := setupTestFilesystem(t)
defer tf.Clean()
testCases := map[string]struct {
packageDirPath string
expandedInventory string
expandedPaths []string
isError bool
}{
"empty path is error": {
packageDirPath: "",
isError: true,
},
"path that is not dir is error": {
packageDirPath: "fakedir1",
isError: true,
},
"root package dir excludes inventory object": {
packageDirPath: tf.GetRootDir(),
expandedInventory: "inventory.yaml",
expandedPaths: []string{
"pod-a.yaml",
"pod-b.yaml",
},
isError: false,
},
}
for tn, tc := range testCases {
t.Run(tn, func(t *testing.T) {
actualInventory, actualPaths, err := ExpandDir(tc.packageDirPath)
if tc.isError {
if err == nil {
t.Fatalf("expected error but received none")
}
return
}
if err != nil {
t.Fatalf("received unexpected error %#v", err)
return
}
actualFilename := filepath.Base(actualInventory)
if tc.expandedInventory != actualFilename {
t.Errorf("expected inventory template filepath (%s), got (%s)", tc.expandedInventory, actualFilename)
}
if len(tc.expandedPaths) != len(actualPaths) {
t.Errorf("expected (%d) resource filepaths, got (%d)", len(tc.expandedPaths), len(actualPaths))
}
for _, expectedPath := range tc.expandedPaths {
found := false
for _, actualPath := range actualPaths {
actualFilename := filepath.Base(actualPath)
if expectedPath == actualFilename {
found = true
break
}
}
if !found {
t.Errorf("expected filename (%s) not found", expectedPath)
}
}
})
}
}
func TestExpandDirErrors(t *testing.T) {
tf := setupTestFilesystem(t)
defer tf.C | buildMultiResourceConfig | identifier_name |
path_test.go | testutil.Setup(t, packageDir)
t.Logf("Adding File: %s", inventoryFilePath)
tf.WriteFile(t, inventoryFilePath, inventoryConfigMap)
t.Logf("Adding File: %s", secondInventoryFilePath)
tf.WriteFile(t, secondInventoryFilePath, secondInventoryConfigMap)
t.Logf("Adding File: %s", podAFilePath)
tf.WriteFile(t, podAFilePath, podA)
t.Logf("Adding File: %s", podBFilePath)
tf.WriteFile(t, podBFilePath, podB)
return tf
}
var inventoryConfigMap = []byte(`
apiVersion: v1
kind: ConfigMap
metadata:
namespace: test-namespace
name: inventory
labels:
cli-utils.sigs.k8s.io/inventory-id: test-inventory
`)
var secondInventoryConfigMap = []byte(`
apiVersion: v1
kind: ConfigMap
metadata:
namespace: test-namespace
name: inventory-2
labels:
cli-utils.sigs.k8s.io/inventory-id: test-inventory
`)
var podA = []byte(`
apiVersion: v1
kind: Pod
metadata:
name: pod-a
namespace: test-namespace
labels:
name: test-pod-label
spec:
containers:
- name: kubernetes-pause
image: registry.k8s.io/pause:2.0
`)
var podB = []byte(`
apiVersion: v1
kind: Pod
metadata:
name: pod-b
namespace: test-namespace
labels:
name: test-pod-label
spec:
containers:
- name: kubernetes-pause
image: registry.k8s.io/pause:2.0
`)
func buildMultiResourceConfig(configs ...[]byte) []byte |
func TestProcessPaths(t *testing.T) {
tf := setupTestFilesystem(t)
defer tf.Clean()
trueVal := true
testCases := map[string]struct {
paths []string
expectedFileNameFlags genericclioptions.FileNameFlags
errFromDemandOneDirectory string
}{
"empty slice means reading from StdIn": {
paths: []string{},
expectedFileNameFlags: genericclioptions.FileNameFlags{
Filenames: &[]string{"-"},
},
},
"single file in slice is error; must be directory": {
paths: []string{podAFilePath},
expectedFileNameFlags: genericclioptions.FileNameFlags{
Filenames: nil,
Recursive: nil,
},
errFromDemandOneDirectory: "argument 'test-pkg-dir/pod-a.yaml' is not but must be a directory",
},
"single dir in slice": {
paths: []string{tf.GetRootDir()},
expectedFileNameFlags: genericclioptions.FileNameFlags{
Filenames: &[]string{tf.GetRootDir()},
Recursive: &trueVal,
},
},
"multiple arguments is an error": {
paths: []string{podAFilePath, podBFilePath},
expectedFileNameFlags: genericclioptions.FileNameFlags{
Filenames: nil,
Recursive: nil,
},
errFromDemandOneDirectory: "specify exactly one directory path argument; rejecting [test-pkg-dir/pod-a.yaml test-pkg-dir/pod-b.yaml]",
},
}
for tn, tc := range testCases {
t.Run(tn, func(t *testing.T) {
fileNameFlags, err := DemandOneDirectory(tc.paths)
assert.Equal(t, tc.expectedFileNameFlags, fileNameFlags)
if err != nil && err.Error() != tc.errFromDemandOneDirectory {
assert.Equal(t, err.Error(), tc.errFromDemandOneDirectory)
}
})
}
}
func TestFilterInputFile(t *testing.T) {
tf := testutil.Setup(t)
defer tf.Clean()
testCases := map[string]struct {
configObjects [][]byte
expectedObjects [][]byte
}{
"Empty config objects writes empty file": {
configObjects: [][]byte{},
expectedObjects: [][]byte{},
},
"Only inventory obj writes empty file": {
configObjects: [][]byte{inventoryConfigMap},
expectedObjects: [][]byte{},
},
"Only pods writes both pods": {
configObjects: [][]byte{podA, podB},
expectedObjects: [][]byte{podA, podB},
},
"Basic case of inventory obj and two pods": {
configObjects: [][]byte{inventoryConfigMap, podA, podB},
expectedObjects: [][]byte{podA, podB},
},
"Basic case of inventory obj and two pods in different order": {
configObjects: [][]byte{podB, inventoryConfigMap, podA},
expectedObjects: [][]byte{podB, podA},
},
}
for tn, tc := range testCases {
t.Run(tn, func(t *testing.T) {
// Build a single file of multiple resource configs, and
// call the tested function FilterInputFile. This writes
// the passed file to the test filesystem, filtering
// the inventory object if it exists in the passed file.
in := buildMultiResourceConfig(tc.configObjects...)
err := FilterInputFile(bytes.NewReader(in), tf.GetRootDir())
if err != nil {
t.Fatalf("Unexpected error in FilterInputFile: %s", err)
}
// Retrieve the files from the test filesystem.
actualFiles, err := os.ReadDir(tf.GetRootDir())
if err != nil {
t.Fatalf("Error reading test filesystem directory: %s", err)
}
// Since we remove the generated file for each test, there should
// not be more than one file in the test filesystem.
if len(actualFiles) > 1 {
t.Fatalf("Wrong number of files (%d) in dir: %s", len(actualFiles), tf.GetRootDir())
}
// If there is a generated file, then read it into actualStr.
actualStr := ""
if len(actualFiles) != 0 {
actualFilename := actualFiles[0].Name()
defer os.Remove(actualFilename)
actual, err := os.ReadFile(actualFilename)
if err != nil {
t.Fatalf("Error reading created file (%s): %s", actualFilename, err)
}
actualStr = strings.TrimSpace(string(actual))
}
// Build the expected string from the expectedObjects. This expected
// string should not have the inventory object config in it.
expected := buildMultiResourceConfig(tc.expectedObjects...)
expectedStr := strings.TrimSpace(string(expected))
if expectedStr != actualStr {
t.Errorf("Expected file contents (%s) not equal to actual file contents (%s)",
expectedStr, actualStr)
}
})
}
}
func TestExpandDir(t *testing.T) {
tf := setupTestFilesystem(t)
defer tf.Clean()
testCases := map[string]struct {
packageDirPath string
expandedInventory string
expandedPaths []string
isError bool
}{
"empty path is error": {
packageDirPath: "",
isError: true,
},
"path that is not dir is error": {
packageDirPath: "fakedir1",
isError: true,
},
"root package dir excludes inventory object": {
packageDirPath: tf.GetRootDir(),
expandedInventory: "inventory.yaml",
expandedPaths: []string{
"pod-a.yaml",
"pod-b.yaml",
},
isError: false,
},
}
for tn, tc := range testCases {
t.Run(tn, func(t *testing.T) {
actualInventory, actualPaths, err := ExpandDir(tc.packageDirPath)
if tc.isError {
if err == nil {
t.Fatalf("expected error but received none")
}
return
}
if err != nil {
t.Fatalf("received unexpected error %#v", err)
return
}
actualFilename := filepath.Base(actualInventory)
if tc.expandedInventory != actualFilename {
t.Errorf("expected inventory template filepath (%s), got (%s)", tc.expandedInventory, actualFilename)
}
if len(tc.expandedPaths) != len(actualPaths) {
t.Errorf("expected (%d) resource filepaths, got (%d)", len(tc.expandedPaths), len(actualPaths))
}
for _, expectedPath := range tc.expandedPaths {
found := false
for _, actualPath := range actualPaths {
actualFilename := filepath.Base(actualPath)
if expectedPath == actualFilename {
found = true
break
}
}
if !found {
t.Errorf("expected filename (%s) not found", expectedPath)
}
}
})
}
}
func TestExpandDirErrors(t *testing.T) {
tf := setupTestFilesystem(t)
defer tf | {
r := []byte{}
for i, config := range configs {
if i > 0 {
r = append(r, []byte(configSeparator)...)
}
r = append(r, config...)
}
return r
} | identifier_body |
path_test.go | testutil.Setup(t, packageDir)
t.Logf("Adding File: %s", inventoryFilePath)
tf.WriteFile(t, inventoryFilePath, inventoryConfigMap)
t.Logf("Adding File: %s", secondInventoryFilePath)
tf.WriteFile(t, secondInventoryFilePath, secondInventoryConfigMap)
t.Logf("Adding File: %s", podAFilePath)
tf.WriteFile(t, podAFilePath, podA)
t.Logf("Adding File: %s", podBFilePath)
tf.WriteFile(t, podBFilePath, podB)
return tf
}
var inventoryConfigMap = []byte(`
apiVersion: v1
kind: ConfigMap
metadata:
namespace: test-namespace
name: inventory
labels:
cli-utils.sigs.k8s.io/inventory-id: test-inventory
`)
var secondInventoryConfigMap = []byte(`
apiVersion: v1
kind: ConfigMap
metadata:
namespace: test-namespace
name: inventory-2
labels:
cli-utils.sigs.k8s.io/inventory-id: test-inventory
`)
var podA = []byte(`
apiVersion: v1
kind: Pod
metadata:
name: pod-a
namespace: test-namespace
labels:
name: test-pod-label
spec:
containers:
- name: kubernetes-pause
image: registry.k8s.io/pause:2.0
`)
var podB = []byte(`
apiVersion: v1
kind: Pod
metadata:
name: pod-b
namespace: test-namespace
labels:
name: test-pod-label
spec:
containers:
- name: kubernetes-pause
image: registry.k8s.io/pause:2.0
`)
func buildMultiResourceConfig(configs ...[]byte) []byte {
r := []byte{}
for i, config := range configs {
if i > 0 {
r = append(r, []byte(configSeparator)...)
}
r = append(r, config...)
}
return r
}
func TestProcessPaths(t *testing.T) {
tf := setupTestFilesystem(t)
defer tf.Clean()
trueVal := true
testCases := map[string]struct {
paths []string
expectedFileNameFlags genericclioptions.FileNameFlags
errFromDemandOneDirectory string
}{
"empty slice means reading from StdIn": {
paths: []string{},
expectedFileNameFlags: genericclioptions.FileNameFlags{
Filenames: &[]string{"-"},
},
},
"single file in slice is error; must be directory": {
paths: []string{podAFilePath},
expectedFileNameFlags: genericclioptions.FileNameFlags{
Filenames: nil,
Recursive: nil,
},
errFromDemandOneDirectory: "argument 'test-pkg-dir/pod-a.yaml' is not but must be a directory",
},
"single dir in slice": {
paths: []string{tf.GetRootDir()},
expectedFileNameFlags: genericclioptions.FileNameFlags{
Filenames: &[]string{tf.GetRootDir()},
Recursive: &trueVal,
},
},
"multiple arguments is an error": {
paths: []string{podAFilePath, podBFilePath},
expectedFileNameFlags: genericclioptions.FileNameFlags{
Filenames: nil,
Recursive: nil,
},
errFromDemandOneDirectory: "specify exactly one directory path argument; rejecting [test-pkg-dir/pod-a.yaml test-pkg-dir/pod-b.yaml]",
},
}
for tn, tc := range testCases {
t.Run(tn, func(t *testing.T) {
fileNameFlags, err := DemandOneDirectory(tc.paths) | }
})
}
}
func TestFilterInputFile(t *testing.T) {
tf := testutil.Setup(t)
defer tf.Clean()
testCases := map[string]struct {
configObjects [][]byte
expectedObjects [][]byte
}{
"Empty config objects writes empty file": {
configObjects: [][]byte{},
expectedObjects: [][]byte{},
},
"Only inventory obj writes empty file": {
configObjects: [][]byte{inventoryConfigMap},
expectedObjects: [][]byte{},
},
"Only pods writes both pods": {
configObjects: [][]byte{podA, podB},
expectedObjects: [][]byte{podA, podB},
},
"Basic case of inventory obj and two pods": {
configObjects: [][]byte{inventoryConfigMap, podA, podB},
expectedObjects: [][]byte{podA, podB},
},
"Basic case of inventory obj and two pods in different order": {
configObjects: [][]byte{podB, inventoryConfigMap, podA},
expectedObjects: [][]byte{podB, podA},
},
}
for tn, tc := range testCases {
t.Run(tn, func(t *testing.T) {
// Build a single file of multiple resource configs, and
// call the tested function FilterInputFile. This writes
// the passed file to the test filesystem, filtering
// the inventory object if it exists in the passed file.
in := buildMultiResourceConfig(tc.configObjects...)
err := FilterInputFile(bytes.NewReader(in), tf.GetRootDir())
if err != nil {
t.Fatalf("Unexpected error in FilterInputFile: %s", err)
}
// Retrieve the files from the test filesystem.
actualFiles, err := os.ReadDir(tf.GetRootDir())
if err != nil {
t.Fatalf("Error reading test filesystem directory: %s", err)
}
// Since we remove the generated file for each test, there should
// not be more than one file in the test filesystem.
if len(actualFiles) > 1 {
t.Fatalf("Wrong number of files (%d) in dir: %s", len(actualFiles), tf.GetRootDir())
}
// If there is a generated file, then read it into actualStr.
actualStr := ""
if len(actualFiles) != 0 {
actualFilename := actualFiles[0].Name()
defer os.Remove(actualFilename)
actual, err := os.ReadFile(actualFilename)
if err != nil {
t.Fatalf("Error reading created file (%s): %s", actualFilename, err)
}
actualStr = strings.TrimSpace(string(actual))
}
// Build the expected string from the expectedObjects. This expected
// string should not have the inventory object config in it.
expected := buildMultiResourceConfig(tc.expectedObjects...)
expectedStr := strings.TrimSpace(string(expected))
if expectedStr != actualStr {
t.Errorf("Expected file contents (%s) not equal to actual file contents (%s)",
expectedStr, actualStr)
}
})
}
}
func TestExpandDir(t *testing.T) {
tf := setupTestFilesystem(t)
defer tf.Clean()
testCases := map[string]struct {
packageDirPath string
expandedInventory string
expandedPaths []string
isError bool
}{
"empty path is error": {
packageDirPath: "",
isError: true,
},
"path that is not dir is error": {
packageDirPath: "fakedir1",
isError: true,
},
"root package dir excludes inventory object": {
packageDirPath: tf.GetRootDir(),
expandedInventory: "inventory.yaml",
expandedPaths: []string{
"pod-a.yaml",
"pod-b.yaml",
},
isError: false,
},
}
for tn, tc := range testCases {
t.Run(tn, func(t *testing.T) {
actualInventory, actualPaths, err := ExpandDir(tc.packageDirPath)
if tc.isError {
if err == nil {
t.Fatalf("expected error but received none")
}
return
}
if err != nil {
t.Fatalf("received unexpected error %#v", err)
return
}
actualFilename := filepath.Base(actualInventory)
if tc.expandedInventory != actualFilename {
t.Errorf("expected inventory template filepath (%s), got (%s)", tc.expandedInventory, actualFilename)
}
if len(tc.expandedPaths) != len(actualPaths) {
t.Errorf("expected (%d) resource filepaths, got (%d)", len(tc.expandedPaths), len(actualPaths))
}
for _, expectedPath := range tc.expandedPaths {
found := false
for _, actualPath := range actualPaths {
actualFilename := filepath.Base(actualPath)
if expectedPath == actualFilename {
found = true
break
}
}
if !found {
t.Errorf("expected filename (%s) not found", expectedPath)
}
}
})
}
}
func TestExpandDirErrors(t *testing.T) {
tf := setupTestFilesystem(t)
defer tf.Clean | assert.Equal(t, tc.expectedFileNameFlags, fileNameFlags)
if err != nil && err.Error() != tc.errFromDemandOneDirectory {
assert.Equal(t, err.Error(), tc.errFromDemandOneDirectory) | random_line_split |
pandas_gp.py | 057,4073,4079,4091,4093,4099,
# 4111,4127,4129,4133,4139,4153,4157,4159,4177,4201,4211,4217,4219,
# 4229,4231]
# load the data
ibex = cPickle.load(open("ibex.pickle", "rb"))
#we need to have pd_df_bool terminals for gp to work
#better to have them as a column in the dataset
#then to use a terminal calling np.ones or np.zeros
#as we are not constrained by the size of the vector
ibex["Ones"] = True
ibex["Zeros"] = False
# Transaction costs - 10 Eur per contract
#cost = 30
cost = 50 # equiv (30/10000) * 10 points
# Split into train and test
train = ibex["2000":"2015"].copy()
test = ibex["2016":].copy()
# Functions and terminal for GP
class pd_df_float(object):
pass
class pd_df_bool(object):
pass
def f_gt(df_in, f_value):
return df_in > f_value
def f_lt(df_in, f_value):
return df_in < f_value
def protectedDiv(left, right):
try: return left / right
except ZeroDivisionError: return 0.0
def pd_add(left, right):
return left + right
def pd_subtract(left, right):
return left - right
def pd_multiply(left, right):
return left * right
def pd_divide(left, right):
return left / right
def pd_diff(df_in, _periods):
return df_in.diff(periods=abs(_periods))
def sma(df_in, periods):
return pd.rolling_mean(df_in, abs(periods))
def ewma(df_in, periods):
if abs(periods) < 2:
return df_in
else:
return pd.ewma(df_in, abs(periods), min_periods=abs(periods))
def hh(df_in, periods):
if abs(periods) < 2:
return df_in
else:
return pd.rolling_max(df_in, abs(periods), min_periods=abs(periods))
def ll(df_in, periods):
if abs(periods) < 2:
return df_in
else:
return pd.rolling_min(df_in, abs(periods), min_periods=abs(periods))
def pd_std(df_in, periods):
if abs(periods) < 2:
return df_in
else:
return pd.rolling_std(df_in, abs(periods), min_periods=abs(periods))
pset = gp.PrimitiveSetTyped('MAIN', [pd_df_float,
pd_df_float,
pd_df_float,
pd_df_float,
pd_df_float,
pd_df_bool,
pd_df_bool],
pd_df_bool)
pset.renameArguments(ARG0='Open')
pset.renameArguments(ARG1='High')
pset.renameArguments(ARG2='Low')
pset.renameArguments(ARG3='Close')
pset.renameArguments(ARG4='Volume')
# need to have pd_df_bool terminals for GP to work
pset.renameArguments(ARG5='Ones')
pset.renameArguments(ARG6='Zeros')
pset.addPrimitive(sma, [pd_df_float, int], pd_df_float, name="sma")
pset.addPrimitive(ewma, [pd_df_float, int], pd_df_float, name="ewma")
pset.addPrimitive(hh, [pd_df_float, int], pd_df_float, name="hh")
pset.addPrimitive(ll, [pd_df_float, int], pd_df_float, name="ll")
pset.addPrimitive(pd_std, [pd_df_float, int], pd_df_float, name="pd_std")
pset.addPrimitive(np.log, [pd_df_float], pd_df_float)
pset.addPrimitive(pd_diff, [pd_df_float, int], pd_df_float)
pset.addPrimitive(pd_add, [pd_df_float, pd_df_float], pd_df_float, name="pd_add")
pset.addPrimitive(pd_subtract, [pd_df_float, pd_df_float], pd_df_float, name="pd_sub")
pset.addPrimitive(pd_multiply, [pd_df_float, pd_df_float], pd_df_float, name="pd_mul")
pset.addPrimitive(pd_divide, [pd_df_float, pd_df_float], pd_df_float, name="pd_div")
pset.addPrimitive(operator.add, [int, int], int, name="add")
pset.addPrimitive(operator.sub, [int, int], int, name="sub")
#pset.addPrimitive(operator.mul, [int, int], int, name="mul")
pset.addPrimitive(protectedDiv, [int, int], int, name="div")
pset.addPrimitive(f_gt, [pd_df_float, float], pd_df_bool )
pset.addPrimitive(f_lt, [pd_df_float, float], pd_df_bool )
pset.addEphemeralConstant("short", lambda: random.randint(2,60), int)
pset.addEphemeralConstant("medium", lambda: random.randint(60,100), int)
pset.addEphemeralConstant("long", lambda: random.randint(100,200), int)
pset.addEphemeralConstant("xtralong", lambda: random.randint(200,20000), int)
pset.addEphemeralConstant("rand100", lambda: random.randint(0,100), int)
pset.addEphemeralConstant("randfloat", lambda: np.random.normal() / 100. , float)
pset.addPrimitive(operator.lt, [pd_df_float, pd_df_float], pd_df_bool)
pset.addPrimitive(operator.gt, [pd_df_float, pd_df_float], pd_df_bool)
pset.addPrimitive(np.bitwise_and, [pd_df_bool, pd_df_bool], pd_df_bool)
pset.addPrimitive(np.bitwise_or, [pd_df_bool, pd_df_bool], pd_df_bool)
pset.addPrimitive(np.bitwise_xor, [pd_df_bool, pd_df_bool], pd_df_bool)
pset.addPrimitive(np.bitwise_not, [pd_df_bool], pd_df_bool)
pset.addPrimitive(operator.add, [float, float], float, name="f_add")
pset.addPrimitive(operator.sub, [float, float], float, name="f_sub")
pset.addPrimitive(protectedDiv, [float, float], float, name="f_div")
pset.addPrimitive(operator.mul, [float, float], float, name="f_mul")
#Better to pass this terminals as arguments (ARG5 and ARG6)
#pset.addTerminal(pd.TimeSeries(data=[1] * len(train), index=train.index, dtype=bool), pd_df_bool, name="ones")
#pset.addTerminal(pd.TimeSeries(data=[0] * len(train), index=train.index, dtype=bool), pd_df_bool, name="zeros")
pset.addTerminal(1.618, float)
pset.addTerminal(0.1618, float)
pset.addTerminal(0.01618, float)
pset.addTerminal(0.001618, float)
pset.addTerminal(-0.001618, float)
pset.addTerminal(-0.01618, float)
pset.addTerminal(-0.1618, float)
pset.addTerminal(-1.618, float)
pset.addTerminal(1, int)
for p in primes:
pset.addTerminal(p, int)
for f in np.arange(0,0.2,0.002):
pset.addTerminal(f, float)
pset.addTerminal(-f, float)
creator.create('FitnessMax', base.Fitness, weights=(1.0,))
creator.create('Individual', gp.PrimitiveTree, fitness=creator.FitnessMax)
toolbox = base.Toolbox()
toolbox.register('expr', gp.genHalfAndHalf, pset=pset, min_=1, max_=6)
toolbox.register('individual', tools.initIterate, creator.Individual, toolbox.expr)
toolbox.register('population', tools.initRepeat, list, toolbox.individual)
toolbox.register('compile', gp.compile, pset=pset)
def evalFitness(individual, points):
func = toolbox.compile(expr=individual)
s = func(points.Open, points.High, points.Low, points.Close, points.Volume, points.Ones, points.Zeros)
# transform from bool to int
s = s*1
w = (s * points.Close.diff()) - np.abs(s.diff())*cost
w.dropna(inplace=True)
# W_win = w[w>0].sum()
# W_lose = abs(w[w<0].sum())
#
# profit_factor = protectedDiv(W_win, W_lose)
# return profit_factor ,
sharpe = w.mean() / w.std() * math.sqrt(600*255)
if np.isnan(sharpe) or np.isinf(sharpe):
| sharpe = -99999 | conditional_block | |
pandas_gp.py | constrained by the size of the vector
ibex["Ones"] = True
ibex["Zeros"] = False
# Transaction costs - 10 Eur per contract
#cost = 30
cost = 50 # equiv (30/10000) * 10 points
# Split into train and test
train = ibex["2000":"2015"].copy()
test = ibex["2016":].copy()
# Functions and terminal for GP
class pd_df_float(object):
pass
class pd_df_bool(object):
pass
def f_gt(df_in, f_value):
return df_in > f_value
def f_lt(df_in, f_value):
return df_in < f_value
def protectedDiv(left, right):
try: return left / right
except ZeroDivisionError: return 0.0
def pd_add(left, right):
return left + right
def pd_subtract(left, right):
return left - right
def pd_multiply(left, right):
return left * right
def pd_divide(left, right):
return left / right
def pd_diff(df_in, _periods):
return df_in.diff(periods=abs(_periods))
def sma(df_in, periods):
return pd.rolling_mean(df_in, abs(periods))
def ewma(df_in, periods):
if abs(periods) < 2:
return df_in
else:
return pd.ewma(df_in, abs(periods), min_periods=abs(periods))
def hh(df_in, periods):
if abs(periods) < 2:
return df_in
else:
return pd.rolling_max(df_in, abs(periods), min_periods=abs(periods))
def ll(df_in, periods):
if abs(periods) < 2:
return df_in
else:
return pd.rolling_min(df_in, abs(periods), min_periods=abs(periods))
def pd_std(df_in, periods):
if abs(periods) < 2:
return df_in
else:
return pd.rolling_std(df_in, abs(periods), min_periods=abs(periods))
pset = gp.PrimitiveSetTyped('MAIN', [pd_df_float,
pd_df_float,
pd_df_float,
pd_df_float,
pd_df_float,
pd_df_bool,
pd_df_bool],
pd_df_bool)
pset.renameArguments(ARG0='Open')
pset.renameArguments(ARG1='High')
pset.renameArguments(ARG2='Low')
pset.renameArguments(ARG3='Close')
pset.renameArguments(ARG4='Volume')
# need to have pd_df_bool terminals for GP to work
pset.renameArguments(ARG5='Ones')
pset.renameArguments(ARG6='Zeros')
pset.addPrimitive(sma, [pd_df_float, int], pd_df_float, name="sma")
pset.addPrimitive(ewma, [pd_df_float, int], pd_df_float, name="ewma")
pset.addPrimitive(hh, [pd_df_float, int], pd_df_float, name="hh")
pset.addPrimitive(ll, [pd_df_float, int], pd_df_float, name="ll")
pset.addPrimitive(pd_std, [pd_df_float, int], pd_df_float, name="pd_std")
pset.addPrimitive(np.log, [pd_df_float], pd_df_float)
pset.addPrimitive(pd_diff, [pd_df_float, int], pd_df_float)
pset.addPrimitive(pd_add, [pd_df_float, pd_df_float], pd_df_float, name="pd_add")
pset.addPrimitive(pd_subtract, [pd_df_float, pd_df_float], pd_df_float, name="pd_sub")
pset.addPrimitive(pd_multiply, [pd_df_float, pd_df_float], pd_df_float, name="pd_mul")
pset.addPrimitive(pd_divide, [pd_df_float, pd_df_float], pd_df_float, name="pd_div")
pset.addPrimitive(operator.add, [int, int], int, name="add")
pset.addPrimitive(operator.sub, [int, int], int, name="sub")
#pset.addPrimitive(operator.mul, [int, int], int, name="mul")
pset.addPrimitive(protectedDiv, [int, int], int, name="div")
pset.addPrimitive(f_gt, [pd_df_float, float], pd_df_bool )
pset.addPrimitive(f_lt, [pd_df_float, float], pd_df_bool )
pset.addEphemeralConstant("short", lambda: random.randint(2,60), int)
pset.addEphemeralConstant("medium", lambda: random.randint(60,100), int)
pset.addEphemeralConstant("long", lambda: random.randint(100,200), int)
pset.addEphemeralConstant("xtralong", lambda: random.randint(200,20000), int)
pset.addEphemeralConstant("rand100", lambda: random.randint(0,100), int)
pset.addEphemeralConstant("randfloat", lambda: np.random.normal() / 100. , float)
pset.addPrimitive(operator.lt, [pd_df_float, pd_df_float], pd_df_bool)
pset.addPrimitive(operator.gt, [pd_df_float, pd_df_float], pd_df_bool)
pset.addPrimitive(np.bitwise_and, [pd_df_bool, pd_df_bool], pd_df_bool)
pset.addPrimitive(np.bitwise_or, [pd_df_bool, pd_df_bool], pd_df_bool)
pset.addPrimitive(np.bitwise_xor, [pd_df_bool, pd_df_bool], pd_df_bool)
pset.addPrimitive(np.bitwise_not, [pd_df_bool], pd_df_bool)
pset.addPrimitive(operator.add, [float, float], float, name="f_add")
pset.addPrimitive(operator.sub, [float, float], float, name="f_sub")
pset.addPrimitive(protectedDiv, [float, float], float, name="f_div")
pset.addPrimitive(operator.mul, [float, float], float, name="f_mul")
#Better to pass this terminals as arguments (ARG5 and ARG6)
#pset.addTerminal(pd.TimeSeries(data=[1] * len(train), index=train.index, dtype=bool), pd_df_bool, name="ones")
#pset.addTerminal(pd.TimeSeries(data=[0] * len(train), index=train.index, dtype=bool), pd_df_bool, name="zeros")
pset.addTerminal(1.618, float)
pset.addTerminal(0.1618, float)
pset.addTerminal(0.01618, float)
pset.addTerminal(0.001618, float)
pset.addTerminal(-0.001618, float)
pset.addTerminal(-0.01618, float)
pset.addTerminal(-0.1618, float)
pset.addTerminal(-1.618, float)
pset.addTerminal(1, int)
for p in primes:
pset.addTerminal(p, int)
for f in np.arange(0,0.2,0.002):
pset.addTerminal(f, float)
pset.addTerminal(-f, float)
creator.create('FitnessMax', base.Fitness, weights=(1.0,))
creator.create('Individual', gp.PrimitiveTree, fitness=creator.FitnessMax)
toolbox = base.Toolbox()
toolbox.register('expr', gp.genHalfAndHalf, pset=pset, min_=1, max_=6)
toolbox.register('individual', tools.initIterate, creator.Individual, toolbox.expr)
toolbox.register('population', tools.initRepeat, list, toolbox.individual)
toolbox.register('compile', gp.compile, pset=pset)
def evalFitness(individual, points):
func = toolbox.compile(expr=individual)
s = func(points.Open, points.High, points.Low, points.Close, points.Volume, points.Ones, points.Zeros)
# transform from bool to int
s = s*1
w = (s * points.Close.diff()) - np.abs(s.diff())*cost
w.dropna(inplace=True)
# W_win = w[w>0].sum()
# W_lose = abs(w[w<0].sum())
#
# profit_factor = protectedDiv(W_win, W_lose)
# return profit_factor ,
sharpe = w.mean() / w.std() * math.sqrt(600*255)
if np.isnan(sharpe) or np.isinf(sharpe):
sharpe = -99999
return sharpe,
toolbox.register('evaluate', evalFitness, points=train)
toolbox.register('select', tools.selTournament, tournsize=3)
toolbox.register('mate', gp.cxOnePoint)
toolbox.register('expr_mut', gp.genFull, min_=0, max_=3)
toolbox.register('mutate', gp.mutUniform, expr=toolbox.expr_mut, pset=pset)
def plot(individual):
| nodes, edges, labels = gp.graph(individual)
g = nx.Graph()
g.add_nodes_from(nodes)
g.add_edges_from(edges)
pos = nx.drawing.nx_agraph.graphviz_layout(g, prog="dot")
nx.draw_networkx_nodes(g, pos)
nx.draw_networkx_edges(g, pos)
nx.draw_networkx_labels(g, pos, labels)
plt.show() | identifier_body | |
pandas_gp.py | 531,1543,1549,1553,1559,1567,1571,1579,1583,1597,1601,1607,
# 1609,1613,1619,1621,1627,1637,1657,1663,1667,1669,1693,1697,1699,
# 1709,1721,1723,1733,1741,1747,1753,1759,1777,1783,1787,1789,1801,
# 1811,1823,1831,1847,1861,1867,1871,1873,1877,1879,1889,1901,1907,
# 1913,1931,1933,1949,1951,1973,1979,1987,1993,1997,1999,2003,2011,
# 2017,2027,2029,2039,2053,2063,2069,2081,2083,2087,2089,2099,2111,
# 2113,2129,2131,2137,2141,2143,2153,2161,2179,2203,2207,2213,2221,
# 2237,2239,2243,2251,2267,2269,2273,2281,2287,2293,2297,2309,2311,
# 2333,2339,2341,2347,2351,2357,2371,2377,2381,2383,2389,2393,2399,
# 2411,2417,2423,2437,2441,2447,2459,2467,2473,2477,2503,2521,2531,
# 2539,2543,2549,2551,2557,2579,2591,2593,2609,2617,2621,2633,2647,
# 2657,2659,2663,2671,2677,2683,2687,2689,2693,2699,2707,2711,2713,
# 2719,2729,2731,2741,2749,2753,2767,2777,2789,2791,2797,2801,2803,
# 2819,2833,2837,2843,2851,2857,2861,2879,2887,2897,2903,2909,2917,
# 2927,2939,2953,2957,2963,2969,2971,2999,3001,3011,3019,3023,3037,
# 3041,3049,3061,3067,3079,3083,3089,3109,3119,3121,3137,3163,3167,
# 3169,3181,3187,3191,3203,3209,3217,3221,3229,3251,3253,3257,3259,
# 3271,3299,3301,3307,3313,3319,3323,3329,3331,3343,3347,3359,3361,
# 3371,3373,3389,3391,3407,3413,3433,3449,3457,3461,3463,3467,3469,
# 3491,3499,3511,3517,3527,3529,3533,3539,3541,3547,3557,3559,3571,
# 3581,3583,3593,3607,3613,3617,3623,3631,3637,3643,3659,3671,3673,
# 3677,3691,3697,3701,3709,3719,3727,3733,3739,3761,3767,3769,3779,
# 3793,3797,3803,3821,3823,3833,3847,3851,3853,3863,3877,3881,3889,
# 3907,3911,3917,3919,3923,3929,3931,3943,3947,3967,3989,4001,4003,
# 4007,4013,4019,4021,4027,4049,4051,4057,4073,4079,4091,4093,4099,
# 4111,4127,4129,4133,4139,4153,4157,4159,4177,4201,4211,4217,4219,
# 4229,4231]
# load the data
ibex = cPickle.load(open("ibex.pickle", "rb"))
#we need to have pd_df_bool terminals for gp to work
#better to have them as a column in the dataset
#then to use a terminal calling np.ones or np.zeros
#as we are not constrained by the size of the vector
ibex["Ones"] = True
ibex["Zeros"] = False
# Transaction costs - 10 Eur per contract
#cost = 30
cost = 50 # equiv (30/10000) * 10 points
# Split into train and test
train = ibex["2000":"2015"].copy()
test = ibex["2016":].copy()
# Functions and terminal for GP
class pd_df_float(object):
pass
class pd_df_bool(object):
pass
def f_gt(df_in, f_value):
return df_in > f_value
def f_lt(df_in, f_value):
return df_in < f_value
def protectedDiv(left, right):
try: return left / right
except ZeroDivisionError: return 0.0
def pd_add(left, right):
return left + right
def | pd_subtract | identifier_name | |
pandas_gp.py | 1949,1951,1973,1979,1987,1993,1997,1999,2003,2011,
# 2017,2027,2029,2039,2053,2063,2069,2081,2083,2087,2089,2099,2111,
# 2113,2129,2131,2137,2141,2143,2153,2161,2179,2203,2207,2213,2221,
# 2237,2239,2243,2251,2267,2269,2273,2281,2287,2293,2297,2309,2311,
# 2333,2339,2341,2347,2351,2357,2371,2377,2381,2383,2389,2393,2399,
# 2411,2417,2423,2437,2441,2447,2459,2467,2473,2477,2503,2521,2531,
# 2539,2543,2549,2551,2557,2579,2591,2593,2609,2617,2621,2633,2647,
# 2657,2659,2663,2671,2677,2683,2687,2689,2693,2699,2707,2711,2713,
# 2719,2729,2731,2741,2749,2753,2767,2777,2789,2791,2797,2801,2803,
# 2819,2833,2837,2843,2851,2857,2861,2879,2887,2897,2903,2909,2917,
# 2927,2939,2953,2957,2963,2969,2971,2999,3001,3011,3019,3023,3037,
# 3041,3049,3061,3067,3079,3083,3089,3109,3119,3121,3137,3163,3167,
# 3169,3181,3187,3191,3203,3209,3217,3221,3229,3251,3253,3257,3259,
# 3271,3299,3301,3307,3313,3319,3323,3329,3331,3343,3347,3359,3361,
# 3371,3373,3389,3391,3407,3413,3433,3449,3457,3461,3463,3467,3469,
# 3491,3499,3511,3517,3527,3529,3533,3539,3541,3547,3557,3559,3571,
# 3581,3583,3593,3607,3613,3617,3623,3631,3637,3643,3659,3671,3673,
# 3677,3691,3697,3701,3709,3719,3727,3733,3739,3761,3767,3769,3779,
# 3793,3797,3803,3821,3823,3833,3847,3851,3853,3863,3877,3881,3889,
# 3907,3911,3917,3919,3923,3929,3931,3943,3947,3967,3989,4001,4003,
# 4007,4013,4019,4021,4027,4049,4051,4057,4073,4079,4091,4093,4099,
# 4111,4127,4129,4133,4139,4153,4157,4159,4177,4201,4211,4217,4219,
# 4229,4231]
# load the data
ibex = cPickle.load(open("ibex.pickle", "rb"))
#we need to have pd_df_bool terminals for gp to work
#better to have them as a column in the dataset
#then to use a terminal calling np.ones or np.zeros
#as we are not constrained by the size of the vector
ibex["Ones"] = True
ibex["Zeros"] = False
# Transaction costs - 10 Eur per contract
#cost = 30
cost = 50 # equiv (30/10000) * 10 points
# Split into train and test
train = ibex["2000":"2015"].copy()
test = ibex["2016":].copy()
# Functions and terminal for GP
class pd_df_float(object):
pass
class pd_df_bool(object):
pass
def f_gt(df_in, f_value):
return df_in > f_value
def f_lt(df_in, f_value):
return df_in < f_value
def protectedDiv(left, right):
try: return left / right
except ZeroDivisionError: return 0.0
def pd_add(left, right):
return left + right
def pd_subtract(left, right):
return left - right
def pd_multiply(left, right):
return left * right
def pd_divide(left, right):
return left / right
def pd_diff(df_in, _periods):
return df_in.diff(periods=abs(_periods))
def sma(df_in, periods):
return pd.rolling_mean(df_in, abs(periods))
def ewma(df_in, periods):
if abs(periods) < 2:
return df_in
else:
return pd.ewma(df_in, abs(periods), min_periods=abs(periods))
def hh(df_in, periods):
if abs(periods) < 2:
return df_in
else:
return pd.rolling_max(df_in, abs(periods), min_periods=abs(periods))
def ll(df_in, periods):
if abs(periods) < 2:
return df_in
else:
return pd.rolling_min(df_in, abs(periods), min_periods=abs(periods))
def pd_std(df_in, periods):
if abs(periods) < 2:
return df_in
else:
return pd.rolling_std(df_in, abs(periods), min_periods=abs(periods))
| pset = gp.PrimitiveSetTyped('MAIN', [pd_df_float, | random_line_split | |
gke.go | /api/container/v1"
)
type GKENodePoolCall struct {
targetLabel string
projectID string
error error
s *compute.Service
ctx context.Context
targetLabelValue string
}
func GKENodePool(ctx context.Context, projectID string) *GKENodePoolCall {
s, err := compute.NewService(ctx)
if err != nil {
return &GKENodePoolCall{error: err}
}
// get all templates list
return &GKENodePoolCall{
s: s,
projectID: projectID,
ctx: ctx,
}
}
func (r *GKENodePoolCall) Filter(labelName, value string) *GKENodePoolCall {
if r.error != nil {
return r
}
r.targetLabel = labelName
r.targetLabelValue = value
return r
}
func (r *GKENodePoolCall) Resize(size int64) (*model.Report, error) {
if r.error != nil {
return nil, r.error
}
// get all instance group mangers list
managerList, err := compute.NewInstanceGroupManagersService(r.s).AggregatedList(r.projectID).Do()
if err != nil {
return nil, err
}
// add instance group name of cluster node pool to Set
gkeNodePoolInstanceGroupSet, err := r.getGKEInstanceGroup()
if err != nil {
return nil, err
}
fmt.Println("gkeNodePoolInstanceGroupSet:", gkeNodePoolInstanceGroupSet.ToSlice())
var res = r.error
var alreadyRes []string
var doneRes []string
for _, manager := range valuesIG(managerList.Items) {
fmt.Println("manager.InstanceTemplate:", manager.InstanceTemplate)
fmt.Println("manager.Name:", manager.Name)
// Check GKE NodePool InstanceGroup
if gkeNodePoolInstanceGroupSet.Contains(manager.Name) {
if !manager.Status.IsStable {
continue
}
if manager.TargetSize == size {
alreadyRes = append(alreadyRes, manager.Name)
continue
}
// get manager zone name
zoneUrlElements := strings.Split(manager.Zone, "/")
zone := zoneUrlElements[len(zoneUrlElements)-1]
ms := compute.NewInstanceGroupManagersService(r.s)
if _, err := ms.Resize(r.projectID, zone, manager.Name, size).Do(); err != nil {
res = multierror.Append(res, err)
continue
}
doneRes = append(doneRes, manager.Name)
}
time.Sleep(CallInterval)
}
return &model.Report{
InstanceType: model.GKENodePool,
Dones: doneRes,
Alreadies: alreadyRes,
}, res
}
func (r *GKENodePoolCall) | () (*model.Report, error) {
if r.error != nil {
return nil, r.error
}
managerList, err := compute.NewInstanceGroupManagersService(r.s).AggregatedList(r.projectID).Do()
if err != nil {
return nil, err
}
// add instance group name of cluster node pool to Set
gkeNodePoolInstanceGroupSet, err := r.getGKEInstanceGroup()
if err != nil {
return nil, err
}
sizeMap, err := GetOriginalNodePoolSize(r.ctx, r.projectID, r.targetLabel, r.targetLabelValue)
if err != nil {
return nil, err
}
var res = r.error
var doneRes []string
var alreadyRes []string
for _, manager := range valuesIG(managerList.Items) {
// check instance group of gke node pool
if gkeNodePoolInstanceGroupSet.Contains(manager.Name) {
if !manager.Status.IsStable {
continue
}
split := strings.Split(manager.InstanceGroup, "/")
instanceGroupName := split[len(split)-1]
originalSize := sizeMap[instanceGroupName]
if manager.TargetSize == originalSize {
alreadyRes = append(alreadyRes, manager.Name)
continue
}
// get manager zone name
zoneUrlElements := strings.Split(manager.Zone, "/")
zone := zoneUrlElements[len(zoneUrlElements)-1] // ex) us-central1-a
ms := compute.NewInstanceGroupManagersService(r.s)
if _, err := ms.Resize(r.projectID, zone, manager.Name, originalSize).Do(); err != nil {
res = multierror.Append(res, err)
continue
}
doneRes = append(doneRes, manager.Name)
}
time.Sleep(CallInterval)
}
return &model.Report{
InstanceType: model.GKENodePool,
Dones: doneRes,
Alreadies: alreadyRes,
}, res
}
// get target GKE instance group Set
func (r *GKENodePoolCall) getGKEInstanceGroup() (set.Set, error) {
s, err := container.NewService(r.ctx)
if err != nil {
return nil, err
}
// get all clusters list
clusters, err := container.NewProjectsLocationsClustersService(s).List("projects/" + r.projectID + "/locations/-").Do()
if err != nil {
return nil, err
}
res := set.NewSet()
for _, cluster := range filter(clusters.Clusters, r.targetLabel, r.targetLabelValue) {
for _, nodePool := range cluster.NodePools {
for _, gkeInstanceGroup := range nodePool.InstanceGroupUrls {
tmpUrlElements := strings.Split(gkeInstanceGroup, "/")
managerTemplate := tmpUrlElements[len(tmpUrlElements)-1]
res.Add(managerTemplate) // e.g. gke-tky-cluster-default-pool-cb765a7d-grp
}
}
}
return res, nil
}
func SetLableIfNoLabel(ctx context.Context, projectID, targetLabel string) error {
s, err := container.NewService(ctx)
if err != nil {
return err
}
currentNodeSize, err := GetCurrentNodeCount(ctx, projectID, targetLabel)
if err != nil {
return err
}
// get all clusters list
clusters, err := container.NewProjectsLocationsClustersService(s).List("projects/" + projectID + "/locations/-").Do()
if err != nil {
return err
}
for _, cluster := range filter(clusters.Clusters, targetLabel, "true") {
labels := cluster.ResourceLabels
fingerprint := cluster.LabelFingerprint
for _, nodePool := range cluster.NodePools {
nodeSizeLabel := "restore-size-" + nodePool.Name
_, ok := labels[nodeSizeLabel]
if !ok {
// set new label
labels[nodeSizeLabel] = strconv.FormatInt(currentNodeSize[nodePool.Name], 10)
}
}
parseRegion := strings.Split(cluster.Location, "/")
region := parseRegion[len(parseRegion)-1]
name := "projects/" + projectID + "/locations/" + region + "/clusters/" + cluster.Name
req := &container.SetLabelsRequest{
ResourceLabels: labels,
LabelFingerprint: fingerprint,
}
// update labels
_, err := container.NewProjectsLocationsClustersService(s).SetResourceLabels(name, req).Do()
if err != nil {
return err
}
}
return nil
}
// GetOriginalNodePoolSize returns map that key=instanceGroupName and value=originalSize
func GetOriginalNodePoolSize(ctx context.Context, projectID, targetLabel, labelValue string) (map[string]int64, error) {
s, err := container.NewService(ctx)
if err != nil {
return nil, err
}
// get all clusters list
clusters, err := container.NewProjectsLocationsClustersService(s).List("projects/" + projectID + "/locations/-").Do()
if err != nil {
return nil, err
}
result := make(map[string]int64)
for _, cluster := range filter(clusters.Clusters, targetLabel, labelValue) {
labels := cluster.ResourceLabels
for _, nodePool := range cluster.NodePools {
restoreSize, ok := labels["restore-size-"+nodePool.Name]
if !ok {
continue
}
size, err := strconv.Atoi(restoreSize)
if err != nil {
return nil, errors.New("label: " + "restore-size-" + nodePool.Name + " value is not number format?")
}
for _, url := range nodePool.InstanceGroupUrls {
// u;rl is below format
// e.g. https://www.googleapis.com/compute/v1/projects/{ProjectID}/zones/us-central1-a/instanceGroupManagers/gke-standard-cluster-1-default-pool-1234abcd-grp
urlSplit := strings.Split(url, "/")
instanceGroupName := urlSplit[len(urlSplit)-1]
result[instanceGroupName] = int64(size)
}
}
}
return result, nil
}
// GetCurrentNodeCount returns map that key=NodePoolName and value=currentSize
func GetCurrentNodeCount(ctx context.Context, projectID, targetLabel string) (map[string]int64, error) {
s, err := container.NewService(ctx)
if err != nil {
return nil, err
}
computeService, err := compute.NewService(ctx)
| Recovery | identifier_name |
gke.go | * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package operator
import (
"errors"
"fmt"
"regexp"
"strconv"
"strings"
"time"
set "github.com/deckarep/golang-set"
"github.com/future-architect/gcp-instance-scheduler/model"
"github.com/hashicorp/go-multierror"
"golang.org/x/net/context"
"google.golang.org/api/compute/v1"
"google.golang.org/api/container/v1"
)
type GKENodePoolCall struct {
targetLabel string
projectID string
error error
s *compute.Service
ctx context.Context
targetLabelValue string
}
func GKENodePool(ctx context.Context, projectID string) *GKENodePoolCall {
s, err := compute.NewService(ctx)
if err != nil {
return &GKENodePoolCall{error: err}
}
// get all templates list
return &GKENodePoolCall{
s: s,
projectID: projectID,
ctx: ctx,
}
}
func (r *GKENodePoolCall) Filter(labelName, value string) *GKENodePoolCall {
if r.error != nil {
return r
}
r.targetLabel = labelName
r.targetLabelValue = value
return r
}
func (r *GKENodePoolCall) Resize(size int64) (*model.Report, error) {
if r.error != nil {
return nil, r.error
}
// get all instance group mangers list
managerList, err := compute.NewInstanceGroupManagersService(r.s).AggregatedList(r.projectID).Do()
if err != nil {
return nil, err
}
// add instance group name of cluster node pool to Set
gkeNodePoolInstanceGroupSet, err := r.getGKEInstanceGroup()
if err != nil {
return nil, err
}
fmt.Println("gkeNodePoolInstanceGroupSet:", gkeNodePoolInstanceGroupSet.ToSlice())
var res = r.error
var alreadyRes []string
var doneRes []string
for _, manager := range valuesIG(managerList.Items) {
fmt.Println("manager.InstanceTemplate:", manager.InstanceTemplate)
fmt.Println("manager.Name:", manager.Name)
// Check GKE NodePool InstanceGroup
if gkeNodePoolInstanceGroupSet.Contains(manager.Name) {
if !manager.Status.IsStable {
continue
}
if manager.TargetSize == size {
alreadyRes = append(alreadyRes, manager.Name)
continue
}
// get manager zone name
zoneUrlElements := strings.Split(manager.Zone, "/")
zone := zoneUrlElements[len(zoneUrlElements)-1]
ms := compute.NewInstanceGroupManagersService(r.s)
if _, err := ms.Resize(r.projectID, zone, manager.Name, size).Do(); err != nil {
res = multierror.Append(res, err)
continue
}
doneRes = append(doneRes, manager.Name)
}
time.Sleep(CallInterval)
}
return &model.Report{
InstanceType: model.GKENodePool,
Dones: doneRes,
Alreadies: alreadyRes,
}, res
}
func (r *GKENodePoolCall) Recovery() (*model.Report, error) {
if r.error != nil {
return nil, r.error
}
managerList, err := compute.NewInstanceGroupManagersService(r.s).AggregatedList(r.projectID).Do()
if err != nil {
return nil, err
}
// add instance group name of cluster node pool to Set
gkeNodePoolInstanceGroupSet, err := r.getGKEInstanceGroup()
if err != nil {
return nil, err
}
sizeMap, err := GetOriginalNodePoolSize(r.ctx, r.projectID, r.targetLabel, r.targetLabelValue)
if err != nil {
return nil, err
}
var res = r.error
var doneRes []string
var alreadyRes []string
for _, manager := range valuesIG(managerList.Items) {
// check instance group of gke node pool
if gkeNodePoolInstanceGroupSet.Contains(manager.Name) {
if !manager.Status.IsStable {
continue
}
split := strings.Split(manager.InstanceGroup, "/")
instanceGroupName := split[len(split)-1]
originalSize := sizeMap[instanceGroupName]
if manager.TargetSize == originalSize {
alreadyRes = append(alreadyRes, manager.Name)
continue
}
// get manager zone name
zoneUrlElements := strings.Split(manager.Zone, "/")
zone := zoneUrlElements[len(zoneUrlElements)-1] // ex) us-central1-a
ms := compute.NewInstanceGroupManagersService(r.s)
if _, err := ms.Resize(r.projectID, zone, manager.Name, originalSize).Do(); err != nil {
res = multierror.Append(res, err)
continue
}
doneRes = append(doneRes, manager.Name)
}
time.Sleep(CallInterval)
}
return &model.Report{
InstanceType: model.GKENodePool,
Dones: doneRes,
Alreadies: alreadyRes,
}, res
}
// get target GKE instance group Set
func (r *GKENodePoolCall) getGKEInstanceGroup() (set.Set, error) {
s, err := container.NewService(r.ctx)
if err != nil {
return nil, err
}
// get all clusters list
clusters, err := container.NewProjectsLocationsClustersService(s).List("projects/" + r.projectID + "/locations/-").Do()
if err != nil {
return nil, err
}
res := set.NewSet()
for _, cluster := range filter(clusters.Clusters, r.targetLabel, r.targetLabelValue) {
for _, nodePool := range cluster.NodePools {
for _, gkeInstanceGroup := range nodePool.InstanceGroupUrls {
tmpUrlElements := strings.Split(gkeInstanceGroup, "/")
managerTemplate := tmpUrlElements[len(tmpUrlElements)-1]
res.Add(managerTemplate) // e.g. gke-tky-cluster-default-pool-cb765a7d-grp
}
}
}
return res, nil
}
func SetLableIfNoLabel(ctx context.Context, projectID, targetLabel string) error {
s, err := container.NewService(ctx)
if err != nil {
return err
}
currentNodeSize, err := GetCurrentNodeCount(ctx, projectID, targetLabel)
if err != nil {
return err
}
// get all clusters list
clusters, err := container.NewProjectsLocationsClustersService(s).List("projects/" + projectID + "/locations/-").Do()
if err != nil {
return err
}
for _, cluster := range filter(clusters.Clusters, targetLabel, "true") {
labels := cluster.ResourceLabels
fingerprint := cluster.LabelFingerprint
for _, nodePool := range cluster.NodePools {
nodeSizeLabel := "restore-size-" + nodePool.Name
_, ok := labels[nodeSizeLabel]
if !ok {
// set new label
labels[nodeSizeLabel] = strconv.FormatInt(currentNodeSize[nodePool.Name], 10)
}
}
parseRegion := strings.Split(cluster.Location, "/")
region := parseRegion[len(parseRegion)-1]
name := "projects/" + projectID + "/locations/" + region + "/clusters/" + cluster.Name
req := &container.SetLabelsRequest{
ResourceLabels: labels,
LabelFingerprint: fingerprint,
}
// update labels
_, err := container.NewProjectsLocationsClustersService(s).SetResourceLabels(name, req).Do()
if err != nil {
return err
}
}
return nil
}
// GetOriginalNodePoolSize returns map that key=instanceGroupName and value=originalSize
func GetOriginalNodePoolSize(ctx context.Context, projectID, targetLabel, labelValue string) (map[string]int64, error) {
s, err := container.NewService(ctx)
if err != nil {
return nil, err
}
// get all clusters list
clusters, err := container.NewProjectsLocationsClustersService(s).List("projects/" + projectID + "/locations/-").Do()
if err != nil {
return nil, err
}
result := make(map[string]int64)
for _, cluster := range filter(clusters.Clusters, targetLabel, labelValue) {
labels := cluster.ResourceLabels
for _, nodePool := range cluster.NodePools {
restoreSize, ok := labels["restore-size-"+nodePool.Name]
if !ok {
continue
}
size, err := strconv.Atoi(restoreSize)
if err != nil {
return nil, errors.New("label: " + "restore-size-" + node | * | random_line_split | |
gke.go | [len(zoneUrlElements)-1]
ms := compute.NewInstanceGroupManagersService(r.s)
if _, err := ms.Resize(r.projectID, zone, manager.Name, size).Do(); err != nil {
res = multierror.Append(res, err)
continue
}
doneRes = append(doneRes, manager.Name)
}
time.Sleep(CallInterval)
}
return &model.Report{
InstanceType: model.GKENodePool,
Dones: doneRes,
Alreadies: alreadyRes,
}, res
}
func (r *GKENodePoolCall) Recovery() (*model.Report, error) {
if r.error != nil {
return nil, r.error
}
managerList, err := compute.NewInstanceGroupManagersService(r.s).AggregatedList(r.projectID).Do()
if err != nil {
return nil, err
}
// add instance group name of cluster node pool to Set
gkeNodePoolInstanceGroupSet, err := r.getGKEInstanceGroup()
if err != nil {
return nil, err
}
sizeMap, err := GetOriginalNodePoolSize(r.ctx, r.projectID, r.targetLabel, r.targetLabelValue)
if err != nil {
return nil, err
}
var res = r.error
var doneRes []string
var alreadyRes []string
for _, manager := range valuesIG(managerList.Items) {
// check instance group of gke node pool
if gkeNodePoolInstanceGroupSet.Contains(manager.Name) {
if !manager.Status.IsStable {
continue
}
split := strings.Split(manager.InstanceGroup, "/")
instanceGroupName := split[len(split)-1]
originalSize := sizeMap[instanceGroupName]
if manager.TargetSize == originalSize {
alreadyRes = append(alreadyRes, manager.Name)
continue
}
// get manager zone name
zoneUrlElements := strings.Split(manager.Zone, "/")
zone := zoneUrlElements[len(zoneUrlElements)-1] // ex) us-central1-a
ms := compute.NewInstanceGroupManagersService(r.s)
if _, err := ms.Resize(r.projectID, zone, manager.Name, originalSize).Do(); err != nil {
res = multierror.Append(res, err)
continue
}
doneRes = append(doneRes, manager.Name)
}
time.Sleep(CallInterval)
}
return &model.Report{
InstanceType: model.GKENodePool,
Dones: doneRes,
Alreadies: alreadyRes,
}, res
}
// get target GKE instance group Set
func (r *GKENodePoolCall) getGKEInstanceGroup() (set.Set, error) {
s, err := container.NewService(r.ctx)
if err != nil {
return nil, err
}
// get all clusters list
clusters, err := container.NewProjectsLocationsClustersService(s).List("projects/" + r.projectID + "/locations/-").Do()
if err != nil {
return nil, err
}
res := set.NewSet()
for _, cluster := range filter(clusters.Clusters, r.targetLabel, r.targetLabelValue) {
for _, nodePool := range cluster.NodePools {
for _, gkeInstanceGroup := range nodePool.InstanceGroupUrls {
tmpUrlElements := strings.Split(gkeInstanceGroup, "/")
managerTemplate := tmpUrlElements[len(tmpUrlElements)-1]
res.Add(managerTemplate) // e.g. gke-tky-cluster-default-pool-cb765a7d-grp
}
}
}
return res, nil
}
func SetLableIfNoLabel(ctx context.Context, projectID, targetLabel string) error {
s, err := container.NewService(ctx)
if err != nil {
return err
}
currentNodeSize, err := GetCurrentNodeCount(ctx, projectID, targetLabel)
if err != nil {
return err
}
// get all clusters list
clusters, err := container.NewProjectsLocationsClustersService(s).List("projects/" + projectID + "/locations/-").Do()
if err != nil {
return err
}
for _, cluster := range filter(clusters.Clusters, targetLabel, "true") {
labels := cluster.ResourceLabels
fingerprint := cluster.LabelFingerprint
for _, nodePool := range cluster.NodePools {
nodeSizeLabel := "restore-size-" + nodePool.Name
_, ok := labels[nodeSizeLabel]
if !ok {
// set new label
labels[nodeSizeLabel] = strconv.FormatInt(currentNodeSize[nodePool.Name], 10)
}
}
parseRegion := strings.Split(cluster.Location, "/")
region := parseRegion[len(parseRegion)-1]
name := "projects/" + projectID + "/locations/" + region + "/clusters/" + cluster.Name
req := &container.SetLabelsRequest{
ResourceLabels: labels,
LabelFingerprint: fingerprint,
}
// update labels
_, err := container.NewProjectsLocationsClustersService(s).SetResourceLabels(name, req).Do()
if err != nil {
return err
}
}
return nil
}
// GetOriginalNodePoolSize returns map that key=instanceGroupName and value=originalSize
func GetOriginalNodePoolSize(ctx context.Context, projectID, targetLabel, labelValue string) (map[string]int64, error) {
s, err := container.NewService(ctx)
if err != nil {
return nil, err
}
// get all clusters list
clusters, err := container.NewProjectsLocationsClustersService(s).List("projects/" + projectID + "/locations/-").Do()
if err != nil {
return nil, err
}
result := make(map[string]int64)
for _, cluster := range filter(clusters.Clusters, targetLabel, labelValue) {
labels := cluster.ResourceLabels
for _, nodePool := range cluster.NodePools {
restoreSize, ok := labels["restore-size-"+nodePool.Name]
if !ok {
continue
}
size, err := strconv.Atoi(restoreSize)
if err != nil {
return nil, errors.New("label: " + "restore-size-" + nodePool.Name + " value is not number format?")
}
for _, url := range nodePool.InstanceGroupUrls {
// u;rl is below format
// e.g. https://www.googleapis.com/compute/v1/projects/{ProjectID}/zones/us-central1-a/instanceGroupManagers/gke-standard-cluster-1-default-pool-1234abcd-grp
urlSplit := strings.Split(url, "/")
instanceGroupName := urlSplit[len(urlSplit)-1]
result[instanceGroupName] = int64(size)
}
}
}
return result, nil
}
// GetCurrentNodeCount returns map that key=NodePoolName and value=currentSize
func GetCurrentNodeCount(ctx context.Context, projectID, targetLabel string) (map[string]int64, error) {
s, err := container.NewService(ctx)
if err != nil {
return nil, err
}
computeService, err := compute.NewService(ctx)
if err != nil {
return nil, err
}
// get all clusters list
clusters, err := container.NewProjectsLocationsClustersService(s).List("projects/" + projectID + "/locations/-").Do()
if err != nil {
return nil, err
}
result := make(map[string]int64)
reZone := regexp.MustCompile(".*/zones/")
reInstance := regexp.MustCompile(".*/instanceGroupManagers/")
reEtc := regexp.MustCompile("/.*")
for _, cluster := range filter(clusters.Clusters, targetLabel, "true") {
for _, nodePool := range cluster.NodePools {
// nodePool.InstanceGroupUrls's format is below
// ["https://www.googleapis.com/compute/v1/projects/<projectID>/zones/<zone1>/instanceGroupManagers/gke-test-scheduler-2-default-pool-2b19b588-grp",
// "https://www.googleapis.com/compute/v1/projects/<projectID>/zones/<zone2>/instanceGroupManagers/gke-test-scheduler-2-default-pool-2b19b588-grp",
// "https://www.googleapis.com/compute/v1/projects/<projectID>/zones/<zone3>/instanceGroupManagers/gke-test-scheduler-2-default-pool-2b19b588-grp"]
zone := reZone.ReplaceAllString(nodePool.InstanceGroupUrls[0], "") //"<zone1>/instanceGroupManagers/gke-test-scheduler-2-default-pool-2b19b588-grp"
zone = reEtc.ReplaceAllString(zone, "") //"<zone1>
instanceGroup := reInstance.ReplaceAllString(nodePool.InstanceGroupUrls[0], "")
resp, err := computeService.InstanceGroups.Get(projectID, zone, instanceGroup).Context(ctx).Do()
if err != nil {
return nil, err
}
size := resp.Size
result[nodePool.Name] = size
}
}
return result, nil
}
// grep target cluster and create target cluster list
func filter(l []*container.Cluster, label, value string) []*container.Cluster {
if label == "" | { //TODO Temp impl
return l
} | conditional_block | |
gke.go | /api/container/v1"
)
type GKENodePoolCall struct {
targetLabel string
projectID string
error error
s *compute.Service
ctx context.Context
targetLabelValue string
}
func GKENodePool(ctx context.Context, projectID string) *GKENodePoolCall {
s, err := compute.NewService(ctx)
if err != nil {
return &GKENodePoolCall{error: err}
}
// get all templates list
return &GKENodePoolCall{
s: s,
projectID: projectID,
ctx: ctx,
}
}
func (r *GKENodePoolCall) Filter(labelName, value string) *GKENodePoolCall |
func (r *GKENodePoolCall) Resize(size int64) (*model.Report, error) {
if r.error != nil {
return nil, r.error
}
// get all instance group mangers list
managerList, err := compute.NewInstanceGroupManagersService(r.s).AggregatedList(r.projectID).Do()
if err != nil {
return nil, err
}
// add instance group name of cluster node pool to Set
gkeNodePoolInstanceGroupSet, err := r.getGKEInstanceGroup()
if err != nil {
return nil, err
}
fmt.Println("gkeNodePoolInstanceGroupSet:", gkeNodePoolInstanceGroupSet.ToSlice())
var res = r.error
var alreadyRes []string
var doneRes []string
for _, manager := range valuesIG(managerList.Items) {
fmt.Println("manager.InstanceTemplate:", manager.InstanceTemplate)
fmt.Println("manager.Name:", manager.Name)
// Check GKE NodePool InstanceGroup
if gkeNodePoolInstanceGroupSet.Contains(manager.Name) {
if !manager.Status.IsStable {
continue
}
if manager.TargetSize == size {
alreadyRes = append(alreadyRes, manager.Name)
continue
}
// get manager zone name
zoneUrlElements := strings.Split(manager.Zone, "/")
zone := zoneUrlElements[len(zoneUrlElements)-1]
ms := compute.NewInstanceGroupManagersService(r.s)
if _, err := ms.Resize(r.projectID, zone, manager.Name, size).Do(); err != nil {
res = multierror.Append(res, err)
continue
}
doneRes = append(doneRes, manager.Name)
}
time.Sleep(CallInterval)
}
return &model.Report{
InstanceType: model.GKENodePool,
Dones: doneRes,
Alreadies: alreadyRes,
}, res
}
func (r *GKENodePoolCall) Recovery() (*model.Report, error) {
if r.error != nil {
return nil, r.error
}
managerList, err := compute.NewInstanceGroupManagersService(r.s).AggregatedList(r.projectID).Do()
if err != nil {
return nil, err
}
// add instance group name of cluster node pool to Set
gkeNodePoolInstanceGroupSet, err := r.getGKEInstanceGroup()
if err != nil {
return nil, err
}
sizeMap, err := GetOriginalNodePoolSize(r.ctx, r.projectID, r.targetLabel, r.targetLabelValue)
if err != nil {
return nil, err
}
var res = r.error
var doneRes []string
var alreadyRes []string
for _, manager := range valuesIG(managerList.Items) {
// check instance group of gke node pool
if gkeNodePoolInstanceGroupSet.Contains(manager.Name) {
if !manager.Status.IsStable {
continue
}
split := strings.Split(manager.InstanceGroup, "/")
instanceGroupName := split[len(split)-1]
originalSize := sizeMap[instanceGroupName]
if manager.TargetSize == originalSize {
alreadyRes = append(alreadyRes, manager.Name)
continue
}
// get manager zone name
zoneUrlElements := strings.Split(manager.Zone, "/")
zone := zoneUrlElements[len(zoneUrlElements)-1] // ex) us-central1-a
ms := compute.NewInstanceGroupManagersService(r.s)
if _, err := ms.Resize(r.projectID, zone, manager.Name, originalSize).Do(); err != nil {
res = multierror.Append(res, err)
continue
}
doneRes = append(doneRes, manager.Name)
}
time.Sleep(CallInterval)
}
return &model.Report{
InstanceType: model.GKENodePool,
Dones: doneRes,
Alreadies: alreadyRes,
}, res
}
// get target GKE instance group Set
func (r *GKENodePoolCall) getGKEInstanceGroup() (set.Set, error) {
s, err := container.NewService(r.ctx)
if err != nil {
return nil, err
}
// get all clusters list
clusters, err := container.NewProjectsLocationsClustersService(s).List("projects/" + r.projectID + "/locations/-").Do()
if err != nil {
return nil, err
}
res := set.NewSet()
for _, cluster := range filter(clusters.Clusters, r.targetLabel, r.targetLabelValue) {
for _, nodePool := range cluster.NodePools {
for _, gkeInstanceGroup := range nodePool.InstanceGroupUrls {
tmpUrlElements := strings.Split(gkeInstanceGroup, "/")
managerTemplate := tmpUrlElements[len(tmpUrlElements)-1]
res.Add(managerTemplate) // e.g. gke-tky-cluster-default-pool-cb765a7d-grp
}
}
}
return res, nil
}
func SetLableIfNoLabel(ctx context.Context, projectID, targetLabel string) error {
s, err := container.NewService(ctx)
if err != nil {
return err
}
currentNodeSize, err := GetCurrentNodeCount(ctx, projectID, targetLabel)
if err != nil {
return err
}
// get all clusters list
clusters, err := container.NewProjectsLocationsClustersService(s).List("projects/" + projectID + "/locations/-").Do()
if err != nil {
return err
}
for _, cluster := range filter(clusters.Clusters, targetLabel, "true") {
labels := cluster.ResourceLabels
fingerprint := cluster.LabelFingerprint
for _, nodePool := range cluster.NodePools {
nodeSizeLabel := "restore-size-" + nodePool.Name
_, ok := labels[nodeSizeLabel]
if !ok {
// set new label
labels[nodeSizeLabel] = strconv.FormatInt(currentNodeSize[nodePool.Name], 10)
}
}
parseRegion := strings.Split(cluster.Location, "/")
region := parseRegion[len(parseRegion)-1]
name := "projects/" + projectID + "/locations/" + region + "/clusters/" + cluster.Name
req := &container.SetLabelsRequest{
ResourceLabels: labels,
LabelFingerprint: fingerprint,
}
// update labels
_, err := container.NewProjectsLocationsClustersService(s).SetResourceLabels(name, req).Do()
if err != nil {
return err
}
}
return nil
}
// GetOriginalNodePoolSize returns map that key=instanceGroupName and value=originalSize
func GetOriginalNodePoolSize(ctx context.Context, projectID, targetLabel, labelValue string) (map[string]int64, error) {
s, err := container.NewService(ctx)
if err != nil {
return nil, err
}
// get all clusters list
clusters, err := container.NewProjectsLocationsClustersService(s).List("projects/" + projectID + "/locations/-").Do()
if err != nil {
return nil, err
}
result := make(map[string]int64)
for _, cluster := range filter(clusters.Clusters, targetLabel, labelValue) {
labels := cluster.ResourceLabels
for _, nodePool := range cluster.NodePools {
restoreSize, ok := labels["restore-size-"+nodePool.Name]
if !ok {
continue
}
size, err := strconv.Atoi(restoreSize)
if err != nil {
return nil, errors.New("label: " + "restore-size-" + nodePool.Name + " value is not number format?")
}
for _, url := range nodePool.InstanceGroupUrls {
// u;rl is below format
// e.g. https://www.googleapis.com/compute/v1/projects/{ProjectID}/zones/us-central1-a/instanceGroupManagers/gke-standard-cluster-1-default-pool-1234abcd-grp
urlSplit := strings.Split(url, "/")
instanceGroupName := urlSplit[len(urlSplit)-1]
result[instanceGroupName] = int64(size)
}
}
}
return result, nil
}
// GetCurrentNodeCount returns map that key=NodePoolName and value=currentSize
func GetCurrentNodeCount(ctx context.Context, projectID, targetLabel string) (map[string]int64, error) {
s, err := container.NewService(ctx)
if err != nil {
return nil, err
}
computeService, err := compute.NewService(ctx)
| {
if r.error != nil {
return r
}
r.targetLabel = labelName
r.targetLabelValue = value
return r
} | identifier_body |
blob.go | TA_AOT = 0
/* OOP-JIT sub-types -- XOJIT type kept for external dependencies */
CS_LINKAGE_APPLICATION_XOJIT_PREVIEWS = 1
CS_LINKAGE_APPLICATION_OOPJIT_INVALID = 0
CS_LINKAGE_APPLICATION_OOPJIT_PREVIEWS = 1
CS_LINKAGE_APPLICATION_OOPJIT_MLCOMPILER = 2
CSTYPE_INDEX_REQUIREMENTS = 0x00000002 /* compat with amfi */
CSTYPE_INDEX_ENTITLEMENTS = 0x00000005 /* compat with amfi */
)
const (
/*
* Defined launch types
*/
CS_LAUNCH_TYPE_NONE = 0
CS_LAUNCH_TYPE_SYSTEM_SERVICE = 1
)
var NULL_PAGE_SHA256_HASH = []byte{
0xad, 0x7f, 0xac, 0xb2, 0x58, 0x6f, 0xc6, 0xe9,
0x66, 0xc0, 0x04, 0xd7, 0xd1, 0xd1, 0x6b, 0x02,
0x4f, 0x58, 0x05, 0xff, 0x7c, 0xb4, 0x7c, 0x7a,
0x85, 0xda, 0xbd, 0x8b, 0x48, 0x89, 0x2c, 0xa7,
}
type Magic uint32
const (
// Magic numbers used by Code Signing
MAGIC_REQUIREMENT Magic = 0xfade0c00 // single Requirement blob
MAGIC_REQUIREMENTS Magic = 0xfade0c01 // Requirements vector (internal requirements)
MAGIC_CODEDIRECTORY Magic = 0xfade0c02 // CodeDirectory blob
MAGIC_EMBEDDED_SIGNATURE Magic = 0xfade0cc0 // embedded form of signature data
MAGIC_EMBEDDED_SIGNATURE_OLD Magic = 0xfade0b02 /* XXX */
MAGIC_LIBRARY_DEPENDENCY_BLOB Magic = 0xfade0c05
MAGIC_EMBEDDED_ENTITLEMENTS Magic = 0xfade7171 /* embedded entitlements */
MAGIC_EMBEDDED_ENTITLEMENTS_DER Magic = 0xfade7172 /* embedded entitlements */
MAGIC_DETACHED_SIGNATURE Magic = 0xfade0cc1 // multi-arch collection of embedded signatures
MAGIC_BLOBWRAPPER Magic = 0xfade0b01 // used for the cms blob
MAGIC_EMBEDDED_LAUNCH_CONSTRAINT Magic = 0xfade8181 // Light weight code requirement
)
func (cm Magic) String() string {
switch cm {
case MAGIC_REQUIREMENT:
return "Requirement"
case MAGIC_REQUIREMENTS:
return "Requirements"
case MAGIC_CODEDIRECTORY:
return "Codedirectory"
case MAGIC_EMBEDDED_SIGNATURE:
return "Embedded Signature"
case MAGIC_EMBEDDED_SIGNATURE_OLD:
return "Embedded Signature (Old)"
case MAGIC_LIBRARY_DEPENDENCY_BLOB:
return "Library Dependency Blob"
case MAGIC_EMBEDDED_ENTITLEMENTS:
return "Embedded Entitlements"
case MAGIC_EMBEDDED_ENTITLEMENTS_DER:
return "Embedded Entitlements (DER)"
case MAGIC_DETACHED_SIGNATURE:
return "Detached Signature"
case MAGIC_BLOBWRAPPER:
return "Blob Wrapper"
case MAGIC_EMBEDDED_LAUNCH_CONSTRAINT:
return "Embedded Launch Constraint"
default:
return fmt.Sprintf("Magic(%#x)", uint32(cm))
}
}
type SbHeader struct {
Magic Magic `json:"magic,omitempty"` // magic number
Length uint32 `json:"length,omitempty"` // total length of SuperBlob
Count uint32 `json:"count,omitempty"` // number of index entries following
}
// SuperBlob object
type SuperBlob struct {
SbHeader
Index []BlobIndex // (count) entries
Blobs []Blob // followed by Blobs in no particular order as indicated by offsets in index
}
func NewSuperBlob(magic Magic) SuperBlob {
return SuperBlob{
SbHeader: SbHeader{
Magic: magic,
},
}
}
func (s *SuperBlob) AddBlob(typ SlotType, blob Blob) {
idx := BlobIndex{
Type: typ,
}
s.Index = append(s.Index, idx)
s.Blobs = append(s.Blobs, blob)
s.Count++
s.Length += uint32(binary.Size(BlobHeader{}.Magic)) + blob.Length + uint32(binary.Size(idx))
}
func (s *SuperBlob) GetBlob(typ SlotType) (Blob, error) {
for i, idx := range s.Index {
if idx.Type == typ {
return s.Blobs[i], nil
}
}
return Blob{}, fmt.Errorf("blob not found")
}
func (s *SuperBlob) Size() int {
sz := binary.Size(s.SbHeader) + binary.Size(BlobHeader{}) + binary.Size(s.Index)
for _, blob := range s.Blobs {
sz += binary.Size(blob.BlobHeader)
sz += len(blob.Data)
}
return sz
}
func (s *SuperBlob) Write(buf *bytes.Buffer, o binary.ByteOrder) error {
off := uint32(binary.Size(s.SbHeader) + binary.Size(s.Index))
for i := range s.Index {
s.Index[i].Offset = off
off += s.Blobs[i].Length
}
if err := binary.Write(buf, o, s.SbHeader); err != nil {
return fmt.Errorf("failed to write SuperBlob header to buffer: %v", err)
}
if err := binary.Write(buf, o, s.Index); err != nil {
return fmt.Errorf("failed to write SuperBlob indices to buffer: %v", err)
}
for _, blob := range s.Blobs {
if err := binary.Write(buf, o, blob.BlobHeader); err != nil {
return fmt.Errorf("failed to write blob header to superblob buffer: %v", err)
}
if err := binary.Write(buf, o, blob.Data); err != nil {
return fmt.Errorf("failed to write blob data to superblob buffer: %v", err)
}
}
return nil
}
type SlotType uint32
const (
CSSLOT_CODEDIRECTORY SlotType = 0
CSSLOT_INFOSLOT SlotType = 1 // Info.plist
CSSLOT_REQUIREMENTS SlotType = 2 // internal requirements
CSSLOT_RESOURCEDIR SlotType = 3 // resource directory
CSSLOT_APPLICATION SlotType = 4 // Application specific slot/Top-level directory list
CSSLOT_ENTITLEMENTS SlotType = 5 // embedded entitlement configuration
CSSLOT_REP_SPECIFIC SlotType = 6 // for use by disk images
CSSLOT_ENTITLEMENTS_DER SlotType = 7 // DER representation of entitlements plist
CSSLOT_LAUNCH_CONSTRAINT_SELF SlotType = 8
CSSLOT_LAUNCH_CONSTRAINT_PARENT SlotType = 9
CSSLOT_LAUNCH_CONSTRAINT_RESPONSIBLE SlotType = 10
CSSLOT_LIBRARY_CONSTRAINT SlotType = 11
CSSLOT_ALTERNATE_CODEDIRECTORIES SlotType = 0x1000 // Used for expressing a code directory using an alternate digest type.
CSSLOT_ALTERNATE_CODEDIRECTORIES1 SlotType = 0x1001 // Used for expressing a code directory using an alternate digest type.
CSSLOT_ALTERNATE_CODEDIRECTORIES2 SlotType = 0x1002 // Used for expressing a code directory using an alternate digest type.
CSSLOT_ALTERNATE_CODEDIRECTORIES3 SlotType = 0x1003 // Used for expressing a code directory using an alternate digest type.
CSSLOT_ALTERNATE_CODEDIRECTORIES4 SlotType = 0x1004 // Used for expressing a code directory using an alternate digest type.
CSSLOT_ALTERNATE_CODEDIRECTORY_MAX = 5
CSSLOT_ALTERNATE_CODEDIRECTORY_LIMIT = CSSLOT_ALTERNATE_CODEDIRECTORIES + CSSLOT_ALTERNATE_CODEDIRECTORY_MAX
CSSLOT_CMS_SIGNATURE SlotType = 0x10000 // CMS signature
CSSLOT_IDENTIFICATIONSLOT SlotType = 0x10001 // identification blob; used for detached signature
CSSLOT_TICKETSLOT SlotType = 0x10002 // Notarization ticket
)
func (c SlotType) String() string {
switch c {
case CSSLOT_CODEDIRECTORY:
return "CodeDirectory"
case CSSLOT_INFOSLOT:
return "Bound Info.plist"
case CSSLOT_REQUIREMENTS:
return "Requirements Blob"
case CSSLOT_RESOURCEDIR:
return "Resource Directory"
case CSSLOT_APPLICATION:
return "Application Specific"
case CSSLOT_ENTITLEMENTS:
return "Entitlements Plist"
case CSSLOT_REP_SPECIFIC:
return "DMG Specific"
case CSSLOT_ENTITLEMENTS_DER:
return "Entitlements ASN1/DER" | case CSSLOT_LAUNCH_CONSTRAINT_SELF:
return "Launch Constraint (self)"
case CSSLOT_LAUNCH_CONSTRAINT_PARENT: | random_line_split | |
blob.go | application types we support for linkage signatures */
CS_LINKAGE_APPLICATION_INVALID = 0
CS_LINKAGE_APPLICATION_ROSETTA = 1
/* XOJIT has been renamed to OOP-JIT */
CS_LINKAGE_APPLICATION_XOJIT = 2
CS_LINKAGE_APPLICATION_OOPJIT = 2
/* The set of application sub-types we support for linkage signatures */
/*
* For backwards compatibility with older signatures, the AOT sub-type is kept
* as 0.
*/
CS_LINKAGE_APPLICATION_ROSETTA_AOT = 0
/* OOP-JIT sub-types -- XOJIT type kept for external dependencies */
CS_LINKAGE_APPLICATION_XOJIT_PREVIEWS = 1
CS_LINKAGE_APPLICATION_OOPJIT_INVALID = 0
CS_LINKAGE_APPLICATION_OOPJIT_PREVIEWS = 1
CS_LINKAGE_APPLICATION_OOPJIT_MLCOMPILER = 2
CSTYPE_INDEX_REQUIREMENTS = 0x00000002 /* compat with amfi */
CSTYPE_INDEX_ENTITLEMENTS = 0x00000005 /* compat with amfi */
)
const (
/*
* Defined launch types
*/
CS_LAUNCH_TYPE_NONE = 0
CS_LAUNCH_TYPE_SYSTEM_SERVICE = 1
)
var NULL_PAGE_SHA256_HASH = []byte{
0xad, 0x7f, 0xac, 0xb2, 0x58, 0x6f, 0xc6, 0xe9,
0x66, 0xc0, 0x04, 0xd7, 0xd1, 0xd1, 0x6b, 0x02,
0x4f, 0x58, 0x05, 0xff, 0x7c, 0xb4, 0x7c, 0x7a,
0x85, 0xda, 0xbd, 0x8b, 0x48, 0x89, 0x2c, 0xa7,
}
type Magic uint32
const (
// Magic numbers used by Code Signing
MAGIC_REQUIREMENT Magic = 0xfade0c00 // single Requirement blob
MAGIC_REQUIREMENTS Magic = 0xfade0c01 // Requirements vector (internal requirements)
MAGIC_CODEDIRECTORY Magic = 0xfade0c02 // CodeDirectory blob
MAGIC_EMBEDDED_SIGNATURE Magic = 0xfade0cc0 // embedded form of signature data
MAGIC_EMBEDDED_SIGNATURE_OLD Magic = 0xfade0b02 /* XXX */
MAGIC_LIBRARY_DEPENDENCY_BLOB Magic = 0xfade0c05
MAGIC_EMBEDDED_ENTITLEMENTS Magic = 0xfade7171 /* embedded entitlements */
MAGIC_EMBEDDED_ENTITLEMENTS_DER Magic = 0xfade7172 /* embedded entitlements */
MAGIC_DETACHED_SIGNATURE Magic = 0xfade0cc1 // multi-arch collection of embedded signatures
MAGIC_BLOBWRAPPER Magic = 0xfade0b01 // used for the cms blob
MAGIC_EMBEDDED_LAUNCH_CONSTRAINT Magic = 0xfade8181 // Light weight code requirement
)
func (cm Magic) String() string {
switch cm {
case MAGIC_REQUIREMENT:
return "Requirement"
case MAGIC_REQUIREMENTS:
return "Requirements"
case MAGIC_CODEDIRECTORY:
return "Codedirectory"
case MAGIC_EMBEDDED_SIGNATURE:
return "Embedded Signature"
case MAGIC_EMBEDDED_SIGNATURE_OLD:
return "Embedded Signature (Old)"
case MAGIC_LIBRARY_DEPENDENCY_BLOB:
return "Library Dependency Blob"
case MAGIC_EMBEDDED_ENTITLEMENTS:
return "Embedded Entitlements"
case MAGIC_EMBEDDED_ENTITLEMENTS_DER:
return "Embedded Entitlements (DER)"
case MAGIC_DETACHED_SIGNATURE:
return "Detached Signature"
case MAGIC_BLOBWRAPPER:
return "Blob Wrapper"
case MAGIC_EMBEDDED_LAUNCH_CONSTRAINT:
return "Embedded Launch Constraint"
default:
return fmt.Sprintf("Magic(%#x)", uint32(cm))
}
}
type SbHeader struct {
Magic Magic `json:"magic,omitempty"` // magic number
Length uint32 `json:"length,omitempty"` // total length of SuperBlob
Count uint32 `json:"count,omitempty"` // number of index entries following
}
// SuperBlob object
type SuperBlob struct {
SbHeader
Index []BlobIndex // (count) entries
Blobs []Blob // followed by Blobs in no particular order as indicated by offsets in index
}
func NewSuperBlob(magic Magic) SuperBlob {
return SuperBlob{
SbHeader: SbHeader{
Magic: magic,
},
}
}
func (s *SuperBlob) AddBlob(typ SlotType, blob Blob) {
idx := BlobIndex{
Type: typ,
}
s.Index = append(s.Index, idx)
s.Blobs = append(s.Blobs, blob)
s.Count++
s.Length += uint32(binary.Size(BlobHeader{}.Magic)) + blob.Length + uint32(binary.Size(idx))
}
func (s *SuperBlob) GetBlob(typ SlotType) (Blob, error) {
for i, idx := range s.Index {
if idx.Type == typ {
return s.Blobs[i], nil
}
}
return Blob{}, fmt.Errorf("blob not found")
}
func (s *SuperBlob) Size() int {
sz := binary.Size(s.SbHeader) + binary.Size(BlobHeader{}) + binary.Size(s.Index)
for _, blob := range s.Blobs {
sz += binary.Size(blob.BlobHeader)
sz += len(blob.Data)
}
return sz
}
func (s *SuperBlob) Write(buf *bytes.Buffer, o binary.ByteOrder) error {
off := uint32(binary.Size(s.SbHeader) + binary.Size(s.Index))
for i := range s.Index {
s.Index[i].Offset = off
off += s.Blobs[i].Length
}
if err := binary.Write(buf, o, s.SbHeader); err != nil |
if err := binary.Write(buf, o, s.Index); err != nil {
return fmt.Errorf("failed to write SuperBlob indices to buffer: %v", err)
}
for _, blob := range s.Blobs {
if err := binary.Write(buf, o, blob.BlobHeader); err != nil {
return fmt.Errorf("failed to write blob header to superblob buffer: %v", err)
}
if err := binary.Write(buf, o, blob.Data); err != nil {
return fmt.Errorf("failed to write blob data to superblob buffer: %v", err)
}
}
return nil
}
type SlotType uint32
const (
CSSLOT_CODEDIRECTORY SlotType = 0
CSSLOT_INFOSLOT SlotType = 1 // Info.plist
CSSLOT_REQUIREMENTS SlotType = 2 // internal requirements
CSSLOT_RESOURCEDIR SlotType = 3 // resource directory
CSSLOT_APPLICATION SlotType = 4 // Application specific slot/Top-level directory list
CSSLOT_ENTITLEMENTS SlotType = 5 // embedded entitlement configuration
CSSLOT_REP_SPECIFIC SlotType = 6 // for use by disk images
CSSLOT_ENTITLEMENTS_DER SlotType = 7 // DER representation of entitlements plist
CSSLOT_LAUNCH_CONSTRAINT_SELF SlotType = 8
CSSLOT_LAUNCH_CONSTRAINT_PARENT SlotType = 9
CSSLOT_LAUNCH_CONSTRAINT_RESPONSIBLE SlotType = 10
CSSLOT_LIBRARY_CONSTRAINT SlotType = 11
CSSLOT_ALTERNATE_CODEDIRECTORIES SlotType = 0x1000 // Used for expressing a code directory using an alternate digest type.
CSSLOT_ALTERNATE_CODEDIRECTORIES1 SlotType = 0x1001 // Used for expressing a code directory using an alternate digest type.
CSSLOT_ALTERNATE_CODEDIRECTORIES2 SlotType = 0x1002 // Used for expressing a code directory using an alternate digest type.
CSSLOT_ALTERNATE_CODEDIRECTORIES3 SlotType = 0x1003 // Used for expressing a code directory using an alternate digest type.
CSSLOT_ALTERNATE_CODEDIRECTORIES4 SlotType = 0x1004 // Used for expressing a code directory using an alternate digest type.
CSSLOT_ALTERNATE_CODEDIRECTORY_MAX = 5
CSSLOT_ALTERNATE_CODEDIRECTORY_LIMIT = CSSLOT_ALTERNATE_CODEDIRECTORIES + CSSLOT_ALTERNATE_CODEDIRECTORY_MAX
CSSLOT_CMS_SIGNATURE SlotType = 0x10000 // CMS signature
CSSLOT_IDENTIFICATIONSLOT SlotType = 0x10001 // identification blob; used for detached signature
CSSLOT_TICKETSLOT SlotType = 0x10002 // Notarization ticket
)
func (c SlotType) String() string {
switch c {
case CSSLOT_CODEDIRECTORY:
return "CodeDirectory"
case CSSLOT_INFOSLOT | {
return fmt.Errorf("failed to write SuperBlob header to buffer: %v", err)
} | conditional_block |
blob.go | _DEPENDENCY_BLOB Magic = 0xfade0c05
MAGIC_EMBEDDED_ENTITLEMENTS Magic = 0xfade7171 /* embedded entitlements */
MAGIC_EMBEDDED_ENTITLEMENTS_DER Magic = 0xfade7172 /* embedded entitlements */
MAGIC_DETACHED_SIGNATURE Magic = 0xfade0cc1 // multi-arch collection of embedded signatures
MAGIC_BLOBWRAPPER Magic = 0xfade0b01 // used for the cms blob
MAGIC_EMBEDDED_LAUNCH_CONSTRAINT Magic = 0xfade8181 // Light weight code requirement
)
func (cm Magic) String() string {
switch cm {
case MAGIC_REQUIREMENT:
return "Requirement"
case MAGIC_REQUIREMENTS:
return "Requirements"
case MAGIC_CODEDIRECTORY:
return "Codedirectory"
case MAGIC_EMBEDDED_SIGNATURE:
return "Embedded Signature"
case MAGIC_EMBEDDED_SIGNATURE_OLD:
return "Embedded Signature (Old)"
case MAGIC_LIBRARY_DEPENDENCY_BLOB:
return "Library Dependency Blob"
case MAGIC_EMBEDDED_ENTITLEMENTS:
return "Embedded Entitlements"
case MAGIC_EMBEDDED_ENTITLEMENTS_DER:
return "Embedded Entitlements (DER)"
case MAGIC_DETACHED_SIGNATURE:
return "Detached Signature"
case MAGIC_BLOBWRAPPER:
return "Blob Wrapper"
case MAGIC_EMBEDDED_LAUNCH_CONSTRAINT:
return "Embedded Launch Constraint"
default:
return fmt.Sprintf("Magic(%#x)", uint32(cm))
}
}
type SbHeader struct {
Magic Magic `json:"magic,omitempty"` // magic number
Length uint32 `json:"length,omitempty"` // total length of SuperBlob
Count uint32 `json:"count,omitempty"` // number of index entries following
}
// SuperBlob object
type SuperBlob struct {
SbHeader
Index []BlobIndex // (count) entries
Blobs []Blob // followed by Blobs in no particular order as indicated by offsets in index
}
func NewSuperBlob(magic Magic) SuperBlob {
return SuperBlob{
SbHeader: SbHeader{
Magic: magic,
},
}
}
func (s *SuperBlob) AddBlob(typ SlotType, blob Blob) {
idx := BlobIndex{
Type: typ,
}
s.Index = append(s.Index, idx)
s.Blobs = append(s.Blobs, blob)
s.Count++
s.Length += uint32(binary.Size(BlobHeader{}.Magic)) + blob.Length + uint32(binary.Size(idx))
}
func (s *SuperBlob) GetBlob(typ SlotType) (Blob, error) {
for i, idx := range s.Index {
if idx.Type == typ {
return s.Blobs[i], nil
}
}
return Blob{}, fmt.Errorf("blob not found")
}
func (s *SuperBlob) Size() int {
sz := binary.Size(s.SbHeader) + binary.Size(BlobHeader{}) + binary.Size(s.Index)
for _, blob := range s.Blobs {
sz += binary.Size(blob.BlobHeader)
sz += len(blob.Data)
}
return sz
}
func (s *SuperBlob) Write(buf *bytes.Buffer, o binary.ByteOrder) error {
off := uint32(binary.Size(s.SbHeader) + binary.Size(s.Index))
for i := range s.Index {
s.Index[i].Offset = off
off += s.Blobs[i].Length
}
if err := binary.Write(buf, o, s.SbHeader); err != nil {
return fmt.Errorf("failed to write SuperBlob header to buffer: %v", err)
}
if err := binary.Write(buf, o, s.Index); err != nil {
return fmt.Errorf("failed to write SuperBlob indices to buffer: %v", err)
}
for _, blob := range s.Blobs {
if err := binary.Write(buf, o, blob.BlobHeader); err != nil {
return fmt.Errorf("failed to write blob header to superblob buffer: %v", err)
}
if err := binary.Write(buf, o, blob.Data); err != nil {
return fmt.Errorf("failed to write blob data to superblob buffer: %v", err)
}
}
return nil
}
type SlotType uint32
const (
CSSLOT_CODEDIRECTORY SlotType = 0
CSSLOT_INFOSLOT SlotType = 1 // Info.plist
CSSLOT_REQUIREMENTS SlotType = 2 // internal requirements
CSSLOT_RESOURCEDIR SlotType = 3 // resource directory
CSSLOT_APPLICATION SlotType = 4 // Application specific slot/Top-level directory list
CSSLOT_ENTITLEMENTS SlotType = 5 // embedded entitlement configuration
CSSLOT_REP_SPECIFIC SlotType = 6 // for use by disk images
CSSLOT_ENTITLEMENTS_DER SlotType = 7 // DER representation of entitlements plist
CSSLOT_LAUNCH_CONSTRAINT_SELF SlotType = 8
CSSLOT_LAUNCH_CONSTRAINT_PARENT SlotType = 9
CSSLOT_LAUNCH_CONSTRAINT_RESPONSIBLE SlotType = 10
CSSLOT_LIBRARY_CONSTRAINT SlotType = 11
CSSLOT_ALTERNATE_CODEDIRECTORIES SlotType = 0x1000 // Used for expressing a code directory using an alternate digest type.
CSSLOT_ALTERNATE_CODEDIRECTORIES1 SlotType = 0x1001 // Used for expressing a code directory using an alternate digest type.
CSSLOT_ALTERNATE_CODEDIRECTORIES2 SlotType = 0x1002 // Used for expressing a code directory using an alternate digest type.
CSSLOT_ALTERNATE_CODEDIRECTORIES3 SlotType = 0x1003 // Used for expressing a code directory using an alternate digest type.
CSSLOT_ALTERNATE_CODEDIRECTORIES4 SlotType = 0x1004 // Used for expressing a code directory using an alternate digest type.
CSSLOT_ALTERNATE_CODEDIRECTORY_MAX = 5
CSSLOT_ALTERNATE_CODEDIRECTORY_LIMIT = CSSLOT_ALTERNATE_CODEDIRECTORIES + CSSLOT_ALTERNATE_CODEDIRECTORY_MAX
CSSLOT_CMS_SIGNATURE SlotType = 0x10000 // CMS signature
CSSLOT_IDENTIFICATIONSLOT SlotType = 0x10001 // identification blob; used for detached signature
CSSLOT_TICKETSLOT SlotType = 0x10002 // Notarization ticket
)
func (c SlotType) String() string {
switch c {
case CSSLOT_CODEDIRECTORY:
return "CodeDirectory"
case CSSLOT_INFOSLOT:
return "Bound Info.plist"
case CSSLOT_REQUIREMENTS:
return "Requirements Blob"
case CSSLOT_RESOURCEDIR:
return "Resource Directory"
case CSSLOT_APPLICATION:
return "Application Specific"
case CSSLOT_ENTITLEMENTS:
return "Entitlements Plist"
case CSSLOT_REP_SPECIFIC:
return "DMG Specific"
case CSSLOT_ENTITLEMENTS_DER:
return "Entitlements ASN1/DER"
case CSSLOT_LAUNCH_CONSTRAINT_SELF:
return "Launch Constraint (self)"
case CSSLOT_LAUNCH_CONSTRAINT_PARENT:
return "Launch Constraint (parent)"
case CSSLOT_LAUNCH_CONSTRAINT_RESPONSIBLE:
return "Launch Constraint (responsible proc)"
case CSSLOT_LIBRARY_CONSTRAINT:
return "Library Constraint"
case CSSLOT_ALTERNATE_CODEDIRECTORIES:
return "Alternate CodeDirectories 0"
case CSSLOT_ALTERNATE_CODEDIRECTORIES1:
return "Alternate CodeDirectories 1"
case CSSLOT_ALTERNATE_CODEDIRECTORIES2:
return "Alternate CodeDirectories 2"
case CSSLOT_ALTERNATE_CODEDIRECTORIES3:
return "Alternate CodeDirectories 3"
case CSSLOT_ALTERNATE_CODEDIRECTORIES4:
return "Alternate CodeDirectories 4"
case CSSLOT_CMS_SIGNATURE:
return "CMS (RFC3852) signature"
case CSSLOT_IDENTIFICATIONSLOT:
return "IdentificationSlot"
case CSSLOT_TICKETSLOT:
return "TicketSlot"
default:
return fmt.Sprintf("Unknown SlotType: %d", c)
}
}
// BlobIndex object
type BlobIndex struct {
Type SlotType `json:"type,omitempty"` // type of entry
Offset uint32 `json:"offset,omitempty"` // offset of entry
}
type BlobHeader struct {
Magic Magic `json:"magic,omitempty"` // magic number
Length uint32 `json:"length,omitempty"` // total length of blob
}
// Blob object
type Blob struct {
BlobHeader
Data []byte // (length - sizeof(blob_header)) bytes
}
func NewBlob(magic Magic, data []byte) Blob {
return Blob{
BlobHeader: BlobHeader{
Magic: magic,
Length: uint32(binary.Size(BlobHeader{}) + len(data)),
},
Data: data,
}
}
func (b Blob) Sha256Hash() ([]byte, error) | {
h := sha256.New()
if err := binary.Write(h, binary.BigEndian, b.BlobHeader); err != nil {
return nil, fmt.Errorf("failed to hash blob header: %v", err)
}
if err := binary.Write(h, binary.BigEndian, b.Data); err != nil {
return nil, fmt.Errorf("failed to hash blob header: %v", err)
}
return h.Sum(nil), nil
} | identifier_body | |
blob.go | 0xfade0c00 // single Requirement blob
MAGIC_REQUIREMENTS Magic = 0xfade0c01 // Requirements vector (internal requirements)
MAGIC_CODEDIRECTORY Magic = 0xfade0c02 // CodeDirectory blob
MAGIC_EMBEDDED_SIGNATURE Magic = 0xfade0cc0 // embedded form of signature data
MAGIC_EMBEDDED_SIGNATURE_OLD Magic = 0xfade0b02 /* XXX */
MAGIC_LIBRARY_DEPENDENCY_BLOB Magic = 0xfade0c05
MAGIC_EMBEDDED_ENTITLEMENTS Magic = 0xfade7171 /* embedded entitlements */
MAGIC_EMBEDDED_ENTITLEMENTS_DER Magic = 0xfade7172 /* embedded entitlements */
MAGIC_DETACHED_SIGNATURE Magic = 0xfade0cc1 // multi-arch collection of embedded signatures
MAGIC_BLOBWRAPPER Magic = 0xfade0b01 // used for the cms blob
MAGIC_EMBEDDED_LAUNCH_CONSTRAINT Magic = 0xfade8181 // Light weight code requirement
)
func (cm Magic) String() string {
switch cm {
case MAGIC_REQUIREMENT:
return "Requirement"
case MAGIC_REQUIREMENTS:
return "Requirements"
case MAGIC_CODEDIRECTORY:
return "Codedirectory"
case MAGIC_EMBEDDED_SIGNATURE:
return "Embedded Signature"
case MAGIC_EMBEDDED_SIGNATURE_OLD:
return "Embedded Signature (Old)"
case MAGIC_LIBRARY_DEPENDENCY_BLOB:
return "Library Dependency Blob"
case MAGIC_EMBEDDED_ENTITLEMENTS:
return "Embedded Entitlements"
case MAGIC_EMBEDDED_ENTITLEMENTS_DER:
return "Embedded Entitlements (DER)"
case MAGIC_DETACHED_SIGNATURE:
return "Detached Signature"
case MAGIC_BLOBWRAPPER:
return "Blob Wrapper"
case MAGIC_EMBEDDED_LAUNCH_CONSTRAINT:
return "Embedded Launch Constraint"
default:
return fmt.Sprintf("Magic(%#x)", uint32(cm))
}
}
type SbHeader struct {
Magic Magic `json:"magic,omitempty"` // magic number
Length uint32 `json:"length,omitempty"` // total length of SuperBlob
Count uint32 `json:"count,omitempty"` // number of index entries following
}
// SuperBlob object
type SuperBlob struct {
SbHeader
Index []BlobIndex // (count) entries
Blobs []Blob // followed by Blobs in no particular order as indicated by offsets in index
}
func NewSuperBlob(magic Magic) SuperBlob {
return SuperBlob{
SbHeader: SbHeader{
Magic: magic,
},
}
}
func (s *SuperBlob) AddBlob(typ SlotType, blob Blob) {
idx := BlobIndex{
Type: typ,
}
s.Index = append(s.Index, idx)
s.Blobs = append(s.Blobs, blob)
s.Count++
s.Length += uint32(binary.Size(BlobHeader{}.Magic)) + blob.Length + uint32(binary.Size(idx))
}
func (s *SuperBlob) GetBlob(typ SlotType) (Blob, error) {
for i, idx := range s.Index {
if idx.Type == typ {
return s.Blobs[i], nil
}
}
return Blob{}, fmt.Errorf("blob not found")
}
func (s *SuperBlob) Size() int {
sz := binary.Size(s.SbHeader) + binary.Size(BlobHeader{}) + binary.Size(s.Index)
for _, blob := range s.Blobs {
sz += binary.Size(blob.BlobHeader)
sz += len(blob.Data)
}
return sz
}
func (s *SuperBlob) Write(buf *bytes.Buffer, o binary.ByteOrder) error {
off := uint32(binary.Size(s.SbHeader) + binary.Size(s.Index))
for i := range s.Index {
s.Index[i].Offset = off
off += s.Blobs[i].Length
}
if err := binary.Write(buf, o, s.SbHeader); err != nil {
return fmt.Errorf("failed to write SuperBlob header to buffer: %v", err)
}
if err := binary.Write(buf, o, s.Index); err != nil {
return fmt.Errorf("failed to write SuperBlob indices to buffer: %v", err)
}
for _, blob := range s.Blobs {
if err := binary.Write(buf, o, blob.BlobHeader); err != nil {
return fmt.Errorf("failed to write blob header to superblob buffer: %v", err)
}
if err := binary.Write(buf, o, blob.Data); err != nil {
return fmt.Errorf("failed to write blob data to superblob buffer: %v", err)
}
}
return nil
}
type SlotType uint32
const (
CSSLOT_CODEDIRECTORY SlotType = 0
CSSLOT_INFOSLOT SlotType = 1 // Info.plist
CSSLOT_REQUIREMENTS SlotType = 2 // internal requirements
CSSLOT_RESOURCEDIR SlotType = 3 // resource directory
CSSLOT_APPLICATION SlotType = 4 // Application specific slot/Top-level directory list
CSSLOT_ENTITLEMENTS SlotType = 5 // embedded entitlement configuration
CSSLOT_REP_SPECIFIC SlotType = 6 // for use by disk images
CSSLOT_ENTITLEMENTS_DER SlotType = 7 // DER representation of entitlements plist
CSSLOT_LAUNCH_CONSTRAINT_SELF SlotType = 8
CSSLOT_LAUNCH_CONSTRAINT_PARENT SlotType = 9
CSSLOT_LAUNCH_CONSTRAINT_RESPONSIBLE SlotType = 10
CSSLOT_LIBRARY_CONSTRAINT SlotType = 11
CSSLOT_ALTERNATE_CODEDIRECTORIES SlotType = 0x1000 // Used for expressing a code directory using an alternate digest type.
CSSLOT_ALTERNATE_CODEDIRECTORIES1 SlotType = 0x1001 // Used for expressing a code directory using an alternate digest type.
CSSLOT_ALTERNATE_CODEDIRECTORIES2 SlotType = 0x1002 // Used for expressing a code directory using an alternate digest type.
CSSLOT_ALTERNATE_CODEDIRECTORIES3 SlotType = 0x1003 // Used for expressing a code directory using an alternate digest type.
CSSLOT_ALTERNATE_CODEDIRECTORIES4 SlotType = 0x1004 // Used for expressing a code directory using an alternate digest type.
CSSLOT_ALTERNATE_CODEDIRECTORY_MAX = 5
CSSLOT_ALTERNATE_CODEDIRECTORY_LIMIT = CSSLOT_ALTERNATE_CODEDIRECTORIES + CSSLOT_ALTERNATE_CODEDIRECTORY_MAX
CSSLOT_CMS_SIGNATURE SlotType = 0x10000 // CMS signature
CSSLOT_IDENTIFICATIONSLOT SlotType = 0x10001 // identification blob; used for detached signature
CSSLOT_TICKETSLOT SlotType = 0x10002 // Notarization ticket
)
func (c SlotType) String() string {
switch c {
case CSSLOT_CODEDIRECTORY:
return "CodeDirectory"
case CSSLOT_INFOSLOT:
return "Bound Info.plist"
case CSSLOT_REQUIREMENTS:
return "Requirements Blob"
case CSSLOT_RESOURCEDIR:
return "Resource Directory"
case CSSLOT_APPLICATION:
return "Application Specific"
case CSSLOT_ENTITLEMENTS:
return "Entitlements Plist"
case CSSLOT_REP_SPECIFIC:
return "DMG Specific"
case CSSLOT_ENTITLEMENTS_DER:
return "Entitlements ASN1/DER"
case CSSLOT_LAUNCH_CONSTRAINT_SELF:
return "Launch Constraint (self)"
case CSSLOT_LAUNCH_CONSTRAINT_PARENT:
return "Launch Constraint (parent)"
case CSSLOT_LAUNCH_CONSTRAINT_RESPONSIBLE:
return "Launch Constraint (responsible proc)"
case CSSLOT_LIBRARY_CONSTRAINT:
return "Library Constraint"
case CSSLOT_ALTERNATE_CODEDIRECTORIES:
return "Alternate CodeDirectories 0"
case CSSLOT_ALTERNATE_CODEDIRECTORIES1:
return "Alternate CodeDirectories 1"
case CSSLOT_ALTERNATE_CODEDIRECTORIES2:
return "Alternate CodeDirectories 2"
case CSSLOT_ALTERNATE_CODEDIRECTORIES3:
return "Alternate CodeDirectories 3"
case CSSLOT_ALTERNATE_CODEDIRECTORIES4:
return "Alternate CodeDirectories 4"
case CSSLOT_CMS_SIGNATURE:
return "CMS (RFC3852) signature"
case CSSLOT_IDENTIFICATIONSLOT:
return "IdentificationSlot"
case CSSLOT_TICKETSLOT:
return "TicketSlot"
default:
return fmt.Sprintf("Unknown SlotType: %d", c)
}
}
// BlobIndex object
type BlobIndex struct {
Type SlotType `json:"type,omitempty"` // type of entry
Offset uint32 `json:"offset,omitempty"` // offset of entry
}
type BlobHeader struct {
Magic Magic `json:"magic,omitempty"` // magic number
Length uint32 `json:"length,omitempty"` // total length of blob
}
// Blob object
type Blob struct {
BlobHeader
Data []byte // (length - sizeof(blob_header)) bytes
}
func NewBlob(magic Magic, data []byte) Blob {
return Blob{
BlobHeader: BlobHeader{
Magic: magic,
Length: uint32(binary.Size(BlobHeader{}) + len(data)),
},
Data: data,
}
}
func (b Blob) | Sha256Hash | identifier_name | |
storage.go | sql.DB
func executeStatements() {
for {
if qa, ok := <-chQueryArgs; ok {
_, err := db.Exec(qa.query, qa.args...)
if err != nil {
log.Fatal(err)
}
}
}
}
//NewStorage creates returns a new Storage object.
//DBName is the name of the sqllite database file where
//all the users and tweets data will be collected. NewStorage
//create the sqlite file, if it is not already present and creates
//the tables. if the database is present, opens a connection.
func NewStorage(DBName string) *Storage {
s := &Storage{}
mutex.Lock()
if db == nil {
s.checkMakeDatabase(DBName)
db = s.db
if chQueryArgs == nil {
chQueryArgs = make(chan *queryArgs, 100)
go executeStatements()
}
s.setupTables()
}
mutex.Unlock()
return s
}
func (s *Storage) setupTables() {
tableName := "users"
s.makeTable(tableName, fmt.Sprintf(`
CREATE TABLE IF NOT EXISTS %s (
user_id INTEGER PRIMARY KEY,
screen_name TEXT CONSTRAINT uniquescreenname UNIQUE,
description TEXT CONSTRAINT defaultdesc DEFAULT "",
last_looked_at INTEGER CONSTRAINT defaultlastlookedat DEFAULT 0,
latest_tweet_id INTEGER CONSTRAINT defaultlatesttweetid DEFAULT 0,
latest_following_id INTEGER CONSTRAINT defaultlatestfollowingid DEFAULT 0,
latest_follower_id INTEGER CONSTRAINT defaultlatestfollowerid DEFAULT 0,
protected INTEGER CONSTRAINT defaultprotected DEFAULT 0,
processed INTEGER CONSTRAINT defaultprocessed DEFAULT 0,
accepted INTEGER CONSTRAINT defaultaccepted DEFAULT 0,
blob BLOB)`, tableName))
tableName = "tweets"
s.makeTable(tableName, fmt.Sprintf(`
CREATE TABLE IF NOT EXISTS %s(tweet_id INTEGER PRIMARY KEY,
created_at INTEGER,
langugage TEXT,
user_id INTEGER,
desc TEXT,
blob BLOB
-- FOREIGN KEY(screen_name) REFERENCES users(screen_name)
)`, tableName))
tableName = "screennames"
s.makeTable(tableName, fmt.Sprintf(`
CREATE TABLE IF NOT EXISTS %s(screen_name TEXT PRIMARY KEY,
processed INTEGER CONSTRAINT defaultprocessed DEFAULT 0)`, tableName))
tableName = "userids"
s.makeTable(tableName, fmt.Sprintf(`
CREATE TABLE IF NOT EXISTS %s(user_id INTEGER PRIMARY KEY,
processed INTEGER CONSTRAINT defaultprocessed DEFAULT 0)`, tableName))
tableName = "followers"
s.makeTable(tableName, fmt.Sprintf(`
CREATE TABLE IF NOT EXISTS %s(user_id INTEGER,
follower_id INTEGER,
CONSTRAINT uniquemap UNIQUE (user_id, follower_id))`, tableName))
tableName = "following"
s.makeTable(tableName, fmt.Sprintf(`
CREATE TABLE IF NOT EXISTS %s(user_id INTEGER,
following_id INTEGER,
CONSTRAINT uniquemap UNIQUE (user_id, following_id))`, tableName))
}
func (s *Storage) checkMakeDatabase(DBName string) *sql.DB {
var db *sql.DB
db, err := sql.Open("sqlite3", DBName+".db") //?cache=shared&mode=rwc")
if err != nil {
log.Fatal(err)
}
db.Exec("PRAGMA journal_mode=WAL;")
s.db = db
return db
}
func (s *Storage) makeTable(tableName, sqlStmt string) {
_, err := s.db.Exec(sqlStmt)
if err != nil |
}
//StoreScreenName inserts the given screenName into the `screenames` table
func (s *Storage) StoreScreenName(screenName string) {
_, err := s.db.Exec("INSERT OR IGNORE INTO screennames (screen_name) VALUES (?)", screenName)
if err != nil {
log.Fatal(err)
}
}
//StoreUser inserts the Twitter user details into the `users` table.
func (s *Storage) StoreUser(userID int64, screenName, description string, protected bool, blob []byte) {
chQueryArgs <- &queryArgs{"INSERT OR IGNORE INTO users (user_id, screen_name, description, protected, blob) VALUES (?, ?, ?, ?, ?)",
[]interface{}{userID, screenName, description, protected, blob}}
}
//StoreTweet inserts the tweet details into the `tweets` table.
func (s *Storage) StoreTweet(tweetID, createdAt, userID int64, language, desc string, blob []byte) {
chQueryArgs <- &queryArgs{"INSERT OR IGNORE INTO tweets (tweet_id, created_at, langugage, user_id, desc, blob) VALUES (?, ?, ?, ?, ?, ?)",
[]interface{}{tweetID, createdAt, language, userID, desc, blob}}
}
func (s *Storage) storeFriendOrFollower(userID, friendOrFollowerID int64, query string) {
chQueryArgs <- &queryArgs{query, []interface{}{userID, friendOrFollowerID}}
}
//StoreFriends stores the mapping between the userID and the IDs of
//users the follow into the `following` table.
func (s *Storage) StoreFriends(userID int64, friendIDs []int64) {
for _, friendID := range friendIDs {
s.storeFriendOrFollower(userID, friendID, "INSERT OR IGNORE INTO following (user_id, following_id) VALUES (?, ?)")
}
}
//StoreFollowers stores the mapping between the userID and the IDs of
//their followes into the `followers` table.
func (s *Storage) StoreFollowers(userID int64, followerIDs []int64) {
for _, followerID := range followerIDs {
s.storeFriendOrFollower(userID, followerID, "INSERT OR IGNORE INTO followers (user_id, follower_id) VALUES (?, ?)")
}
}
func (s *Storage) storeUserID(userID int64) {
chQueryArgs <- &queryArgs{"INSERT OR IGNORE INTO userids (user_id) VALUES (?)", []interface{}{userID}}
}
//StoreUserIDs stores the given userIDs in the `userids` table
func (s *Storage) StoreUserIDs(userIDs []int64) {
for _, userID := range userIDs {
s.storeUserID(userID)
}
}
func (s *Storage) queryScreenNamesOrIDs(query string, results interface{}) {
rows, err := s.db.Query(query)
if err != nil {
log.Fatal(err)
}
defer rows.Close()
for rows.Next() {
switch x := results.(type) {
case *[]string:
var item string
rows.Scan(&item)
*x = append(*x, item)
case *[]int64:
var item int64
rows.Scan(&item)
*x = append(*x, item)
default:
log.Fatal("results type must be *[]string or *[]int64")
}
}
}
//GetScreenNames gets Twitter handles from the `screenames` table that have already been processed
func (s *Storage) GetScreenNames() []string {
var results []string
s.queryScreenNamesOrIDs("SELECT screen_name from screennames where processed=1", &results)
return results
}
//GetUnprocessedScreenNames gets Twitter handles from the `screenames` table that are yet to be processed
func (s *Storage) GetUnprocessedScreenNames() []string {
var results []string
s.queryScreenNamesOrIDs("SELECT screen_name from screennames where processed=0", &results)
return results
}
//GetUserIDs gets user ids from the `userids` table that have already been processed
func (s *Storage) GetUserIDs() []int64 {
var results []int64
s.queryScreenNamesOrIDs("SELECT user_id from userids where processed=1", &results)
return results
}
//GetUnprocessedUserIDs gets user ids from the `userids` table that are yet to be processed
func (s *Storage) GetUnprocessedUserIDs() []int64 {
var results []int64
s.queryScreenNamesOrIDs("SELECT user_id from userids where processed=0", &results)
return results
}
//GetAcceptedUserIDs gets user ids from the `users` table for whom the user filtering
//function has marked them as accepted for further processing
func (s *Storage) GetAcceptedUserIDs() []int64 {
var results []int64
s.queryScreenNamesOrIDs("SELECT user_id from users where accepted=1", &results)
return results
}
//GetUserByScreenNameOrID gets the UserRow for the given screenName or ID
func (s *Storage) GetUserByScreenNameOrID(screenNameOrID interface{}) *UserRow {
var u UserRow
query := `SELECT user_id,
screen_name,
description,
last_looked_at,
latest_tweet_id,
latest_following_id,
latest_follower_id,
protected,
processed,
accepted,
blob
FROM users
WHERE %s=?`
var row *sql.Row
switch x := screenNameOrID.(type) {
case int64:
query = fmt.Sprintf(query, "user_id")
row = s.db.QueryRow(query, x)
case string:
query = fmt.Sprintf(query, "screen_name")
row = s.db.QueryRow(query, x)
}
err := row.Scan(
&u.ID,
&u.ScreenName,
&u.Description,
&u.LastLook | {
log.Fatalf("%q: %s\n", err, sqlStmt)
return
} | conditional_block |
storage.go | sql.DB
func executeStatements() {
for {
if qa, ok := <-chQueryArgs; ok {
_, err := db.Exec(qa.query, qa.args...)
if err != nil {
log.Fatal(err)
}
}
}
}
//NewStorage creates returns a new Storage object.
//DBName is the name of the sqllite database file where
//all the users and tweets data will be collected. NewStorage
//create the sqlite file, if it is not already present and creates
//the tables. if the database is present, opens a connection.
func NewStorage(DBName string) *Storage {
s := &Storage{}
mutex.Lock()
if db == nil {
s.checkMakeDatabase(DBName)
db = s.db
if chQueryArgs == nil {
chQueryArgs = make(chan *queryArgs, 100)
go executeStatements()
}
| }
mutex.Unlock()
return s
}
func (s *Storage) setupTables() {
tableName := "users"
s.makeTable(tableName, fmt.Sprintf(`
CREATE TABLE IF NOT EXISTS %s (
user_id INTEGER PRIMARY KEY,
screen_name TEXT CONSTRAINT uniquescreenname UNIQUE,
description TEXT CONSTRAINT defaultdesc DEFAULT "",
last_looked_at INTEGER CONSTRAINT defaultlastlookedat DEFAULT 0,
latest_tweet_id INTEGER CONSTRAINT defaultlatesttweetid DEFAULT 0,
latest_following_id INTEGER CONSTRAINT defaultlatestfollowingid DEFAULT 0,
latest_follower_id INTEGER CONSTRAINT defaultlatestfollowerid DEFAULT 0,
protected INTEGER CONSTRAINT defaultprotected DEFAULT 0,
processed INTEGER CONSTRAINT defaultprocessed DEFAULT 0,
accepted INTEGER CONSTRAINT defaultaccepted DEFAULT 0,
blob BLOB)`, tableName))
tableName = "tweets"
s.makeTable(tableName, fmt.Sprintf(`
CREATE TABLE IF NOT EXISTS %s(tweet_id INTEGER PRIMARY KEY,
created_at INTEGER,
langugage TEXT,
user_id INTEGER,
desc TEXT,
blob BLOB
-- FOREIGN KEY(screen_name) REFERENCES users(screen_name)
)`, tableName))
tableName = "screennames"
s.makeTable(tableName, fmt.Sprintf(`
CREATE TABLE IF NOT EXISTS %s(screen_name TEXT PRIMARY KEY,
processed INTEGER CONSTRAINT defaultprocessed DEFAULT 0)`, tableName))
tableName = "userids"
s.makeTable(tableName, fmt.Sprintf(`
CREATE TABLE IF NOT EXISTS %s(user_id INTEGER PRIMARY KEY,
processed INTEGER CONSTRAINT defaultprocessed DEFAULT 0)`, tableName))
tableName = "followers"
s.makeTable(tableName, fmt.Sprintf(`
CREATE TABLE IF NOT EXISTS %s(user_id INTEGER,
follower_id INTEGER,
CONSTRAINT uniquemap UNIQUE (user_id, follower_id))`, tableName))
tableName = "following"
s.makeTable(tableName, fmt.Sprintf(`
CREATE TABLE IF NOT EXISTS %s(user_id INTEGER,
following_id INTEGER,
CONSTRAINT uniquemap UNIQUE (user_id, following_id))`, tableName))
}
func (s *Storage) checkMakeDatabase(DBName string) *sql.DB {
var db *sql.DB
db, err := sql.Open("sqlite3", DBName+".db") //?cache=shared&mode=rwc")
if err != nil {
log.Fatal(err)
}
db.Exec("PRAGMA journal_mode=WAL;")
s.db = db
return db
}
func (s *Storage) makeTable(tableName, sqlStmt string) {
_, err := s.db.Exec(sqlStmt)
if err != nil {
log.Fatalf("%q: %s\n", err, sqlStmt)
return
}
}
//StoreScreenName inserts the given screenName into the `screenames` table
func (s *Storage) StoreScreenName(screenName string) {
_, err := s.db.Exec("INSERT OR IGNORE INTO screennames (screen_name) VALUES (?)", screenName)
if err != nil {
log.Fatal(err)
}
}
//StoreUser inserts the Twitter user details into the `users` table.
func (s *Storage) StoreUser(userID int64, screenName, description string, protected bool, blob []byte) {
chQueryArgs <- &queryArgs{"INSERT OR IGNORE INTO users (user_id, screen_name, description, protected, blob) VALUES (?, ?, ?, ?, ?)",
[]interface{}{userID, screenName, description, protected, blob}}
}
//StoreTweet inserts the tweet details into the `tweets` table.
func (s *Storage) StoreTweet(tweetID, createdAt, userID int64, language, desc string, blob []byte) {
chQueryArgs <- &queryArgs{"INSERT OR IGNORE INTO tweets (tweet_id, created_at, langugage, user_id, desc, blob) VALUES (?, ?, ?, ?, ?, ?)",
[]interface{}{tweetID, createdAt, language, userID, desc, blob}}
}
func (s *Storage) storeFriendOrFollower(userID, friendOrFollowerID int64, query string) {
chQueryArgs <- &queryArgs{query, []interface{}{userID, friendOrFollowerID}}
}
//StoreFriends stores the mapping between the userID and the IDs of
//users the follow into the `following` table.
func (s *Storage) StoreFriends(userID int64, friendIDs []int64) {
for _, friendID := range friendIDs {
s.storeFriendOrFollower(userID, friendID, "INSERT OR IGNORE INTO following (user_id, following_id) VALUES (?, ?)")
}
}
//StoreFollowers stores the mapping between the userID and the IDs of
//their followes into the `followers` table.
func (s *Storage) StoreFollowers(userID int64, followerIDs []int64) {
for _, followerID := range followerIDs {
s.storeFriendOrFollower(userID, followerID, "INSERT OR IGNORE INTO followers (user_id, follower_id) VALUES (?, ?)")
}
}
func (s *Storage) storeUserID(userID int64) {
chQueryArgs <- &queryArgs{"INSERT OR IGNORE INTO userids (user_id) VALUES (?)", []interface{}{userID}}
}
//StoreUserIDs stores the given userIDs in the `userids` table
func (s *Storage) StoreUserIDs(userIDs []int64) {
for _, userID := range userIDs {
s.storeUserID(userID)
}
}
func (s *Storage) queryScreenNamesOrIDs(query string, results interface{}) {
rows, err := s.db.Query(query)
if err != nil {
log.Fatal(err)
}
defer rows.Close()
for rows.Next() {
switch x := results.(type) {
case *[]string:
var item string
rows.Scan(&item)
*x = append(*x, item)
case *[]int64:
var item int64
rows.Scan(&item)
*x = append(*x, item)
default:
log.Fatal("results type must be *[]string or *[]int64")
}
}
}
//GetScreenNames gets Twitter handles from the `screenames` table that have already been processed
func (s *Storage) GetScreenNames() []string {
var results []string
s.queryScreenNamesOrIDs("SELECT screen_name from screennames where processed=1", &results)
return results
}
//GetUnprocessedScreenNames gets Twitter handles from the `screenames` table that are yet to be processed
func (s *Storage) GetUnprocessedScreenNames() []string {
var results []string
s.queryScreenNamesOrIDs("SELECT screen_name from screennames where processed=0", &results)
return results
}
//GetUserIDs gets user ids from the `userids` table that have already been processed
func (s *Storage) GetUserIDs() []int64 {
var results []int64
s.queryScreenNamesOrIDs("SELECT user_id from userids where processed=1", &results)
return results
}
//GetUnprocessedUserIDs gets user ids from the `userids` table that are yet to be processed
func (s *Storage) GetUnprocessedUserIDs() []int64 {
var results []int64
s.queryScreenNamesOrIDs("SELECT user_id from userids where processed=0", &results)
return results
}
//GetAcceptedUserIDs gets user ids from the `users` table for whom the user filtering
//function has marked them as accepted for further processing
func (s *Storage) GetAcceptedUserIDs() []int64 {
var results []int64
s.queryScreenNamesOrIDs("SELECT user_id from users where accepted=1", &results)
return results
}
//GetUserByScreenNameOrID gets the UserRow for the given screenName or ID
func (s *Storage) GetUserByScreenNameOrID(screenNameOrID interface{}) *UserRow {
var u UserRow
query := `SELECT user_id,
screen_name,
description,
last_looked_at,
latest_tweet_id,
latest_following_id,
latest_follower_id,
protected,
processed,
accepted,
blob
FROM users
WHERE %s=?`
var row *sql.Row
switch x := screenNameOrID.(type) {
case int64:
query = fmt.Sprintf(query, "user_id")
row = s.db.QueryRow(query, x)
case string:
query = fmt.Sprintf(query, "screen_name")
row = s.db.QueryRow(query, x)
}
err := row.Scan(
&u.ID,
&u.ScreenName,
&u.Description,
&u.LastLookedAt | s.setupTables() | random_line_split |
storage.go | {}
mutex.Lock()
if db == nil {
s.checkMakeDatabase(DBName)
db = s.db
if chQueryArgs == nil {
chQueryArgs = make(chan *queryArgs, 100)
go executeStatements()
}
s.setupTables()
}
mutex.Unlock()
return s
}
func (s *Storage) setupTables() {
tableName := "users"
s.makeTable(tableName, fmt.Sprintf(`
CREATE TABLE IF NOT EXISTS %s (
user_id INTEGER PRIMARY KEY,
screen_name TEXT CONSTRAINT uniquescreenname UNIQUE,
description TEXT CONSTRAINT defaultdesc DEFAULT "",
last_looked_at INTEGER CONSTRAINT defaultlastlookedat DEFAULT 0,
latest_tweet_id INTEGER CONSTRAINT defaultlatesttweetid DEFAULT 0,
latest_following_id INTEGER CONSTRAINT defaultlatestfollowingid DEFAULT 0,
latest_follower_id INTEGER CONSTRAINT defaultlatestfollowerid DEFAULT 0,
protected INTEGER CONSTRAINT defaultprotected DEFAULT 0,
processed INTEGER CONSTRAINT defaultprocessed DEFAULT 0,
accepted INTEGER CONSTRAINT defaultaccepted DEFAULT 0,
blob BLOB)`, tableName))
tableName = "tweets"
s.makeTable(tableName, fmt.Sprintf(`
CREATE TABLE IF NOT EXISTS %s(tweet_id INTEGER PRIMARY KEY,
created_at INTEGER,
langugage TEXT,
user_id INTEGER,
desc TEXT,
blob BLOB
-- FOREIGN KEY(screen_name) REFERENCES users(screen_name)
)`, tableName))
tableName = "screennames"
s.makeTable(tableName, fmt.Sprintf(`
CREATE TABLE IF NOT EXISTS %s(screen_name TEXT PRIMARY KEY,
processed INTEGER CONSTRAINT defaultprocessed DEFAULT 0)`, tableName))
tableName = "userids"
s.makeTable(tableName, fmt.Sprintf(`
CREATE TABLE IF NOT EXISTS %s(user_id INTEGER PRIMARY KEY,
processed INTEGER CONSTRAINT defaultprocessed DEFAULT 0)`, tableName))
tableName = "followers"
s.makeTable(tableName, fmt.Sprintf(`
CREATE TABLE IF NOT EXISTS %s(user_id INTEGER,
follower_id INTEGER,
CONSTRAINT uniquemap UNIQUE (user_id, follower_id))`, tableName))
tableName = "following"
s.makeTable(tableName, fmt.Sprintf(`
CREATE TABLE IF NOT EXISTS %s(user_id INTEGER,
following_id INTEGER,
CONSTRAINT uniquemap UNIQUE (user_id, following_id))`, tableName))
}
func (s *Storage) checkMakeDatabase(DBName string) *sql.DB {
var db *sql.DB
db, err := sql.Open("sqlite3", DBName+".db") //?cache=shared&mode=rwc")
if err != nil {
log.Fatal(err)
}
db.Exec("PRAGMA journal_mode=WAL;")
s.db = db
return db
}
func (s *Storage) makeTable(tableName, sqlStmt string) {
_, err := s.db.Exec(sqlStmt)
if err != nil {
log.Fatalf("%q: %s\n", err, sqlStmt)
return
}
}
//StoreScreenName inserts the given screenName into the `screenames` table
func (s *Storage) StoreScreenName(screenName string) {
_, err := s.db.Exec("INSERT OR IGNORE INTO screennames (screen_name) VALUES (?)", screenName)
if err != nil {
log.Fatal(err)
}
}
//StoreUser inserts the Twitter user details into the `users` table.
func (s *Storage) StoreUser(userID int64, screenName, description string, protected bool, blob []byte) {
chQueryArgs <- &queryArgs{"INSERT OR IGNORE INTO users (user_id, screen_name, description, protected, blob) VALUES (?, ?, ?, ?, ?)",
[]interface{}{userID, screenName, description, protected, blob}}
}
//StoreTweet inserts the tweet details into the `tweets` table.
func (s *Storage) StoreTweet(tweetID, createdAt, userID int64, language, desc string, blob []byte) {
chQueryArgs <- &queryArgs{"INSERT OR IGNORE INTO tweets (tweet_id, created_at, langugage, user_id, desc, blob) VALUES (?, ?, ?, ?, ?, ?)",
[]interface{}{tweetID, createdAt, language, userID, desc, blob}}
}
func (s *Storage) storeFriendOrFollower(userID, friendOrFollowerID int64, query string) {
chQueryArgs <- &queryArgs{query, []interface{}{userID, friendOrFollowerID}}
}
//StoreFriends stores the mapping between the userID and the IDs of
//users the follow into the `following` table.
func (s *Storage) StoreFriends(userID int64, friendIDs []int64) {
for _, friendID := range friendIDs {
s.storeFriendOrFollower(userID, friendID, "INSERT OR IGNORE INTO following (user_id, following_id) VALUES (?, ?)")
}
}
//StoreFollowers stores the mapping between the userID and the IDs of
//their followes into the `followers` table.
func (s *Storage) StoreFollowers(userID int64, followerIDs []int64) {
for _, followerID := range followerIDs {
s.storeFriendOrFollower(userID, followerID, "INSERT OR IGNORE INTO followers (user_id, follower_id) VALUES (?, ?)")
}
}
func (s *Storage) storeUserID(userID int64) {
chQueryArgs <- &queryArgs{"INSERT OR IGNORE INTO userids (user_id) VALUES (?)", []interface{}{userID}}
}
//StoreUserIDs stores the given userIDs in the `userids` table
func (s *Storage) StoreUserIDs(userIDs []int64) {
for _, userID := range userIDs {
s.storeUserID(userID)
}
}
func (s *Storage) queryScreenNamesOrIDs(query string, results interface{}) {
rows, err := s.db.Query(query)
if err != nil {
log.Fatal(err)
}
defer rows.Close()
for rows.Next() {
switch x := results.(type) {
case *[]string:
var item string
rows.Scan(&item)
*x = append(*x, item)
case *[]int64:
var item int64
rows.Scan(&item)
*x = append(*x, item)
default:
log.Fatal("results type must be *[]string or *[]int64")
}
}
}
//GetScreenNames gets Twitter handles from the `screenames` table that have already been processed
func (s *Storage) GetScreenNames() []string {
var results []string
s.queryScreenNamesOrIDs("SELECT screen_name from screennames where processed=1", &results)
return results
}
//GetUnprocessedScreenNames gets Twitter handles from the `screenames` table that are yet to be processed
func (s *Storage) GetUnprocessedScreenNames() []string {
var results []string
s.queryScreenNamesOrIDs("SELECT screen_name from screennames where processed=0", &results)
return results
}
//GetUserIDs gets user ids from the `userids` table that have already been processed
func (s *Storage) GetUserIDs() []int64 {
var results []int64
s.queryScreenNamesOrIDs("SELECT user_id from userids where processed=1", &results)
return results
}
//GetUnprocessedUserIDs gets user ids from the `userids` table that are yet to be processed
func (s *Storage) GetUnprocessedUserIDs() []int64 {
var results []int64
s.queryScreenNamesOrIDs("SELECT user_id from userids where processed=0", &results)
return results
}
//GetAcceptedUserIDs gets user ids from the `users` table for whom the user filtering
//function has marked them as accepted for further processing
func (s *Storage) GetAcceptedUserIDs() []int64 {
var results []int64
s.queryScreenNamesOrIDs("SELECT user_id from users where accepted=1", &results)
return results
}
//GetUserByScreenNameOrID gets the UserRow for the given screenName or ID
func (s *Storage) GetUserByScreenNameOrID(screenNameOrID interface{}) *UserRow {
var u UserRow
query := `SELECT user_id,
screen_name,
description,
last_looked_at,
latest_tweet_id,
latest_following_id,
latest_follower_id,
protected,
processed,
accepted,
blob
FROM users
WHERE %s=?`
var row *sql.Row
switch x := screenNameOrID.(type) {
case int64:
query = fmt.Sprintf(query, "user_id")
row = s.db.QueryRow(query, x)
case string:
query = fmt.Sprintf(query, "screen_name")
row = s.db.QueryRow(query, x)
}
err := row.Scan(
&u.ID,
&u.ScreenName,
&u.Description,
&u.LastLookedAt,
&u.LatestTweetID,
&u.LatestFriendID,
&u.LatestFollowerID,
&u.Protected,
&u.Processed,
&u.Accepted,
&u.Blob)
switch {
case err == sql.ErrNoRows:
return nil
case err != nil:
log.Fatal(err)
}
return &u
}
//MarkUserLatestTweetsCollected updates the `last_looked_at` timestamp and the `latest_tweet_id` for
//the given user in the `users` table
func (s *Storage) | MarkUserLatestTweetsCollected | identifier_name | |
storage.go | sql.DB
func executeStatements() {
for {
if qa, ok := <-chQueryArgs; ok {
_, err := db.Exec(qa.query, qa.args...)
if err != nil {
log.Fatal(err)
}
}
}
}
//NewStorage creates returns a new Storage object.
//DBName is the name of the sqllite database file where
//all the users and tweets data will be collected. NewStorage
//create the sqlite file, if it is not already present and creates
//the tables. if the database is present, opens a connection.
func NewStorage(DBName string) *Storage {
s := &Storage{}
mutex.Lock()
if db == nil {
s.checkMakeDatabase(DBName)
db = s.db
if chQueryArgs == nil {
chQueryArgs = make(chan *queryArgs, 100)
go executeStatements()
}
s.setupTables()
}
mutex.Unlock()
return s
}
func (s *Storage) setupTables() {
tableName := "users"
s.makeTable(tableName, fmt.Sprintf(`
CREATE TABLE IF NOT EXISTS %s (
user_id INTEGER PRIMARY KEY,
screen_name TEXT CONSTRAINT uniquescreenname UNIQUE,
description TEXT CONSTRAINT defaultdesc DEFAULT "",
last_looked_at INTEGER CONSTRAINT defaultlastlookedat DEFAULT 0,
latest_tweet_id INTEGER CONSTRAINT defaultlatesttweetid DEFAULT 0,
latest_following_id INTEGER CONSTRAINT defaultlatestfollowingid DEFAULT 0,
latest_follower_id INTEGER CONSTRAINT defaultlatestfollowerid DEFAULT 0,
protected INTEGER CONSTRAINT defaultprotected DEFAULT 0,
processed INTEGER CONSTRAINT defaultprocessed DEFAULT 0,
accepted INTEGER CONSTRAINT defaultaccepted DEFAULT 0,
blob BLOB)`, tableName))
tableName = "tweets"
s.makeTable(tableName, fmt.Sprintf(`
CREATE TABLE IF NOT EXISTS %s(tweet_id INTEGER PRIMARY KEY,
created_at INTEGER,
langugage TEXT,
user_id INTEGER,
desc TEXT,
blob BLOB
-- FOREIGN KEY(screen_name) REFERENCES users(screen_name)
)`, tableName))
tableName = "screennames"
s.makeTable(tableName, fmt.Sprintf(`
CREATE TABLE IF NOT EXISTS %s(screen_name TEXT PRIMARY KEY,
processed INTEGER CONSTRAINT defaultprocessed DEFAULT 0)`, tableName))
tableName = "userids"
s.makeTable(tableName, fmt.Sprintf(`
CREATE TABLE IF NOT EXISTS %s(user_id INTEGER PRIMARY KEY,
processed INTEGER CONSTRAINT defaultprocessed DEFAULT 0)`, tableName))
tableName = "followers"
s.makeTable(tableName, fmt.Sprintf(`
CREATE TABLE IF NOT EXISTS %s(user_id INTEGER,
follower_id INTEGER,
CONSTRAINT uniquemap UNIQUE (user_id, follower_id))`, tableName))
tableName = "following"
s.makeTable(tableName, fmt.Sprintf(`
CREATE TABLE IF NOT EXISTS %s(user_id INTEGER,
following_id INTEGER,
CONSTRAINT uniquemap UNIQUE (user_id, following_id))`, tableName))
}
func (s *Storage) checkMakeDatabase(DBName string) *sql.DB {
var db *sql.DB
db, err := sql.Open("sqlite3", DBName+".db") //?cache=shared&mode=rwc")
if err != nil {
log.Fatal(err)
}
db.Exec("PRAGMA journal_mode=WAL;")
s.db = db
return db
}
func (s *Storage) makeTable(tableName, sqlStmt string) {
_, err := s.db.Exec(sqlStmt)
if err != nil {
log.Fatalf("%q: %s\n", err, sqlStmt)
return
}
}
//StoreScreenName inserts the given screenName into the `screenames` table
func (s *Storage) StoreScreenName(screenName string) |
//StoreUser inserts the Twitter user details into the `users` table.
func (s *Storage) StoreUser(userID int64, screenName, description string, protected bool, blob []byte) {
chQueryArgs <- &queryArgs{"INSERT OR IGNORE INTO users (user_id, screen_name, description, protected, blob) VALUES (?, ?, ?, ?, ?)",
[]interface{}{userID, screenName, description, protected, blob}}
}
//StoreTweet inserts the tweet details into the `tweets` table.
func (s *Storage) StoreTweet(tweetID, createdAt, userID int64, language, desc string, blob []byte) {
chQueryArgs <- &queryArgs{"INSERT OR IGNORE INTO tweets (tweet_id, created_at, langugage, user_id, desc, blob) VALUES (?, ?, ?, ?, ?, ?)",
[]interface{}{tweetID, createdAt, language, userID, desc, blob}}
}
func (s *Storage) storeFriendOrFollower(userID, friendOrFollowerID int64, query string) {
chQueryArgs <- &queryArgs{query, []interface{}{userID, friendOrFollowerID}}
}
//StoreFriends stores the mapping between the userID and the IDs of
//users the follow into the `following` table.
func (s *Storage) StoreFriends(userID int64, friendIDs []int64) {
for _, friendID := range friendIDs {
s.storeFriendOrFollower(userID, friendID, "INSERT OR IGNORE INTO following (user_id, following_id) VALUES (?, ?)")
}
}
//StoreFollowers stores the mapping between the userID and the IDs of
//their followes into the `followers` table.
func (s *Storage) StoreFollowers(userID int64, followerIDs []int64) {
for _, followerID := range followerIDs {
s.storeFriendOrFollower(userID, followerID, "INSERT OR IGNORE INTO followers (user_id, follower_id) VALUES (?, ?)")
}
}
func (s *Storage) storeUserID(userID int64) {
chQueryArgs <- &queryArgs{"INSERT OR IGNORE INTO userids (user_id) VALUES (?)", []interface{}{userID}}
}
//StoreUserIDs stores the given userIDs in the `userids` table
func (s *Storage) StoreUserIDs(userIDs []int64) {
for _, userID := range userIDs {
s.storeUserID(userID)
}
}
func (s *Storage) queryScreenNamesOrIDs(query string, results interface{}) {
rows, err := s.db.Query(query)
if err != nil {
log.Fatal(err)
}
defer rows.Close()
for rows.Next() {
switch x := results.(type) {
case *[]string:
var item string
rows.Scan(&item)
*x = append(*x, item)
case *[]int64:
var item int64
rows.Scan(&item)
*x = append(*x, item)
default:
log.Fatal("results type must be *[]string or *[]int64")
}
}
}
//GetScreenNames gets Twitter handles from the `screenames` table that have already been processed
func (s *Storage) GetScreenNames() []string {
var results []string
s.queryScreenNamesOrIDs("SELECT screen_name from screennames where processed=1", &results)
return results
}
//GetUnprocessedScreenNames gets Twitter handles from the `screenames` table that are yet to be processed
func (s *Storage) GetUnprocessedScreenNames() []string {
var results []string
s.queryScreenNamesOrIDs("SELECT screen_name from screennames where processed=0", &results)
return results
}
//GetUserIDs gets user ids from the `userids` table that have already been processed
func (s *Storage) GetUserIDs() []int64 {
var results []int64
s.queryScreenNamesOrIDs("SELECT user_id from userids where processed=1", &results)
return results
}
//GetUnprocessedUserIDs gets user ids from the `userids` table that are yet to be processed
func (s *Storage) GetUnprocessedUserIDs() []int64 {
var results []int64
s.queryScreenNamesOrIDs("SELECT user_id from userids where processed=0", &results)
return results
}
//GetAcceptedUserIDs gets user ids from the `users` table for whom the user filtering
//function has marked them as accepted for further processing
func (s *Storage) GetAcceptedUserIDs() []int64 {
var results []int64
s.queryScreenNamesOrIDs("SELECT user_id from users where accepted=1", &results)
return results
}
//GetUserByScreenNameOrID gets the UserRow for the given screenName or ID
func (s *Storage) GetUserByScreenNameOrID(screenNameOrID interface{}) *UserRow {
var u UserRow
query := `SELECT user_id,
screen_name,
description,
last_looked_at,
latest_tweet_id,
latest_following_id,
latest_follower_id,
protected,
processed,
accepted,
blob
FROM users
WHERE %s=?`
var row *sql.Row
switch x := screenNameOrID.(type) {
case int64:
query = fmt.Sprintf(query, "user_id")
row = s.db.QueryRow(query, x)
case string:
query = fmt.Sprintf(query, "screen_name")
row = s.db.QueryRow(query, x)
}
err := row.Scan(
&u.ID,
&u.ScreenName,
&u.Description,
&u.LastLook | {
_, err := s.db.Exec("INSERT OR IGNORE INTO screennames (screen_name) VALUES (?)", screenName)
if err != nil {
log.Fatal(err)
}
} | identifier_body |
equilibrium_computation.py | 3.5)+Iext2 =0
p0 = 0.2 * x1eq - 0.3 * (zeq - 3.5) + Iext2
x2eq = numpy.zeros(shape, dtype=type)
for i in range(shape[1]):
x2eq[0 ,i] = numpy.min(numpy.real(numpy.roots([-1.0, 0.0, 1.0, p0[0 ,i]])))
return x2eq, y2eq
# def pop2eq_calc(n_regions,x1eq,zeq,Iext2):
# shape = x1eq.shape
# type = x1eq.dtype
# #g_eq = 0.1*x1eq (1)
# #y2eq = 6*(x2eq+0.25)*x1eq (2)
# #-x2eq**3 + x2eq -y2eq+2*g_eq-0.3*(zeq-3.5)+Iext2 =0=> (1),(2)
# #-x2eq**3 + x2eq -6*(x2eq+0.25)*x1eq+2*0.1*x1eq-0.3*(zeq-3.5)+Iext2 =0=>
# #-x2eq**3 + (1.0-6*x1eq)*x2eq -1.5*x1eq+ 0.2*x1eq-0.3*(zeq-3.5)+Iext2 =0
# #p3 p1 p0
# #-x2eq**3 + (1.0-6*x1eq)*x2eq -1.3*x1eq -0.3*(zeq-3.5) +Iext2 =0
# p0 = -1.3*x1eq-0.3*(zeq-3.5)+Iext2
# p1 = 1.0-6*x1eq
# x2eq = numpy.zeros(shape, dtype=type)
# for i in range(shape[1]):
# x2eq[0 ,i] = numpy.min( numpy.real( numpy.roots([-1.0, 0.0, p1[i,0], p0[i,0] ]) ) )
# #(2):
# y2eq = 6*(x2eq+0.25)*x1eq
# return x2eq, y2eq
def geq_calc(x1eq):
return 0.1 * x1eq
def x1eq_x0_hypo_optimize_fun(x, ix0, iE, x1EQ, zEQ, x0, x0cr, rx0, y0, Iext1, K, w):
no_x0 = len(ix0)
no_e = len(iE)
type = x1EQ.dtype
i_e = numpy.ones((no_e,1), dtype=type)
i_x0 = numpy.ones((no_x0,1), dtype=type)
#Coupling to from from to
w_e_to_e = numpy.sum(numpy.dot(w[iE][:,iE], numpy.dot(i_e, x1EQ[:,iE]) - numpy.dot(i_e, x1EQ[:,iE]).T), axis=1)
w_x0_to_e = numpy.sum(numpy.dot(w[iE][:, ix0], numpy.dot(i_e, x0) - numpy.dot(i_x0, x1EQ[:,iE]).T), axis=1)
w_e_to_x0 = numpy.sum(numpy.dot(w[ix0][:,iE], numpy.dot(i_x0, x1EQ[:,iE]) - numpy.dot(i_e, x0).T), axis=1)
w_x0_to_x0 = numpy.sum(numpy.dot(w[ix0][:,ix0], numpy.dot(i_x0, x0) - numpy.dot(i_x0, x0).T), axis=1)
fun = numpy.array(x1EQ.shape)
#Known x1eq, unknown x0:
fun[iE] = fz_lin_calc(x1EQ[iE], x[iE], x0cr[iE], rx0[iE], z=zEQ[iE], coupl=K[iE] * (w_e_to_e + w_x0_to_e))
# Known x0, unknown x1eq:
fun[ix0] = fz_lin_calc(x[ix0], x0, x0cr[ix0], rx0[ix0], z=zeq_2d_calc(x[ix0], y0[ix0], Iext1[ix0]),
coupl=K[ix0] * (w_e_to_x0 + w_x0_to_x0))
return fun
def x1eq_x0_hypo_optimize_jac(x, ix0, iE, x1EQ, zEQ, x0, x0cr, rx0, y0, Iext1, K, w):
no_x0 = len(ix0)
no_e = len(iE)
n_regions = no_e + no_x0
type = x1EQ.dtype
i_x0 = numpy.ones((no_x0, 1), dtype=type) | jac_e_x1o = -numpy.dot(i_x0, K[:,iE]) * w[iE][:,ix0]
jac_x0_x0e = numpy.zeros((no_x0,no_e),dtype = type)
jac_x0_x1o = numpy.diag(4 + 3 * x[ix0] ** 2 + 4 * x[ix0] + K[ix0] * numpy.sum(w[ix0][:,ix0], axis=1)) \
- numpy.dot(i_x0, K[:, ix0]) * w[ix0][:, ix0]
jac = numpy.zeros((n_regions,n_regions), dtype=type)
jac[iE][:,iE] = jac_e_x0e
jac[iE][:, ix0] = jac_e_x1o
jac[ix0][:, iE] = jac_x0_x0e
jac[ix0][:, ix0] = jac_x0_x1o
return jac
def x1eq_x0_hypo_optimize(ix0, iE, x1EQ, zEQ, x0, x0cr, rx0, y0, Iext1, K, w):
xinit = numpy.zeros(x1EQ.shape, dtype = x1EQ.dtype)
#Set initial conditions for the optimization algorithm, by ignoring coupling (=0)
# fz = 4 * (x1 - r * x0 + x0cr) - z -coupling = 0
#x0init = (x1 + x0cr -z/4) / rx0
xinit[:, iE] = x0_calc(x1EQ[:, iE], zEQ[:, iE], x0cr[:, iE], rx0[:, iE], 0.0)
#x1eqinit = rx0 * x0 - x0cr + z / 4
xinit[:, ix0] = rx0[:, ix0] * x0 - x0cr[:, ix0] + zEQ[:, ix0] / 4
#Solve:
sol = root(x1eq_x0_hypo_optimize_fun, xinit, args=(ix0, iE, x1EQ, zEQ, x0, x0cr, rx0, y0, Iext1, K, w),
method='lm', jac=x1eq_x0_hypo_optimize_jac, tol=10**(-6), callback=None, options=None) #method='hybr'
if sol.success:
x1EQ[:,ix0] = sol.x[:, ix0]
return x1EQ
else:
raise ValueError(sol.message)
def x1eq_x0_hypo_linTaylor(ix0,iE,x1EQ,zEQ,x0,x0cr,rx0,y0,Iext1,K,w):
no_x0 = len(ix0)
no_e = len(iE)
n_regions = no_e + no_x0
# The equilibria of the nodes of fixed epileptogenicity
x1_eq = x1EQ[:, iE]
z_eq = zEQ[:, iE]
#Prepare linear system to solve:
#The point of the linear Taylor expansion
x1LIN = x1_lin_def(X1_DEF, X1_EQ_CR_DEF, n_regions)
# For regions of fixed equilibria:
ii_e = numpy.ones((1, no_e), dtype=numpy.float32)
we_to_e = numpy.expand_dims(numpy.sum(w[iE][:, iE] * (numpy.dot(ii_e.T, x1_eq) -
numpy.dot(x1_eq.T, ii_e)), axis=1), 1).T
wx0_to_e = -x1_eq * numpy.expand_dims(numpy.sum(w[ix0][:, iE], axis=0), 0)
be = 4.0 * (x1_eq + x0cr[:, iE]) - z_eq - K[:, iE] * (we_to_e + wx0_to_e)
# For regions of fixed x0:
ii_x0 = numpy.ones(( |
jac_e_x0e = numpy.diag(- 4 * rx0[iE]) | random_line_split |
equilibrium_computation.py | .5)+Iext2 =0
p0 = 0.2 * x1eq - 0.3 * (zeq - 3.5) + Iext2
x2eq = numpy.zeros(shape, dtype=type)
for i in range(shape[1]):
|
return x2eq, y2eq
# def pop2eq_calc(n_regions,x1eq,zeq,Iext2):
# shape = x1eq.shape
# type = x1eq.dtype
# #g_eq = 0.1*x1eq (1)
# #y2eq = 6*(x2eq+0.25)*x1eq (2)
# #-x2eq**3 + x2eq -y2eq+2*g_eq-0.3*(zeq-3.5)+Iext2 =0=> (1),(2)
# #-x2eq**3 + x2eq -6*(x2eq+0.25)*x1eq+2*0.1*x1eq-0.3*(zeq-3.5)+Iext2 =0=>
# #-x2eq**3 + (1.0-6*x1eq)*x2eq -1.5*x1eq+ 0.2*x1eq-0.3*(zeq-3.5)+Iext2 =0
# #p3 p1 p0
# #-x2eq**3 + (1.0-6*x1eq)*x2eq -1.3*x1eq -0.3*(zeq-3.5) +Iext2 =0
# p0 = -1.3*x1eq-0.3*(zeq-3.5)+Iext2
# p1 = 1.0-6*x1eq
# x2eq = numpy.zeros(shape, dtype=type)
# for i in range(shape[1]):
# x2eq[0 ,i] = numpy.min( numpy.real( numpy.roots([-1.0, 0.0, p1[i,0], p0[i,0] ]) ) )
# #(2):
# y2eq = 6*(x2eq+0.25)*x1eq
# return x2eq, y2eq
def geq_calc(x1eq):
return 0.1 * x1eq
def x1eq_x0_hypo_optimize_fun(x, ix0, iE, x1EQ, zEQ, x0, x0cr, rx0, y0, Iext1, K, w):
no_x0 = len(ix0)
no_e = len(iE)
type = x1EQ.dtype
i_e = numpy.ones((no_e,1), dtype=type)
i_x0 = numpy.ones((no_x0,1), dtype=type)
#Coupling to from from to
w_e_to_e = numpy.sum(numpy.dot(w[iE][:,iE], numpy.dot(i_e, x1EQ[:,iE]) - numpy.dot(i_e, x1EQ[:,iE]).T), axis=1)
w_x0_to_e = numpy.sum(numpy.dot(w[iE][:, ix0], numpy.dot(i_e, x0) - numpy.dot(i_x0, x1EQ[:,iE]).T), axis=1)
w_e_to_x0 = numpy.sum(numpy.dot(w[ix0][:,iE], numpy.dot(i_x0, x1EQ[:,iE]) - numpy.dot(i_e, x0).T), axis=1)
w_x0_to_x0 = numpy.sum(numpy.dot(w[ix0][:,ix0], numpy.dot(i_x0, x0) - numpy.dot(i_x0, x0).T), axis=1)
fun = numpy.array(x1EQ.shape)
#Known x1eq, unknown x0:
fun[iE] = fz_lin_calc(x1EQ[iE], x[iE], x0cr[iE], rx0[iE], z=zEQ[iE], coupl=K[iE] * (w_e_to_e + w_x0_to_e))
# Known x0, unknown x1eq:
fun[ix0] = fz_lin_calc(x[ix0], x0, x0cr[ix0], rx0[ix0], z=zeq_2d_calc(x[ix0], y0[ix0], Iext1[ix0]),
coupl=K[ix0] * (w_e_to_x0 + w_x0_to_x0))
return fun
def x1eq_x0_hypo_optimize_jac(x, ix0, iE, x1EQ, zEQ, x0, x0cr, rx0, y0, Iext1, K, w):
no_x0 = len(ix0)
no_e = len(iE)
n_regions = no_e + no_x0
type = x1EQ.dtype
i_x0 = numpy.ones((no_x0, 1), dtype=type)
jac_e_x0e = numpy.diag(- 4 * rx0[iE])
jac_e_x1o = -numpy.dot(i_x0, K[:,iE]) * w[iE][:,ix0]
jac_x0_x0e = numpy.zeros((no_x0,no_e),dtype = type)
jac_x0_x1o = numpy.diag(4 + 3 * x[ix0] ** 2 + 4 * x[ix0] + K[ix0] * numpy.sum(w[ix0][:,ix0], axis=1)) \
- numpy.dot(i_x0, K[:, ix0]) * w[ix0][:, ix0]
jac = numpy.zeros((n_regions,n_regions), dtype=type)
jac[iE][:,iE] = jac_e_x0e
jac[iE][:, ix0] = jac_e_x1o
jac[ix0][:, iE] = jac_x0_x0e
jac[ix0][:, ix0] = jac_x0_x1o
return jac
def x1eq_x0_hypo_optimize(ix0, iE, x1EQ, zEQ, x0, x0cr, rx0, y0, Iext1, K, w):
xinit = numpy.zeros(x1EQ.shape, dtype = x1EQ.dtype)
#Set initial conditions for the optimization algorithm, by ignoring coupling (=0)
# fz = 4 * (x1 - r * x0 + x0cr) - z -coupling = 0
#x0init = (x1 + x0cr -z/4) / rx0
xinit[:, iE] = x0_calc(x1EQ[:, iE], zEQ[:, iE], x0cr[:, iE], rx0[:, iE], 0.0)
#x1eqinit = rx0 * x0 - x0cr + z / 4
xinit[:, ix0] = rx0[:, ix0] * x0 - x0cr[:, ix0] + zEQ[:, ix0] / 4
#Solve:
sol = root(x1eq_x0_hypo_optimize_fun, xinit, args=(ix0, iE, x1EQ, zEQ, x0, x0cr, rx0, y0, Iext1, K, w),
method='lm', jac=x1eq_x0_hypo_optimize_jac, tol=10**(-6), callback=None, options=None) #method='hybr'
if sol.success:
x1EQ[:,ix0] = sol.x[:, ix0]
return x1EQ
else:
raise ValueError(sol.message)
def x1eq_x0_hypo_linTaylor(ix0,iE,x1EQ,zEQ,x0,x0cr,rx0,y0,Iext1,K,w):
no_x0 = len(ix0)
no_e = len(iE)
n_regions = no_e + no_x0
# The equilibria of the nodes of fixed epileptogenicity
x1_eq = x1EQ[:, iE]
z_eq = zEQ[:, iE]
#Prepare linear system to solve:
#The point of the linear Taylor expansion
x1LIN = x1_lin_def(X1_DEF, X1_EQ_CR_DEF, n_regions)
# For regions of fixed equilibria:
ii_e = numpy.ones((1, no_e), dtype=numpy.float32)
we_to_e = numpy.expand_dims(numpy.sum(w[iE][:, iE] * (numpy.dot(ii_e.T, x1_eq) -
numpy.dot(x1_eq.T, ii_e)), axis=1), 1).T
wx0_to_e = -x1_eq * numpy.expand_dims(numpy.sum(w[ix0][:, iE], axis=0), 0)
be = 4.0 * (x1_eq + x0cr[:, iE]) - z_eq - K[:, iE] * (we_to_e + wx0_to_e)
# For regions of fixed x0:
ii_x0 = numpy.ones((1 | x2eq[0 ,i] = numpy.min(numpy.real(numpy.roots([-1.0, 0.0, 1.0, p0[0 ,i]]))) | conditional_block |
equilibrium_computation.py | (X1_DEF, X1_EQ_CR_DEF, n_regions):
#The default initial condition for x1 equilibrium search
return numpy.repeat(numpy.array((X1_EQ_CR_DEF + X1_DEF) / 2.0), n_regions)
def x1_lin_def(X1_DEF, X1_EQ_CR_DEF, n_regions):
# The point of the linear Taylor expansion
return numpy.repeat(numpy.array((X1_EQ_CR_DEF + X1_DEF) / 2.0), n_regions)
def fx1_2d_calc(x1, z=0, y0=0, Iext1=0):
x12 = x1 ** 2
return -x1 * x12 - 2 * x12 - z + y0 + Iext1
def fx1_6d_calc(x1, z=0, y0=0, Iext1=0):
x12 = x1 ** 2
return -x1 * x12 + 3 * x12 - z + y0 + Iext1
def fz_lin_calc(x1, x0, x0cr, r, z=0, coupl=0):
return 4 * (x1 - r * x0 + x0cr) - z - coupl
def fz_sig_calc(x1, x0, x0cr, r, z=0, coupl=0):
return 3/(1 + exp(-10 * (x1 + 0.5))) - r * x0 + x0cr - z - coupl
def zeq_2d_calc(x1eq, y0, Iext1):
return fx1_2d_calc(x1eq, z=0, y0=y0, Iext1=Iext1)
def zeq_6d_calc(x1eq, y0, Iext1):
return fx1_6d_calc(x1eq, z=0, y0=y0, Iext1=Iext1)
def y1eq_calc(x1eq, d=5.0):
return 1 - d * x1eq ** 2
def pop2eq_calc(x1eq, zeq, Iext2):
shape = x1eq.shape
type = x1eq.dtype
# g_eq = 0.1*x1eq (1)
# y2eq = 0 (2)
y2eq = numpy.zeros(shape, dtype=type)
# -x2eq**3 + x2eq -y2eq+2*g_eq-0.3*(zeq-3.5)+Iext2 =0=> (1),(2)
# -x2eq**3 + x2eq +2*0.1*x1eq-0.3*(zeq-3.5)+Iext2 =0=>
# p3 p1 p0
# -x2eq**3 + x2eq +0.2*x1eq-0.3*(zeq-3.5)+Iext2 =0
p0 = 0.2 * x1eq - 0.3 * (zeq - 3.5) + Iext2
x2eq = numpy.zeros(shape, dtype=type)
for i in range(shape[1]):
x2eq[0 ,i] = numpy.min(numpy.real(numpy.roots([-1.0, 0.0, 1.0, p0[0 ,i]])))
return x2eq, y2eq
# def pop2eq_calc(n_regions,x1eq,zeq,Iext2):
# shape = x1eq.shape
# type = x1eq.dtype
# #g_eq = 0.1*x1eq (1)
# #y2eq = 6*(x2eq+0.25)*x1eq (2)
# #-x2eq**3 + x2eq -y2eq+2*g_eq-0.3*(zeq-3.5)+Iext2 =0=> (1),(2)
# #-x2eq**3 + x2eq -6*(x2eq+0.25)*x1eq+2*0.1*x1eq-0.3*(zeq-3.5)+Iext2 =0=>
# #-x2eq**3 + (1.0-6*x1eq)*x2eq -1.5*x1eq+ 0.2*x1eq-0.3*(zeq-3.5)+Iext2 =0
# #p3 p1 p0
# #-x2eq**3 + (1.0-6*x1eq)*x2eq -1.3*x1eq -0.3*(zeq-3.5) +Iext2 =0
# p0 = -1.3*x1eq-0.3*(zeq-3.5)+Iext2
# p1 = 1.0-6*x1eq
# x2eq = numpy.zeros(shape, dtype=type)
# for i in range(shape[1]):
# x2eq[0 ,i] = numpy.min( numpy.real( numpy.roots([-1.0, 0.0, p1[i,0], p0[i,0] ]) ) )
# #(2):
# y2eq = 6*(x2eq+0.25)*x1eq
# return x2eq, y2eq
def geq_calc(x1eq):
return 0.1 * x1eq
def x1eq_x0_hypo_optimize_fun(x, ix0, iE, x1EQ, zEQ, x0, x0cr, rx0, y0, Iext1, K, w):
no_x0 = len(ix0)
no_e = len(iE)
type = x1EQ.dtype
i_e = numpy.ones((no_e,1), dtype=type)
i_x0 = numpy.ones((no_x0,1), dtype=type)
#Coupling to from from to
w_e_to_e = numpy.sum(numpy.dot(w[iE][:,iE], numpy.dot(i_e, x1EQ[:,iE]) - numpy.dot(i_e, x1EQ[:,iE]).T), axis=1)
w_x0_to_e = numpy.sum(numpy.dot(w[iE][:, ix0], numpy.dot(i_e, x0) - numpy.dot(i_x0, x1EQ[:,iE]).T), axis=1)
w_e_to_x0 = numpy.sum(numpy.dot(w[ix0][:,iE], numpy.dot(i_x0, x1EQ[:,iE]) - numpy.dot(i_e, x0).T), axis=1)
w_x0_to_x0 = numpy.sum(numpy.dot(w[ix0][:,ix0], numpy.dot(i_x0, x0) - numpy.dot(i_x0, x0).T), axis=1)
fun = numpy.array(x1EQ.shape)
#Known x1eq, unknown x0:
fun[iE] = fz_lin_calc(x1EQ[iE], x[iE], x0cr[iE], rx0[iE], z=zEQ[iE], coupl=K[iE] * (w_e_to_e + w_x0_to_e))
# Known x0, unknown x1eq:
fun[ix0] = fz_lin_calc(x[ix0], x0, x0cr[ix0], rx0[ix0], z=zeq_2d_calc(x[ix0], y0[ix0], Iext1[ix0]),
coupl=K[ix0] * (w_e_to_x0 + w_x0_to_x0))
return fun
def x1eq_x0_hypo_optimize_jac(x, ix0, iE, x1EQ, zEQ, x0, x0cr, rx0, y0, Iext1, K, w):
no_x0 = len(ix0)
no_e = len(iE)
n_regions = no_e + no_x0
type = x1EQ.dtype
i_x0 = numpy.ones((no_x0, 1), dtype=type)
jac_e_x0e = numpy.diag(- 4 * rx0[iE])
jac_e_x1o = -numpy.dot(i_x0, K[:,iE]) * w[iE][:,ix0]
jac_x0_x0e = numpy.zeros((no_x0,no_e),dtype = type)
jac_x0_x1o = numpy.diag(4 + 3 * x[ix0] ** 2 + 4 * x[ix0] + K[ix0] * numpy.sum(w[ix0][:,ix0], axis=1)) \
- numpy.dot(i_x0, K[:, ix0]) * w[ix0][:, ix0]
jac = numpy.zeros((n_regions,n_regions), dtype=type)
jac[iE][:,iE] = jac_e_x0e
jac[iE][:, ix0] = jac_e_x1o
jac[ix0][:, iE] = jac_x0_x0e
jac[ix0][:, ix0] = jac_x0_x1o
return jac
def x1eq_x0_hypo_optimize(ix0, i | x1eq_def | identifier_name | |
equilibrium_computation.py |
def zeq_6d_calc(x1eq, y0, Iext1):
return fx1_6d_calc(x1eq, z=0, y0=y0, Iext1=Iext1)
def y1eq_calc(x1eq, d=5.0):
return 1 - d * x1eq ** 2
def pop2eq_calc(x1eq, zeq, Iext2):
shape = x1eq.shape
type = x1eq.dtype
# g_eq = 0.1*x1eq (1)
# y2eq = 0 (2)
y2eq = numpy.zeros(shape, dtype=type)
# -x2eq**3 + x2eq -y2eq+2*g_eq-0.3*(zeq-3.5)+Iext2 =0=> (1),(2)
# -x2eq**3 + x2eq +2*0.1*x1eq-0.3*(zeq-3.5)+Iext2 =0=>
# p3 p1 p0
# -x2eq**3 + x2eq +0.2*x1eq-0.3*(zeq-3.5)+Iext2 =0
p0 = 0.2 * x1eq - 0.3 * (zeq - 3.5) + Iext2
x2eq = numpy.zeros(shape, dtype=type)
for i in range(shape[1]):
x2eq[0 ,i] = numpy.min(numpy.real(numpy.roots([-1.0, 0.0, 1.0, p0[0 ,i]])))
return x2eq, y2eq
# def pop2eq_calc(n_regions,x1eq,zeq,Iext2):
# shape = x1eq.shape
# type = x1eq.dtype
# #g_eq = 0.1*x1eq (1)
# #y2eq = 6*(x2eq+0.25)*x1eq (2)
# #-x2eq**3 + x2eq -y2eq+2*g_eq-0.3*(zeq-3.5)+Iext2 =0=> (1),(2)
# #-x2eq**3 + x2eq -6*(x2eq+0.25)*x1eq+2*0.1*x1eq-0.3*(zeq-3.5)+Iext2 =0=>
# #-x2eq**3 + (1.0-6*x1eq)*x2eq -1.5*x1eq+ 0.2*x1eq-0.3*(zeq-3.5)+Iext2 =0
# #p3 p1 p0
# #-x2eq**3 + (1.0-6*x1eq)*x2eq -1.3*x1eq -0.3*(zeq-3.5) +Iext2 =0
# p0 = -1.3*x1eq-0.3*(zeq-3.5)+Iext2
# p1 = 1.0-6*x1eq
# x2eq = numpy.zeros(shape, dtype=type)
# for i in range(shape[1]):
# x2eq[0 ,i] = numpy.min( numpy.real( numpy.roots([-1.0, 0.0, p1[i,0], p0[i,0] ]) ) )
# #(2):
# y2eq = 6*(x2eq+0.25)*x1eq
# return x2eq, y2eq
def geq_calc(x1eq):
return 0.1 * x1eq
def x1eq_x0_hypo_optimize_fun(x, ix0, iE, x1EQ, zEQ, x0, x0cr, rx0, y0, Iext1, K, w):
no_x0 = len(ix0)
no_e = len(iE)
type = x1EQ.dtype
i_e = numpy.ones((no_e,1), dtype=type)
i_x0 = numpy.ones((no_x0,1), dtype=type)
#Coupling to from from to
w_e_to_e = numpy.sum(numpy.dot(w[iE][:,iE], numpy.dot(i_e, x1EQ[:,iE]) - numpy.dot(i_e, x1EQ[:,iE]).T), axis=1)
w_x0_to_e = numpy.sum(numpy.dot(w[iE][:, ix0], numpy.dot(i_e, x0) - numpy.dot(i_x0, x1EQ[:,iE]).T), axis=1)
w_e_to_x0 = numpy.sum(numpy.dot(w[ix0][:,iE], numpy.dot(i_x0, x1EQ[:,iE]) - numpy.dot(i_e, x0).T), axis=1)
w_x0_to_x0 = numpy.sum(numpy.dot(w[ix0][:,ix0], numpy.dot(i_x0, x0) - numpy.dot(i_x0, x0).T), axis=1)
fun = numpy.array(x1EQ.shape)
#Known x1eq, unknown x0:
fun[iE] = fz_lin_calc(x1EQ[iE], x[iE], x0cr[iE], rx0[iE], z=zEQ[iE], coupl=K[iE] * (w_e_to_e + w_x0_to_e))
# Known x0, unknown x1eq:
fun[ix0] = fz_lin_calc(x[ix0], x0, x0cr[ix0], rx0[ix0], z=zeq_2d_calc(x[ix0], y0[ix0], Iext1[ix0]),
coupl=K[ix0] * (w_e_to_x0 + w_x0_to_x0))
return fun
def x1eq_x0_hypo_optimize_jac(x, ix0, iE, x1EQ, zEQ, x0, x0cr, rx0, y0, Iext1, K, w):
no_x0 = len(ix0)
no_e = len(iE)
n_regions = no_e + no_x0
type = x1EQ.dtype
i_x0 = numpy.ones((no_x0, 1), dtype=type)
jac_e_x0e = numpy.diag(- 4 * rx0[iE])
jac_e_x1o = -numpy.dot(i_x0, K[:,iE]) * w[iE][:,ix0]
jac_x0_x0e = numpy.zeros((no_x0,no_e),dtype = type)
jac_x0_x1o = numpy.diag(4 + 3 * x[ix0] ** 2 + 4 * x[ix0] + K[ix0] * numpy.sum(w[ix0][:,ix0], axis=1)) \
- numpy.dot(i_x0, K[:, ix0]) * w[ix0][:, ix0]
jac = numpy.zeros((n_regions,n_regions), dtype=type)
jac[iE][:,iE] = jac_e_x0e
jac[iE][:, ix0] = jac_e_x1o
jac[ix0][:, iE] = jac_x0_x0e
jac[ix0][:, ix0] = jac_x0_x1o
return jac
def x1eq_x0_hypo_optimize(ix0, iE, x1EQ, zEQ, x0, x0cr, rx0, y0, Iext1, K, w):
xinit = numpy.zeros(x1EQ.shape, dtype = x1EQ.dtype)
#Set initial conditions for the optimization algorithm, by ignoring coupling (=0)
# fz = 4 * (x1 - r * x0 + x0cr) - z -coupling = 0
#x0init = (x1 + x0cr -z/4) / rx0
xinit[:, iE] = x0_calc(x1EQ[:, iE], zEQ[:, iE], x0cr[:, iE], rx0[:, iE], 0.0)
#x1eqinit = rx0 * x0 - x0cr + z / 4
xinit[:, ix0] = rx0[:, ix0] * x0 - x0cr[:, ix0] + zEQ[:, ix0] / 4
#Solve:
sol = root(x1eq_x0_hypo_optimize_fun, xinit, args=(ix0, iE, x1EQ, zEQ, x0, x0cr, rx0, y0, Iext1, K, w),
method='lm', jac=x1eq_x0_hypo_optimize_jac, tol=10**(-6), callback=None, options=None) #method='hybr'
if sol.success:
x1EQ[:,ix0] = sol.x[:, ix0]
return x1EQ
else:
raise ValueError(sol.message)
def x1eq_x0_hypo_linTaylor(ix0,i | return fx1_2d_calc(x1eq, z=0, y0=y0, Iext1=Iext1) | identifier_body | |
hverse-encounter-helper.user.js | 1.8.0 - title text now includes a short description of the last event detected, updating cross-tab
v1.9.0 - reverted to background tabs for the automatic link opening - the rest has changed enough that foregrounding is too annoying; if `#game` is added to URL, opens in current tab
v1.10.0 - shorten the time-since label in the title
v1.11.0 - cleans up the eventpane contents from "dawn of a new day" events
v2.0.0 - now operates on a master/slave system with BroadcastChannel messages to keep everything synchronised; foundation built for slave displays to not calc updates
v2.0.1 - no longer operates on the hentaiverse pages, BroadcastChannels are not cross-origin so each domain gets one master instance
v3.0.0 - un-stupified the backend (organic code growth is bad, kids)
v3.1.0 - added an "enter the hentaiverse" link on the event pane if there isn't one already
v3.1.1 - fixed a typo, cleaned the file, and bumped the version (forgot to set to 3.1.0)
v3.2.0 - added a timer to reload the page (master page only) after 24 hours, for automated new day xp collection
v3.2.1 - fixed a nasty edge case bug causing an infinite reload loop
PLANNED:
[MINOR] Make the master page post a notification (via GM.notification) when the timer runs out
[MAJOR] Use AJAX to get the news page and update the eventpane with the new content when the timer runs out
*/
/* eslint-enable max-len */
/* global GM_addValueChangeListener, jQuery, $, moment */
// SCRIPT INITIALISATION BEGINS \\
const SCRIPT_NAME = `${GM_info.script.name} V${GM_info.script.version || '???'}`;
const EVTPANE_CSS = [
"width: 720px;",
"height: auto;",
"margin: 5px auto 0px;",
"background: rgb(242, 239, 223);",
"border: 1px solid rgb(92, 13, 18);",
"padding: 3px;",
"font-size: 9pt;",
"text-align: center !important;",
];
const LAST_EVENT_TIMESTAMP_KEY = "lastEventTime";
const LAST_EVENT_NAME_KEY = "lastEventName";
const AUTO_OPEN_IN_BACKGROUND = true;
const PAGE_TITLE = `[$PERIOD.SHORT$ $EVENT.SHORT$] $STATUS$ E-Hentai`;
const HEADER_TEXT = `You $EVENT$ $PERIOD$ ago!`;
const EVENT_CHECKS = {
NEW_DAY: /dawn.+?new\s+day/ui,
RANDOM_FIGHT: /encountered\s+a\s+monster/ui,
};
const EVENT_LABELS = {
NEW_DAY: "woke to a new day",
RANDOM_FIGHT: "encountered a monster",
NO_EVENT: "have been bored since",
};
const EVENT_TITLES = {
NEW_DAY: "🌞",
RANDOM_FIGHT: "💥",
NO_EVENT: "❌",
};
const BUG_CHARS = Object.defineProperty([
'💀',
'💣',
'💔',
'💢',
'💥',
'❌',
'🛑',
'❗',
'🐛',
'🦟',
'🦗',
'🐜',
'🐝',
], 'toString', {
value() {
let s = '';
const bits = Array.from(this);
for (let i = 0; i < 5; i++) {
s += bits.splice(Math.floor(Math.random() * bits.length), 1);
}
return s;
},
});
// SCRIPT CORE BEGINS \\
((window, $, moment) => {
// eslint-disable-next-line no-extend-native
Set.prototype.addAll = Set.prototype.addAll || function addAll(iterable) {
Array.from(iterable).forEach(e => this.add(e));
};
const genid = () => ([1e7] + 1e3 + 4e3 + 8e3 + 1e11)
.repeat(2)
.replace(
/[018]/gu,
c => (c ^ crypto.getRandomValues(new Uint8Array(1))[0] & 15 >> c / 4).toString(16)
);
const SCRIPT_ID = genid();
const REGISTRY = new Set([SCRIPT_ID]);
const RADIO = new BroadcastChannel(SCRIPT_NAME);
const broadcast = (message, disguise) => RADIO.postMessage({
source: disguise || SCRIPT_ID,
event: message,
known: Array.from(REGISTRY.values()),
}); // So, you can lie about the source, for good reason! ...well, not GOOD reason.
RADIO.INITIAL_PING = 'PING';
RADIO.SET_SLAVE = 'SYNC';
RADIO.NEW_MASTER = 'EXCH';
RADIO.INSTANCE_GONE = 'GONE';
RADIO.TICK = 'EXEC';
RADIO.initialise = () => {
console.log(`${SCRIPT_ID} << ${RADIO.INITIAL_PING}`);
broadcast(RADIO.INITIAL_PING);
};
RADIO.slaveToMe = () => {
console.log(`${SCRIPT_ID} << ${RADIO.SET_SLAVE}`);
broadcast(RADIO.SET_SLAVE);
};
RADIO.switchMaster = to => {
console.log(`${SCRIPT_ID} << ${RADIO.NEW_MASTER} // ${to}`);
broadcast(RADIO.NEW_MASTER, to);
};
RADIO.unloadSelf = () => {
console.log(`${SCRIPT_ID} << ${RADIO.INSTANCE_GONE}`);
broadcast(RADIO.INSTANCE_GONE);
};
RADIO.runSlaves = () => {
console.log(`${SCRIPT_ID} << ${RADIO.TICK}`);
broadcast(RADIO.TICK);
};
let MASTER_ID = SCRIPT_ID;
let eventPane = $('#eventpane');
let header;
if (eventPane.length) {
eventPane.css('height', 'auto');
const eventLinks = eventPane.find('a[href]');
eventLinks.each((i, e) => {
const link = $(e);
e.addEventListener('click', () => true);
if (link.text().match(/\bfight\b/ui)) {
if (location.hash == "#debug") {
return;
}
if (location.hash == "#game") {
location.replace(e.href);
}
else {
GM.openInTab(e.href, AUTO_OPEN_IN_BACKGROUND);
}
link.hide();
}
});
const lines = eventPane.children('p, div');
header = lines.first();
}
else {
GM_addStyle(`#eventpane {\n${EVTPANE_CSS.map(e => `\t${e}`).join("\n")}\n}`);
eventPane = $('<div id="eventpane"></div>');
header = $('<div style="font-size:10pt; font-weight:bold; padding:0px; margin:12px auto 2px"></div>');
eventPane.append(header);
eventPane.append('<div style="margin-top: 10px;"></div>');
header.text(BUG_CHARS); // You shouldn't actually SEE this, so if you do...
const news = $('#newsinner'); | news.first().prepend(eventPane);
}
else if (gallery.length) {
gallery.after(eventPane);
}
}
if (!eventPane
.find('a[href]')
.filter((i, e) => (e.href || '').toLowerCase().includes('hentaiverse.org'))
.length
) {
eventPane.append('<p><a href="https://hentaiverse.org/">Enter the HentaiVerse</a></p>');
}
$('#nb a[href]').filter((i, e) => (e.href || '').toLowerCase().includes('hentaiverse.org'))
.parents('#nb > *')
.hide();
if (!moment) {
header.attr('title', 'Failed to load moment.js library').css('border-bottom', '2px dotted red');
return;
}
const lastEvent = () => moment(GM_getValue(LAST_EVENT_TIMESTAMP_KEY, 0) || Date.now().valueOf());
const expandTemplate = (tmpl, durationObj, eventKey) => {
const durationStr = durationObj.humanize();
return tmpl
.replace(/\$PERIOD\$/gu, durationStr)
.replace(
/\$PERIOD.SHORT\$/gu,
durationStr
.replace(/^a\s+few\s+/ui, "0 ")
.replace(/^an?\s+/ui, "1 ")
.replace(/^(\d+)\s+([dhms]).*$/u, "$1$2")
)
.replace(/\$EVENT\$/gu, EVENT_LABELS[eventKey])
.replace(/\$EVENT.SHORT\$/gu, EVENT_TITLES[eventKey])
.replace(/\$STATUS\$/gu, MASTER_ID == SCRIPT_ID ? '👑' : '⛓')
.replace(/^(.)(.+)$/u, (match, g1, g2) => g1.toUpperCase() + g2);
};
let start = lastEvent();
let eventKey = GM_getValue(LAST_EVENT_NAME_KEY, 'NO_EVENT');
const headerText = header.text();
console.log(`Retrieved event header: ${headerText}`);
let foundHeader | const gallery = $('#nb');
if (news.length) { | random_line_split |
hverse-encounter-helper.user.js | 1.8.0 - title text now includes a short description of the last event detected, updating cross-tab
v1.9.0 - reverted to background tabs for the automatic link opening - the rest has changed enough that foregrounding is too annoying; if `#game` is added to URL, opens in current tab
v1.10.0 - shorten the time-since label in the title
v1.11.0 - cleans up the eventpane contents from "dawn of a new day" events
v2.0.0 - now operates on a master/slave system with BroadcastChannel messages to keep everything synchronised; foundation built for slave displays to not calc updates
v2.0.1 - no longer operates on the hentaiverse pages, BroadcastChannels are not cross-origin so each domain gets one master instance
v3.0.0 - un-stupified the backend (organic code growth is bad, kids)
v3.1.0 - added an "enter the hentaiverse" link on the event pane if there isn't one already
v3.1.1 - fixed a typo, cleaned the file, and bumped the version (forgot to set to 3.1.0)
v3.2.0 - added a timer to reload the page (master page only) after 24 hours, for automated new day xp collection
v3.2.1 - fixed a nasty edge case bug causing an infinite reload loop
PLANNED:
[MINOR] Make the master page post a notification (via GM.notification) when the timer runs out
[MAJOR] Use AJAX to get the news page and update the eventpane with the new content when the timer runs out
*/
/* eslint-enable max-len */
/* global GM_addValueChangeListener, jQuery, $, moment */
// SCRIPT INITIALISATION BEGINS \\
const SCRIPT_NAME = `${GM_info.script.name} V${GM_info.script.version || '???'}`;
const EVTPANE_CSS = [
"width: 720px;",
"height: auto;",
"margin: 5px auto 0px;",
"background: rgb(242, 239, 223);",
"border: 1px solid rgb(92, 13, 18);",
"padding: 3px;",
"font-size: 9pt;",
"text-align: center !important;",
];
const LAST_EVENT_TIMESTAMP_KEY = "lastEventTime";
const LAST_EVENT_NAME_KEY = "lastEventName";
const AUTO_OPEN_IN_BACKGROUND = true;
const PAGE_TITLE = `[$PERIOD.SHORT$ $EVENT.SHORT$] $STATUS$ E-Hentai`;
const HEADER_TEXT = `You $EVENT$ $PERIOD$ ago!`;
const EVENT_CHECKS = {
NEW_DAY: /dawn.+?new\s+day/ui,
RANDOM_FIGHT: /encountered\s+a\s+monster/ui,
};
const EVENT_LABELS = {
NEW_DAY: "woke to a new day",
RANDOM_FIGHT: "encountered a monster",
NO_EVENT: "have been bored since",
};
const EVENT_TITLES = {
NEW_DAY: "🌞",
RANDOM_FIGHT: "💥",
NO_EVENT: "❌",
};
const BUG_CHARS = Object.defineProperty([
'💀',
'💣',
'💔',
'💢',
'💥',
'❌',
'🛑',
'❗',
'🐛',
'🦟',
'🦗',
'🐜',
'🐝',
], 'toString', {
value() {
let s = '';
const bits = Array. | this);
for (let i = 0; i < 5; i++) {
s += bits.splice(Math.floor(Math.random() * bits.length), 1);
}
return s;
},
});
// SCRIPT CORE BEGINS \\
((window, $, moment) => {
// eslint-disable-next-line no-extend-native
Set.prototype.addAll = Set.prototype.addAll || function addAll(iterable) {
Array.from(iterable).forEach(e => this.add(e));
};
const genid = () => ([1e7] + 1e3 + 4e3 + 8e3 + 1e11)
.repeat(2)
.replace(
/[018]/gu,
c => (c ^ crypto.getRandomValues(new Uint8Array(1))[0] & 15 >> c / 4).toString(16)
);
const SCRIPT_ID = genid();
const REGISTRY = new Set([SCRIPT_ID]);
const RADIO = new BroadcastChannel(SCRIPT_NAME);
const broadcast = (message, disguise) => RADIO.postMessage({
source: disguise || SCRIPT_ID,
event: message,
known: Array.from(REGISTRY.values()),
}); // So, you can lie about the source, for good reason! ...well, not GOOD reason.
RADIO.INITIAL_PING = 'PING';
RADIO.SET_SLAVE = 'SYNC';
RADIO.NEW_MASTER = 'EXCH';
RADIO.INSTANCE_GONE = 'GONE';
RADIO.TICK = 'EXEC';
RADIO.initialise = () => {
console.log(`${SCRIPT_ID} << ${RADIO.INITIAL_PING}`);
broadcast(RADIO.INITIAL_PING);
};
RADIO.slaveToMe = () => {
console.log(`${SCRIPT_ID} << ${RADIO.SET_SLAVE}`);
broadcast(RADIO.SET_SLAVE);
};
RADIO.switchMaster = to => {
console.log(`${SCRIPT_ID} << ${RADIO.NEW_MASTER} // ${to}`);
broadcast(RADIO.NEW_MASTER, to);
};
RADIO.unloadSelf = () => {
console.log(`${SCRIPT_ID} << ${RADIO.INSTANCE_GONE}`);
broadcast(RADIO.INSTANCE_GONE);
};
RADIO.runSlaves = () => {
console.log(`${SCRIPT_ID} << ${RADIO.TICK}`);
broadcast(RADIO.TICK);
};
let MASTER_ID = SCRIPT_ID;
let eventPane = $('#eventpane');
let header;
if (eventPane.length) {
eventPane.css('height', 'auto');
const eventLinks = eventPane.find('a[href]');
eventLinks.each((i, e) => {
const link = $(e);
e.addEventListener('click', () => true);
if (link.text().match(/\bfight\b/ui)) {
if (location.hash == "#debug") {
return;
}
if (location.hash == "#game") {
location.replace(e.href);
}
else {
GM.openInTab(e.href, AUTO_OPEN_IN_BACKGROUND);
}
link.hide();
}
});
const lines = eventPane.children('p, div');
header = lines.first();
}
else {
GM_addStyle(`#eventpane {\n${EVTPANE_CSS.map(e => `\t${e}`).join("\n")}\n}`);
eventPane = $('<div id="eventpane"></div>');
header = $('<div style="font-size:10pt; font-weight:bold; padding:0px; margin:12px auto 2px"></div>');
eventPane.append(header);
eventPane.append('<div style="margin-top: 10px;"></div>');
header.text(BUG_CHARS); // You shouldn't actually SEE this, so if you do...
const news = $('#newsinner');
const gallery = $('#nb');
if (news.length) {
news.first().prepend(eventPane);
}
else if (gallery.length) {
gallery.after(eventPane);
}
}
if (!eventPane
.find('a[href]')
.filter((i, e) => (e.href || '').toLowerCase().includes('hentaiverse.org'))
.length
) {
eventPane.append('<p><a href="https://hentaiverse.org/">Enter the HentaiVerse</a></p>');
}
$('#nb a[href]').filter((i, e) => (e.href || '').toLowerCase().includes('hentaiverse.org'))
.parents('#nb > *')
.hide();
if (!moment) {
header.attr('title', 'Failed to load moment.js library').css('border-bottom', '2px dotted red');
return;
}
const lastEvent = () => moment(GM_getValue(LAST_EVENT_TIMESTAMP_KEY, 0) || Date.now().valueOf());
const expandTemplate = (tmpl, durationObj, eventKey) => {
const durationStr = durationObj.humanize();
return tmpl
.replace(/\$PERIOD\$/gu, durationStr)
.replace(
/\$PERIOD.SHORT\$/gu,
durationStr
.replace(/^a\s+few\s+/ui, "0 ")
.replace(/^an?\s+/ui, "1 ")
.replace(/^(\d+)\s+([dhms]).*$/u, "$1$2")
)
.replace(/\$EVENT\$/gu, EVENT_LABELS[eventKey])
.replace(/\$EVENT.SHORT\$/gu, EVENT_TITLES[eventKey])
.replace(/\$STATUS\$/gu, MASTER_ID == SCRIPT_ID ? '👑' : '⛓')
.replace(/^(.)(.+)$/u, (match, g1, g2) => g1.toUpperCase() + g2);
};
let start = lastEvent();
let eventKey = GM_getValue(LAST_EVENT_NAME_KEY, 'NO_EVENT');
const headerText = header.text();
console.log(`Retrieved event header: ${headerText}`);
let | from( | identifier_name |
hverse-encounter-helper.user.js | 1.8.0 - title text now includes a short description of the last event detected, updating cross-tab
v1.9.0 - reverted to background tabs for the automatic link opening - the rest has changed enough that foregrounding is too annoying; if `#game` is added to URL, opens in current tab
v1.10.0 - shorten the time-since label in the title
v1.11.0 - cleans up the eventpane contents from "dawn of a new day" events
v2.0.0 - now operates on a master/slave system with BroadcastChannel messages to keep everything synchronised; foundation built for slave displays to not calc updates
v2.0.1 - no longer operates on the hentaiverse pages, BroadcastChannels are not cross-origin so each domain gets one master instance
v3.0.0 - un-stupified the backend (organic code growth is bad, kids)
v3.1.0 - added an "enter the hentaiverse" link on the event pane if there isn't one already
v3.1.1 - fixed a typo, cleaned the file, and bumped the version (forgot to set to 3.1.0)
v3.2.0 - added a timer to reload the page (master page only) after 24 hours, for automated new day xp collection
v3.2.1 - fixed a nasty edge case bug causing an infinite reload loop
PLANNED:
[MINOR] Make the master page post a notification (via GM.notification) when the timer runs out
[MAJOR] Use AJAX to get the news page and update the eventpane with the new content when the timer runs out
*/
/* eslint-enable max-len */
/* global GM_addValueChangeListener, jQuery, $, moment */
// SCRIPT INITIALISATION BEGINS \\
const SCRIPT_NAME = `${GM_info.script.name} V${GM_info.script.version || '???'}`;
const EVTPANE_CSS = [
"width: 720px;",
"height: auto;",
"margin: 5px auto 0px;",
"background: rgb(242, 239, 223);",
"border: 1px solid rgb(92, 13, 18);",
"padding: 3px;",
"font-size: 9pt;",
"text-align: center !important;",
];
const LAST_EVENT_TIMESTAMP_KEY = "lastEventTime";
const LAST_EVENT_NAME_KEY = "lastEventName";
const AUTO_OPEN_IN_BACKGROUND = true;
const PAGE_TITLE = `[$PERIOD.SHORT$ $EVENT.SHORT$] $STATUS$ E-Hentai`;
const HEADER_TEXT = `You $EVENT$ $PERIOD$ ago!`;
const EVENT_CHECKS = {
NEW_DAY: /dawn.+?new\s+day/ui,
RANDOM_FIGHT: /encountered\s+a\s+monster/ui,
};
const EVENT_LABELS = {
NEW_DAY: "woke to a new day",
RANDOM_FIGHT: "encountered a monster",
NO_EVENT: "have been bored since",
};
const EVENT_TITLES = {
NEW_DAY: "🌞",
RANDOM_FIGHT: "💥",
NO_EVENT: "❌",
};
const BUG_CHARS = Object.defineProperty([
'💀',
'💣',
'💔',
'💢',
'💥',
'❌',
'🛑',
'❗',
'🐛',
'🦟',
'🦗',
'🐜',
'🐝',
], 'toString', {
value() {
let s = '';
const bits = Array.from(this);
for (let i = 0; i < 5; i++) {
s += bits.splice(Math.floor(Math.random() * bits.length), 1);
}
return s;
},
});
// SCRIPT CORE BEGINS \\
((window, $, moment) => {
// eslint-disable-next-line no-extend-native
Set.prototype.addAll = Set.prototype.addAll || function addAll(iterable) {
Array.from(iterable).forEach(e => this.add(e));
};
const genid = () => ([1e7] + 1e3 + 4e3 + 8e3 + 1e11)
.repeat(2)
.replace(
/[018]/gu,
c => (c ^ crypto.getRandomValues(new Uint8Array(1))[0] & 15 >> c / 4).toString(16)
);
const SCRIPT_ID = genid();
const REGISTRY = new Set([SCRIPT_ID]);
const RADIO = new BroadcastChannel(SCRIPT_NAME);
const broadcast = (message, disguise) => RADIO.postMessage({
source: disguise || SCRIPT_ID,
event: message,
known: Array.from(REGISTRY.values()),
}); // So, you can lie about the source, for good reason! ...well, not GOOD reason.
RADIO.INITIAL_PING = 'PING';
RADIO.SET_SLAVE = 'SYNC';
RADIO.NEW_MASTER = 'EXCH';
RADIO.INSTANCE_GONE = 'GONE';
RADIO.TICK = 'EXEC';
RADIO.initialise = () => {
console.log(`${SCRIPT_ID} << ${RADIO.INITIAL_PING}`);
broadcast(RADIO.INITIAL_PING);
};
RADIO.slaveToMe = () => {
console.log(`${SCRIPT_ID} << ${RADIO.SET_SLAVE}`);
broadcast(RADIO.SET_SLAVE);
};
RADIO.switchMaster = to => {
console.log(`${SCRIPT_ID} << ${RADIO.NEW_MASTER} // ${to}`);
broadcast(RADIO.NEW_MASTER, to);
};
RADIO.unloadSelf = () => {
console.log(`${SCRIPT_ID} << ${RADIO.INSTANCE_GONE}`);
broadcast(RADIO.INSTANCE_GONE);
};
RADIO.runSlaves = () => {
console.log(`${SCRIPT_ID} << ${RADIO.TICK}`);
broadcast(RADIO.TICK);
};
let MASTER_ID = SCRIPT_ID;
let eventPane = $('#eventpane');
let header;
if (eventPane.length) {
eventPane.css('height', 'auto');
const eventLinks = eventPane.find('a[href]');
eventLinks.each((i, e) => {
const link = $(e);
e.addEventListener('click', () => true);
if (link.text().match(/\bfight\b/ui)) {
if (location.hash == "#debug") {
return;
}
if (location.hash == "#game") {
location.replace(e.href);
}
else {
GM.openInTab(e.href, AUTO_OPEN_IN_BACKGROUND);
}
link.hide();
}
});
const lines = eventPane.children('p, div');
header = lines.first();
}
else {
GM_addStyle(`#eventpane {\n${EVTPANE_CSS.map(e => `\t${e}`).join("\n")}\n}`);
eventPane = $('<div id="eventpane"></div>');
header = $('<div style="font-size:10pt; font-weight:bold; padding:0px; margin:12px auto 2px"></div>');
eventPane.append(header);
eventPane.append('<div style="margin-top: 10px;"></div>');
header.text(BUG_CHARS); // You shouldn't actually SEE this, so if you do...
const news = $('#newsinner');
const gallery = $('#nb');
if (news.length) {
news.first().prepend(eventPane);
}
else if (gallery.length) {
gallery.after(eventPane);
}
}
if (!eventPane
.find('a[href]')
.filter((i, e) => (e.href || '').toLowerCase().includes('hentaiverse.org'))
.length
) {
eventPane.append('<p><a href="https://hentaiverse.org/">Enter the HentaiVerse</a></p>');
}
$('#nb a[href]').filter((i, e) => (e.href || '').toLowerCase().includes('hentaiverse.org'))
.parents('#nb > *')
.hide();
if (!moment) {
header.attr('title', 'Failed to load mome | LAST_EVENT_TIMESTAMP_KEY, 0) || Date.now().valueOf());
const expandTemplate = (tmpl, durationObj, eventKey) => {
const durationStr = durationObj.humanize();
return tmpl
.replace(/\$PERIOD\$/gu, durationStr)
.replace(
/\$PERIOD.SHORT\$/gu,
durationStr
.replace(/^a\s+few\s+/ui, "0 ")
.replace(/^an?\s+/ui, "1 ")
.replace(/^(\d+)\s+([dhms]).*$/u, "$1$2")
)
.replace(/\$EVENT\$/gu, EVENT_LABELS[eventKey])
.replace(/\$EVENT.SHORT\$/gu, EVENT_TITLES[eventKey])
.replace(/\$STATUS\$/gu, MASTER_ID == SCRIPT_ID ? '👑' : '⛓')
.replace(/^(.)(.+)$/u, (match, g1, g2) => g1.toUpperCase() + g2);
};
let start = lastEvent();
let eventKey = GM_getValue(LAST_EVENT_NAME_KEY, 'NO_EVENT');
const headerText = header.text();
console.log(`Retrieved event header: ${headerText}`);
let | nt.js library').css('border-bottom', '2px dotted red');
return;
}
const lastEvent = () => moment(GM_getValue( | conditional_block |
hverse-encounter-helper.user.js | 1.8.0 - title text now includes a short description of the last event detected, updating cross-tab
v1.9.0 - reverted to background tabs for the automatic link opening - the rest has changed enough that foregrounding is too annoying; if `#game` is added to URL, opens in current tab
v1.10.0 - shorten the time-since label in the title
v1.11.0 - cleans up the eventpane contents from "dawn of a new day" events
v2.0.0 - now operates on a master/slave system with BroadcastChannel messages to keep everything synchronised; foundation built for slave displays to not calc updates
v2.0.1 - no longer operates on the hentaiverse pages, BroadcastChannels are not cross-origin so each domain gets one master instance
v3.0.0 - un-stupified the backend (organic code growth is bad, kids)
v3.1.0 - added an "enter the hentaiverse" link on the event pane if there isn't one already
v3.1.1 - fixed a typo, cleaned the file, and bumped the version (forgot to set to 3.1.0)
v3.2.0 - added a timer to reload the page (master page only) after 24 hours, for automated new day xp collection
v3.2.1 - fixed a nasty edge case bug causing an infinite reload loop
PLANNED:
[MINOR] Make the master page post a notification (via GM.notification) when the timer runs out
[MAJOR] Use AJAX to get the news page and update the eventpane with the new content when the timer runs out
*/
/* eslint-enable max-len */
/* global GM_addValueChangeListener, jQuery, $, moment */
// SCRIPT INITIALISATION BEGINS \\
const SCRIPT_NAME = `${GM_info.script.name} V${GM_info.script.version || '???'}`;
const EVTPANE_CSS = [
"width: 720px;",
"height: auto;",
"margin: 5px auto 0px;",
"background: rgb(242, 239, 223);",
"border: 1px solid rgb(92, 13, 18);",
"padding: 3px;",
"font-size: 9pt;",
"text-align: center !important;",
];
const LAST_EVENT_TIMESTAMP_KEY = "lastEventTime";
const LAST_EVENT_NAME_KEY = "lastEventName";
const AUTO_OPEN_IN_BACKGROUND = true;
const PAGE_TITLE = `[$PERIOD.SHORT$ $EVENT.SHORT$] $STATUS$ E-Hentai`;
const HEADER_TEXT = `You $EVENT$ $PERIOD$ ago!`;
const EVENT_CHECKS = {
NEW_DAY: /dawn.+?new\s+day/ui,
RANDOM_FIGHT: /encountered\s+a\s+monster/ui,
};
const EVENT_LABELS = {
NEW_DAY: "woke to a new day",
RANDOM_FIGHT: "encountered a monster",
NO_EVENT: "have been bored since",
};
const EVENT_TITLES = {
NEW_DAY: "🌞",
RANDOM_FIGHT: "💥",
NO_EVENT: "❌",
};
const BUG_CHARS = Object.defineProperty([
'💀',
'💣',
'💔',
'💢',
'💥',
'❌',
'🛑',
'❗',
'🐛',
'🦟',
'🦗',
'🐜',
'🐝',
], 'toString', {
value() {
let s = '';
const bits = Array.from(thi | moment) => {
// eslint-disable-next-line no-extend-native
Set.prototype.addAll = Set.prototype.addAll || function addAll(iterable) {
Array.from(iterable).forEach(e => this.add(e));
};
const genid = () => ([1e7] + 1e3 + 4e3 + 8e3 + 1e11)
.repeat(2)
.replace(
/[018]/gu,
c => (c ^ crypto.getRandomValues(new Uint8Array(1))[0] & 15 >> c / 4).toString(16)
);
const SCRIPT_ID = genid();
const REGISTRY = new Set([SCRIPT_ID]);
const RADIO = new BroadcastChannel(SCRIPT_NAME);
const broadcast = (message, disguise) => RADIO.postMessage({
source: disguise || SCRIPT_ID,
event: message,
known: Array.from(REGISTRY.values()),
}); // So, you can lie about the source, for good reason! ...well, not GOOD reason.
RADIO.INITIAL_PING = 'PING';
RADIO.SET_SLAVE = 'SYNC';
RADIO.NEW_MASTER = 'EXCH';
RADIO.INSTANCE_GONE = 'GONE';
RADIO.TICK = 'EXEC';
RADIO.initialise = () => {
console.log(`${SCRIPT_ID} << ${RADIO.INITIAL_PING}`);
broadcast(RADIO.INITIAL_PING);
};
RADIO.slaveToMe = () => {
console.log(`${SCRIPT_ID} << ${RADIO.SET_SLAVE}`);
broadcast(RADIO.SET_SLAVE);
};
RADIO.switchMaster = to => {
console.log(`${SCRIPT_ID} << ${RADIO.NEW_MASTER} // ${to}`);
broadcast(RADIO.NEW_MASTER, to);
};
RADIO.unloadSelf = () => {
console.log(`${SCRIPT_ID} << ${RADIO.INSTANCE_GONE}`);
broadcast(RADIO.INSTANCE_GONE);
};
RADIO.runSlaves = () => {
console.log(`${SCRIPT_ID} << ${RADIO.TICK}`);
broadcast(RADIO.TICK);
};
let MASTER_ID = SCRIPT_ID;
let eventPane = $('#eventpane');
let header;
if (eventPane.length) {
eventPane.css('height', 'auto');
const eventLinks = eventPane.find('a[href]');
eventLinks.each((i, e) => {
const link = $(e);
e.addEventListener('click', () => true);
if (link.text().match(/\bfight\b/ui)) {
if (location.hash == "#debug") {
return;
}
if (location.hash == "#game") {
location.replace(e.href);
}
else {
GM.openInTab(e.href, AUTO_OPEN_IN_BACKGROUND);
}
link.hide();
}
});
const lines = eventPane.children('p, div');
header = lines.first();
}
else {
GM_addStyle(`#eventpane {\n${EVTPANE_CSS.map(e => `\t${e}`).join("\n")}\n}`);
eventPane = $('<div id="eventpane"></div>');
header = $('<div style="font-size:10pt; font-weight:bold; padding:0px; margin:12px auto 2px"></div>');
eventPane.append(header);
eventPane.append('<div style="margin-top: 10px;"></div>');
header.text(BUG_CHARS); // You shouldn't actually SEE this, so if you do...
const news = $('#newsinner');
const gallery = $('#nb');
if (news.length) {
news.first().prepend(eventPane);
}
else if (gallery.length) {
gallery.after(eventPane);
}
}
if (!eventPane
.find('a[href]')
.filter((i, e) => (e.href || '').toLowerCase().includes('hentaiverse.org'))
.length
) {
eventPane.append('<p><a href="https://hentaiverse.org/">Enter the HentaiVerse</a></p>');
}
$('#nb a[href]').filter((i, e) => (e.href || '').toLowerCase().includes('hentaiverse.org'))
.parents('#nb > *')
.hide();
if (!moment) {
header.attr('title', 'Failed to load moment.js library').css('border-bottom', '2px dotted red');
return;
}
const lastEvent = () => moment(GM_getValue(LAST_EVENT_TIMESTAMP_KEY, 0) || Date.now().valueOf());
const expandTemplate = (tmpl, durationObj, eventKey) => {
const durationStr = durationObj.humanize();
return tmpl
.replace(/\$PERIOD\$/gu, durationStr)
.replace(
/\$PERIOD.SHORT\$/gu,
durationStr
.replace(/^a\s+few\s+/ui, "0 ")
.replace(/^an?\s+/ui, "1 ")
.replace(/^(\d+)\s+([dhms]).*$/u, "$1$2")
)
.replace(/\$EVENT\$/gu, EVENT_LABELS[eventKey])
.replace(/\$EVENT.SHORT\$/gu, EVENT_TITLES[eventKey])
.replace(/\$STATUS\$/gu, MASTER_ID == SCRIPT_ID ? '👑' : '⛓')
.replace(/^(.)(.+)$/u, (match, g1, g2) => g1.toUpperCase() + g2);
};
let start = lastEvent();
let eventKey = GM_getValue(LAST_EVENT_NAME_KEY, 'NO_EVENT');
const headerText = header.text();
console.log(`Retrieved event header: ${headerText}`);
let | s);
for (let i = 0; i < 5; i++) {
s += bits.splice(Math.floor(Math.random() * bits.length), 1);
}
return s;
},
});
// SCRIPT CORE BEGINS \\
((window, $, | identifier_body |
experiment_types.go | Names of candidates",format="byte"
// +kubebuilder:printcolumn:name="phase",type="string",JSONPath=".status.phase",description="Phase of the experiment",format="byte"
// +kubebuilder:printcolumn:name="winner found",type="boolean",JSONPath=".status.assessment.winner.winning_version_found",description="Winner identified",format="byte"
// +kubebuilder:printcolumn:name="current best",type="string",JSONPath=".status.assessment.winner.name",description="Current best version",format="byte"
// +kubebuilder:printcolumn:name="confidence",priority=1,type="string",JSONPath=".status.assessment.winner.probability_of_winning_for_best_version",description="Confidence current bets version will be the winner",format="float"
// +kubebuilder:printcolumn:name="status",type="string",JSONPath=".status.message",description="Detailed Status of the experiment",format="byte"
// +kubebuilder:printcolumn:name="baseline",priority=1,type="string",JSONPath=".spec.service.baseline",description="Name of baseline",format="byte"
// +kubebuilder:printcolumn:name="candidates",priority=1,type="string",JSONPath=".spec.service.candidates",description="Names of candidates",format="byte"
type Experiment struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec ExperimentSpec `json:"spec"`
// +optional
Status ExperimentStatus `json:"status,omitempty"`
}
// ExperimentList contains a list of Experiment
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type ExperimentList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []Experiment `json:"items"`
}
// ExperimentSpec defines the desired state of Experiment
type ExperimentSpec struct {
// Service is a reference to the service componenets that this experiment is targeting at
Service `json:"service"`
| // Noted that at most one reward metric is allowed
// If more than one reward criterion is included, the first would be used while others would be omitted
// +optional
Criteria []Criterion `json:"criteria,omitempty"`
// TrafficControl provides instructions on traffic management for an experiment
// +optional
TrafficControl *TrafficControl `json:"trafficControl,omitempty"`
// Endpoint of reaching analytics service
// default is http://iter8-analytics:8080
// +optional
AnalyticsEndpoint *string `json:"analyticsEndpoint,omitempty"`
// Duration specifies how often/many times the expriment should re-evaluate the assessment
// +optional
Duration *Duration `json:"duration,omitempty"`
// Cleanup indicates whether routing rules and deployment receiving no traffic should be deleted at the end of experiment
// +optional
Cleanup *bool `json:"cleanup,omitempty"`
// The metrics used in the experiment
// +optional
Metrics *Metrics `json:"metrics,omitempty"`
// User actions to override the current status of the experiment
// +optional
ManualOverride *ManualOverride `json:"manualOverride,omitempty"`
// Networking describes how traffic network should be configured for the experiment
// +optional
Networking *Networking `json:"networking,omitempty"`
}
// Service is a reference to the service that this experiment is targeting at
type Service struct {
// defines the object reference to the service
*corev1.ObjectReference `json:",inline"`
// Name of the baseline deployment
Baseline string `json:"baseline"`
// List of names of candidate deployments
Candidates []string `json:"candidates"`
// Port number exposed by internal services
Port *int32 `json:"port,omitempty"`
}
// Host holds the name of host and gateway associated with it
type Host struct {
// Name of the Host
Name string `json:"name"`
// The gateway associated with the host
Gateway string `json:"gateway"`
}
// Criterion defines the criterion for assessing a target
type Criterion struct {
// Name of metric used in the assessment
Metric string `json:"metric"`
// Threshold specifies the numerical value for a success criterion
// Metric value above threhsold violates the criterion
// +optional
Threshold *Threshold `json:"threshold,omitempty"`
// IsReward indicates whether the metric is a reward metric or not
// +optional
IsReward *bool `json:"isReward,omitempty"`
}
// Threshold defines the value and type of a criterion threshold
type Threshold struct {
// Type of threshold
// relative: value of threshold specifies the relative amount of changes
// absolute: value of threshold indicates an absolute value
//+kubebuilder:validation:Enum={relative,absolute}
Type string `json:"type"`
// Value of threshold
Value float32 `json:"value"`
// Once a target metric violates this threshold, traffic to the target should be cutoff or not
// +optional
CutoffTrafficOnViolation *bool `json:"cutoffTrafficOnViolation,omitempty"`
}
// Duration specifies how often/many times the expriment should re-evaluate the assessment
type Duration struct {
// Interval specifies duration between iterations
// default is 30s
// +optional
Interval *string `json:"interval,omitempty"`
// MaxIterations indicates the amount of iteration
// default is 100
// +optional
MaxIterations *int32 `json:"maxIterations,omitempty"`
}
// TrafficControl specifies constrains on traffic and stratgy used to update the traffic
type TrafficControl struct {
// Strategy used to shift traffic
// default is progressive
// +kubebuilder:validation:Enum={progressive, top_2, uniform}
// +optional
Strategy *StrategyType `json:"strategy,omitempty"`
// OnTermination determines traffic split status at the end of experiment
// +kubebuilder:validation:Enum={to_winner,to_baseline,keep_last}
// +optional
OnTermination *OnTerminationType `json:"onTermination,omitempty"`
// Only requests fulfill the match section would be used in experiment
// Istio matching rules are used
// +optional
Match *Match `json:"match,omitempty"`
// Percentage specifies the amount of traffic to service that would be used in experiment
// default is 100
// +optional
Percentage *int32 `json:"percentage,omitempty"`
// MaxIncrement is the upperlimit of traffic increment for a target in one iteration
// default is 2
// +optional
MaxIncrement *int32 `json:"maxIncrement,omitempty"`
// RouterID refers to the id of router used to handle traffic for the experiment
// If it's not specified, the first entry of effictive host will be used as the id
// +optional
RouterID *string `json:"routerID,omitempty"`
}
// Match contains matching criteria for requests
type Match struct {
// Matching criteria for HTTP requests
// +optional
HTTP []*HTTPMatchRequest `json:"http,omitempty"`
}
// ManualOverride defines actions that the user can perform to an experiment
type ManualOverride struct {
// Action to perform
//+kubebuilder:validation:Enum={pause,resume,terminate}
Action ActionType `json:"action"`
// Traffic split status specification
// Applied to action terminate only
// example:
// reviews-v2:80
// reviews-v3:20
// +optional
TrafficSplit map[string]int32 `json:"trafficSplit,omitempty"`
}
// Networking describes how traffic network should be configured for the experiment
type Networking struct {
// id of router
// +optional
ID *string `json:"id,omitempty"`
// List of hosts used to receive external traffic
// +optional
Hosts []Host `json:"hosts,omitempty"`
}
// Metrics contains definitions for metrics used in the experiment
type Metrics struct {
// List of counter metrics definiton
// +optional
CounterMetrics []CounterMetric `json:"counter_metrics,omitempty"`
// List of ratio metrics definiton
// +optional
RatioMetrics []RatioMetric `json:"ratio_metrics,omitempty"`
}
// CounterMetric is the definition of Counter Metric
type CounterMetric struct {
// Name of metric
Name string `json:"name" yaml:"name"`
// Query template of this metric
QueryTemplate string `json:"query_template" yaml:"query_template"`
// Preferred direction of the metric value
// +optional
PreferredDirection *string `json:"preferred_direction,omitempty" yaml:"preferred_direction,omitempty"`
// Unit of the metric value
// +optional
Unit *string `json:"unit,omitempty" yaml:"unit,omitempty"`
}
// RatioMetric is the definiton of Ratio Metric
type RatioMetric struct {
// name of metric
Name string `json:"name" yaml:"name"`
// Counter metric used in numerator
Numerator string `json:"numerator" yaml:"numerator"`
// Counter metric used in denominator
Denominator string `json:"denominator" yaml:"denominator"`
// Boolean flag indicating if the value of this metric is always in the range 0 to 1
// +optional
ZeroToOne *bool `json:"zero_to_one,omitempty" yaml:"zero_to_one,omitempty"`
// Preferred direction of | // Criteria contains a list of Criterion for assessing the target service | random_line_split |
Blockchain.go | }
if b != nil {
// Create the genesis block with a coinbase transaction
txCoinbase := NewCoinbaseTransacion(address)
genesisBlock := CreateGenesisBlock([]*Transaction{txCoinbase})
err := b.Put(genesisBlock.BlockHash, gobEncode(genesisBlock))
if err != nil {
log.Panic(err)
}
// Update Tip of blockchain
err = b.Put([]byte("l"), genesisBlock.BlockHash)
if err != nil {
log.Panic(err)
}
}
return nil
})
if err != nil {
log.Panic(err)
}
}
// Convert command variables to Transaction Objects
func (blockchain *Blockchain) hanldeTransations(from []string, to []string, amount []string, nodeId string) []*Transaction {
var txs []*Transaction
utxoSet := &UTXOSet{blockchain}
for i := 0; i < len(from); i++ {
amountInt, _ := strconv.Atoi(amount[i])
tx := NewSimpleTransation(from[i], to[i], int64(amountInt), utxoSet, txs, nodeId)
txs = append(txs, tx)
}
return txs
}
// Package transactions and mine a new Block
func (blockchain *Blockchain) MineNewBlock(originalTxs []*Transaction) *Block {
// Reward of mining a block
coinBaseTransaction := NewRewardTransacion()
txs := []*Transaction{coinBaseTransaction}
txs = append(txs, originalTxs...)
// Verify transactions
for _, tx := range txs {
if !tx.IsCoinBaseTransaction() {
if blockchain.VerifityTransaction(tx, txs) == false {
log.Panic("Verify transaction failed...")
}
}
}
DBName := fmt.Sprintf(DBName, os.Getenv("NODE_ID"))
db, err := bolt.Open(DBName, 0600, nil)
if err != nil {
log.Panic(err)
}
defer db.Close()
// Get the latest block
var block Block
err = db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte(BlockBucketName))
if b != nil {
hash := b.Get([]byte("l"))
blockBytes := b.Get(hash)
gobDecode(blockBytes, &block)
}
return nil
})
if err != nil {
log.Panic(err)
}
// Mine a new block
newBlock := NewBlock(txs, block.Height+1, block.BlockHash)
return newBlock
}
// Save a block to the database
func (blockchain *Blockchain) SaveNewBlockToBlockchain(newBlock *Block) {
DBName := fmt.Sprintf(DBName, os.Getenv("NODE_ID"))
db, err := bolt.Open(DBName, 0600, nil)
if err != nil {
log.Panic(err)
}
defer db.Close()
err = db.Update(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte(BlockBucketName))
if b != nil {
b.Put(newBlock.BlockHash, gobEncode(newBlock))
b.Put([]byte("l"), newBlock.BlockHash)
blockchain.Tip = newBlock.BlockHash
}
return nil
})
if err != nil {
log.Panic(err)
}
}
// Get Unspent transaction outputs(UTXOs)
func (blc *Blockchain) getUTXOsByAddress(address string, txs []*Transaction) []*UTXO {
var utxos []*UTXO
spentTxOutputMap := make(map[string][]int)
// calculate UTXOs by querying txs
for i := len(txs) - 1; i >= 0; i-- {
utxos = caculate(txs[i], address, spentTxOutputMap, utxos)
}
// calculate UTXOs by querying Blocks
it := blc.Iterator()
for {
block := it.Next()
for i := len(block.Transactions) - 1; i >= 0; i-- {
utxos = caculate(block.Transactions[i], address, spentTxOutputMap, utxos)
}
hashInt := new(big.Int)
hashInt.SetBytes(block.PrevBlockHash)
// If current block is genesis block, exit loop
if big.NewInt(0).Cmp(hashInt) == 0 {
break
}
}
return utxos
}
// calculate utxos
func caculate(tx *Transaction, address string, spentOutputMap map[string][]int, utxos []*UTXO) []*UTXO {
// collect all inputs into spentOutputMap
if !tx.IsCoinBaseTransaction() {
for _, input := range tx.Inputs {
full_payload := Base58Decode([]byte(address))
pubKeyHash := full_payload[1 : len(full_payload)-addressCheckSumLen]
if input.UnlockWithAddress(pubKeyHash) {
transactionHash := hex.EncodeToString(input.TransactionHash)
spentOutputMap[transactionHash] = append(spentOutputMap[transactionHash], input.IndexOfOutputs)
}
}
}
// Tranverse all outputs, unSpentUTXOs = all outputs - spent outputs
outputsLoop:
for index, output := range tx.Outputs {
if output.UnlockWithAddress(address) {
if len(spentOutputMap) != 0 {
var isSpent bool
for transactionHash, indexArray := range spentOutputMap { //143d,[]int{1}
//遍历 记录已经花费的下标的数组
for _, i := range indexArray {
if i == index && hex.EncodeToString(tx.TransactionHash) == transactionHash {
isSpent = true //标记当前的output是已经花费
continue outputsLoop
}
}
}
if !isSpent {
utxo := &UTXO{tx.TransactionHash, index, output}
utxos = append(utxos, utxo)
}
} else {
utxo := &UTXO{tx.TransactionHash, index, output}
utxos = append(utxos, utxo)
}
}
}
return utxos
}
// Find UTXOs which can be regarded as inputs in this transaction
func (bc *Blockchain) FindSpendableUTXOs(from string, amount int64, txs []*Transaction) (int64, map[string][]int) {
var total int64
spendableMap := make(map[string][]int)
utxos := bc.getUTXOsByAddress(from, txs)
for _, utxo := range utxos {
total += utxo.Output.Value
transactionHash := hex.EncodeToString(utxo.TransactionHash)
spendableMap[transactionHash] = append(spendableMap[transactionHash], utxo.Index)
if total >= amount {
break
}
}
if total < amount {
fmt.Printf("%s,余额不足,无法转账。。", from)
os.Exit(1)
}
return total, spendableMap
}
func (blc *Blockchain) Printchain() {
blockIterator := blc.Iterator()
for {
block := blockIterator.Next()
fmt.Println(block)
var hashInt big.Int
hashInt.SetBytes(block.PrevBlockHash)
if big.NewInt(0).Cmp(&hashInt) == 0 {
break
}
}
}
func (blockchain *Blockchain) Iterator() *BlockchainIterator {
return &BlockchainIterator{blockchain.Tip, blockchain.DB}
}
func DBExists(DBName string) bool {
if _, err := os.Stat(DBName); os.IsNotExist(err) {
return false
}
return true
}
func BlockchainObject(nodeID string) *Blockchain {
DBName := fmt.Sprintf(DBName, nodeID)
if DBExists(DBName) {
db, err := bolt.Open(DBName, 0600, nil)
if err != nil {
log.Panic(err)
}
defer db.Close()
var blockchain *Blockchain
err = db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte(BlockBucketName))
if b != nil {
hash := b.Get([]byte("l"))
blockchain = &Blockchain{hash, db}
}
return nil
})
if err != nil {
log.Panic(err)
}
return blockchain
} else {
fmt.Println("数据库不存在,无法获取BlockChain对象。。。")
return nil
}
}
func (bc *Blockchain) SignTransaction(tx *Transaction, privateKey ecdsa.PrivateKey, txs []*Transaction) {
if tx.IsCoinBaseTransaction() {
return
}
prevTransactionMap := make(map[string]*Transaction | {
DBName := fmt.Sprintf(DBName, nodeID)
if DBExists(DBName) {
fmt.Println("Genesis block already exist!")
os.Exit(1)
}
fmt.Println("Creating genesis block....")
db, err := bolt.Open(DBName, 0600, nil)
if err != nil {
log.Panic(err)
}
defer db.Close()
err = db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucketIfNotExists([]byte(BlockBucketName))
if err != nil {
log.Panic(err) | identifier_body | |
Blockchain.go | = append(txs, tx)
}
return txs
}
// Package transactions and mine a new Block
func (blockchain *Blockchain) MineNewBlock(originalTxs []*Transaction) *Block {
// Reward of mining a block
coinBaseTransaction := NewRewardTransacion()
txs := []*Transaction{coinBaseTransaction}
txs = append(txs, originalTxs...)
// Verify transactions
for _, tx := range txs {
if !tx.IsCoinBaseTransaction() {
if blockchain.VerifityTransaction(tx, txs) == false {
log.Panic("Verify transaction failed...")
}
}
}
DBName := fmt.Sprintf(DBName, os.Getenv("NODE_ID"))
db, err := bolt.Open(DBName, 0600, nil)
if err != nil {
log.Panic(err)
}
defer db.Close()
// Get the latest block
var block Block
err = db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte(BlockBucketName))
if b != nil {
hash := b.Get([]byte("l"))
blockBytes := b.Get(hash)
gobDecode(blockBytes, &block)
}
return nil
})
if err != nil {
log.Panic(err)
}
// Mine a new block
newBlock := NewBlock(txs, block.Height+1, block.BlockHash)
return newBlock
}
// Save a block to the database
func (blockchain *Blockchain) SaveNewBlockToBlockchain(newBlock *Block) {
DBName := fmt.Sprintf(DBName, os.Getenv("NODE_ID"))
db, err := bolt.Open(DBName, 0600, nil)
if err != nil {
log.Panic(err)
}
defer db.Close()
err = db.Update(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte(BlockBucketName))
if b != nil {
b.Put(newBlock.BlockHash, gobEncode(newBlock))
b.Put([]byte("l"), newBlock.BlockHash)
blockchain.Tip = newBlock.BlockHash
}
return nil
})
if err != nil {
log.Panic(err)
}
}
// Get Unspent transaction outputs(UTXOs)
func (blc *Blockchain) getUTXOsByAddress(address string, txs []*Transaction) []*UTXO {
var utxos []*UTXO
spentTxOutputMap := make(map[string][]int)
// calculate UTXOs by querying txs
for i := len(txs) - 1; i >= 0; i-- {
utxos = caculate(txs[i], address, spentTxOutputMap, utxos)
}
// calculate UTXOs by querying Blocks
it := blc.Iterator()
for {
block := it.Next()
for i := len(block.Transactions) - 1; i >= 0; i-- {
utxos = caculate(block.Transactions[i], address, spentTxOutputMap, utxos)
}
hashInt := new(big.Int)
hashInt.SetBytes(block.PrevBlockHash)
// If current block is genesis block, exit loop
if big.NewInt(0).Cmp(hashInt) == 0 {
break
}
}
return utxos
}
// calculate utxos
func caculate(tx *Transaction, address string, spentOutputMap map[string][]int, utxos []*UTXO) []*UTXO {
// collect all inputs into spentOutputMap
if !tx.IsCoinBaseTransaction() {
for _, input := range tx.Inputs {
full_payload := Base58Decode([]byte(address))
pubKeyHash := full_payload[1 : len(full_payload)-addressCheckSumLen]
if input.UnlockWithAddress(pubKeyHash) {
transactionHash := hex.EncodeToString(input.TransactionHash)
spentOutputMap[transactionHash] = append(spentOutputMap[transactionHash], input.IndexOfOutputs)
}
}
}
// Tranverse all outputs, unSpentUTXOs = all outputs - spent outputs
outputsLoop:
for index, output := range tx.Outputs {
if output.UnlockWithAddress(address) {
if len(spentOutputMap) != 0 {
var isSpent bool
for transactionHash, indexArray := range spentOutputMap { //143d,[]int{1}
//遍历 记录已经花费的下标的数组
for _, i := range indexArray {
if i == index && hex.EncodeToString(tx.TransactionHash) == transactionHash {
isSpent = true //标记当前的output是已经花费
continue outputsLoop
}
}
}
if !isSpent {
utxo := &UTXO{tx.TransactionHash, index, output}
utxos = append(utxos, utxo)
}
} else {
utxo := &UTXO{tx.TransactionHash, index, output}
utxos = append(utxos, utxo)
}
}
}
return utxos
}
// Find UTXOs which can be regarded as inputs in this transaction
func (bc *Blockchain) FindSpendableUTXOs(from string, amount int64, txs []*Transaction) (int64, map[string][]int) {
var total int64
spendableMap := make(map[string][]int)
utxos := bc.getUTXOsByAddress(from, txs)
for _, utxo := range utxos {
total += utxo.Output.Value
transactionHash := hex.EncodeToString(utxo.TransactionHash)
spendableMap[transactionHash] = append(spendableMap[transactionHash], utxo.Index)
if total >= amount {
break
}
}
if total < amount {
fmt.Printf("%s,余额不足,无法转账。。", from)
os.Exit(1)
}
return total, spendableMap
}
func (blc *Blockchain) Printchain() {
blockIterator := blc.Iterator()
for {
block := blockIterator.Next()
fmt.Println(block)
var hashInt big.Int
hashInt.SetBytes(block.PrevBlockHash)
if big.NewInt(0).Cmp(&hashInt) == 0 {
break
}
}
}
func (blockchain *Blockchain) Iterator() *BlockchainIterator {
return &BlockchainIterator{blockchain.Tip, blockchain.DB}
}
func DBExists(DBName string) bool {
if _, err := os.Stat(DBName); os.IsNotExist(err) {
return false
}
return true
}
func BlockchainObject(nodeID string) *Blockchain {
DBName := fmt.Sprintf(DBName, nodeID)
if DBExists(DBName) {
db, err := bolt.Open(DBName, 0600, nil)
if err != nil {
log.Panic(err)
}
defer db.Close()
var blockchain *Blockchain
err = db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte(BlockBucketName))
if b != nil {
hash := b.Get([]byte("l"))
blockchain = &Blockchain{hash, db}
}
return nil
})
if err != nil {
log.Panic(err)
}
return blockchain
} else {
fmt.Println("数据库不存在,无法获取BlockChain对象。。。")
return nil
}
}
func (bc *Blockchain) SignTransaction(tx *Transaction, privateKey ecdsa.PrivateKey, txs []*Transaction) {
if tx.IsCoinBaseTransaction() {
return
}
prevTransactionMap := make(map[string]*Transaction)
for _, input := range tx.Inputs {
transactionHash := hex.EncodeToString(input.TransactionHash)
prevTransactionMap[transactionHash] = bc.FindTransactionByTransactionHash(input.TransactionHash, txs)
}
tx.Sign(privateKey, prevTransactionMap)
}
func (bc *Blockchain) FindTransactionByTransactionHash(transactionHash []byte, txs []*Transaction) *Transaction {
for _, tx := range txs {
if bytes.Compare(tx.TransactionHash, transactionHash) == 0 {
return tx
}
}
iterator := bc.Iterator()
for {
block := iterator.Next()
for _, tx := range block.Transactions {
if bytes.Compare(tx.TransactionHash, transactionHash) == 0 {
return tx
}
}
bigInt := new(big.Int)
bigInt.SetBytes(block.PrevBlockHash)
if big.NewInt(0).Cmp(bigInt) == 0 {
break
}
}
return &Transaction{}
}
/*
验证交易的数字签名
*/
func (bc *Blockchain) VerifityTransaction(tx *Transaction, txs []*Transaction) bool {
//要想验证数字签名:私钥+数据 (tx的副本+之前的交易)
//2.获取该tx中的Input,引用之前的transaction中的未花费的output
prevTxs := make(map[string]*Transaction)
for _, input := range tx.Inputs {
transactionHash := hex.EncodeToString(input.TransactionHash)
prevTxs[transactionHash] = bc.FindTransactionByTransactionHash(input.TransactionHash, txs)
}
| fmt.Println("没找到对应交易")
} else {
//fmt.Println("preTxs___________________________________")
//fmt.Println(prevT | if len(prevTxs) == 0 { | random_line_split |
Blockchain.go | = append(txs, tx)
}
return txs
}
// Package transactions and mine a new Block
func (blockchain *Blockchain) MineNewBlock(originalTxs []*Transaction) *Block {
// Reward of mining a block
coinBaseTransaction := NewRewardTransacion()
txs := []*Transaction{coinBaseTransaction}
txs = append(txs, originalTxs...)
// Verify transactions
for _, tx := range txs {
if !tx.IsCoinBaseTransaction() {
if blockchain.VerifityTransaction(tx, txs) == false {
log.Panic("Verify transaction failed...")
}
}
}
DBName := fmt.Sprintf(DBName, os.Getenv("NODE_ID"))
db, err := bolt.Open(DBName, 0600, nil)
if err != nil {
log.Panic(err)
}
defer db.Close()
// Get the latest block
var block Block
err = db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte(BlockBucketName))
if b != nil {
hash := b.Get([]byte("l"))
blockBytes := b.Get(hash)
gobDecode(blockBytes, &block)
}
return nil
})
if err != nil {
log.Panic(err)
}
// Mine a new block
newBlock := NewBlock(txs, block.Height+1, block.BlockHash)
return newBlock
}
// Save a block to the database
func (blockchain *Blockchain) SaveNewBlockToBlockchain(newBlock *Block) {
DBName := fmt.Sprintf(DBName, os.Getenv("NODE_ID"))
db, err := bolt.Open(DBName, 0600, nil)
if err != nil {
log.Panic(err)
}
defer db.Close()
err = db.Update(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte(BlockBucketName))
if b != nil {
b.Put(newBlock.BlockHash, gobEncode(newBlock))
b.Put([]byte("l"), newBlock.BlockHash)
blockchain.Tip = newBlock.BlockHash
}
return nil
})
if err != nil {
log.Panic(err)
}
}
// Get Unspent transaction outputs(UTXOs)
func (blc *Blockchain) getUTXOsByAddress(address string, txs []*Transaction) []*UTXO {
var utxos []*UTXO
spentTxOutputMap := make(map[string][]int)
// calculate UTXOs by querying txs
for i := len(txs) - 1; i >= 0; i-- {
utxos = caculate(txs[i], address, spentTxOutputMap, utxos)
}
// calculate UTXOs by querying Blocks
it := blc.Iterator()
for {
block := it.Next()
for i := len(block.Transactions) - 1; i >= 0; i-- {
utxos = caculate(block.Transactions[i], address, spentTxOutputMap, utxos)
}
hashInt := new(big.Int)
hashInt.SetBytes(block.PrevBlockHash)
// If current block is genesis block, exit loop
if big.NewInt(0).Cmp(hashInt) == 0 {
break
}
}
return utxos
}
// calculate utxos
func caculate(tx *Transaction, address string, spentOutputMap map[string][]int, utxos []*UTXO) []*UTXO {
// collect all inputs into spentOutputMap
if !tx.IsCoinBaseTransaction() {
for _, input := range tx.Inputs {
full_payload := Base58Decode([]byte(address))
pubKeyHash := full_payload[1 : len(full_payload)-addressCheckSumLen]
if input.UnlockWithAddress(pubKeyHash) {
transactionHash := hex.EncodeToString(input.TransactionHash)
spentOutputMap[transactionHash] = append(spentOutputMap[transactionHash], input.IndexOfOutputs)
}
}
}
// Tranverse all outputs, unSpentUTXOs = all outputs - spent outputs
outputsLoop:
for index, output := range tx.Outputs {
if output.UnlockWithAddress(address) {
if len(spentOutputMap) != 0 {
var isSpent bool
for transactionHash, indexArray := range spentOutputMap { //143d,[]int{1}
//遍历 记录已经花费的下标的数组
for _, i := range indexArray {
if i == index && hex.EncodeToString(tx.TransactionHash) == transactionHash {
isSpent = true //标记当前的output是已经花费
continue outputsLoop
}
}
}
if !isSpent {
utxo := &UTXO{tx.TransactionHash, index, output}
utxos = append(utxos, utxo)
}
} else {
utxo := &UTXO{tx.TransactionHash, index, output}
utxos = append(utxos, utxo)
}
}
}
return utxos
}
// Find UTXOs which can be regarded as inputs in this transaction
func (bc *Blockchain) FindSpendableUTXOs(from string, amount int64, txs []*Transaction) (int64, map[string][]int) {
var total int64
spendableMap := make(map[string][]int)
utxos := bc.getUTXOsByAddress(from, txs)
for _, utxo := range utxos {
total += utxo.Output.Value
transactionHash := hex.EncodeToString(utxo.TransactionHash)
spendableMap[transactionHash] = append(spendableMap[transactionHash], utxo.Index)
if total >= amount {
break
}
}
if total < amount {
fmt.Printf("%s,余额不足,无法转账。。", from)
os.Exit(1)
}
return total, spendableMap
}
func (blc *Blockchain) Printchain() {
blockIterator := blc.Iterator()
for {
block := blockIterator.Next()
fmt.Println(block)
var hashInt big.Int
hashInt.SetBytes(block.PrevBlockHash)
if big.NewInt(0).Cmp(&hashInt) == 0 {
break
}
}
}
func (blockchain *Blockchain) Iterator() *BlockchainIterator {
return &BlockchainIterator{blockchain.Tip, blockchain.DB}
}
func DBExists(DBName string) bool {
if _, err := os.Stat(DBName); os.IsNotExist(err) {
return false
}
return true
}
func BlockchainObject(nodeID string) | ame := fmt.Sprintf(DBName, nodeID)
if DBExists(DBName) {
db, err := bolt.Open(DBName, 0600, nil)
if err != nil {
log.Panic(err)
}
defer db.Close()
var blockchain *Blockchain
err = db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte(BlockBucketName))
if b != nil {
hash := b.Get([]byte("l"))
blockchain = &Blockchain{hash, db}
}
return nil
})
if err != nil {
log.Panic(err)
}
return blockchain
} else {
fmt.Println("数据库不存在,无法获取BlockChain对象。。。")
return nil
}
}
func (bc *Blockchain) SignTransaction(tx *Transaction, privateKey ecdsa.PrivateKey, txs []*Transaction) {
if tx.IsCoinBaseTransaction() {
return
}
prevTransactionMap := make(map[string]*Transaction)
for _, input := range tx.Inputs {
transactionHash := hex.EncodeToString(input.TransactionHash)
prevTransactionMap[transactionHash] = bc.FindTransactionByTransactionHash(input.TransactionHash, txs)
}
tx.Sign(privateKey, prevTransactionMap)
}
func (bc *Blockchain) FindTransactionByTransactionHash(transactionHash []byte, txs []*Transaction) *Transaction {
for _, tx := range txs {
if bytes.Compare(tx.TransactionHash, transactionHash) == 0 {
return tx
}
}
iterator := bc.Iterator()
for {
block := iterator.Next()
for _, tx := range block.Transactions {
if bytes.Compare(tx.TransactionHash, transactionHash) == 0 {
return tx
}
}
bigInt := new(big.Int)
bigInt.SetBytes(block.PrevBlockHash)
if big.NewInt(0).Cmp(bigInt) == 0 {
break
}
}
return &Transaction{}
}
/*
验证交易的数字签名
*/
func (bc *Blockchain) VerifityTransaction(tx *Transaction, txs []*Transaction) bool {
//要想验证数字签名:私钥+数据 (tx的副本+之前的交易)
//2.获取该tx中的Input,引用之前的transaction中的未花费的output
prevTxs := make(map[string]*Transaction)
for _, input := range tx.Inputs {
transactionHash := hex.EncodeToString(input.TransactionHash)
prevTxs[transactionHash] = bc.FindTransactionByTransactionHash(input.TransactionHash, txs)
}
if len(prevTxs) == 0 {
fmt.Println("没找到对应交易")
} else {
//fmt.Println("preTxs___________________________________")
//fmt.Println(prev | *Blockchain {
DBN | conditional_block |
Blockchain.go | // If current block is genesis block, exit loop
if big.NewInt(0).Cmp(hashInt) == 0 {
break
}
}
return utxos
}
// calculate utxos
func caculate(tx *Transaction, address string, spentOutputMap map[string][]int, utxos []*UTXO) []*UTXO {
// collect all inputs into spentOutputMap
if !tx.IsCoinBaseTransaction() {
for _, input := range tx.Inputs {
full_payload := Base58Decode([]byte(address))
pubKeyHash := full_payload[1 : len(full_payload)-addressCheckSumLen]
if input.UnlockWithAddress(pubKeyHash) {
transactionHash := hex.EncodeToString(input.TransactionHash)
spentOutputMap[transactionHash] = append(spentOutputMap[transactionHash], input.IndexOfOutputs)
}
}
}
// Tranverse all outputs, unSpentUTXOs = all outputs - spent outputs
outputsLoop:
for index, output := range tx.Outputs {
if output.UnlockWithAddress(address) {
if len(spentOutputMap) != 0 {
var isSpent bool
for transactionHash, indexArray := range spentOutputMap { //143d,[]int{1}
//遍历 记录已经花费的下标的数组
for _, i := range indexArray {
if i == index && hex.EncodeToString(tx.TransactionHash) == transactionHash {
isSpent = true //标记当前的output是已经花费
continue outputsLoop
}
}
}
if !isSpent {
utxo := &UTXO{tx.TransactionHash, index, output}
utxos = append(utxos, utxo)
}
} else {
utxo := &UTXO{tx.TransactionHash, index, output}
utxos = append(utxos, utxo)
}
}
}
return utxos
}
// Find UTXOs which can be regarded as inputs in this transaction
func (bc *Blockchain) FindSpendableUTXOs(from string, amount int64, txs []*Transaction) (int64, map[string][]int) {
var total int64
spendableMap := make(map[string][]int)
utxos := bc.getUTXOsByAddress(from, txs)
for _, utxo := range utxos {
total += utxo.Output.Value
transactionHash := hex.EncodeToString(utxo.TransactionHash)
spendableMap[transactionHash] = append(spendableMap[transactionHash], utxo.Index)
if total >= amount {
break
}
}
if total < amount {
fmt.Printf("%s,余额不足,无法转账。。", from)
os.Exit(1)
}
return total, spendableMap
}
func (blc *Blockchain) Printchain() {
blockIterator := blc.Iterator()
for {
block := blockIterator.Next()
fmt.Println(block)
var hashInt big.Int
hashInt.SetBytes(block.PrevBlockHash)
if big.NewInt(0).Cmp(&hashInt) == 0 {
break
}
}
}
func (blockchain *Blockchain) Iterator() *BlockchainIterator {
return &BlockchainIterator{blockchain.Tip, blockchain.DB}
}
func DBExists(DBName string) bool {
if _, err := os.Stat(DBName); os.IsNotExist(err) {
return false
}
return true
}
func BlockchainObject(nodeID string) *Blockchain {
DBName := fmt.Sprintf(DBName, nodeID)
if DBExists(DBName) {
db, err := bolt.Open(DBName, 0600, nil)
if err != nil {
log.Panic(err)
}
defer db.Close()
var blockchain *Blockchain
err = db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte(BlockBucketName))
if b != nil {
hash := b.Get([]byte("l"))
blockchain = &Blockchain{hash, db}
}
return nil
})
if err != nil {
log.Panic(err)
}
return blockchain
} else {
fmt.Println("数据库不存在,无法获取BlockChain对象。。。")
return nil
}
}
func (bc *Blockchain) SignTransaction(tx *Transaction, privateKey ecdsa.PrivateKey, txs []*Transaction) {
if tx.IsCoinBaseTransaction() {
return
}
prevTransactionMap := make(map[string]*Transaction)
for _, input := range tx.Inputs {
transactionHash := hex.EncodeToString(input.TransactionHash)
prevTransactionMap[transactionHash] = bc.FindTransactionByTransactionHash(input.TransactionHash, txs)
}
tx.Sign(privateKey, prevTransactionMap)
}
func (bc *Blockchain) FindTransactionByTransactionHash(transactionHash []byte, txs []*Transaction) *Transaction {
for _, tx := range txs {
if bytes.Compare(tx.TransactionHash, transactionHash) == 0 {
return tx
}
}
iterator := bc.Iterator()
for {
block := iterator.Next()
for _, tx := range block.Transactions {
if bytes.Compare(tx.TransactionHash, transactionHash) == 0 {
return tx
}
}
bigInt := new(big.Int)
bigInt.SetBytes(block.PrevBlockHash)
if big.NewInt(0).Cmp(bigInt) == 0 {
break
}
}
return &Transaction{}
}
/*
验证交易的数字签名
*/
func (bc *Blockchain) VerifityTransaction(tx *Transaction, txs []*Transaction) bool {
//要想验证数字签名:私钥+数据 (tx的副本+之前的交易)
//2.获取该tx中的Input,引用之前的transaction中的未花费的output
prevTxs := make(map[string]*Transaction)
for _, input := range tx.Inputs {
transactionHash := hex.EncodeToString(input.TransactionHash)
prevTxs[transactionHash] = bc.FindTransactionByTransactionHash(input.TransactionHash, txs)
}
if len(prevTxs) == 0 {
fmt.Println("没找到对应交易")
} else {
//fmt.Println("preTxs___________________________________")
//fmt.Println(prevTxs)
}
//验证
return tx.VerifyTransaction(prevTxs)
//return true
}
func (bc *Blockchain) GetAllUTXOs() map[string]*UTXOArray {
iterator := bc.Iterator()
utxoMap := make(map[string]*UTXOArray)
//已花费的input map
inputMap := make(map[string][]*Input)
for {
block := iterator.Next()
for i := len(block.Transactions) - 1; i >= 0; i-- {
// collect inputs
tx := block.Transactions[i]
transactionHash := hex.EncodeToString(tx.TransactionHash)
utxoArray := &UTXOArray{[]*UTXO{}}
if !tx.IsCoinBaseTransaction() {
for _, input := range tx.Inputs {
transactionHash := hex.EncodeToString(input.TransactionHash)
inputMap[transactionHash] = append(inputMap[transactionHash], input)
}
}
//根据inputMap,遍历outputs 找出 UTXO
outputLoop:
for index, output := range tx.Outputs {
if len(inputMap) > 0 {
//isSpent := false
inputs := inputMap[transactionHash] //如果inputs 存在, 则对应的交易里面某笔output肯定已经被消费
for _, input := range inputs {
//判断input对应的是否当期的output
if index == input.IndexOfOutputs && input.UnlockWithAddress(output.PubKeyHash) {
//此笔output已被消费
//isSpent = true
continue outputLoop
}
}
//if isSpent == false {
//outputs 加进utxoMap
utxo := &UTXO{tx.TransactionHash, index, output}
utxoArray.UTXOs = append(utxoArray.UTXOs, utxo)
//}
} else {
//outputs 加进utxoMap
utxo := &UTXO{tx.TransactionHash, index, output}
utxoArray.UTXOs = append(utxoArray.UTXOs, utxo)
}
}
if len(utxoArray.UTXOs) > 0 {
utxoMap[transactionHash] = utxoArray
}
}
//退出条件
hashBigInt := new(big.Int)
hashBigInt.SetBytes(block.PrevBlockHash)
if big.NewInt(0).Cmp(hashBigInt) == 0 {
break
}
}
return utxoMap
}
func (bc *Blockchain) GetHeight() int64 {
return bc.Iterator().Next().Height
}
func (bc *Blockchain) getAllBlocksHash() [][]byte {
iterator := bc.Iterator()
var blocksHashes [][]byte
for {
block := iterator.Next()
blocksHashes = append(blocksHashes, block.BlockHash)
bigInt := new(big.Int)
bigInt.SetBytes(block.PrevBlockHash)
if big | .NewInt(0 | identifier_name | |
channel.pb.go | ,name=payload,proto3" json:"payload,omitempty"`
}
func (x *Message) Reset() {
*x = Message{}
if protoimpl.UnsafeEnabled {
mi := &file_channel_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Message) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Message) ProtoMessage() {}
func (x *Message) ProtoReflect() protoreflect.Message {
mi := &file_channel_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Message.ProtoReflect.Descriptor instead.
func (*Message) Descriptor() ([]byte, []int) {
return file_channel_proto_rawDescGZIP(), []int{0}
}
func (x *Message) GetPayload() []byte {
if x != nil {
return x.Payload
}
return nil
}
var File_channel_proto protoreflect.FileDescriptor
var file_channel_proto_rawDesc = []byte{
0x0a, 0x0d, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12,
0x05, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x23, 0x0a, 0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67,
0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x01, 0x20, 0x01,
0x28, 0x0c, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x32, 0x6f, 0x0a, 0x07, 0x43,
0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x12, 0x32, 0x0a, 0x0a, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74,
0x43, 0x68, 0x61, 0x74, 0x12, 0x0e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4d, 0x65, 0x73,
0x73, 0x61, 0x67, 0x65, 0x1a, 0x0e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4d, 0x65, 0x73,
0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x30, 0x0a, 0x08, 0x50, 0x65,
0x65, 0x72, 0x43, 0x68, 0x61, 0x74, 0x12, 0x0e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4d,
0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x0e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4d,
0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x62, 0x06, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x33,
}
var (
file_channel_proto_rawDescOnce sync.Once
file_channel_proto_rawDescData = file_channel_proto_rawDesc
)
func file_channel_proto_rawDescGZIP() []byte {
file_channel_proto_rawDescOnce.Do(func() {
file_channel_proto_rawDescData = protoimpl.X.CompressGZIP(file_channel_proto_rawDescData)
})
return file_channel_proto_rawDescData
}
var file_channel_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
var file_channel_proto_goTypes = []interface{}{
(*Message)(nil), // 0: proto.Message
}
var file_channel_proto_depIdxs = []int32{
0, // 0: proto.Channel.ClientChat:input_type -> proto.Message
0, // 1: proto.Channel.PeerChat:input_type -> proto.Message
0, // 2: proto.Channel.ClientChat:output_type -> proto.Message
0, // 3: proto.Channel.PeerChat:output_type -> proto.Message
2, // [2:4] is the sub-list for method output_type
0, // [0:2] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
}
func init() { file_channel_proto_init() }
func file_channel_proto_init() {
if File_channel_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_channel_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Message); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_channel_proto_rawDesc,
NumEnums: 0,
NumMessages: 1,
NumExtensions: 0,
NumServices: 1,
},
GoTypes: file_channel_proto_goTypes,
DependencyIndexes: file_channel_proto_depIdxs,
MessageInfos: file_channel_proto_msgTypes, | }.Build()
File_channel_proto = out.File
file_channel_proto_rawDesc = nil
file_channel_proto_goTypes = nil
file_channel_proto_depIdxs = nil
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConnInterface
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion6
// ChannelClient is the client API for Channel service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type ChannelClient interface {
ClientChat(ctx context.Context, opts ...grpc.CallOption) ( | random_line_split | |
channel.pb.go | 0x6f, 0x74, 0x6f, 0x2e, 0x4d, 0x65, 0x73,
0x73, 0x61, 0x67, 0x65, 0x1a, 0x0e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4d, 0x65, 0x73,
0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x30, 0x0a, 0x08, 0x50, 0x65,
0x65, 0x72, 0x43, 0x68, 0x61, 0x74, 0x12, 0x0e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4d,
0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x0e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4d,
0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x62, 0x06, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x33,
}
var (
file_channel_proto_rawDescOnce sync.Once
file_channel_proto_rawDescData = file_channel_proto_rawDesc
)
func file_channel_proto_rawDescGZIP() []byte {
file_channel_proto_rawDescOnce.Do(func() {
file_channel_proto_rawDescData = protoimpl.X.CompressGZIP(file_channel_proto_rawDescData)
})
return file_channel_proto_rawDescData
}
var file_channel_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
var file_channel_proto_goTypes = []interface{}{
(*Message)(nil), // 0: proto.Message
}
var file_channel_proto_depIdxs = []int32{
0, // 0: proto.Channel.ClientChat:input_type -> proto.Message
0, // 1: proto.Channel.PeerChat:input_type -> proto.Message
0, // 2: proto.Channel.ClientChat:output_type -> proto.Message
0, // 3: proto.Channel.PeerChat:output_type -> proto.Message
2, // [2:4] is the sub-list for method output_type
0, // [0:2] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
}
func init() { file_channel_proto_init() }
func file_channel_proto_init() {
if File_channel_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_channel_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Message); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_channel_proto_rawDesc,
NumEnums: 0,
NumMessages: 1,
NumExtensions: 0,
NumServices: 1,
},
GoTypes: file_channel_proto_goTypes,
DependencyIndexes: file_channel_proto_depIdxs,
MessageInfos: file_channel_proto_msgTypes,
}.Build()
File_channel_proto = out.File
file_channel_proto_rawDesc = nil
file_channel_proto_goTypes = nil
file_channel_proto_depIdxs = nil
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConnInterface
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion6
// ChannelClient is the client API for Channel service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type ChannelClient interface {
ClientChat(ctx context.Context, opts ...grpc.CallOption) (Channel_ClientChatClient, error)
PeerChat(ctx context.Context, opts ...grpc.CallOption) (Channel_PeerChatClient, error)
}
type channelClient struct {
cc grpc.ClientConnInterface
}
func NewChannelClient(cc grpc.ClientConnInterface) ChannelClient {
return &channelClient{cc}
}
func (c *channelClient) ClientChat(ctx context.Context, opts ...grpc.CallOption) (Channel_ClientChatClient, error) {
stream, err := c.cc.NewStream(ctx, &_Channel_serviceDesc.Streams[0], "/proto.Channel/ClientChat", opts...)
if err != nil {
return nil, err
}
x := &channelClientChatClient{stream}
return x, nil
}
type Channel_ClientChatClient interface {
Send(*Message) error
Recv() (*Message, error)
grpc.ClientStream
}
type channelClientChatClient struct {
grpc.ClientStream
}
func (x *channelClientChatClient) Send(m *Message) error {
return x.ClientStream.SendMsg(m)
}
func (x *channelClientChatClient) Recv() (*Message, error) {
m := new(Message)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
func (c *channelClient) PeerChat(ctx context.Context, opts ...grpc.CallOption) (Channel_PeerChatClient, error) {
stream, err := c.cc.NewStream(ctx, &_Channel_serviceDesc.Streams[1], "/proto.Channel/PeerChat", opts...)
if err != nil {
return nil, err
}
x := &channelPeerChatClient{stream}
return x, nil
}
type Channel_PeerChatClient interface {
Send(*Message) error
Recv() (*Message, error)
grpc.ClientStream
}
type channelPeerChatClient struct {
grpc.ClientStream
}
func (x *channelPeerChatClient) Send(m *Message) error {
return x.ClientStream.SendMsg(m)
}
func (x *channelPeerChatClient) Recv() (*Message, error) {
m := new(Message)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
// ChannelServer is the server API for Channel service.
type ChannelServer interface {
ClientChat(Channel_ClientChatServer) error
PeerChat(Channel_PeerChatServer) error
}
// UnimplementedChannelServer can be embedded to have forward compatible implementations.
type UnimplementedChannelServer struct {
}
func (*UnimplementedChannelServer) ClientChat(Channel_ClientChatServer) error {
return status.Errorf(codes.Unimplemented, "method ClientChat not implemented")
}
func (*UnimplementedChannelServer) PeerChat(Channel_PeerChatServer) error {
return status.Errorf(codes.Unimplemented, "method PeerChat not implemented")
}
func RegisterChannelServer(s *grpc.Server, srv ChannelServer) {
s.RegisterService(&_Channel_serviceDesc, srv)
}
func _Channel_ClientChat_Handler(srv interface{}, stream grpc.ServerStream) error {
return srv.(ChannelServer).ClientChat(&channelClientChatServer{stream})
}
type Channel_ClientChatServer interface {
Send(*Message) error
Recv() (*Message, error)
grpc.ServerStream
}
type channelClientChatServer struct {
grpc.ServerStream
}
func (x *channelClientChatServer) Send(m *Message) error {
return x.ServerStream.SendMsg(m)
}
func (x *channelClientChatServer) Recv() (*Message, error) {
m := new(Message)
if err := x.ServerStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
func _Channel_PeerChat_Handler(srv interface{}, stream grpc.ServerStream) error | {
return srv.(ChannelServer).PeerChat(&channelPeerChatServer{stream})
} | identifier_body | |
channel.pb.go | =payload,proto3" json:"payload,omitempty"`
}
func (x *Message) Reset() {
*x = Message{}
if protoimpl.UnsafeEnabled {
mi := &file_channel_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Message) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Message) ProtoMessage() {}
func (x *Message) ProtoReflect() protoreflect.Message {
mi := &file_channel_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Message.ProtoReflect.Descriptor instead.
func (*Message) Descriptor() ([]byte, []int) {
return file_channel_proto_rawDescGZIP(), []int{0}
}
func (x *Message) GetPayload() []byte {
if x != nil {
return x.Payload
}
return nil
}
var File_channel_proto protoreflect.FileDescriptor
var file_channel_proto_rawDesc = []byte{
0x0a, 0x0d, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12,
0x05, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x23, 0x0a, 0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67,
0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x01, 0x20, 0x01,
0x28, 0x0c, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x32, 0x6f, 0x0a, 0x07, 0x43,
0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x12, 0x32, 0x0a, 0x0a, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74,
0x43, 0x68, 0x61, 0x74, 0x12, 0x0e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4d, 0x65, 0x73,
0x73, 0x61, 0x67, 0x65, 0x1a, 0x0e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4d, 0x65, 0x73,
0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x30, 0x0a, 0x08, 0x50, 0x65,
0x65, 0x72, 0x43, 0x68, 0x61, 0x74, 0x12, 0x0e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4d,
0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x0e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4d,
0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x62, 0x06, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x33,
}
var (
file_channel_proto_rawDescOnce sync.Once
file_channel_proto_rawDescData = file_channel_proto_rawDesc
)
func file_channel_proto_rawDescGZIP() []byte {
file_channel_proto_rawDescOnce.Do(func() {
file_channel_proto_rawDescData = protoimpl.X.CompressGZIP(file_channel_proto_rawDescData)
})
return file_channel_proto_rawDescData
}
var file_channel_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
var file_channel_proto_goTypes = []interface{}{
(*Message)(nil), // 0: proto.Message
}
var file_channel_proto_depIdxs = []int32{
0, // 0: proto.Channel.ClientChat:input_type -> proto.Message
0, // 1: proto.Channel.PeerChat:input_type -> proto.Message
0, // 2: proto.Channel.ClientChat:output_type -> proto.Message
0, // 3: proto.Channel.PeerChat:output_type -> proto.Message
2, // [2:4] is the sub-list for method output_type
0, // [0:2] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
}
func init() { file_channel_proto_init() }
func file_channel_proto_init() {
if File_channel_proto != nil {
return
}
if !protoimpl.UnsafeEnabled |
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_channel_proto_rawDesc,
NumEnums: 0,
NumMessages: 1,
NumExtensions: 0,
NumServices: 1,
},
GoTypes: file_channel_proto_goTypes,
DependencyIndexes: file_channel_proto_depIdxs,
MessageInfos: file_channel_proto_msgTypes,
}.Build()
File_channel_proto = out.File
file_channel_proto_rawDesc = nil
file_channel_proto_goTypes = nil
file_channel_proto_depIdxs = nil
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConnInterface
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion6
// ChannelClient is the client API for Channel service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type ChannelClient interface {
ClientChat(ctx context.Context, opts ...grpc.CallOption) | {
file_channel_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Message); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
} | conditional_block |
channel.pb.go | =payload,proto3" json:"payload,omitempty"`
}
func (x *Message) Reset() {
*x = Message{}
if protoimpl.UnsafeEnabled {
mi := &file_channel_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Message) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Message) ProtoMessage() {}
func (x *Message) ProtoReflect() protoreflect.Message {
mi := &file_channel_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Message.ProtoReflect.Descriptor instead.
func (*Message) | () ([]byte, []int) {
return file_channel_proto_rawDescGZIP(), []int{0}
}
func (x *Message) GetPayload() []byte {
if x != nil {
return x.Payload
}
return nil
}
var File_channel_proto protoreflect.FileDescriptor
var file_channel_proto_rawDesc = []byte{
0x0a, 0x0d, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12,
0x05, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x23, 0x0a, 0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67,
0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x01, 0x20, 0x01,
0x28, 0x0c, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x32, 0x6f, 0x0a, 0x07, 0x43,
0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x12, 0x32, 0x0a, 0x0a, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74,
0x43, 0x68, 0x61, 0x74, 0x12, 0x0e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4d, 0x65, 0x73,
0x73, 0x61, 0x67, 0x65, 0x1a, 0x0e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4d, 0x65, 0x73,
0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x30, 0x0a, 0x08, 0x50, 0x65,
0x65, 0x72, 0x43, 0x68, 0x61, 0x74, 0x12, 0x0e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4d,
0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x0e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4d,
0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x62, 0x06, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x33,
}
var (
file_channel_proto_rawDescOnce sync.Once
file_channel_proto_rawDescData = file_channel_proto_rawDesc
)
func file_channel_proto_rawDescGZIP() []byte {
file_channel_proto_rawDescOnce.Do(func() {
file_channel_proto_rawDescData = protoimpl.X.CompressGZIP(file_channel_proto_rawDescData)
})
return file_channel_proto_rawDescData
}
var file_channel_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
var file_channel_proto_goTypes = []interface{}{
(*Message)(nil), // 0: proto.Message
}
var file_channel_proto_depIdxs = []int32{
0, // 0: proto.Channel.ClientChat:input_type -> proto.Message
0, // 1: proto.Channel.PeerChat:input_type -> proto.Message
0, // 2: proto.Channel.ClientChat:output_type -> proto.Message
0, // 3: proto.Channel.PeerChat:output_type -> proto.Message
2, // [2:4] is the sub-list for method output_type
0, // [0:2] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
}
func init() { file_channel_proto_init() }
func file_channel_proto_init() {
if File_channel_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_channel_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Message); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_channel_proto_rawDesc,
NumEnums: 0,
NumMessages: 1,
NumExtensions: 0,
NumServices: 1,
},
GoTypes: file_channel_proto_goTypes,
DependencyIndexes: file_channel_proto_depIdxs,
MessageInfos: file_channel_proto_msgTypes,
}.Build()
File_channel_proto = out.File
file_channel_proto_rawDesc = nil
file_channel_proto_goTypes = nil
file_channel_proto_depIdxs = nil
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConnInterface
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion6
// ChannelClient is the client API for Channel service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type ChannelClient interface {
ClientChat(ctx context.Context, opts ...grpc.CallOption) ( | Descriptor | identifier_name |
ag.py | .sub("(\n\r|\n|\r\n)[ \t]*(\n\r|\n|\r\n)","\n\n",s)
defined=set()
data=[]
rs=p.split(s) # cut via "\n\n\n"
#print(p.groups+1),print(rs[1:1+p.groups+1]),print(rs[1+(p.groups+1)*1:1+(p.groups+1)*2]),exit()
#print(rs[0]),exit() # debug
for i in range(1,len(rs),p.groups+1):
# rs[0] is "not match", omitted
# start from 1 =>
# (^|[\n]) , currName , succName , precNames , precName_last , pu** , pu**_last , (-pull|-push) , pu**_func_last , goals , others
# +0 , +1 , +2 , +3 , +4 , +5 , +6 , +7 , +8 , +9 , + >=10
#print(i,p.groups+1,rs[i-1]),print(rs[i:i+p.groups+1]) # debug
#if i>p.groups: exit() # debug
curr=rs[i+1]
if curr in defined:
raise TypeError("Error: '"+curr+"' is defined twice")
defined.add(curr)
#print(i,curr,defined) # debug
succ = rs[i+2]
prec = set(sts.split(rs[i+3])[1:]) # or
opts = {"-push":(set(),[],[]),"-pull":(set(),[],[])} # (fooNamesLookupForRepeated,fooContent)
for opt in nodeopt.split(rs[i+5])[1::nodeopt.groups+1]:
arr=sts.split(opt) # opt_type foo1 foo2 ...
dest=[k for k in opts if arr[0]==k] # opt_type
if len(dest)==0: raise TypeError("Error: "+arr[0]+" is not an option")
arr,dst=tuple(arr[1:]),opts[dest[0]]
if not (arr in dst[0]): # trim repeated combination
dst[0].add(arr)
dst[1].append([getattr(self.extendedView,f) for f in arr])
else: print("warning: permutation:",arr,"in",dest[0],"already exists in this node")
dst[2].append(arr)
gsv = re.split("[\n][ \t]*[\n]",rs[i+9]) # or
data.append((curr, ([ Goal().fromStr(gs,cd=cd,extView=self.extendedView).flatten() for gs in gsv ],succ,set(),[''],prec,opts) ))
# curr:( Goal()s , succ , succSet , succStrs , prec , opts)
#data.sort()
#print(defined),exit() # debug
#print(sorted(list(defined))) # debug
#pprint(data) # debug
self.sets=dict(data)
del data
'''
def getTarget(c):
tmp=c[1].split(":")[1] if ":" in c[1] else c[1]
return c[0],tmp,c[2]
for k in self.sets:
node=self.sets[k]
if node[1]=='-': continue
gs_node=node[0]
if len(gs_node)!=1: continue
gs_node=gs_node[0]
gs_node.arrange()
sn=set(gs_node.constraints)
succ=self.sets[node[1]]
gs_succ=succ[0]
for g in gs_succ:
if abs(len(g.constraints)-len(sn))>1: continue
ss=set(g.constraints)
delta=ss^sn
if len(delta)>2: continue
rem_sn,rem_ss=delta&sn,delta&ss
if len(rem_sn)!=1 or len(rem_ss)!=1: continue # no idea how to do
rem_sn,rem_ss=rem_sn.pop(),rem_ss.pop()
if not (":" in rem_sn[1] or ":" in rem_ss[1]): continue # not value
rem1_sn=re.split(r'[ \t]+',rem_sn[1])
rem1_ss=re.split(r'[ \t]+',rem_ss[1])
if len(rem1_sn)!=len(rem1_ss)!=1: continue
rem1_sn.sort(),rem1_ss.sort()
diff=[]
for i in range(len(rem1_sn)):
if rem1_sn[i]!=rem1_ss[i]:
diff.append((rem1_sn[i],rem1_ss[i]))
if len(diff)!=1 or diff[0]==diff[1]: continue
target=[ x[:x.index(":")] for x in diff ]
if target[0]!=target[1]: continue
vals=[ x[len(target[0])+1:] for x in diff ]
if not ',' in vals[0]: vals[0]=vals[0]+','+vals[0]
if not ',' in vals[1]: vals[1]=vals[1]+','+vals[1]
newNodes=[]
if vals[0]
print("?",gs_node),exit()
'''
self.isSuccsOf=dict([(k,set()) for k in self.sets])
for k,v in self.sets.items():
succSet,succStr=self._getSuccs(k)
v[2].update(succSet)
v[3][0]+=succStr
# basic keys
allKeys=set([k for k in self.sets])
for k in allKeys:
# all lower nodes
self.learned["nextgoal"][k]=dict([ (kk,(0.0-len(self.getSuccs(kk)))/len(allKeys)) for kk in allKeys-self.isSuccsOf[k] if kk!=k ])
self.learned["nextgoal"][""]=dict([ (k,(0.0-len(self.getSuccs(k)))/len(allKeys)) for k in allKeys if len(self.getPrecs(k))==0 ])
return self
def toStr(self,labelMinLen=0):
kv=[ k for k in self.sets ]
kv.sort()
rtv=""
tmpv=[]
for k in kv:
tmps=""
tmps+=k+'\t'+self.getSucc(k)
if len(self.getPrecs(k))!=0:
tmps+='\t'.join([""]+sorted([ kk for kk in self.getPrecs(k) ]))
opts=self.sets[k][5]
optstrs=[]
for opt in sorted([_ for _ in opts]):
if len(opts[opt][2])!=0:
optstrs.append('\t'.join([x for v in opts[opt][2]for x in[opt]+list(v)]))
tmps+='\t'.join([""]+optstrs)
tmpgsv=[ g.toStr(labelMinLen=labelMinLen) for g in self.getGoals(k) ]
tmpv.append('\n'.join([tmps,"\n\n".join(tmpgsv)]))
rtv+="\n\n\n".join(tmpv)
return rtv
def fromTxt(self,filename,_cd='./'):
'''
concept:
a block with a name is a set of Goal. that means reach one of them is a 'match', and can to further more (try the successor)
'''
'''
format prototype:
( none or more empty lines )
...
( none or more empty lines )
name successorName(if None, use '-')
# lines which cannot be parsed as <class: Goal>
label item
# lines which cannot be parsed as <class: Goal>
label item
...
label item
label item
( an empty line )
label item
label item
...
label item
label item
( two or more empty lines )
...
( two or more empty lines )
name successorName(if None, use '-')
label item
...
in regex:
'''
cd=_cd+filename[:filename.rindex('/')+1] if '/' in filename else _cd
filename=_cd+filename
try:
path=filename+".py"
if os.path.isfile(path):
spec = importlib.util.spec_from_file_location(filename,path)
self.extendedView = importlib.util.module_from_spec(spec)
spec.loader.exec_module(self.extendedView)
#print(inspect.getsource(self.extendedView)) # debug
except:
print("WARNING: file exists but it cannot be import:",path)
with open(filename,'rb') as f:
self.fromStr("".join(map(chr,f.read())),cd=cd,extView=self.extendedView)
self.filename=filename
return self
def size(self):
rtv={"byname":len(self.sets),"bygoal":0}
for _,d in self.sets.items():
arr=d[0]
for x in arr:
tmp= | x.size()
for k,v in tmp.items(): rtv[k]+=v
| conditional_block | |
ag.py | ommited
this function will NOT do the arrangement
make sure the lines of constraints:
labels are in increasing order
items of the same label are in lexicographical order
format:
each line: label item
([ \t]*\~?[0-9]+|include|gonear)
lines not match will be omitted
'''
# preserve
old=self.constraints
# clean
self.constraints=[]
self.maxLabel=-1
self.extendedView=extView
# start
lines=s.split('\n')
p=self.__class__.parser_item
rs=p.split(s)
#print('*'*11),pprint(rs) # debug
for i in range(1,len(rs),p.groups+1):
# not match , ([\n]|^) , [ \t]*\~?[0-9]+|KWs , [^\n]+
# start from 1 =>
# ([\n]|^) , [ \t]*[0-9]+|KWs , [^\n]+ , not match
isKW=False
negate=False
label=sts.sub('',rs[i+1])
if label[0]=='~':
negate=True
label=label[1:]
content=rs[i+2]
#print(rs[i],rs[i+1]) # debug
if label==self.__class__.KW_include_txt:
isKW=True
label=self.__class__.KW_include_label
self.including=True
tmp=Goaltree()
tmp.fromTxt(content,_cd=cd)
item=(content,tmp)
if label==self.__class__.KW_gonear_txt:
isKW=True
label=self.__class__.KW_gonear_label
tmp=None # TODO
item=(content,tmp)
if isKW==False:
item=content
label=int(label)
self.add(item,label,negate=negate,arrangeLater=True)
'''
for line in lines:
m=self.__class__.parser_item.match(line)
if isNone(m): continue
res=m.group(1,2)
self.add(res[1],int(res[0]),arrangeLater=True)
# TODO: need ORs
'''
return self
def toStr(self,labelMinLen=0):
length=max(len(str(self.maxLabel)),labelMinLen)
if self.including:
length=max(length,self.__class__.KW_include_lentxt)
rtv=""
tmpv=[]
for c in self.constraints:
useLen=length+c[2] # len of neg no usage
label=c[0]
content=c[1]
if label==self.__class__.KW_include_label:
useLen=0
label=self.__class__.KW_include_txt
content=c[1][0]
#if 0!=0: tmpv.append(c[1][1].toStr(labelMinLen=length).split('\n')[1:])
if label==self.__class__.KW_gonear_label:
useLen=0
label=self.__class__.KW_gonear_txt
content=c[1][0]
label=('~' if c[2] else '')+str(label)
tmpv.append("%*s\t%s"%(useLen,label,content))
rtv+='\n'.join(tmpv)
return rtv
def fromTxt(self,filename,_cd='./',extView=None):
cd=_cd+filename[:filename.rindex('/')+1] if '/' in filename else _cd
with open(_cd+filename,'rb') as f:
self.fromStr("".join(map(chr,f.read())),cd=cd,extView=extView)
return self
# TODO:
with open(_cd+filename+".learn",'rb') as f:
pass
def size(self):
rtv={"byname":0,"bygoal":1}
for c in self.constraints:
if c[0]=="include":
tmp=c[1][1].size()
for k,v in tmp.items(): rtv[k]+=v
return rtv
class Goaltree:
# lots of goalset
'''
definitions:
successor:
the next goalset to match after matching a goalset
closer the root(tree), closer the final goalset
'''
parser_set=re.compile(token_goalset)
def __init__(self):
self.sets={}
self.filename=None
self.extendedView=None
# an extendedView is an importlib.import_module() object
# this can only be used when using 'fromTxt'
# using 'fromStr' will remove (=None) previous extendedView from 'fromTxt'
# file name is '_cd' and 'filename' given to 'fromTxt' concatenate '.py'
# i.e. _cd+filename+".py"
## it is recommended to construct a hashtable ( key is tuple(*.outputs()) or you can specify other methods ) with timeout to prevent re-calculating same condition within the same goal to achive
self.learned={"nextgoal":{}}
self.isSuccsOf={}
# learn file is self.filename+".learn", self.filename will be set after self.fromTxt()
pass
def __repr__(self):
rtv='{Goaltree:\n'
tmp=[ (k,v) for k,v in self.sets.items() ]
tmp.sort()
for x in tmp:
rtv+="\t"+x[0]+":"+str(x[1])+",\n"
rtv+='\t-:0\n}'
return rtv
def __getitem__(self,k):
return self.sets[k] if k in self.sets else None
def newNode(self,goals=[],name="",successorName='-'):
node=(goals,successorName,set(),[''])
return name,node
def addNode(self,goalset=[],name="",successorName='-'):
# TODO
if name=="" or (name in self.sets): return 1
pass
def keys(self,notBelow=None,beforeKeys=set()):
def valid_prec(k):
precs=self.getPrecs(k)
return len(precs)==0 or len(precs&beforeKeys)!=0
if isNone(notBelow):
rtv=[k for k in self.sets if valid_prec(k)]
rtv.sort()
return rtv
else:
#rtv=[k for k in self.sets if not self.getSucc(k) in notBelow]
rtv=[k for k in self.sets if len(self.getSuccs(k)¬Below)==0 and valid_prec(k)]
rtv.sort()
return rtv
def getGoals(self,k):
# return a goalset
return self.sets[k][0] if k in self.sets else None
def getSucc(self,k):
return self.sets[k][1]
def _getSuccs(self,k):
rtvSet=set()
rtvStr=k
tmpsucc=self.getSucc(k)
while not ( tmpsucc=='-' or tmpsucc=='+' or (tmpsucc in rtvSet) ):
# rtv
rtvSet.add(tmpsucc)
rtvStr+='-'
rtvStr+=tmpsucc
# reversed succs
#if not tmpsucc in self.isSuccsOf: self.isSuccsOf[tmpsucc]=set() # is set before
self.isSuccsOf[tmpsucc].add(k)
# next
tmpsucc=self.getSucc(tmpsucc)
return rtvSet,rtvStr
def getSuccs(self,k):
return self.sets[k][2]
def getSuccsStr(self,k):
return self.sets[k][3][0]
def getPrecs(self,k):
return self.sets[k][4]
def getOpts(self,k):
rtv=dict(self.sets[k][5])
for i in rtv: rtv[i]=rtv[i][1]
return rtv
def getFinals(self):
retu | ef fromStr(self,s,cd='./',extView=None):
s=s.replace('\r','')
'''
\r\n , \n\r , \n -> \n
format: see self.fromTxt
'''
# unset filename
self.filename=None
self.extendedView=extView
#old=self.sets
p=self.__class__.parser_set
s=re.sub("(\n\r|\n|\r\n)[ \t]*(\n\r|\n|\r\n)","\n\n",s)
defined=set()
data=[]
rs=p.split(s) # cut via "\n\n\n"
#print(p.groups+1),print(rs[1:1+p.groups+1]),print(rs[1+(p.groups+1)*1:1+(p.groups+1)*2]),exit()
#print(rs[0]),exit() # debug
for i in range(1,len(rs),p.groups+1):
# rs[0] is "not match", omitted
# start from 1 =>
# (^|[\n]) , currName , succ | rn [ k for k in self.sets if self.getSucc(k)=='-' ]
d | identifier_body |
ag.py | self.constraints=[] # [ (int(label),item,negate?) ... ]
self.maxLabel=-1
self.including=False
self.arrangeNeeded=False
self.extendedView=None # inherit from Goaltree
def __eq__(self,rhs):
self.arrange()
if isinstance(rhs,self.__class__):
return self.constraints==rhs.constraints
else:
raise TypeError("unsupport: %s == %s"%(self.__class__,type(rhs)))
def __repr__(self):
return "[Goal:"+str(self.constraints)+"]"
def isSpecial(self):
# having constraints labels != 0
for c in self.constraints:
if c[0]!=0:
return 1
return 0
def flatten(self):
add=[]
delItSet=set()
rg=range(len(self.constraints))
for i in rg:
c=self.constraints[i]
if c[0]==self.__class__.KW_include_label:
src=c[1][1]
if len(src.sets)<=1 and len(src.pushs(""))==0 and len(src.pulls(""))==0:
cFinalGs=src[src.getFinals()[0]][0]
if len(cFinalGs)<=1 and cFinalGs[0].isSpecial()==0:
add+=cFinalGs[0].constraints
delItSet.add(i)
if len(add)!=0:
newCs=[ self.constraints[i] for i in rg if not i in delItSet ]
self.constraints=newCs
for c in add:
self.add(c[1],c[0],c[2],arrangeLater=True)
self.arrange()
return self
def arrange(self):
if self.arrangeNeeded!=False:
self.arrangeNeeded=False
self.constraints.sort(key=lambda x:(x[2],x[:2]))
tmpv=[]
tmp=0
for c in self.constraints:
if tmp==c: continue
tmpv.append(c)
tmp=c
self.constraints=tmpv
if len([ c[0] for c in tmpv if c[0]==-1])==0:
self.including=False
return self
def add(self,item,label=0,negate=False,arrangeLater=False):
# label must be an integer
self.constraints.append((label,item,negate))
if self.maxLabel<label: self.maxLabel=label
if arrangeLater==False: self.arrange()
else: self.arrangeNeeded=arrangeLater
def fromStr(self,s,cd='./',extView=None):
s=s.replace('\r','')
'''
character:'\r' is ommited
this function will NOT do the arrangement
make sure the lines of constraints:
labels are in increasing order
items of the same label are in lexicographical order
format:
each line: label item
([ \t]*\~?[0-9]+|include|gonear)
lines not match will be omitted
'''
# preserve
old=self.constraints
# clean
self.constraints=[]
self.maxLabel=-1
self.extendedView=extView
# start
lines=s.split('\n')
p=self.__class__.parser_item
rs=p.split(s)
#print('*'*11),pprint(rs) # debug
for i in range(1,len(rs),p.groups+1):
# not match , ([\n]|^) , [ \t]*\~?[0-9]+|KWs , [^\n]+
# start from 1 =>
# ([\n]|^) , [ \t]*[0-9]+|KWs , [^\n]+ , not match
isKW=False
negate=False
label=sts.sub('',rs[i+1])
if label[0]=='~':
negate=True
label=label[1:]
content=rs[i+2]
#print(rs[i],rs[i+1]) # debug
if label==self.__class__.KW_include_txt:
isKW=True
label=self.__class__.KW_include_label
self.including=True
tmp=Goaltree()
tmp.fromTxt(content,_cd=cd)
item=(content,tmp)
if label==self.__class__.KW_gonear_txt:
isKW=True
label=self.__class__.KW_gonear_label
tmp=None # TODO
item=(content,tmp)
if isKW==False:
item=content
label=int(label)
self.add(item,label,negate=negate,arrangeLater=True)
'''
for line in lines:
m=self.__class__.parser_item.match(line)
if isNone(m): continue
res=m.group(1,2)
self.add(res[1],int(res[0]),arrangeLater=True)
# TODO: need ORs
'''
return self
def toStr(self,labelMinLen=0):
length=max(len(str(self.maxLabel)),labelMinLen)
if self.including:
length=max(length,self.__class__.KW_include_lentxt)
rtv=""
tmpv=[]
for c in self.constraints:
useLen=length+c[2] # len of neg no usage
label=c[0]
content=c[1]
if label==self.__class__.KW_include_label:
useLen=0
label=self.__class__.KW_include_txt
content=c[1][0]
#if 0!=0: tmpv.append(c[1][1].toStr(labelMinLen=length).split('\n')[1:])
if label==self.__class__.KW_gonear_label:
useLen=0
label=self.__class__.KW_gonear_txt
content=c[1][0]
label=('~' if c[2] else '')+str(label)
tmpv.append("%*s\t%s"%(useLen,label,content))
rtv+='\n'.join(tmpv)
return rtv
def fromTxt(self,filename,_cd='./',extView=None):
cd=_cd+filename[:filename.rindex('/')+1] if '/' in filename else _cd
with open(_cd+filename,'rb') as f:
self.fromStr("".join(map(chr,f.read())),cd=cd,extView=extView)
return self
# TODO:
with open(_cd+filename+".learn",'rb') as f:
pass
def size(self):
rtv={"byname":0,"bygoal":1}
for c in self.constraints:
if c[0]=="include":
tmp=c[1][1].size()
for k,v in tmp.items(): rtv[k]+=v
return rtv
class Goaltree:
# lots of goalset
'''
definitions:
successor:
the next goalset to match after matching a goalset
closer the root(tree), closer the final goalset
'''
parser_set=re.compile(token_goalset)
def __init__(self):
self.sets={}
self.filename=None
self.extendedView=None
# an extendedView is an importlib.import_module() object
# this can only be used when using 'fromTxt'
# using 'fromStr' will remove (=None) previous extendedView from 'fromTxt'
# file name is '_cd' and 'filename' given to 'fromTxt' concatenate '.py'
# i.e. _cd+filename+".py"
## it is recommended to construct a hashtable ( key is tuple(*.outputs()) or you can specify other methods ) with timeout to prevent re-calculating same condition within the same goal to achive
self.learned={"nextgoal":{}}
self.isSuccsOf={}
# learn file is self.filename+".learn", self.filename will be set after self.fromTxt()
pass
def __repr__(self):
rtv='{Goaltree:\n'
tmp=[ (k,v) for k,v in self.sets.items() ]
tmp.sort()
for x in tmp:
rtv+="\t"+x[0]+":"+str(x[1])+",\n"
rtv+='\t-:0\n}'
return rtv
def __getitem__(self,k):
return self.sets[k] if k in self.sets else None
def newNode(self,goals=[],name="",successorName='-'):
node=(goals,successorName,set(),[''])
return name,node
def addNode(self,goalset=[],name="",successorName='-'):
# TODO
if name=="" or (name in self.sets): return 1
pass
def keys(self,notBelow=None,beforeKeys=set()):
def valid_prec(k):
precs=self.getPrecs(k)
return len(precs)==0 or len(precs&beforeKeys)!=0
if isNone(notBelow):
rtv=[k for k in self.sets if valid_prec(k)]
rtv.sort()
return rtv
else:
#rtv=[k for k in self.sets if not self.getSucc(k) in notBelow]
rtv=[k for k in self.sets if len(self.getSuccs(k)¬Below)==0 and valid_prec(k)]
| random_line_split | ||
ag.py | is ommited
this function will NOT do the arrangement
make sure the lines of constraints:
labels are in increasing order
items of the same label are in lexicographical order
format:
each line: label item
([ \t]*\~?[0-9]+|include|gonear)
lines not match will be omitted
'''
# preserve
old=self.constraints
# clean
self.constraints=[]
self.maxLabel=-1
self.extendedView=extView
# start
lines=s.split('\n')
p=self.__class__.parser_item
rs=p.split(s)
#print('*'*11),pprint(rs) # debug
for i in range(1,len(rs),p.groups+1):
# not match , ([\n]|^) , [ \t]*\~?[0-9]+|KWs , [^\n]+
# start from 1 =>
# ([\n]|^) , [ \t]*[0-9]+|KWs , [^\n]+ , not match
isKW=False
negate=False
label=sts.sub('',rs[i+1])
if label[0]=='~':
negate=True
label=label[1:]
content=rs[i+2]
#print(rs[i],rs[i+1]) # debug
if label==self.__class__.KW_include_txt:
isKW=True
label=self.__class__.KW_include_label
self.including=True
tmp=Goaltree()
tmp.fromTxt(content,_cd=cd)
item=(content,tmp)
if label==self.__class__.KW_gonear_txt:
isKW=True
label=self.__class__.KW_gonear_label
tmp=None # TODO
item=(content,tmp)
if isKW==False:
item=content
label=int(label)
self.add(item,label,negate=negate,arrangeLater=True)
'''
for line in lines:
m=self.__class__.parser_item.match(line)
if isNone(m): continue
res=m.group(1,2)
self.add(res[1],int(res[0]),arrangeLater=True)
# TODO: need ORs
'''
return self
def toStr(self,labelMinLen=0):
length=max(len(str(self.maxLabel)),labelMinLen)
if self.including:
length=max(length,self.__class__.KW_include_lentxt)
rtv=""
tmpv=[]
for c in self.constraints:
useLen=length+c[2] # len of neg no usage
label=c[0]
content=c[1]
if label==self.__class__.KW_include_label:
useLen=0
label=self.__class__.KW_include_txt
content=c[1][0]
#if 0!=0: tmpv.append(c[1][1].toStr(labelMinLen=length).split('\n')[1:])
if label==self.__class__.KW_gonear_label:
useLen=0
label=self.__class__.KW_gonear_txt
content=c[1][0]
label=('~' if c[2] else '')+str(label)
tmpv.append("%*s\t%s"%(useLen,label,content))
rtv+='\n'.join(tmpv)
return rtv
def fromTxt(self,filename,_cd='./',extView=None):
cd=_cd+filename[:filename.rindex('/')+1] if '/' in filename else _cd
with open(_cd+filename,'rb') as f:
self.fromStr("".join(map(chr,f.read())),cd=cd,extView=extView)
return self
# TODO:
with open(_cd+filename+".learn",'rb') as f:
pass
def size(self):
rtv={"byname":0,"bygoal":1}
for c in self.constraints:
if c[0]=="include":
tmp=c[1][1].size()
for k,v in tmp.items(): rtv[k]+=v
return rtv
class Goaltree:
# lots of goalset
'''
definitions:
successor:
the next goalset to match after matching a goalset
closer the root(tree), closer the final goalset
'''
parser_set=re.compile(token_goalset)
def __init__(self):
self.sets={}
self.filename=None
self.extendedView=None
# an extendedView is an importlib.import_module() object
# this can only be used when using 'fromTxt'
# using 'fromStr' will remove (=None) previous extendedView from 'fromTxt'
# file name is '_cd' and 'filename' given to 'fromTxt' concatenate '.py'
# i.e. _cd+filename+".py"
## it is recommended to construct a hashtable ( key is tuple(*.outputs()) or you can specify other methods ) with timeout to prevent re-calculating same condition within the same goal to achive
self.learned={"nextgoal":{}}
self.isSuccsOf={}
# learn file is self.filename+".learn", self.filename will be set after self.fromTxt()
pass
def __repr__(self):
rtv='{Goaltree:\n'
tmp=[ (k,v) for k,v in self.sets.items() ]
tmp.sort()
for x in tmp:
rtv+="\t"+x[0]+":"+str(x[1])+",\n"
rtv+='\t-:0\n}'
return rtv
def __getitem__(self,k):
return self.sets[k] if k in self.sets else None
def newNode(self,goals=[],name="",successorName='-'):
node=(goals,successorName,set(),[''])
return name,node
def addNode(self,goalset=[],name="",successorName='-'):
# TODO
if name=="" or (name in self.sets): return 1
pass
def keys(self,notBelow=None,beforeKeys=set()):
def valid_prec(k):
precs=self.getPrecs(k)
return len(precs)==0 or len(precs&beforeKeys)!=0
if isNone(notBelow):
rtv=[k for k in self.sets if valid_prec(k)]
rtv.sort()
return rtv
else:
#rtv=[k for k in self.sets if not self.getSucc(k) in notBelow]
rtv=[k for k in self.sets if len(self.getSuccs(k)¬Below)==0 and valid_prec(k)]
rtv.sort()
return rtv
def getGoals(self,k):
# return a goalset
return self.sets[k][0] if k in self.sets else None
def getSucc(self,k):
return self.sets[k][1]
def _getSuccs(self,k):
rtvSet=set()
rtvStr=k
tmpsucc=self.getSucc(k)
while not ( tmpsucc=='-' or tmpsucc=='+' or (tmpsucc in rtvSet) ):
# rtv
rtvSet.add(tmpsucc)
rtvStr+='-'
rtvStr+=tmpsucc
# reversed succs
#if not tmpsucc in self.isSuccsOf: self.isSuccsOf[tmpsucc]=set() # is set before
self.isSuccsOf[tmpsucc].add(k)
# next
tmpsucc=self.getSucc(tmpsucc)
return rtvSet,rtvStr
def getS | f,k):
return self.sets[k][2]
def getSuccsStr(self,k):
return self.sets[k][3][0]
def getPrecs(self,k):
return self.sets[k][4]
def getOpts(self,k):
rtv=dict(self.sets[k][5])
for i in rtv: rtv[i]=rtv[i][1]
return rtv
def getFinals(self):
return [ k for k in self.sets if self.getSucc(k)=='-' ]
def fromStr(self,s,cd='./',extView=None):
s=s.replace('\r','')
'''
\r\n , \n\r , \n -> \n
format: see self.fromTxt
'''
# unset filename
self.filename=None
self.extendedView=extView
#old=self.sets
p=self.__class__.parser_set
s=re.sub("(\n\r|\n|\r\n)[ \t]*(\n\r|\n|\r\n)","\n\n",s)
defined=set()
data=[]
rs=p.split(s) # cut via "\n\n\n"
#print(p.groups+1),print(rs[1:1+p.groups+1]),print(rs[1+(p.groups+1)*1:1+(p.groups+1)*2]),exit()
#print(rs[0]),exit() # debug
for i in range(1,len(rs),p.groups+1):
# rs[0] is "not match", omitted
# start from 1 =>
# (^|[\n]) , currName , succName | uccs(sel | identifier_name |
daemon.go | Manager returns the underlay peer.TaskManager for downloading when embed dragonfly in custom binary
ExportTaskManager() peer.TaskManager
// ExportPeerHost returns the underlay scheduler.PeerHost for scheduling
ExportPeerHost() *scheduler.PeerHost
}
type clientDaemon struct {
once *sync.Once
done chan bool
schedPeerHost *scheduler.PeerHost
Option config.DaemonOption
RPCManager rpcserver.Server
UploadManager upload.Manager
ProxyManager proxy.Manager
StorageManager storage.Manager
GCManager gc.Manager
PeerTaskManager peer.TaskManager
PieceManager peer.PieceManager
}
var _ Daemon = (*clientDaemon)(nil)
func New(opt *config.DaemonOption) (Daemon, error) {
host := &scheduler.PeerHost{
Uuid: idgen.UUIDString(),
Ip: opt.Host.AdvertiseIP,
RpcPort: int32(opt.Download.PeerGRPC.TCPListen.PortRange.Start),
DownPort: 0,
HostName: iputils.HostName,
SecurityDomain: opt.Host.SecurityDomain,
Location: opt.Host.Location,
Idc: opt.Host.IDC,
NetTopology: opt.Host.NetTopology,
}
var opts []grpc.DialOption
if opt.Options.Telemetry.Jaeger != "" {
opts = append(opts, grpc.WithChainUnaryInterceptor(otelgrpc.UnaryClientInterceptor()), grpc.WithChainStreamInterceptor(otelgrpc.StreamClientInterceptor()))
}
sched, err := schedulerclient.GetClientByAddr(opt.Scheduler.NetAddrs, opts...)
if err != nil {
return nil, errors.Wrap(err, "failed to get schedulers")
}
// Storage.Option.DataPath is same with Daemon DataDir
opt.Storage.DataPath = opt.DataDir
storageManager, err := storage.NewStorageManager(opt.Storage.StoreStrategy, &opt.Storage,
/* gc callback */
func(request storage.CommonTaskRequest) {
er := sched.LeaveTask(context.Background(), &scheduler.PeerTarget{
TaskId: request.TaskID,
PeerId: request.PeerID,
})
if er != nil {
logger.Errorf("step 4:leave task %s/%s, error: %v", request.TaskID, request.PeerID, er)
} else {
logger.Infof("step 4:leave task %s/%s state ok", request.TaskID, request.PeerID)
}
})
if err != nil {
return nil, err
}
pieceManager, err := peer.NewPieceManager(storageManager,
opt.Download.PieceDownloadTimeout,
peer.WithLimiter(rate.NewLimiter(opt.Download.TotalRateLimit.Limit, int(opt.Download.TotalRateLimit.Limit))),
peer.WithCalculateDigest(opt.Download.CalculateDigest),
)
if err != nil {
return nil, err
}
peerTaskManager, err := peer.NewPeerTaskManager(host, pieceManager, storageManager, sched, opt.Scheduler,
opt.Download.PerPeerRateLimit.Limit, opt.Storage.Multiplex)
if err != nil {
return nil, err
}
// TODO(jim): more server options
var downloadServerOption []grpc.ServerOption
if !opt.Download.DownloadGRPC.Security.Insecure {
tlsCredentials, err := loadGPRCTLSCredentials(opt.Download.DownloadGRPC.Security)
if err != nil {
return nil, err
}
downloadServerOption = append(downloadServerOption, grpc.Creds(tlsCredentials))
}
var peerServerOption []grpc.ServerOption
if !opt.Download.PeerGRPC.Security.Insecure {
tlsCredentials, err := loadGPRCTLSCredentials(opt.Download.PeerGRPC.Security)
if err != nil {
return nil, err
}
peerServerOption = append(peerServerOption, grpc.Creds(tlsCredentials))
}
rpcManager, err := rpcserver.New(host, peerTaskManager, storageManager, downloadServerOption, peerServerOption)
if err != nil {
return nil, err
}
var proxyManager proxy.Manager
proxyManager, err = proxy.NewProxyManager(host, peerTaskManager, opt.Proxy)
if err != nil {
return nil, err
}
uploadManager, err := upload.NewUploadManager(storageManager,
upload.WithLimiter(rate.NewLimiter(opt.Upload.RateLimit.Limit, int(opt.Upload.RateLimit.Limit))))
if err != nil {
return nil, err
}
return &clientDaemon{
once: &sync.Once{},
done: make(chan bool),
schedPeerHost: host,
Option: *opt,
RPCManager: rpcManager,
PeerTaskManager: peerTaskManager,
PieceManager: pieceManager,
ProxyManager: proxyManager,
UploadManager: uploadManager,
StorageManager: storageManager,
GCManager: gc.NewManager(opt.GCInterval.Duration),
}, nil
}
func loadGPRCTLSCredentials(opt config.SecurityOption) (credentials.TransportCredentials, error) {
// Load certificate of the CA who signed client's certificate
pemClientCA, err := ioutil.ReadFile(opt.CACert)
if err != nil {
return nil, err
}
certPool := x509.NewCertPool()
if !certPool.AppendCertsFromPEM(pemClientCA) {
return nil, fmt.Errorf("failed to add client CA's certificate")
}
// Load server's certificate and private key
serverCert, err := tls.LoadX509KeyPair(opt.Cert, opt.Key)
if err != nil {
return nil, err
}
// Create the credentials and return it
if opt.TLSConfig == nil {
opt.TLSConfig = &tls.Config{
Certificates: []tls.Certificate{serverCert},
ClientAuth: tls.RequireAndVerifyClientCert,
ClientCAs: certPool,
}
} else {
opt.TLSConfig.Certificates = []tls.Certificate{serverCert}
opt.TLSConfig.ClientAuth = tls.RequireAndVerifyClientCert
opt.TLSConfig.ClientCAs = certPool
}
return credentials.NewTLS(opt.TLSConfig), nil
}
func (*clientDaemon) prepareTCPListener(opt config.ListenOption, withTLS bool) (net.Listener, int, error) {
if len(opt.TCPListen.Namespace) > 0 {
runtime.LockOSThread()
defer runtime.UnlockOSThread()
recoverFunc, err := switchNetNamespace(opt.TCPListen.Namespace)
if err != nil {
logger.Errorf("failed to change net namespace: %v", err)
return nil, -1, err
}
defer func() {
err := recoverFunc()
if err != nil {
logger.Errorf("failed to recover net namespace: %v", err)
}
}()
}
var (
ln net.Listener
port int
err error
)
if opt.TCPListen != nil {
ln, port, err = rpc.ListenWithPortRange(opt.TCPListen.Listen, opt.TCPListen.PortRange.Start, opt.TCPListen.PortRange.End)
}
if err != nil {
return nil, -1, err
}
// when use grpc, tls config is in server option
if !withTLS || opt.Security.Insecure |
if opt.Security.Cert == "" || opt.Security.Key == "" {
return nil, -1, errors.New("empty cert or key for tls")
}
// Create the TLS ClientOption with the CA pool and enable Client certificate validation
if opt.Security.TLSConfig == nil {
opt.Security.TLSConfig = &tls.Config{}
}
tlsConfig := opt.Security.TLSConfig
if opt.Security.CACert != "" {
caCert, err := ioutil.ReadFile(opt.Security.CACert)
if err != nil {
return nil, -1, err
}
caCertPool := x509.NewCertPool()
caCertPool.AppendCertsFromPEM(caCert)
tlsConfig.ClientCAs = caCertPool
tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert
}
tlsConfig.Certificates = make([]tls.Certificate, 1)
tlsConfig.Certificates[0], err = tls.LoadX509KeyPair(opt.Security.Cert, opt.Security.Key)
if err != nil {
return nil, -1, err
}
return tls.NewListener(ln, tlsConfig), port, nil
}
func (cd *clientDaemon) Serve() error {
cd.GCManager.Start()
// TODO remove this field, and use directly dfpath.DaemonSockPath
cd.Option.Download.DownloadGRPC.UnixListen.Socket = dfpath.DaemonSockPath
// prepare download service listen
if cd.Option.Download.DownloadGRPC.UnixListen == nil {
return errors.New("download grpc unix listen option is empty")
}
_ = os.Remove(cd.Option.Download.DownloadGRPC.UnixListen.Socket)
downloadListener, err := rpc.Listen(dfnet.NetAddr{
Type: dfnet.UNIX,
Addr: cd.Option.Download.DownloadGRPC.UnixListen.Socket,
})
if err != nil {
logger.Errorf("failed to listen for download grpc service: %v", err)
return err
}
// prepare | {
return ln, port, err
} | conditional_block |
daemon.go | (pemClientCA) {
return nil, fmt.Errorf("failed to add client CA's certificate")
}
// Load server's certificate and private key
serverCert, err := tls.LoadX509KeyPair(opt.Cert, opt.Key)
if err != nil {
return nil, err
}
// Create the credentials and return it
if opt.TLSConfig == nil {
opt.TLSConfig = &tls.Config{
Certificates: []tls.Certificate{serverCert},
ClientAuth: tls.RequireAndVerifyClientCert,
ClientCAs: certPool,
}
} else {
opt.TLSConfig.Certificates = []tls.Certificate{serverCert}
opt.TLSConfig.ClientAuth = tls.RequireAndVerifyClientCert
opt.TLSConfig.ClientCAs = certPool
}
return credentials.NewTLS(opt.TLSConfig), nil
}
func (*clientDaemon) prepareTCPListener(opt config.ListenOption, withTLS bool) (net.Listener, int, error) {
if len(opt.TCPListen.Namespace) > 0 {
runtime.LockOSThread()
defer runtime.UnlockOSThread()
recoverFunc, err := switchNetNamespace(opt.TCPListen.Namespace)
if err != nil {
logger.Errorf("failed to change net namespace: %v", err)
return nil, -1, err
}
defer func() {
err := recoverFunc()
if err != nil {
logger.Errorf("failed to recover net namespace: %v", err)
}
}()
}
var (
ln net.Listener
port int
err error
)
if opt.TCPListen != nil {
ln, port, err = rpc.ListenWithPortRange(opt.TCPListen.Listen, opt.TCPListen.PortRange.Start, opt.TCPListen.PortRange.End)
}
if err != nil {
return nil, -1, err
}
// when use grpc, tls config is in server option
if !withTLS || opt.Security.Insecure {
return ln, port, err
}
if opt.Security.Cert == "" || opt.Security.Key == "" {
return nil, -1, errors.New("empty cert or key for tls")
}
// Create the TLS ClientOption with the CA pool and enable Client certificate validation
if opt.Security.TLSConfig == nil {
opt.Security.TLSConfig = &tls.Config{}
}
tlsConfig := opt.Security.TLSConfig
if opt.Security.CACert != "" {
caCert, err := ioutil.ReadFile(opt.Security.CACert)
if err != nil {
return nil, -1, err
}
caCertPool := x509.NewCertPool()
caCertPool.AppendCertsFromPEM(caCert)
tlsConfig.ClientCAs = caCertPool
tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert
}
tlsConfig.Certificates = make([]tls.Certificate, 1)
tlsConfig.Certificates[0], err = tls.LoadX509KeyPair(opt.Security.Cert, opt.Security.Key)
if err != nil {
return nil, -1, err
}
return tls.NewListener(ln, tlsConfig), port, nil
}
func (cd *clientDaemon) Serve() error {
cd.GCManager.Start()
// TODO remove this field, and use directly dfpath.DaemonSockPath
cd.Option.Download.DownloadGRPC.UnixListen.Socket = dfpath.DaemonSockPath
// prepare download service listen
if cd.Option.Download.DownloadGRPC.UnixListen == nil {
return errors.New("download grpc unix listen option is empty")
}
_ = os.Remove(cd.Option.Download.DownloadGRPC.UnixListen.Socket)
downloadListener, err := rpc.Listen(dfnet.NetAddr{
Type: dfnet.UNIX,
Addr: cd.Option.Download.DownloadGRPC.UnixListen.Socket,
})
if err != nil {
logger.Errorf("failed to listen for download grpc service: %v", err)
return err
}
// prepare peer service listen
if cd.Option.Download.PeerGRPC.TCPListen == nil {
return errors.New("peer grpc tcp listen option is empty")
}
peerListener, peerPort, err := cd.prepareTCPListener(cd.Option.Download.PeerGRPC, false)
if err != nil {
logger.Errorf("failed to listen for peer grpc service: %v", err)
return err
}
cd.schedPeerHost.RpcPort = int32(peerPort)
// prepare upload service listen
if cd.Option.Upload.TCPListen == nil {
return errors.New("upload tcp listen option is empty")
}
uploadListener, uploadPort, err := cd.prepareTCPListener(cd.Option.Upload.ListenOption, true)
if err != nil {
logger.Errorf("failed to listen for upload service: %v", err)
return err
}
cd.schedPeerHost.DownPort = int32(uploadPort)
g := errgroup.Group{}
// serve download grpc service
g.Go(func() error {
defer downloadListener.Close()
logger.Infof("serve download grpc at unix://%s", cd.Option.Download.DownloadGRPC.UnixListen.Socket)
if err := cd.RPCManager.ServeDownload(downloadListener); err != nil {
logger.Errorf("failed to serve for download grpc service: %v", err)
return err
}
return nil
})
// serve peer grpc service
g.Go(func() error {
defer peerListener.Close()
logger.Infof("serve peer grpc at %s://%s", peerListener.Addr().Network(), peerListener.Addr().String())
if err := cd.RPCManager.ServePeer(peerListener); err != nil {
logger.Errorf("failed to serve for peer grpc service: %v", err)
return err
}
return nil
})
if cd.ProxyManager.IsEnabled() {
// prepare proxy service listen
if cd.Option.Proxy.TCPListen == nil {
return errors.New("proxy tcp listen option is empty")
}
proxyListener, proxyPort, err := cd.prepareTCPListener(cd.Option.Proxy.ListenOption, true)
if err != nil {
logger.Errorf("failed to listen for proxy service: %v", err)
return err
}
// serve proxy service
g.Go(func() error {
defer proxyListener.Close()
logger.Infof("serve proxy at tcp://%s:%d", cd.Option.Proxy.TCPListen.Listen, proxyPort)
if err = cd.ProxyManager.Serve(proxyListener); err != nil && err != http.ErrServerClosed {
logger.Errorf("failed to serve for proxy service: %v", err)
return err
} else if err == http.ErrServerClosed {
logger.Infof("proxy service closed")
}
return nil
})
// serve proxy sni service
if cd.Option.Proxy.HijackHTTPS != nil && len(cd.Option.Proxy.HijackHTTPS.SNI) > 0 {
for _, opt := range cd.Option.Proxy.HijackHTTPS.SNI {
listener, port, err := cd.prepareTCPListener(config.ListenOption{
TCPListen: opt,
}, false)
if err != nil {
logger.Errorf("failed to listen for proxy sni service: %v", err)
return err
}
logger.Infof("serve proxy sni at tcp://%s:%d", opt.Listen, port)
g.Go(func() error {
err := cd.ProxyManager.ServeSNI(listener)
if err != nil {
logger.Errorf("failed to serve proxy sni service: %v", err)
}
return err
})
}
}
}
// serve upload service
g.Go(func() error {
defer uploadListener.Close()
logger.Infof("serve upload service at %s://%s", uploadListener.Addr().Network(), uploadListener.Addr().String())
if err := cd.UploadManager.Serve(uploadListener); err != nil && err != http.ErrServerClosed {
logger.Errorf("failed to serve for upload service: %v", err)
return err
} else if err == http.ErrServerClosed {
logger.Infof("upload service closed")
}
return nil
})
if cd.Option.AliveTime.Duration > 0 {
g.Go(func() error {
select {
case <-time.After(cd.Option.AliveTime.Duration):
var keepalives = []clientutil.KeepAlive{
cd.StorageManager,
cd.RPCManager,
}
var keep bool
for _, keepalive := range keepalives {
if keepalive.Alive(cd.Option.AliveTime.Duration) {
keep = true
}
}
if !keep {
cd.Stop()
logger.Infof("alive time reached, stop daemon")
}
case <-cd.done:
logger.Infof("peer host done, stop watch alive time")
}
return nil
})
}
werr := g.Wait()
cd.Stop()
return werr
}
func (cd *clientDaemon) Stop() {
cd.once.Do(func() {
close(cd.done)
cd.GCManager.Stop()
cd.RPCManager.Stop()
cd.UploadManager.Stop()
if cd.ProxyManager.IsEnabled() {
cd.ProxyManager.Stop()
}
if !cd.Option.KeepStorage {
logger.Infof("keep storage disabled")
cd.StorageManager.CleanUp()
}
})
}
func (cd *clientDaemon) ExportTaskManager() peer.TaskManager | {
return cd.PeerTaskManager
} | identifier_body | |
daemon.go | Manager returns the underlay peer.TaskManager for downloading when embed dragonfly in custom binary
ExportTaskManager() peer.TaskManager
// ExportPeerHost returns the underlay scheduler.PeerHost for scheduling
ExportPeerHost() *scheduler.PeerHost
}
type clientDaemon struct {
once *sync.Once
done chan bool
schedPeerHost *scheduler.PeerHost
Option config.DaemonOption
RPCManager rpcserver.Server
UploadManager upload.Manager
ProxyManager proxy.Manager
StorageManager storage.Manager
GCManager gc.Manager
PeerTaskManager peer.TaskManager
PieceManager peer.PieceManager
}
var _ Daemon = (*clientDaemon)(nil)
func New(opt *config.DaemonOption) (Daemon, error) {
host := &scheduler.PeerHost{
Uuid: idgen.UUIDString(),
Ip: opt.Host.AdvertiseIP,
RpcPort: int32(opt.Download.PeerGRPC.TCPListen.PortRange.Start),
DownPort: 0,
HostName: iputils.HostName,
SecurityDomain: opt.Host.SecurityDomain,
Location: opt.Host.Location,
Idc: opt.Host.IDC,
NetTopology: opt.Host.NetTopology,
}
var opts []grpc.DialOption
if opt.Options.Telemetry.Jaeger != "" {
opts = append(opts, grpc.WithChainUnaryInterceptor(otelgrpc.UnaryClientInterceptor()), grpc.WithChainStreamInterceptor(otelgrpc.StreamClientInterceptor()))
}
sched, err := schedulerclient.GetClientByAddr(opt.Scheduler.NetAddrs, opts...)
if err != nil {
return nil, errors.Wrap(err, "failed to get schedulers")
}
// Storage.Option.DataPath is same with Daemon DataDir
opt.Storage.DataPath = opt.DataDir
storageManager, err := storage.NewStorageManager(opt.Storage.StoreStrategy, &opt.Storage,
/* gc callback */
func(request storage.CommonTaskRequest) {
er := sched.LeaveTask(context.Background(), &scheduler.PeerTarget{
TaskId: request.TaskID,
PeerId: request.PeerID,
})
if er != nil {
logger.Errorf("step 4:leave task %s/%s, error: %v", request.TaskID, request.PeerID, er)
} else {
logger.Infof("step 4:leave task %s/%s state ok", request.TaskID, request.PeerID)
}
})
if err != nil {
return nil, err
}
pieceManager, err := peer.NewPieceManager(storageManager,
opt.Download.PieceDownloadTimeout,
peer.WithLimiter(rate.NewLimiter(opt.Download.TotalRateLimit.Limit, int(opt.Download.TotalRateLimit.Limit))),
peer.WithCalculateDigest(opt.Download.CalculateDigest),
)
if err != nil {
return nil, err
}
peerTaskManager, err := peer.NewPeerTaskManager(host, pieceManager, storageManager, sched, opt.Scheduler,
opt.Download.PerPeerRateLimit.Limit, opt.Storage.Multiplex)
if err != nil {
return nil, err
}
// TODO(jim): more server options
var downloadServerOption []grpc.ServerOption
if !opt.Download.DownloadGRPC.Security.Insecure {
tlsCredentials, err := loadGPRCTLSCredentials(opt.Download.DownloadGRPC.Security)
if err != nil {
return nil, err
}
downloadServerOption = append(downloadServerOption, grpc.Creds(tlsCredentials))
}
var peerServerOption []grpc.ServerOption
if !opt.Download.PeerGRPC.Security.Insecure {
tlsCredentials, err := loadGPRCTLSCredentials(opt.Download.PeerGRPC.Security)
if err != nil {
return nil, err
}
peerServerOption = append(peerServerOption, grpc.Creds(tlsCredentials))
}
rpcManager, err := rpcserver.New(host, peerTaskManager, storageManager, downloadServerOption, peerServerOption)
if err != nil {
return nil, err
}
var proxyManager proxy.Manager
proxyManager, err = proxy.NewProxyManager(host, peerTaskManager, opt.Proxy)
if err != nil {
return nil, err
}
uploadManager, err := upload.NewUploadManager(storageManager,
upload.WithLimiter(rate.NewLimiter(opt.Upload.RateLimit.Limit, int(opt.Upload.RateLimit.Limit))))
if err != nil {
return nil, err
}
return &clientDaemon{ | schedPeerHost: host,
Option: *opt,
RPCManager: rpcManager,
PeerTaskManager: peerTaskManager,
PieceManager: pieceManager,
ProxyManager: proxyManager,
UploadManager: uploadManager,
StorageManager: storageManager,
GCManager: gc.NewManager(opt.GCInterval.Duration),
}, nil
}
func loadGPRCTLSCredentials(opt config.SecurityOption) (credentials.TransportCredentials, error) {
// Load certificate of the CA who signed client's certificate
pemClientCA, err := ioutil.ReadFile(opt.CACert)
if err != nil {
return nil, err
}
certPool := x509.NewCertPool()
if !certPool.AppendCertsFromPEM(pemClientCA) {
return nil, fmt.Errorf("failed to add client CA's certificate")
}
// Load server's certificate and private key
serverCert, err := tls.LoadX509KeyPair(opt.Cert, opt.Key)
if err != nil {
return nil, err
}
// Create the credentials and return it
if opt.TLSConfig == nil {
opt.TLSConfig = &tls.Config{
Certificates: []tls.Certificate{serverCert},
ClientAuth: tls.RequireAndVerifyClientCert,
ClientCAs: certPool,
}
} else {
opt.TLSConfig.Certificates = []tls.Certificate{serverCert}
opt.TLSConfig.ClientAuth = tls.RequireAndVerifyClientCert
opt.TLSConfig.ClientCAs = certPool
}
return credentials.NewTLS(opt.TLSConfig), nil
}
func (*clientDaemon) prepareTCPListener(opt config.ListenOption, withTLS bool) (net.Listener, int, error) {
if len(opt.TCPListen.Namespace) > 0 {
runtime.LockOSThread()
defer runtime.UnlockOSThread()
recoverFunc, err := switchNetNamespace(opt.TCPListen.Namespace)
if err != nil {
logger.Errorf("failed to change net namespace: %v", err)
return nil, -1, err
}
defer func() {
err := recoverFunc()
if err != nil {
logger.Errorf("failed to recover net namespace: %v", err)
}
}()
}
var (
ln net.Listener
port int
err error
)
if opt.TCPListen != nil {
ln, port, err = rpc.ListenWithPortRange(opt.TCPListen.Listen, opt.TCPListen.PortRange.Start, opt.TCPListen.PortRange.End)
}
if err != nil {
return nil, -1, err
}
// when use grpc, tls config is in server option
if !withTLS || opt.Security.Insecure {
return ln, port, err
}
if opt.Security.Cert == "" || opt.Security.Key == "" {
return nil, -1, errors.New("empty cert or key for tls")
}
// Create the TLS ClientOption with the CA pool and enable Client certificate validation
if opt.Security.TLSConfig == nil {
opt.Security.TLSConfig = &tls.Config{}
}
tlsConfig := opt.Security.TLSConfig
if opt.Security.CACert != "" {
caCert, err := ioutil.ReadFile(opt.Security.CACert)
if err != nil {
return nil, -1, err
}
caCertPool := x509.NewCertPool()
caCertPool.AppendCertsFromPEM(caCert)
tlsConfig.ClientCAs = caCertPool
tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert
}
tlsConfig.Certificates = make([]tls.Certificate, 1)
tlsConfig.Certificates[0], err = tls.LoadX509KeyPair(opt.Security.Cert, opt.Security.Key)
if err != nil {
return nil, -1, err
}
return tls.NewListener(ln, tlsConfig), port, nil
}
func (cd *clientDaemon) Serve() error {
cd.GCManager.Start()
// TODO remove this field, and use directly dfpath.DaemonSockPath
cd.Option.Download.DownloadGRPC.UnixListen.Socket = dfpath.DaemonSockPath
// prepare download service listen
if cd.Option.Download.DownloadGRPC.UnixListen == nil {
return errors.New("download grpc unix listen option is empty")
}
_ = os.Remove(cd.Option.Download.DownloadGRPC.UnixListen.Socket)
downloadListener, err := rpc.Listen(dfnet.NetAddr{
Type: dfnet.UNIX,
Addr: cd.Option.Download.DownloadGRPC.UnixListen.Socket,
})
if err != nil {
logger.Errorf("failed to listen for download grpc service: %v", err)
return err
}
// prepare peer service | once: &sync.Once{},
done: make(chan bool), | random_line_split |
daemon.go | Manager returns the underlay peer.TaskManager for downloading when embed dragonfly in custom binary
ExportTaskManager() peer.TaskManager
// ExportPeerHost returns the underlay scheduler.PeerHost for scheduling
ExportPeerHost() *scheduler.PeerHost
}
type clientDaemon struct {
once *sync.Once
done chan bool
schedPeerHost *scheduler.PeerHost
Option config.DaemonOption
RPCManager rpcserver.Server
UploadManager upload.Manager
ProxyManager proxy.Manager
StorageManager storage.Manager
GCManager gc.Manager
PeerTaskManager peer.TaskManager
PieceManager peer.PieceManager
}
var _ Daemon = (*clientDaemon)(nil)
func New(opt *config.DaemonOption) (Daemon, error) {
host := &scheduler.PeerHost{
Uuid: idgen.UUIDString(),
Ip: opt.Host.AdvertiseIP,
RpcPort: int32(opt.Download.PeerGRPC.TCPListen.PortRange.Start),
DownPort: 0,
HostName: iputils.HostName,
SecurityDomain: opt.Host.SecurityDomain,
Location: opt.Host.Location,
Idc: opt.Host.IDC,
NetTopology: opt.Host.NetTopology,
}
var opts []grpc.DialOption
if opt.Options.Telemetry.Jaeger != "" {
opts = append(opts, grpc.WithChainUnaryInterceptor(otelgrpc.UnaryClientInterceptor()), grpc.WithChainStreamInterceptor(otelgrpc.StreamClientInterceptor()))
}
sched, err := schedulerclient.GetClientByAddr(opt.Scheduler.NetAddrs, opts...)
if err != nil {
return nil, errors.Wrap(err, "failed to get schedulers")
}
// Storage.Option.DataPath is same with Daemon DataDir
opt.Storage.DataPath = opt.DataDir
storageManager, err := storage.NewStorageManager(opt.Storage.StoreStrategy, &opt.Storage,
/* gc callback */
func(request storage.CommonTaskRequest) {
er := sched.LeaveTask(context.Background(), &scheduler.PeerTarget{
TaskId: request.TaskID,
PeerId: request.PeerID,
})
if er != nil {
logger.Errorf("step 4:leave task %s/%s, error: %v", request.TaskID, request.PeerID, er)
} else {
logger.Infof("step 4:leave task %s/%s state ok", request.TaskID, request.PeerID)
}
})
if err != nil {
return nil, err
}
pieceManager, err := peer.NewPieceManager(storageManager,
opt.Download.PieceDownloadTimeout,
peer.WithLimiter(rate.NewLimiter(opt.Download.TotalRateLimit.Limit, int(opt.Download.TotalRateLimit.Limit))),
peer.WithCalculateDigest(opt.Download.CalculateDigest),
)
if err != nil {
return nil, err
}
peerTaskManager, err := peer.NewPeerTaskManager(host, pieceManager, storageManager, sched, opt.Scheduler,
opt.Download.PerPeerRateLimit.Limit, opt.Storage.Multiplex)
if err != nil {
return nil, err
}
// TODO(jim): more server options
var downloadServerOption []grpc.ServerOption
if !opt.Download.DownloadGRPC.Security.Insecure {
tlsCredentials, err := loadGPRCTLSCredentials(opt.Download.DownloadGRPC.Security)
if err != nil {
return nil, err
}
downloadServerOption = append(downloadServerOption, grpc.Creds(tlsCredentials))
}
var peerServerOption []grpc.ServerOption
if !opt.Download.PeerGRPC.Security.Insecure {
tlsCredentials, err := loadGPRCTLSCredentials(opt.Download.PeerGRPC.Security)
if err != nil {
return nil, err
}
peerServerOption = append(peerServerOption, grpc.Creds(tlsCredentials))
}
rpcManager, err := rpcserver.New(host, peerTaskManager, storageManager, downloadServerOption, peerServerOption)
if err != nil {
return nil, err
}
var proxyManager proxy.Manager
proxyManager, err = proxy.NewProxyManager(host, peerTaskManager, opt.Proxy)
if err != nil {
return nil, err
}
uploadManager, err := upload.NewUploadManager(storageManager,
upload.WithLimiter(rate.NewLimiter(opt.Upload.RateLimit.Limit, int(opt.Upload.RateLimit.Limit))))
if err != nil {
return nil, err
}
return &clientDaemon{
once: &sync.Once{},
done: make(chan bool),
schedPeerHost: host,
Option: *opt,
RPCManager: rpcManager,
PeerTaskManager: peerTaskManager,
PieceManager: pieceManager,
ProxyManager: proxyManager,
UploadManager: uploadManager,
StorageManager: storageManager,
GCManager: gc.NewManager(opt.GCInterval.Duration),
}, nil
}
func | (opt config.SecurityOption) (credentials.TransportCredentials, error) {
// Load certificate of the CA who signed client's certificate
pemClientCA, err := ioutil.ReadFile(opt.CACert)
if err != nil {
return nil, err
}
certPool := x509.NewCertPool()
if !certPool.AppendCertsFromPEM(pemClientCA) {
return nil, fmt.Errorf("failed to add client CA's certificate")
}
// Load server's certificate and private key
serverCert, err := tls.LoadX509KeyPair(opt.Cert, opt.Key)
if err != nil {
return nil, err
}
// Create the credentials and return it
if opt.TLSConfig == nil {
opt.TLSConfig = &tls.Config{
Certificates: []tls.Certificate{serverCert},
ClientAuth: tls.RequireAndVerifyClientCert,
ClientCAs: certPool,
}
} else {
opt.TLSConfig.Certificates = []tls.Certificate{serverCert}
opt.TLSConfig.ClientAuth = tls.RequireAndVerifyClientCert
opt.TLSConfig.ClientCAs = certPool
}
return credentials.NewTLS(opt.TLSConfig), nil
}
func (*clientDaemon) prepareTCPListener(opt config.ListenOption, withTLS bool) (net.Listener, int, error) {
if len(opt.TCPListen.Namespace) > 0 {
runtime.LockOSThread()
defer runtime.UnlockOSThread()
recoverFunc, err := switchNetNamespace(opt.TCPListen.Namespace)
if err != nil {
logger.Errorf("failed to change net namespace: %v", err)
return nil, -1, err
}
defer func() {
err := recoverFunc()
if err != nil {
logger.Errorf("failed to recover net namespace: %v", err)
}
}()
}
var (
ln net.Listener
port int
err error
)
if opt.TCPListen != nil {
ln, port, err = rpc.ListenWithPortRange(opt.TCPListen.Listen, opt.TCPListen.PortRange.Start, opt.TCPListen.PortRange.End)
}
if err != nil {
return nil, -1, err
}
// when use grpc, tls config is in server option
if !withTLS || opt.Security.Insecure {
return ln, port, err
}
if opt.Security.Cert == "" || opt.Security.Key == "" {
return nil, -1, errors.New("empty cert or key for tls")
}
// Create the TLS ClientOption with the CA pool and enable Client certificate validation
if opt.Security.TLSConfig == nil {
opt.Security.TLSConfig = &tls.Config{}
}
tlsConfig := opt.Security.TLSConfig
if opt.Security.CACert != "" {
caCert, err := ioutil.ReadFile(opt.Security.CACert)
if err != nil {
return nil, -1, err
}
caCertPool := x509.NewCertPool()
caCertPool.AppendCertsFromPEM(caCert)
tlsConfig.ClientCAs = caCertPool
tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert
}
tlsConfig.Certificates = make([]tls.Certificate, 1)
tlsConfig.Certificates[0], err = tls.LoadX509KeyPair(opt.Security.Cert, opt.Security.Key)
if err != nil {
return nil, -1, err
}
return tls.NewListener(ln, tlsConfig), port, nil
}
func (cd *clientDaemon) Serve() error {
cd.GCManager.Start()
// TODO remove this field, and use directly dfpath.DaemonSockPath
cd.Option.Download.DownloadGRPC.UnixListen.Socket = dfpath.DaemonSockPath
// prepare download service listen
if cd.Option.Download.DownloadGRPC.UnixListen == nil {
return errors.New("download grpc unix listen option is empty")
}
_ = os.Remove(cd.Option.Download.DownloadGRPC.UnixListen.Socket)
downloadListener, err := rpc.Listen(dfnet.NetAddr{
Type: dfnet.UNIX,
Addr: cd.Option.Download.DownloadGRPC.UnixListen.Socket,
})
if err != nil {
logger.Errorf("failed to listen for download grpc service: %v", err)
return err
}
// prepare peer | loadGPRCTLSCredentials | identifier_name |
main.rs | : {}", details),
}
}
}
fn connect_db() -> Result<Connection, ServiceError> {
let url = env::var("DATABASE_URL").unwrap_or(String::from(DEFAULT_URL));
println!("Connecting: {}", &url);
match Connection::connect(url, TlsMode::None) {
Ok(connection) => Ok(connection),
Err(error) => {
println!("Connection: {}", error);
Err(ServiceError::NoDatabaseConnection(format!("{}", error)))
}
}
}
fn verses_by_chapters(db: &Connection, id: i16, chapters: Vec<i16>) -> Vec<Value> {
db.query(
"SELECT row_to_json(rst_bible)
FROM rst_bible
WHERE book_id = $1 AND chapter = ANY($2)",
&[&id, &chapters],
).unwrap()
.iter()
.map(|row| row.get(0))
.collect()
}
fn verses_in_chapter_by_verses(
db: &Connection,
id: i16,
chapter: i16,
verses: Vec<i16>,
) -> Vec<Value> {
db.query(
"SELECT row_to_json(rst_bible)
FROM rst_bible
WHERE book_id = $1 AND chapter = $2 AND verse = ANY($3)",
&[&id, &chapter, &verses],
).unwrap()
.iter()
.map(|row| row.get(0))
.collect()
}
fn fetch_results(db: &Connection, refs: Vec<BibleReference>) -> Vec<Value> {
if refs.is_empty() {
return vec![];
}
let valid: Vec<BookRef> = refs
.iter()
.flat_map(|r| {
let statement = db
.prepare(
"SELECT id, book as title, alt, abbr
FROM rst_bible_books
WHERE book ~* $1 OR alt ~* $1 OR abbr ~* $1
LIMIT 1",
).unwrap();
let rows = statement.query(&[&r.book]).unwrap();
if rows.is_empty() {
None
} else {
let row = rows.iter().next().unwrap();
Some(BookRef {
id: row.get(0),
name: row.get(1),
alt: row.get(2),
locations: r.locations.clone(),
})
}
}).collect();
valid
.iter()
.map(|reference| {
let book_id = reference.id;
let book_title = &reference.name;
let book_alt = &reference.alt;
let texts = reference
.locations
.iter()
.flat_map(
move |location| match (&location.chapters, &location.verses) {
// Fetch verses by chapters
(chapters, None) => {
let ch = chapters.into_iter().map(|v| *v as i16).collect();
Some(verses_by_chapters(&db, book_id, ch))
}
// Fetch verses by chapter and verses
(chapters, Some(verses)) if chapters.len() == 1 => {
let ch = chapters[0] as i16;
let vs = verses.into_iter().map(|v| *v as i16).collect();
Some(verses_in_chapter_by_verses(&db, book_id, ch, vs))
}
_ => None,
},
).collect::<Vec<_>>();
json!({ "reference": { "title": book_title, "alt": book_alt }, "texts": texts })
}).collect::<Vec<_>>()
}
fn fetch_daily_verses(db: &Connection) -> Vec<String> {
use chrono::{Datelike, Utc};
let now = Utc::now();
let month = now.month() as i16;
let day = now.day() as i16;
db.query(
"SELECT verses
FROM rst_bible_daily
WHERE month = $1 AND day = $2",
&[&month, &day],
).unwrap()
.iter()
.map(|row| row.get(0))
.collect()
}
fn parse_query(query: Option<&str>) -> FutureResult<String, ServiceError> {
use std::collections::HashMap;
let query = &query.unwrap_or("");
let args = url::form_urlencoded::parse(&query.as_bytes())
.into_owned()
.collect::<HashMap<String, String>>();
match args
.get("q")
.map(|v| v.to_string())
.filter(|s| !s.is_empty())
{
Some(value) => futures::future::ok(value),
None => futures::future::err(ServiceError::NoInput),
}
}
#[derive(Debug)]
struct SearchPaginate {
text: String,
page: i16,
}
fn parse_query_paginate(query: Option<&str>) -> FutureResult<SearchPaginate, ServiceError> {
use std::collections::HashMap;
let query = &query.unwrap_or("");
let args = url::form_urlencoded::parse(&query.as_bytes())
.into_owned()
.collect::<HashMap<String, String>>();
let q = args
.get("q")
.map(|v| v.to_string())
.filter(|s| !s.is_empty());
let p = args
.get("p")
.map(|v| v.parse::<i16>().unwrap_or(1))
.unwrap_or(1);
match (q, p) {
(Some(q), p) => futures::future::ok(SearchPaginate { text: q, page: p }),
_ => futures::future::err(ServiceError::NoInput),
}
}
// Verse Of the Day
fn vod_response_body(db: &Connection) -> Body {
let results = fetch_daily_verses(&db)
.into_iter()
.flat_map(|daily| {
let refs = parse(daily.as_str());
let results = fetch_results(&db, refs);
if results.is_empty() {
None
} else {
Some(results)
}
}).flatten()
.collect::<Vec<_>>();
Body::from(json!({ "results": results }).to_string())
}
fn search_results(query: String, db: &Connection) -> FutureResult<Body, ServiceError> {
let refs = parse(query.as_str());
futures::future::ok(Body::from(
json!({ "results": fetch_results(&db, refs) }).to_string(),
))
}
fn fetch_search_results(text: String, page: i16, db: &Connection) -> (Vec<Value>, i64) {
let page = if page <= 0 { 1 } else { page };
let count_rows = db
.query(
"SELECT COUNT(book_id)
FROM rst_bible
WHERE text ~* $1",
&[&text],
).unwrap();
let mut total: i64 = 0;
if count_rows.is_empty() {
return (vec![json!([])], total);
} else {
total = count_rows.get(0).get("count");
}
let offset = ((page - 1) * 10) as i64;
let rows = db
.query(
"SELECT row_to_json(t)
FROM (
SELECT v.book_id, v.text, v.chapter, v.verse, b.book as book_name, b.alt as book_alt from rst_bible v
LEFT OUTER JOIN rst_bible_books b on (v.book_id = b.id)
WHERE text ~* $1
) t
LIMIT 10
OFFSET $2",
&[&text, &offset],
).unwrap();
let results = rows.into_iter().map(|r| r.get(0)).collect::<Vec<Value>>();
(vec![json!(results)], (total as f64 / 10_f64).ceil() as i64)
}
fn search_text(query: SearchPaginate, db: &Connection) -> FutureResult<Body, ServiceError> {
let text = &query.text;
let results = fetch_search_results(text.to_string(), query.page, db);
futures::future::ok(Body::from(
json!({
"meta": { "text": text, "page": query.page, "total": results.1 },
"results": results.0
}).to_string(),
))
}
fn success_response(body: Body) -> FutureResult<Response<Body>, ServiceError> |
struct SearchService;
impl NewService for SearchService {
type ReqBody = Body;
type ResBody = Body;
type Error = ServiceError;
type Service = SearchService;
type Future = Box<Future<Item = Self::Service, Error = Self::Error> + Send>;
type InitError = ServiceError;
fn new_service(&self) -> Self::Future {
Box::new(futures::future::ok(SearchService))
}
}
impl Service for SearchService {
type ReqBody = Body;
type ResBody | {
futures::future::ok(
Response::builder()
.header(header::CONTENT_TYPE, "application/json")
.header(header::ACCESS_CONTROL_ALLOW_ORIGIN, "*")
.header(header::ACCESS_CONTROL_ALLOW_METHODS, "GET")
.header(header::ACCESS_CONTROL_ALLOW_HEADERS, "Content-Type")
.body(body)
.unwrap(),
)
} | identifier_body |
main.rs | : {}", details),
}
}
}
fn connect_db() -> Result<Connection, ServiceError> {
let url = env::var("DATABASE_URL").unwrap_or(String::from(DEFAULT_URL));
println!("Connecting: {}", &url);
match Connection::connect(url, TlsMode::None) {
Ok(connection) => Ok(connection),
Err(error) => {
println!("Connection: {}", error);
Err(ServiceError::NoDatabaseConnection(format!("{}", error)))
}
}
}
fn verses_by_chapters(db: &Connection, id: i16, chapters: Vec<i16>) -> Vec<Value> {
db.query(
"SELECT row_to_json(rst_bible)
FROM rst_bible
WHERE book_id = $1 AND chapter = ANY($2)",
&[&id, &chapters],
).unwrap()
.iter()
.map(|row| row.get(0))
.collect()
}
fn verses_in_chapter_by_verses(
db: &Connection,
id: i16,
chapter: i16,
verses: Vec<i16>,
) -> Vec<Value> {
db.query(
"SELECT row_to_json(rst_bible)
FROM rst_bible
WHERE book_id = $1 AND chapter = $2 AND verse = ANY($3)",
&[&id, &chapter, &verses],
).unwrap()
.iter()
.map(|row| row.get(0))
.collect()
}
fn fetch_results(db: &Connection, refs: Vec<BibleReference>) -> Vec<Value> {
if refs.is_empty() {
return vec![];
}
let valid: Vec<BookRef> = refs
.iter()
.flat_map(|r| {
let statement = db
.prepare(
"SELECT id, book as title, alt, abbr
FROM rst_bible_books
WHERE book ~* $1 OR alt ~* $1 OR abbr ~* $1
LIMIT 1",
).unwrap();
let rows = statement.query(&[&r.book]).unwrap();
if rows.is_empty() {
None
} else {
let row = rows.iter().next().unwrap();
Some(BookRef {
id: row.get(0),
name: row.get(1),
alt: row.get(2),
locations: r.locations.clone(),
})
}
}).collect();
valid
.iter()
.map(|reference| {
let book_id = reference.id;
let book_title = &reference.name;
let book_alt = &reference.alt;
let texts = reference
.locations
.iter()
.flat_map(
move |location| match (&location.chapters, &location.verses) {
// Fetch verses by chapters
(chapters, None) => {
let ch = chapters.into_iter().map(|v| *v as i16).collect();
Some(verses_by_chapters(&db, book_id, ch))
}
// Fetch verses by chapter and verses
(chapters, Some(verses)) if chapters.len() == 1 => {
let ch = chapters[0] as i16;
let vs = verses.into_iter().map(|v| *v as i16).collect();
Some(verses_in_chapter_by_verses(&db, book_id, ch, vs))
}
_ => None,
},
).collect::<Vec<_>>();
json!({ "reference": { "title": book_title, "alt": book_alt }, "texts": texts })
}).collect::<Vec<_>>()
}
fn fetch_daily_verses(db: &Connection) -> Vec<String> {
use chrono::{Datelike, Utc};
let now = Utc::now();
let month = now.month() as i16;
let day = now.day() as i16;
db.query(
"SELECT verses
FROM rst_bible_daily
WHERE month = $1 AND day = $2",
&[&month, &day],
).unwrap()
.iter()
.map(|row| row.get(0))
.collect()
}
fn parse_query(query: Option<&str>) -> FutureResult<String, ServiceError> {
use std::collections::HashMap;
let query = &query.unwrap_or("");
let args = url::form_urlencoded::parse(&query.as_bytes())
.into_owned()
.collect::<HashMap<String, String>>();
match args
.get("q")
.map(|v| v.to_string())
.filter(|s| !s.is_empty())
{
Some(value) => futures::future::ok(value),
None => futures::future::err(ServiceError::NoInput),
}
}
#[derive(Debug)]
struct SearchPaginate {
text: String,
page: i16,
}
fn parse_query_paginate(query: Option<&str>) -> FutureResult<SearchPaginate, ServiceError> {
use std::collections::HashMap;
let query = &query.unwrap_or("");
let args = url::form_urlencoded::parse(&query.as_bytes())
.into_owned()
.collect::<HashMap<String, String>>();
let q = args
.get("q")
.map(|v| v.to_string())
.filter(|s| !s.is_empty());
let p = args
.get("p")
.map(|v| v.parse::<i16>().unwrap_or(1))
.unwrap_or(1);
match (q, p) {
(Some(q), p) => futures::future::ok(SearchPaginate { text: q, page: p }),
_ => futures::future::err(ServiceError::NoInput),
}
}
// Verse Of the Day
fn vod_response_body(db: &Connection) -> Body {
let results = fetch_daily_verses(&db)
.into_iter()
.flat_map(|daily| {
let refs = parse(daily.as_str());
let results = fetch_results(&db, refs);
if results.is_empty() {
None
} else {
Some(results)
}
}).flatten()
.collect::<Vec<_>>();
Body::from(json!({ "results": results }).to_string())
}
fn search_results(query: String, db: &Connection) -> FutureResult<Body, ServiceError> {
let refs = parse(query.as_str());
futures::future::ok(Body::from(
json!({ "results": fetch_results(&db, refs) }).to_string(),
))
}
fn fetch_search_results(text: String, page: i16, db: &Connection) -> (Vec<Value>, i64) {
let page = if page <= 0 { 1 } else { page };
let count_rows = db
.query(
"SELECT COUNT(book_id)
FROM rst_bible
WHERE text ~* $1",
&[&text],
).unwrap();
let mut total: i64 = 0;
if count_rows.is_empty() {
return (vec![json!([])], total);
} else {
total = count_rows.get(0).get("count");
}
let offset = ((page - 1) * 10) as i64;
let rows = db
.query(
"SELECT row_to_json(t)
FROM (
SELECT v.book_id, v.text, v.chapter, v.verse, b.book as book_name, b.alt as book_alt from rst_bible v
LEFT OUTER JOIN rst_bible_books b on (v.book_id = b.id)
WHERE text ~* $1
) t
LIMIT 10
OFFSET $2",
&[&text, &offset],
).unwrap();
let results = rows.into_iter().map(|r| r.get(0)).collect::<Vec<Value>>();
(vec![json!(results)], (total as f64 / 10_f64).ceil() as i64)
}
fn | (query: SearchPaginate, db: &Connection) -> FutureResult<Body, ServiceError> {
let text = &query.text;
let results = fetch_search_results(text.to_string(), query.page, db);
futures::future::ok(Body::from(
json!({
"meta": { "text": text, "page": query.page, "total": results.1 },
"results": results.0
}).to_string(),
))
}
fn success_response(body: Body) -> FutureResult<Response<Body>, ServiceError> {
futures::future::ok(
Response::builder()
.header(header::CONTENT_TYPE, "application/json")
.header(header::ACCESS_CONTROL_ALLOW_ORIGIN, "*")
.header(header::ACCESS_CONTROL_ALLOW_METHODS, "GET")
.header(header::ACCESS_CONTROL_ALLOW_HEADERS, "Content-Type")
.body(body)
.unwrap(),
)
}
struct SearchService;
impl NewService for SearchService {
type ReqBody = Body;
type ResBody = Body;
type Error = ServiceError;
type Service = SearchService;
type Future = Box<Future<Item = Self::Service, Error = Self::Error> + Send>;
type InitError = ServiceError;
fn new_service(&self) -> Self::Future {
Box::new(futures::future::ok(SearchService))
}
}
impl Service for SearchService {
type ReqBody = Body;
type ResBody = | search_text | identifier_name |
main.rs | : {}", details),
}
}
}
fn connect_db() -> Result<Connection, ServiceError> {
let url = env::var("DATABASE_URL").unwrap_or(String::from(DEFAULT_URL));
println!("Connecting: {}", &url);
match Connection::connect(url, TlsMode::None) {
Ok(connection) => Ok(connection),
Err(error) => {
println!("Connection: {}", error);
Err(ServiceError::NoDatabaseConnection(format!("{}", error)))
}
}
}
fn verses_by_chapters(db: &Connection, id: i16, chapters: Vec<i16>) -> Vec<Value> {
db.query(
"SELECT row_to_json(rst_bible)
FROM rst_bible
WHERE book_id = $1 AND chapter = ANY($2)",
&[&id, &chapters],
).unwrap()
.iter()
.map(|row| row.get(0))
.collect()
}
fn verses_in_chapter_by_verses(
db: &Connection,
id: i16,
chapter: i16,
verses: Vec<i16>,
) -> Vec<Value> {
db.query(
"SELECT row_to_json(rst_bible)
FROM rst_bible
WHERE book_id = $1 AND chapter = $2 AND verse = ANY($3)",
&[&id, &chapter, &verses],
).unwrap()
.iter()
.map(|row| row.get(0))
.collect()
}
fn fetch_results(db: &Connection, refs: Vec<BibleReference>) -> Vec<Value> {
if refs.is_empty() {
return vec![];
}
let valid: Vec<BookRef> = refs
.iter()
.flat_map(|r| {
let statement = db
.prepare(
"SELECT id, book as title, alt, abbr
FROM rst_bible_books
WHERE book ~* $1 OR alt ~* $1 OR abbr ~* $1
LIMIT 1",
).unwrap();
let rows = statement.query(&[&r.book]).unwrap();
if rows.is_empty() {
None
} else {
let row = rows.iter().next().unwrap();
Some(BookRef {
id: row.get(0),
name: row.get(1),
alt: row.get(2),
locations: r.locations.clone(),
})
}
}).collect();
valid
.iter()
.map(|reference| {
let book_id = reference.id;
let book_title = &reference.name;
let book_alt = &reference.alt;
let texts = reference
.locations
.iter()
.flat_map(
move |location| match (&location.chapters, &location.verses) {
// Fetch verses by chapters
(chapters, None) => {
let ch = chapters.into_iter().map(|v| *v as i16).collect();
Some(verses_by_chapters(&db, book_id, ch))
}
// Fetch verses by chapter and verses
(chapters, Some(verses)) if chapters.len() == 1 => {
let ch = chapters[0] as i16;
let vs = verses.into_iter().map(|v| *v as i16).collect();
Some(verses_in_chapter_by_verses(&db, book_id, ch, vs))
}
_ => None,
},
).collect::<Vec<_>>();
json!({ "reference": { "title": book_title, "alt": book_alt }, "texts": texts })
}).collect::<Vec<_>>()
}
fn fetch_daily_verses(db: &Connection) -> Vec<String> {
use chrono::{Datelike, Utc};
let now = Utc::now();
let month = now.month() as i16;
let day = now.day() as i16;
db.query(
"SELECT verses
FROM rst_bible_daily
WHERE month = $1 AND day = $2",
&[&month, &day],
).unwrap()
.iter()
.map(|row| row.get(0))
.collect()
}
fn parse_query(query: Option<&str>) -> FutureResult<String, ServiceError> {
use std::collections::HashMap;
let query = &query.unwrap_or("");
let args = url::form_urlencoded::parse(&query.as_bytes())
.into_owned()
.collect::<HashMap<String, String>>();
match args
.get("q")
.map(|v| v.to_string())
.filter(|s| !s.is_empty())
{
Some(value) => futures::future::ok(value),
None => futures::future::err(ServiceError::NoInput),
}
}
#[derive(Debug)]
struct SearchPaginate {
text: String,
page: i16,
}
fn parse_query_paginate(query: Option<&str>) -> FutureResult<SearchPaginate, ServiceError> {
use std::collections::HashMap;
let query = &query.unwrap_or("");
let args = url::form_urlencoded::parse(&query.as_bytes())
.into_owned()
.collect::<HashMap<String, String>>();
let q = args
.get("q")
.map(|v| v.to_string())
.filter(|s| !s.is_empty());
let p = args
.get("p")
.map(|v| v.parse::<i16>().unwrap_or(1))
.unwrap_or(1);
match (q, p) {
(Some(q), p) => futures::future::ok(SearchPaginate { text: q, page: p }),
_ => futures::future::err(ServiceError::NoInput),
}
}
// Verse Of the Day
fn vod_response_body(db: &Connection) -> Body {
let results = fetch_daily_verses(&db)
.into_iter()
.flat_map(|daily| {
let refs = parse(daily.as_str());
let results = fetch_results(&db, refs);
if results.is_empty() {
None
} else {
Some(results)
}
}).flatten()
.collect::<Vec<_>>();
Body::from(json!({ "results": results }).to_string())
}
fn search_results(query: String, db: &Connection) -> FutureResult<Body, ServiceError> {
let refs = parse(query.as_str());
futures::future::ok(Body::from(
json!({ "results": fetch_results(&db, refs) }).to_string(),
))
}
fn fetch_search_results(text: String, page: i16, db: &Connection) -> (Vec<Value>, i64) {
let page = if page <= 0 { 1 } else { page };
let count_rows = db
.query(
"SELECT COUNT(book_id)
FROM rst_bible
WHERE text ~* $1",
&[&text],
).unwrap();
let mut total: i64 = 0;
if count_rows.is_empty() {
return (vec![json!([])], total);
} else {
total = count_rows.get(0).get("count");
}
let offset = ((page - 1) * 10) as i64;
let rows = db
.query(
"SELECT row_to_json(t)
FROM (
SELECT v.book_id, v.text, v.chapter, v.verse, b.book as book_name, b.alt as book_alt from rst_bible v
LEFT OUTER JOIN rst_bible_books b on (v.book_id = b.id)
WHERE text ~* $1
) t
LIMIT 10
OFFSET $2",
&[&text, &offset],
).unwrap();
let results = rows.into_iter().map(|r| r.get(0)).collect::<Vec<Value>>();
(vec![json!(results)], (total as f64 / 10_f64).ceil() as i64)
}
fn search_text(query: SearchPaginate, db: &Connection) -> FutureResult<Body, ServiceError> {
let text = &query.text;
let results = fetch_search_results(text.to_string(), query.page, db);
futures::future::ok(Body::from(
json!({
"meta": { "text": text, "page": query.page, "total": results.1 },
"results": results.0
}).to_string(),
))
}
fn success_response(body: Body) -> FutureResult<Response<Body>, ServiceError> {
futures::future::ok(
Response::builder()
.header(header::CONTENT_TYPE, "application/json")
.header(header::ACCESS_CONTROL_ALLOW_ORIGIN, "*") | .unwrap(),
)
}
struct SearchService;
impl NewService for SearchService {
type ReqBody = Body;
type ResBody = Body;
type Error = ServiceError;
type Service = SearchService;
type Future = Box<Future<Item = Self::Service, Error = Self::Error> + Send>;
type InitError = ServiceError;
fn new_service(&self) -> Self::Future {
Box::new(futures::future::ok(SearchService))
}
}
impl Service for SearchService {
type ReqBody = Body;
type ResBody = Body | .header(header::ACCESS_CONTROL_ALLOW_METHODS, "GET")
.header(header::ACCESS_CONTROL_ALLOW_HEADERS, "Content-Type")
.body(body) | random_line_split |
domain_randomization.py | to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL FABIO MURATORE, HONDA RESEARCH INSTITUTE EUROPE GMBH,
# OR TECHNICAL UNIVERSITY OF DARMSTADT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from random import randint
from typing import List, Mapping, Optional, Tuple, Union
import numpy as np
from init_args_serializer import Serializable
import pyrado
from pyrado.domain_randomization.domain_randomizer import DomainRandomizer
from pyrado.environment_wrappers.base import EnvWrapper
from pyrado.environment_wrappers.utils import all_envs, inner_env, remove_env
from pyrado.environments.base import Env
from pyrado.environments.sim_base import SimEnv
from pyrado.utils.input_output import completion_context, print_cbt
class DomainRandWrapper(EnvWrapper, Serializable):
"""Base class for environment wrappers which call a `DomainRandomizer` to randomize the domain parameters"""
def __init__(self, wrapped_env: Union[SimEnv, EnvWrapper], randomizer: Optional[DomainRandomizer]):
"""
Constructor
:param wrapped_env: environment to wrap
:param randomizer: `DomainRandomizer` object holding the probability distribution of all randomizable
domain parameters, pass `None` if you want to subclass wrapping another `DomainRandWrapper`
and use its randomizer
"""
if not isinstance(inner_env(wrapped_env), SimEnv):
raise pyrado.TypeErr(given=wrapped_env, expected_type=SimEnv)
if not isinstance(randomizer, DomainRandomizer) and randomizer is not None:
raise pyrado.TypeErr(given=randomizer, expected_type=DomainRandomizer)
Serializable._init(self, locals())
# Invoke EnvWrapper's constructor
super().__init__(wrapped_env)
self._randomizer = randomizer
@property
def randomizer(self) -> DomainRandomizer:
return self._randomizer
@randomizer.setter
def randomizer(self, randomizer: DomainRandomizer):
if not isinstance(randomizer, DomainRandomizer):
raise pyrado.TypeErr(given=randomizer, expected_type=DomainRandomizer)
self._randomizer = randomizer
class MetaDomainRandWrapper(DomainRandWrapper, Serializable):
"""
Domain randomization wrapper which wraps another `DomainRandWrapper` to adapt its parameters,
called domain distribution parameters.
"""
def __init__(self, wrapped_rand_env: DomainRandWrapper, dp_mapping: Mapping[int, Tuple[str, str]]):
"""
Constructor
:param wrapped_rand_env: randomized environment to wrap
:param dp_mapping: mapping from index of the numpy array (coming from the algorithm) to domain parameter name
(e.g. mass, length) and the domain distribution parameter (e.g. mean, std)
.. code-block:: python
# For the mapping arg use the this dict constructor
```
m = {0: ('name1', 'parameter_type1'), 1: ('name2', 'parameter_type2')}
```
"""
if not isinstance(wrapped_rand_env, DomainRandWrapper):
raise pyrado.TypeErr(given=wrapped_rand_env, expected_type=DomainRandWrapper)
Serializable._init(self, locals())
# Invoke the DomainRandWrapper's constructor
super().__init__(wrapped_rand_env, None) | def randomizer(self) -> DomainRandomizer:
# Forward to the wrapped DomainRandWrapper
return self._wrapped_env.randomizer
@randomizer.setter
def randomizer(self, dr: DomainRandomizer):
# Forward to the wrapped DomainRandWrapper
self._wrapped_env.randomizer = dr
def adapt_randomizer(self, domain_distr_param_values: np.ndarray):
# Check the input dimension and reshape if necessary
if domain_distr_param_values.ndim == 1:
pass
elif domain_distr_param_values.ndim == 2:
domain_distr_param_values = domain_distr_param_values.ravel()
else:
raise pyrado.ShapeErr(given=domain_distr_param_values, expected_match=(1,))
# Reconfigure the wrapped environment's DomainRandomizer
for i, value in enumerate(domain_distr_param_values):
dp_name, ddp_name = self.dp_mapping.get(i)
self._wrapped_env.randomizer.adapt_one_distr_param(dp_name, ddp_name, value)
class DomainRandWrapperLive(DomainRandWrapper, Serializable):
"""
Domain randomization wrapper which randomized the wrapped env at every reset.
Thus every rollout is done with different domain parameters.
"""
def reset(self, init_state: np.ndarray = None, domain_param: dict = None) -> np.ndarray:
if domain_param is None:
# No explicit specification of domain parameters, so randomizer is called to draw a parameter dict
self._randomizer.randomize(num_samples=1)
domain_param = self._randomizer.get_params(fmt="dict", dtype="numpy")
# Forward to EnvWrapper, which delegates to self._wrapped_env
return super().reset(init_state=init_state, domain_param=domain_param)
class DomainRandWrapperBuffer(DomainRandWrapper, Serializable):
"""
Domain randomization wrapper which randomized the wrapped env using a buffer of domain parameter sets.
At every call of the reset method this wrapper cycles through that buffer.
"""
def __init__(self, wrapped_env, randomizer: Optional[DomainRandomizer], selection: Optional[str] = "cyclic"):
"""
Constructor
:param wrapped_env: environment to wrap around
:param randomizer: `DomainRandomizer` object that manages the randomization. If `None`, the user has to set the
buffer manually, the circular reset however works the same way
:param selection: method to draw samples from the buffer, either cyclic or random
"""
if selection not in ["cyclic", "random"]:
raise pyrado.ValueErr(given=selection, eq_constraint="cyclic or random")
Serializable._init(self, locals())
# Invoke the DomainRandWrapper's constructor
super().__init__(wrapped_env, randomizer)
self._ring_idx = None
self._buffer = None
self.selection = selection
@property
def ring_idx(self) -> int:
"""Get the buffer's index."""
return self._ring_idx
@ring_idx.setter
def ring_idx(self, idx: int):
"""Set the buffer's index."""
if not (isinstance(idx, int) or not 0 <= idx < len(self._buffer)):
raise pyrado.ValueErr(given=idx, ge_constraint="0 (int)", l_constraint=len(self._buffer))
self._ring_idx = idx
@property
def selection(self) -> str:
"""Get the selection method."""
return self._selection
@selection.setter
def selection(self, selection: str):
"""Set the selection method."""
if selection not in ["cyclic", "random"]:
raise pyrado.ValueErr(given=selection, eq_constraint="cyclic or random")
self._selection = selection
def fill_buffer(self, num_domains: int):
"""
Fill the internal buffer with domains.
:param num_domains: number of randomized domain parameter sets to store in the buffer
"""
if self._randomizer is None:
raise pyrado.TypeErr(msg="The randomizer must not be None to call fill_buffer()!")
if not isinstance(num_domains, int) or num_domains < 0:
raise pyrado.ValueErr(given=num_domains, g_constraint="0 (int)")
self._randomizer.randomize(num_domains)
self._buffer = self._randomizer.get_params(-1, fmt="list", dtype="numpy")
self._ring_idx = 0
@property
def buffer(self):
"""Get the domain parameter buffer."""
return self._buffer
@buffer.setter
def buffer(self, buffer: Union[List[dict], dict]):
"""
Set the domain parameter buffer.
Depends on the way the buffer has been saved, see the `DomainRandomizer.get_params()` arguments.
:param buffer: list of dicts, each describing a domain ,or just one dict for one domain
"""
if not (isinstance(buffer, list) or isinstance(buffer, dict)):
raise pyrado.TypeErr(given=buffer, expected_type=[list, dict])
self._buffer = buffer
def reset(self, init_state: np.ndarray = None, domain_param: dict = None) -> np.ndarray:
if domain_param is None:
# No explicit specification of domain parameters, so randomizer is requested
if isinstance(self._buffer, dict):
# The buffer consists of one domain parameter set
domain_param = self._buffer
elif isinstance(self._buffer, list):
# The buffer |
self.dp_mapping = dp_mapping
@property | random_line_split |
domain_randomization.py | distribution of all randomizable
domain parameters, pass `None` if you want to subclass wrapping another `DomainRandWrapper`
and use its randomizer
"""
if not isinstance(inner_env(wrapped_env), SimEnv):
raise pyrado.TypeErr(given=wrapped_env, expected_type=SimEnv)
if not isinstance(randomizer, DomainRandomizer) and randomizer is not None:
raise pyrado.TypeErr(given=randomizer, expected_type=DomainRandomizer)
Serializable._init(self, locals())
# Invoke EnvWrapper's constructor
super().__init__(wrapped_env)
self._randomizer = randomizer
@property
def randomizer(self) -> DomainRandomizer:
return self._randomizer
@randomizer.setter
def randomizer(self, randomizer: DomainRandomizer):
if not isinstance(randomizer, DomainRandomizer):
raise pyrado.TypeErr(given=randomizer, expected_type=DomainRandomizer)
self._randomizer = randomizer
class MetaDomainRandWrapper(DomainRandWrapper, Serializable):
"""
Domain randomization wrapper which wraps another `DomainRandWrapper` to adapt its parameters,
called domain distribution parameters.
"""
def __init__(self, wrapped_rand_env: DomainRandWrapper, dp_mapping: Mapping[int, Tuple[str, str]]):
"""
Constructor
:param wrapped_rand_env: randomized environment to wrap
:param dp_mapping: mapping from index of the numpy array (coming from the algorithm) to domain parameter name
(e.g. mass, length) and the domain distribution parameter (e.g. mean, std)
.. code-block:: python
# For the mapping arg use the this dict constructor
```
m = {0: ('name1', 'parameter_type1'), 1: ('name2', 'parameter_type2')}
```
"""
if not isinstance(wrapped_rand_env, DomainRandWrapper):
raise pyrado.TypeErr(given=wrapped_rand_env, expected_type=DomainRandWrapper)
Serializable._init(self, locals())
# Invoke the DomainRandWrapper's constructor
super().__init__(wrapped_rand_env, None)
self.dp_mapping = dp_mapping
@property
def randomizer(self) -> DomainRandomizer:
# Forward to the wrapped DomainRandWrapper
return self._wrapped_env.randomizer
@randomizer.setter
def randomizer(self, dr: DomainRandomizer):
# Forward to the wrapped DomainRandWrapper
self._wrapped_env.randomizer = dr
def adapt_randomizer(self, domain_distr_param_values: np.ndarray):
# Check the input dimension and reshape if necessary
if domain_distr_param_values.ndim == 1:
pass
elif domain_distr_param_values.ndim == 2:
domain_distr_param_values = domain_distr_param_values.ravel()
else:
raise pyrado.ShapeErr(given=domain_distr_param_values, expected_match=(1,))
# Reconfigure the wrapped environment's DomainRandomizer
for i, value in enumerate(domain_distr_param_values):
dp_name, ddp_name = self.dp_mapping.get(i)
self._wrapped_env.randomizer.adapt_one_distr_param(dp_name, ddp_name, value)
class DomainRandWrapperLive(DomainRandWrapper, Serializable):
"""
Domain randomization wrapper which randomized the wrapped env at every reset.
Thus every rollout is done with different domain parameters.
"""
def reset(self, init_state: np.ndarray = None, domain_param: dict = None) -> np.ndarray:
if domain_param is None:
# No explicit specification of domain parameters, so randomizer is called to draw a parameter dict
self._randomizer.randomize(num_samples=1)
domain_param = self._randomizer.get_params(fmt="dict", dtype="numpy")
# Forward to EnvWrapper, which delegates to self._wrapped_env
return super().reset(init_state=init_state, domain_param=domain_param)
class DomainRandWrapperBuffer(DomainRandWrapper, Serializable):
"""
Domain randomization wrapper which randomized the wrapped env using a buffer of domain parameter sets.
At every call of the reset method this wrapper cycles through that buffer.
"""
def __init__(self, wrapped_env, randomizer: Optional[DomainRandomizer], selection: Optional[str] = "cyclic"):
"""
Constructor
:param wrapped_env: environment to wrap around
:param randomizer: `DomainRandomizer` object that manages the randomization. If `None`, the user has to set the
buffer manually, the circular reset however works the same way
:param selection: method to draw samples from the buffer, either cyclic or random
"""
if selection not in ["cyclic", "random"]:
raise pyrado.ValueErr(given=selection, eq_constraint="cyclic or random")
Serializable._init(self, locals())
# Invoke the DomainRandWrapper's constructor
super().__init__(wrapped_env, randomizer)
self._ring_idx = None
self._buffer = None
self.selection = selection
@property
def ring_idx(self) -> int:
"""Get the buffer's index."""
return self._ring_idx
@ring_idx.setter
def ring_idx(self, idx: int):
"""Set the buffer's index."""
if not (isinstance(idx, int) or not 0 <= idx < len(self._buffer)):
raise pyrado.ValueErr(given=idx, ge_constraint="0 (int)", l_constraint=len(self._buffer))
self._ring_idx = idx
@property
def selection(self) -> str:
"""Get the selection method."""
return self._selection
@selection.setter
def selection(self, selection: str):
"""Set the selection method."""
if selection not in ["cyclic", "random"]:
raise pyrado.ValueErr(given=selection, eq_constraint="cyclic or random")
self._selection = selection
def fill_buffer(self, num_domains: int):
"""
Fill the internal buffer with domains.
:param num_domains: number of randomized domain parameter sets to store in the buffer
"""
if self._randomizer is None:
raise pyrado.TypeErr(msg="The randomizer must not be None to call fill_buffer()!")
if not isinstance(num_domains, int) or num_domains < 0:
raise pyrado.ValueErr(given=num_domains, g_constraint="0 (int)")
self._randomizer.randomize(num_domains)
self._buffer = self._randomizer.get_params(-1, fmt="list", dtype="numpy")
self._ring_idx = 0
@property
def buffer(self):
"""Get the domain parameter buffer."""
return self._buffer
@buffer.setter
def buffer(self, buffer: Union[List[dict], dict]):
"""
Set the domain parameter buffer.
Depends on the way the buffer has been saved, see the `DomainRandomizer.get_params()` arguments.
:param buffer: list of dicts, each describing a domain ,or just one dict for one domain
"""
if not (isinstance(buffer, list) or isinstance(buffer, dict)):
raise pyrado.TypeErr(given=buffer, expected_type=[list, dict])
self._buffer = buffer
def reset(self, init_state: np.ndarray = None, domain_param: dict = None) -> np.ndarray:
if domain_param is None:
# No explicit specification of domain parameters, so randomizer is requested
if isinstance(self._buffer, dict):
# The buffer consists of one domain parameter set
domain_param = self._buffer
elif isinstance(self._buffer, list):
# The buffer consists of a list of domain parameter sets
domain_param = self._buffer[self._ring_idx] # first selection will be index 0
if self._selection == "cyclic":
self._ring_idx = (self._ring_idx + 1) % len(self._buffer)
elif self._selection == "random":
self._ring_idx = randint(0, len(self._buffer) - 1)
else:
raise pyrado.TypeErr(given=self._buffer, expected_type=[dict, list])
# Forward to EnvWrapper, which delegates to self._wrapped_env
return super().reset(init_state=init_state, domain_param=domain_param)
def _get_state(self, state_dict: dict):
super()._get_state(state_dict)
state_dict["buffer"] = self._buffer
state_dict["ring_idx"] = self._ring_idx
def _set_state(self, state_dict: dict, copying: bool = False):
super()._set_state(state_dict, copying)
self._buffer = state_dict["buffer"]
self._ring_idx = state_dict["ring_idx"]
def remove_all_dr_wrappers(env: Env, verbose: bool = False):
"""
Go through the environment chain and remove all wrappers of type `DomainRandWrapper` (and subclasses).
:param env: env chain with domain randomization wrappers
:param verbose: choose if status messages should be printed
:return: env chain without domain randomization wrappers
"""
while any(isinstance(subenv, DomainRandWrapper) for subenv in all_envs(env)):
if verbose:
with completion_context(
f"Found domain randomization wrapper of type {type(env).__name__}. Removing it now",
color="y",
bright=True,
):
env = remove_env(env, DomainRandWrapper)
else:
| env = remove_env(env, DomainRandWrapper) | conditional_block | |
domain_randomization.py | to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL FABIO MURATORE, HONDA RESEARCH INSTITUTE EUROPE GMBH,
# OR TECHNICAL UNIVERSITY OF DARMSTADT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from random import randint
from typing import List, Mapping, Optional, Tuple, Union
import numpy as np
from init_args_serializer import Serializable
import pyrado
from pyrado.domain_randomization.domain_randomizer import DomainRandomizer
from pyrado.environment_wrappers.base import EnvWrapper
from pyrado.environment_wrappers.utils import all_envs, inner_env, remove_env
from pyrado.environments.base import Env
from pyrado.environments.sim_base import SimEnv
from pyrado.utils.input_output import completion_context, print_cbt
class DomainRandWrapper(EnvWrapper, Serializable):
"""Base class for environment wrappers which call a `DomainRandomizer` to randomize the domain parameters"""
def __init__(self, wrapped_env: Union[SimEnv, EnvWrapper], randomizer: Optional[DomainRandomizer]):
"""
Constructor
:param wrapped_env: environment to wrap
:param randomizer: `DomainRandomizer` object holding the probability distribution of all randomizable
domain parameters, pass `None` if you want to subclass wrapping another `DomainRandWrapper`
and use its randomizer
"""
if not isinstance(inner_env(wrapped_env), SimEnv):
raise pyrado.TypeErr(given=wrapped_env, expected_type=SimEnv)
if not isinstance(randomizer, DomainRandomizer) and randomizer is not None:
raise pyrado.TypeErr(given=randomizer, expected_type=DomainRandomizer)
Serializable._init(self, locals())
# Invoke EnvWrapper's constructor
super().__init__(wrapped_env)
self._randomizer = randomizer
@property
def randomizer(self) -> DomainRandomizer:
return self._randomizer
@randomizer.setter
def randomizer(self, randomizer: DomainRandomizer):
if not isinstance(randomizer, DomainRandomizer):
raise pyrado.TypeErr(given=randomizer, expected_type=DomainRandomizer)
self._randomizer = randomizer
class MetaDomainRandWrapper(DomainRandWrapper, Serializable):
"""
Domain randomization wrapper which wraps another `DomainRandWrapper` to adapt its parameters,
called domain distribution parameters.
"""
def __init__(self, wrapped_rand_env: DomainRandWrapper, dp_mapping: Mapping[int, Tuple[str, str]]):
"""
Constructor
:param wrapped_rand_env: randomized environment to wrap
:param dp_mapping: mapping from index of the numpy array (coming from the algorithm) to domain parameter name
(e.g. mass, length) and the domain distribution parameter (e.g. mean, std)
.. code-block:: python
# For the mapping arg use the this dict constructor
```
m = {0: ('name1', 'parameter_type1'), 1: ('name2', 'parameter_type2')}
```
"""
if not isinstance(wrapped_rand_env, DomainRandWrapper):
raise pyrado.TypeErr(given=wrapped_rand_env, expected_type=DomainRandWrapper)
Serializable._init(self, locals())
# Invoke the DomainRandWrapper's constructor
super().__init__(wrapped_rand_env, None)
self.dp_mapping = dp_mapping
@property
def randomizer(self) -> DomainRandomizer:
# Forward to the wrapped DomainRandWrapper
return self._wrapped_env.randomizer
@randomizer.setter
def randomizer(self, dr: DomainRandomizer):
# Forward to the wrapped DomainRandWrapper
self._wrapped_env.randomizer = dr
def adapt_randomizer(self, domain_distr_param_values: np.ndarray):
# Check the input dimension and reshape if necessary
if domain_distr_param_values.ndim == 1:
pass
elif domain_distr_param_values.ndim == 2:
domain_distr_param_values = domain_distr_param_values.ravel()
else:
raise pyrado.ShapeErr(given=domain_distr_param_values, expected_match=(1,))
# Reconfigure the wrapped environment's DomainRandomizer
for i, value in enumerate(domain_distr_param_values):
dp_name, ddp_name = self.dp_mapping.get(i)
self._wrapped_env.randomizer.adapt_one_distr_param(dp_name, ddp_name, value)
class DomainRandWrapperLive(DomainRandWrapper, Serializable):
"""
Domain randomization wrapper which randomized the wrapped env at every reset.
Thus every rollout is done with different domain parameters.
"""
def reset(self, init_state: np.ndarray = None, domain_param: dict = None) -> np.ndarray:
if domain_param is None:
# No explicit specification of domain parameters, so randomizer is called to draw a parameter dict
self._randomizer.randomize(num_samples=1)
domain_param = self._randomizer.get_params(fmt="dict", dtype="numpy")
# Forward to EnvWrapper, which delegates to self._wrapped_env
return super().reset(init_state=init_state, domain_param=domain_param)
class DomainRandWrapperBuffer(DomainRandWrapper, Serializable):
"""
Domain randomization wrapper which randomized the wrapped env using a buffer of domain parameter sets.
At every call of the reset method this wrapper cycles through that buffer.
"""
def __init__(self, wrapped_env, randomizer: Optional[DomainRandomizer], selection: Optional[str] = "cyclic"):
"""
Constructor
:param wrapped_env: environment to wrap around
:param randomizer: `DomainRandomizer` object that manages the randomization. If `None`, the user has to set the
buffer manually, the circular reset however works the same way
:param selection: method to draw samples from the buffer, either cyclic or random
"""
if selection not in ["cyclic", "random"]:
raise pyrado.ValueErr(given=selection, eq_constraint="cyclic or random")
Serializable._init(self, locals())
# Invoke the DomainRandWrapper's constructor
super().__init__(wrapped_env, randomizer)
self._ring_idx = None
self._buffer = None
self.selection = selection
@property
def ring_idx(self) -> int:
    """Current position of the cyclic pointer into the domain parameter buffer."""
    return self._ring_idx
@ring_idx.setter
def ring_idx(self, idx: int):
    """
    Set the buffer's index.

    :param idx: new index; must be an `int` within `[0, len(buffer))`
    :raises pyrado.ValueErr: if `idx` is not an int or lies outside the buffer's bounds
    """
    # Bug fix: the original condition `not (isinstance(idx, int) or not 0 <= idx < len(...))`
    # de-Morgans to `not isinstance(idx, int) and 0 <= idx < len(...)`, i.e. it raised only for
    # non-int values that happened to be in range and silently accepted out-of-range ints.
    # The intended contract is: idx must be an int AND within the buffer's bounds.
    if not isinstance(idx, int) or not 0 <= idx < len(self._buffer):
        raise pyrado.ValueErr(given=idx, ge_constraint="0 (int)", l_constraint=len(self._buffer))
    self._ring_idx = idx
@property
def selection(self) -> str:
    """Strategy used to pick parameter sets from the buffer ('cyclic' or 'random')."""
    return self._selection
@selection.setter
def selection(self, selection: str):
    """Set the selection method; only 'cyclic' and 'random' are accepted."""
    if selection in ("cyclic", "random"):
        self._selection = selection
    else:
        raise pyrado.ValueErr(given=selection, eq_constraint="cyclic or random")
def fill_buffer(self, num_domains: int):
    """
    Fill the internal buffer with domains.

    :param num_domains: number of randomized domain parameter sets to store in the buffer
    :raises pyrado.TypeErr: if no randomizer was provided at construction time
    :raises pyrado.ValueErr: if `num_domains` is not a non-negative int
    """
    if self._randomizer is None:
        raise pyrado.TypeErr(msg="The randomizer must not be None to call fill_buffer()!")
    # Consistency fix: the guard accepts num_domains == 0 (only negatives raise), so the error
    # must advertise a >= 0 constraint; the original used g_constraint (strictly greater than 0).
    if not isinstance(num_domains, int) or num_domains < 0:
        raise pyrado.ValueErr(given=num_domains, ge_constraint="0 (int)")
    self._randomizer.randomize(num_domains)
    # Fetch all drawn parameter sets (-1) as a list of numpy-valued dicts and rewind the pointer.
    self._buffer = self._randomizer.get_params(-1, fmt="list", dtype="numpy")
    self._ring_idx = 0
@property
def buffer(self):
    """The stored domain parameter buffer (list of dicts, or a single dict)."""
    return self._buffer
@buffer.setter
def buffer(self, buffer: Union[List[dict], dict]):
    """
    Set the domain parameter buffer.

    Depends on the way the buffer has been saved, see the `DomainRandomizer.get_params()` arguments.

    :param buffer: list of dicts, each describing a domain, or just one dict for one domain
    """
    if not isinstance(buffer, (list, dict)):
        raise pyrado.TypeErr(given=buffer, expected_type=[list, dict])
    self._buffer = buffer
def | (self, init_state: np.ndarray = None, domain_param: dict = None) -> np.ndarray:
if domain_param is None:
# No explicit specification of domain parameters, so randomizer is requested
if isinstance(self._buffer, dict):
# The buffer consists of one domain parameter set
domain_param = self._buffer
elif isinstance(self._buffer, list):
# The | reset | identifier_name |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.