file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
script.js | ///IMPORTS-----------------------------------------
import screens from './components/screens.js'
import PausedTimeout from './functions/pausedTimeout.js'
import map1 from './maps/map1.js'
import map2 from './maps/map2.js'
import map3 from './maps/map3.js'
import map4 from './maps/map4.js'
import map5 from './maps/map5.js'
///GLOBAL VARIABLES-----------------------------------------
//values for car position
window.gameArea = document.getElementById('game-area')
//storing the intervals
window.objIntervals = {
}
window.myIntervals = {
moving:null,
rotating:null,
}
window.myIntervalValues = {
moving:50,
rotating:50,
}
window.timeouts = {
}
window.maps = [
map1,
map2,
map3,
map4,
map5
]
window.state = {
paused:true,
gameStart:true,
crashed:false,
completed:false,
mapIndex:0,
}
///GLOBAL METHODS-----------------------------------------
window.killScreen = function(){
let classList = document.getElementById('screen').classList
classList.add('fadeoutslide')
classList.remove('fadeinslide')
state.paused = false
state.gameStart = false
setTimeout(()=>{
document.getElementById('screen').remove()
},600)
}
window.rotationRatio = function(){
let ratio = (rotationAngle%360)/360
if(ratio < 0 ) ratio=Math.abs(ratio + 1)
return ratio
}
window.PausedTimeout = PausedTimeout
window.pauseTimeouts = function(){
for(let timeout in timeouts){
timeouts[timeout].pauseTimeout()
}
}
window.resumeTimeouts = function(){
for(let timeout in timeouts){
timeouts[timeout].resumeTimeout()
}
}
| timeouts[timeout].stopTimeout()
delete timeouts[timeout]
}
}
window.rotationPercentage = function(){
let ratio
(rotationAngle%360)/360 < 0 ? ratio = Math.abs((rotationAngle%360)/360 + 1) : ratio = (rotationAngle%360)/360
if(ratio >= 0.5) ratio = (1 - ratio)
ratio*=4
if(ratio >1) ratio = 1 - (ratio - 1)
return ratio
}
window.handleCrash = function(){
clearIntervals()
screens({
title:'You crashed!',
content:"Play again?",
button:"Continue",
})
state.paused=true
state.crashed=true
return null
}
//keys for multiple key listeners
let activeKeys={}
let crashed = false
//FUNCTIONS-----------------------------------------
function clearIntervals(){
clearInterval(myIntervals.moving)
myIntervals.moving = null
clearInterval(myIntervals.rotating)
myIntervals.rotating = null
deleteKeys()
return null
}
function deleteKeys(){
for(let key in activeKeys){
delete activeKeys[key]
}
}
function handleVictory(){
clearIntervals()
screens({
title:'Goal reached!',
content:"",
button:"Next Level",
})
state.paused=true
state.completed=true
}
var crashListener = setInterval(()=>{
if( checkCrash() ){
handleCrash()
clearTimeout(crashListener)
}
},50)
function checkVictory(){
const point = document.getElementById('victory-point')
//the values should adjust for what percentage to the side the car has rotated
let ratio = rotationPercentage()
//If the car is fully rotated to the side, tbe difference will be 25 pixels less to top, so 25px should be added.
if(
//from bottom to to p
(yPosition + (25*ratio) ) < (point.offsetTop + point.offsetHeight) &&
(yPosition + (25*ratio)) > point.offsetTop &&
(xPosition + 40) > point.offsetLeft &&
xPosition < (point.offsetLeft + point.offsetWidth)
){
return true
}else{
return false
}
}
function checkCrash(){
if(state.paused) return
let ratio = rotationPercentage()
function checkBoundaries(){
if(
(yPosition + (25 * ratio) ) < 0 | //TOP
((yPosition + mycar.offsetHeight) - (25 * ratio) ) > gameArea.offsetHeight | //BOTTOM
(xPosition - (25 * ratio) ) < 0 | //LEFT
((xPosition + mycar.offsetWidth) + (25 * ratio) ) > gameArea.offsetWidth //RIGHT
){
return true
}
}
const calcPoints = {
center(){
return ((mycar.offsetTop + mycar.offsetHeight)/2 + (mycar.offsetLeft + mycar.offsetWidth)/2)
},
topleft(){
return{
x:1,
y:1
}
}
}
function checkForeignObjects(){
let crashed = false
document.querySelectorAll('[crashable="true"]').forEach(crashable=>{
let foreignRatio, foreignRotation;
if(crashable.style.transform){
//this only works because rotateZ is the only transform applied
foreignRotation = parseInt(crashable.style.transform.match(/[0-9]+/));
//this tests if the foreign object is rotated
(foreignRotation%360)/360 < 0 ? foreignRatio = Math.abs((foreignRotation%360)/360 + 1) : foreignRatio = (foreignRotation%360)/360
if(foreignRatio >= 0.5) foreignRatio = (1 - foreignRatio)
foreignRatio*=4
if(foreignRatio >1) foreignRatio = 1 - (foreignRatio - 1)
}else{
foreignRatio = 0
}
//defines boundaries, adjusts for rotation
let top =(crashable.offsetTop + crashable.offsetHeight)
let bottom = crashable.offsetTop
let left = (crashable.offsetLeft+crashable.offsetWidth)
let right = crashable.offsetLeft
let difference = (crashable.offsetHeight - crashable.offsetWidth) /2
//tests the values
if(
(yPosition + (25 * ratio) ) < top - (difference * foreignRatio) && //INTO BOTTOM
((yPosition + mycar.offsetHeight) - (25 * ratio) ) > bottom + (difference * foreignRatio) && //INTO TOP
(xPosition - (25 * ratio) ) < left + (difference * foreignRatio) && //INTO LEFT
((xPosition + mycar.offsetWidth) + (25 * ratio) ) > right - (difference * foreignRatio) //INTO RIGHT
){
crashed = true
}
})
return crashed
}
if( checkBoundaries() | checkForeignObjects() ) return true
}
function move(isForward){
myIntervals.moving = setInterval(()=>{
if(state.paused) return
let ratio = (rotationAngle%360)/360
if(ratio < 0 ) ratio=Math.abs(ratio + 1)
let ratio2 = (10 * (ratio*4))
if(ratio2 > 20) ratio2 -= 2*(ratio2 - 20)
let ratio3 = (10 * (ratio*4))
if(ratio3 > 10 && ratio3 < 30) ratio3 -= 2*(ratio3 - 10)
else if(ratio3 >= 30) ratio3 -=40
if(isForward){
yPosition -= (10 - ratio2)
xPosition += ratio3
}else{
yPosition += (10 - ratio2)
xPosition -=ratio3
}
if( checkVictory() ) return handleVictory()
mycar.style.top=`${yPosition}px`
mycar.style.left=`${xPosition}px`
},myIntervalValues.moving)
}
//EVENT LISTENERS ---------------------------------------------------
window.initListeners = function(){
document.addEventListener('keypress',e=>{
//WHEN YOU PRESS THE SPACEBAR
if(e.keyCode==32){
//PAUSES GAME
if(!state.paused){
screens({
title:'Paused',
content:'Press space to continue.',
})
state.paused = true
clearIntervals()
pauseTimeouts()
}else{
killScreen()
resumeTimeouts()
//ADDITIONAL OPTIONS IF SPACEBAR IS PRESSED
if(state.crashed){
destroyTimeouts()
state.crashed=false
maps[state.mapIndex].reset()
return crashListener = setInterval(()=>{
if( checkCrash() ){
handleCrash()
clearTimeout(crashListener)
}
},50)
}
if(state.completed){
state.completed = false
maps[state.mapIndex].destroy()
state.mapIndex++
return maps[state.mapIndex].init()
}
}
}
})
//WHEN YOU PRESS ANY OTHER KEY
document.addEventListener('keydown',function handleKeyDown(e){
1
//38: top arrow....39 right arrow..... 40 bottom arrow... 37 left arrow
//16: shift, 32: spacebar
activeKeys[e.keyCode]=e.keyCode
// console.log(e.keyCode)
for(let key in activeKeys){
//toggle headlights
if(key==16){
document.querySelectorAll('#my-car .headlight').forEach(element=>{
if(!element.classList.contains('highbeams-in')){
element.classList.add('highbeams-in')
element.classList.remove('highbeams-out')
}
else{
element.classList.remove('highbeams-in')
element.classList.add('highbeams-out')
}
})
}
//move forward
if(key==38&&!myIntervals.moving){
if(state.paused) return
move(true)
}
//move backward
if(key==40&&!myIntervals.moving){
if(state.paused) return
move(false)
}
//rotate left
if(key==37&&!myIntervals.rotating){
if(state.paused) return
myIntervals.rotating = setInterval(()=>{
if(state.paused) return
rotationAngle-=10
mycar.style.transform = `rotateZ(${rotationAngle}deg)`
},myIntervalValues.rotating)
}
//rotate right
if(key==39&&!myIntervals.rotating){
if(state.paused) return
myIntervals.rotating = setInterval(()=>{
if(state.paused) return
rotationAngle+=10
mycar.style.transform = `rotateZ(${rotationAngle}deg)`
},myIntervalValues.rotating)
}
}
})
document.addEventListener('keyup',function handleKeyUp(e){
if(state.paused) return
delete activeKeys[e.keyCode]
if(e.keyCode==38|e.keyCode==40){
clearInterval(myIntervals.moving)
myIntervals.moving = null
}
if(e.keyCode==37|e.keyCode==39){
clearInterval(myIntervals.rotating)
myIntervals.rotating = null
}
})
}
//INITIALIZATION ---------------------------------------------------
maps[state.mapIndex].init()
initListeners()
screens({
title:'Road Whiz',
content:'Can you survive the challenges?',
button:'Continue'
}) | window.destroyTimeouts = function(){
for(let timeout in timeouts){ | random_line_split |
script.js | ///IMPORTS-----------------------------------------
import screens from './components/screens.js'
import PausedTimeout from './functions/pausedTimeout.js'
import map1 from './maps/map1.js'
import map2 from './maps/map2.js'
import map3 from './maps/map3.js'
import map4 from './maps/map4.js'
import map5 from './maps/map5.js'
///GLOBAL VARIABLES-----------------------------------------
//values for car position
window.gameArea = document.getElementById('game-area')
//storing the intervals
window.objIntervals = {
}
window.myIntervals = {
moving:null,
rotating:null,
}
window.myIntervalValues = {
moving:50,
rotating:50,
}
window.timeouts = {
}
window.maps = [
map1,
map2,
map3,
map4,
map5
]
window.state = {
paused:true,
gameStart:true,
crashed:false,
completed:false,
mapIndex:0,
}
///GLOBAL METHODS-----------------------------------------
window.killScreen = function(){
let classList = document.getElementById('screen').classList
classList.add('fadeoutslide')
classList.remove('fadeinslide')
state.paused = false
state.gameStart = false
setTimeout(()=>{
document.getElementById('screen').remove()
},600)
}
window.rotationRatio = function(){
let ratio = (rotationAngle%360)/360
if(ratio < 0 ) ratio=Math.abs(ratio + 1)
return ratio
}
window.PausedTimeout = PausedTimeout
window.pauseTimeouts = function(){
for(let timeout in timeouts){
timeouts[timeout].pauseTimeout()
}
}
window.resumeTimeouts = function(){
for(let timeout in timeouts){
timeouts[timeout].resumeTimeout()
}
}
window.destroyTimeouts = function(){
for(let timeout in timeouts){
timeouts[timeout].stopTimeout()
delete timeouts[timeout]
}
}
window.rotationPercentage = function(){
let ratio
(rotationAngle%360)/360 < 0 ? ratio = Math.abs((rotationAngle%360)/360 + 1) : ratio = (rotationAngle%360)/360
if(ratio >= 0.5) ratio = (1 - ratio)
ratio*=4
if(ratio >1) ratio = 1 - (ratio - 1)
return ratio
}
window.handleCrash = function(){
clearIntervals()
screens({
title:'You crashed!',
content:"Play again?",
button:"Continue",
})
state.paused=true
state.crashed=true
return null
}
//keys for multiple key listeners
let activeKeys={}
let crashed = false
//FUNCTIONS-----------------------------------------
function clearIntervals(){
clearInterval(myIntervals.moving)
myIntervals.moving = null
clearInterval(myIntervals.rotating)
myIntervals.rotating = null
deleteKeys()
return null
}
function deleteKeys(){
for(let key in activeKeys){
delete activeKeys[key]
}
}
function handleVictory(){
clearIntervals()
screens({
title:'Goal reached!',
content:"",
button:"Next Level",
})
state.paused=true
state.completed=true
}
var crashListener = setInterval(()=>{
if( checkCrash() ){
handleCrash()
clearTimeout(crashListener)
}
},50)
function checkVictory(){
const point = document.getElementById('victory-point')
//the values should adjust for what percentage to the side the car has rotated
let ratio = rotationPercentage()
//If the car is fully rotated to the side, tbe difference will be 25 pixels less to top, so 25px should be added.
if(
//from bottom to to p
(yPosition + (25*ratio) ) < (point.offsetTop + point.offsetHeight) &&
(yPosition + (25*ratio)) > point.offsetTop &&
(xPosition + 40) > point.offsetLeft &&
xPosition < (point.offsetLeft + point.offsetWidth)
){
return true
}else{
return false
}
}
function checkCrash(){
if(state.paused) return
let ratio = rotationPercentage()
function checkBoundaries(){
if(
(yPosition + (25 * ratio) ) < 0 | //TOP
((yPosition + mycar.offsetHeight) - (25 * ratio) ) > gameArea.offsetHeight | //BOTTOM
(xPosition - (25 * ratio) ) < 0 | //LEFT
((xPosition + mycar.offsetWidth) + (25 * ratio) ) > gameArea.offsetWidth //RIGHT
){
return true
}
}
const calcPoints = {
center(){
return ((mycar.offsetTop + mycar.offsetHeight)/2 + (mycar.offsetLeft + mycar.offsetWidth)/2)
},
topleft(){
return{
x:1,
y:1
}
}
}
function checkForeignObjects(){
let crashed = false
document.querySelectorAll('[crashable="true"]').forEach(crashable=>{
let foreignRatio, foreignRotation;
if(crashable.style.transform){
//this only works because rotateZ is the only transform applied
foreignRotation = parseInt(crashable.style.transform.match(/[0-9]+/));
//this tests if the foreign object is rotated
(foreignRotation%360)/360 < 0 ? foreignRatio = Math.abs((foreignRotation%360)/360 + 1) : foreignRatio = (foreignRotation%360)/360
if(foreignRatio >= 0.5) foreignRatio = (1 - foreignRatio)
foreignRatio*=4
if(foreignRatio >1) foreignRatio = 1 - (foreignRatio - 1)
}else{
foreignRatio = 0
}
//defines boundaries, adjusts for rotation
let top =(crashable.offsetTop + crashable.offsetHeight)
let bottom = crashable.offsetTop
let left = (crashable.offsetLeft+crashable.offsetWidth)
let right = crashable.offsetLeft
let difference = (crashable.offsetHeight - crashable.offsetWidth) /2
//tests the values
if(
(yPosition + (25 * ratio) ) < top - (difference * foreignRatio) && //INTO BOTTOM
((yPosition + mycar.offsetHeight) - (25 * ratio) ) > bottom + (difference * foreignRatio) && //INTO TOP
(xPosition - (25 * ratio) ) < left + (difference * foreignRatio) && //INTO LEFT
((xPosition + mycar.offsetWidth) + (25 * ratio) ) > right - (difference * foreignRatio) //INTO RIGHT
){
crashed = true
}
})
return crashed
}
if( checkBoundaries() | checkForeignObjects() ) return true
}
function move(isForward) |
//EVENT LISTENERS ---------------------------------------------------
window.initListeners = function(){
document.addEventListener('keypress',e=>{
//WHEN YOU PRESS THE SPACEBAR
if(e.keyCode==32){
//PAUSES GAME
if(!state.paused){
screens({
title:'Paused',
content:'Press space to continue.',
})
state.paused = true
clearIntervals()
pauseTimeouts()
}else{
killScreen()
resumeTimeouts()
//ADDITIONAL OPTIONS IF SPACEBAR IS PRESSED
if(state.crashed){
destroyTimeouts()
state.crashed=false
maps[state.mapIndex].reset()
return crashListener = setInterval(()=>{
if( checkCrash() ){
handleCrash()
clearTimeout(crashListener)
}
},50)
}
if(state.completed){
state.completed = false
maps[state.mapIndex].destroy()
state.mapIndex++
return maps[state.mapIndex].init()
}
}
}
})
//WHEN YOU PRESS ANY OTHER KEY
document.addEventListener('keydown',function handleKeyDown(e){
1
//38: top arrow....39 right arrow..... 40 bottom arrow... 37 left arrow
//16: shift, 32: spacebar
activeKeys[e.keyCode]=e.keyCode
// console.log(e.keyCode)
for(let key in activeKeys){
//toggle headlights
if(key==16){
document.querySelectorAll('#my-car .headlight').forEach(element=>{
if(!element.classList.contains('highbeams-in')){
element.classList.add('highbeams-in')
element.classList.remove('highbeams-out')
}
else{
element.classList.remove('highbeams-in')
element.classList.add('highbeams-out')
}
})
}
//move forward
if(key==38&&!myIntervals.moving){
if(state.paused) return
move(true)
}
//move backward
if(key==40&&!myIntervals.moving){
if(state.paused) return
move(false)
}
//rotate left
if(key==37&&!myIntervals.rotating){
if(state.paused) return
myIntervals.rotating = setInterval(()=>{
if(state.paused) return
rotationAngle-=10
mycar.style.transform = `rotateZ(${rotationAngle}deg)`
},myIntervalValues.rotating)
}
//rotate right
if(key==39&&!myIntervals.rotating){
if(state.paused) return
myIntervals.rotating = setInterval(()=>{
if(state.paused) return
rotationAngle+=10
mycar.style.transform = `rotateZ(${rotationAngle}deg)`
},myIntervalValues.rotating)
}
}
})
document.addEventListener('keyup',function handleKeyUp(e){
if(state.paused) return
delete activeKeys[e.keyCode]
if(e.keyCode==38|e.keyCode==40){
clearInterval(myIntervals.moving)
myIntervals.moving = null
}
if(e.keyCode==37|e.keyCode==39){
clearInterval(myIntervals.rotating)
myIntervals.rotating = null
}
})
}
//INITIALIZATION ---------------------------------------------------
maps[state.mapIndex].init()
initListeners()
screens({
title:'Road Whiz',
content:'Can you survive the challenges?',
button:'Continue'
})
| {
myIntervals.moving = setInterval(()=>{
if(state.paused) return
let ratio = (rotationAngle%360)/360
if(ratio < 0 ) ratio=Math.abs(ratio + 1)
let ratio2 = (10 * (ratio*4))
if(ratio2 > 20) ratio2 -= 2*(ratio2 - 20)
let ratio3 = (10 * (ratio*4))
if(ratio3 > 10 && ratio3 < 30) ratio3 -= 2*(ratio3 - 10)
else if(ratio3 >= 30) ratio3 -=40
if(isForward){
yPosition -= (10 - ratio2)
xPosition += ratio3
}else{
yPosition += (10 - ratio2)
xPosition -=ratio3
}
if( checkVictory() ) return handleVictory()
mycar.style.top=`${yPosition}px`
mycar.style.left=`${xPosition}px`
},myIntervalValues.moving)
} | identifier_body |
codon_usage.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 13 17:34:32 2019
@author: fanlizhou
Analyze codon usage of sequence from 'SP_gene_seq.txt' and 'LP_gene_seq.txt'
Plot heatmap of amino acid usage and codon usage
Plot codon usage in each gene for each amino acid. Genes were arranged so that
the gene expression of SP decrease from 0 to 50 (x-axis) and the gene expression
of LP increase from 51 to 100 (x-axis)
Usage: codon_usage.py [-h] [--label LABEL] sp_file lp_file
Options:
--label Define the label of out-put files. Default="top"
sp_file Path to the SP data files
lp_file Path to the LP data files
"""
import io, os, argparse, collections
from scipy import stats
import matplotlib.pyplot as plt
import numpy as np
def parse_args():
parser = argparse.ArgumentParser(description=
'Analyze codon usage of SP and LP\n')
parser.add_argument('sp_file', help = 'one input SP data file\n')
parser.add_argument('lp_file', help = 'one input LP data file\n')
parser.add_argument('--label', '-l',
type = str, required = False, default = 'top',
help = 'Define the label of out-put files. Default="top"\n')
args = parser.parse_args()
for path in [args.sp_file, args.lp_file]:
if not os.path.isfile(path):
parser.error('File "%s" cannot be found.' % (path))
return args
# a Codon_Usage class to store codon usage information for each genotype
class Codon_Usage:
def __init__(self, filename):
self.seq, self.gene_num = self.get_seq(filename)
def get_seq(self, filename):
file = io.open(filename)
# list of selected gene sequences, excluded genes that are non-triple
all_seq = []
gene_seq = ''
count_all = 0
count_non_triple = 0
for line in file:
# read a gene information line
if line[0]=='>':
count_all += 1
# if a gene has been read, then append it to all_seq if the
# sequence is triple
if gene_seq!='':
if len(gene_seq)%3:
count_non_triple += 1
else:
all_seq.append(gene_seq)
gene_seq = ''
# read a gene sequence line
else:
gene_seq += line.strip()
file.close()
print('%s:\n%d genes added\n%d are non-triple\n'%
(filename[:2],count_all, count_non_triple))
return (all_seq, count_all - count_non_triple)
def get_AA(self, codon):
# dict key: codon -> AA
codon_map = {
'TTT':'Phe', 'TTC':'Phe', 'TTA':'Leu', 'TTG':'Leu',
'TCT':'Ser', 'TCC':'Ser', 'TCA':'Ser', 'TCG':'Ser',
'TAT':'Tyr', 'TAC':'Tyr', 'TAA':'STOP', 'TAG':'STOP',
'TGT':'Cys', 'TGC':'Cys', 'TGA':'STOP', 'TGG':'Trp',
'CTT':'Leu', 'CTC':'Leu', 'CTA':'Leu', 'CTG':'Leu',
'CCT':'Pro', 'CCC':'Pro', 'CCA':'Pro', 'CCG':'Pro',
'CAT':'His', 'CAC':'His', 'CAA':'Gln', 'CAG':'Gln',
'CGT':'Arg', 'CGC':'Arg', 'CGA':'Arg', 'CGG':'Arg',
'ATT':'Ile', 'ATC':'Ile', 'ATA':'Ile', 'ATG':'Met',
'ACT':'Thr', 'ACC':'Thr', 'ACA':'Thr', 'ACG':'Thr',
'AAT':'Asn', 'AAC':'Asn', 'AAA':'Lys', 'AAG':'Lys',
'AGT':'Ser', 'AGC':'Ser', 'AGA':'Arg', 'AGG':'Arg',
'GTT':'Val', 'GTC':'Val', 'GTA':'Val', 'GTG':'Val',
'GCT':'Ala', 'GCC':'Ala', 'GCA':'Ala', 'GCG':'Ala',
'GAT':'Asp', 'GAC':'Asp', 'GAA':'Glu', 'GAG':'Glu',
'GGT':'Gly', 'GGC':'Gly', 'GGA':'Gly', 'GGG':'Gly'}
if codon in codon_map:
return codon_map[codon]
else:
return ''
def get_usage_dict(self, seq):
# usage_dict structure:
# dict key: AA -> [
# dict key: codon ->
# [codon_count,
# codon_count/AA_count]
# AA_count
# ]
usage_dict = \
collections.defaultdict(lambda:
[
collections.defaultdict(
lambda: [0, 0]),
0
])
# save AAs usage information
for index in range(0, len(seq), 3):
codon = seq[index:index+3]
AA = self.get_AA(codon)
if AA:
# count how many times the AA appears
usage_dict[AA][1] += 1
# count how many times the codon is used
usage_dict[AA][0][codon][0] += 1
# calculate the codon usage percentage for an AA
for AA in usage_dict:
for codon in usage_dict[AA][0]:
usage_dict[AA][0][codon][1] = \
usage_dict[AA][0][codon][0]/usage_dict[AA][1]
return usage_dict
def get_AA_dict(self):
# AA_dict structure:
# 1st dict key: AA -> 2nd dict key: codon -> a list of codon usage
# percentage of each gene
AA_dict = \
collections.defaultdict(
lambda:collections.defaultdict(list))
# dict key: AA -> codon list
AA_map = {
'Phe':['TTT', 'TTC'],
'Leu':['TTA', 'TTG', 'CTT', 'CTC', 'CTA', 'CTG'],
'Ser':['TCT', 'TCC', 'TCA', 'TCG', 'AGT', 'AGC'],
'Tyr':['TAT', 'TAC'],
'STOP':['TAA', 'TAG', 'TGA'],
'Cys':['TGT', 'TGC'],
'Trp':['TGG'],
'Pro':['CCT', 'CCC', 'CCA', 'CCG'],
'His':['CAT', 'CAC'],
'Gln':['CAA', 'CAG'],
'Arg':['CGT', 'CGC', 'CGA', 'CGG', 'AGA', 'AGG'],
'Ile':['ATT', 'ATC', 'ATA'],
'Met':['ATG'],
'Thr':['ACT', 'ACC', 'ACA', 'ACG'],
'Asn':['AAT', 'AAC'],
'Lys':['AAA', 'AAG'],
'Val':['GTT', 'GTC', 'GTA', 'GTG'],
'Ala':['GCT', 'GCC', 'GCA', 'GCG'],
'Asp':['GAT', 'GAC'],
'Glu':['GAA', 'GAG'],
'Gly':['GGT', 'GGC', 'GGA', 'GGG']
}
# list of codon usage for each gene
usage_dict_list = []
# get codon usage information for each gene
for seq in self.seq:
usage_dict_list.append(self.get_usage_dict(seq))
# get the list of codon usage percentage from each gene
for AA in list(AA_map.keys()):
for codon in AA_map[AA]:
# get codon usage information from each gene
for usage_dict in usage_dict_list:
# append codon usage percentage in the gene
AA_dict[AA][codon].append(
usage_dict[AA][0][codon][1])
return AA_dict
def heatmap_SP_LP(sp_AA_dict, lp_AA_dict, label):
# list of Chi-Square test results
AA_chisquare = []
# AA plotting annotation information
AA_text = []
# list of student's t-test results
codon_ttest = []
# codon plotting annotaion information
codon_text = []
i = 0
j = 0
# number of genes analyzed
count_all = 0
# number of genes that show significant results
count_sig = 0
for AA in list(sp_AA_dict.keys()):
# mean values of codon usage for each AA
sp_codon_mean = []
lp_codon_mean = []
for codon in sp_AA_dict[AA]:
# calculate ttest results
p_val = stats.ttest_ind(sp_AA_dict[AA][codon],
lp_AA_dict[AA][codon],
equal_var = False)[1]
# display eight codons in a row
if not i % 8:
codon_ttest.append([])
codon_text.append([])
i += 1
# handle NULL values
if np.isnan(p_val):
codon_ttest[-1].append(0)
codon_text[-1].append(codon + '\n NA')
# save ttest p-values and annotation information
else:
codon_ttest[-1].append(p_val)
codon_text[-1].append(codon + '\n' + str(round(p_val, 2)))
count_all += 1
if p_val < 0.5:
count_sig += 1
sp_codon_mean.append(np.mean(sp_AA_dict[AA][codon]))
lp_codon_mean.append(np.mean(lp_AA_dict[AA][codon]))
# get Chi-Square test results of each AA
p_val = stats.chisquare(np.array([sp_codon_mean, lp_codon_mean]),
axis = None)[1]
# display six AA in a row
if not j % 6:
AA_chisquare.append([])
AA_text.append([])
j += 1
# handle Null values
if np.isnan(p_val):
AA_chisquare[-1].append(0)
AA_text[-1].append(AA + '\n NA')
# save Chi-Square test p-values and annotation information
else:
AA_chisquare[-1].append(p_val)
AA_text[-1].append(AA + '\n' + str(round(p_val, 2)))
# handle empty cells
for n in range(j % 6, 6):
AA_chisquare[-1].append(0)
AA_text[-1].append('')
# get list of AAs that show significant difference between SP and LP groups
AAs = choose_codons(codon_ttest, codon_text)
AA_chisquare = np.array(AA_chisquare)
codon_ttest = np.array(codon_ttest)
AA_text = np.array(AA_text)
codon_text = np.array(codon_text)
print('%d out of %d codon show significant usage difference \
between SP and LP genes (p_value < 0.5)\n' %
(count_sig, count_all))
plot_heatmap(AA_chisquare, AA_text, 'AAs_ChiSquare', label)
plot_heatmap(codon_ttest, codon_text, 'Codons_ttest', label)
return AAs
def plot_heatmap(data, text, cbarlabel, label):
fig, ax = plt.subplots(nrows = 1, ncols = 1, figsize = (10, 5))
im, cbar = heatmap(data, ax, 'YlGn', cbarlabel)
annotate_heatmap(im, text)
fig.tight_layout()
plt.show
plt.savefig(f'../results/{cbarlabel}_{label}.png')
def heatmap(data, ax, cmap, cbarlabel):
if not ax:
ax = plt.gca()
im = ax.imshow(data, cmap)
cbar = ax.figure.colorbar(im, ax=ax)
ax.set_xticks(np.arange(data.shape[1]))
ax.set_yticks(np.arange(data.shape[0]))
ax.set_xticklabels(range(data.shape[1]))
ax.set_yticklabels(range(data.shape[0]))
ax.tick_params(top=False, bottom=True,
labeltop=False, labelbottom=True)
# draw white space between squares
for edge, spine in ax.spines.items():
spine.set_visible(False)
ax.set_xticks(np.arange(data.shape[1] + 1) - 0.5, minor = True)
ax.set_yticks(np.arange(data.shape[0] + 1) - 0.5, minor = True)
ax.grid(which = 'minor', color = 'w', linestyle = '-', linewidth = 3)
ax.tick_params(which = 'minor', bottom = False, left = False)
cbar.ax.set_ylabel(cbarlabel, va = 'top')
return im, cbar
def annotate_heatmap(im, text_label):
textcolors = ['black','white']
data = im.get_array()
# set threshold to decide color
threshold = im.norm(data.max()) / 2
kw = dict(horizontalalignment = 'center',
verticalalignment = 'center')
for i in range(data.shape[0]):
for j in range(data.shape[1]):
kw.update(color = textcolors[im.norm(data[i,j]) > threshold])
im.axes.text(j, i, text_label[i,j], **kw)
def choose_codons(ttest, text):
# dict key: AA -> codon
# only contains AAs with only two codon choices
codon_map = {
'TTT':'Phe', 'TTC':'Phe', 'TAT':'Tyr', 'TAC':'Tyr',
'TGT':'Cys', 'TGC':'Cys', 'CAT':'His', 'CAC':'His',
'CAA':'Gln', 'CAG':'Gln', 'AAT':'Asn', 'AAC':'Asn',
'AAA':'Lys', 'AAG':'Lys', 'GAT':'Asp', 'GAC':'Asp',
'GAA':'Glu', 'GAG':'Glu'}
codon_dict = collections.defaultdict(list)
for i in range(len(ttest)):
for j in range(len(ttest[i])):
if ttest[i][j] < 0.01:
codon = text[i][j][:3]
if codon in codon_map:
codon_dict[codon_map[codon]].append(codon)
| # codon usage difference between SP and LP groups
AAs = []
for AA in codon_dict.keys():
AAs.append(AA)
if len(codon_dict[AA]) == 2:
file.write('%s: %s, %s\n' %
(AA, codon_dict[AA][0], codon_dict[AA][1]))
else:
file.write('%s: %s\n' % (AA, codon_dict[AA][0]))
file.close()
return AAs
def plot_SP_LP(sp_AA_dict, lp_AA_dict):
# plot each AA
for AA in list(sp_AA_dict.keys()):
# list of codon usage information
codon_data = []
# List of codon names
codons = []
for codon in sp_AA_dict[AA]:
# LP group data is displayed from lowest expressed genes
# to highest expressed genes
lp_AA_dict[AA][codon].reverse()
codons.append(codon)
codon_data.append([])
# display SP group data first and then LP group data
codon_data[-1].append(sp_AA_dict[AA][codon])
codon_data[-1].append(lp_AA_dict[AA][codon])
# plot usage curves
codon_usage_plot(codon_data, AA, codons)
def codon_usage_plot(data, AA, codons):
fig, ax = plt.subplots(nrows = 1, ncols = 1, figsize = (15,5))
for i in range(len(data)):
# 0-50 shows SP group data
x_sp = np.linspace(0, 50, len(data[i][0]))
# 50-100 shows LP group data
x_lp = np.linspace(50, 100, len(data[i][1]))
ax.plot(x_sp, data[i][0], label = 'sp_' + codons[i])
ax.plot(x_lp, data[i][1], label = 'lp_' + codons[i])
ax.legend(loc = 1)
ax.set_title(AA)
def plot_distribution(sp_dict, lp_dict, AA):
fig, axes = plt.subplots(nrows = 2, ncols =1, figsize = (40, 20))
for codon in sp_dict[AA]:
x = np.arange(len(sp_dict[AA][codon]))
sp_y = np.array(sp_dict[AA][codon])
lp_y = np.array(lp_dict[AA][codon])
axes[0].plot(x, sp_y)
axes[1].plot(x, lp_y)
plt.show
def get_skellam_distribution(sp_dict, lp_dict, AA):
sp_mu = {}
lp_mu = {}
codons = []
# get mean values
for codon in sp_dict[AA]:
codons.append(codon)
sp_mu[codon] = np.mean(sp_dict[AA][codon])
lp_mu[codon] = np.mean(lp_dict[AA][codon])
skellam_plot(sp_mu[codons[0]], sp_mu[codons[1]], 'SP-' + AA)
skellam_plot(lp_mu[codons[0]], lp_mu[codons[1]], 'LP-' + AA)
def skellam_plot(mu1, mu2, name):
print(mu1,' ', mu2, ' ', mu1-mu2, ' ', name)
fig, ax = plt.subplots(nrows = 1, ncols = 1, figsize = (5, 5))
x = np.arange(stats.skellam.ppf(0.01, mu1, mu2),
stats.skellam.ppf(0.99, mu1, mu2))
ax.plot(x, stats.skellam.pmf(x, mu1, mu2), marker = 'o', label = name)
ax.legend(loc = 1)
plt.show
# main flow
args = parse_args()
sp_codon_usage = Codon_Usage(args.sp_file)
lp_codon_usage = Codon_Usage(args.lp_file)
sp_AA_dict = sp_codon_usage.get_AA_dict()
lp_AA_dict = lp_codon_usage.get_AA_dict()
print("Analyzing SP and LP %s group data\n" % (args.label))
AAs = heatmap_SP_LP(sp_AA_dict, lp_AA_dict, args.label)
plot_SP_LP(sp_AA_dict, lp_AA_dict)
# optional
# get Skellam distributions of AAs that have only two codon choices
# and show distictive usage between SP and LP
'''
sp_all_codon_usage = Codon_Usage('SP_all_gene_seq.txt')
lp_all_codon_usage = Codon_Usage('LP_all_gene_seq.txt')
sp_all_AA_dict = sp_all_codon_usage.get_AA_dict()
lp_all_AA_dict = lp_all_codon_usage.get_AA_dict()
for AA in AAs:
plot_distribution(sp_all_AA_dict, lp_all_AA_dict, AA)
get_skellam_distribution(sp_all_AA_dict, lp_all_AA_dict, AA)
''' | file = io.open('AAs_to_compare.txt', 'w')
file.write('Compare following AAs\n')
# AAs that have only two codon choices and show significant | random_line_split |
codon_usage.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 13 17:34:32 2019
@author: fanlizhou
Analyze codon usage of sequence from 'SP_gene_seq.txt' and 'LP_gene_seq.txt'
Plot heatmap of amino acid usage and codon usage
Plot codon usage in each gene for each amino acid. Genes were arranged so that
the gene expression of SP decrease from 0 to 50 (x-axis) and the gene expression
of LP increase from 51 to 100 (x-axis)
Usage: codon_usage.py [-h] [--label LABEL] sp_file lp_file
Options:
--label Define the label of out-put files. Default="top"
sp_file Path to the SP data files
lp_file Path to the LP data files
"""
import io, os, argparse, collections
from scipy import stats
import matplotlib.pyplot as plt
import numpy as np
def parse_args():
parser = argparse.ArgumentParser(description=
'Analyze codon usage of SP and LP\n')
parser.add_argument('sp_file', help = 'one input SP data file\n')
parser.add_argument('lp_file', help = 'one input LP data file\n')
parser.add_argument('--label', '-l',
type = str, required = False, default = 'top',
help = 'Define the label of out-put files. Default="top"\n')
args = parser.parse_args()
for path in [args.sp_file, args.lp_file]:
if not os.path.isfile(path):
parser.error('File "%s" cannot be found.' % (path))
return args
# a Codon_Usage class to store codon usage information for each genotype
class Codon_Usage:
def __init__(self, filename):
self.seq, self.gene_num = self.get_seq(filename)
def get_seq(self, filename):
|
def get_AA(self, codon):
# dict key: codon -> AA
codon_map = {
'TTT':'Phe', 'TTC':'Phe', 'TTA':'Leu', 'TTG':'Leu',
'TCT':'Ser', 'TCC':'Ser', 'TCA':'Ser', 'TCG':'Ser',
'TAT':'Tyr', 'TAC':'Tyr', 'TAA':'STOP', 'TAG':'STOP',
'TGT':'Cys', 'TGC':'Cys', 'TGA':'STOP', 'TGG':'Trp',
'CTT':'Leu', 'CTC':'Leu', 'CTA':'Leu', 'CTG':'Leu',
'CCT':'Pro', 'CCC':'Pro', 'CCA':'Pro', 'CCG':'Pro',
'CAT':'His', 'CAC':'His', 'CAA':'Gln', 'CAG':'Gln',
'CGT':'Arg', 'CGC':'Arg', 'CGA':'Arg', 'CGG':'Arg',
'ATT':'Ile', 'ATC':'Ile', 'ATA':'Ile', 'ATG':'Met',
'ACT':'Thr', 'ACC':'Thr', 'ACA':'Thr', 'ACG':'Thr',
'AAT':'Asn', 'AAC':'Asn', 'AAA':'Lys', 'AAG':'Lys',
'AGT':'Ser', 'AGC':'Ser', 'AGA':'Arg', 'AGG':'Arg',
'GTT':'Val', 'GTC':'Val', 'GTA':'Val', 'GTG':'Val',
'GCT':'Ala', 'GCC':'Ala', 'GCA':'Ala', 'GCG':'Ala',
'GAT':'Asp', 'GAC':'Asp', 'GAA':'Glu', 'GAG':'Glu',
'GGT':'Gly', 'GGC':'Gly', 'GGA':'Gly', 'GGG':'Gly'}
if codon in codon_map:
return codon_map[codon]
else:
return ''
def get_usage_dict(self, seq):
# usage_dict structure:
# dict key: AA -> [
# dict key: codon ->
# [codon_count,
# codon_count/AA_count]
# AA_count
# ]
usage_dict = \
collections.defaultdict(lambda:
[
collections.defaultdict(
lambda: [0, 0]),
0
])
# save AAs usage information
for index in range(0, len(seq), 3):
codon = seq[index:index+3]
AA = self.get_AA(codon)
if AA:
# count how many times the AA appears
usage_dict[AA][1] += 1
# count how many times the codon is used
usage_dict[AA][0][codon][0] += 1
# calculate the codon usage percentage for an AA
for AA in usage_dict:
for codon in usage_dict[AA][0]:
usage_dict[AA][0][codon][1] = \
usage_dict[AA][0][codon][0]/usage_dict[AA][1]
return usage_dict
def get_AA_dict(self):
# AA_dict structure:
# 1st dict key: AA -> 2nd dict key: codon -> a list of codon usage
# percentage of each gene
AA_dict = \
collections.defaultdict(
lambda:collections.defaultdict(list))
# dict key: AA -> codon list
AA_map = {
'Phe':['TTT', 'TTC'],
'Leu':['TTA', 'TTG', 'CTT', 'CTC', 'CTA', 'CTG'],
'Ser':['TCT', 'TCC', 'TCA', 'TCG', 'AGT', 'AGC'],
'Tyr':['TAT', 'TAC'],
'STOP':['TAA', 'TAG', 'TGA'],
'Cys':['TGT', 'TGC'],
'Trp':['TGG'],
'Pro':['CCT', 'CCC', 'CCA', 'CCG'],
'His':['CAT', 'CAC'],
'Gln':['CAA', 'CAG'],
'Arg':['CGT', 'CGC', 'CGA', 'CGG', 'AGA', 'AGG'],
'Ile':['ATT', 'ATC', 'ATA'],
'Met':['ATG'],
'Thr':['ACT', 'ACC', 'ACA', 'ACG'],
'Asn':['AAT', 'AAC'],
'Lys':['AAA', 'AAG'],
'Val':['GTT', 'GTC', 'GTA', 'GTG'],
'Ala':['GCT', 'GCC', 'GCA', 'GCG'],
'Asp':['GAT', 'GAC'],
'Glu':['GAA', 'GAG'],
'Gly':['GGT', 'GGC', 'GGA', 'GGG']
}
# list of codon usage for each gene
usage_dict_list = []
# get codon usage information for each gene
for seq in self.seq:
usage_dict_list.append(self.get_usage_dict(seq))
# get the list of codon usage percentage from each gene
for AA in list(AA_map.keys()):
for codon in AA_map[AA]:
# get codon usage information from each gene
for usage_dict in usage_dict_list:
# append codon usage percentage in the gene
AA_dict[AA][codon].append(
usage_dict[AA][0][codon][1])
return AA_dict
def heatmap_SP_LP(sp_AA_dict, lp_AA_dict, label):
# list of Chi-Square test results
AA_chisquare = []
# AA plotting annotation information
AA_text = []
# list of student's t-test results
codon_ttest = []
# codon plotting annotaion information
codon_text = []
i = 0
j = 0
# number of genes analyzed
count_all = 0
# number of genes that show significant results
count_sig = 0
for AA in list(sp_AA_dict.keys()):
# mean values of codon usage for each AA
sp_codon_mean = []
lp_codon_mean = []
for codon in sp_AA_dict[AA]:
# calculate ttest results
p_val = stats.ttest_ind(sp_AA_dict[AA][codon],
lp_AA_dict[AA][codon],
equal_var = False)[1]
# display eight codons in a row
if not i % 8:
codon_ttest.append([])
codon_text.append([])
i += 1
# handle NULL values
if np.isnan(p_val):
codon_ttest[-1].append(0)
codon_text[-1].append(codon + '\n NA')
# save ttest p-values and annotation information
else:
codon_ttest[-1].append(p_val)
codon_text[-1].append(codon + '\n' + str(round(p_val, 2)))
count_all += 1
if p_val < 0.5:
count_sig += 1
sp_codon_mean.append(np.mean(sp_AA_dict[AA][codon]))
lp_codon_mean.append(np.mean(lp_AA_dict[AA][codon]))
# get Chi-Square test results of each AA
p_val = stats.chisquare(np.array([sp_codon_mean, lp_codon_mean]),
axis = None)[1]
# display six AA in a row
if not j % 6:
AA_chisquare.append([])
AA_text.append([])
j += 1
# handle Null values
if np.isnan(p_val):
AA_chisquare[-1].append(0)
AA_text[-1].append(AA + '\n NA')
# save Chi-Square test p-values and annotation information
else:
AA_chisquare[-1].append(p_val)
AA_text[-1].append(AA + '\n' + str(round(p_val, 2)))
# handle empty cells
for n in range(j % 6, 6):
AA_chisquare[-1].append(0)
AA_text[-1].append('')
# get list of AAs that show significant difference between SP and LP groups
AAs = choose_codons(codon_ttest, codon_text)
AA_chisquare = np.array(AA_chisquare)
codon_ttest = np.array(codon_ttest)
AA_text = np.array(AA_text)
codon_text = np.array(codon_text)
print('%d out of %d codon show significant usage difference \
between SP and LP genes (p_value < 0.5)\n' %
(count_sig, count_all))
plot_heatmap(AA_chisquare, AA_text, 'AAs_ChiSquare', label)
plot_heatmap(codon_ttest, codon_text, 'Codons_ttest', label)
return AAs
def plot_heatmap(data, text, cbarlabel, label):
fig, ax = plt.subplots(nrows = 1, ncols = 1, figsize = (10, 5))
im, cbar = heatmap(data, ax, 'YlGn', cbarlabel)
annotate_heatmap(im, text)
fig.tight_layout()
plt.show
plt.savefig(f'../results/{cbarlabel}_{label}.png')
def heatmap(data, ax, cmap, cbarlabel):
if not ax:
ax = plt.gca()
im = ax.imshow(data, cmap)
cbar = ax.figure.colorbar(im, ax=ax)
ax.set_xticks(np.arange(data.shape[1]))
ax.set_yticks(np.arange(data.shape[0]))
ax.set_xticklabels(range(data.shape[1]))
ax.set_yticklabels(range(data.shape[0]))
ax.tick_params(top=False, bottom=True,
labeltop=False, labelbottom=True)
# draw white space between squares
for edge, spine in ax.spines.items():
spine.set_visible(False)
ax.set_xticks(np.arange(data.shape[1] + 1) - 0.5, minor = True)
ax.set_yticks(np.arange(data.shape[0] + 1) - 0.5, minor = True)
ax.grid(which = 'minor', color = 'w', linestyle = '-', linewidth = 3)
ax.tick_params(which = 'minor', bottom = False, left = False)
cbar.ax.set_ylabel(cbarlabel, va = 'top')
return im, cbar
def annotate_heatmap(im, text_label):
textcolors = ['black','white']
data = im.get_array()
# set threshold to decide color
threshold = im.norm(data.max()) / 2
kw = dict(horizontalalignment = 'center',
verticalalignment = 'center')
for i in range(data.shape[0]):
for j in range(data.shape[1]):
kw.update(color = textcolors[im.norm(data[i,j]) > threshold])
im.axes.text(j, i, text_label[i,j], **kw)
def choose_codons(ttest, text):
# dict key: AA -> codon
# only contains AAs with only two codon choices
codon_map = {
'TTT':'Phe', 'TTC':'Phe', 'TAT':'Tyr', 'TAC':'Tyr',
'TGT':'Cys', 'TGC':'Cys', 'CAT':'His', 'CAC':'His',
'CAA':'Gln', 'CAG':'Gln', 'AAT':'Asn', 'AAC':'Asn',
'AAA':'Lys', 'AAG':'Lys', 'GAT':'Asp', 'GAC':'Asp',
'GAA':'Glu', 'GAG':'Glu'}
codon_dict = collections.defaultdict(list)
for i in range(len(ttest)):
for j in range(len(ttest[i])):
if ttest[i][j] < 0.01:
codon = text[i][j][:3]
if codon in codon_map:
codon_dict[codon_map[codon]].append(codon)
file = io.open('AAs_to_compare.txt', 'w')
file.write('Compare following AAs\n')
# AAs that have only two codon choices and show significant
# codon usage difference between SP and LP groups
AAs = []
for AA in codon_dict.keys():
AAs.append(AA)
if len(codon_dict[AA]) == 2:
file.write('%s: %s, %s\n' %
(AA, codon_dict[AA][0], codon_dict[AA][1]))
else:
file.write('%s: %s\n' % (AA, codon_dict[AA][0]))
file.close()
return AAs
def plot_SP_LP(sp_AA_dict, lp_AA_dict):
# plot each AA
for AA in list(sp_AA_dict.keys()):
# list of codon usage information
codon_data = []
# List of codon names
codons = []
for codon in sp_AA_dict[AA]:
# LP group data is displayed from lowest expressed genes
# to highest expressed genes
lp_AA_dict[AA][codon].reverse()
codons.append(codon)
codon_data.append([])
# display SP group data first and then LP group data
codon_data[-1].append(sp_AA_dict[AA][codon])
codon_data[-1].append(lp_AA_dict[AA][codon])
# plot usage curves
codon_usage_plot(codon_data, AA, codons)
def codon_usage_plot(data, AA, codons):
fig, ax = plt.subplots(nrows = 1, ncols = 1, figsize = (15,5))
for i in range(len(data)):
# 0-50 shows SP group data
x_sp = np.linspace(0, 50, len(data[i][0]))
# 50-100 shows LP group data
x_lp = np.linspace(50, 100, len(data[i][1]))
ax.plot(x_sp, data[i][0], label = 'sp_' + codons[i])
ax.plot(x_lp, data[i][1], label = 'lp_' + codons[i])
ax.legend(loc = 1)
ax.set_title(AA)
def plot_distribution(sp_dict, lp_dict, AA):
fig, axes = plt.subplots(nrows = 2, ncols =1, figsize = (40, 20))
for codon in sp_dict[AA]:
x = np.arange(len(sp_dict[AA][codon]))
sp_y = np.array(sp_dict[AA][codon])
lp_y = np.array(lp_dict[AA][codon])
axes[0].plot(x, sp_y)
axes[1].plot(x, lp_y)
plt.show
def get_skellam_distribution(sp_dict, lp_dict, AA):
sp_mu = {}
lp_mu = {}
codons = []
# get mean values
for codon in sp_dict[AA]:
codons.append(codon)
sp_mu[codon] = np.mean(sp_dict[AA][codon])
lp_mu[codon] = np.mean(lp_dict[AA][codon])
skellam_plot(sp_mu[codons[0]], sp_mu[codons[1]], 'SP-' + AA)
skellam_plot(lp_mu[codons[0]], lp_mu[codons[1]], 'LP-' + AA)
def skellam_plot(mu1, mu2, name):
print(mu1,' ', mu2, ' ', mu1-mu2, ' ', name)
fig, ax = plt.subplots(nrows = 1, ncols = 1, figsize = (5, 5))
x = np.arange(stats.skellam.ppf(0.01, mu1, mu2),
stats.skellam.ppf(0.99, mu1, mu2))
ax.plot(x, stats.skellam.pmf(x, mu1, mu2), marker = 'o', label = name)
ax.legend(loc = 1)
plt.show
# main flow
args = parse_args()
sp_codon_usage = Codon_Usage(args.sp_file)
lp_codon_usage = Codon_Usage(args.lp_file)
sp_AA_dict = sp_codon_usage.get_AA_dict()
lp_AA_dict = lp_codon_usage.get_AA_dict()
print("Analyzing SP and LP %s group data\n" % (args.label))
AAs = heatmap_SP_LP(sp_AA_dict, lp_AA_dict, args.label)
plot_SP_LP(sp_AA_dict, lp_AA_dict)
# optional
# get Skellam distributions of AAs that have only two codon choices
# and show distictive usage between SP and LP
'''
sp_all_codon_usage = Codon_Usage('SP_all_gene_seq.txt')
lp_all_codon_usage = Codon_Usage('LP_all_gene_seq.txt')
sp_all_AA_dict = sp_all_codon_usage.get_AA_dict()
lp_all_AA_dict = lp_all_codon_usage.get_AA_dict()
for AA in AAs:
plot_distribution(sp_all_AA_dict, lp_all_AA_dict, AA)
get_skellam_distribution(sp_all_AA_dict, lp_all_AA_dict, AA)
''' | file = io.open(filename)
# list of selected gene sequences, excluded genes that are non-triple
all_seq = []
gene_seq = ''
count_all = 0
count_non_triple = 0
for line in file:
# read a gene information line
if line[0]=='>':
count_all += 1
# if a gene has been read, then append it to all_seq if the
# sequence is triple
if gene_seq!='':
if len(gene_seq)%3:
count_non_triple += 1
else:
all_seq.append(gene_seq)
gene_seq = ''
# read a gene sequence line
else:
gene_seq += line.strip()
file.close()
print('%s:\n%d genes added\n%d are non-triple\n'%
(filename[:2],count_all, count_non_triple))
return (all_seq, count_all - count_non_triple) | identifier_body |
codon_usage.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 13 17:34:32 2019
@author: fanlizhou
Analyze codon usage of sequence from 'SP_gene_seq.txt' and 'LP_gene_seq.txt'
Plot heatmap of amino acid usage and codon usage
Plot codon usage in each gene for each amino acid. Genes were arranged so that
the gene expression of SP decrease from 0 to 50 (x-axis) and the gene expression
of LP increase from 51 to 100 (x-axis)
Usage: codon_usage.py [-h] [--label LABEL] sp_file lp_file
Options:
--label Define the label of out-put files. Default="top"
sp_file Path to the SP data files
lp_file Path to the LP data files
"""
import io, os, argparse, collections
from scipy import stats
import matplotlib.pyplot as plt
import numpy as np
def parse_args():
parser = argparse.ArgumentParser(description=
'Analyze codon usage of SP and LP\n')
parser.add_argument('sp_file', help = 'one input SP data file\n')
parser.add_argument('lp_file', help = 'one input LP data file\n')
parser.add_argument('--label', '-l',
type = str, required = False, default = 'top',
help = 'Define the label of out-put files. Default="top"\n')
args = parser.parse_args()
for path in [args.sp_file, args.lp_file]:
if not os.path.isfile(path):
parser.error('File "%s" cannot be found.' % (path))
return args
# a Codon_Usage class to store codon usage information for each genotype
class Codon_Usage:
def __init__(self, filename):
self.seq, self.gene_num = self.get_seq(filename)
def get_seq(self, filename):
file = io.open(filename)
# list of selected gene sequences, excluded genes that are non-triple
all_seq = []
gene_seq = ''
count_all = 0
count_non_triple = 0
for line in file:
# read a gene information line
if line[0]=='>':
count_all += 1
# if a gene has been read, then append it to all_seq if the
# sequence is triple
if gene_seq!='':
if len(gene_seq)%3:
count_non_triple += 1
else:
all_seq.append(gene_seq)
gene_seq = ''
# read a gene sequence line
else:
gene_seq += line.strip()
file.close()
print('%s:\n%d genes added\n%d are non-triple\n'%
(filename[:2],count_all, count_non_triple))
return (all_seq, count_all - count_non_triple)
def get_AA(self, codon):
# dict key: codon -> AA
codon_map = {
'TTT':'Phe', 'TTC':'Phe', 'TTA':'Leu', 'TTG':'Leu',
'TCT':'Ser', 'TCC':'Ser', 'TCA':'Ser', 'TCG':'Ser',
'TAT':'Tyr', 'TAC':'Tyr', 'TAA':'STOP', 'TAG':'STOP',
'TGT':'Cys', 'TGC':'Cys', 'TGA':'STOP', 'TGG':'Trp',
'CTT':'Leu', 'CTC':'Leu', 'CTA':'Leu', 'CTG':'Leu',
'CCT':'Pro', 'CCC':'Pro', 'CCA':'Pro', 'CCG':'Pro',
'CAT':'His', 'CAC':'His', 'CAA':'Gln', 'CAG':'Gln',
'CGT':'Arg', 'CGC':'Arg', 'CGA':'Arg', 'CGG':'Arg',
'ATT':'Ile', 'ATC':'Ile', 'ATA':'Ile', 'ATG':'Met',
'ACT':'Thr', 'ACC':'Thr', 'ACA':'Thr', 'ACG':'Thr',
'AAT':'Asn', 'AAC':'Asn', 'AAA':'Lys', 'AAG':'Lys',
'AGT':'Ser', 'AGC':'Ser', 'AGA':'Arg', 'AGG':'Arg',
'GTT':'Val', 'GTC':'Val', 'GTA':'Val', 'GTG':'Val',
'GCT':'Ala', 'GCC':'Ala', 'GCA':'Ala', 'GCG':'Ala',
'GAT':'Asp', 'GAC':'Asp', 'GAA':'Glu', 'GAG':'Glu',
'GGT':'Gly', 'GGC':'Gly', 'GGA':'Gly', 'GGG':'Gly'}
if codon in codon_map:
return codon_map[codon]
else:
return ''
def get_usage_dict(self, seq):
# usage_dict structure:
# dict key: AA -> [
# dict key: codon ->
# [codon_count,
# codon_count/AA_count]
# AA_count
# ]
usage_dict = \
collections.defaultdict(lambda:
[
collections.defaultdict(
lambda: [0, 0]),
0
])
# save AAs usage information
for index in range(0, len(seq), 3):
codon = seq[index:index+3]
AA = self.get_AA(codon)
if AA:
# count how many times the AA appears
usage_dict[AA][1] += 1
# count how many times the codon is used
usage_dict[AA][0][codon][0] += 1
# calculate the codon usage percentage for an AA
for AA in usage_dict:
for codon in usage_dict[AA][0]:
usage_dict[AA][0][codon][1] = \
usage_dict[AA][0][codon][0]/usage_dict[AA][1]
return usage_dict
def get_AA_dict(self):
# AA_dict structure:
# 1st dict key: AA -> 2nd dict key: codon -> a list of codon usage
# percentage of each gene
AA_dict = \
collections.defaultdict(
lambda:collections.defaultdict(list))
# dict key: AA -> codon list
AA_map = {
'Phe':['TTT', 'TTC'],
'Leu':['TTA', 'TTG', 'CTT', 'CTC', 'CTA', 'CTG'],
'Ser':['TCT', 'TCC', 'TCA', 'TCG', 'AGT', 'AGC'],
'Tyr':['TAT', 'TAC'],
'STOP':['TAA', 'TAG', 'TGA'],
'Cys':['TGT', 'TGC'],
'Trp':['TGG'],
'Pro':['CCT', 'CCC', 'CCA', 'CCG'],
'His':['CAT', 'CAC'],
'Gln':['CAA', 'CAG'],
'Arg':['CGT', 'CGC', 'CGA', 'CGG', 'AGA', 'AGG'],
'Ile':['ATT', 'ATC', 'ATA'],
'Met':['ATG'],
'Thr':['ACT', 'ACC', 'ACA', 'ACG'],
'Asn':['AAT', 'AAC'],
'Lys':['AAA', 'AAG'],
'Val':['GTT', 'GTC', 'GTA', 'GTG'],
'Ala':['GCT', 'GCC', 'GCA', 'GCG'],
'Asp':['GAT', 'GAC'],
'Glu':['GAA', 'GAG'],
'Gly':['GGT', 'GGC', 'GGA', 'GGG']
}
# list of codon usage for each gene
usage_dict_list = []
# get codon usage information for each gene
for seq in self.seq:
usage_dict_list.append(self.get_usage_dict(seq))
# get the list of codon usage percentage from each gene
for AA in list(AA_map.keys()):
for codon in AA_map[AA]:
# get codon usage information from each gene
for usage_dict in usage_dict_list:
# append codon usage percentage in the gene
AA_dict[AA][codon].append(
usage_dict[AA][0][codon][1])
return AA_dict
def heatmap_SP_LP(sp_AA_dict, lp_AA_dict, label):
# list of Chi-Square test results
AA_chisquare = []
# AA plotting annotation information
AA_text = []
# list of student's t-test results
codon_ttest = []
# codon plotting annotaion information
codon_text = []
i = 0
j = 0
# number of genes analyzed
count_all = 0
# number of genes that show significant results
count_sig = 0
for AA in list(sp_AA_dict.keys()):
# mean values of codon usage for each AA
sp_codon_mean = []
lp_codon_mean = []
for codon in sp_AA_dict[AA]:
# calculate ttest results
p_val = stats.ttest_ind(sp_AA_dict[AA][codon],
lp_AA_dict[AA][codon],
equal_var = False)[1]
# display eight codons in a row
if not i % 8:
codon_ttest.append([])
codon_text.append([])
i += 1
# handle NULL values
if np.isnan(p_val):
codon_ttest[-1].append(0)
codon_text[-1].append(codon + '\n NA')
# save ttest p-values and annotation information
else:
codon_ttest[-1].append(p_val)
codon_text[-1].append(codon + '\n' + str(round(p_val, 2)))
count_all += 1
if p_val < 0.5:
count_sig += 1
sp_codon_mean.append(np.mean(sp_AA_dict[AA][codon]))
lp_codon_mean.append(np.mean(lp_AA_dict[AA][codon]))
# get Chi-Square test results of each AA
p_val = stats.chisquare(np.array([sp_codon_mean, lp_codon_mean]),
axis = None)[1]
# display six AA in a row
if not j % 6:
AA_chisquare.append([])
AA_text.append([])
j += 1
# handle Null values
if np.isnan(p_val):
AA_chisquare[-1].append(0)
AA_text[-1].append(AA + '\n NA')
# save Chi-Square test p-values and annotation information
else:
AA_chisquare[-1].append(p_val)
AA_text[-1].append(AA + '\n' + str(round(p_val, 2)))
# handle empty cells
for n in range(j % 6, 6):
AA_chisquare[-1].append(0)
AA_text[-1].append('')
# get list of AAs that show significant difference between SP and LP groups
AAs = choose_codons(codon_ttest, codon_text)
AA_chisquare = np.array(AA_chisquare)
codon_ttest = np.array(codon_ttest)
AA_text = np.array(AA_text)
codon_text = np.array(codon_text)
print('%d out of %d codon show significant usage difference \
between SP and LP genes (p_value < 0.5)\n' %
(count_sig, count_all))
plot_heatmap(AA_chisquare, AA_text, 'AAs_ChiSquare', label)
plot_heatmap(codon_ttest, codon_text, 'Codons_ttest', label)
return AAs
def plot_heatmap(data, text, cbarlabel, label):
fig, ax = plt.subplots(nrows = 1, ncols = 1, figsize = (10, 5))
im, cbar = heatmap(data, ax, 'YlGn', cbarlabel)
annotate_heatmap(im, text)
fig.tight_layout()
plt.show
plt.savefig(f'../results/{cbarlabel}_{label}.png')
def heatmap(data, ax, cmap, cbarlabel):
if not ax:
ax = plt.gca()
im = ax.imshow(data, cmap)
cbar = ax.figure.colorbar(im, ax=ax)
ax.set_xticks(np.arange(data.shape[1]))
ax.set_yticks(np.arange(data.shape[0]))
ax.set_xticklabels(range(data.shape[1]))
ax.set_yticklabels(range(data.shape[0]))
ax.tick_params(top=False, bottom=True,
labeltop=False, labelbottom=True)
# draw white space between squares
for edge, spine in ax.spines.items():
spine.set_visible(False)
ax.set_xticks(np.arange(data.shape[1] + 1) - 0.5, minor = True)
ax.set_yticks(np.arange(data.shape[0] + 1) - 0.5, minor = True)
ax.grid(which = 'minor', color = 'w', linestyle = '-', linewidth = 3)
ax.tick_params(which = 'minor', bottom = False, left = False)
cbar.ax.set_ylabel(cbarlabel, va = 'top')
return im, cbar
def annotate_heatmap(im, text_label):
textcolors = ['black','white']
data = im.get_array()
# set threshold to decide color
threshold = im.norm(data.max()) / 2
kw = dict(horizontalalignment = 'center',
verticalalignment = 'center')
for i in range(data.shape[0]):
for j in range(data.shape[1]):
kw.update(color = textcolors[im.norm(data[i,j]) > threshold])
im.axes.text(j, i, text_label[i,j], **kw)
def choose_codons(ttest, text):
# dict key: AA -> codon
# only contains AAs with only two codon choices
codon_map = {
'TTT':'Phe', 'TTC':'Phe', 'TAT':'Tyr', 'TAC':'Tyr',
'TGT':'Cys', 'TGC':'Cys', 'CAT':'His', 'CAC':'His',
'CAA':'Gln', 'CAG':'Gln', 'AAT':'Asn', 'AAC':'Asn',
'AAA':'Lys', 'AAG':'Lys', 'GAT':'Asp', 'GAC':'Asp',
'GAA':'Glu', 'GAG':'Glu'}
codon_dict = collections.defaultdict(list)
for i in range(len(ttest)):
for j in range(len(ttest[i])):
if ttest[i][j] < 0.01:
codon = text[i][j][:3]
if codon in codon_map:
codon_dict[codon_map[codon]].append(codon)
file = io.open('AAs_to_compare.txt', 'w')
file.write('Compare following AAs\n')
# AAs that have only two codon choices and show significant
# codon usage difference between SP and LP groups
AAs = []
for AA in codon_dict.keys():
AAs.append(AA)
if len(codon_dict[AA]) == 2:
file.write('%s: %s, %s\n' %
(AA, codon_dict[AA][0], codon_dict[AA][1]))
else:
file.write('%s: %s\n' % (AA, codon_dict[AA][0]))
file.close()
return AAs
def | (sp_AA_dict, lp_AA_dict):
# plot each AA
for AA in list(sp_AA_dict.keys()):
# list of codon usage information
codon_data = []
# List of codon names
codons = []
for codon in sp_AA_dict[AA]:
# LP group data is displayed from lowest expressed genes
# to highest expressed genes
lp_AA_dict[AA][codon].reverse()
codons.append(codon)
codon_data.append([])
# display SP group data first and then LP group data
codon_data[-1].append(sp_AA_dict[AA][codon])
codon_data[-1].append(lp_AA_dict[AA][codon])
# plot usage curves
codon_usage_plot(codon_data, AA, codons)
def codon_usage_plot(data, AA, codons):
fig, ax = plt.subplots(nrows = 1, ncols = 1, figsize = (15,5))
for i in range(len(data)):
# 0-50 shows SP group data
x_sp = np.linspace(0, 50, len(data[i][0]))
# 50-100 shows LP group data
x_lp = np.linspace(50, 100, len(data[i][1]))
ax.plot(x_sp, data[i][0], label = 'sp_' + codons[i])
ax.plot(x_lp, data[i][1], label = 'lp_' + codons[i])
ax.legend(loc = 1)
ax.set_title(AA)
def plot_distribution(sp_dict, lp_dict, AA):
fig, axes = plt.subplots(nrows = 2, ncols =1, figsize = (40, 20))
for codon in sp_dict[AA]:
x = np.arange(len(sp_dict[AA][codon]))
sp_y = np.array(sp_dict[AA][codon])
lp_y = np.array(lp_dict[AA][codon])
axes[0].plot(x, sp_y)
axes[1].plot(x, lp_y)
plt.show
def get_skellam_distribution(sp_dict, lp_dict, AA):
sp_mu = {}
lp_mu = {}
codons = []
# get mean values
for codon in sp_dict[AA]:
codons.append(codon)
sp_mu[codon] = np.mean(sp_dict[AA][codon])
lp_mu[codon] = np.mean(lp_dict[AA][codon])
skellam_plot(sp_mu[codons[0]], sp_mu[codons[1]], 'SP-' + AA)
skellam_plot(lp_mu[codons[0]], lp_mu[codons[1]], 'LP-' + AA)
def skellam_plot(mu1, mu2, name):
print(mu1,' ', mu2, ' ', mu1-mu2, ' ', name)
fig, ax = plt.subplots(nrows = 1, ncols = 1, figsize = (5, 5))
x = np.arange(stats.skellam.ppf(0.01, mu1, mu2),
stats.skellam.ppf(0.99, mu1, mu2))
ax.plot(x, stats.skellam.pmf(x, mu1, mu2), marker = 'o', label = name)
ax.legend(loc = 1)
plt.show
# main flow
args = parse_args()
sp_codon_usage = Codon_Usage(args.sp_file)
lp_codon_usage = Codon_Usage(args.lp_file)
sp_AA_dict = sp_codon_usage.get_AA_dict()
lp_AA_dict = lp_codon_usage.get_AA_dict()
print("Analyzing SP and LP %s group data\n" % (args.label))
AAs = heatmap_SP_LP(sp_AA_dict, lp_AA_dict, args.label)
plot_SP_LP(sp_AA_dict, lp_AA_dict)
# optional
# get Skellam distributions of AAs that have only two codon choices
# and show distictive usage between SP and LP
'''
sp_all_codon_usage = Codon_Usage('SP_all_gene_seq.txt')
lp_all_codon_usage = Codon_Usage('LP_all_gene_seq.txt')
sp_all_AA_dict = sp_all_codon_usage.get_AA_dict()
lp_all_AA_dict = lp_all_codon_usage.get_AA_dict()
for AA in AAs:
plot_distribution(sp_all_AA_dict, lp_all_AA_dict, AA)
get_skellam_distribution(sp_all_AA_dict, lp_all_AA_dict, AA)
''' | plot_SP_LP | identifier_name |
codon_usage.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 13 17:34:32 2019
@author: fanlizhou
Analyze codon usage of sequence from 'SP_gene_seq.txt' and 'LP_gene_seq.txt'
Plot heatmap of amino acid usage and codon usage
Plot codon usage in each gene for each amino acid. Genes were arranged so that
the gene expression of SP decrease from 0 to 50 (x-axis) and the gene expression
of LP increase from 51 to 100 (x-axis)
Usage: codon_usage.py [-h] [--label LABEL] sp_file lp_file
Options:
--label Define the label of out-put files. Default="top"
sp_file Path to the SP data files
lp_file Path to the LP data files
"""
import io, os, argparse, collections
from scipy import stats
import matplotlib.pyplot as plt
import numpy as np
def parse_args():
parser = argparse.ArgumentParser(description=
'Analyze codon usage of SP and LP\n')
parser.add_argument('sp_file', help = 'one input SP data file\n')
parser.add_argument('lp_file', help = 'one input LP data file\n')
parser.add_argument('--label', '-l',
type = str, required = False, default = 'top',
help = 'Define the label of out-put files. Default="top"\n')
args = parser.parse_args()
for path in [args.sp_file, args.lp_file]:
if not os.path.isfile(path):
parser.error('File "%s" cannot be found.' % (path))
return args
# a Codon_Usage class to store codon usage information for each genotype
class Codon_Usage:
def __init__(self, filename):
self.seq, self.gene_num = self.get_seq(filename)
def get_seq(self, filename):
file = io.open(filename)
# list of selected gene sequences, excluded genes that are non-triple
all_seq = []
gene_seq = ''
count_all = 0
count_non_triple = 0
for line in file:
# read a gene information line
if line[0]=='>':
|
# read a gene sequence line
else:
gene_seq += line.strip()
file.close()
print('%s:\n%d genes added\n%d are non-triple\n'%
(filename[:2],count_all, count_non_triple))
return (all_seq, count_all - count_non_triple)
def get_AA(self, codon):
# dict key: codon -> AA
codon_map = {
'TTT':'Phe', 'TTC':'Phe', 'TTA':'Leu', 'TTG':'Leu',
'TCT':'Ser', 'TCC':'Ser', 'TCA':'Ser', 'TCG':'Ser',
'TAT':'Tyr', 'TAC':'Tyr', 'TAA':'STOP', 'TAG':'STOP',
'TGT':'Cys', 'TGC':'Cys', 'TGA':'STOP', 'TGG':'Trp',
'CTT':'Leu', 'CTC':'Leu', 'CTA':'Leu', 'CTG':'Leu',
'CCT':'Pro', 'CCC':'Pro', 'CCA':'Pro', 'CCG':'Pro',
'CAT':'His', 'CAC':'His', 'CAA':'Gln', 'CAG':'Gln',
'CGT':'Arg', 'CGC':'Arg', 'CGA':'Arg', 'CGG':'Arg',
'ATT':'Ile', 'ATC':'Ile', 'ATA':'Ile', 'ATG':'Met',
'ACT':'Thr', 'ACC':'Thr', 'ACA':'Thr', 'ACG':'Thr',
'AAT':'Asn', 'AAC':'Asn', 'AAA':'Lys', 'AAG':'Lys',
'AGT':'Ser', 'AGC':'Ser', 'AGA':'Arg', 'AGG':'Arg',
'GTT':'Val', 'GTC':'Val', 'GTA':'Val', 'GTG':'Val',
'GCT':'Ala', 'GCC':'Ala', 'GCA':'Ala', 'GCG':'Ala',
'GAT':'Asp', 'GAC':'Asp', 'GAA':'Glu', 'GAG':'Glu',
'GGT':'Gly', 'GGC':'Gly', 'GGA':'Gly', 'GGG':'Gly'}
if codon in codon_map:
return codon_map[codon]
else:
return ''
def get_usage_dict(self, seq):
# usage_dict structure:
# dict key: AA -> [
# dict key: codon ->
# [codon_count,
# codon_count/AA_count]
# AA_count
# ]
usage_dict = \
collections.defaultdict(lambda:
[
collections.defaultdict(
lambda: [0, 0]),
0
])
# save AAs usage information
for index in range(0, len(seq), 3):
codon = seq[index:index+3]
AA = self.get_AA(codon)
if AA:
# count how many times the AA appears
usage_dict[AA][1] += 1
# count how many times the codon is used
usage_dict[AA][0][codon][0] += 1
# calculate the codon usage percentage for an AA
for AA in usage_dict:
for codon in usage_dict[AA][0]:
usage_dict[AA][0][codon][1] = \
usage_dict[AA][0][codon][0]/usage_dict[AA][1]
return usage_dict
def get_AA_dict(self):
# AA_dict structure:
# 1st dict key: AA -> 2nd dict key: codon -> a list of codon usage
# percentage of each gene
AA_dict = \
collections.defaultdict(
lambda:collections.defaultdict(list))
# dict key: AA -> codon list
AA_map = {
'Phe':['TTT', 'TTC'],
'Leu':['TTA', 'TTG', 'CTT', 'CTC', 'CTA', 'CTG'],
'Ser':['TCT', 'TCC', 'TCA', 'TCG', 'AGT', 'AGC'],
'Tyr':['TAT', 'TAC'],
'STOP':['TAA', 'TAG', 'TGA'],
'Cys':['TGT', 'TGC'],
'Trp':['TGG'],
'Pro':['CCT', 'CCC', 'CCA', 'CCG'],
'His':['CAT', 'CAC'],
'Gln':['CAA', 'CAG'],
'Arg':['CGT', 'CGC', 'CGA', 'CGG', 'AGA', 'AGG'],
'Ile':['ATT', 'ATC', 'ATA'],
'Met':['ATG'],
'Thr':['ACT', 'ACC', 'ACA', 'ACG'],
'Asn':['AAT', 'AAC'],
'Lys':['AAA', 'AAG'],
'Val':['GTT', 'GTC', 'GTA', 'GTG'],
'Ala':['GCT', 'GCC', 'GCA', 'GCG'],
'Asp':['GAT', 'GAC'],
'Glu':['GAA', 'GAG'],
'Gly':['GGT', 'GGC', 'GGA', 'GGG']
}
# list of codon usage for each gene
usage_dict_list = []
# get codon usage information for each gene
for seq in self.seq:
usage_dict_list.append(self.get_usage_dict(seq))
# get the list of codon usage percentage from each gene
for AA in list(AA_map.keys()):
for codon in AA_map[AA]:
# get codon usage information from each gene
for usage_dict in usage_dict_list:
# append codon usage percentage in the gene
AA_dict[AA][codon].append(
usage_dict[AA][0][codon][1])
return AA_dict
def heatmap_SP_LP(sp_AA_dict, lp_AA_dict, label):
# list of Chi-Square test results
AA_chisquare = []
# AA plotting annotation information
AA_text = []
# list of student's t-test results
codon_ttest = []
# codon plotting annotaion information
codon_text = []
i = 0
j = 0
# number of genes analyzed
count_all = 0
# number of genes that show significant results
count_sig = 0
for AA in list(sp_AA_dict.keys()):
# mean values of codon usage for each AA
sp_codon_mean = []
lp_codon_mean = []
for codon in sp_AA_dict[AA]:
# calculate ttest results
p_val = stats.ttest_ind(sp_AA_dict[AA][codon],
lp_AA_dict[AA][codon],
equal_var = False)[1]
# display eight codons in a row
if not i % 8:
codon_ttest.append([])
codon_text.append([])
i += 1
# handle NULL values
if np.isnan(p_val):
codon_ttest[-1].append(0)
codon_text[-1].append(codon + '\n NA')
# save ttest p-values and annotation information
else:
codon_ttest[-1].append(p_val)
codon_text[-1].append(codon + '\n' + str(round(p_val, 2)))
count_all += 1
if p_val < 0.5:
count_sig += 1
sp_codon_mean.append(np.mean(sp_AA_dict[AA][codon]))
lp_codon_mean.append(np.mean(lp_AA_dict[AA][codon]))
# get Chi-Square test results of each AA
p_val = stats.chisquare(np.array([sp_codon_mean, lp_codon_mean]),
axis = None)[1]
# display six AA in a row
if not j % 6:
AA_chisquare.append([])
AA_text.append([])
j += 1
# handle Null values
if np.isnan(p_val):
AA_chisquare[-1].append(0)
AA_text[-1].append(AA + '\n NA')
# save Chi-Square test p-values and annotation information
else:
AA_chisquare[-1].append(p_val)
AA_text[-1].append(AA + '\n' + str(round(p_val, 2)))
# handle empty cells
for n in range(j % 6, 6):
AA_chisquare[-1].append(0)
AA_text[-1].append('')
# get list of AAs that show significant difference between SP and LP groups
AAs = choose_codons(codon_ttest, codon_text)
AA_chisquare = np.array(AA_chisquare)
codon_ttest = np.array(codon_ttest)
AA_text = np.array(AA_text)
codon_text = np.array(codon_text)
print('%d out of %d codon show significant usage difference \
between SP and LP genes (p_value < 0.5)\n' %
(count_sig, count_all))
plot_heatmap(AA_chisquare, AA_text, 'AAs_ChiSquare', label)
plot_heatmap(codon_ttest, codon_text, 'Codons_ttest', label)
return AAs
def plot_heatmap(data, text, cbarlabel, label):
fig, ax = plt.subplots(nrows = 1, ncols = 1, figsize = (10, 5))
im, cbar = heatmap(data, ax, 'YlGn', cbarlabel)
annotate_heatmap(im, text)
fig.tight_layout()
plt.show
plt.savefig(f'../results/{cbarlabel}_{label}.png')
def heatmap(data, ax, cmap, cbarlabel):
if not ax:
ax = plt.gca()
im = ax.imshow(data, cmap)
cbar = ax.figure.colorbar(im, ax=ax)
ax.set_xticks(np.arange(data.shape[1]))
ax.set_yticks(np.arange(data.shape[0]))
ax.set_xticklabels(range(data.shape[1]))
ax.set_yticklabels(range(data.shape[0]))
ax.tick_params(top=False, bottom=True,
labeltop=False, labelbottom=True)
# draw white space between squares
for edge, spine in ax.spines.items():
spine.set_visible(False)
ax.set_xticks(np.arange(data.shape[1] + 1) - 0.5, minor = True)
ax.set_yticks(np.arange(data.shape[0] + 1) - 0.5, minor = True)
ax.grid(which = 'minor', color = 'w', linestyle = '-', linewidth = 3)
ax.tick_params(which = 'minor', bottom = False, left = False)
cbar.ax.set_ylabel(cbarlabel, va = 'top')
return im, cbar
def annotate_heatmap(im, text_label):
textcolors = ['black','white']
data = im.get_array()
# set threshold to decide color
threshold = im.norm(data.max()) / 2
kw = dict(horizontalalignment = 'center',
verticalalignment = 'center')
for i in range(data.shape[0]):
for j in range(data.shape[1]):
kw.update(color = textcolors[im.norm(data[i,j]) > threshold])
im.axes.text(j, i, text_label[i,j], **kw)
def choose_codons(ttest, text):
# dict key: AA -> codon
# only contains AAs with only two codon choices
codon_map = {
'TTT':'Phe', 'TTC':'Phe', 'TAT':'Tyr', 'TAC':'Tyr',
'TGT':'Cys', 'TGC':'Cys', 'CAT':'His', 'CAC':'His',
'CAA':'Gln', 'CAG':'Gln', 'AAT':'Asn', 'AAC':'Asn',
'AAA':'Lys', 'AAG':'Lys', 'GAT':'Asp', 'GAC':'Asp',
'GAA':'Glu', 'GAG':'Glu'}
codon_dict = collections.defaultdict(list)
for i in range(len(ttest)):
for j in range(len(ttest[i])):
if ttest[i][j] < 0.01:
codon = text[i][j][:3]
if codon in codon_map:
codon_dict[codon_map[codon]].append(codon)
file = io.open('AAs_to_compare.txt', 'w')
file.write('Compare following AAs\n')
# AAs that have only two codon choices and show significant
# codon usage difference between SP and LP groups
AAs = []
for AA in codon_dict.keys():
AAs.append(AA)
if len(codon_dict[AA]) == 2:
file.write('%s: %s, %s\n' %
(AA, codon_dict[AA][0], codon_dict[AA][1]))
else:
file.write('%s: %s\n' % (AA, codon_dict[AA][0]))
file.close()
return AAs
def plot_SP_LP(sp_AA_dict, lp_AA_dict):
# plot each AA
for AA in list(sp_AA_dict.keys()):
# list of codon usage information
codon_data = []
# List of codon names
codons = []
for codon in sp_AA_dict[AA]:
# LP group data is displayed from lowest expressed genes
# to highest expressed genes
lp_AA_dict[AA][codon].reverse()
codons.append(codon)
codon_data.append([])
# display SP group data first and then LP group data
codon_data[-1].append(sp_AA_dict[AA][codon])
codon_data[-1].append(lp_AA_dict[AA][codon])
# plot usage curves
codon_usage_plot(codon_data, AA, codons)
def codon_usage_plot(data, AA, codons):
fig, ax = plt.subplots(nrows = 1, ncols = 1, figsize = (15,5))
for i in range(len(data)):
# 0-50 shows SP group data
x_sp = np.linspace(0, 50, len(data[i][0]))
# 50-100 shows LP group data
x_lp = np.linspace(50, 100, len(data[i][1]))
ax.plot(x_sp, data[i][0], label = 'sp_' + codons[i])
ax.plot(x_lp, data[i][1], label = 'lp_' + codons[i])
ax.legend(loc = 1)
ax.set_title(AA)
def plot_distribution(sp_dict, lp_dict, AA):
fig, axes = plt.subplots(nrows = 2, ncols =1, figsize = (40, 20))
for codon in sp_dict[AA]:
x = np.arange(len(sp_dict[AA][codon]))
sp_y = np.array(sp_dict[AA][codon])
lp_y = np.array(lp_dict[AA][codon])
axes[0].plot(x, sp_y)
axes[1].plot(x, lp_y)
plt.show
def get_skellam_distribution(sp_dict, lp_dict, AA):
sp_mu = {}
lp_mu = {}
codons = []
# get mean values
for codon in sp_dict[AA]:
codons.append(codon)
sp_mu[codon] = np.mean(sp_dict[AA][codon])
lp_mu[codon] = np.mean(lp_dict[AA][codon])
skellam_plot(sp_mu[codons[0]], sp_mu[codons[1]], 'SP-' + AA)
skellam_plot(lp_mu[codons[0]], lp_mu[codons[1]], 'LP-' + AA)
def skellam_plot(mu1, mu2, name):
print(mu1,' ', mu2, ' ', mu1-mu2, ' ', name)
fig, ax = plt.subplots(nrows = 1, ncols = 1, figsize = (5, 5))
x = np.arange(stats.skellam.ppf(0.01, mu1, mu2),
stats.skellam.ppf(0.99, mu1, mu2))
ax.plot(x, stats.skellam.pmf(x, mu1, mu2), marker = 'o', label = name)
ax.legend(loc = 1)
plt.show
# main flow
args = parse_args()
sp_codon_usage = Codon_Usage(args.sp_file)
lp_codon_usage = Codon_Usage(args.lp_file)
sp_AA_dict = sp_codon_usage.get_AA_dict()
lp_AA_dict = lp_codon_usage.get_AA_dict()
print("Analyzing SP and LP %s group data\n" % (args.label))
AAs = heatmap_SP_LP(sp_AA_dict, lp_AA_dict, args.label)
plot_SP_LP(sp_AA_dict, lp_AA_dict)
# optional
# get Skellam distributions of AAs that have only two codon choices
# and show distictive usage between SP and LP
'''
sp_all_codon_usage = Codon_Usage('SP_all_gene_seq.txt')
lp_all_codon_usage = Codon_Usage('LP_all_gene_seq.txt')
sp_all_AA_dict = sp_all_codon_usage.get_AA_dict()
lp_all_AA_dict = lp_all_codon_usage.get_AA_dict()
for AA in AAs:
plot_distribution(sp_all_AA_dict, lp_all_AA_dict, AA)
get_skellam_distribution(sp_all_AA_dict, lp_all_AA_dict, AA)
''' | count_all += 1
# if a gene has been read, then append it to all_seq if the
# sequence is triple
if gene_seq!='':
if len(gene_seq)%3:
count_non_triple += 1
else:
all_seq.append(gene_seq)
gene_seq = '' | conditional_block |
game.rs | use std::ops::Add;
use super::rand::{thread_rng, Rng};
use super::direction::Direction;
/// A mask with a single section of 16 bits set to 0.
/// Used to extract a "horizontal slice" out of a 64 bit integer.
pub static ROW_MASK: u64 = 0xFFFF;
/// A `u64` mask with 4 sections each starting after the n * 16th bit.
/// Used to extract a "vertical slice" out of a 64 bit integer.
pub static COL_MASK: u64 = 0x000F_000F_000F_000F_u64;
/// Struct that contains all available moves per row for up, down, right and left.
/// Also stores the score for a given row.
///
/// Moves are stored as power values for tiles.
/// if a power value is `> 0`, print the tile value using `2 << tile` where tile is any 4-bit
/// "nybble" otherwise print a `0` instead.
struct Moves {
pub left: Vec<u64>,
pub right: Vec<u64>,
pub down: Vec<u64>,
pub up: Vec<u64>,
pub scores: Vec<u64>
}
impl Moves {
/// Returns the 4th bit from each row in given board OR'd.
pub fn column_from(board: u64) -> u64 {
(board | (board << 12) | (board << 24) | (board << 36)) & COL_MASK
}
}
lazy_static! {
/// Constructs a new `tfe::Moves`.
///
/// `Moves` stores `right`, `left`, `up`, and `down` moves per row.
/// e.g. left: `0x0011 -> 0x2000` and right: `0x0011 -> 0x0002`.
///
/// Also stores the `scores` per row.
/// The score of a row is the sum of the tile and all intermediate tile merges.
/// e.g. row `0x0002` has a score of `4` and row `0x0003` has a score of `16`.
static ref MOVES: Moves = {
// initialization of move tables
let mut left_moves = vec![0; 65536];
let mut right_moves = vec![0; 65536];
let mut up_moves = vec![0; 65536];
let mut down_moves = vec![0; 65536];
let mut scores = vec![0; 65536];
for row in 0 .. 65536 {
// break row into cells
let mut line = [
(row >> 0) & 0xF,
(row >> 4) & 0xF,
(row >> 8) & 0xF,
(row >> 12) & 0xF
];
// calculate score for given row
let mut s = 0;
for i in 0 .. 4 {
if line[i] > 1 { s += (line[i] - 1) * (2 << line[i]) }
}
scores[row as usize] = s;
let mut i = 0;
// perform a move to the left using current {row} as board
// generates 4 output moves for up, down, left and right by transposing and reversing
// this result.
while i < 3 {
// initial counter for the cell next to the current one (j)
let mut j = i + 1;
// find the next non-zero cell index
while j < 4 {
if line[j] != 0 { break };
j += 1;
};
// if j is out of bounds (> 3), all other cells are empty and we are done looping
if j == 4 { break };
// this is the part responsible for skipping empty (0 value) cells
// if the current cell is zero, shift the next non-zero cell to position i
// and retry this entry until line[i] becomes non-zero
if line[i] == 0 {
line[i] = line[j];
line[j] = 0;
continue;
// otherwise, if the current cell and next cell are the same, merge them
} else if line[i] == line[j] {
if line[i] != 0xF { line[i] += 1 };
line[j] = 0;
}
// finally, move to the next (or current, if i was 0) row
i += 1;
}
// put the new row after merging back together into a "merged" row
let result = (line[0] << 0) |
(line[1] << 4) |
(line[2] << 8) |
(line[3] << 12);
// right and down use normal row and result variables.
// for left and up, we create a reverse of the row and result.
let rev_row = (row >> 12) & 0x000F | (row >> 4) & 0x00F0 | (row << 4) & 0x0F00 | (row << 12) & 0xF000;
let rev_res = (result >> 12) & 0x000F | (result >> 4) & 0x00F0 | (result << 4) & 0x0F00 | (result << 12) & 0xF000;
// results are keyed by row / reverse row index.
let row_idx = row as usize;
let rev_idx = rev_row as usize;
right_moves[row_idx] = row ^ result;
left_moves[rev_idx] = rev_row ^ rev_res;
up_moves[rev_idx] = Moves::column_from(rev_row) ^ Moves::column_from(rev_res);
down_moves[row_idx] = Moves::column_from(row) ^ Moves::column_from(result);
};
Moves { left: left_moves, right: right_moves, down: down_moves, up: up_moves, scores: scores }
};
}
/// Struct used to play a single game of 2048.
///
/// `tfe::Game` uses a single `u64` as board value.
/// The board itself is divided into rows (x4 16 bit "row" per "board") which are
/// divided into tiles (4x 4 bit "nybbles" per "row").
///
/// All manipulations are done using bit-shifts and a precomputed table of moves and scores.
/// Every move is stored as four lookups total, one for each row. The result of XOR'ing each row
/// back into the board at the right position is the output board.
pub struct Game { pub board: u64 }
impl Game {
/// Constructs a new `tfe::Game`.
///
/// `Game` stores a board internally as a `u64`.
///
/// # Examples
///
/// Simple example:
///
/// ```
/// use tfe::Game;
///
/// let mut game = Game::new();
/// # println!("{:016x}", game.board);
/// ```
///
/// Accessing board value:
///
/// ```
/// use tfe::Game;
///
/// let mut game = Game::new();
/// println!("{:016x}", game.board);
/// ```
pub fn new() -> Self {
let mut game = Game { board: 0x0000_0000_0000_0000_u64 };
game.board |= Self::spawn_tile(game.board);
game.board |= Self::spawn_tile(game.board);
game
}
/// Like `new` but takes a closure that accepts two parameters and returns
/// a `Direction`. The parameters passed to the closure:
///
/// - `u64`: The current board
/// - `&Vec<Direction>`: A list of attempted moves that had no effect.
/// Gets cleared when a move succeeds.
///
/// # Examples
///
/// Simple example:
///
/// ```
/// use tfe::{Game, Direction};
///
/// let game = Game::play(|_board, failed| Direction::sample_without(failed));
/// ```
///
/// In this example, the variable `game` will have a value of a single `Game` played to
/// completion. A game is over when it has no moves left. This is true when all possible
/// moves return the same resulting board as before the move was executed.
///
/// The `failed: &Vec<Direction>` will contain **at most** 3 items, when the 4th item is added
/// the game ends automatically without calling the closure again.
pub fn play<F: Fn(u64, &Vec<Direction>) -> Direction>(mv: F) -> Self {
let mut game = Self::new();
let mut attempted: Vec<Direction> = Vec::with_capacity(4);
loop {
let mv = mv(game.board, &attempted);
if !attempted.iter().any(|dir| dir == &mv) {
let result_board = Self::execute(game.board, &mv);
if game.board == result_board {
if attempted.len() == 3 { break }
attempted.push(mv);
} else {
game.board = result_board | Self::spawn_tile(result_board);
attempted.clear();
}
}
}
game
}
/// Returns `board` moved in given `direction`.
///
/// - When `Direction::Left`, return board moved left
/// - When `Direction::Right`, return board moved right
/// - When `Direction::Down`, return board moved down
/// - When `Direction::Up`, return board moved up
///
/// # Examples
///
/// Simple example:
///
/// ```
/// use tfe::{Game, Direction};
///
/// let board = 0x0000_0000_0022_1100;
/// let moved = Game::execute(board, &Direction::Left);
///
/// // | 0 | 0 | 0 | 0 | | 0 | 0 | 0 | 0 |
/// // | 0 | 0 | 0 | 0 | => | 0 | 0 | 0 | 0 |
/// // | 0 | 0 | 4 | 4 | | 8 | 0 | 0 | 0 |
/// // | 2 | 2 | 0 | 0 | | 4 | 0 | 0 | 0 |
///
/// assert_eq!(board, 0x0000_0000_0022_1100);
/// assert_eq!(moved, 0x0000_0000_3000_2000);
/// ```
pub fn execute(board: u64, direction: &Direction) -> u64 {
match direction {
Direction::Left => Self::move_left(board),
Direction::Right => Self::move_right(board),
Direction::Down => Self::move_down(board),
Direction::Up => Self::move_up(board)
}
}
/// Returns a transposed board where rows are transformed into columns and vice versa.
///
/// ```
/// use tfe::Game; | ///
/// // | F | E | D | C | | F | B | 7 | 3 |
/// // | B | A | 9 | 8 | => | E | A | 6 | 2 |
/// // | 7 | 6 | 5 | 4 | | D | 9 | 5 | 1 |
/// // | 3 | 2 | 1 | 0 | | C | 8 | 4 | 0 |
///
/// assert_eq!(Game::transpose(0xFEDC_BA98_7654_3210), 0xFB73_EA62_D951_C840);
/// ```
pub fn transpose(board: u64) -> u64 {
let a1 = board & 0xF0F0_0F0F_F0F0_0F0F_u64;
let a2 = board & 0x0000_F0F0_0000_F0F0_u64;
let a3 = board & 0x0F0F_0000_0F0F_0000_u64;
let a = a1 | (a2 << 12) | (a3 >> 12);
let b1 = a & 0xFF00_FF00_00FF_00FF_u64;
let b2 = a & 0x00FF_00FF_0000_0000_u64;
let b3 = a & 0x0000_0000_FF00_FF00_u64;
b1 | (b2 >> 24) | (b3 << 24)
}
/// Returns a `u64` board moved up.
/// This is the same as calling `Game::execute(board, &Direction::Up)`;
///
/// # Examples
///
/// ```
/// use tfe::Game;
///
/// let board = 0x0000_0000_0000_0011_u64;
/// let result = Game::move_up(board);
///
/// // | 0 | 0 | 0 | 0 | | 0 | 0 | 1 | 1 |
/// // | 0 | 0 | 0 | 0 | => | 0 | 0 | 0 | 0 |
/// // | 0 | 0 | 0 | 0 | | 0 | 0 | 0 | 0 |
/// // | 0 | 0 | 1 | 1 | | 0 | 0 | 0 | 0 |
///
/// assert_eq!(result, 0x0011_0000_0000_0000);
/// ```
pub fn move_up(board: u64) -> u64 {
let mut result = board;
let transposed = Self::transpose(board);
result ^= MOVES.up[((transposed >> 0) & ROW_MASK) as usize] << 0;
result ^= MOVES.up[((transposed >> 16) & ROW_MASK) as usize] << 4;
result ^= MOVES.up[((transposed >> 32) & ROW_MASK) as usize] << 8;
result ^= MOVES.up[((transposed >> 48) & ROW_MASK) as usize] << 12;
result
}
/// Returns a `u64` board moved down.
/// This is the same as calling `Game::execute(board, &Direction::Down)`;
///
/// # Examples
///
/// ```
/// use tfe::Game;
///
/// let board = 0x0011_0000_0000_0011_u64;
/// let result = Game::move_down(board);
///
/// // | 0 | 0 | 1 | 1 | | 0 | 0 | 0 | 0 |
/// // | 0 | 0 | 0 | 0 | => | 0 | 0 | 0 | 0 |
/// // | 0 | 0 | 0 | 0 | | 0 | 0 | 0 | 0 |
/// // | 0 | 0 | 1 | 1 | | 0 | 0 | 2 | 2 |
///
/// assert_eq!(result, 0x0000_0000_0000_0022);
/// ```
pub fn move_down(board: u64) -> u64 {
let mut result = board;
let transposed = Self::transpose(board);
result ^= MOVES.down[((transposed >> 0) & ROW_MASK) as usize] << 0;
result ^= MOVES.down[((transposed >> 16) & ROW_MASK) as usize] << 4;
result ^= MOVES.down[((transposed >> 32) & ROW_MASK) as usize] << 8;
result ^= MOVES.down[((transposed >> 48) & ROW_MASK) as usize] << 12;
result
}
/// Returns a `u64` board moved right.
/// This is the same as calling `Game::execute(board, &Direction::Right)`;
///
/// # Examples
///
/// ```
/// use tfe::Game;
///
/// let board = 0x0000_0000_0000_2211_u64;
/// let result = Game::move_right(board);
///
/// // | 0 | 0 | 0 | 0 | | 0 | 0 | 0 | 0 |
/// // | 0 | 0 | 0 | 0 | => | 0 | 0 | 0 | 0 |
/// // | 0 | 0 | 0 | 0 | | 0 | 0 | 0 | 0 |
/// // | 2 | 2 | 1 | 1 | | 0 | 0 | 3 | 2 |
///
/// assert_eq!(result, 0x0000_0000_0000_0032);
/// ```
pub fn move_right(board: u64) -> u64 {
let mut result = board;
result ^= MOVES.right[((board >> 0) & ROW_MASK) as usize] << 0;
result ^= MOVES.right[((board >> 16) & ROW_MASK) as usize] << 16;
result ^= MOVES.right[((board >> 32) & ROW_MASK) as usize] << 32;
result ^= MOVES.right[((board >> 48) & ROW_MASK) as usize] << 48;
result
}
/// Returns a `u64` board moved left.
/// This is the same as calling `Game::execute(board, &Direction::Left)`;
///
/// # Examples
///
/// ```
/// use tfe::Game;
///
/// let board = 0x0000_0000_0000_2211_u64;
/// let result = Game::move_left(board);
///
/// // | 0 | 0 | 0 | 0 | | 0 | 0 | 0 | 0 |
/// // | 0 | 0 | 0 | 0 | => | 0 | 0 | 0 | 0 |
/// // | 0 | 0 | 0 | 0 | | 0 | 0 | 0 | 0 |
/// // | 2 | 2 | 1 | 1 | | 3 | 2 | 0 | 0 |
///
/// assert_eq!(result, 0x0000_0000_0000_3200);
/// ```
pub fn move_left(board: u64) -> u64 {
let mut result: u64 = board;
result ^= MOVES.left[((board >> 0) & ROW_MASK) as usize] << 0;
result ^= MOVES.left[((board >> 16) & ROW_MASK) as usize] << 16;
result ^= MOVES.left[((board >> 32) & ROW_MASK) as usize] << 32;
result ^= MOVES.left[((board >> 48) & ROW_MASK) as usize] << 48;
result
}
/// Returns the count of tiles with a value of `0`.
///
/// # Examples
///
/// ```
/// use tfe::Game;
///
/// let board = 0x0000_0000_0000_2211_u64;
/// let result = Game::count_empty(board);
///
/// assert_eq!(result, 12);
/// ```
pub fn count_empty(board: u64) -> u32 {
let mut empty = 0;
for i in 0 .. 16 { if ((board >> (i * 4)) & 0xF) == 0 { empty += 1 } }
empty
}
/// Returns the sum of 4 lookups in `table` for each "row" in `board`.
pub fn table_helper<T: Clone + Add<Output = T>>(board: u64, table: &Vec<T>) -> T {
table[((board >> 0) & ROW_MASK) as usize].clone() +
table[((board >> 16) & ROW_MASK) as usize].clone() +
table[((board >> 32) & ROW_MASK) as usize].clone() +
table[((board >> 48) & ROW_MASK) as usize].clone()
}
/// Returns the score of a given `board`.
/// The score of a single tile is the sum of the tile value and all intermediate merged tiles.
pub fn score(board: u64) -> u64 {
Self::table_helper(board, &MOVES.scores)
}
/// Returns a `2` with 90% chance and `4` with 10% chance.
pub fn tile() -> u64 {
if thread_rng().gen_range(0, 10) == 10 { 2 } else { 1 }
}
/// Returns a `1` shifted to the position of any `0` bit in `board` randomly.
pub fn spawn_tile(board: u64) -> u64 {
let mut tmp = board;
let mut idx = thread_rng().gen_range(0, Self::count_empty(board));
let mut t = Self::tile();
loop {
while (tmp & 0xF) != 0 {
tmp >>= 4;
t <<= 4;
}
if idx == 0 { break } else { idx -= 1 }
tmp >>= 4;
t <<= 4
}
t
}
} | random_line_split | |
game.rs | use std::ops::Add;
use super::rand::{thread_rng, Rng};
use super::direction::Direction;
/// A mask with a single section of 16 bits set to 0.
/// Used to extract a "horizontal slice" out of a 64 bit integer.
pub static ROW_MASK: u64 = 0xFFFF;
/// A `u64` mask with 4 sections each starting after the n * 16th bit.
/// Used to extract a "vertical slice" out of a 64 bit integer.
pub static COL_MASK: u64 = 0x000F_000F_000F_000F_u64;
/// Struct that contains all available moves per row for up, down, right and left.
/// Also stores the score for a given row.
///
/// Moves are stored as power values for tiles.
/// if a power value is `> 0`, print the tile value using `2 << tile` where tile is any 4-bit
/// "nybble" otherwise print a `0` instead.
struct Moves {
pub left: Vec<u64>,
pub right: Vec<u64>,
pub down: Vec<u64>,
pub up: Vec<u64>,
pub scores: Vec<u64>
}
impl Moves {
/// Returns the 4th bit from each row in given board OR'd.
pub fn column_from(board: u64) -> u64 {
(board | (board << 12) | (board << 24) | (board << 36)) & COL_MASK
}
}
lazy_static! {
/// Constructs a new `tfe::Moves`.
///
/// `Moves` stores `right`, `left`, `up`, and `down` moves per row.
/// e.g. left: `0x0011 -> 0x2000` and right: `0x0011 -> 0x0002`.
///
/// Also stores the `scores` per row.
/// The score of a row is the sum of the tile and all intermediate tile merges.
/// e.g. row `0x0002` has a score of `4` and row `0x0003` has a score of `16`.
static ref MOVES: Moves = {
// initialization of move tables
let mut left_moves = vec![0; 65536];
let mut right_moves = vec![0; 65536];
let mut up_moves = vec![0; 65536];
let mut down_moves = vec![0; 65536];
let mut scores = vec![0; 65536];
for row in 0 .. 65536 {
// break row into cells
let mut line = [
(row >> 0) & 0xF,
(row >> 4) & 0xF,
(row >> 8) & 0xF,
(row >> 12) & 0xF
];
// calculate score for given row
let mut s = 0;
for i in 0 .. 4 {
if line[i] > 1 { s += (line[i] - 1) * (2 << line[i]) }
}
scores[row as usize] = s;
let mut i = 0;
// perform a move to the left using current {row} as board
// generates 4 output moves for up, down, left and right by transposing and reversing
// this result.
while i < 3 {
// initial counter for the cell next to the current one (j)
let mut j = i + 1;
// find the next non-zero cell index
while j < 4 {
if line[j] != 0 { break };
j += 1;
};
// if j is out of bounds (> 3), all other cells are empty and we are done looping
if j == 4 { break };
// this is the part responsible for skipping empty (0 value) cells
// if the current cell is zero, shift the next non-zero cell to position i
// and retry this entry until line[i] becomes non-zero
if line[i] == 0 {
line[i] = line[j];
line[j] = 0;
continue;
// otherwise, if the current cell and next cell are the same, merge them
} else if line[i] == line[j] {
if line[i] != 0xF { line[i] += 1 };
line[j] = 0;
}
// finally, move to the next (or current, if i was 0) row
i += 1;
}
// put the new row after merging back together into a "merged" row
let result = (line[0] << 0) |
(line[1] << 4) |
(line[2] << 8) |
(line[3] << 12);
// right and down use normal row and result variables.
// for left and up, we create a reverse of the row and result.
let rev_row = (row >> 12) & 0x000F | (row >> 4) & 0x00F0 | (row << 4) & 0x0F00 | (row << 12) & 0xF000;
let rev_res = (result >> 12) & 0x000F | (result >> 4) & 0x00F0 | (result << 4) & 0x0F00 | (result << 12) & 0xF000;
// results are keyed by row / reverse row index.
let row_idx = row as usize;
let rev_idx = rev_row as usize;
right_moves[row_idx] = row ^ result;
left_moves[rev_idx] = rev_row ^ rev_res;
up_moves[rev_idx] = Moves::column_from(rev_row) ^ Moves::column_from(rev_res);
down_moves[row_idx] = Moves::column_from(row) ^ Moves::column_from(result);
};
Moves { left: left_moves, right: right_moves, down: down_moves, up: up_moves, scores: scores }
};
}
/// Struct used to play a single game of 2048.
///
/// `tfe::Game` uses a single `u64` as board value.
/// The board itself is divided into rows (x4 16 bit "row" per "board") which are
/// divided into tiles (4x 4 bit "nybbles" per "row").
///
/// All manipulations are done using bit-shifts and a precomputed table of moves and scores.
/// Every move is stored as four lookups total, one for each row. The result of XOR'ing each row
/// back into the board at the right position is the output board.
pub struct | { pub board: u64 }
impl Game {
/// Constructs a new `tfe::Game`.
///
/// `Game` stores a board internally as a `u64`.
///
/// # Examples
///
/// Simple example:
///
/// ```
/// use tfe::Game;
///
/// let mut game = Game::new();
/// # println!("{:016x}", game.board);
/// ```
///
/// Accessing board value:
///
/// ```
/// use tfe::Game;
///
/// let mut game = Game::new();
/// println!("{:016x}", game.board);
/// ```
pub fn new() -> Self {
let mut game = Game { board: 0x0000_0000_0000_0000_u64 };
game.board |= Self::spawn_tile(game.board);
game.board |= Self::spawn_tile(game.board);
game
}
/// Like `new` but takes a closure that accepts two parameters and returns
/// a `Direction`. The parameters passed to the closure:
///
/// - `u64`: The current board
/// - `&Vec<Direction>`: A list of attempted moves that had no effect.
/// Gets cleared when a move succeeds.
///
/// # Examples
///
/// Simple example:
///
/// ```
/// use tfe::{Game, Direction};
///
/// let game = Game::play(|_board, failed| Direction::sample_without(failed));
/// ```
///
/// In this example, the variable `game` will have a value of a single `Game` played to
/// completion. A game is over when it has no moves left. This is true when all possible
/// moves return the same resulting board as before the move was executed.
///
/// The `failed: &Vec<Direction>` will contain **at most** 3 items, when the 4th item is added
/// the game ends automatically without calling the closure again.
pub fn play<F: Fn(u64, &Vec<Direction>) -> Direction>(mv: F) -> Self {
let mut game = Self::new();
let mut attempted: Vec<Direction> = Vec::with_capacity(4);
loop {
let mv = mv(game.board, &attempted);
if !attempted.iter().any(|dir| dir == &mv) {
let result_board = Self::execute(game.board, &mv);
if game.board == result_board {
if attempted.len() == 3 { break }
attempted.push(mv);
} else {
game.board = result_board | Self::spawn_tile(result_board);
attempted.clear();
}
}
}
game
}
/// Returns `board` moved in given `direction`.
///
/// - When `Direction::Left`, return board moved left
/// - When `Direction::Right`, return board moved right
/// - When `Direction::Down`, return board moved down
/// - When `Direction::Up`, return board moved up
///
/// # Examples
///
/// Simple example:
///
/// ```
/// use tfe::{Game, Direction};
///
/// let board = 0x0000_0000_0022_1100;
/// let moved = Game::execute(board, &Direction::Left);
///
/// // | 0 | 0 | 0 | 0 | | 0 | 0 | 0 | 0 |
/// // | 0 | 0 | 0 | 0 | => | 0 | 0 | 0 | 0 |
/// // | 0 | 0 | 4 | 4 | | 8 | 0 | 0 | 0 |
/// // | 2 | 2 | 0 | 0 | | 4 | 0 | 0 | 0 |
///
/// assert_eq!(board, 0x0000_0000_0022_1100);
/// assert_eq!(moved, 0x0000_0000_3000_2000);
/// ```
pub fn execute(board: u64, direction: &Direction) -> u64 {
match direction {
Direction::Left => Self::move_left(board),
Direction::Right => Self::move_right(board),
Direction::Down => Self::move_down(board),
Direction::Up => Self::move_up(board)
}
}
/// Returns a transposed board where rows are transformed into columns and vice versa.
///
/// ```
/// use tfe::Game;
///
/// // | F | E | D | C | | F | B | 7 | 3 |
/// // | B | A | 9 | 8 | => | E | A | 6 | 2 |
/// // | 7 | 6 | 5 | 4 | | D | 9 | 5 | 1 |
/// // | 3 | 2 | 1 | 0 | | C | 8 | 4 | 0 |
///
/// assert_eq!(Game::transpose(0xFEDC_BA98_7654_3210), 0xFB73_EA62_D951_C840);
/// ```
pub fn transpose(board: u64) -> u64 {
let a1 = board & 0xF0F0_0F0F_F0F0_0F0F_u64;
let a2 = board & 0x0000_F0F0_0000_F0F0_u64;
let a3 = board & 0x0F0F_0000_0F0F_0000_u64;
let a = a1 | (a2 << 12) | (a3 >> 12);
let b1 = a & 0xFF00_FF00_00FF_00FF_u64;
let b2 = a & 0x00FF_00FF_0000_0000_u64;
let b3 = a & 0x0000_0000_FF00_FF00_u64;
b1 | (b2 >> 24) | (b3 << 24)
}
/// Returns a `u64` board moved up.
/// This is the same as calling `Game::execute(board, &Direction::Up)`;
///
/// # Examples
///
/// ```
/// use tfe::Game;
///
/// let board = 0x0000_0000_0000_0011_u64;
/// let result = Game::move_up(board);
///
/// // | 0 | 0 | 0 | 0 | | 0 | 0 | 1 | 1 |
/// // | 0 | 0 | 0 | 0 | => | 0 | 0 | 0 | 0 |
/// // | 0 | 0 | 0 | 0 | | 0 | 0 | 0 | 0 |
/// // | 0 | 0 | 1 | 1 | | 0 | 0 | 0 | 0 |
///
/// assert_eq!(result, 0x0011_0000_0000_0000);
/// ```
pub fn move_up(board: u64) -> u64 {
let mut result = board;
let transposed = Self::transpose(board);
result ^= MOVES.up[((transposed >> 0) & ROW_MASK) as usize] << 0;
result ^= MOVES.up[((transposed >> 16) & ROW_MASK) as usize] << 4;
result ^= MOVES.up[((transposed >> 32) & ROW_MASK) as usize] << 8;
result ^= MOVES.up[((transposed >> 48) & ROW_MASK) as usize] << 12;
result
}
/// Returns a `u64` board moved down.
/// This is the same as calling `Game::execute(board, &Direction::Down)`;
///
/// # Examples
///
/// ```
/// use tfe::Game;
///
/// let board = 0x0011_0000_0000_0011_u64;
/// let result = Game::move_down(board);
///
/// // | 0 | 0 | 1 | 1 | | 0 | 0 | 0 | 0 |
/// // | 0 | 0 | 0 | 0 | => | 0 | 0 | 0 | 0 |
/// // | 0 | 0 | 0 | 0 | | 0 | 0 | 0 | 0 |
/// // | 0 | 0 | 1 | 1 | | 0 | 0 | 2 | 2 |
///
/// assert_eq!(result, 0x0000_0000_0000_0022);
/// ```
pub fn move_down(board: u64) -> u64 {
let mut result = board;
let transposed = Self::transpose(board);
result ^= MOVES.down[((transposed >> 0) & ROW_MASK) as usize] << 0;
result ^= MOVES.down[((transposed >> 16) & ROW_MASK) as usize] << 4;
result ^= MOVES.down[((transposed >> 32) & ROW_MASK) as usize] << 8;
result ^= MOVES.down[((transposed >> 48) & ROW_MASK) as usize] << 12;
result
}
/// Returns a `u64` board moved right.
/// This is the same as calling `Game::execute(board, &Direction::Right)`;
///
/// # Examples
///
/// ```
/// use tfe::Game;
///
/// let board = 0x0000_0000_0000_2211_u64;
/// let result = Game::move_right(board);
///
/// // | 0 | 0 | 0 | 0 | | 0 | 0 | 0 | 0 |
/// // | 0 | 0 | 0 | 0 | => | 0 | 0 | 0 | 0 |
/// // | 0 | 0 | 0 | 0 | | 0 | 0 | 0 | 0 |
/// // | 2 | 2 | 1 | 1 | | 0 | 0 | 3 | 2 |
///
/// assert_eq!(result, 0x0000_0000_0000_0032);
/// ```
pub fn move_right(board: u64) -> u64 {
let mut result = board;
result ^= MOVES.right[((board >> 0) & ROW_MASK) as usize] << 0;
result ^= MOVES.right[((board >> 16) & ROW_MASK) as usize] << 16;
result ^= MOVES.right[((board >> 32) & ROW_MASK) as usize] << 32;
result ^= MOVES.right[((board >> 48) & ROW_MASK) as usize] << 48;
result
}
/// Returns a `u64` board moved left.
/// This is the same as calling `Game::execute(board, &Direction::Left)`;
///
/// # Examples
///
/// ```
/// use tfe::Game;
///
/// let board = 0x0000_0000_0000_2211_u64;
/// let result = Game::move_left(board);
///
/// // | 0 | 0 | 0 | 0 | | 0 | 0 | 0 | 0 |
/// // | 0 | 0 | 0 | 0 | => | 0 | 0 | 0 | 0 |
/// // | 0 | 0 | 0 | 0 | | 0 | 0 | 0 | 0 |
/// // | 2 | 2 | 1 | 1 | | 3 | 2 | 0 | 0 |
///
/// assert_eq!(result, 0x0000_0000_0000_3200);
/// ```
pub fn move_left(board: u64) -> u64 {
let mut result: u64 = board;
result ^= MOVES.left[((board >> 0) & ROW_MASK) as usize] << 0;
result ^= MOVES.left[((board >> 16) & ROW_MASK) as usize] << 16;
result ^= MOVES.left[((board >> 32) & ROW_MASK) as usize] << 32;
result ^= MOVES.left[((board >> 48) & ROW_MASK) as usize] << 48;
result
}
/// Returns the count of tiles with a value of `0`.
///
/// # Examples
///
/// ```
/// use tfe::Game;
///
/// let board = 0x0000_0000_0000_2211_u64;
/// let result = Game::count_empty(board);
///
/// assert_eq!(result, 12);
/// ```
pub fn count_empty(board: u64) -> u32 {
let mut empty = 0;
for i in 0 .. 16 { if ((board >> (i * 4)) & 0xF) == 0 { empty += 1 } }
empty
}
/// Returns the sum of 4 lookups in `table` for each "row" in `board`.
pub fn table_helper<T: Clone + Add<Output = T>>(board: u64, table: &Vec<T>) -> T {
table[((board >> 0) & ROW_MASK) as usize].clone() +
table[((board >> 16) & ROW_MASK) as usize].clone() +
table[((board >> 32) & ROW_MASK) as usize].clone() +
table[((board >> 48) & ROW_MASK) as usize].clone()
}
/// Returns the score of a given `board`.
/// The score of a single tile is the sum of the tile value and all intermediate merged tiles.
pub fn score(board: u64) -> u64 {
Self::table_helper(board, &MOVES.scores)
}
/// Returns a `2` with 90% chance and `4` with 10% chance.
pub fn tile() -> u64 {
if thread_rng().gen_range(0, 10) == 10 { 2 } else { 1 }
}
/// Returns a `1` shifted to the position of any `0` bit in `board` randomly.
pub fn spawn_tile(board: u64) -> u64 {
let mut tmp = board;
let mut idx = thread_rng().gen_range(0, Self::count_empty(board));
let mut t = Self::tile();
loop {
while (tmp & 0xF) != 0 {
tmp >>= 4;
t <<= 4;
}
if idx == 0 { break } else { idx -= 1 }
tmp >>= 4;
t <<= 4
}
t
}
}
| Game | identifier_name |
svg.go | // +build darwin
package bg
import (
"encoding/xml"
"fmt"
"image"
"io"
"log"
"regexp"
"strconv"
"strings"
"text/scanner"
//polyclip "github.com/akavel/polyclip-go"
"github.com/paulsmith/gogeos/geos"
)
type svgVert struct {
X, Y float64
}
type svgPolyline struct {
RawPoints string `xml:"points,attr"`
Points []svgVert
}
type svgPolygon struct {
Points []svgVert
Mode int
PointsData string `xml:"points,attr"`
}
type svgPath struct {
Name string `xml:"id,attr"`
D string `xml:"d,attr"`
Description string `xml:"desc"`
Polygons []svgPolygon
}
type SvgGroup struct {
Name string `xml:"id,attr"`
Groups []SvgGroup `xml:"g"`
Paths []svgPath `xml:"path"`
Polygons []svgPolygon `xml:"polygon"`
Transform string `xml:"transform,attr"`
translate svgVert
}
type SvgFile struct {
XMLName xml.Name `xml:"svg"`
Width int `xml:"width,attr"`
Height int `xml:"height,attr"`
Groups []SvgGroup `xml:"g"`
}
func (path *svgPath) mode() int {
var settingRegexp = regexp.MustCompile(`([a-zA-Z]+)\:([ 0-9]+)`)
if path.Description != "" {
matches := settingRegexp.FindStringSubmatch(path.Description)
if matches != nil {
switch matches[1] {
case "mode":
val, _ := strconv.Atoi(strings.TrimSpace(matches[2]))
return val
}
}
}
return 0
}
func (poly *svgPolygon) JsonPoly() jsonWedPolygon {
wall := jsonWedPolygon{Mode: poly.Mode}
wall.Verts = make([]image.Point, len(poly.Points))
for idx, pt := range poly.Points {
wall.Verts[idx].X = int(pt.X)
wall.Verts[idx].Y = int(pt.Y)
}
return wall
}
func (poly *svgPolygon) generatePoints() {
vals := strings.FieldsFunc(poly.PointsData, func(r rune) bool {
return (r == ' ' || r == '\t' || r == ',')
})
poly.Points = make([]svgVert, len(vals)/2)
for idx, _ := range poly.Points {
x, _ := strconv.ParseFloat(vals[idx*2], 32)
y, _ := strconv.ParseFloat(vals[idx*2+1], 32)
poly.Points[idx].X = x
poly.Points[idx].Y = y
}
}
type svgPathScanner struct {
Path string
Polygons []svgPolygon
CurrentPolygon *svgPolygon
Cursor svgVert
Mode int
S scanner.Scanner
}
func NewPathScanner(path string, mode int) svgPathScanner {
log.Printf("Scanner path: %s\n", path)
sps := svgPathScanner{Path: path, Mode: mode}
return sps
}
func (sps *svgPathScanner) scanTwoInts() (int, int) {
X := sps.scanOneInt()
sps.scanWhitespace()
Y := sps.scanOneInt()
log.Printf("X: %d Y: %d\n", X, Y)
return X, Y
}
func (sps *svgPathScanner) scanWhitespace() {
for r := sps.S.Peek(); r == ' ' || r == ','; r = sps.S.Peek() {
r = sps.S.Next()
}
}
func (sps *svgPathScanner) scanOneInt() int {
r := sps.S.Scan()
sign := 1
if r == '-' {
sps.S.Scan()
sign = -1
}
X, _ := strconv.ParseFloat(sps.S.TokenText(), 32)
return int(X) * sign
}
func (sps svgPathScanner) GeneratePolygons() ([]svgPolygon, error) {
sps.S.Init(strings.NewReader(sps.Path))
sps.S.Mode = scanner.ScanFloats | scanner.ScanChars
tok := sps.S.Scan()
lastTokenText := ""
for tok != scanner.EOF {
tokenText := sps.S.TokenText()
log.Printf("TT: %s LTT:%s\n", tokenText, lastTokenText)
if !sps.handleToken(tokenText) {
log.Printf("Retry\n")
sps.handleToken(lastTokenText)
}
lastTokenText = tokenText
tok = sps.S.Scan()
}
return sps.Polygons, nil
}
func (sps *svgPathScanner) handleToken(cmd string) bool {
log.Printf("Cmd: %s\n", cmd)
switch cmd {
case "M":
poly := svgPolygon{Mode: sps.Mode}
sps.CurrentPolygon = &poly
x, y := sps.scanTwoInts()
sps.Cursor.X, sps.Cursor.Y = float64(x), float64(y)
sps.CurrentPolygon.Points = append(sps.CurrentPolygon.Points, sps.Cursor)
case "m":
poly := svgPolygon{Mode: sps.Mode}
sps.CurrentPolygon = &poly
X, Y := sps.scanTwoInts()
sps.Cursor.X += float64(X)
sps.Cursor.Y += float64(Y)
sps.CurrentPolygon.Points = append(sps.CurrentPolygon.Points, sps.Cursor)
case "L":
X, Y := sps.scanTwoInts()
sps.Cursor.X, sps.Cursor.Y = float64(X), float64(Y)
sps.CurrentPolygon.Points = append(sps.CurrentPolygon.Points, sps.Cursor)
case "l":
X, Y := sps.scanTwoInts()
sps.Cursor.X += float64(X)
sps.Cursor.Y += float64(Y)
sps.CurrentPolygon.Points = append(sps.CurrentPolygon.Points, sps.Cursor)
case "H":
sps.Cursor.X = float64(sps.scanOneInt())
sps.CurrentPolygon.Points = append(sps.CurrentPolygon.Points, sps.Cursor)
case "h":
sps.Cursor.X += float64(sps.scanOneInt())
sps.CurrentPolygon.Points = append(sps.CurrentPolygon.Points, sps.Cursor)
case "V":
sps.Cursor.Y = float64(sps.scanOneInt())
sps.CurrentPolygon.Points = append(sps.CurrentPolygon.Points, sps.Cursor)
case "v":
sps.Cursor.Y += float64(sps.scanOneInt())
sps.CurrentPolygon.Points = append(sps.CurrentPolygon.Points, sps.Cursor)
case "z":
sps.Polygons = append(sps.Polygons, *sps.CurrentPolygon)
sps.CurrentPolygon = nil
case "Z":
sps.Polygons = append(sps.Polygons, *sps.CurrentPolygon)
sps.CurrentPolygon = nil
default:
return false
}
return true
}
func (path *svgPath) generatePath() {
polys, err := NewPathScanner(path.D, path.mode()).GeneratePolygons()
if err != nil {
log.Printf("Error generating polygons: %v", err)
}
log.Printf("Polys: %+v\n", polys)
path.Polygons = polys
}
func (group *SvgGroup) updateTransform() {
var transformRegex = regexp.MustCompile(`([a-zA-Z]+)\((.*)\)`)
if group.Transform != "" {
matches := transformRegex.FindStringSubmatch(group.Transform)
if matches == nil {
log.Printf("Unknown transform: %s\n", group.Transform)
} else if matches[1] == "translate" {
coords := strings.Split(matches[2], ",")
group.translate.X, _ = strconv.ParseFloat(coords[0], 64)
group.translate.Y, _ = strconv.ParseFloat(coords[1], 64)
} else {
log.Printf("Unknown transform: [%s] in %s\n", matches[1], group.Transform)
}
}
}
func (group *SvgGroup) updatePaths() {
for idx := range group.Paths {
group.Paths[idx].generatePath()
group.Paths[idx].Name = group.Name
}
}
func (group *SvgGroup) updatePolygons() {
for idx := range group.Polygons {
group.Polygons[idx].generatePoints()
}
}
func (group *SvgGroup) process() {
group.updateTransform()
group.updatePaths()
group.updatePolygons()
for idx := range group.Groups {
g := &group.Groups[idx]
g.process()
}
}
func (group *SvgGroup) GetPaths() []svgPath {
paths := make([]svgPath, 0)
if strings.HasPrefix(group.Name, "door_open_") || strings.HasPrefix(group.Name, "door_closed_") || strings.HasPrefix(group.Name, "walls") {
for _, p := range group.Paths {
paths = append(paths, p)
}
}
for _, g := range group.Groups {
paths = append(paths, g.GetPaths()...)
}
for idx, _ := range paths {
p := &paths[idx]
for pIdx, _ := range p.Polygons {
poly := &p.Polygons[pIdx]
for vIdx, _ := range poly.Points {
v := &poly.Points[vIdx]
v.X += group.translate.X
v.Y += group.translate.Y
}
}
}
return paths
}
func geosPolygonToPolygon(poly *geos.Geometry) svgPolygon {
shell, err := poly.Shell()
if err != nil {
log.Fatal(fmt.Errorf("Shell creation error: %+v", err))
}
mergedPoly := svgPolygon{}
coords, err := shell.Coords()
if err != nil {
log.Fatal(fmt.Errorf("Coords error: %+v", err))
}
for _, pt := range coords {
mergedPoly.Points = append(mergedPoly.Points, svgVert{pt.X, pt.Y})
}
return mergedPoly
}
func (group *SvgGroup) MergePolygons() {
if len(group.Polygons) > 0 {
var poly *geos.Geometry
for _, p := range group.Polygons {
if len(p.Points) > 2 {
verts := make([]geos.Coord, 0)
for _, v := range p.Points {
verts = append(verts, geos.NewCoord(v.X, v.Y)) | }
verts = append(verts, geos.NewCoord(p.Points[0].X, p.Points[0].Y))
if poly == nil {
newPoly, err := geos.NewPolygon(verts)
if err != nil {
log.Fatal(fmt.Errorf("New poly creation error: %+v", err))
}
poly = newPoly
} else {
uPoly, _ := geos.NewPolygon(verts)
uPolyType, _ := uPoly.Type()
if uPolyType == geos.POLYGON {
union, err := poly.Union(uPoly)
if err != nil {
log.Printf("Skipping poly: Poly union error: %+v %+v", err, uPoly)
} else {
poly = union
}
} else {
log.Printf("Not a poly: %d %+v", uPolyType, uPoly)
}
}
}
}
polyType, err := poly.Type()
if err != nil {
log.Fatal(fmt.Errorf("Poly type error: %+v", err))
}
if polyType == geos.POLYGON {
group.Polygons = make([]svgPolygon, 1)
group.Polygons[0] = geosPolygonToPolygon(poly)
} else if polyType == geos.MULTIPOLYGON || polyType == geos.GEOMETRYCOLLECTION {
geomCount, err := poly.NGeometry()
if err != nil {
log.Fatal(fmt.Errorf("Error getting geometry count", err))
}
log.Printf("GC: %d PLEN: %d\n", geomCount, len(group.Polygons))
group.Polygons = make([]svgPolygon, 0)
for i := 0; i < geomCount; i++ {
geom, err := poly.Geometry(i)
if err != nil {
log.Fatal(fmt.Errorf("Error getting geometry: %d %+v", i, err))
}
geomType, _ := geom.Type()
if geomType == geos.POLYGON {
group.Polygons = append(group.Polygons, geosPolygonToPolygon(geom))
}
}
} else {
log.Printf("DUMP: %+v", poly)
log.Printf("Polytype: %d\n", polyType)
}
}
}
/*
func (group *SvgGroup) MergePolygons() {
if len(group.Polygons) > 0 {
poly := polyclip.Polygon{}
for _, cont := range group.Polygons {
c := polyclip.Contour{}
for _, v := range cont.Points {
c.Add(polyclip.Point{v.X * 100, v.Y * 100})
}
poly.Add(c)
}
union := poly
unionPoints := 0
for cidx, cont := range group.Polygons {
c := polyclip.Contour{}
for _, v := range cont.Points {
c.Add(polyclip.Point{v.X * 100, v.Y * 100})
}
u2 := union.Construct(polyclip.UNION, polyclip.Polygon{c})
bb := c.BoundingBox()
newPoints := 0
for _, c := range u2 {
newPoints += len(c)
}
if newPoints >= unionPoints {
union = u2
unionPoints = newPoints
} else {
log.Printf("Contour: %d %+v %d Area: %0.3f %d %d", cidx, c.BoundingBox(), len(c), (bb.Max.X-bb.Min.X) * (bb.Max.Y - bb.Min.Y), newPoints, unionPoints)
log.Printf("Clockwise: %v", polyClockwise(c[0].X, c[0].Y, c[1].X, c[1].Y, c[len(c)-1].X, c[len(c)-1].Y))
log.Printf("Union: %+v", union)
log.Printf("Union2: %+v", u2)
}
}
group.Polygons = make([]svgPolygon, len(union))
for _, p := range union {
mergedPoly := svgPolygon{}
for _, pt := range p {
mergedPoly.Points = append(mergedPoly.Points, svgVert{pt.X, pt.Y})
}
group.Polygons = append(group.Polygons, mergedPoly)
}
}
}
*/
func (group *SvgGroup) MergePolygonsRecursive() {
group.MergePolygons()
for idx := range group.Groups {
group.Groups[idx].MergePolygonsRecursive()
}
}
func (svg *SvgFile) process() {
for idx := range svg.Groups {
g := &svg.Groups[idx]
g.process()
}
}
func (svg *SvgFile) MergePolygonsByGroup() {
for idx := range svg.Groups {
svg.Groups[idx].MergePolygonsRecursive()
}
}
func (svg *SvgFile) Paths() []svgPath {
paths := make([]svgPath, 0)
for _, g := range svg.Groups {
paths = append(paths, g.GetPaths()...)
}
return paths
}
func OpenSVG(r io.Reader) (*SvgFile, error) {
svg := &SvgFile{}
decoder := xml.NewDecoder(r)
if err := decoder.Decode(&svg); err != nil {
return nil, err
}
svg.process()
return svg, nil
} | random_line_split | |
svg.go | // +build darwin
package bg
import (
"encoding/xml"
"fmt"
"image"
"io"
"log"
"regexp"
"strconv"
"strings"
"text/scanner"
//polyclip "github.com/akavel/polyclip-go"
"github.com/paulsmith/gogeos/geos"
)
type svgVert struct {
X, Y float64
}
type svgPolyline struct {
RawPoints string `xml:"points,attr"`
Points []svgVert
}
type svgPolygon struct {
Points []svgVert
Mode int
PointsData string `xml:"points,attr"`
}
type svgPath struct {
Name string `xml:"id,attr"`
D string `xml:"d,attr"`
Description string `xml:"desc"`
Polygons []svgPolygon
}
type SvgGroup struct {
Name string `xml:"id,attr"`
Groups []SvgGroup `xml:"g"`
Paths []svgPath `xml:"path"`
Polygons []svgPolygon `xml:"polygon"`
Transform string `xml:"transform,attr"`
translate svgVert
}
type SvgFile struct {
XMLName xml.Name `xml:"svg"`
Width int `xml:"width,attr"`
Height int `xml:"height,attr"`
Groups []SvgGroup `xml:"g"`
}
func (path *svgPath) mode() int {
var settingRegexp = regexp.MustCompile(`([a-zA-Z]+)\:([ 0-9]+)`)
if path.Description != "" {
matches := settingRegexp.FindStringSubmatch(path.Description)
if matches != nil {
switch matches[1] {
case "mode":
val, _ := strconv.Atoi(strings.TrimSpace(matches[2]))
return val
}
}
}
return 0
}
func (poly *svgPolygon) JsonPoly() jsonWedPolygon |
func (poly *svgPolygon) generatePoints() {
vals := strings.FieldsFunc(poly.PointsData, func(r rune) bool {
return (r == ' ' || r == '\t' || r == ',')
})
poly.Points = make([]svgVert, len(vals)/2)
for idx, _ := range poly.Points {
x, _ := strconv.ParseFloat(vals[idx*2], 32)
y, _ := strconv.ParseFloat(vals[idx*2+1], 32)
poly.Points[idx].X = x
poly.Points[idx].Y = y
}
}
type svgPathScanner struct {
Path string
Polygons []svgPolygon
CurrentPolygon *svgPolygon
Cursor svgVert
Mode int
S scanner.Scanner
}
func NewPathScanner(path string, mode int) svgPathScanner {
log.Printf("Scanner path: %s\n", path)
sps := svgPathScanner{Path: path, Mode: mode}
return sps
}
func (sps *svgPathScanner) scanTwoInts() (int, int) {
X := sps.scanOneInt()
sps.scanWhitespace()
Y := sps.scanOneInt()
log.Printf("X: %d Y: %d\n", X, Y)
return X, Y
}
func (sps *svgPathScanner) scanWhitespace() {
for r := sps.S.Peek(); r == ' ' || r == ','; r = sps.S.Peek() {
r = sps.S.Next()
}
}
func (sps *svgPathScanner) scanOneInt() int {
r := sps.S.Scan()
sign := 1
if r == '-' {
sps.S.Scan()
sign = -1
}
X, _ := strconv.ParseFloat(sps.S.TokenText(), 32)
return int(X) * sign
}
func (sps svgPathScanner) GeneratePolygons() ([]svgPolygon, error) {
sps.S.Init(strings.NewReader(sps.Path))
sps.S.Mode = scanner.ScanFloats | scanner.ScanChars
tok := sps.S.Scan()
lastTokenText := ""
for tok != scanner.EOF {
tokenText := sps.S.TokenText()
log.Printf("TT: %s LTT:%s\n", tokenText, lastTokenText)
if !sps.handleToken(tokenText) {
log.Printf("Retry\n")
sps.handleToken(lastTokenText)
}
lastTokenText = tokenText
tok = sps.S.Scan()
}
return sps.Polygons, nil
}
func (sps *svgPathScanner) handleToken(cmd string) bool {
log.Printf("Cmd: %s\n", cmd)
switch cmd {
case "M":
poly := svgPolygon{Mode: sps.Mode}
sps.CurrentPolygon = &poly
x, y := sps.scanTwoInts()
sps.Cursor.X, sps.Cursor.Y = float64(x), float64(y)
sps.CurrentPolygon.Points = append(sps.CurrentPolygon.Points, sps.Cursor)
case "m":
poly := svgPolygon{Mode: sps.Mode}
sps.CurrentPolygon = &poly
X, Y := sps.scanTwoInts()
sps.Cursor.X += float64(X)
sps.Cursor.Y += float64(Y)
sps.CurrentPolygon.Points = append(sps.CurrentPolygon.Points, sps.Cursor)
case "L":
X, Y := sps.scanTwoInts()
sps.Cursor.X, sps.Cursor.Y = float64(X), float64(Y)
sps.CurrentPolygon.Points = append(sps.CurrentPolygon.Points, sps.Cursor)
case "l":
X, Y := sps.scanTwoInts()
sps.Cursor.X += float64(X)
sps.Cursor.Y += float64(Y)
sps.CurrentPolygon.Points = append(sps.CurrentPolygon.Points, sps.Cursor)
case "H":
sps.Cursor.X = float64(sps.scanOneInt())
sps.CurrentPolygon.Points = append(sps.CurrentPolygon.Points, sps.Cursor)
case "h":
sps.Cursor.X += float64(sps.scanOneInt())
sps.CurrentPolygon.Points = append(sps.CurrentPolygon.Points, sps.Cursor)
case "V":
sps.Cursor.Y = float64(sps.scanOneInt())
sps.CurrentPolygon.Points = append(sps.CurrentPolygon.Points, sps.Cursor)
case "v":
sps.Cursor.Y += float64(sps.scanOneInt())
sps.CurrentPolygon.Points = append(sps.CurrentPolygon.Points, sps.Cursor)
case "z":
sps.Polygons = append(sps.Polygons, *sps.CurrentPolygon)
sps.CurrentPolygon = nil
case "Z":
sps.Polygons = append(sps.Polygons, *sps.CurrentPolygon)
sps.CurrentPolygon = nil
default:
return false
}
return true
}
func (path *svgPath) generatePath() {
polys, err := NewPathScanner(path.D, path.mode()).GeneratePolygons()
if err != nil {
log.Printf("Error generating polygons: %v", err)
}
log.Printf("Polys: %+v\n", polys)
path.Polygons = polys
}
func (group *SvgGroup) updateTransform() {
var transformRegex = regexp.MustCompile(`([a-zA-Z]+)\((.*)\)`)
if group.Transform != "" {
matches := transformRegex.FindStringSubmatch(group.Transform)
if matches == nil {
log.Printf("Unknown transform: %s\n", group.Transform)
} else if matches[1] == "translate" {
coords := strings.Split(matches[2], ",")
group.translate.X, _ = strconv.ParseFloat(coords[0], 64)
group.translate.Y, _ = strconv.ParseFloat(coords[1], 64)
} else {
log.Printf("Unknown transform: [%s] in %s\n", matches[1], group.Transform)
}
}
}
func (group *SvgGroup) updatePaths() {
for idx := range group.Paths {
group.Paths[idx].generatePath()
group.Paths[idx].Name = group.Name
}
}
func (group *SvgGroup) updatePolygons() {
for idx := range group.Polygons {
group.Polygons[idx].generatePoints()
}
}
func (group *SvgGroup) process() {
group.updateTransform()
group.updatePaths()
group.updatePolygons()
for idx := range group.Groups {
g := &group.Groups[idx]
g.process()
}
}
func (group *SvgGroup) GetPaths() []svgPath {
paths := make([]svgPath, 0)
if strings.HasPrefix(group.Name, "door_open_") || strings.HasPrefix(group.Name, "door_closed_") || strings.HasPrefix(group.Name, "walls") {
for _, p := range group.Paths {
paths = append(paths, p)
}
}
for _, g := range group.Groups {
paths = append(paths, g.GetPaths()...)
}
for idx, _ := range paths {
p := &paths[idx]
for pIdx, _ := range p.Polygons {
poly := &p.Polygons[pIdx]
for vIdx, _ := range poly.Points {
v := &poly.Points[vIdx]
v.X += group.translate.X
v.Y += group.translate.Y
}
}
}
return paths
}
func geosPolygonToPolygon(poly *geos.Geometry) svgPolygon {
shell, err := poly.Shell()
if err != nil {
log.Fatal(fmt.Errorf("Shell creation error: %+v", err))
}
mergedPoly := svgPolygon{}
coords, err := shell.Coords()
if err != nil {
log.Fatal(fmt.Errorf("Coords error: %+v", err))
}
for _, pt := range coords {
mergedPoly.Points = append(mergedPoly.Points, svgVert{pt.X, pt.Y})
}
return mergedPoly
}
func (group *SvgGroup) MergePolygons() {
if len(group.Polygons) > 0 {
var poly *geos.Geometry
for _, p := range group.Polygons {
if len(p.Points) > 2 {
verts := make([]geos.Coord, 0)
for _, v := range p.Points {
verts = append(verts, geos.NewCoord(v.X, v.Y))
}
verts = append(verts, geos.NewCoord(p.Points[0].X, p.Points[0].Y))
if poly == nil {
newPoly, err := geos.NewPolygon(verts)
if err != nil {
log.Fatal(fmt.Errorf("New poly creation error: %+v", err))
}
poly = newPoly
} else {
uPoly, _ := geos.NewPolygon(verts)
uPolyType, _ := uPoly.Type()
if uPolyType == geos.POLYGON {
union, err := poly.Union(uPoly)
if err != nil {
log.Printf("Skipping poly: Poly union error: %+v %+v", err, uPoly)
} else {
poly = union
}
} else {
log.Printf("Not a poly: %d %+v", uPolyType, uPoly)
}
}
}
}
polyType, err := poly.Type()
if err != nil {
log.Fatal(fmt.Errorf("Poly type error: %+v", err))
}
if polyType == geos.POLYGON {
group.Polygons = make([]svgPolygon, 1)
group.Polygons[0] = geosPolygonToPolygon(poly)
} else if polyType == geos.MULTIPOLYGON || polyType == geos.GEOMETRYCOLLECTION {
geomCount, err := poly.NGeometry()
if err != nil {
log.Fatal(fmt.Errorf("Error getting geometry count", err))
}
log.Printf("GC: %d PLEN: %d\n", geomCount, len(group.Polygons))
group.Polygons = make([]svgPolygon, 0)
for i := 0; i < geomCount; i++ {
geom, err := poly.Geometry(i)
if err != nil {
log.Fatal(fmt.Errorf("Error getting geometry: %d %+v", i, err))
}
geomType, _ := geom.Type()
if geomType == geos.POLYGON {
group.Polygons = append(group.Polygons, geosPolygonToPolygon(geom))
}
}
} else {
log.Printf("DUMP: %+v", poly)
log.Printf("Polytype: %d\n", polyType)
}
}
}
/*
func (group *SvgGroup) MergePolygons() {
if len(group.Polygons) > 0 {
poly := polyclip.Polygon{}
for _, cont := range group.Polygons {
c := polyclip.Contour{}
for _, v := range cont.Points {
c.Add(polyclip.Point{v.X * 100, v.Y * 100})
}
poly.Add(c)
}
union := poly
unionPoints := 0
for cidx, cont := range group.Polygons {
c := polyclip.Contour{}
for _, v := range cont.Points {
c.Add(polyclip.Point{v.X * 100, v.Y * 100})
}
u2 := union.Construct(polyclip.UNION, polyclip.Polygon{c})
bb := c.BoundingBox()
newPoints := 0
for _, c := range u2 {
newPoints += len(c)
}
if newPoints >= unionPoints {
union = u2
unionPoints = newPoints
} else {
log.Printf("Contour: %d %+v %d Area: %0.3f %d %d", cidx, c.BoundingBox(), len(c), (bb.Max.X-bb.Min.X) * (bb.Max.Y - bb.Min.Y), newPoints, unionPoints)
log.Printf("Clockwise: %v", polyClockwise(c[0].X, c[0].Y, c[1].X, c[1].Y, c[len(c)-1].X, c[len(c)-1].Y))
log.Printf("Union: %+v", union)
log.Printf("Union2: %+v", u2)
}
}
group.Polygons = make([]svgPolygon, len(union))
for _, p := range union {
mergedPoly := svgPolygon{}
for _, pt := range p {
mergedPoly.Points = append(mergedPoly.Points, svgVert{pt.X, pt.Y})
}
group.Polygons = append(group.Polygons, mergedPoly)
}
}
}
*/
func (group *SvgGroup) MergePolygonsRecursive() {
group.MergePolygons()
for idx := range group.Groups {
group.Groups[idx].MergePolygonsRecursive()
}
}
func (svg *SvgFile) process() {
for idx := range svg.Groups {
g := &svg.Groups[idx]
g.process()
}
}
func (svg *SvgFile) MergePolygonsByGroup() {
for idx := range svg.Groups {
svg.Groups[idx].MergePolygonsRecursive()
}
}
func (svg *SvgFile) Paths() []svgPath {
paths := make([]svgPath, 0)
for _, g := range svg.Groups {
paths = append(paths, g.GetPaths()...)
}
return paths
}
func OpenSVG(r io.Reader) (*SvgFile, error) {
svg := &SvgFile{}
decoder := xml.NewDecoder(r)
if err := decoder.Decode(&svg); err != nil {
return nil, err
}
svg.process()
return svg, nil
}
| {
wall := jsonWedPolygon{Mode: poly.Mode}
wall.Verts = make([]image.Point, len(poly.Points))
for idx, pt := range poly.Points {
wall.Verts[idx].X = int(pt.X)
wall.Verts[idx].Y = int(pt.Y)
}
return wall
} | identifier_body |
svg.go | // +build darwin
package bg
import (
"encoding/xml"
"fmt"
"image"
"io"
"log"
"regexp"
"strconv"
"strings"
"text/scanner"
//polyclip "github.com/akavel/polyclip-go"
"github.com/paulsmith/gogeos/geos"
)
type svgVert struct {
X, Y float64
}
type svgPolyline struct {
RawPoints string `xml:"points,attr"`
Points []svgVert
}
type svgPolygon struct {
Points []svgVert
Mode int
PointsData string `xml:"points,attr"`
}
type svgPath struct {
Name string `xml:"id,attr"`
D string `xml:"d,attr"`
Description string `xml:"desc"`
Polygons []svgPolygon
}
type SvgGroup struct {
Name string `xml:"id,attr"`
Groups []SvgGroup `xml:"g"`
Paths []svgPath `xml:"path"`
Polygons []svgPolygon `xml:"polygon"`
Transform string `xml:"transform,attr"`
translate svgVert
}
type SvgFile struct {
XMLName xml.Name `xml:"svg"`
Width int `xml:"width,attr"`
Height int `xml:"height,attr"`
Groups []SvgGroup `xml:"g"`
}
func (path *svgPath) mode() int {
var settingRegexp = regexp.MustCompile(`([a-zA-Z]+)\:([ 0-9]+)`)
if path.Description != "" {
matches := settingRegexp.FindStringSubmatch(path.Description)
if matches != nil {
switch matches[1] {
case "mode":
val, _ := strconv.Atoi(strings.TrimSpace(matches[2]))
return val
}
}
}
return 0
}
func (poly *svgPolygon) JsonPoly() jsonWedPolygon {
wall := jsonWedPolygon{Mode: poly.Mode}
wall.Verts = make([]image.Point, len(poly.Points))
for idx, pt := range poly.Points {
wall.Verts[idx].X = int(pt.X)
wall.Verts[idx].Y = int(pt.Y)
}
return wall
}
func (poly *svgPolygon) generatePoints() {
vals := strings.FieldsFunc(poly.PointsData, func(r rune) bool {
return (r == ' ' || r == '\t' || r == ',')
})
poly.Points = make([]svgVert, len(vals)/2)
for idx, _ := range poly.Points {
x, _ := strconv.ParseFloat(vals[idx*2], 32)
y, _ := strconv.ParseFloat(vals[idx*2+1], 32)
poly.Points[idx].X = x
poly.Points[idx].Y = y
}
}
type svgPathScanner struct {
Path string
Polygons []svgPolygon
CurrentPolygon *svgPolygon
Cursor svgVert
Mode int
S scanner.Scanner
}
func NewPathScanner(path string, mode int) svgPathScanner {
log.Printf("Scanner path: %s\n", path)
sps := svgPathScanner{Path: path, Mode: mode}
return sps
}
func (sps *svgPathScanner) scanTwoInts() (int, int) {
X := sps.scanOneInt()
sps.scanWhitespace()
Y := sps.scanOneInt()
log.Printf("X: %d Y: %d\n", X, Y)
return X, Y
}
func (sps *svgPathScanner) scanWhitespace() {
for r := sps.S.Peek(); r == ' ' || r == ','; r = sps.S.Peek() {
r = sps.S.Next()
}
}
func (sps *svgPathScanner) scanOneInt() int {
r := sps.S.Scan()
sign := 1
if r == '-' {
sps.S.Scan()
sign = -1
}
X, _ := strconv.ParseFloat(sps.S.TokenText(), 32)
return int(X) * sign
}
func (sps svgPathScanner) GeneratePolygons() ([]svgPolygon, error) {
sps.S.Init(strings.NewReader(sps.Path))
sps.S.Mode = scanner.ScanFloats | scanner.ScanChars
tok := sps.S.Scan()
lastTokenText := ""
for tok != scanner.EOF {
tokenText := sps.S.TokenText()
log.Printf("TT: %s LTT:%s\n", tokenText, lastTokenText)
if !sps.handleToken(tokenText) {
log.Printf("Retry\n")
sps.handleToken(lastTokenText)
}
lastTokenText = tokenText
tok = sps.S.Scan()
}
return sps.Polygons, nil
}
func (sps *svgPathScanner) handleToken(cmd string) bool {
log.Printf("Cmd: %s\n", cmd)
switch cmd {
case "M":
poly := svgPolygon{Mode: sps.Mode}
sps.CurrentPolygon = &poly
x, y := sps.scanTwoInts()
sps.Cursor.X, sps.Cursor.Y = float64(x), float64(y)
sps.CurrentPolygon.Points = append(sps.CurrentPolygon.Points, sps.Cursor)
case "m":
poly := svgPolygon{Mode: sps.Mode}
sps.CurrentPolygon = &poly
X, Y := sps.scanTwoInts()
sps.Cursor.X += float64(X)
sps.Cursor.Y += float64(Y)
sps.CurrentPolygon.Points = append(sps.CurrentPolygon.Points, sps.Cursor)
case "L":
X, Y := sps.scanTwoInts()
sps.Cursor.X, sps.Cursor.Y = float64(X), float64(Y)
sps.CurrentPolygon.Points = append(sps.CurrentPolygon.Points, sps.Cursor)
case "l":
X, Y := sps.scanTwoInts()
sps.Cursor.X += float64(X)
sps.Cursor.Y += float64(Y)
sps.CurrentPolygon.Points = append(sps.CurrentPolygon.Points, sps.Cursor)
case "H":
sps.Cursor.X = float64(sps.scanOneInt())
sps.CurrentPolygon.Points = append(sps.CurrentPolygon.Points, sps.Cursor)
case "h":
sps.Cursor.X += float64(sps.scanOneInt())
sps.CurrentPolygon.Points = append(sps.CurrentPolygon.Points, sps.Cursor)
case "V":
sps.Cursor.Y = float64(sps.scanOneInt())
sps.CurrentPolygon.Points = append(sps.CurrentPolygon.Points, sps.Cursor)
case "v":
sps.Cursor.Y += float64(sps.scanOneInt())
sps.CurrentPolygon.Points = append(sps.CurrentPolygon.Points, sps.Cursor)
case "z":
sps.Polygons = append(sps.Polygons, *sps.CurrentPolygon)
sps.CurrentPolygon = nil
case "Z":
sps.Polygons = append(sps.Polygons, *sps.CurrentPolygon)
sps.CurrentPolygon = nil
default:
return false
}
return true
}
func (path *svgPath) generatePath() {
polys, err := NewPathScanner(path.D, path.mode()).GeneratePolygons()
if err != nil {
log.Printf("Error generating polygons: %v", err)
}
log.Printf("Polys: %+v\n", polys)
path.Polygons = polys
}
func (group *SvgGroup) updateTransform() {
var transformRegex = regexp.MustCompile(`([a-zA-Z]+)\((.*)\)`)
if group.Transform != "" {
matches := transformRegex.FindStringSubmatch(group.Transform)
if matches == nil {
log.Printf("Unknown transform: %s\n", group.Transform)
} else if matches[1] == "translate" {
coords := strings.Split(matches[2], ",")
group.translate.X, _ = strconv.ParseFloat(coords[0], 64)
group.translate.Y, _ = strconv.ParseFloat(coords[1], 64)
} else {
log.Printf("Unknown transform: [%s] in %s\n", matches[1], group.Transform)
}
}
}
func (group *SvgGroup) updatePaths() {
for idx := range group.Paths {
group.Paths[idx].generatePath()
group.Paths[idx].Name = group.Name
}
}
func (group *SvgGroup) updatePolygons() {
for idx := range group.Polygons {
group.Polygons[idx].generatePoints()
}
}
func (group *SvgGroup) process() {
group.updateTransform()
group.updatePaths()
group.updatePolygons()
for idx := range group.Groups {
g := &group.Groups[idx]
g.process()
}
}
func (group *SvgGroup) GetPaths() []svgPath {
paths := make([]svgPath, 0)
if strings.HasPrefix(group.Name, "door_open_") || strings.HasPrefix(group.Name, "door_closed_") || strings.HasPrefix(group.Name, "walls") {
for _, p := range group.Paths {
paths = append(paths, p)
}
}
for _, g := range group.Groups {
paths = append(paths, g.GetPaths()...)
}
for idx, _ := range paths {
p := &paths[idx]
for pIdx, _ := range p.Polygons {
poly := &p.Polygons[pIdx]
for vIdx, _ := range poly.Points {
v := &poly.Points[vIdx]
v.X += group.translate.X
v.Y += group.translate.Y
}
}
}
return paths
}
func geosPolygonToPolygon(poly *geos.Geometry) svgPolygon {
shell, err := poly.Shell()
if err != nil {
log.Fatal(fmt.Errorf("Shell creation error: %+v", err))
}
mergedPoly := svgPolygon{}
coords, err := shell.Coords()
if err != nil {
log.Fatal(fmt.Errorf("Coords error: %+v", err))
}
for _, pt := range coords {
mergedPoly.Points = append(mergedPoly.Points, svgVert{pt.X, pt.Y})
}
return mergedPoly
}
func (group *SvgGroup) | () {
if len(group.Polygons) > 0 {
var poly *geos.Geometry
for _, p := range group.Polygons {
if len(p.Points) > 2 {
verts := make([]geos.Coord, 0)
for _, v := range p.Points {
verts = append(verts, geos.NewCoord(v.X, v.Y))
}
verts = append(verts, geos.NewCoord(p.Points[0].X, p.Points[0].Y))
if poly == nil {
newPoly, err := geos.NewPolygon(verts)
if err != nil {
log.Fatal(fmt.Errorf("New poly creation error: %+v", err))
}
poly = newPoly
} else {
uPoly, _ := geos.NewPolygon(verts)
uPolyType, _ := uPoly.Type()
if uPolyType == geos.POLYGON {
union, err := poly.Union(uPoly)
if err != nil {
log.Printf("Skipping poly: Poly union error: %+v %+v", err, uPoly)
} else {
poly = union
}
} else {
log.Printf("Not a poly: %d %+v", uPolyType, uPoly)
}
}
}
}
polyType, err := poly.Type()
if err != nil {
log.Fatal(fmt.Errorf("Poly type error: %+v", err))
}
if polyType == geos.POLYGON {
group.Polygons = make([]svgPolygon, 1)
group.Polygons[0] = geosPolygonToPolygon(poly)
} else if polyType == geos.MULTIPOLYGON || polyType == geos.GEOMETRYCOLLECTION {
geomCount, err := poly.NGeometry()
if err != nil {
log.Fatal(fmt.Errorf("Error getting geometry count", err))
}
log.Printf("GC: %d PLEN: %d\n", geomCount, len(group.Polygons))
group.Polygons = make([]svgPolygon, 0)
for i := 0; i < geomCount; i++ {
geom, err := poly.Geometry(i)
if err != nil {
log.Fatal(fmt.Errorf("Error getting geometry: %d %+v", i, err))
}
geomType, _ := geom.Type()
if geomType == geos.POLYGON {
group.Polygons = append(group.Polygons, geosPolygonToPolygon(geom))
}
}
} else {
log.Printf("DUMP: %+v", poly)
log.Printf("Polytype: %d\n", polyType)
}
}
}
/*
func (group *SvgGroup) MergePolygons() {
if len(group.Polygons) > 0 {
poly := polyclip.Polygon{}
for _, cont := range group.Polygons {
c := polyclip.Contour{}
for _, v := range cont.Points {
c.Add(polyclip.Point{v.X * 100, v.Y * 100})
}
poly.Add(c)
}
union := poly
unionPoints := 0
for cidx, cont := range group.Polygons {
c := polyclip.Contour{}
for _, v := range cont.Points {
c.Add(polyclip.Point{v.X * 100, v.Y * 100})
}
u2 := union.Construct(polyclip.UNION, polyclip.Polygon{c})
bb := c.BoundingBox()
newPoints := 0
for _, c := range u2 {
newPoints += len(c)
}
if newPoints >= unionPoints {
union = u2
unionPoints = newPoints
} else {
log.Printf("Contour: %d %+v %d Area: %0.3f %d %d", cidx, c.BoundingBox(), len(c), (bb.Max.X-bb.Min.X) * (bb.Max.Y - bb.Min.Y), newPoints, unionPoints)
log.Printf("Clockwise: %v", polyClockwise(c[0].X, c[0].Y, c[1].X, c[1].Y, c[len(c)-1].X, c[len(c)-1].Y))
log.Printf("Union: %+v", union)
log.Printf("Union2: %+v", u2)
}
}
group.Polygons = make([]svgPolygon, len(union))
for _, p := range union {
mergedPoly := svgPolygon{}
for _, pt := range p {
mergedPoly.Points = append(mergedPoly.Points, svgVert{pt.X, pt.Y})
}
group.Polygons = append(group.Polygons, mergedPoly)
}
}
}
*/
func (group *SvgGroup) MergePolygonsRecursive() {
group.MergePolygons()
for idx := range group.Groups {
group.Groups[idx].MergePolygonsRecursive()
}
}
func (svg *SvgFile) process() {
for idx := range svg.Groups {
g := &svg.Groups[idx]
g.process()
}
}
func (svg *SvgFile) MergePolygonsByGroup() {
for idx := range svg.Groups {
svg.Groups[idx].MergePolygonsRecursive()
}
}
func (svg *SvgFile) Paths() []svgPath {
paths := make([]svgPath, 0)
for _, g := range svg.Groups {
paths = append(paths, g.GetPaths()...)
}
return paths
}
func OpenSVG(r io.Reader) (*SvgFile, error) {
svg := &SvgFile{}
decoder := xml.NewDecoder(r)
if err := decoder.Decode(&svg); err != nil {
return nil, err
}
svg.process()
return svg, nil
}
| MergePolygons | identifier_name |
svg.go | // +build darwin
package bg
import (
"encoding/xml"
"fmt"
"image"
"io"
"log"
"regexp"
"strconv"
"strings"
"text/scanner"
//polyclip "github.com/akavel/polyclip-go"
"github.com/paulsmith/gogeos/geos"
)
type svgVert struct {
X, Y float64
}
type svgPolyline struct {
RawPoints string `xml:"points,attr"`
Points []svgVert
}
type svgPolygon struct {
Points []svgVert
Mode int
PointsData string `xml:"points,attr"`
}
type svgPath struct {
Name string `xml:"id,attr"`
D string `xml:"d,attr"`
Description string `xml:"desc"`
Polygons []svgPolygon
}
type SvgGroup struct {
Name string `xml:"id,attr"`
Groups []SvgGroup `xml:"g"`
Paths []svgPath `xml:"path"`
Polygons []svgPolygon `xml:"polygon"`
Transform string `xml:"transform,attr"`
translate svgVert
}
type SvgFile struct {
XMLName xml.Name `xml:"svg"`
Width int `xml:"width,attr"`
Height int `xml:"height,attr"`
Groups []SvgGroup `xml:"g"`
}
func (path *svgPath) mode() int {
var settingRegexp = regexp.MustCompile(`([a-zA-Z]+)\:([ 0-9]+)`)
if path.Description != "" {
matches := settingRegexp.FindStringSubmatch(path.Description)
if matches != nil {
switch matches[1] {
case "mode":
val, _ := strconv.Atoi(strings.TrimSpace(matches[2]))
return val
}
}
}
return 0
}
func (poly *svgPolygon) JsonPoly() jsonWedPolygon {
wall := jsonWedPolygon{Mode: poly.Mode}
wall.Verts = make([]image.Point, len(poly.Points))
for idx, pt := range poly.Points {
wall.Verts[idx].X = int(pt.X)
wall.Verts[idx].Y = int(pt.Y)
}
return wall
}
func (poly *svgPolygon) generatePoints() {
vals := strings.FieldsFunc(poly.PointsData, func(r rune) bool {
return (r == ' ' || r == '\t' || r == ',')
})
poly.Points = make([]svgVert, len(vals)/2)
for idx, _ := range poly.Points {
x, _ := strconv.ParseFloat(vals[idx*2], 32)
y, _ := strconv.ParseFloat(vals[idx*2+1], 32)
poly.Points[idx].X = x
poly.Points[idx].Y = y
}
}
type svgPathScanner struct {
Path string
Polygons []svgPolygon
CurrentPolygon *svgPolygon
Cursor svgVert
Mode int
S scanner.Scanner
}
func NewPathScanner(path string, mode int) svgPathScanner {
log.Printf("Scanner path: %s\n", path)
sps := svgPathScanner{Path: path, Mode: mode}
return sps
}
func (sps *svgPathScanner) scanTwoInts() (int, int) {
X := sps.scanOneInt()
sps.scanWhitespace()
Y := sps.scanOneInt()
log.Printf("X: %d Y: %d\n", X, Y)
return X, Y
}
func (sps *svgPathScanner) scanWhitespace() {
for r := sps.S.Peek(); r == ' ' || r == ','; r = sps.S.Peek() {
r = sps.S.Next()
}
}
func (sps *svgPathScanner) scanOneInt() int {
r := sps.S.Scan()
sign := 1
if r == '-' {
sps.S.Scan()
sign = -1
}
X, _ := strconv.ParseFloat(sps.S.TokenText(), 32)
return int(X) * sign
}
func (sps svgPathScanner) GeneratePolygons() ([]svgPolygon, error) {
sps.S.Init(strings.NewReader(sps.Path))
sps.S.Mode = scanner.ScanFloats | scanner.ScanChars
tok := sps.S.Scan()
lastTokenText := ""
for tok != scanner.EOF {
tokenText := sps.S.TokenText()
log.Printf("TT: %s LTT:%s\n", tokenText, lastTokenText)
if !sps.handleToken(tokenText) {
log.Printf("Retry\n")
sps.handleToken(lastTokenText)
}
lastTokenText = tokenText
tok = sps.S.Scan()
}
return sps.Polygons, nil
}
func (sps *svgPathScanner) handleToken(cmd string) bool {
log.Printf("Cmd: %s\n", cmd)
switch cmd {
case "M":
poly := svgPolygon{Mode: sps.Mode}
sps.CurrentPolygon = &poly
x, y := sps.scanTwoInts()
sps.Cursor.X, sps.Cursor.Y = float64(x), float64(y)
sps.CurrentPolygon.Points = append(sps.CurrentPolygon.Points, sps.Cursor)
case "m":
poly := svgPolygon{Mode: sps.Mode}
sps.CurrentPolygon = &poly
X, Y := sps.scanTwoInts()
sps.Cursor.X += float64(X)
sps.Cursor.Y += float64(Y)
sps.CurrentPolygon.Points = append(sps.CurrentPolygon.Points, sps.Cursor)
case "L":
X, Y := sps.scanTwoInts()
sps.Cursor.X, sps.Cursor.Y = float64(X), float64(Y)
sps.CurrentPolygon.Points = append(sps.CurrentPolygon.Points, sps.Cursor)
case "l":
X, Y := sps.scanTwoInts()
sps.Cursor.X += float64(X)
sps.Cursor.Y += float64(Y)
sps.CurrentPolygon.Points = append(sps.CurrentPolygon.Points, sps.Cursor)
case "H":
sps.Cursor.X = float64(sps.scanOneInt())
sps.CurrentPolygon.Points = append(sps.CurrentPolygon.Points, sps.Cursor)
case "h":
sps.Cursor.X += float64(sps.scanOneInt())
sps.CurrentPolygon.Points = append(sps.CurrentPolygon.Points, sps.Cursor)
case "V":
sps.Cursor.Y = float64(sps.scanOneInt())
sps.CurrentPolygon.Points = append(sps.CurrentPolygon.Points, sps.Cursor)
case "v":
sps.Cursor.Y += float64(sps.scanOneInt())
sps.CurrentPolygon.Points = append(sps.CurrentPolygon.Points, sps.Cursor)
case "z":
sps.Polygons = append(sps.Polygons, *sps.CurrentPolygon)
sps.CurrentPolygon = nil
case "Z":
sps.Polygons = append(sps.Polygons, *sps.CurrentPolygon)
sps.CurrentPolygon = nil
default:
return false
}
return true
}
func (path *svgPath) generatePath() {
polys, err := NewPathScanner(path.D, path.mode()).GeneratePolygons()
if err != nil {
log.Printf("Error generating polygons: %v", err)
}
log.Printf("Polys: %+v\n", polys)
path.Polygons = polys
}
func (group *SvgGroup) updateTransform() {
var transformRegex = regexp.MustCompile(`([a-zA-Z]+)\((.*)\)`)
if group.Transform != "" {
matches := transformRegex.FindStringSubmatch(group.Transform)
if matches == nil {
log.Printf("Unknown transform: %s\n", group.Transform)
} else if matches[1] == "translate" {
coords := strings.Split(matches[2], ",")
group.translate.X, _ = strconv.ParseFloat(coords[0], 64)
group.translate.Y, _ = strconv.ParseFloat(coords[1], 64)
} else {
log.Printf("Unknown transform: [%s] in %s\n", matches[1], group.Transform)
}
}
}
func (group *SvgGroup) updatePaths() {
for idx := range group.Paths {
group.Paths[idx].generatePath()
group.Paths[idx].Name = group.Name
}
}
func (group *SvgGroup) updatePolygons() {
for idx := range group.Polygons {
group.Polygons[idx].generatePoints()
}
}
func (group *SvgGroup) process() {
group.updateTransform()
group.updatePaths()
group.updatePolygons()
for idx := range group.Groups {
g := &group.Groups[idx]
g.process()
}
}
func (group *SvgGroup) GetPaths() []svgPath {
paths := make([]svgPath, 0)
if strings.HasPrefix(group.Name, "door_open_") || strings.HasPrefix(group.Name, "door_closed_") || strings.HasPrefix(group.Name, "walls") {
for _, p := range group.Paths {
paths = append(paths, p)
}
}
for _, g := range group.Groups {
paths = append(paths, g.GetPaths()...)
}
for idx, _ := range paths |
return paths
}
func geosPolygonToPolygon(poly *geos.Geometry) svgPolygon {
shell, err := poly.Shell()
if err != nil {
log.Fatal(fmt.Errorf("Shell creation error: %+v", err))
}
mergedPoly := svgPolygon{}
coords, err := shell.Coords()
if err != nil {
log.Fatal(fmt.Errorf("Coords error: %+v", err))
}
for _, pt := range coords {
mergedPoly.Points = append(mergedPoly.Points, svgVert{pt.X, pt.Y})
}
return mergedPoly
}
func (group *SvgGroup) MergePolygons() {
if len(group.Polygons) > 0 {
var poly *geos.Geometry
for _, p := range group.Polygons {
if len(p.Points) > 2 {
verts := make([]geos.Coord, 0)
for _, v := range p.Points {
verts = append(verts, geos.NewCoord(v.X, v.Y))
}
verts = append(verts, geos.NewCoord(p.Points[0].X, p.Points[0].Y))
if poly == nil {
newPoly, err := geos.NewPolygon(verts)
if err != nil {
log.Fatal(fmt.Errorf("New poly creation error: %+v", err))
}
poly = newPoly
} else {
uPoly, _ := geos.NewPolygon(verts)
uPolyType, _ := uPoly.Type()
if uPolyType == geos.POLYGON {
union, err := poly.Union(uPoly)
if err != nil {
log.Printf("Skipping poly: Poly union error: %+v %+v", err, uPoly)
} else {
poly = union
}
} else {
log.Printf("Not a poly: %d %+v", uPolyType, uPoly)
}
}
}
}
polyType, err := poly.Type()
if err != nil {
log.Fatal(fmt.Errorf("Poly type error: %+v", err))
}
if polyType == geos.POLYGON {
group.Polygons = make([]svgPolygon, 1)
group.Polygons[0] = geosPolygonToPolygon(poly)
} else if polyType == geos.MULTIPOLYGON || polyType == geos.GEOMETRYCOLLECTION {
geomCount, err := poly.NGeometry()
if err != nil {
log.Fatal(fmt.Errorf("Error getting geometry count", err))
}
log.Printf("GC: %d PLEN: %d\n", geomCount, len(group.Polygons))
group.Polygons = make([]svgPolygon, 0)
for i := 0; i < geomCount; i++ {
geom, err := poly.Geometry(i)
if err != nil {
log.Fatal(fmt.Errorf("Error getting geometry: %d %+v", i, err))
}
geomType, _ := geom.Type()
if geomType == geos.POLYGON {
group.Polygons = append(group.Polygons, geosPolygonToPolygon(geom))
}
}
} else {
log.Printf("DUMP: %+v", poly)
log.Printf("Polytype: %d\n", polyType)
}
}
}
/*
func (group *SvgGroup) MergePolygons() {
if len(group.Polygons) > 0 {
poly := polyclip.Polygon{}
for _, cont := range group.Polygons {
c := polyclip.Contour{}
for _, v := range cont.Points {
c.Add(polyclip.Point{v.X * 100, v.Y * 100})
}
poly.Add(c)
}
union := poly
unionPoints := 0
for cidx, cont := range group.Polygons {
c := polyclip.Contour{}
for _, v := range cont.Points {
c.Add(polyclip.Point{v.X * 100, v.Y * 100})
}
u2 := union.Construct(polyclip.UNION, polyclip.Polygon{c})
bb := c.BoundingBox()
newPoints := 0
for _, c := range u2 {
newPoints += len(c)
}
if newPoints >= unionPoints {
union = u2
unionPoints = newPoints
} else {
log.Printf("Contour: %d %+v %d Area: %0.3f %d %d", cidx, c.BoundingBox(), len(c), (bb.Max.X-bb.Min.X) * (bb.Max.Y - bb.Min.Y), newPoints, unionPoints)
log.Printf("Clockwise: %v", polyClockwise(c[0].X, c[0].Y, c[1].X, c[1].Y, c[len(c)-1].X, c[len(c)-1].Y))
log.Printf("Union: %+v", union)
log.Printf("Union2: %+v", u2)
}
}
group.Polygons = make([]svgPolygon, len(union))
for _, p := range union {
mergedPoly := svgPolygon{}
for _, pt := range p {
mergedPoly.Points = append(mergedPoly.Points, svgVert{pt.X, pt.Y})
}
group.Polygons = append(group.Polygons, mergedPoly)
}
}
}
*/
func (group *SvgGroup) MergePolygonsRecursive() {
group.MergePolygons()
for idx := range group.Groups {
group.Groups[idx].MergePolygonsRecursive()
}
}
func (svg *SvgFile) process() {
for idx := range svg.Groups {
g := &svg.Groups[idx]
g.process()
}
}
func (svg *SvgFile) MergePolygonsByGroup() {
for idx := range svg.Groups {
svg.Groups[idx].MergePolygonsRecursive()
}
}
func (svg *SvgFile) Paths() []svgPath {
paths := make([]svgPath, 0)
for _, g := range svg.Groups {
paths = append(paths, g.GetPaths()...)
}
return paths
}
func OpenSVG(r io.Reader) (*SvgFile, error) {
svg := &SvgFile{}
decoder := xml.NewDecoder(r)
if err := decoder.Decode(&svg); err != nil {
return nil, err
}
svg.process()
return svg, nil
}
| {
p := &paths[idx]
for pIdx, _ := range p.Polygons {
poly := &p.Polygons[pIdx]
for vIdx, _ := range poly.Points {
v := &poly.Points[vIdx]
v.X += group.translate.X
v.Y += group.translate.Y
}
}
} | conditional_block |
db_feeds.go | package database
import (
"database/sql"
"fmt"
"log"
"strings"
"github.com/mmcdole/gofeed"
)
//GetFeedInfo -- Pulls the rss feed from the website and dumps the needed info into the database
func GetFeedInfo(db *sql.DB, feedID int64) (err error) {
var feedData *gofeed.Feed
//Get the URl
url, err := GetFeedURL(db, feedID)
if err != nil {
return
}
rawData, err := GetFeedDataFromSite(url)
if err != nil {
return
}
//Add Raw Data to DB
err = UpdateFeedRawData(db, feedID, rawData)
if err != nil {
return
}
if !strings.EqualFold(rawData, "") {
//Need to convert the data to a gofeed object
feedParser := gofeed.NewParser()
feedData, err = feedParser.Parse(strings.NewReader(rawData))
if err != nil {
return fmt.Errorf("gofeed parser was unable to parse data: %s -- %s", rawData, err.Error())
}
}
//Add Title
if !strings.EqualFold(feedData.Title, "") {
err = UpdateFeedTitle(db, feedID, feedData.Title)
}
//Add author
if feedData.Author != nil {
//If both the name and the email is not blank
if !strings.EqualFold(feedData.Author.Email, "") || !strings.EqualFold(feedData.Author.Name, "") {
var authorID int64
if !AuthorExist(db, feedData.Author.Name, feedData.Author.Email) {
authorID, err = AddAuthor(db, feedData.Author.Name, feedData.Author.Email)
if err != nil {
return err
}
} else {
authorID, err = GetAuthorByNameAndEmail(db, feedData.Author.Name, feedData.Author.Email)
if err != nil {
return err
}
}
//Updating the feed author
err = UpdateFeedAuthor(db, feedID, authorID)
if err != nil {
return err
}
}
}
//Add Episodes
for _, episode := range feedData.Items {
var rssHTML string
if len(episode.Description) > len(episode.Content) {
rssHTML = episode.Description
} else {
rssHTML = episode.Content
}
if EpisodeExist(db, episode.Title) {
//TODO: need to check if this works...
continue
//Continue should skipp to the next loop interations
}
episodeID, err := AddEpisode(db, feedID, episode.Link, episode.Title, episode.PublishedParsed, rssHTML)
if err != nil {
return err
}
//Add media content
media, ok := episode.Extensions["media"]
if ok {
content, ok := media["content"]
if ok {
for i := 0; i < len(content); i++ {
var mediaContent string
url, ok := content[i].Attrs["url"]
if ok {
mediaContent += url
itemType, ok := content[i].Attrs["type"]
if ok {
mediaContent = fmt.Sprintf("%s (type: %s)", mediaContent, itemType)
err = UpdateEpisodeMediaContent(db, episodeID, mediaContent)
if err != nil {
return err
}
}
}
}
}
}
//Add author
if episode.Author != nil {
//If both the name and the email is not blank
if !strings.EqualFold(episode.Author.Email, "") || !strings.EqualFold(episode.Author.Name, "") {
var authorID int64
if !AuthorExist(db, episode.Author.Name, episode.Author.Email) {
authorID, err = AddAuthor(db, episode.Author.Name, episode.Author.Email)
if err != nil {
return err
}
} else {
authorID, err = GetAuthorByNameAndEmail(db, episode.Author.Name, episode.Author.Email)
if err != nil {
return err
}
}
//Updating the episode author
err = UpdateEpisodeAuthor(db, episodeID, authorID)
if err != nil {
return err
}
}
}
}
return
}
//LoadFeed -- Loads a feed from the database
func LoadFeed(db *sql.DB, id int64) (feed *Feed, err error) {
var feedData *gofeed.Feed
url, err := GetFeedURL(db, id)
if err != nil {
log.Fatal(err)
}
title, err := GetFeedTitle(db, id)
if err != nil {
log.Fatal(err)
}
if strings.EqualFold(title, "") {
title = url
}
data, err := GetFeedRawData(db, id)
if err != nil |
if !strings.EqualFold(data, "") {
//Need to convert the data to a gofeed object
feedParser := gofeed.NewParser()
feedData, err = feedParser.Parse(strings.NewReader(data))
if err != nil {
return feed, fmt.Errorf("gofeed parser was unable to parse data: %s -- %s", data, err.Error())
}
}
var tags []string
activeTags := AllActiveFeedTags(db, id)
for _, tag := range activeTags {
tags = append(tags, tag)
}
return &Feed{id, url, title, tags, feedData}, nil
}
//GetFeedAuthor -- returns the feed author
func GetFeedAuthor(db *sql.DB, feedID int64) (name, email string, err error) {
stmt := "SELECT authors.name, authors.email FROM feeds INNER JOIN authors ON authors.id = feeds.author_id WHERE feeds.id = $1"
row := db.QueryRow(stmt, feedID)
err = row.Scan(&name, &email)
return
}
//FeedHasAuthor -- returns true is an author id exists and false otherwise
func FeedHasAuthor(db *sql.DB, feedID int64) (result bool) {
var count int64
row := db.QueryRow("SELECT COUNT(author_id) FROM feeds WHERE id = $1", feedID)
err := row.Scan(&count)
if err != nil {
log.Fatal(err)
}
if count > 0 {
result = true
}
return
}
//GetFeedURL -- returnd the feed's url
func GetFeedURL(db *sql.DB, feedID int64) (url string, err error) {
row := db.QueryRow("SELECT uri FROM feeds WHERE id = $1", feedID)
err = row.Scan(&url)
if err != nil {
return url, fmt.Errorf("Error occured while trying to find the url for feed id (%d): %s", feedID, err.Error())
}
return url, nil
}
//GetFeedAuthorID -- returns the feed's author ID
func GetFeedAuthorID(db *sql.DB, feedID int64) (int64, error) {
var authorID int64
row := db.QueryRow("SELECT author_id FROM feeds WHERE id = $1", feedID)
err := row.Scan(&authorID)
if err != nil {
return authorID, fmt.Errorf("Error occured while trying to find the author_id for feed id (%d): %s", feedID, err.Error())
}
return authorID, nil
}
//UpdateFeedAuthor -- Updates the feed's author
func UpdateFeedAuthor(db *sql.DB, feedID, authorID int64) error {
_, err := db.Exec("UPDATE feeds SET author_id = $1 WHERE id = $2", authorID, feedID)
return err
}
//GetFeedRawData -- returns the feed's raw data
func GetFeedRawData(db *sql.DB, feedID int64) (string, error) {
var rawData string
row := db.QueryRow("SELECT raw_data FROM feeds WHERE id = $1", feedID)
err := row.Scan(&rawData)
if err != nil {
return rawData, fmt.Errorf("Error occured while trying to find the raw_data for feed id (%d): %s", feedID, err.Error())
}
return rawData, nil
}
//UpdateFeedRawData -- Updates the feed's raw data
func UpdateFeedRawData(db *sql.DB, feedID int64, rawData string) error {
_, err := db.Exec("UPDATE feeds SET raw_data = $1 WHERE id = $2", rawData, feedID)
return err
}
//GetFeedTitle -- returns the feed title
func GetFeedTitle(db *sql.DB, feedID int64) (string, error) {
var title string
row := db.QueryRow("SELECT title FROM feeds WHERE id = $1", feedID)
err := row.Scan(&title)
if err != nil {
return title, fmt.Errorf("Error occured while trying to find the feed title for id (%d): %s", feedID, err.Error())
}
return title, nil
}
//UpdateFeedTitle -- Updates the feed title
func UpdateFeedTitle(db *sql.DB, feedID int64, title string) error {
_, err := db.Exec("UPDATE feeds SET title = $1 WHERE id = $2", title, feedID)
return err
}
//GetFeedID -- Given a url or title, it returns the feed id
func GetFeedID(db *sql.DB, item string) (int64, error) {
var id int64
row := db.QueryRow("SELECT id FROM feeds WHERE uri = $1 OR title = $2", item, item)
err := row.Scan(&id)
if err != nil {
return id, fmt.Errorf("Error occured while trying to find the feed id for url/title (%s): %s", item, err.Error())
}
return id, nil
}
//AllActiveFeeds -- Returns all active feeds
func AllActiveFeeds(db *sql.DB) map[int64]string {
var result = make(map[int64]string)
rows, err := db.Query("SELECT id, uri FROM feeds WHERE deleted = 0")
if err != nil {
log.Fatalf("Error happened when trying to get all active feeds: %s", err)
}
defer func() {
if err = rows.Close(); err != nil {
log.Fatalf("Error happened while trying to close a row: %s", err.Error())
}
}()
for rows.Next() {
var id int64
var url string
err := rows.Scan(&id, &url)
if err != nil {
log.Fatalf("Error happened while scanning the rows for the all active feeds function: %s", err.Error())
}
result[id] = url
}
return result
}
//FilterFeeds -- Takes in a list of feeds and compares them with the feeds listed in the Database.
//Returns all the feeds that are listed as active in the database but where not in the list.
func FilterFeeds(db *sql.DB, feeds map[int64]string) map[int64]string {
var result = make(map[int64]string)
allFeeds := AllActiveFeeds(db)
for dbKey, dbValue := range allFeeds {
found := false
for feedKey, feedValue := range feeds {
if dbKey == feedKey && strings.EqualFold(dbValue, feedValue) {
found = true
break
}
}
if !found {
result[dbKey] = dbValue
}
}
return result
}
//DeleteFeed -- Flips the delete flag on for a feed in the database
func DeleteFeed(db *sql.DB, feedID int64) error {
_, err := db.Exec("UPDATE feeds SET deleted = 1 WHERE id = $1", feedID)
return err
}
//UndeleteFeed -- Flips the delete flag off for a feed in the database
func UndeleteFeed(db *sql.DB, feedID int64) error {
_, err := db.Exec("UPDATE feeds SET deleted = 0 WHERE id = $1", feedID)
return err
}
//IsFeedDeleted -- Checks to see if the feed is currently marked as deleted
func IsFeedDeleted(db *sql.DB, feedID int64) bool {
var result bool
var deleted int64
row := db.QueryRow("SELECT deleted FROM feeds WHERE id = $1", feedID)
err := row.Scan(&deleted)
if err != nil {
if err != sql.ErrNoRows {
log.Fatalf("Feed (%d) does not exist: %s", feedID, err.Error())
} else {
log.Fatalf("Error happened while trying check the value of the delete flag for feed (%d): %s", feedID, err.Error())
}
}
if deleted == 1 {
result = true
} else {
result = false
}
return result
}
//FeedURLExist -- Checks to see if a feed exists
func FeedURLExist(db *sql.DB, url string) bool {
var id int64
var result bool
row := db.QueryRow("SELECT id FROM feeds WHERE uri = $1", url)
err := row.Scan(&id)
if err != nil {
if err != sql.ErrNoRows {
log.Fatalf("Error happened when trying to check if the feed (%s) exists: %s", url, err.Error())
}
} else {
result = true
}
return result
}
//AddFeedURL -- Adds a feed url to the database
func AddFeedURL(db *sql.DB, url string) (int64, error) {
var result int64
feedStmt := "INSERT INTO feeds (uri) VALUES ($1)"
if FeedURLExist(db, url) {
return result, fmt.Errorf("Feed already exists")
}
dbResult, err := db.Exec(feedStmt, url)
if err != nil {
log.Fatal(err)
}
result, err = dbResult.LastInsertId()
if err != nil {
log.Fatal(err)
}
return result, nil
}
| {
return feed, fmt.Errorf("No data to retrieve: %s", err.Error())
} | conditional_block |
db_feeds.go | package database
import (
"database/sql"
"fmt"
"log"
"strings"
"github.com/mmcdole/gofeed"
)
//GetFeedInfo -- Pulls the rss feed from the website and dumps the needed info into the database
func GetFeedInfo(db *sql.DB, feedID int64) (err error) {
var feedData *gofeed.Feed
//Get the URl
url, err := GetFeedURL(db, feedID)
if err != nil {
return
}
rawData, err := GetFeedDataFromSite(url)
if err != nil {
return
}
//Add Raw Data to DB
err = UpdateFeedRawData(db, feedID, rawData)
if err != nil {
return
}
if !strings.EqualFold(rawData, "") {
//Need to convert the data to a gofeed object
feedParser := gofeed.NewParser()
feedData, err = feedParser.Parse(strings.NewReader(rawData))
if err != nil {
return fmt.Errorf("gofeed parser was unable to parse data: %s -- %s", rawData, err.Error())
}
}
//Add Title
if !strings.EqualFold(feedData.Title, "") {
err = UpdateFeedTitle(db, feedID, feedData.Title)
}
//Add author
if feedData.Author != nil {
//If both the name and the email is not blank
if !strings.EqualFold(feedData.Author.Email, "") || !strings.EqualFold(feedData.Author.Name, "") {
var authorID int64
if !AuthorExist(db, feedData.Author.Name, feedData.Author.Email) {
authorID, err = AddAuthor(db, feedData.Author.Name, feedData.Author.Email)
if err != nil {
return err
}
} else {
authorID, err = GetAuthorByNameAndEmail(db, feedData.Author.Name, feedData.Author.Email)
if err != nil {
return err
}
}
//Updating the feed author
err = UpdateFeedAuthor(db, feedID, authorID)
if err != nil {
return err
}
}
}
//Add Episodes
for _, episode := range feedData.Items {
var rssHTML string
if len(episode.Description) > len(episode.Content) {
rssHTML = episode.Description
} else {
rssHTML = episode.Content
}
if EpisodeExist(db, episode.Title) {
//TODO: need to check if this works...
continue
//Continue should skipp to the next loop interations
}
episodeID, err := AddEpisode(db, feedID, episode.Link, episode.Title, episode.PublishedParsed, rssHTML)
if err != nil {
return err
}
//Add media content
media, ok := episode.Extensions["media"]
if ok {
content, ok := media["content"]
if ok {
for i := 0; i < len(content); i++ {
var mediaContent string
url, ok := content[i].Attrs["url"]
if ok {
mediaContent += url
itemType, ok := content[i].Attrs["type"]
if ok {
mediaContent = fmt.Sprintf("%s (type: %s)", mediaContent, itemType)
err = UpdateEpisodeMediaContent(db, episodeID, mediaContent)
if err != nil {
return err
}
}
}
}
}
}
//Add author
if episode.Author != nil {
//If both the name and the email is not blank
if !strings.EqualFold(episode.Author.Email, "") || !strings.EqualFold(episode.Author.Name, "") {
var authorID int64
if !AuthorExist(db, episode.Author.Name, episode.Author.Email) {
authorID, err = AddAuthor(db, episode.Author.Name, episode.Author.Email)
if err != nil {
return err
}
} else {
authorID, err = GetAuthorByNameAndEmail(db, episode.Author.Name, episode.Author.Email)
if err != nil {
return err
}
}
//Updating the episode author
err = UpdateEpisodeAuthor(db, episodeID, authorID)
if err != nil {
return err
}
}
}
}
return
}
//LoadFeed -- Loads a feed from the database
func LoadFeed(db *sql.DB, id int64) (feed *Feed, err error) {
var feedData *gofeed.Feed
url, err := GetFeedURL(db, id)
if err != nil {
log.Fatal(err)
}
title, err := GetFeedTitle(db, id)
if err != nil {
log.Fatal(err)
}
if strings.EqualFold(title, "") {
title = url
}
data, err := GetFeedRawData(db, id)
if err != nil {
return feed, fmt.Errorf("No data to retrieve: %s", err.Error())
}
if !strings.EqualFold(data, "") {
//Need to convert the data to a gofeed object
feedParser := gofeed.NewParser()
feedData, err = feedParser.Parse(strings.NewReader(data))
if err != nil {
return feed, fmt.Errorf("gofeed parser was unable to parse data: %s -- %s", data, err.Error())
}
}
var tags []string
activeTags := AllActiveFeedTags(db, id)
for _, tag := range activeTags {
tags = append(tags, tag)
}
return &Feed{id, url, title, tags, feedData}, nil
}
//GetFeedAuthor -- returns the feed author
func GetFeedAuthor(db *sql.DB, feedID int64) (name, email string, err error) {
stmt := "SELECT authors.name, authors.email FROM feeds INNER JOIN authors ON authors.id = feeds.author_id WHERE feeds.id = $1"
row := db.QueryRow(stmt, feedID)
err = row.Scan(&name, &email)
return
}
//FeedHasAuthor -- returns true is an author id exists and false otherwise
func FeedHasAuthor(db *sql.DB, feedID int64) (result bool) {
var count int64
row := db.QueryRow("SELECT COUNT(author_id) FROM feeds WHERE id = $1", feedID)
err := row.Scan(&count)
if err != nil {
log.Fatal(err)
}
if count > 0 {
result = true
}
return
}
//GetFeedURL -- returnd the feed's url
func GetFeedURL(db *sql.DB, feedID int64) (url string, err error) {
row := db.QueryRow("SELECT uri FROM feeds WHERE id = $1", feedID)
err = row.Scan(&url)
if err != nil {
return url, fmt.Errorf("Error occured while trying to find the url for feed id (%d): %s", feedID, err.Error())
}
return url, nil
}
//GetFeedAuthorID -- returns the feed's author ID
func GetFeedAuthorID(db *sql.DB, feedID int64) (int64, error) {
var authorID int64
row := db.QueryRow("SELECT author_id FROM feeds WHERE id = $1", feedID)
err := row.Scan(&authorID)
if err != nil {
return authorID, fmt.Errorf("Error occured while trying to find the author_id for feed id (%d): %s", feedID, err.Error())
}
return authorID, nil
}
//UpdateFeedAuthor -- Updates the feed's author
func UpdateFeedAuthor(db *sql.DB, feedID, authorID int64) error {
_, err := db.Exec("UPDATE feeds SET author_id = $1 WHERE id = $2", authorID, feedID)
return err
}
//GetFeedRawData -- returns the feed's raw data
func GetFeedRawData(db *sql.DB, feedID int64) (string, error) {
var rawData string
row := db.QueryRow("SELECT raw_data FROM feeds WHERE id = $1", feedID)
err := row.Scan(&rawData)
if err != nil {
return rawData, fmt.Errorf("Error occured while trying to find the raw_data for feed id (%d): %s", feedID, err.Error())
}
return rawData, nil
}
//UpdateFeedRawData -- Updates the feed's raw data
func UpdateFeedRawData(db *sql.DB, feedID int64, rawData string) error {
_, err := db.Exec("UPDATE feeds SET raw_data = $1 WHERE id = $2", rawData, feedID)
return err
}
//GetFeedTitle -- returns the feed title
func GetFeedTitle(db *sql.DB, feedID int64) (string, error) {
var title string
row := db.QueryRow("SELECT title FROM feeds WHERE id = $1", feedID)
err := row.Scan(&title)
if err != nil {
return title, fmt.Errorf("Error occured while trying to find the feed title for id (%d): %s", feedID, err.Error())
}
return title, nil
}
//UpdateFeedTitle -- Updates the feed title
func UpdateFeedTitle(db *sql.DB, feedID int64, title string) error {
_, err := db.Exec("UPDATE feeds SET title = $1 WHERE id = $2", title, feedID)
return err
}
//GetFeedID -- Given a url or title, it returns the feed id
func GetFeedID(db *sql.DB, item string) (int64, error) {
var id int64
row := db.QueryRow("SELECT id FROM feeds WHERE uri = $1 OR title = $2", item, item)
err := row.Scan(&id)
if err != nil {
return id, fmt.Errorf("Error occured while trying to find the feed id for url/title (%s): %s", item, err.Error())
}
return id, nil
}
//AllActiveFeeds -- Returns all active feeds
func AllActiveFeeds(db *sql.DB) map[int64]string {
var result = make(map[int64]string)
rows, err := db.Query("SELECT id, uri FROM feeds WHERE deleted = 0")
if err != nil {
log.Fatalf("Error happened when trying to get all active feeds: %s", err)
}
defer func() {
if err = rows.Close(); err != nil {
log.Fatalf("Error happened while trying to close a row: %s", err.Error())
}
}()
for rows.Next() {
var id int64
var url string
err := rows.Scan(&id, &url)
if err != nil {
log.Fatalf("Error happened while scanning the rows for the all active feeds function: %s", err.Error())
}
result[id] = url
}
return result
}
//FilterFeeds -- Takes in a list of feeds and compares them with the feeds listed in the Database.
//Returns all the feeds that are listed as active in the database but where not in the list.
func FilterFeeds(db *sql.DB, feeds map[int64]string) map[int64]string {
var result = make(map[int64]string)
allFeeds := AllActiveFeeds(db)
for dbKey, dbValue := range allFeeds {
found := false
for feedKey, feedValue := range feeds {
if dbKey == feedKey && strings.EqualFold(dbValue, feedValue) {
found = true
break
}
}
if !found {
result[dbKey] = dbValue
}
}
return result
}
//DeleteFeed -- Flips the delete flag on for a feed in the database
func DeleteFeed(db *sql.DB, feedID int64) error {
_, err := db.Exec("UPDATE feeds SET deleted = 1 WHERE id = $1", feedID)
return err
}
//UndeleteFeed -- Flips the delete flag off for a feed in the database
func UndeleteFeed(db *sql.DB, feedID int64) error {
_, err := db.Exec("UPDATE feeds SET deleted = 0 WHERE id = $1", feedID)
return err
}
//IsFeedDeleted -- Checks to see if the feed is currently marked as deleted
func IsFeedDeleted(db *sql.DB, feedID int64) bool {
var result bool
var deleted int64
row := db.QueryRow("SELECT deleted FROM feeds WHERE id = $1", feedID)
err := row.Scan(&deleted)
if err != nil {
if err != sql.ErrNoRows {
log.Fatalf("Feed (%d) does not exist: %s", feedID, err.Error())
} else {
log.Fatalf("Error happened while trying check the value of the delete flag for feed (%d): %s", feedID, err.Error())
}
}
if deleted == 1 {
result = true
} else {
result = false
}
return result
}
//FeedURLExist -- Checks to see if a feed exists
func | (db *sql.DB, url string) bool {
var id int64
var result bool
row := db.QueryRow("SELECT id FROM feeds WHERE uri = $1", url)
err := row.Scan(&id)
if err != nil {
if err != sql.ErrNoRows {
log.Fatalf("Error happened when trying to check if the feed (%s) exists: %s", url, err.Error())
}
} else {
result = true
}
return result
}
//AddFeedURL -- Adds a feed url to the database
func AddFeedURL(db *sql.DB, url string) (int64, error) {
var result int64
feedStmt := "INSERT INTO feeds (uri) VALUES ($1)"
if FeedURLExist(db, url) {
return result, fmt.Errorf("Feed already exists")
}
dbResult, err := db.Exec(feedStmt, url)
if err != nil {
log.Fatal(err)
}
result, err = dbResult.LastInsertId()
if err != nil {
log.Fatal(err)
}
return result, nil
}
| FeedURLExist | identifier_name |
db_feeds.go | package database
import (
"database/sql"
"fmt"
"log"
"strings"
"github.com/mmcdole/gofeed"
)
//GetFeedInfo -- Pulls the rss feed from the website and dumps the needed info into the database
func GetFeedInfo(db *sql.DB, feedID int64) (err error) {
var feedData *gofeed.Feed
//Get the URl
url, err := GetFeedURL(db, feedID)
if err != nil {
return
}
rawData, err := GetFeedDataFromSite(url)
if err != nil {
return
}
//Add Raw Data to DB
err = UpdateFeedRawData(db, feedID, rawData)
if err != nil {
return
}
if !strings.EqualFold(rawData, "") {
//Need to convert the data to a gofeed object
feedParser := gofeed.NewParser()
feedData, err = feedParser.Parse(strings.NewReader(rawData))
if err != nil {
return fmt.Errorf("gofeed parser was unable to parse data: %s -- %s", rawData, err.Error())
}
}
//Add Title
if !strings.EqualFold(feedData.Title, "") {
err = UpdateFeedTitle(db, feedID, feedData.Title)
}
//Add author
if feedData.Author != nil {
//If both the name and the email is not blank
if !strings.EqualFold(feedData.Author.Email, "") || !strings.EqualFold(feedData.Author.Name, "") {
var authorID int64
if !AuthorExist(db, feedData.Author.Name, feedData.Author.Email) {
authorID, err = AddAuthor(db, feedData.Author.Name, feedData.Author.Email)
if err != nil {
return err
}
} else {
authorID, err = GetAuthorByNameAndEmail(db, feedData.Author.Name, feedData.Author.Email)
if err != nil {
return err
}
}
//Updating the feed author
err = UpdateFeedAuthor(db, feedID, authorID)
if err != nil {
return err
}
}
}
//Add Episodes
for _, episode := range feedData.Items {
var rssHTML string
if len(episode.Description) > len(episode.Content) {
rssHTML = episode.Description
} else {
rssHTML = episode.Content
}
if EpisodeExist(db, episode.Title) {
//TODO: need to check if this works...
continue
//Continue should skipp to the next loop interations
}
episodeID, err := AddEpisode(db, feedID, episode.Link, episode.Title, episode.PublishedParsed, rssHTML)
if err != nil {
return err
}
//Add media content
media, ok := episode.Extensions["media"]
if ok {
content, ok := media["content"]
if ok {
for i := 0; i < len(content); i++ {
var mediaContent string
url, ok := content[i].Attrs["url"]
if ok {
mediaContent += url
itemType, ok := content[i].Attrs["type"]
if ok {
mediaContent = fmt.Sprintf("%s (type: %s)", mediaContent, itemType)
err = UpdateEpisodeMediaContent(db, episodeID, mediaContent)
if err != nil {
return err
}
}
}
}
}
}
//Add author
if episode.Author != nil {
//If both the name and the email is not blank
if !strings.EqualFold(episode.Author.Email, "") || !strings.EqualFold(episode.Author.Name, "") {
var authorID int64
if !AuthorExist(db, episode.Author.Name, episode.Author.Email) {
authorID, err = AddAuthor(db, episode.Author.Name, episode.Author.Email)
if err != nil {
return err
}
} else {
authorID, err = GetAuthorByNameAndEmail(db, episode.Author.Name, episode.Author.Email)
if err != nil {
return err
}
}
//Updating the episode author
err = UpdateEpisodeAuthor(db, episodeID, authorID)
if err != nil {
return err
}
}
}
}
return
}
//LoadFeed -- Loads a feed from the database
func LoadFeed(db *sql.DB, id int64) (feed *Feed, err error) |
//GetFeedAuthor -- returns the feed author
func GetFeedAuthor(db *sql.DB, feedID int64) (name, email string, err error) {
stmt := "SELECT authors.name, authors.email FROM feeds INNER JOIN authors ON authors.id = feeds.author_id WHERE feeds.id = $1"
row := db.QueryRow(stmt, feedID)
err = row.Scan(&name, &email)
return
}
//FeedHasAuthor -- returns true is an author id exists and false otherwise
func FeedHasAuthor(db *sql.DB, feedID int64) (result bool) {
var count int64
row := db.QueryRow("SELECT COUNT(author_id) FROM feeds WHERE id = $1", feedID)
err := row.Scan(&count)
if err != nil {
log.Fatal(err)
}
if count > 0 {
result = true
}
return
}
//GetFeedURL -- returnd the feed's url
func GetFeedURL(db *sql.DB, feedID int64) (url string, err error) {
row := db.QueryRow("SELECT uri FROM feeds WHERE id = $1", feedID)
err = row.Scan(&url)
if err != nil {
return url, fmt.Errorf("Error occured while trying to find the url for feed id (%d): %s", feedID, err.Error())
}
return url, nil
}
// GetFeedAuthorID -- returns the author_id stored on the feed row.
// NOTE(review): a NULL author_id will fail the int64 scan -- confirm callers
// guard with FeedHasAuthor first.
func GetFeedAuthorID(db *sql.DB, feedID int64) (int64, error) {
    var authorID int64
    row := db.QueryRow("SELECT author_id FROM feeds WHERE id = $1", feedID)
    err := row.Scan(&authorID)
    if err != nil {
        return authorID, fmt.Errorf("Error occured while trying to find the author_id for feed id (%d): %s", feedID, err.Error())
    }
    return authorID, nil
}
// UpdateFeedAuthor -- Updates the feed's author (sets feeds.author_id).
// A feed id that matches no row updates zero rows and is not an error.
func UpdateFeedAuthor(db *sql.DB, feedID, authorID int64) error {
    _, err := db.Exec("UPDATE feeds SET author_id = $1 WHERE id = $2", authorID, feedID)
    return err
}
// GetFeedRawData -- returns the raw rss/xml text stored for the feed.
// NOTE(review): a NULL raw_data column would fail the string scan -- confirm
// the column is defaulted or NOT NULL.
func GetFeedRawData(db *sql.DB, feedID int64) (string, error) {
    var rawData string
    row := db.QueryRow("SELECT raw_data FROM feeds WHERE id = $1", feedID)
    err := row.Scan(&rawData)
    if err != nil {
        return rawData, fmt.Errorf("Error occured while trying to find the raw_data for feed id (%d): %s", feedID, err.Error())
    }
    return rawData, nil
}
// UpdateFeedRawData -- Updates the feed's raw rss/xml text (feeds.raw_data).
func UpdateFeedRawData(db *sql.DB, feedID int64, rawData string) error {
    _, err := db.Exec("UPDATE feeds SET raw_data = $1 WHERE id = $2", rawData, feedID)
    return err
}
// GetFeedTitle -- returns the feed title for the given feed id; the returned
// error wraps any lookup/scan failure (including sql.ErrNoRows).
func GetFeedTitle(db *sql.DB, feedID int64) (string, error) {
    var title string
    row := db.QueryRow("SELECT title FROM feeds WHERE id = $1", feedID)
    err := row.Scan(&title)
    if err != nil {
        return title, fmt.Errorf("Error occured while trying to find the feed title for id (%d): %s", feedID, err.Error())
    }
    return title, nil
}
// UpdateFeedTitle -- Updates the feed title (feeds.title).
func UpdateFeedTitle(db *sql.DB, feedID int64, title string) error {
    _, err := db.Exec("UPDATE feeds SET title = $1 WHERE id = $2", title, feedID)
    return err
}
// GetFeedID -- Given a url or title, it returns the feed id. The same value
// is bound to both placeholders, so item may be either the uri or the title;
// any lookup failure (including no match) is wrapped in the returned error.
func GetFeedID(db *sql.DB, item string) (int64, error) {
    var id int64
    row := db.QueryRow("SELECT id FROM feeds WHERE uri = $1 OR title = $2", item, item)
    err := row.Scan(&id)
    if err != nil {
        return id, fmt.Errorf("Error occured while trying to find the feed id for url/title (%s): %s", item, err.Error())
    }
    return id, nil
}
// AllActiveFeeds -- returns a map of feed id -> uri for every feed whose
// deleted flag is 0. Terminates the process on any database error, matching
// the error style used elsewhere in this file.
func AllActiveFeeds(db *sql.DB) map[int64]string {
    result := make(map[int64]string)
    rows, err := db.Query("SELECT id, uri FROM feeds WHERE deleted = 0")
    if err != nil {
        log.Fatalf("Error happened when trying to get all active feeds: %s", err)
    }
    defer func() {
        // Use a fresh variable so Close() cannot clobber an outer err.
        if cerr := rows.Close(); cerr != nil {
            log.Fatalf("Error happened while trying to close a row: %s", cerr.Error())
        }
    }()
    for rows.Next() {
        var id int64
        var url string
        if err := rows.Scan(&id, &url); err != nil {
            log.Fatalf("Error happened while scanning the rows for the all active feeds function: %s", err.Error())
        }
        result[id] = url
    }
    // rows.Next returning false can hide an iteration error; surface it.
    if err := rows.Err(); err != nil {
        log.Fatalf("Error happened while iterating the active feeds rows: %s", err.Error())
    }
    return result
}
// FilterFeeds -- Takes in a list of feeds and compares them with the feeds
// listed in the Database. Returns every active feed (id -> uri) that is NOT
// present in the supplied map with the same id and (case-insensitive) uri.
func FilterFeeds(db *sql.DB, feeds map[int64]string) map[int64]string {
    result := make(map[int64]string)
    for dbKey, dbValue := range AllActiveFeeds(db) {
        // Direct map lookup replaces the previous O(n^2) nested scan; the
        // old inner loop only ever matched when the keys were equal anyway.
        if feedValue, ok := feeds[dbKey]; ok && strings.EqualFold(dbValue, feedValue) {
            continue
        }
        result[dbKey] = dbValue
    }
    return result
}
// DeleteFeed -- Flips the delete flag on for a feed in the database
// (soft delete: sets feeds.deleted = 1; the row itself is kept).
func DeleteFeed(db *sql.DB, feedID int64) error {
    _, err := db.Exec("UPDATE feeds SET deleted = 1 WHERE id = $1", feedID)
    return err
}
// UndeleteFeed -- Flips the delete flag off for a feed in the database
// (restores a soft-deleted row by setting feeds.deleted = 0).
func UndeleteFeed(db *sql.DB, feedID int64) error {
    _, err := db.Exec("UPDATE feeds SET deleted = 0 WHERE id = $1", feedID)
    return err
}
// IsFeedDeleted -- reports whether the feed's deleted flag is set (1).
// Terminates the process when the feed does not exist or the lookup fails.
func IsFeedDeleted(db *sql.DB, feedID int64) bool {
    var deleted int64
    err := db.QueryRow("SELECT deleted FROM feeds WHERE id = $1", feedID).Scan(&deleted)
    if err != nil {
        // The original branches were inverted: sql.ErrNoRows is the
        // "row is missing" case; any other error is a general failure.
        if err == sql.ErrNoRows {
            log.Fatalf("Feed (%d) does not exist: %s", feedID, err.Error())
        } else {
            log.Fatalf("Error happened while trying check the value of the delete flag for feed (%d): %s", feedID, err.Error())
        }
    }
    return deleted == 1
}
// FeedURLExist -- reports whether a feed row with the given uri exists.
// A missing row yields false; any other lookup error terminates the process.
func FeedURLExist(db *sql.DB, url string) bool {
    var id int64
    err := db.QueryRow("SELECT id FROM feeds WHERE uri = $1", url).Scan(&id)
    switch {
    case err == nil:
        return true
    case err == sql.ErrNoRows:
        return false
    default:
        log.Fatalf("Error happened when trying to check if the feed (%s) exists: %s", url, err.Error())
        return false // unreachable: log.Fatalf exits the process
    }
}
// AddFeedURL -- inserts a new feed row with the given uri and returns the new
// row id. Insert failures are returned as errors instead of killing the whole
// process (the signature already carries an error for the caller).
func AddFeedURL(db *sql.DB, url string) (int64, error) {
    if FeedURLExist(db, url) {
        return 0, fmt.Errorf("Feed already exists")
    }
    dbResult, err := db.Exec("INSERT INTO feeds (uri) VALUES ($1)", url)
    if err != nil {
        return 0, fmt.Errorf("Error occured while inserting feed (%s): %s", url, err.Error())
    }
    result, err := dbResult.LastInsertId()
    if err != nil {
        return 0, fmt.Errorf("Error occured while reading the new feed id for (%s): %s", url, err.Error())
    }
    return result, nil
}
| {
var feedData *gofeed.Feed
url, err := GetFeedURL(db, id)
if err != nil {
log.Fatal(err)
}
title, err := GetFeedTitle(db, id)
if err != nil {
log.Fatal(err)
}
if strings.EqualFold(title, "") {
title = url
}
data, err := GetFeedRawData(db, id)
if err != nil {
return feed, fmt.Errorf("No data to retrieve: %s", err.Error())
}
if !strings.EqualFold(data, "") {
//Need to convert the data to a gofeed object
feedParser := gofeed.NewParser()
feedData, err = feedParser.Parse(strings.NewReader(data))
if err != nil {
return feed, fmt.Errorf("gofeed parser was unable to parse data: %s -- %s", data, err.Error())
}
}
var tags []string
activeTags := AllActiveFeedTags(db, id)
for _, tag := range activeTags {
tags = append(tags, tag)
}
return &Feed{id, url, title, tags, feedData}, nil
} | identifier_body |
db_feeds.go | package database
import (
"database/sql"
"fmt"
"log"
"strings"
"github.com/mmcdole/gofeed"
)
// GetFeedInfo -- Pulls the rss feed from the website and dumps the needed
// info into the database: raw data, title, feed author, and each episode with
// its media content and author.
func GetFeedInfo(db *sql.DB, feedID int64) (err error) {
    var feedData *gofeed.Feed
    // Get the URL of the feed
    url, err := GetFeedURL(db, feedID)
    if err != nil {
        return
    }
    rawData, err := GetFeedDataFromSite(url)
    if err != nil {
        return
    }
    //Add Raw Data to DB
    err = UpdateFeedRawData(db, feedID, rawData)
    if err != nil {
        return
    }
    if strings.EqualFold(rawData, "") {
        // Nothing was downloaded. The code below dereferences feedData,
        // which would panic while it is still nil, so stop here.
        return
    }
    //Need to convert the data to a gofeed object
    feedParser := gofeed.NewParser()
    feedData, err = feedParser.Parse(strings.NewReader(rawData))
    if err != nil {
        return fmt.Errorf("gofeed parser was unable to parse data: %s -- %s", rawData, err.Error())
    }
    //Add Title
    if !strings.EqualFold(feedData.Title, "") {
        err = UpdateFeedTitle(db, feedID, feedData.Title)
        if err != nil {
            // Previously this error was silently dropped.
            return err
        }
    }
    //Add author
    if feedData.Author != nil {
        //If either the name or the email is non-blank
        if !strings.EqualFold(feedData.Author.Email, "") || !strings.EqualFold(feedData.Author.Name, "") {
            var authorID int64
            if !AuthorExist(db, feedData.Author.Name, feedData.Author.Email) {
                authorID, err = AddAuthor(db, feedData.Author.Name, feedData.Author.Email)
                if err != nil {
                    return err
                }
            } else {
                authorID, err = GetAuthorByNameAndEmail(db, feedData.Author.Name, feedData.Author.Email)
                if err != nil {
                    return err
                }
            }
            //Updating the feed author
            err = UpdateFeedAuthor(db, feedID, authorID)
            if err != nil {
                return err
            }
        }
    }
    //Add Episodes
    for _, episode := range feedData.Items {
        // Prefer the longer of description/content as the episode HTML.
        var rssHTML string
        if len(episode.Description) > len(episode.Content) {
            rssHTML = episode.Description
        } else {
            rssHTML = episode.Content
        }
        if EpisodeExist(db, episode.Title) {
            // Episode already stored; move on to the next item.
            continue
        }
        episodeID, err := AddEpisode(db, feedID, episode.Link, episode.Title, episode.PublishedParsed, rssHTML)
        if err != nil {
            return err
        }
        //Add media content
        media, ok := episode.Extensions["media"]
        if ok {
            content, ok := media["content"]
            if ok {
                for i := 0; i < len(content); i++ {
                    var mediaContent string
                    url, ok := content[i].Attrs["url"]
                    if ok {
                        mediaContent += url
                        itemType, ok := content[i].Attrs["type"]
                        if ok {
                            mediaContent = fmt.Sprintf("%s (type: %s)", mediaContent, itemType)
                            // NOTE(review): entries that carry a url but no
                            // type attribute are silently dropped here --
                            // confirm that is intended.
                            err = UpdateEpisodeMediaContent(db, episodeID, mediaContent)
                            if err != nil {
                                return err
                            }
                        }
                    }
                }
            }
        }
        //Add author
        if episode.Author != nil {
            //If either the name or the email is non-blank
            if !strings.EqualFold(episode.Author.Email, "") || !strings.EqualFold(episode.Author.Name, "") {
                var authorID int64
                if !AuthorExist(db, episode.Author.Name, episode.Author.Email) {
                    authorID, err = AddAuthor(db, episode.Author.Name, episode.Author.Email)
                    if err != nil {
                        return err
                    }
                } else {
                    authorID, err = GetAuthorByNameAndEmail(db, episode.Author.Name, episode.Author.Email)
                    if err != nil {
                        return err
                    }
                }
                //Updating the episode author
                err = UpdateEpisodeAuthor(db, episodeID, authorID)
                if err != nil {
                    return err
                }
            }
        }
    }
    return
}
// LoadFeed -- Loads a feed (url, title, tags and parsed rss data) from the
// database and returns it as a *Feed. Lookup failures are returned as errors
// instead of killing the whole process (the signature already carries one).
func LoadFeed(db *sql.DB, id int64) (feed *Feed, err error) {
    var feedData *gofeed.Feed
    url, err := GetFeedURL(db, id)
    if err != nil {
        return feed, err
    }
    title, err := GetFeedTitle(db, id)
    if err != nil {
        return feed, err
    }
    // Fall back to the url when no title has been stored yet.
    if strings.EqualFold(title, "") {
        title = url
    }
    data, err := GetFeedRawData(db, id)
    if err != nil {
        return feed, fmt.Errorf("No data to retrieve: %s", err.Error())
    }
    if !strings.EqualFold(data, "") {
        //Need to convert the data to a gofeed object
        feedParser := gofeed.NewParser()
        feedData, err = feedParser.Parse(strings.NewReader(data))
        if err != nil {
            return feed, fmt.Errorf("gofeed parser was unable to parse data: %s -- %s", data, err.Error())
        }
    }
    // feedData stays nil when no raw data was stored; the caller receives a
    // Feed whose parsed-data field is nil in that case.
    var tags []string
    for _, tag := range AllActiveFeedTags(db, id) {
        tags = append(tags, tag)
    }
    return &Feed{id, url, title, tags, feedData}, nil
}
// GetFeedAuthor -- returns the name and email of the author joined to the
// feed via feeds.author_id; scan errors (including sql.ErrNoRows) are
// returned unwrapped.
func GetFeedAuthor(db *sql.DB, feedID int64) (name, email string, err error) {
    stmt := "SELECT authors.name, authors.email FROM feeds INNER JOIN authors ON authors.id = feeds.author_id WHERE feeds.id = $1"
    row := db.QueryRow(stmt, feedID)
    err = row.Scan(&name, &email)
    return
}
// FeedHasAuthor -- returns true if an author id exists and false otherwise
// (COUNT(author_id) counts only non-NULL values). Terminates the process on
// a query/scan error.
func FeedHasAuthor(db *sql.DB, feedID int64) (result bool) {
    var count int64
    row := db.QueryRow("SELECT COUNT(author_id) FROM feeds WHERE id = $1", feedID)
    err := row.Scan(&count)
    if err != nil {
        log.Fatal(err)
    }
    if count > 0 {
        result = true
    }
    return
}
// GetFeedURL -- returns the feed's url (the uri column) for the given feed
// id; lookup/scan failures are wrapped in the returned error.
func GetFeedURL(db *sql.DB, feedID int64) (url string, err error) {
    row := db.QueryRow("SELECT uri FROM feeds WHERE id = $1", feedID)
    err = row.Scan(&url)
    if err != nil {
        return url, fmt.Errorf("Error occured while trying to find the url for feed id (%d): %s", feedID, err.Error())
    }
    return url, nil
}
// GetFeedAuthorID -- returns the author_id stored on the feed row.
// NOTE(review): a NULL author_id fails the int64 scan -- confirm callers
// guard with FeedHasAuthor first.
func GetFeedAuthorID(db *sql.DB, feedID int64) (int64, error) {
    var authorID int64
    row := db.QueryRow("SELECT author_id FROM feeds WHERE id = $1", feedID)
    err := row.Scan(&authorID)
    if err != nil {
        return authorID, fmt.Errorf("Error occured while trying to find the author_id for feed id (%d): %s", feedID, err.Error())
    }
    return authorID, nil
}
// UpdateFeedAuthor -- Updates the feed's author (sets feeds.author_id);
// updating a non-existent feed id affects zero rows and is not an error.
func UpdateFeedAuthor(db *sql.DB, feedID, authorID int64) error {
    _, err := db.Exec("UPDATE feeds SET author_id = $1 WHERE id = $2", authorID, feedID)
    return err
}
// GetFeedRawData -- returns the raw rss/xml text stored for the feed;
// lookup/scan failures are wrapped in the returned error.
func GetFeedRawData(db *sql.DB, feedID int64) (string, error) {
    var rawData string
    row := db.QueryRow("SELECT raw_data FROM feeds WHERE id = $1", feedID)
    err := row.Scan(&rawData)
    if err != nil {
        return rawData, fmt.Errorf("Error occured while trying to find the raw_data for feed id (%d): %s", feedID, err.Error())
    }
    return rawData, nil
}
// UpdateFeedRawData -- Updates the feed's raw rss/xml text (feeds.raw_data).
func UpdateFeedRawData(db *sql.DB, feedID int64, rawData string) error {
    _, err := db.Exec("UPDATE feeds SET raw_data = $1 WHERE id = $2", rawData, feedID)
    return err
}
// GetFeedTitle -- returns the feed title for the given feed id; lookup/scan
// failures (including sql.ErrNoRows) are wrapped in the returned error.
func GetFeedTitle(db *sql.DB, feedID int64) (string, error) {
    var title string
    row := db.QueryRow("SELECT title FROM feeds WHERE id = $1", feedID)
    err := row.Scan(&title)
    if err != nil {
        return title, fmt.Errorf("Error occured while trying to find the feed title for id (%d): %s", feedID, err.Error())
    }
    return title, nil
}
// UpdateFeedTitle -- Updates the feed title (feeds.title).
func UpdateFeedTitle(db *sql.DB, feedID int64, title string) error {
    _, err := db.Exec("UPDATE feeds SET title = $1 WHERE id = $2", title, feedID)
    return err
}
// GetFeedID -- Given a url or title, it returns the feed id. The same value
// is bound to both placeholders, so item may match either column; lookup
// failures (including no match) are wrapped in the returned error.
func GetFeedID(db *sql.DB, item string) (int64, error) {
    var id int64
    row := db.QueryRow("SELECT id FROM feeds WHERE uri = $1 OR title = $2", item, item)
    err := row.Scan(&id)
    if err != nil {
        return id, fmt.Errorf("Error occured while trying to find the feed id for url/title (%s): %s", item, err.Error())
    }
    return id, nil
}
// AllActiveFeeds -- returns a map of feed id -> uri for every feed whose
// deleted flag is 0. Terminates the process on any database error, matching
// the error style used elsewhere in this file.
func AllActiveFeeds(db *sql.DB) map[int64]string {
    result := make(map[int64]string)
    rows, err := db.Query("SELECT id, uri FROM feeds WHERE deleted = 0")
    if err != nil {
        log.Fatalf("Error happened when trying to get all active feeds: %s", err)
    }
    defer func() {
        // Use a fresh variable so Close() cannot clobber an outer err.
        if cerr := rows.Close(); cerr != nil {
            log.Fatalf("Error happened while trying to close a row: %s", cerr.Error())
        }
    }()
    for rows.Next() {
        var id int64
        var url string
        if err := rows.Scan(&id, &url); err != nil {
            log.Fatalf("Error happened while scanning the rows for the all active feeds function: %s", err.Error())
        }
        result[id] = url
    }
    // rows.Next returning false can hide an iteration error; surface it.
    if err := rows.Err(); err != nil {
        log.Fatalf("Error happened while iterating the active feeds rows: %s", err.Error())
    }
    return result
}
// FilterFeeds -- Takes in a list of feeds and compares them with the feeds
// listed in the Database. Returns every active feed (id -> uri) that is NOT
// present in the supplied map with the same id and (case-insensitive) uri.
func FilterFeeds(db *sql.DB, feeds map[int64]string) map[int64]string {
    result := make(map[int64]string)
    for dbKey, dbValue := range AllActiveFeeds(db) {
        // Direct map lookup replaces the previous O(n^2) nested scan; the
        // old inner loop only ever matched when the keys were equal anyway.
        if feedValue, ok := feeds[dbKey]; ok && strings.EqualFold(dbValue, feedValue) {
            continue
        }
        result[dbKey] = dbValue
    }
    return result
}
// DeleteFeed -- Flips the delete flag on for a feed in the database
// (soft delete: sets feeds.deleted = 1; the row itself is kept).
func DeleteFeed(db *sql.DB, feedID int64) error {
    _, err := db.Exec("UPDATE feeds SET deleted = 1 WHERE id = $1", feedID)
    return err
}
// UndeleteFeed -- Flips the delete flag off for a feed in the database
// (restores a soft-deleted row by setting feeds.deleted = 0).
func UndeleteFeed(db *sql.DB, feedID int64) error {
    _, err := db.Exec("UPDATE feeds SET deleted = 0 WHERE id = $1", feedID)
    return err
}
// IsFeedDeleted -- reports whether the feed's deleted flag is set (1).
// Terminates the process when the feed does not exist or the lookup fails.
func IsFeedDeleted(db *sql.DB, feedID int64) bool {
    var deleted int64
    err := db.QueryRow("SELECT deleted FROM feeds WHERE id = $1", feedID).Scan(&deleted)
    if err != nil {
        // The original branches were inverted: sql.ErrNoRows is the
        // "row is missing" case; any other error is a general failure.
        if err == sql.ErrNoRows {
            log.Fatalf("Feed (%d) does not exist: %s", feedID, err.Error())
        } else {
            log.Fatalf("Error happened while trying check the value of the delete flag for feed (%d): %s", feedID, err.Error())
        }
    }
    return deleted == 1
}
// FeedURLExist -- Checks to see if a feed with the given uri exists.
// A missing row (sql.ErrNoRows) yields false; any other lookup error
// terminates the process.
func FeedURLExist(db *sql.DB, url string) bool {
    var id int64
    var result bool
    row := db.QueryRow("SELECT id FROM feeds WHERE uri = $1", url)
    err := row.Scan(&id)
    if err != nil {
        if err != sql.ErrNoRows {
            log.Fatalf("Error happened when trying to check if the feed (%s) exists: %s", url, err.Error())
        }
    } else {
        result = true
    }
    return result
}
//AddFeedURL -- Adds a feed url to the database
func AddFeedURL(db *sql.DB, url string) (int64, error) {
var result int64
feedStmt := "INSERT INTO feeds (uri) VALUES ($1)"
if FeedURLExist(db, url) {
return result, fmt.Errorf("Feed already exists")
}
dbResult, err := db.Exec(feedStmt, url)
if err != nil {
log.Fatal(err)
}
result, err = dbResult.LastInsertId() | }
return result, nil
} | if err != nil {
log.Fatal(err) | random_line_split |
rtmDiskUsage.js | Ext.define('rtm.src.rtmDiskUsage', {
extend: 'Exem.DockForm',
title : common.Util.TR('Disk Usage'),
layout: 'fit',
width : '100%',
height: '100%',
isClosedDockForm: false,
listeners: {
beforedestroy: function() {
this.isClosedDockForm = true;
this.stopRefreshData();
this.refreshTimer = null;
}
},
initProperty: function() {
this.monitorType = 'WAS';
this.openViewType = Comm.RTComm.getCurrentMonitorType();
this.displayHostList = Comm.hosts.concat();
// 1: WAS, 2: DB, 3: WebServer, 15: C Daemon (APIM)
this.serverType = 1;
this.envKeyUsageLimit = 'rtm_' + this.monitorType.toLocaleLowerCase() + '_diskusage_limit';
this.diskusageLimit = Comm.web_env_info[this.envKeyUsageLimit];
if (this.diskusageLimit) {
this.txnFilterDiskUsage = +this.diskusageLimit;
} else {
this.txnFilterDiskUsage = 0;
}
},
init: function() {
this.initProperty();
this.initLayout();
this.frameRefresh();
this.loadingMask = Ext.create('Exem.LoadingMask', {
target: this
});
},
initLayout: function() {
this.background = Ext.create('Exem.Container', {
width : '100%',
height: '100%',
layout: 'vbox',
border: 1,
cls : 'rtm-topsql-base'
});
this.topContentsArea = Ext.create('Exem.Container', {
width : '100%',
height : 22,
layout : 'hbox',
margin : '5 0 0 0'
});
this.centerArea = Ext.create('Exem.Container', {
width : '100%',
height : '100%',
layout: {
type: 'vbox',
align: 'stretch'
},
flex : 1,
margin : '5 10 10 10'
});
this.frameTitle = Ext.create('Ext.form.Label', {
height : 20,
margin : '0 0 0 10',
cls : 'header-title',
text : this.title
});
this.expendIcon = Ext.create('Ext.container.Container', {
width : 17,
height: 17,
margin: '2 10 0 0',
html : '<div class="trend-chart-icon" title="' + common.Util.TR('Expand View') + '"/>',
listeners: {
scope: this,
render : function(me) {
me.el.on( 'click', function(){
this.dockContainer.toggleExpand(this);
}, this);
}
}
});
this.filterUsageText = Ext.create('Exem.NumberField',{
cls: 'rtm-list-condition',
fieldLabel: common.Util.TR('Disk Usage') + '(%)',
labelWidth : 90,
width: 140,
maxLength: 2,
minValue: 0,
maxValue: 99,
value : this.txnFilterDiskUsage,
margin: '0 10 0 0',
enforceMaxLength: true,
enableKeyEvents: true,
allowBlank: false,
allowDecimals: false,
allowExponential: false,
listeners: {
scope: this,
keydown: this.keyDownEvent,
change: this.changeEvent,
blur: this.blurEvent,
specialkey: this.specialkeyEvent
}
});
this.createTabPanel();
this.createGrid();
this.topContentsArea.add([this.frameTitle, {xtype: 'tbfill'}, this.filterUsageText, this.expendIcon]);
this.centerArea.add(this.tabPanel, this.diskUsageGrid);
this.background.add([this.topContentsArea, this.centerArea]);
this.add(this.background);
// 플로팅 상태에서는 title hide
if (this.floatingLayer) {
this.frameTitle.hide();
this.expendIcon.hide();
}
},
keyDownEvent: function (me, e) {
if (!Ext.isNumeric(me.value)) {
e.stopEvent();
return;
}
},
changeEvent: function(me, newValue, oldValue) {
if (!Ext.isNumeric(me.value)) {
me.setValue(oldValue);
} else {
if (me.value < me.minValue) {
me.setValue(me.minValue);
} else if (me.value > me.maxValue) {
me.setValue(me.maxValue);
}
}
},
blurEvent: function() {
if (+this.txnFilterDiskUsage !== +this.filterUsageText.getValue()) {
this.txnFilterDiskUsage = +this.filterUsageText.getValue();
common.WebEnv.Save(this.envKeyUsageLimit, this.txnFilterDiskUsage);
this.frameRefresh();
}
},
specialkeyEvent: function(me, e) {
if (e.getKey() === e.ENTER && me.oldValue !== me.value) {
if (me.value < 0) {
me.setValue(0);
} else if (me.value > 99) {
me.setValue(99);
}
me.oldValue = me.value;
me.fireEvent('blur', me);
}
},
/**
* 모니터링 서버들의 호스트별로 탭 화면을 구성
*/
createTabPanel: function() {
this.tabPanel = Ext.create('Exem.TabPanel', {
layout: 'fit',
width: '100%',
height: 25,
items: [{
title: common.Util.TR('Total'),
itemId: 'total',
layout: 'fit'
}],
listeners: {
scope: this,
tabchange: function(tabpanel, newcard) {
this.loadingMask.show(null, true);
this.activeTabTitle = newcard.title;
this.frameRefresh();
}
}
});
var hostName;
for (var ix = 0, ixLen = this.displayHostList.length; ix < ixLen ; ix++ ) {
hostName = this.displayHostList[ix];
this.tabPanel.add({
layout: 'fit',
title : hostName,
itemId: hostName
});
}
this.tabPanel.setActiveTab(0);
this.activeTabTitle = this.tabPanel.getActiveTab().title;
},
/**
* Grid 생성
*/
createGrid: function () {
this.diskUsageGrid = Ext.create('Exem.BaseGrid', {
layout : 'fit',
usePager : false,
autoScroll : false,
borderVisible: true,
localeType : 'H:i:s',
columnLines : true,
baseGridCls : 'baseGridRTM',
exportFileName: this.title,
useEmptyText: true,
emptyTextMsg: common.Util.TR('No data to display'),
style: {
'overflow-x': 'hidden'
}
});
this.diskUsageGrid.beginAddColumns();
this.diskUsageGrid.addColumn(common.Util.CTR('Host Name'), 'host_name', 80, Grid.String, true, false);
this.diskUsageGrid.addColumn(common.Util.CTR('Mount Name'), 'mount_name', 75, Grid.String, true, false);
this.diskUsageGrid.addColumn(common.Util.CTR('File System'), 'file_system', 95, Grid.String, true, false);
this.diskUsageGrid.addColumn(common.Util.CTR('Usage(%)'), 'usage', 95, Grid.Float, true, false);
this.diskUsageGrid.addColumn(common.Util.CTR('Use Size(MB)'), 'use_size', 95, Grid.Number, true, false);
this.diskUsageGrid.addColumn(common.Util.CTR('Total Size(MB)'), 'total_size', 95, Grid.Number, true, false);
this.diskUsageGrid.endAddColumns();
this.diskUsageGrid.addRenderer('usage', this.gridBarRenderer.bind(this), RendererType.bar);
this.diskUsageGrid._columnsList[3].on({
scope: this,
resize: function() {
this.progressFillWidth = $('#'+this.id+' .progress-bar').width();
if (this.progressFillWidth) {
$('#'+this.id+' .progress-fill-text').css('width', this.progressFillWidth);
}
}
});
this.diskUsageGrid._columnsList[1].minWidth = 150;
this.diskUsageGrid._columnsList[1].flex = 1;
// 필터 설정 후 다른 탭으로 전환하고 설정된 필터를 해제하면 변경 전 탭에서 표시된 데이터가
// 보여지는 이슈로 인해 필터 설정 시 그리드를 새로 고침하도록 수정.
this.diskUsageGrid.pnlExGrid.on('filterchange', function() {
this.diskUsageGrid.clearRows();
this.frameRefresh();
}.bind(this));
},
/**
* 그리드에 보여지는 막대 그래프 설정.
* value, metaData, record, rowIndex, colIndex, store, view
*
* @param {} value
* @param {} metaData
* @param {} record
* @param {} rowIndex
* @param {} colIndex
* @param {} store
* @param {} view
* @return {}
*/
gridBarRenderer: function() {
var htmlStr;
var value = arguments[0];
if (value !== 0) {
if (!this.progressFillWidth) {
this.progressFillWidth = 83;
}
htmlStr =
'<div class="progress-bar" style="border: 0px solid #666; height:13px; width: 100%;position:relative; text-align:center;">'+
'<div class="progress-fill" style="width:' + value + '%;">'+
'<div class="progress-fill-text" style="width:'+this.progressFillWidth+'px">'+value+'%</div>'+
'</div>'+ value + '%' +
'</div>';
} else {
htmlStr = '<div data-qtip="" style="text-align:center;">'+'0%'+'</div>';
}
return htmlStr;
},
/**
* 데이터 새로고침을 중지.
*/
stopRefreshData: function() {
if (this.refreshTimer) {
clearTimeout(this.refreshTimer);
}
},
/**
* 데이터 새로 고침.
* 새로고침 간격 (1분)
*/
frameRefresh: function() {
this.stopRefreshData();
var isDisplayCmp = Comm.RTComm.isEnableRtmView(this.openViewType);
if (isDisplayCmp || this.floatingLayer) {
this.selectDiskUsage();
}
this.refreshTimer = setTimeout(this.frameRefresh.bind(this), PlotChart.time.exMin * 1);
},
/**
* 디스크 사용량 조회
*/
selectDiskUsage: function() {
var hostName = this.activeTabTitle;
if (common.Util.TR('Total') === hostName) {
hostName = '';
for (var ix = 0, ixLen = this.displayHostList.length; ix < ixLen ; ix++ ) {
hostName += (ix === 0? '\'' : ',\'') + this.displayHostList[ix] + '\'';
}
} else {
hostName = '\'' + hostName + '\'';
}
if (Ext.isEmpty(hostName) === true) {
console.debug('%c [Disk Usage] [WARNING] ', 'color:#800000;background-color:gold;font-weight:bold;', 'No Selected Host Name');
return;
}
WS.SQLExec({
sql_file: 'IMXRT_DiskUsage.sql',
bind : [{
name: 'server_type', value: this.serverType, type: SQLBindType.INTEGER
}],
replace_string: [{
name: 'host_name', value: hostName
}]
}, function(aheader, adata) {
this.loadingMask.hide();
if (adata === null || adata === undefined) {
console.debug('%c [Disk Usage] [WARNING] ', 'color:#800000;background-color:gold;font-weight:bold;', aheader.message);
}
if (this.isClosedDockForm === true) {
return;
}
this.drawData(adata);
aheader = null;
adata = null;
}, this);
},
/**
* 디스크 사용량 데이터 표시
*
* @param {object} adata
*/
drawData: function(adata) {
this.diskUsageGrid.clearRows();
if (this.diskUsageGrid.pnlExGrid.headerCt === undefined ||
this.diskUsageGrid.pnlExGrid.headerCt === null) {
return;
}
var isDownHost;
for (var ix = 0, ixLen = adata.rows.length; ix < ixLen; ix++) {
if (+this.txnFilterDiskUsage <= +adata.rows[ix][2]) {
isDownHost = Comm.RTComm.isDownByHostName(adata.rows[ix][5]);
if (isDownHost === true) {
continue;
}
this.diskUsageGrid.addRow([
adata.rows[ix][5], // host name
adat | id.drawGrid();
adata = null;
}
});
| a.rows[ix][0], // mount name
adata.rows[ix][1], // file system
adata.rows[ix][2], // ratio
Math.trunc(+adata.rows[ix][3]), // used size
Math.trunc(+adata.rows[ix][4]) // tota size
]);
}
}
if (isDownHost === true) {
this.diskUsageGrid.emptyTextMsg = common.Util.TR('Host Down');
} else {
this.diskUsageGrid.emptyTextMsg = common.Util.TR('No data to display');
}
this.diskUsageGrid.showEmptyText();
this.diskUsageGr | conditional_block |
rtmDiskUsage.js | Ext.define('rtm.src.rtmDiskUsage', {
extend: 'Exem.DockForm',
title : common.Util.TR('Disk Usage'),
layout: 'fit',
width : '100%',
height: '100%',
isClosedDockForm: false,
listeners: {
beforedestroy: function() {
this.isClosedDockForm = true;
this.stopRefreshData();
this.refreshTimer = null;
}
},
/**
 * Initializes instance configuration: monitor type, host list, server type
 * and the persisted disk-usage filter threshold. The threshold is read from
 * Comm.web_env_info (saved web environment), falling back to 0.
 */
initProperty: function() {
    this.monitorType = 'WAS';
    this.openViewType = Comm.RTComm.getCurrentMonitorType();
    // Copy so later mutations do not touch the shared Comm.hosts array.
    this.displayHostList = Comm.hosts.concat();
    // 1: WAS, 2: DB, 3: WebServer, 15: C Daemon (APIM)
    this.serverType = 1;
    this.envKeyUsageLimit = 'rtm_' + this.monitorType.toLocaleLowerCase() + '_diskusage_limit';
    this.diskusageLimit = Comm.web_env_info[this.envKeyUsageLimit];
    if (this.diskusageLimit) {
        this.txnFilterDiskUsage = +this.diskusageLimit;
    } else {
        this.txnFilterDiskUsage = 0;
    }
},
/**
 * Entry point: reads configuration, builds the layout, starts the periodic
 * refresh cycle, and prepares the loading mask shown while data is fetched.
 */
init: function() {
    this.initProperty();
    this.initLayout();
    this.frameRefresh();
    this.loadingMask = Ext.create('Exem.LoadingMask', {
        target: this
    });
},
/**
 * Builds the component tree: a background container holding a header row
 * (title, usage-threshold filter field, expand icon) and a center area with
 * the per-host tab panel and the disk-usage grid.
 */
initLayout: function() {
    this.background = Ext.create('Exem.Container', {
        width : '100%',
        height: '100%',
        layout: 'vbox',
        border: 1,
        cls : 'rtm-topsql-base'
    });
    this.topContentsArea = Ext.create('Exem.Container', {
        width : '100%',
        height : 22,
        layout : 'hbox',
        margin : '5 0 0 0'
    });
    this.centerArea = Ext.create('Exem.Container', {
        width : '100%',
        height : '100%',
        layout: {
            type: 'vbox',
            align: 'stretch'
        },
        flex : 1,
        margin : '5 10 10 10'
    });
    this.frameTitle = Ext.create('Ext.form.Label', {
        height : 20,
        margin : '0 0 0 10',
        cls : 'header-title',
        text : this.title
    });
    // Icon that toggles the expanded (maximized) state of this dock form.
    this.expendIcon = Ext.create('Ext.container.Container', {
        width : 17,
        height: 17,
        margin: '2 10 0 0',
        html : '<div class="trend-chart-icon" title="' + common.Util.TR('Expand View') + '"/>',
        listeners: {
            scope: this,
            render : function(me) {
                me.el.on( 'click', function(){
                    this.dockContainer.toggleExpand(this);
                }, this);
            }
        }
    });
    // Numeric field holding the minimum usage (%) a row must reach to show.
    this.filterUsageText = Ext.create('Exem.NumberField',{
        cls: 'rtm-list-condition',
        fieldLabel: common.Util.TR('Disk Usage') + '(%)',
        labelWidth : 90,
        width: 140,
        maxLength: 2,
        minValue: 0,
        maxValue: 99,
        value : this.txnFilterDiskUsage,
        margin: '0 10 0 0',
        enforceMaxLength: true,
        enableKeyEvents: true,
        allowBlank: false,
        allowDecimals: false,
        allowExponential: false,
        listeners: {
            scope: this,
            keydown: this.keyDownEvent,
            change: this.changeEvent,
            blur: this.blurEvent,
            specialkey: this.specialkeyEvent
        }
    });
    this.createTabPanel();
    this.createGrid();
    this.topContentsArea.add([this.frameTitle, {xtype: 'tbfill'}, this.filterUsageText, this.expendIcon]);
    this.centerArea.add(this.tabPanel, this.diskUsageGrid);
    this.background.add([this.topContentsArea, this.centerArea]);
    this.add(this.background);
    // Hide the title and expand icon when rendered as a floating layer.
    if (this.floatingLayer) {
        this.frameTitle.hide();
        this.expendIcon.hide();
    }
},
/**
 * keydown handler for the usage filter field: swallows the event while the
 * current field value is not numeric.
 * NOTE(review): this tests me.value (the value before the new key), not the
 * pressed key itself -- confirm that is the intended guard.
 */
keyDownEvent: function (me, e) {
    if (!Ext.isNumeric(me.value)) {
        e.stopEvent();
        return;
    }
},
/**
 * change handler: restores the previous value when the input is not numeric,
 * otherwise clamps it to the field's configured min/max range.
 */
changeEvent: function(me, newValue, oldValue) {
    if (!Ext.isNumeric(me.value)) {
        me.setValue(oldValue);
    } else {
        if (me.value < me.minValue) {
            me.setValue(me.minValue);
        } else if (me.value > me.maxValue) {
            me.setValue(me.maxValue);
        }
    }
},
/**
 * blur handler: when the threshold actually changed, stores it in the web
 * environment (so it survives reloads) and refreshes the view with the new
 * filter applied.
 */
blurEvent: function() {
    if (+this.txnFilterDiskUsage !== +this.filterUsageText.getValue()) {
        this.txnFilterDiskUsage = +this.filterUsageText.getValue();
        common.WebEnv.Save(this.envKeyUsageLimit, this.txnFilterDiskUsage);
        this.frameRefresh();
    }
},
specialkeyEvent: function(me, e) {
if (e.getKey() === e.ENTER && me.oldValue !== me.value) {
if (me.value < 0) {
me.setValue(0);
} else if (me.value > 99) {
me.setValue(99);
}
me.oldValue = me.value;
me.fireEvent('blur', me);
}
},
/**
 * Builds the tab panel: a leading 'Total' tab plus one tab per monitored
 * host. Switching tabs shows the loading mask and refreshes the data for
 * the newly selected host.
 */
createTabPanel: function() {
    this.tabPanel = Ext.create('Exem.TabPanel', {
        layout: 'fit',
        width: '100%',
        height: 25,
        items: [{
            title: common.Util.TR('Total'),
            itemId: 'total',
            layout: 'fit'
        }],
        listeners: {
            scope: this,
            tabchange: function(tabpanel, newcard) {
                this.loadingMask.show(null, true);
                this.activeTabTitle = newcard.title;
                this.frameRefresh();
            }
        }
    });
    var hostName;
    for (var ix = 0, ixLen = this.displayHostList.length; ix < ixLen ; ix++ ) {
        hostName = this.displayHostList[ix];
        this.tabPanel.add({
            layout: 'fit',
            title : hostName,
            itemId: hostName
        });
    }
    this.tabPanel.setActiveTab(0);
    this.activeTabTitle = this.tabPanel.getActiveTab().title;
},
/**
 * Creates the disk-usage grid: host/mount/file-system columns, a rendered
 * usage bar, and size columns. Also wires a resize handler that keeps the
 * bar's centered text aligned with the usage column width.
 */
createGrid: function () {
    this.diskUsageGrid = Ext.create('Exem.BaseGrid', {
        layout : 'fit',
        usePager : false,
        autoScroll : false,
        borderVisible: true,
        localeType : 'H:i:s',
        columnLines : true,
        baseGridCls : 'baseGridRTM',
        exportFileName: this.title,
        useEmptyText: true,
        emptyTextMsg: common.Util.TR('No data to display'),
        style: {
            'overflow-x': 'hidden'
        }
    });
    this.diskUsageGrid.beginAddColumns();
    this.diskUsageGrid.addColumn(common.Util.CTR('Host Name'), 'host_name', 80, Grid.String, true, false);
    this.diskUsageGrid.addColumn(common.Util.CTR('Mount Name'), 'mount_name', 75, Grid.String, true, false);
    this.diskUsageGrid.addColumn(common.Util.CTR('File System'), 'file_system', 95, Grid.String, true, false);
    this.diskUsageGrid.addColumn(common.Util.CTR('Usage(%)'), 'usage', 95, Grid.Float, true, false);
    this.diskUsageGrid.addColumn(common.Util.CTR('Use Size(MB)'), 'use_size', 95, Grid.Number, true, false);
    this.diskUsageGrid.addColumn(common.Util.CTR('Total Size(MB)'), 'total_size', 95, Grid.Number, true, false);
    this.diskUsageGrid.endAddColumns();
    this.diskUsageGrid.addRenderer('usage', this.gridBarRenderer.bind(this), RendererType.bar);
    // Track the usage column width so the bar's overlay text stays centered.
    this.diskUsageGrid._columnsList[3].on({
        scope: this,
        resize: function() {
            this.progressFillWidth = $('#'+this.id+' .progress-bar').width();
            if (this.progressFillWidth) {
                $('#'+this.id+' .progress-fill-text').css('width', this.progressFillWidth);
            }
        }
    });
    this.diskUsageGrid._columnsList[1].minWidth = 150;
    this.diskUsageGrid._columnsList[1].flex = 1;
    // When a filter was set, the tab switched, and the filter then cleared,
    // rows from the previous tab stayed visible; refresh the grid whenever
    // the filter changes to avoid showing stale data.
    this.diskUsageGrid.pnlExGrid.on('filterchange', function() {
        this.diskUsageGrid.clearRows();
        this.frameRefresh();
    }.bind(this));
},
/**
* 그리드에 보여지는 막대 그래프 설정.
* value, metaData, record, rowIndex, colIndex, store, view
*
* @param {} value
* @param {} metaData
* @param {} record
* @param {} rowIndex
* @param {} colIndex
* @param {} store
* @param {} view
* @return {}
*/
gridBarRenderer: function() {
var htmlStr;
var value = arguments[0];
if (value !== 0) {
if (!this.progressFillWidth) {
this.progressFillWidth = 83;
}
htmlStr =
'<div class="progress-bar" style="border: 0px solid #666; height:13px; width: 100%;position:relative; text-align:center;">'+
'<div class="progress-fill" style="width:' + value + '%;">'+
'<div class="progress-fill-text" style="width:'+this.progressFillWidth+'px">'+value+'%</div>'+
'</div>'+ value + '%' +
'</div>';
} else {
htmlStr = '<div data-qtip="" style="text-align:center;">'+'0%'+'</div>';
}
return htmlStr;
},
/**
* 데이터 새로고침을 중지.
*/
stopRefreshData: function() {
if (this.refreshTimer) {
clearTimeout(this.refreshTimer);
}
},
/**
* 데이터 새로 고침.
* 새로고침 간격 (1분)
*/
frameRefresh: function() {
this.stopRefreshData();
var isDisplayCmp = Comm.RTComm.isEnableRtmView(this.openViewType);
if (isDisplayCmp || this.floatingLayer) {
this.selectDiskUsage();
}
this.refreshTimer = setTimeout(this.frameRefresh.bind(this), PlotChart.time.exMin * 1);
},
/**
* 디스크 사용량 조회
*/
selectDiskUsage: function() {
var hostName = this.activeTabTitle;
if (common.Util.TR('Total') === hostName) {
hostName = '';
for (var ix = 0, ixLen = this.displayHostList.length; ix < ixLen ; ix++ ) {
hostName += (ix === 0? '\'' : ',\'') + this.displayHostList[ix] + '\'';
}
} else {
hostName = '\'' + hostName + '\'';
}
if (Ext.isEmpty(hostName) === true) {
console.debug('%c [Disk Usage] [WARNING] ', 'color:#800000;background-color:gold;font-weight:bold;', 'No Selected Host Name');
return;
}
WS.SQLExec({
sql_file: 'IMXRT_DiskUsage.sql',
bind : [{
name: 'server_type', value: this.serverType, type: SQLBindType.INTEGER
}],
replace_string: [{
name: 'host_name', value: hostName
}]
}, function(aheader, adata) {
this.loadingMask.hide();
if (adata === null || adata === undefined) {
console.debug('%c [Disk Usage] [WARNING] ', 'color:#800000;background-color:gold;font-weight:bold;', aheader.message);
}
if (this.isClosedDockForm === true) {
return;
}
this.drawData(adata);
aheader = null;
adata = null;
}, this);
},
/**
* 디스크 사용량 데이터 표시
*
* @param {object} adata
*/
drawData: function(adata) {
this.diskUsageGrid.clearRows();
if (this.diskUsageGrid.pnlExGrid.headerCt === undefined ||
this.diskUsageGrid.pnlExGrid.headerCt === null) {
return;
}
var isDownHost;
for (var ix = 0, ixLen = adata.rows.length; ix < ixLen; ix++) {
if (+this.txnFilterDiskUsage <= +adata.rows[ix][2]) {
isDownHost = Comm.RTComm.isDownByHostName(adata.rows[ix][5]);
if (isDownHost === true) {
continue;
}
this.diskUsageGrid.addRow([
adata.rows[ix][5], // host name
adata.rows[ix][0], // mount name
adata.rows[ix][1], // file system
adata.rows[ix][2], // ratio
Math.trunc(+adata.rows[ix][3]), // used size
Math.trunc(+adata.rows[ix][4]) // tota size
]);
}
}
if (isDownHost === true) {
this.diskUsageGrid.emptyTextMsg = common.Util.TR('Host Down');
} else {
this.diskUsageGrid.emptyTextMsg = common.Util.TR('No data to display');
}
this.diskUsageGrid.showEmptyText();
this.diskUsageGrid.drawGrid();
adata = null;
}
| }); | random_line_split | |
exif-tags.ts | import { ExifMetadataType } from "./types";
export interface TagNames {
[id: number]: string;
}
export interface IfdTags {
[ExifMetadataType.Exif]: TagNames;
[ExifMetadataType.Gps]: TagNames;
[ExifMetadataType.Interoperability]: TagNames;
}
const tags: IfdTags = {
// Exif tags
[ExifMetadataType.Exif] : {
0x0001 : "InteropIndex",
0x0002 : "InteropVersion",
0x000B : "ProcessingSoftware",
0x00FE : "SubfileType",
0x00FF : "OldSubfileType",
0x0100 : "ImageWidth",
0x0101 : "ImageHeight",
0x0102 : "BitsPerSample",
0x0103 : "Compression",
0x0106 : "PhotometricInterpretation",
0x0107 : "Thresholding",
0x0108 : "CellWidth",
0x0109 : "CellLength",
0x010A : "FillOrder",
0x010D : "DocumentName",
0x010E : "ImageDescription",
0x010F : "Make",
0x0110 : "Model",
0x0111 : "StripOffsets",
0x0112 : "Orientation",
0x0115 : "SamplesPerPixel",
0x0116 : "RowsPerStrip",
0x0117 : "StripByteCounts",
0x0118 : "MinSampleValue",
0x0119 : "MaxSampleValue",
0x011A : "XResolution",
0x011B : "YResolution",
0x011C : "PlanarConfiguration",
0x011D : "PageName",
0x011E : "XPosition",
0x011F : "YPosition",
0x0120 : "FreeOffsets",
0x0121 : "FreeByteCounts",
0x0122 : "GrayResponseUnit",
0x0123 : "GrayResponseCurve",
0x0124 : "T4Options",
0x0125 : "T6Options",
0x0128 : "ResolutionUnit",
0x0129 : "PageNumber",
0x012C : "ColorResponseUnit",
0x012D : "TransferFunction",
0x0131 : "Software",
0x0132 : "ModifyDate",
0x013B : "Artist",
0x013C : "HostComputer",
0x013D : "Predictor",
0x013E : "WhitePoint",
0x013F : "PrimaryChromaticities",
0x0140 : "ColorMap",
0x0141 : "HalftoneHints",
0x0142 : "TileWidth",
0x0143 : "TileLength",
0x0144 : "TileOffsets",
0x0145 : "TileByteCounts",
0x0146 : "BadFaxLines",
0x0147 : "CleanFaxData",
0x0148 : "ConsecutiveBadFaxLines",
0x014A : "SubIFD",
0x014C : "InkSet",
0x014D : "InkNames",
0x014E : "NumberofInks",
0x0150 : "DotRange",
0x0151 : "TargetPrinter",
0x0152 : "ExtraSamples",
0x0153 : "SampleFormat",
0x0154 : "SMinSampleValue",
0x0155 : "SMaxSampleValue",
0x0156 : "TransferRange",
0x0157 : "ClipPath",
0x0158 : "XClipPathUnits",
0x0159 : "YClipPathUnits",
0x015A : "Indexed",
0x015B : "JPEGTables",
0x015F : "OPIProxy",
0x0190 : "GlobalParametersIFD",
0x0191 : "ProfileType",
0x0192 : "FaxProfile",
0x0193 : "CodingMethods",
0x0194 : "VersionYear",
0x0195 : "ModeNumber",
0x01B1 : "Decode",
0x01B2 : "DefaultImageColor",
0x01B3 : "T82Options",
0x01B5 : "JPEGTables",
0x0200 : "JPEGProc",
0x0201 : "ThumbnailOffset",
0x0202 : "ThumbnailLength",
0x0203 : "JPEGRestartInterval",
0x0205 : "JPEGLosslessPredictors",
0x0206 : "JPEGPointTransforms",
0x0207 : "JPEGQTables",
0x0208 : "JPEGDCTables",
0x0209 : "JPEGACTables",
0x0211 : "YCbCrCoefficients",
0x0212 : "YCbCrSubSampling",
0x0213 : "YCbCrPositioning",
0x0214 : "ReferenceBlackWhite",
0x022F : "StripRowCounts",
0x02BC : "ApplicationNotes",
0x03E7 : "USPTOMiscellaneous",
0x1000 : "RelatedImageFileFormat",
0x1001 : "RelatedImageWidth",
0x1002 : "RelatedImageHeight",
0x4746 : "Rating",
0x4747 : "XP_DIP_XML",
0x4748 : "StitchInfo",
0x4749 : "RatingPercent",
0x800D : "ImageID",
0x80A3 : "WangTag1",
0x80A4 : "WangAnnotation",
0x80A5 : "WangTag3",
0x80A6 : "WangTag4",
0x80E3 : "Matteing",
0x80E4 : "DataType",
0x80E5 : "ImageDepth",
0x80E6 : "TileDepth",
0x827D : "Model2",
0x828D : "CFARepeatPatternDim",
0x828E : "CFAPattern2",
0x828F : "BatteryLevel",
0x8290 : "KodakIFD",
0x8298 : "Copyright",
0x829A : "ExposureTime",
0x829D : "FNumber",
0x82A5 : "MDFileTag",
0x82A6 : "MDScalePixel",
0x82A7 : "MDColorTable",
0x82A8 : "MDLabName",
0x82A9 : "MDSampleInfo",
0x82AA : "MDPrepDate",
0x82AB : "MDPrepTime",
0x82AC : "MDFileUnits",
0x830E : "PixelScale",
0x8335 : "AdventScale",
0x8336 : "AdventRevision",
0x835C : "UIC1Tag",
0x835D : "UIC2Tag",
0x835E : "UIC3Tag",
0x835F : "UIC4Tag",
0x83BB : "IPTC-NAA",
0x847E : "IntergraphPacketData",
0x847F : "IntergraphFlagRegisters",
0x8480 : "IntergraphMatrix",
0x8481 : "INGRReserved",
0x8482 : "ModelTiePoint",
0x84E0 : "Site",
0x84E1 : "ColorSequence",
0x84E2 : "IT8Header",
0x84E3 : "RasterPadding",
0x84E4 : "BitsPerRunLength",
0x84E5 : "BitsPerExtendedRunLength",
0x84E6 : "ColorTable",
0x84E7 : "ImageColorIndicator",
0x84E8 : "BackgroundColorIndicator",
0x84E9 : "ImageColorValue",
0x84EA : "BackgroundColorValue",
0x84EB : "PixelIntensityRange",
0x84EC : "TransparencyIndicator",
0x84ED : "ColorCharacterization",
0x84EE : "HCUsage",
0x84EF : "TrapIndicator",
0x84F0 : "CMYKEquivalent",
0x8546 : "SEMInfo",
0x8568 : "AFCP_IPTC",
0x85B8 : "PixelMagicJBIGOptions",
0x85D8 : "ModelTransform",
0x8602 : "WB_GRGBLevels",
0x8606 : "LeafData",
0x8649 : "PhotoshopSettings",
0x8769 : "ExifOffset",
0x8773 : "ICC_Profile",
0x877F : "TIFF_FXExtensions",
0x8780 : "MultiProfiles",
0x8781 : "SharedData",
0x8782 : "T88Options",
0x87AC : "ImageLayer",
0x87AF : "GeoTiffDirectory",
0x87B0 : "GeoTiffDoubleParams",
0x87B1 : "GeoTiffAsciiParams",
0x8822 : "ExposureProgram",
0x8824 : "SpectralSensitivity",
0x8825 : "GPSInfo",
0x8827 : "ISO",
0x8828 : "Opto-ElectricConvFactor",
0x8829 : "Interlace",
0x882A : "TimeZoneOffset",
0x882B : "SelfTimerMode",
0x8830 : "SensitivityType",
0x8831 : "StandardOutputSensitivity",
0x8832 : "RecommendedExposureIndex",
0x8833 : "ISOSpeed",
0x8834 : "ISOSpeedLatitudeyyy",
0x8835 : "ISOSpeedLatitudezzz",
0x885C : "FaxRecvParams",
0x885D : "FaxSubAddress",
0x885E : "FaxRecvTime",
0x888A : "LeafSubIFD",
0x9000 : "ExifVersion",
0x9003 : "DateTimeOriginal",
0x9004 : "CreateDate",
0x9101 : "ComponentsConfiguration",
0x9102 : "CompressedBitsPerPixel",
0x9201 : "ShutterSpeedValue",
0x9202 : "ApertureValue",
0x9203 : "BrightnessValue",
0x9204 : "ExposureCompensation",
0x9205 : "MaxApertureValue",
0x9206 : "SubjectDistance",
0x9207 : "MeteringMode",
0x9208 : "LightSource",
0x9209 : "Flash",
0x920A : "FocalLength",
0x920B : "FlashEnergy",
0x920C : "SpatialFrequencyResponse",
0x920D : "Noise",
0x920E : "FocalPlaneXResolution",
0x920F : "FocalPlaneYResolution",
0x9210 : "FocalPlaneResolutionUnit",
0x9211 : "ImageNumber",
0x9212 : "SecurityClassification",
0x9213 : "ImageHistory",
0x9214 : "SubjectArea",
0x9215 : "ExposureIndex",
0x9216 : "TIFF-EPStandardID",
0x9217 : "SensingMethod",
0x923A : "CIP3DataFile",
0x923B : "CIP3Sheet",
0x923C : "CIP3Side",
0x923F : "StoNits",
0x927C : "MakerNote",
0x9286 : "UserComment",
0x9290 : "SubSecTime",
0x9291 : "SubSecTimeOriginal",
0x9292 : "SubSecTimeDigitized",
0x932F : "MSDocumentText",
0x9330 : "MSPropertySetStorage",
0x9331 : "MSDocumentTextPosition",
0x935C : "ImageSourceData",
0x9C9B : "XPTitle",
0x9C9C : "XPComment",
0x9C9D : "XPAuthor",
0x9C9E : "XPKeywords",
0x9C9F : "XPSubject",
0xA000 : "FlashpixVersion",
0xA001 : "ColorSpace",
0xA002 : "ExifImageWidth",
0xA003 : "ExifImageHeight",
0xA004 : "RelatedSoundFile",
0xA005 : "InteropOffset",
0xA20B : "FlashEnergy",
0xA20C : "SpatialFrequencyResponse",
0xA20D : "Noise",
0xA20E : "FocalPlaneXResolution",
0xA20F : "FocalPlaneYResolution",
0xA210 : "FocalPlaneResolutionUnit",
0xA211 : "ImageNumber",
0xA212 : "SecurityClassification",
0xA213 : "ImageHistory",
0xA214 : "SubjectLocation",
0xA215 : "ExposureIndex",
0xA216 : "TIFF-EPStandardID",
0xA217 : "SensingMethod",
0xA300 : "FileSource",
0xA301 : "SceneType",
0xA302 : "CFAPattern",
0xA401 : "CustomRendered",
0xA402 : "ExposureMode",
0xA403 : "WhiteBalance",
0xA404 : "DigitalZoomRatio",
0xA405 : "FocalLengthIn35mmFormat",
0xA406 : "SceneCaptureType",
0xA407 : "GainControl",
0xA408 : "Contrast",
0xA409 : "Saturation",
0xA40A : "Sharpness",
0xA40B : "DeviceSettingDescription",
0xA40C : "SubjectDistanceRange",
0xA420 : "ImageUniqueID",
0xA430 : "OwnerName",
0xA431 : "SerialNumber",
0xA432 : "LensInfo",
0xA433 : "LensMake",
0xA434 : "LensModel",
0xA435 : "LensSerialNumber",
0xA480 : "GDALMetadata",
0xA481 : "GDALNoData",
0xA500 : "Gamma",
0xAFC0 : "ExpandSoftware",
0xAFC1 : "ExpandLens",
0xAFC2 : "ExpandFilm",
0xAFC3 : "ExpandFilterLens",
0xAFC4 : "ExpandScanner",
0xAFC5 : "ExpandFlashLamp",
0xBC01 : "PixelFormat",
0xBC02 : "Transformation",
0xBC03 : "Uncompressed",
0xBC04 : "ImageType",
0xBC80 : "ImageWidth",
0xBC81 : "ImageHeight",
0xBC82 : "WidthResolution",
0xBC83 : "HeightResolution",
0xBCC0 : "ImageOffset",
0xBCC1 : "ImageByteCount",
0xBCC2 : "AlphaOffset",
0xBCC3 : "AlphaByteCount",
0xBCC4 : "ImageDataDiscard",
0xBCC5 : "AlphaDataDiscard",
0xC427 : "OceScanjobDesc",
0xC428 : "OceApplicationSelector",
0xC429 : "OceIDNumber",
0xC42A : "OceImageLogic",
0xC44F : "Annotations",
0xC4A5 : "PrintIM",
0xC580 : "USPTOOriginalContentType",
0xC612 : "DNGVersion",
0xC613 : "DNGBackwardVersion",
0xC614 : "UniqueCameraModel",
0xC615 : "LocalizedCameraModel",
0xC616 : "CFAPlaneColor",
0xC617 : "CFALayout",
0xC618 : "LinearizationTable",
0xC619 : "BlackLevelRepeatDim",
0xC61A : "BlackLevel",
0xC61B : "BlackLevelDeltaH",
0xC61C : "BlackLevelDeltaV",
0xC61D : "WhiteLevel",
0xC61E : "DefaultScale",
0xC61F : "DefaultCropOrigin",
0xC620 : "DefaultCropSize",
0xC621 : "ColorMatrix1",
0xC622 : "ColorMatrix2",
0xC623 : "CameraCalibration1",
0xC624 : "CameraCalibration2",
0xC625 : "ReductionMatrix1",
0xC626 : "ReductionMatrix2",
0xC627 : "AnalogBalance",
0xC628 : "AsShotNeutral",
0xC629 : "AsShotWhiteXY",
0xC62A : "BaselineExposure",
0xC62B : "BaselineNoise",
0xC62C : "BaselineSharpness",
0xC62D : "BayerGreenSplit",
0xC62E : "LinearResponseLimit",
0xC62F : "CameraSerialNumber",
0xC630 : "DNGLensInfo",
0xC631 : "ChromaBlurRadius",
0xC632 : "AntiAliasStrength",
0xC633 : "ShadowScale",
0xC634 : "DNGPrivateData",
0xC635 : "MakerNoteSafety",
0xC640 : "RawImageSegmentation",
0xC65A : "CalibrationIlluminant1",
0xC65B : "CalibrationIlluminant2",
0xC65C : "BestQualityScale",
0xC65D : "RawDataUniqueID",
0xC660 : "AliasLayerMetadata",
0xC68B : "OriginalRawFileName",
0xC68C : "OriginalRawFileData",
0xC68D : "ActiveArea",
0xC68E : "MaskedAreas",
0xC68F : "AsShotICCProfile",
0xC690 : "AsShotPreProfileMatrix",
0xC691 : "CurrentICCProfile",
0xC692 : "CurrentPreProfileMatrix",
0xC6BF : "ColorimetricReference",
0xC6D2 : "PanasonicTitle",
0xC6D3 : "PanasonicTitle2",
0xC6F3 : "CameraCalibrationSig",
0xC6F4 : "ProfileCalibrationSig",
0xC6F5 : "ProfileIFD",
0xC6F6 : "AsShotProfileName",
0xC6F7 : "NoiseReductionApplied",
0xC6F8 : "ProfileName",
0xC6F9 : "ProfileHueSatMapDims",
0xC6FA : "ProfileHueSatMapData1",
0xC6FB : "ProfileHueSatMapData2",
0xC6FC : "ProfileToneCurve",
0xC6FD : "ProfileEmbedPolicy",
0xC6FE : "ProfileCopyright",
0xC714 : "ForwardMatrix1", | 0xC716 : "PreviewApplicationName",
0xC717 : "PreviewApplicationVersion",
0xC718 : "PreviewSettingsName",
0xC719 : "PreviewSettingsDigest",
0xC71A : "PreviewColorSpace",
0xC71B : "PreviewDateTime",
0xC71C : "RawImageDigest",
0xC71D : "OriginalRawFileDigest",
0xC71E : "SubTileBlockSize",
0xC71F : "RowInterleaveFactor",
0xC725 : "ProfileLookTableDims",
0xC726 : "ProfileLookTableData",
0xC740 : "OpcodeList1",
0xC741 : "OpcodeList2",
0xC74E : "OpcodeList3",
0xC761 : "NoiseProfile",
0xC763 : "TimeCodes",
0xC764 : "FrameRate",
0xC772 : "TStop",
0xC789 : "ReelName",
0xC791 : "OriginalDefaultFinalSize",
0xC792 : "OriginalBestQualitySize",
0xC793 : "OriginalDefaultCropSize",
0xC7A1 : "CameraLabel",
0xC7A3 : "ProfileHueSatMapEncoding",
0xC7A4 : "ProfileLookTableEncoding",
0xC7A5 : "BaselineExposureOffset",
0xC7A6 : "DefaultBlackRender",
0xC7A7 : "NewRawImageDigest",
0xC7A8 : "RawToPreviewGain",
0xC7B5 : "DefaultUserCrop",
0xEA1C : "Padding",
0xEA1D : "OffsetSchema",
0xFDE8 : "OwnerName",
0xFDE9 : "SerialNumber",
0xFDEA : "Lens",
0xFE00 : "KDC_IFD",
0xFE4C : "RawFile",
0xFE4D : "Converter",
0xFE4E : "WhiteBalance",
0xFE51 : "Exposure",
0xFE52 : "Shadows",
0xFE53 : "Brightness",
0xFE54 : "Contrast",
0xFE55 : "Saturation",
0xFE56 : "Sharpness",
0xFE57 : "Smoothness",
0xFE58 : "MoireFilter",
},
// GPS Tags
[ExifMetadataType.Gps] : {
0x0000 : "GPSVersionID",
0x0001 : "GPSLatitudeRef",
0x0002 : "GPSLatitude",
0x0003 : "GPSLongitudeRef",
0x0004 : "GPSLongitude",
0x0005 : "GPSAltitudeRef",
0x0006 : "GPSAltitude",
0x0007 : "GPSTimeStamp",
0x0008 : "GPSSatellites",
0x0009 : "GPSStatus",
0x000A : "GPSMeasureMode",
0x000B : "GPSDOP",
0x000C : "GPSSpeedRef",
0x000D : "GPSSpeed",
0x000E : "GPSTrackRef",
0x000F : "GPSTrack",
0x0010 : "GPSImgDirectionRef",
0x0011 : "GPSImgDirection",
0x0012 : "GPSMapDatum",
0x0013 : "GPSDestLatitudeRef",
0x0014 : "GPSDestLatitude",
0x0015 : "GPSDestLongitudeRef",
0x0016 : "GPSDestLongitude",
0x0017 : "GPSDestBearingRef",
0x0018 : "GPSDestBearing",
0x0019 : "GPSDestDistanceRef",
0x001A : "GPSDestDistance",
0x001B : "GPSProcessingMethod",
0x001C : "GPSAreaInformation",
0x001D : "GPSDateStamp",
0x001E : "GPSDifferential",
0x001F : "GPSHPositioningError",
},
// Interoperability Tags
[ExifMetadataType.Interoperability] : {
},
};
export default tags; | 0xC715 : "ForwardMatrix2", | random_line_split |
preprocessing_lpba40.py | import numpy as np
import SimpleITK as sitk
import matplotlib.pyplot as plt
import scipy.stats as stats
DEFAULT_CUTOFF = 0.01, 0.99
STANDARD_RANGE = 0, 100
def resample_image(itk_image, out_spacing=(2.0, 2.0, 2.0), is_label=False):
original_spacing = itk_image.GetSpacing()
original_size = itk_image.GetSize()
out_size = [int(np.round(original_size[0] * (original_spacing[0] / out_spacing[0]))),
int(np.round(original_size[1] * (original_spacing[1] / out_spacing[1]))),
int(np.round(original_size[2] * (original_spacing[2] / out_spacing[2])))]
resample = sitk.ResampleImageFilter()
resample.SetOutputSpacing(out_spacing)
resample.SetSize(out_size)
resample.SetOutputDirection(itk_image.GetDirection())
resample.SetOutputOrigin(itk_image.GetOrigin())
resample.SetTransform(sitk.Transform())
resample.SetDefaultPixelValue(itk_image.GetPixelIDValue())
if is_label:
resample.SetInterpolator(sitk.sitkNearestNeighbor)
else:
resample.SetInterpolator(sitk.sitkBSpline)
return resample.Execute(itk_image)
def center_crop(img, size_ratio):
x, y, z = img.shape
size_ratio_x, size_ratio_y, size_ratio_z = size_ratio
size_x = size_ratio_x
size_y = size_ratio_y
size_z = size_ratio_z
if x < size_x or y < size_y and z < size_z:
raise ValueError
x1 = 0
y1 = int((y - size_y) / 2)
z1 = int((z - size_z) / 2)
img_crop = img[x1: x1 + size_x, y1: y1 + size_y, z1: z1 + size_z]
return img_crop
def _get_percentiles(percentiles_cutoff):
quartiles = np.arange(25, 100, 25).tolist()
deciles = np.arange(10, 100, 10).tolist()
all_percentiles = list(percentiles_cutoff) + quartiles + deciles
percentiles = sorted(set(all_percentiles))
return np.array(percentiles)
def _get_average_mapping(percentiles_database):
"""Map the landmarks of the database to the chosen range.
Args:
percentiles_database: Percentiles database over which to perform the
averaging.
"""
# Assuming percentiles_database.shape == (num_data_points, num_percentiles)
pc1 = percentiles_database[:, 0]
pc2 = percentiles_database[:, -1]
s1, s2 = STANDARD_RANGE
slopes = (s2 - s1) / (pc2 - pc1)
slopes = np.nan_to_num(slopes)
intercepts = np.mean(s1 - slopes * pc1)
num_images = len(percentiles_database)
final_map = slopes.dot(percentiles_database) / num_images + intercepts
return final_map
def _standardize_cutoff(cutoff):
"""Standardize the cutoff values given in the configuration.
Computes percentile landmark normalization by default.
"""
cutoff = np.asarray(cutoff)
cutoff[0] = max(0., cutoff[0])
cutoff[1] = min(1., cutoff[1])
cutoff[0] = np.min([cutoff[0], 0.09])
cutoff[1] = np.max([cutoff[1], 0.91])
return cutoff
def normalize(array, landmarks, mask=None, cutoff=None, epsilon=1e-5):
cutoff_ = DEFAULT_CUTOFF if cutoff is None else cutoff
mapping = landmarks
data = array
shape = data.shape
data = data.reshape(-1).astype(np.float32)
if mask is None:
mask = np.ones_like(data, np.bool)
mask = mask.reshape(-1)
range_to_use = [0, 1, 2, 4, 5, 6, 7, 8, 10, 11, 12]
quantiles_cutoff = _standardize_cutoff(cutoff_)
percentiles_cutoff = 100 * np.array(quantiles_cutoff)
percentiles = _get_percentiles(percentiles_cutoff)
percentile_values = np.percentile(data[mask], percentiles)
# Apply linear histogram standardization
range_mapping = mapping[range_to_use]
range_perc = percentile_values[range_to_use]
diff_mapping = np.diff(range_mapping)
diff_perc = np.diff(range_perc)
# Handling the case where two landmarks are the same
# for a given input image. This usually happens when
# image background is not removed from the image.
diff_perc[diff_perc < epsilon] = np.inf
affine_map = np.zeros([2, len(range_to_use) - 1])
# Compute slopes of the linear models
affine_map[0] = diff_mapping / diff_perc
# Compute intercepts of the linear models
affine_map[1] = range_mapping[:-1] - affine_map[0] * range_perc[:-1]
bin_id = np.digitize(data, range_perc[1:-1], right=False)
lin_img = affine_map[0, bin_id]
aff_img = affine_map[1, bin_id]
new_img = lin_img * data + aff_img
new_img = new_img.reshape(shape)
new_img = new_img.astype(np.float32)
return new_img
def calculate_landmarks(image_path='../datasets/LPBA40/LPBA40_rigidly_registered_pairs'):
quantiles_cutoff = DEFAULT_CUTOFF
percentiles_cutoff = 100 * np.array(quantiles_cutoff)
percentiles_database = []
percentiles = _get_percentiles(percentiles_cutoff)
count = 1
for i in list(range(1, 41, 1)):
for j in list(range(1, 41, 1)):
img_path = os.path.join(image_path, 'l{}_to_l{}.hdr'.format(str(i), str(j)))
atlas_path = os.path.join(
image_path.replace('LPBA40_rigidly_registered_pairs', 'LPBA40_rigidly_registered_label_pairs'),
'l{}_to_l{}.hdr'.format(str(i), str(j)))
if os.path.exists(img_path) and os.path.exists(atlas_path):
img_sitk = sitk.ReadImage(str(img_path))
img_np = sitk.GetArrayFromImage(img_sitk).swapaxes(0, 2)
mask = img_np > 0
percentile_values = np.percentile(img_np[mask], percentiles)
percentiles_database.append(percentile_values)
count += 1
else:
raise FileNotFoundError
percentiles_database = np.vstack(percentiles_database)
mapping = _get_average_mapping(percentiles_database)
print(mapping)
np.save('../datasets/LPBA40/mapping.npy', mapping)
return mapping
def histogram_stardardization_resample_center_crop(mapping,
input_path='../datasets/LPBA40/LPBA40_rigidly_registered_pairs',
output_path_hs_small='../datasets/LPBA40/LPBA40_rigidly_registered_pairs_histogram_standardization_small',
output_path_mask='../datasets/LPBA40/LPBA40_rigidly_registered_label_pairs_small'):
if not os.path.exists(str(output_path_hs_small)):
os.makedirs(str(output_path_hs_small))
if not os.path.exists(str(output_path_mask)):
os.makedirs(str(output_path_mask))
for i in list(range(1, 41, 1)):
for j in list(range(1, 41, 1)):
# ~~~~~~~~~~~~~~~ images ~~~~~~~~~~~~~~~
volpath = os.path.join(input_path, 'l{}_to_l{}.nii'.format(str(i), str(j)))
img_sitk = sitk.ReadImage(str(volpath))
img_np = sitk.GetArrayFromImage(img_sitk).swapaxes(0, 2)
mask = img_np > 0
# 1. histogram_stardardization
img_np_hs = normalize(img_np, mapping, mask)
img_sitk_hs = sitk.GetImageFromArray(img_np_hs.swapaxes(0, 2))
img_sitk_hs.SetSpacing(img_sitk.GetSpacing())
img_sitk_hs.SetDirection(img_sitk.GetDirection())
img_sitk_hs.SetOrigin(img_sitk.GetOrigin())
# 2. resample
img_sitk_hs_small = resample_image(img_sitk_hs)
img_np_hs_small = sitk.GetArrayFromImage(img_sitk_hs_small).swapaxes(0, 2)
# 3. center_crop
img_crop = center_crop(img=img_np_hs_small, size_ratio=(80, 106, 80)).swapaxes(0, 2)
new_img = sitk.GetImageFromArray(img_crop)
new_img.SetSpacing(img_sitk_hs_small.GetSpacing())
new_img.SetDirection(img_sitk_hs_small.GetDirection())
new_img.SetOrigin(img_sitk_hs_small.GetOrigin())
output_path_hs_small_img = os.path.join(output_path_hs_small, 'l{}_to_l{}.nii'.format(str(i), str(j)))
sitk.WriteImage(new_img, str(output_path_hs_small_img))
# ~~~~~~~~~~~~~~~ masks ~~~~~~~~~~~~~~~
atlas_path = volpath.replace('LPBA40_rigidly_registered_pairs', 'LPBA40_rigidly_registered_label_pairs')
atlas = sitk.ReadImage(str(atlas_path))
# 1. resample
atlas_resampled = resample_image(atlas, is_label=True)
atlas_np = sitk.GetArrayFromImage(atlas_resampled).swapaxes(0, 2)
# 2. center_crop
atlas_crop = center_crop(img=atlas_np, size_ratio=(80, 106, 80)).swapaxes(0, 2)
new_atlas = sitk.GetImageFromArray(atlas_crop)
new_atlas.SetSpacing(atlas_resampled.GetSpacing())
new_atlas.SetDirection(atlas_resampled.GetDirection())
new_atlas.SetOrigin(atlas_resampled.GetOrigin())
output_path_hs_small_atlas = os.path.join(output_path_mask, 'l{}_to_l{}.nii'.format(str(i), str(j)))
sitk.WriteImage(new_atlas, str(output_path_hs_small_atlas))
def plot_hist(image_path_hs='../datasets/LPBA40/LPBA40_rigidly_registered_pairs_histogram_standardization_small'):
for i in list(range(1, 41, 1)):
for j in list(range(1, 41, 1)):
volpath = os.path.join(image_path_hs, 'l{}_to_l{}.nii'.format(str(i), str(j)))
img_sitk = sitk.ReadImage(str(volpath))
img_np = np.clip(sitk.GetArrayFromImage(img_sitk).swapaxes(0, 2), 50, 100)
data = np.reshape(img_np[img_np>50], -1)
print(data.shape)
density = stats.gaussian_kde(data)
xs = np.linspace(40, 110, 70)
density.covariance_factor = lambda: .25
density._compute_covariance()
plt.plot(xs, density(xs))
plt.show()
if __name__ == '__main__':
mapping = calculate_landmarks(image_path='../datasets/LPBA40/LPBA40_rigidly_registered_pairs')
# mapping = np.asarray([1.77635684e-15, 4.02863140e+01, 5.86044434e+01, 6.33688576e+01, 6.66438972e+01, 7.12987107e+01, 7.53526276e+01, 7.96537020e+01, 8.43034770e+01, 8.67112286e+01, 8.91208850e+01, 9.35115887e+01, 1.00000000e+02])
histogram_stardardization_resample_center_crop(mapping=mapping,
input_path='../datasets/LPBA40/LPBA40_rigidly_registered_pairs',
output_path_hs_small='../datasets/LPBA40/LPBA40_rigidly_registered_pairs_histogram_standardization_small',
output_path_mask='../datasets/LPBA40/LPBA40_rigidly_registered_label_pairs_small')
plot_hist() | import os
import subprocess | random_line_split | |
preprocessing_lpba40.py | import os
import subprocess
import numpy as np
import SimpleITK as sitk
import matplotlib.pyplot as plt
import scipy.stats as stats
DEFAULT_CUTOFF = 0.01, 0.99
STANDARD_RANGE = 0, 100
def resample_image(itk_image, out_spacing=(2.0, 2.0, 2.0), is_label=False):
original_spacing = itk_image.GetSpacing()
original_size = itk_image.GetSize()
out_size = [int(np.round(original_size[0] * (original_spacing[0] / out_spacing[0]))),
int(np.round(original_size[1] * (original_spacing[1] / out_spacing[1]))),
int(np.round(original_size[2] * (original_spacing[2] / out_spacing[2])))]
resample = sitk.ResampleImageFilter()
resample.SetOutputSpacing(out_spacing)
resample.SetSize(out_size)
resample.SetOutputDirection(itk_image.GetDirection())
resample.SetOutputOrigin(itk_image.GetOrigin())
resample.SetTransform(sitk.Transform())
resample.SetDefaultPixelValue(itk_image.GetPixelIDValue())
if is_label:
resample.SetInterpolator(sitk.sitkNearestNeighbor)
else:
resample.SetInterpolator(sitk.sitkBSpline)
return resample.Execute(itk_image)
def center_crop(img, size_ratio):
x, y, z = img.shape
size_ratio_x, size_ratio_y, size_ratio_z = size_ratio
size_x = size_ratio_x
size_y = size_ratio_y
size_z = size_ratio_z
if x < size_x or y < size_y and z < size_z:
raise ValueError
x1 = 0
y1 = int((y - size_y) / 2)
z1 = int((z - size_z) / 2)
img_crop = img[x1: x1 + size_x, y1: y1 + size_y, z1: z1 + size_z]
return img_crop
def _get_percentiles(percentiles_cutoff):
quartiles = np.arange(25, 100, 25).tolist()
deciles = np.arange(10, 100, 10).tolist()
all_percentiles = list(percentiles_cutoff) + quartiles + deciles
percentiles = sorted(set(all_percentiles))
return np.array(percentiles)
def _get_average_mapping(percentiles_database):
"""Map the landmarks of the database to the chosen range.
Args:
percentiles_database: Percentiles database over which to perform the
averaging.
"""
# Assuming percentiles_database.shape == (num_data_points, num_percentiles)
pc1 = percentiles_database[:, 0]
pc2 = percentiles_database[:, -1]
s1, s2 = STANDARD_RANGE
slopes = (s2 - s1) / (pc2 - pc1)
slopes = np.nan_to_num(slopes)
intercepts = np.mean(s1 - slopes * pc1)
num_images = len(percentiles_database)
final_map = slopes.dot(percentiles_database) / num_images + intercepts
return final_map
def _standardize_cutoff(cutoff):
"""Standardize the cutoff values given in the configuration.
Computes percentile landmark normalization by default.
"""
cutoff = np.asarray(cutoff)
cutoff[0] = max(0., cutoff[0])
cutoff[1] = min(1., cutoff[1])
cutoff[0] = np.min([cutoff[0], 0.09])
cutoff[1] = np.max([cutoff[1], 0.91])
return cutoff
def normalize(array, landmarks, mask=None, cutoff=None, epsilon=1e-5):
cutoff_ = DEFAULT_CUTOFF if cutoff is None else cutoff
mapping = landmarks
data = array
shape = data.shape
data = data.reshape(-1).astype(np.float32)
if mask is None:
mask = np.ones_like(data, np.bool)
mask = mask.reshape(-1)
range_to_use = [0, 1, 2, 4, 5, 6, 7, 8, 10, 11, 12]
quantiles_cutoff = _standardize_cutoff(cutoff_)
percentiles_cutoff = 100 * np.array(quantiles_cutoff)
percentiles = _get_percentiles(percentiles_cutoff)
percentile_values = np.percentile(data[mask], percentiles)
# Apply linear histogram standardization
range_mapping = mapping[range_to_use]
range_perc = percentile_values[range_to_use]
diff_mapping = np.diff(range_mapping)
diff_perc = np.diff(range_perc)
# Handling the case where two landmarks are the same
# for a given input image. This usually happens when
# image background is not removed from the image.
diff_perc[diff_perc < epsilon] = np.inf
affine_map = np.zeros([2, len(range_to_use) - 1])
# Compute slopes of the linear models
affine_map[0] = diff_mapping / diff_perc
# Compute intercepts of the linear models
affine_map[1] = range_mapping[:-1] - affine_map[0] * range_perc[:-1]
bin_id = np.digitize(data, range_perc[1:-1], right=False)
lin_img = affine_map[0, bin_id]
aff_img = affine_map[1, bin_id]
new_img = lin_img * data + aff_img
new_img = new_img.reshape(shape)
new_img = new_img.astype(np.float32)
return new_img
def calculate_landmarks(image_path='../datasets/LPBA40/LPBA40_rigidly_registered_pairs'):
quantiles_cutoff = DEFAULT_CUTOFF
percentiles_cutoff = 100 * np.array(quantiles_cutoff)
percentiles_database = []
percentiles = _get_percentiles(percentiles_cutoff)
count = 1
for i in list(range(1, 41, 1)):
for j in list(range(1, 41, 1)):
img_path = os.path.join(image_path, 'l{}_to_l{}.hdr'.format(str(i), str(j)))
atlas_path = os.path.join(
image_path.replace('LPBA40_rigidly_registered_pairs', 'LPBA40_rigidly_registered_label_pairs'),
'l{}_to_l{}.hdr'.format(str(i), str(j)))
if os.path.exists(img_path) and os.path.exists(atlas_path):
img_sitk = sitk.ReadImage(str(img_path))
img_np = sitk.GetArrayFromImage(img_sitk).swapaxes(0, 2)
mask = img_np > 0
percentile_values = np.percentile(img_np[mask], percentiles)
percentiles_database.append(percentile_values)
count += 1
else:
raise FileNotFoundError
percentiles_database = np.vstack(percentiles_database)
mapping = _get_average_mapping(percentiles_database)
print(mapping)
np.save('../datasets/LPBA40/mapping.npy', mapping)
return mapping
def histogram_stardardization_resample_center_crop(mapping,
input_path='../datasets/LPBA40/LPBA40_rigidly_registered_pairs',
output_path_hs_small='../datasets/LPBA40/LPBA40_rigidly_registered_pairs_histogram_standardization_small',
output_path_mask='../datasets/LPBA40/LPBA40_rigidly_registered_label_pairs_small'):
if not os.path.exists(str(output_path_hs_small)):
|
if not os.path.exists(str(output_path_mask)):
os.makedirs(str(output_path_mask))
for i in list(range(1, 41, 1)):
for j in list(range(1, 41, 1)):
# ~~~~~~~~~~~~~~~ images ~~~~~~~~~~~~~~~
volpath = os.path.join(input_path, 'l{}_to_l{}.nii'.format(str(i), str(j)))
img_sitk = sitk.ReadImage(str(volpath))
img_np = sitk.GetArrayFromImage(img_sitk).swapaxes(0, 2)
mask = img_np > 0
# 1. histogram_stardardization
img_np_hs = normalize(img_np, mapping, mask)
img_sitk_hs = sitk.GetImageFromArray(img_np_hs.swapaxes(0, 2))
img_sitk_hs.SetSpacing(img_sitk.GetSpacing())
img_sitk_hs.SetDirection(img_sitk.GetDirection())
img_sitk_hs.SetOrigin(img_sitk.GetOrigin())
# 2. resample
img_sitk_hs_small = resample_image(img_sitk_hs)
img_np_hs_small = sitk.GetArrayFromImage(img_sitk_hs_small).swapaxes(0, 2)
# 3. center_crop
img_crop = center_crop(img=img_np_hs_small, size_ratio=(80, 106, 80)).swapaxes(0, 2)
new_img = sitk.GetImageFromArray(img_crop)
new_img.SetSpacing(img_sitk_hs_small.GetSpacing())
new_img.SetDirection(img_sitk_hs_small.GetDirection())
new_img.SetOrigin(img_sitk_hs_small.GetOrigin())
output_path_hs_small_img = os.path.join(output_path_hs_small, 'l{}_to_l{}.nii'.format(str(i), str(j)))
sitk.WriteImage(new_img, str(output_path_hs_small_img))
# ~~~~~~~~~~~~~~~ masks ~~~~~~~~~~~~~~~
atlas_path = volpath.replace('LPBA40_rigidly_registered_pairs', 'LPBA40_rigidly_registered_label_pairs')
atlas = sitk.ReadImage(str(atlas_path))
# 1. resample
atlas_resampled = resample_image(atlas, is_label=True)
atlas_np = sitk.GetArrayFromImage(atlas_resampled).swapaxes(0, 2)
# 2. center_crop
atlas_crop = center_crop(img=atlas_np, size_ratio=(80, 106, 80)).swapaxes(0, 2)
new_atlas = sitk.GetImageFromArray(atlas_crop)
new_atlas.SetSpacing(atlas_resampled.GetSpacing())
new_atlas.SetDirection(atlas_resampled.GetDirection())
new_atlas.SetOrigin(atlas_resampled.GetOrigin())
output_path_hs_small_atlas = os.path.join(output_path_mask, 'l{}_to_l{}.nii'.format(str(i), str(j)))
sitk.WriteImage(new_atlas, str(output_path_hs_small_atlas))
def plot_hist(image_path_hs='../datasets/LPBA40/LPBA40_rigidly_registered_pairs_histogram_standardization_small'):
for i in list(range(1, 41, 1)):
for j in list(range(1, 41, 1)):
volpath = os.path.join(image_path_hs, 'l{}_to_l{}.nii'.format(str(i), str(j)))
img_sitk = sitk.ReadImage(str(volpath))
img_np = np.clip(sitk.GetArrayFromImage(img_sitk).swapaxes(0, 2), 50, 100)
data = np.reshape(img_np[img_np>50], -1)
print(data.shape)
density = stats.gaussian_kde(data)
xs = np.linspace(40, 110, 70)
density.covariance_factor = lambda: .25
density._compute_covariance()
plt.plot(xs, density(xs))
plt.show()
if __name__ == '__main__':
mapping = calculate_landmarks(image_path='../datasets/LPBA40/LPBA40_rigidly_registered_pairs')
# mapping = np.asarray([1.77635684e-15, 4.02863140e+01, 5.86044434e+01, 6.33688576e+01, 6.66438972e+01, 7.12987107e+01, 7.53526276e+01, 7.96537020e+01, 8.43034770e+01, 8.67112286e+01, 8.91208850e+01, 9.35115887e+01, 1.00000000e+02])
histogram_stardardization_resample_center_crop(mapping=mapping,
input_path='../datasets/LPBA40/LPBA40_rigidly_registered_pairs',
output_path_hs_small='../datasets/LPBA40/LPBA40_rigidly_registered_pairs_histogram_standardization_small',
output_path_mask='../datasets/LPBA40/LPBA40_rigidly_registered_label_pairs_small')
plot_hist()
| os.makedirs(str(output_path_hs_small)) | conditional_block |
preprocessing_lpba40.py | import os
import subprocess
import numpy as np
import SimpleITK as sitk
import matplotlib.pyplot as plt
import scipy.stats as stats
DEFAULT_CUTOFF = 0.01, 0.99
STANDARD_RANGE = 0, 100
def resample_image(itk_image, out_spacing=(2.0, 2.0, 2.0), is_label=False):
original_spacing = itk_image.GetSpacing()
original_size = itk_image.GetSize()
out_size = [int(np.round(original_size[0] * (original_spacing[0] / out_spacing[0]))),
int(np.round(original_size[1] * (original_spacing[1] / out_spacing[1]))),
int(np.round(original_size[2] * (original_spacing[2] / out_spacing[2])))]
resample = sitk.ResampleImageFilter()
resample.SetOutputSpacing(out_spacing)
resample.SetSize(out_size)
resample.SetOutputDirection(itk_image.GetDirection())
resample.SetOutputOrigin(itk_image.GetOrigin())
resample.SetTransform(sitk.Transform())
resample.SetDefaultPixelValue(itk_image.GetPixelIDValue())
if is_label:
resample.SetInterpolator(sitk.sitkNearestNeighbor)
else:
resample.SetInterpolator(sitk.sitkBSpline)
return resample.Execute(itk_image)
def center_crop(img, size_ratio):
x, y, z = img.shape
size_ratio_x, size_ratio_y, size_ratio_z = size_ratio
size_x = size_ratio_x
size_y = size_ratio_y
size_z = size_ratio_z
if x < size_x or y < size_y and z < size_z:
raise ValueError
x1 = 0
y1 = int((y - size_y) / 2)
z1 = int((z - size_z) / 2)
img_crop = img[x1: x1 + size_x, y1: y1 + size_y, z1: z1 + size_z]
return img_crop
def _get_percentiles(percentiles_cutoff):
|
def _get_average_mapping(percentiles_database):
"""Map the landmarks of the database to the chosen range.
Args:
percentiles_database: Percentiles database over which to perform the
averaging.
"""
# Assuming percentiles_database.shape == (num_data_points, num_percentiles)
pc1 = percentiles_database[:, 0]
pc2 = percentiles_database[:, -1]
s1, s2 = STANDARD_RANGE
slopes = (s2 - s1) / (pc2 - pc1)
slopes = np.nan_to_num(slopes)
intercepts = np.mean(s1 - slopes * pc1)
num_images = len(percentiles_database)
final_map = slopes.dot(percentiles_database) / num_images + intercepts
return final_map
def _standardize_cutoff(cutoff):
"""Standardize the cutoff values given in the configuration.
Computes percentile landmark normalization by default.
"""
cutoff = np.asarray(cutoff)
cutoff[0] = max(0., cutoff[0])
cutoff[1] = min(1., cutoff[1])
cutoff[0] = np.min([cutoff[0], 0.09])
cutoff[1] = np.max([cutoff[1], 0.91])
return cutoff
def normalize(array, landmarks, mask=None, cutoff=None, epsilon=1e-5):
cutoff_ = DEFAULT_CUTOFF if cutoff is None else cutoff
mapping = landmarks
data = array
shape = data.shape
data = data.reshape(-1).astype(np.float32)
if mask is None:
mask = np.ones_like(data, np.bool)
mask = mask.reshape(-1)
range_to_use = [0, 1, 2, 4, 5, 6, 7, 8, 10, 11, 12]
quantiles_cutoff = _standardize_cutoff(cutoff_)
percentiles_cutoff = 100 * np.array(quantiles_cutoff)
percentiles = _get_percentiles(percentiles_cutoff)
percentile_values = np.percentile(data[mask], percentiles)
# Apply linear histogram standardization
range_mapping = mapping[range_to_use]
range_perc = percentile_values[range_to_use]
diff_mapping = np.diff(range_mapping)
diff_perc = np.diff(range_perc)
# Handling the case where two landmarks are the same
# for a given input image. This usually happens when
# image background is not removed from the image.
diff_perc[diff_perc < epsilon] = np.inf
affine_map = np.zeros([2, len(range_to_use) - 1])
# Compute slopes of the linear models
affine_map[0] = diff_mapping / diff_perc
# Compute intercepts of the linear models
affine_map[1] = range_mapping[:-1] - affine_map[0] * range_perc[:-1]
bin_id = np.digitize(data, range_perc[1:-1], right=False)
lin_img = affine_map[0, bin_id]
aff_img = affine_map[1, bin_id]
new_img = lin_img * data + aff_img
new_img = new_img.reshape(shape)
new_img = new_img.astype(np.float32)
return new_img
def calculate_landmarks(image_path='../datasets/LPBA40/LPBA40_rigidly_registered_pairs'):
quantiles_cutoff = DEFAULT_CUTOFF
percentiles_cutoff = 100 * np.array(quantiles_cutoff)
percentiles_database = []
percentiles = _get_percentiles(percentiles_cutoff)
count = 1
for i in list(range(1, 41, 1)):
for j in list(range(1, 41, 1)):
img_path = os.path.join(image_path, 'l{}_to_l{}.hdr'.format(str(i), str(j)))
atlas_path = os.path.join(
image_path.replace('LPBA40_rigidly_registered_pairs', 'LPBA40_rigidly_registered_label_pairs'),
'l{}_to_l{}.hdr'.format(str(i), str(j)))
if os.path.exists(img_path) and os.path.exists(atlas_path):
img_sitk = sitk.ReadImage(str(img_path))
img_np = sitk.GetArrayFromImage(img_sitk).swapaxes(0, 2)
mask = img_np > 0
percentile_values = np.percentile(img_np[mask], percentiles)
percentiles_database.append(percentile_values)
count += 1
else:
raise FileNotFoundError
percentiles_database = np.vstack(percentiles_database)
mapping = _get_average_mapping(percentiles_database)
print(mapping)
np.save('../datasets/LPBA40/mapping.npy', mapping)
return mapping
def histogram_stardardization_resample_center_crop(mapping,
input_path='../datasets/LPBA40/LPBA40_rigidly_registered_pairs',
output_path_hs_small='../datasets/LPBA40/LPBA40_rigidly_registered_pairs_histogram_standardization_small',
output_path_mask='../datasets/LPBA40/LPBA40_rigidly_registered_label_pairs_small'):
if not os.path.exists(str(output_path_hs_small)):
os.makedirs(str(output_path_hs_small))
if not os.path.exists(str(output_path_mask)):
os.makedirs(str(output_path_mask))
for i in list(range(1, 41, 1)):
for j in list(range(1, 41, 1)):
# ~~~~~~~~~~~~~~~ images ~~~~~~~~~~~~~~~
volpath = os.path.join(input_path, 'l{}_to_l{}.nii'.format(str(i), str(j)))
img_sitk = sitk.ReadImage(str(volpath))
img_np = sitk.GetArrayFromImage(img_sitk).swapaxes(0, 2)
mask = img_np > 0
# 1. histogram_stardardization
img_np_hs = normalize(img_np, mapping, mask)
img_sitk_hs = sitk.GetImageFromArray(img_np_hs.swapaxes(0, 2))
img_sitk_hs.SetSpacing(img_sitk.GetSpacing())
img_sitk_hs.SetDirection(img_sitk.GetDirection())
img_sitk_hs.SetOrigin(img_sitk.GetOrigin())
# 2. resample
img_sitk_hs_small = resample_image(img_sitk_hs)
img_np_hs_small = sitk.GetArrayFromImage(img_sitk_hs_small).swapaxes(0, 2)
# 3. center_crop
img_crop = center_crop(img=img_np_hs_small, size_ratio=(80, 106, 80)).swapaxes(0, 2)
new_img = sitk.GetImageFromArray(img_crop)
new_img.SetSpacing(img_sitk_hs_small.GetSpacing())
new_img.SetDirection(img_sitk_hs_small.GetDirection())
new_img.SetOrigin(img_sitk_hs_small.GetOrigin())
output_path_hs_small_img = os.path.join(output_path_hs_small, 'l{}_to_l{}.nii'.format(str(i), str(j)))
sitk.WriteImage(new_img, str(output_path_hs_small_img))
# ~~~~~~~~~~~~~~~ masks ~~~~~~~~~~~~~~~
atlas_path = volpath.replace('LPBA40_rigidly_registered_pairs', 'LPBA40_rigidly_registered_label_pairs')
atlas = sitk.ReadImage(str(atlas_path))
# 1. resample
atlas_resampled = resample_image(atlas, is_label=True)
atlas_np = sitk.GetArrayFromImage(atlas_resampled).swapaxes(0, 2)
# 2. center_crop
atlas_crop = center_crop(img=atlas_np, size_ratio=(80, 106, 80)).swapaxes(0, 2)
new_atlas = sitk.GetImageFromArray(atlas_crop)
new_atlas.SetSpacing(atlas_resampled.GetSpacing())
new_atlas.SetDirection(atlas_resampled.GetDirection())
new_atlas.SetOrigin(atlas_resampled.GetOrigin())
output_path_hs_small_atlas = os.path.join(output_path_mask, 'l{}_to_l{}.nii'.format(str(i), str(j)))
sitk.WriteImage(new_atlas, str(output_path_hs_small_atlas))
def plot_hist(image_path_hs='../datasets/LPBA40/LPBA40_rigidly_registered_pairs_histogram_standardization_small'):
for i in list(range(1, 41, 1)):
for j in list(range(1, 41, 1)):
volpath = os.path.join(image_path_hs, 'l{}_to_l{}.nii'.format(str(i), str(j)))
img_sitk = sitk.ReadImage(str(volpath))
img_np = np.clip(sitk.GetArrayFromImage(img_sitk).swapaxes(0, 2), 50, 100)
data = np.reshape(img_np[img_np>50], -1)
print(data.shape)
density = stats.gaussian_kde(data)
xs = np.linspace(40, 110, 70)
density.covariance_factor = lambda: .25
density._compute_covariance()
plt.plot(xs, density(xs))
plt.show()
if __name__ == '__main__':
mapping = calculate_landmarks(image_path='../datasets/LPBA40/LPBA40_rigidly_registered_pairs')
# mapping = np.asarray([1.77635684e-15, 4.02863140e+01, 5.86044434e+01, 6.33688576e+01, 6.66438972e+01, 7.12987107e+01, 7.53526276e+01, 7.96537020e+01, 8.43034770e+01, 8.67112286e+01, 8.91208850e+01, 9.35115887e+01, 1.00000000e+02])
histogram_stardardization_resample_center_crop(mapping=mapping,
input_path='../datasets/LPBA40/LPBA40_rigidly_registered_pairs',
output_path_hs_small='../datasets/LPBA40/LPBA40_rigidly_registered_pairs_histogram_standardization_small',
output_path_mask='../datasets/LPBA40/LPBA40_rigidly_registered_label_pairs_small')
plot_hist()
| quartiles = np.arange(25, 100, 25).tolist()
deciles = np.arange(10, 100, 10).tolist()
all_percentiles = list(percentiles_cutoff) + quartiles + deciles
percentiles = sorted(set(all_percentiles))
return np.array(percentiles) | identifier_body |
preprocessing_lpba40.py | import os
import subprocess
import numpy as np
import SimpleITK as sitk
import matplotlib.pyplot as plt
import scipy.stats as stats
DEFAULT_CUTOFF = 0.01, 0.99
STANDARD_RANGE = 0, 100
def resample_image(itk_image, out_spacing=(2.0, 2.0, 2.0), is_label=False):
original_spacing = itk_image.GetSpacing()
original_size = itk_image.GetSize()
out_size = [int(np.round(original_size[0] * (original_spacing[0] / out_spacing[0]))),
int(np.round(original_size[1] * (original_spacing[1] / out_spacing[1]))),
int(np.round(original_size[2] * (original_spacing[2] / out_spacing[2])))]
resample = sitk.ResampleImageFilter()
resample.SetOutputSpacing(out_spacing)
resample.SetSize(out_size)
resample.SetOutputDirection(itk_image.GetDirection())
resample.SetOutputOrigin(itk_image.GetOrigin())
resample.SetTransform(sitk.Transform())
resample.SetDefaultPixelValue(itk_image.GetPixelIDValue())
if is_label:
resample.SetInterpolator(sitk.sitkNearestNeighbor)
else:
resample.SetInterpolator(sitk.sitkBSpline)
return resample.Execute(itk_image)
def center_crop(img, size_ratio):
x, y, z = img.shape
size_ratio_x, size_ratio_y, size_ratio_z = size_ratio
size_x = size_ratio_x
size_y = size_ratio_y
size_z = size_ratio_z
if x < size_x or y < size_y and z < size_z:
raise ValueError
x1 = 0
y1 = int((y - size_y) / 2)
z1 = int((z - size_z) / 2)
img_crop = img[x1: x1 + size_x, y1: y1 + size_y, z1: z1 + size_z]
return img_crop
def _get_percentiles(percentiles_cutoff):
quartiles = np.arange(25, 100, 25).tolist()
deciles = np.arange(10, 100, 10).tolist()
all_percentiles = list(percentiles_cutoff) + quartiles + deciles
percentiles = sorted(set(all_percentiles))
return np.array(percentiles)
def _get_average_mapping(percentiles_database):
"""Map the landmarks of the database to the chosen range.
Args:
percentiles_database: Percentiles database over which to perform the
averaging.
"""
# Assuming percentiles_database.shape == (num_data_points, num_percentiles)
pc1 = percentiles_database[:, 0]
pc2 = percentiles_database[:, -1]
s1, s2 = STANDARD_RANGE
slopes = (s2 - s1) / (pc2 - pc1)
slopes = np.nan_to_num(slopes)
intercepts = np.mean(s1 - slopes * pc1)
num_images = len(percentiles_database)
final_map = slopes.dot(percentiles_database) / num_images + intercepts
return final_map
def _standardize_cutoff(cutoff):
"""Standardize the cutoff values given in the configuration.
Computes percentile landmark normalization by default.
"""
cutoff = np.asarray(cutoff)
cutoff[0] = max(0., cutoff[0])
cutoff[1] = min(1., cutoff[1])
cutoff[0] = np.min([cutoff[0], 0.09])
cutoff[1] = np.max([cutoff[1], 0.91])
return cutoff
def | (array, landmarks, mask=None, cutoff=None, epsilon=1e-5):
cutoff_ = DEFAULT_CUTOFF if cutoff is None else cutoff
mapping = landmarks
data = array
shape = data.shape
data = data.reshape(-1).astype(np.float32)
if mask is None:
mask = np.ones_like(data, np.bool)
mask = mask.reshape(-1)
range_to_use = [0, 1, 2, 4, 5, 6, 7, 8, 10, 11, 12]
quantiles_cutoff = _standardize_cutoff(cutoff_)
percentiles_cutoff = 100 * np.array(quantiles_cutoff)
percentiles = _get_percentiles(percentiles_cutoff)
percentile_values = np.percentile(data[mask], percentiles)
# Apply linear histogram standardization
range_mapping = mapping[range_to_use]
range_perc = percentile_values[range_to_use]
diff_mapping = np.diff(range_mapping)
diff_perc = np.diff(range_perc)
# Handling the case where two landmarks are the same
# for a given input image. This usually happens when
# image background is not removed from the image.
diff_perc[diff_perc < epsilon] = np.inf
affine_map = np.zeros([2, len(range_to_use) - 1])
# Compute slopes of the linear models
affine_map[0] = diff_mapping / diff_perc
# Compute intercepts of the linear models
affine_map[1] = range_mapping[:-1] - affine_map[0] * range_perc[:-1]
bin_id = np.digitize(data, range_perc[1:-1], right=False)
lin_img = affine_map[0, bin_id]
aff_img = affine_map[1, bin_id]
new_img = lin_img * data + aff_img
new_img = new_img.reshape(shape)
new_img = new_img.astype(np.float32)
return new_img
def calculate_landmarks(image_path='../datasets/LPBA40/LPBA40_rigidly_registered_pairs'):
quantiles_cutoff = DEFAULT_CUTOFF
percentiles_cutoff = 100 * np.array(quantiles_cutoff)
percentiles_database = []
percentiles = _get_percentiles(percentiles_cutoff)
count = 1
for i in list(range(1, 41, 1)):
for j in list(range(1, 41, 1)):
img_path = os.path.join(image_path, 'l{}_to_l{}.hdr'.format(str(i), str(j)))
atlas_path = os.path.join(
image_path.replace('LPBA40_rigidly_registered_pairs', 'LPBA40_rigidly_registered_label_pairs'),
'l{}_to_l{}.hdr'.format(str(i), str(j)))
if os.path.exists(img_path) and os.path.exists(atlas_path):
img_sitk = sitk.ReadImage(str(img_path))
img_np = sitk.GetArrayFromImage(img_sitk).swapaxes(0, 2)
mask = img_np > 0
percentile_values = np.percentile(img_np[mask], percentiles)
percentiles_database.append(percentile_values)
count += 1
else:
raise FileNotFoundError
percentiles_database = np.vstack(percentiles_database)
mapping = _get_average_mapping(percentiles_database)
print(mapping)
np.save('../datasets/LPBA40/mapping.npy', mapping)
return mapping
def histogram_stardardization_resample_center_crop(mapping,
input_path='../datasets/LPBA40/LPBA40_rigidly_registered_pairs',
output_path_hs_small='../datasets/LPBA40/LPBA40_rigidly_registered_pairs_histogram_standardization_small',
output_path_mask='../datasets/LPBA40/LPBA40_rigidly_registered_label_pairs_small'):
if not os.path.exists(str(output_path_hs_small)):
os.makedirs(str(output_path_hs_small))
if not os.path.exists(str(output_path_mask)):
os.makedirs(str(output_path_mask))
for i in list(range(1, 41, 1)):
for j in list(range(1, 41, 1)):
# ~~~~~~~~~~~~~~~ images ~~~~~~~~~~~~~~~
volpath = os.path.join(input_path, 'l{}_to_l{}.nii'.format(str(i), str(j)))
img_sitk = sitk.ReadImage(str(volpath))
img_np = sitk.GetArrayFromImage(img_sitk).swapaxes(0, 2)
mask = img_np > 0
# 1. histogram_stardardization
img_np_hs = normalize(img_np, mapping, mask)
img_sitk_hs = sitk.GetImageFromArray(img_np_hs.swapaxes(0, 2))
img_sitk_hs.SetSpacing(img_sitk.GetSpacing())
img_sitk_hs.SetDirection(img_sitk.GetDirection())
img_sitk_hs.SetOrigin(img_sitk.GetOrigin())
# 2. resample
img_sitk_hs_small = resample_image(img_sitk_hs)
img_np_hs_small = sitk.GetArrayFromImage(img_sitk_hs_small).swapaxes(0, 2)
# 3. center_crop
img_crop = center_crop(img=img_np_hs_small, size_ratio=(80, 106, 80)).swapaxes(0, 2)
new_img = sitk.GetImageFromArray(img_crop)
new_img.SetSpacing(img_sitk_hs_small.GetSpacing())
new_img.SetDirection(img_sitk_hs_small.GetDirection())
new_img.SetOrigin(img_sitk_hs_small.GetOrigin())
output_path_hs_small_img = os.path.join(output_path_hs_small, 'l{}_to_l{}.nii'.format(str(i), str(j)))
sitk.WriteImage(new_img, str(output_path_hs_small_img))
# ~~~~~~~~~~~~~~~ masks ~~~~~~~~~~~~~~~
atlas_path = volpath.replace('LPBA40_rigidly_registered_pairs', 'LPBA40_rigidly_registered_label_pairs')
atlas = sitk.ReadImage(str(atlas_path))
# 1. resample
atlas_resampled = resample_image(atlas, is_label=True)
atlas_np = sitk.GetArrayFromImage(atlas_resampled).swapaxes(0, 2)
# 2. center_crop
atlas_crop = center_crop(img=atlas_np, size_ratio=(80, 106, 80)).swapaxes(0, 2)
new_atlas = sitk.GetImageFromArray(atlas_crop)
new_atlas.SetSpacing(atlas_resampled.GetSpacing())
new_atlas.SetDirection(atlas_resampled.GetDirection())
new_atlas.SetOrigin(atlas_resampled.GetOrigin())
output_path_hs_small_atlas = os.path.join(output_path_mask, 'l{}_to_l{}.nii'.format(str(i), str(j)))
sitk.WriteImage(new_atlas, str(output_path_hs_small_atlas))
def plot_hist(image_path_hs='../datasets/LPBA40/LPBA40_rigidly_registered_pairs_histogram_standardization_small'):
for i in list(range(1, 41, 1)):
for j in list(range(1, 41, 1)):
volpath = os.path.join(image_path_hs, 'l{}_to_l{}.nii'.format(str(i), str(j)))
img_sitk = sitk.ReadImage(str(volpath))
img_np = np.clip(sitk.GetArrayFromImage(img_sitk).swapaxes(0, 2), 50, 100)
data = np.reshape(img_np[img_np>50], -1)
print(data.shape)
density = stats.gaussian_kde(data)
xs = np.linspace(40, 110, 70)
density.covariance_factor = lambda: .25
density._compute_covariance()
plt.plot(xs, density(xs))
plt.show()
if __name__ == '__main__':
mapping = calculate_landmarks(image_path='../datasets/LPBA40/LPBA40_rigidly_registered_pairs')
# mapping = np.asarray([1.77635684e-15, 4.02863140e+01, 5.86044434e+01, 6.33688576e+01, 6.66438972e+01, 7.12987107e+01, 7.53526276e+01, 7.96537020e+01, 8.43034770e+01, 8.67112286e+01, 8.91208850e+01, 9.35115887e+01, 1.00000000e+02])
histogram_stardardization_resample_center_crop(mapping=mapping,
input_path='../datasets/LPBA40/LPBA40_rigidly_registered_pairs',
output_path_hs_small='../datasets/LPBA40/LPBA40_rigidly_registered_pairs_histogram_standardization_small',
output_path_mask='../datasets/LPBA40/LPBA40_rigidly_registered_label_pairs_small')
plot_hist()
| normalize | identifier_name |
git.rs | //---------------------------------------------------------------------------//
// Copyright (c) 2017-2023 Ismael Gutiérrez González. All rights reserved.
//
// This file is part of the Rusted PackFile Manager (RPFM) project,
// which can be found here: https://github.com/Frodo45127/rpfm.
//
// This file is licensed under the MIT license, which can be found here:
// https://github.com/Frodo45127/rpfm/blob/master/LICENSE.
//---------------------------------------------------------------------------//
//! This module contains the code for the limited Git support.
use git2::{Reference, ReferenceFormat, Repository, Signature, StashFlags, build::CheckoutBuilder};
use std::fs::{DirBuilder, File};
use std::io::{BufWriter, Write};
use std::path::{Path, PathBuf};
use std::process::Command as SystemCommand;
use crate::error::{RLibError, Result};
//-------------------------------------------------------------------------------//
// Enums & Structs
//-------------------------------------------------------------------------------//
/// Struct containing the data needed to perform a fetch/pull from a repo.
#[derive(Debug)]
pub struct GitIntegration {
/// Local Path of the repo.
local_path: PathBuf,
/// URL of the repo.
url: String,
/// Branch to fetch/pull.
branch: String,
/// Remote to fetch/pull from.
remote: String,
}
/// Possible responses we can get from a fetch/pull.
#[derive(Debug)]
pub enum GitResponse {
NewUpdate,
NoUpdate,
NoLocalFiles,
Diverged,
}
//---------------------------------------------------------------------------//
// Enum & Structs Implementations
//---------------------------------------------------------------------------//
impl GitIntegration {
/// This function creates a new GitIntegration struct with data for a git operation.
pub fn new(local_path: &Path, url: &str, branch: &str, remote: &str) -> Self {
Self {
local_path: local_path.to_owned(),
url: url.to_owned(),
branch: branch.to_owned(),
remote: remote.to_owned(),
}
}
/// This function tries to initializes a git repo.
pub fn init(&self) -> Result<Repository> {
Repository::init(&self.local_path).map_err(From::from)
}
/// This function generates a gitignore file for the git repo.
///
/// If it already exists, it'll replace the existing file.
pub fn add_gitignore(&self, contents: &str) -> Result<()> {
let mut file = BufWriter::new(File::create(self.local_path.join(".gitignore"))?);
file.write_all(contents.as_bytes()).map_err(From::from)
}
/// This function switches the branch of a `GitIntegration` to the provided refspec.
pub fn checkout_branch(&self, repo: &Repository, refs: &str) -> Result<()> {
let head = repo.head().unwrap();
let oid = head.target().unwrap();
let commit = repo.find_commit(oid)?;
let branch_name = refs.splitn(3, '/').collect::<Vec<_>>()[2].to_owned();
let _ = repo.branch(&branch_name, &commit, false);
let branch_object = repo.revparse_single(refs)?;
repo.checkout_tree(&branch_object, None)?;
repo.set_head(refs)?;
Ok(())
}
/// This function checks if there is a new update for the current repo.
pub fn check_update(&self) -> Result<GitResponse> {
let mut repo = match Repository::open(&self.local_path) {
Ok(repo) => repo,
// If this fails, it means we either we don´t have the repo downloaded, or we have a folder without the .git folder.
Err(_) => return Ok(GitResponse::NoLocalFiles),
};
// Just in case there are loose changes, stash them.
// Ignore a fail on this, as it's possible we don't have contents to stash.
let current_branch_name = Reference::normalize_name(repo.head()?.name().unwrap(), ReferenceFormat::ALLOW_ONELEVEL)?.to_lowercase();
let master_refname = format!("refs/heads/{}", self.branch);
let signature = Signature::now("RPFM Updater", "-")?;
let stash_id = repo.stash_save(&signature, &format!("Stashed changes before checking for updates from branch {current_branch_name}"), Some(StashFlags::INCLUDE_UNTRACKED));
// In case we're not in master, checkout the master branch.
if current_branch_name != master_refname {
self.checkout_branch(&repo, &master_refname)?;
}
// Fetch the info of the master branch.
repo.find_remote(&self.remote)?.fetch(&[&self.branch], None, None)?;
let analysis = {
let fetch_head = repo.find_reference("FETCH_HEAD")?;
let fetch_commit = repo.reference_to_annotated_commit(&fetch_head)?;
repo.merge_analysis(&[&fetch_commit])?
};
// Reset the repo to his original state after the check
if current_branch_name != master_refname {
self.checkout_branch(&repo, ¤t_branch_name)?;
}
if stash_id.is_ok() {
let _ = repo.stash_pop(0, None);
}
if analysis.0.is_up_to_date() {
Ok(GitResponse::NoUpdate)
}
// If the branch is a fast-forward, or has diverged, ask for an update.
else if analysis.0.is_fast_forward() || analysis.0.is_normal() || analysis.0.is_none() || analysis.0.is_unborn() {
Ok(GitResponse::NewUpdate)
}
// Otherwise, it means the branches diverged. In this case, return a diverged.
else {
Ok(GitResponse::Diverged)
}
}
/// This function downloads the latest revision of the current repository.
pub fn update_repo(&self) -> Result<()> {
let mut new_repo = false;
let mut repo = match Repository::open(&self.local_path) {
Ok(repo) => repo,
Err(_) => {
// If it fails to open, it means either we don't have the .git folder, or we don't have a folder at all.
// In either case, recreate it and redownload the repo. No more steps are needed here.
// On windows, remove the read-only flags before doing anything else, or this will fail.
if cfg!(target_os = "windows") {
let path = self.local_path.to_string_lossy().to_string() + "\\*.*";
let _ = SystemCommand::new("attrib").arg("-r").arg(path).arg("/s").output();
}
let _ = std::fs::remove_dir_all(&self.local_path);
DirBuilder::new().recursive(true).create(&self.local_path)?;
match Repository::clone(&self.url, &self.local_path) {
Ok(repo) => {
new_repo = true;
repo
},
Err(_) => return Err(RLibError::GitErrorDownloadFromRepo(self.url.to_owned())),
}
}
};
// Just in case there are loose changes, stash them.
// Ignore a fail on this, as it's possible we don't have contents to stash.
let current_branch_name = Reference::normalize_name(repo.head()?.name().unwrap(), ReferenceFormat::ALLOW_ONELEVEL)?.to_lowercase();
let master_refname = format!("refs/heads/{}", self.branch);
let signature = Signature::now("RPFM Updater", "-")?;
let stash_id = repo.stash_save(&signature, &format!("Stashed changes before update from branch {current_branch_name}"), Some(StashFlags::INCLUDE_UNTRACKED));
// In case we're not in master, checkout the master branch.
if current_branch_name != master_refname {
self.checkout_branch(&repo, &master_refname)?;
}
// If we just cloned a new repo and changed branches, return.
if new_repo {
return Ok(());
}
// If it worked, now we have to do a pull from master. Sadly, git2-rs does not support pull.
// Instead, we kinda force a fast-forward. Made in StackOverflow.
repo.find_remote(&self.remote)?.fetch(&[&self.branch], None, None)?;
let (analysis, fetch_commit_id) = {
let fetch_head = repo.find_reference("FETCH_HEAD")?;
let fetch_commit = repo.reference_to_annotated_commit(&fetch_head)?;
(repo.merge_analysis(&[&fetch_commit])?, fetch_commit.id())
};
// If we're up to date, nothing more is needed.
if analysis.0.is_up_to_date() {
// Reset the repo to his original state after the check
if current_branch_name != master_refname {
self.checkout_branch(&repo, ¤t_branch_name)?;
}
if stash_id.is_ok() {
let _ = repo.stash_pop(0, None);
}
Err(RLibError::GitErrorNoUpdatesAvailable(self.url.to_owned()))
}
// If we can do a fast-forward, we do it. This is the preferred option.
else if analysis.0.is_fast_forward() {
let mut reference = repo.find_reference(&master_refname)?;
reference.set_target(fetch_commit_id, "Fast-Forward")?;
repo.set_head(&master_refname)?;
repo.checkout_head(Some(CheckoutBuilder::default().force())).map_err(From::from)
}
// If not, we face multiple problems:
// - If there are uncommitted changes: covered by the stash.
// - If we're not in the branch: covered by the branch switch.
// - If the branches diverged: this one... the cleanest way to deal with it should be redownload the repo.
else if analysis.0.is_normal() || analysis.0.is_none() || analysis.0.is_unborn() {
// On windows, remove the read-only flags before doing anything else, or this will fail.
if cfg!(target_os = "windows") {
| let _ = std::fs::remove_dir_all(&self.local_path);
self.update_repo()
}
else {
// Reset the repo to his original state after the check
if current_branch_name != master_refname {
self.checkout_branch(&repo, ¤t_branch_name)?;
}
if stash_id.is_ok() {
let _ = repo.stash_pop(0, None);
}
Err(RLibError::GitErrorDownloadFromRepo(self.url.to_owned()))
}
}
}
| let path = self.local_path.to_string_lossy().to_string() + "\\*.*";
let _ = SystemCommand::new("attrib").arg("-r").arg(path).arg("/s").output();
}
| conditional_block |
git.rs | //---------------------------------------------------------------------------//
// Copyright (c) 2017-2023 Ismael Gutiérrez González. All rights reserved.
//
// This file is part of the Rusted PackFile Manager (RPFM) project,
// which can be found here: https://github.com/Frodo45127/rpfm.
//
// This file is licensed under the MIT license, which can be found here:
// https://github.com/Frodo45127/rpfm/blob/master/LICENSE.
//---------------------------------------------------------------------------//
//! This module contains the code for the limited Git support.
use git2::{Reference, ReferenceFormat, Repository, Signature, StashFlags, build::CheckoutBuilder};
use std::fs::{DirBuilder, File};
use std::io::{BufWriter, Write};
use std::path::{Path, PathBuf};
use std::process::Command as SystemCommand;
use crate::error::{RLibError, Result};
//-------------------------------------------------------------------------------//
// Enums & Structs
//-------------------------------------------------------------------------------//
/// Struct containing the data needed to perform a fetch/pull from a repo.
#[derive(Debug)]
pub struct GitIntegration {
/// Local Path of the repo.
local_path: PathBuf,
/// URL of the repo.
url: String,
/// Branch to fetch/pull.
branch: String,
/// Remote to fetch/pull from.
remote: String,
}
/// Possible responses we can get from a fetch/pull.
#[derive(Debug)]
pub enum GitResponse {
NewUpdate,
NoUpdate,
NoLocalFiles,
Diverged,
}
//---------------------------------------------------------------------------//
// Enum & Structs Implementations
//---------------------------------------------------------------------------//
impl GitIntegration {
/// This function creates a new GitIntegration struct with data for a git operation.
pub fn new(local_path: &Path, url: &str, branch: &str, remote: &str) -> Self {
Self {
local_path: local_path.to_owned(),
url: url.to_owned(),
branch: branch.to_owned(),
remote: remote.to_owned(),
}
}
/// This function tries to initializes a git repo.
pub fn init(&self) -> Result<Repository> {
Repository::init(&self.local_path).map_err(From::from)
}
/// This function generates a gitignore file for the git repo.
///
/// If it already exists, it'll replace the existing file.
pub fn ad | self, contents: &str) -> Result<()> {
let mut file = BufWriter::new(File::create(self.local_path.join(".gitignore"))?);
file.write_all(contents.as_bytes()).map_err(From::from)
}
/// This function switches the branch of a `GitIntegration` to the provided refspec.
pub fn checkout_branch(&self, repo: &Repository, refs: &str) -> Result<()> {
let head = repo.head().unwrap();
let oid = head.target().unwrap();
let commit = repo.find_commit(oid)?;
let branch_name = refs.splitn(3, '/').collect::<Vec<_>>()[2].to_owned();
let _ = repo.branch(&branch_name, &commit, false);
let branch_object = repo.revparse_single(refs)?;
repo.checkout_tree(&branch_object, None)?;
repo.set_head(refs)?;
Ok(())
}
/// This function checks if there is a new update for the current repo.
pub fn check_update(&self) -> Result<GitResponse> {
let mut repo = match Repository::open(&self.local_path) {
Ok(repo) => repo,
// If this fails, it means we either we don´t have the repo downloaded, or we have a folder without the .git folder.
Err(_) => return Ok(GitResponse::NoLocalFiles),
};
// Just in case there are loose changes, stash them.
// Ignore a fail on this, as it's possible we don't have contents to stash.
let current_branch_name = Reference::normalize_name(repo.head()?.name().unwrap(), ReferenceFormat::ALLOW_ONELEVEL)?.to_lowercase();
let master_refname = format!("refs/heads/{}", self.branch);
let signature = Signature::now("RPFM Updater", "-")?;
let stash_id = repo.stash_save(&signature, &format!("Stashed changes before checking for updates from branch {current_branch_name}"), Some(StashFlags::INCLUDE_UNTRACKED));
// In case we're not in master, checkout the master branch.
if current_branch_name != master_refname {
self.checkout_branch(&repo, &master_refname)?;
}
// Fetch the info of the master branch.
repo.find_remote(&self.remote)?.fetch(&[&self.branch], None, None)?;
let analysis = {
let fetch_head = repo.find_reference("FETCH_HEAD")?;
let fetch_commit = repo.reference_to_annotated_commit(&fetch_head)?;
repo.merge_analysis(&[&fetch_commit])?
};
// Reset the repo to his original state after the check
if current_branch_name != master_refname {
self.checkout_branch(&repo, ¤t_branch_name)?;
}
if stash_id.is_ok() {
let _ = repo.stash_pop(0, None);
}
if analysis.0.is_up_to_date() {
Ok(GitResponse::NoUpdate)
}
// If the branch is a fast-forward, or has diverged, ask for an update.
else if analysis.0.is_fast_forward() || analysis.0.is_normal() || analysis.0.is_none() || analysis.0.is_unborn() {
Ok(GitResponse::NewUpdate)
}
// Otherwise, it means the branches diverged. In this case, return a diverged.
else {
Ok(GitResponse::Diverged)
}
}
/// This function downloads the latest revision of the current repository.
pub fn update_repo(&self) -> Result<()> {
let mut new_repo = false;
let mut repo = match Repository::open(&self.local_path) {
Ok(repo) => repo,
Err(_) => {
// If it fails to open, it means either we don't have the .git folder, or we don't have a folder at all.
// In either case, recreate it and redownload the repo. No more steps are needed here.
// On windows, remove the read-only flags before doing anything else, or this will fail.
if cfg!(target_os = "windows") {
let path = self.local_path.to_string_lossy().to_string() + "\\*.*";
let _ = SystemCommand::new("attrib").arg("-r").arg(path).arg("/s").output();
}
let _ = std::fs::remove_dir_all(&self.local_path);
DirBuilder::new().recursive(true).create(&self.local_path)?;
match Repository::clone(&self.url, &self.local_path) {
Ok(repo) => {
new_repo = true;
repo
},
Err(_) => return Err(RLibError::GitErrorDownloadFromRepo(self.url.to_owned())),
}
}
};
// Just in case there are loose changes, stash them.
// Ignore a fail on this, as it's possible we don't have contents to stash.
let current_branch_name = Reference::normalize_name(repo.head()?.name().unwrap(), ReferenceFormat::ALLOW_ONELEVEL)?.to_lowercase();
let master_refname = format!("refs/heads/{}", self.branch);
let signature = Signature::now("RPFM Updater", "-")?;
let stash_id = repo.stash_save(&signature, &format!("Stashed changes before update from branch {current_branch_name}"), Some(StashFlags::INCLUDE_UNTRACKED));
// In case we're not in master, checkout the master branch.
if current_branch_name != master_refname {
self.checkout_branch(&repo, &master_refname)?;
}
// If we just cloned a new repo and changed branches, return.
if new_repo {
return Ok(());
}
// If it worked, now we have to do a pull from master. Sadly, git2-rs does not support pull.
// Instead, we kinda force a fast-forward. Made in StackOverflow.
repo.find_remote(&self.remote)?.fetch(&[&self.branch], None, None)?;
let (analysis, fetch_commit_id) = {
let fetch_head = repo.find_reference("FETCH_HEAD")?;
let fetch_commit = repo.reference_to_annotated_commit(&fetch_head)?;
(repo.merge_analysis(&[&fetch_commit])?, fetch_commit.id())
};
// If we're up to date, nothing more is needed.
if analysis.0.is_up_to_date() {
// Reset the repo to his original state after the check
if current_branch_name != master_refname {
self.checkout_branch(&repo, ¤t_branch_name)?;
}
if stash_id.is_ok() {
let _ = repo.stash_pop(0, None);
}
Err(RLibError::GitErrorNoUpdatesAvailable(self.url.to_owned()))
}
// If we can do a fast-forward, we do it. This is the preferred option.
else if analysis.0.is_fast_forward() {
let mut reference = repo.find_reference(&master_refname)?;
reference.set_target(fetch_commit_id, "Fast-Forward")?;
repo.set_head(&master_refname)?;
repo.checkout_head(Some(CheckoutBuilder::default().force())).map_err(From::from)
}
// If not, we face multiple problems:
// - If there are uncommitted changes: covered by the stash.
// - If we're not in the branch: covered by the branch switch.
// - If the branches diverged: this one... the cleanest way to deal with it should be redownload the repo.
else if analysis.0.is_normal() || analysis.0.is_none() || analysis.0.is_unborn() {
// On windows, remove the read-only flags before doing anything else, or this will fail.
if cfg!(target_os = "windows") {
let path = self.local_path.to_string_lossy().to_string() + "\\*.*";
let _ = SystemCommand::new("attrib").arg("-r").arg(path).arg("/s").output();
}
let _ = std::fs::remove_dir_all(&self.local_path);
self.update_repo()
}
else {
// Reset the repo to his original state after the check
if current_branch_name != master_refname {
self.checkout_branch(&repo, ¤t_branch_name)?;
}
if stash_id.is_ok() {
let _ = repo.stash_pop(0, None);
}
Err(RLibError::GitErrorDownloadFromRepo(self.url.to_owned()))
}
}
}
| d_gitignore(& | identifier_name |
git.rs | //---------------------------------------------------------------------------//
// Copyright (c) 2017-2023 Ismael Gutiérrez González. All rights reserved.
//
// This file is part of the Rusted PackFile Manager (RPFM) project,
// which can be found here: https://github.com/Frodo45127/rpfm.
//
// This file is licensed under the MIT license, which can be found here:
// https://github.com/Frodo45127/rpfm/blob/master/LICENSE.
//---------------------------------------------------------------------------//
//! This module contains the code for the limited Git support.
use git2::{Reference, ReferenceFormat, Repository, Signature, StashFlags, build::CheckoutBuilder};
use std::fs::{DirBuilder, File};
use std::io::{BufWriter, Write};
use std::path::{Path, PathBuf};
use std::process::Command as SystemCommand;
use crate::error::{RLibError, Result};
//-------------------------------------------------------------------------------//
// Enums & Structs
//-------------------------------------------------------------------------------//
/// Struct containing the data needed to perform a fetch/pull from a repo.
#[derive(Debug)]
pub struct GitIntegration {
/// Local Path of the repo.
local_path: PathBuf,
/// URL of the repo.
url: String,
/// Branch to fetch/pull.
branch: String,
/// Remote to fetch/pull from.
remote: String,
}
/// Possible responses we can get from a fetch/pull.
#[derive(Debug)]
pub enum GitResponse {
NewUpdate,
NoUpdate,
NoLocalFiles,
Diverged,
}
//---------------------------------------------------------------------------//
// Enum & Structs Implementations
//---------------------------------------------------------------------------//
impl GitIntegration {
/// This function creates a new GitIntegration struct with data for a git operation.
pub fn new(local_path: &Path, url: &str, branch: &str, remote: &str) -> Self {
Self {
local_path: local_path.to_owned(),
url: url.to_owned(),
branch: branch.to_owned(),
remote: remote.to_owned(),
}
}
/// This function tries to initializes a git repo.
pub fn init(&self) -> Result<Repository> {
Repository::init(&self.local_path).map_err(From::from)
}
/// This function generates a gitignore file for the git repo.
///
/// If it already exists, it'll replace the existing file.
pub fn add_gitignore(&self, contents: &str) -> Result<()> {
let mut file = BufWriter::new(File::create(self.local_path.join(".gitignore"))?);
file.write_all(contents.as_bytes()).map_err(From::from)
}
/// This function switches the branch of a `GitIntegration` to the provided refspec.
pub fn checkout_branch(&self, repo: &Repository, refs: &str) -> Result<()> {
let head = repo.head().unwrap();
let oid = head.target().unwrap();
let commit = repo.find_commit(oid)?;
let branch_name = refs.splitn(3, '/').collect::<Vec<_>>()[2].to_owned();
let _ = repo.branch(&branch_name, &commit, false);
let branch_object = repo.revparse_single(refs)?;
repo.checkout_tree(&branch_object, None)?;
repo.set_head(refs)?;
Ok(())
}
/// This function checks if there is a new update for the current repo.
pub fn check_update(&self) -> Result<GitResponse> { | Err(_) => return Ok(GitResponse::NoLocalFiles),
};
// Just in case there are loose changes, stash them.
// Ignore a fail on this, as it's possible we don't have contents to stash.
let current_branch_name = Reference::normalize_name(repo.head()?.name().unwrap(), ReferenceFormat::ALLOW_ONELEVEL)?.to_lowercase();
let master_refname = format!("refs/heads/{}", self.branch);
let signature = Signature::now("RPFM Updater", "-")?;
let stash_id = repo.stash_save(&signature, &format!("Stashed changes before checking for updates from branch {current_branch_name}"), Some(StashFlags::INCLUDE_UNTRACKED));
// In case we're not in master, checkout the master branch.
if current_branch_name != master_refname {
self.checkout_branch(&repo, &master_refname)?;
}
// Fetch the info of the master branch.
repo.find_remote(&self.remote)?.fetch(&[&self.branch], None, None)?;
let analysis = {
let fetch_head = repo.find_reference("FETCH_HEAD")?;
let fetch_commit = repo.reference_to_annotated_commit(&fetch_head)?;
repo.merge_analysis(&[&fetch_commit])?
};
// Reset the repo to his original state after the check
if current_branch_name != master_refname {
self.checkout_branch(&repo, ¤t_branch_name)?;
}
if stash_id.is_ok() {
let _ = repo.stash_pop(0, None);
}
if analysis.0.is_up_to_date() {
Ok(GitResponse::NoUpdate)
}
// If the branch is a fast-forward, or has diverged, ask for an update.
else if analysis.0.is_fast_forward() || analysis.0.is_normal() || analysis.0.is_none() || analysis.0.is_unborn() {
Ok(GitResponse::NewUpdate)
}
// Otherwise, it means the branches diverged. In this case, return a diverged.
else {
Ok(GitResponse::Diverged)
}
}
/// This function downloads the latest revision of the current repository.
pub fn update_repo(&self) -> Result<()> {
let mut new_repo = false;
let mut repo = match Repository::open(&self.local_path) {
Ok(repo) => repo,
Err(_) => {
// If it fails to open, it means either we don't have the .git folder, or we don't have a folder at all.
// In either case, recreate it and redownload the repo. No more steps are needed here.
// On windows, remove the read-only flags before doing anything else, or this will fail.
if cfg!(target_os = "windows") {
let path = self.local_path.to_string_lossy().to_string() + "\\*.*";
let _ = SystemCommand::new("attrib").arg("-r").arg(path).arg("/s").output();
}
let _ = std::fs::remove_dir_all(&self.local_path);
DirBuilder::new().recursive(true).create(&self.local_path)?;
match Repository::clone(&self.url, &self.local_path) {
Ok(repo) => {
new_repo = true;
repo
},
Err(_) => return Err(RLibError::GitErrorDownloadFromRepo(self.url.to_owned())),
}
}
};
// Just in case there are loose changes, stash them.
// Ignore a fail on this, as it's possible we don't have contents to stash.
let current_branch_name = Reference::normalize_name(repo.head()?.name().unwrap(), ReferenceFormat::ALLOW_ONELEVEL)?.to_lowercase();
let master_refname = format!("refs/heads/{}", self.branch);
let signature = Signature::now("RPFM Updater", "-")?;
let stash_id = repo.stash_save(&signature, &format!("Stashed changes before update from branch {current_branch_name}"), Some(StashFlags::INCLUDE_UNTRACKED));
// In case we're not in master, checkout the master branch.
if current_branch_name != master_refname {
self.checkout_branch(&repo, &master_refname)?;
}
// If we just cloned a new repo and changed branches, return.
if new_repo {
return Ok(());
}
// If it worked, now we have to do a pull from master. Sadly, git2-rs does not support pull.
// Instead, we kinda force a fast-forward. Made in StackOverflow.
repo.find_remote(&self.remote)?.fetch(&[&self.branch], None, None)?;
let (analysis, fetch_commit_id) = {
let fetch_head = repo.find_reference("FETCH_HEAD")?;
let fetch_commit = repo.reference_to_annotated_commit(&fetch_head)?;
(repo.merge_analysis(&[&fetch_commit])?, fetch_commit.id())
};
// If we're up to date, nothing more is needed.
if analysis.0.is_up_to_date() {
// Reset the repo to his original state after the check
if current_branch_name != master_refname {
self.checkout_branch(&repo, ¤t_branch_name)?;
}
if stash_id.is_ok() {
let _ = repo.stash_pop(0, None);
}
Err(RLibError::GitErrorNoUpdatesAvailable(self.url.to_owned()))
}
// If we can do a fast-forward, we do it. This is the preferred option.
else if analysis.0.is_fast_forward() {
let mut reference = repo.find_reference(&master_refname)?;
reference.set_target(fetch_commit_id, "Fast-Forward")?;
repo.set_head(&master_refname)?;
repo.checkout_head(Some(CheckoutBuilder::default().force())).map_err(From::from)
}
// If not, we face multiple problems:
// - If there are uncommitted changes: covered by the stash.
// - If we're not in the branch: covered by the branch switch.
// - If the branches diverged: this one... the cleanest way to deal with it should be redownload the repo.
else if analysis.0.is_normal() || analysis.0.is_none() || analysis.0.is_unborn() {
// On windows, remove the read-only flags before doing anything else, or this will fail.
if cfg!(target_os = "windows") {
let path = self.local_path.to_string_lossy().to_string() + "\\*.*";
let _ = SystemCommand::new("attrib").arg("-r").arg(path).arg("/s").output();
}
let _ = std::fs::remove_dir_all(&self.local_path);
self.update_repo()
}
else {
// Reset the repo to his original state after the check
if current_branch_name != master_refname {
self.checkout_branch(&repo, ¤t_branch_name)?;
}
if stash_id.is_ok() {
let _ = repo.stash_pop(0, None);
}
Err(RLibError::GitErrorDownloadFromRepo(self.url.to_owned()))
}
}
} | let mut repo = match Repository::open(&self.local_path) {
Ok(repo) => repo,
// If this fails, it means we either we don´t have the repo downloaded, or we have a folder without the .git folder. | random_line_split |
RPMutils.py | """Utility functions and global parameters for the model."""
import time
import math
import os
from ca.nengo.model import SimulationMode
from ca.nengo.model import Units
from ca.nengo.model.impl import FunctionInput
from ca.nengo.model.nef.impl import NEFEnsembleFactoryImpl
from ca.nengo.math import PDFTools
from ca.nengo.math.impl import IndicatorPDF
from ca.nengo.math.impl import GaussianPDF
from ca.nengo.math.impl import ConstantFunction
from java.lang import System
#note: the following are all constants, in that they are set for a given model.
#however, this does not guarantee that their values will be the ones listed below.
#in particular, the batch module sets these constants to (possibly) different
#values before starting each run. these values should be thought of as defaults.
#the number of dimensions to use in HRR vectors
VECTOR_DIMENSION = 30
#the number of neurons to use per dimension
NEURONS_PER_DIMENSION = 25
#random seed to use when generating vocabulary vectors
VOCABULARY_SEED = 100
#the minimum confidence value we will require in order to decide we have a match in cleanup memory
MIN_CONFIDENCE = 0.7
#the minimum value we will require to pick either the same or different result
#SAMEDIFF_CHOICE = 0.5
#the minimum score we will require in order to decide we have found a correct rule
CORRECTNESS_THRESHOLD_FIG = 0.8
CORRECTNESS_THRESHOLD_SEQ = 0.7
CORRECTNESS_THRESHOLD_SET = 0.9
#whether or not to add probes when building networks
USE_PROBES = True
#the time (in seconds) for which we present each input
STEP_SIZE = 0.2
#the size (number of base words) of vocabulary to use (we have different versions depending on how many base words are allowed)
VOCAB_SIZE = 80
#true if we are running the controller, false if we are just running the individual modules
RUN_WITH_CONTROLLER = True
#the maximum similarity we will allow when generating a set of vectors
VECTOR_SIMILARITY = 1.0
#if running jobs concurrently, use this to ensure they don't use overlapping data files
JOB_ID = 0
#the mode to run model in
SIMULATION_MODE = SimulationMode.DEFAULT
#whether or not to use cleanup memory
USE_CLEANUP = False
#whether or not to update the cleanup memory after a run
DYNAMIC_MEMORY = False
#the number of threads we want to run with
NUM_THREADS = 0
#whether or not to split n-dimensional populations into n 1-dimensional populations
SPLIT_DIMENSIONS = True
#the threshold to use when detecting same features in figure solver
SAME_THRESHOLD = 1.0
#the threshold to use when detecting different features in figure solver
DIFF_THRESHOLD = 0.9
#the minimum difference required to differentiate between matrix answers
SIMILARITY_THRESHOLD = 0.0
#the folder in which to read/write all files throughout the run
FOLDER_NAME = "test"
#scale on the total number of neurons
NEURON_SCALE = 1.0
#whether or not to do same/diff calculations in neurons
NEURO_SAMEDIFF = True
#whether or not to load rules from file
LOAD_RULES = False
#kill the given percentage of neurons after generation
KILL_NEURONS = 0.0
#returns the appropriate correctness threshold for each module
def correctnessThreshold(module):
if module == "figuresolver":
return CORRECTNESS_THRESHOLD_FIG
if module == "sequencesolver":
return CORRECTNESS_THRESHOLD_SEQ
if module == "setsolver":
return CORRECTNESS_THRESHOLD_SET
#returns a string containing the current value of all the parameters
def getParameterSettings():
keys = getParameterSettings.func_globals.keys()
values = getParameterSettings.func_globals.values()
parms = [[keys[i],values[i]] for i,key in enumerate(keys) if key.isupper()]
return ",".join(["=".join([str(x) for x in pair]) for pair in parms])
#output from origin (used to update cleanup memory)
def cleanupDataFile():
return os.path.join(CURR_LOCATION, "data", FOLDER_NAME, "cleanupoutputdata_" + str(JOB_ID) + ".txt")
#file containing word-vector associations
def vocabFile(d, numwords, seed):
return os.path.join(CURR_LOCATION, "data", FOLDER_NAME, "RPMvocab_" + str(numwords) + "x" + str(d) + "_" + str(seed) + ".txt")
#file containing vectors in cleanup memory
def cleanupFile(d, numwords):
return os.path.join(CURR_LOCATION, "data", FOLDER_NAME, "cleanup_" + str(numwords) + "x" + str(d) + "_" + str(JOB_ID) + ".txt") | #prediction of blank cell from neural module
def hypothesisFile(modulename):
return os.path.join(CURR_LOCATION, "data", FOLDER_NAME, modulename + "_hypothesis_" + str(JOB_ID) + ".txt")
#file to record the rules used to solve a matrix
def ruleFile():
return os.path.join(CURR_LOCATION, "data", FOLDER_NAME, "rules_" + str(JOB_ID) + ".txt")
#vocabulary present in the matrix
def matrixVocabFile():
return os.path.join(CURR_LOCATION, "data", FOLDER_NAME, "matrixvocab_" + str(JOB_ID) + ".txt")
#returns a dxd matrix with the given value along the diagonal
def eye(d, val):
identity = [[0 for x in range(d)] for x in range(d)]
for i in range(d):
identity[i][i] = val
return(identity)
def str2floatlist(str):
return [float(word) for word in str.split()]
def floatlist2str(floatlist):
return " ".join([str(x) for x in floatlist])
#generates a random d-dimensional vector
def genVector(d):
result = [PDFTools.sampleFloat(GaussianPDF()) for i in range(d)]
result = normalize(result)
return result
#creates a function which outputs a random unit vector
def makeInputVector(name, d, randomSeed=None):
vec = []
if randomSeed == None:
randomSeed = long(time.clock()*100000000000000000)
if randomSeed > -1:
PDFTools.setSeed(randomSeed)
length = 0
for i in range(d):
tmp = PDFTools.sampleFloat(GaussianPDF())
vec = vec + [tmp]
length = length + tmp**2
length = math.sqrt(length)
f = []
for i in range(d):
vec[i] = vec[i] / length
f = f + [ConstantFunction(1, vec[i])]
if randomSeed > -1:
PDFTools.setSeed(long(time.clock()*1000000000000000))
print vec
return(FunctionInput(name, f, Units.UNK))
#create function inputs, where each function outputs one of the given vectors
def makeInputVectors(names, vectors):
return [FunctionInput(names[i], [ConstantFunction(1,x) for x in vec], Units.UNK) for i,vec in enumerate(vectors)]
#load vectors from a file and create corresponding output functions
def loadInputVectors(filename):
file = open(filename)
vectors = [str2floatlist(line) for line in file]
file.close()
return makeInputVectors(["vec_" + str(i) for i in range(len(vectors))], vectors)
#an NEF ensemble factory with more evaluation points than normal
class NEFMorePoints(NEFEnsembleFactoryImpl):
def getNumEvalPoints(self, d):
#add shortcut so that it doesn't waste time evaluating a bunch of points when its in direct mode
if SIMULATION_MODE == SimulationMode.DIRECT:
return 1
pointsPerDim = [0, 1000, 2000]
if d < 3:
return(pointsPerDim[d])
else:
return(d*500)
#default ensemble factory used in the model
def defaultEnsembleFactory():
ef=NEFMorePoints()
ef.nodeFactory.tauRC = 0.02
ef.nodeFactory.tauRef = 0.002
ef.nodeFactory.maxRate=IndicatorPDF(200,500)
ef.nodeFactory.intercept=IndicatorPDF(-1, 1)
ef.beQuiet()
return(ef)
#returns all the probes containing name
def findMatchingProbes(probes, name, subname=None):
result = []
for probe in probes:
if name in probe.getTarget().getName() or ((probe.getEnsembleName() != None) and (probe.getEnsembleName().count(name) > 0)):
result = result + [probe]
if subname == None:
return result
else:
return findMatchingProbes(result, subname)
#calculate circular convolution of vec1 and vec2
def cconv(vec1, vec2):
if vec1 == None:
return(vec2)
if vec2 == None:
return(vec1)
d = len(vec1)
result = [0 for i in range(d)]
for i in range(d):
for j in range(d):
result[i] = result[i] + vec1[j] * vec2[(i - j) % d]
return(result)
#calculate vector addition of vec1 and vec2
def vecsum(vec1, vec2):
if vec1 == None:
return(vec2)
if vec2 == None:
return(vec1)
return [x+y for x,y in zip(vec1,vec2)]
#calculate length of vec
def length(vec):
return math.sqrt(sum([x**2 for x in vec]))
#normalize vec
def normalize(vec):
l = length(vec)
if l == 0:
return vec
return [x/l for x in vec]
#calculate similarity between vec1 and vec2
def similarity(vec1, vec2):
if len(vec1) != len(vec2):
System.err.println("vectors not the same length in RPMutils.similarity(), something is wrong")
System.err.println(str(len(vec1)) + " " + str(len(vec2)))
return sum([x*y for x,y in zip(vec1,vec2)])
def ainv(vec):
newvec = []
for i,val in enumerate(vec):
newvec += [vec[-i % len(vec)]]
return newvec
#calculate mean value of vec
def mean(vec):
if len(vec) == 0:
return 0.0
return float(sum(vec)) / len(vec)
#calculate the words in vocab that vec1 and vec2 have in common
def calcSame(vec1, vec2, vocab, threshold, weight1, weight2):
vec1 = [x*weight1 for x in vec1]
vec2 = [x*weight2 for x in vec2]
vec = vecsum(vec1,vec2)
ans = [0 for i in range(len(vec))]
for word in vocab:
if similarity(vec,word) > threshold:
ans = vecsum(ans,word)
return normalize(ans)
#calculate the words in vocab that vec1 and vec2 have distinct
def calcDiff(vec1, vec2, vocab, threshold, weight1, weight2):
vec1 = [x*weight1 for x in vec1]
vec2 = [x*weight2 for x in vec2]
vec = [x-y for x,y in zip(vec1,vec2)]
ans = [0 for i in range(len(vec))]
for word in vocab:
if similarity(vec,word) > threshold or similarity(vec,word) < -threshold:
ans = vecsum(ans,word)
return normalize(ans) |
#rule output from neural module
def resultFile(modulename):
return os.path.join(CURR_LOCATION, "data", FOLDER_NAME, modulename + "_result_" + str(JOB_ID) + ".txt")
| random_line_split |
RPMutils.py | """Utility functions and global parameters for the model."""
import time
import math
import os
from ca.nengo.model import SimulationMode
from ca.nengo.model import Units
from ca.nengo.model.impl import FunctionInput
from ca.nengo.model.nef.impl import NEFEnsembleFactoryImpl
from ca.nengo.math import PDFTools
from ca.nengo.math.impl import IndicatorPDF
from ca.nengo.math.impl import GaussianPDF
from ca.nengo.math.impl import ConstantFunction
from java.lang import System
#note: the following are all constants, in that they are set for a given model.
#however, this does not guarantee that their values will be the ones listed below.
#in particular, the batch module sets these constants to (possibly) different
#values before starting each run. these values should be thought of as defaults.
#the number of dimensions to use in HRR vectors
VECTOR_DIMENSION = 30
#the number of neurons to use per dimension
NEURONS_PER_DIMENSION = 25
#random seed to use when generating vocabulary vectors
VOCABULARY_SEED = 100
#the minimum confidence value we will require in order to decide we have a match in cleanup memory
MIN_CONFIDENCE = 0.7
#the minimum value we will require to pick either the same or different result
#SAMEDIFF_CHOICE = 0.5
#the minimum score we will require in order to decide we have found a correct rule
CORRECTNESS_THRESHOLD_FIG = 0.8
CORRECTNESS_THRESHOLD_SEQ = 0.7
CORRECTNESS_THRESHOLD_SET = 0.9
#whether or not to add probes when building networks
USE_PROBES = True
#the time (in seconds) for which we present each input
STEP_SIZE = 0.2
#the size (number of base words) of vocabulary to use (we have different versions depending on how many base words are allowed)
VOCAB_SIZE = 80
#true if we are running the controller, false if we are just running the individual modules
RUN_WITH_CONTROLLER = True
#the maximum similarity we will allow when generating a set of vectors
VECTOR_SIMILARITY = 1.0
#if running jobs concurrently, use this to ensure they don't use overlapping data files
JOB_ID = 0
#the mode to run model in
SIMULATION_MODE = SimulationMode.DEFAULT
#whether or not to use cleanup memory
USE_CLEANUP = False
#whether or not to update the cleanup memory after a run
DYNAMIC_MEMORY = False
#the number of threads we want to run with
NUM_THREADS = 0
#whether or not to split n-dimensional populations into n 1-dimensional populations
SPLIT_DIMENSIONS = True
#the threshold to use when detecting same features in figure solver
SAME_THRESHOLD = 1.0
#the threshold to use when detecting different features in figure solver
DIFF_THRESHOLD = 0.9
#the minimum difference required to differentiate between matrix answers
SIMILARITY_THRESHOLD = 0.0
#the folder in which to read/write all files throughout the run
FOLDER_NAME = "test"
#scale on the total number of neurons
NEURON_SCALE = 1.0
#whether or not to do same/diff calculations in neurons
NEURO_SAMEDIFF = True
#whether or not to load rules from file
LOAD_RULES = False
#kill the given percentage of neurons after generation
KILL_NEURONS = 0.0
#returns the appropriate correctness threshold for each module
def correctnessThreshold(module):
if module == "figuresolver":
return CORRECTNESS_THRESHOLD_FIG
if module == "sequencesolver":
return CORRECTNESS_THRESHOLD_SEQ
if module == "setsolver":
return CORRECTNESS_THRESHOLD_SET
#returns a string containing the current value of all the parameters
def getParameterSettings():
keys = getParameterSettings.func_globals.keys()
values = getParameterSettings.func_globals.values()
parms = [[keys[i],values[i]] for i,key in enumerate(keys) if key.isupper()]
return ",".join(["=".join([str(x) for x in pair]) for pair in parms])
#output from origin (used to update cleanup memory)
def cleanupDataFile():
return os.path.join(CURR_LOCATION, "data", FOLDER_NAME, "cleanupoutputdata_" + str(JOB_ID) + ".txt")
#file containing word-vector associations
def vocabFile(d, numwords, seed):
return os.path.join(CURR_LOCATION, "data", FOLDER_NAME, "RPMvocab_" + str(numwords) + "x" + str(d) + "_" + str(seed) + ".txt")
#file containing vectors in cleanup memory
def cleanupFile(d, numwords):
return os.path.join(CURR_LOCATION, "data", FOLDER_NAME, "cleanup_" + str(numwords) + "x" + str(d) + "_" + str(JOB_ID) + ".txt")
#rule output from neural module
def resultFile(modulename):
return os.path.join(CURR_LOCATION, "data", FOLDER_NAME, modulename + "_result_" + str(JOB_ID) + ".txt")
#prediction of blank cell from neural module
def hypothesisFile(modulename):
return os.path.join(CURR_LOCATION, "data", FOLDER_NAME, modulename + "_hypothesis_" + str(JOB_ID) + ".txt")
#file to record the rules used to solve a matrix
def ruleFile():
return os.path.join(CURR_LOCATION, "data", FOLDER_NAME, "rules_" + str(JOB_ID) + ".txt")
#vocabulary present in the matrix
def matrixVocabFile():
return os.path.join(CURR_LOCATION, "data", FOLDER_NAME, "matrixvocab_" + str(JOB_ID) + ".txt")
#returns a dxd matrix with the given value along the diagonal
def eye(d, val):
identity = [[0 for x in range(d)] for x in range(d)]
for i in range(d):
identity[i][i] = val
return(identity)
def str2floatlist(str):
return [float(word) for word in str.split()]
def floatlist2str(floatlist):
return " ".join([str(x) for x in floatlist])
#generates a random d-dimensional vector
def genVector(d):
result = [PDFTools.sampleFloat(GaussianPDF()) for i in range(d)]
result = normalize(result)
return result
#creates a function which outputs a random unit vector
def makeInputVector(name, d, randomSeed=None):
vec = []
if randomSeed == None:
randomSeed = long(time.clock()*100000000000000000)
if randomSeed > -1:
PDFTools.setSeed(randomSeed)
length = 0
for i in range(d):
tmp = PDFTools.sampleFloat(GaussianPDF())
vec = vec + [tmp]
length = length + tmp**2
length = math.sqrt(length)
f = []
for i in range(d):
vec[i] = vec[i] / length
f = f + [ConstantFunction(1, vec[i])]
if randomSeed > -1:
PDFTools.setSeed(long(time.clock()*1000000000000000))
print vec
return(FunctionInput(name, f, Units.UNK))
#create function inputs, where each function outputs one of the given vectors
def | (names, vectors):
return [FunctionInput(names[i], [ConstantFunction(1,x) for x in vec], Units.UNK) for i,vec in enumerate(vectors)]
#load vectors from a file and create corresponding output functions
def loadInputVectors(filename):
file = open(filename)
vectors = [str2floatlist(line) for line in file]
file.close()
return makeInputVectors(["vec_" + str(i) for i in range(len(vectors))], vectors)
#an NEF ensemble factory with more evaluation points than normal
class NEFMorePoints(NEFEnsembleFactoryImpl):
def getNumEvalPoints(self, d):
#add shortcut so that it doesn't waste time evaluating a bunch of points when its in direct mode
if SIMULATION_MODE == SimulationMode.DIRECT:
return 1
pointsPerDim = [0, 1000, 2000]
if d < 3:
return(pointsPerDim[d])
else:
return(d*500)
#default ensemble factory used in the model
def defaultEnsembleFactory():
ef=NEFMorePoints()
ef.nodeFactory.tauRC = 0.02
ef.nodeFactory.tauRef = 0.002
ef.nodeFactory.maxRate=IndicatorPDF(200,500)
ef.nodeFactory.intercept=IndicatorPDF(-1, 1)
ef.beQuiet()
return(ef)
#returns all the probes containing name
def findMatchingProbes(probes, name, subname=None):
result = []
for probe in probes:
if name in probe.getTarget().getName() or ((probe.getEnsembleName() != None) and (probe.getEnsembleName().count(name) > 0)):
result = result + [probe]
if subname == None:
return result
else:
return findMatchingProbes(result, subname)
#calculate circular convolution of vec1 and vec2
def cconv(vec1, vec2):
if vec1 == None:
return(vec2)
if vec2 == None:
return(vec1)
d = len(vec1)
result = [0 for i in range(d)]
for i in range(d):
for j in range(d):
result[i] = result[i] + vec1[j] * vec2[(i - j) % d]
return(result)
#calculate vector addition of vec1 and vec2
def vecsum(vec1, vec2):
if vec1 == None:
return(vec2)
if vec2 == None:
return(vec1)
return [x+y for x,y in zip(vec1,vec2)]
#calculate length of vec
def length(vec):
return math.sqrt(sum([x**2 for x in vec]))
#normalize vec
def normalize(vec):
l = length(vec)
if l == 0:
return vec
return [x/l for x in vec]
#calculate similarity between vec1 and vec2
def similarity(vec1, vec2):
if len(vec1) != len(vec2):
System.err.println("vectors not the same length in RPMutils.similarity(), something is wrong")
System.err.println(str(len(vec1)) + " " + str(len(vec2)))
return sum([x*y for x,y in zip(vec1,vec2)])
def ainv(vec):
newvec = []
for i,val in enumerate(vec):
newvec += [vec[-i % len(vec)]]
return newvec
#calculate mean value of vec
def mean(vec):
if len(vec) == 0:
return 0.0
return float(sum(vec)) / len(vec)
#calculate the words in vocab that vec1 and vec2 have in common
def calcSame(vec1, vec2, vocab, threshold, weight1, weight2):
vec1 = [x*weight1 for x in vec1]
vec2 = [x*weight2 for x in vec2]
vec = vecsum(vec1,vec2)
ans = [0 for i in range(len(vec))]
for word in vocab:
if similarity(vec,word) > threshold:
ans = vecsum(ans,word)
return normalize(ans)
#calculate the words in vocab that vec1 and vec2 have distinct
def calcDiff(vec1, vec2, vocab, threshold, weight1, weight2):
vec1 = [x*weight1 for x in vec1]
vec2 = [x*weight2 for x in vec2]
vec = [x-y for x,y in zip(vec1,vec2)]
ans = [0 for i in range(len(vec))]
for word in vocab:
if similarity(vec,word) > threshold or similarity(vec,word) < -threshold:
ans = vecsum(ans,word)
return normalize(ans)
| makeInputVectors | identifier_name |
RPMutils.py | """Utility functions and global parameters for the model."""
import time
import math
import os
from ca.nengo.model import SimulationMode
from ca.nengo.model import Units
from ca.nengo.model.impl import FunctionInput
from ca.nengo.model.nef.impl import NEFEnsembleFactoryImpl
from ca.nengo.math import PDFTools
from ca.nengo.math.impl import IndicatorPDF
from ca.nengo.math.impl import GaussianPDF
from ca.nengo.math.impl import ConstantFunction
from java.lang import System
#note: the following are all constants, in that they are set for a given model.
#however, this does not guarantee that their values will be the ones listed below.
#in particular, the batch module sets these constants to (possibly) different
#values before starting each run. these values should be thought of as defaults.
#the number of dimensions to use in HRR vectors
VECTOR_DIMENSION = 30
#the number of neurons to use per dimension
NEURONS_PER_DIMENSION = 25
#random seed to use when generating vocabulary vectors
VOCABULARY_SEED = 100
#the minimum confidence value we will require in order to decide we have a match in cleanup memory
MIN_CONFIDENCE = 0.7
#the minimum value we will require to pick either the same or different result
#SAMEDIFF_CHOICE = 0.5
#the minimum score we will require in order to decide we have found a correct rule
CORRECTNESS_THRESHOLD_FIG = 0.8
CORRECTNESS_THRESHOLD_SEQ = 0.7
CORRECTNESS_THRESHOLD_SET = 0.9
#whether or not to add probes when building networks
USE_PROBES = True
#the time (in seconds) for which we present each input
STEP_SIZE = 0.2
#the size (number of base words) of vocabulary to use (we have different versions depending on how many base words are allowed)
VOCAB_SIZE = 80
#true if we are running the controller, false if we are just running the individual modules
RUN_WITH_CONTROLLER = True
#the maximum similarity we will allow when generating a set of vectors
VECTOR_SIMILARITY = 1.0
#if running jobs concurrently, use this to ensure they don't use overlapping data files
JOB_ID = 0
#the mode to run model in
SIMULATION_MODE = SimulationMode.DEFAULT
#whether or not to use cleanup memory
USE_CLEANUP = False
#whether or not to update the cleanup memory after a run
DYNAMIC_MEMORY = False
#the number of threads we want to run with
NUM_THREADS = 0
#whether or not to split n-dimensional populations into n 1-dimensional populations
SPLIT_DIMENSIONS = True
#the threshold to use when detecting same features in figure solver
SAME_THRESHOLD = 1.0
#the threshold to use when detecting different features in figure solver
DIFF_THRESHOLD = 0.9
#the minimum difference required to differentiate between matrix answers
SIMILARITY_THRESHOLD = 0.0
#the folder in which to read/write all files throughout the run
FOLDER_NAME = "test"
#scale on the total number of neurons
NEURON_SCALE = 1.0
#whether or not to do same/diff calculations in neurons
NEURO_SAMEDIFF = True
#whether or not to load rules from file
LOAD_RULES = False
#kill the given percentage of neurons after generation
KILL_NEURONS = 0.0
#returns the appropriate correctness threshold for each module
def correctnessThreshold(module):
if module == "figuresolver":
return CORRECTNESS_THRESHOLD_FIG
if module == "sequencesolver":
return CORRECTNESS_THRESHOLD_SEQ
if module == "setsolver":
return CORRECTNESS_THRESHOLD_SET
#returns a string containing the current value of all the parameters
def getParameterSettings():
keys = getParameterSettings.func_globals.keys()
values = getParameterSettings.func_globals.values()
parms = [[keys[i],values[i]] for i,key in enumerate(keys) if key.isupper()]
return ",".join(["=".join([str(x) for x in pair]) for pair in parms])
#output from origin (used to update cleanup memory)
def cleanupDataFile():
return os.path.join(CURR_LOCATION, "data", FOLDER_NAME, "cleanupoutputdata_" + str(JOB_ID) + ".txt")
#file containing word-vector associations
def vocabFile(d, numwords, seed):
return os.path.join(CURR_LOCATION, "data", FOLDER_NAME, "RPMvocab_" + str(numwords) + "x" + str(d) + "_" + str(seed) + ".txt")
#file containing vectors in cleanup memory
def cleanupFile(d, numwords):
return os.path.join(CURR_LOCATION, "data", FOLDER_NAME, "cleanup_" + str(numwords) + "x" + str(d) + "_" + str(JOB_ID) + ".txt")
#rule output from neural module
def resultFile(modulename):
return os.path.join(CURR_LOCATION, "data", FOLDER_NAME, modulename + "_result_" + str(JOB_ID) + ".txt")
#prediction of blank cell from neural module
def hypothesisFile(modulename):
return os.path.join(CURR_LOCATION, "data", FOLDER_NAME, modulename + "_hypothesis_" + str(JOB_ID) + ".txt")
#file to record the rules used to solve a matrix
def ruleFile():
return os.path.join(CURR_LOCATION, "data", FOLDER_NAME, "rules_" + str(JOB_ID) + ".txt")
#vocabulary present in the matrix
def matrixVocabFile():
return os.path.join(CURR_LOCATION, "data", FOLDER_NAME, "matrixvocab_" + str(JOB_ID) + ".txt")
#returns a dxd matrix with the given value along the diagonal
def eye(d, val):
identity = [[0 for x in range(d)] for x in range(d)]
for i in range(d):
identity[i][i] = val
return(identity)
def str2floatlist(str):
return [float(word) for word in str.split()]
def floatlist2str(floatlist):
return " ".join([str(x) for x in floatlist])
#generates a random d-dimensional vector
def genVector(d):
result = [PDFTools.sampleFloat(GaussianPDF()) for i in range(d)]
result = normalize(result)
return result
#creates a function which outputs a random unit vector
def makeInputVector(name, d, randomSeed=None):
vec = []
if randomSeed == None:
randomSeed = long(time.clock()*100000000000000000)
if randomSeed > -1:
PDFTools.setSeed(randomSeed)
length = 0
for i in range(d):
tmp = PDFTools.sampleFloat(GaussianPDF())
vec = vec + [tmp]
length = length + tmp**2
length = math.sqrt(length)
f = []
for i in range(d):
vec[i] = vec[i] / length
f = f + [ConstantFunction(1, vec[i])]
if randomSeed > -1:
PDFTools.setSeed(long(time.clock()*1000000000000000))
print vec
return(FunctionInput(name, f, Units.UNK))
#create function inputs, where each function outputs one of the given vectors
def makeInputVectors(names, vectors):
return [FunctionInput(names[i], [ConstantFunction(1,x) for x in vec], Units.UNK) for i,vec in enumerate(vectors)]
#load vectors from a file and create corresponding output functions
def loadInputVectors(filename):
file = open(filename)
vectors = [str2floatlist(line) for line in file]
file.close()
return makeInputVectors(["vec_" + str(i) for i in range(len(vectors))], vectors)
#an NEF ensemble factory with more evaluation points than normal
class NEFMorePoints(NEFEnsembleFactoryImpl):
def getNumEvalPoints(self, d):
#add shortcut so that it doesn't waste time evaluating a bunch of points when its in direct mode
if SIMULATION_MODE == SimulationMode.DIRECT:
return 1
pointsPerDim = [0, 1000, 2000]
if d < 3:
return(pointsPerDim[d])
else:
return(d*500)
#default ensemble factory used in the model
def defaultEnsembleFactory():
ef=NEFMorePoints()
ef.nodeFactory.tauRC = 0.02
ef.nodeFactory.tauRef = 0.002
ef.nodeFactory.maxRate=IndicatorPDF(200,500)
ef.nodeFactory.intercept=IndicatorPDF(-1, 1)
ef.beQuiet()
return(ef)
#returns all the probes containing name
def findMatchingProbes(probes, name, subname=None):
result = []
for probe in probes:
if name in probe.getTarget().getName() or ((probe.getEnsembleName() != None) and (probe.getEnsembleName().count(name) > 0)):
result = result + [probe]
if subname == None:
return result
else:
return findMatchingProbes(result, subname)
#calculate circular convolution of vec1 and vec2
def cconv(vec1, vec2):
if vec1 == None:
return(vec2)
if vec2 == None:
return(vec1)
d = len(vec1)
result = [0 for i in range(d)]
for i in range(d):
for j in range(d):
result[i] = result[i] + vec1[j] * vec2[(i - j) % d]
return(result)
#calculate vector addition of vec1 and vec2
def vecsum(vec1, vec2):
if vec1 == None:
return(vec2)
if vec2 == None:
return(vec1)
return [x+y for x,y in zip(vec1,vec2)]
#calculate length of vec
def length(vec):
|
#normalize vec
def normalize(vec):
l = length(vec)
if l == 0:
return vec
return [x/l for x in vec]
#calculate similarity between vec1 and vec2
def similarity(vec1, vec2):
if len(vec1) != len(vec2):
System.err.println("vectors not the same length in RPMutils.similarity(), something is wrong")
System.err.println(str(len(vec1)) + " " + str(len(vec2)))
return sum([x*y for x,y in zip(vec1,vec2)])
def ainv(vec):
newvec = []
for i,val in enumerate(vec):
newvec += [vec[-i % len(vec)]]
return newvec
#calculate mean value of vec
def mean(vec):
if len(vec) == 0:
return 0.0
return float(sum(vec)) / len(vec)
#calculate the words in vocab that vec1 and vec2 have in common
def calcSame(vec1, vec2, vocab, threshold, weight1, weight2):
vec1 = [x*weight1 for x in vec1]
vec2 = [x*weight2 for x in vec2]
vec = vecsum(vec1,vec2)
ans = [0 for i in range(len(vec))]
for word in vocab:
if similarity(vec,word) > threshold:
ans = vecsum(ans,word)
return normalize(ans)
#calculate the words in vocab that vec1 and vec2 have distinct
def calcDiff(vec1, vec2, vocab, threshold, weight1, weight2):
vec1 = [x*weight1 for x in vec1]
vec2 = [x*weight2 for x in vec2]
vec = [x-y for x,y in zip(vec1,vec2)]
ans = [0 for i in range(len(vec))]
for word in vocab:
if similarity(vec,word) > threshold or similarity(vec,word) < -threshold:
ans = vecsum(ans,word)
return normalize(ans)
| return math.sqrt(sum([x**2 for x in vec])) | identifier_body |
RPMutils.py | """Utility functions and global parameters for the model."""
import time
import math
import os
from ca.nengo.model import SimulationMode
from ca.nengo.model import Units
from ca.nengo.model.impl import FunctionInput
from ca.nengo.model.nef.impl import NEFEnsembleFactoryImpl
from ca.nengo.math import PDFTools
from ca.nengo.math.impl import IndicatorPDF
from ca.nengo.math.impl import GaussianPDF
from ca.nengo.math.impl import ConstantFunction
from java.lang import System
#note: the following are all constants, in that they are set for a given model.
#however, this does not guarantee that their values will be the ones listed below.
#in particular, the batch module sets these constants to (possibly) different
#values before starting each run. these values should be thought of as defaults.
#the number of dimensions to use in HRR vectors
VECTOR_DIMENSION = 30
#the number of neurons to use per dimension
NEURONS_PER_DIMENSION = 25
#random seed to use when generating vocabulary vectors
VOCABULARY_SEED = 100
#the minimum confidence value we will require in order to decide we have a match in cleanup memory
MIN_CONFIDENCE = 0.7
#the minimum value we will require to pick either the same or different result
#SAMEDIFF_CHOICE = 0.5
#the minimum score we will require in order to decide we have found a correct rule
CORRECTNESS_THRESHOLD_FIG = 0.8
CORRECTNESS_THRESHOLD_SEQ = 0.7
CORRECTNESS_THRESHOLD_SET = 0.9
#whether or not to add probes when building networks
USE_PROBES = True
#the time (in seconds) for which we present each input
STEP_SIZE = 0.2
#the size (number of base words) of vocabulary to use (we have different versions depending on how many base words are allowed)
VOCAB_SIZE = 80
#true if we are running the controller, false if we are just running the individual modules
RUN_WITH_CONTROLLER = True
#the maximum similarity we will allow when generating a set of vectors
VECTOR_SIMILARITY = 1.0
#if running jobs concurrently, use this to ensure they don't use overlapping data files
JOB_ID = 0
#the mode to run model in
SIMULATION_MODE = SimulationMode.DEFAULT
#whether or not to use cleanup memory
USE_CLEANUP = False
#whether or not to update the cleanup memory after a run
DYNAMIC_MEMORY = False
#the number of threads we want to run with
NUM_THREADS = 0
#whether or not to split n-dimensional populations into n 1-dimensional populations
SPLIT_DIMENSIONS = True
#the threshold to use when detecting same features in figure solver
SAME_THRESHOLD = 1.0
#the threshold to use when detecting different features in figure solver
DIFF_THRESHOLD = 0.9
#the minimum difference required to differentiate between matrix answers
SIMILARITY_THRESHOLD = 0.0
#the folder in which to read/write all files throughout the run
FOLDER_NAME = "test"
#scale on the total number of neurons
NEURON_SCALE = 1.0
#whether or not to do same/diff calculations in neurons
NEURO_SAMEDIFF = True
#whether or not to load rules from file
LOAD_RULES = False
#kill the given percentage of neurons after generation
KILL_NEURONS = 0.0
#returns the appropriate correctness threshold for each module
def correctnessThreshold(module):
if module == "figuresolver":
return CORRECTNESS_THRESHOLD_FIG
if module == "sequencesolver":
return CORRECTNESS_THRESHOLD_SEQ
if module == "setsolver":
return CORRECTNESS_THRESHOLD_SET
#returns a string containing the current value of all the parameters
def getParameterSettings():
keys = getParameterSettings.func_globals.keys()
values = getParameterSettings.func_globals.values()
parms = [[keys[i],values[i]] for i,key in enumerate(keys) if key.isupper()]
return ",".join(["=".join([str(x) for x in pair]) for pair in parms])
#output from origin (used to update cleanup memory)
def cleanupDataFile():
return os.path.join(CURR_LOCATION, "data", FOLDER_NAME, "cleanupoutputdata_" + str(JOB_ID) + ".txt")
#file containing word-vector associations
def vocabFile(d, numwords, seed):
return os.path.join(CURR_LOCATION, "data", FOLDER_NAME, "RPMvocab_" + str(numwords) + "x" + str(d) + "_" + str(seed) + ".txt")
#file containing vectors in cleanup memory
def cleanupFile(d, numwords):
return os.path.join(CURR_LOCATION, "data", FOLDER_NAME, "cleanup_" + str(numwords) + "x" + str(d) + "_" + str(JOB_ID) + ".txt")
#rule output from neural module
def resultFile(modulename):
return os.path.join(CURR_LOCATION, "data", FOLDER_NAME, modulename + "_result_" + str(JOB_ID) + ".txt")
#prediction of blank cell from neural module
def hypothesisFile(modulename):
return os.path.join(CURR_LOCATION, "data", FOLDER_NAME, modulename + "_hypothesis_" + str(JOB_ID) + ".txt")
#file to record the rules used to solve a matrix
def ruleFile():
return os.path.join(CURR_LOCATION, "data", FOLDER_NAME, "rules_" + str(JOB_ID) + ".txt")
#vocabulary present in the matrix
def matrixVocabFile():
return os.path.join(CURR_LOCATION, "data", FOLDER_NAME, "matrixvocab_" + str(JOB_ID) + ".txt")
#returns a dxd matrix with the given value along the diagonal
def eye(d, val):
identity = [[0 for x in range(d)] for x in range(d)]
for i in range(d):
identity[i][i] = val
return(identity)
def str2floatlist(str):
return [float(word) for word in str.split()]
def floatlist2str(floatlist):
return " ".join([str(x) for x in floatlist])
#generates a random d-dimensional vector
def genVector(d):
result = [PDFTools.sampleFloat(GaussianPDF()) for i in range(d)]
result = normalize(result)
return result
#creates a function which outputs a random unit vector
def makeInputVector(name, d, randomSeed=None):
vec = []
if randomSeed == None:
randomSeed = long(time.clock()*100000000000000000)
if randomSeed > -1:
PDFTools.setSeed(randomSeed)
length = 0
for i in range(d):
tmp = PDFTools.sampleFloat(GaussianPDF())
vec = vec + [tmp]
length = length + tmp**2
length = math.sqrt(length)
f = []
for i in range(d):
vec[i] = vec[i] / length
f = f + [ConstantFunction(1, vec[i])]
if randomSeed > -1:
PDFTools.setSeed(long(time.clock()*1000000000000000))
print vec
return(FunctionInput(name, f, Units.UNK))
#create function inputs, where each function outputs one of the given vectors
def makeInputVectors(names, vectors):
return [FunctionInput(names[i], [ConstantFunction(1,x) for x in vec], Units.UNK) for i,vec in enumerate(vectors)]
#load vectors from a file and create corresponding output functions
def loadInputVectors(filename):
file = open(filename)
vectors = [str2floatlist(line) for line in file]
file.close()
return makeInputVectors(["vec_" + str(i) for i in range(len(vectors))], vectors)
#an NEF ensemble factory with more evaluation points than normal
class NEFMorePoints(NEFEnsembleFactoryImpl):
def getNumEvalPoints(self, d):
#add shortcut so that it doesn't waste time evaluating a bunch of points when its in direct mode
if SIMULATION_MODE == SimulationMode.DIRECT:
return 1
pointsPerDim = [0, 1000, 2000]
if d < 3:
return(pointsPerDim[d])
else:
return(d*500)
#default ensemble factory used in the model
def defaultEnsembleFactory():
ef=NEFMorePoints()
ef.nodeFactory.tauRC = 0.02
ef.nodeFactory.tauRef = 0.002
ef.nodeFactory.maxRate=IndicatorPDF(200,500)
ef.nodeFactory.intercept=IndicatorPDF(-1, 1)
ef.beQuiet()
return(ef)
#returns all the probes containing name
def findMatchingProbes(probes, name, subname=None):
result = []
for probe in probes:
if name in probe.getTarget().getName() or ((probe.getEnsembleName() != None) and (probe.getEnsembleName().count(name) > 0)):
result = result + [probe]
if subname == None:
return result
else:
return findMatchingProbes(result, subname)
#calculate circular convolution of vec1 and vec2
def cconv(vec1, vec2):
if vec1 == None:
return(vec2)
if vec2 == None:
return(vec1)
d = len(vec1)
result = [0 for i in range(d)]
for i in range(d):
for j in range(d):
result[i] = result[i] + vec1[j] * vec2[(i - j) % d]
return(result)
#calculate vector addition of vec1 and vec2
def vecsum(vec1, vec2):
if vec1 == None:
return(vec2)
if vec2 == None:
return(vec1)
return [x+y for x,y in zip(vec1,vec2)]
#calculate length of vec
def length(vec):
return math.sqrt(sum([x**2 for x in vec]))
#normalize vec
def normalize(vec):
l = length(vec)
if l == 0:
return vec
return [x/l for x in vec]
#calculate similarity between vec1 and vec2
def similarity(vec1, vec2):
if len(vec1) != len(vec2):
System.err.println("vectors not the same length in RPMutils.similarity(), something is wrong")
System.err.println(str(len(vec1)) + " " + str(len(vec2)))
return sum([x*y for x,y in zip(vec1,vec2)])
def ainv(vec):
newvec = []
for i,val in enumerate(vec):
newvec += [vec[-i % len(vec)]]
return newvec
#calculate mean value of vec
def mean(vec):
if len(vec) == 0:
|
return float(sum(vec)) / len(vec)
#calculate the words in vocab that vec1 and vec2 have in common
def calcSame(vec1, vec2, vocab, threshold, weight1, weight2):
vec1 = [x*weight1 for x in vec1]
vec2 = [x*weight2 for x in vec2]
vec = vecsum(vec1,vec2)
ans = [0 for i in range(len(vec))]
for word in vocab:
if similarity(vec,word) > threshold:
ans = vecsum(ans,word)
return normalize(ans)
#calculate the words in vocab that vec1 and vec2 have distinct
def calcDiff(vec1, vec2, vocab, threshold, weight1, weight2):
vec1 = [x*weight1 for x in vec1]
vec2 = [x*weight2 for x in vec2]
vec = [x-y for x,y in zip(vec1,vec2)]
ans = [0 for i in range(len(vec))]
for word in vocab:
if similarity(vec,word) > threshold or similarity(vec,word) < -threshold:
ans = vecsum(ans,word)
return normalize(ans)
| return 0.0 | conditional_block |
runtime.py | # Copyright (c) 2015 - 2022, Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
"""Tools to support runtime implementations that use the GEOPM service
"""
import math
import time
import subprocess # nosec
import sys
import shlex
from . import pio
class TimedLoop:
"""Object that can be iterated over to run a timed loop
Use in a for loop to execute a fixed number of timed delays. The
overhead time for executing what is inside of the loop is
accounted for. Calls to time.sleep() are made to delay until the
targeted end time for each iteration.
Example:
>>> from time import time
>>> time_0 = time()
>>> for index in TimedLoop(0.1, 10):
... print(f'{index}: {time() - time_0}')
...
0: 0.0008680820465087891
1: 0.10126090049743652
2: 0.20174455642700195
3: 0.30123186111450195
4: 0.4010961055755615
5: 0.5020360946655273
6: 0.6011238098144531
7: 0.7011349201202393
8: 0.8020164966583252
9: 0.9015650749206543
10: 1.0021190643310547
"""
def __init__(self, period, num_period=None):
"""Constructor for timed loop object
The number of loops executed is one greater than the number of
time intervals requested, and that the first iteration is not
delayed. The total amount of time spanned by the loop is the
product of the two input parameters.
To create an infinite loop, specify num_period is None.
Args:
period (float): Target interval for the loop execution in
units of seconds.
num_period (int): Number of time periods spanned by the
loop. The total loop time is
num_periods * period, but since there is
no delay in the first loop, there will
be num_period + 1 loop iterations.
"""
if period < 0.0:
raise RuntimeError('Specified period is invalid. Must be >= 0.')
if num_period is not None:
if num_period < 0:
raise RuntimeError('Specified num_period is invalid. Must be > 0.')
if not isinstance(num_period, int):
raise ValueError('num_period must be a whole number.')
self._period = period
self._num_loop = num_period
# Add one to ensure:
# total_time == num_loop * period
# because we do not delay the start iteration
if self._num_loop is not None:
self._num_loop += 1
def __iter__(self):
"""Set up a timed loop
Iteration method for timed loop. This iterator can be used in
a for statement to execute the loop periodically.
"""
self._loop_idx = 0
self._target_time = time.time()
return self
def __next__(self):
"""Sleep until next targeted time for loop and update counter
"""
result = self._loop_idx
if self._loop_idx == self._num_loop:
raise StopIteration
if self._loop_idx != 0:
sleep_time = self._target_time - time.time()
if sleep_time > 0:
self.wait(sleep_time)
self._target_time += self._period
self._loop_idx += 1
return result
def wait(self, timeout):
"""Pass-through to time.sleep()
Args:
timeout (float): Target interval for the loop execution in
units of seconds.
"""
time.sleep(timeout)
class PIDTimedLoop(TimedLoop):
def __init__(self, pid, period, num_period=None):
"""Similar to the TimedLoop but stop when subprocess ends
The number of loops executed is one greater than the number of
time intervals requested, and that the first iteration is not
delayed. The total amount of time spanned by the loop is the
product of the two input parameters.
To create an infinite loop, specify num_period is None.
Loop will always terminate when the subprocess pid terminates.
Args:
pid (Popen): Object returned by subprocess.Popen() constructor.
period (float): Target interval for the loop execution in
units of seconds.
num_period (int): Number of time periods spanned by the
loop. The total loop time is
num_periods * period, but since there is
no delay in the first loop, there will
be num_period + 1 loop iterations.
"""
super(PIDTimedLoop, self).__init__(period, num_period)
self._pid = pid
self._is_active = pid.poll() is None
def wait(self, timeout):
"""Wait for timeout seconds or until pid ends
Args:
timeout (float): Target interval for the loop execution in
units of seconds.
Raises:
StopIteration: When last call to wait termintated due to
the process ending
"""
if not self._is_active:
raise StopIteration
try:
self._pid.wait(timeout=timeout)
self._is_active = False
except subprocess.TimeoutExpired:
pass
class Agent:
"""Base class that documents the interfaces required by an agent
Agent objects are used to initialize a Controller object and
define the control algorithm.
"""
def __init__(self):
raise NotImplementedError('Agent is an abstract base class')
def get_signals(self):
"""Get list of read requests
The returned signals will be sampled from the platform by the
Controller and passed into Agent.update().
Returns:
list((str, int, int)): List of request tuples. Each
request comprises a signal name,
domain type, and domain index.
"""
raise NotImplementedError('Agent is an abstract base class')
def get_controls(self):
"""Get list of control requests
The returned controls will be set in the platform by the Controller
based on the return value from Agent.update().
Returns:
list((str, int, int)): List of request tuples. Each
request comprises a control name,
domain type, and domain index.
"""
raise NotImplementedError('Agent is an abstract base class')
def run_begin(self, policy, profile):
"""Called by Controller at the start of each run
The policy for the run is passed through to the agent from the
Controller.run() input. For some agents, the policy may always be
None.
Args:
policy (object): The Agent specific policy provided to the
Controller.run() method.
profile (str): Profile name to associate with the report
"""
raise NotImplementedError('Agent is an abstract base class')
def policy_repr(self, policy):
"""Create a string representation of a policy suitable for printing
"""
return policy.__repr__()
def run_end(self):
"""Called by the Controller at the end of each run
The result of calling the get_report() method after run_end() should
reflect the same report until the next call to run_end(). This report
will document the measurements made between the last calls to
run_begin() and run_end(). Each call to run_end() will follow a
previous call to run_begin(). The run_end() method will be called by
the Controller even if the run resulted in an error that raises an
exception. In this way resources associated with a single run can be
released when the run_end() method is called.
"""
raise NotImplementedError('Agent is an abstract base class')
def update(self, signals):
"""Called periodically by the Controller
The signals that specified by get_signals() will be passed as inputs
to the method by the Controller. The update() method will be called
periodically and the interval is set by the value returned by
Agent.get_period().
Args:
signals (list(float)): Recently read signal values
Returns:
list(float): Control values for next control interval
"""
raise NotImplementedError('Agent is an abstract base class')
def get_period(self):
"""Get the target time interval for the control loop
Returns:
float: Time interval in seconds
"""
raise NotImplementedError('Agent is an abstract base class')
def get_report(self):
"""Summary of all data collected by calls to update()
The report covers the interval of time between the last two calls to
Agent.begin_run() / Agent.end_run(). Until the next call to
Agent.begin_run(), the same report will be returned by this method.
The Controller.run() method will return this report upon completion of
the run.
Returns:
str: Human readable report
"""
raise NotImplementedError('Agent is an abstract base class')
class Controller:
"""Class that supports a runtime control algorithm
"""
def __init__(self, agent, timeout=0):
"""Controller constructor
Args:
agent (Agent): Object that conforms to the Agent class
interface
timeout (float): The agent algorithm will run for the full
duration of the application execution if timeout
is 0. Setting the timeout to a non-zero value
will end the agent algorithm after the specified
period of time or when the application ends,
whichever occurs first.
"""
if not isinstance(agent, Agent):
raise ValueError('agent must be a subclass of Agent.')
if timeout < 0:
raise ValueError('timeout must be >= 0')
self._agent = agent
self._signals = agent.get_signals()
self._controls = agent.get_controls()
self._signals_idx = []
self._controls_idx = []
self._update_period = agent.get_period()
self._num_update = None
if timeout != 0:
self._num_update = math.ceil(timeout / self._update_period)
self._returncode = None
def push_all(self):
self._signals_idx = [pio.push_signal(*ss) for ss in self._signals]
self._controls_idx = [pio.push_control(*cc) for cc in self._controls]
def read_all_signals(self):
"""Sample for all signals pushed with pio
Returns:
list(float): Sampled values for each signal
"""
return [pio.sample(signal_idx)
for signal_idx in self._signals_idx]
def returncode(self):
"""Get the return code of the application process
Returns:
int: Return code of app process
"""
if self._returncode is None:
raise RuntimeError('App process is still running')
return self._returncode
def run(self, argv, policy=None, profile=None):
| """Execute control loop defined by agent
Interfaces with PlatformIO directly through the geopmdpy.pio module.
Args:
argv (list(str)): Arguments for application that is executed
policy (object): Policy for Agent to use during the run
Returns:
str: Human readable report created by agent
"""
sys.stderr.write('<geopmdpy> RUN BEGIN\n')
if profile is None:
profile = ' '.join([shlex.quote(arg) for arg in argv])
try:
pio.save_control()
self.push_all()
pid = subprocess.Popen(argv)
self._agent.run_begin(policy, profile)
for loop_idx in PIDTimedLoop(pid, self._update_period, self._num_update):
pio.read_batch()
signals = self.read_all_signals()
new_settings = self._agent.update(signals)
if pid.poll() is not None:
break
for control_idx, new_setting in zip(self._controls_idx, new_settings):
pio.adjust(control_idx, new_setting)
pio.write_batch()
self._agent.run_end()
except:
raise
finally:
pio.restore_control()
self._returncode = pid.returncode
sys.stderr.write(f'<geopmdpy> RUN END, return: {self._returncode}\n')
return self._agent.get_report() | identifier_body | |
runtime.py | # Copyright (c) 2015 - 2022, Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
"""Tools to support runtime implementations that use the GEOPM service
"""
import math
import time
import subprocess # nosec
import sys
import shlex
from . import pio
class TimedLoop:
"""Object that can be iterated over to run a timed loop
Use in a for loop to execute a fixed number of timed delays. The
overhead time for executing what is inside of the loop is
accounted for. Calls to time.sleep() are made to delay until the
targeted end time for each iteration.
Example:
>>> from time import time
>>> time_0 = time()
>>> for index in TimedLoop(0.1, 10):
... print(f'{index}: {time() - time_0}')
...
0: 0.0008680820465087891
1: 0.10126090049743652
2: 0.20174455642700195
3: 0.30123186111450195
4: 0.4010961055755615
5: 0.5020360946655273
6: 0.6011238098144531
7: 0.7011349201202393
8: 0.8020164966583252
9: 0.9015650749206543
10: 1.0021190643310547
"""
def __init__(self, period, num_period=None):
"""Constructor for timed loop object
The number of loops executed is one greater than the number of
time intervals requested, and that the first iteration is not
delayed. The total amount of time spanned by the loop is the
product of the two input parameters.
To create an infinite loop, specify num_period is None.
Args:
period (float): Target interval for the loop execution in
units of seconds.
num_period (int): Number of time periods spanned by the
loop. The total loop time is
num_periods * period, but since there is
no delay in the first loop, there will
be num_period + 1 loop iterations.
"""
if period < 0.0:
raise RuntimeError('Specified period is invalid. Must be >= 0.')
if num_period is not None:
if num_period < 0:
raise RuntimeError('Specified num_period is invalid. Must be > 0.')
if not isinstance(num_period, int):
raise ValueError('num_period must be a whole number.')
self._period = period
self._num_loop = num_period
# Add one to ensure:
# total_time == num_loop * period
# because we do not delay the start iteration
if self._num_loop is not None:
self._num_loop += 1
def __iter__(self):
"""Set up a timed loop
Iteration method for timed loop. This iterator can be used in
a for statement to execute the loop periodically.
"""
self._loop_idx = 0
self._target_time = time.time()
return self
def __next__(self):
"""Sleep until next targeted time for loop and update counter
"""
result = self._loop_idx
if self._loop_idx == self._num_loop:
raise StopIteration
if self._loop_idx != 0:
sleep_time = self._target_time - time.time()
if sleep_time > 0:
self.wait(sleep_time)
self._target_time += self._period
self._loop_idx += 1
return result
def wait(self, timeout):
"""Pass-through to time.sleep()
Args:
timeout (float): Target interval for the loop execution in
units of seconds.
"""
time.sleep(timeout)
class PIDTimedLoop(TimedLoop):
def __init__(self, pid, period, num_period=None):
"""Similar to the TimedLoop but stop when subprocess ends
The number of loops executed is one greater than the number of
time intervals requested, and that the first iteration is not
delayed. The total amount of time spanned by the loop is the
product of the two input parameters.
To create an infinite loop, specify num_period is None.
Loop will always terminate when the subprocess pid terminates.
Args:
pid (Popen): Object returned by subprocess.Popen() constructor.
period (float): Target interval for the loop execution in
units of seconds.
num_period (int): Number of time periods spanned by the
loop. The total loop time is
num_periods * period, but since there is
no delay in the first loop, there will
be num_period + 1 loop iterations.
"""
super(PIDTimedLoop, self).__init__(period, num_period)
self._pid = pid
self._is_active = pid.poll() is None
def wait(self, timeout):
"""Wait for timeout seconds or until pid ends
Args:
timeout (float): Target interval for the loop execution in
units of seconds.
Raises:
StopIteration: When last call to wait termintated due to
the process ending
"""
if not self._is_active:
raise StopIteration
try:
self._pid.wait(timeout=timeout)
self._is_active = False
except subprocess.TimeoutExpired:
pass
class Agent:
"""Base class that documents the interfaces required by an agent
Agent objects are used to initialize a Controller object and
define the control algorithm.
"""
def __init__(self):
raise NotImplementedError('Agent is an abstract base class')
def get_signals(self):
"""Get list of read requests
The returned signals will be sampled from the platform by the
Controller and passed into Agent.update().
Returns:
list((str, int, int)): List of request tuples. Each
request comprises a signal name,
domain type, and domain index.
"""
raise NotImplementedError('Agent is an abstract base class')
def get_controls(self):
"""Get list of control requests
The returned controls will be set in the platform by the Controller
based on the return value from Agent.update().
Returns:
list((str, int, int)): List of request tuples. Each
request comprises a control name,
domain type, and domain index.
"""
raise NotImplementedError('Agent is an abstract base class')
def run_begin(self, policy, profile):
"""Called by Controller at the start of each run
The policy for the run is passed through to the agent from the
Controller.run() input. For some agents, the policy may always be
None.
Args:
policy (object): The Agent specific policy provided to the
Controller.run() method.
profile (str): Profile name to associate with the report
"""
raise NotImplementedError('Agent is an abstract base class')
def policy_repr(self, policy):
"""Create a string representation of a policy suitable for printing
"""
return policy.__repr__()
def run_end(self):
"""Called by the Controller at the end of each run
The result of calling the get_report() method after run_end() should
reflect the same report until the next call to run_end(). This report
will document the measurements made between the last calls to
run_begin() and run_end(). Each call to run_end() will follow a
previous call to run_begin(). The run_end() method will be called by
the Controller even if the run resulted in an error that raises an
exception. In this way resources associated with a single run can be
released when the run_end() method is called.
"""
raise NotImplementedError('Agent is an abstract base class')
def update(self, signals):
"""Called periodically by the Controller
The signals that specified by get_signals() will be passed as inputs
to the method by the Controller. The update() method will be called
periodically and the interval is set by the value returned by
Agent.get_period().
Args:
signals (list(float)): Recently read signal values
Returns:
list(float): Control values for next control interval
"""
raise NotImplementedError('Agent is an abstract base class')
def get_period(self):
"""Get the target time interval for the control loop
Returns:
float: Time interval in seconds
"""
raise NotImplementedError('Agent is an abstract base class')
def get_report(self):
"""Summary of all data collected by calls to update()
The report covers the interval of time between the last two calls to
Agent.begin_run() / Agent.end_run(). Until the next call to
Agent.begin_run(), the same report will be returned by this method.
The Controller.run() method will return this report upon completion of
the run.
Returns:
str: Human readable report
"""
raise NotImplementedError('Agent is an abstract base class')
class Controller:
"""Class that supports a runtime control algorithm
"""
def __init__(self, agent, timeout=0):
"""Controller constructor
Args:
agent (Agent): Object that conforms to the Agent class
interface
timeout (float): The agent algorithm will run for the full
duration of the application execution if timeout
is 0. Setting the timeout to a non-zero value
will end the agent algorithm after the specified
period of time or when the application ends,
whichever occurs first.
"""
if not isinstance(agent, Agent):
raise ValueError('agent must be a subclass of Agent.')
if timeout < 0:
raise ValueError('timeout must be >= 0')
self._agent = agent
self._signals = agent.get_signals()
self._controls = agent.get_controls()
self._signals_idx = []
self._controls_idx = []
self._update_period = agent.get_period()
self._num_update = None
if timeout != 0:
self._num_update = math.ceil(timeout / self._update_period)
self._returncode = None
def push_all(self):
self._signals_idx = [pio.push_signal(*ss) for ss in self._signals]
self._controls_idx = [pio.push_control(*cc) for cc in self._controls]
def read_all_signals(self):
"""Sample for all signals pushed with pio
Returns:
list(float): Sampled values for each signal
"""
return [pio.sample(signal_idx)
for signal_idx in self._signals_idx]
def returncode(self):
"""Get the return code of the application process
Returns:
int: Return code of app process
"""
if self._returncode is None:
raise RuntimeError('App process is still running') | return self._returncode
def run(self, argv, policy=None, profile=None):
"""Execute control loop defined by agent
Interfaces with PlatformIO directly through the geopmdpy.pio module.
Args:
argv (list(str)): Arguments for application that is executed
policy (object): Policy for Agent to use during the run
Returns:
str: Human readable report created by agent
"""
sys.stderr.write('<geopmdpy> RUN BEGIN\n')
if profile is None:
profile = ' '.join([shlex.quote(arg) for arg in argv])
try:
pio.save_control()
self.push_all()
pid = subprocess.Popen(argv)
self._agent.run_begin(policy, profile)
for loop_idx in PIDTimedLoop(pid, self._update_period, self._num_update):
pio.read_batch()
signals = self.read_all_signals()
new_settings = self._agent.update(signals)
if pid.poll() is not None:
break
for control_idx, new_setting in zip(self._controls_idx, new_settings):
pio.adjust(control_idx, new_setting)
pio.write_batch()
self._agent.run_end()
except:
raise
finally:
pio.restore_control()
self._returncode = pid.returncode
sys.stderr.write(f'<geopmdpy> RUN END, return: {self._returncode}\n')
return self._agent.get_report() | random_line_split | |
runtime.py | # Copyright (c) 2015 - 2022, Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
"""Tools to support runtime implementations that use the GEOPM service
"""
import math
import time
import subprocess # nosec
import sys
import shlex
from . import pio
class TimedLoop:
"""Object that can be iterated over to run a timed loop
Use in a for loop to execute a fixed number of timed delays. The
overhead time for executing what is inside of the loop is
accounted for. Calls to time.sleep() are made to delay until the
targeted end time for each iteration.
Example:
>>> from time import time
>>> time_0 = time()
>>> for index in TimedLoop(0.1, 10):
... print(f'{index}: {time() - time_0}')
...
0: 0.0008680820465087891
1: 0.10126090049743652
2: 0.20174455642700195
3: 0.30123186111450195
4: 0.4010961055755615
5: 0.5020360946655273
6: 0.6011238098144531
7: 0.7011349201202393
8: 0.8020164966583252
9: 0.9015650749206543
10: 1.0021190643310547
"""
def __init__(self, period, num_period=None):
"""Constructor for timed loop object
The number of loops executed is one greater than the number of
time intervals requested, and that the first iteration is not
delayed. The total amount of time spanned by the loop is the
product of the two input parameters.
To create an infinite loop, specify num_period is None.
Args:
period (float): Target interval for the loop execution in
units of seconds.
num_period (int): Number of time periods spanned by the
loop. The total loop time is
num_periods * period, but since there is
no delay in the first loop, there will
be num_period + 1 loop iterations.
"""
if period < 0.0:
raise RuntimeError('Specified period is invalid. Must be >= 0.')
if num_period is not None:
if num_period < 0:
raise RuntimeError('Specified num_period is invalid. Must be > 0.')
if not isinstance(num_period, int):
raise ValueError('num_period must be a whole number.')
self._period = period
self._num_loop = num_period
# Add one to ensure:
# total_time == num_loop * period
# because we do not delay the start iteration
if self._num_loop is not None:
self._num_loop += 1
def __iter__(self):
"""Set up a timed loop
Iteration method for timed loop. This iterator can be used in
a for statement to execute the loop periodically.
"""
self._loop_idx = 0
self._target_time = time.time()
return self
def __next__(self):
"""Sleep until next targeted time for loop and update counter
"""
result = self._loop_idx
if self._loop_idx == self._num_loop:
raise StopIteration
if self._loop_idx != 0:
sleep_time = self._target_time - time.time()
if sleep_time > 0:
self.wait(sleep_time)
self._target_time += self._period
self._loop_idx += 1
return result
def wait(self, timeout):
"""Pass-through to time.sleep()
Args:
timeout (float): Target interval for the loop execution in
units of seconds.
"""
time.sleep(timeout)
class PIDTimedLoop(TimedLoop):
def __init__(self, pid, period, num_period=None):
"""Similar to the TimedLoop but stop when subprocess ends
The number of loops executed is one greater than the number of
time intervals requested, and that the first iteration is not
delayed. The total amount of time spanned by the loop is the
product of the two input parameters.
To create an infinite loop, specify num_period is None.
Loop will always terminate when the subprocess pid terminates.
Args:
pid (Popen): Object returned by subprocess.Popen() constructor.
period (float): Target interval for the loop execution in
units of seconds.
num_period (int): Number of time periods spanned by the
loop. The total loop time is
num_periods * period, but since there is
no delay in the first loop, there will
be num_period + 1 loop iterations.
"""
super(PIDTimedLoop, self).__init__(period, num_period)
self._pid = pid
self._is_active = pid.poll() is None
def wait(self, timeout):
"""Wait for timeout seconds or until pid ends
Args:
timeout (float): Target interval for the loop execution in
units of seconds.
Raises:
StopIteration: When last call to wait termintated due to
the process ending
"""
if not self._is_active:
raise StopIteration
try:
self._pid.wait(timeout=timeout)
self._is_active = False
except subprocess.TimeoutExpired:
pass
class Agent:
"""Base class that documents the interfaces required by an agent
Agent objects are used to initialize a Controller object and
define the control algorithm.
"""
def __init__(self):
raise NotImplementedError('Agent is an abstract base class')
def get_signals(self):
"""Get list of read requests
The returned signals will be sampled from the platform by the
Controller and passed into Agent.update().
Returns:
list((str, int, int)): List of request tuples. Each
request comprises a signal name,
domain type, and domain index.
"""
raise NotImplementedError('Agent is an abstract base class')
def get_controls(self):
"""Get list of control requests
The returned controls will be set in the platform by the Controller
based on the return value from Agent.update().
Returns:
list((str, int, int)): List of request tuples. Each
request comprises a control name,
domain type, and domain index.
"""
raise NotImplementedError('Agent is an abstract base class')
def run_begin(self, policy, profile):
"""Called by Controller at the start of each run
The policy for the run is passed through to the agent from the
Controller.run() input. For some agents, the policy may always be
None.
Args:
policy (object): The Agent specific policy provided to the
Controller.run() method.
profile (str): Profile name to associate with the report
"""
raise NotImplementedError('Agent is an abstract base class')
def policy_repr(self, policy):
"""Create a string representation of a policy suitable for printing
"""
return policy.__repr__()
def run_end(self):
"""Called by the Controller at the end of each run
The result of calling the get_report() method after run_end() should
reflect the same report until the next call to run_end(). This report
will document the measurements made between the last calls to
run_begin() and run_end(). Each call to run_end() will follow a
previous call to run_begin(). The run_end() method will be called by
the Controller even if the run resulted in an error that raises an
exception. In this way resources associated with a single run can be
released when the run_end() method is called.
"""
raise NotImplementedError('Agent is an abstract base class')
def update(self, signals):
"""Called periodically by the Controller
The signals that specified by get_signals() will be passed as inputs
to the method by the Controller. The update() method will be called
periodically and the interval is set by the value returned by
Agent.get_period().
Args:
signals (list(float)): Recently read signal values
Returns:
list(float): Control values for next control interval
"""
raise NotImplementedError('Agent is an abstract base class')
def | (self):
"""Get the target time interval for the control loop
Returns:
float: Time interval in seconds
"""
raise NotImplementedError('Agent is an abstract base class')
def get_report(self):
"""Summary of all data collected by calls to update()
The report covers the interval of time between the last two calls to
Agent.begin_run() / Agent.end_run(). Until the next call to
Agent.begin_run(), the same report will be returned by this method.
The Controller.run() method will return this report upon completion of
the run.
Returns:
str: Human readable report
"""
raise NotImplementedError('Agent is an abstract base class')
class Controller:
"""Class that supports a runtime control algorithm
"""
def __init__(self, agent, timeout=0):
"""Controller constructor
Args:
agent (Agent): Object that conforms to the Agent class
interface
timeout (float): The agent algorithm will run for the full
duration of the application execution if timeout
is 0. Setting the timeout to a non-zero value
will end the agent algorithm after the specified
period of time or when the application ends,
whichever occurs first.
"""
if not isinstance(agent, Agent):
raise ValueError('agent must be a subclass of Agent.')
if timeout < 0:
raise ValueError('timeout must be >= 0')
self._agent = agent
self._signals = agent.get_signals()
self._controls = agent.get_controls()
self._signals_idx = []
self._controls_idx = []
self._update_period = agent.get_period()
self._num_update = None
if timeout != 0:
self._num_update = math.ceil(timeout / self._update_period)
self._returncode = None
def push_all(self):
self._signals_idx = [pio.push_signal(*ss) for ss in self._signals]
self._controls_idx = [pio.push_control(*cc) for cc in self._controls]
def read_all_signals(self):
"""Sample for all signals pushed with pio
Returns:
list(float): Sampled values for each signal
"""
return [pio.sample(signal_idx)
for signal_idx in self._signals_idx]
def returncode(self):
"""Get the return code of the application process
Returns:
int: Return code of app process
"""
if self._returncode is None:
raise RuntimeError('App process is still running')
return self._returncode
def run(self, argv, policy=None, profile=None):
"""Execute control loop defined by agent
Interfaces with PlatformIO directly through the geopmdpy.pio module.
Args:
argv (list(str)): Arguments for application that is executed
policy (object): Policy for Agent to use during the run
Returns:
str: Human readable report created by agent
"""
sys.stderr.write('<geopmdpy> RUN BEGIN\n')
if profile is None:
profile = ' '.join([shlex.quote(arg) for arg in argv])
try:
pio.save_control()
self.push_all()
pid = subprocess.Popen(argv)
self._agent.run_begin(policy, profile)
for loop_idx in PIDTimedLoop(pid, self._update_period, self._num_update):
pio.read_batch()
signals = self.read_all_signals()
new_settings = self._agent.update(signals)
if pid.poll() is not None:
break
for control_idx, new_setting in zip(self._controls_idx, new_settings):
pio.adjust(control_idx, new_setting)
pio.write_batch()
self._agent.run_end()
except:
raise
finally:
pio.restore_control()
self._returncode = pid.returncode
sys.stderr.write(f'<geopmdpy> RUN END, return: {self._returncode}\n')
return self._agent.get_report()
| get_period | identifier_name |
runtime.py | # Copyright (c) 2015 - 2022, Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
"""Tools to support runtime implementations that use the GEOPM service
"""
import math
import time
import subprocess # nosec
import sys
import shlex
from . import pio
class TimedLoop:
"""Object that can be iterated over to run a timed loop
Use in a for loop to execute a fixed number of timed delays. The
overhead time for executing what is inside of the loop is
accounted for. Calls to time.sleep() are made to delay until the
targeted end time for each iteration.
Example:
>>> from time import time
>>> time_0 = time()
>>> for index in TimedLoop(0.1, 10):
... print(f'{index}: {time() - time_0}')
...
0: 0.0008680820465087891
1: 0.10126090049743652
2: 0.20174455642700195
3: 0.30123186111450195
4: 0.4010961055755615
5: 0.5020360946655273
6: 0.6011238098144531
7: 0.7011349201202393
8: 0.8020164966583252
9: 0.9015650749206543
10: 1.0021190643310547
"""
def __init__(self, period, num_period=None):
"""Constructor for timed loop object
The number of loops executed is one greater than the number of
time intervals requested, and that the first iteration is not
delayed. The total amount of time spanned by the loop is the
product of the two input parameters.
To create an infinite loop, specify num_period is None.
Args:
period (float): Target interval for the loop execution in
units of seconds.
num_period (int): Number of time periods spanned by the
loop. The total loop time is
num_periods * period, but since there is
no delay in the first loop, there will
be num_period + 1 loop iterations.
"""
if period < 0.0:
raise RuntimeError('Specified period is invalid. Must be >= 0.')
if num_period is not None:
if num_period < 0:
raise RuntimeError('Specified num_period is invalid. Must be > 0.')
if not isinstance(num_period, int):
raise ValueError('num_period must be a whole number.')
self._period = period
self._num_loop = num_period
# Add one to ensure:
# total_time == num_loop * period
# because we do not delay the start iteration
if self._num_loop is not None:
self._num_loop += 1
def __iter__(self):
"""Set up a timed loop
Iteration method for timed loop. This iterator can be used in
a for statement to execute the loop periodically.
"""
self._loop_idx = 0
self._target_time = time.time()
return self
def __next__(self):
"""Sleep until next targeted time for loop and update counter
"""
result = self._loop_idx
if self._loop_idx == self._num_loop:
raise StopIteration
if self._loop_idx != 0:
sleep_time = self._target_time - time.time()
if sleep_time > 0:
self.wait(sleep_time)
self._target_time += self._period
self._loop_idx += 1
return result
def wait(self, timeout):
"""Pass-through to time.sleep()
Args:
timeout (float): Target interval for the loop execution in
units of seconds.
"""
time.sleep(timeout)
class PIDTimedLoop(TimedLoop):
def __init__(self, pid, period, num_period=None):
"""Similar to the TimedLoop but stop when subprocess ends
The number of loops executed is one greater than the number of
time intervals requested, and that the first iteration is not
delayed. The total amount of time spanned by the loop is the
product of the two input parameters.
To create an infinite loop, specify num_period is None.
Loop will always terminate when the subprocess pid terminates.
Args:
pid (Popen): Object returned by subprocess.Popen() constructor.
period (float): Target interval for the loop execution in
units of seconds.
num_period (int): Number of time periods spanned by the
loop. The total loop time is
num_periods * period, but since there is
no delay in the first loop, there will
be num_period + 1 loop iterations.
"""
super(PIDTimedLoop, self).__init__(period, num_period)
self._pid = pid
self._is_active = pid.poll() is None
def wait(self, timeout):
"""Wait for timeout seconds or until pid ends
Args:
timeout (float): Target interval for the loop execution in
units of seconds.
Raises:
StopIteration: When last call to wait termintated due to
the process ending
"""
if not self._is_active:
raise StopIteration
try:
self._pid.wait(timeout=timeout)
self._is_active = False
except subprocess.TimeoutExpired:
pass
class Agent:
"""Base class that documents the interfaces required by an agent
Agent objects are used to initialize a Controller object and
define the control algorithm.
"""
def __init__(self):
raise NotImplementedError('Agent is an abstract base class')
def get_signals(self):
"""Get list of read requests
The returned signals will be sampled from the platform by the
Controller and passed into Agent.update().
Returns:
list((str, int, int)): List of request tuples. Each
request comprises a signal name,
domain type, and domain index.
"""
raise NotImplementedError('Agent is an abstract base class')
def get_controls(self):
"""Get list of control requests
The returned controls will be set in the platform by the Controller
based on the return value from Agent.update().
Returns:
list((str, int, int)): List of request tuples. Each
request comprises a control name,
domain type, and domain index.
"""
raise NotImplementedError('Agent is an abstract base class')
def run_begin(self, policy, profile):
"""Called by Controller at the start of each run
The policy for the run is passed through to the agent from the
Controller.run() input. For some agents, the policy may always be
None.
Args:
policy (object): The Agent specific policy provided to the
Controller.run() method.
profile (str): Profile name to associate with the report
"""
raise NotImplementedError('Agent is an abstract base class')
def policy_repr(self, policy):
"""Create a string representation of a policy suitable for printing
"""
return policy.__repr__()
def run_end(self):
"""Called by the Controller at the end of each run
The result of calling the get_report() method after run_end() should
reflect the same report until the next call to run_end(). This report
will document the measurements made between the last calls to
run_begin() and run_end(). Each call to run_end() will follow a
previous call to run_begin(). The run_end() method will be called by
the Controller even if the run resulted in an error that raises an
exception. In this way resources associated with a single run can be
released when the run_end() method is called.
"""
raise NotImplementedError('Agent is an abstract base class')
def update(self, signals):
"""Called periodically by the Controller
The signals that specified by get_signals() will be passed as inputs
to the method by the Controller. The update() method will be called
periodically and the interval is set by the value returned by
Agent.get_period().
Args:
signals (list(float)): Recently read signal values
Returns:
list(float): Control values for next control interval
"""
raise NotImplementedError('Agent is an abstract base class')
def get_period(self):
"""Get the target time interval for the control loop
Returns:
float: Time interval in seconds
"""
raise NotImplementedError('Agent is an abstract base class')
def get_report(self):
"""Summary of all data collected by calls to update()
The report covers the interval of time between the last two calls to
Agent.begin_run() / Agent.end_run(). Until the next call to
Agent.begin_run(), the same report will be returned by this method.
The Controller.run() method will return this report upon completion of
the run.
Returns:
str: Human readable report
"""
raise NotImplementedError('Agent is an abstract base class')
class Controller:
"""Class that supports a runtime control algorithm
"""
def __init__(self, agent, timeout=0):
"""Controller constructor
Args:
agent (Agent): Object that conforms to the Agent class
interface
timeout (float): The agent algorithm will run for the full
duration of the application execution if timeout
is 0. Setting the timeout to a non-zero value
will end the agent algorithm after the specified
period of time or when the application ends,
whichever occurs first.
"""
if not isinstance(agent, Agent):
raise ValueError('agent must be a subclass of Agent.')
if timeout < 0:
raise ValueError('timeout must be >= 0')
self._agent = agent
self._signals = agent.get_signals()
self._controls = agent.get_controls()
self._signals_idx = []
self._controls_idx = []
self._update_period = agent.get_period()
self._num_update = None
if timeout != 0:
self._num_update = math.ceil(timeout / self._update_period)
self._returncode = None
def push_all(self):
self._signals_idx = [pio.push_signal(*ss) for ss in self._signals]
self._controls_idx = [pio.push_control(*cc) for cc in self._controls]
def read_all_signals(self):
"""Sample for all signals pushed with pio
Returns:
list(float): Sampled values for each signal
"""
return [pio.sample(signal_idx)
for signal_idx in self._signals_idx]
def returncode(self):
"""Get the return code of the application process
Returns:
int: Return code of app process
"""
if self._returncode is None:
raise RuntimeError('App process is still running')
return self._returncode
def run(self, argv, policy=None, profile=None):
"""Execute control loop defined by agent
Interfaces with PlatformIO directly through the geopmdpy.pio module.
Args:
argv (list(str)): Arguments for application that is executed
policy (object): Policy for Agent to use during the run
Returns:
str: Human readable report created by agent
"""
sys.stderr.write('<geopmdpy> RUN BEGIN\n')
if profile is None:
profile = ' '.join([shlex.quote(arg) for arg in argv])
try:
pio.save_control()
self.push_all()
pid = subprocess.Popen(argv)
self._agent.run_begin(policy, profile)
for loop_idx in PIDTimedLoop(pid, self._update_period, self._num_update):
pio.read_batch()
signals = self.read_all_signals()
new_settings = self._agent.update(signals)
if pid.poll() is not None:
|
for control_idx, new_setting in zip(self._controls_idx, new_settings):
pio.adjust(control_idx, new_setting)
pio.write_batch()
self._agent.run_end()
except:
raise
finally:
pio.restore_control()
self._returncode = pid.returncode
sys.stderr.write(f'<geopmdpy> RUN END, return: {self._returncode}\n')
return self._agent.get_report()
| break | conditional_block |
FutureWork.py | '''In this script, we will produce preliminary results for the ideas that are
emitted in the disrtation entiteled "A new Bayesian framework for the
interpretation of geophysical data".
Those ideas are:
1) Building a prior with a fixed, large number of layers
2) Propagating the posterior model space from close-by points
3) Providing insight on models for falsification
'''
from scipy import stats # To build the prior model space
def buildMODELSET_MASW():
'''BUILDMODELSET is a function that will build the benchmark model.
It does not take any arguments. '''
# Values for the benchmark model parameters:
TrueModel1 = np.asarray([0.01, 0.05, 0.120, 0.280, 0.600]) # Thickness and Vs for the 3 layers (variable of the problem)
TrueModel2 = np.asarray([0.0125, 0.0525, 0.120, 0.280, 0.600])
Vp = np.asarray([0.300, 0.750, 1.5]) # Vp for the 3 layers
rho = np.asarray([1.5, 1.9, 2.2]) # rho for the 3 layers
nLayer = 3 # Number of layers in the model
Frequency = np.logspace(0.1,1.5,50) # Frequencies at which the signal is simulated
Periods = np.divide(1,Frequency) # Corresponding periods
# Forward modelling using surf96:
Dataset1 = surf96(thickness=np.append(TrueModel1[0:nLayer-1], [0]),vp=Vp,vs=TrueModel1[nLayer-1:2*nLayer-1],rho=rho,periods=Periods,wave="rayleigh",mode=1,velocity="phase",flat_earth=True)
Dataset2 = surf96(thickness=np.append(TrueModel2[0:nLayer-1], [0]),vp=Vp,vs=TrueModel2[nLayer-1:2*nLayer-1],rho=rho,periods=Periods,wave="rayleigh",mode=1,velocity="phase",flat_earth=True)
# Building the noise model (Boaga et al., 2011)
ErrorModelSynth = [0.075, 20]
NoiseEstimate = np.asarray(np.divide(ErrorModelSynth[0]*Dataset1*1000 + np.divide(ErrorModelSynth[1],Frequency),1000)) # Standard deviation for all measurements in km/s
RMSE_Noise = np.sqrt(np.square(NoiseEstimate).mean(axis=-1))
print('The RMSE for the dataset with 1 times the standard deviation is: {} km/s'.format(RMSE_Noise))
# Define the prior model space:
# Find min and max Vp for each layer in the range of Poisson's ratio [0.2, 0.45]:
# For Vp1=0.3, the roots are : 0.183712 and 0.0904534 -> Vs1 = [0.1, 0.18]
# For Vp2=0.75, the roots are : 0.459279 and 0.226134 -> Vs2 = [0.25, 0.45]
# For Vp3=1.5, the roots are : 0.918559 and 0.452267 -> Vs2 = [0.5, 0.9]
prior = np.array([[0.001, 0.03, 0.1, 0.18],[0.01, 0.1, 0.25, 0.45],[0.0, 0.0, 0.5, 0.9]])# Thicknesses min and max, Vs min and max for each layers.
# Defining names of the variables (for graphical outputs).
nParam = 2 # e and Vs
ListPrior = [None] * ((nLayer*nParam)-1)# Half space at bottom
NamesFullUnits = [None] * ((nLayer*nParam)-1)# Half space at bottom
NamesShort = [None] * ((nLayer*nParam)-1)# Half space at bottom
NamesShortUnits = [None] * ((nLayer*nParam)-1)# Half space at bottom
Mins = np.zeros(((nLayer*nParam)-1,))
Maxs = np.zeros(((nLayer*nParam)-1,))
Units = ["\\ [km]", "\\ [km/s]"]
NFull = ["Thickness\\ ","s-Wave\\ velocity\\ "]
NShort = ["th_{", "Vs_{"]
ident = 0
for j in range(nParam):
for i in range(nLayer):
if not((i == nLayer-1) and (j == 0)):# Not the half-space thickness
ListPrior[ident] = stats.uniform(loc=prior[i,j*2],scale=prior[i,j*2+1]-prior[i,j*2])
Mins[ident] = prior[i,j*2]
Maxs[ident] = prior[i,j*2+1]
NamesFullUnits[ident] = NFull[j] + str(i+1) + Units[j]
NamesShortUnits[ident] = NShort[j] + str(i+1) + "}" + Units[j]
NamesShort[ident] = NShort[j] + str(i+1) + "}"
ident += 1
method = "DC"
Periods = np.divide(1,Frequency)
paramNames = {"NamesFU":NamesFullUnits,
"NamesSU":NamesShortUnits,
"NamesS":NamesShort,
"NamesGlobal":NFull,
"NamesGlobalS":["Depth\\ [km]", "Vs\\ [km/s]", "Vp\\ [km/s]", "\\rho\\ [T/m^3]"],
"DataUnits":"[km/s]",
"DataName":"Phase\\ velocity\\ [km/s]",
"DataAxis":"Periods\\ [s]"}
# Defining the forward modelling function
def funcSurf96(model):
import numpy as np
from pysurf96 import surf96
Vp = np.asarray([0.300, 0.750, 1.5]) # Defined again inside the function for parallelization
rho = np.asarray([1.5, 1.9, 2.2]) # Idem
nLayer = 3 # Idem
Frequency = np.logspace(0.1,1.5,50) # Idem
Periods = np.divide(1,Frequency) # Idem
return surf96(thickness=np.append(model[0:nLayer-1], [0]), # The 2 first values of the model are the thicknesses
vp=Vp, # Fixed value for Vp
vs=model[nLayer-1:2*nLayer-1], # The 3 last values of the model are the Vs
rho=rho, # Fixed value for rho
periods=Periods, # Periods at which to compute the model
wave="rayleigh", # Type of wave to simulate
mode=1, # Only compute the fundamental mode
velocity="phase", # Use phase velocity and not group velocity
flat_earth=True) # Local model where the flat-earth hypothesis makes sens
forwardFun = funcSurf96
forward = {"Fun":forwardFun,"Axis":Periods}
# Building the function for conditions (here, just checks that a sampled model is inside the prior)
cond = lambda model: (np.logical_and(np.greater_equal(model,Mins),np.less_equal(model,Maxs))).all()
# Initialize the model parameters for BEL1D
ModelSynthetic = BEL1D.MODELSET(prior=ListPrior,cond=cond,method=method,forwardFun=forward,paramNames=paramNames,nbLayer=nLayer)
return TrueModel1, TrueModel2, Periods, Dataset1, Dataset2, NoiseEstimate, ModelSynthetic
if __name__ == '__main__':
import numpy as np
from pyBEL1D import BEL1D
from pathos import multiprocessing as mp
from pathos import pools as pp
from matplotlib import pyplot as plt
from pysurf96 import surf96 # Code for the forward modelling of dispersion curves
### Parameters for the computation:
RunFixedLayers = False
RunPostPropag = True
ParallelComputing = True
RandomSeed = False
if not(RandomSeed):
np.random.seed(0) # For reproductibilty
from random import seed
seed(0)
if ParallelComputing:
pool = pp.ProcessPool(mp.cpu_count())# Create the parallel pool with at most the number of available CPU cores
ppComp = [True, pool]
else:
ppComp = [False, None] # No parallel computing
'''1) Building a prior with fixed, large number of layers'''
if RunFixedLayers:
### Building the synthetic benchmark:
Kernel = "Data/sNMR/MRS2021.mrsk"
Timing = np.arange(0.005, 0.5, 0.005)
SyntheticBenchmarkSNMR = np.asarray([0.05, 0.05, 0.05, 0.06, 0.07, 0.08, 0.10, 0.12, 0.14, 0.15, 0.05, 0.05, 0.06, 0.07, 0.08, 0.12, 0.16, 0.20, 0.24, 0.25]) # 3-layers model
### Building the prior/forward model class (MODELSET)
InitialModel = BEL1D.MODELSET.sNMR_logLayers(Kernel=Kernel, Timing=Timing, logUniform=False ,nbLayers=10, maxThick=10)
### Computing the model:
DatasetBenchmark = InitialModel.forwardFun["Fun"](SyntheticBenchmarkSNMR)
Noise = np.mean(DatasetBenchmark)/20
print('The noise level is {} nV'.format(Noise))
DatasetBenchmark += np.random.normal(scale=Noise, size=DatasetBenchmark.shape)
## Creating the BEL1D instances and IPR:
Prebel, Postbel, PrebelInit , stats = BEL1D.IPR(MODEL=InitialModel, Dataset=DatasetBenchmark, NoiseEstimate=Noise*1e9, Parallelization=ppComp,
nbModelsBase=10000, nbModelsSample=10000, stats=True, reduceModels=True, Mixing=(lambda x: 1), Graphs=False, saveIters=False, verbose=True)
# Displaying the results:
Postbel.ShowPostModels(TrueModel=SyntheticBenchmarkSNMR, RMSE=True, Parallelization=ppComp)
plt.show()
'''2) Propagating the posterior model from space from close-by points'''
if RunPostPropag:
### Defining the synthetic bechmarks:
TrueModel1, TrueModel2, Periods, Dataset1, Dataset2, NoiseEstimate, ModelSynthetic = buildMODELSET_MASW()
### Creating the firts BEL1D instance:
nbModelsBase = 1000
def MixingFunc(iter:int) -> float:
return 1# Always keeping the same proportion of models as the initial prior (see paper for argumentation).
Prebel1, Postbel1, PrebelInit1, statsCompute1 = BEL1D.IPR(MODEL=ModelSynthetic,Dataset=Dataset1,NoiseEstimate=NoiseEstimate,Parallelization=ppComp,
nbModelsBase=nbModelsBase,nbModelsSample=nbModelsBase,stats=True, Mixing=MixingFunc,
Graphs=False, TrueModel=TrueModel1, verbose=True, nbIterMax=10)
Prebel2, Postbel2, PrebelInit2, statsCompute2 = BEL1D.IPR(MODEL=ModelSynthetic,Dataset=Dataset2,NoiseEstimate=NoiseEstimate,Parallelization=ppComp,
nbModelsBase=nbModelsBase,nbModelsSample=nbModelsBase,stats=True, Mixing=MixingFunc,
Graphs=False, TrueModel=TrueModel1, verbose=True, PriorSampled=PrebelInit1.MODELS, nbIterMax=10)
Postbel1.ShowPostModels(TrueModel=TrueModel1, RMSE=True, Parallelization=ppComp)
fig = plt.gcf()
ax = fig.get_axes()[0]
ax.set_ylim(bottom=0.140, top=0.0)
ax.set_xlim(left=0.0, right=0.8)
ax.set_title('Initial Prior: Model 1', fontsize=16)
plt.tight_layout()
Postbel2.ShowPostModels(TrueModel=TrueModel2, RMSE=True, Parallelization=ppComp)
fig = plt.gcf()
ax = fig.get_axes()[0]
ax.set_ylim(bottom=0.140, top=0.0)
ax.set_xlim(left=0.0, right=0.8)
ax.set_title('Initial Prior: Model 2', fontsize=16)
plt.tight_layout()
### Creating a new instance with mixing of initial prior and posterior 1 form dataset 2:
sharePost = 1/4
ModelsPrior = PrebelInit1.MODELS[:int(PrebelInit1.nbModels*(1-sharePost)),:]
ModelsPosterior = Postbel1.SAMPLES[:int(Postbel1.nbSamples*sharePost),:]
MixedPrior = np.vstack((ModelsPrior, ModelsPosterior))
Prebel2_bis, Postbel2_bis, PrebelInit2_bis, statsCompute2_bis = BEL1D.IPR(MODEL=ModelSynthetic,Dataset=Dataset2,NoiseEstimate=NoiseEstimate,Parallelization=ppComp,
nbModelsBase=nbModelsBase,nbModelsSample=nbModelsBase,stats=True, Mixing=MixingFunc,
Graphs=False, TrueModel=TrueModel1, verbose=True, PriorSampled=MixedPrior, nbIterMax=10)
Postbel2_bis.ShowPostModels(TrueModel=TrueModel2, RMSE=True, Parallelization=ppComp)
fig = plt.gcf()
ax = fig.get_axes()[0]
ax.set_ylim(bottom=0.140, top=0.0)
ax.set_xlim(left=0.0, right=0.8)
ax.set_title('Propagated Posterior: Model 2', fontsize=16)
plt.tight_layout()
plt.show()
if ParallelComputing:
| pool.terminate() | conditional_block | |
FutureWork.py | '''In this script, we will produce preliminary results for the ideas that are
emitted in the disrtation entiteled "A new Bayesian framework for the
interpretation of geophysical data".
Those ideas are:
1) Building a prior with a fixed, large number of layers
2) Propagating the posterior model space from close-by points
3) Providing insight on models for falsification
'''
from scipy import stats # To build the prior model space
def | ():
'''BUILDMODELSET is a function that will build the benchmark model.
It does not take any arguments. '''
# Values for the benchmark model parameters:
TrueModel1 = np.asarray([0.01, 0.05, 0.120, 0.280, 0.600]) # Thickness and Vs for the 3 layers (variable of the problem)
TrueModel2 = np.asarray([0.0125, 0.0525, 0.120, 0.280, 0.600])
Vp = np.asarray([0.300, 0.750, 1.5]) # Vp for the 3 layers
rho = np.asarray([1.5, 1.9, 2.2]) # rho for the 3 layers
nLayer = 3 # Number of layers in the model
Frequency = np.logspace(0.1,1.5,50) # Frequencies at which the signal is simulated
Periods = np.divide(1,Frequency) # Corresponding periods
# Forward modelling using surf96:
Dataset1 = surf96(thickness=np.append(TrueModel1[0:nLayer-1], [0]),vp=Vp,vs=TrueModel1[nLayer-1:2*nLayer-1],rho=rho,periods=Periods,wave="rayleigh",mode=1,velocity="phase",flat_earth=True)
Dataset2 = surf96(thickness=np.append(TrueModel2[0:nLayer-1], [0]),vp=Vp,vs=TrueModel2[nLayer-1:2*nLayer-1],rho=rho,periods=Periods,wave="rayleigh",mode=1,velocity="phase",flat_earth=True)
# Building the noise model (Boaga et al., 2011)
ErrorModelSynth = [0.075, 20]
NoiseEstimate = np.asarray(np.divide(ErrorModelSynth[0]*Dataset1*1000 + np.divide(ErrorModelSynth[1],Frequency),1000)) # Standard deviation for all measurements in km/s
RMSE_Noise = np.sqrt(np.square(NoiseEstimate).mean(axis=-1))
print('The RMSE for the dataset with 1 times the standard deviation is: {} km/s'.format(RMSE_Noise))
# Define the prior model space:
# Find min and max Vp for each layer in the range of Poisson's ratio [0.2, 0.45]:
# For Vp1=0.3, the roots are : 0.183712 and 0.0904534 -> Vs1 = [0.1, 0.18]
# For Vp2=0.75, the roots are : 0.459279 and 0.226134 -> Vs2 = [0.25, 0.45]
# For Vp3=1.5, the roots are : 0.918559 and 0.452267 -> Vs2 = [0.5, 0.9]
prior = np.array([[0.001, 0.03, 0.1, 0.18],[0.01, 0.1, 0.25, 0.45],[0.0, 0.0, 0.5, 0.9]])# Thicknesses min and max, Vs min and max for each layers.
# Defining names of the variables (for graphical outputs).
nParam = 2 # e and Vs
ListPrior = [None] * ((nLayer*nParam)-1)# Half space at bottom
NamesFullUnits = [None] * ((nLayer*nParam)-1)# Half space at bottom
NamesShort = [None] * ((nLayer*nParam)-1)# Half space at bottom
NamesShortUnits = [None] * ((nLayer*nParam)-1)# Half space at bottom
Mins = np.zeros(((nLayer*nParam)-1,))
Maxs = np.zeros(((nLayer*nParam)-1,))
Units = ["\\ [km]", "\\ [km/s]"]
NFull = ["Thickness\\ ","s-Wave\\ velocity\\ "]
NShort = ["th_{", "Vs_{"]
ident = 0
for j in range(nParam):
for i in range(nLayer):
if not((i == nLayer-1) and (j == 0)):# Not the half-space thickness
ListPrior[ident] = stats.uniform(loc=prior[i,j*2],scale=prior[i,j*2+1]-prior[i,j*2])
Mins[ident] = prior[i,j*2]
Maxs[ident] = prior[i,j*2+1]
NamesFullUnits[ident] = NFull[j] + str(i+1) + Units[j]
NamesShortUnits[ident] = NShort[j] + str(i+1) + "}" + Units[j]
NamesShort[ident] = NShort[j] + str(i+1) + "}"
ident += 1
method = "DC"
Periods = np.divide(1,Frequency)
paramNames = {"NamesFU":NamesFullUnits,
"NamesSU":NamesShortUnits,
"NamesS":NamesShort,
"NamesGlobal":NFull,
"NamesGlobalS":["Depth\\ [km]", "Vs\\ [km/s]", "Vp\\ [km/s]", "\\rho\\ [T/m^3]"],
"DataUnits":"[km/s]",
"DataName":"Phase\\ velocity\\ [km/s]",
"DataAxis":"Periods\\ [s]"}
# Defining the forward modelling function
def funcSurf96(model):
import numpy as np
from pysurf96 import surf96
Vp = np.asarray([0.300, 0.750, 1.5]) # Defined again inside the function for parallelization
rho = np.asarray([1.5, 1.9, 2.2]) # Idem
nLayer = 3 # Idem
Frequency = np.logspace(0.1,1.5,50) # Idem
Periods = np.divide(1,Frequency) # Idem
return surf96(thickness=np.append(model[0:nLayer-1], [0]), # The 2 first values of the model are the thicknesses
vp=Vp, # Fixed value for Vp
vs=model[nLayer-1:2*nLayer-1], # The 3 last values of the model are the Vs
rho=rho, # Fixed value for rho
periods=Periods, # Periods at which to compute the model
wave="rayleigh", # Type of wave to simulate
mode=1, # Only compute the fundamental mode
velocity="phase", # Use phase velocity and not group velocity
flat_earth=True) # Local model where the flat-earth hypothesis makes sens
forwardFun = funcSurf96
forward = {"Fun":forwardFun,"Axis":Periods}
# Building the function for conditions (here, just checks that a sampled model is inside the prior)
cond = lambda model: (np.logical_and(np.greater_equal(model,Mins),np.less_equal(model,Maxs))).all()
# Initialize the model parameters for BEL1D
ModelSynthetic = BEL1D.MODELSET(prior=ListPrior,cond=cond,method=method,forwardFun=forward,paramNames=paramNames,nbLayer=nLayer)
return TrueModel1, TrueModel2, Periods, Dataset1, Dataset2, NoiseEstimate, ModelSynthetic
if __name__ == '__main__':
import numpy as np
from pyBEL1D import BEL1D
from pathos import multiprocessing as mp
from pathos import pools as pp
from matplotlib import pyplot as plt
from pysurf96 import surf96 # Code for the forward modelling of dispersion curves
### Parameters for the computation:
RunFixedLayers = False
RunPostPropag = True
ParallelComputing = True
RandomSeed = False
if not(RandomSeed):
np.random.seed(0) # For reproductibilty
from random import seed
seed(0)
if ParallelComputing:
pool = pp.ProcessPool(mp.cpu_count())# Create the parallel pool with at most the number of available CPU cores
ppComp = [True, pool]
else:
ppComp = [False, None] # No parallel computing
'''1) Building a prior with fixed, large number of layers'''
if RunFixedLayers:
### Building the synthetic benchmark:
Kernel = "Data/sNMR/MRS2021.mrsk"
Timing = np.arange(0.005, 0.5, 0.005)
SyntheticBenchmarkSNMR = np.asarray([0.05, 0.05, 0.05, 0.06, 0.07, 0.08, 0.10, 0.12, 0.14, 0.15, 0.05, 0.05, 0.06, 0.07, 0.08, 0.12, 0.16, 0.20, 0.24, 0.25]) # 3-layers model
### Building the prior/forward model class (MODELSET)
InitialModel = BEL1D.MODELSET.sNMR_logLayers(Kernel=Kernel, Timing=Timing, logUniform=False ,nbLayers=10, maxThick=10)
### Computing the model:
DatasetBenchmark = InitialModel.forwardFun["Fun"](SyntheticBenchmarkSNMR)
Noise = np.mean(DatasetBenchmark)/20
print('The noise level is {} nV'.format(Noise))
DatasetBenchmark += np.random.normal(scale=Noise, size=DatasetBenchmark.shape)
## Creating the BEL1D instances and IPR:
Prebel, Postbel, PrebelInit , stats = BEL1D.IPR(MODEL=InitialModel, Dataset=DatasetBenchmark, NoiseEstimate=Noise*1e9, Parallelization=ppComp,
nbModelsBase=10000, nbModelsSample=10000, stats=True, reduceModels=True, Mixing=(lambda x: 1), Graphs=False, saveIters=False, verbose=True)
# Displaying the results:
Postbel.ShowPostModels(TrueModel=SyntheticBenchmarkSNMR, RMSE=True, Parallelization=ppComp)
plt.show()
'''2) Propagating the posterior model from space from close-by points'''
if RunPostPropag:
### Defining the synthetic bechmarks:
TrueModel1, TrueModel2, Periods, Dataset1, Dataset2, NoiseEstimate, ModelSynthetic = buildMODELSET_MASW()
### Creating the firts BEL1D instance:
nbModelsBase = 1000
def MixingFunc(iter:int) -> float:
return 1# Always keeping the same proportion of models as the initial prior (see paper for argumentation).
Prebel1, Postbel1, PrebelInit1, statsCompute1 = BEL1D.IPR(MODEL=ModelSynthetic,Dataset=Dataset1,NoiseEstimate=NoiseEstimate,Parallelization=ppComp,
nbModelsBase=nbModelsBase,nbModelsSample=nbModelsBase,stats=True, Mixing=MixingFunc,
Graphs=False, TrueModel=TrueModel1, verbose=True, nbIterMax=10)
Prebel2, Postbel2, PrebelInit2, statsCompute2 = BEL1D.IPR(MODEL=ModelSynthetic,Dataset=Dataset2,NoiseEstimate=NoiseEstimate,Parallelization=ppComp,
nbModelsBase=nbModelsBase,nbModelsSample=nbModelsBase,stats=True, Mixing=MixingFunc,
Graphs=False, TrueModel=TrueModel1, verbose=True, PriorSampled=PrebelInit1.MODELS, nbIterMax=10)
Postbel1.ShowPostModels(TrueModel=TrueModel1, RMSE=True, Parallelization=ppComp)
fig = plt.gcf()
ax = fig.get_axes()[0]
ax.set_ylim(bottom=0.140, top=0.0)
ax.set_xlim(left=0.0, right=0.8)
ax.set_title('Initial Prior: Model 1', fontsize=16)
plt.tight_layout()
Postbel2.ShowPostModels(TrueModel=TrueModel2, RMSE=True, Parallelization=ppComp)
fig = plt.gcf()
ax = fig.get_axes()[0]
ax.set_ylim(bottom=0.140, top=0.0)
ax.set_xlim(left=0.0, right=0.8)
ax.set_title('Initial Prior: Model 2', fontsize=16)
plt.tight_layout()
### Creating a new instance with mixing of initial prior and posterior 1 form dataset 2:
sharePost = 1/4
ModelsPrior = PrebelInit1.MODELS[:int(PrebelInit1.nbModels*(1-sharePost)),:]
ModelsPosterior = Postbel1.SAMPLES[:int(Postbel1.nbSamples*sharePost),:]
MixedPrior = np.vstack((ModelsPrior, ModelsPosterior))
Prebel2_bis, Postbel2_bis, PrebelInit2_bis, statsCompute2_bis = BEL1D.IPR(MODEL=ModelSynthetic,Dataset=Dataset2,NoiseEstimate=NoiseEstimate,Parallelization=ppComp,
nbModelsBase=nbModelsBase,nbModelsSample=nbModelsBase,stats=True, Mixing=MixingFunc,
Graphs=False, TrueModel=TrueModel1, verbose=True, PriorSampled=MixedPrior, nbIterMax=10)
Postbel2_bis.ShowPostModels(TrueModel=TrueModel2, RMSE=True, Parallelization=ppComp)
fig = plt.gcf()
ax = fig.get_axes()[0]
ax.set_ylim(bottom=0.140, top=0.0)
ax.set_xlim(left=0.0, right=0.8)
ax.set_title('Propagated Posterior: Model 2', fontsize=16)
plt.tight_layout()
plt.show()
if ParallelComputing:
pool.terminate() | buildMODELSET_MASW | identifier_name |
FutureWork.py | '''In this script, we will produce preliminary results for the ideas that are
emitted in the disrtation entiteled "A new Bayesian framework for the
interpretation of geophysical data".
Those ideas are:
1) Building a prior with a fixed, large number of layers
2) Propagating the posterior model space from close-by points
3) Providing insight on models for falsification
'''
from scipy import stats # To build the prior model space
def buildMODELSET_MASW():
'''BUILDMODELSET is a function that will build the benchmark model.
It does not take any arguments. '''
# Values for the benchmark model parameters:
TrueModel1 = np.asarray([0.01, 0.05, 0.120, 0.280, 0.600]) # Thickness and Vs for the 3 layers (variable of the problem)
TrueModel2 = np.asarray([0.0125, 0.0525, 0.120, 0.280, 0.600])
Vp = np.asarray([0.300, 0.750, 1.5]) # Vp for the 3 layers
rho = np.asarray([1.5, 1.9, 2.2]) # rho for the 3 layers
nLayer = 3 # Number of layers in the model
Frequency = np.logspace(0.1,1.5,50) # Frequencies at which the signal is simulated
Periods = np.divide(1,Frequency) # Corresponding periods
# Forward modelling using surf96:
Dataset1 = surf96(thickness=np.append(TrueModel1[0:nLayer-1], [0]),vp=Vp,vs=TrueModel1[nLayer-1:2*nLayer-1],rho=rho,periods=Periods,wave="rayleigh",mode=1,velocity="phase",flat_earth=True)
Dataset2 = surf96(thickness=np.append(TrueModel2[0:nLayer-1], [0]),vp=Vp,vs=TrueModel2[nLayer-1:2*nLayer-1],rho=rho,periods=Periods,wave="rayleigh",mode=1,velocity="phase",flat_earth=True)
# Building the noise model (Boaga et al., 2011)
ErrorModelSynth = [0.075, 20]
NoiseEstimate = np.asarray(np.divide(ErrorModelSynth[0]*Dataset1*1000 + np.divide(ErrorModelSynth[1],Frequency),1000)) # Standard deviation for all measurements in km/s
RMSE_Noise = np.sqrt(np.square(NoiseEstimate).mean(axis=-1))
print('The RMSE for the dataset with 1 times the standard deviation is: {} km/s'.format(RMSE_Noise))
# Define the prior model space:
# Find min and max Vp for each layer in the range of Poisson's ratio [0.2, 0.45]:
# For Vp1=0.3, the roots are : 0.183712 and 0.0904534 -> Vs1 = [0.1, 0.18]
# For Vp2=0.75, the roots are : 0.459279 and 0.226134 -> Vs2 = [0.25, 0.45]
# For Vp3=1.5, the roots are : 0.918559 and 0.452267 -> Vs2 = [0.5, 0.9]
prior = np.array([[0.001, 0.03, 0.1, 0.18],[0.01, 0.1, 0.25, 0.45],[0.0, 0.0, 0.5, 0.9]])# Thicknesses min and max, Vs min and max for each layers.
# Defining names of the variables (for graphical outputs).
nParam = 2 # e and Vs
ListPrior = [None] * ((nLayer*nParam)-1)# Half space at bottom
NamesFullUnits = [None] * ((nLayer*nParam)-1)# Half space at bottom
NamesShort = [None] * ((nLayer*nParam)-1)# Half space at bottom
NamesShortUnits = [None] * ((nLayer*nParam)-1)# Half space at bottom
Mins = np.zeros(((nLayer*nParam)-1,))
Maxs = np.zeros(((nLayer*nParam)-1,))
Units = ["\\ [km]", "\\ [km/s]"]
NFull = ["Thickness\\ ","s-Wave\\ velocity\\ "]
NShort = ["th_{", "Vs_{"]
ident = 0
for j in range(nParam):
for i in range(nLayer):
if not((i == nLayer-1) and (j == 0)):# Not the half-space thickness
ListPrior[ident] = stats.uniform(loc=prior[i,j*2],scale=prior[i,j*2+1]-prior[i,j*2])
Mins[ident] = prior[i,j*2]
Maxs[ident] = prior[i,j*2+1]
NamesFullUnits[ident] = NFull[j] + str(i+1) + Units[j]
NamesShortUnits[ident] = NShort[j] + str(i+1) + "}" + Units[j]
NamesShort[ident] = NShort[j] + str(i+1) + "}"
ident += 1
method = "DC"
Periods = np.divide(1,Frequency)
paramNames = {"NamesFU":NamesFullUnits,
"NamesSU":NamesShortUnits,
"NamesS":NamesShort,
"NamesGlobal":NFull,
"NamesGlobalS":["Depth\\ [km]", "Vs\\ [km/s]", "Vp\\ [km/s]", "\\rho\\ [T/m^3]"],
"DataUnits":"[km/s]",
"DataName":"Phase\\ velocity\\ [km/s]",
"DataAxis":"Periods\\ [s]"}
# Defining the forward modelling function
def funcSurf96(model):
|
forwardFun = funcSurf96
forward = {"Fun":forwardFun,"Axis":Periods}
# Building the function for conditions (here, just checks that a sampled model is inside the prior)
cond = lambda model: (np.logical_and(np.greater_equal(model,Mins),np.less_equal(model,Maxs))).all()
# Initialize the model parameters for BEL1D
ModelSynthetic = BEL1D.MODELSET(prior=ListPrior,cond=cond,method=method,forwardFun=forward,paramNames=paramNames,nbLayer=nLayer)
return TrueModel1, TrueModel2, Periods, Dataset1, Dataset2, NoiseEstimate, ModelSynthetic
if __name__ == '__main__':
import numpy as np
from pyBEL1D import BEL1D
from pathos import multiprocessing as mp
from pathos import pools as pp
from matplotlib import pyplot as plt
from pysurf96 import surf96 # Code for the forward modelling of dispersion curves
### Parameters for the computation:
RunFixedLayers = False
RunPostPropag = True
ParallelComputing = True
RandomSeed = False
if not(RandomSeed):
np.random.seed(0) # For reproductibilty
from random import seed
seed(0)
if ParallelComputing:
pool = pp.ProcessPool(mp.cpu_count())# Create the parallel pool with at most the number of available CPU cores
ppComp = [True, pool]
else:
ppComp = [False, None] # No parallel computing
'''1) Building a prior with fixed, large number of layers'''
if RunFixedLayers:
### Building the synthetic benchmark:
Kernel = "Data/sNMR/MRS2021.mrsk"
Timing = np.arange(0.005, 0.5, 0.005)
SyntheticBenchmarkSNMR = np.asarray([0.05, 0.05, 0.05, 0.06, 0.07, 0.08, 0.10, 0.12, 0.14, 0.15, 0.05, 0.05, 0.06, 0.07, 0.08, 0.12, 0.16, 0.20, 0.24, 0.25]) # 3-layers model
### Building the prior/forward model class (MODELSET)
InitialModel = BEL1D.MODELSET.sNMR_logLayers(Kernel=Kernel, Timing=Timing, logUniform=False ,nbLayers=10, maxThick=10)
### Computing the model:
DatasetBenchmark = InitialModel.forwardFun["Fun"](SyntheticBenchmarkSNMR)
Noise = np.mean(DatasetBenchmark)/20
print('The noise level is {} nV'.format(Noise))
DatasetBenchmark += np.random.normal(scale=Noise, size=DatasetBenchmark.shape)
## Creating the BEL1D instances and IPR:
Prebel, Postbel, PrebelInit , stats = BEL1D.IPR(MODEL=InitialModel, Dataset=DatasetBenchmark, NoiseEstimate=Noise*1e9, Parallelization=ppComp,
nbModelsBase=10000, nbModelsSample=10000, stats=True, reduceModels=True, Mixing=(lambda x: 1), Graphs=False, saveIters=False, verbose=True)
# Displaying the results:
Postbel.ShowPostModels(TrueModel=SyntheticBenchmarkSNMR, RMSE=True, Parallelization=ppComp)
plt.show()
'''2) Propagating the posterior model from space from close-by points'''
if RunPostPropag:
### Defining the synthetic bechmarks:
TrueModel1, TrueModel2, Periods, Dataset1, Dataset2, NoiseEstimate, ModelSynthetic = buildMODELSET_MASW()
### Creating the firts BEL1D instance:
nbModelsBase = 1000
def MixingFunc(iter:int) -> float:
return 1# Always keeping the same proportion of models as the initial prior (see paper for argumentation).
Prebel1, Postbel1, PrebelInit1, statsCompute1 = BEL1D.IPR(MODEL=ModelSynthetic,Dataset=Dataset1,NoiseEstimate=NoiseEstimate,Parallelization=ppComp,
nbModelsBase=nbModelsBase,nbModelsSample=nbModelsBase,stats=True, Mixing=MixingFunc,
Graphs=False, TrueModel=TrueModel1, verbose=True, nbIterMax=10)
Prebel2, Postbel2, PrebelInit2, statsCompute2 = BEL1D.IPR(MODEL=ModelSynthetic,Dataset=Dataset2,NoiseEstimate=NoiseEstimate,Parallelization=ppComp,
nbModelsBase=nbModelsBase,nbModelsSample=nbModelsBase,stats=True, Mixing=MixingFunc,
Graphs=False, TrueModel=TrueModel1, verbose=True, PriorSampled=PrebelInit1.MODELS, nbIterMax=10)
Postbel1.ShowPostModels(TrueModel=TrueModel1, RMSE=True, Parallelization=ppComp)
fig = plt.gcf()
ax = fig.get_axes()[0]
ax.set_ylim(bottom=0.140, top=0.0)
ax.set_xlim(left=0.0, right=0.8)
ax.set_title('Initial Prior: Model 1', fontsize=16)
plt.tight_layout()
Postbel2.ShowPostModels(TrueModel=TrueModel2, RMSE=True, Parallelization=ppComp)
fig = plt.gcf()
ax = fig.get_axes()[0]
ax.set_ylim(bottom=0.140, top=0.0)
ax.set_xlim(left=0.0, right=0.8)
ax.set_title('Initial Prior: Model 2', fontsize=16)
plt.tight_layout()
### Creating a new instance with mixing of initial prior and posterior 1 form dataset 2:
sharePost = 1/4
ModelsPrior = PrebelInit1.MODELS[:int(PrebelInit1.nbModels*(1-sharePost)),:]
ModelsPosterior = Postbel1.SAMPLES[:int(Postbel1.nbSamples*sharePost),:]
MixedPrior = np.vstack((ModelsPrior, ModelsPosterior))
Prebel2_bis, Postbel2_bis, PrebelInit2_bis, statsCompute2_bis = BEL1D.IPR(MODEL=ModelSynthetic,Dataset=Dataset2,NoiseEstimate=NoiseEstimate,Parallelization=ppComp,
nbModelsBase=nbModelsBase,nbModelsSample=nbModelsBase,stats=True, Mixing=MixingFunc,
Graphs=False, TrueModel=TrueModel1, verbose=True, PriorSampled=MixedPrior, nbIterMax=10)
Postbel2_bis.ShowPostModels(TrueModel=TrueModel2, RMSE=True, Parallelization=ppComp)
fig = plt.gcf()
ax = fig.get_axes()[0]
ax.set_ylim(bottom=0.140, top=0.0)
ax.set_xlim(left=0.0, right=0.8)
ax.set_title('Propagated Posterior: Model 2', fontsize=16)
plt.tight_layout()
plt.show()
if ParallelComputing:
pool.terminate() | import numpy as np
from pysurf96 import surf96
Vp = np.asarray([0.300, 0.750, 1.5]) # Defined again inside the function for parallelization
rho = np.asarray([1.5, 1.9, 2.2]) # Idem
nLayer = 3 # Idem
Frequency = np.logspace(0.1,1.5,50) # Idem
Periods = np.divide(1,Frequency) # Idem
return surf96(thickness=np.append(model[0:nLayer-1], [0]), # The 2 first values of the model are the thicknesses
vp=Vp, # Fixed value for Vp
vs=model[nLayer-1:2*nLayer-1], # The 3 last values of the model are the Vs
rho=rho, # Fixed value for rho
periods=Periods, # Periods at which to compute the model
wave="rayleigh", # Type of wave to simulate
mode=1, # Only compute the fundamental mode
velocity="phase", # Use phase velocity and not group velocity
flat_earth=True) # Local model where the flat-earth hypothesis makes sens | identifier_body |
FutureWork.py | '''In this script, we will produce preliminary results for the ideas that are
emitted in the disrtation entiteled "A new Bayesian framework for the
interpretation of geophysical data".
Those ideas are:
1) Building a prior with a fixed, large number of layers
2) Propagating the posterior model space from close-by points
3) Providing insight on models for falsification
'''
from scipy import stats # To build the prior model space
def buildMODELSET_MASW():
'''BUILDMODELSET is a function that will build the benchmark model.
It does not take any arguments. '''
# Values for the benchmark model parameters:
TrueModel1 = np.asarray([0.01, 0.05, 0.120, 0.280, 0.600]) # Thickness and Vs for the 3 layers (variable of the problem)
TrueModel2 = np.asarray([0.0125, 0.0525, 0.120, 0.280, 0.600])
Vp = np.asarray([0.300, 0.750, 1.5]) # Vp for the 3 layers
rho = np.asarray([1.5, 1.9, 2.2]) # rho for the 3 layers
nLayer = 3 # Number of layers in the model
Frequency = np.logspace(0.1,1.5,50) # Frequencies at which the signal is simulated
Periods = np.divide(1,Frequency) # Corresponding periods
# Forward modelling using surf96:
Dataset1 = surf96(thickness=np.append(TrueModel1[0:nLayer-1], [0]),vp=Vp,vs=TrueModel1[nLayer-1:2*nLayer-1],rho=rho,periods=Periods,wave="rayleigh",mode=1,velocity="phase",flat_earth=True)
Dataset2 = surf96(thickness=np.append(TrueModel2[0:nLayer-1], [0]),vp=Vp,vs=TrueModel2[nLayer-1:2*nLayer-1],rho=rho,periods=Periods,wave="rayleigh",mode=1,velocity="phase",flat_earth=True)
# Building the noise model (Boaga et al., 2011)
ErrorModelSynth = [0.075, 20]
NoiseEstimate = np.asarray(np.divide(ErrorModelSynth[0]*Dataset1*1000 + np.divide(ErrorModelSynth[1],Frequency),1000)) # Standard deviation for all measurements in km/s
RMSE_Noise = np.sqrt(np.square(NoiseEstimate).mean(axis=-1))
print('The RMSE for the dataset with 1 times the standard deviation is: {} km/s'.format(RMSE_Noise))
# Define the prior model space:
# Find min and max Vp for each layer in the range of Poisson's ratio [0.2, 0.45]:
# For Vp1=0.3, the roots are : 0.183712 and 0.0904534 -> Vs1 = [0.1, 0.18]
# For Vp2=0.75, the roots are : 0.459279 and 0.226134 -> Vs2 = [0.25, 0.45]
# For Vp3=1.5, the roots are : 0.918559 and 0.452267 -> Vs2 = [0.5, 0.9]
prior = np.array([[0.001, 0.03, 0.1, 0.18],[0.01, 0.1, 0.25, 0.45],[0.0, 0.0, 0.5, 0.9]])# Thicknesses min and max, Vs min and max for each layers.
# Defining names of the variables (for graphical outputs).
nParam = 2 # e and Vs
ListPrior = [None] * ((nLayer*nParam)-1)# Half space at bottom
NamesFullUnits = [None] * ((nLayer*nParam)-1)# Half space at bottom
NamesShort = [None] * ((nLayer*nParam)-1)# Half space at bottom
NamesShortUnits = [None] * ((nLayer*nParam)-1)# Half space at bottom
Mins = np.zeros(((nLayer*nParam)-1,))
Maxs = np.zeros(((nLayer*nParam)-1,))
Units = ["\\ [km]", "\\ [km/s]"]
NFull = ["Thickness\\ ","s-Wave\\ velocity\\ "]
NShort = ["th_{", "Vs_{"]
ident = 0
for j in range(nParam):
for i in range(nLayer):
if not((i == nLayer-1) and (j == 0)):# Not the half-space thickness
ListPrior[ident] = stats.uniform(loc=prior[i,j*2],scale=prior[i,j*2+1]-prior[i,j*2])
Mins[ident] = prior[i,j*2]
Maxs[ident] = prior[i,j*2+1]
NamesFullUnits[ident] = NFull[j] + str(i+1) + Units[j]
NamesShortUnits[ident] = NShort[j] + str(i+1) + "}" + Units[j]
NamesShort[ident] = NShort[j] + str(i+1) + "}"
ident += 1
method = "DC"
Periods = np.divide(1,Frequency)
paramNames = {"NamesFU":NamesFullUnits,
"NamesSU":NamesShortUnits,
"NamesS":NamesShort,
"NamesGlobal":NFull,
"NamesGlobalS":["Depth\\ [km]", "Vs\\ [km/s]", "Vp\\ [km/s]", "\\rho\\ [T/m^3]"],
"DataUnits":"[km/s]",
"DataName":"Phase\\ velocity\\ [km/s]",
"DataAxis":"Periods\\ [s]"}
# Defining the forward modelling function
def funcSurf96(model):
import numpy as np
from pysurf96 import surf96
Vp = np.asarray([0.300, 0.750, 1.5]) # Defined again inside the function for parallelization
rho = np.asarray([1.5, 1.9, 2.2]) # Idem
nLayer = 3 # Idem
Frequency = np.logspace(0.1,1.5,50) # Idem
Periods = np.divide(1,Frequency) # Idem
return surf96(thickness=np.append(model[0:nLayer-1], [0]), # The 2 first values of the model are the thicknesses
vp=Vp, # Fixed value for Vp
vs=model[nLayer-1:2*nLayer-1], # The 3 last values of the model are the Vs
rho=rho, # Fixed value for rho
periods=Periods, # Periods at which to compute the model
wave="rayleigh", # Type of wave to simulate
mode=1, # Only compute the fundamental mode
velocity="phase", # Use phase velocity and not group velocity
flat_earth=True) # Local model where the flat-earth hypothesis makes sens
forwardFun = funcSurf96
forward = {"Fun":forwardFun,"Axis":Periods}
# Building the function for conditions (here, just checks that a sampled model is inside the prior)
cond = lambda model: (np.logical_and(np.greater_equal(model,Mins),np.less_equal(model,Maxs))).all()
# Initialize the model parameters for BEL1D
ModelSynthetic = BEL1D.MODELSET(prior=ListPrior,cond=cond,method=method,forwardFun=forward,paramNames=paramNames,nbLayer=nLayer)
return TrueModel1, TrueModel2, Periods, Dataset1, Dataset2, NoiseEstimate, ModelSynthetic
if __name__ == '__main__':
import numpy as np
from pyBEL1D import BEL1D
from pathos import multiprocessing as mp
from pathos import pools as pp
from matplotlib import pyplot as plt
from pysurf96 import surf96 # Code for the forward modelling of dispersion curves
### Parameters for the computation:
RunFixedLayers = False
RunPostPropag = True
ParallelComputing = True
RandomSeed = False
if not(RandomSeed):
np.random.seed(0) # For reproductibilty
from random import seed | if ParallelComputing:
pool = pp.ProcessPool(mp.cpu_count())# Create the parallel pool with at most the number of available CPU cores
ppComp = [True, pool]
else:
ppComp = [False, None] # No parallel computing
'''1) Building a prior with fixed, large number of layers'''
if RunFixedLayers:
### Building the synthetic benchmark:
Kernel = "Data/sNMR/MRS2021.mrsk"
Timing = np.arange(0.005, 0.5, 0.005)
SyntheticBenchmarkSNMR = np.asarray([0.05, 0.05, 0.05, 0.06, 0.07, 0.08, 0.10, 0.12, 0.14, 0.15, 0.05, 0.05, 0.06, 0.07, 0.08, 0.12, 0.16, 0.20, 0.24, 0.25]) # 3-layers model
### Building the prior/forward model class (MODELSET)
InitialModel = BEL1D.MODELSET.sNMR_logLayers(Kernel=Kernel, Timing=Timing, logUniform=False ,nbLayers=10, maxThick=10)
### Computing the model:
DatasetBenchmark = InitialModel.forwardFun["Fun"](SyntheticBenchmarkSNMR)
Noise = np.mean(DatasetBenchmark)/20
print('The noise level is {} nV'.format(Noise))
DatasetBenchmark += np.random.normal(scale=Noise, size=DatasetBenchmark.shape)
## Creating the BEL1D instances and IPR:
Prebel, Postbel, PrebelInit , stats = BEL1D.IPR(MODEL=InitialModel, Dataset=DatasetBenchmark, NoiseEstimate=Noise*1e9, Parallelization=ppComp,
nbModelsBase=10000, nbModelsSample=10000, stats=True, reduceModels=True, Mixing=(lambda x: 1), Graphs=False, saveIters=False, verbose=True)
# Displaying the results:
Postbel.ShowPostModels(TrueModel=SyntheticBenchmarkSNMR, RMSE=True, Parallelization=ppComp)
plt.show()
'''2) Propagating the posterior model from space from close-by points'''
if RunPostPropag:
### Defining the synthetic bechmarks:
TrueModel1, TrueModel2, Periods, Dataset1, Dataset2, NoiseEstimate, ModelSynthetic = buildMODELSET_MASW()
### Creating the firts BEL1D instance:
nbModelsBase = 1000
def MixingFunc(iter:int) -> float:
return 1# Always keeping the same proportion of models as the initial prior (see paper for argumentation).
Prebel1, Postbel1, PrebelInit1, statsCompute1 = BEL1D.IPR(MODEL=ModelSynthetic,Dataset=Dataset1,NoiseEstimate=NoiseEstimate,Parallelization=ppComp,
nbModelsBase=nbModelsBase,nbModelsSample=nbModelsBase,stats=True, Mixing=MixingFunc,
Graphs=False, TrueModel=TrueModel1, verbose=True, nbIterMax=10)
Prebel2, Postbel2, PrebelInit2, statsCompute2 = BEL1D.IPR(MODEL=ModelSynthetic,Dataset=Dataset2,NoiseEstimate=NoiseEstimate,Parallelization=ppComp,
nbModelsBase=nbModelsBase,nbModelsSample=nbModelsBase,stats=True, Mixing=MixingFunc,
Graphs=False, TrueModel=TrueModel1, verbose=True, PriorSampled=PrebelInit1.MODELS, nbIterMax=10)
Postbel1.ShowPostModels(TrueModel=TrueModel1, RMSE=True, Parallelization=ppComp)
fig = plt.gcf()
ax = fig.get_axes()[0]
ax.set_ylim(bottom=0.140, top=0.0)
ax.set_xlim(left=0.0, right=0.8)
ax.set_title('Initial Prior: Model 1', fontsize=16)
plt.tight_layout()
Postbel2.ShowPostModels(TrueModel=TrueModel2, RMSE=True, Parallelization=ppComp)
fig = plt.gcf()
ax = fig.get_axes()[0]
ax.set_ylim(bottom=0.140, top=0.0)
ax.set_xlim(left=0.0, right=0.8)
ax.set_title('Initial Prior: Model 2', fontsize=16)
plt.tight_layout()
### Creating a new instance with mixing of initial prior and posterior 1 form dataset 2:
sharePost = 1/4
ModelsPrior = PrebelInit1.MODELS[:int(PrebelInit1.nbModels*(1-sharePost)),:]
ModelsPosterior = Postbel1.SAMPLES[:int(Postbel1.nbSamples*sharePost),:]
MixedPrior = np.vstack((ModelsPrior, ModelsPosterior))
Prebel2_bis, Postbel2_bis, PrebelInit2_bis, statsCompute2_bis = BEL1D.IPR(MODEL=ModelSynthetic,Dataset=Dataset2,NoiseEstimate=NoiseEstimate,Parallelization=ppComp,
nbModelsBase=nbModelsBase,nbModelsSample=nbModelsBase,stats=True, Mixing=MixingFunc,
Graphs=False, TrueModel=TrueModel1, verbose=True, PriorSampled=MixedPrior, nbIterMax=10)
Postbel2_bis.ShowPostModels(TrueModel=TrueModel2, RMSE=True, Parallelization=ppComp)
fig = plt.gcf()
ax = fig.get_axes()[0]
ax.set_ylim(bottom=0.140, top=0.0)
ax.set_xlim(left=0.0, right=0.8)
ax.set_title('Propagated Posterior: Model 2', fontsize=16)
plt.tight_layout()
plt.show()
if ParallelComputing:
pool.terminate() | seed(0)
| random_line_split |
MultinomialAdversarialNetwork.py | #This version assumes domains = train/test set
import numpy as np
from ..utils import Dataset
import math
import random
from .interface import TopicModel
from .man_model.models import *
from .man_model import utils
from .man_model.options import opt
import torch.utils.data as data_utils
from tqdm import tqdm
from collections import defaultdict
import itertools
from torchnet.meter import ConfusionMeter
import torch
import torch.nn as nn
import torch.nn.functional as functional
import torch.optim as optim
from torch.utils.data import ConcatDataset, DataLoader
"""
IMPORTANT: for some reason, Model (self.F_s,etc) will not work if inputs are not float32
=> need to convert. Dont know if same thing for target tho?
Also apparently, domain labels retrieved from get_domain_labels cannot be -1?
Output size for C HAS TO BE 2 even if it's a binary classification
"""
def softmax(x):
"""Compute softmax values for each sets of scores in x."""
e_x = np.exp(x - np.max(x, axis=1).reshape(-1, 1))
return e_x / np.sum(e_x, axis=1).reshape(-1, 1)
class MultinomialAdversarialNetwork(TopicModel):
def | (self, k, m, model_params=None, log_params=None):
super().__init__(k,m,model_params,log_params)
def prepare_data(self,d):
"""
Assume d is a dictionary of dataset where d[domain] = another dataset class
Assume labeled domain = train set, unlabeled = test
"""
train_loaders, train_iters = {}, {}
unlabeled_loaders, unlabeled_iters = {}, {}
for domain in opt.domains:
#CONVERT TO FLOAT32
features, target = torch.from_numpy(d[domain].X.todense().astype('float32')), torch.from_numpy(d[domain].y)#.reshape((-1,1))
train = data_utils.TensorDataset(features,target)
train_loaders[domain] = DataLoader(train, opt.batch_size, shuffle = True)
train_iters[domain] = iter(train_loaders[domain])
for domain in opt.unlabeled_domains:
features, target = torch.from_numpy(d[domain].X.todense().astype('float32')), torch.from_numpy(d[domain].y)#.reshape(-1,1))
uset = data_utils.TensorDataset(features,target)
unlabeled_loaders[domain] = DataLoader(uset,opt.batch_size, shuffle = True)
unlabeled_iters[domain] = iter(unlabeled_loaders[domain])
return train_loaders, train_iters, unlabeled_loaders, unlabeled_iters
def fit(self, d, *args, **kwargs):
#minibatches = create_minibatch(X, y, z, batch_size)
#TODO: make this able to fit consecutively
train_loaders, train_iters, unlabeled_loaders, unlabeled_iters = self.prepare_data(d)
#Training
self.F_s = MlpFeatureExtractor(d['train'].X.shape[1], opt.F_hidden_sizes,opt.shared_hidden_size, opt.dropout)
self.F_d = {}
for domain in opt.domains:
self.F_d[domain] = MlpFeatureExtractor(d['train'].X.shape[1], opt.F_hidden_sizes, opt.domain_hidden_size, opt.dropout)
self.C = SentimentClassifier(opt.C_layers, opt.shared_hidden_size + opt.domain_hidden_size, opt.shared_hidden_size + opt.domain_hidden_size, 2,opt.dropout, opt.C_bn)
self.D = DomainClassifier(opt.D_layers, opt.shared_hidden_size, opt.shared_hidden_size,len(opt.all_domains), opt.loss, opt.dropout, opt.D_bn)
# print("try")
# print(opt.device)
self.F_s, self.C, self.D = self.F_s.to(opt.device), self.C.to(opt.device), self.D.to(opt.device)
for f_d in self.F_d.values():
f_d = f_d.to(opt.device)
# print("endtry")
# # optimizers
optimizer = optim.Adam(itertools.chain(*map(list, [self.F_s.parameters() if self.F_s else [], self.C.parameters()] + [f.parameters() for f in self.F_d.values()])), lr=0.0001)
optimizerD = optim.Adam(self.D.parameters(), lr=0.0001)
loss_d_res = []
l_d_res = []
l_c_res = []
for epoch in range(opt.max_epoch):
self.F_s.train()
self.C.train()
self.D.train()
for f in self.F_d.values():
f.train()
# training accuracy
correct, total = defaultdict(int), defaultdict(int)
# D accuracy
d_correct, d_total = 0, 0
# conceptually view 1 epoch as 1 epoch of the first domain
num_iter = len(train_loaders[opt.domains[0]])
for i in range(num_iter):
# D iterations
utils.freeze_net(self.F_s)
map(utils.freeze_net, self.F_d.values())
utils.freeze_net(self.C)
utils.unfreeze_net(self.D)
# optional WGAN n_critic trick
n_critic = opt.n_critic
for _ in range(n_critic):
self.D.zero_grad()
loss_d = {}
# train on both labeled and unlabeled domains
for domain in opt.unlabeled_domains:
# targets not used
d_inputs, _ = utils.endless_get_next_batch(
unlabeled_loaders, unlabeled_iters, domain)
d_inputs = d_inputs.to(opt.device)
d_targets = utils.get_domain_label(opt.loss, domain, len(d_inputs))
shared_feat = self.F_s(d_inputs)
d_outputs = self.D(shared_feat)
# D accuracy
_, pred = torch.max(d_outputs, 1)
d_total += len(d_inputs)
if opt.loss.lower() == 'l2':
_, tgt_indices = torch.max(d_targets, 1)
d_correct += (pred==tgt_indices).sum().item()
l_d = functional.mse_loss(d_outputs, d_targets)
l_d.backward()
else:
d_correct += (pred==d_targets).sum().item()
l_d = functional.nll_loss(d_outputs, d_targets)
l_d.backward()
loss_d[domain] = l_d.item()
optimizerD.step()
# F&C iteration
utils.unfreeze_net(self.F_s)
map(utils.unfreeze_net, self.F_d.values())
utils.unfreeze_net(self.C)
utils.freeze_net(self.D)
#if opt.fix_emb:
# utils.freeze_net(self.F_s.word_emb)
# map(utils.freeze_net, self.F_d.values())
self.F_s.zero_grad()
for f_d in self.F_d.values():
f_d.zero_grad()
self.C.zero_grad()
shared_feats, domain_feats = [], []
for domain in opt.domains:
inputs, targets = utils.endless_get_next_batch(
train_loaders, train_iters, domain)
#target = torch.int64 rn
targets = targets.to(opt.device)
inputs = inputs.to(opt.device)
shared_feat = self.F_s(inputs)
shared_feats.append(shared_feat)
domain_feat = self.F_d[domain](inputs)
domain_feats.append(domain_feat)
features = torch.cat((shared_feat, domain_feat), dim=1)
c_outputs = self.C(features)
#return c_outputs, targets
#DEVICE SIDE TRIGGERED ERROR OCCUR HERE (l_c=...)
l_c = functional.nll_loss(c_outputs, targets)
l_c.backward(retain_graph=True)
# training accuracy
_, pred = torch.max(c_outputs, 1)
total[domain] += targets.size(0)
correct[domain] += (pred == targets).sum().item()
# update F with D gradients on all domains
for domain in opt.unlabeled_domains:
d_inputs, _ = utils.endless_get_next_batch(
unlabeled_loaders, unlabeled_iters, domain)
d_inputs = d_inputs.to(opt.device)
shared_feat = self.F_s(d_inputs)
d_outputs = self.D(shared_feat)
if opt.loss.lower() == 'gr':
d_targets = utils.get_domain_label(opt.loss, domain, len(d_inputs))
l_d = functional.nll_loss(d_outputs, d_targets)
if opt.lambd > 0:
l_d *= -opt.lambd
elif opt.loss.lower() == 'l2':
d_targets = utils.get_random_domain_label(opt.loss, len(d_inputs))
l_d = functional.mse_loss(d_outputs, d_targets)
if opt.lambd > 0:
l_d *= opt.lambd
l_d.backward()
optimizer.step()
# print(loss_d)
# print('l_d loss: {}'.format(l_d.item()))
# print('l_c loss: {}'.format(l_c.item()))
loss_d_res.append(loss_d['test'])
l_d_res.append(l_d.item())
l_c_res.append(l_c.item())
if (epoch + 1) % kwargs["display_step"] == 0:
print(
"Epoch:", "%04d, done" % (epoch + 1) #"cost=", "{:.9f}"#.format(l_d.data[0])
)
return loss_d_res, l_d_res, l_c_res
def transform(self, d, *args, **kwargs):
F_d = self.F_d[opt.domains[0]]
self.F_s.eval()
F_d.eval()
self.C.eval()
_,_,_,it = self.prepare_data(d)
it = it[opt.unlabeled_domains[0]]
correct = 0
total = 0
confusion = ConfusionMeter(opt.num_labels)
preds = []
for inputs,targets in it:
inputs = inputs.to(opt.device)
targets = targets.to(opt.device)
d_features = F_d(inputs)
features = torch.cat((self.F_s(inputs), d_features), dim=1)
outputs = self.C(features)
_, pred = torch.max(outputs, 1)
#preds.extend(pred.data)
confusion.add(pred.data, targets.data)
total += targets.size(0)
correct += (pred == targets).sum().item()
acc = correct / total
#('{}: Accuracy on {} samples: {}%'.format(name, total, 100.0*acc))
return acc, correct
#return preds
def get_name(self):
if self._name is None:
self._name = "MAN({},{},{})".format(self.k,self.m,1)
return self._name | __init__ | identifier_name |
MultinomialAdversarialNetwork.py | #This version assumes domains = train/test set
import numpy as np
from ..utils import Dataset
import math
import random
from .interface import TopicModel
from .man_model.models import *
from .man_model import utils
from .man_model.options import opt
import torch.utils.data as data_utils
from tqdm import tqdm
from collections import defaultdict
import itertools
from torchnet.meter import ConfusionMeter
import torch
import torch.nn as nn
import torch.nn.functional as functional
import torch.optim as optim
from torch.utils.data import ConcatDataset, DataLoader
"""
IMPORTANT: for some reason, Model (self.F_s,etc) will not work if inputs are not float32
=> need to convert. Dont know if same thing for target tho?
Also apparently, domain labels retrieved from get_domain_labels cannot be -1?
Output size for C HAS TO BE 2 even if it's a binary classification
"""
def softmax(x):
"""Compute softmax values for each sets of scores in x."""
e_x = np.exp(x - np.max(x, axis=1).reshape(-1, 1))
return e_x / np.sum(e_x, axis=1).reshape(-1, 1)
class MultinomialAdversarialNetwork(TopicModel):
def __init__(self, k, m, model_params=None, log_params=None):
super().__init__(k,m,model_params,log_params)
def prepare_data(self,d):
"""
Assume d is a dictionary of dataset where d[domain] = another dataset class
Assume labeled domain = train set, unlabeled = test
"""
train_loaders, train_iters = {}, {}
unlabeled_loaders, unlabeled_iters = {}, {}
for domain in opt.domains:
#CONVERT TO FLOAT32
features, target = torch.from_numpy(d[domain].X.todense().astype('float32')), torch.from_numpy(d[domain].y)#.reshape((-1,1))
train = data_utils.TensorDataset(features,target)
train_loaders[domain] = DataLoader(train, opt.batch_size, shuffle = True)
train_iters[domain] = iter(train_loaders[domain])
for domain in opt.unlabeled_domains:
features, target = torch.from_numpy(d[domain].X.todense().astype('float32')), torch.from_numpy(d[domain].y)#.reshape(-1,1))
uset = data_utils.TensorDataset(features,target)
unlabeled_loaders[domain] = DataLoader(uset,opt.batch_size, shuffle = True)
unlabeled_iters[domain] = iter(unlabeled_loaders[domain])
return train_loaders, train_iters, unlabeled_loaders, unlabeled_iters
def fit(self, d, *args, **kwargs):
#minibatches = create_minibatch(X, y, z, batch_size)
#TODO: make this able to fit consecutively
|
def transform(self, d, *args, **kwargs):
F_d = self.F_d[opt.domains[0]]
self.F_s.eval()
F_d.eval()
self.C.eval()
_,_,_,it = self.prepare_data(d)
it = it[opt.unlabeled_domains[0]]
correct = 0
total = 0
confusion = ConfusionMeter(opt.num_labels)
preds = []
for inputs,targets in it:
inputs = inputs.to(opt.device)
targets = targets.to(opt.device)
d_features = F_d(inputs)
features = torch.cat((self.F_s(inputs), d_features), dim=1)
outputs = self.C(features)
_, pred = torch.max(outputs, 1)
#preds.extend(pred.data)
confusion.add(pred.data, targets.data)
total += targets.size(0)
correct += (pred == targets).sum().item()
acc = correct / total
#('{}: Accuracy on {} samples: {}%'.format(name, total, 100.0*acc))
return acc, correct
#return preds
def get_name(self):
if self._name is None:
self._name = "MAN({},{},{})".format(self.k,self.m,1)
return self._name | train_loaders, train_iters, unlabeled_loaders, unlabeled_iters = self.prepare_data(d)
#Training
self.F_s = MlpFeatureExtractor(d['train'].X.shape[1], opt.F_hidden_sizes,opt.shared_hidden_size, opt.dropout)
self.F_d = {}
for domain in opt.domains:
self.F_d[domain] = MlpFeatureExtractor(d['train'].X.shape[1], opt.F_hidden_sizes, opt.domain_hidden_size, opt.dropout)
self.C = SentimentClassifier(opt.C_layers, opt.shared_hidden_size + opt.domain_hidden_size, opt.shared_hidden_size + opt.domain_hidden_size, 2,opt.dropout, opt.C_bn)
self.D = DomainClassifier(opt.D_layers, opt.shared_hidden_size, opt.shared_hidden_size,len(opt.all_domains), opt.loss, opt.dropout, opt.D_bn)
# print("try")
# print(opt.device)
self.F_s, self.C, self.D = self.F_s.to(opt.device), self.C.to(opt.device), self.D.to(opt.device)
for f_d in self.F_d.values():
f_d = f_d.to(opt.device)
# print("endtry")
# # optimizers
optimizer = optim.Adam(itertools.chain(*map(list, [self.F_s.parameters() if self.F_s else [], self.C.parameters()] + [f.parameters() for f in self.F_d.values()])), lr=0.0001)
optimizerD = optim.Adam(self.D.parameters(), lr=0.0001)
loss_d_res = []
l_d_res = []
l_c_res = []
for epoch in range(opt.max_epoch):
self.F_s.train()
self.C.train()
self.D.train()
for f in self.F_d.values():
f.train()
# training accuracy
correct, total = defaultdict(int), defaultdict(int)
# D accuracy
d_correct, d_total = 0, 0
# conceptually view 1 epoch as 1 epoch of the first domain
num_iter = len(train_loaders[opt.domains[0]])
for i in range(num_iter):
# D iterations
utils.freeze_net(self.F_s)
map(utils.freeze_net, self.F_d.values())
utils.freeze_net(self.C)
utils.unfreeze_net(self.D)
# optional WGAN n_critic trick
n_critic = opt.n_critic
for _ in range(n_critic):
self.D.zero_grad()
loss_d = {}
# train on both labeled and unlabeled domains
for domain in opt.unlabeled_domains:
# targets not used
d_inputs, _ = utils.endless_get_next_batch(
unlabeled_loaders, unlabeled_iters, domain)
d_inputs = d_inputs.to(opt.device)
d_targets = utils.get_domain_label(opt.loss, domain, len(d_inputs))
shared_feat = self.F_s(d_inputs)
d_outputs = self.D(shared_feat)
# D accuracy
_, pred = torch.max(d_outputs, 1)
d_total += len(d_inputs)
if opt.loss.lower() == 'l2':
_, tgt_indices = torch.max(d_targets, 1)
d_correct += (pred==tgt_indices).sum().item()
l_d = functional.mse_loss(d_outputs, d_targets)
l_d.backward()
else:
d_correct += (pred==d_targets).sum().item()
l_d = functional.nll_loss(d_outputs, d_targets)
l_d.backward()
loss_d[domain] = l_d.item()
optimizerD.step()
# F&C iteration
utils.unfreeze_net(self.F_s)
map(utils.unfreeze_net, self.F_d.values())
utils.unfreeze_net(self.C)
utils.freeze_net(self.D)
#if opt.fix_emb:
# utils.freeze_net(self.F_s.word_emb)
# map(utils.freeze_net, self.F_d.values())
self.F_s.zero_grad()
for f_d in self.F_d.values():
f_d.zero_grad()
self.C.zero_grad()
shared_feats, domain_feats = [], []
for domain in opt.domains:
inputs, targets = utils.endless_get_next_batch(
train_loaders, train_iters, domain)
#target = torch.int64 rn
targets = targets.to(opt.device)
inputs = inputs.to(opt.device)
shared_feat = self.F_s(inputs)
shared_feats.append(shared_feat)
domain_feat = self.F_d[domain](inputs)
domain_feats.append(domain_feat)
features = torch.cat((shared_feat, domain_feat), dim=1)
c_outputs = self.C(features)
#return c_outputs, targets
#DEVICE SIDE TRIGGERED ERROR OCCUR HERE (l_c=...)
l_c = functional.nll_loss(c_outputs, targets)
l_c.backward(retain_graph=True)
# training accuracy
_, pred = torch.max(c_outputs, 1)
total[domain] += targets.size(0)
correct[domain] += (pred == targets).sum().item()
# update F with D gradients on all domains
for domain in opt.unlabeled_domains:
d_inputs, _ = utils.endless_get_next_batch(
unlabeled_loaders, unlabeled_iters, domain)
d_inputs = d_inputs.to(opt.device)
shared_feat = self.F_s(d_inputs)
d_outputs = self.D(shared_feat)
if opt.loss.lower() == 'gr':
d_targets = utils.get_domain_label(opt.loss, domain, len(d_inputs))
l_d = functional.nll_loss(d_outputs, d_targets)
if opt.lambd > 0:
l_d *= -opt.lambd
elif opt.loss.lower() == 'l2':
d_targets = utils.get_random_domain_label(opt.loss, len(d_inputs))
l_d = functional.mse_loss(d_outputs, d_targets)
if opt.lambd > 0:
l_d *= opt.lambd
l_d.backward()
optimizer.step()
# print(loss_d)
# print('l_d loss: {}'.format(l_d.item()))
# print('l_c loss: {}'.format(l_c.item()))
loss_d_res.append(loss_d['test'])
l_d_res.append(l_d.item())
l_c_res.append(l_c.item())
if (epoch + 1) % kwargs["display_step"] == 0:
print(
"Epoch:", "%04d, done" % (epoch + 1) #"cost=", "{:.9f}"#.format(l_d.data[0])
)
return loss_d_res, l_d_res, l_c_res | identifier_body |
MultinomialAdversarialNetwork.py | #This version assumes domains = train/test set
import numpy as np
from ..utils import Dataset
import math
import random
from .interface import TopicModel
from .man_model.models import *
from .man_model import utils
from .man_model.options import opt
import torch.utils.data as data_utils
from tqdm import tqdm
from collections import defaultdict
import itertools
from torchnet.meter import ConfusionMeter
import torch
import torch.nn as nn
import torch.nn.functional as functional
import torch.optim as optim
from torch.utils.data import ConcatDataset, DataLoader
"""
IMPORTANT: for some reason, Model (self.F_s,etc) will not work if inputs are not float32
=> need to convert. Dont know if same thing for target tho?
Also apparently, domain labels retrieved from get_domain_labels cannot be -1?
Output size for C HAS TO BE 2 even if it's a binary classification
"""
def softmax(x):
"""Compute softmax values for each sets of scores in x."""
e_x = np.exp(x - np.max(x, axis=1).reshape(-1, 1))
return e_x / np.sum(e_x, axis=1).reshape(-1, 1)
class MultinomialAdversarialNetwork(TopicModel):
def __init__(self, k, m, model_params=None, log_params=None):
super().__init__(k,m,model_params,log_params)
def prepare_data(self,d):
"""
Assume d is a dictionary of dataset where d[domain] = another dataset class
Assume labeled domain = train set, unlabeled = test
"""
train_loaders, train_iters = {}, {}
unlabeled_loaders, unlabeled_iters = {}, {}
for domain in opt.domains:
#CONVERT TO FLOAT32
features, target = torch.from_numpy(d[domain].X.todense().astype('float32')), torch.from_numpy(d[domain].y)#.reshape((-1,1))
train = data_utils.TensorDataset(features,target)
train_loaders[domain] = DataLoader(train, opt.batch_size, shuffle = True)
train_iters[domain] = iter(train_loaders[domain])
for domain in opt.unlabeled_domains:
|
return train_loaders, train_iters, unlabeled_loaders, unlabeled_iters
def fit(self, d, *args, **kwargs):
#minibatches = create_minibatch(X, y, z, batch_size)
#TODO: make this able to fit consecutively
train_loaders, train_iters, unlabeled_loaders, unlabeled_iters = self.prepare_data(d)
#Training
self.F_s = MlpFeatureExtractor(d['train'].X.shape[1], opt.F_hidden_sizes,opt.shared_hidden_size, opt.dropout)
self.F_d = {}
for domain in opt.domains:
self.F_d[domain] = MlpFeatureExtractor(d['train'].X.shape[1], opt.F_hidden_sizes, opt.domain_hidden_size, opt.dropout)
self.C = SentimentClassifier(opt.C_layers, opt.shared_hidden_size + opt.domain_hidden_size, opt.shared_hidden_size + opt.domain_hidden_size, 2,opt.dropout, opt.C_bn)
self.D = DomainClassifier(opt.D_layers, opt.shared_hidden_size, opt.shared_hidden_size,len(opt.all_domains), opt.loss, opt.dropout, opt.D_bn)
# print("try")
# print(opt.device)
self.F_s, self.C, self.D = self.F_s.to(opt.device), self.C.to(opt.device), self.D.to(opt.device)
for f_d in self.F_d.values():
f_d = f_d.to(opt.device)
# print("endtry")
# # optimizers
optimizer = optim.Adam(itertools.chain(*map(list, [self.F_s.parameters() if self.F_s else [], self.C.parameters()] + [f.parameters() for f in self.F_d.values()])), lr=0.0001)
optimizerD = optim.Adam(self.D.parameters(), lr=0.0001)
loss_d_res = []
l_d_res = []
l_c_res = []
for epoch in range(opt.max_epoch):
self.F_s.train()
self.C.train()
self.D.train()
for f in self.F_d.values():
f.train()
# training accuracy
correct, total = defaultdict(int), defaultdict(int)
# D accuracy
d_correct, d_total = 0, 0
# conceptually view 1 epoch as 1 epoch of the first domain
num_iter = len(train_loaders[opt.domains[0]])
for i in range(num_iter):
# D iterations
utils.freeze_net(self.F_s)
map(utils.freeze_net, self.F_d.values())
utils.freeze_net(self.C)
utils.unfreeze_net(self.D)
# optional WGAN n_critic trick
n_critic = opt.n_critic
for _ in range(n_critic):
self.D.zero_grad()
loss_d = {}
# train on both labeled and unlabeled domains
for domain in opt.unlabeled_domains:
# targets not used
d_inputs, _ = utils.endless_get_next_batch(
unlabeled_loaders, unlabeled_iters, domain)
d_inputs = d_inputs.to(opt.device)
d_targets = utils.get_domain_label(opt.loss, domain, len(d_inputs))
shared_feat = self.F_s(d_inputs)
d_outputs = self.D(shared_feat)
# D accuracy
_, pred = torch.max(d_outputs, 1)
d_total += len(d_inputs)
if opt.loss.lower() == 'l2':
_, tgt_indices = torch.max(d_targets, 1)
d_correct += (pred==tgt_indices).sum().item()
l_d = functional.mse_loss(d_outputs, d_targets)
l_d.backward()
else:
d_correct += (pred==d_targets).sum().item()
l_d = functional.nll_loss(d_outputs, d_targets)
l_d.backward()
loss_d[domain] = l_d.item()
optimizerD.step()
# F&C iteration
utils.unfreeze_net(self.F_s)
map(utils.unfreeze_net, self.F_d.values())
utils.unfreeze_net(self.C)
utils.freeze_net(self.D)
#if opt.fix_emb:
# utils.freeze_net(self.F_s.word_emb)
# map(utils.freeze_net, self.F_d.values())
self.F_s.zero_grad()
for f_d in self.F_d.values():
f_d.zero_grad()
self.C.zero_grad()
shared_feats, domain_feats = [], []
for domain in opt.domains:
inputs, targets = utils.endless_get_next_batch(
train_loaders, train_iters, domain)
#target = torch.int64 rn
targets = targets.to(opt.device)
inputs = inputs.to(opt.device)
shared_feat = self.F_s(inputs)
shared_feats.append(shared_feat)
domain_feat = self.F_d[domain](inputs)
domain_feats.append(domain_feat)
features = torch.cat((shared_feat, domain_feat), dim=1)
c_outputs = self.C(features)
#return c_outputs, targets
#DEVICE SIDE TRIGGERED ERROR OCCUR HERE (l_c=...)
l_c = functional.nll_loss(c_outputs, targets)
l_c.backward(retain_graph=True)
# training accuracy
_, pred = torch.max(c_outputs, 1)
total[domain] += targets.size(0)
correct[domain] += (pred == targets).sum().item()
# update F with D gradients on all domains
for domain in opt.unlabeled_domains:
d_inputs, _ = utils.endless_get_next_batch(
unlabeled_loaders, unlabeled_iters, domain)
d_inputs = d_inputs.to(opt.device)
shared_feat = self.F_s(d_inputs)
d_outputs = self.D(shared_feat)
if opt.loss.lower() == 'gr':
d_targets = utils.get_domain_label(opt.loss, domain, len(d_inputs))
l_d = functional.nll_loss(d_outputs, d_targets)
if opt.lambd > 0:
l_d *= -opt.lambd
elif opt.loss.lower() == 'l2':
d_targets = utils.get_random_domain_label(opt.loss, len(d_inputs))
l_d = functional.mse_loss(d_outputs, d_targets)
if opt.lambd > 0:
l_d *= opt.lambd
l_d.backward()
optimizer.step()
# print(loss_d)
# print('l_d loss: {}'.format(l_d.item()))
# print('l_c loss: {}'.format(l_c.item()))
loss_d_res.append(loss_d['test'])
l_d_res.append(l_d.item())
l_c_res.append(l_c.item())
if (epoch + 1) % kwargs["display_step"] == 0:
print(
"Epoch:", "%04d, done" % (epoch + 1) #"cost=", "{:.9f}"#.format(l_d.data[0])
)
return loss_d_res, l_d_res, l_c_res
def transform(self, d, *args, **kwargs):
F_d = self.F_d[opt.domains[0]]
self.F_s.eval()
F_d.eval()
self.C.eval()
_,_,_,it = self.prepare_data(d)
it = it[opt.unlabeled_domains[0]]
correct = 0
total = 0
confusion = ConfusionMeter(opt.num_labels)
preds = []
for inputs,targets in it:
inputs = inputs.to(opt.device)
targets = targets.to(opt.device)
d_features = F_d(inputs)
features = torch.cat((self.F_s(inputs), d_features), dim=1)
outputs = self.C(features)
_, pred = torch.max(outputs, 1)
#preds.extend(pred.data)
confusion.add(pred.data, targets.data)
total += targets.size(0)
correct += (pred == targets).sum().item()
acc = correct / total
#('{}: Accuracy on {} samples: {}%'.format(name, total, 100.0*acc))
return acc, correct
#return preds
def get_name(self):
if self._name is None:
self._name = "MAN({},{},{})".format(self.k,self.m,1)
return self._name | features, target = torch.from_numpy(d[domain].X.todense().astype('float32')), torch.from_numpy(d[domain].y)#.reshape(-1,1))
uset = data_utils.TensorDataset(features,target)
unlabeled_loaders[domain] = DataLoader(uset,opt.batch_size, shuffle = True)
unlabeled_iters[domain] = iter(unlabeled_loaders[domain]) | conditional_block |
MultinomialAdversarialNetwork.py | #This version assumes domains = train/test set
import numpy as np
from ..utils import Dataset
import math
import random
from .interface import TopicModel
from .man_model.models import *
from .man_model import utils
from .man_model.options import opt
import torch.utils.data as data_utils
from tqdm import tqdm
from collections import defaultdict
import itertools
from torchnet.meter import ConfusionMeter
import torch
import torch.nn as nn
import torch.nn.functional as functional
import torch.optim as optim
from torch.utils.data import ConcatDataset, DataLoader
"""
IMPORTANT: for some reason, Model (self.F_s,etc) will not work if inputs are not float32
=> need to convert. Dont know if same thing for target tho?
Also apparently, domain labels retrieved from get_domain_labels cannot be -1?
Output size for C HAS TO BE 2 even if it's a binary classification
"""
def softmax(x):
"""Compute softmax values for each sets of scores in x."""
e_x = np.exp(x - np.max(x, axis=1).reshape(-1, 1))
return e_x / np.sum(e_x, axis=1).reshape(-1, 1)
class MultinomialAdversarialNetwork(TopicModel):
def __init__(self, k, m, model_params=None, log_params=None):
super().__init__(k,m,model_params,log_params)
def prepare_data(self,d):
"""
Assume d is a dictionary of dataset where d[domain] = another dataset class
Assume labeled domain = train set, unlabeled = test
"""
train_loaders, train_iters = {}, {}
unlabeled_loaders, unlabeled_iters = {}, {}
for domain in opt.domains:
#CONVERT TO FLOAT32
features, target = torch.from_numpy(d[domain].X.todense().astype('float32')), torch.from_numpy(d[domain].y)#.reshape((-1,1))
train = data_utils.TensorDataset(features,target)
train_loaders[domain] = DataLoader(train, opt.batch_size, shuffle = True)
train_iters[domain] = iter(train_loaders[domain])
for domain in opt.unlabeled_domains:
features, target = torch.from_numpy(d[domain].X.todense().astype('float32')), torch.from_numpy(d[domain].y)#.reshape(-1,1))
uset = data_utils.TensorDataset(features,target)
unlabeled_loaders[domain] = DataLoader(uset,opt.batch_size, shuffle = True)
unlabeled_iters[domain] = iter(unlabeled_loaders[domain])
return train_loaders, train_iters, unlabeled_loaders, unlabeled_iters
def fit(self, d, *args, **kwargs):
#minibatches = create_minibatch(X, y, z, batch_size)
#TODO: make this able to fit consecutively
train_loaders, train_iters, unlabeled_loaders, unlabeled_iters = self.prepare_data(d)
#Training
self.F_s = MlpFeatureExtractor(d['train'].X.shape[1], opt.F_hidden_sizes,opt.shared_hidden_size, opt.dropout)
self.F_d = {}
for domain in opt.domains:
self.F_d[domain] = MlpFeatureExtractor(d['train'].X.shape[1], opt.F_hidden_sizes, opt.domain_hidden_size, opt.dropout)
self.C = SentimentClassifier(opt.C_layers, opt.shared_hidden_size + opt.domain_hidden_size, opt.shared_hidden_size + opt.domain_hidden_size, 2,opt.dropout, opt.C_bn)
self.D = DomainClassifier(opt.D_layers, opt.shared_hidden_size, opt.shared_hidden_size,len(opt.all_domains), opt.loss, opt.dropout, opt.D_bn)
# print("try")
# print(opt.device)
self.F_s, self.C, self.D = self.F_s.to(opt.device), self.C.to(opt.device), self.D.to(opt.device)
for f_d in self.F_d.values():
f_d = f_d.to(opt.device)
# print("endtry")
# # optimizers
optimizer = optim.Adam(itertools.chain(*map(list, [self.F_s.parameters() if self.F_s else [], self.C.parameters()] + [f.parameters() for f in self.F_d.values()])), lr=0.0001)
optimizerD = optim.Adam(self.D.parameters(), lr=0.0001)
loss_d_res = []
l_d_res = []
l_c_res = []
for epoch in range(opt.max_epoch):
self.F_s.train()
self.C.train()
self.D.train()
for f in self.F_d.values():
f.train()
# training accuracy
correct, total = defaultdict(int), defaultdict(int)
# D accuracy
d_correct, d_total = 0, 0
# conceptually view 1 epoch as 1 epoch of the first domain
num_iter = len(train_loaders[opt.domains[0]])
for i in range(num_iter):
# D iterations
utils.freeze_net(self.F_s)
map(utils.freeze_net, self.F_d.values())
utils.freeze_net(self.C)
utils.unfreeze_net(self.D)
# optional WGAN n_critic trick
n_critic = opt.n_critic
for _ in range(n_critic):
self.D.zero_grad()
loss_d = {}
# train on both labeled and unlabeled domains
for domain in opt.unlabeled_domains:
# targets not used
d_inputs, _ = utils.endless_get_next_batch(
unlabeled_loaders, unlabeled_iters, domain)
d_inputs = d_inputs.to(opt.device)
d_targets = utils.get_domain_label(opt.loss, domain, len(d_inputs))
shared_feat = self.F_s(d_inputs)
d_outputs = self.D(shared_feat)
# D accuracy
_, pred = torch.max(d_outputs, 1)
d_total += len(d_inputs)
if opt.loss.lower() == 'l2':
_, tgt_indices = torch.max(d_targets, 1)
d_correct += (pred==tgt_indices).sum().item()
l_d = functional.mse_loss(d_outputs, d_targets)
l_d.backward()
else:
d_correct += (pred==d_targets).sum().item()
l_d = functional.nll_loss(d_outputs, d_targets)
l_d.backward()
loss_d[domain] = l_d.item()
optimizerD.step()
# F&C iteration
utils.unfreeze_net(self.F_s)
map(utils.unfreeze_net, self.F_d.values())
utils.unfreeze_net(self.C)
utils.freeze_net(self.D)
#if opt.fix_emb:
# utils.freeze_net(self.F_s.word_emb)
# map(utils.freeze_net, self.F_d.values())
self.F_s.zero_grad()
for f_d in self.F_d.values():
f_d.zero_grad()
self.C.zero_grad()
shared_feats, domain_feats = [], []
for domain in opt.domains:
inputs, targets = utils.endless_get_next_batch(
train_loaders, train_iters, domain)
#target = torch.int64 rn
targets = targets.to(opt.device)
inputs = inputs.to(opt.device)
shared_feat = self.F_s(inputs)
shared_feats.append(shared_feat)
domain_feat = self.F_d[domain](inputs)
domain_feats.append(domain_feat)
features = torch.cat((shared_feat, domain_feat), dim=1)
c_outputs = self.C(features)
#return c_outputs, targets
#DEVICE SIDE TRIGGERED ERROR OCCUR HERE (l_c=...)
l_c = functional.nll_loss(c_outputs, targets)
l_c.backward(retain_graph=True)
# training accuracy
_, pred = torch.max(c_outputs, 1)
total[domain] += targets.size(0)
correct[domain] += (pred == targets).sum().item()
# update F with D gradients on all domains
for domain in opt.unlabeled_domains:
d_inputs, _ = utils.endless_get_next_batch(
unlabeled_loaders, unlabeled_iters, domain)
d_inputs = d_inputs.to(opt.device)
shared_feat = self.F_s(d_inputs)
d_outputs = self.D(shared_feat)
if opt.loss.lower() == 'gr':
d_targets = utils.get_domain_label(opt.loss, domain, len(d_inputs))
l_d = functional.nll_loss(d_outputs, d_targets)
if opt.lambd > 0:
l_d *= -opt.lambd | elif opt.loss.lower() == 'l2':
d_targets = utils.get_random_domain_label(opt.loss, len(d_inputs))
l_d = functional.mse_loss(d_outputs, d_targets)
if opt.lambd > 0:
l_d *= opt.lambd
l_d.backward()
optimizer.step()
# print(loss_d)
# print('l_d loss: {}'.format(l_d.item()))
# print('l_c loss: {}'.format(l_c.item()))
loss_d_res.append(loss_d['test'])
l_d_res.append(l_d.item())
l_c_res.append(l_c.item())
if (epoch + 1) % kwargs["display_step"] == 0:
print(
"Epoch:", "%04d, done" % (epoch + 1) #"cost=", "{:.9f}"#.format(l_d.data[0])
)
return loss_d_res, l_d_res, l_c_res
def transform(self, d, *args, **kwargs):
F_d = self.F_d[opt.domains[0]]
self.F_s.eval()
F_d.eval()
self.C.eval()
_,_,_,it = self.prepare_data(d)
it = it[opt.unlabeled_domains[0]]
correct = 0
total = 0
confusion = ConfusionMeter(opt.num_labels)
preds = []
for inputs,targets in it:
inputs = inputs.to(opt.device)
targets = targets.to(opt.device)
d_features = F_d(inputs)
features = torch.cat((self.F_s(inputs), d_features), dim=1)
outputs = self.C(features)
_, pred = torch.max(outputs, 1)
#preds.extend(pred.data)
confusion.add(pred.data, targets.data)
total += targets.size(0)
correct += (pred == targets).sum().item()
acc = correct / total
#('{}: Accuracy on {} samples: {}%'.format(name, total, 100.0*acc))
return acc, correct
#return preds
def get_name(self):
if self._name is None:
self._name = "MAN({},{},{})".format(self.k,self.m,1)
return self._name | random_line_split | |
fid_pics.py | # This script creates plots for experiments from
# ICLR 2018 submission by Bousquet, Gelly, Tolstikhin, Bernhard.
# 1. Random samples
# 2. Interpolations:
# a. Between points of the test set, linearly
# b. Take a random point from Pz and make a whole circle on geodesic
# 3. Test reconstructions
import os
import sys
import tensorflow as tf
import numpy as np
import ops
from metrics import Metrics
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import utils
from datahandler import DataHandler
NUM_PICS = 10000
SAVE_REAL_PICS = True
SAVE_PNG = True
SAVE_FAKE_PICS = False
CELEBA_DATA_DIR = 'celebA/datasets/celeba/img_align_celeba'
MNIST_DATA_DIR = 'mnist'
OUT_DIR = 'fid_pics_celeba'
class ExpInfo(object):
def __init__(self):
self.trained_model_path = None
self.model_id = None
self.pz_std = None
self.z_dim = None
self.symmetrize = None
self.dataset = None
self.alias = None
self.test_size = None
def main():
exp_name = sys.argv[-1]
create_dir(OUT_DIR)
exp_names = ['mnist_gan', 'mnist_mmd', 'celeba_gan', 'celeba_mmd']
cluster_mnist_mmd_path = './mount/GANs/results_mnist_pot_sota_worst2d_plateau_mmd_tricks_expC_81'
cluster_mnist_mmd2d_path = './mount/GANs/results_mnist_pot_smaller_zdim2_21'
cluster_mnist_gan_path = './mount/GANs/results_mnist_pot_sota_worst2d1'
cluster_mnist_vae_path = './mount/GANs/results_mnist_vae_81'
cluster_mnist_vae2d_path = './mount/GANs/results_mnist_vae_zdim2_21'
cluster_celeba_gan_path = './mount/GANs/results_celeba_pot_worst2d_plateau_gan_jsmod_641'
cluster_celeba_vae_path = './mount/GANs/results_celeba_vae_641'
cluster_celeba_mmd_path = './mount/GANs/results_celeba_pot_worst2d_plateau_mmd_642'
cluster_celeba_mmd_began_path = './mount/GANs/results_celeba_pot_worst2d_plateau_mmd_began_642'
model_name_prefix = 'trained-pot-'
# Exp 1: CelebA with WAE+MMD on 64 dimensional Z space, DCGAN architecture
exp1 = ExpInfo()
exp1.trained_model_path = cluster_celeba_mmd_path
exp1.model_id = '378720'
exp1.pz_std = 2.0
exp1.z_dim = 64
exp1.symmetrize = True
exp1.dataset = 'celebA'
exp1.alias = 'celeba_mmd_dcgan'
exp1.test_size = 512
# Exp 2: CelebA with WAE+GAN on 64 dimensional Z space, DCGAN architecture
exp2 = ExpInfo()
exp2.trained_model_path = cluster_celeba_gan_path
exp2.model_id = '126480'
exp2.pz_std = 2.0
exp2.z_dim = 64
exp2.symmetrize = True
exp2.dataset = 'celebA'
exp2.alias = 'celeba_gan_dcgan'
exp2.test_size = 512
# Exp 3: MNIST with WAE+MMD on 8 dimensional Z space, DCGAN architecture
exp3 = ExpInfo()
exp3.trained_model_path = cluster_mnist_mmd_path
exp3.model_id = '55200'
exp3.pz_std = 1.0
exp3.z_dim = 8
exp3.symmetrize = False
exp3.dataset = 'mnist'
exp3.alias = 'mnist_mmd_dcgan'
exp3.test_size = 1000
# Exp 4: MNIST with WAE+GAN on 8 dimensional Z space, DCGAN architecture
exp4 = ExpInfo()
exp4.trained_model_path = cluster_mnist_gan_path
exp4.model_id = '62100'
exp4.pz_std = 2.0
exp4.z_dim = 8
exp4.symmetrize = False
exp4.dataset = 'mnist'
exp4.alias = 'mnist_gan_dcgan'
exp4.test_size = 1000
# Exp 5: CelebA with WAE+MMD on 64 dimensional Z space, BEGAN architecture
exp5 = ExpInfo()
exp5.trained_model_path = cluster_celeba_mmd_began_path
exp5.model_id = '157800'
exp5.pz_std = 2.0
exp5.z_dim = 64
exp5.symmetrize = True
exp5.dataset = 'celebA'
exp5.alias = 'celeba_mmd_began'
exp5.test_size = 512
# Exp 6: MNIST with VAE on 8 dimensional Z space, DCGAN architecture
exp6 = ExpInfo()
exp6.trained_model_path = cluster_mnist_vae_path
exp6.model_id = 'final-69000'
exp6.pz_std = 1.0
exp6.z_dim = 8
exp6.symmetrize = False
exp6.dataset = 'mnist'
exp6.alias = 'mnist_vae_dcgan'
exp6.test_size = 1000
# Exp 7: MNIST with VAE on 2 dimensional Z space, DCGAN architecture
exp7 = ExpInfo()
exp7.trained_model_path = cluster_mnist_vae2d_path
exp7.model_id = 'final-69000'
exp7.pz_std = 1.0
exp7.z_dim = 2
exp7.symmetrize = False
exp7.dataset = 'mnist'
exp7.alias = 'mnist_vae_2d_dcgan'
exp7.test_size = 1000
# Exp 8: MNIST with WAE-MMD on 2 dimensional Z space, DCGAN architecture
exp8 = ExpInfo()
exp8.trained_model_path = cluster_mnist_mmd2d_path
exp8.model_id = 'final-69000'
exp8.pz_std = 2.0
exp8.z_dim = 2
exp8.symmetrize = False
exp8.dataset = 'mnist'
exp8.alias = 'mnist_mmd_2d_dcgan'
exp8.test_size = 1000
# Exp 9: CelebA with VAE on 64 dimensional Z space, dcgan architecture
exp9 = ExpInfo()
exp9.trained_model_path = cluster_celeba_vae_path
exp9.model_id = '126240'
exp9.pz_std = 1.0
exp9.z_dim = 64
exp9.symmetrize = True
exp9.dataset = 'celebA'
exp9.alias = 'celeba_vae'
exp9.test_size = 512
if exp_name == 'celeba_mmd_dcgan':
exp = exp1
elif exp_name == 'celeba_gan_dcgan':
exp = exp2
elif exp_name == 'mnist_mmd_dcgan':
exp = exp3
elif exp_name == 'mnist_gan_dcgan':
exp = exp4
elif exp_name == 'celeba_mmd_began':
exp = exp5
elif exp_name == 'mnist_vae':
exp = exp6
elif exp_name == 'mnist_vae_2d':
exp = exp7
elif exp_name == 'mnist_mmd_2d':
exp = exp8
elif exp_name == 'celeba_vae':
exp = exp9
exp_list = [exp]
for exp in exp_list:
output_dir = os.path.join(OUT_DIR, exp.alias)
create_dir(output_dir)
z_dim = exp.z_dim
pz_std = exp.pz_std
dataset = exp.dataset
model_path = exp.trained_model_path
normalyze = exp.symmetrize
if SAVE_REAL_PICS:
pic_dir = os.path.join(output_dir, 'real')
create_dir(pic_dir)
# Saving real pics
opts = {}
opts['dataset'] = dataset
opts['input_normalize_sym'] = normalyze
opts['work_dir'] = output_dir
if exp.dataset == 'celebA':
opts['data_dir'] = CELEBA_DATA_DIR
elif exp.dataset == 'mnist':
opts['data_dir'] = MNIST_DATA_DIR
opts['celebA_crop'] = 'closecrop'
data = DataHandler(opts)
pic_id = 1
if dataset == 'celebA':
shuffled_ids = np.load(os.path.join(model_path, 'shuffled_training_ids'))
test_ids = shuffled_ids[-exp.test_size:]
test_images = data.data
train_ids = shuffled_ids[:-exp.test_size]
train_images = data.data
else:
test_images = data.test_data.X
train_images = data.data.X
train_ids = range(len(train_images))
test_ids = range(len(test_images))
if SAVE_PNG:
for idx in test_ids:
if pic_id % 1000 == 0:
print 'Saved %d/%d' % (pic_id, NUM_PICS)
save_pic(test_images[idx], os.path.join(pic_dir, 'real_image{:05d}.png'.format(pic_id)), exp)
pic_id += 1
if pic_id > NUM_PICS:
break
num_remain = max(NUM_PICS - len(test_ids), 0)
train_size = data.num_points
rand_train_ids = np.random.choice(train_size, num_remain, replace=False)
rand_train_ids = [train_ids[idx] for idx in rand_train_ids]
rand_train_pics = train_images[rand_train_ids]
if SAVE_PNG:
for i in range(num_remain):
if pic_id % 1000 == 0:
print 'Saved %d/%d' % (pic_id, NUM_PICS)
save_pic(rand_train_pics[i], os.path.join(pic_dir, 'real_image{:05d}.png'.format(pic_id)), exp)
pic_id += 1
all_pics = np.vstack([test_images, rand_train_pics])
all_pics = all_pics.astype(np.float)
if len(all_pics) > NUM_PICS:
all_pics = all_pics[:NUM_PICS]
np.random.shuffle(all_pics)
np.save(os.path.join(output_dir, 'real'), all_pics)
if SAVE_FAKE_PICS:
with tf.Session() as sess:
with sess.graph.as_default():
saver = tf.train.import_meta_graph(
os.path.join(model_path, 'checkpoints', model_name_prefix + exp.model_id + '.meta'))
saver.restore(sess, os.path.join(model_path, 'checkpoints', model_name_prefix + exp.model_id))
real_points_ph = tf.get_collection('real_points_ph')[0]
noise_ph = tf.get_collection('noise_ph')[0]
is_training_ph = tf.get_collection('is_training_ph')[0]
decoder = tf.get_collection('decoder')[0]
# Saving random samples
mean = np.zeros(z_dim)
cov = np.identity(z_dim)
noise = pz_std * np.random.multivariate_normal(
mean, cov, NUM_PICS).astype(np.float32)
res = sess.run(decoder, feed_dict={noise_ph: noise, is_training_ph: False})
pic_dir = os.path.join(output_dir, 'fake')
create_dir(pic_dir)
if SAVE_PNG:
for i in range(1, NUM_PICS + 1):
if i % 1000 == 0:
print 'Saved %d/%d' % (i, NUM_PICS)
save_pic(res[i-1], os.path.join(pic_dir, 'fake_image{:05d}.png'.format(i)), exp)
np.save(os.path.join(output_dir, 'fake'), res)
def save_pic(pic, path, exp):
|
def create_dir(d):
if not tf.gfile.IsDirectory(d):
tf.gfile.MakeDirs(d)
main()
| if len(pic.shape) == 4:
pic = pic[0]
height = pic.shape[0]
width = pic.shape[1]
fig = plt.figure(frameon=False, figsize=(width, height))#, dpi=1)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
if exp.symmetrize:
pic = (pic + 1.) / 2.
if exp.dataset == 'mnist':
pic = pic[:, :, 0]
pic = 1. - pic
if exp.dataset == 'mnist':
ax.imshow(pic, cmap='Greys', interpolation='none')
else:
ax.imshow(pic, interpolation='none')
fig.savefig(path, dpi=1, format='png')
plt.close()
# if exp.dataset == 'mnist':
# pic = pic[:, :, 0]
# pic = 1. - pic
# ax = plt.imshow(pic, cmap='Greys', interpolation='none')
# else:
# ax = plt.imshow(pic, interpolation='none')
# ax.axes.get_xaxis().set_ticks([])
# ax.axes.get_yaxis().set_ticks([])
# ax.axes.set_xlim([0, width])
# ax.axes.set_ylim([height, 0])
# ax.axes.set_aspect(1)
# fig.savefig(path, format='png')
# plt.close() | identifier_body |
fid_pics.py | # This script creates plots for experiments from
# ICLR 2018 submission by Bousquet, Gelly, Tolstikhin, Bernhard.
# 1. Random samples
# 2. Interpolations:
# a. Between points of the test set, linearly
# b. Take a random point from Pz and make a whole circle on geodesic
# 3. Test reconstructions
import os
import sys
import tensorflow as tf
import numpy as np
import ops
from metrics import Metrics
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import utils
from datahandler import DataHandler
NUM_PICS = 10000
SAVE_REAL_PICS = True
SAVE_PNG = True
SAVE_FAKE_PICS = False
CELEBA_DATA_DIR = 'celebA/datasets/celeba/img_align_celeba'
MNIST_DATA_DIR = 'mnist'
OUT_DIR = 'fid_pics_celeba'
class ExpInfo(object):
def __init__(self):
self.trained_model_path = None
self.model_id = None
self.pz_std = None
self.z_dim = None
self.symmetrize = None
self.dataset = None
self.alias = None
self.test_size = None
def main():
exp_name = sys.argv[-1]
create_dir(OUT_DIR)
exp_names = ['mnist_gan', 'mnist_mmd', 'celeba_gan', 'celeba_mmd']
cluster_mnist_mmd_path = './mount/GANs/results_mnist_pot_sota_worst2d_plateau_mmd_tricks_expC_81'
cluster_mnist_mmd2d_path = './mount/GANs/results_mnist_pot_smaller_zdim2_21'
cluster_mnist_gan_path = './mount/GANs/results_mnist_pot_sota_worst2d1'
cluster_mnist_vae_path = './mount/GANs/results_mnist_vae_81'
cluster_mnist_vae2d_path = './mount/GANs/results_mnist_vae_zdim2_21'
cluster_celeba_gan_path = './mount/GANs/results_celeba_pot_worst2d_plateau_gan_jsmod_641'
cluster_celeba_vae_path = './mount/GANs/results_celeba_vae_641'
cluster_celeba_mmd_path = './mount/GANs/results_celeba_pot_worst2d_plateau_mmd_642'
cluster_celeba_mmd_began_path = './mount/GANs/results_celeba_pot_worst2d_plateau_mmd_began_642'
model_name_prefix = 'trained-pot-'
# Exp 1: CelebA with WAE+MMD on 64 dimensional Z space, DCGAN architecture
exp1 = ExpInfo()
exp1.trained_model_path = cluster_celeba_mmd_path
exp1.model_id = '378720'
exp1.pz_std = 2.0
exp1.z_dim = 64
exp1.symmetrize = True
exp1.dataset = 'celebA'
exp1.alias = 'celeba_mmd_dcgan'
exp1.test_size = 512
# Exp 2: CelebA with WAE+GAN on 64 dimensional Z space, DCGAN architecture
exp2 = ExpInfo()
exp2.trained_model_path = cluster_celeba_gan_path
exp2.model_id = '126480'
exp2.pz_std = 2.0
exp2.z_dim = 64
exp2.symmetrize = True
exp2.dataset = 'celebA'
exp2.alias = 'celeba_gan_dcgan'
exp2.test_size = 512
# Exp 3: MNIST with WAE+MMD on 8 dimensional Z space, DCGAN architecture
exp3 = ExpInfo()
exp3.trained_model_path = cluster_mnist_mmd_path
exp3.model_id = '55200'
exp3.pz_std = 1.0
exp3.z_dim = 8
exp3.symmetrize = False
exp3.dataset = 'mnist'
exp3.alias = 'mnist_mmd_dcgan'
exp3.test_size = 1000
# Exp 4: MNIST with WAE+GAN on 8 dimensional Z space, DCGAN architecture
exp4 = ExpInfo()
exp4.trained_model_path = cluster_mnist_gan_path
exp4.model_id = '62100'
exp4.pz_std = 2.0
exp4.z_dim = 8
exp4.symmetrize = False
exp4.dataset = 'mnist'
exp4.alias = 'mnist_gan_dcgan'
exp4.test_size = 1000
# Exp 5: CelebA with WAE+MMD on 64 dimensional Z space, BEGAN architecture
exp5 = ExpInfo()
exp5.trained_model_path = cluster_celeba_mmd_began_path
exp5.model_id = '157800'
exp5.pz_std = 2.0
exp5.z_dim = 64
exp5.symmetrize = True
exp5.dataset = 'celebA'
exp5.alias = 'celeba_mmd_began'
exp5.test_size = 512
# Exp 6: MNIST with VAE on 8 dimensional Z space, DCGAN architecture
exp6 = ExpInfo()
exp6.trained_model_path = cluster_mnist_vae_path
exp6.model_id = 'final-69000'
exp6.pz_std = 1.0
exp6.z_dim = 8
exp6.symmetrize = False
exp6.dataset = 'mnist'
exp6.alias = 'mnist_vae_dcgan'
exp6.test_size = 1000
# Exp 7: MNIST with VAE on 2 dimensional Z space, DCGAN architecture
exp7 = ExpInfo()
exp7.trained_model_path = cluster_mnist_vae2d_path
exp7.model_id = 'final-69000'
exp7.pz_std = 1.0
exp7.z_dim = 2
exp7.symmetrize = False
exp7.dataset = 'mnist'
exp7.alias = 'mnist_vae_2d_dcgan'
exp7.test_size = 1000
# Exp 8: MNIST with WAE-MMD on 2 dimensional Z space, DCGAN architecture
exp8 = ExpInfo()
exp8.trained_model_path = cluster_mnist_mmd2d_path
exp8.model_id = 'final-69000'
exp8.pz_std = 2.0
exp8.z_dim = 2
exp8.symmetrize = False
exp8.dataset = 'mnist'
exp8.alias = 'mnist_mmd_2d_dcgan'
exp8.test_size = 1000
# Exp 9: CelebA with VAE on 64 dimensional Z space, dcgan architecture
exp9 = ExpInfo()
exp9.trained_model_path = cluster_celeba_vae_path
exp9.model_id = '126240'
exp9.pz_std = 1.0
exp9.z_dim = 64
exp9.symmetrize = True
exp9.dataset = 'celebA'
exp9.alias = 'celeba_vae'
exp9.test_size = 512
if exp_name == 'celeba_mmd_dcgan':
exp = exp1
elif exp_name == 'celeba_gan_dcgan':
exp = exp2
elif exp_name == 'mnist_mmd_dcgan':
exp = exp3
elif exp_name == 'mnist_gan_dcgan':
exp = exp4
elif exp_name == 'celeba_mmd_began':
exp = exp5
elif exp_name == 'mnist_vae':
exp = exp6
elif exp_name == 'mnist_vae_2d':
exp = exp7
elif exp_name == 'mnist_mmd_2d':
|
elif exp_name == 'celeba_vae':
exp = exp9
exp_list = [exp]
for exp in exp_list:
output_dir = os.path.join(OUT_DIR, exp.alias)
create_dir(output_dir)
z_dim = exp.z_dim
pz_std = exp.pz_std
dataset = exp.dataset
model_path = exp.trained_model_path
normalyze = exp.symmetrize
if SAVE_REAL_PICS:
pic_dir = os.path.join(output_dir, 'real')
create_dir(pic_dir)
# Saving real pics
opts = {}
opts['dataset'] = dataset
opts['input_normalize_sym'] = normalyze
opts['work_dir'] = output_dir
if exp.dataset == 'celebA':
opts['data_dir'] = CELEBA_DATA_DIR
elif exp.dataset == 'mnist':
opts['data_dir'] = MNIST_DATA_DIR
opts['celebA_crop'] = 'closecrop'
data = DataHandler(opts)
pic_id = 1
if dataset == 'celebA':
shuffled_ids = np.load(os.path.join(model_path, 'shuffled_training_ids'))
test_ids = shuffled_ids[-exp.test_size:]
test_images = data.data
train_ids = shuffled_ids[:-exp.test_size]
train_images = data.data
else:
test_images = data.test_data.X
train_images = data.data.X
train_ids = range(len(train_images))
test_ids = range(len(test_images))
if SAVE_PNG:
for idx in test_ids:
if pic_id % 1000 == 0:
print 'Saved %d/%d' % (pic_id, NUM_PICS)
save_pic(test_images[idx], os.path.join(pic_dir, 'real_image{:05d}.png'.format(pic_id)), exp)
pic_id += 1
if pic_id > NUM_PICS:
break
num_remain = max(NUM_PICS - len(test_ids), 0)
train_size = data.num_points
rand_train_ids = np.random.choice(train_size, num_remain, replace=False)
rand_train_ids = [train_ids[idx] for idx in rand_train_ids]
rand_train_pics = train_images[rand_train_ids]
if SAVE_PNG:
for i in range(num_remain):
if pic_id % 1000 == 0:
print 'Saved %d/%d' % (pic_id, NUM_PICS)
save_pic(rand_train_pics[i], os.path.join(pic_dir, 'real_image{:05d}.png'.format(pic_id)), exp)
pic_id += 1
all_pics = np.vstack([test_images, rand_train_pics])
all_pics = all_pics.astype(np.float)
if len(all_pics) > NUM_PICS:
all_pics = all_pics[:NUM_PICS]
np.random.shuffle(all_pics)
np.save(os.path.join(output_dir, 'real'), all_pics)
if SAVE_FAKE_PICS:
with tf.Session() as sess:
with sess.graph.as_default():
saver = tf.train.import_meta_graph(
os.path.join(model_path, 'checkpoints', model_name_prefix + exp.model_id + '.meta'))
saver.restore(sess, os.path.join(model_path, 'checkpoints', model_name_prefix + exp.model_id))
real_points_ph = tf.get_collection('real_points_ph')[0]
noise_ph = tf.get_collection('noise_ph')[0]
is_training_ph = tf.get_collection('is_training_ph')[0]
decoder = tf.get_collection('decoder')[0]
# Saving random samples
mean = np.zeros(z_dim)
cov = np.identity(z_dim)
noise = pz_std * np.random.multivariate_normal(
mean, cov, NUM_PICS).astype(np.float32)
res = sess.run(decoder, feed_dict={noise_ph: noise, is_training_ph: False})
pic_dir = os.path.join(output_dir, 'fake')
create_dir(pic_dir)
if SAVE_PNG:
for i in range(1, NUM_PICS + 1):
if i % 1000 == 0:
print 'Saved %d/%d' % (i, NUM_PICS)
save_pic(res[i-1], os.path.join(pic_dir, 'fake_image{:05d}.png'.format(i)), exp)
np.save(os.path.join(output_dir, 'fake'), res)
def save_pic(pic, path, exp):
if len(pic.shape) == 4:
pic = pic[0]
height = pic.shape[0]
width = pic.shape[1]
fig = plt.figure(frameon=False, figsize=(width, height))#, dpi=1)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
if exp.symmetrize:
pic = (pic + 1.) / 2.
if exp.dataset == 'mnist':
pic = pic[:, :, 0]
pic = 1. - pic
if exp.dataset == 'mnist':
ax.imshow(pic, cmap='Greys', interpolation='none')
else:
ax.imshow(pic, interpolation='none')
fig.savefig(path, dpi=1, format='png')
plt.close()
# if exp.dataset == 'mnist':
# pic = pic[:, :, 0]
# pic = 1. - pic
# ax = plt.imshow(pic, cmap='Greys', interpolation='none')
# else:
# ax = plt.imshow(pic, interpolation='none')
# ax.axes.get_xaxis().set_ticks([])
# ax.axes.get_yaxis().set_ticks([])
# ax.axes.set_xlim([0, width])
# ax.axes.set_ylim([height, 0])
# ax.axes.set_aspect(1)
# fig.savefig(path, format='png')
# plt.close()
def create_dir(d):
if not tf.gfile.IsDirectory(d):
tf.gfile.MakeDirs(d)
main()
| exp = exp8 | conditional_block |
fid_pics.py | # This script creates plots for experiments from
# ICLR 2018 submission by Bousquet, Gelly, Tolstikhin, Bernhard.
# 1. Random samples
# 2. Interpolations:
# a. Between points of the test set, linearly
# b. Take a random point from Pz and make a whole circle on geodesic
# 3. Test reconstructions
import os
import sys
import tensorflow as tf
import numpy as np
import ops
from metrics import Metrics
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import utils
from datahandler import DataHandler
NUM_PICS = 10000
SAVE_REAL_PICS = True
SAVE_PNG = True
SAVE_FAKE_PICS = False
CELEBA_DATA_DIR = 'celebA/datasets/celeba/img_align_celeba'
MNIST_DATA_DIR = 'mnist'
OUT_DIR = 'fid_pics_celeba'
class ExpInfo(object):
def __init__(self):
self.trained_model_path = None
self.model_id = None
self.pz_std = None
self.z_dim = None | self.symmetrize = None
self.dataset = None
self.alias = None
self.test_size = None
def main():
exp_name = sys.argv[-1]
create_dir(OUT_DIR)
exp_names = ['mnist_gan', 'mnist_mmd', 'celeba_gan', 'celeba_mmd']
cluster_mnist_mmd_path = './mount/GANs/results_mnist_pot_sota_worst2d_plateau_mmd_tricks_expC_81'
cluster_mnist_mmd2d_path = './mount/GANs/results_mnist_pot_smaller_zdim2_21'
cluster_mnist_gan_path = './mount/GANs/results_mnist_pot_sota_worst2d1'
cluster_mnist_vae_path = './mount/GANs/results_mnist_vae_81'
cluster_mnist_vae2d_path = './mount/GANs/results_mnist_vae_zdim2_21'
cluster_celeba_gan_path = './mount/GANs/results_celeba_pot_worst2d_plateau_gan_jsmod_641'
cluster_celeba_vae_path = './mount/GANs/results_celeba_vae_641'
cluster_celeba_mmd_path = './mount/GANs/results_celeba_pot_worst2d_plateau_mmd_642'
cluster_celeba_mmd_began_path = './mount/GANs/results_celeba_pot_worst2d_plateau_mmd_began_642'
model_name_prefix = 'trained-pot-'
# Exp 1: CelebA with WAE+MMD on 64 dimensional Z space, DCGAN architecture
exp1 = ExpInfo()
exp1.trained_model_path = cluster_celeba_mmd_path
exp1.model_id = '378720'
exp1.pz_std = 2.0
exp1.z_dim = 64
exp1.symmetrize = True
exp1.dataset = 'celebA'
exp1.alias = 'celeba_mmd_dcgan'
exp1.test_size = 512
# Exp 2: CelebA with WAE+GAN on 64 dimensional Z space, DCGAN architecture
exp2 = ExpInfo()
exp2.trained_model_path = cluster_celeba_gan_path
exp2.model_id = '126480'
exp2.pz_std = 2.0
exp2.z_dim = 64
exp2.symmetrize = True
exp2.dataset = 'celebA'
exp2.alias = 'celeba_gan_dcgan'
exp2.test_size = 512
# Exp 3: MNIST with WAE+MMD on 8 dimensional Z space, DCGAN architecture
exp3 = ExpInfo()
exp3.trained_model_path = cluster_mnist_mmd_path
exp3.model_id = '55200'
exp3.pz_std = 1.0
exp3.z_dim = 8
exp3.symmetrize = False
exp3.dataset = 'mnist'
exp3.alias = 'mnist_mmd_dcgan'
exp3.test_size = 1000
# Exp 4: MNIST with WAE+GAN on 8 dimensional Z space, DCGAN architecture
exp4 = ExpInfo()
exp4.trained_model_path = cluster_mnist_gan_path
exp4.model_id = '62100'
exp4.pz_std = 2.0
exp4.z_dim = 8
exp4.symmetrize = False
exp4.dataset = 'mnist'
exp4.alias = 'mnist_gan_dcgan'
exp4.test_size = 1000
# Exp 5: CelebA with WAE+MMD on 64 dimensional Z space, BEGAN architecture
exp5 = ExpInfo()
exp5.trained_model_path = cluster_celeba_mmd_began_path
exp5.model_id = '157800'
exp5.pz_std = 2.0
exp5.z_dim = 64
exp5.symmetrize = True
exp5.dataset = 'celebA'
exp5.alias = 'celeba_mmd_began'
exp5.test_size = 512
# Exp 6: MNIST with VAE on 8 dimensional Z space, DCGAN architecture
exp6 = ExpInfo()
exp6.trained_model_path = cluster_mnist_vae_path
exp6.model_id = 'final-69000'
exp6.pz_std = 1.0
exp6.z_dim = 8
exp6.symmetrize = False
exp6.dataset = 'mnist'
exp6.alias = 'mnist_vae_dcgan'
exp6.test_size = 1000
# Exp 7: MNIST with VAE on 2 dimensional Z space, DCGAN architecture
exp7 = ExpInfo()
exp7.trained_model_path = cluster_mnist_vae2d_path
exp7.model_id = 'final-69000'
exp7.pz_std = 1.0
exp7.z_dim = 2
exp7.symmetrize = False
exp7.dataset = 'mnist'
exp7.alias = 'mnist_vae_2d_dcgan'
exp7.test_size = 1000
# Exp 8: MNIST with WAE-MMD on 2 dimensional Z space, DCGAN architecture
exp8 = ExpInfo()
exp8.trained_model_path = cluster_mnist_mmd2d_path
exp8.model_id = 'final-69000'
exp8.pz_std = 2.0
exp8.z_dim = 2
exp8.symmetrize = False
exp8.dataset = 'mnist'
exp8.alias = 'mnist_mmd_2d_dcgan'
exp8.test_size = 1000
# Exp 9: CelebA with VAE on 64 dimensional Z space, dcgan architecture
exp9 = ExpInfo()
exp9.trained_model_path = cluster_celeba_vae_path
exp9.model_id = '126240'
exp9.pz_std = 1.0
exp9.z_dim = 64
exp9.symmetrize = True
exp9.dataset = 'celebA'
exp9.alias = 'celeba_vae'
exp9.test_size = 512
if exp_name == 'celeba_mmd_dcgan':
exp = exp1
elif exp_name == 'celeba_gan_dcgan':
exp = exp2
elif exp_name == 'mnist_mmd_dcgan':
exp = exp3
elif exp_name == 'mnist_gan_dcgan':
exp = exp4
elif exp_name == 'celeba_mmd_began':
exp = exp5
elif exp_name == 'mnist_vae':
exp = exp6
elif exp_name == 'mnist_vae_2d':
exp = exp7
elif exp_name == 'mnist_mmd_2d':
exp = exp8
elif exp_name == 'celeba_vae':
exp = exp9
exp_list = [exp]
for exp in exp_list:
output_dir = os.path.join(OUT_DIR, exp.alias)
create_dir(output_dir)
z_dim = exp.z_dim
pz_std = exp.pz_std
dataset = exp.dataset
model_path = exp.trained_model_path
normalyze = exp.symmetrize
if SAVE_REAL_PICS:
pic_dir = os.path.join(output_dir, 'real')
create_dir(pic_dir)
# Saving real pics
opts = {}
opts['dataset'] = dataset
opts['input_normalize_sym'] = normalyze
opts['work_dir'] = output_dir
if exp.dataset == 'celebA':
opts['data_dir'] = CELEBA_DATA_DIR
elif exp.dataset == 'mnist':
opts['data_dir'] = MNIST_DATA_DIR
opts['celebA_crop'] = 'closecrop'
data = DataHandler(opts)
pic_id = 1
if dataset == 'celebA':
shuffled_ids = np.load(os.path.join(model_path, 'shuffled_training_ids'))
test_ids = shuffled_ids[-exp.test_size:]
test_images = data.data
train_ids = shuffled_ids[:-exp.test_size]
train_images = data.data
else:
test_images = data.test_data.X
train_images = data.data.X
train_ids = range(len(train_images))
test_ids = range(len(test_images))
if SAVE_PNG:
for idx in test_ids:
if pic_id % 1000 == 0:
print 'Saved %d/%d' % (pic_id, NUM_PICS)
save_pic(test_images[idx], os.path.join(pic_dir, 'real_image{:05d}.png'.format(pic_id)), exp)
pic_id += 1
if pic_id > NUM_PICS:
break
num_remain = max(NUM_PICS - len(test_ids), 0)
train_size = data.num_points
rand_train_ids = np.random.choice(train_size, num_remain, replace=False)
rand_train_ids = [train_ids[idx] for idx in rand_train_ids]
rand_train_pics = train_images[rand_train_ids]
if SAVE_PNG:
for i in range(num_remain):
if pic_id % 1000 == 0:
print 'Saved %d/%d' % (pic_id, NUM_PICS)
save_pic(rand_train_pics[i], os.path.join(pic_dir, 'real_image{:05d}.png'.format(pic_id)), exp)
pic_id += 1
all_pics = np.vstack([test_images, rand_train_pics])
all_pics = all_pics.astype(np.float)
if len(all_pics) > NUM_PICS:
all_pics = all_pics[:NUM_PICS]
np.random.shuffle(all_pics)
np.save(os.path.join(output_dir, 'real'), all_pics)
if SAVE_FAKE_PICS:
with tf.Session() as sess:
with sess.graph.as_default():
saver = tf.train.import_meta_graph(
os.path.join(model_path, 'checkpoints', model_name_prefix + exp.model_id + '.meta'))
saver.restore(sess, os.path.join(model_path, 'checkpoints', model_name_prefix + exp.model_id))
real_points_ph = tf.get_collection('real_points_ph')[0]
noise_ph = tf.get_collection('noise_ph')[0]
is_training_ph = tf.get_collection('is_training_ph')[0]
decoder = tf.get_collection('decoder')[0]
# Saving random samples
mean = np.zeros(z_dim)
cov = np.identity(z_dim)
noise = pz_std * np.random.multivariate_normal(
mean, cov, NUM_PICS).astype(np.float32)
res = sess.run(decoder, feed_dict={noise_ph: noise, is_training_ph: False})
pic_dir = os.path.join(output_dir, 'fake')
create_dir(pic_dir)
if SAVE_PNG:
for i in range(1, NUM_PICS + 1):
if i % 1000 == 0:
print 'Saved %d/%d' % (i, NUM_PICS)
save_pic(res[i-1], os.path.join(pic_dir, 'fake_image{:05d}.png'.format(i)), exp)
np.save(os.path.join(output_dir, 'fake'), res)
def save_pic(pic, path, exp):
if len(pic.shape) == 4:
pic = pic[0]
height = pic.shape[0]
width = pic.shape[1]
fig = plt.figure(frameon=False, figsize=(width, height))#, dpi=1)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
if exp.symmetrize:
pic = (pic + 1.) / 2.
if exp.dataset == 'mnist':
pic = pic[:, :, 0]
pic = 1. - pic
if exp.dataset == 'mnist':
ax.imshow(pic, cmap='Greys', interpolation='none')
else:
ax.imshow(pic, interpolation='none')
fig.savefig(path, dpi=1, format='png')
plt.close()
# if exp.dataset == 'mnist':
# pic = pic[:, :, 0]
# pic = 1. - pic
# ax = plt.imshow(pic, cmap='Greys', interpolation='none')
# else:
# ax = plt.imshow(pic, interpolation='none')
# ax.axes.get_xaxis().set_ticks([])
# ax.axes.get_yaxis().set_ticks([])
# ax.axes.set_xlim([0, width])
# ax.axes.set_ylim([height, 0])
# ax.axes.set_aspect(1)
# fig.savefig(path, format='png')
# plt.close()
def create_dir(d):
if not tf.gfile.IsDirectory(d):
tf.gfile.MakeDirs(d)
main() | random_line_split | |
fid_pics.py | # This script creates plots for experiments from
# ICLR 2018 submission by Bousquet, Gelly, Tolstikhin, Bernhard.
# 1. Random samples
# 2. Interpolations:
# a. Between points of the test set, linearly
# b. Take a random point from Pz and make a whole circle on geodesic
# 3. Test reconstructions
import os
import sys
import tensorflow as tf
import numpy as np
import ops
from metrics import Metrics
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import utils
from datahandler import DataHandler
NUM_PICS = 10000
SAVE_REAL_PICS = True
SAVE_PNG = True
SAVE_FAKE_PICS = False
CELEBA_DATA_DIR = 'celebA/datasets/celeba/img_align_celeba'
MNIST_DATA_DIR = 'mnist'
OUT_DIR = 'fid_pics_celeba'
class ExpInfo(object):
def __init__(self):
self.trained_model_path = None
self.model_id = None
self.pz_std = None
self.z_dim = None
self.symmetrize = None
self.dataset = None
self.alias = None
self.test_size = None
def main():
exp_name = sys.argv[-1]
create_dir(OUT_DIR)
exp_names = ['mnist_gan', 'mnist_mmd', 'celeba_gan', 'celeba_mmd']
cluster_mnist_mmd_path = './mount/GANs/results_mnist_pot_sota_worst2d_plateau_mmd_tricks_expC_81'
cluster_mnist_mmd2d_path = './mount/GANs/results_mnist_pot_smaller_zdim2_21'
cluster_mnist_gan_path = './mount/GANs/results_mnist_pot_sota_worst2d1'
cluster_mnist_vae_path = './mount/GANs/results_mnist_vae_81'
cluster_mnist_vae2d_path = './mount/GANs/results_mnist_vae_zdim2_21'
cluster_celeba_gan_path = './mount/GANs/results_celeba_pot_worst2d_plateau_gan_jsmod_641'
cluster_celeba_vae_path = './mount/GANs/results_celeba_vae_641'
cluster_celeba_mmd_path = './mount/GANs/results_celeba_pot_worst2d_plateau_mmd_642'
cluster_celeba_mmd_began_path = './mount/GANs/results_celeba_pot_worst2d_plateau_mmd_began_642'
model_name_prefix = 'trained-pot-'
# Exp 1: CelebA with WAE+MMD on 64 dimensional Z space, DCGAN architecture
exp1 = ExpInfo()
exp1.trained_model_path = cluster_celeba_mmd_path
exp1.model_id = '378720'
exp1.pz_std = 2.0
exp1.z_dim = 64
exp1.symmetrize = True
exp1.dataset = 'celebA'
exp1.alias = 'celeba_mmd_dcgan'
exp1.test_size = 512
# Exp 2: CelebA with WAE+GAN on 64 dimensional Z space, DCGAN architecture
exp2 = ExpInfo()
exp2.trained_model_path = cluster_celeba_gan_path
exp2.model_id = '126480'
exp2.pz_std = 2.0
exp2.z_dim = 64
exp2.symmetrize = True
exp2.dataset = 'celebA'
exp2.alias = 'celeba_gan_dcgan'
exp2.test_size = 512
# Exp 3: MNIST with WAE+MMD on 8 dimensional Z space, DCGAN architecture
exp3 = ExpInfo()
exp3.trained_model_path = cluster_mnist_mmd_path
exp3.model_id = '55200'
exp3.pz_std = 1.0
exp3.z_dim = 8
exp3.symmetrize = False
exp3.dataset = 'mnist'
exp3.alias = 'mnist_mmd_dcgan'
exp3.test_size = 1000
# Exp 4: MNIST with WAE+GAN on 8 dimensional Z space, DCGAN architecture
exp4 = ExpInfo()
exp4.trained_model_path = cluster_mnist_gan_path
exp4.model_id = '62100'
exp4.pz_std = 2.0
exp4.z_dim = 8
exp4.symmetrize = False
exp4.dataset = 'mnist'
exp4.alias = 'mnist_gan_dcgan'
exp4.test_size = 1000
# Exp 5: CelebA with WAE+MMD on 64 dimensional Z space, BEGAN architecture
exp5 = ExpInfo()
exp5.trained_model_path = cluster_celeba_mmd_began_path
exp5.model_id = '157800'
exp5.pz_std = 2.0
exp5.z_dim = 64
exp5.symmetrize = True
exp5.dataset = 'celebA'
exp5.alias = 'celeba_mmd_began'
exp5.test_size = 512
# Exp 6: MNIST with VAE on 8 dimensional Z space, DCGAN architecture
exp6 = ExpInfo()
exp6.trained_model_path = cluster_mnist_vae_path
exp6.model_id = 'final-69000'
exp6.pz_std = 1.0
exp6.z_dim = 8
exp6.symmetrize = False
exp6.dataset = 'mnist'
exp6.alias = 'mnist_vae_dcgan'
exp6.test_size = 1000
# Exp 7: MNIST with VAE on 2 dimensional Z space, DCGAN architecture
exp7 = ExpInfo()
exp7.trained_model_path = cluster_mnist_vae2d_path
exp7.model_id = 'final-69000'
exp7.pz_std = 1.0
exp7.z_dim = 2
exp7.symmetrize = False
exp7.dataset = 'mnist'
exp7.alias = 'mnist_vae_2d_dcgan'
exp7.test_size = 1000
# Exp 8: MNIST with WAE-MMD on 2 dimensional Z space, DCGAN architecture
exp8 = ExpInfo()
exp8.trained_model_path = cluster_mnist_mmd2d_path
exp8.model_id = 'final-69000'
exp8.pz_std = 2.0
exp8.z_dim = 2
exp8.symmetrize = False
exp8.dataset = 'mnist'
exp8.alias = 'mnist_mmd_2d_dcgan'
exp8.test_size = 1000
# Exp 9: CelebA with VAE on 64 dimensional Z space, dcgan architecture
exp9 = ExpInfo()
exp9.trained_model_path = cluster_celeba_vae_path
exp9.model_id = '126240'
exp9.pz_std = 1.0
exp9.z_dim = 64
exp9.symmetrize = True
exp9.dataset = 'celebA'
exp9.alias = 'celeba_vae'
exp9.test_size = 512
if exp_name == 'celeba_mmd_dcgan':
exp = exp1
elif exp_name == 'celeba_gan_dcgan':
exp = exp2
elif exp_name == 'mnist_mmd_dcgan':
exp = exp3
elif exp_name == 'mnist_gan_dcgan':
exp = exp4
elif exp_name == 'celeba_mmd_began':
exp = exp5
elif exp_name == 'mnist_vae':
exp = exp6
elif exp_name == 'mnist_vae_2d':
exp = exp7
elif exp_name == 'mnist_mmd_2d':
exp = exp8
elif exp_name == 'celeba_vae':
exp = exp9
exp_list = [exp]
for exp in exp_list:
output_dir = os.path.join(OUT_DIR, exp.alias)
create_dir(output_dir)
z_dim = exp.z_dim
pz_std = exp.pz_std
dataset = exp.dataset
model_path = exp.trained_model_path
normalyze = exp.symmetrize
if SAVE_REAL_PICS:
pic_dir = os.path.join(output_dir, 'real')
create_dir(pic_dir)
# Saving real pics
opts = {}
opts['dataset'] = dataset
opts['input_normalize_sym'] = normalyze
opts['work_dir'] = output_dir
if exp.dataset == 'celebA':
opts['data_dir'] = CELEBA_DATA_DIR
elif exp.dataset == 'mnist':
opts['data_dir'] = MNIST_DATA_DIR
opts['celebA_crop'] = 'closecrop'
data = DataHandler(opts)
pic_id = 1
if dataset == 'celebA':
shuffled_ids = np.load(os.path.join(model_path, 'shuffled_training_ids'))
test_ids = shuffled_ids[-exp.test_size:]
test_images = data.data
train_ids = shuffled_ids[:-exp.test_size]
train_images = data.data
else:
test_images = data.test_data.X
train_images = data.data.X
train_ids = range(len(train_images))
test_ids = range(len(test_images))
if SAVE_PNG:
for idx in test_ids:
if pic_id % 1000 == 0:
print 'Saved %d/%d' % (pic_id, NUM_PICS)
save_pic(test_images[idx], os.path.join(pic_dir, 'real_image{:05d}.png'.format(pic_id)), exp)
pic_id += 1
if pic_id > NUM_PICS:
break
num_remain = max(NUM_PICS - len(test_ids), 0)
train_size = data.num_points
rand_train_ids = np.random.choice(train_size, num_remain, replace=False)
rand_train_ids = [train_ids[idx] for idx in rand_train_ids]
rand_train_pics = train_images[rand_train_ids]
if SAVE_PNG:
for i in range(num_remain):
if pic_id % 1000 == 0:
print 'Saved %d/%d' % (pic_id, NUM_PICS)
save_pic(rand_train_pics[i], os.path.join(pic_dir, 'real_image{:05d}.png'.format(pic_id)), exp)
pic_id += 1
all_pics = np.vstack([test_images, rand_train_pics])
all_pics = all_pics.astype(np.float)
if len(all_pics) > NUM_PICS:
all_pics = all_pics[:NUM_PICS]
np.random.shuffle(all_pics)
np.save(os.path.join(output_dir, 'real'), all_pics)
if SAVE_FAKE_PICS:
with tf.Session() as sess:
with sess.graph.as_default():
saver = tf.train.import_meta_graph(
os.path.join(model_path, 'checkpoints', model_name_prefix + exp.model_id + '.meta'))
saver.restore(sess, os.path.join(model_path, 'checkpoints', model_name_prefix + exp.model_id))
real_points_ph = tf.get_collection('real_points_ph')[0]
noise_ph = tf.get_collection('noise_ph')[0]
is_training_ph = tf.get_collection('is_training_ph')[0]
decoder = tf.get_collection('decoder')[0]
# Saving random samples
mean = np.zeros(z_dim)
cov = np.identity(z_dim)
noise = pz_std * np.random.multivariate_normal(
mean, cov, NUM_PICS).astype(np.float32)
res = sess.run(decoder, feed_dict={noise_ph: noise, is_training_ph: False})
pic_dir = os.path.join(output_dir, 'fake')
create_dir(pic_dir)
if SAVE_PNG:
for i in range(1, NUM_PICS + 1):
if i % 1000 == 0:
print 'Saved %d/%d' % (i, NUM_PICS)
save_pic(res[i-1], os.path.join(pic_dir, 'fake_image{:05d}.png'.format(i)), exp)
np.save(os.path.join(output_dir, 'fake'), res)
def save_pic(pic, path, exp):
if len(pic.shape) == 4:
pic = pic[0]
height = pic.shape[0]
width = pic.shape[1]
fig = plt.figure(frameon=False, figsize=(width, height))#, dpi=1)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
if exp.symmetrize:
pic = (pic + 1.) / 2.
if exp.dataset == 'mnist':
pic = pic[:, :, 0]
pic = 1. - pic
if exp.dataset == 'mnist':
ax.imshow(pic, cmap='Greys', interpolation='none')
else:
ax.imshow(pic, interpolation='none')
fig.savefig(path, dpi=1, format='png')
plt.close()
# if exp.dataset == 'mnist':
# pic = pic[:, :, 0]
# pic = 1. - pic
# ax = plt.imshow(pic, cmap='Greys', interpolation='none')
# else:
# ax = plt.imshow(pic, interpolation='none')
# ax.axes.get_xaxis().set_ticks([])
# ax.axes.get_yaxis().set_ticks([])
# ax.axes.set_xlim([0, width])
# ax.axes.set_ylim([height, 0])
# ax.axes.set_aspect(1)
# fig.savefig(path, format='png')
# plt.close()
def | (d):
if not tf.gfile.IsDirectory(d):
tf.gfile.MakeDirs(d)
main()
| create_dir | identifier_name |
server_utils.go | //
// (C) Copyright 2021-2022 Intel Corporation.
//
// SPDX-License-Identifier: BSD-2-Clause-Patent
//
package server
import (
"bytes"
"context"
"fmt"
"net"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"sync"
"github.com/dustin/go-humanize"
"github.com/pkg/errors"
"google.golang.org/grpc"
"github.com/daos-stack/daos/src/control/common"
"github.com/daos-stack/daos/src/control/events"
"github.com/daos-stack/daos/src/control/lib/control"
"github.com/daos-stack/daos/src/control/lib/hardware"
"github.com/daos-stack/daos/src/control/lib/ranklist"
"github.com/daos-stack/daos/src/control/logging"
"github.com/daos-stack/daos/src/control/pbin"
"github.com/daos-stack/daos/src/control/security"
"github.com/daos-stack/daos/src/control/server/config"
"github.com/daos-stack/daos/src/control/server/engine"
"github.com/daos-stack/daos/src/control/server/storage"
"github.com/daos-stack/daos/src/control/system"
"github.com/daos-stack/daos/src/control/system/raft"
)
// netListenerFn is a type alias for the net.Listener function signature.
type netListenFn func(string, string) (net.Listener, error)
// ipLookupFn defines the function signature for a helper that can
// be used to resolve a host address to a list of IP addresses.
type ipLookupFn func(string) ([]net.IP, error)
// resolveFirstAddr is a helper function to resolve a hostname to a TCP address.
// If the hostname resolves to multiple addresses, the first one is returned.
func resolveFirstAddr(addr string, lookup ipLookupFn) (*net.TCPAddr, error) {
host, port, err := net.SplitHostPort(addr)
if err != nil {
return nil, errors.Wrapf(err, "unable to split %q", addr)
}
iPort, err := strconv.Atoi(port)
if err != nil {
return nil, errors.Wrapf(err, "unable to convert %q to int", port)
}
addrs, err := lookup(host)
if err != nil {
return nil, errors.Wrapf(err, "unable to resolve %q", host)
}
if len(addrs) == 0 {
return nil, errors.Errorf("no addresses found for %q", host)
}
isIPv4 := func(ip net.IP) bool {
return ip.To4() != nil
}
// Ensure stable ordering of addresses.
sort.Slice(addrs, func(i, j int) bool {
if !isIPv4(addrs[i]) && isIPv4(addrs[j]) {
return false
} else if isIPv4(addrs[i]) && !isIPv4(addrs[j]) {
return true
}
return bytes.Compare(addrs[i], addrs[j]) < 0
})
return &net.TCPAddr{IP: addrs[0], Port: iPort}, nil
}
const scanMinHugePageCount = 128
func getBdevCfgsFromSrvCfg(cfg *config.Server) storage.TierConfigs {
var bdevCfgs storage.TierConfigs
for _, engineCfg := range cfg.Engines {
bdevCfgs = append(bdevCfgs, engineCfg.Storage.Tiers.BdevConfigs()...)
}
return bdevCfgs
}
func cfgGetReplicas(cfg *config.Server, lookup ipLookupFn) ([]*net.TCPAddr, error) {
var dbReplicas []*net.TCPAddr
for _, ap := range cfg.AccessPoints {
apAddr, err := resolveFirstAddr(ap, lookup)
if err != nil {
return nil, config.FaultConfigBadAccessPoints
}
dbReplicas = append(dbReplicas, apAddr)
}
return dbReplicas, nil
}
func cfgGetRaftDir(cfg *config.Server) string {
if len(cfg.Engines) == 0 {
return "" // can't save to SCM
}
if len(cfg.Engines[0].Storage.Tiers.ScmConfigs()) == 0 {
return ""
}
return filepath.Join(cfg.Engines[0].Storage.Tiers.ScmConfigs()[0].Scm.MountPoint, "control_raft")
}
func writeCoreDumpFilter(log logging.Logger, path string, filter uint8) error {
f, err := os.OpenFile(path, os.O_WRONLY, 0644)
if err != nil {
// Work around a testing oddity that seems to be related to launching
// the server via SSH, with the result that the /proc file is unwritable.
if os.IsPermission(err) {
log.Debugf("Unable to write core dump filter to %s: %s", path, err)
return nil
}
return errors.Wrapf(err, "unable to open core dump filter file %s", path)
}
defer f.Close()
_, err = f.WriteString(fmt.Sprintf("0x%x\n", filter))
return err
}
type replicaAddrGetter interface {
ReplicaAddr() (*net.TCPAddr, error)
}
type ctlAddrParams struct {
port int
replicaAddrSrc replicaAddrGetter
lookupHost ipLookupFn
}
func getControlAddr(params ctlAddrParams) (*net.TCPAddr, error) {
ipStr := "0.0.0.0"
if repAddr, err := params.replicaAddrSrc.ReplicaAddr(); err == nil {
ipStr = repAddr.IP.String()
}
ctlAddr, err := resolveFirstAddr(fmt.Sprintf("[%s]:%d", ipStr, params.port), params.lookupHost)
if err != nil {
return nil, errors.Wrap(err, "resolving control address")
}
return ctlAddr, nil
}
func createListener(ctlAddr *net.TCPAddr, listen netListenFn) (net.Listener, error) {
// Create and start listener on management network.
lis, err := listen("tcp4", fmt.Sprintf("0.0.0.0:%d", ctlAddr.Port))
if err != nil {
return nil, errors.Wrap(err, "unable to listen on management interface")
}
return lis, nil
}
// updateFabricEnvars adjusts the engine fabric configuration.
func updateFabricEnvars(log logging.Logger, cfg *engine.Config, fis *hardware.FabricInterfaceSet) error {
// In the case of some providers, mercury uses the interface name
// such as ib0, while OFI uses the device name such as hfi1_0 CaRT and
// Mercury will now support the new OFI_DOMAIN environment variable so
// that we can specify the correct device for each.
if !cfg.HasEnvVar("OFI_DOMAIN") {
fi, err := fis.GetInterfaceOnNetDevice(cfg.Fabric.Interface, cfg.Fabric.Provider)
if err != nil {
return errors.Wrapf(err, "unable to determine device domain for %s", cfg.Fabric.Interface)
}
log.Debugf("setting OFI_DOMAIN=%s for %s", fi.Name, cfg.Fabric.Interface)
envVar := "OFI_DOMAIN=" + fi.Name
cfg.WithEnvVars(envVar)
}
return nil
}
func getFabricNetDevClass(cfg *config.Server, fis *hardware.FabricInterfaceSet) (hardware.NetDevClass, error) {
var netDevClass hardware.NetDevClass
for index, engine := range cfg.Engines {
fi, err := fis.GetInterfaceOnNetDevice(engine.Fabric.Interface, engine.Fabric.Provider)
if err != nil {
return 0, err
}
ndc := fi.DeviceClass
if index == 0 {
netDevClass = ndc
continue
}
if ndc != netDevClass {
return 0, config.FaultConfigInvalidNetDevClass(index, netDevClass,
ndc, engine.Fabric.Interface)
}
}
return netDevClass, nil
}
// Detect the number of engine configs assigned to each NUMA node and return error if engines are
// distributed unevenly across NUMA nodes. Otherwise return sorted list of NUMA nodes in use.
// Configurations where all engines are on a single NUMA node will be allowed.
func getEngineNUMANodes(log logging.Logger, engineCfgs []*engine.Config) ([]string, error) {
nodeMap := make(map[int]int)
for _, ec := range engineCfgs {
nodeMap[int(ec.Storage.NumaNodeIndex)] += 1
}
var lastCount int
nodes := make([]string, 0, len(engineCfgs))
for k, v := range nodeMap {
if lastCount != 0 && v != lastCount {
return nil, FaultEngineNUMAImbalance(nodeMap)
}
lastCount = v
nodes = append(nodes, fmt.Sprintf("%d", k))
}
sort.Strings(nodes)
return nodes, nil
}
// Prepare bdev storage. Assumes validation has already been performed on server config. Hugepages
// are required for both emulated (AIO devices) and real NVMe bdevs. VFIO and IOMMU are not
// required for emulated NVMe.
func prepBdevStorage(srv *server, iommuEnabled bool) error {
defer srv.logDuration(track("time to prepare bdev storage"))
if srv.cfg.DisableHugepages {
srv.log.Debugf("skip nvme prepare as disable_hugepages: true in config")
return nil
}
bdevCfgs := getBdevCfgsFromSrvCfg(srv.cfg)
// Perform these checks only if non-emulated NVMe is used and user is unprivileged.
if bdevCfgs.HaveRealNVMe() && srv.runningUser.Username != "root" {
if srv.cfg.DisableVFIO {
return FaultVfioDisabled
}
if !iommuEnabled {
return FaultIommuDisabled
}
}
// When requesting to prepare NVMe drives during service start-up, use all addresses
// specified in engine config BdevList parameters as the PCIAllowList and the server
// config BdevExclude parameter as the PCIBlockList.
prepReq := storage.BdevPrepareRequest{
TargetUser: srv.runningUser.Username,
PCIAllowList: strings.Join(bdevCfgs.NVMeBdevs().Devices(), storage.BdevPciAddrSep),
PCIBlockList: strings.Join(srv.cfg.BdevExclude, storage.BdevPciAddrSep),
DisableVFIO: srv.cfg.DisableVFIO,
}
enableVMD := true
if srv.cfg.DisableVMD != nil && *srv.cfg.DisableVMD {
enableVMD = false
}
switch {
case enableVMD && srv.cfg.DisableVFIO:
srv.log.Info("VMD not enabled because VFIO disabled in config")
case enableVMD && !iommuEnabled:
srv.log.Info("VMD not enabled because IOMMU disabled on platform")
case enableVMD && bdevCfgs.HaveEmulatedNVMe():
srv.log.Info("VMD not enabled because emulated NVMe devices found in config")
default:
// If no case above matches, set enable VMD flag in request otherwise leave false.
prepReq.EnableVMD = enableVMD
}
if bdevCfgs.HaveBdevs() {
// The NrHugepages config value is a total for all engines. Distribute allocation
// of hugepages across each engine's numa node (as validation ensures that
// TargetsCount is equal for each engine). Assumes an equal number of engine's per
// numa node.
numaNodes, err := getEngineNUMANodes(srv.log, srv.cfg.Engines)
if err != nil {
return err
}
if len(numaNodes) == 0 {
return errors.New("invalid number of numa nodes detected (0)")
}
// Request a few more hugepages than actually required for each NUMA node
// allocation as some overhead may result in one or two being unavailable.
prepReq.HugePageCount = srv.cfg.NrHugepages / len(numaNodes)
prepReq.HugePageCount += common.ExtraHugePages
prepReq.HugeNodes = strings.Join(numaNodes, ",")
srv.log.Debugf("allocating %d hugepages on each of these numa nodes: %v",
prepReq.HugePageCount, numaNodes)
} else {
if srv.cfg.NrHugepages == 0 {
// If nr_hugepages is unset then set minimum needed for scanning in prepare
// request.
prepReq.HugePageCount = scanMinHugePageCount
} else {
// If nr_hugepages has been set manually but no bdevs in config then
// allocate on numa node 0 (for example if a bigger number of hugepages are
// required in discovery mode for an unusually large number of SSDs).
prepReq.HugePageCount = srv.cfg.NrHugepages
}
srv.log.Debugf("allocating %d hugepages on numa node 0", prepReq.HugePageCount)
}
// Run prepare to bind devices to user-space driver and allocate hugepages.
//
// TODO: should be passing root context into prepare request to
// facilitate cancellation.
if _, err := srv.ctlSvc.NvmePrepare(prepReq); err != nil {
srv.log.Errorf("automatic NVMe prepare failed: %s", err)
}
return nil
}
// scanBdevStorage performs discovery and validates existence of configured NVMe SSDs.
func scanBdevStorage(srv *server) (*storage.BdevScanResponse, error) {
defer srv.logDuration(track("time to scan bdev storage"))
if srv.cfg.DisableHugepages {
srv.log.Debugf("skip nvme scan as hugepages have been disabled in config")
return &storage.BdevScanResponse{}, nil
}
nvmeScanResp, err := srv.ctlSvc.NvmeScan(storage.BdevScanRequest{
DeviceList: getBdevCfgsFromSrvCfg(srv.cfg).Bdevs(),
BypassCache: true, // init cache on first scan
})
if err != nil {
err = errors.Wrap(err, "NVMe Scan Failed")
srv.log.Errorf("%s", err)
return nil, err
}
return nvmeScanResp, nil
}
func setEngineBdevs(engine *EngineInstance, scanResp *storage.BdevScanResponse, lastEngineIdx, lastBdevCount *int) error {
badInput := ""
switch {
case engine == nil:
badInput = "engine"
case scanResp == nil:
badInput = "scanResp"
case lastEngineIdx == nil:
badInput = "lastEngineIdx"
case lastBdevCount == nil:
badInput = "lastBdevCount"
}
if badInput != "" {
return errors.New("nil input param: " + badInput)
}
if err := engine.storage.SetBdevCache(*scanResp); err != nil {
return errors.Wrap(err, "setting engine storage bdev cache")
}
// After engine's bdev cache has been set, the cache will only contain details of bdevs
// identified in the relevant engine config and device addresses will have been verified
// against NVMe scan results. As any VMD endpoint addresses will have been replaced with
// backing device addresses, device counts will reflect the number of physical (as opposed
// to logical) bdevs and engine bdev counts can be accurately compared.
eIdx := engine.Index()
bdevCache := engine.storage.GetBdevCache()
newNrBdevs := len(bdevCache.Controllers)
engine.log.Debugf("last: [index: %d, bdevCount: %d], current: [index: %d, bdevCount: %d]",
*lastEngineIdx, *lastBdevCount, eIdx, newNrBdevs)
// Update last recorded counters if this is the first update or if the number of bdevs is
// unchanged. If bdev count differs between engines, return fault.
switch {
case *lastEngineIdx < 0:
if *lastBdevCount >= 0 {
return errors.New("expecting both lastEngineIdx and lastBdevCount to be unset")
}
*lastEngineIdx = int(eIdx)
*lastBdevCount = newNrBdevs
case *lastBdevCount < 0:
return errors.New("expecting both lastEngineIdx and lastBdevCount to be set")
case newNrBdevs == *lastBdevCount:
*lastEngineIdx = int(eIdx)
default:
return config.FaultConfigBdevCountMismatch(int(eIdx), newNrBdevs, *lastEngineIdx, *lastBdevCount)
}
return nil
}
func setDaosHelperEnvs(cfg *config.Server, setenv func(k, v string) error) error {
if cfg.HelperLogFile != "" {
if err := setenv(pbin.DaosPrivHelperLogFileEnvVar, cfg.HelperLogFile); err != nil {
return errors.Wrap(err, "unable to configure privileged helper logging")
}
}
if cfg.FWHelperLogFile != "" {
if err := setenv(pbin.DaosFWLogFileEnvVar, cfg.FWHelperLogFile); err != nil {
return errors.Wrap(err, "unable to configure privileged firmware helper logging")
}
}
return nil
}
// Minimum recommended number of hugepages has already been calculated and set in config so verify
// we have enough free hugepage memory to satisfy this requirement before setting mem_size and
// hugepage_size parameters for engine.
func updateMemValues(srv *server, engine *EngineInstance, getMemInfo common.GetMemInfoFn) error {
engine.RLock()
ec := engine.runner.GetConfig()
ei := ec.Index
if ec.Storage.Tiers.Bdevs().Len() == 0 |
engine.RUnlock()
// Retrieve up-to-date hugepage info to check that we got the requested number of hugepages.
mi, err := getMemInfo()
if err != nil {
return err
}
// Calculate mem_size per I/O engine (in MB) from number of hugepages required per engine.
nrPagesRequired := srv.cfg.NrHugepages / len(srv.cfg.Engines)
pageSizeMiB := mi.HugePageSizeKb / humanize.KiByte // kib to mib
memSizeReqMiB := nrPagesRequired * pageSizeMiB
memSizeFreeMiB := mi.HugePagesFree * pageSizeMiB
// Fail if free hugepage mem is not enough to sustain average I/O workload (~1GB).
srv.log.Debugf("Per-engine MemSize:%dMB, HugepageSize:%dMB (meminfo: %+v)", memSizeReqMiB,
pageSizeMiB, *mi)
if memSizeFreeMiB < memSizeReqMiB {
return FaultInsufficientFreeHugePageMem(int(ei), memSizeReqMiB, memSizeFreeMiB,
nrPagesRequired, mi.HugePagesFree)
}
// Set engine mem_size and hugepage_size (MiB) values based on hugepage info.
engine.setMemSize(memSizeReqMiB)
engine.setHugePageSz(pageSizeMiB)
return nil
}
func cleanEngineHugePages(srv *server) error {
req := storage.BdevPrepareRequest{
CleanHugePagesOnly: true,
}
msg := "cleanup hugepages via bdev backend"
resp, err := srv.ctlSvc.NvmePrepare(req)
if err != nil {
return errors.Wrap(err, msg)
}
srv.log.Debugf("%s: %d removed", msg, resp.NrHugePagesRemoved)
return nil
}
func registerEngineEventCallbacks(srv *server, engine *EngineInstance, allStarted *sync.WaitGroup) {
// Register callback to publish engine process exit events.
engine.OnInstanceExit(createPublishInstanceExitFunc(srv.pubSub.Publish, srv.hostname))
// Register callback to publish engine format requested events.
engine.OnAwaitFormat(createPublishFormatRequiredFunc(srv.pubSub.Publish, srv.hostname))
var onceReady sync.Once
engine.OnReady(func(_ context.Context) error {
// Indicate that engine has been started, only do this the first time that the
// engine starts as shared memory persists between engine restarts.
onceReady.Do(func() {
allStarted.Done()
})
return nil
})
// Register callback to update engine cfg mem_size after format.
engine.OnStorageReady(func(_ context.Context) error {
srv.log.Debugf("engine %d: storage ready", engine.Index())
// Attempt to remove unused hugepages, log error only.
if err := cleanEngineHugePages(srv); err != nil {
srv.log.Errorf(err.Error())
}
// Update engine memory related config parameters before starting.
return errors.Wrap(updateMemValues(srv, engine, common.GetMemInfo),
"updating engine memory parameters")
})
}
func configureFirstEngine(ctx context.Context, engine *EngineInstance, sysdb *raft.Database, join systemJoinFn) {
if !sysdb.IsReplica() {
return
}
// Start the system db after instance 0's SCM is ready.
var onceStorageReady sync.Once
engine.OnStorageReady(func(_ context.Context) (err error) {
onceStorageReady.Do(func() {
// NB: We use the outer context rather than
// the closure context in order to avoid
// tying the db to the instance.
err = errors.Wrap(sysdb.Start(ctx),
"failed to start system db",
)
})
return
})
if !sysdb.IsBootstrap() {
return
}
// For historical reasons, we reserve rank 0 for the first
// instance on the raft bootstrap server. This implies that
// rank 0 will always be associated with a MS replica, but
// it is not guaranteed to always be the leader.
engine.joinSystem = func(ctx context.Context, req *control.SystemJoinReq) (*control.SystemJoinResp, error) {
if sb := engine.getSuperblock(); !sb.ValidRank {
engine.log.Debug("marking bootstrap instance as rank 0")
req.Rank = 0
sb.Rank = ranklist.NewRankPtr(0)
}
return join(ctx, req)
}
}
// registerTelemetryCallbacks sets telemetry related callbacks to
// be triggered when all engines have been started.
func registerTelemetryCallbacks(ctx context.Context, srv *server) {
telemPort := srv.cfg.TelemetryPort
if telemPort == 0 {
return
}
srv.OnEnginesStarted(func(ctxIn context.Context) error {
srv.log.Debug("starting Prometheus exporter")
cleanup, err := startPrometheusExporter(ctxIn, srv.log, telemPort, srv.harness.Instances())
if err != nil {
return err
}
srv.OnShutdown(cleanup)
return nil
})
}
// registerFollowerSubscriptions stops handling received forwarded (in addition
// to local) events and starts forwarding events to the new MS leader.
// Log events on the host that they were raised (and first published) on.
// This is the initial behavior before leadership has been determined.
func registerFollowerSubscriptions(srv *server) {
srv.pubSub.Reset()
srv.pubSub.Subscribe(events.RASTypeAny, srv.evtLogger)
srv.pubSub.Subscribe(events.RASTypeStateChange, srv.evtForwarder)
}
// registerLeaderSubscriptions stops forwarding events to MS and instead starts
// handling received forwarded (and local) events.
func registerLeaderSubscriptions(srv *server) {
srv.pubSub.Reset()
srv.pubSub.Subscribe(events.RASTypeAny, srv.evtLogger)
srv.pubSub.Subscribe(events.RASTypeStateChange, srv.membership)
srv.pubSub.Subscribe(events.RASTypeStateChange, srv.sysdb)
srv.pubSub.Subscribe(events.RASTypeStateChange,
events.HandlerFunc(func(ctx context.Context, evt *events.RASEvent) {
switch evt.ID {
case events.RASSwimRankDead:
ts, err := evt.GetTimestamp()
if err != nil {
srv.log.Errorf("bad event timestamp %q: %s", evt.Timestamp, err)
return
}
srv.log.Debugf("%s marked rank %d:%x dead @ %s", evt.Hostname, evt.Rank, evt.Incarnation, ts)
// Mark the rank as unavailable for membership in
// new pools, etc. Do group update on success.
if err := srv.membership.MarkRankDead(ranklist.Rank(evt.Rank), evt.Incarnation); err != nil {
srv.log.Errorf("failed to mark rank %d:%x dead: %s", evt.Rank, evt.Incarnation, err)
if system.IsNotLeader(err) {
// If we've lost leadership while processing the event,
// attempt to re-forward it to the new leader.
evt = evt.WithForwarded(false).WithForwardable(true)
srv.log.Debugf("re-forwarding rank dead event for %d:%x", evt.Rank, evt.Incarnation)
srv.evtForwarder.OnEvent(ctx, evt)
}
return
}
srv.mgmtSvc.reqGroupUpdate(ctx, false)
}
}))
// Add a debounce to throttle multiple SWIM Rank Dead events for the same rank/incarnation.
srv.pubSub.Debounce(events.RASSwimRankDead, 0, func(ev *events.RASEvent) string {
return strconv.FormatUint(uint64(ev.Rank), 10) + ":" + strconv.FormatUint(ev.Incarnation, 10)
})
}
// getGrpcOpts generates a set of gRPC options for the server based on the supplied configuration.
func getGrpcOpts(log logging.Logger, cfgTransport *security.TransportConfig) ([]grpc.ServerOption, error) {
unaryInterceptors := []grpc.UnaryServerInterceptor{
unaryLoggingInterceptor(log), // must be first in order to properly log errors
unaryErrorInterceptor,
unaryStatusInterceptor,
unaryVersionInterceptor,
}
streamInterceptors := []grpc.StreamServerInterceptor{
streamErrorInterceptor,
}
tcOpt, err := security.ServerOptionForTransportConfig(cfgTransport)
if err != nil {
return nil, err
}
srvOpts := []grpc.ServerOption{tcOpt}
uintOpt, err := unaryInterceptorForTransportConfig(cfgTransport)
if err != nil {
return nil, err
}
if uintOpt != nil {
unaryInterceptors = append(unaryInterceptors, uintOpt)
}
sintOpt, err := streamInterceptorForTransportConfig(cfgTransport)
if err != nil {
return nil, err
}
if sintOpt != nil {
streamInterceptors = append(streamInterceptors, sintOpt)
}
return append(srvOpts, []grpc.ServerOption{
grpc.ChainUnaryInterceptor(unaryInterceptors...),
grpc.ChainStreamInterceptor(streamInterceptors...),
}...), nil
}
type netInterface interface {
Addrs() ([]net.Addr, error)
}
func getSrxSetting(cfg *config.Server) (int32, error) {
if len(cfg.Engines) == 0 {
return -1, nil
}
srxVarName := "FI_OFI_RXM_USE_SRX"
getSetting := func(ev string) (bool, int32, error) {
kv := strings.Split(ev, "=")
if len(kv) != 2 {
return false, -1, nil
}
if kv[0] != srxVarName {
return false, -1, nil
}
v, err := strconv.ParseInt(kv[1], 10, 32)
if err != nil {
return false, -1, err
}
return true, int32(v), nil
}
engineVals := make([]int32, len(cfg.Engines))
for idx, ec := range cfg.Engines {
engineVals[idx] = -1 // default to unset
for _, ev := range ec.EnvVars {
if match, engSrx, err := getSetting(ev); err != nil {
return -1, err
} else if match {
engineVals[idx] = engSrx
break
}
}
for _, pte := range ec.EnvPassThrough {
if pte == srxVarName {
return -1, errors.Errorf("%s may not be set as a pass-through env var", srxVarName)
}
}
}
cliSrx := engineVals[0]
for i := 1; i < len(engineVals); i++ {
if engineVals[i] != cliSrx {
return -1, errors.Errorf("%s setting must be the same for all engines", srxVarName)
}
}
// If the SRX config was not explicitly set via env vars, use the
// global config value.
if cliSrx == -1 {
cliSrx = int32(common.BoolAsInt(!cfg.Fabric.DisableSRX))
}
return cliSrx, nil
}
func checkFabricInterface(name string, lookup func(string) (netInterface, error)) error {
if name == "" {
return errors.New("no name provided")
}
if lookup == nil {
return errors.New("no lookup function provided")
}
netIF, err := lookup(name)
if err != nil {
return err
}
addrs, err := netIF.Addrs()
if err != nil {
return err
}
if len(addrs) == 0 {
return fmt.Errorf("no network addresses for interface %q", name)
}
return nil
}
| {
srv.log.Debugf("skipping mem check on engine %d, no bdevs", ei)
engine.RUnlock()
return nil
} | conditional_block |
server_utils.go | //
// (C) Copyright 2021-2022 Intel Corporation.
//
// SPDX-License-Identifier: BSD-2-Clause-Patent
//
package server
import (
"bytes"
"context"
"fmt"
"net"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"sync"
"github.com/dustin/go-humanize"
"github.com/pkg/errors"
"google.golang.org/grpc"
"github.com/daos-stack/daos/src/control/common"
"github.com/daos-stack/daos/src/control/events"
"github.com/daos-stack/daos/src/control/lib/control"
"github.com/daos-stack/daos/src/control/lib/hardware"
"github.com/daos-stack/daos/src/control/lib/ranklist"
"github.com/daos-stack/daos/src/control/logging"
"github.com/daos-stack/daos/src/control/pbin"
"github.com/daos-stack/daos/src/control/security"
"github.com/daos-stack/daos/src/control/server/config"
"github.com/daos-stack/daos/src/control/server/engine"
"github.com/daos-stack/daos/src/control/server/storage"
"github.com/daos-stack/daos/src/control/system"
"github.com/daos-stack/daos/src/control/system/raft"
)
// netListenerFn is a type alias for the net.Listener function signature.
type netListenFn func(string, string) (net.Listener, error)
// ipLookupFn defines the function signature for a helper that can
// be used to resolve a host address to a list of IP addresses.
type ipLookupFn func(string) ([]net.IP, error)
// resolveFirstAddr is a helper function to resolve a hostname to a TCP address.
// If the hostname resolves to multiple addresses, the first one is returned.
func resolveFirstAddr(addr string, lookup ipLookupFn) (*net.TCPAddr, error) |
const scanMinHugePageCount = 128
func getBdevCfgsFromSrvCfg(cfg *config.Server) storage.TierConfigs {
var bdevCfgs storage.TierConfigs
for _, engineCfg := range cfg.Engines {
bdevCfgs = append(bdevCfgs, engineCfg.Storage.Tiers.BdevConfigs()...)
}
return bdevCfgs
}
func cfgGetReplicas(cfg *config.Server, lookup ipLookupFn) ([]*net.TCPAddr, error) {
var dbReplicas []*net.TCPAddr
for _, ap := range cfg.AccessPoints {
apAddr, err := resolveFirstAddr(ap, lookup)
if err != nil {
return nil, config.FaultConfigBadAccessPoints
}
dbReplicas = append(dbReplicas, apAddr)
}
return dbReplicas, nil
}
func cfgGetRaftDir(cfg *config.Server) string {
if len(cfg.Engines) == 0 {
return "" // can't save to SCM
}
if len(cfg.Engines[0].Storage.Tiers.ScmConfigs()) == 0 {
return ""
}
return filepath.Join(cfg.Engines[0].Storage.Tiers.ScmConfigs()[0].Scm.MountPoint, "control_raft")
}
func writeCoreDumpFilter(log logging.Logger, path string, filter uint8) error {
f, err := os.OpenFile(path, os.O_WRONLY, 0644)
if err != nil {
// Work around a testing oddity that seems to be related to launching
// the server via SSH, with the result that the /proc file is unwritable.
if os.IsPermission(err) {
log.Debugf("Unable to write core dump filter to %s: %s", path, err)
return nil
}
return errors.Wrapf(err, "unable to open core dump filter file %s", path)
}
defer f.Close()
_, err = f.WriteString(fmt.Sprintf("0x%x\n", filter))
return err
}
type replicaAddrGetter interface {
ReplicaAddr() (*net.TCPAddr, error)
}
type ctlAddrParams struct {
port int
replicaAddrSrc replicaAddrGetter
lookupHost ipLookupFn
}
func getControlAddr(params ctlAddrParams) (*net.TCPAddr, error) {
ipStr := "0.0.0.0"
if repAddr, err := params.replicaAddrSrc.ReplicaAddr(); err == nil {
ipStr = repAddr.IP.String()
}
ctlAddr, err := resolveFirstAddr(fmt.Sprintf("[%s]:%d", ipStr, params.port), params.lookupHost)
if err != nil {
return nil, errors.Wrap(err, "resolving control address")
}
return ctlAddr, nil
}
func createListener(ctlAddr *net.TCPAddr, listen netListenFn) (net.Listener, error) {
// Create and start listener on management network.
lis, err := listen("tcp4", fmt.Sprintf("0.0.0.0:%d", ctlAddr.Port))
if err != nil {
return nil, errors.Wrap(err, "unable to listen on management interface")
}
return lis, nil
}
// updateFabricEnvars adjusts the engine fabric configuration.
func updateFabricEnvars(log logging.Logger, cfg *engine.Config, fis *hardware.FabricInterfaceSet) error {
// In the case of some providers, mercury uses the interface name
// such as ib0, while OFI uses the device name such as hfi1_0 CaRT and
// Mercury will now support the new OFI_DOMAIN environment variable so
// that we can specify the correct device for each.
if !cfg.HasEnvVar("OFI_DOMAIN") {
fi, err := fis.GetInterfaceOnNetDevice(cfg.Fabric.Interface, cfg.Fabric.Provider)
if err != nil {
return errors.Wrapf(err, "unable to determine device domain for %s", cfg.Fabric.Interface)
}
log.Debugf("setting OFI_DOMAIN=%s for %s", fi.Name, cfg.Fabric.Interface)
envVar := "OFI_DOMAIN=" + fi.Name
cfg.WithEnvVars(envVar)
}
return nil
}
func getFabricNetDevClass(cfg *config.Server, fis *hardware.FabricInterfaceSet) (hardware.NetDevClass, error) {
var netDevClass hardware.NetDevClass
for index, engine := range cfg.Engines {
fi, err := fis.GetInterfaceOnNetDevice(engine.Fabric.Interface, engine.Fabric.Provider)
if err != nil {
return 0, err
}
ndc := fi.DeviceClass
if index == 0 {
netDevClass = ndc
continue
}
if ndc != netDevClass {
return 0, config.FaultConfigInvalidNetDevClass(index, netDevClass,
ndc, engine.Fabric.Interface)
}
}
return netDevClass, nil
}
// Detect the number of engine configs assigned to each NUMA node and return error if engines are
// distributed unevenly across NUMA nodes. Otherwise return sorted list of NUMA nodes in use.
// Configurations where all engines are on a single NUMA node will be allowed.
func getEngineNUMANodes(log logging.Logger, engineCfgs []*engine.Config) ([]string, error) {
nodeMap := make(map[int]int)
for _, ec := range engineCfgs {
nodeMap[int(ec.Storage.NumaNodeIndex)] += 1
}
var lastCount int
nodes := make([]string, 0, len(engineCfgs))
for k, v := range nodeMap {
if lastCount != 0 && v != lastCount {
return nil, FaultEngineNUMAImbalance(nodeMap)
}
lastCount = v
nodes = append(nodes, fmt.Sprintf("%d", k))
}
sort.Strings(nodes)
return nodes, nil
}
// Prepare bdev storage. Assumes validation has already been performed on server config. Hugepages
// are required for both emulated (AIO devices) and real NVMe bdevs. VFIO and IOMMU are not
// required for emulated NVMe.
func prepBdevStorage(srv *server, iommuEnabled bool) error {
defer srv.logDuration(track("time to prepare bdev storage"))
if srv.cfg.DisableHugepages {
srv.log.Debugf("skip nvme prepare as disable_hugepages: true in config")
return nil
}
bdevCfgs := getBdevCfgsFromSrvCfg(srv.cfg)
// Perform these checks only if non-emulated NVMe is used and user is unprivileged.
if bdevCfgs.HaveRealNVMe() && srv.runningUser.Username != "root" {
if srv.cfg.DisableVFIO {
return FaultVfioDisabled
}
if !iommuEnabled {
return FaultIommuDisabled
}
}
// When requesting to prepare NVMe drives during service start-up, use all addresses
// specified in engine config BdevList parameters as the PCIAllowList and the server
// config BdevExclude parameter as the PCIBlockList.
prepReq := storage.BdevPrepareRequest{
TargetUser: srv.runningUser.Username,
PCIAllowList: strings.Join(bdevCfgs.NVMeBdevs().Devices(), storage.BdevPciAddrSep),
PCIBlockList: strings.Join(srv.cfg.BdevExclude, storage.BdevPciAddrSep),
DisableVFIO: srv.cfg.DisableVFIO,
}
enableVMD := true
if srv.cfg.DisableVMD != nil && *srv.cfg.DisableVMD {
enableVMD = false
}
switch {
case enableVMD && srv.cfg.DisableVFIO:
srv.log.Info("VMD not enabled because VFIO disabled in config")
case enableVMD && !iommuEnabled:
srv.log.Info("VMD not enabled because IOMMU disabled on platform")
case enableVMD && bdevCfgs.HaveEmulatedNVMe():
srv.log.Info("VMD not enabled because emulated NVMe devices found in config")
default:
// If no case above matches, set enable VMD flag in request otherwise leave false.
prepReq.EnableVMD = enableVMD
}
if bdevCfgs.HaveBdevs() {
// The NrHugepages config value is a total for all engines. Distribute allocation
// of hugepages across each engine's numa node (as validation ensures that
// TargetsCount is equal for each engine). Assumes an equal number of engine's per
// numa node.
numaNodes, err := getEngineNUMANodes(srv.log, srv.cfg.Engines)
if err != nil {
return err
}
if len(numaNodes) == 0 {
return errors.New("invalid number of numa nodes detected (0)")
}
// Request a few more hugepages than actually required for each NUMA node
// allocation as some overhead may result in one or two being unavailable.
prepReq.HugePageCount = srv.cfg.NrHugepages / len(numaNodes)
prepReq.HugePageCount += common.ExtraHugePages
prepReq.HugeNodes = strings.Join(numaNodes, ",")
srv.log.Debugf("allocating %d hugepages on each of these numa nodes: %v",
prepReq.HugePageCount, numaNodes)
} else {
if srv.cfg.NrHugepages == 0 {
// If nr_hugepages is unset then set minimum needed for scanning in prepare
// request.
prepReq.HugePageCount = scanMinHugePageCount
} else {
// If nr_hugepages has been set manually but no bdevs in config then
// allocate on numa node 0 (for example if a bigger number of hugepages are
// required in discovery mode for an unusually large number of SSDs).
prepReq.HugePageCount = srv.cfg.NrHugepages
}
srv.log.Debugf("allocating %d hugepages on numa node 0", prepReq.HugePageCount)
}
// Run prepare to bind devices to user-space driver and allocate hugepages.
//
// TODO: should be passing root context into prepare request to
// facilitate cancellation.
if _, err := srv.ctlSvc.NvmePrepare(prepReq); err != nil {
srv.log.Errorf("automatic NVMe prepare failed: %s", err)
}
return nil
}
// scanBdevStorage performs discovery and validates existence of configured NVMe SSDs.
func scanBdevStorage(srv *server) (*storage.BdevScanResponse, error) {
defer srv.logDuration(track("time to scan bdev storage"))
if srv.cfg.DisableHugepages {
srv.log.Debugf("skip nvme scan as hugepages have been disabled in config")
return &storage.BdevScanResponse{}, nil
}
nvmeScanResp, err := srv.ctlSvc.NvmeScan(storage.BdevScanRequest{
DeviceList: getBdevCfgsFromSrvCfg(srv.cfg).Bdevs(),
BypassCache: true, // init cache on first scan
})
if err != nil {
err = errors.Wrap(err, "NVMe Scan Failed")
srv.log.Errorf("%s", err)
return nil, err
}
return nvmeScanResp, nil
}
func setEngineBdevs(engine *EngineInstance, scanResp *storage.BdevScanResponse, lastEngineIdx, lastBdevCount *int) error {
badInput := ""
switch {
case engine == nil:
badInput = "engine"
case scanResp == nil:
badInput = "scanResp"
case lastEngineIdx == nil:
badInput = "lastEngineIdx"
case lastBdevCount == nil:
badInput = "lastBdevCount"
}
if badInput != "" {
return errors.New("nil input param: " + badInput)
}
if err := engine.storage.SetBdevCache(*scanResp); err != nil {
return errors.Wrap(err, "setting engine storage bdev cache")
}
// After engine's bdev cache has been set, the cache will only contain details of bdevs
// identified in the relevant engine config and device addresses will have been verified
// against NVMe scan results. As any VMD endpoint addresses will have been replaced with
// backing device addresses, device counts will reflect the number of physical (as opposed
// to logical) bdevs and engine bdev counts can be accurately compared.
eIdx := engine.Index()
bdevCache := engine.storage.GetBdevCache()
newNrBdevs := len(bdevCache.Controllers)
engine.log.Debugf("last: [index: %d, bdevCount: %d], current: [index: %d, bdevCount: %d]",
*lastEngineIdx, *lastBdevCount, eIdx, newNrBdevs)
// Update last recorded counters if this is the first update or if the number of bdevs is
// unchanged. If bdev count differs between engines, return fault.
switch {
case *lastEngineIdx < 0:
if *lastBdevCount >= 0 {
return errors.New("expecting both lastEngineIdx and lastBdevCount to be unset")
}
*lastEngineIdx = int(eIdx)
*lastBdevCount = newNrBdevs
case *lastBdevCount < 0:
return errors.New("expecting both lastEngineIdx and lastBdevCount to be set")
case newNrBdevs == *lastBdevCount:
*lastEngineIdx = int(eIdx)
default:
return config.FaultConfigBdevCountMismatch(int(eIdx), newNrBdevs, *lastEngineIdx, *lastBdevCount)
}
return nil
}
func setDaosHelperEnvs(cfg *config.Server, setenv func(k, v string) error) error {
if cfg.HelperLogFile != "" {
if err := setenv(pbin.DaosPrivHelperLogFileEnvVar, cfg.HelperLogFile); err != nil {
return errors.Wrap(err, "unable to configure privileged helper logging")
}
}
if cfg.FWHelperLogFile != "" {
if err := setenv(pbin.DaosFWLogFileEnvVar, cfg.FWHelperLogFile); err != nil {
return errors.Wrap(err, "unable to configure privileged firmware helper logging")
}
}
return nil
}
// Minimum recommended number of hugepages has already been calculated and set in config so verify
// we have enough free hugepage memory to satisfy this requirement before setting mem_size and
// hugepage_size parameters for engine.
func updateMemValues(srv *server, engine *EngineInstance, getMemInfo common.GetMemInfoFn) error {
engine.RLock()
ec := engine.runner.GetConfig()
ei := ec.Index
if ec.Storage.Tiers.Bdevs().Len() == 0 {
srv.log.Debugf("skipping mem check on engine %d, no bdevs", ei)
engine.RUnlock()
return nil
}
engine.RUnlock()
// Retrieve up-to-date hugepage info to check that we got the requested number of hugepages.
mi, err := getMemInfo()
if err != nil {
return err
}
// Calculate mem_size per I/O engine (in MB) from number of hugepages required per engine.
nrPagesRequired := srv.cfg.NrHugepages / len(srv.cfg.Engines)
pageSizeMiB := mi.HugePageSizeKb / humanize.KiByte // kib to mib
memSizeReqMiB := nrPagesRequired * pageSizeMiB
memSizeFreeMiB := mi.HugePagesFree * pageSizeMiB
// Fail if free hugepage mem is not enough to sustain average I/O workload (~1GB).
srv.log.Debugf("Per-engine MemSize:%dMB, HugepageSize:%dMB (meminfo: %+v)", memSizeReqMiB,
pageSizeMiB, *mi)
if memSizeFreeMiB < memSizeReqMiB {
return FaultInsufficientFreeHugePageMem(int(ei), memSizeReqMiB, memSizeFreeMiB,
nrPagesRequired, mi.HugePagesFree)
}
// Set engine mem_size and hugepage_size (MiB) values based on hugepage info.
engine.setMemSize(memSizeReqMiB)
engine.setHugePageSz(pageSizeMiB)
return nil
}
func cleanEngineHugePages(srv *server) error {
req := storage.BdevPrepareRequest{
CleanHugePagesOnly: true,
}
msg := "cleanup hugepages via bdev backend"
resp, err := srv.ctlSvc.NvmePrepare(req)
if err != nil {
return errors.Wrap(err, msg)
}
srv.log.Debugf("%s: %d removed", msg, resp.NrHugePagesRemoved)
return nil
}
func registerEngineEventCallbacks(srv *server, engine *EngineInstance, allStarted *sync.WaitGroup) {
// Register callback to publish engine process exit events.
engine.OnInstanceExit(createPublishInstanceExitFunc(srv.pubSub.Publish, srv.hostname))
// Register callback to publish engine format requested events.
engine.OnAwaitFormat(createPublishFormatRequiredFunc(srv.pubSub.Publish, srv.hostname))
var onceReady sync.Once
engine.OnReady(func(_ context.Context) error {
// Indicate that engine has been started, only do this the first time that the
// engine starts as shared memory persists between engine restarts.
onceReady.Do(func() {
allStarted.Done()
})
return nil
})
// Register callback to update engine cfg mem_size after format.
engine.OnStorageReady(func(_ context.Context) error {
srv.log.Debugf("engine %d: storage ready", engine.Index())
// Attempt to remove unused hugepages, log error only.
if err := cleanEngineHugePages(srv); err != nil {
srv.log.Errorf(err.Error())
}
// Update engine memory related config parameters before starting.
return errors.Wrap(updateMemValues(srv, engine, common.GetMemInfo),
"updating engine memory parameters")
})
}
func configureFirstEngine(ctx context.Context, engine *EngineInstance, sysdb *raft.Database, join systemJoinFn) {
if !sysdb.IsReplica() {
return
}
// Start the system db after instance 0's SCM is ready.
var onceStorageReady sync.Once
engine.OnStorageReady(func(_ context.Context) (err error) {
onceStorageReady.Do(func() {
// NB: We use the outer context rather than
// the closure context in order to avoid
// tying the db to the instance.
err = errors.Wrap(sysdb.Start(ctx),
"failed to start system db",
)
})
return
})
if !sysdb.IsBootstrap() {
return
}
// For historical reasons, we reserve rank 0 for the first
// instance on the raft bootstrap server. This implies that
// rank 0 will always be associated with a MS replica, but
// it is not guaranteed to always be the leader.
engine.joinSystem = func(ctx context.Context, req *control.SystemJoinReq) (*control.SystemJoinResp, error) {
if sb := engine.getSuperblock(); !sb.ValidRank {
engine.log.Debug("marking bootstrap instance as rank 0")
req.Rank = 0
sb.Rank = ranklist.NewRankPtr(0)
}
return join(ctx, req)
}
}
// registerTelemetryCallbacks sets telemetry related callbacks to
// be triggered when all engines have been started.
func registerTelemetryCallbacks(ctx context.Context, srv *server) {
telemPort := srv.cfg.TelemetryPort
if telemPort == 0 {
return
}
srv.OnEnginesStarted(func(ctxIn context.Context) error {
srv.log.Debug("starting Prometheus exporter")
cleanup, err := startPrometheusExporter(ctxIn, srv.log, telemPort, srv.harness.Instances())
if err != nil {
return err
}
srv.OnShutdown(cleanup)
return nil
})
}
// registerFollowerSubscriptions stops handling received forwarded (in addition
// to local) events and starts forwarding events to the new MS leader.
// Log events on the host that they were raised (and first published) on.
// This is the initial behavior before leadership has been determined.
func registerFollowerSubscriptions(srv *server) {
srv.pubSub.Reset()
srv.pubSub.Subscribe(events.RASTypeAny, srv.evtLogger)
srv.pubSub.Subscribe(events.RASTypeStateChange, srv.evtForwarder)
}
// registerLeaderSubscriptions stops forwarding events to MS and instead starts
// handling received forwarded (and local) events.
func registerLeaderSubscriptions(srv *server) {
srv.pubSub.Reset()
srv.pubSub.Subscribe(events.RASTypeAny, srv.evtLogger)
srv.pubSub.Subscribe(events.RASTypeStateChange, srv.membership)
srv.pubSub.Subscribe(events.RASTypeStateChange, srv.sysdb)
srv.pubSub.Subscribe(events.RASTypeStateChange,
events.HandlerFunc(func(ctx context.Context, evt *events.RASEvent) {
switch evt.ID {
case events.RASSwimRankDead:
ts, err := evt.GetTimestamp()
if err != nil {
srv.log.Errorf("bad event timestamp %q: %s", evt.Timestamp, err)
return
}
srv.log.Debugf("%s marked rank %d:%x dead @ %s", evt.Hostname, evt.Rank, evt.Incarnation, ts)
// Mark the rank as unavailable for membership in
// new pools, etc. Do group update on success.
if err := srv.membership.MarkRankDead(ranklist.Rank(evt.Rank), evt.Incarnation); err != nil {
srv.log.Errorf("failed to mark rank %d:%x dead: %s", evt.Rank, evt.Incarnation, err)
if system.IsNotLeader(err) {
// If we've lost leadership while processing the event,
// attempt to re-forward it to the new leader.
evt = evt.WithForwarded(false).WithForwardable(true)
srv.log.Debugf("re-forwarding rank dead event for %d:%x", evt.Rank, evt.Incarnation)
srv.evtForwarder.OnEvent(ctx, evt)
}
return
}
srv.mgmtSvc.reqGroupUpdate(ctx, false)
}
}))
// Add a debounce to throttle multiple SWIM Rank Dead events for the same rank/incarnation.
srv.pubSub.Debounce(events.RASSwimRankDead, 0, func(ev *events.RASEvent) string {
return strconv.FormatUint(uint64(ev.Rank), 10) + ":" + strconv.FormatUint(ev.Incarnation, 10)
})
}
// getGrpcOpts generates a set of gRPC options for the server based on the supplied configuration.
func getGrpcOpts(log logging.Logger, cfgTransport *security.TransportConfig) ([]grpc.ServerOption, error) {
unaryInterceptors := []grpc.UnaryServerInterceptor{
unaryLoggingInterceptor(log), // must be first in order to properly log errors
unaryErrorInterceptor,
unaryStatusInterceptor,
unaryVersionInterceptor,
}
streamInterceptors := []grpc.StreamServerInterceptor{
streamErrorInterceptor,
}
tcOpt, err := security.ServerOptionForTransportConfig(cfgTransport)
if err != nil {
return nil, err
}
srvOpts := []grpc.ServerOption{tcOpt}
uintOpt, err := unaryInterceptorForTransportConfig(cfgTransport)
if err != nil {
return nil, err
}
if uintOpt != nil {
unaryInterceptors = append(unaryInterceptors, uintOpt)
}
sintOpt, err := streamInterceptorForTransportConfig(cfgTransport)
if err != nil {
return nil, err
}
if sintOpt != nil {
streamInterceptors = append(streamInterceptors, sintOpt)
}
return append(srvOpts, []grpc.ServerOption{
grpc.ChainUnaryInterceptor(unaryInterceptors...),
grpc.ChainStreamInterceptor(streamInterceptors...),
}...), nil
}
type netInterface interface {
Addrs() ([]net.Addr, error)
}
func getSrxSetting(cfg *config.Server) (int32, error) {
if len(cfg.Engines) == 0 {
return -1, nil
}
srxVarName := "FI_OFI_RXM_USE_SRX"
getSetting := func(ev string) (bool, int32, error) {
kv := strings.Split(ev, "=")
if len(kv) != 2 {
return false, -1, nil
}
if kv[0] != srxVarName {
return false, -1, nil
}
v, err := strconv.ParseInt(kv[1], 10, 32)
if err != nil {
return false, -1, err
}
return true, int32(v), nil
}
engineVals := make([]int32, len(cfg.Engines))
for idx, ec := range cfg.Engines {
engineVals[idx] = -1 // default to unset
for _, ev := range ec.EnvVars {
if match, engSrx, err := getSetting(ev); err != nil {
return -1, err
} else if match {
engineVals[idx] = engSrx
break
}
}
for _, pte := range ec.EnvPassThrough {
if pte == srxVarName {
return -1, errors.Errorf("%s may not be set as a pass-through env var", srxVarName)
}
}
}
cliSrx := engineVals[0]
for i := 1; i < len(engineVals); i++ {
if engineVals[i] != cliSrx {
return -1, errors.Errorf("%s setting must be the same for all engines", srxVarName)
}
}
// If the SRX config was not explicitly set via env vars, use the
// global config value.
if cliSrx == -1 {
cliSrx = int32(common.BoolAsInt(!cfg.Fabric.DisableSRX))
}
return cliSrx, nil
}
func checkFabricInterface(name string, lookup func(string) (netInterface, error)) error {
if name == "" {
return errors.New("no name provided")
}
if lookup == nil {
return errors.New("no lookup function provided")
}
netIF, err := lookup(name)
if err != nil {
return err
}
addrs, err := netIF.Addrs()
if err != nil {
return err
}
if len(addrs) == 0 {
return fmt.Errorf("no network addresses for interface %q", name)
}
return nil
}
| {
host, port, err := net.SplitHostPort(addr)
if err != nil {
return nil, errors.Wrapf(err, "unable to split %q", addr)
}
iPort, err := strconv.Atoi(port)
if err != nil {
return nil, errors.Wrapf(err, "unable to convert %q to int", port)
}
addrs, err := lookup(host)
if err != nil {
return nil, errors.Wrapf(err, "unable to resolve %q", host)
}
if len(addrs) == 0 {
return nil, errors.Errorf("no addresses found for %q", host)
}
isIPv4 := func(ip net.IP) bool {
return ip.To4() != nil
}
// Ensure stable ordering of addresses.
sort.Slice(addrs, func(i, j int) bool {
if !isIPv4(addrs[i]) && isIPv4(addrs[j]) {
return false
} else if isIPv4(addrs[i]) && !isIPv4(addrs[j]) {
return true
}
return bytes.Compare(addrs[i], addrs[j]) < 0
})
return &net.TCPAddr{IP: addrs[0], Port: iPort}, nil
} | identifier_body |
server_utils.go | //
// (C) Copyright 2021-2022 Intel Corporation.
//
// SPDX-License-Identifier: BSD-2-Clause-Patent
//
package server
import (
"bytes"
"context"
"fmt"
"net"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"sync"
"github.com/dustin/go-humanize"
"github.com/pkg/errors"
"google.golang.org/grpc"
"github.com/daos-stack/daos/src/control/common"
"github.com/daos-stack/daos/src/control/events"
"github.com/daos-stack/daos/src/control/lib/control"
"github.com/daos-stack/daos/src/control/lib/hardware"
"github.com/daos-stack/daos/src/control/lib/ranklist"
"github.com/daos-stack/daos/src/control/logging"
"github.com/daos-stack/daos/src/control/pbin"
"github.com/daos-stack/daos/src/control/security"
"github.com/daos-stack/daos/src/control/server/config"
"github.com/daos-stack/daos/src/control/server/engine"
"github.com/daos-stack/daos/src/control/server/storage"
"github.com/daos-stack/daos/src/control/system"
"github.com/daos-stack/daos/src/control/system/raft"
)
// netListenerFn is a type alias for the net.Listener function signature.
type netListenFn func(string, string) (net.Listener, error)
// ipLookupFn defines the function signature for a helper that can
// be used to resolve a host address to a list of IP addresses.
type ipLookupFn func(string) ([]net.IP, error)
// resolveFirstAddr is a helper function to resolve a hostname to a TCP address.
// If the hostname resolves to multiple addresses, the first one is returned.
func resolveFirstAddr(addr string, lookup ipLookupFn) (*net.TCPAddr, error) {
host, port, err := net.SplitHostPort(addr)
if err != nil {
return nil, errors.Wrapf(err, "unable to split %q", addr)
}
iPort, err := strconv.Atoi(port)
if err != nil {
return nil, errors.Wrapf(err, "unable to convert %q to int", port)
}
addrs, err := lookup(host)
if err != nil {
return nil, errors.Wrapf(err, "unable to resolve %q", host)
}
if len(addrs) == 0 {
return nil, errors.Errorf("no addresses found for %q", host)
}
isIPv4 := func(ip net.IP) bool {
return ip.To4() != nil
}
// Ensure stable ordering of addresses.
sort.Slice(addrs, func(i, j int) bool {
if !isIPv4(addrs[i]) && isIPv4(addrs[j]) {
return false
} else if isIPv4(addrs[i]) && !isIPv4(addrs[j]) {
return true
}
return bytes.Compare(addrs[i], addrs[j]) < 0
})
return &net.TCPAddr{IP: addrs[0], Port: iPort}, nil
}
const scanMinHugePageCount = 128
func getBdevCfgsFromSrvCfg(cfg *config.Server) storage.TierConfigs {
var bdevCfgs storage.TierConfigs
for _, engineCfg := range cfg.Engines {
bdevCfgs = append(bdevCfgs, engineCfg.Storage.Tiers.BdevConfigs()...)
}
return bdevCfgs
}
func cfgGetReplicas(cfg *config.Server, lookup ipLookupFn) ([]*net.TCPAddr, error) {
var dbReplicas []*net.TCPAddr
for _, ap := range cfg.AccessPoints {
apAddr, err := resolveFirstAddr(ap, lookup)
if err != nil {
return nil, config.FaultConfigBadAccessPoints
}
dbReplicas = append(dbReplicas, apAddr)
}
return dbReplicas, nil
}
func cfgGetRaftDir(cfg *config.Server) string {
if len(cfg.Engines) == 0 {
return "" // can't save to SCM
}
if len(cfg.Engines[0].Storage.Tiers.ScmConfigs()) == 0 {
return ""
}
| f, err := os.OpenFile(path, os.O_WRONLY, 0644)
if err != nil {
// Work around a testing oddity that seems to be related to launching
// the server via SSH, with the result that the /proc file is unwritable.
if os.IsPermission(err) {
log.Debugf("Unable to write core dump filter to %s: %s", path, err)
return nil
}
return errors.Wrapf(err, "unable to open core dump filter file %s", path)
}
defer f.Close()
_, err = f.WriteString(fmt.Sprintf("0x%x\n", filter))
return err
}
type replicaAddrGetter interface {
ReplicaAddr() (*net.TCPAddr, error)
}
type ctlAddrParams struct {
port int
replicaAddrSrc replicaAddrGetter
lookupHost ipLookupFn
}
func getControlAddr(params ctlAddrParams) (*net.TCPAddr, error) {
ipStr := "0.0.0.0"
if repAddr, err := params.replicaAddrSrc.ReplicaAddr(); err == nil {
ipStr = repAddr.IP.String()
}
ctlAddr, err := resolveFirstAddr(fmt.Sprintf("[%s]:%d", ipStr, params.port), params.lookupHost)
if err != nil {
return nil, errors.Wrap(err, "resolving control address")
}
return ctlAddr, nil
}
func createListener(ctlAddr *net.TCPAddr, listen netListenFn) (net.Listener, error) {
// Create and start listener on management network.
lis, err := listen("tcp4", fmt.Sprintf("0.0.0.0:%d", ctlAddr.Port))
if err != nil {
return nil, errors.Wrap(err, "unable to listen on management interface")
}
return lis, nil
}
// updateFabricEnvars adjusts the engine fabric configuration.
func updateFabricEnvars(log logging.Logger, cfg *engine.Config, fis *hardware.FabricInterfaceSet) error {
// In the case of some providers, mercury uses the interface name
// such as ib0, while OFI uses the device name such as hfi1_0 CaRT and
// Mercury will now support the new OFI_DOMAIN environment variable so
// that we can specify the correct device for each.
if !cfg.HasEnvVar("OFI_DOMAIN") {
fi, err := fis.GetInterfaceOnNetDevice(cfg.Fabric.Interface, cfg.Fabric.Provider)
if err != nil {
return errors.Wrapf(err, "unable to determine device domain for %s", cfg.Fabric.Interface)
}
log.Debugf("setting OFI_DOMAIN=%s for %s", fi.Name, cfg.Fabric.Interface)
envVar := "OFI_DOMAIN=" + fi.Name
cfg.WithEnvVars(envVar)
}
return nil
}
func getFabricNetDevClass(cfg *config.Server, fis *hardware.FabricInterfaceSet) (hardware.NetDevClass, error) {
var netDevClass hardware.NetDevClass
for index, engine := range cfg.Engines {
fi, err := fis.GetInterfaceOnNetDevice(engine.Fabric.Interface, engine.Fabric.Provider)
if err != nil {
return 0, err
}
ndc := fi.DeviceClass
if index == 0 {
netDevClass = ndc
continue
}
if ndc != netDevClass {
return 0, config.FaultConfigInvalidNetDevClass(index, netDevClass,
ndc, engine.Fabric.Interface)
}
}
return netDevClass, nil
}
// Detect the number of engine configs assigned to each NUMA node and return error if engines are
// distributed unevenly across NUMA nodes. Otherwise return sorted list of NUMA nodes in use.
// Configurations where all engines are on a single NUMA node will be allowed.
func getEngineNUMANodes(log logging.Logger, engineCfgs []*engine.Config) ([]string, error) {
nodeMap := make(map[int]int)
for _, ec := range engineCfgs {
nodeMap[int(ec.Storage.NumaNodeIndex)] += 1
}
var lastCount int
nodes := make([]string, 0, len(engineCfgs))
for k, v := range nodeMap {
if lastCount != 0 && v != lastCount {
return nil, FaultEngineNUMAImbalance(nodeMap)
}
lastCount = v
nodes = append(nodes, fmt.Sprintf("%d", k))
}
sort.Strings(nodes)
return nodes, nil
}
// Prepare bdev storage. Assumes validation has already been performed on server config. Hugepages
// are required for both emulated (AIO devices) and real NVMe bdevs. VFIO and IOMMU are not
// required for emulated NVMe.
func prepBdevStorage(srv *server, iommuEnabled bool) error {
defer srv.logDuration(track("time to prepare bdev storage"))
if srv.cfg.DisableHugepages {
srv.log.Debugf("skip nvme prepare as disable_hugepages: true in config")
return nil
}
bdevCfgs := getBdevCfgsFromSrvCfg(srv.cfg)
// Perform these checks only if non-emulated NVMe is used and user is unprivileged.
if bdevCfgs.HaveRealNVMe() && srv.runningUser.Username != "root" {
if srv.cfg.DisableVFIO {
return FaultVfioDisabled
}
if !iommuEnabled {
return FaultIommuDisabled
}
}
// When requesting to prepare NVMe drives during service start-up, use all addresses
// specified in engine config BdevList parameters as the PCIAllowList and the server
// config BdevExclude parameter as the PCIBlockList.
prepReq := storage.BdevPrepareRequest{
TargetUser: srv.runningUser.Username,
PCIAllowList: strings.Join(bdevCfgs.NVMeBdevs().Devices(), storage.BdevPciAddrSep),
PCIBlockList: strings.Join(srv.cfg.BdevExclude, storage.BdevPciAddrSep),
DisableVFIO: srv.cfg.DisableVFIO,
}
enableVMD := true
if srv.cfg.DisableVMD != nil && *srv.cfg.DisableVMD {
enableVMD = false
}
switch {
case enableVMD && srv.cfg.DisableVFIO:
srv.log.Info("VMD not enabled because VFIO disabled in config")
case enableVMD && !iommuEnabled:
srv.log.Info("VMD not enabled because IOMMU disabled on platform")
case enableVMD && bdevCfgs.HaveEmulatedNVMe():
srv.log.Info("VMD not enabled because emulated NVMe devices found in config")
default:
// If no case above matches, set enable VMD flag in request otherwise leave false.
prepReq.EnableVMD = enableVMD
}
if bdevCfgs.HaveBdevs() {
// The NrHugepages config value is a total for all engines. Distribute allocation
// of hugepages across each engine's numa node (as validation ensures that
// TargetsCount is equal for each engine). Assumes an equal number of engine's per
// numa node.
numaNodes, err := getEngineNUMANodes(srv.log, srv.cfg.Engines)
if err != nil {
return err
}
if len(numaNodes) == 0 {
return errors.New("invalid number of numa nodes detected (0)")
}
// Request a few more hugepages than actually required for each NUMA node
// allocation as some overhead may result in one or two being unavailable.
prepReq.HugePageCount = srv.cfg.NrHugepages / len(numaNodes)
prepReq.HugePageCount += common.ExtraHugePages
prepReq.HugeNodes = strings.Join(numaNodes, ",")
srv.log.Debugf("allocating %d hugepages on each of these numa nodes: %v",
prepReq.HugePageCount, numaNodes)
} else {
if srv.cfg.NrHugepages == 0 {
// If nr_hugepages is unset then set minimum needed for scanning in prepare
// request.
prepReq.HugePageCount = scanMinHugePageCount
} else {
// If nr_hugepages has been set manually but no bdevs in config then
// allocate on numa node 0 (for example if a bigger number of hugepages are
// required in discovery mode for an unusually large number of SSDs).
prepReq.HugePageCount = srv.cfg.NrHugepages
}
srv.log.Debugf("allocating %d hugepages on numa node 0", prepReq.HugePageCount)
}
// Run prepare to bind devices to user-space driver and allocate hugepages.
//
// TODO: should be passing root context into prepare request to
// facilitate cancellation.
if _, err := srv.ctlSvc.NvmePrepare(prepReq); err != nil {
srv.log.Errorf("automatic NVMe prepare failed: %s", err)
}
return nil
}
// scanBdevStorage performs discovery and validates existence of configured NVMe SSDs.
func scanBdevStorage(srv *server) (*storage.BdevScanResponse, error) {
defer srv.logDuration(track("time to scan bdev storage"))
if srv.cfg.DisableHugepages {
srv.log.Debugf("skip nvme scan as hugepages have been disabled in config")
return &storage.BdevScanResponse{}, nil
}
nvmeScanResp, err := srv.ctlSvc.NvmeScan(storage.BdevScanRequest{
DeviceList: getBdevCfgsFromSrvCfg(srv.cfg).Bdevs(),
BypassCache: true, // init cache on first scan
})
if err != nil {
err = errors.Wrap(err, "NVMe Scan Failed")
srv.log.Errorf("%s", err)
return nil, err
}
return nvmeScanResp, nil
}
// setEngineBdevs caches NVMe scan results on the engine and verifies that every
// engine in the config ends up with the same number of physical bdevs.
// lastEngineIdx/lastBdevCount carry state between calls; both start at -1.
func setEngineBdevs(engine *EngineInstance, scanResp *storage.BdevScanResponse, lastEngineIdx, lastBdevCount *int) error {
	// Validate inputs up front; all four parameters are required.
	if engine == nil {
		return errors.New("nil input param: engine")
	}
	if scanResp == nil {
		return errors.New("nil input param: scanResp")
	}
	if lastEngineIdx == nil {
		return errors.New("nil input param: lastEngineIdx")
	}
	if lastBdevCount == nil {
		return errors.New("nil input param: lastBdevCount")
	}

	if err := engine.storage.SetBdevCache(*scanResp); err != nil {
		return errors.Wrap(err, "setting engine storage bdev cache")
	}

	// After engine's bdev cache has been set, the cache will only contain details of bdevs
	// identified in the relevant engine config and device addresses will have been verified
	// against NVMe scan results. As any VMD endpoint addresses will have been replaced with
	// backing device addresses, device counts will reflect the number of physical (as opposed
	// to logical) bdevs and engine bdev counts can be accurately compared.
	curIdx := engine.Index()
	nrBdevs := len(engine.storage.GetBdevCache().Controllers)
	engine.log.Debugf("last: [index: %d, bdevCount: %d], current: [index: %d, bdevCount: %d]",
		*lastEngineIdx, *lastBdevCount, curIdx, nrBdevs)

	// First engine seen: record its counters. Later engines must match the
	// recorded bdev count exactly, otherwise the config is inconsistent.
	if *lastEngineIdx < 0 {
		if *lastBdevCount >= 0 {
			return errors.New("expecting both lastEngineIdx and lastBdevCount to be unset")
		}
		*lastEngineIdx = int(curIdx)
		*lastBdevCount = nrBdevs
		return nil
	}
	if *lastBdevCount < 0 {
		return errors.New("expecting both lastEngineIdx and lastBdevCount to be set")
	}
	if nrBdevs != *lastBdevCount {
		return config.FaultConfigBdevCountMismatch(int(curIdx), nrBdevs, *lastEngineIdx, *lastBdevCount)
	}
	*lastEngineIdx = int(curIdx)
	return nil
}
// setDaosHelperEnvs propagates configured helper log-file paths into the
// environment so the privileged helper binaries pick them up.
func setDaosHelperEnvs(cfg *config.Server, setenv func(k, v string) error) error {
	helperLogs := []struct {
		envKey string
		path   string
		errMsg string
	}{
		{pbin.DaosPrivHelperLogFileEnvVar, cfg.HelperLogFile, "unable to configure privileged helper logging"},
		{pbin.DaosFWLogFileEnvVar, cfg.FWHelperLogFile, "unable to configure privileged firmware helper logging"},
	}
	for _, h := range helperLogs {
		if h.path == "" {
			continue // unset in config; nothing to export
		}
		if err := setenv(h.envKey, h.path); err != nil {
			return errors.Wrap(err, h.errMsg)
		}
	}
	return nil
}
// Minimum recommended number of hugepages has already been calculated and set in config so verify
// we have enough free hugepage memory to satisfy this requirement before setting mem_size and
// hugepage_size parameters for engine.
//
// NOTE(review): the engine read-lock is released manually on both the early-return
// and the fall-through paths below; keep the RLock/RUnlock calls paired when editing.
func updateMemValues(srv *server, engine *EngineInstance, getMemInfo common.GetMemInfoFn) error {
	// Snapshot the engine config under the read lock.
	engine.RLock()
	ec := engine.runner.GetConfig()
	ei := ec.Index
	// Engines without bdev tiers don't use hugepage-backed memory; nothing to verify.
	if ec.Storage.Tiers.Bdevs().Len() == 0 {
		srv.log.Debugf("skipping mem check on engine %d, no bdevs", ei)
		engine.RUnlock()
		return nil
	}
	engine.RUnlock()
	// Retrieve up-to-date hugepage info to check that we got the requested number of hugepages.
	mi, err := getMemInfo()
	if err != nil {
		return err
	}
	// Calculate mem_size per I/O engine (in MB) from number of hugepages required per engine.
	// NOTE(review): assumes len(srv.cfg.Engines) > 0 — presumably guaranteed by config
	// validation before this point; confirm against caller.
	nrPagesRequired := srv.cfg.NrHugepages / len(srv.cfg.Engines)
	pageSizeMiB := mi.HugePageSizeKb / humanize.KiByte // kib to mib
	memSizeReqMiB := nrPagesRequired * pageSizeMiB
	memSizeFreeMiB := mi.HugePagesFree * pageSizeMiB
	// Fail if free hugepage mem is not enough to sustain average I/O workload (~1GB).
	srv.log.Debugf("Per-engine MemSize:%dMB, HugepageSize:%dMB (meminfo: %+v)", memSizeReqMiB,
		pageSizeMiB, *mi)
	if memSizeFreeMiB < memSizeReqMiB {
		return FaultInsufficientFreeHugePageMem(int(ei), memSizeReqMiB, memSizeFreeMiB,
			nrPagesRequired, mi.HugePagesFree)
	}
	// Set engine mem_size and hugepage_size (MiB) values based on hugepage info.
	engine.setMemSize(memSizeReqMiB)
	engine.setHugePageSz(pageSizeMiB)
	return nil
}
// cleanEngineHugePages asks the bdev backend to remove leftover hugepages,
// logging how many were reclaimed.
func cleanEngineHugePages(srv *server) error {
	msg := "cleanup hugepages via bdev backend"
	resp, err := srv.ctlSvc.NvmePrepare(storage.BdevPrepareRequest{
		CleanHugePagesOnly: true,
	})
	if err != nil {
		return errors.Wrap(err, msg)
	}
	srv.log.Debugf("%s: %d removed", msg, resp.NrHugePagesRemoved)
	return nil
}
// registerEngineEventCallbacks wires up the engine lifecycle callbacks used for
// event publication and memory-parameter tuning. allStarted is decremented
// exactly once per engine, on its first transition to ready.
func registerEngineEventCallbacks(srv *server, engine *EngineInstance, allStarted *sync.WaitGroup) {
	// Register callback to publish engine process exit events.
	engine.OnInstanceExit(createPublishInstanceExitFunc(srv.pubSub.Publish, srv.hostname))
	// Register callback to publish engine format requested events.
	engine.OnAwaitFormat(createPublishFormatRequiredFunc(srv.pubSub.Publish, srv.hostname))
	var onceReady sync.Once
	engine.OnReady(func(_ context.Context) error {
		// Indicate that engine has been started, only do this the first time that the
		// engine starts as shared memory persists between engine restarts.
		onceReady.Do(func() {
			allStarted.Done()
		})
		return nil
	})
	// Register callback to update engine cfg mem_size after format.
	engine.OnStorageReady(func(_ context.Context) error {
		srv.log.Debugf("engine %d: storage ready", engine.Index())
		// Attempt to remove unused hugepages, log error only.
		if err := cleanEngineHugePages(srv); err != nil {
			srv.log.Errorf(err.Error())
		}
		// Update engine memory related config parameters before starting.
		return errors.Wrap(updateMemValues(srv, engine, common.GetMemInfo),
			"updating engine memory parameters")
	})
}
// configureFirstEngine wires instance 0 to the management-service database when
// this host is an MS replica: the sysdb is started once the instance's SCM is
// ready, and on the raft bootstrap replica the first instance is pinned to rank 0.
func configureFirstEngine(ctx context.Context, engine *EngineInstance, sysdb *raft.Database, join systemJoinFn) {
	// Non-replicas have no local system db to manage.
	if !sysdb.IsReplica() {
		return
	}
	// Start the system db after instance 0's SCM is ready.
	var onceStorageReady sync.Once
	engine.OnStorageReady(func(_ context.Context) (err error) {
		onceStorageReady.Do(func() {
			// NB: We use the outer context rather than
			// the closure context in order to avoid
			// tying the db to the instance.
			err = errors.Wrap(sysdb.Start(ctx),
				"failed to start system db",
			)
		})
		return
	})
	if !sysdb.IsBootstrap() {
		return
	}
	// For historical reasons, we reserve rank 0 for the first
	// instance on the raft bootstrap server. This implies that
	// rank 0 will always be associated with a MS replica, but
	// it is not guaranteed to always be the leader.
	engine.joinSystem = func(ctx context.Context, req *control.SystemJoinReq) (*control.SystemJoinResp, error) {
		// Only claim rank 0 while the superblock has no valid rank yet.
		if sb := engine.getSuperblock(); !sb.ValidRank {
			engine.log.Debug("marking bootstrap instance as rank 0")
			req.Rank = 0
			sb.Rank = ranklist.NewRankPtr(0)
		}
		return join(ctx, req)
	}
}
// registerTelemetryCallbacks sets telemetry related callbacks to
// be triggered when all engines have been started.
func registerTelemetryCallbacks(ctx context.Context, srv *server) {
	port := srv.cfg.TelemetryPort
	if port == 0 {
		return // telemetry disabled in config
	}
	srv.OnEnginesStarted(func(ctxIn context.Context) error {
		srv.log.Debug("starting Prometheus exporter")
		shutdown, err := startPrometheusExporter(ctxIn, srv.log, port, srv.harness.Instances())
		if err == nil {
			// Tear the exporter down when the server shuts down.
			srv.OnShutdown(shutdown)
		}
		return err
	})
}
// registerFollowerSubscriptions stops handling received forwarded (in addition
// to local) events and starts forwarding events to the new MS leader.
// Log events on the host that they were raised (and first published) on.
// This is the initial behavior before leadership has been determined.
func registerFollowerSubscriptions(srv *server) {
	// Start from a clean slate (the leader registration path does the same).
	srv.pubSub.Reset()
	srv.pubSub.Subscribe(events.RASTypeAny, srv.evtLogger)
	// State-change events are forwarded to the MS leader rather than handled locally.
	srv.pubSub.Subscribe(events.RASTypeStateChange, srv.evtForwarder)
}
// registerLeaderSubscriptions stops forwarding events to MS and instead starts
// handling received forwarded (and local) events.
func registerLeaderSubscriptions(srv *server) {
	srv.pubSub.Reset()
	srv.pubSub.Subscribe(events.RASTypeAny, srv.evtLogger)
	srv.pubSub.Subscribe(events.RASTypeStateChange, srv.membership)
	srv.pubSub.Subscribe(events.RASTypeStateChange, srv.sysdb)
	srv.pubSub.Subscribe(events.RASTypeStateChange,
		events.HandlerFunc(func(ctx context.Context, evt *events.RASEvent) {
			switch evt.ID {
			case events.RASSwimRankDead:
				ts, err := evt.GetTimestamp()
				if err != nil {
					srv.log.Errorf("bad event timestamp %q: %s", evt.Timestamp, err)
					return
				}
				srv.log.Debugf("%s marked rank %d:%x dead @ %s", evt.Hostname, evt.Rank, evt.Incarnation, ts)
				// Mark the rank as unavailable for membership in
				// new pools, etc. Do group update on success.
				if err := srv.membership.MarkRankDead(ranklist.Rank(evt.Rank), evt.Incarnation); err != nil {
					srv.log.Errorf("failed to mark rank %d:%x dead: %s", evt.Rank, evt.Incarnation, err)
					if system.IsNotLeader(err) {
						// If we've lost leadership while processing the event,
						// attempt to re-forward it to the new leader.
						evt = evt.WithForwarded(false).WithForwardable(true)
						srv.log.Debugf("re-forwarding rank dead event for %d:%x", evt.Rank, evt.Incarnation)
						srv.evtForwarder.OnEvent(ctx, evt)
					}
					return
				}
				// Membership updated successfully; trigger a group update.
				srv.mgmtSvc.reqGroupUpdate(ctx, false)
			}
		}))
	// Add a debounce to throttle multiple SWIM Rank Dead events for the same rank/incarnation.
	srv.pubSub.Debounce(events.RASSwimRankDead, 0, func(ev *events.RASEvent) string {
		return strconv.FormatUint(uint64(ev.Rank), 10) + ":" + strconv.FormatUint(ev.Incarnation, 10)
	})
}
// getGrpcOpts generates a set of gRPC options for the server based on the supplied configuration.
func getGrpcOpts(log logging.Logger, cfgTransport *security.TransportConfig) ([]grpc.ServerOption, error) {
	tcOpt, err := security.ServerOptionForTransportConfig(cfgTransport)
	if err != nil {
		return nil, err
	}
	uintOpt, err := unaryInterceptorForTransportConfig(cfgTransport)
	if err != nil {
		return nil, err
	}
	sintOpt, err := streamInterceptorForTransportConfig(cfgTransport)
	if err != nil {
		return nil, err
	}

	// Logging interceptor must be first in order to properly log errors.
	unary := []grpc.UnaryServerInterceptor{
		unaryLoggingInterceptor(log),
		unaryErrorInterceptor,
		unaryStatusInterceptor,
		unaryVersionInterceptor,
	}
	if uintOpt != nil {
		unary = append(unary, uintOpt)
	}
	stream := []grpc.StreamServerInterceptor{streamErrorInterceptor}
	if sintOpt != nil {
		stream = append(stream, sintOpt)
	}

	return []grpc.ServerOption{
		tcOpt,
		grpc.ChainUnaryInterceptor(unary...),
		grpc.ChainStreamInterceptor(stream...),
	}, nil
}
// netInterface captures the single method needed from net.Interface
// (presumably kept minimal to ease substituting fakes in tests).
type netInterface interface {
	Addrs() ([]net.Addr, error)
}
// getSrxSetting derives the shared-receive-context (SRX) setting from the
// per-engine FI_OFI_RXM_USE_SRX env vars, falling back to the global fabric
// config when no engine sets it. Returns an error if engines disagree, a value
// is unparseable, or the var is configured as a pass-through.
func getSrxSetting(cfg *config.Server) (int32, error) {
	if len(cfg.Engines) == 0 {
		return -1, nil
	}

	const srxVarName = "FI_OFI_RXM_USE_SRX"
	// getSetting returns (matched, value, error) for a single "K=V" entry.
	// SplitN is used so a value containing '=' is still split into exactly
	// key and value; with the previous exact two-way split, a malformed SRX
	// entry such as "FI_OFI_RXM_USE_SRX=1=2" was silently ignored instead of
	// being reported as a parse error.
	getSetting := func(ev string) (bool, int32, error) {
		kv := strings.SplitN(ev, "=", 2)
		if len(kv) != 2 {
			return false, -1, nil
		}
		if kv[0] != srxVarName {
			return false, -1, nil
		}
		v, err := strconv.ParseInt(kv[1], 10, 32)
		if err != nil {
			return false, -1, err
		}
		return true, int32(v), nil
	}

	// Collect the effective SRX value per engine (-1 == unset).
	engineVals := make([]int32, len(cfg.Engines))
	for idx, ec := range cfg.Engines {
		engineVals[idx] = -1 // default to unset
		for _, ev := range ec.EnvVars {
			if match, engSrx, err := getSetting(ev); err != nil {
				return -1, err
			} else if match {
				engineVals[idx] = engSrx
				break
			}
		}
		// Pass-through would let the ambient environment override per-engine
		// agreement, so it is rejected outright.
		for _, pte := range ec.EnvPassThrough {
			if pte == srxVarName {
				return -1, errors.Errorf("%s may not be set as a pass-through env var", srxVarName)
			}
		}
	}

	// All engines must agree on the setting.
	cliSrx := engineVals[0]
	for i := 1; i < len(engineVals); i++ {
		if engineVals[i] != cliSrx {
			return -1, errors.Errorf("%s setting must be the same for all engines", srxVarName)
		}
	}

	// If the SRX config was not explicitly set via env vars, use the
	// global config value.
	if cliSrx == -1 {
		cliSrx = int32(common.BoolAsInt(!cfg.Fabric.DisableSRX))
	}

	return cliSrx, nil
}
// checkFabricInterface validates that the named fabric interface exists (via
// the supplied lookup) and has at least one network address assigned.
func checkFabricInterface(name string, lookup func(string) (netInterface, error)) error {
	if name == "" {
		return errors.New("no name provided")
	}
	if lookup == nil {
		return errors.New("no lookup function provided")
	}
	netIF, err := lookup(name)
	if err != nil {
		return err
	}
	addrs, err := netIF.Addrs()
	if err != nil {
		return err
	}
	// An interface with no addresses cannot carry fabric traffic.
	if len(addrs) == 0 {
		return fmt.Errorf("no network addresses for interface %q", name)
	}
	return nil
} | return filepath.Join(cfg.Engines[0].Storage.Tiers.ScmConfigs()[0].Scm.MountPoint, "control_raft")
}
func writeCoreDumpFilter(log logging.Logger, path string, filter uint8) error { | random_line_split |
server_utils.go | //
// (C) Copyright 2021-2022 Intel Corporation.
//
// SPDX-License-Identifier: BSD-2-Clause-Patent
//
package server
import (
"bytes"
"context"
"fmt"
"net"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"sync"
"github.com/dustin/go-humanize"
"github.com/pkg/errors"
"google.golang.org/grpc"
"github.com/daos-stack/daos/src/control/common"
"github.com/daos-stack/daos/src/control/events"
"github.com/daos-stack/daos/src/control/lib/control"
"github.com/daos-stack/daos/src/control/lib/hardware"
"github.com/daos-stack/daos/src/control/lib/ranklist"
"github.com/daos-stack/daos/src/control/logging"
"github.com/daos-stack/daos/src/control/pbin"
"github.com/daos-stack/daos/src/control/security"
"github.com/daos-stack/daos/src/control/server/config"
"github.com/daos-stack/daos/src/control/server/engine"
"github.com/daos-stack/daos/src/control/server/storage"
"github.com/daos-stack/daos/src/control/system"
"github.com/daos-stack/daos/src/control/system/raft"
)
// netListenerFn is a type alias for the net.Listener function signature.
type netListenFn func(string, string) (net.Listener, error)
// ipLookupFn defines the function signature for a helper that can
// be used to resolve a host address to a list of IP addresses.
type ipLookupFn func(string) ([]net.IP, error)
// resolveFirstAddr is a helper function to resolve a hostname to a TCP address.
// If the hostname resolves to multiple addresses, they are sorted (IPv4 before
// IPv6, then by address bytes) and the first one is returned, so the choice is
// stable across runs and hosts.
func resolveFirstAddr(addr string, lookup ipLookupFn) (*net.TCPAddr, error) {
	host, port, err := net.SplitHostPort(addr)
	if err != nil {
		return nil, errors.Wrapf(err, "unable to split %q", addr)
	}
	iPort, err := strconv.Atoi(port)
	if err != nil {
		return nil, errors.Wrapf(err, "unable to convert %q to int", port)
	}
	addrs, err := lookup(host)
	if err != nil {
		return nil, errors.Wrapf(err, "unable to resolve %q", host)
	}
	if len(addrs) == 0 {
		return nil, errors.Errorf("no addresses found for %q", host)
	}

	isIPv4 := func(ip net.IP) bool {
		return ip.To4() != nil
	}
	// Ensure stable ordering of addresses. Compare 16-byte canonical forms:
	// a net.IP may hold an IPv4 address in either 4-byte or 16-byte
	// representation, and bytes.Compare on mixed-length slices would give an
	// arbitrary (lookup-dependent) order within the IPv4 group.
	sort.Slice(addrs, func(i, j int) bool {
		a, b := addrs[i], addrs[j]
		if isIPv4(a) != isIPv4(b) {
			return isIPv4(a) // IPv4 sorts before IPv6
		}
		return bytes.Compare(a.To16(), b.To16()) < 0
	})
	return &net.TCPAddr{IP: addrs[0], Port: iPort}, nil
}
const scanMinHugePageCount = 128
// getBdevCfgsFromSrvCfg flattens the bdev tier configs of every engine in the
// server config into a single list.
func getBdevCfgsFromSrvCfg(cfg *config.Server) storage.TierConfigs {
	var cfgs storage.TierConfigs
	for _, ec := range cfg.Engines {
		for _, tc := range ec.Storage.Tiers.BdevConfigs() {
			cfgs = append(cfgs, tc)
		}
	}
	return cfgs
}
// cfgGetReplicas resolves each configured access point to a TCP address; any
// resolution failure is reported as a bad-access-points config fault.
func cfgGetReplicas(cfg *config.Server, lookup ipLookupFn) ([]*net.TCPAddr, error) {
	var replicas []*net.TCPAddr
	for _, ap := range cfg.AccessPoints {
		resolved, err := resolveFirstAddr(ap, lookup)
		if err != nil {
			return nil, config.FaultConfigBadAccessPoints
		}
		replicas = append(replicas, resolved)
	}
	return replicas, nil
}
// cfgGetRaftDir returns the directory used to persist raft state, located on
// the first engine's first SCM mount; empty when no SCM is configured.
func cfgGetRaftDir(cfg *config.Server) string {
	if len(cfg.Engines) == 0 {
		return "" // can't save to SCM
	}
	scmCfgs := cfg.Engines[0].Storage.Tiers.ScmConfigs()
	if len(scmCfgs) == 0 {
		return ""
	}
	return filepath.Join(scmCfgs[0].Scm.MountPoint, "control_raft")
}
// writeCoreDumpFilter writes the given core dump filter bitmask (as hex) to the
// supplied /proc path. Permission errors are tolerated and only logged.
func writeCoreDumpFilter(log logging.Logger, path string, filter uint8) error {
	f, err := os.OpenFile(path, os.O_WRONLY, 0644)
	if err != nil {
		// Work around a testing oddity that seems to be related to launching
		// the server via SSH, with the result that the /proc file is unwritable.
		if os.IsPermission(err) {
			log.Debugf("Unable to write core dump filter to %s: %s", path, err)
			return nil
		}
		return errors.Wrapf(err, "unable to open core dump filter file %s", path)
	}
	defer f.Close()
	_, werr := fmt.Fprintf(f, "0x%x\n", filter)
	return werr
}
// replicaAddrGetter supplies this node's MS replica address, when it has one.
type replicaAddrGetter interface {
	ReplicaAddr() (*net.TCPAddr, error)
}

// ctlAddrParams bundles the inputs needed to determine the control-plane
// listen address.
type ctlAddrParams struct {
	port           int
	replicaAddrSrc replicaAddrGetter
	lookupHost     ipLookupFn
}
// getControlAddr works out the TCP address for the control service: the MS
// replica address IP when available, otherwise the wildcard address.
func getControlAddr(params ctlAddrParams) (*net.TCPAddr, error) {
	ip := "0.0.0.0"
	// A ReplicaAddr error simply means this node is not a replica; fall back
	// to the wildcard IP.
	if repAddr, err := params.replicaAddrSrc.ReplicaAddr(); err == nil {
		ip = repAddr.IP.String()
	}
	resolved, err := resolveFirstAddr(fmt.Sprintf("[%s]:%d", ip, params.port), params.lookupHost)
	if err != nil {
		return nil, errors.Wrap(err, "resolving control address")
	}
	return resolved, nil
}
// createListener opens the TCP listener for the management network. Note that
// the socket is always bound to the wildcard address; only the port is taken
// from ctlAddr.
func createListener(ctlAddr *net.TCPAddr, listen netListenFn) (net.Listener, error) {
	// Create and start listener on management network.
	lis, err := listen("tcp4", fmt.Sprintf("0.0.0.0:%d", ctlAddr.Port))
	if err != nil {
		return nil, errors.Wrap(err, "unable to listen on management interface")
	}
	return lis, nil
}
// updateFabricEnvars adjusts the engine fabric configuration. For some
// providers, Mercury uses the interface name (e.g. ib0) while OFI uses the
// device name (e.g. hfi1_0); the OFI_DOMAIN env var lets CaRT/Mercury pick the
// correct device, so it is derived here when not already set by the user.
func updateFabricEnvars(log logging.Logger, cfg *engine.Config, fis *hardware.FabricInterfaceSet) error {
	if cfg.HasEnvVar("OFI_DOMAIN") {
		return nil // user pinned the domain explicitly; leave it alone
	}
	fi, err := fis.GetInterfaceOnNetDevice(cfg.Fabric.Interface, cfg.Fabric.Provider)
	if err != nil {
		return errors.Wrapf(err, "unable to determine device domain for %s", cfg.Fabric.Interface)
	}
	log.Debugf("setting OFI_DOMAIN=%s for %s", fi.Name, cfg.Fabric.Interface)
	cfg.WithEnvVars("OFI_DOMAIN=" + fi.Name)
	return nil
}
// getFabricNetDevClass determines the network device class shared by all
// engine fabric interfaces, returning a config fault if engines disagree.
func getFabricNetDevClass(cfg *config.Server, fis *hardware.FabricInterfaceSet) (hardware.NetDevClass, error) {
	var devClass hardware.NetDevClass
	for idx, ec := range cfg.Engines {
		fi, err := fis.GetInterfaceOnNetDevice(ec.Fabric.Interface, ec.Fabric.Provider)
		if err != nil {
			return 0, err
		}
		switch {
		case idx == 0:
			// The first engine fixes the expected class.
			devClass = fi.DeviceClass
		case fi.DeviceClass != devClass:
			return 0, config.FaultConfigInvalidNetDevClass(idx, devClass,
				fi.DeviceClass, ec.Fabric.Interface)
		}
	}
	return devClass, nil
}
// Detect the number of engine configs assigned to each NUMA node and return error if engines are
// distributed unevenly across NUMA nodes. Otherwise return sorted list of NUMA nodes in use.
// Configurations where all engines are on a single NUMA node will be allowed.
//
// NOTE: the log parameter is currently unused; it is retained for signature
// stability with the call site.
func getEngineNUMANodes(log logging.Logger, engineCfgs []*engine.Config) ([]string, error) {
	// Count engines per NUMA node.
	nodeMap := make(map[int]int)
	for _, ec := range engineCfgs {
		nodeMap[int(ec.Storage.NumaNodeIndex)] += 1
	}

	var lastCount int
	nodes := make([]string, 0, len(nodeMap))
	for node, count := range nodeMap {
		// Every node must host the same number of engines.
		if lastCount != 0 && count != lastCount {
			return nil, FaultEngineNUMAImbalance(nodeMap)
		}
		lastCount = count
		// strconv.Itoa is the idiomatic (and cheaper) int-to-string conversion.
		nodes = append(nodes, strconv.Itoa(node))
	}
	sort.Strings(nodes)

	return nodes, nil
}
// Prepare bdev storage. Assumes validation has already been performed on server config. Hugepages
// are required for both emulated (AIO devices) and real NVMe bdevs. VFIO and IOMMU are not
// required for emulated NVMe.
//
// Builds a single BdevPrepareRequest (allow/block lists, VMD flag, hugepage
// allocation) and submits it to the bdev backend; a prepare failure is logged
// but does not abort server start-up.
func prepBdevStorage(srv *server, iommuEnabled bool) error {
	defer srv.logDuration(track("time to prepare bdev storage"))

	if srv.cfg.DisableHugepages {
		srv.log.Debugf("skip nvme prepare as disable_hugepages: true in config")
		return nil
	}

	bdevCfgs := getBdevCfgsFromSrvCfg(srv.cfg)

	// Perform these checks only if non-emulated NVMe is used and user is unprivileged.
	if bdevCfgs.HaveRealNVMe() && srv.runningUser.Username != "root" {
		if srv.cfg.DisableVFIO {
			return FaultVfioDisabled
		}
		if !iommuEnabled {
			return FaultIommuDisabled
		}
	}

	// When requesting to prepare NVMe drives during service start-up, use all addresses
	// specified in engine config BdevList parameters as the PCIAllowList and the server
	// config BdevExclude parameter as the PCIBlockList.
	prepReq := storage.BdevPrepareRequest{
		TargetUser:   srv.runningUser.Username,
		PCIAllowList: strings.Join(bdevCfgs.NVMeBdevs().Devices(), storage.BdevPciAddrSep),
		PCIBlockList: strings.Join(srv.cfg.BdevExclude, storage.BdevPciAddrSep),
		DisableVFIO:  srv.cfg.DisableVFIO,
	}

	// VMD is enabled by default unless explicitly disabled in config.
	enableVMD := true
	if srv.cfg.DisableVMD != nil && *srv.cfg.DisableVMD {
		enableVMD = false
	}

	switch {
	case enableVMD && srv.cfg.DisableVFIO:
		srv.log.Info("VMD not enabled because VFIO disabled in config")
	case enableVMD && !iommuEnabled:
		srv.log.Info("VMD not enabled because IOMMU disabled on platform")
	case enableVMD && bdevCfgs.HaveEmulatedNVMe():
		srv.log.Info("VMD not enabled because emulated NVMe devices found in config")
	default:
		// If no case above matches, set enable VMD flag in request otherwise leave false.
		prepReq.EnableVMD = enableVMD
	}

	if bdevCfgs.HaveBdevs() {
		// The NrHugepages config value is a total for all engines. Distribute allocation
		// of hugepages across each engine's numa node (as validation ensures that
		// TargetsCount is equal for each engine). Assumes an equal number of engine's per
		// numa node.
		numaNodes, err := getEngineNUMANodes(srv.log, srv.cfg.Engines)
		if err != nil {
			return err
		}
		if len(numaNodes) == 0 {
			return errors.New("invalid number of numa nodes detected (0)")
		}
		// Request a few more hugepages than actually required for each NUMA node
		// allocation as some overhead may result in one or two being unavailable.
		prepReq.HugePageCount = srv.cfg.NrHugepages / len(numaNodes)
		prepReq.HugePageCount += common.ExtraHugePages
		prepReq.HugeNodes = strings.Join(numaNodes, ",")
		srv.log.Debugf("allocating %d hugepages on each of these numa nodes: %v",
			prepReq.HugePageCount, numaNodes)
	} else {
		if srv.cfg.NrHugepages == 0 {
			// If nr_hugepages is unset then set minimum needed for scanning in prepare
			// request.
			prepReq.HugePageCount = scanMinHugePageCount
		} else {
			// If nr_hugepages has been set manually but no bdevs in config then
			// allocate on numa node 0 (for example if a bigger number of hugepages are
			// required in discovery mode for an unusually large number of SSDs).
			prepReq.HugePageCount = srv.cfg.NrHugepages
		}
		srv.log.Debugf("allocating %d hugepages on numa node 0", prepReq.HugePageCount)
	}

	// Run prepare to bind devices to user-space driver and allocate hugepages.
	//
	// TODO: should be passing root context into prepare request to
	// facilitate cancellation.
	if _, err := srv.ctlSvc.NvmePrepare(prepReq); err != nil {
		// Prepare failure is non-fatal here; logged and start-up continues.
		srv.log.Errorf("automatic NVMe prepare failed: %s", err)
	}

	return nil
}
// scanBdevStorage performs discovery and validates existence of configured NVMe SSDs.
func scanBdevStorage(srv *server) (*storage.BdevScanResponse, error) {
	defer srv.logDuration(track("time to scan bdev storage"))

	// With hugepages disabled there is nothing to scan; hand back an empty response.
	if srv.cfg.DisableHugepages {
		srv.log.Debugf("skip nvme scan as hugepages have been disabled in config")
		return &storage.BdevScanResponse{}, nil
	}

	req := storage.BdevScanRequest{
		DeviceList:  getBdevCfgsFromSrvCfg(srv.cfg).Bdevs(),
		BypassCache: true, // init cache on first scan
	}
	resp, scanErr := srv.ctlSvc.NvmeScan(req)
	if scanErr == nil {
		return resp, nil
	}
	wrapped := errors.Wrap(scanErr, "NVMe Scan Failed")
	srv.log.Errorf("%s", wrapped)
	return nil, wrapped
}
// setEngineBdevs caches NVMe scan results on the engine and verifies that every
// engine in the config ends up with the same number of physical bdevs.
// lastEngineIdx/lastBdevCount carry state between calls; both start at -1.
func setEngineBdevs(engine *EngineInstance, scanResp *storage.BdevScanResponse, lastEngineIdx, lastBdevCount *int) error {
	// Validate inputs up front; all four parameters are required.
	if engine == nil {
		return errors.New("nil input param: engine")
	}
	if scanResp == nil {
		return errors.New("nil input param: scanResp")
	}
	if lastEngineIdx == nil {
		return errors.New("nil input param: lastEngineIdx")
	}
	if lastBdevCount == nil {
		return errors.New("nil input param: lastBdevCount")
	}

	if err := engine.storage.SetBdevCache(*scanResp); err != nil {
		return errors.Wrap(err, "setting engine storage bdev cache")
	}

	// After engine's bdev cache has been set, the cache will only contain details of bdevs
	// identified in the relevant engine config and device addresses will have been verified
	// against NVMe scan results. As any VMD endpoint addresses will have been replaced with
	// backing device addresses, device counts will reflect the number of physical (as opposed
	// to logical) bdevs and engine bdev counts can be accurately compared.
	curIdx := engine.Index()
	nrBdevs := len(engine.storage.GetBdevCache().Controllers)
	engine.log.Debugf("last: [index: %d, bdevCount: %d], current: [index: %d, bdevCount: %d]",
		*lastEngineIdx, *lastBdevCount, curIdx, nrBdevs)

	// First engine seen: record its counters. Later engines must match the
	// recorded bdev count exactly, otherwise the config is inconsistent.
	if *lastEngineIdx < 0 {
		if *lastBdevCount >= 0 {
			return errors.New("expecting both lastEngineIdx and lastBdevCount to be unset")
		}
		*lastEngineIdx = int(curIdx)
		*lastBdevCount = nrBdevs
		return nil
	}
	if *lastBdevCount < 0 {
		return errors.New("expecting both lastEngineIdx and lastBdevCount to be set")
	}
	if nrBdevs != *lastBdevCount {
		return config.FaultConfigBdevCountMismatch(int(curIdx), nrBdevs, *lastEngineIdx, *lastBdevCount)
	}
	*lastEngineIdx = int(curIdx)
	return nil
}
// setDaosHelperEnvs propagates configured helper log-file paths into the
// environment so the privileged helper binaries pick them up.
func setDaosHelperEnvs(cfg *config.Server, setenv func(k, v string) error) error {
	helperLogs := []struct {
		envKey string
		path   string
		errMsg string
	}{
		{pbin.DaosPrivHelperLogFileEnvVar, cfg.HelperLogFile, "unable to configure privileged helper logging"},
		{pbin.DaosFWLogFileEnvVar, cfg.FWHelperLogFile, "unable to configure privileged firmware helper logging"},
	}
	for _, h := range helperLogs {
		if h.path == "" {
			continue // unset in config; nothing to export
		}
		if err := setenv(h.envKey, h.path); err != nil {
			return errors.Wrap(err, h.errMsg)
		}
	}
	return nil
}
// Minimum recommended number of hugepages has already been calculated and set in config so verify
// we have enough free hugepage memory to satisfy this requirement before setting mem_size and
// hugepage_size parameters for engine.
//
// NOTE(review): the engine read-lock is released manually on both the early-return
// and the fall-through paths below; keep the RLock/RUnlock calls paired when editing.
func updateMemValues(srv *server, engine *EngineInstance, getMemInfo common.GetMemInfoFn) error {
	// Snapshot the engine config under the read lock.
	engine.RLock()
	ec := engine.runner.GetConfig()
	ei := ec.Index
	// Engines without bdev tiers don't use hugepage-backed memory; nothing to verify.
	if ec.Storage.Tiers.Bdevs().Len() == 0 {
		srv.log.Debugf("skipping mem check on engine %d, no bdevs", ei)
		engine.RUnlock()
		return nil
	}
	engine.RUnlock()
	// Retrieve up-to-date hugepage info to check that we got the requested number of hugepages.
	mi, err := getMemInfo()
	if err != nil {
		return err
	}
	// Calculate mem_size per I/O engine (in MB) from number of hugepages required per engine.
	// NOTE(review): assumes len(srv.cfg.Engines) > 0 — presumably guaranteed by config
	// validation before this point; confirm against caller.
	nrPagesRequired := srv.cfg.NrHugepages / len(srv.cfg.Engines)
	pageSizeMiB := mi.HugePageSizeKb / humanize.KiByte // kib to mib
	memSizeReqMiB := nrPagesRequired * pageSizeMiB
	memSizeFreeMiB := mi.HugePagesFree * pageSizeMiB
	// Fail if free hugepage mem is not enough to sustain average I/O workload (~1GB).
	srv.log.Debugf("Per-engine MemSize:%dMB, HugepageSize:%dMB (meminfo: %+v)", memSizeReqMiB,
		pageSizeMiB, *mi)
	if memSizeFreeMiB < memSizeReqMiB {
		return FaultInsufficientFreeHugePageMem(int(ei), memSizeReqMiB, memSizeFreeMiB,
			nrPagesRequired, mi.HugePagesFree)
	}
	// Set engine mem_size and hugepage_size (MiB) values based on hugepage info.
	engine.setMemSize(memSizeReqMiB)
	engine.setHugePageSz(pageSizeMiB)
	return nil
}
// cleanEngineHugePages asks the bdev backend to remove leftover hugepages,
// logging how many were reclaimed.
func cleanEngineHugePages(srv *server) error {
	msg := "cleanup hugepages via bdev backend"
	resp, err := srv.ctlSvc.NvmePrepare(storage.BdevPrepareRequest{
		CleanHugePagesOnly: true,
	})
	if err != nil {
		return errors.Wrap(err, msg)
	}
	srv.log.Debugf("%s: %d removed", msg, resp.NrHugePagesRemoved)
	return nil
}
func | (srv *server, engine *EngineInstance, allStarted *sync.WaitGroup) {
// Register callback to publish engine process exit events.
engine.OnInstanceExit(createPublishInstanceExitFunc(srv.pubSub.Publish, srv.hostname))
// Register callback to publish engine format requested events.
engine.OnAwaitFormat(createPublishFormatRequiredFunc(srv.pubSub.Publish, srv.hostname))
var onceReady sync.Once
engine.OnReady(func(_ context.Context) error {
// Indicate that engine has been started, only do this the first time that the
// engine starts as shared memory persists between engine restarts.
onceReady.Do(func() {
allStarted.Done()
})
return nil
})
// Register callback to update engine cfg mem_size after format.
engine.OnStorageReady(func(_ context.Context) error {
srv.log.Debugf("engine %d: storage ready", engine.Index())
// Attempt to remove unused hugepages, log error only.
if err := cleanEngineHugePages(srv); err != nil {
srv.log.Errorf(err.Error())
}
// Update engine memory related config parameters before starting.
return errors.Wrap(updateMemValues(srv, engine, common.GetMemInfo),
"updating engine memory parameters")
})
}
// configureFirstEngine wires instance 0 to the management-service database when
// this host is an MS replica: the sysdb is started once the instance's SCM is
// ready, and on the raft bootstrap replica the first instance is pinned to rank 0.
func configureFirstEngine(ctx context.Context, engine *EngineInstance, sysdb *raft.Database, join systemJoinFn) {
	// Non-replicas have no local system db to manage.
	if !sysdb.IsReplica() {
		return
	}
	// Start the system db after instance 0's SCM is ready.
	var onceStorageReady sync.Once
	engine.OnStorageReady(func(_ context.Context) (err error) {
		onceStorageReady.Do(func() {
			// NB: We use the outer context rather than
			// the closure context in order to avoid
			// tying the db to the instance.
			err = errors.Wrap(sysdb.Start(ctx),
				"failed to start system db",
			)
		})
		return
	})
	if !sysdb.IsBootstrap() {
		return
	}
	// For historical reasons, we reserve rank 0 for the first
	// instance on the raft bootstrap server. This implies that
	// rank 0 will always be associated with a MS replica, but
	// it is not guaranteed to always be the leader.
	engine.joinSystem = func(ctx context.Context, req *control.SystemJoinReq) (*control.SystemJoinResp, error) {
		// Only claim rank 0 while the superblock has no valid rank yet.
		if sb := engine.getSuperblock(); !sb.ValidRank {
			engine.log.Debug("marking bootstrap instance as rank 0")
			req.Rank = 0
			sb.Rank = ranklist.NewRankPtr(0)
		}
		return join(ctx, req)
	}
}
// registerTelemetryCallbacks sets telemetry related callbacks to
// be triggered when all engines have been started.
func registerTelemetryCallbacks(ctx context.Context, srv *server) {
	port := srv.cfg.TelemetryPort
	if port == 0 {
		return // telemetry disabled in config
	}
	srv.OnEnginesStarted(func(ctxIn context.Context) error {
		srv.log.Debug("starting Prometheus exporter")
		shutdown, err := startPrometheusExporter(ctxIn, srv.log, port, srv.harness.Instances())
		if err == nil {
			// Tear the exporter down when the server shuts down.
			srv.OnShutdown(shutdown)
		}
		return err
	})
}
// registerFollowerSubscriptions stops handling received forwarded (in addition
// to local) events and starts forwarding events to the new MS leader.
// Log events on the host that they were raised (and first published) on.
// This is the initial behavior before leadership has been determined.
func registerFollowerSubscriptions(srv *server) {
	// Start from a clean slate (the leader registration path does the same).
	srv.pubSub.Reset()
	srv.pubSub.Subscribe(events.RASTypeAny, srv.evtLogger)
	// State-change events are forwarded to the MS leader rather than handled locally.
	srv.pubSub.Subscribe(events.RASTypeStateChange, srv.evtForwarder)
}
// registerLeaderSubscriptions stops forwarding events to MS and instead starts
// handling received forwarded (and local) events.
func registerLeaderSubscriptions(srv *server) {
	srv.pubSub.Reset()
	srv.pubSub.Subscribe(events.RASTypeAny, srv.evtLogger)
	srv.pubSub.Subscribe(events.RASTypeStateChange, srv.membership)
	srv.pubSub.Subscribe(events.RASTypeStateChange, srv.sysdb)
	srv.pubSub.Subscribe(events.RASTypeStateChange,
		events.HandlerFunc(func(ctx context.Context, evt *events.RASEvent) {
			switch evt.ID {
			case events.RASSwimRankDead:
				ts, err := evt.GetTimestamp()
				if err != nil {
					srv.log.Errorf("bad event timestamp %q: %s", evt.Timestamp, err)
					return
				}
				srv.log.Debugf("%s marked rank %d:%x dead @ %s", evt.Hostname, evt.Rank, evt.Incarnation, ts)
				// Mark the rank as unavailable for membership in
				// new pools, etc. Do group update on success.
				if err := srv.membership.MarkRankDead(ranklist.Rank(evt.Rank), evt.Incarnation); err != nil {
					srv.log.Errorf("failed to mark rank %d:%x dead: %s", evt.Rank, evt.Incarnation, err)
					if system.IsNotLeader(err) {
						// If we've lost leadership while processing the event,
						// attempt to re-forward it to the new leader.
						evt = evt.WithForwarded(false).WithForwardable(true)
						srv.log.Debugf("re-forwarding rank dead event for %d:%x", evt.Rank, evt.Incarnation)
						srv.evtForwarder.OnEvent(ctx, evt)
					}
					return
				}
				// Membership updated successfully; trigger a group update.
				srv.mgmtSvc.reqGroupUpdate(ctx, false)
			}
		}))
	// Add a debounce to throttle multiple SWIM Rank Dead events for the same rank/incarnation.
	srv.pubSub.Debounce(events.RASSwimRankDead, 0, func(ev *events.RASEvent) string {
		return strconv.FormatUint(uint64(ev.Rank), 10) + ":" + strconv.FormatUint(ev.Incarnation, 10)
	})
}
// getGrpcOpts generates a set of gRPC options for the server based on the supplied configuration.
func getGrpcOpts(log logging.Logger, cfgTransport *security.TransportConfig) ([]grpc.ServerOption, error) {
unaryInterceptors := []grpc.UnaryServerInterceptor{
unaryLoggingInterceptor(log), // must be first in order to properly log errors
unaryErrorInterceptor,
unaryStatusInterceptor,
unaryVersionInterceptor,
}
streamInterceptors := []grpc.StreamServerInterceptor{
streamErrorInterceptor,
}
tcOpt, err := security.ServerOptionForTransportConfig(cfgTransport)
if err != nil {
return nil, err
}
srvOpts := []grpc.ServerOption{tcOpt}
uintOpt, err := unaryInterceptorForTransportConfig(cfgTransport)
if err != nil {
return nil, err
}
if uintOpt != nil {
unaryInterceptors = append(unaryInterceptors, uintOpt)
}
sintOpt, err := streamInterceptorForTransportConfig(cfgTransport)
if err != nil {
return nil, err
}
if sintOpt != nil {
streamInterceptors = append(streamInterceptors, sintOpt)
}
return append(srvOpts, []grpc.ServerOption{
grpc.ChainUnaryInterceptor(unaryInterceptors...),
grpc.ChainStreamInterceptor(streamInterceptors...),
}...), nil
}
type netInterface interface {
Addrs() ([]net.Addr, error)
}
func getSrxSetting(cfg *config.Server) (int32, error) {
if len(cfg.Engines) == 0 {
return -1, nil
}
srxVarName := "FI_OFI_RXM_USE_SRX"
getSetting := func(ev string) (bool, int32, error) {
kv := strings.Split(ev, "=")
if len(kv) != 2 {
return false, -1, nil
}
if kv[0] != srxVarName {
return false, -1, nil
}
v, err := strconv.ParseInt(kv[1], 10, 32)
if err != nil {
return false, -1, err
}
return true, int32(v), nil
}
engineVals := make([]int32, len(cfg.Engines))
for idx, ec := range cfg.Engines {
engineVals[idx] = -1 // default to unset
for _, ev := range ec.EnvVars {
if match, engSrx, err := getSetting(ev); err != nil {
return -1, err
} else if match {
engineVals[idx] = engSrx
break
}
}
for _, pte := range ec.EnvPassThrough {
if pte == srxVarName {
return -1, errors.Errorf("%s may not be set as a pass-through env var", srxVarName)
}
}
}
cliSrx := engineVals[0]
for i := 1; i < len(engineVals); i++ {
if engineVals[i] != cliSrx {
return -1, errors.Errorf("%s setting must be the same for all engines", srxVarName)
}
}
// If the SRX config was not explicitly set via env vars, use the
// global config value.
if cliSrx == -1 {
cliSrx = int32(common.BoolAsInt(!cfg.Fabric.DisableSRX))
}
return cliSrx, nil
}
func checkFabricInterface(name string, lookup func(string) (netInterface, error)) error {
if name == "" {
return errors.New("no name provided")
}
if lookup == nil {
return errors.New("no lookup function provided")
}
netIF, err := lookup(name)
if err != nil {
return err
}
addrs, err := netIF.Addrs()
if err != nil {
return err
}
if len(addrs) == 0 {
return fmt.Errorf("no network addresses for interface %q", name)
}
return nil
}
| registerEngineEventCallbacks | identifier_name |
path_test.go | // Copyright 2020 The Kubernetes Authors.
// SPDX-License-Identifier: Apache-2.0
package common
import (
"bytes"
"os"
"path/filepath"
"strings"
"testing"
"github.com/stretchr/testify/assert"
"k8s.io/cli-runtime/pkg/genericclioptions"
"sigs.k8s.io/cli-utils/pkg/testutil"
)
const (
packageDir = "test-pkg-dir"
subFolder = "sub-folder"
inventoryFilename = "inventory.yaml"
secondInventoryFilename = "inventory-2.yaml"
podAFilename = "pod-a.yaml"
podBFilename = "pod-b.yaml"
configSeparator = "---"
)
var (
inventoryFilePath = filepath.Join(packageDir, inventoryFilename)
secondInventoryFilePath = filepath.Join(packageDir, subFolder, secondInventoryFilename)
podAFilePath = filepath.Join(packageDir, podAFilename)
podBFilePath = filepath.Join(packageDir, podBFilename)
)
func setupTestFilesystem(t *testing.T) testutil.TestFilesystem {
// Create the test filesystem, and add package config files
// to it.
t.Log("Creating test filesystem")
tf := testutil.Setup(t, packageDir)
t.Logf("Adding File: %s", inventoryFilePath)
tf.WriteFile(t, inventoryFilePath, inventoryConfigMap)
t.Logf("Adding File: %s", secondInventoryFilePath)
tf.WriteFile(t, secondInventoryFilePath, secondInventoryConfigMap)
t.Logf("Adding File: %s", podAFilePath)
tf.WriteFile(t, podAFilePath, podA)
t.Logf("Adding File: %s", podBFilePath)
tf.WriteFile(t, podBFilePath, podB)
return tf
}
var inventoryConfigMap = []byte(`
apiVersion: v1
kind: ConfigMap
metadata:
namespace: test-namespace
name: inventory
labels:
cli-utils.sigs.k8s.io/inventory-id: test-inventory
`)
var secondInventoryConfigMap = []byte(`
apiVersion: v1
kind: ConfigMap
metadata:
namespace: test-namespace
name: inventory-2
labels:
cli-utils.sigs.k8s.io/inventory-id: test-inventory
`)
var podA = []byte(`
apiVersion: v1
kind: Pod
metadata:
name: pod-a
namespace: test-namespace
labels:
name: test-pod-label
spec:
containers:
- name: kubernetes-pause
image: registry.k8s.io/pause:2.0
`)
var podB = []byte(`
apiVersion: v1
kind: Pod
metadata:
name: pod-b
namespace: test-namespace
labels:
name: test-pod-label
spec:
containers:
- name: kubernetes-pause
image: registry.k8s.io/pause:2.0
`)
func buildMultiResourceConfig(configs ...[]byte) []byte {
r := []byte{}
for i, config := range configs {
if i > 0 {
r = append(r, []byte(configSeparator)...)
}
r = append(r, config...)
}
return r
}
func TestProcessPaths(t *testing.T) {
tf := setupTestFilesystem(t)
defer tf.Clean()
trueVal := true
testCases := map[string]struct {
paths []string
expectedFileNameFlags genericclioptions.FileNameFlags
errFromDemandOneDirectory string
}{
"empty slice means reading from StdIn": {
paths: []string{},
expectedFileNameFlags: genericclioptions.FileNameFlags{
Filenames: &[]string{"-"},
},
},
"single file in slice is error; must be directory": {
paths: []string{podAFilePath},
expectedFileNameFlags: genericclioptions.FileNameFlags{
Filenames: nil,
Recursive: nil,
},
errFromDemandOneDirectory: "argument 'test-pkg-dir/pod-a.yaml' is not but must be a directory",
},
"single dir in slice": {
paths: []string{tf.GetRootDir()},
expectedFileNameFlags: genericclioptions.FileNameFlags{
Filenames: &[]string{tf.GetRootDir()},
Recursive: &trueVal,
},
},
"multiple arguments is an error": {
paths: []string{podAFilePath, podBFilePath},
expectedFileNameFlags: genericclioptions.FileNameFlags{
Filenames: nil,
Recursive: nil,
},
errFromDemandOneDirectory: "specify exactly one directory path argument; rejecting [test-pkg-dir/pod-a.yaml test-pkg-dir/pod-b.yaml]",
},
}
for tn, tc := range testCases {
t.Run(tn, func(t *testing.T) {
fileNameFlags, err := DemandOneDirectory(tc.paths)
assert.Equal(t, tc.expectedFileNameFlags, fileNameFlags)
if err != nil && err.Error() != tc.errFromDemandOneDirectory |
})
}
}
func TestFilterInputFile(t *testing.T) {
tf := testutil.Setup(t)
defer tf.Clean()
testCases := map[string]struct {
configObjects [][]byte
expectedObjects [][]byte
}{
"Empty config objects writes empty file": {
configObjects: [][]byte{},
expectedObjects: [][]byte{},
},
"Only inventory obj writes empty file": {
configObjects: [][]byte{inventoryConfigMap},
expectedObjects: [][]byte{},
},
"Only pods writes both pods": {
configObjects: [][]byte{podA, podB},
expectedObjects: [][]byte{podA, podB},
},
"Basic case of inventory obj and two pods": {
configObjects: [][]byte{inventoryConfigMap, podA, podB},
expectedObjects: [][]byte{podA, podB},
},
"Basic case of inventory obj and two pods in different order": {
configObjects: [][]byte{podB, inventoryConfigMap, podA},
expectedObjects: [][]byte{podB, podA},
},
}
for tn, tc := range testCases {
t.Run(tn, func(t *testing.T) {
// Build a single file of multiple resource configs, and
// call the tested function FilterInputFile. This writes
// the passed file to the test filesystem, filtering
// the inventory object if it exists in the passed file.
in := buildMultiResourceConfig(tc.configObjects...)
err := FilterInputFile(bytes.NewReader(in), tf.GetRootDir())
if err != nil {
t.Fatalf("Unexpected error in FilterInputFile: %s", err)
}
// Retrieve the files from the test filesystem.
actualFiles, err := os.ReadDir(tf.GetRootDir())
if err != nil {
t.Fatalf("Error reading test filesystem directory: %s", err)
}
// Since we remove the generated file for each test, there should
// not be more than one file in the test filesystem.
if len(actualFiles) > 1 {
t.Fatalf("Wrong number of files (%d) in dir: %s", len(actualFiles), tf.GetRootDir())
}
// If there is a generated file, then read it into actualStr.
actualStr := ""
if len(actualFiles) != 0 {
actualFilename := actualFiles[0].Name()
defer os.Remove(actualFilename)
actual, err := os.ReadFile(actualFilename)
if err != nil {
t.Fatalf("Error reading created file (%s): %s", actualFilename, err)
}
actualStr = strings.TrimSpace(string(actual))
}
// Build the expected string from the expectedObjects. This expected
// string should not have the inventory object config in it.
expected := buildMultiResourceConfig(tc.expectedObjects...)
expectedStr := strings.TrimSpace(string(expected))
if expectedStr != actualStr {
t.Errorf("Expected file contents (%s) not equal to actual file contents (%s)",
expectedStr, actualStr)
}
})
}
}
func TestExpandDir(t *testing.T) {
tf := setupTestFilesystem(t)
defer tf.Clean()
testCases := map[string]struct {
packageDirPath string
expandedInventory string
expandedPaths []string
isError bool
}{
"empty path is error": {
packageDirPath: "",
isError: true,
},
"path that is not dir is error": {
packageDirPath: "fakedir1",
isError: true,
},
"root package dir excludes inventory object": {
packageDirPath: tf.GetRootDir(),
expandedInventory: "inventory.yaml",
expandedPaths: []string{
"pod-a.yaml",
"pod-b.yaml",
},
isError: false,
},
}
for tn, tc := range testCases {
t.Run(tn, func(t *testing.T) {
actualInventory, actualPaths, err := ExpandDir(tc.packageDirPath)
if tc.isError {
if err == nil {
t.Fatalf("expected error but received none")
}
return
}
if err != nil {
t.Fatalf("received unexpected error %#v", err)
return
}
actualFilename := filepath.Base(actualInventory)
if tc.expandedInventory != actualFilename {
t.Errorf("expected inventory template filepath (%s), got (%s)", tc.expandedInventory, actualFilename)
}
if len(tc.expandedPaths) != len(actualPaths) {
t.Errorf("expected (%d) resource filepaths, got (%d)", len(tc.expandedPaths), len(actualPaths))
}
for _, expectedPath := range tc.expandedPaths {
found := false
for _, actualPath := range actualPaths {
actualFilename := filepath.Base(actualPath)
if expectedPath == actualFilename {
found = true
break
}
}
if !found {
t.Errorf("expected filename (%s) not found", expectedPath)
}
}
})
}
}
func TestExpandDirErrors(t *testing.T) {
tf := setupTestFilesystem(t)
defer tf.Clean()
testCases := map[string]struct {
packageDirPath []string
expandedPaths []string
isError bool
}{
"empty path is error": {
packageDirPath: []string{},
isError: true,
},
"more than one path is error": {
packageDirPath: []string{"fakedir1", "fakedir2"},
isError: true,
},
"path that is not dir is error": {
packageDirPath: []string{"fakedir1"},
isError: true,
},
"root package dir excludes inventory object": {
packageDirPath: []string{tf.GetRootDir()},
expandedPaths: []string{
filepath.Join(packageDir, "pod-a.yaml"),
filepath.Join(packageDir, "pod-b.yaml"),
},
isError: false,
},
}
for tn, tc := range testCases {
t.Run(tn, func(t *testing.T) {
trueVal := true
filenameFlags := genericclioptions.FileNameFlags{
Filenames: &tc.packageDirPath,
Recursive: &trueVal,
}
actualFlags, err := ExpandPackageDir(filenameFlags)
if tc.isError && err == nil {
t.Fatalf("expected error but received none")
}
if !tc.isError {
if err != nil {
t.Fatalf("unexpected error received: %v", err)
}
actualPaths := *actualFlags.Filenames
if len(tc.expandedPaths) != len(actualPaths) {
t.Errorf("expected config filepaths (%s), got (%s)",
tc.expandedPaths, actualPaths)
}
for _, expected := range tc.expandedPaths {
if !filepathExists(expected, actualPaths) {
t.Errorf("expected config filepath (%s) in actual filepaths (%s)",
expected, actualPaths)
}
}
// Check the inventory object is not in the filename flags.
for _, actualPath := range actualPaths {
if strings.Contains(actualPath, "inventory.yaml") {
t.Errorf("inventory object should be excluded")
}
}
}
})
}
}
// filepathExists returns true if the passed "filepath" is a substring
// of any of the passed full "filepaths"; false otherwise. For example:
// if filepath = "test/a.yaml", and filepaths includes "/tmp/test/a.yaml",
// this function returns true.
func filepathExists(filepath string, filepaths []string) bool {
for _, fp := range filepaths {
if strings.Contains(fp, filepath) {
return true
}
}
return false
}
| {
assert.Equal(t, err.Error(), tc.errFromDemandOneDirectory)
} | conditional_block |
path_test.go | // Copyright 2020 The Kubernetes Authors.
// SPDX-License-Identifier: Apache-2.0
package common
import (
"bytes"
"os"
"path/filepath"
"strings"
"testing"
"github.com/stretchr/testify/assert"
"k8s.io/cli-runtime/pkg/genericclioptions"
"sigs.k8s.io/cli-utils/pkg/testutil"
)
const (
packageDir = "test-pkg-dir"
subFolder = "sub-folder"
inventoryFilename = "inventory.yaml"
secondInventoryFilename = "inventory-2.yaml"
podAFilename = "pod-a.yaml"
podBFilename = "pod-b.yaml"
configSeparator = "---"
)
var (
inventoryFilePath = filepath.Join(packageDir, inventoryFilename)
secondInventoryFilePath = filepath.Join(packageDir, subFolder, secondInventoryFilename)
podAFilePath = filepath.Join(packageDir, podAFilename)
podBFilePath = filepath.Join(packageDir, podBFilename)
)
func setupTestFilesystem(t *testing.T) testutil.TestFilesystem {
// Create the test filesystem, and add package config files
// to it.
t.Log("Creating test filesystem")
tf := testutil.Setup(t, packageDir)
t.Logf("Adding File: %s", inventoryFilePath)
tf.WriteFile(t, inventoryFilePath, inventoryConfigMap)
t.Logf("Adding File: %s", secondInventoryFilePath)
tf.WriteFile(t, secondInventoryFilePath, secondInventoryConfigMap)
t.Logf("Adding File: %s", podAFilePath)
tf.WriteFile(t, podAFilePath, podA)
t.Logf("Adding File: %s", podBFilePath)
tf.WriteFile(t, podBFilePath, podB)
return tf
}
var inventoryConfigMap = []byte(`
apiVersion: v1
kind: ConfigMap
metadata:
namespace: test-namespace
name: inventory
labels:
cli-utils.sigs.k8s.io/inventory-id: test-inventory
`)
var secondInventoryConfigMap = []byte(`
apiVersion: v1
kind: ConfigMap
metadata:
namespace: test-namespace
name: inventory-2
labels:
cli-utils.sigs.k8s.io/inventory-id: test-inventory
`)
var podA = []byte(`
apiVersion: v1
kind: Pod
metadata:
name: pod-a
namespace: test-namespace
labels:
name: test-pod-label
spec:
containers:
- name: kubernetes-pause
image: registry.k8s.io/pause:2.0
`)
var podB = []byte(`
apiVersion: v1
kind: Pod
metadata:
name: pod-b
namespace: test-namespace
labels:
name: test-pod-label
spec:
containers:
- name: kubernetes-pause
image: registry.k8s.io/pause:2.0
`)
func | (configs ...[]byte) []byte {
r := []byte{}
for i, config := range configs {
if i > 0 {
r = append(r, []byte(configSeparator)...)
}
r = append(r, config...)
}
return r
}
func TestProcessPaths(t *testing.T) {
tf := setupTestFilesystem(t)
defer tf.Clean()
trueVal := true
testCases := map[string]struct {
paths []string
expectedFileNameFlags genericclioptions.FileNameFlags
errFromDemandOneDirectory string
}{
"empty slice means reading from StdIn": {
paths: []string{},
expectedFileNameFlags: genericclioptions.FileNameFlags{
Filenames: &[]string{"-"},
},
},
"single file in slice is error; must be directory": {
paths: []string{podAFilePath},
expectedFileNameFlags: genericclioptions.FileNameFlags{
Filenames: nil,
Recursive: nil,
},
errFromDemandOneDirectory: "argument 'test-pkg-dir/pod-a.yaml' is not but must be a directory",
},
"single dir in slice": {
paths: []string{tf.GetRootDir()},
expectedFileNameFlags: genericclioptions.FileNameFlags{
Filenames: &[]string{tf.GetRootDir()},
Recursive: &trueVal,
},
},
"multiple arguments is an error": {
paths: []string{podAFilePath, podBFilePath},
expectedFileNameFlags: genericclioptions.FileNameFlags{
Filenames: nil,
Recursive: nil,
},
errFromDemandOneDirectory: "specify exactly one directory path argument; rejecting [test-pkg-dir/pod-a.yaml test-pkg-dir/pod-b.yaml]",
},
}
for tn, tc := range testCases {
t.Run(tn, func(t *testing.T) {
fileNameFlags, err := DemandOneDirectory(tc.paths)
assert.Equal(t, tc.expectedFileNameFlags, fileNameFlags)
if err != nil && err.Error() != tc.errFromDemandOneDirectory {
assert.Equal(t, err.Error(), tc.errFromDemandOneDirectory)
}
})
}
}
func TestFilterInputFile(t *testing.T) {
tf := testutil.Setup(t)
defer tf.Clean()
testCases := map[string]struct {
configObjects [][]byte
expectedObjects [][]byte
}{
"Empty config objects writes empty file": {
configObjects: [][]byte{},
expectedObjects: [][]byte{},
},
"Only inventory obj writes empty file": {
configObjects: [][]byte{inventoryConfigMap},
expectedObjects: [][]byte{},
},
"Only pods writes both pods": {
configObjects: [][]byte{podA, podB},
expectedObjects: [][]byte{podA, podB},
},
"Basic case of inventory obj and two pods": {
configObjects: [][]byte{inventoryConfigMap, podA, podB},
expectedObjects: [][]byte{podA, podB},
},
"Basic case of inventory obj and two pods in different order": {
configObjects: [][]byte{podB, inventoryConfigMap, podA},
expectedObjects: [][]byte{podB, podA},
},
}
for tn, tc := range testCases {
t.Run(tn, func(t *testing.T) {
// Build a single file of multiple resource configs, and
// call the tested function FilterInputFile. This writes
// the passed file to the test filesystem, filtering
// the inventory object if it exists in the passed file.
in := buildMultiResourceConfig(tc.configObjects...)
err := FilterInputFile(bytes.NewReader(in), tf.GetRootDir())
if err != nil {
t.Fatalf("Unexpected error in FilterInputFile: %s", err)
}
// Retrieve the files from the test filesystem.
actualFiles, err := os.ReadDir(tf.GetRootDir())
if err != nil {
t.Fatalf("Error reading test filesystem directory: %s", err)
}
// Since we remove the generated file for each test, there should
// not be more than one file in the test filesystem.
if len(actualFiles) > 1 {
t.Fatalf("Wrong number of files (%d) in dir: %s", len(actualFiles), tf.GetRootDir())
}
// If there is a generated file, then read it into actualStr.
actualStr := ""
if len(actualFiles) != 0 {
actualFilename := actualFiles[0].Name()
defer os.Remove(actualFilename)
actual, err := os.ReadFile(actualFilename)
if err != nil {
t.Fatalf("Error reading created file (%s): %s", actualFilename, err)
}
actualStr = strings.TrimSpace(string(actual))
}
// Build the expected string from the expectedObjects. This expected
// string should not have the inventory object config in it.
expected := buildMultiResourceConfig(tc.expectedObjects...)
expectedStr := strings.TrimSpace(string(expected))
if expectedStr != actualStr {
t.Errorf("Expected file contents (%s) not equal to actual file contents (%s)",
expectedStr, actualStr)
}
})
}
}
func TestExpandDir(t *testing.T) {
tf := setupTestFilesystem(t)
defer tf.Clean()
testCases := map[string]struct {
packageDirPath string
expandedInventory string
expandedPaths []string
isError bool
}{
"empty path is error": {
packageDirPath: "",
isError: true,
},
"path that is not dir is error": {
packageDirPath: "fakedir1",
isError: true,
},
"root package dir excludes inventory object": {
packageDirPath: tf.GetRootDir(),
expandedInventory: "inventory.yaml",
expandedPaths: []string{
"pod-a.yaml",
"pod-b.yaml",
},
isError: false,
},
}
for tn, tc := range testCases {
t.Run(tn, func(t *testing.T) {
actualInventory, actualPaths, err := ExpandDir(tc.packageDirPath)
if tc.isError {
if err == nil {
t.Fatalf("expected error but received none")
}
return
}
if err != nil {
t.Fatalf("received unexpected error %#v", err)
return
}
actualFilename := filepath.Base(actualInventory)
if tc.expandedInventory != actualFilename {
t.Errorf("expected inventory template filepath (%s), got (%s)", tc.expandedInventory, actualFilename)
}
if len(tc.expandedPaths) != len(actualPaths) {
t.Errorf("expected (%d) resource filepaths, got (%d)", len(tc.expandedPaths), len(actualPaths))
}
for _, expectedPath := range tc.expandedPaths {
found := false
for _, actualPath := range actualPaths {
actualFilename := filepath.Base(actualPath)
if expectedPath == actualFilename {
found = true
break
}
}
if !found {
t.Errorf("expected filename (%s) not found", expectedPath)
}
}
})
}
}
func TestExpandDirErrors(t *testing.T) {
tf := setupTestFilesystem(t)
defer tf.Clean()
testCases := map[string]struct {
packageDirPath []string
expandedPaths []string
isError bool
}{
"empty path is error": {
packageDirPath: []string{},
isError: true,
},
"more than one path is error": {
packageDirPath: []string{"fakedir1", "fakedir2"},
isError: true,
},
"path that is not dir is error": {
packageDirPath: []string{"fakedir1"},
isError: true,
},
"root package dir excludes inventory object": {
packageDirPath: []string{tf.GetRootDir()},
expandedPaths: []string{
filepath.Join(packageDir, "pod-a.yaml"),
filepath.Join(packageDir, "pod-b.yaml"),
},
isError: false,
},
}
for tn, tc := range testCases {
t.Run(tn, func(t *testing.T) {
trueVal := true
filenameFlags := genericclioptions.FileNameFlags{
Filenames: &tc.packageDirPath,
Recursive: &trueVal,
}
actualFlags, err := ExpandPackageDir(filenameFlags)
if tc.isError && err == nil {
t.Fatalf("expected error but received none")
}
if !tc.isError {
if err != nil {
t.Fatalf("unexpected error received: %v", err)
}
actualPaths := *actualFlags.Filenames
if len(tc.expandedPaths) != len(actualPaths) {
t.Errorf("expected config filepaths (%s), got (%s)",
tc.expandedPaths, actualPaths)
}
for _, expected := range tc.expandedPaths {
if !filepathExists(expected, actualPaths) {
t.Errorf("expected config filepath (%s) in actual filepaths (%s)",
expected, actualPaths)
}
}
// Check the inventory object is not in the filename flags.
for _, actualPath := range actualPaths {
if strings.Contains(actualPath, "inventory.yaml") {
t.Errorf("inventory object should be excluded")
}
}
}
})
}
}
// filepathExists returns true if the passed "filepath" is a substring
// of any of the passed full "filepaths"; false otherwise. For example:
// if filepath = "test/a.yaml", and filepaths includes "/tmp/test/a.yaml",
// this function returns true.
func filepathExists(filepath string, filepaths []string) bool {
for _, fp := range filepaths {
if strings.Contains(fp, filepath) {
return true
}
}
return false
}
| buildMultiResourceConfig | identifier_name |
path_test.go | // Copyright 2020 The Kubernetes Authors.
// SPDX-License-Identifier: Apache-2.0
package common
import (
"bytes"
"os"
"path/filepath"
"strings"
"testing"
"github.com/stretchr/testify/assert"
"k8s.io/cli-runtime/pkg/genericclioptions"
"sigs.k8s.io/cli-utils/pkg/testutil"
)
const (
packageDir = "test-pkg-dir"
subFolder = "sub-folder"
inventoryFilename = "inventory.yaml"
secondInventoryFilename = "inventory-2.yaml"
podAFilename = "pod-a.yaml"
podBFilename = "pod-b.yaml"
configSeparator = "---"
)
var (
inventoryFilePath = filepath.Join(packageDir, inventoryFilename)
secondInventoryFilePath = filepath.Join(packageDir, subFolder, secondInventoryFilename)
podAFilePath = filepath.Join(packageDir, podAFilename)
podBFilePath = filepath.Join(packageDir, podBFilename)
)
func setupTestFilesystem(t *testing.T) testutil.TestFilesystem {
// Create the test filesystem, and add package config files
// to it.
t.Log("Creating test filesystem")
tf := testutil.Setup(t, packageDir)
t.Logf("Adding File: %s", inventoryFilePath)
tf.WriteFile(t, inventoryFilePath, inventoryConfigMap)
t.Logf("Adding File: %s", secondInventoryFilePath)
tf.WriteFile(t, secondInventoryFilePath, secondInventoryConfigMap)
t.Logf("Adding File: %s", podAFilePath)
tf.WriteFile(t, podAFilePath, podA)
t.Logf("Adding File: %s", podBFilePath)
tf.WriteFile(t, podBFilePath, podB)
return tf
}
var inventoryConfigMap = []byte(`
apiVersion: v1
kind: ConfigMap
metadata:
namespace: test-namespace
name: inventory
labels:
cli-utils.sigs.k8s.io/inventory-id: test-inventory
`)
var secondInventoryConfigMap = []byte(`
apiVersion: v1
kind: ConfigMap
metadata:
namespace: test-namespace
name: inventory-2
labels:
cli-utils.sigs.k8s.io/inventory-id: test-inventory
`)
var podA = []byte(`
apiVersion: v1
kind: Pod
metadata:
name: pod-a
namespace: test-namespace
labels:
name: test-pod-label
spec:
containers:
- name: kubernetes-pause
image: registry.k8s.io/pause:2.0
`)
var podB = []byte(`
apiVersion: v1
kind: Pod
metadata:
name: pod-b
namespace: test-namespace
labels:
name: test-pod-label
spec:
containers:
- name: kubernetes-pause
image: registry.k8s.io/pause:2.0
`)
func buildMultiResourceConfig(configs ...[]byte) []byte |
func TestProcessPaths(t *testing.T) {
tf := setupTestFilesystem(t)
defer tf.Clean()
trueVal := true
testCases := map[string]struct {
paths []string
expectedFileNameFlags genericclioptions.FileNameFlags
errFromDemandOneDirectory string
}{
"empty slice means reading from StdIn": {
paths: []string{},
expectedFileNameFlags: genericclioptions.FileNameFlags{
Filenames: &[]string{"-"},
},
},
"single file in slice is error; must be directory": {
paths: []string{podAFilePath},
expectedFileNameFlags: genericclioptions.FileNameFlags{
Filenames: nil,
Recursive: nil,
},
errFromDemandOneDirectory: "argument 'test-pkg-dir/pod-a.yaml' is not but must be a directory",
},
"single dir in slice": {
paths: []string{tf.GetRootDir()},
expectedFileNameFlags: genericclioptions.FileNameFlags{
Filenames: &[]string{tf.GetRootDir()},
Recursive: &trueVal,
},
},
"multiple arguments is an error": {
paths: []string{podAFilePath, podBFilePath},
expectedFileNameFlags: genericclioptions.FileNameFlags{
Filenames: nil,
Recursive: nil,
},
errFromDemandOneDirectory: "specify exactly one directory path argument; rejecting [test-pkg-dir/pod-a.yaml test-pkg-dir/pod-b.yaml]",
},
}
for tn, tc := range testCases {
t.Run(tn, func(t *testing.T) {
fileNameFlags, err := DemandOneDirectory(tc.paths)
assert.Equal(t, tc.expectedFileNameFlags, fileNameFlags)
if err != nil && err.Error() != tc.errFromDemandOneDirectory {
assert.Equal(t, err.Error(), tc.errFromDemandOneDirectory)
}
})
}
}
func TestFilterInputFile(t *testing.T) {
tf := testutil.Setup(t)
defer tf.Clean()
testCases := map[string]struct {
configObjects [][]byte
expectedObjects [][]byte
}{
"Empty config objects writes empty file": {
configObjects: [][]byte{},
expectedObjects: [][]byte{},
},
"Only inventory obj writes empty file": {
configObjects: [][]byte{inventoryConfigMap},
expectedObjects: [][]byte{},
},
"Only pods writes both pods": {
configObjects: [][]byte{podA, podB},
expectedObjects: [][]byte{podA, podB},
},
"Basic case of inventory obj and two pods": {
configObjects: [][]byte{inventoryConfigMap, podA, podB},
expectedObjects: [][]byte{podA, podB},
},
"Basic case of inventory obj and two pods in different order": {
configObjects: [][]byte{podB, inventoryConfigMap, podA},
expectedObjects: [][]byte{podB, podA},
},
}
for tn, tc := range testCases {
t.Run(tn, func(t *testing.T) {
// Build a single file of multiple resource configs, and
// call the tested function FilterInputFile. This writes
// the passed file to the test filesystem, filtering
// the inventory object if it exists in the passed file.
in := buildMultiResourceConfig(tc.configObjects...)
err := FilterInputFile(bytes.NewReader(in), tf.GetRootDir())
if err != nil {
t.Fatalf("Unexpected error in FilterInputFile: %s", err)
}
// Retrieve the files from the test filesystem.
actualFiles, err := os.ReadDir(tf.GetRootDir())
if err != nil {
t.Fatalf("Error reading test filesystem directory: %s", err)
}
// Since we remove the generated file for each test, there should
// not be more than one file in the test filesystem.
if len(actualFiles) > 1 {
t.Fatalf("Wrong number of files (%d) in dir: %s", len(actualFiles), tf.GetRootDir())
}
// If there is a generated file, then read it into actualStr.
actualStr := ""
if len(actualFiles) != 0 {
actualFilename := actualFiles[0].Name()
defer os.Remove(actualFilename)
actual, err := os.ReadFile(actualFilename)
if err != nil {
t.Fatalf("Error reading created file (%s): %s", actualFilename, err)
}
actualStr = strings.TrimSpace(string(actual))
}
// Build the expected string from the expectedObjects. This expected
// string should not have the inventory object config in it.
expected := buildMultiResourceConfig(tc.expectedObjects...)
expectedStr := strings.TrimSpace(string(expected))
if expectedStr != actualStr {
t.Errorf("Expected file contents (%s) not equal to actual file contents (%s)",
expectedStr, actualStr)
}
})
}
}
func TestExpandDir(t *testing.T) {
tf := setupTestFilesystem(t)
defer tf.Clean()
testCases := map[string]struct {
packageDirPath string
expandedInventory string
expandedPaths []string
isError bool
}{
"empty path is error": {
packageDirPath: "",
isError: true,
},
"path that is not dir is error": {
packageDirPath: "fakedir1",
isError: true,
},
"root package dir excludes inventory object": {
packageDirPath: tf.GetRootDir(),
expandedInventory: "inventory.yaml",
expandedPaths: []string{
"pod-a.yaml",
"pod-b.yaml",
},
isError: false,
},
}
for tn, tc := range testCases {
t.Run(tn, func(t *testing.T) {
actualInventory, actualPaths, err := ExpandDir(tc.packageDirPath)
if tc.isError {
if err == nil {
t.Fatalf("expected error but received none")
}
return
}
if err != nil {
t.Fatalf("received unexpected error %#v", err)
return
}
actualFilename := filepath.Base(actualInventory)
if tc.expandedInventory != actualFilename {
t.Errorf("expected inventory template filepath (%s), got (%s)", tc.expandedInventory, actualFilename)
}
if len(tc.expandedPaths) != len(actualPaths) {
t.Errorf("expected (%d) resource filepaths, got (%d)", len(tc.expandedPaths), len(actualPaths))
}
for _, expectedPath := range tc.expandedPaths {
found := false
for _, actualPath := range actualPaths {
actualFilename := filepath.Base(actualPath)
if expectedPath == actualFilename {
found = true
break
}
}
if !found {
t.Errorf("expected filename (%s) not found", expectedPath)
}
}
})
}
}
func TestExpandDirErrors(t *testing.T) {
tf := setupTestFilesystem(t)
defer tf.Clean()
testCases := map[string]struct {
packageDirPath []string
expandedPaths []string
isError bool
}{
"empty path is error": {
packageDirPath: []string{},
isError: true,
},
"more than one path is error": {
packageDirPath: []string{"fakedir1", "fakedir2"},
isError: true,
},
"path that is not dir is error": {
packageDirPath: []string{"fakedir1"},
isError: true,
},
"root package dir excludes inventory object": {
packageDirPath: []string{tf.GetRootDir()},
expandedPaths: []string{
filepath.Join(packageDir, "pod-a.yaml"),
filepath.Join(packageDir, "pod-b.yaml"),
},
isError: false,
},
}
for tn, tc := range testCases {
t.Run(tn, func(t *testing.T) {
trueVal := true
filenameFlags := genericclioptions.FileNameFlags{
Filenames: &tc.packageDirPath,
Recursive: &trueVal,
}
actualFlags, err := ExpandPackageDir(filenameFlags)
if tc.isError && err == nil {
t.Fatalf("expected error but received none")
}
if !tc.isError {
if err != nil {
t.Fatalf("unexpected error received: %v", err)
}
actualPaths := *actualFlags.Filenames
if len(tc.expandedPaths) != len(actualPaths) {
t.Errorf("expected config filepaths (%s), got (%s)",
tc.expandedPaths, actualPaths)
}
for _, expected := range tc.expandedPaths {
if !filepathExists(expected, actualPaths) {
t.Errorf("expected config filepath (%s) in actual filepaths (%s)",
expected, actualPaths)
}
}
// Check the inventory object is not in the filename flags.
for _, actualPath := range actualPaths {
if strings.Contains(actualPath, "inventory.yaml") {
t.Errorf("inventory object should be excluded")
}
}
}
})
}
}
// filepathExists returns true if the passed "filepath" is a substring
// of any of the passed full "filepaths"; false otherwise. For example:
// if filepath = "test/a.yaml", and filepaths includes "/tmp/test/a.yaml",
// this function returns true.
func filepathExists(filepath string, filepaths []string) bool {
for _, fp := range filepaths {
if strings.Contains(fp, filepath) {
return true
}
}
return false
}
| {
r := []byte{}
for i, config := range configs {
if i > 0 {
r = append(r, []byte(configSeparator)...)
}
r = append(r, config...)
}
return r
} | identifier_body |
path_test.go | // Copyright 2020 The Kubernetes Authors.
// SPDX-License-Identifier: Apache-2.0
package common
import (
"bytes"
"os"
"path/filepath"
"strings"
"testing"
"github.com/stretchr/testify/assert"
"k8s.io/cli-runtime/pkg/genericclioptions"
"sigs.k8s.io/cli-utils/pkg/testutil"
)
const (
packageDir = "test-pkg-dir"
subFolder = "sub-folder"
inventoryFilename = "inventory.yaml"
secondInventoryFilename = "inventory-2.yaml"
podAFilename = "pod-a.yaml"
podBFilename = "pod-b.yaml"
configSeparator = "---"
)
var (
inventoryFilePath = filepath.Join(packageDir, inventoryFilename)
secondInventoryFilePath = filepath.Join(packageDir, subFolder, secondInventoryFilename)
podAFilePath = filepath.Join(packageDir, podAFilename)
podBFilePath = filepath.Join(packageDir, podBFilename)
)
func setupTestFilesystem(t *testing.T) testutil.TestFilesystem {
// Create the test filesystem, and add package config files
// to it.
t.Log("Creating test filesystem")
tf := testutil.Setup(t, packageDir)
t.Logf("Adding File: %s", inventoryFilePath)
tf.WriteFile(t, inventoryFilePath, inventoryConfigMap)
t.Logf("Adding File: %s", secondInventoryFilePath)
tf.WriteFile(t, secondInventoryFilePath, secondInventoryConfigMap)
t.Logf("Adding File: %s", podAFilePath)
tf.WriteFile(t, podAFilePath, podA)
t.Logf("Adding File: %s", podBFilePath)
tf.WriteFile(t, podBFilePath, podB)
return tf
}
var inventoryConfigMap = []byte(`
apiVersion: v1
kind: ConfigMap
metadata:
namespace: test-namespace
name: inventory
labels:
cli-utils.sigs.k8s.io/inventory-id: test-inventory
`)
var secondInventoryConfigMap = []byte(`
apiVersion: v1
kind: ConfigMap
metadata:
namespace: test-namespace
name: inventory-2
labels:
cli-utils.sigs.k8s.io/inventory-id: test-inventory
`)
var podA = []byte(`
apiVersion: v1
kind: Pod
metadata:
name: pod-a
namespace: test-namespace
labels:
name: test-pod-label
spec:
containers:
- name: kubernetes-pause
image: registry.k8s.io/pause:2.0
`)
var podB = []byte(`
apiVersion: v1
kind: Pod
metadata:
name: pod-b
namespace: test-namespace
labels:
name: test-pod-label
spec:
containers:
- name: kubernetes-pause
image: registry.k8s.io/pause:2.0
`)
func buildMultiResourceConfig(configs ...[]byte) []byte {
r := []byte{}
for i, config := range configs {
if i > 0 {
r = append(r, []byte(configSeparator)...)
}
r = append(r, config...)
}
return r
}
func TestProcessPaths(t *testing.T) {
tf := setupTestFilesystem(t)
defer tf.Clean()
trueVal := true
testCases := map[string]struct {
paths []string
expectedFileNameFlags genericclioptions.FileNameFlags
errFromDemandOneDirectory string
}{
"empty slice means reading from StdIn": {
paths: []string{},
expectedFileNameFlags: genericclioptions.FileNameFlags{
Filenames: &[]string{"-"},
},
},
"single file in slice is error; must be directory": {
paths: []string{podAFilePath},
expectedFileNameFlags: genericclioptions.FileNameFlags{
Filenames: nil,
Recursive: nil,
},
errFromDemandOneDirectory: "argument 'test-pkg-dir/pod-a.yaml' is not but must be a directory",
},
"single dir in slice": {
paths: []string{tf.GetRootDir()},
expectedFileNameFlags: genericclioptions.FileNameFlags{
Filenames: &[]string{tf.GetRootDir()},
Recursive: &trueVal,
},
},
"multiple arguments is an error": {
paths: []string{podAFilePath, podBFilePath},
expectedFileNameFlags: genericclioptions.FileNameFlags{
Filenames: nil,
Recursive: nil,
},
errFromDemandOneDirectory: "specify exactly one directory path argument; rejecting [test-pkg-dir/pod-a.yaml test-pkg-dir/pod-b.yaml]",
},
}
for tn, tc := range testCases {
t.Run(tn, func(t *testing.T) {
fileNameFlags, err := DemandOneDirectory(tc.paths) | }
})
}
}
func TestFilterInputFile(t *testing.T) {
tf := testutil.Setup(t)
defer tf.Clean()
testCases := map[string]struct {
configObjects [][]byte
expectedObjects [][]byte
}{
"Empty config objects writes empty file": {
configObjects: [][]byte{},
expectedObjects: [][]byte{},
},
"Only inventory obj writes empty file": {
configObjects: [][]byte{inventoryConfigMap},
expectedObjects: [][]byte{},
},
"Only pods writes both pods": {
configObjects: [][]byte{podA, podB},
expectedObjects: [][]byte{podA, podB},
},
"Basic case of inventory obj and two pods": {
configObjects: [][]byte{inventoryConfigMap, podA, podB},
expectedObjects: [][]byte{podA, podB},
},
"Basic case of inventory obj and two pods in different order": {
configObjects: [][]byte{podB, inventoryConfigMap, podA},
expectedObjects: [][]byte{podB, podA},
},
}
for tn, tc := range testCases {
t.Run(tn, func(t *testing.T) {
// Build a single file of multiple resource configs, and
// call the tested function FilterInputFile. This writes
// the passed file to the test filesystem, filtering
// the inventory object if it exists in the passed file.
in := buildMultiResourceConfig(tc.configObjects...)
err := FilterInputFile(bytes.NewReader(in), tf.GetRootDir())
if err != nil {
t.Fatalf("Unexpected error in FilterInputFile: %s", err)
}
// Retrieve the files from the test filesystem.
actualFiles, err := os.ReadDir(tf.GetRootDir())
if err != nil {
t.Fatalf("Error reading test filesystem directory: %s", err)
}
// Since we remove the generated file for each test, there should
// not be more than one file in the test filesystem.
if len(actualFiles) > 1 {
t.Fatalf("Wrong number of files (%d) in dir: %s", len(actualFiles), tf.GetRootDir())
}
// If there is a generated file, then read it into actualStr.
actualStr := ""
if len(actualFiles) != 0 {
actualFilename := actualFiles[0].Name()
defer os.Remove(actualFilename)
actual, err := os.ReadFile(actualFilename)
if err != nil {
t.Fatalf("Error reading created file (%s): %s", actualFilename, err)
}
actualStr = strings.TrimSpace(string(actual))
}
// Build the expected string from the expectedObjects. This expected
// string should not have the inventory object config in it.
expected := buildMultiResourceConfig(tc.expectedObjects...)
expectedStr := strings.TrimSpace(string(expected))
if expectedStr != actualStr {
t.Errorf("Expected file contents (%s) not equal to actual file contents (%s)",
expectedStr, actualStr)
}
})
}
}
func TestExpandDir(t *testing.T) {
tf := setupTestFilesystem(t)
defer tf.Clean()
testCases := map[string]struct {
packageDirPath string
expandedInventory string
expandedPaths []string
isError bool
}{
"empty path is error": {
packageDirPath: "",
isError: true,
},
"path that is not dir is error": {
packageDirPath: "fakedir1",
isError: true,
},
"root package dir excludes inventory object": {
packageDirPath: tf.GetRootDir(),
expandedInventory: "inventory.yaml",
expandedPaths: []string{
"pod-a.yaml",
"pod-b.yaml",
},
isError: false,
},
}
for tn, tc := range testCases {
t.Run(tn, func(t *testing.T) {
actualInventory, actualPaths, err := ExpandDir(tc.packageDirPath)
if tc.isError {
if err == nil {
t.Fatalf("expected error but received none")
}
return
}
if err != nil {
t.Fatalf("received unexpected error %#v", err)
return
}
actualFilename := filepath.Base(actualInventory)
if tc.expandedInventory != actualFilename {
t.Errorf("expected inventory template filepath (%s), got (%s)", tc.expandedInventory, actualFilename)
}
if len(tc.expandedPaths) != len(actualPaths) {
t.Errorf("expected (%d) resource filepaths, got (%d)", len(tc.expandedPaths), len(actualPaths))
}
for _, expectedPath := range tc.expandedPaths {
found := false
for _, actualPath := range actualPaths {
actualFilename := filepath.Base(actualPath)
if expectedPath == actualFilename {
found = true
break
}
}
if !found {
t.Errorf("expected filename (%s) not found", expectedPath)
}
}
})
}
}
func TestExpandDirErrors(t *testing.T) {
tf := setupTestFilesystem(t)
defer tf.Clean()
testCases := map[string]struct {
packageDirPath []string
expandedPaths []string
isError bool
}{
"empty path is error": {
packageDirPath: []string{},
isError: true,
},
"more than one path is error": {
packageDirPath: []string{"fakedir1", "fakedir2"},
isError: true,
},
"path that is not dir is error": {
packageDirPath: []string{"fakedir1"},
isError: true,
},
"root package dir excludes inventory object": {
packageDirPath: []string{tf.GetRootDir()},
expandedPaths: []string{
filepath.Join(packageDir, "pod-a.yaml"),
filepath.Join(packageDir, "pod-b.yaml"),
},
isError: false,
},
}
for tn, tc := range testCases {
t.Run(tn, func(t *testing.T) {
trueVal := true
filenameFlags := genericclioptions.FileNameFlags{
Filenames: &tc.packageDirPath,
Recursive: &trueVal,
}
actualFlags, err := ExpandPackageDir(filenameFlags)
if tc.isError && err == nil {
t.Fatalf("expected error but received none")
}
if !tc.isError {
if err != nil {
t.Fatalf("unexpected error received: %v", err)
}
actualPaths := *actualFlags.Filenames
if len(tc.expandedPaths) != len(actualPaths) {
t.Errorf("expected config filepaths (%s), got (%s)",
tc.expandedPaths, actualPaths)
}
for _, expected := range tc.expandedPaths {
if !filepathExists(expected, actualPaths) {
t.Errorf("expected config filepath (%s) in actual filepaths (%s)",
expected, actualPaths)
}
}
// Check the inventory object is not in the filename flags.
for _, actualPath := range actualPaths {
if strings.Contains(actualPath, "inventory.yaml") {
t.Errorf("inventory object should be excluded")
}
}
}
})
}
}
// filepathExists returns true if the passed "filepath" is a substring
// of any of the passed full "filepaths"; false otherwise. For example:
// if filepath = "test/a.yaml", and filepaths includes "/tmp/test/a.yaml",
// this function returns true.
func filepathExists(filepath string, filepaths []string) bool {
for _, fp := range filepaths {
if strings.Contains(fp, filepath) {
return true
}
}
return false
} | assert.Equal(t, tc.expectedFileNameFlags, fileNameFlags)
if err != nil && err.Error() != tc.errFromDemandOneDirectory {
assert.Equal(t, err.Error(), tc.errFromDemandOneDirectory) | random_line_split |
pandas_gp.py | # -*- coding: utf-8 -*-
"""
Standard GP Algorithm (Strongly typed)
Created on Sun Aug 21 10:40:14 2016
@author: jm
"""
import operator
import math
import random
import cPickle
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import networkx as nx
from deap import algorithms
from deap import base
from deap import creator
from deap import tools
from deap import gp
#from scoop import futures
#import multiprocessing
primes = [2,3,5,7,11,13,17,19,23,29,31,37,41,43,47,53,59,61,67,71,73,79,83,89,
97,101,103,107,109,113,127,131,137,139,149,151,157,163,167,173,179,
181,191,193,197,199,211,223,227,229,233,239,241,251,257,263,269,271,
277,281,283,293,307,311,313,317,331,337,347,349,353,359,367,373,379,
383,389,397,401,409,419,421,431,433,439,443,449,457,461,463,467,479,
487,491,499] #,503,509,521,523,541,547,557,563,569,571,577,587,593,599,
# 601,607,613,617,619,631,641,643,647,653,659,661,673,677,683,691,701,
# 709,719,727,733,739,743,751,757,761,769,773,787,797,809,811,821,823,
# 827,829,839,853,857,859,863,877,881,883,887,907,911,919,929,937,941,
# 947,953,967,971,977,983,991,997,1009,1013,1019,1021,1031,1033,1039,
# 1049,1051,1061,1063,1069,1087,1091,1093,1097,1103,1109,1117,1123,
# 1129,1151,1153,1163,1171,1181,1187,1193,1201,1213,1217,1223,1229,
# 1231,1237,1249,1259,1277,1279,1283,1289,1291,1297,1301,1303,1307,
# 1319,1321,1327,1361,1367,1373,1381,1399,1409,1423,1427,1429,1433,
# 1439,1447,1451,1453,1459,1471,1481,1483,1487,1489,1493,1499,1511,
# 1523,1531,1543,1549,1553,1559,1567,1571,1579,1583,1597,1601,1607,
# 1609,1613,1619,1621,1627,1637,1657,1663,1667,1669,1693,1697,1699,
# 1709,1721,1723,1733,1741,1747,1753,1759,1777,1783,1787,1789,1801,
# 1811,1823,1831,1847,1861,1867,1871,1873,1877,1879,1889,1901,1907,
# 1913,1931,1933,1949,1951,1973,1979,1987,1993,1997,1999,2003,2011,
# 2017,2027,2029,2039,2053,2063,2069,2081,2083,2087,2089,2099,2111,
# 2113,2129,2131,2137,2141,2143,2153,2161,2179,2203,2207,2213,2221,
# 2237,2239,2243,2251,2267,2269,2273,2281,2287,2293,2297,2309,2311,
# 2333,2339,2341,2347,2351,2357,2371,2377,2381,2383,2389,2393,2399,
# 2411,2417,2423,2437,2441,2447,2459,2467,2473,2477,2503,2521,2531,
# 2539,2543,2549,2551,2557,2579,2591,2593,2609,2617,2621,2633,2647,
# 2657,2659,2663,2671,2677,2683,2687,2689,2693,2699,2707,2711,2713,
# 2719,2729,2731,2741,2749,2753,2767,2777,2789,2791,2797,2801,2803,
# 2819,2833,2837,2843,2851,2857,2861,2879,2887,2897,2903,2909,2917,
# 2927,2939,2953,2957,2963,2969,2971,2999,3001,3011,3019,3023,3037,
# 3041,3049,3061,3067,3079,3083,3089,3109,3119,3121,3137,3163,3167,
# 3169,3181,3187,3191,3203,3209,3217,3221,3229,3251,3253,3257,3259,
# 3271,3299,3301,3307,3313,3319,3323,3329,3331,3343,3347,3359,3361,
# 3371,3373,3389,3391,3407,3413,3433,3449,3457,3461,3463,3467,3469,
# 3491,3499,3511,3517,3527,3529,3533,3539,3541,3547,3557,3559,3571,
# 3581,3583,3593,3607,3613,3617,3623,3631,3637,3643,3659,3671,3673,
# 3677,3691,3697,3701,3709,3719,3727,3733,3739,3761,3767,3769,3779,
# 3793,3797,3803,3821,3823,3833,3847,3851,3853,3863,3877,3881,3889,
# 3907,3911,3917,3919,3923,3929,3931,3943,3947,3967,3989,4001,4003,
# 4007,4013,4019,4021,4027,4049,4051,4057,4073,4079,4091,4093,4099,
# 4111,4127,4129,4133,4139,4153,4157,4159,4177,4201,4211,4217,4219,
# 4229,4231]
# load the data
ibex = cPickle.load(open("ibex.pickle", "rb"))
#we need to have pd_df_bool terminals for gp to work
#better to have them as a column in the dataset
#then to use a terminal calling np.ones or np.zeros
#as we are not constrained by the size of the vector
ibex["Ones"] = True
ibex["Zeros"] = False
# Transaction costs - 10 Eur per contract
#cost = 30
cost = 50 # equiv (30/10000) * 10 points
# Split into train and test
train = ibex["2000":"2015"].copy()
test = ibex["2016":].copy()
# Functions and terminal for GP
class pd_df_float(object):
pass
class pd_df_bool(object):
pass
def f_gt(df_in, f_value):
return df_in > f_value
def f_lt(df_in, f_value):
return df_in < f_value
def protectedDiv(left, right):
try: return left / right
except ZeroDivisionError: return 0.0
def pd_add(left, right):
return left + right
def pd_subtract(left, right):
return left - right
def pd_multiply(left, right):
return left * right
def pd_divide(left, right):
return left / right
def pd_diff(df_in, _periods):
return df_in.diff(periods=abs(_periods))
def sma(df_in, periods):
return pd.rolling_mean(df_in, abs(periods))
def ewma(df_in, periods):
if abs(periods) < 2:
return df_in
else:
return pd.ewma(df_in, abs(periods), min_periods=abs(periods))
def hh(df_in, periods):
if abs(periods) < 2:
return df_in
else:
return pd.rolling_max(df_in, abs(periods), min_periods=abs(periods))
def ll(df_in, periods):
if abs(periods) < 2:
return df_in
else:
return pd.rolling_min(df_in, abs(periods), min_periods=abs(periods))
def pd_std(df_in, periods):
if abs(periods) < 2:
return df_in
else:
return pd.rolling_std(df_in, abs(periods), min_periods=abs(periods))
pset = gp.PrimitiveSetTyped('MAIN', [pd_df_float,
pd_df_float,
pd_df_float,
pd_df_float,
pd_df_float,
pd_df_bool,
pd_df_bool],
pd_df_bool)
pset.renameArguments(ARG0='Open')
pset.renameArguments(ARG1='High')
pset.renameArguments(ARG2='Low')
pset.renameArguments(ARG3='Close')
pset.renameArguments(ARG4='Volume')
# need to have pd_df_bool terminals for GP to work
pset.renameArguments(ARG5='Ones')
pset.renameArguments(ARG6='Zeros')
pset.addPrimitive(sma, [pd_df_float, int], pd_df_float, name="sma")
pset.addPrimitive(ewma, [pd_df_float, int], pd_df_float, name="ewma")
pset.addPrimitive(hh, [pd_df_float, int], pd_df_float, name="hh")
pset.addPrimitive(ll, [pd_df_float, int], pd_df_float, name="ll")
pset.addPrimitive(pd_std, [pd_df_float, int], pd_df_float, name="pd_std")
pset.addPrimitive(np.log, [pd_df_float], pd_df_float)
pset.addPrimitive(pd_diff, [pd_df_float, int], pd_df_float)
pset.addPrimitive(pd_add, [pd_df_float, pd_df_float], pd_df_float, name="pd_add")
pset.addPrimitive(pd_subtract, [pd_df_float, pd_df_float], pd_df_float, name="pd_sub")
pset.addPrimitive(pd_multiply, [pd_df_float, pd_df_float], pd_df_float, name="pd_mul")
pset.addPrimitive(pd_divide, [pd_df_float, pd_df_float], pd_df_float, name="pd_div")
pset.addPrimitive(operator.add, [int, int], int, name="add")
pset.addPrimitive(operator.sub, [int, int], int, name="sub")
#pset.addPrimitive(operator.mul, [int, int], int, name="mul")
pset.addPrimitive(protectedDiv, [int, int], int, name="div")
pset.addPrimitive(f_gt, [pd_df_float, float], pd_df_bool )
pset.addPrimitive(f_lt, [pd_df_float, float], pd_df_bool )
pset.addEphemeralConstant("short", lambda: random.randint(2,60), int)
pset.addEphemeralConstant("medium", lambda: random.randint(60,100), int)
pset.addEphemeralConstant("long", lambda: random.randint(100,200), int)
pset.addEphemeralConstant("xtralong", lambda: random.randint(200,20000), int)
pset.addEphemeralConstant("rand100", lambda: random.randint(0,100), int)
pset.addEphemeralConstant("randfloat", lambda: np.random.normal() / 100. , float)
pset.addPrimitive(operator.lt, [pd_df_float, pd_df_float], pd_df_bool)
pset.addPrimitive(operator.gt, [pd_df_float, pd_df_float], pd_df_bool)
pset.addPrimitive(np.bitwise_and, [pd_df_bool, pd_df_bool], pd_df_bool)
pset.addPrimitive(np.bitwise_or, [pd_df_bool, pd_df_bool], pd_df_bool)
pset.addPrimitive(np.bitwise_xor, [pd_df_bool, pd_df_bool], pd_df_bool)
pset.addPrimitive(np.bitwise_not, [pd_df_bool], pd_df_bool)
pset.addPrimitive(operator.add, [float, float], float, name="f_add")
pset.addPrimitive(operator.sub, [float, float], float, name="f_sub")
pset.addPrimitive(protectedDiv, [float, float], float, name="f_div")
pset.addPrimitive(operator.mul, [float, float], float, name="f_mul")
#Better to pass this terminals as arguments (ARG5 and ARG6)
#pset.addTerminal(pd.TimeSeries(data=[1] * len(train), index=train.index, dtype=bool), pd_df_bool, name="ones")
#pset.addTerminal(pd.TimeSeries(data=[0] * len(train), index=train.index, dtype=bool), pd_df_bool, name="zeros")
pset.addTerminal(1.618, float)
pset.addTerminal(0.1618, float)
pset.addTerminal(0.01618, float)
pset.addTerminal(0.001618, float)
pset.addTerminal(-0.001618, float)
pset.addTerminal(-0.01618, float)
pset.addTerminal(-0.1618, float)
pset.addTerminal(-1.618, float)
pset.addTerminal(1, int)
for p in primes:
pset.addTerminal(p, int)
for f in np.arange(0,0.2,0.002):
pset.addTerminal(f, float)
pset.addTerminal(-f, float)
creator.create('FitnessMax', base.Fitness, weights=(1.0,))
creator.create('Individual', gp.PrimitiveTree, fitness=creator.FitnessMax)
toolbox = base.Toolbox()
toolbox.register('expr', gp.genHalfAndHalf, pset=pset, min_=1, max_=6)
toolbox.register('individual', tools.initIterate, creator.Individual, toolbox.expr)
toolbox.register('population', tools.initRepeat, list, toolbox.individual)
toolbox.register('compile', gp.compile, pset=pset)
def evalFitness(individual, points):
func = toolbox.compile(expr=individual)
s = func(points.Open, points.High, points.Low, points.Close, points.Volume, points.Ones, points.Zeros)
# transform from bool to int
s = s*1
w = (s * points.Close.diff()) - np.abs(s.diff())*cost
w.dropna(inplace=True)
# W_win = w[w>0].sum()
# W_lose = abs(w[w<0].sum())
#
# profit_factor = protectedDiv(W_win, W_lose)
# return profit_factor ,
sharpe = w.mean() / w.std() * math.sqrt(600*255)
if np.isnan(sharpe) or np.isinf(sharpe):
|
return sharpe,
toolbox.register('evaluate', evalFitness, points=train)
toolbox.register('select', tools.selTournament, tournsize=3)
toolbox.register('mate', gp.cxOnePoint)
toolbox.register('expr_mut', gp.genFull, min_=0, max_=3)
toolbox.register('mutate', gp.mutUniform, expr=toolbox.expr_mut, pset=pset)
def plot(individual):
nodes, edges, labels = gp.graph(individual)
g = nx.Graph()
g.add_nodes_from(nodes)
g.add_edges_from(edges)
pos = nx.drawing.nx_agraph.graphviz_layout(g, prog="dot")
nx.draw_networkx_nodes(g, pos)
nx.draw_networkx_edges(g, pos)
nx.draw_networkx_labels(g, pos, labels)
plt.show()
if __name__ == '__main__':
#random.seed(10)
pop = toolbox.population(n=200)
hof = tools.HallOfFame(1)
stats = tools.Statistics(lambda ind: ind.fitness.values)
stats.register('avg', np.mean)
stats.register('min', np.min)
stats.register('max', np.max)
pop, log = algorithms.eaMuPlusLambda(pop, toolbox, 160, 160, 0.6, 0.1, 50, stats=stats, halloffame=hof)
# get the info of best solution
print("Best solution found...")
print(hof[0])
plot(hof[0])
f=toolbox.compile(hof[0])
# Check training results
s=f(train.Open, train.High, train.Low, train.Close, train.Volume, train.Ones, train.Zeros)
s=s*1
w = (s * train.Close.diff()) - np.abs(s.diff())*cost
W = w.cumsum()
df_plot = pd.DataFrame(index=train.index)
df_plot['GP Strategy'] = W
df_plot['IBEX'] = train.Close
#Normalize to 1 the start so we can compare plots.
df_plot['IBEX'] = df_plot['IBEX'] / df_plot['IBEX'][0]
df_plot.plot()
# Check testing results
s=f(test.Open, test.High, test.Low, test.Close, test.Volume, test.Ones, test.Zeros)
s=s*1
w = (s * test.Close.diff()) - np.abs(s.diff())*cost
W = w.cumsum()
df_plot = pd.DataFrame(index=test.index)
df_plot['GP Strategy'] = W
df_plot['IBEX'] = test.Close
#Normalize to 1 the start so we can compare plots.
df_plot['IBEX'] = df_plot['IBEX'] / df_plot['IBEX'][0]
df_plot.plot()
| sharpe = -99999 | conditional_block |
pandas_gp.py | # -*- coding: utf-8 -*-
"""
Standard GP Algorithm (Strongly typed)
Created on Sun Aug 21 10:40:14 2016
@author: jm
"""
import operator
import math
import random
import cPickle
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import networkx as nx
from deap import algorithms
from deap import base
from deap import creator
from deap import tools
from deap import gp
#from scoop import futures
#import multiprocessing
primes = [2,3,5,7,11,13,17,19,23,29,31,37,41,43,47,53,59,61,67,71,73,79,83,89,
97,101,103,107,109,113,127,131,137,139,149,151,157,163,167,173,179,
181,191,193,197,199,211,223,227,229,233,239,241,251,257,263,269,271,
277,281,283,293,307,311,313,317,331,337,347,349,353,359,367,373,379,
383,389,397,401,409,419,421,431,433,439,443,449,457,461,463,467,479,
487,491,499] #,503,509,521,523,541,547,557,563,569,571,577,587,593,599,
# 601,607,613,617,619,631,641,643,647,653,659,661,673,677,683,691,701,
# 709,719,727,733,739,743,751,757,761,769,773,787,797,809,811,821,823,
# 827,829,839,853,857,859,863,877,881,883,887,907,911,919,929,937,941,
# 947,953,967,971,977,983,991,997,1009,1013,1019,1021,1031,1033,1039,
# 1049,1051,1061,1063,1069,1087,1091,1093,1097,1103,1109,1117,1123,
# 1129,1151,1153,1163,1171,1181,1187,1193,1201,1213,1217,1223,1229,
# 1231,1237,1249,1259,1277,1279,1283,1289,1291,1297,1301,1303,1307,
# 1319,1321,1327,1361,1367,1373,1381,1399,1409,1423,1427,1429,1433,
# 1439,1447,1451,1453,1459,1471,1481,1483,1487,1489,1493,1499,1511,
# 1523,1531,1543,1549,1553,1559,1567,1571,1579,1583,1597,1601,1607,
# 1609,1613,1619,1621,1627,1637,1657,1663,1667,1669,1693,1697,1699,
# 1709,1721,1723,1733,1741,1747,1753,1759,1777,1783,1787,1789,1801,
# 1811,1823,1831,1847,1861,1867,1871,1873,1877,1879,1889,1901,1907,
# 1913,1931,1933,1949,1951,1973,1979,1987,1993,1997,1999,2003,2011,
# 2017,2027,2029,2039,2053,2063,2069,2081,2083,2087,2089,2099,2111,
# 2113,2129,2131,2137,2141,2143,2153,2161,2179,2203,2207,2213,2221,
# 2237,2239,2243,2251,2267,2269,2273,2281,2287,2293,2297,2309,2311,
# 2333,2339,2341,2347,2351,2357,2371,2377,2381,2383,2389,2393,2399,
# 2411,2417,2423,2437,2441,2447,2459,2467,2473,2477,2503,2521,2531,
# 2539,2543,2549,2551,2557,2579,2591,2593,2609,2617,2621,2633,2647,
# 2657,2659,2663,2671,2677,2683,2687,2689,2693,2699,2707,2711,2713,
# 2719,2729,2731,2741,2749,2753,2767,2777,2789,2791,2797,2801,2803,
# 2819,2833,2837,2843,2851,2857,2861,2879,2887,2897,2903,2909,2917,
# 2927,2939,2953,2957,2963,2969,2971,2999,3001,3011,3019,3023,3037,
# 3041,3049,3061,3067,3079,3083,3089,3109,3119,3121,3137,3163,3167,
# 3169,3181,3187,3191,3203,3209,3217,3221,3229,3251,3253,3257,3259,
# 3271,3299,3301,3307,3313,3319,3323,3329,3331,3343,3347,3359,3361,
# 3371,3373,3389,3391,3407,3413,3433,3449,3457,3461,3463,3467,3469,
# 3491,3499,3511,3517,3527,3529,3533,3539,3541,3547,3557,3559,3571,
# 3581,3583,3593,3607,3613,3617,3623,3631,3637,3643,3659,3671,3673,
# 3677,3691,3697,3701,3709,3719,3727,3733,3739,3761,3767,3769,3779,
# 3793,3797,3803,3821,3823,3833,3847,3851,3853,3863,3877,3881,3889,
# 3907,3911,3917,3919,3923,3929,3931,3943,3947,3967,3989,4001,4003,
# 4007,4013,4019,4021,4027,4049,4051,4057,4073,4079,4091,4093,4099,
# 4111,4127,4129,4133,4139,4153,4157,4159,4177,4201,4211,4217,4219,
# 4229,4231]
# load the data
ibex = cPickle.load(open("ibex.pickle", "rb"))
#we need to have pd_df_bool terminals for gp to work
#better to have them as a column in the dataset
#then to use a terminal calling np.ones or np.zeros
#as we are not constrained by the size of the vector
ibex["Ones"] = True
ibex["Zeros"] = False
# Transaction costs - 10 Eur per contract
#cost = 30
cost = 50 # equiv (30/10000) * 10 points
# Split into train and test
train = ibex["2000":"2015"].copy()
test = ibex["2016":].copy()
# Functions and terminal for GP
class pd_df_float(object):
pass
class pd_df_bool(object):
pass
def f_gt(df_in, f_value):
return df_in > f_value
def f_lt(df_in, f_value):
return df_in < f_value
def protectedDiv(left, right):
try: return left / right
except ZeroDivisionError: return 0.0
def pd_add(left, right):
return left + right
def pd_subtract(left, right):
return left - right
def pd_multiply(left, right):
return left * right
def pd_divide(left, right):
return left / right
def pd_diff(df_in, _periods):
return df_in.diff(periods=abs(_periods))
def sma(df_in, periods):
return pd.rolling_mean(df_in, abs(periods))
def ewma(df_in, periods):
if abs(periods) < 2:
return df_in
else:
return pd.ewma(df_in, abs(periods), min_periods=abs(periods))
def hh(df_in, periods):
if abs(periods) < 2:
return df_in
else:
return pd.rolling_max(df_in, abs(periods), min_periods=abs(periods))
def ll(df_in, periods):
if abs(periods) < 2:
return df_in
else:
return pd.rolling_min(df_in, abs(periods), min_periods=abs(periods))
def pd_std(df_in, periods):
if abs(periods) < 2:
return df_in
else:
return pd.rolling_std(df_in, abs(periods), min_periods=abs(periods))
pset = gp.PrimitiveSetTyped('MAIN', [pd_df_float,
pd_df_float,
pd_df_float,
pd_df_float,
pd_df_float,
pd_df_bool,
pd_df_bool],
pd_df_bool)
pset.renameArguments(ARG0='Open')
pset.renameArguments(ARG1='High')
pset.renameArguments(ARG2='Low')
pset.renameArguments(ARG3='Close')
pset.renameArguments(ARG4='Volume')
# need to have pd_df_bool terminals for GP to work
pset.renameArguments(ARG5='Ones')
pset.renameArguments(ARG6='Zeros')
pset.addPrimitive(sma, [pd_df_float, int], pd_df_float, name="sma")
pset.addPrimitive(ewma, [pd_df_float, int], pd_df_float, name="ewma")
pset.addPrimitive(hh, [pd_df_float, int], pd_df_float, name="hh")
pset.addPrimitive(ll, [pd_df_float, int], pd_df_float, name="ll")
pset.addPrimitive(pd_std, [pd_df_float, int], pd_df_float, name="pd_std")
pset.addPrimitive(np.log, [pd_df_float], pd_df_float)
pset.addPrimitive(pd_diff, [pd_df_float, int], pd_df_float)
pset.addPrimitive(pd_add, [pd_df_float, pd_df_float], pd_df_float, name="pd_add")
pset.addPrimitive(pd_subtract, [pd_df_float, pd_df_float], pd_df_float, name="pd_sub")
pset.addPrimitive(pd_multiply, [pd_df_float, pd_df_float], pd_df_float, name="pd_mul")
pset.addPrimitive(pd_divide, [pd_df_float, pd_df_float], pd_df_float, name="pd_div")
pset.addPrimitive(operator.add, [int, int], int, name="add")
pset.addPrimitive(operator.sub, [int, int], int, name="sub")
#pset.addPrimitive(operator.mul, [int, int], int, name="mul")
pset.addPrimitive(protectedDiv, [int, int], int, name="div")
pset.addPrimitive(f_gt, [pd_df_float, float], pd_df_bool )
pset.addPrimitive(f_lt, [pd_df_float, float], pd_df_bool )
pset.addEphemeralConstant("short", lambda: random.randint(2,60), int)
pset.addEphemeralConstant("medium", lambda: random.randint(60,100), int)
pset.addEphemeralConstant("long", lambda: random.randint(100,200), int)
pset.addEphemeralConstant("xtralong", lambda: random.randint(200,20000), int)
pset.addEphemeralConstant("rand100", lambda: random.randint(0,100), int)
pset.addEphemeralConstant("randfloat", lambda: np.random.normal() / 100. , float)
pset.addPrimitive(operator.lt, [pd_df_float, pd_df_float], pd_df_bool)
pset.addPrimitive(operator.gt, [pd_df_float, pd_df_float], pd_df_bool)
pset.addPrimitive(np.bitwise_and, [pd_df_bool, pd_df_bool], pd_df_bool)
pset.addPrimitive(np.bitwise_or, [pd_df_bool, pd_df_bool], pd_df_bool)
pset.addPrimitive(np.bitwise_xor, [pd_df_bool, pd_df_bool], pd_df_bool)
pset.addPrimitive(np.bitwise_not, [pd_df_bool], pd_df_bool)
pset.addPrimitive(operator.add, [float, float], float, name="f_add")
pset.addPrimitive(operator.sub, [float, float], float, name="f_sub")
pset.addPrimitive(protectedDiv, [float, float], float, name="f_div")
pset.addPrimitive(operator.mul, [float, float], float, name="f_mul")
#Better to pass this terminals as arguments (ARG5 and ARG6)
#pset.addTerminal(pd.TimeSeries(data=[1] * len(train), index=train.index, dtype=bool), pd_df_bool, name="ones")
#pset.addTerminal(pd.TimeSeries(data=[0] * len(train), index=train.index, dtype=bool), pd_df_bool, name="zeros")
pset.addTerminal(1.618, float)
pset.addTerminal(0.1618, float)
pset.addTerminal(0.01618, float)
pset.addTerminal(0.001618, float)
pset.addTerminal(-0.001618, float)
pset.addTerminal(-0.01618, float)
pset.addTerminal(-0.1618, float)
pset.addTerminal(-1.618, float)
pset.addTerminal(1, int)
for p in primes:
pset.addTerminal(p, int)
for f in np.arange(0,0.2,0.002):
pset.addTerminal(f, float)
pset.addTerminal(-f, float)
creator.create('FitnessMax', base.Fitness, weights=(1.0,))
creator.create('Individual', gp.PrimitiveTree, fitness=creator.FitnessMax)
toolbox = base.Toolbox()
toolbox.register('expr', gp.genHalfAndHalf, pset=pset, min_=1, max_=6)
toolbox.register('individual', tools.initIterate, creator.Individual, toolbox.expr)
toolbox.register('population', tools.initRepeat, list, toolbox.individual)
toolbox.register('compile', gp.compile, pset=pset)
def evalFitness(individual, points):
func = toolbox.compile(expr=individual)
s = func(points.Open, points.High, points.Low, points.Close, points.Volume, points.Ones, points.Zeros)
# transform from bool to int
s = s*1
w = (s * points.Close.diff()) - np.abs(s.diff())*cost
w.dropna(inplace=True)
# W_win = w[w>0].sum()
# W_lose = abs(w[w<0].sum())
#
# profit_factor = protectedDiv(W_win, W_lose)
# return profit_factor ,
sharpe = w.mean() / w.std() * math.sqrt(600*255)
if np.isnan(sharpe) or np.isinf(sharpe):
sharpe = -99999
return sharpe,
toolbox.register('evaluate', evalFitness, points=train)
toolbox.register('select', tools.selTournament, tournsize=3)
toolbox.register('mate', gp.cxOnePoint)
toolbox.register('expr_mut', gp.genFull, min_=0, max_=3)
toolbox.register('mutate', gp.mutUniform, expr=toolbox.expr_mut, pset=pset)
def plot(individual):
|
if __name__ == '__main__':
    #random.seed(10)
    pop = toolbox.population(n=200)
    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register('avg', np.mean)
    stats.register('min', np.min)
    stats.register('max', np.max)
    # mu=160, lambda=160, cxpb=0.6, mutpb=0.1, 50 generations.
    pop, log = algorithms.eaMuPlusLambda(pop, toolbox, 160, 160, 0.6, 0.1, 50, stats=stats, halloffame=hof)
    # get the info of best solution
    print("Best solution found...")
    print(hof[0])
    plot(hof[0])
    f=toolbox.compile(hof[0])
    # Check training results
    s=f(train.Open, train.High, train.Low, train.Close, train.Volume, train.Ones, train.Zeros)
    s=s*1
    w = (s * train.Close.diff()) - np.abs(s.diff())*cost
    W = w.cumsum()  # cumulative P&L in index points
    df_plot = pd.DataFrame(index=train.index)
    df_plot['GP Strategy'] = W
    df_plot['IBEX'] = train.Close
    #Normalize to 1 the start so we can compare plots.
    # NOTE(review): only IBEX is rescaled; 'GP Strategy' stays in cumulative
    # points, so the two curves are not on the same scale -- confirm intended.
    df_plot['IBEX'] = df_plot['IBEX'] / df_plot['IBEX'][0]
    df_plot.plot()
    # Check testing results
    s=f(test.Open, test.High, test.Low, test.Close, test.Volume, test.Ones, test.Zeros)
    s=s*1
    w = (s * test.Close.diff()) - np.abs(s.diff())*cost
    W = w.cumsum()
    df_plot = pd.DataFrame(index=test.index)
    df_plot['GP Strategy'] = W
    df_plot['IBEX'] = test.Close
    #Normalize to 1 the start so we can compare plots.
    df_plot['IBEX'] = df_plot['IBEX'] / df_plot['IBEX'][0]
    df_plot.plot()
| nodes, edges, labels = gp.graph(individual)
g = nx.Graph()
g.add_nodes_from(nodes)
g.add_edges_from(edges)
pos = nx.drawing.nx_agraph.graphviz_layout(g, prog="dot")
nx.draw_networkx_nodes(g, pos)
nx.draw_networkx_edges(g, pos)
nx.draw_networkx_labels(g, pos, labels)
plt.show() | identifier_body |
pandas_gp.py | # -*- coding: utf-8 -*-
"""
Standard GP Algorithm (Strongly typed)
Created on Sun Aug 21 10:40:14 2016
@author: jm
"""
import operator
import math
import random
import cPickle
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import networkx as nx
from deap import algorithms
from deap import base
from deap import creator
from deap import tools
from deap import gp
#from scoop import futures
#import multiprocessing
primes = [2,3,5,7,11,13,17,19,23,29,31,37,41,43,47,53,59,61,67,71,73,79,83,89,
97,101,103,107,109,113,127,131,137,139,149,151,157,163,167,173,179,
181,191,193,197,199,211,223,227,229,233,239,241,251,257,263,269,271,
277,281,283,293,307,311,313,317,331,337,347,349,353,359,367,373,379,
383,389,397,401,409,419,421,431,433,439,443,449,457,461,463,467,479,
487,491,499] #,503,509,521,523,541,547,557,563,569,571,577,587,593,599,
# 601,607,613,617,619,631,641,643,647,653,659,661,673,677,683,691,701,
# 709,719,727,733,739,743,751,757,761,769,773,787,797,809,811,821,823,
# 827,829,839,853,857,859,863,877,881,883,887,907,911,919,929,937,941,
# 947,953,967,971,977,983,991,997,1009,1013,1019,1021,1031,1033,1039,
# 1049,1051,1061,1063,1069,1087,1091,1093,1097,1103,1109,1117,1123,
# 1129,1151,1153,1163,1171,1181,1187,1193,1201,1213,1217,1223,1229,
# 1231,1237,1249,1259,1277,1279,1283,1289,1291,1297,1301,1303,1307,
# 1319,1321,1327,1361,1367,1373,1381,1399,1409,1423,1427,1429,1433,
# 1439,1447,1451,1453,1459,1471,1481,1483,1487,1489,1493,1499,1511,
# 1523,1531,1543,1549,1553,1559,1567,1571,1579,1583,1597,1601,1607,
# 1609,1613,1619,1621,1627,1637,1657,1663,1667,1669,1693,1697,1699,
# 1709,1721,1723,1733,1741,1747,1753,1759,1777,1783,1787,1789,1801,
# 1811,1823,1831,1847,1861,1867,1871,1873,1877,1879,1889,1901,1907,
# 1913,1931,1933,1949,1951,1973,1979,1987,1993,1997,1999,2003,2011,
# 2017,2027,2029,2039,2053,2063,2069,2081,2083,2087,2089,2099,2111,
# 2113,2129,2131,2137,2141,2143,2153,2161,2179,2203,2207,2213,2221,
# 2237,2239,2243,2251,2267,2269,2273,2281,2287,2293,2297,2309,2311,
# 2333,2339,2341,2347,2351,2357,2371,2377,2381,2383,2389,2393,2399,
# 2411,2417,2423,2437,2441,2447,2459,2467,2473,2477,2503,2521,2531,
# 2539,2543,2549,2551,2557,2579,2591,2593,2609,2617,2621,2633,2647,
# 2657,2659,2663,2671,2677,2683,2687,2689,2693,2699,2707,2711,2713,
# 2719,2729,2731,2741,2749,2753,2767,2777,2789,2791,2797,2801,2803,
# 2819,2833,2837,2843,2851,2857,2861,2879,2887,2897,2903,2909,2917,
# 2927,2939,2953,2957,2963,2969,2971,2999,3001,3011,3019,3023,3037,
# 3041,3049,3061,3067,3079,3083,3089,3109,3119,3121,3137,3163,3167,
# 3169,3181,3187,3191,3203,3209,3217,3221,3229,3251,3253,3257,3259,
# 3271,3299,3301,3307,3313,3319,3323,3329,3331,3343,3347,3359,3361,
# 3371,3373,3389,3391,3407,3413,3433,3449,3457,3461,3463,3467,3469,
# 3491,3499,3511,3517,3527,3529,3533,3539,3541,3547,3557,3559,3571,
# 3581,3583,3593,3607,3613,3617,3623,3631,3637,3643,3659,3671,3673,
# 3677,3691,3697,3701,3709,3719,3727,3733,3739,3761,3767,3769,3779,
# 3793,3797,3803,3821,3823,3833,3847,3851,3853,3863,3877,3881,3889,
# 3907,3911,3917,3919,3923,3929,3931,3943,3947,3967,3989,4001,4003,
# 4007,4013,4019,4021,4027,4049,4051,4057,4073,4079,4091,4093,4099,
# 4111,4127,4129,4133,4139,4153,4157,4159,4177,4201,4211,4217,4219,
# 4229,4231]
# load the data
# ibex.pickle: OHLCV DataFrame with a DatetimeIndex (written with cPickle).
# Use a context manager so the file handle is closed deterministically
# (the original left the handle open for the process lifetime).
with open("ibex.pickle", "rb") as _fh:
    ibex = cPickle.load(_fh)
#we need to have pd_df_bool terminals for gp to work
#better to have them as a column in the dataset
#then to use a terminal calling np.ones or np.zeros
#as we are not constrained by the size of the vector
ibex["Ones"] = True
ibex["Zeros"] = False
# Transaction costs - 10 Eur per contract
#cost = 30
cost = 50 # equiv (30/10000) * 10 points
# Split into train and test
train = ibex["2000":"2015"].copy()
test = ibex["2016":].copy()
# Functions and terminal for GP
# Marker types for DEAP's strongly-typed GP: they only tag primitive
# inputs/outputs (float-valued vs. boolean-valued pandas Series) and
# are never instantiated.
class pd_df_float(object):
    pass
class pd_df_bool(object):
    pass
def f_gt(df_in, f_value):
    """Elementwise test: True wherever df_in exceeds the float threshold."""
    threshold = f_value
    return df_in > threshold
def f_lt(df_in, f_value):
    """Elementwise test: True wherever df_in is below the float threshold."""
    threshold = f_value
    return df_in < threshold
def protectedDiv(left, right):
    """Division that returns 0.0 instead of raising on scalar division by zero.

    Pandas Series division never raises ZeroDivisionError (it yields inf/NaN
    elementwise), so the guard only matters for scalar operands.
    """
    try:
        return left / right
    except ZeroDivisionError:
        return 0.0
def pd_add(left, right):
    """Addition wrapper used as a typed GP primitive (Series or scalar)."""
    result = left + right
    return result
def pd_subtract(left, right):
    """Subtraction wrapper used as a typed GP primitive (Series or scalar).

    The function name was lost in this copy of the file; it is restored as
    pd_subtract, which is the name registered with the primitive set
    (pset.addPrimitive(pd_subtract, ...)).
    """
    return left - right
def pd_multiply(left, right):
    """Multiplication wrapper used as a typed GP primitive (Series or scalar)."""
    product = left * right
    return product
def pd_divide(left, right):
    """Raw division wrapper for the GP primitive set.

    Unlike protectedDiv, this does NOT guard against scalar division by
    zero -- behaviour intentionally preserved.
    """
    quotient = left / right
    return quotient
def pd_diff(df_in, _periods):
    """Difference the series over abs(_periods) steps.

    The GP-evolved integer may be negative; its sign is deliberately ignored.
    """
    window = abs(_periods)
    return df_in.diff(periods=window)
def sma(df_in, periods):
    """Simple moving average over abs(periods) bars.

    pd.rolling_mean was removed from pandas (deprecated in 0.18, gone in
    0.23+); the Series.rolling() method API is equivalent: min_periods
    defaults to the window size in both, so leading values are NaN.
    """
    return df_in.rolling(abs(periods)).mean()
def ewma(df_in, periods):
    """Exponentially weighted moving average with centre-of-mass abs(periods).

    Windows shorter than 2 return the input unchanged (same guard as the
    other rolling primitives). pd.ewma was removed from pandas; its second
    positional argument was `com`, so Series.ewm(com=..., min_periods=...)
    .mean() reproduces the original behaviour (adjust=True in both).
    """
    if abs(periods) < 2:
        return df_in
    else:
        return df_in.ewm(com=abs(periods), min_periods=abs(periods)).mean()
def hh(df_in, periods):
    """Highest high: rolling max over abs(periods) bars.

    Windows shorter than 2 return the input unchanged. pd.rolling_max was
    removed from pandas; Series.rolling(...).max() is the equivalent.
    """
    if abs(periods) < 2:
        return df_in
    else:
        return df_in.rolling(abs(periods), min_periods=abs(periods)).max()
def ll(df_in, periods):
    """Lowest low: rolling min over abs(periods) bars.

    Windows shorter than 2 return the input unchanged. pd.rolling_min was
    removed from pandas; Series.rolling(...).min() is the equivalent.
    """
    if abs(periods) < 2:
        return df_in
    else:
        return df_in.rolling(abs(periods), min_periods=abs(periods)).min()
def pd_std(df_in, periods):
    """Rolling sample standard deviation (ddof=1) over abs(periods) bars.

    Windows shorter than 2 return the input unchanged. pd.rolling_std was
    removed from pandas; Series.rolling(...).std() is the equivalent
    (both default to ddof=1).
    """
    if abs(periods) < 2:
        return df_in
    else:
        return df_in.rolling(abs(periods), min_periods=abs(periods)).std()
pset = gp.PrimitiveSetTyped('MAIN', [pd_df_float,
pd_df_float,
pd_df_float,
pd_df_float,
pd_df_float,
pd_df_bool,
pd_df_bool],
pd_df_bool)
pset.renameArguments(ARG0='Open')
pset.renameArguments(ARG1='High')
pset.renameArguments(ARG2='Low')
pset.renameArguments(ARG3='Close')
pset.renameArguments(ARG4='Volume')
# need to have pd_df_bool terminals for GP to work
pset.renameArguments(ARG5='Ones')
pset.renameArguments(ARG6='Zeros')
pset.addPrimitive(sma, [pd_df_float, int], pd_df_float, name="sma")
pset.addPrimitive(ewma, [pd_df_float, int], pd_df_float, name="ewma")
pset.addPrimitive(hh, [pd_df_float, int], pd_df_float, name="hh")
pset.addPrimitive(ll, [pd_df_float, int], pd_df_float, name="ll")
pset.addPrimitive(pd_std, [pd_df_float, int], pd_df_float, name="pd_std")
pset.addPrimitive(np.log, [pd_df_float], pd_df_float)
pset.addPrimitive(pd_diff, [pd_df_float, int], pd_df_float)
pset.addPrimitive(pd_add, [pd_df_float, pd_df_float], pd_df_float, name="pd_add")
pset.addPrimitive(pd_subtract, [pd_df_float, pd_df_float], pd_df_float, name="pd_sub")
pset.addPrimitive(pd_multiply, [pd_df_float, pd_df_float], pd_df_float, name="pd_mul")
pset.addPrimitive(pd_divide, [pd_df_float, pd_df_float], pd_df_float, name="pd_div")
pset.addPrimitive(operator.add, [int, int], int, name="add")
pset.addPrimitive(operator.sub, [int, int], int, name="sub")
#pset.addPrimitive(operator.mul, [int, int], int, name="mul")
pset.addPrimitive(protectedDiv, [int, int], int, name="div")
pset.addPrimitive(f_gt, [pd_df_float, float], pd_df_bool )
pset.addPrimitive(f_lt, [pd_df_float, float], pd_df_bool )
pset.addEphemeralConstant("short", lambda: random.randint(2,60), int)
pset.addEphemeralConstant("medium", lambda: random.randint(60,100), int)
pset.addEphemeralConstant("long", lambda: random.randint(100,200), int)
pset.addEphemeralConstant("xtralong", lambda: random.randint(200,20000), int)
pset.addEphemeralConstant("rand100", lambda: random.randint(0,100), int)
pset.addEphemeralConstant("randfloat", lambda: np.random.normal() / 100. , float)
pset.addPrimitive(operator.lt, [pd_df_float, pd_df_float], pd_df_bool)
pset.addPrimitive(operator.gt, [pd_df_float, pd_df_float], pd_df_bool)
pset.addPrimitive(np.bitwise_and, [pd_df_bool, pd_df_bool], pd_df_bool)
pset.addPrimitive(np.bitwise_or, [pd_df_bool, pd_df_bool], pd_df_bool)
pset.addPrimitive(np.bitwise_xor, [pd_df_bool, pd_df_bool], pd_df_bool)
pset.addPrimitive(np.bitwise_not, [pd_df_bool], pd_df_bool)
pset.addPrimitive(operator.add, [float, float], float, name="f_add")
pset.addPrimitive(operator.sub, [float, float], float, name="f_sub")
pset.addPrimitive(protectedDiv, [float, float], float, name="f_div")
pset.addPrimitive(operator.mul, [float, float], float, name="f_mul")
#Better to pass this terminals as arguments (ARG5 and ARG6)
#pset.addTerminal(pd.TimeSeries(data=[1] * len(train), index=train.index, dtype=bool), pd_df_bool, name="ones")
#pset.addTerminal(pd.TimeSeries(data=[0] * len(train), index=train.index, dtype=bool), pd_df_bool, name="zeros")
pset.addTerminal(1.618, float)
pset.addTerminal(0.1618, float)
pset.addTerminal(0.01618, float)
pset.addTerminal(0.001618, float)
pset.addTerminal(-0.001618, float)
pset.addTerminal(-0.01618, float)
pset.addTerminal(-0.1618, float)
pset.addTerminal(-1.618, float)
pset.addTerminal(1, int)
for p in primes:
pset.addTerminal(p, int)
for f in np.arange(0,0.2,0.002):
pset.addTerminal(f, float)
pset.addTerminal(-f, float)
creator.create('FitnessMax', base.Fitness, weights=(1.0,))
creator.create('Individual', gp.PrimitiveTree, fitness=creator.FitnessMax)
toolbox = base.Toolbox()
toolbox.register('expr', gp.genHalfAndHalf, pset=pset, min_=1, max_=6)
toolbox.register('individual', tools.initIterate, creator.Individual, toolbox.expr)
toolbox.register('population', tools.initRepeat, list, toolbox.individual)
toolbox.register('compile', gp.compile, pset=pset)
def evalFitness(individual, points):
    """Fitness of one GP individual: annualised Sharpe ratio of its
    trading signal's P&L on `points` (an OHLCV DataFrame slice).

    Returns a 1-tuple, as DEAP requires.
    """
    # Compile the expression tree into a callable over the OHLCV columns
    # plus the constant boolean columns (Ones/Zeros) the primitive set needs.
    func = toolbox.compile(expr=individual)
    s = func(points.Open, points.High, points.Low, points.Close, points.Volume, points.Ones, points.Zeros)
    # transform from bool to int
    s = s*1
    # P&L: position * price change, minus a transaction cost per position flip.
    w = (s * points.Close.diff()) - np.abs(s.diff())*cost
    w.dropna(inplace=True)
    # W_win = w[w>0].sum()
    # W_lose = abs(w[w<0].sum())
    #
    # profit_factor = protectedDiv(W_win, W_lose)
    # return profit_factor ,
    # NOTE(review): sqrt(600*255) looks like bars-per-day * trading-days
    # annualisation -- confirm 600 matches the data's bar frequency.
    sharpe = w.mean() / w.std() * math.sqrt(600*255)
    # Degenerate signals (constant position -> zero std) get a large penalty.
    if np.isnan(sharpe) or np.isinf(sharpe):
        sharpe = -99999
    return sharpe,
toolbox.register('evaluate', evalFitness, points=train)
toolbox.register('select', tools.selTournament, tournsize=3)
toolbox.register('mate', gp.cxOnePoint)
toolbox.register('expr_mut', gp.genFull, min_=0, max_=3)
toolbox.register('mutate', gp.mutUniform, expr=toolbox.expr_mut, pset=pset)
def plot(individual):
    """Render the GP individual's expression tree.

    Uses DEAP's gp.graph to extract nodes/edges/labels and a graphviz
    "dot" layout; requires networkx with pygraphviz installed.
    """
    nodes, edges, labels = gp.graph(individual)
    g = nx.Graph()
    g.add_nodes_from(nodes)
    g.add_edges_from(edges)
    # Hierarchical layout keeps the tree shape readable.
    pos = nx.drawing.nx_agraph.graphviz_layout(g, prog="dot")
    nx.draw_networkx_nodes(g, pos)
    nx.draw_networkx_edges(g, pos)
    nx.draw_networkx_labels(g, pos, labels)
    plt.show()
if __name__ == '__main__':
    #random.seed(10)
    pop = toolbox.population(n=200)
    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register('avg', np.mean)
    stats.register('min', np.min)
    stats.register('max', np.max)
    # mu=160, lambda=160, cxpb=0.6, mutpb=0.1, 50 generations.
    pop, log = algorithms.eaMuPlusLambda(pop, toolbox, 160, 160, 0.6, 0.1, 50, stats=stats, halloffame=hof)
    # get the info of best solution
    print("Best solution found...")
    print(hof[0])
    plot(hof[0])
    f=toolbox.compile(hof[0])
    # Check training results
    s=f(train.Open, train.High, train.Low, train.Close, train.Volume, train.Ones, train.Zeros)
    s=s*1
    w = (s * train.Close.diff()) - np.abs(s.diff())*cost
    W = w.cumsum()  # cumulative P&L in index points
    df_plot = pd.DataFrame(index=train.index)
    df_plot['GP Strategy'] = W
    df_plot['IBEX'] = train.Close
    #Normalize to 1 the start so we can compare plots.
    # NOTE(review): only IBEX is rescaled; 'GP Strategy' stays in cumulative
    # points, so the two curves are not on the same scale -- confirm intended.
    df_plot['IBEX'] = df_plot['IBEX'] / df_plot['IBEX'][0]
    df_plot.plot()
    # Check testing results
    s=f(test.Open, test.High, test.Low, test.Close, test.Volume, test.Ones, test.Zeros)
    s=s*1
    w = (s * test.Close.diff()) - np.abs(s.diff())*cost
    W = w.cumsum()
    df_plot = pd.DataFrame(index=test.index)
    df_plot['GP Strategy'] = W
    df_plot['IBEX'] = test.Close
    #Normalize to 1 the start so we can compare plots.
    df_plot['IBEX'] = df_plot['IBEX'] / df_plot['IBEX'][0]
    df_plot.plot()
| pd_subtract | identifier_name |
pandas_gp.py | # -*- coding: utf-8 -*-
"""
Standard GP Algorithm (Strongly typed)
Created on Sun Aug 21 10:40:14 2016
@author: jm
"""
import operator
import math
import random
import cPickle
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import networkx as nx
from deap import algorithms
from deap import base
from deap import creator
from deap import tools
from deap import gp
#from scoop import futures
#import multiprocessing
primes = [2,3,5,7,11,13,17,19,23,29,31,37,41,43,47,53,59,61,67,71,73,79,83,89,
97,101,103,107,109,113,127,131,137,139,149,151,157,163,167,173,179,
181,191,193,197,199,211,223,227,229,233,239,241,251,257,263,269,271,
277,281,283,293,307,311,313,317,331,337,347,349,353,359,367,373,379,
383,389,397,401,409,419,421,431,433,439,443,449,457,461,463,467,479,
487,491,499] #,503,509,521,523,541,547,557,563,569,571,577,587,593,599,
# 601,607,613,617,619,631,641,643,647,653,659,661,673,677,683,691,701,
# 709,719,727,733,739,743,751,757,761,769,773,787,797,809,811,821,823,
# 827,829,839,853,857,859,863,877,881,883,887,907,911,919,929,937,941,
# 947,953,967,971,977,983,991,997,1009,1013,1019,1021,1031,1033,1039,
# 1049,1051,1061,1063,1069,1087,1091,1093,1097,1103,1109,1117,1123,
# 1129,1151,1153,1163,1171,1181,1187,1193,1201,1213,1217,1223,1229,
# 1231,1237,1249,1259,1277,1279,1283,1289,1291,1297,1301,1303,1307,
# 1319,1321,1327,1361,1367,1373,1381,1399,1409,1423,1427,1429,1433,
# 1439,1447,1451,1453,1459,1471,1481,1483,1487,1489,1493,1499,1511,
# 1523,1531,1543,1549,1553,1559,1567,1571,1579,1583,1597,1601,1607,
# 1609,1613,1619,1621,1627,1637,1657,1663,1667,1669,1693,1697,1699,
# 1709,1721,1723,1733,1741,1747,1753,1759,1777,1783,1787,1789,1801,
# 1811,1823,1831,1847,1861,1867,1871,1873,1877,1879,1889,1901,1907,
# 1913,1931,1933,1949,1951,1973,1979,1987,1993,1997,1999,2003,2011,
# 2017,2027,2029,2039,2053,2063,2069,2081,2083,2087,2089,2099,2111,
# 2113,2129,2131,2137,2141,2143,2153,2161,2179,2203,2207,2213,2221,
# 2237,2239,2243,2251,2267,2269,2273,2281,2287,2293,2297,2309,2311,
# 2333,2339,2341,2347,2351,2357,2371,2377,2381,2383,2389,2393,2399,
# 2411,2417,2423,2437,2441,2447,2459,2467,2473,2477,2503,2521,2531,
# 2539,2543,2549,2551,2557,2579,2591,2593,2609,2617,2621,2633,2647,
# 2657,2659,2663,2671,2677,2683,2687,2689,2693,2699,2707,2711,2713,
# 2719,2729,2731,2741,2749,2753,2767,2777,2789,2791,2797,2801,2803,
# 2819,2833,2837,2843,2851,2857,2861,2879,2887,2897,2903,2909,2917,
# 2927,2939,2953,2957,2963,2969,2971,2999,3001,3011,3019,3023,3037,
# 3041,3049,3061,3067,3079,3083,3089,3109,3119,3121,3137,3163,3167,
# 3169,3181,3187,3191,3203,3209,3217,3221,3229,3251,3253,3257,3259,
# 3271,3299,3301,3307,3313,3319,3323,3329,3331,3343,3347,3359,3361,
# 3371,3373,3389,3391,3407,3413,3433,3449,3457,3461,3463,3467,3469,
# 3491,3499,3511,3517,3527,3529,3533,3539,3541,3547,3557,3559,3571,
# 3581,3583,3593,3607,3613,3617,3623,3631,3637,3643,3659,3671,3673,
# 3677,3691,3697,3701,3709,3719,3727,3733,3739,3761,3767,3769,3779,
# 3793,3797,3803,3821,3823,3833,3847,3851,3853,3863,3877,3881,3889,
# 3907,3911,3917,3919,3923,3929,3931,3943,3947,3967,3989,4001,4003,
# 4007,4013,4019,4021,4027,4049,4051,4057,4073,4079,4091,4093,4099,
# 4111,4127,4129,4133,4139,4153,4157,4159,4177,4201,4211,4217,4219,
# 4229,4231]
# load the data
ibex = cPickle.load(open("ibex.pickle", "rb"))
#we need to have pd_df_bool terminals for gp to work
#better to have them as a column in the dataset
#then to use a terminal calling np.ones or np.zeros
#as we are not constrained by the size of the vector
ibex["Ones"] = True
ibex["Zeros"] = False
# Transaction costs - 10 Eur per contract
#cost = 30
cost = 50 # equiv (30/10000) * 10 points
# Split into train and test
train = ibex["2000":"2015"].copy()
test = ibex["2016":].copy()
# Functions and terminal for GP
class pd_df_float(object):
pass
class pd_df_bool(object):
pass
def f_gt(df_in, f_value):
return df_in > f_value
def f_lt(df_in, f_value):
return df_in < f_value
def protectedDiv(left, right):
try: return left / right
except ZeroDivisionError: return 0.0
def pd_add(left, right):
return left + right
def pd_subtract(left, right):
return left - right
def pd_multiply(left, right):
return left * right
def pd_divide(left, right):
return left / right
def pd_diff(df_in, _periods):
return df_in.diff(periods=abs(_periods))
def sma(df_in, periods):
    """Simple moving average over abs(periods) bars.

    pd.rolling_mean was removed from pandas; Series.rolling(...).mean()
    is equivalent (min_periods defaults to the window size in both).
    """
    return df_in.rolling(abs(periods)).mean()
def ewma(df_in, periods):
    """Exponentially weighted moving average with centre-of-mass abs(periods).

    Windows shorter than 2 return the input unchanged. pd.ewma was removed
    from pandas; its second positional argument was `com`, so
    Series.ewm(com=..., min_periods=...).mean() reproduces the behaviour.
    """
    if abs(periods) < 2:
        return df_in
    else:
        return df_in.ewm(com=abs(periods), min_periods=abs(periods)).mean()
def hh(df_in, periods):
    """Highest high: rolling max over abs(periods) bars; identity for
    windows shorter than 2. pd.rolling_max was removed from pandas;
    Series.rolling(...).max() is equivalent.
    """
    if abs(periods) < 2:
        return df_in
    else:
        return df_in.rolling(abs(periods), min_periods=abs(periods)).max()
def ll(df_in, periods):
    """Lowest low: rolling min over abs(periods) bars; identity for windows
    shorter than 2. pd.rolling_min was removed from pandas;
    Series.rolling(...).min() is equivalent.
    """
    if abs(periods) < 2:
        return df_in
    else:
        return df_in.rolling(abs(periods), min_periods=abs(periods)).min()
def pd_std(df_in, periods):
    """Rolling sample standard deviation (ddof=1) over abs(periods) bars;
    identity for windows shorter than 2. pd.rolling_std was removed from
    pandas; Series.rolling(...).std() is equivalent.
    """
    if abs(periods) < 2:
        return df_in
    else:
        return df_in.rolling(abs(periods), min_periods=abs(periods)).std()
# Strongly-typed primitive set: seven inputs (five float-valued OHLCV
# series plus the two constant boolean series) producing a boolean signal.
# The first line of this statement was lost in this copy of the file and is
# restored from the intact copy above.
pset = gp.PrimitiveSetTyped('MAIN', [pd_df_float,
                                     pd_df_float,
                                     pd_df_float,
                                     pd_df_float,
                                     pd_df_float,
                                     pd_df_bool,
                                     pd_df_bool],
                            pd_df_bool)
pset.renameArguments(ARG0='Open')
pset.renameArguments(ARG1='High')
pset.renameArguments(ARG2='Low')
pset.renameArguments(ARG3='Close')
pset.renameArguments(ARG4='Volume')
# need to have pd_df_bool terminals for GP to work
pset.renameArguments(ARG5='Ones')
pset.renameArguments(ARG6='Zeros')
pset.addPrimitive(sma, [pd_df_float, int], pd_df_float, name="sma")
pset.addPrimitive(ewma, [pd_df_float, int], pd_df_float, name="ewma")
pset.addPrimitive(hh, [pd_df_float, int], pd_df_float, name="hh")
pset.addPrimitive(ll, [pd_df_float, int], pd_df_float, name="ll")
pset.addPrimitive(pd_std, [pd_df_float, int], pd_df_float, name="pd_std")
pset.addPrimitive(np.log, [pd_df_float], pd_df_float)
pset.addPrimitive(pd_diff, [pd_df_float, int], pd_df_float)
pset.addPrimitive(pd_add, [pd_df_float, pd_df_float], pd_df_float, name="pd_add")
pset.addPrimitive(pd_subtract, [pd_df_float, pd_df_float], pd_df_float, name="pd_sub")
pset.addPrimitive(pd_multiply, [pd_df_float, pd_df_float], pd_df_float, name="pd_mul")
pset.addPrimitive(pd_divide, [pd_df_float, pd_df_float], pd_df_float, name="pd_div")
pset.addPrimitive(operator.add, [int, int], int, name="add")
pset.addPrimitive(operator.sub, [int, int], int, name="sub")
#pset.addPrimitive(operator.mul, [int, int], int, name="mul")
pset.addPrimitive(protectedDiv, [int, int], int, name="div")
pset.addPrimitive(f_gt, [pd_df_float, float], pd_df_bool )
pset.addPrimitive(f_lt, [pd_df_float, float], pd_df_bool )
pset.addEphemeralConstant("short", lambda: random.randint(2,60), int)
pset.addEphemeralConstant("medium", lambda: random.randint(60,100), int)
pset.addEphemeralConstant("long", lambda: random.randint(100,200), int)
pset.addEphemeralConstant("xtralong", lambda: random.randint(200,20000), int)
pset.addEphemeralConstant("rand100", lambda: random.randint(0,100), int)
pset.addEphemeralConstant("randfloat", lambda: np.random.normal() / 100. , float)
pset.addPrimitive(operator.lt, [pd_df_float, pd_df_float], pd_df_bool)
pset.addPrimitive(operator.gt, [pd_df_float, pd_df_float], pd_df_bool)
pset.addPrimitive(np.bitwise_and, [pd_df_bool, pd_df_bool], pd_df_bool)
pset.addPrimitive(np.bitwise_or, [pd_df_bool, pd_df_bool], pd_df_bool)
pset.addPrimitive(np.bitwise_xor, [pd_df_bool, pd_df_bool], pd_df_bool)
pset.addPrimitive(np.bitwise_not, [pd_df_bool], pd_df_bool)
pset.addPrimitive(operator.add, [float, float], float, name="f_add")
pset.addPrimitive(operator.sub, [float, float], float, name="f_sub")
pset.addPrimitive(protectedDiv, [float, float], float, name="f_div")
pset.addPrimitive(operator.mul, [float, float], float, name="f_mul")
#Better to pass this terminals as arguments (ARG5 and ARG6)
#pset.addTerminal(pd.TimeSeries(data=[1] * len(train), index=train.index, dtype=bool), pd_df_bool, name="ones")
#pset.addTerminal(pd.TimeSeries(data=[0] * len(train), index=train.index, dtype=bool), pd_df_bool, name="zeros")
pset.addTerminal(1.618, float)
pset.addTerminal(0.1618, float)
pset.addTerminal(0.01618, float)
pset.addTerminal(0.001618, float)
pset.addTerminal(-0.001618, float)
pset.addTerminal(-0.01618, float)
pset.addTerminal(-0.1618, float)
pset.addTerminal(-1.618, float)
pset.addTerminal(1, int)
for p in primes:
pset.addTerminal(p, int)
for f in np.arange(0,0.2,0.002):
pset.addTerminal(f, float)
pset.addTerminal(-f, float)
creator.create('FitnessMax', base.Fitness, weights=(1.0,))
creator.create('Individual', gp.PrimitiveTree, fitness=creator.FitnessMax)
toolbox = base.Toolbox()
toolbox.register('expr', gp.genHalfAndHalf, pset=pset, min_=1, max_=6)
toolbox.register('individual', tools.initIterate, creator.Individual, toolbox.expr)
toolbox.register('population', tools.initRepeat, list, toolbox.individual)
toolbox.register('compile', gp.compile, pset=pset)
def evalFitness(individual, points):
func = toolbox.compile(expr=individual)
s = func(points.Open, points.High, points.Low, points.Close, points.Volume, points.Ones, points.Zeros)
# transform from bool to int
s = s*1
w = (s * points.Close.diff()) - np.abs(s.diff())*cost
w.dropna(inplace=True)
# W_win = w[w>0].sum()
# W_lose = abs(w[w<0].sum())
#
# profit_factor = protectedDiv(W_win, W_lose)
# return profit_factor ,
sharpe = w.mean() / w.std() * math.sqrt(600*255)
if np.isnan(sharpe) or np.isinf(sharpe):
sharpe = -99999
return sharpe,
toolbox.register('evaluate', evalFitness, points=train)
toolbox.register('select', tools.selTournament, tournsize=3)
toolbox.register('mate', gp.cxOnePoint)
toolbox.register('expr_mut', gp.genFull, min_=0, max_=3)
toolbox.register('mutate', gp.mutUniform, expr=toolbox.expr_mut, pset=pset)
def plot(individual):
nodes, edges, labels = gp.graph(individual)
g = nx.Graph()
g.add_nodes_from(nodes)
g.add_edges_from(edges)
pos = nx.drawing.nx_agraph.graphviz_layout(g, prog="dot")
nx.draw_networkx_nodes(g, pos)
nx.draw_networkx_edges(g, pos)
nx.draw_networkx_labels(g, pos, labels)
plt.show()
if __name__ == '__main__':
#random.seed(10)
pop = toolbox.population(n=200)
hof = tools.HallOfFame(1)
stats = tools.Statistics(lambda ind: ind.fitness.values)
stats.register('avg', np.mean)
stats.register('min', np.min)
stats.register('max', np.max)
pop, log = algorithms.eaMuPlusLambda(pop, toolbox, 160, 160, 0.6, 0.1, 50, stats=stats, halloffame=hof)
# get the info of best solution
print("Best solution found...")
print(hof[0])
plot(hof[0])
f=toolbox.compile(hof[0])
# Check training results
s=f(train.Open, train.High, train.Low, train.Close, train.Volume, train.Ones, train.Zeros)
s=s*1
w = (s * train.Close.diff()) - np.abs(s.diff())*cost
W = w.cumsum()
df_plot = pd.DataFrame(index=train.index)
df_plot['GP Strategy'] = W
df_plot['IBEX'] = train.Close
#Normalize to 1 the start so we can compare plots.
df_plot['IBEX'] = df_plot['IBEX'] / df_plot['IBEX'][0]
df_plot.plot()
# Check testing results
s=f(test.Open, test.High, test.Low, test.Close, test.Volume, test.Ones, test.Zeros)
s=s*1
w = (s * test.Close.diff()) - np.abs(s.diff())*cost
W = w.cumsum()
df_plot = pd.DataFrame(index=test.index)
df_plot['GP Strategy'] = W
df_plot['IBEX'] = test.Close
#Normalize to 1 the start so we can compare plots.
df_plot['IBEX'] = df_plot['IBEX'] / df_plot['IBEX'][0]
df_plot.plot() | pset = gp.PrimitiveSetTyped('MAIN', [pd_df_float, | random_line_split |
gke.go | /**
* Copyright (c) 2019-present Future Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package operator
import (
"errors"
"fmt"
"regexp"
"strconv"
"strings"
"time"
set "github.com/deckarep/golang-set"
"github.com/future-architect/gcp-instance-scheduler/model"
"github.com/hashicorp/go-multierror"
"golang.org/x/net/context"
"google.golang.org/api/compute/v1"
"google.golang.org/api/container/v1"
)
// GKENodePoolCall is a builder for resize/recovery operations on the
// instance groups backing GKE node pools of one GCP project. An error in
// any construction/filter step is stored in `error` and short-circuits
// later calls.
type GKENodePoolCall struct {
	targetLabel      string           // cluster resource-label name used to select clusters
	projectID        string           // GCP project whose clusters are targeted
	error            error            // deferred error from construction or filtering
	s                *compute.Service // Compute Engine API client
	ctx              context.Context
	targetLabelValue string // required value of targetLabel
}
// GKENodePool creates a GKENodePoolCall for projectID. If the Compute
// Engine client cannot be created, the error is stored in the returned
// value and surfaced by the first Resize/Recovery call.
func GKENodePool(ctx context.Context, projectID string) *GKENodePoolCall {
	s, err := compute.NewService(ctx)
	if err != nil {
		return &GKENodePoolCall{error: err}
	}
	// get all templates list
	return &GKENodePoolCall{
		s:         s,
		projectID: projectID,
		ctx:       ctx,
	}
}
// Filter restricts subsequent Resize/Recovery calls to clusters carrying
// the resource label labelName=value. Returns the receiver for chaining;
// a previously stored error makes this a no-op.
func (r *GKENodePoolCall) Filter(labelName, value string) *GKENodePoolCall {
	if r.error != nil {
		return r
	}
	r.targetLabel = labelName
	r.targetLabelValue = value
	return r
}
// Resize scales every stable managed instance group that backs a node pool
// of the targeted GKE clusters to `size` (0 effectively stops the pool).
// Groups already at `size` are reported in Alreadies; per-group API errors
// are accumulated with multierror and do not abort the loop.
// NOTE(review): the fmt.Println calls look like debug leftovers -- consider
// removing or demoting to a logger.
func (r *GKENodePoolCall) Resize(size int64) (*model.Report, error) {
	if r.error != nil {
		return nil, r.error
	}
	// get all instance group managers list
	managerList, err := compute.NewInstanceGroupManagersService(r.s).AggregatedList(r.projectID).Do()
	if err != nil {
		return nil, err
	}
	// add instance group name of cluster node pool to Set
	gkeNodePoolInstanceGroupSet, err := r.getGKEInstanceGroup()
	if err != nil {
		return nil, err
	}
	fmt.Println("gkeNodePoolInstanceGroupSet:", gkeNodePoolInstanceGroupSet.ToSlice())
	var res = r.error
	var alreadyRes []string
	var doneRes []string
	for _, manager := range valuesIG(managerList.Items) {
		fmt.Println("manager.InstanceTemplate:", manager.InstanceTemplate)
		fmt.Println("manager.Name:", manager.Name)
		// Check GKE NodePool InstanceGroup
		if gkeNodePoolInstanceGroupSet.Contains(manager.Name) {
			// Skip groups in the middle of another operation.
			if !manager.Status.IsStable {
				continue
			}
			if manager.TargetSize == size {
				alreadyRes = append(alreadyRes, manager.Name)
				continue
			}
			// get manager zone name (last URL path segment)
			zoneUrlElements := strings.Split(manager.Zone, "/")
			zone := zoneUrlElements[len(zoneUrlElements)-1]
			ms := compute.NewInstanceGroupManagersService(r.s)
			if _, err := ms.Resize(r.projectID, zone, manager.Name, size).Do(); err != nil {
				res = multierror.Append(res, err)
				continue
			}
			doneRes = append(doneRes, manager.Name)
		}
		// Throttle API calls.
		time.Sleep(CallInterval)
	}
	return &model.Report{
		InstanceType: model.GKENodePool,
		Dones:        doneRes,
		Alreadies:    alreadyRes,
	}, res
}
func (r *GKENodePoolCall) | () (*model.Report, error) {
if r.error != nil {
return nil, r.error
}
managerList, err := compute.NewInstanceGroupManagersService(r.s).AggregatedList(r.projectID).Do()
if err != nil {
return nil, err
}
// add instance group name of cluster node pool to Set
gkeNodePoolInstanceGroupSet, err := r.getGKEInstanceGroup()
if err != nil {
return nil, err
}
sizeMap, err := GetOriginalNodePoolSize(r.ctx, r.projectID, r.targetLabel, r.targetLabelValue)
if err != nil {
return nil, err
}
var res = r.error
var doneRes []string
var alreadyRes []string
for _, manager := range valuesIG(managerList.Items) {
// check instance group of gke node pool
if gkeNodePoolInstanceGroupSet.Contains(manager.Name) {
if !manager.Status.IsStable {
continue
}
split := strings.Split(manager.InstanceGroup, "/")
instanceGroupName := split[len(split)-1]
originalSize := sizeMap[instanceGroupName]
if manager.TargetSize == originalSize {
alreadyRes = append(alreadyRes, manager.Name)
continue
}
// get manager zone name
zoneUrlElements := strings.Split(manager.Zone, "/")
zone := zoneUrlElements[len(zoneUrlElements)-1] // ex) us-central1-a
ms := compute.NewInstanceGroupManagersService(r.s)
if _, err := ms.Resize(r.projectID, zone, manager.Name, originalSize).Do(); err != nil {
res = multierror.Append(res, err)
continue
}
doneRes = append(doneRes, manager.Name)
}
time.Sleep(CallInterval)
}
return &model.Report{
InstanceType: model.GKENodePool,
Dones: doneRes,
Alreadies: alreadyRes,
}, res
}
// get target GKE instance group Set
// getGKEInstanceGroup returns the set of managed-instance-group names
// (e.g. "gke-tky-cluster-default-pool-cb765a7d-grp") backing the node
// pools of every cluster matching the configured label filter.
func (r *GKENodePoolCall) getGKEInstanceGroup() (set.Set, error) {
	s, err := container.NewService(r.ctx)
	if err != nil {
		return nil, err
	}
	// get all clusters list ("-" location = every zone/region)
	clusters, err := container.NewProjectsLocationsClustersService(s).List("projects/" + r.projectID + "/locations/-").Do()
	if err != nil {
		return nil, err
	}
	res := set.NewSet()
	for _, cluster := range filter(clusters.Clusters, r.targetLabel, r.targetLabelValue) {
		for _, nodePool := range cluster.NodePools {
			for _, gkeInstanceGroup := range nodePool.InstanceGroupUrls {
				// Keep only the last URL path segment (the group name).
				tmpUrlElements := strings.Split(gkeInstanceGroup, "/")
				managerTemplate := tmpUrlElements[len(tmpUrlElements)-1]
				res.Add(managerTemplate) // e.g. gke-tky-cluster-default-pool-cb765a7d-grp
			}
		}
	}
	return res, nil
}
// SetLableIfNoLabel records each node pool's current size as a cluster
// resource label "restore-size-<pool>" on every cluster labelled
// targetLabel=true, unless such a label already exists. These labels are
// later read by GetOriginalNodePoolSize for Recovery.
// NOTE(review): "Lable" is a typo in this exported name; renaming would
// break external callers, so it is left as-is.
func SetLableIfNoLabel(ctx context.Context, projectID, targetLabel string) error {
	s, err := container.NewService(ctx)
	if err != nil {
		return err
	}
	currentNodeSize, err := GetCurrentNodeCount(ctx, projectID, targetLabel)
	if err != nil {
		return err
	}
	// get all clusters list
	clusters, err := container.NewProjectsLocationsClustersService(s).List("projects/" + projectID + "/locations/-").Do()
	if err != nil {
		return err
	}
	for _, cluster := range filter(clusters.Clusters, targetLabel, "true") {
		labels := cluster.ResourceLabels
		// Fingerprint is required by the API for optimistic concurrency.
		fingerprint := cluster.LabelFingerprint
		for _, nodePool := range cluster.NodePools {
			nodeSizeLabel := "restore-size-" + nodePool.Name
			_, ok := labels[nodeSizeLabel]
			if !ok {
				// set new label (size encoded as base-10 string)
				labels[nodeSizeLabel] = strconv.FormatInt(currentNodeSize[nodePool.Name], 10)
			}
		}
		parseRegion := strings.Split(cluster.Location, "/")
		region := parseRegion[len(parseRegion)-1]
		name := "projects/" + projectID + "/locations/" + region + "/clusters/" + cluster.Name
		req := &container.SetLabelsRequest{
			ResourceLabels:   labels,
			LabelFingerprint: fingerprint,
		}
		// update labels
		_, err := container.NewProjectsLocationsClustersService(s).SetResourceLabels(name, req).Do()
		if err != nil {
			return err
		}
	}
	return nil
}
// GetOriginalNodePoolSize returns map that key=instanceGroupName and value=originalSize
func GetOriginalNodePoolSize(ctx context.Context, projectID, targetLabel, labelValue string) (map[string]int64, error) {
s, err := container.NewService(ctx)
if err != nil {
return nil, err
}
// get all clusters list
clusters, err := container.NewProjectsLocationsClustersService(s).List("projects/" + projectID + "/locations/-").Do()
if err != nil {
return nil, err
}
result := make(map[string]int64)
for _, cluster := range filter(clusters.Clusters, targetLabel, labelValue) {
labels := cluster.ResourceLabels
for _, nodePool := range cluster.NodePools {
restoreSize, ok := labels["restore-size-"+nodePool.Name]
if !ok {
continue
}
size, err := strconv.Atoi(restoreSize)
if err != nil {
return nil, errors.New("label: " + "restore-size-" + nodePool.Name + " value is not number format?")
}
for _, url := range nodePool.InstanceGroupUrls {
// u;rl is below format
// e.g. https://www.googleapis.com/compute/v1/projects/{ProjectID}/zones/us-central1-a/instanceGroupManagers/gke-standard-cluster-1-default-pool-1234abcd-grp
urlSplit := strings.Split(url, "/")
instanceGroupName := urlSplit[len(urlSplit)-1]
result[instanceGroupName] = int64(size)
}
}
}
return result, nil
}
// GetCurrentNodeCount returns map that key=NodePoolName and value=currentSize
func GetCurrentNodeCount(ctx context.Context, projectID, targetLabel string) (map[string]int64, error) {
s, err := container.NewService(ctx)
if err != nil {
return nil, err
}
computeService, err := compute.NewService(ctx)
if err != nil {
return nil, err
}
// get all clusters list
clusters, err := container.NewProjectsLocationsClustersService(s).List("projects/" + projectID + "/locations/-").Do()
if err != nil {
return nil, err
}
result := make(map[string]int64)
reZone := regexp.MustCompile(".*/zones/")
reInstance := regexp.MustCompile(".*/instanceGroupManagers/")
reEtc := regexp.MustCompile("/.*")
for _, cluster := range filter(clusters.Clusters, targetLabel, "true") {
for _, nodePool := range cluster.NodePools {
// nodePool.InstanceGroupUrls's format is below
// ["https://www.googleapis.com/compute/v1/projects/<projectID>/zones/<zone1>/instanceGroupManagers/gke-test-scheduler-2-default-pool-2b19b588-grp",
// "https://www.googleapis.com/compute/v1/projects/<projectID>/zones/<zone2>/instanceGroupManagers/gke-test-scheduler-2-default-pool-2b19b588-grp",
// "https://www.googleapis.com/compute/v1/projects/<projectID>/zones/<zone3>/instanceGroupManagers/gke-test-scheduler-2-default-pool-2b19b588-grp"]
zone := reZone.ReplaceAllString(nodePool.InstanceGroupUrls[0], "") //"<zone1>/instanceGroupManagers/gke-test-scheduler-2-default-pool-2b19b588-grp"
zone = reEtc.ReplaceAllString(zone, "") //"<zone1>
instanceGroup := reInstance.ReplaceAllString(nodePool.InstanceGroupUrls[0], "")
resp, err := computeService.InstanceGroups.Get(projectID, zone, instanceGroup).Context(ctx).Do()
if err != nil {
return nil, err
}
size := resp.Size
result[nodePool.Name] = size
}
}
return result, nil
}
// filter returns the clusters whose resource label `label` carries the value
// `value`. An empty label name disables filtering and the input list is
// returned unchanged.
func filter(l []*container.Cluster, label, value string) []*container.Cluster {
	if label == "" { //TODO Temp impl
		return l
	}
	var matched []*container.Cluster
	for _, c := range l {
		if c.ResourceLabels[label] == value {
			matched = append(matched, c)
		}
	}
	return matched
}
| Recovery | identifier_name |
gke.go | /**
* Copyright (c) 2019-present Future Corporation | * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package operator
import (
"errors"
"fmt"
"regexp"
"strconv"
"strings"
"time"
set "github.com/deckarep/golang-set"
"github.com/future-architect/gcp-instance-scheduler/model"
"github.com/hashicorp/go-multierror"
"golang.org/x/net/context"
"google.golang.org/api/compute/v1"
"google.golang.org/api/container/v1"
)
// GKENodePoolCall is a builder-style handle for resizing the instance groups
// that back GKE node pools; construct with GKENodePool, optionally narrow with
// Filter, then invoke Resize or Recovery.
type GKENodePoolCall struct {
	targetLabel      string           // cluster resource-label name used to select target clusters
	projectID        string           // GCP project whose clusters are operated on
	error            error            // first error from construction; short-circuits later calls
	s                *compute.Service // Compute API client used for instance-group operations
	ctx              context.Context  // context propagated to API calls
	targetLabelValue string           // value targetLabel must hold for a cluster to match
}
// GKENodePool builds a GKENodePoolCall for the given project. If the Compute
// client cannot be created, the failure is stored in the returned value's
// error field and surfaces from the next chained call instead of here.
func GKENodePool(ctx context.Context, projectID string) *GKENodePoolCall {
	svc, err := compute.NewService(ctx)
	if err != nil {
		return &GKENodePoolCall{error: err}
	}
	return &GKENodePoolCall{s: svc, projectID: projectID, ctx: ctx}
}
// Filter records the resource label (name/value pair) that selects which
// clusters subsequent Resize/Recovery calls operate on. It is a no-op when a
// previous step already failed, so calls can be chained safely.
func (r *GKENodePoolCall) Filter(labelName, value string) *GKENodePoolCall {
	if r.error == nil {
		r.targetLabel = labelName
		r.targetLabelValue = value
	}
	return r
}
// Resize scales every instance group backing a node pool of the target GKE
// clusters to `size` instances.
//
// For each instance group manager in the project that belongs to a target
// node pool it:
//   - skips the manager while it is not stable,
//   - records it under Alreadies when its target size already equals `size`,
//   - otherwise issues a resize request, collecting failures with multierror.
//
// Returns a report of resized (Dones) and untouched (Alreadies) managers plus
// any accumulated resize errors.
func (r *GKENodePoolCall) Resize(size int64) (*model.Report, error) {
	if r.error != nil {
		return nil, r.error
	}
	// get all instance group managers list
	managerList, err := compute.NewInstanceGroupManagersService(r.s).AggregatedList(r.projectID).Do()
	if err != nil {
		return nil, err
	}
	// names of the instance groups backing the target clusters' node pools
	gkeNodePoolInstanceGroupSet, err := r.getGKEInstanceGroup()
	if err != nil {
		return nil, err
	}
	fmt.Println("gkeNodePoolInstanceGroupSet:", gkeNodePoolInstanceGroupSet.ToSlice())
	var res error // r.error is known nil here; accumulate resize failures only
	var alreadyRes []string
	var doneRes []string
	for _, manager := range valuesIG(managerList.Items) {
		// Only touch managers that back a target GKE node pool.
		// (Removed per-manager debug prints that spammed one line for every
		// manager in the project.)
		if !gkeNodePoolInstanceGroupSet.Contains(manager.Name) {
			continue
		}
		if !manager.Status.IsStable {
			continue
		}
		if manager.TargetSize == size {
			alreadyRes = append(alreadyRes, manager.Name)
			continue
		}
		// manager.Zone is a URL; its last path element is the zone name.
		zoneUrlElements := strings.Split(manager.Zone, "/")
		zone := zoneUrlElements[len(zoneUrlElements)-1]
		ms := compute.NewInstanceGroupManagersService(r.s)
		_, rerr := ms.Resize(r.projectID, zone, manager.Name, size).Do()
		// Throttle only actual API calls; previously every manager in the
		// project — including unrelated ones — incurred this sleep.
		time.Sleep(CallInterval)
		if rerr != nil {
			res = multierror.Append(res, rerr)
			continue
		}
		doneRes = append(doneRes, manager.Name)
	}
	return &model.Report{
		InstanceType: model.GKENodePool,
		Dones:        doneRes,
		Alreadies:    alreadyRes,
	}, res
}
// Recovery restores every target node pool's instance group to the size that
// was recorded in the cluster's "restore-size-<pool>" resource labels (see
// GetOriginalNodePoolSize / SetLableIfNoLabel).
//
// Managers whose recorded size is missing are skipped; managers already at
// their recorded size are reported under Alreadies. Resize failures are
// accumulated with multierror and returned alongside the report.
func (r *GKENodePoolCall) Recovery() (*model.Report, error) {
	if r.error != nil {
		return nil, r.error
	}
	managerList, err := compute.NewInstanceGroupManagersService(r.s).AggregatedList(r.projectID).Do()
	if err != nil {
		return nil, err
	}
	// names of the instance groups backing the target clusters' node pools
	gkeNodePoolInstanceGroupSet, err := r.getGKEInstanceGroup()
	if err != nil {
		return nil, err
	}
	// key=instance group name, value=node count recorded before shutdown
	sizeMap, err := GetOriginalNodePoolSize(r.ctx, r.projectID, r.targetLabel, r.targetLabelValue)
	if err != nil {
		return nil, err
	}
	var res error // r.error is known nil here; accumulate resize failures only
	var doneRes []string
	var alreadyRes []string
	for _, manager := range valuesIG(managerList.Items) {
		// check instance group of gke node pool
		if !gkeNodePoolInstanceGroupSet.Contains(manager.Name) {
			continue
		}
		if !manager.Status.IsStable {
			continue
		}
		split := strings.Split(manager.InstanceGroup, "/")
		instanceGroupName := split[len(split)-1]
		originalSize, ok := sizeMap[instanceGroupName]
		if !ok {
			// BUG FIX: without this guard a pool missing its restore-size
			// label read the map's zero value and was resized to 0 nodes.
			continue
		}
		if manager.TargetSize == originalSize {
			alreadyRes = append(alreadyRes, manager.Name)
			continue
		}
		// manager.Zone is a URL; its last path element is the zone, e.g. us-central1-a
		zoneUrlElements := strings.Split(manager.Zone, "/")
		zone := zoneUrlElements[len(zoneUrlElements)-1]
		ms := compute.NewInstanceGroupManagersService(r.s)
		_, rerr := ms.Resize(r.projectID, zone, manager.Name, originalSize).Do()
		time.Sleep(CallInterval) // throttle consecutive API calls
		if rerr != nil {
			res = multierror.Append(res, rerr)
			continue
		}
		doneRes = append(doneRes, manager.Name)
	}
	return &model.Report{
		InstanceType: model.GKENodePool,
		Dones:        doneRes,
		Alreadies:    alreadyRes,
	}, res
}
// getGKEInstanceGroup collects into a set the names of all instance groups
// backing node pools of the target clusters (those matching the configured
// label filter), e.g. "gke-tky-cluster-default-pool-cb765a7d-grp".
func (r *GKENodePoolCall) getGKEInstanceGroup() (set.Set, error) {
	svc, err := container.NewService(r.ctx)
	if err != nil {
		return nil, err
	}
	// every cluster in the project, regardless of location
	parent := "projects/" + r.projectID + "/locations/-"
	clusters, err := container.NewProjectsLocationsClustersService(svc).List(parent).Do()
	if err != nil {
		return nil, err
	}
	groups := set.NewSet()
	for _, cluster := range filter(clusters.Clusters, r.targetLabel, r.targetLabelValue) {
		for _, pool := range cluster.NodePools {
			for _, groupURL := range pool.InstanceGroupUrls {
				// the instance group name is the URL's last path element
				parts := strings.Split(groupURL, "/")
				groups.Add(parts[len(parts)-1])
			}
		}
	}
	return groups, nil
}
// SetLableIfNoLabel records each node pool's current node count as a cluster
// resource label "restore-size-<pool>", but only for pools that do not
// already carry such a label, so the original size can be restored later.
// Only clusters whose `targetLabel` resource label is "true" are touched.
// (Name typo is kept: it is the public interface used by callers.)
func SetLableIfNoLabel(ctx context.Context, projectID, targetLabel string) error {
	svc, err := container.NewService(ctx)
	if err != nil {
		return err
	}
	// key=node pool name, value=live node count
	currentNodeSize, err := GetCurrentNodeCount(ctx, projectID, targetLabel)
	if err != nil {
		return err
	}
	// every cluster in the project, regardless of location
	clusters, err := container.NewProjectsLocationsClustersService(svc).List("projects/" + projectID + "/locations/-").Do()
	if err != nil {
		return err
	}
	for _, cluster := range filter(clusters.Clusters, targetLabel, "true") {
		labels := cluster.ResourceLabels
		for _, pool := range cluster.NodePools {
			key := "restore-size-" + pool.Name
			if _, exists := labels[key]; !exists {
				// record the current size as a new label
				labels[key] = strconv.FormatInt(currentNodeSize[pool.Name], 10)
			}
		}
		// the cluster resource name wants only the last element of Location
		locParts := strings.Split(cluster.Location, "/")
		name := "projects/" + projectID + "/locations/" + locParts[len(locParts)-1] + "/clusters/" + cluster.Name
		req := &container.SetLabelsRequest{
			ResourceLabels:   labels,
			LabelFingerprint: cluster.LabelFingerprint,
		}
		// push the (possibly extended) label set back to the cluster
		if _, err := container.NewProjectsLocationsClustersService(svc).SetResourceLabels(name, req).Do(); err != nil {
			return err
		}
	}
	return nil
}
// GetOriginalNodePoolSize returns map that key=instanceGroupName and value=originalSize
//
// The original size of each node pool is read from the cluster resource label
// "restore-size-<pool>" (written by SetLableIfNoLabel); pools without that
// label are skipped. Every instance group URL of a pool maps to the same
// recorded size. Returns an error when a restore-size label holds a value
// that is not an integer.
func GetOriginalNodePoolSize(ctx context.Context, projectID, targetLabel, labelValue string) (map[string]int64, error) {
	s, err := container.NewService(ctx)
	if err != nil {
		return nil, err
	}
	// get all clusters list (the "/locations/-" wildcard spans every location)
	clusters, err := container.NewProjectsLocationsClustersService(s).List("projects/" + projectID + "/locations/-").Do()
	if err != nil {
		return nil, err
	}
	result := make(map[string]int64)
	for _, cluster := range filter(clusters.Clusters, targetLabel, labelValue) {
		labels := cluster.ResourceLabels
		for _, nodePool := range cluster.NodePools {
			restoreSize, ok := labels["restore-size-"+nodePool.Name]
			if !ok {
				continue
			}
			// Parse directly as int64 instead of Atoi + cast.
			size, err := strconv.ParseInt(restoreSize, 10, 64)
			if err != nil {
				return nil, errors.New("label restore-size-" + nodePool.Name + " has a non-numeric value: " + restoreSize)
			}
			for _, url := range nodePool.InstanceGroupUrls {
				// url format:
				// https://www.googleapis.com/compute/v1/projects/{ProjectID}/zones/us-central1-a/instanceGroupManagers/gke-standard-cluster-1-default-pool-1234abcd-grp
				urlSplit := strings.Split(url, "/")
				instanceGroupName := urlSplit[len(urlSplit)-1]
				result[instanceGroupName] = size
			}
		}
	}
	return result, nil
}
// GetCurrentNodeCount returns map that key=NodePoolName and value=currentSize
//
// It targets clusters whose `targetLabel` resource label is "true". The size
// is read from the pool's FIRST instance group only, so for multi-zone pools
// it reflects a single zone. Pools with no instance groups are skipped.
func GetCurrentNodeCount(ctx context.Context, projectID, targetLabel string) (map[string]int64, error) {
	s, err := container.NewService(ctx)
	if err != nil {
		return nil, err
	}
	computeService, err := compute.NewService(ctx)
	if err != nil {
		return nil, err
	}
	// get all clusters list (the "/locations/-" wildcard spans every location)
	clusters, err := container.NewProjectsLocationsClustersService(s).List("projects/" + projectID + "/locations/-").Do()
	if err != nil {
		return nil, err
	}
	result := make(map[string]int64)
	// regexes that carve the zone and instance-group name out of the group URL
	reZone := regexp.MustCompile(".*/zones/")
	reInstance := regexp.MustCompile(".*/instanceGroupManagers/")
	reEtc := regexp.MustCompile("/.*")
	for _, cluster := range filter(clusters.Clusters, targetLabel, "true") {
		for _, nodePool := range cluster.NodePools {
			// BUG FIX: a pool with no instance groups previously caused an
			// index-out-of-range panic on InstanceGroupUrls[0].
			if len(nodePool.InstanceGroupUrls) == 0 {
				continue
			}
			// nodePool.InstanceGroupUrls[0] format:
			// https://www.googleapis.com/compute/v1/projects/<projectID>/zones/<zone1>/instanceGroupManagers/gke-test-scheduler-2-default-pool-2b19b588-grp
			zone := reZone.ReplaceAllString(nodePool.InstanceGroupUrls[0], "") // "<zone1>/instanceGroupManagers/..."
			zone = reEtc.ReplaceAllString(zone, "")                            // "<zone1>"
			instanceGroup := reInstance.ReplaceAllString(nodePool.InstanceGroupUrls[0], "")
			resp, err := computeService.InstanceGroups.Get(projectID, zone, instanceGroup).Context(ctx).Do()
			if err != nil {
				return nil, err
			}
			result[nodePool.Name] = resp.Size
		}
	}
	return result, nil
}
// grep target cluster and create target cluster list
func filter(l []*container.Cluster, label, value string) []*container.Cluster {
if label == "" { //TODO Temp impl
return l
}
var res []*container.Cluster
for _, cluster := range l {
if cluster.ResourceLabels[label] == value {
res = append(res, cluster)
}
}
return res
} | * | random_line_split |
gke.go | /**
* Copyright (c) 2019-present Future Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package operator
import (
"errors"
"fmt"
"regexp"
"strconv"
"strings"
"time"
set "github.com/deckarep/golang-set"
"github.com/future-architect/gcp-instance-scheduler/model"
"github.com/hashicorp/go-multierror"
"golang.org/x/net/context"
"google.golang.org/api/compute/v1"
"google.golang.org/api/container/v1"
)
type GKENodePoolCall struct {
targetLabel string
projectID string
error error
s *compute.Service
ctx context.Context
targetLabelValue string
}
func GKENodePool(ctx context.Context, projectID string) *GKENodePoolCall {
s, err := compute.NewService(ctx)
if err != nil {
return &GKENodePoolCall{error: err}
}
// get all templates list
return &GKENodePoolCall{
s: s,
projectID: projectID,
ctx: ctx,
}
}
func (r *GKENodePoolCall) Filter(labelName, value string) *GKENodePoolCall {
if r.error != nil {
return r
}
r.targetLabel = labelName
r.targetLabelValue = value
return r
}
func (r *GKENodePoolCall) Resize(size int64) (*model.Report, error) {
if r.error != nil {
return nil, r.error
}
// get all instance group mangers list
managerList, err := compute.NewInstanceGroupManagersService(r.s).AggregatedList(r.projectID).Do()
if err != nil {
return nil, err
}
// add instance group name of cluster node pool to Set
gkeNodePoolInstanceGroupSet, err := r.getGKEInstanceGroup()
if err != nil {
return nil, err
}
fmt.Println("gkeNodePoolInstanceGroupSet:", gkeNodePoolInstanceGroupSet.ToSlice())
var res = r.error
var alreadyRes []string
var doneRes []string
for _, manager := range valuesIG(managerList.Items) {
fmt.Println("manager.InstanceTemplate:", manager.InstanceTemplate)
fmt.Println("manager.Name:", manager.Name)
// Check GKE NodePool InstanceGroup
if gkeNodePoolInstanceGroupSet.Contains(manager.Name) {
if !manager.Status.IsStable {
continue
}
if manager.TargetSize == size {
alreadyRes = append(alreadyRes, manager.Name)
continue
}
// get manager zone name
zoneUrlElements := strings.Split(manager.Zone, "/")
zone := zoneUrlElements[len(zoneUrlElements)-1]
ms := compute.NewInstanceGroupManagersService(r.s)
if _, err := ms.Resize(r.projectID, zone, manager.Name, size).Do(); err != nil {
res = multierror.Append(res, err)
continue
}
doneRes = append(doneRes, manager.Name)
}
time.Sleep(CallInterval)
}
return &model.Report{
InstanceType: model.GKENodePool,
Dones: doneRes,
Alreadies: alreadyRes,
}, res
}
func (r *GKENodePoolCall) Recovery() (*model.Report, error) {
if r.error != nil {
return nil, r.error
}
managerList, err := compute.NewInstanceGroupManagersService(r.s).AggregatedList(r.projectID).Do()
if err != nil {
return nil, err
}
// add instance group name of cluster node pool to Set
gkeNodePoolInstanceGroupSet, err := r.getGKEInstanceGroup()
if err != nil {
return nil, err
}
sizeMap, err := GetOriginalNodePoolSize(r.ctx, r.projectID, r.targetLabel, r.targetLabelValue)
if err != nil {
return nil, err
}
var res = r.error
var doneRes []string
var alreadyRes []string
for _, manager := range valuesIG(managerList.Items) {
// check instance group of gke node pool
if gkeNodePoolInstanceGroupSet.Contains(manager.Name) {
if !manager.Status.IsStable {
continue
}
split := strings.Split(manager.InstanceGroup, "/")
instanceGroupName := split[len(split)-1]
originalSize := sizeMap[instanceGroupName]
if manager.TargetSize == originalSize {
alreadyRes = append(alreadyRes, manager.Name)
continue
}
// get manager zone name
zoneUrlElements := strings.Split(manager.Zone, "/")
zone := zoneUrlElements[len(zoneUrlElements)-1] // ex) us-central1-a
ms := compute.NewInstanceGroupManagersService(r.s)
if _, err := ms.Resize(r.projectID, zone, manager.Name, originalSize).Do(); err != nil {
res = multierror.Append(res, err)
continue
}
doneRes = append(doneRes, manager.Name)
}
time.Sleep(CallInterval)
}
return &model.Report{
InstanceType: model.GKENodePool,
Dones: doneRes,
Alreadies: alreadyRes,
}, res
}
// get target GKE instance group Set
func (r *GKENodePoolCall) getGKEInstanceGroup() (set.Set, error) {
s, err := container.NewService(r.ctx)
if err != nil {
return nil, err
}
// get all clusters list
clusters, err := container.NewProjectsLocationsClustersService(s).List("projects/" + r.projectID + "/locations/-").Do()
if err != nil {
return nil, err
}
res := set.NewSet()
for _, cluster := range filter(clusters.Clusters, r.targetLabel, r.targetLabelValue) {
for _, nodePool := range cluster.NodePools {
for _, gkeInstanceGroup := range nodePool.InstanceGroupUrls {
tmpUrlElements := strings.Split(gkeInstanceGroup, "/")
managerTemplate := tmpUrlElements[len(tmpUrlElements)-1]
res.Add(managerTemplate) // e.g. gke-tky-cluster-default-pool-cb765a7d-grp
}
}
}
return res, nil
}
func SetLableIfNoLabel(ctx context.Context, projectID, targetLabel string) error {
s, err := container.NewService(ctx)
if err != nil {
return err
}
currentNodeSize, err := GetCurrentNodeCount(ctx, projectID, targetLabel)
if err != nil {
return err
}
// get all clusters list
clusters, err := container.NewProjectsLocationsClustersService(s).List("projects/" + projectID + "/locations/-").Do()
if err != nil {
return err
}
for _, cluster := range filter(clusters.Clusters, targetLabel, "true") {
labels := cluster.ResourceLabels
fingerprint := cluster.LabelFingerprint
for _, nodePool := range cluster.NodePools {
nodeSizeLabel := "restore-size-" + nodePool.Name
_, ok := labels[nodeSizeLabel]
if !ok {
// set new label
labels[nodeSizeLabel] = strconv.FormatInt(currentNodeSize[nodePool.Name], 10)
}
}
parseRegion := strings.Split(cluster.Location, "/")
region := parseRegion[len(parseRegion)-1]
name := "projects/" + projectID + "/locations/" + region + "/clusters/" + cluster.Name
req := &container.SetLabelsRequest{
ResourceLabels: labels,
LabelFingerprint: fingerprint,
}
// update labels
_, err := container.NewProjectsLocationsClustersService(s).SetResourceLabels(name, req).Do()
if err != nil {
return err
}
}
return nil
}
// GetOriginalNodePoolSize returns map that key=instanceGroupName and value=originalSize
func GetOriginalNodePoolSize(ctx context.Context, projectID, targetLabel, labelValue string) (map[string]int64, error) {
s, err := container.NewService(ctx)
if err != nil {
return nil, err
}
// get all clusters list
clusters, err := container.NewProjectsLocationsClustersService(s).List("projects/" + projectID + "/locations/-").Do()
if err != nil {
return nil, err
}
result := make(map[string]int64)
for _, cluster := range filter(clusters.Clusters, targetLabel, labelValue) {
labels := cluster.ResourceLabels
for _, nodePool := range cluster.NodePools {
restoreSize, ok := labels["restore-size-"+nodePool.Name]
if !ok {
continue
}
size, err := strconv.Atoi(restoreSize)
if err != nil {
return nil, errors.New("label: " + "restore-size-" + nodePool.Name + " value is not number format?")
}
for _, url := range nodePool.InstanceGroupUrls {
// u;rl is below format
// e.g. https://www.googleapis.com/compute/v1/projects/{ProjectID}/zones/us-central1-a/instanceGroupManagers/gke-standard-cluster-1-default-pool-1234abcd-grp
urlSplit := strings.Split(url, "/")
instanceGroupName := urlSplit[len(urlSplit)-1]
result[instanceGroupName] = int64(size)
}
}
}
return result, nil
}
// GetCurrentNodeCount returns map that key=NodePoolName and value=currentSize
func GetCurrentNodeCount(ctx context.Context, projectID, targetLabel string) (map[string]int64, error) {
s, err := container.NewService(ctx)
if err != nil {
return nil, err
}
computeService, err := compute.NewService(ctx)
if err != nil {
return nil, err
}
// get all clusters list
clusters, err := container.NewProjectsLocationsClustersService(s).List("projects/" + projectID + "/locations/-").Do()
if err != nil {
return nil, err
}
result := make(map[string]int64)
reZone := regexp.MustCompile(".*/zones/")
reInstance := regexp.MustCompile(".*/instanceGroupManagers/")
reEtc := regexp.MustCompile("/.*")
for _, cluster := range filter(clusters.Clusters, targetLabel, "true") {
for _, nodePool := range cluster.NodePools {
// nodePool.InstanceGroupUrls's format is below
// ["https://www.googleapis.com/compute/v1/projects/<projectID>/zones/<zone1>/instanceGroupManagers/gke-test-scheduler-2-default-pool-2b19b588-grp",
// "https://www.googleapis.com/compute/v1/projects/<projectID>/zones/<zone2>/instanceGroupManagers/gke-test-scheduler-2-default-pool-2b19b588-grp",
// "https://www.googleapis.com/compute/v1/projects/<projectID>/zones/<zone3>/instanceGroupManagers/gke-test-scheduler-2-default-pool-2b19b588-grp"]
zone := reZone.ReplaceAllString(nodePool.InstanceGroupUrls[0], "") //"<zone1>/instanceGroupManagers/gke-test-scheduler-2-default-pool-2b19b588-grp"
zone = reEtc.ReplaceAllString(zone, "") //"<zone1>
instanceGroup := reInstance.ReplaceAllString(nodePool.InstanceGroupUrls[0], "")
resp, err := computeService.InstanceGroups.Get(projectID, zone, instanceGroup).Context(ctx).Do()
if err != nil {
return nil, err
}
size := resp.Size
result[nodePool.Name] = size
}
}
return result, nil
}
// grep target cluster and create target cluster list
func filter(l []*container.Cluster, label, value string) []*container.Cluster {
if label == "" |
var res []*container.Cluster
for _, cluster := range l {
if cluster.ResourceLabels[label] == value {
res = append(res, cluster)
}
}
return res
}
| { //TODO Temp impl
return l
} | conditional_block |
gke.go | /**
* Copyright (c) 2019-present Future Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package operator
import (
"errors"
"fmt"
"regexp"
"strconv"
"strings"
"time"
set "github.com/deckarep/golang-set"
"github.com/future-architect/gcp-instance-scheduler/model"
"github.com/hashicorp/go-multierror"
"golang.org/x/net/context"
"google.golang.org/api/compute/v1"
"google.golang.org/api/container/v1"
)
type GKENodePoolCall struct {
targetLabel string
projectID string
error error
s *compute.Service
ctx context.Context
targetLabelValue string
}
func GKENodePool(ctx context.Context, projectID string) *GKENodePoolCall {
s, err := compute.NewService(ctx)
if err != nil {
return &GKENodePoolCall{error: err}
}
// get all templates list
return &GKENodePoolCall{
s: s,
projectID: projectID,
ctx: ctx,
}
}
func (r *GKENodePoolCall) Filter(labelName, value string) *GKENodePoolCall |
func (r *GKENodePoolCall) Resize(size int64) (*model.Report, error) {
if r.error != nil {
return nil, r.error
}
// get all instance group mangers list
managerList, err := compute.NewInstanceGroupManagersService(r.s).AggregatedList(r.projectID).Do()
if err != nil {
return nil, err
}
// add instance group name of cluster node pool to Set
gkeNodePoolInstanceGroupSet, err := r.getGKEInstanceGroup()
if err != nil {
return nil, err
}
fmt.Println("gkeNodePoolInstanceGroupSet:", gkeNodePoolInstanceGroupSet.ToSlice())
var res = r.error
var alreadyRes []string
var doneRes []string
for _, manager := range valuesIG(managerList.Items) {
fmt.Println("manager.InstanceTemplate:", manager.InstanceTemplate)
fmt.Println("manager.Name:", manager.Name)
// Check GKE NodePool InstanceGroup
if gkeNodePoolInstanceGroupSet.Contains(manager.Name) {
if !manager.Status.IsStable {
continue
}
if manager.TargetSize == size {
alreadyRes = append(alreadyRes, manager.Name)
continue
}
// get manager zone name
zoneUrlElements := strings.Split(manager.Zone, "/")
zone := zoneUrlElements[len(zoneUrlElements)-1]
ms := compute.NewInstanceGroupManagersService(r.s)
if _, err := ms.Resize(r.projectID, zone, manager.Name, size).Do(); err != nil {
res = multierror.Append(res, err)
continue
}
doneRes = append(doneRes, manager.Name)
}
time.Sleep(CallInterval)
}
return &model.Report{
InstanceType: model.GKENodePool,
Dones: doneRes,
Alreadies: alreadyRes,
}, res
}
func (r *GKENodePoolCall) Recovery() (*model.Report, error) {
if r.error != nil {
return nil, r.error
}
managerList, err := compute.NewInstanceGroupManagersService(r.s).AggregatedList(r.projectID).Do()
if err != nil {
return nil, err
}
// add instance group name of cluster node pool to Set
gkeNodePoolInstanceGroupSet, err := r.getGKEInstanceGroup()
if err != nil {
return nil, err
}
sizeMap, err := GetOriginalNodePoolSize(r.ctx, r.projectID, r.targetLabel, r.targetLabelValue)
if err != nil {
return nil, err
}
var res = r.error
var doneRes []string
var alreadyRes []string
for _, manager := range valuesIG(managerList.Items) {
// check instance group of gke node pool
if gkeNodePoolInstanceGroupSet.Contains(manager.Name) {
if !manager.Status.IsStable {
continue
}
split := strings.Split(manager.InstanceGroup, "/")
instanceGroupName := split[len(split)-1]
originalSize := sizeMap[instanceGroupName]
if manager.TargetSize == originalSize {
alreadyRes = append(alreadyRes, manager.Name)
continue
}
// get manager zone name
zoneUrlElements := strings.Split(manager.Zone, "/")
zone := zoneUrlElements[len(zoneUrlElements)-1] // ex) us-central1-a
ms := compute.NewInstanceGroupManagersService(r.s)
if _, err := ms.Resize(r.projectID, zone, manager.Name, originalSize).Do(); err != nil {
res = multierror.Append(res, err)
continue
}
doneRes = append(doneRes, manager.Name)
}
time.Sleep(CallInterval)
}
return &model.Report{
InstanceType: model.GKENodePool,
Dones: doneRes,
Alreadies: alreadyRes,
}, res
}
// get target GKE instance group Set
func (r *GKENodePoolCall) getGKEInstanceGroup() (set.Set, error) {
s, err := container.NewService(r.ctx)
if err != nil {
return nil, err
}
// get all clusters list
clusters, err := container.NewProjectsLocationsClustersService(s).List("projects/" + r.projectID + "/locations/-").Do()
if err != nil {
return nil, err
}
res := set.NewSet()
for _, cluster := range filter(clusters.Clusters, r.targetLabel, r.targetLabelValue) {
for _, nodePool := range cluster.NodePools {
for _, gkeInstanceGroup := range nodePool.InstanceGroupUrls {
tmpUrlElements := strings.Split(gkeInstanceGroup, "/")
managerTemplate := tmpUrlElements[len(tmpUrlElements)-1]
res.Add(managerTemplate) // e.g. gke-tky-cluster-default-pool-cb765a7d-grp
}
}
}
return res, nil
}
func SetLableIfNoLabel(ctx context.Context, projectID, targetLabel string) error {
s, err := container.NewService(ctx)
if err != nil {
return err
}
currentNodeSize, err := GetCurrentNodeCount(ctx, projectID, targetLabel)
if err != nil {
return err
}
// get all clusters list
clusters, err := container.NewProjectsLocationsClustersService(s).List("projects/" + projectID + "/locations/-").Do()
if err != nil {
return err
}
for _, cluster := range filter(clusters.Clusters, targetLabel, "true") {
labels := cluster.ResourceLabels
fingerprint := cluster.LabelFingerprint
for _, nodePool := range cluster.NodePools {
nodeSizeLabel := "restore-size-" + nodePool.Name
_, ok := labels[nodeSizeLabel]
if !ok {
// set new label
labels[nodeSizeLabel] = strconv.FormatInt(currentNodeSize[nodePool.Name], 10)
}
}
parseRegion := strings.Split(cluster.Location, "/")
region := parseRegion[len(parseRegion)-1]
name := "projects/" + projectID + "/locations/" + region + "/clusters/" + cluster.Name
req := &container.SetLabelsRequest{
ResourceLabels: labels,
LabelFingerprint: fingerprint,
}
// update labels
_, err := container.NewProjectsLocationsClustersService(s).SetResourceLabels(name, req).Do()
if err != nil {
return err
}
}
return nil
}
// GetOriginalNodePoolSize returns map that key=instanceGroupName and value=originalSize
func GetOriginalNodePoolSize(ctx context.Context, projectID, targetLabel, labelValue string) (map[string]int64, error) {
s, err := container.NewService(ctx)
if err != nil {
return nil, err
}
// get all clusters list
clusters, err := container.NewProjectsLocationsClustersService(s).List("projects/" + projectID + "/locations/-").Do()
if err != nil {
return nil, err
}
result := make(map[string]int64)
for _, cluster := range filter(clusters.Clusters, targetLabel, labelValue) {
labels := cluster.ResourceLabels
for _, nodePool := range cluster.NodePools {
restoreSize, ok := labels["restore-size-"+nodePool.Name]
if !ok {
continue
}
size, err := strconv.Atoi(restoreSize)
if err != nil {
return nil, errors.New("label: " + "restore-size-" + nodePool.Name + " value is not number format?")
}
for _, url := range nodePool.InstanceGroupUrls {
// u;rl is below format
// e.g. https://www.googleapis.com/compute/v1/projects/{ProjectID}/zones/us-central1-a/instanceGroupManagers/gke-standard-cluster-1-default-pool-1234abcd-grp
urlSplit := strings.Split(url, "/")
instanceGroupName := urlSplit[len(urlSplit)-1]
result[instanceGroupName] = int64(size)
}
}
}
return result, nil
}
// GetCurrentNodeCount returns map that key=NodePoolName and value=currentSize
func GetCurrentNodeCount(ctx context.Context, projectID, targetLabel string) (map[string]int64, error) {
s, err := container.NewService(ctx)
if err != nil {
return nil, err
}
computeService, err := compute.NewService(ctx)
if err != nil {
return nil, err
}
// get all clusters list
clusters, err := container.NewProjectsLocationsClustersService(s).List("projects/" + projectID + "/locations/-").Do()
if err != nil {
return nil, err
}
result := make(map[string]int64)
reZone := regexp.MustCompile(".*/zones/")
reInstance := regexp.MustCompile(".*/instanceGroupManagers/")
reEtc := regexp.MustCompile("/.*")
for _, cluster := range filter(clusters.Clusters, targetLabel, "true") {
for _, nodePool := range cluster.NodePools {
// nodePool.InstanceGroupUrls's format is below
// ["https://www.googleapis.com/compute/v1/projects/<projectID>/zones/<zone1>/instanceGroupManagers/gke-test-scheduler-2-default-pool-2b19b588-grp",
// "https://www.googleapis.com/compute/v1/projects/<projectID>/zones/<zone2>/instanceGroupManagers/gke-test-scheduler-2-default-pool-2b19b588-grp",
// "https://www.googleapis.com/compute/v1/projects/<projectID>/zones/<zone3>/instanceGroupManagers/gke-test-scheduler-2-default-pool-2b19b588-grp"]
zone := reZone.ReplaceAllString(nodePool.InstanceGroupUrls[0], "") //"<zone1>/instanceGroupManagers/gke-test-scheduler-2-default-pool-2b19b588-grp"
zone = reEtc.ReplaceAllString(zone, "") //"<zone1>
instanceGroup := reInstance.ReplaceAllString(nodePool.InstanceGroupUrls[0], "")
resp, err := computeService.InstanceGroups.Get(projectID, zone, instanceGroup).Context(ctx).Do()
if err != nil {
return nil, err
}
size := resp.Size
result[nodePool.Name] = size
}
}
return result, nil
}
// grep target cluster and create target cluster list
func filter(l []*container.Cluster, label, value string) []*container.Cluster {
if label == "" { //TODO Temp impl
return l
}
var res []*container.Cluster
for _, cluster := range l {
if cluster.ResourceLabels[label] == value {
res = append(res, cluster)
}
}
return res
}
| {
if r.error != nil {
return r
}
r.targetLabel = labelName
r.targetLabelValue = value
return r
} | identifier_body |
blob.go | package types
import (
"bytes"
"crypto/sha256"
"encoding/binary"
"fmt"
)
const (
/*
* Currently only to support Legacy VPN plugins, and Mac App Store
* but intended to replace all the various platform code, dev code etc. bits.
*/
CS_SIGNER_TYPE_UNKNOWN = 0
CS_SIGNER_TYPE_LEGACYVPN = 5
CS_SIGNER_TYPE_MAC_APP_STORE = 6
CS_SUPPL_SIGNER_TYPE_UNKNOWN = 0
CS_SUPPL_SIGNER_TYPE_TRUSTCACHE = 7
CS_SUPPL_SIGNER_TYPE_LOCAL = 8
CS_SIGNER_TYPE_OOPJIT = 9
/* Validation categories used for trusted launch environment */
CS_VALIDATION_CATEGORY_INVALID = 0
CS_VALIDATION_CATEGORY_PLATFORM = 1
CS_VALIDATION_CATEGORY_TESTFLIGHT = 2
CS_VALIDATION_CATEGORY_DEVELOPMENT = 3
CS_VALIDATION_CATEGORY_APP_STORE = 4
CS_VALIDATION_CATEGORY_ENTERPRISE = 5
CS_VALIDATION_CATEGORY_DEVELOPER_ID = 6
CS_VALIDATION_CATEGORY_LOCAL_SIGNING = 7
CS_VALIDATION_CATEGORY_ROSETTA = 8
CS_VALIDATION_CATEGORY_OOPJIT = 9
CS_VALIDATION_CATEGORY_NONE = 10
/* The set of application types we support for linkage signatures */
CS_LINKAGE_APPLICATION_INVALID = 0
CS_LINKAGE_APPLICATION_ROSETTA = 1
/* XOJIT has been renamed to OOP-JIT */
CS_LINKAGE_APPLICATION_XOJIT = 2
CS_LINKAGE_APPLICATION_OOPJIT = 2
/* The set of application sub-types we support for linkage signatures */
/*
* For backwards compatibility with older signatures, the AOT sub-type is kept
* as 0.
*/
CS_LINKAGE_APPLICATION_ROSETTA_AOT = 0
/* OOP-JIT sub-types -- XOJIT type kept for external dependencies */
CS_LINKAGE_APPLICATION_XOJIT_PREVIEWS = 1
CS_LINKAGE_APPLICATION_OOPJIT_INVALID = 0
CS_LINKAGE_APPLICATION_OOPJIT_PREVIEWS = 1
CS_LINKAGE_APPLICATION_OOPJIT_MLCOMPILER = 2
CSTYPE_INDEX_REQUIREMENTS = 0x00000002 /* compat with amfi */
CSTYPE_INDEX_ENTITLEMENTS = 0x00000005 /* compat with amfi */
)
const (
/*
* Defined launch types
*/
CS_LAUNCH_TYPE_NONE = 0
CS_LAUNCH_TYPE_SYSTEM_SERVICE = 1
)
var NULL_PAGE_SHA256_HASH = []byte{
0xad, 0x7f, 0xac, 0xb2, 0x58, 0x6f, 0xc6, 0xe9,
0x66, 0xc0, 0x04, 0xd7, 0xd1, 0xd1, 0x6b, 0x02,
0x4f, 0x58, 0x05, 0xff, 0x7c, 0xb4, 0x7c, 0x7a,
0x85, 0xda, 0xbd, 0x8b, 0x48, 0x89, 0x2c, 0xa7,
}
type Magic uint32
const (
// Magic numbers used by Code Signing
MAGIC_REQUIREMENT Magic = 0xfade0c00 // single Requirement blob
MAGIC_REQUIREMENTS Magic = 0xfade0c01 // Requirements vector (internal requirements)
MAGIC_CODEDIRECTORY Magic = 0xfade0c02 // CodeDirectory blob
MAGIC_EMBEDDED_SIGNATURE Magic = 0xfade0cc0 // embedded form of signature data
MAGIC_EMBEDDED_SIGNATURE_OLD Magic = 0xfade0b02 /* XXX */
MAGIC_LIBRARY_DEPENDENCY_BLOB Magic = 0xfade0c05
MAGIC_EMBEDDED_ENTITLEMENTS Magic = 0xfade7171 /* embedded entitlements */
MAGIC_EMBEDDED_ENTITLEMENTS_DER Magic = 0xfade7172 /* embedded entitlements */
MAGIC_DETACHED_SIGNATURE Magic = 0xfade0cc1 // multi-arch collection of embedded signatures
MAGIC_BLOBWRAPPER Magic = 0xfade0b01 // used for the cms blob
MAGIC_EMBEDDED_LAUNCH_CONSTRAINT Magic = 0xfade8181 // Light weight code requirement
)
func (cm Magic) String() string {
switch cm {
case MAGIC_REQUIREMENT:
return "Requirement"
case MAGIC_REQUIREMENTS:
return "Requirements"
case MAGIC_CODEDIRECTORY:
return "Codedirectory"
case MAGIC_EMBEDDED_SIGNATURE:
return "Embedded Signature"
case MAGIC_EMBEDDED_SIGNATURE_OLD:
return "Embedded Signature (Old)"
case MAGIC_LIBRARY_DEPENDENCY_BLOB:
return "Library Dependency Blob"
case MAGIC_EMBEDDED_ENTITLEMENTS:
return "Embedded Entitlements"
case MAGIC_EMBEDDED_ENTITLEMENTS_DER:
return "Embedded Entitlements (DER)"
case MAGIC_DETACHED_SIGNATURE:
return "Detached Signature"
case MAGIC_BLOBWRAPPER:
return "Blob Wrapper"
case MAGIC_EMBEDDED_LAUNCH_CONSTRAINT:
return "Embedded Launch Constraint"
default:
return fmt.Sprintf("Magic(%#x)", uint32(cm))
}
}
type SbHeader struct {
Magic Magic `json:"magic,omitempty"` // magic number
Length uint32 `json:"length,omitempty"` // total length of SuperBlob
Count uint32 `json:"count,omitempty"` // number of index entries following
}
// SuperBlob object
type SuperBlob struct {
SbHeader
Index []BlobIndex // (count) entries
Blobs []Blob // followed by Blobs in no particular order as indicated by offsets in index
}
func NewSuperBlob(magic Magic) SuperBlob {
return SuperBlob{
SbHeader: SbHeader{
Magic: magic,
},
}
}
func (s *SuperBlob) AddBlob(typ SlotType, blob Blob) {
idx := BlobIndex{
Type: typ,
}
s.Index = append(s.Index, idx)
s.Blobs = append(s.Blobs, blob)
s.Count++
s.Length += uint32(binary.Size(BlobHeader{}.Magic)) + blob.Length + uint32(binary.Size(idx))
}
func (s *SuperBlob) GetBlob(typ SlotType) (Blob, error) {
for i, idx := range s.Index {
if idx.Type == typ {
return s.Blobs[i], nil
}
}
return Blob{}, fmt.Errorf("blob not found")
}
func (s *SuperBlob) Size() int {
sz := binary.Size(s.SbHeader) + binary.Size(BlobHeader{}) + binary.Size(s.Index)
for _, blob := range s.Blobs {
sz += binary.Size(blob.BlobHeader)
sz += len(blob.Data)
}
return sz
}
func (s *SuperBlob) Write(buf *bytes.Buffer, o binary.ByteOrder) error {
off := uint32(binary.Size(s.SbHeader) + binary.Size(s.Index))
for i := range s.Index {
s.Index[i].Offset = off
off += s.Blobs[i].Length
}
if err := binary.Write(buf, o, s.SbHeader); err != nil {
return fmt.Errorf("failed to write SuperBlob header to buffer: %v", err)
}
if err := binary.Write(buf, o, s.Index); err != nil {
return fmt.Errorf("failed to write SuperBlob indices to buffer: %v", err)
}
for _, blob := range s.Blobs {
if err := binary.Write(buf, o, blob.BlobHeader); err != nil {
return fmt.Errorf("failed to write blob header to superblob buffer: %v", err)
}
if err := binary.Write(buf, o, blob.Data); err != nil {
return fmt.Errorf("failed to write blob data to superblob buffer: %v", err)
}
}
return nil
}
type SlotType uint32
const (
CSSLOT_CODEDIRECTORY SlotType = 0
CSSLOT_INFOSLOT SlotType = 1 // Info.plist
CSSLOT_REQUIREMENTS SlotType = 2 // internal requirements
CSSLOT_RESOURCEDIR SlotType = 3 // resource directory
CSSLOT_APPLICATION SlotType = 4 // Application specific slot/Top-level directory list
CSSLOT_ENTITLEMENTS SlotType = 5 // embedded entitlement configuration
CSSLOT_REP_SPECIFIC SlotType = 6 // for use by disk images
CSSLOT_ENTITLEMENTS_DER SlotType = 7 // DER representation of entitlements plist
CSSLOT_LAUNCH_CONSTRAINT_SELF SlotType = 8
CSSLOT_LAUNCH_CONSTRAINT_PARENT SlotType = 9
CSSLOT_LAUNCH_CONSTRAINT_RESPONSIBLE SlotType = 10
CSSLOT_LIBRARY_CONSTRAINT SlotType = 11
CSSLOT_ALTERNATE_CODEDIRECTORIES SlotType = 0x1000 // Used for expressing a code directory using an alternate digest type.
CSSLOT_ALTERNATE_CODEDIRECTORIES1 SlotType = 0x1001 // Used for expressing a code directory using an alternate digest type.
CSSLOT_ALTERNATE_CODEDIRECTORIES2 SlotType = 0x1002 // Used for expressing a code directory using an alternate digest type.
CSSLOT_ALTERNATE_CODEDIRECTORIES3 SlotType = 0x1003 // Used for expressing a code directory using an alternate digest type.
CSSLOT_ALTERNATE_CODEDIRECTORIES4 SlotType = 0x1004 // Used for expressing a code directory using an alternate digest type.
CSSLOT_ALTERNATE_CODEDIRECTORY_MAX = 5
CSSLOT_ALTERNATE_CODEDIRECTORY_LIMIT = CSSLOT_ALTERNATE_CODEDIRECTORIES + CSSLOT_ALTERNATE_CODEDIRECTORY_MAX
CSSLOT_CMS_SIGNATURE SlotType = 0x10000 // CMS signature
CSSLOT_IDENTIFICATIONSLOT SlotType = 0x10001 // identification blob; used for detached signature
CSSLOT_TICKETSLOT SlotType = 0x10002 // Notarization ticket
)
func (c SlotType) String() string {
switch c {
case CSSLOT_CODEDIRECTORY:
return "CodeDirectory"
case CSSLOT_INFOSLOT:
return "Bound Info.plist"
case CSSLOT_REQUIREMENTS:
return "Requirements Blob"
case CSSLOT_RESOURCEDIR:
return "Resource Directory"
case CSSLOT_APPLICATION:
return "Application Specific"
case CSSLOT_ENTITLEMENTS:
return "Entitlements Plist"
case CSSLOT_REP_SPECIFIC:
return "DMG Specific"
case CSSLOT_ENTITLEMENTS_DER:
return "Entitlements ASN1/DER" | return "Launch Constraint (parent)"
case CSSLOT_LAUNCH_CONSTRAINT_RESPONSIBLE:
return "Launch Constraint (responsible proc)"
case CSSLOT_LIBRARY_CONSTRAINT:
return "Library Constraint"
case CSSLOT_ALTERNATE_CODEDIRECTORIES:
return "Alternate CodeDirectories 0"
case CSSLOT_ALTERNATE_CODEDIRECTORIES1:
return "Alternate CodeDirectories 1"
case CSSLOT_ALTERNATE_CODEDIRECTORIES2:
return "Alternate CodeDirectories 2"
case CSSLOT_ALTERNATE_CODEDIRECTORIES3:
return "Alternate CodeDirectories 3"
case CSSLOT_ALTERNATE_CODEDIRECTORIES4:
return "Alternate CodeDirectories 4"
case CSSLOT_CMS_SIGNATURE:
return "CMS (RFC3852) signature"
case CSSLOT_IDENTIFICATIONSLOT:
return "IdentificationSlot"
case CSSLOT_TICKETSLOT:
return "TicketSlot"
default:
return fmt.Sprintf("Unknown SlotType: %d", c)
}
}
// BlobIndex object
type BlobIndex struct {
Type SlotType `json:"type,omitempty"` // type of entry
Offset uint32 `json:"offset,omitempty"` // offset of entry
}
type BlobHeader struct {
Magic Magic `json:"magic,omitempty"` // magic number
Length uint32 `json:"length,omitempty"` // total length of blob
}
// Blob object
type Blob struct {
BlobHeader
Data []byte // (length - sizeof(blob_header)) bytes
}
func NewBlob(magic Magic, data []byte) Blob {
return Blob{
BlobHeader: BlobHeader{
Magic: magic,
Length: uint32(binary.Size(BlobHeader{}) + len(data)),
},
Data: data,
}
}
func (b Blob) Sha256Hash() ([]byte, error) {
h := sha256.New()
if err := binary.Write(h, binary.BigEndian, b.BlobHeader); err != nil {
return nil, fmt.Errorf("failed to hash blob header: %v", err)
}
if err := binary.Write(h, binary.BigEndian, b.Data); err != nil {
return nil, fmt.Errorf("failed to hash blob header: %v", err)
}
return h.Sum(nil), nil
}
func (b Blob) Bytes() ([]byte, error) {
buf := new(bytes.Buffer)
if err := binary.Write(buf, binary.BigEndian, b.BlobHeader); err != nil {
return nil, fmt.Errorf("failed to write blob header to buffer: %v", err)
}
if err := binary.Write(buf, binary.BigEndian, b.Data); err != nil {
return nil, fmt.Errorf("failed to write blob data to buffer: %v", err)
}
return buf.Bytes(), nil
} | case CSSLOT_LAUNCH_CONSTRAINT_SELF:
return "Launch Constraint (self)"
case CSSLOT_LAUNCH_CONSTRAINT_PARENT: | random_line_split |
blob.go | package types
import (
"bytes"
"crypto/sha256"
"encoding/binary"
"fmt"
)
const (
/*
* Currently only to support Legacy VPN plugins, and Mac App Store
* but intended to replace all the various platform code, dev code etc. bits.
*/
CS_SIGNER_TYPE_UNKNOWN = 0
CS_SIGNER_TYPE_LEGACYVPN = 5
CS_SIGNER_TYPE_MAC_APP_STORE = 6
CS_SUPPL_SIGNER_TYPE_UNKNOWN = 0
CS_SUPPL_SIGNER_TYPE_TRUSTCACHE = 7
CS_SUPPL_SIGNER_TYPE_LOCAL = 8
CS_SIGNER_TYPE_OOPJIT = 9
/* Validation categories used for trusted launch environment */
CS_VALIDATION_CATEGORY_INVALID = 0
CS_VALIDATION_CATEGORY_PLATFORM = 1
CS_VALIDATION_CATEGORY_TESTFLIGHT = 2
CS_VALIDATION_CATEGORY_DEVELOPMENT = 3
CS_VALIDATION_CATEGORY_APP_STORE = 4
CS_VALIDATION_CATEGORY_ENTERPRISE = 5
CS_VALIDATION_CATEGORY_DEVELOPER_ID = 6
CS_VALIDATION_CATEGORY_LOCAL_SIGNING = 7
CS_VALIDATION_CATEGORY_ROSETTA = 8
CS_VALIDATION_CATEGORY_OOPJIT = 9
CS_VALIDATION_CATEGORY_NONE = 10
/* The set of application types we support for linkage signatures */
CS_LINKAGE_APPLICATION_INVALID = 0
CS_LINKAGE_APPLICATION_ROSETTA = 1
/* XOJIT has been renamed to OOP-JIT */
CS_LINKAGE_APPLICATION_XOJIT = 2
CS_LINKAGE_APPLICATION_OOPJIT = 2
/* The set of application sub-types we support for linkage signatures */
/*
* For backwards compatibility with older signatures, the AOT sub-type is kept
* as 0.
*/
CS_LINKAGE_APPLICATION_ROSETTA_AOT = 0
/* OOP-JIT sub-types -- XOJIT type kept for external dependencies */
CS_LINKAGE_APPLICATION_XOJIT_PREVIEWS = 1
CS_LINKAGE_APPLICATION_OOPJIT_INVALID = 0
CS_LINKAGE_APPLICATION_OOPJIT_PREVIEWS = 1
CS_LINKAGE_APPLICATION_OOPJIT_MLCOMPILER = 2
CSTYPE_INDEX_REQUIREMENTS = 0x00000002 /* compat with amfi */
CSTYPE_INDEX_ENTITLEMENTS = 0x00000005 /* compat with amfi */
)
const (
/*
* Defined launch types
*/
CS_LAUNCH_TYPE_NONE = 0
CS_LAUNCH_TYPE_SYSTEM_SERVICE = 1
)
var NULL_PAGE_SHA256_HASH = []byte{
0xad, 0x7f, 0xac, 0xb2, 0x58, 0x6f, 0xc6, 0xe9,
0x66, 0xc0, 0x04, 0xd7, 0xd1, 0xd1, 0x6b, 0x02,
0x4f, 0x58, 0x05, 0xff, 0x7c, 0xb4, 0x7c, 0x7a,
0x85, 0xda, 0xbd, 0x8b, 0x48, 0x89, 0x2c, 0xa7,
}
type Magic uint32
const (
// Magic numbers used by Code Signing
MAGIC_REQUIREMENT Magic = 0xfade0c00 // single Requirement blob
MAGIC_REQUIREMENTS Magic = 0xfade0c01 // Requirements vector (internal requirements)
MAGIC_CODEDIRECTORY Magic = 0xfade0c02 // CodeDirectory blob
MAGIC_EMBEDDED_SIGNATURE Magic = 0xfade0cc0 // embedded form of signature data
MAGIC_EMBEDDED_SIGNATURE_OLD Magic = 0xfade0b02 /* XXX */
MAGIC_LIBRARY_DEPENDENCY_BLOB Magic = 0xfade0c05
MAGIC_EMBEDDED_ENTITLEMENTS Magic = 0xfade7171 /* embedded entitlements */
MAGIC_EMBEDDED_ENTITLEMENTS_DER Magic = 0xfade7172 /* embedded entitlements */
MAGIC_DETACHED_SIGNATURE Magic = 0xfade0cc1 // multi-arch collection of embedded signatures
MAGIC_BLOBWRAPPER Magic = 0xfade0b01 // used for the cms blob
MAGIC_EMBEDDED_LAUNCH_CONSTRAINT Magic = 0xfade8181 // Light weight code requirement
)
func (cm Magic) String() string {
switch cm {
case MAGIC_REQUIREMENT:
return "Requirement"
case MAGIC_REQUIREMENTS:
return "Requirements"
case MAGIC_CODEDIRECTORY:
return "Codedirectory"
case MAGIC_EMBEDDED_SIGNATURE:
return "Embedded Signature"
case MAGIC_EMBEDDED_SIGNATURE_OLD:
return "Embedded Signature (Old)"
case MAGIC_LIBRARY_DEPENDENCY_BLOB:
return "Library Dependency Blob"
case MAGIC_EMBEDDED_ENTITLEMENTS:
return "Embedded Entitlements"
case MAGIC_EMBEDDED_ENTITLEMENTS_DER:
return "Embedded Entitlements (DER)"
case MAGIC_DETACHED_SIGNATURE:
return "Detached Signature"
case MAGIC_BLOBWRAPPER:
return "Blob Wrapper"
case MAGIC_EMBEDDED_LAUNCH_CONSTRAINT:
return "Embedded Launch Constraint"
default:
return fmt.Sprintf("Magic(%#x)", uint32(cm))
}
}
type SbHeader struct {
Magic Magic `json:"magic,omitempty"` // magic number
Length uint32 `json:"length,omitempty"` // total length of SuperBlob
Count uint32 `json:"count,omitempty"` // number of index entries following
}
// SuperBlob object
type SuperBlob struct {
SbHeader
Index []BlobIndex // (count) entries
Blobs []Blob // followed by Blobs in no particular order as indicated by offsets in index
}
func NewSuperBlob(magic Magic) SuperBlob {
return SuperBlob{
SbHeader: SbHeader{
Magic: magic,
},
}
}
func (s *SuperBlob) AddBlob(typ SlotType, blob Blob) {
idx := BlobIndex{
Type: typ,
}
s.Index = append(s.Index, idx)
s.Blobs = append(s.Blobs, blob)
s.Count++
s.Length += uint32(binary.Size(BlobHeader{}.Magic)) + blob.Length + uint32(binary.Size(idx))
}
func (s *SuperBlob) GetBlob(typ SlotType) (Blob, error) {
for i, idx := range s.Index {
if idx.Type == typ {
return s.Blobs[i], nil
}
}
return Blob{}, fmt.Errorf("blob not found")
}
func (s *SuperBlob) Size() int {
sz := binary.Size(s.SbHeader) + binary.Size(BlobHeader{}) + binary.Size(s.Index)
for _, blob := range s.Blobs {
sz += binary.Size(blob.BlobHeader)
sz += len(blob.Data)
}
return sz
}
func (s *SuperBlob) Write(buf *bytes.Buffer, o binary.ByteOrder) error {
off := uint32(binary.Size(s.SbHeader) + binary.Size(s.Index))
for i := range s.Index {
s.Index[i].Offset = off
off += s.Blobs[i].Length
}
if err := binary.Write(buf, o, s.SbHeader); err != nil |
if err := binary.Write(buf, o, s.Index); err != nil {
return fmt.Errorf("failed to write SuperBlob indices to buffer: %v", err)
}
for _, blob := range s.Blobs {
if err := binary.Write(buf, o, blob.BlobHeader); err != nil {
return fmt.Errorf("failed to write blob header to superblob buffer: %v", err)
}
if err := binary.Write(buf, o, blob.Data); err != nil {
return fmt.Errorf("failed to write blob data to superblob buffer: %v", err)
}
}
return nil
}
type SlotType uint32
const (
CSSLOT_CODEDIRECTORY SlotType = 0
CSSLOT_INFOSLOT SlotType = 1 // Info.plist
CSSLOT_REQUIREMENTS SlotType = 2 // internal requirements
CSSLOT_RESOURCEDIR SlotType = 3 // resource directory
CSSLOT_APPLICATION SlotType = 4 // Application specific slot/Top-level directory list
CSSLOT_ENTITLEMENTS SlotType = 5 // embedded entitlement configuration
CSSLOT_REP_SPECIFIC SlotType = 6 // for use by disk images
CSSLOT_ENTITLEMENTS_DER SlotType = 7 // DER representation of entitlements plist
CSSLOT_LAUNCH_CONSTRAINT_SELF SlotType = 8
CSSLOT_LAUNCH_CONSTRAINT_PARENT SlotType = 9
CSSLOT_LAUNCH_CONSTRAINT_RESPONSIBLE SlotType = 10
CSSLOT_LIBRARY_CONSTRAINT SlotType = 11
CSSLOT_ALTERNATE_CODEDIRECTORIES SlotType = 0x1000 // Used for expressing a code directory using an alternate digest type.
CSSLOT_ALTERNATE_CODEDIRECTORIES1 SlotType = 0x1001 // Used for expressing a code directory using an alternate digest type.
CSSLOT_ALTERNATE_CODEDIRECTORIES2 SlotType = 0x1002 // Used for expressing a code directory using an alternate digest type.
CSSLOT_ALTERNATE_CODEDIRECTORIES3 SlotType = 0x1003 // Used for expressing a code directory using an alternate digest type.
CSSLOT_ALTERNATE_CODEDIRECTORIES4 SlotType = 0x1004 // Used for expressing a code directory using an alternate digest type.
CSSLOT_ALTERNATE_CODEDIRECTORY_MAX = 5
CSSLOT_ALTERNATE_CODEDIRECTORY_LIMIT = CSSLOT_ALTERNATE_CODEDIRECTORIES + CSSLOT_ALTERNATE_CODEDIRECTORY_MAX
CSSLOT_CMS_SIGNATURE SlotType = 0x10000 // CMS signature
CSSLOT_IDENTIFICATIONSLOT SlotType = 0x10001 // identification blob; used for detached signature
CSSLOT_TICKETSLOT SlotType = 0x10002 // Notarization ticket
)
func (c SlotType) String() string {
switch c {
case CSSLOT_CODEDIRECTORY:
return "CodeDirectory"
case CSSLOT_INFOSLOT:
return "Bound Info.plist"
case CSSLOT_REQUIREMENTS:
return "Requirements Blob"
case CSSLOT_RESOURCEDIR:
return "Resource Directory"
case CSSLOT_APPLICATION:
return "Application Specific"
case CSSLOT_ENTITLEMENTS:
return "Entitlements Plist"
case CSSLOT_REP_SPECIFIC:
return "DMG Specific"
case CSSLOT_ENTITLEMENTS_DER:
return "Entitlements ASN1/DER"
case CSSLOT_LAUNCH_CONSTRAINT_SELF:
return "Launch Constraint (self)"
case CSSLOT_LAUNCH_CONSTRAINT_PARENT:
return "Launch Constraint (parent)"
case CSSLOT_LAUNCH_CONSTRAINT_RESPONSIBLE:
return "Launch Constraint (responsible proc)"
case CSSLOT_LIBRARY_CONSTRAINT:
return "Library Constraint"
case CSSLOT_ALTERNATE_CODEDIRECTORIES:
return "Alternate CodeDirectories 0"
case CSSLOT_ALTERNATE_CODEDIRECTORIES1:
return "Alternate CodeDirectories 1"
case CSSLOT_ALTERNATE_CODEDIRECTORIES2:
return "Alternate CodeDirectories 2"
case CSSLOT_ALTERNATE_CODEDIRECTORIES3:
return "Alternate CodeDirectories 3"
case CSSLOT_ALTERNATE_CODEDIRECTORIES4:
return "Alternate CodeDirectories 4"
case CSSLOT_CMS_SIGNATURE:
return "CMS (RFC3852) signature"
case CSSLOT_IDENTIFICATIONSLOT:
return "IdentificationSlot"
case CSSLOT_TICKETSLOT:
return "TicketSlot"
default:
return fmt.Sprintf("Unknown SlotType: %d", c)
}
}
// BlobIndex object
type BlobIndex struct {
Type SlotType `json:"type,omitempty"` // type of entry
Offset uint32 `json:"offset,omitempty"` // offset of entry
}
type BlobHeader struct {
Magic Magic `json:"magic,omitempty"` // magic number
Length uint32 `json:"length,omitempty"` // total length of blob
}
// Blob object
type Blob struct {
BlobHeader
Data []byte // (length - sizeof(blob_header)) bytes
}
func NewBlob(magic Magic, data []byte) Blob {
return Blob{
BlobHeader: BlobHeader{
Magic: magic,
Length: uint32(binary.Size(BlobHeader{}) + len(data)),
},
Data: data,
}
}
func (b Blob) Sha256Hash() ([]byte, error) {
h := sha256.New()
if err := binary.Write(h, binary.BigEndian, b.BlobHeader); err != nil {
return nil, fmt.Errorf("failed to hash blob header: %v", err)
}
if err := binary.Write(h, binary.BigEndian, b.Data); err != nil {
return nil, fmt.Errorf("failed to hash blob header: %v", err)
}
return h.Sum(nil), nil
}
func (b Blob) Bytes() ([]byte, error) {
buf := new(bytes.Buffer)
if err := binary.Write(buf, binary.BigEndian, b.BlobHeader); err != nil {
return nil, fmt.Errorf("failed to write blob header to buffer: %v", err)
}
if err := binary.Write(buf, binary.BigEndian, b.Data); err != nil {
return nil, fmt.Errorf("failed to write blob data to buffer: %v", err)
}
return buf.Bytes(), nil
}
| {
return fmt.Errorf("failed to write SuperBlob header to buffer: %v", err)
} | conditional_block |
blob.go | package types
import (
"bytes"
"crypto/sha256"
"encoding/binary"
"fmt"
)
const (
/*
* Currently only to support Legacy VPN plugins, and Mac App Store
* but intended to replace all the various platform code, dev code etc. bits.
*/
CS_SIGNER_TYPE_UNKNOWN = 0
CS_SIGNER_TYPE_LEGACYVPN = 5
CS_SIGNER_TYPE_MAC_APP_STORE = 6
CS_SUPPL_SIGNER_TYPE_UNKNOWN = 0
CS_SUPPL_SIGNER_TYPE_TRUSTCACHE = 7
CS_SUPPL_SIGNER_TYPE_LOCAL = 8
CS_SIGNER_TYPE_OOPJIT = 9
/* Validation categories used for trusted launch environment */
CS_VALIDATION_CATEGORY_INVALID = 0
CS_VALIDATION_CATEGORY_PLATFORM = 1
CS_VALIDATION_CATEGORY_TESTFLIGHT = 2
CS_VALIDATION_CATEGORY_DEVELOPMENT = 3
CS_VALIDATION_CATEGORY_APP_STORE = 4
CS_VALIDATION_CATEGORY_ENTERPRISE = 5
CS_VALIDATION_CATEGORY_DEVELOPER_ID = 6
CS_VALIDATION_CATEGORY_LOCAL_SIGNING = 7
CS_VALIDATION_CATEGORY_ROSETTA = 8
CS_VALIDATION_CATEGORY_OOPJIT = 9
CS_VALIDATION_CATEGORY_NONE = 10
/* The set of application types we support for linkage signatures */
CS_LINKAGE_APPLICATION_INVALID = 0
CS_LINKAGE_APPLICATION_ROSETTA = 1
/* XOJIT has been renamed to OOP-JIT */
CS_LINKAGE_APPLICATION_XOJIT = 2
CS_LINKAGE_APPLICATION_OOPJIT = 2
/* The set of application sub-types we support for linkage signatures */
/*
* For backwards compatibility with older signatures, the AOT sub-type is kept
* as 0.
*/
CS_LINKAGE_APPLICATION_ROSETTA_AOT = 0
/* OOP-JIT sub-types -- XOJIT type kept for external dependencies */
CS_LINKAGE_APPLICATION_XOJIT_PREVIEWS = 1
CS_LINKAGE_APPLICATION_OOPJIT_INVALID = 0
CS_LINKAGE_APPLICATION_OOPJIT_PREVIEWS = 1
CS_LINKAGE_APPLICATION_OOPJIT_MLCOMPILER = 2
CSTYPE_INDEX_REQUIREMENTS = 0x00000002 /* compat with amfi */
CSTYPE_INDEX_ENTITLEMENTS = 0x00000005 /* compat with amfi */
)
const (
/*
* Defined launch types
*/
CS_LAUNCH_TYPE_NONE = 0
CS_LAUNCH_TYPE_SYSTEM_SERVICE = 1
)
var NULL_PAGE_SHA256_HASH = []byte{
0xad, 0x7f, 0xac, 0xb2, 0x58, 0x6f, 0xc6, 0xe9,
0x66, 0xc0, 0x04, 0xd7, 0xd1, 0xd1, 0x6b, 0x02,
0x4f, 0x58, 0x05, 0xff, 0x7c, 0xb4, 0x7c, 0x7a,
0x85, 0xda, 0xbd, 0x8b, 0x48, 0x89, 0x2c, 0xa7,
}
type Magic uint32
const (
// Magic numbers used by Code Signing
MAGIC_REQUIREMENT Magic = 0xfade0c00 // single Requirement blob
MAGIC_REQUIREMENTS Magic = 0xfade0c01 // Requirements vector (internal requirements)
MAGIC_CODEDIRECTORY Magic = 0xfade0c02 // CodeDirectory blob
MAGIC_EMBEDDED_SIGNATURE Magic = 0xfade0cc0 // embedded form of signature data
MAGIC_EMBEDDED_SIGNATURE_OLD Magic = 0xfade0b02 /* XXX */
MAGIC_LIBRARY_DEPENDENCY_BLOB Magic = 0xfade0c05
MAGIC_EMBEDDED_ENTITLEMENTS Magic = 0xfade7171 /* embedded entitlements */
MAGIC_EMBEDDED_ENTITLEMENTS_DER Magic = 0xfade7172 /* embedded entitlements */
MAGIC_DETACHED_SIGNATURE Magic = 0xfade0cc1 // multi-arch collection of embedded signatures
MAGIC_BLOBWRAPPER Magic = 0xfade0b01 // used for the cms blob
MAGIC_EMBEDDED_LAUNCH_CONSTRAINT Magic = 0xfade8181 // Light weight code requirement
)
func (cm Magic) String() string {
switch cm {
case MAGIC_REQUIREMENT:
return "Requirement"
case MAGIC_REQUIREMENTS:
return "Requirements"
case MAGIC_CODEDIRECTORY:
return "Codedirectory"
case MAGIC_EMBEDDED_SIGNATURE:
return "Embedded Signature"
case MAGIC_EMBEDDED_SIGNATURE_OLD:
return "Embedded Signature (Old)"
case MAGIC_LIBRARY_DEPENDENCY_BLOB:
return "Library Dependency Blob"
case MAGIC_EMBEDDED_ENTITLEMENTS:
return "Embedded Entitlements"
case MAGIC_EMBEDDED_ENTITLEMENTS_DER:
return "Embedded Entitlements (DER)"
case MAGIC_DETACHED_SIGNATURE:
return "Detached Signature"
case MAGIC_BLOBWRAPPER:
return "Blob Wrapper"
case MAGIC_EMBEDDED_LAUNCH_CONSTRAINT:
return "Embedded Launch Constraint"
default:
return fmt.Sprintf("Magic(%#x)", uint32(cm))
}
}
type SbHeader struct {
Magic Magic `json:"magic,omitempty"` // magic number
Length uint32 `json:"length,omitempty"` // total length of SuperBlob
Count uint32 `json:"count,omitempty"` // number of index entries following
}
// SuperBlob object
type SuperBlob struct {
SbHeader
Index []BlobIndex // (count) entries
Blobs []Blob // followed by Blobs in no particular order as indicated by offsets in index
}
func NewSuperBlob(magic Magic) SuperBlob {
return SuperBlob{
SbHeader: SbHeader{
Magic: magic,
},
}
}
func (s *SuperBlob) AddBlob(typ SlotType, blob Blob) {
idx := BlobIndex{
Type: typ,
}
s.Index = append(s.Index, idx)
s.Blobs = append(s.Blobs, blob)
s.Count++
s.Length += uint32(binary.Size(BlobHeader{}.Magic)) + blob.Length + uint32(binary.Size(idx))
}
func (s *SuperBlob) GetBlob(typ SlotType) (Blob, error) {
for i, idx := range s.Index {
if idx.Type == typ {
return s.Blobs[i], nil
}
}
return Blob{}, fmt.Errorf("blob not found")
}
func (s *SuperBlob) Size() int {
sz := binary.Size(s.SbHeader) + binary.Size(BlobHeader{}) + binary.Size(s.Index)
for _, blob := range s.Blobs {
sz += binary.Size(blob.BlobHeader)
sz += len(blob.Data)
}
return sz
}
func (s *SuperBlob) Write(buf *bytes.Buffer, o binary.ByteOrder) error {
off := uint32(binary.Size(s.SbHeader) + binary.Size(s.Index))
for i := range s.Index {
s.Index[i].Offset = off
off += s.Blobs[i].Length
}
if err := binary.Write(buf, o, s.SbHeader); err != nil {
return fmt.Errorf("failed to write SuperBlob header to buffer: %v", err)
}
if err := binary.Write(buf, o, s.Index); err != nil {
return fmt.Errorf("failed to write SuperBlob indices to buffer: %v", err)
}
for _, blob := range s.Blobs {
if err := binary.Write(buf, o, blob.BlobHeader); err != nil {
return fmt.Errorf("failed to write blob header to superblob buffer: %v", err)
}
if err := binary.Write(buf, o, blob.Data); err != nil {
return fmt.Errorf("failed to write blob data to superblob buffer: %v", err)
}
}
return nil
}
type SlotType uint32
const (
CSSLOT_CODEDIRECTORY SlotType = 0
CSSLOT_INFOSLOT SlotType = 1 // Info.plist
CSSLOT_REQUIREMENTS SlotType = 2 // internal requirements
CSSLOT_RESOURCEDIR SlotType = 3 // resource directory
CSSLOT_APPLICATION SlotType = 4 // Application specific slot/Top-level directory list
CSSLOT_ENTITLEMENTS SlotType = 5 // embedded entitlement configuration
CSSLOT_REP_SPECIFIC SlotType = 6 // for use by disk images
CSSLOT_ENTITLEMENTS_DER SlotType = 7 // DER representation of entitlements plist
CSSLOT_LAUNCH_CONSTRAINT_SELF SlotType = 8
CSSLOT_LAUNCH_CONSTRAINT_PARENT SlotType = 9
CSSLOT_LAUNCH_CONSTRAINT_RESPONSIBLE SlotType = 10
CSSLOT_LIBRARY_CONSTRAINT SlotType = 11
CSSLOT_ALTERNATE_CODEDIRECTORIES SlotType = 0x1000 // Used for expressing a code directory using an alternate digest type.
CSSLOT_ALTERNATE_CODEDIRECTORIES1 SlotType = 0x1001 // Used for expressing a code directory using an alternate digest type.
CSSLOT_ALTERNATE_CODEDIRECTORIES2 SlotType = 0x1002 // Used for expressing a code directory using an alternate digest type.
CSSLOT_ALTERNATE_CODEDIRECTORIES3 SlotType = 0x1003 // Used for expressing a code directory using an alternate digest type.
CSSLOT_ALTERNATE_CODEDIRECTORIES4 SlotType = 0x1004 // Used for expressing a code directory using an alternate digest type.
CSSLOT_ALTERNATE_CODEDIRECTORY_MAX = 5
CSSLOT_ALTERNATE_CODEDIRECTORY_LIMIT = CSSLOT_ALTERNATE_CODEDIRECTORIES + CSSLOT_ALTERNATE_CODEDIRECTORY_MAX
CSSLOT_CMS_SIGNATURE SlotType = 0x10000 // CMS signature
CSSLOT_IDENTIFICATIONSLOT SlotType = 0x10001 // identification blob; used for detached signature
CSSLOT_TICKETSLOT SlotType = 0x10002 // Notarization ticket
)
func (c SlotType) String() string {
switch c {
case CSSLOT_CODEDIRECTORY:
return "CodeDirectory"
case CSSLOT_INFOSLOT:
return "Bound Info.plist"
case CSSLOT_REQUIREMENTS:
return "Requirements Blob"
case CSSLOT_RESOURCEDIR:
return "Resource Directory"
case CSSLOT_APPLICATION:
return "Application Specific"
case CSSLOT_ENTITLEMENTS:
return "Entitlements Plist"
case CSSLOT_REP_SPECIFIC:
return "DMG Specific"
case CSSLOT_ENTITLEMENTS_DER:
return "Entitlements ASN1/DER"
case CSSLOT_LAUNCH_CONSTRAINT_SELF:
return "Launch Constraint (self)"
case CSSLOT_LAUNCH_CONSTRAINT_PARENT:
return "Launch Constraint (parent)"
case CSSLOT_LAUNCH_CONSTRAINT_RESPONSIBLE:
return "Launch Constraint (responsible proc)"
case CSSLOT_LIBRARY_CONSTRAINT:
return "Library Constraint"
case CSSLOT_ALTERNATE_CODEDIRECTORIES:
return "Alternate CodeDirectories 0"
case CSSLOT_ALTERNATE_CODEDIRECTORIES1:
return "Alternate CodeDirectories 1"
case CSSLOT_ALTERNATE_CODEDIRECTORIES2:
return "Alternate CodeDirectories 2"
case CSSLOT_ALTERNATE_CODEDIRECTORIES3:
return "Alternate CodeDirectories 3"
case CSSLOT_ALTERNATE_CODEDIRECTORIES4:
return "Alternate CodeDirectories 4"
case CSSLOT_CMS_SIGNATURE:
return "CMS (RFC3852) signature"
case CSSLOT_IDENTIFICATIONSLOT:
return "IdentificationSlot"
case CSSLOT_TICKETSLOT:
return "TicketSlot"
default:
return fmt.Sprintf("Unknown SlotType: %d", c)
}
}
// BlobIndex object
type BlobIndex struct {
Type SlotType `json:"type,omitempty"` // type of entry
Offset uint32 `json:"offset,omitempty"` // offset of entry
}
type BlobHeader struct {
Magic Magic `json:"magic,omitempty"` // magic number
Length uint32 `json:"length,omitempty"` // total length of blob
}
// Blob object
type Blob struct {
BlobHeader
Data []byte // (length - sizeof(blob_header)) bytes
}
func NewBlob(magic Magic, data []byte) Blob {
return Blob{
BlobHeader: BlobHeader{
Magic: magic,
Length: uint32(binary.Size(BlobHeader{}) + len(data)),
},
Data: data,
}
}
func (b Blob) Sha256Hash() ([]byte, error) |
func (b Blob) Bytes() ([]byte, error) {
buf := new(bytes.Buffer)
if err := binary.Write(buf, binary.BigEndian, b.BlobHeader); err != nil {
return nil, fmt.Errorf("failed to write blob header to buffer: %v", err)
}
if err := binary.Write(buf, binary.BigEndian, b.Data); err != nil {
return nil, fmt.Errorf("failed to write blob data to buffer: %v", err)
}
return buf.Bytes(), nil
}
| {
h := sha256.New()
if err := binary.Write(h, binary.BigEndian, b.BlobHeader); err != nil {
return nil, fmt.Errorf("failed to hash blob header: %v", err)
}
if err := binary.Write(h, binary.BigEndian, b.Data); err != nil {
return nil, fmt.Errorf("failed to hash blob header: %v", err)
}
return h.Sum(nil), nil
} | identifier_body |
blob.go | package types
import (
"bytes"
"crypto/sha256"
"encoding/binary"
"fmt"
)
const (
/*
* Currently only to support Legacy VPN plugins, and Mac App Store
* but intended to replace all the various platform code, dev code etc. bits.
*/
CS_SIGNER_TYPE_UNKNOWN = 0
CS_SIGNER_TYPE_LEGACYVPN = 5
CS_SIGNER_TYPE_MAC_APP_STORE = 6
CS_SUPPL_SIGNER_TYPE_UNKNOWN = 0
CS_SUPPL_SIGNER_TYPE_TRUSTCACHE = 7
CS_SUPPL_SIGNER_TYPE_LOCAL = 8
CS_SIGNER_TYPE_OOPJIT = 9
/* Validation categories used for trusted launch environment */
CS_VALIDATION_CATEGORY_INVALID = 0
CS_VALIDATION_CATEGORY_PLATFORM = 1
CS_VALIDATION_CATEGORY_TESTFLIGHT = 2
CS_VALIDATION_CATEGORY_DEVELOPMENT = 3
CS_VALIDATION_CATEGORY_APP_STORE = 4
CS_VALIDATION_CATEGORY_ENTERPRISE = 5
CS_VALIDATION_CATEGORY_DEVELOPER_ID = 6
CS_VALIDATION_CATEGORY_LOCAL_SIGNING = 7
CS_VALIDATION_CATEGORY_ROSETTA = 8
CS_VALIDATION_CATEGORY_OOPJIT = 9
CS_VALIDATION_CATEGORY_NONE = 10
/* The set of application types we support for linkage signatures */
CS_LINKAGE_APPLICATION_INVALID = 0
CS_LINKAGE_APPLICATION_ROSETTA = 1
/* XOJIT has been renamed to OOP-JIT */
CS_LINKAGE_APPLICATION_XOJIT = 2
CS_LINKAGE_APPLICATION_OOPJIT = 2
/* The set of application sub-types we support for linkage signatures */
/*
* For backwards compatibility with older signatures, the AOT sub-type is kept
* as 0.
*/
CS_LINKAGE_APPLICATION_ROSETTA_AOT = 0
/* OOP-JIT sub-types -- XOJIT type kept for external dependencies */
CS_LINKAGE_APPLICATION_XOJIT_PREVIEWS = 1
CS_LINKAGE_APPLICATION_OOPJIT_INVALID = 0
CS_LINKAGE_APPLICATION_OOPJIT_PREVIEWS = 1
CS_LINKAGE_APPLICATION_OOPJIT_MLCOMPILER = 2
CSTYPE_INDEX_REQUIREMENTS = 0x00000002 /* compat with amfi */
CSTYPE_INDEX_ENTITLEMENTS = 0x00000005 /* compat with amfi */
)
const (
/*
* Defined launch types
*/
CS_LAUNCH_TYPE_NONE = 0
CS_LAUNCH_TYPE_SYSTEM_SERVICE = 1
)
var NULL_PAGE_SHA256_HASH = []byte{
0xad, 0x7f, 0xac, 0xb2, 0x58, 0x6f, 0xc6, 0xe9,
0x66, 0xc0, 0x04, 0xd7, 0xd1, 0xd1, 0x6b, 0x02,
0x4f, 0x58, 0x05, 0xff, 0x7c, 0xb4, 0x7c, 0x7a,
0x85, 0xda, 0xbd, 0x8b, 0x48, 0x89, 0x2c, 0xa7,
}
type Magic uint32
const (
// Magic numbers used by Code Signing
MAGIC_REQUIREMENT Magic = 0xfade0c00 // single Requirement blob
MAGIC_REQUIREMENTS Magic = 0xfade0c01 // Requirements vector (internal requirements)
MAGIC_CODEDIRECTORY Magic = 0xfade0c02 // CodeDirectory blob
MAGIC_EMBEDDED_SIGNATURE Magic = 0xfade0cc0 // embedded form of signature data
MAGIC_EMBEDDED_SIGNATURE_OLD Magic = 0xfade0b02 /* XXX */
MAGIC_LIBRARY_DEPENDENCY_BLOB Magic = 0xfade0c05
MAGIC_EMBEDDED_ENTITLEMENTS Magic = 0xfade7171 /* embedded entitlements */
MAGIC_EMBEDDED_ENTITLEMENTS_DER Magic = 0xfade7172 /* embedded entitlements */
MAGIC_DETACHED_SIGNATURE Magic = 0xfade0cc1 // multi-arch collection of embedded signatures
MAGIC_BLOBWRAPPER Magic = 0xfade0b01 // used for the cms blob
MAGIC_EMBEDDED_LAUNCH_CONSTRAINT Magic = 0xfade8181 // Light weight code requirement
)
func (cm Magic) String() string {
switch cm {
case MAGIC_REQUIREMENT:
return "Requirement"
case MAGIC_REQUIREMENTS:
return "Requirements"
case MAGIC_CODEDIRECTORY:
return "Codedirectory"
case MAGIC_EMBEDDED_SIGNATURE:
return "Embedded Signature"
case MAGIC_EMBEDDED_SIGNATURE_OLD:
return "Embedded Signature (Old)"
case MAGIC_LIBRARY_DEPENDENCY_BLOB:
return "Library Dependency Blob"
case MAGIC_EMBEDDED_ENTITLEMENTS:
return "Embedded Entitlements"
case MAGIC_EMBEDDED_ENTITLEMENTS_DER:
return "Embedded Entitlements (DER)"
case MAGIC_DETACHED_SIGNATURE:
return "Detached Signature"
case MAGIC_BLOBWRAPPER:
return "Blob Wrapper"
case MAGIC_EMBEDDED_LAUNCH_CONSTRAINT:
return "Embedded Launch Constraint"
default:
return fmt.Sprintf("Magic(%#x)", uint32(cm))
}
}
type SbHeader struct {
Magic Magic `json:"magic,omitempty"` // magic number
Length uint32 `json:"length,omitempty"` // total length of SuperBlob
Count uint32 `json:"count,omitempty"` // number of index entries following
}
// SuperBlob object
type SuperBlob struct {
SbHeader
Index []BlobIndex // (count) entries
Blobs []Blob // followed by Blobs in no particular order as indicated by offsets in index
}
func NewSuperBlob(magic Magic) SuperBlob {
return SuperBlob{
SbHeader: SbHeader{
Magic: magic,
},
}
}
func (s *SuperBlob) AddBlob(typ SlotType, blob Blob) {
idx := BlobIndex{
Type: typ,
}
s.Index = append(s.Index, idx)
s.Blobs = append(s.Blobs, blob)
s.Count++
s.Length += uint32(binary.Size(BlobHeader{}.Magic)) + blob.Length + uint32(binary.Size(idx))
}
func (s *SuperBlob) GetBlob(typ SlotType) (Blob, error) {
for i, idx := range s.Index {
if idx.Type == typ {
return s.Blobs[i], nil
}
}
return Blob{}, fmt.Errorf("blob not found")
}
func (s *SuperBlob) Size() int {
sz := binary.Size(s.SbHeader) + binary.Size(BlobHeader{}) + binary.Size(s.Index)
for _, blob := range s.Blobs {
sz += binary.Size(blob.BlobHeader)
sz += len(blob.Data)
}
return sz
}
func (s *SuperBlob) Write(buf *bytes.Buffer, o binary.ByteOrder) error {
off := uint32(binary.Size(s.SbHeader) + binary.Size(s.Index))
for i := range s.Index {
s.Index[i].Offset = off
off += s.Blobs[i].Length
}
if err := binary.Write(buf, o, s.SbHeader); err != nil {
return fmt.Errorf("failed to write SuperBlob header to buffer: %v", err)
}
if err := binary.Write(buf, o, s.Index); err != nil {
return fmt.Errorf("failed to write SuperBlob indices to buffer: %v", err)
}
for _, blob := range s.Blobs {
if err := binary.Write(buf, o, blob.BlobHeader); err != nil {
return fmt.Errorf("failed to write blob header to superblob buffer: %v", err)
}
if err := binary.Write(buf, o, blob.Data); err != nil {
return fmt.Errorf("failed to write blob data to superblob buffer: %v", err)
}
}
return nil
}
type SlotType uint32
const (
CSSLOT_CODEDIRECTORY SlotType = 0
CSSLOT_INFOSLOT SlotType = 1 // Info.plist
CSSLOT_REQUIREMENTS SlotType = 2 // internal requirements
CSSLOT_RESOURCEDIR SlotType = 3 // resource directory
CSSLOT_APPLICATION SlotType = 4 // Application specific slot/Top-level directory list
CSSLOT_ENTITLEMENTS SlotType = 5 // embedded entitlement configuration
CSSLOT_REP_SPECIFIC SlotType = 6 // for use by disk images
CSSLOT_ENTITLEMENTS_DER SlotType = 7 // DER representation of entitlements plist
CSSLOT_LAUNCH_CONSTRAINT_SELF SlotType = 8
CSSLOT_LAUNCH_CONSTRAINT_PARENT SlotType = 9
CSSLOT_LAUNCH_CONSTRAINT_RESPONSIBLE SlotType = 10
CSSLOT_LIBRARY_CONSTRAINT SlotType = 11
CSSLOT_ALTERNATE_CODEDIRECTORIES SlotType = 0x1000 // Used for expressing a code directory using an alternate digest type.
CSSLOT_ALTERNATE_CODEDIRECTORIES1 SlotType = 0x1001 // Used for expressing a code directory using an alternate digest type.
CSSLOT_ALTERNATE_CODEDIRECTORIES2 SlotType = 0x1002 // Used for expressing a code directory using an alternate digest type.
CSSLOT_ALTERNATE_CODEDIRECTORIES3 SlotType = 0x1003 // Used for expressing a code directory using an alternate digest type.
CSSLOT_ALTERNATE_CODEDIRECTORIES4 SlotType = 0x1004 // Used for expressing a code directory using an alternate digest type.
CSSLOT_ALTERNATE_CODEDIRECTORY_MAX = 5
CSSLOT_ALTERNATE_CODEDIRECTORY_LIMIT = CSSLOT_ALTERNATE_CODEDIRECTORIES + CSSLOT_ALTERNATE_CODEDIRECTORY_MAX
CSSLOT_CMS_SIGNATURE SlotType = 0x10000 // CMS signature
CSSLOT_IDENTIFICATIONSLOT SlotType = 0x10001 // identification blob; used for detached signature
CSSLOT_TICKETSLOT SlotType = 0x10002 // Notarization ticket
)
func (c SlotType) String() string {
switch c {
case CSSLOT_CODEDIRECTORY:
return "CodeDirectory"
case CSSLOT_INFOSLOT:
return "Bound Info.plist"
case CSSLOT_REQUIREMENTS:
return "Requirements Blob"
case CSSLOT_RESOURCEDIR:
return "Resource Directory"
case CSSLOT_APPLICATION:
return "Application Specific"
case CSSLOT_ENTITLEMENTS:
return "Entitlements Plist"
case CSSLOT_REP_SPECIFIC:
return "DMG Specific"
case CSSLOT_ENTITLEMENTS_DER:
return "Entitlements ASN1/DER"
case CSSLOT_LAUNCH_CONSTRAINT_SELF:
return "Launch Constraint (self)"
case CSSLOT_LAUNCH_CONSTRAINT_PARENT:
return "Launch Constraint (parent)"
case CSSLOT_LAUNCH_CONSTRAINT_RESPONSIBLE:
return "Launch Constraint (responsible proc)"
case CSSLOT_LIBRARY_CONSTRAINT:
return "Library Constraint"
case CSSLOT_ALTERNATE_CODEDIRECTORIES:
return "Alternate CodeDirectories 0"
case CSSLOT_ALTERNATE_CODEDIRECTORIES1:
return "Alternate CodeDirectories 1"
case CSSLOT_ALTERNATE_CODEDIRECTORIES2:
return "Alternate CodeDirectories 2"
case CSSLOT_ALTERNATE_CODEDIRECTORIES3:
return "Alternate CodeDirectories 3"
case CSSLOT_ALTERNATE_CODEDIRECTORIES4:
return "Alternate CodeDirectories 4"
case CSSLOT_CMS_SIGNATURE:
return "CMS (RFC3852) signature"
case CSSLOT_IDENTIFICATIONSLOT:
return "IdentificationSlot"
case CSSLOT_TICKETSLOT:
return "TicketSlot"
default:
return fmt.Sprintf("Unknown SlotType: %d", c)
}
}
// BlobIndex object
type BlobIndex struct {
Type SlotType `json:"type,omitempty"` // type of entry
Offset uint32 `json:"offset,omitempty"` // offset of entry
}
type BlobHeader struct {
Magic Magic `json:"magic,omitempty"` // magic number
Length uint32 `json:"length,omitempty"` // total length of blob
}
// Blob object
type Blob struct {
BlobHeader
Data []byte // (length - sizeof(blob_header)) bytes
}
func NewBlob(magic Magic, data []byte) Blob {
return Blob{
BlobHeader: BlobHeader{
Magic: magic,
Length: uint32(binary.Size(BlobHeader{}) + len(data)),
},
Data: data,
}
}
func (b Blob) | () ([]byte, error) {
h := sha256.New()
if err := binary.Write(h, binary.BigEndian, b.BlobHeader); err != nil {
return nil, fmt.Errorf("failed to hash blob header: %v", err)
}
if err := binary.Write(h, binary.BigEndian, b.Data); err != nil {
return nil, fmt.Errorf("failed to hash blob header: %v", err)
}
return h.Sum(nil), nil
}
func (b Blob) Bytes() ([]byte, error) {
buf := new(bytes.Buffer)
if err := binary.Write(buf, binary.BigEndian, b.BlobHeader); err != nil {
return nil, fmt.Errorf("failed to write blob header to buffer: %v", err)
}
if err := binary.Write(buf, binary.BigEndian, b.Data); err != nil {
return nil, fmt.Errorf("failed to write blob data to buffer: %v", err)
}
return buf.Bytes(), nil
}
| Sha256Hash | identifier_name |
storage.go | package callosum
import (
"database/sql"
"fmt"
"log"
"sync"
_ "github.com/mattn/go-sqlite3" //sqllite DB driver import
)
//UserRow holds the data obtained from fetching a row from the `users` table.
type UserRow struct {
ID int64
ScreenName string
Description string
LastLookedAt string
LatestTweetID int64
LatestFriendID int64
LatestFollowerID int64
Protected int
Processed int
Accepted int
Blob []byte
}
//TweetRow holds the data obtained from fetching a row from the `tweets` table
type TweetRow struct {
TweetID int64
CreatedAt string
Language string
screenName string
tweet []byte
}
//Storage holds a open connection the the sqlite database
type Storage struct {
db *sql.DB
}
type queryArgs struct {
query string
args []interface{}
}
var mutex = &sync.Mutex{}
var chQueryArgs chan *queryArgs
var db *sql.DB
func executeStatements() {
for {
if qa, ok := <-chQueryArgs; ok {
_, err := db.Exec(qa.query, qa.args...)
if err != nil {
log.Fatal(err)
}
}
}
}
//NewStorage creates returns a new Storage object.
//DBName is the name of the sqllite database file where
//all the users and tweets data will be collected. NewStorage
//create the sqlite file, if it is not already present and creates
//the tables. if the database is present, opens a connection.
func NewStorage(DBName string) *Storage {
s := &Storage{}
mutex.Lock()
if db == nil {
s.checkMakeDatabase(DBName)
db = s.db
if chQueryArgs == nil {
chQueryArgs = make(chan *queryArgs, 100)
go executeStatements()
}
s.setupTables()
}
mutex.Unlock()
return s
}
func (s *Storage) setupTables() {
tableName := "users"
s.makeTable(tableName, fmt.Sprintf(`
CREATE TABLE IF NOT EXISTS %s (
user_id INTEGER PRIMARY KEY,
screen_name TEXT CONSTRAINT uniquescreenname UNIQUE,
description TEXT CONSTRAINT defaultdesc DEFAULT "",
last_looked_at INTEGER CONSTRAINT defaultlastlookedat DEFAULT 0,
latest_tweet_id INTEGER CONSTRAINT defaultlatesttweetid DEFAULT 0,
latest_following_id INTEGER CONSTRAINT defaultlatestfollowingid DEFAULT 0,
latest_follower_id INTEGER CONSTRAINT defaultlatestfollowerid DEFAULT 0,
protected INTEGER CONSTRAINT defaultprotected DEFAULT 0,
processed INTEGER CONSTRAINT defaultprocessed DEFAULT 0,
accepted INTEGER CONSTRAINT defaultaccepted DEFAULT 0,
blob BLOB)`, tableName))
tableName = "tweets"
s.makeTable(tableName, fmt.Sprintf(`
CREATE TABLE IF NOT EXISTS %s(tweet_id INTEGER PRIMARY KEY,
created_at INTEGER,
langugage TEXT,
user_id INTEGER,
desc TEXT,
blob BLOB
-- FOREIGN KEY(screen_name) REFERENCES users(screen_name)
)`, tableName))
tableName = "screennames"
s.makeTable(tableName, fmt.Sprintf(`
CREATE TABLE IF NOT EXISTS %s(screen_name TEXT PRIMARY KEY,
processed INTEGER CONSTRAINT defaultprocessed DEFAULT 0)`, tableName))
tableName = "userids"
s.makeTable(tableName, fmt.Sprintf(`
CREATE TABLE IF NOT EXISTS %s(user_id INTEGER PRIMARY KEY,
processed INTEGER CONSTRAINT defaultprocessed DEFAULT 0)`, tableName))
tableName = "followers"
s.makeTable(tableName, fmt.Sprintf(`
CREATE TABLE IF NOT EXISTS %s(user_id INTEGER,
follower_id INTEGER,
CONSTRAINT uniquemap UNIQUE (user_id, follower_id))`, tableName))
tableName = "following"
s.makeTable(tableName, fmt.Sprintf(`
CREATE TABLE IF NOT EXISTS %s(user_id INTEGER,
following_id INTEGER,
CONSTRAINT uniquemap UNIQUE (user_id, following_id))`, tableName))
}
func (s *Storage) checkMakeDatabase(DBName string) *sql.DB {
var db *sql.DB
db, err := sql.Open("sqlite3", DBName+".db") //?cache=shared&mode=rwc")
if err != nil {
log.Fatal(err)
}
db.Exec("PRAGMA journal_mode=WAL;")
s.db = db
return db
}
func (s *Storage) makeTable(tableName, sqlStmt string) {
_, err := s.db.Exec(sqlStmt)
if err != nil |
}
//StoreScreenName inserts the given screenName into the `screenames` table
func (s *Storage) StoreScreenName(screenName string) {
_, err := s.db.Exec("INSERT OR IGNORE INTO screennames (screen_name) VALUES (?)", screenName)
if err != nil {
log.Fatal(err)
}
}
//StoreUser inserts the Twitter user details into the `users` table.
func (s *Storage) StoreUser(userID int64, screenName, description string, protected bool, blob []byte) {
chQueryArgs <- &queryArgs{"INSERT OR IGNORE INTO users (user_id, screen_name, description, protected, blob) VALUES (?, ?, ?, ?, ?)",
[]interface{}{userID, screenName, description, protected, blob}}
}
//StoreTweet inserts the tweet details into the `tweets` table.
func (s *Storage) StoreTweet(tweetID, createdAt, userID int64, language, desc string, blob []byte) {
chQueryArgs <- &queryArgs{"INSERT OR IGNORE INTO tweets (tweet_id, created_at, langugage, user_id, desc, blob) VALUES (?, ?, ?, ?, ?, ?)",
[]interface{}{tweetID, createdAt, language, userID, desc, blob}}
}
func (s *Storage) storeFriendOrFollower(userID, friendOrFollowerID int64, query string) {
chQueryArgs <- &queryArgs{query, []interface{}{userID, friendOrFollowerID}}
}
//StoreFriends stores the mapping between the userID and the IDs of
//users the follow into the `following` table.
func (s *Storage) StoreFriends(userID int64, friendIDs []int64) {
for _, friendID := range friendIDs {
s.storeFriendOrFollower(userID, friendID, "INSERT OR IGNORE INTO following (user_id, following_id) VALUES (?, ?)")
}
}
//StoreFollowers stores the mapping between the userID and the IDs of
//their followes into the `followers` table.
func (s *Storage) StoreFollowers(userID int64, followerIDs []int64) {
for _, followerID := range followerIDs {
s.storeFriendOrFollower(userID, followerID, "INSERT OR IGNORE INTO followers (user_id, follower_id) VALUES (?, ?)")
}
}
func (s *Storage) storeUserID(userID int64) {
chQueryArgs <- &queryArgs{"INSERT OR IGNORE INTO userids (user_id) VALUES (?)", []interface{}{userID}}
}
//StoreUserIDs stores the given userIDs in the `userids` table
func (s *Storage) StoreUserIDs(userIDs []int64) {
for _, userID := range userIDs {
s.storeUserID(userID)
}
}
func (s *Storage) queryScreenNamesOrIDs(query string, results interface{}) {
rows, err := s.db.Query(query)
if err != nil {
log.Fatal(err)
}
defer rows.Close()
for rows.Next() {
switch x := results.(type) {
case *[]string:
var item string
rows.Scan(&item)
*x = append(*x, item)
case *[]int64:
var item int64
rows.Scan(&item)
*x = append(*x, item)
default:
log.Fatal("results type must be *[]string or *[]int64")
}
}
}
//GetScreenNames gets Twitter handles from the `screenames` table that have already been processed
func (s *Storage) GetScreenNames() []string {
var results []string
s.queryScreenNamesOrIDs("SELECT screen_name from screennames where processed=1", &results)
return results
}
//GetUnprocessedScreenNames gets Twitter handles from the `screenames` table that are yet to be processed
func (s *Storage) GetUnprocessedScreenNames() []string {
var results []string
s.queryScreenNamesOrIDs("SELECT screen_name from screennames where processed=0", &results)
return results
}
//GetUserIDs gets user ids from the `userids` table that have already been processed
func (s *Storage) GetUserIDs() []int64 {
var results []int64
s.queryScreenNamesOrIDs("SELECT user_id from userids where processed=1", &results)
return results
}
//GetUnprocessedUserIDs gets user ids from the `userids` table that are yet to be processed
func (s *Storage) GetUnprocessedUserIDs() []int64 {
var results []int64
s.queryScreenNamesOrIDs("SELECT user_id from userids where processed=0", &results)
return results
}
//GetAcceptedUserIDs gets user ids from the `users` table for whom the user filtering
//function has marked them as accepted for further processing
func (s *Storage) GetAcceptedUserIDs() []int64 {
var results []int64
s.queryScreenNamesOrIDs("SELECT user_id from users where accepted=1", &results)
return results
}
//GetUserByScreenNameOrID gets the UserRow for the given screenName or ID
func (s *Storage) GetUserByScreenNameOrID(screenNameOrID interface{}) *UserRow {
var u UserRow
query := `SELECT user_id,
screen_name,
description,
last_looked_at,
latest_tweet_id,
latest_following_id,
latest_follower_id,
protected,
processed,
accepted,
blob
FROM users
WHERE %s=?`
var row *sql.Row
switch x := screenNameOrID.(type) {
case int64:
query = fmt.Sprintf(query, "user_id")
row = s.db.QueryRow(query, x)
case string:
query = fmt.Sprintf(query, "screen_name")
row = s.db.QueryRow(query, x)
}
err := row.Scan(
&u.ID,
&u.ScreenName,
&u.Description,
&u.LastLookedAt,
&u.LatestTweetID,
&u.LatestFriendID,
&u.LatestFollowerID,
&u.Protected,
&u.Processed,
&u.Accepted,
&u.Blob)
switch {
case err == sql.ErrNoRows:
return nil
case err != nil:
log.Fatal(err)
}
return &u
}
//MarkUserLatestTweetsCollected updates the `last_looked_at` timestamp and the `latest_tweet_id` for
//the given user in the `users` table
func (s *Storage) MarkUserLatestTweetsCollected(userID int64, lastLookedAt, latestTweetID int64) {
chQueryArgs <- &queryArgs{"UPDATE users SET last_looked_at=?, latest_tweet_id=? where user_id=?", []interface{}{lastLookedAt, latestTweetID, userID}}
}
//MarkUserLatestFriendsCollected sets the `latest_following_id` to the latest id of the users given userID
//is following
func (s *Storage) MarkUserLatestFriendsCollected(userID, latestFriendID int64) {
chQueryArgs <- &queryArgs{"UPDATE users SET latest_following_id=? where user_id=?", []interface{}{latestFriendID, userID}}
}
//MarkUserLatestFollowersCollected sets the `latest_follower_id` to the latest id of the followers collected
func (s *Storage) MarkUserLatestFollowersCollected(userID, latestFollowerID int64) {
chQueryArgs <- &queryArgs{"UPDATE users SET latest_follower_id=? where user_id=?", []interface{}{latestFollowerID, userID}}
}
//MarkUserProcessed sets the `processed` and the `accepted` flags for the user in the `users` table
func (s *Storage) MarkUserProcessed(ID int64, processed, accepted bool) {
chQueryArgs <- &queryArgs{"UPDATE users SET processed=?, accepted=? where user_id=?", []interface{}{processed, accepted, ID}}
}
//MarkUserIDProcessed sets the `processed` flag for the given user id in the `userids` table
func (s *Storage) MarkUserIDProcessed(ID int64, processed bool) {
chQueryArgs <- &queryArgs{"UPDATE userids SET processed=? where user_id=?", []interface{}{processed, ID}}
}
//MarkScreenNameProcessed sets the `processed` flag for the given screenName in the `screennames` table
func (s *Storage) MarkScreenNameProcessed(screenName string, processed bool) {
chQueryArgs <- &queryArgs{"UPDATE screennames SET processed=? where screen_name=?", []interface{}{processed, screenName}}
}
| {
log.Fatalf("%q: %s\n", err, sqlStmt)
return
} | conditional_block |
storage.go | package callosum
import (
"database/sql"
"fmt"
"log"
"sync"
_ "github.com/mattn/go-sqlite3" //sqllite DB driver import
)
//UserRow holds the data obtained from fetching a row from the `users` table.
type UserRow struct {
ID int64
ScreenName string
Description string
LastLookedAt string
LatestTweetID int64
LatestFriendID int64
LatestFollowerID int64
Protected int
Processed int
Accepted int
Blob []byte
}
//TweetRow holds the data obtained from fetching a row from the `tweets` table
type TweetRow struct {
TweetID int64
CreatedAt string
Language string
screenName string
tweet []byte
}
//Storage holds a open connection the the sqlite database
type Storage struct {
db *sql.DB
}
type queryArgs struct {
query string
args []interface{}
}
var mutex = &sync.Mutex{}
var chQueryArgs chan *queryArgs
var db *sql.DB
func executeStatements() {
for {
if qa, ok := <-chQueryArgs; ok {
_, err := db.Exec(qa.query, qa.args...)
if err != nil {
log.Fatal(err)
}
}
}
}
//NewStorage creates returns a new Storage object.
//DBName is the name of the sqllite database file where
//all the users and tweets data will be collected. NewStorage
//create the sqlite file, if it is not already present and creates
//the tables. if the database is present, opens a connection.
func NewStorage(DBName string) *Storage {
s := &Storage{}
mutex.Lock()
if db == nil {
s.checkMakeDatabase(DBName)
db = s.db
if chQueryArgs == nil {
chQueryArgs = make(chan *queryArgs, 100)
go executeStatements()
}
| }
mutex.Unlock()
return s
}
func (s *Storage) setupTables() {
tableName := "users"
s.makeTable(tableName, fmt.Sprintf(`
CREATE TABLE IF NOT EXISTS %s (
user_id INTEGER PRIMARY KEY,
screen_name TEXT CONSTRAINT uniquescreenname UNIQUE,
description TEXT CONSTRAINT defaultdesc DEFAULT "",
last_looked_at INTEGER CONSTRAINT defaultlastlookedat DEFAULT 0,
latest_tweet_id INTEGER CONSTRAINT defaultlatesttweetid DEFAULT 0,
latest_following_id INTEGER CONSTRAINT defaultlatestfollowingid DEFAULT 0,
latest_follower_id INTEGER CONSTRAINT defaultlatestfollowerid DEFAULT 0,
protected INTEGER CONSTRAINT defaultprotected DEFAULT 0,
processed INTEGER CONSTRAINT defaultprocessed DEFAULT 0,
accepted INTEGER CONSTRAINT defaultaccepted DEFAULT 0,
blob BLOB)`, tableName))
tableName = "tweets"
s.makeTable(tableName, fmt.Sprintf(`
CREATE TABLE IF NOT EXISTS %s(tweet_id INTEGER PRIMARY KEY,
created_at INTEGER,
langugage TEXT,
user_id INTEGER,
desc TEXT,
blob BLOB
-- FOREIGN KEY(screen_name) REFERENCES users(screen_name)
)`, tableName))
tableName = "screennames"
s.makeTable(tableName, fmt.Sprintf(`
CREATE TABLE IF NOT EXISTS %s(screen_name TEXT PRIMARY KEY,
processed INTEGER CONSTRAINT defaultprocessed DEFAULT 0)`, tableName))
tableName = "userids"
s.makeTable(tableName, fmt.Sprintf(`
CREATE TABLE IF NOT EXISTS %s(user_id INTEGER PRIMARY KEY,
processed INTEGER CONSTRAINT defaultprocessed DEFAULT 0)`, tableName))
tableName = "followers"
s.makeTable(tableName, fmt.Sprintf(`
CREATE TABLE IF NOT EXISTS %s(user_id INTEGER,
follower_id INTEGER,
CONSTRAINT uniquemap UNIQUE (user_id, follower_id))`, tableName))
tableName = "following"
s.makeTable(tableName, fmt.Sprintf(`
CREATE TABLE IF NOT EXISTS %s(user_id INTEGER,
following_id INTEGER,
CONSTRAINT uniquemap UNIQUE (user_id, following_id))`, tableName))
}
func (s *Storage) checkMakeDatabase(DBName string) *sql.DB {
var db *sql.DB
db, err := sql.Open("sqlite3", DBName+".db") //?cache=shared&mode=rwc")
if err != nil {
log.Fatal(err)
}
db.Exec("PRAGMA journal_mode=WAL;")
s.db = db
return db
}
func (s *Storage) makeTable(tableName, sqlStmt string) {
_, err := s.db.Exec(sqlStmt)
if err != nil {
log.Fatalf("%q: %s\n", err, sqlStmt)
return
}
}
//StoreScreenName inserts the given screenName into the `screenames` table
func (s *Storage) StoreScreenName(screenName string) {
_, err := s.db.Exec("INSERT OR IGNORE INTO screennames (screen_name) VALUES (?)", screenName)
if err != nil {
log.Fatal(err)
}
}
//StoreUser inserts the Twitter user details into the `users` table.
func (s *Storage) StoreUser(userID int64, screenName, description string, protected bool, blob []byte) {
chQueryArgs <- &queryArgs{"INSERT OR IGNORE INTO users (user_id, screen_name, description, protected, blob) VALUES (?, ?, ?, ?, ?)",
[]interface{}{userID, screenName, description, protected, blob}}
}
//StoreTweet inserts the tweet details into the `tweets` table.
func (s *Storage) StoreTweet(tweetID, createdAt, userID int64, language, desc string, blob []byte) {
chQueryArgs <- &queryArgs{"INSERT OR IGNORE INTO tweets (tweet_id, created_at, langugage, user_id, desc, blob) VALUES (?, ?, ?, ?, ?, ?)",
[]interface{}{tweetID, createdAt, language, userID, desc, blob}}
}
func (s *Storage) storeFriendOrFollower(userID, friendOrFollowerID int64, query string) {
chQueryArgs <- &queryArgs{query, []interface{}{userID, friendOrFollowerID}}
}
//StoreFriends stores the mapping between the userID and the IDs of
//users the follow into the `following` table.
func (s *Storage) StoreFriends(userID int64, friendIDs []int64) {
for _, friendID := range friendIDs {
s.storeFriendOrFollower(userID, friendID, "INSERT OR IGNORE INTO following (user_id, following_id) VALUES (?, ?)")
}
}
//StoreFollowers stores the mapping between the userID and the IDs of
//their followes into the `followers` table.
func (s *Storage) StoreFollowers(userID int64, followerIDs []int64) {
for _, followerID := range followerIDs {
s.storeFriendOrFollower(userID, followerID, "INSERT OR IGNORE INTO followers (user_id, follower_id) VALUES (?, ?)")
}
}
func (s *Storage) storeUserID(userID int64) {
chQueryArgs <- &queryArgs{"INSERT OR IGNORE INTO userids (user_id) VALUES (?)", []interface{}{userID}}
}
//StoreUserIDs stores the given userIDs in the `userids` table
func (s *Storage) StoreUserIDs(userIDs []int64) {
for _, userID := range userIDs {
s.storeUserID(userID)
}
}
func (s *Storage) queryScreenNamesOrIDs(query string, results interface{}) {
rows, err := s.db.Query(query)
if err != nil {
log.Fatal(err)
}
defer rows.Close()
for rows.Next() {
switch x := results.(type) {
case *[]string:
var item string
rows.Scan(&item)
*x = append(*x, item)
case *[]int64:
var item int64
rows.Scan(&item)
*x = append(*x, item)
default:
log.Fatal("results type must be *[]string or *[]int64")
}
}
}
//GetScreenNames gets Twitter handles from the `screenames` table that have already been processed
func (s *Storage) GetScreenNames() []string {
var results []string
s.queryScreenNamesOrIDs("SELECT screen_name from screennames where processed=1", &results)
return results
}
//GetUnprocessedScreenNames gets Twitter handles from the `screenames` table that are yet to be processed
func (s *Storage) GetUnprocessedScreenNames() []string {
var results []string
s.queryScreenNamesOrIDs("SELECT screen_name from screennames where processed=0", &results)
return results
}
//GetUserIDs gets user ids from the `userids` table that have already been processed
func (s *Storage) GetUserIDs() []int64 {
var results []int64
s.queryScreenNamesOrIDs("SELECT user_id from userids where processed=1", &results)
return results
}
//GetUnprocessedUserIDs gets user ids from the `userids` table that are yet to be processed
func (s *Storage) GetUnprocessedUserIDs() []int64 {
var results []int64
s.queryScreenNamesOrIDs("SELECT user_id from userids where processed=0", &results)
return results
}
//GetAcceptedUserIDs gets user ids from the `users` table for whom the user filtering
//function has marked them as accepted for further processing
func (s *Storage) GetAcceptedUserIDs() []int64 {
var results []int64
s.queryScreenNamesOrIDs("SELECT user_id from users where accepted=1", &results)
return results
}
//GetUserByScreenNameOrID gets the UserRow for the given screenName or ID
func (s *Storage) GetUserByScreenNameOrID(screenNameOrID interface{}) *UserRow {
var u UserRow
query := `SELECT user_id,
screen_name,
description,
last_looked_at,
latest_tweet_id,
latest_following_id,
latest_follower_id,
protected,
processed,
accepted,
blob
FROM users
WHERE %s=?`
var row *sql.Row
switch x := screenNameOrID.(type) {
case int64:
query = fmt.Sprintf(query, "user_id")
row = s.db.QueryRow(query, x)
case string:
query = fmt.Sprintf(query, "screen_name")
row = s.db.QueryRow(query, x)
}
err := row.Scan(
&u.ID,
&u.ScreenName,
&u.Description,
&u.LastLookedAt,
&u.LatestTweetID,
&u.LatestFriendID,
&u.LatestFollowerID,
&u.Protected,
&u.Processed,
&u.Accepted,
&u.Blob)
switch {
case err == sql.ErrNoRows:
return nil
case err != nil:
log.Fatal(err)
}
return &u
}
//MarkUserLatestTweetsCollected updates the `last_looked_at` timestamp and the `latest_tweet_id` for
//the given user in the `users` table
func (s *Storage) MarkUserLatestTweetsCollected(userID int64, lastLookedAt, latestTweetID int64) {
chQueryArgs <- &queryArgs{"UPDATE users SET last_looked_at=?, latest_tweet_id=? where user_id=?", []interface{}{lastLookedAt, latestTweetID, userID}}
}
//MarkUserLatestFriendsCollected sets the `latest_following_id` to the latest id of the users given userID
//is following
func (s *Storage) MarkUserLatestFriendsCollected(userID, latestFriendID int64) {
chQueryArgs <- &queryArgs{"UPDATE users SET latest_following_id=? where user_id=?", []interface{}{latestFriendID, userID}}
}
//MarkUserLatestFollowersCollected sets the `latest_follower_id` to the latest id of the followers collected
func (s *Storage) MarkUserLatestFollowersCollected(userID, latestFollowerID int64) {
chQueryArgs <- &queryArgs{"UPDATE users SET latest_follower_id=? where user_id=?", []interface{}{latestFollowerID, userID}}
}
//MarkUserProcessed sets the `processed` and the `accepted` flags for the user in the `users` table
func (s *Storage) MarkUserProcessed(ID int64, processed, accepted bool) {
chQueryArgs <- &queryArgs{"UPDATE users SET processed=?, accepted=? where user_id=?", []interface{}{processed, accepted, ID}}
}
//MarkUserIDProcessed sets the `processed` flag for the given user id in the `userids` table
func (s *Storage) MarkUserIDProcessed(ID int64, processed bool) {
chQueryArgs <- &queryArgs{"UPDATE userids SET processed=? where user_id=?", []interface{}{processed, ID}}
}
//MarkScreenNameProcessed sets the `processed` flag for the given screenName in the `screennames` table
func (s *Storage) MarkScreenNameProcessed(screenName string, processed bool) {
chQueryArgs <- &queryArgs{"UPDATE screennames SET processed=? where screen_name=?", []interface{}{processed, screenName}}
} | s.setupTables() | random_line_split |
storage.go | package callosum
import (
"database/sql"
"fmt"
"log"
"sync"
_ "github.com/mattn/go-sqlite3" //sqllite DB driver import
)
//UserRow holds the data obtained from fetching a row from the `users` table.
type UserRow struct {
ID int64
ScreenName string
Description string
LastLookedAt string
LatestTweetID int64
LatestFriendID int64
LatestFollowerID int64
Protected int
Processed int
Accepted int
Blob []byte
}
//TweetRow holds the data obtained from fetching a row from the `tweets` table
type TweetRow struct {
TweetID int64
CreatedAt string
Language string
screenName string
tweet []byte
}
//Storage holds a open connection the the sqlite database
type Storage struct {
db *sql.DB
}
type queryArgs struct {
query string
args []interface{}
}
var mutex = &sync.Mutex{}
var chQueryArgs chan *queryArgs
var db *sql.DB
func executeStatements() {
for {
if qa, ok := <-chQueryArgs; ok {
_, err := db.Exec(qa.query, qa.args...)
if err != nil {
log.Fatal(err)
}
}
}
}
//NewStorage creates returns a new Storage object.
//DBName is the name of the sqllite database file where
//all the users and tweets data will be collected. NewStorage
//create the sqlite file, if it is not already present and creates
//the tables. if the database is present, opens a connection.
func NewStorage(DBName string) *Storage {
s := &Storage{}
mutex.Lock()
if db == nil {
s.checkMakeDatabase(DBName)
db = s.db
if chQueryArgs == nil {
chQueryArgs = make(chan *queryArgs, 100)
go executeStatements()
}
s.setupTables()
}
mutex.Unlock()
return s
}
func (s *Storage) setupTables() {
tableName := "users"
s.makeTable(tableName, fmt.Sprintf(`
CREATE TABLE IF NOT EXISTS %s (
user_id INTEGER PRIMARY KEY,
screen_name TEXT CONSTRAINT uniquescreenname UNIQUE,
description TEXT CONSTRAINT defaultdesc DEFAULT "",
last_looked_at INTEGER CONSTRAINT defaultlastlookedat DEFAULT 0,
latest_tweet_id INTEGER CONSTRAINT defaultlatesttweetid DEFAULT 0,
latest_following_id INTEGER CONSTRAINT defaultlatestfollowingid DEFAULT 0,
latest_follower_id INTEGER CONSTRAINT defaultlatestfollowerid DEFAULT 0,
protected INTEGER CONSTRAINT defaultprotected DEFAULT 0,
processed INTEGER CONSTRAINT defaultprocessed DEFAULT 0,
accepted INTEGER CONSTRAINT defaultaccepted DEFAULT 0,
blob BLOB)`, tableName))
tableName = "tweets"
s.makeTable(tableName, fmt.Sprintf(`
CREATE TABLE IF NOT EXISTS %s(tweet_id INTEGER PRIMARY KEY,
created_at INTEGER,
langugage TEXT,
user_id INTEGER,
desc TEXT,
blob BLOB
-- FOREIGN KEY(screen_name) REFERENCES users(screen_name)
)`, tableName))
tableName = "screennames"
s.makeTable(tableName, fmt.Sprintf(`
CREATE TABLE IF NOT EXISTS %s(screen_name TEXT PRIMARY KEY,
processed INTEGER CONSTRAINT defaultprocessed DEFAULT 0)`, tableName))
tableName = "userids"
s.makeTable(tableName, fmt.Sprintf(`
CREATE TABLE IF NOT EXISTS %s(user_id INTEGER PRIMARY KEY,
processed INTEGER CONSTRAINT defaultprocessed DEFAULT 0)`, tableName))
tableName = "followers"
s.makeTable(tableName, fmt.Sprintf(`
CREATE TABLE IF NOT EXISTS %s(user_id INTEGER,
follower_id INTEGER,
CONSTRAINT uniquemap UNIQUE (user_id, follower_id))`, tableName))
tableName = "following"
s.makeTable(tableName, fmt.Sprintf(`
CREATE TABLE IF NOT EXISTS %s(user_id INTEGER,
following_id INTEGER,
CONSTRAINT uniquemap UNIQUE (user_id, following_id))`, tableName))
}
func (s *Storage) checkMakeDatabase(DBName string) *sql.DB {
var db *sql.DB
db, err := sql.Open("sqlite3", DBName+".db") //?cache=shared&mode=rwc")
if err != nil {
log.Fatal(err)
}
db.Exec("PRAGMA journal_mode=WAL;")
s.db = db
return db
}
func (s *Storage) makeTable(tableName, sqlStmt string) {
_, err := s.db.Exec(sqlStmt)
if err != nil {
log.Fatalf("%q: %s\n", err, sqlStmt)
return
}
}
//StoreScreenName inserts the given screenName into the `screenames` table
func (s *Storage) StoreScreenName(screenName string) {
_, err := s.db.Exec("INSERT OR IGNORE INTO screennames (screen_name) VALUES (?)", screenName)
if err != nil {
log.Fatal(err)
}
}
//StoreUser inserts the Twitter user details into the `users` table.
func (s *Storage) StoreUser(userID int64, screenName, description string, protected bool, blob []byte) {
chQueryArgs <- &queryArgs{"INSERT OR IGNORE INTO users (user_id, screen_name, description, protected, blob) VALUES (?, ?, ?, ?, ?)",
[]interface{}{userID, screenName, description, protected, blob}}
}
//StoreTweet inserts the tweet details into the `tweets` table.
func (s *Storage) StoreTweet(tweetID, createdAt, userID int64, language, desc string, blob []byte) {
chQueryArgs <- &queryArgs{"INSERT OR IGNORE INTO tweets (tweet_id, created_at, langugage, user_id, desc, blob) VALUES (?, ?, ?, ?, ?, ?)",
[]interface{}{tweetID, createdAt, language, userID, desc, blob}}
}
func (s *Storage) storeFriendOrFollower(userID, friendOrFollowerID int64, query string) {
chQueryArgs <- &queryArgs{query, []interface{}{userID, friendOrFollowerID}}
}
//StoreFriends stores the mapping between the userID and the IDs of
//users the follow into the `following` table.
func (s *Storage) StoreFriends(userID int64, friendIDs []int64) {
for _, friendID := range friendIDs {
s.storeFriendOrFollower(userID, friendID, "INSERT OR IGNORE INTO following (user_id, following_id) VALUES (?, ?)")
}
}
//StoreFollowers stores the mapping between the userID and the IDs of
//their followes into the `followers` table.
func (s *Storage) StoreFollowers(userID int64, followerIDs []int64) {
for _, followerID := range followerIDs {
s.storeFriendOrFollower(userID, followerID, "INSERT OR IGNORE INTO followers (user_id, follower_id) VALUES (?, ?)")
}
}
func (s *Storage) storeUserID(userID int64) {
chQueryArgs <- &queryArgs{"INSERT OR IGNORE INTO userids (user_id) VALUES (?)", []interface{}{userID}}
}
//StoreUserIDs stores the given userIDs in the `userids` table
func (s *Storage) StoreUserIDs(userIDs []int64) {
for _, userID := range userIDs {
s.storeUserID(userID)
}
}
func (s *Storage) queryScreenNamesOrIDs(query string, results interface{}) {
rows, err := s.db.Query(query)
if err != nil {
log.Fatal(err)
}
defer rows.Close()
for rows.Next() {
switch x := results.(type) {
case *[]string:
var item string
rows.Scan(&item)
*x = append(*x, item)
case *[]int64:
var item int64
rows.Scan(&item)
*x = append(*x, item)
default:
log.Fatal("results type must be *[]string or *[]int64")
}
}
}
//GetScreenNames gets Twitter handles from the `screenames` table that have already been processed
func (s *Storage) GetScreenNames() []string {
var results []string
s.queryScreenNamesOrIDs("SELECT screen_name from screennames where processed=1", &results)
return results
}
//GetUnprocessedScreenNames gets Twitter handles from the `screenames` table that are yet to be processed
func (s *Storage) GetUnprocessedScreenNames() []string {
var results []string
s.queryScreenNamesOrIDs("SELECT screen_name from screennames where processed=0", &results)
return results
}
//GetUserIDs gets user ids from the `userids` table that have already been processed
func (s *Storage) GetUserIDs() []int64 {
var results []int64
s.queryScreenNamesOrIDs("SELECT user_id from userids where processed=1", &results)
return results
}
//GetUnprocessedUserIDs gets user ids from the `userids` table that are yet to be processed
func (s *Storage) GetUnprocessedUserIDs() []int64 {
var results []int64
s.queryScreenNamesOrIDs("SELECT user_id from userids where processed=0", &results)
return results
}
//GetAcceptedUserIDs gets user ids from the `users` table for whom the user filtering
//function has marked them as accepted for further processing
func (s *Storage) GetAcceptedUserIDs() []int64 {
var results []int64
s.queryScreenNamesOrIDs("SELECT user_id from users where accepted=1", &results)
return results
}
//GetUserByScreenNameOrID gets the UserRow for the given screenName or ID
func (s *Storage) GetUserByScreenNameOrID(screenNameOrID interface{}) *UserRow {
var u UserRow
query := `SELECT user_id,
screen_name,
description,
last_looked_at,
latest_tweet_id,
latest_following_id,
latest_follower_id,
protected,
processed,
accepted,
blob
FROM users
WHERE %s=?`
var row *sql.Row
switch x := screenNameOrID.(type) {
case int64:
query = fmt.Sprintf(query, "user_id")
row = s.db.QueryRow(query, x)
case string:
query = fmt.Sprintf(query, "screen_name")
row = s.db.QueryRow(query, x)
}
err := row.Scan(
&u.ID,
&u.ScreenName,
&u.Description,
&u.LastLookedAt,
&u.LatestTweetID,
&u.LatestFriendID,
&u.LatestFollowerID,
&u.Protected,
&u.Processed,
&u.Accepted,
&u.Blob)
switch {
case err == sql.ErrNoRows:
return nil
case err != nil:
log.Fatal(err)
}
return &u
}
//MarkUserLatestTweetsCollected updates the `last_looked_at` timestamp and the `latest_tweet_id` for
//the given user in the `users` table
func (s *Storage) | (userID int64, lastLookedAt, latestTweetID int64) {
chQueryArgs <- &queryArgs{"UPDATE users SET last_looked_at=?, latest_tweet_id=? where user_id=?", []interface{}{lastLookedAt, latestTweetID, userID}}
}
//MarkUserLatestFriendsCollected sets the `latest_following_id` to the latest id of the users given userID
//is following
func (s *Storage) MarkUserLatestFriendsCollected(userID, latestFriendID int64) {
chQueryArgs <- &queryArgs{"UPDATE users SET latest_following_id=? where user_id=?", []interface{}{latestFriendID, userID}}
}
//MarkUserLatestFollowersCollected sets the `latest_follower_id` to the latest id of the followers collected
func (s *Storage) MarkUserLatestFollowersCollected(userID, latestFollowerID int64) {
chQueryArgs <- &queryArgs{"UPDATE users SET latest_follower_id=? where user_id=?", []interface{}{latestFollowerID, userID}}
}
//MarkUserProcessed sets the `processed` and the `accepted` flags for the user in the `users` table
func (s *Storage) MarkUserProcessed(ID int64, processed, accepted bool) {
chQueryArgs <- &queryArgs{"UPDATE users SET processed=?, accepted=? where user_id=?", []interface{}{processed, accepted, ID}}
}
//MarkUserIDProcessed sets the `processed` flag for the given user id in the `userids` table
func (s *Storage) MarkUserIDProcessed(ID int64, processed bool) {
chQueryArgs <- &queryArgs{"UPDATE userids SET processed=? where user_id=?", []interface{}{processed, ID}}
}
//MarkScreenNameProcessed sets the `processed` flag for the given screenName in the `screennames` table
func (s *Storage) MarkScreenNameProcessed(screenName string, processed bool) {
chQueryArgs <- &queryArgs{"UPDATE screennames SET processed=? where screen_name=?", []interface{}{processed, screenName}}
}
| MarkUserLatestTweetsCollected | identifier_name |
storage.go | package callosum
import (
"database/sql"
"fmt"
"log"
"sync"
_ "github.com/mattn/go-sqlite3" //sqllite DB driver import
)
//UserRow holds the data obtained from fetching a row from the `users` table.
type UserRow struct {
ID int64
ScreenName string
Description string
LastLookedAt string
LatestTweetID int64
LatestFriendID int64
LatestFollowerID int64
Protected int
Processed int
Accepted int
Blob []byte
}
//TweetRow holds the data obtained from fetching a row from the `tweets` table
type TweetRow struct {
TweetID int64
CreatedAt string
Language string
screenName string
tweet []byte
}
//Storage holds a open connection the the sqlite database
type Storage struct {
db *sql.DB
}
type queryArgs struct {
query string
args []interface{}
}
var mutex = &sync.Mutex{}
var chQueryArgs chan *queryArgs
var db *sql.DB
func executeStatements() {
for {
if qa, ok := <-chQueryArgs; ok {
_, err := db.Exec(qa.query, qa.args...)
if err != nil {
log.Fatal(err)
}
}
}
}
//NewStorage creates returns a new Storage object.
//DBName is the name of the sqllite database file where
//all the users and tweets data will be collected. NewStorage
//create the sqlite file, if it is not already present and creates
//the tables. if the database is present, opens a connection.
func NewStorage(DBName string) *Storage {
s := &Storage{}
mutex.Lock()
if db == nil {
s.checkMakeDatabase(DBName)
db = s.db
if chQueryArgs == nil {
chQueryArgs = make(chan *queryArgs, 100)
go executeStatements()
}
s.setupTables()
}
mutex.Unlock()
return s
}
func (s *Storage) setupTables() {
tableName := "users"
s.makeTable(tableName, fmt.Sprintf(`
CREATE TABLE IF NOT EXISTS %s (
user_id INTEGER PRIMARY KEY,
screen_name TEXT CONSTRAINT uniquescreenname UNIQUE,
description TEXT CONSTRAINT defaultdesc DEFAULT "",
last_looked_at INTEGER CONSTRAINT defaultlastlookedat DEFAULT 0,
latest_tweet_id INTEGER CONSTRAINT defaultlatesttweetid DEFAULT 0,
latest_following_id INTEGER CONSTRAINT defaultlatestfollowingid DEFAULT 0,
latest_follower_id INTEGER CONSTRAINT defaultlatestfollowerid DEFAULT 0,
protected INTEGER CONSTRAINT defaultprotected DEFAULT 0,
processed INTEGER CONSTRAINT defaultprocessed DEFAULT 0,
accepted INTEGER CONSTRAINT defaultaccepted DEFAULT 0,
blob BLOB)`, tableName))
tableName = "tweets"
s.makeTable(tableName, fmt.Sprintf(`
CREATE TABLE IF NOT EXISTS %s(tweet_id INTEGER PRIMARY KEY,
created_at INTEGER,
langugage TEXT,
user_id INTEGER,
desc TEXT,
blob BLOB
-- FOREIGN KEY(screen_name) REFERENCES users(screen_name)
)`, tableName))
tableName = "screennames"
s.makeTable(tableName, fmt.Sprintf(`
CREATE TABLE IF NOT EXISTS %s(screen_name TEXT PRIMARY KEY,
processed INTEGER CONSTRAINT defaultprocessed DEFAULT 0)`, tableName))
tableName = "userids"
s.makeTable(tableName, fmt.Sprintf(`
CREATE TABLE IF NOT EXISTS %s(user_id INTEGER PRIMARY KEY,
processed INTEGER CONSTRAINT defaultprocessed DEFAULT 0)`, tableName))
tableName = "followers"
s.makeTable(tableName, fmt.Sprintf(`
CREATE TABLE IF NOT EXISTS %s(user_id INTEGER,
follower_id INTEGER,
CONSTRAINT uniquemap UNIQUE (user_id, follower_id))`, tableName))
tableName = "following"
s.makeTable(tableName, fmt.Sprintf(`
CREATE TABLE IF NOT EXISTS %s(user_id INTEGER,
following_id INTEGER,
CONSTRAINT uniquemap UNIQUE (user_id, following_id))`, tableName))
}
func (s *Storage) checkMakeDatabase(DBName string) *sql.DB {
var db *sql.DB
db, err := sql.Open("sqlite3", DBName+".db") //?cache=shared&mode=rwc")
if err != nil {
log.Fatal(err)
}
db.Exec("PRAGMA journal_mode=WAL;")
s.db = db
return db
}
func (s *Storage) makeTable(tableName, sqlStmt string) {
_, err := s.db.Exec(sqlStmt)
if err != nil {
log.Fatalf("%q: %s\n", err, sqlStmt)
return
}
}
//StoreScreenName inserts the given screenName into the `screenames` table
func (s *Storage) StoreScreenName(screenName string) |
//StoreUser inserts the Twitter user details into the `users` table.
func (s *Storage) StoreUser(userID int64, screenName, description string, protected bool, blob []byte) {
chQueryArgs <- &queryArgs{"INSERT OR IGNORE INTO users (user_id, screen_name, description, protected, blob) VALUES (?, ?, ?, ?, ?)",
[]interface{}{userID, screenName, description, protected, blob}}
}
//StoreTweet inserts the tweet details into the `tweets` table.
func (s *Storage) StoreTweet(tweetID, createdAt, userID int64, language, desc string, blob []byte) {
chQueryArgs <- &queryArgs{"INSERT OR IGNORE INTO tweets (tweet_id, created_at, langugage, user_id, desc, blob) VALUES (?, ?, ?, ?, ?, ?)",
[]interface{}{tweetID, createdAt, language, userID, desc, blob}}
}
func (s *Storage) storeFriendOrFollower(userID, friendOrFollowerID int64, query string) {
chQueryArgs <- &queryArgs{query, []interface{}{userID, friendOrFollowerID}}
}
//StoreFriends stores the mapping between the userID and the IDs of
//users the follow into the `following` table.
func (s *Storage) StoreFriends(userID int64, friendIDs []int64) {
for _, friendID := range friendIDs {
s.storeFriendOrFollower(userID, friendID, "INSERT OR IGNORE INTO following (user_id, following_id) VALUES (?, ?)")
}
}
//StoreFollowers stores the mapping between the userID and the IDs of
//their followes into the `followers` table.
func (s *Storage) StoreFollowers(userID int64, followerIDs []int64) {
for _, followerID := range followerIDs {
s.storeFriendOrFollower(userID, followerID, "INSERT OR IGNORE INTO followers (user_id, follower_id) VALUES (?, ?)")
}
}
func (s *Storage) storeUserID(userID int64) {
chQueryArgs <- &queryArgs{"INSERT OR IGNORE INTO userids (user_id) VALUES (?)", []interface{}{userID}}
}
//StoreUserIDs stores the given userIDs in the `userids` table
func (s *Storage) StoreUserIDs(userIDs []int64) {
for _, userID := range userIDs {
s.storeUserID(userID)
}
}
func (s *Storage) queryScreenNamesOrIDs(query string, results interface{}) {
rows, err := s.db.Query(query)
if err != nil {
log.Fatal(err)
}
defer rows.Close()
for rows.Next() {
switch x := results.(type) {
case *[]string:
var item string
rows.Scan(&item)
*x = append(*x, item)
case *[]int64:
var item int64
rows.Scan(&item)
*x = append(*x, item)
default:
log.Fatal("results type must be *[]string or *[]int64")
}
}
}
//GetScreenNames gets Twitter handles from the `screenames` table that have already been processed
func (s *Storage) GetScreenNames() []string {
var results []string
s.queryScreenNamesOrIDs("SELECT screen_name from screennames where processed=1", &results)
return results
}
//GetUnprocessedScreenNames gets Twitter handles from the `screenames` table that are yet to be processed
func (s *Storage) GetUnprocessedScreenNames() []string {
var results []string
s.queryScreenNamesOrIDs("SELECT screen_name from screennames where processed=0", &results)
return results
}
//GetUserIDs gets user ids from the `userids` table that have already been processed
func (s *Storage) GetUserIDs() []int64 {
var results []int64
s.queryScreenNamesOrIDs("SELECT user_id from userids where processed=1", &results)
return results
}
//GetUnprocessedUserIDs gets user ids from the `userids` table that are yet to be processed
func (s *Storage) GetUnprocessedUserIDs() []int64 {
var results []int64
s.queryScreenNamesOrIDs("SELECT user_id from userids where processed=0", &results)
return results
}
//GetAcceptedUserIDs gets user ids from the `users` table for whom the user filtering
//function has marked them as accepted for further processing
func (s *Storage) GetAcceptedUserIDs() []int64 {
var results []int64
s.queryScreenNamesOrIDs("SELECT user_id from users where accepted=1", &results)
return results
}
//GetUserByScreenNameOrID gets the UserRow for the given screenName or ID
func (s *Storage) GetUserByScreenNameOrID(screenNameOrID interface{}) *UserRow {
var u UserRow
query := `SELECT user_id,
screen_name,
description,
last_looked_at,
latest_tweet_id,
latest_following_id,
latest_follower_id,
protected,
processed,
accepted,
blob
FROM users
WHERE %s=?`
var row *sql.Row
switch x := screenNameOrID.(type) {
case int64:
query = fmt.Sprintf(query, "user_id")
row = s.db.QueryRow(query, x)
case string:
query = fmt.Sprintf(query, "screen_name")
row = s.db.QueryRow(query, x)
}
err := row.Scan(
&u.ID,
&u.ScreenName,
&u.Description,
&u.LastLookedAt,
&u.LatestTweetID,
&u.LatestFriendID,
&u.LatestFollowerID,
&u.Protected,
&u.Processed,
&u.Accepted,
&u.Blob)
switch {
case err == sql.ErrNoRows:
return nil
case err != nil:
log.Fatal(err)
}
return &u
}
//MarkUserLatestTweetsCollected updates the `last_looked_at` timestamp and the `latest_tweet_id` for
//the given user in the `users` table
func (s *Storage) MarkUserLatestTweetsCollected(userID int64, lastLookedAt, latestTweetID int64) {
chQueryArgs <- &queryArgs{"UPDATE users SET last_looked_at=?, latest_tweet_id=? where user_id=?", []interface{}{lastLookedAt, latestTweetID, userID}}
}
//MarkUserLatestFriendsCollected sets the `latest_following_id` to the latest id of the users given userID
//is following
func (s *Storage) MarkUserLatestFriendsCollected(userID, latestFriendID int64) {
chQueryArgs <- &queryArgs{"UPDATE users SET latest_following_id=? where user_id=?", []interface{}{latestFriendID, userID}}
}
//MarkUserLatestFollowersCollected sets the `latest_follower_id` to the latest id of the followers collected
func (s *Storage) MarkUserLatestFollowersCollected(userID, latestFollowerID int64) {
chQueryArgs <- &queryArgs{"UPDATE users SET latest_follower_id=? where user_id=?", []interface{}{latestFollowerID, userID}}
}
//MarkUserProcessed sets the `processed` and the `accepted` flags for the user in the `users` table
func (s *Storage) MarkUserProcessed(ID int64, processed, accepted bool) {
chQueryArgs <- &queryArgs{"UPDATE users SET processed=?, accepted=? where user_id=?", []interface{}{processed, accepted, ID}}
}
//MarkUserIDProcessed sets the `processed` flag for the given user id in the `userids` table
func (s *Storage) MarkUserIDProcessed(ID int64, processed bool) {
chQueryArgs <- &queryArgs{"UPDATE userids SET processed=? where user_id=?", []interface{}{processed, ID}}
}
//MarkScreenNameProcessed sets the `processed` flag for the given screenName in the `screennames` table
func (s *Storage) MarkScreenNameProcessed(screenName string, processed bool) {
chQueryArgs <- &queryArgs{"UPDATE screennames SET processed=? where screen_name=?", []interface{}{processed, screenName}}
}
| {
_, err := s.db.Exec("INSERT OR IGNORE INTO screennames (screen_name) VALUES (?)", screenName)
if err != nil {
log.Fatal(err)
}
} | identifier_body |
equilibrium_computation.py | """
@version $Id: equilibrium_computation.py 2017-02-10 16:08 denis $
Module to compute the resting equilibrium point of a Virtual Epileptic Patient module
"""
import numpy
from scipy.optimize import root
from sympy import symbols, exp, solve, lambdify
from tvb_epilepsy.base.constants import X1_DEF, X1_EQ_CR_DEF, X0_DEF, X0_CR_DEF
from tvb_epilepsy.tvb_api.epileptor_models import *
from tvb.simulator.models import Epileptor
#Currently we assume only difference coupling (permittivity coupling following Proix et al 2014)
#TODO: to generalize for different coupling functions
def x1eq_def(X1_DEF, X1_EQ_CR_DEF, n_regions):
#The default initial condition for x1 equilibrium search
return numpy.repeat(numpy.array((X1_EQ_CR_DEF + X1_DEF) / 2.0), n_regions)
def x1_lin_def(X1_DEF, X1_EQ_CR_DEF, n_regions):
# The point of the linear Taylor expansion
return numpy.repeat(numpy.array((X1_EQ_CR_DEF + X1_DEF) / 2.0), n_regions)
def fx1_2d_calc(x1, z=0, y0=0, Iext1=0):
x12 = x1 ** 2
return -x1 * x12 - 2 * x12 - z + y0 + Iext1
def fx1_6d_calc(x1, z=0, y0=0, Iext1=0):
x12 = x1 ** 2
return -x1 * x12 + 3 * x12 - z + y0 + Iext1
def fz_lin_calc(x1, x0, x0cr, r, z=0, coupl=0):
return 4 * (x1 - r * x0 + x0cr) - z - coupl
def fz_sig_calc(x1, x0, x0cr, r, z=0, coupl=0):
return 3/(1 + exp(-10 * (x1 + 0.5))) - r * x0 + x0cr - z - coupl
def zeq_2d_calc(x1eq, y0, Iext1):
return fx1_2d_calc(x1eq, z=0, y0=y0, Iext1=Iext1)
def zeq_6d_calc(x1eq, y0, Iext1):
return fx1_6d_calc(x1eq, z=0, y0=y0, Iext1=Iext1)
def y1eq_calc(x1eq, d=5.0):
return 1 - d * x1eq ** 2
def pop2eq_calc(x1eq, zeq, Iext2):
shape = x1eq.shape
type = x1eq.dtype
# g_eq = 0.1*x1eq (1)
# y2eq = 0 (2)
y2eq = numpy.zeros(shape, dtype=type)
# -x2eq**3 + x2eq -y2eq+2*g_eq-0.3*(zeq-3.5)+Iext2 =0=> (1),(2)
# -x2eq**3 + x2eq +2*0.1*x1eq-0.3*(zeq-3.5)+Iext2 =0=>
# p3 p1 p0
# -x2eq**3 + x2eq +0.2*x1eq-0.3*(zeq-3.5)+Iext2 =0
p0 = 0.2 * x1eq - 0.3 * (zeq - 3.5) + Iext2
x2eq = numpy.zeros(shape, dtype=type)
for i in range(shape[1]):
x2eq[0 ,i] = numpy.min(numpy.real(numpy.roots([-1.0, 0.0, 1.0, p0[0 ,i]])))
return x2eq, y2eq
# def pop2eq_calc(n_regions,x1eq,zeq,Iext2):
# shape = x1eq.shape
# type = x1eq.dtype
# #g_eq = 0.1*x1eq (1)
# #y2eq = 6*(x2eq+0.25)*x1eq (2)
# #-x2eq**3 + x2eq -y2eq+2*g_eq-0.3*(zeq-3.5)+Iext2 =0=> (1),(2)
# #-x2eq**3 + x2eq -6*(x2eq+0.25)*x1eq+2*0.1*x1eq-0.3*(zeq-3.5)+Iext2 =0=>
# #-x2eq**3 + (1.0-6*x1eq)*x2eq -1.5*x1eq+ 0.2*x1eq-0.3*(zeq-3.5)+Iext2 =0
# #p3 p1 p0
# #-x2eq**3 + (1.0-6*x1eq)*x2eq -1.3*x1eq -0.3*(zeq-3.5) +Iext2 =0
# p0 = -1.3*x1eq-0.3*(zeq-3.5)+Iext2
# p1 = 1.0-6*x1eq
# x2eq = numpy.zeros(shape, dtype=type)
# for i in range(shape[1]):
# x2eq[0 ,i] = numpy.min( numpy.real( numpy.roots([-1.0, 0.0, p1[i,0], p0[i,0] ]) ) )
# #(2):
# y2eq = 6*(x2eq+0.25)*x1eq
# return x2eq, y2eq
def geq_calc(x1eq):
return 0.1 * x1eq
def x1eq_x0_hypo_optimize_fun(x, ix0, iE, x1EQ, zEQ, x0, x0cr, rx0, y0, Iext1, K, w):
no_x0 = len(ix0)
no_e = len(iE)
type = x1EQ.dtype
i_e = numpy.ones((no_e,1), dtype=type)
i_x0 = numpy.ones((no_x0,1), dtype=type)
#Coupling to from from to
w_e_to_e = numpy.sum(numpy.dot(w[iE][:,iE], numpy.dot(i_e, x1EQ[:,iE]) - numpy.dot(i_e, x1EQ[:,iE]).T), axis=1)
w_x0_to_e = numpy.sum(numpy.dot(w[iE][:, ix0], numpy.dot(i_e, x0) - numpy.dot(i_x0, x1EQ[:,iE]).T), axis=1)
w_e_to_x0 = numpy.sum(numpy.dot(w[ix0][:,iE], numpy.dot(i_x0, x1EQ[:,iE]) - numpy.dot(i_e, x0).T), axis=1)
w_x0_to_x0 = numpy.sum(numpy.dot(w[ix0][:,ix0], numpy.dot(i_x0, x0) - numpy.dot(i_x0, x0).T), axis=1)
fun = numpy.array(x1EQ.shape)
#Known x1eq, unknown x0:
fun[iE] = fz_lin_calc(x1EQ[iE], x[iE], x0cr[iE], rx0[iE], z=zEQ[iE], coupl=K[iE] * (w_e_to_e + w_x0_to_e))
# Known x0, unknown x1eq:
fun[ix0] = fz_lin_calc(x[ix0], x0, x0cr[ix0], rx0[ix0], z=zeq_2d_calc(x[ix0], y0[ix0], Iext1[ix0]),
coupl=K[ix0] * (w_e_to_x0 + w_x0_to_x0))
return fun
def x1eq_x0_hypo_optimize_jac(x, ix0, iE, x1EQ, zEQ, x0, x0cr, rx0, y0, Iext1, K, w):
no_x0 = len(ix0)
no_e = len(iE)
n_regions = no_e + no_x0
type = x1EQ.dtype
i_x0 = numpy.ones((no_x0, 1), dtype=type) | jac_e_x1o = -numpy.dot(i_x0, K[:,iE]) * w[iE][:,ix0]
jac_x0_x0e = numpy.zeros((no_x0,no_e),dtype = type)
jac_x0_x1o = numpy.diag(4 + 3 * x[ix0] ** 2 + 4 * x[ix0] + K[ix0] * numpy.sum(w[ix0][:,ix0], axis=1)) \
- numpy.dot(i_x0, K[:, ix0]) * w[ix0][:, ix0]
jac = numpy.zeros((n_regions,n_regions), dtype=type)
jac[iE][:,iE] = jac_e_x0e
jac[iE][:, ix0] = jac_e_x1o
jac[ix0][:, iE] = jac_x0_x0e
jac[ix0][:, ix0] = jac_x0_x1o
return jac
def x1eq_x0_hypo_optimize(ix0, iE, x1EQ, zEQ, x0, x0cr, rx0, y0, Iext1, K, w):
xinit = numpy.zeros(x1EQ.shape, dtype = x1EQ.dtype)
#Set initial conditions for the optimization algorithm, by ignoring coupling (=0)
# fz = 4 * (x1 - r * x0 + x0cr) - z -coupling = 0
#x0init = (x1 + x0cr -z/4) / rx0
xinit[:, iE] = x0_calc(x1EQ[:, iE], zEQ[:, iE], x0cr[:, iE], rx0[:, iE], 0.0)
#x1eqinit = rx0 * x0 - x0cr + z / 4
xinit[:, ix0] = rx0[:, ix0] * x0 - x0cr[:, ix0] + zEQ[:, ix0] / 4
#Solve:
sol = root(x1eq_x0_hypo_optimize_fun, xinit, args=(ix0, iE, x1EQ, zEQ, x0, x0cr, rx0, y0, Iext1, K, w),
method='lm', jac=x1eq_x0_hypo_optimize_jac, tol=10**(-6), callback=None, options=None) #method='hybr'
if sol.success:
x1EQ[:,ix0] = sol.x[:, ix0]
return x1EQ
else:
raise ValueError(sol.message)
def x1eq_x0_hypo_linTaylor(ix0,iE,x1EQ,zEQ,x0,x0cr,rx0,y0,Iext1,K,w):
no_x0 = len(ix0)
no_e = len(iE)
n_regions = no_e + no_x0
# The equilibria of the nodes of fixed epileptogenicity
x1_eq = x1EQ[:, iE]
z_eq = zEQ[:, iE]
#Prepare linear system to solve:
#The point of the linear Taylor expansion
x1LIN = x1_lin_def(X1_DEF, X1_EQ_CR_DEF, n_regions)
# For regions of fixed equilibria:
ii_e = numpy.ones((1, no_e), dtype=numpy.float32)
we_to_e = numpy.expand_dims(numpy.sum(w[iE][:, iE] * (numpy.dot(ii_e.T, x1_eq) -
numpy.dot(x1_eq.T, ii_e)), axis=1), 1).T
wx0_to_e = -x1_eq * numpy.expand_dims(numpy.sum(w[ix0][:, iE], axis=0), 0)
be = 4.0 * (x1_eq + x0cr[:, iE]) - z_eq - K[:, iE] * (we_to_e + wx0_to_e)
# For regions of fixed x0:
ii_x0 = numpy.ones((1, no_x0), dtype=numpy.float32)
we_to_x0 = numpy.expand_dims(numpy.sum(w[ix0][:, iE] * numpy.dot(ii_x0.T, x1_eq), axis=1), 1).T
bx0 = 4.0 * (x0cr[:, ix0] - rx0[:, ix0] * x0) - y0[:, ix0] - Iext1[:, ix0] \
- 2.0 * x1LIN[:, ix0] ** 3 - 2.0 * x1LIN[:, ix0] ** 2 - K[:, ix0] * we_to_x0
# Concatenate B vector:
b = -numpy.concatenate((be, bx0), axis=1).T
# From-to Epileptogenicity-fixed regions
# ae_to_e = -4 * numpy.eye( no_e, dtype=numpy.float32 )
ae_to_e = -4 * numpy.diag(rx0[0, iE])
# From x0-fixed regions to Epileptogenicity-fixed regions
ax0_to_e = -numpy.dot(K[:, iE].T, ii_x0) * w[iE][:, ix0]
# From Epileptogenicity-fixed regions to x0-fixed regions
ae_to_x0 = numpy.zeros((no_x0, no_e), dtype=numpy.float32)
# From-to x0-fixed regions
ax0_to_x0 = numpy.diag((4.0 + 3.0 * (x1LIN[:, ix0] ** 2 + 4.0 * x1LIN[:, ix0]) +
K[0, ix0] * numpy.expand_dims(numpy.sum(w[ix0][:, ix0], axis=0), 0)).T[:, 0]) \
- numpy.dot(K[:, ix0].T, ii_x0) * w[ix0][:, ix0]
# Concatenate A matrix
a = numpy.concatenate((numpy.concatenate((ae_to_e, ax0_to_e), axis=1),
numpy.concatenate((ae_to_x0, ax0_to_x0), axis=1)), axis=0)
# Solve the system
x = numpy.dot(numpy.linalg.inv(a), b).T
# Unpack solution:
# The equilibria of the regions with fixed E have not changed:
# The equilibria of the regions with fixed x0:
x1EQ[0, ix0] = x[0, no_e:]
return x1EQ
def x0cr_rx0_calc(y0, Iext1, epileptor_model = "2d", zmode = numpy.array("lin"),
x1rest = X1_DEF, x1cr = X1_EQ_CR_DEF, x0def = X0_DEF, x0cr_def = X0_CR_DEF):
#Define the symbolic variables we need:
(y01, I1, x1, z, x0, r, x0cr, f1, fz) = symbols('y01 I1 x1 z x0 r x0cr f1 fz')
#Define the fx1(x1) expression (assuming centered x1 in all cases)...
if isinstance(epileptor_model,EpileptorDP2D) or epileptor_model=="2d":
#...for the 2D permittivity coupling approximation, Proix et al 2014
fx1 = x1 ** 3 + 2 * x1 ** 2
else:
#...or for the original (>=6D) epileptor
fx1 = x1 ** 3 - 3 * x1 ** 2
#...and the z expression, coming from solving dx1/dt=f1(x1,z)=0
z = y01 - fx1 + I1
#Define the fz expression...
if zmode == 'lin':
#...for linear...
fz = 4 * (x1 - r * x0 + x0cr) - z
elif zmode == 'sig':
#...and sigmoidal versions
fz = 3/(1 + exp(-10 * (x1 + 0.5))) - r * x0 + x0cr - z
else:
raise ValueError('zmode is neither "lin" nor "sig"')
#Solve the fz expression for rx0 and x0cr, assuming the following two points (x1eq,x0) = [(-5/3,0.0),(-4/3,1.0)]...
#...and WITHOUT COUPLING
fz_sol = solve([fz.subs([(x1, x1rest), (x0, x0def), (z, z.subs(x1, x1rest))]),
fz.subs([(x1, x1cr), (x0, x0cr_def), (z, z.subs(x1, x1cr))])], r, x0cr)
#Convert the solution of x0cr from expression to function that accepts numpy arrays as inputs:
x0cr = lambdify((y01,I1),fz_sol[x0cr],'numpy')
#Compute the actual x0cr now given the inputs y0 and Iext1
x0cr = x0cr(y0, Iext1)
#The rx0 doesn' depend on y0 and Iext1, therefore...
rx0 = fz_sol[r]*numpy.ones(shape=x0cr.shape)
return x0cr, rx0
def coupling_calc(x1, K, w):
#Note that for difference coupling it doesn't matter whether we use centered x1 or decentered x1-5/3
# Only difference coupling for the moment.
# TODO: Extend for different coupling forms
n_regions = x1.size
i_n = numpy.ones((n_regions,1), dtype='f')
# Coupling from to
return K*numpy.sum(numpy.dot(w, numpy.dot(i_n, x1) - numpy.dot(i_n, x1).T), axis=1)
def x0_calc(x1, z, x0cr, rx0, coupl, zmode=numpy.array("lin")):
if zmode == 'lin':
return (x1 + x0cr - (z+coupl) / 4) / rx0
elif zmode == 'sig':
return (3 / (1 + numpy.exp(-10 * (x1 + 0.5))) + x0cr - z + coupl) / rx0
else:
raise ValueError('zmode is neither "lin" nor "sig"')
def calc_equilibrium_point(epileptor_model, hypothesis):
#De-center them:
if isinstance(epileptor_model,EpileptorDP2D):
if epileptor_model.zmode == 'sig':
#2D approximation, Proix et al 2014
zeq = zeq_2d_calc(hypothesis.x1EQ, epileptor_model.yc.T, epileptor_model.Iext1.T)
else:
zeq = hypothesis.zEQ
return hypothesis.x1EQ, zeq
else:
#all >=6D models
if isinstance(epileptor_model, Epileptor):
#tvb
zeq = zeq_6d_calc(hypothesis.x1EQ, epileptor_model.c.T, epileptor_model.Iext.T)
y1eq = y1eq_calc(hypothesis.x1EQ, epileptor_model.c.T)
else:
zeq = zeq_6d_calc(hypothesis.x1EQ, epileptor_model.yc.T, epileptor_model.Iext1.T)
y1eq = y1eq_calc(hypothesis.x1EQ, epileptor_model.yc.T)
(x2eq, y2eq) = pop2eq_calc(hypothesis.x1EQ, zeq, epileptor_model.Iext2.T)
geq = geq_calc(hypothesis.x1EQ)
if isinstance(epileptor_model, EpileptorDPrealistic):
#the 11D "realistic" simulations model
return hypothesis.x1EQ, y1eq, zeq, x2eq, y2eq, geq, epileptor_model.x0.T, epileptor_model.slope.T, \
epileptor_model.Iext1.T, epileptor_model.Iext2.T, epileptor_model.K.T
else:
#all >=6D models
return hypothesis.x1EQ, y1eq, zeq, x2eq, y2eq, geq |
jac_e_x0e = numpy.diag(- 4 * rx0[iE]) | random_line_split |
equilibrium_computation.py | """
@version $Id: equilibrium_computation.py 2017-02-10 16:08 denis $
Module to compute the resting equilibrium point of a Virtual Epileptic Patient module
"""
import numpy
from scipy.optimize import root
from sympy import symbols, exp, solve, lambdify
from tvb_epilepsy.base.constants import X1_DEF, X1_EQ_CR_DEF, X0_DEF, X0_CR_DEF
from tvb_epilepsy.tvb_api.epileptor_models import *
from tvb.simulator.models import Epileptor
#Currently we assume only difference coupling (permittivity coupling following Proix et al 2014)
#TODO: to generalize for different coupling functions
def x1eq_def(X1_DEF, X1_EQ_CR_DEF, n_regions):
#The default initial condition for x1 equilibrium search
return numpy.repeat(numpy.array((X1_EQ_CR_DEF + X1_DEF) / 2.0), n_regions)
def x1_lin_def(X1_DEF, X1_EQ_CR_DEF, n_regions):
# The point of the linear Taylor expansion
return numpy.repeat(numpy.array((X1_EQ_CR_DEF + X1_DEF) / 2.0), n_regions)
def fx1_2d_calc(x1, z=0, y0=0, Iext1=0):
x12 = x1 ** 2
return -x1 * x12 - 2 * x12 - z + y0 + Iext1
def fx1_6d_calc(x1, z=0, y0=0, Iext1=0):
x12 = x1 ** 2
return -x1 * x12 + 3 * x12 - z + y0 + Iext1
def fz_lin_calc(x1, x0, x0cr, r, z=0, coupl=0):
return 4 * (x1 - r * x0 + x0cr) - z - coupl
def fz_sig_calc(x1, x0, x0cr, r, z=0, coupl=0):
return 3/(1 + exp(-10 * (x1 + 0.5))) - r * x0 + x0cr - z - coupl
def zeq_2d_calc(x1eq, y0, Iext1):
return fx1_2d_calc(x1eq, z=0, y0=y0, Iext1=Iext1)
def zeq_6d_calc(x1eq, y0, Iext1):
return fx1_6d_calc(x1eq, z=0, y0=y0, Iext1=Iext1)
def y1eq_calc(x1eq, d=5.0):
return 1 - d * x1eq ** 2
def pop2eq_calc(x1eq, zeq, Iext2):
shape = x1eq.shape
type = x1eq.dtype
# g_eq = 0.1*x1eq (1)
# y2eq = 0 (2)
y2eq = numpy.zeros(shape, dtype=type)
# -x2eq**3 + x2eq -y2eq+2*g_eq-0.3*(zeq-3.5)+Iext2 =0=> (1),(2)
# -x2eq**3 + x2eq +2*0.1*x1eq-0.3*(zeq-3.5)+Iext2 =0=>
# p3 p1 p0
# -x2eq**3 + x2eq +0.2*x1eq-0.3*(zeq-3.5)+Iext2 =0
p0 = 0.2 * x1eq - 0.3 * (zeq - 3.5) + Iext2
x2eq = numpy.zeros(shape, dtype=type)
for i in range(shape[1]):
|
return x2eq, y2eq
# def pop2eq_calc(n_regions,x1eq,zeq,Iext2):
# shape = x1eq.shape
# type = x1eq.dtype
# #g_eq = 0.1*x1eq (1)
# #y2eq = 6*(x2eq+0.25)*x1eq (2)
# #-x2eq**3 + x2eq -y2eq+2*g_eq-0.3*(zeq-3.5)+Iext2 =0=> (1),(2)
# #-x2eq**3 + x2eq -6*(x2eq+0.25)*x1eq+2*0.1*x1eq-0.3*(zeq-3.5)+Iext2 =0=>
# #-x2eq**3 + (1.0-6*x1eq)*x2eq -1.5*x1eq+ 0.2*x1eq-0.3*(zeq-3.5)+Iext2 =0
# #p3 p1 p0
# #-x2eq**3 + (1.0-6*x1eq)*x2eq -1.3*x1eq -0.3*(zeq-3.5) +Iext2 =0
# p0 = -1.3*x1eq-0.3*(zeq-3.5)+Iext2
# p1 = 1.0-6*x1eq
# x2eq = numpy.zeros(shape, dtype=type)
# for i in range(shape[1]):
# x2eq[0 ,i] = numpy.min( numpy.real( numpy.roots([-1.0, 0.0, p1[i,0], p0[i,0] ]) ) )
# #(2):
# y2eq = 6*(x2eq+0.25)*x1eq
# return x2eq, y2eq
def geq_calc(x1eq):
return 0.1 * x1eq
def x1eq_x0_hypo_optimize_fun(x, ix0, iE, x1EQ, zEQ, x0, x0cr, rx0, y0, Iext1, K, w):
no_x0 = len(ix0)
no_e = len(iE)
type = x1EQ.dtype
i_e = numpy.ones((no_e,1), dtype=type)
i_x0 = numpy.ones((no_x0,1), dtype=type)
#Coupling to from from to
w_e_to_e = numpy.sum(numpy.dot(w[iE][:,iE], numpy.dot(i_e, x1EQ[:,iE]) - numpy.dot(i_e, x1EQ[:,iE]).T), axis=1)
w_x0_to_e = numpy.sum(numpy.dot(w[iE][:, ix0], numpy.dot(i_e, x0) - numpy.dot(i_x0, x1EQ[:,iE]).T), axis=1)
w_e_to_x0 = numpy.sum(numpy.dot(w[ix0][:,iE], numpy.dot(i_x0, x1EQ[:,iE]) - numpy.dot(i_e, x0).T), axis=1)
w_x0_to_x0 = numpy.sum(numpy.dot(w[ix0][:,ix0], numpy.dot(i_x0, x0) - numpy.dot(i_x0, x0).T), axis=1)
fun = numpy.array(x1EQ.shape)
#Known x1eq, unknown x0:
fun[iE] = fz_lin_calc(x1EQ[iE], x[iE], x0cr[iE], rx0[iE], z=zEQ[iE], coupl=K[iE] * (w_e_to_e + w_x0_to_e))
# Known x0, unknown x1eq:
fun[ix0] = fz_lin_calc(x[ix0], x0, x0cr[ix0], rx0[ix0], z=zeq_2d_calc(x[ix0], y0[ix0], Iext1[ix0]),
coupl=K[ix0] * (w_e_to_x0 + w_x0_to_x0))
return fun
def x1eq_x0_hypo_optimize_jac(x, ix0, iE, x1EQ, zEQ, x0, x0cr, rx0, y0, Iext1, K, w):
no_x0 = len(ix0)
no_e = len(iE)
n_regions = no_e + no_x0
type = x1EQ.dtype
i_x0 = numpy.ones((no_x0, 1), dtype=type)
jac_e_x0e = numpy.diag(- 4 * rx0[iE])
jac_e_x1o = -numpy.dot(i_x0, K[:,iE]) * w[iE][:,ix0]
jac_x0_x0e = numpy.zeros((no_x0,no_e),dtype = type)
jac_x0_x1o = numpy.diag(4 + 3 * x[ix0] ** 2 + 4 * x[ix0] + K[ix0] * numpy.sum(w[ix0][:,ix0], axis=1)) \
- numpy.dot(i_x0, K[:, ix0]) * w[ix0][:, ix0]
jac = numpy.zeros((n_regions,n_regions), dtype=type)
jac[iE][:,iE] = jac_e_x0e
jac[iE][:, ix0] = jac_e_x1o
jac[ix0][:, iE] = jac_x0_x0e
jac[ix0][:, ix0] = jac_x0_x1o
return jac
def x1eq_x0_hypo_optimize(ix0, iE, x1EQ, zEQ, x0, x0cr, rx0, y0, Iext1, K, w):
xinit = numpy.zeros(x1EQ.shape, dtype = x1EQ.dtype)
#Set initial conditions for the optimization algorithm, by ignoring coupling (=0)
# fz = 4 * (x1 - r * x0 + x0cr) - z -coupling = 0
#x0init = (x1 + x0cr -z/4) / rx0
xinit[:, iE] = x0_calc(x1EQ[:, iE], zEQ[:, iE], x0cr[:, iE], rx0[:, iE], 0.0)
#x1eqinit = rx0 * x0 - x0cr + z / 4
xinit[:, ix0] = rx0[:, ix0] * x0 - x0cr[:, ix0] + zEQ[:, ix0] / 4
#Solve:
sol = root(x1eq_x0_hypo_optimize_fun, xinit, args=(ix0, iE, x1EQ, zEQ, x0, x0cr, rx0, y0, Iext1, K, w),
method='lm', jac=x1eq_x0_hypo_optimize_jac, tol=10**(-6), callback=None, options=None) #method='hybr'
if sol.success:
x1EQ[:,ix0] = sol.x[:, ix0]
return x1EQ
else:
raise ValueError(sol.message)
def x1eq_x0_hypo_linTaylor(ix0,iE,x1EQ,zEQ,x0,x0cr,rx0,y0,Iext1,K,w):
no_x0 = len(ix0)
no_e = len(iE)
n_regions = no_e + no_x0
# The equilibria of the nodes of fixed epileptogenicity
x1_eq = x1EQ[:, iE]
z_eq = zEQ[:, iE]
#Prepare linear system to solve:
#The point of the linear Taylor expansion
x1LIN = x1_lin_def(X1_DEF, X1_EQ_CR_DEF, n_regions)
# For regions of fixed equilibria:
ii_e = numpy.ones((1, no_e), dtype=numpy.float32)
we_to_e = numpy.expand_dims(numpy.sum(w[iE][:, iE] * (numpy.dot(ii_e.T, x1_eq) -
numpy.dot(x1_eq.T, ii_e)), axis=1), 1).T
wx0_to_e = -x1_eq * numpy.expand_dims(numpy.sum(w[ix0][:, iE], axis=0), 0)
be = 4.0 * (x1_eq + x0cr[:, iE]) - z_eq - K[:, iE] * (we_to_e + wx0_to_e)
# For regions of fixed x0:
ii_x0 = numpy.ones((1, no_x0), dtype=numpy.float32)
we_to_x0 = numpy.expand_dims(numpy.sum(w[ix0][:, iE] * numpy.dot(ii_x0.T, x1_eq), axis=1), 1).T
bx0 = 4.0 * (x0cr[:, ix0] - rx0[:, ix0] * x0) - y0[:, ix0] - Iext1[:, ix0] \
- 2.0 * x1LIN[:, ix0] ** 3 - 2.0 * x1LIN[:, ix0] ** 2 - K[:, ix0] * we_to_x0
# Concatenate B vector:
b = -numpy.concatenate((be, bx0), axis=1).T
# From-to Epileptogenicity-fixed regions
# ae_to_e = -4 * numpy.eye( no_e, dtype=numpy.float32 )
ae_to_e = -4 * numpy.diag(rx0[0, iE])
# From x0-fixed regions to Epileptogenicity-fixed regions
ax0_to_e = -numpy.dot(K[:, iE].T, ii_x0) * w[iE][:, ix0]
# From Epileptogenicity-fixed regions to x0-fixed regions
ae_to_x0 = numpy.zeros((no_x0, no_e), dtype=numpy.float32)
# From-to x0-fixed regions
ax0_to_x0 = numpy.diag((4.0 + 3.0 * (x1LIN[:, ix0] ** 2 + 4.0 * x1LIN[:, ix0]) +
K[0, ix0] * numpy.expand_dims(numpy.sum(w[ix0][:, ix0], axis=0), 0)).T[:, 0]) \
- numpy.dot(K[:, ix0].T, ii_x0) * w[ix0][:, ix0]
# Concatenate A matrix
a = numpy.concatenate((numpy.concatenate((ae_to_e, ax0_to_e), axis=1),
numpy.concatenate((ae_to_x0, ax0_to_x0), axis=1)), axis=0)
# Solve the system
x = numpy.dot(numpy.linalg.inv(a), b).T
# Unpack solution:
# The equilibria of the regions with fixed E have not changed:
# The equilibria of the regions with fixed x0:
x1EQ[0, ix0] = x[0, no_e:]
return x1EQ
def x0cr_rx0_calc(y0, Iext1, epileptor_model = "2d", zmode = numpy.array("lin"),
x1rest = X1_DEF, x1cr = X1_EQ_CR_DEF, x0def = X0_DEF, x0cr_def = X0_CR_DEF):
#Define the symbolic variables we need:
(y01, I1, x1, z, x0, r, x0cr, f1, fz) = symbols('y01 I1 x1 z x0 r x0cr f1 fz')
#Define the fx1(x1) expression (assuming centered x1 in all cases)...
if isinstance(epileptor_model,EpileptorDP2D) or epileptor_model=="2d":
#...for the 2D permittivity coupling approximation, Proix et al 2014
fx1 = x1 ** 3 + 2 * x1 ** 2
else:
#...or for the original (>=6D) epileptor
fx1 = x1 ** 3 - 3 * x1 ** 2
#...and the z expression, coming from solving dx1/dt=f1(x1,z)=0
z = y01 - fx1 + I1
#Define the fz expression...
if zmode == 'lin':
#...for linear...
fz = 4 * (x1 - r * x0 + x0cr) - z
elif zmode == 'sig':
#...and sigmoidal versions
fz = 3/(1 + exp(-10 * (x1 + 0.5))) - r * x0 + x0cr - z
else:
raise ValueError('zmode is neither "lin" nor "sig"')
#Solve the fz expression for rx0 and x0cr, assuming the following two points (x1eq,x0) = [(-5/3,0.0),(-4/3,1.0)]...
#...and WITHOUT COUPLING
fz_sol = solve([fz.subs([(x1, x1rest), (x0, x0def), (z, z.subs(x1, x1rest))]),
fz.subs([(x1, x1cr), (x0, x0cr_def), (z, z.subs(x1, x1cr))])], r, x0cr)
#Convert the solution of x0cr from expression to function that accepts numpy arrays as inputs:
x0cr = lambdify((y01,I1),fz_sol[x0cr],'numpy')
#Compute the actual x0cr now given the inputs y0 and Iext1
x0cr = x0cr(y0, Iext1)
#The rx0 doesn' depend on y0 and Iext1, therefore...
rx0 = fz_sol[r]*numpy.ones(shape=x0cr.shape)
return x0cr, rx0
def coupling_calc(x1, K, w):
#Note that for difference coupling it doesn't matter whether we use centered x1 or decentered x1-5/3
# Only difference coupling for the moment.
# TODO: Extend for different coupling forms
n_regions = x1.size
i_n = numpy.ones((n_regions,1), dtype='f')
# Coupling from to
return K*numpy.sum(numpy.dot(w, numpy.dot(i_n, x1) - numpy.dot(i_n, x1).T), axis=1)
def x0_calc(x1, z, x0cr, rx0, coupl, zmode=numpy.array("lin")):
if zmode == 'lin':
return (x1 + x0cr - (z+coupl) / 4) / rx0
elif zmode == 'sig':
return (3 / (1 + numpy.exp(-10 * (x1 + 0.5))) + x0cr - z + coupl) / rx0
else:
raise ValueError('zmode is neither "lin" nor "sig"')
def calc_equilibrium_point(epileptor_model, hypothesis):
#De-center them:
if isinstance(epileptor_model,EpileptorDP2D):
if epileptor_model.zmode == 'sig':
#2D approximation, Proix et al 2014
zeq = zeq_2d_calc(hypothesis.x1EQ, epileptor_model.yc.T, epileptor_model.Iext1.T)
else:
zeq = hypothesis.zEQ
return hypothesis.x1EQ, zeq
else:
#all >=6D models
if isinstance(epileptor_model, Epileptor):
#tvb
zeq = zeq_6d_calc(hypothesis.x1EQ, epileptor_model.c.T, epileptor_model.Iext.T)
y1eq = y1eq_calc(hypothesis.x1EQ, epileptor_model.c.T)
else:
zeq = zeq_6d_calc(hypothesis.x1EQ, epileptor_model.yc.T, epileptor_model.Iext1.T)
y1eq = y1eq_calc(hypothesis.x1EQ, epileptor_model.yc.T)
(x2eq, y2eq) = pop2eq_calc(hypothesis.x1EQ, zeq, epileptor_model.Iext2.T)
geq = geq_calc(hypothesis.x1EQ)
if isinstance(epileptor_model, EpileptorDPrealistic):
#the 11D "realistic" simulations model
return hypothesis.x1EQ, y1eq, zeq, x2eq, y2eq, geq, epileptor_model.x0.T, epileptor_model.slope.T, \
epileptor_model.Iext1.T, epileptor_model.Iext2.T, epileptor_model.K.T
else:
#all >=6D models
return hypothesis.x1EQ, y1eq, zeq, x2eq, y2eq, geq
| x2eq[0 ,i] = numpy.min(numpy.real(numpy.roots([-1.0, 0.0, 1.0, p0[0 ,i]]))) | conditional_block |
equilibrium_computation.py | """
@version $Id: equilibrium_computation.py 2017-02-10 16:08 denis $
Module to compute the resting equilibrium point of a Virtual Epileptic Patient module
"""
import numpy
from scipy.optimize import root
from sympy import symbols, exp, solve, lambdify
from tvb_epilepsy.base.constants import X1_DEF, X1_EQ_CR_DEF, X0_DEF, X0_CR_DEF
from tvb_epilepsy.tvb_api.epileptor_models import *
from tvb.simulator.models import Epileptor
#Currently we assume only difference coupling (permittivity coupling following Proix et al 2014)
#TODO: to generalize for different coupling functions
def | (X1_DEF, X1_EQ_CR_DEF, n_regions):
#The default initial condition for x1 equilibrium search
return numpy.repeat(numpy.array((X1_EQ_CR_DEF + X1_DEF) / 2.0), n_regions)
def x1_lin_def(X1_DEF, X1_EQ_CR_DEF, n_regions):
# The point of the linear Taylor expansion
return numpy.repeat(numpy.array((X1_EQ_CR_DEF + X1_DEF) / 2.0), n_regions)
def fx1_2d_calc(x1, z=0, y0=0, Iext1=0):
x12 = x1 ** 2
return -x1 * x12 - 2 * x12 - z + y0 + Iext1
def fx1_6d_calc(x1, z=0, y0=0, Iext1=0):
x12 = x1 ** 2
return -x1 * x12 + 3 * x12 - z + y0 + Iext1
def fz_lin_calc(x1, x0, x0cr, r, z=0, coupl=0):
return 4 * (x1 - r * x0 + x0cr) - z - coupl
def fz_sig_calc(x1, x0, x0cr, r, z=0, coupl=0):
return 3/(1 + exp(-10 * (x1 + 0.5))) - r * x0 + x0cr - z - coupl
def zeq_2d_calc(x1eq, y0, Iext1):
return fx1_2d_calc(x1eq, z=0, y0=y0, Iext1=Iext1)
def zeq_6d_calc(x1eq, y0, Iext1):
return fx1_6d_calc(x1eq, z=0, y0=y0, Iext1=Iext1)
def y1eq_calc(x1eq, d=5.0):
return 1 - d * x1eq ** 2
def pop2eq_calc(x1eq, zeq, Iext2):
shape = x1eq.shape
type = x1eq.dtype
# g_eq = 0.1*x1eq (1)
# y2eq = 0 (2)
y2eq = numpy.zeros(shape, dtype=type)
# -x2eq**3 + x2eq -y2eq+2*g_eq-0.3*(zeq-3.5)+Iext2 =0=> (1),(2)
# -x2eq**3 + x2eq +2*0.1*x1eq-0.3*(zeq-3.5)+Iext2 =0=>
# p3 p1 p0
# -x2eq**3 + x2eq +0.2*x1eq-0.3*(zeq-3.5)+Iext2 =0
p0 = 0.2 * x1eq - 0.3 * (zeq - 3.5) + Iext2
x2eq = numpy.zeros(shape, dtype=type)
for i in range(shape[1]):
x2eq[0 ,i] = numpy.min(numpy.real(numpy.roots([-1.0, 0.0, 1.0, p0[0 ,i]])))
return x2eq, y2eq
# def pop2eq_calc(n_regions,x1eq,zeq,Iext2):
# shape = x1eq.shape
# type = x1eq.dtype
# #g_eq = 0.1*x1eq (1)
# #y2eq = 6*(x2eq+0.25)*x1eq (2)
# #-x2eq**3 + x2eq -y2eq+2*g_eq-0.3*(zeq-3.5)+Iext2 =0=> (1),(2)
# #-x2eq**3 + x2eq -6*(x2eq+0.25)*x1eq+2*0.1*x1eq-0.3*(zeq-3.5)+Iext2 =0=>
# #-x2eq**3 + (1.0-6*x1eq)*x2eq -1.5*x1eq+ 0.2*x1eq-0.3*(zeq-3.5)+Iext2 =0
# #p3 p1 p0
# #-x2eq**3 + (1.0-6*x1eq)*x2eq -1.3*x1eq -0.3*(zeq-3.5) +Iext2 =0
# p0 = -1.3*x1eq-0.3*(zeq-3.5)+Iext2
# p1 = 1.0-6*x1eq
# x2eq = numpy.zeros(shape, dtype=type)
# for i in range(shape[1]):
# x2eq[0 ,i] = numpy.min( numpy.real( numpy.roots([-1.0, 0.0, p1[i,0], p0[i,0] ]) ) )
# #(2):
# y2eq = 6*(x2eq+0.25)*x1eq
# return x2eq, y2eq
def geq_calc(x1eq):
return 0.1 * x1eq
def x1eq_x0_hypo_optimize_fun(x, ix0, iE, x1EQ, zEQ, x0, x0cr, rx0, y0, Iext1, K, w):
no_x0 = len(ix0)
no_e = len(iE)
type = x1EQ.dtype
i_e = numpy.ones((no_e,1), dtype=type)
i_x0 = numpy.ones((no_x0,1), dtype=type)
#Coupling to from from to
w_e_to_e = numpy.sum(numpy.dot(w[iE][:,iE], numpy.dot(i_e, x1EQ[:,iE]) - numpy.dot(i_e, x1EQ[:,iE]).T), axis=1)
w_x0_to_e = numpy.sum(numpy.dot(w[iE][:, ix0], numpy.dot(i_e, x0) - numpy.dot(i_x0, x1EQ[:,iE]).T), axis=1)
w_e_to_x0 = numpy.sum(numpy.dot(w[ix0][:,iE], numpy.dot(i_x0, x1EQ[:,iE]) - numpy.dot(i_e, x0).T), axis=1)
w_x0_to_x0 = numpy.sum(numpy.dot(w[ix0][:,ix0], numpy.dot(i_x0, x0) - numpy.dot(i_x0, x0).T), axis=1)
fun = numpy.array(x1EQ.shape)
#Known x1eq, unknown x0:
fun[iE] = fz_lin_calc(x1EQ[iE], x[iE], x0cr[iE], rx0[iE], z=zEQ[iE], coupl=K[iE] * (w_e_to_e + w_x0_to_e))
# Known x0, unknown x1eq:
fun[ix0] = fz_lin_calc(x[ix0], x0, x0cr[ix0], rx0[ix0], z=zeq_2d_calc(x[ix0], y0[ix0], Iext1[ix0]),
coupl=K[ix0] * (w_e_to_x0 + w_x0_to_x0))
return fun
def x1eq_x0_hypo_optimize_jac(x, ix0, iE, x1EQ, zEQ, x0, x0cr, rx0, y0, Iext1, K, w):
no_x0 = len(ix0)
no_e = len(iE)
n_regions = no_e + no_x0
type = x1EQ.dtype
i_x0 = numpy.ones((no_x0, 1), dtype=type)
jac_e_x0e = numpy.diag(- 4 * rx0[iE])
jac_e_x1o = -numpy.dot(i_x0, K[:,iE]) * w[iE][:,ix0]
jac_x0_x0e = numpy.zeros((no_x0,no_e),dtype = type)
jac_x0_x1o = numpy.diag(4 + 3 * x[ix0] ** 2 + 4 * x[ix0] + K[ix0] * numpy.sum(w[ix0][:,ix0], axis=1)) \
- numpy.dot(i_x0, K[:, ix0]) * w[ix0][:, ix0]
jac = numpy.zeros((n_regions,n_regions), dtype=type)
jac[iE][:,iE] = jac_e_x0e
jac[iE][:, ix0] = jac_e_x1o
jac[ix0][:, iE] = jac_x0_x0e
jac[ix0][:, ix0] = jac_x0_x1o
return jac
def x1eq_x0_hypo_optimize(ix0, iE, x1EQ, zEQ, x0, x0cr, rx0, y0, Iext1, K, w):
xinit = numpy.zeros(x1EQ.shape, dtype = x1EQ.dtype)
#Set initial conditions for the optimization algorithm, by ignoring coupling (=0)
# fz = 4 * (x1 - r * x0 + x0cr) - z -coupling = 0
#x0init = (x1 + x0cr -z/4) / rx0
xinit[:, iE] = x0_calc(x1EQ[:, iE], zEQ[:, iE], x0cr[:, iE], rx0[:, iE], 0.0)
#x1eqinit = rx0 * x0 - x0cr + z / 4
xinit[:, ix0] = rx0[:, ix0] * x0 - x0cr[:, ix0] + zEQ[:, ix0] / 4
#Solve:
sol = root(x1eq_x0_hypo_optimize_fun, xinit, args=(ix0, iE, x1EQ, zEQ, x0, x0cr, rx0, y0, Iext1, K, w),
method='lm', jac=x1eq_x0_hypo_optimize_jac, tol=10**(-6), callback=None, options=None) #method='hybr'
if sol.success:
x1EQ[:,ix0] = sol.x[:, ix0]
return x1EQ
else:
raise ValueError(sol.message)
def x1eq_x0_hypo_linTaylor(ix0,iE,x1EQ,zEQ,x0,x0cr,rx0,y0,Iext1,K,w):
no_x0 = len(ix0)
no_e = len(iE)
n_regions = no_e + no_x0
# The equilibria of the nodes of fixed epileptogenicity
x1_eq = x1EQ[:, iE]
z_eq = zEQ[:, iE]
#Prepare linear system to solve:
#The point of the linear Taylor expansion
x1LIN = x1_lin_def(X1_DEF, X1_EQ_CR_DEF, n_regions)
# For regions of fixed equilibria:
ii_e = numpy.ones((1, no_e), dtype=numpy.float32)
we_to_e = numpy.expand_dims(numpy.sum(w[iE][:, iE] * (numpy.dot(ii_e.T, x1_eq) -
numpy.dot(x1_eq.T, ii_e)), axis=1), 1).T
wx0_to_e = -x1_eq * numpy.expand_dims(numpy.sum(w[ix0][:, iE], axis=0), 0)
be = 4.0 * (x1_eq + x0cr[:, iE]) - z_eq - K[:, iE] * (we_to_e + wx0_to_e)
# For regions of fixed x0:
ii_x0 = numpy.ones((1, no_x0), dtype=numpy.float32)
we_to_x0 = numpy.expand_dims(numpy.sum(w[ix0][:, iE] * numpy.dot(ii_x0.T, x1_eq), axis=1), 1).T
bx0 = 4.0 * (x0cr[:, ix0] - rx0[:, ix0] * x0) - y0[:, ix0] - Iext1[:, ix0] \
- 2.0 * x1LIN[:, ix0] ** 3 - 2.0 * x1LIN[:, ix0] ** 2 - K[:, ix0] * we_to_x0
# Concatenate B vector:
b = -numpy.concatenate((be, bx0), axis=1).T
# From-to Epileptogenicity-fixed regions
# ae_to_e = -4 * numpy.eye( no_e, dtype=numpy.float32 )
ae_to_e = -4 * numpy.diag(rx0[0, iE])
# From x0-fixed regions to Epileptogenicity-fixed regions
ax0_to_e = -numpy.dot(K[:, iE].T, ii_x0) * w[iE][:, ix0]
# From Epileptogenicity-fixed regions to x0-fixed regions
ae_to_x0 = numpy.zeros((no_x0, no_e), dtype=numpy.float32)
# From-to x0-fixed regions
ax0_to_x0 = numpy.diag((4.0 + 3.0 * (x1LIN[:, ix0] ** 2 + 4.0 * x1LIN[:, ix0]) +
K[0, ix0] * numpy.expand_dims(numpy.sum(w[ix0][:, ix0], axis=0), 0)).T[:, 0]) \
- numpy.dot(K[:, ix0].T, ii_x0) * w[ix0][:, ix0]
# Concatenate A matrix
a = numpy.concatenate((numpy.concatenate((ae_to_e, ax0_to_e), axis=1),
numpy.concatenate((ae_to_x0, ax0_to_x0), axis=1)), axis=0)
# Solve the system
x = numpy.dot(numpy.linalg.inv(a), b).T
# Unpack solution:
# The equilibria of the regions with fixed E have not changed:
# The equilibria of the regions with fixed x0:
x1EQ[0, ix0] = x[0, no_e:]
return x1EQ
def x0cr_rx0_calc(y0, Iext1, epileptor_model = "2d", zmode = numpy.array("lin"),
x1rest = X1_DEF, x1cr = X1_EQ_CR_DEF, x0def = X0_DEF, x0cr_def = X0_CR_DEF):
#Define the symbolic variables we need:
(y01, I1, x1, z, x0, r, x0cr, f1, fz) = symbols('y01 I1 x1 z x0 r x0cr f1 fz')
#Define the fx1(x1) expression (assuming centered x1 in all cases)...
if isinstance(epileptor_model,EpileptorDP2D) or epileptor_model=="2d":
#...for the 2D permittivity coupling approximation, Proix et al 2014
fx1 = x1 ** 3 + 2 * x1 ** 2
else:
#...or for the original (>=6D) epileptor
fx1 = x1 ** 3 - 3 * x1 ** 2
#...and the z expression, coming from solving dx1/dt=f1(x1,z)=0
z = y01 - fx1 + I1
#Define the fz expression...
if zmode == 'lin':
#...for linear...
fz = 4 * (x1 - r * x0 + x0cr) - z
elif zmode == 'sig':
#...and sigmoidal versions
fz = 3/(1 + exp(-10 * (x1 + 0.5))) - r * x0 + x0cr - z
else:
raise ValueError('zmode is neither "lin" nor "sig"')
#Solve the fz expression for rx0 and x0cr, assuming the following two points (x1eq,x0) = [(-5/3,0.0),(-4/3,1.0)]...
#...and WITHOUT COUPLING
fz_sol = solve([fz.subs([(x1, x1rest), (x0, x0def), (z, z.subs(x1, x1rest))]),
fz.subs([(x1, x1cr), (x0, x0cr_def), (z, z.subs(x1, x1cr))])], r, x0cr)
#Convert the solution of x0cr from expression to function that accepts numpy arrays as inputs:
x0cr = lambdify((y01,I1),fz_sol[x0cr],'numpy')
#Compute the actual x0cr now given the inputs y0 and Iext1
x0cr = x0cr(y0, Iext1)
#The rx0 doesn' depend on y0 and Iext1, therefore...
rx0 = fz_sol[r]*numpy.ones(shape=x0cr.shape)
return x0cr, rx0
def coupling_calc(x1, K, w):
#Note that for difference coupling it doesn't matter whether we use centered x1 or decentered x1-5/3
# Only difference coupling for the moment.
# TODO: Extend for different coupling forms
n_regions = x1.size
i_n = numpy.ones((n_regions,1), dtype='f')
# Coupling from to
return K*numpy.sum(numpy.dot(w, numpy.dot(i_n, x1) - numpy.dot(i_n, x1).T), axis=1)
def x0_calc(x1, z, x0cr, rx0, coupl, zmode=numpy.array("lin")):
if zmode == 'lin':
return (x1 + x0cr - (z+coupl) / 4) / rx0
elif zmode == 'sig':
return (3 / (1 + numpy.exp(-10 * (x1 + 0.5))) + x0cr - z + coupl) / rx0
else:
raise ValueError('zmode is neither "lin" nor "sig"')
def calc_equilibrium_point(epileptor_model, hypothesis):
#De-center them:
if isinstance(epileptor_model,EpileptorDP2D):
if epileptor_model.zmode == 'sig':
#2D approximation, Proix et al 2014
zeq = zeq_2d_calc(hypothesis.x1EQ, epileptor_model.yc.T, epileptor_model.Iext1.T)
else:
zeq = hypothesis.zEQ
return hypothesis.x1EQ, zeq
else:
#all >=6D models
if isinstance(epileptor_model, Epileptor):
#tvb
zeq = zeq_6d_calc(hypothesis.x1EQ, epileptor_model.c.T, epileptor_model.Iext.T)
y1eq = y1eq_calc(hypothesis.x1EQ, epileptor_model.c.T)
else:
zeq = zeq_6d_calc(hypothesis.x1EQ, epileptor_model.yc.T, epileptor_model.Iext1.T)
y1eq = y1eq_calc(hypothesis.x1EQ, epileptor_model.yc.T)
(x2eq, y2eq) = pop2eq_calc(hypothesis.x1EQ, zeq, epileptor_model.Iext2.T)
geq = geq_calc(hypothesis.x1EQ)
if isinstance(epileptor_model, EpileptorDPrealistic):
#the 11D "realistic" simulations model
return hypothesis.x1EQ, y1eq, zeq, x2eq, y2eq, geq, epileptor_model.x0.T, epileptor_model.slope.T, \
epileptor_model.Iext1.T, epileptor_model.Iext2.T, epileptor_model.K.T
else:
#all >=6D models
return hypothesis.x1EQ, y1eq, zeq, x2eq, y2eq, geq
| x1eq_def | identifier_name |
equilibrium_computation.py | """
@version $Id: equilibrium_computation.py 2017-02-10 16:08 denis $
Module to compute the resting equilibrium point of a Virtual Epileptic Patient module
"""
import numpy
from scipy.optimize import root
from sympy import symbols, exp, solve, lambdify
from tvb_epilepsy.base.constants import X1_DEF, X1_EQ_CR_DEF, X0_DEF, X0_CR_DEF
from tvb_epilepsy.tvb_api.epileptor_models import *
from tvb.simulator.models import Epileptor
#Currently we assume only difference coupling (permittivity coupling following Proix et al 2014)
#TODO: to generalize for different coupling functions
def x1eq_def(X1_DEF, X1_EQ_CR_DEF, n_regions):
#The default initial condition for x1 equilibrium search
return numpy.repeat(numpy.array((X1_EQ_CR_DEF + X1_DEF) / 2.0), n_regions)
def x1_lin_def(X1_DEF, X1_EQ_CR_DEF, n_regions):
# The point of the linear Taylor expansion
return numpy.repeat(numpy.array((X1_EQ_CR_DEF + X1_DEF) / 2.0), n_regions)
def fx1_2d_calc(x1, z=0, y0=0, Iext1=0):
x12 = x1 ** 2
return -x1 * x12 - 2 * x12 - z + y0 + Iext1
def fx1_6d_calc(x1, z=0, y0=0, Iext1=0):
x12 = x1 ** 2
return -x1 * x12 + 3 * x12 - z + y0 + Iext1
def fz_lin_calc(x1, x0, x0cr, r, z=0, coupl=0):
return 4 * (x1 - r * x0 + x0cr) - z - coupl
def fz_sig_calc(x1, x0, x0cr, r, z=0, coupl=0):
return 3/(1 + exp(-10 * (x1 + 0.5))) - r * x0 + x0cr - z - coupl
def zeq_2d_calc(x1eq, y0, Iext1):
|
def zeq_6d_calc(x1eq, y0, Iext1):
return fx1_6d_calc(x1eq, z=0, y0=y0, Iext1=Iext1)
def y1eq_calc(x1eq, d=5.0):
return 1 - d * x1eq ** 2
def pop2eq_calc(x1eq, zeq, Iext2):
shape = x1eq.shape
type = x1eq.dtype
# g_eq = 0.1*x1eq (1)
# y2eq = 0 (2)
y2eq = numpy.zeros(shape, dtype=type)
# -x2eq**3 + x2eq -y2eq+2*g_eq-0.3*(zeq-3.5)+Iext2 =0=> (1),(2)
# -x2eq**3 + x2eq +2*0.1*x1eq-0.3*(zeq-3.5)+Iext2 =0=>
# p3 p1 p0
# -x2eq**3 + x2eq +0.2*x1eq-0.3*(zeq-3.5)+Iext2 =0
p0 = 0.2 * x1eq - 0.3 * (zeq - 3.5) + Iext2
x2eq = numpy.zeros(shape, dtype=type)
for i in range(shape[1]):
x2eq[0 ,i] = numpy.min(numpy.real(numpy.roots([-1.0, 0.0, 1.0, p0[0 ,i]])))
return x2eq, y2eq
# def pop2eq_calc(n_regions,x1eq,zeq,Iext2):
# shape = x1eq.shape
# type = x1eq.dtype
# #g_eq = 0.1*x1eq (1)
# #y2eq = 6*(x2eq+0.25)*x1eq (2)
# #-x2eq**3 + x2eq -y2eq+2*g_eq-0.3*(zeq-3.5)+Iext2 =0=> (1),(2)
# #-x2eq**3 + x2eq -6*(x2eq+0.25)*x1eq+2*0.1*x1eq-0.3*(zeq-3.5)+Iext2 =0=>
# #-x2eq**3 + (1.0-6*x1eq)*x2eq -1.5*x1eq+ 0.2*x1eq-0.3*(zeq-3.5)+Iext2 =0
# #p3 p1 p0
# #-x2eq**3 + (1.0-6*x1eq)*x2eq -1.3*x1eq -0.3*(zeq-3.5) +Iext2 =0
# p0 = -1.3*x1eq-0.3*(zeq-3.5)+Iext2
# p1 = 1.0-6*x1eq
# x2eq = numpy.zeros(shape, dtype=type)
# for i in range(shape[1]):
# x2eq[0 ,i] = numpy.min( numpy.real( numpy.roots([-1.0, 0.0, p1[i,0], p0[i,0] ]) ) )
# #(2):
# y2eq = 6*(x2eq+0.25)*x1eq
# return x2eq, y2eq
def geq_calc(x1eq):
return 0.1 * x1eq
def x1eq_x0_hypo_optimize_fun(x, ix0, iE, x1EQ, zEQ, x0, x0cr, rx0, y0, Iext1, K, w):
no_x0 = len(ix0)
no_e = len(iE)
type = x1EQ.dtype
i_e = numpy.ones((no_e,1), dtype=type)
i_x0 = numpy.ones((no_x0,1), dtype=type)
#Coupling to from from to
w_e_to_e = numpy.sum(numpy.dot(w[iE][:,iE], numpy.dot(i_e, x1EQ[:,iE]) - numpy.dot(i_e, x1EQ[:,iE]).T), axis=1)
w_x0_to_e = numpy.sum(numpy.dot(w[iE][:, ix0], numpy.dot(i_e, x0) - numpy.dot(i_x0, x1EQ[:,iE]).T), axis=1)
w_e_to_x0 = numpy.sum(numpy.dot(w[ix0][:,iE], numpy.dot(i_x0, x1EQ[:,iE]) - numpy.dot(i_e, x0).T), axis=1)
w_x0_to_x0 = numpy.sum(numpy.dot(w[ix0][:,ix0], numpy.dot(i_x0, x0) - numpy.dot(i_x0, x0).T), axis=1)
fun = numpy.array(x1EQ.shape)
#Known x1eq, unknown x0:
fun[iE] = fz_lin_calc(x1EQ[iE], x[iE], x0cr[iE], rx0[iE], z=zEQ[iE], coupl=K[iE] * (w_e_to_e + w_x0_to_e))
# Known x0, unknown x1eq:
fun[ix0] = fz_lin_calc(x[ix0], x0, x0cr[ix0], rx0[ix0], z=zeq_2d_calc(x[ix0], y0[ix0], Iext1[ix0]),
coupl=K[ix0] * (w_e_to_x0 + w_x0_to_x0))
return fun
def x1eq_x0_hypo_optimize_jac(x, ix0, iE, x1EQ, zEQ, x0, x0cr, rx0, y0, Iext1, K, w):
no_x0 = len(ix0)
no_e = len(iE)
n_regions = no_e + no_x0
type = x1EQ.dtype
i_x0 = numpy.ones((no_x0, 1), dtype=type)
jac_e_x0e = numpy.diag(- 4 * rx0[iE])
jac_e_x1o = -numpy.dot(i_x0, K[:,iE]) * w[iE][:,ix0]
jac_x0_x0e = numpy.zeros((no_x0,no_e),dtype = type)
jac_x0_x1o = numpy.diag(4 + 3 * x[ix0] ** 2 + 4 * x[ix0] + K[ix0] * numpy.sum(w[ix0][:,ix0], axis=1)) \
- numpy.dot(i_x0, K[:, ix0]) * w[ix0][:, ix0]
jac = numpy.zeros((n_regions,n_regions), dtype=type)
jac[iE][:,iE] = jac_e_x0e
jac[iE][:, ix0] = jac_e_x1o
jac[ix0][:, iE] = jac_x0_x0e
jac[ix0][:, ix0] = jac_x0_x1o
return jac
def x1eq_x0_hypo_optimize(ix0, iE, x1EQ, zEQ, x0, x0cr, rx0, y0, Iext1, K, w):
xinit = numpy.zeros(x1EQ.shape, dtype = x1EQ.dtype)
#Set initial conditions for the optimization algorithm, by ignoring coupling (=0)
# fz = 4 * (x1 - r * x0 + x0cr) - z -coupling = 0
#x0init = (x1 + x0cr -z/4) / rx0
xinit[:, iE] = x0_calc(x1EQ[:, iE], zEQ[:, iE], x0cr[:, iE], rx0[:, iE], 0.0)
#x1eqinit = rx0 * x0 - x0cr + z / 4
xinit[:, ix0] = rx0[:, ix0] * x0 - x0cr[:, ix0] + zEQ[:, ix0] / 4
#Solve:
sol = root(x1eq_x0_hypo_optimize_fun, xinit, args=(ix0, iE, x1EQ, zEQ, x0, x0cr, rx0, y0, Iext1, K, w),
method='lm', jac=x1eq_x0_hypo_optimize_jac, tol=10**(-6), callback=None, options=None) #method='hybr'
if sol.success:
x1EQ[:,ix0] = sol.x[:, ix0]
return x1EQ
else:
raise ValueError(sol.message)
def x1eq_x0_hypo_linTaylor(ix0,iE,x1EQ,zEQ,x0,x0cr,rx0,y0,Iext1,K,w):
no_x0 = len(ix0)
no_e = len(iE)
n_regions = no_e + no_x0
# The equilibria of the nodes of fixed epileptogenicity
x1_eq = x1EQ[:, iE]
z_eq = zEQ[:, iE]
#Prepare linear system to solve:
#The point of the linear Taylor expansion
x1LIN = x1_lin_def(X1_DEF, X1_EQ_CR_DEF, n_regions)
# For regions of fixed equilibria:
ii_e = numpy.ones((1, no_e), dtype=numpy.float32)
we_to_e = numpy.expand_dims(numpy.sum(w[iE][:, iE] * (numpy.dot(ii_e.T, x1_eq) -
numpy.dot(x1_eq.T, ii_e)), axis=1), 1).T
wx0_to_e = -x1_eq * numpy.expand_dims(numpy.sum(w[ix0][:, iE], axis=0), 0)
be = 4.0 * (x1_eq + x0cr[:, iE]) - z_eq - K[:, iE] * (we_to_e + wx0_to_e)
# For regions of fixed x0:
ii_x0 = numpy.ones((1, no_x0), dtype=numpy.float32)
we_to_x0 = numpy.expand_dims(numpy.sum(w[ix0][:, iE] * numpy.dot(ii_x0.T, x1_eq), axis=1), 1).T
bx0 = 4.0 * (x0cr[:, ix0] - rx0[:, ix0] * x0) - y0[:, ix0] - Iext1[:, ix0] \
- 2.0 * x1LIN[:, ix0] ** 3 - 2.0 * x1LIN[:, ix0] ** 2 - K[:, ix0] * we_to_x0
# Concatenate B vector:
b = -numpy.concatenate((be, bx0), axis=1).T
# From-to Epileptogenicity-fixed regions
# ae_to_e = -4 * numpy.eye( no_e, dtype=numpy.float32 )
ae_to_e = -4 * numpy.diag(rx0[0, iE])
# From x0-fixed regions to Epileptogenicity-fixed regions
ax0_to_e = -numpy.dot(K[:, iE].T, ii_x0) * w[iE][:, ix0]
# From Epileptogenicity-fixed regions to x0-fixed regions
ae_to_x0 = numpy.zeros((no_x0, no_e), dtype=numpy.float32)
# From-to x0-fixed regions
ax0_to_x0 = numpy.diag((4.0 + 3.0 * (x1LIN[:, ix0] ** 2 + 4.0 * x1LIN[:, ix0]) +
K[0, ix0] * numpy.expand_dims(numpy.sum(w[ix0][:, ix0], axis=0), 0)).T[:, 0]) \
- numpy.dot(K[:, ix0].T, ii_x0) * w[ix0][:, ix0]
# Concatenate A matrix
a = numpy.concatenate((numpy.concatenate((ae_to_e, ax0_to_e), axis=1),
numpy.concatenate((ae_to_x0, ax0_to_x0), axis=1)), axis=0)
# Solve the system
x = numpy.dot(numpy.linalg.inv(a), b).T
# Unpack solution:
# The equilibria of the regions with fixed E have not changed:
# The equilibria of the regions with fixed x0:
x1EQ[0, ix0] = x[0, no_e:]
return x1EQ
def x0cr_rx0_calc(y0, Iext1, epileptor_model = "2d", zmode = numpy.array("lin"),
x1rest = X1_DEF, x1cr = X1_EQ_CR_DEF, x0def = X0_DEF, x0cr_def = X0_CR_DEF):
#Define the symbolic variables we need:
(y01, I1, x1, z, x0, r, x0cr, f1, fz) = symbols('y01 I1 x1 z x0 r x0cr f1 fz')
#Define the fx1(x1) expression (assuming centered x1 in all cases)...
if isinstance(epileptor_model,EpileptorDP2D) or epileptor_model=="2d":
#...for the 2D permittivity coupling approximation, Proix et al 2014
fx1 = x1 ** 3 + 2 * x1 ** 2
else:
#...or for the original (>=6D) epileptor
fx1 = x1 ** 3 - 3 * x1 ** 2
#...and the z expression, coming from solving dx1/dt=f1(x1,z)=0
z = y01 - fx1 + I1
#Define the fz expression...
if zmode == 'lin':
#...for linear...
fz = 4 * (x1 - r * x0 + x0cr) - z
elif zmode == 'sig':
#...and sigmoidal versions
fz = 3/(1 + exp(-10 * (x1 + 0.5))) - r * x0 + x0cr - z
else:
raise ValueError('zmode is neither "lin" nor "sig"')
#Solve the fz expression for rx0 and x0cr, assuming the following two points (x1eq,x0) = [(-5/3,0.0),(-4/3,1.0)]...
#...and WITHOUT COUPLING
fz_sol = solve([fz.subs([(x1, x1rest), (x0, x0def), (z, z.subs(x1, x1rest))]),
fz.subs([(x1, x1cr), (x0, x0cr_def), (z, z.subs(x1, x1cr))])], r, x0cr)
#Convert the solution of x0cr from expression to function that accepts numpy arrays as inputs:
x0cr = lambdify((y01,I1),fz_sol[x0cr],'numpy')
#Compute the actual x0cr now given the inputs y0 and Iext1
x0cr = x0cr(y0, Iext1)
#The rx0 doesn' depend on y0 and Iext1, therefore...
rx0 = fz_sol[r]*numpy.ones(shape=x0cr.shape)
return x0cr, rx0
def coupling_calc(x1, K, w):
#Note that for difference coupling it doesn't matter whether we use centered x1 or decentered x1-5/3
# Only difference coupling for the moment.
# TODO: Extend for different coupling forms
n_regions = x1.size
i_n = numpy.ones((n_regions,1), dtype='f')
# Coupling from to
return K*numpy.sum(numpy.dot(w, numpy.dot(i_n, x1) - numpy.dot(i_n, x1).T), axis=1)
def x0_calc(x1, z, x0cr, rx0, coupl, zmode=numpy.array("lin")):
if zmode == 'lin':
return (x1 + x0cr - (z+coupl) / 4) / rx0
elif zmode == 'sig':
return (3 / (1 + numpy.exp(-10 * (x1 + 0.5))) + x0cr - z + coupl) / rx0
else:
raise ValueError('zmode is neither "lin" nor "sig"')
def calc_equilibrium_point(epileptor_model, hypothesis):
#De-center them:
if isinstance(epileptor_model,EpileptorDP2D):
if epileptor_model.zmode == 'sig':
#2D approximation, Proix et al 2014
zeq = zeq_2d_calc(hypothesis.x1EQ, epileptor_model.yc.T, epileptor_model.Iext1.T)
else:
zeq = hypothesis.zEQ
return hypothesis.x1EQ, zeq
else:
#all >=6D models
if isinstance(epileptor_model, Epileptor):
#tvb
zeq = zeq_6d_calc(hypothesis.x1EQ, epileptor_model.c.T, epileptor_model.Iext.T)
y1eq = y1eq_calc(hypothesis.x1EQ, epileptor_model.c.T)
else:
zeq = zeq_6d_calc(hypothesis.x1EQ, epileptor_model.yc.T, epileptor_model.Iext1.T)
y1eq = y1eq_calc(hypothesis.x1EQ, epileptor_model.yc.T)
(x2eq, y2eq) = pop2eq_calc(hypothesis.x1EQ, zeq, epileptor_model.Iext2.T)
geq = geq_calc(hypothesis.x1EQ)
if isinstance(epileptor_model, EpileptorDPrealistic):
#the 11D "realistic" simulations model
return hypothesis.x1EQ, y1eq, zeq, x2eq, y2eq, geq, epileptor_model.x0.T, epileptor_model.slope.T, \
epileptor_model.Iext1.T, epileptor_model.Iext2.T, epileptor_model.K.T
else:
#all >=6D models
return hypothesis.x1EQ, y1eq, zeq, x2eq, y2eq, geq
| return fx1_2d_calc(x1eq, z=0, y0=y0, Iext1=Iext1) | identifier_body |
hverse-encounter-helper.user.js | /* eslint-disable max-len */
// ==UserScript==
// @name HentaiVerse Encounter Unclicker
// @namespace PrincessRTFM
// @version 3.2.1
// @description Massively improves the useability/interface of the HentaiVerse random encounters system; tracks the time since the last event (and what it was), automatically opens random encounters, synchronises display updates, safe to open in multiple tabs (and will update all tabs accordingly)
// @author Lilith
// @match https://e-hentai.org/*
// @updateURL https://gh.princessrtfm.com/js/monkey/hverse-encounter-helper.user.js
// @grant GM.openInTab
// @grant GM.notification
// @grant GM_addStyle
// @grant GM_getValue
// @grant GM_setValue
// @grant GM_addValueChangeListener
// @require https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js
// @require https://cdnjs.cloudflare.com/ajax/libs/moment.js/2.22.2/moment.min.js
// @require http://princessrtfm.com/js/lib/moment-timer.js
// ==/UserScript==
/*
CHANGELOG:
v1.0.0 - initial release, just removes onclick onhandler
v1.1.0 - added timer since page opened
v1.2.0 - auto-opens in background tab
v1.3.0 - updates page title with timer, opens in foreground tab
v1.3.1 - page title update timer capitalises the first character if it's a letter
v1.3.2 - page title update timer includes the suffix 'ago'
v1.3.3 - fixed the tab opening, actually foregrounds now
v1.4.0 - timer is always injected, an event pane will be created saying that nothing has happened if no event was detected
v1.5.0 - implemented persistent tracking of the time of the last event
v1.6.0 - implemented cross-tab communication to handle events in other tabs, removed the popup notification
v1.7.0 - now tracks the last event text as well as time, so the persistent tracking display makes more sense
v1.7.1 - fixed the bug where opening a page with no event would smash the persistent storage of the event text
v1.7.2 - properly rewrote the "dawn of a new day" header to include the timer
v1.8.0 - title text now includes a short description of the last event detected, updating cross-tab
v1.9.0 - reverted to background tabs for the automatic link opening - the rest has changed enough that foregrounding is too annoying; if `#game` is added to URL, opens in current tab
v1.10.0 - shorten the time-since label in the title
v1.11.0 - cleans up the eventpane contents from "dawn of a new day" events
v2.0.0 - now operates on a master/slave system with BroadcastChannel messages to keep everything synchronised; foundation built for slave displays to not calc updates
v2.0.1 - no longer operates on the hentaiverse pages, BroadcastChannels are not cross-origin so each domain gets one master instance
v3.0.0 - un-stupified the backend (organic code growth is bad, kids)
v3.1.0 - added an "enter the hentaiverse" link on the event pane if there isn't one already
v3.1.1 - fixed a typo, cleaned the file, and bumped the version (forgot to set to 3.1.0)
v3.2.0 - added a timer to reload the page (master page only) after 24 hours, for automated new day xp collection
v3.2.1 - fixed a nasty edge case bug causing an infinite reload loop
PLANNED:
[MINOR] Make the master page post a notification (via GM.notification) when the timer runs out
[MAJOR] Use AJAX to get the news page and update the eventpane with the new content when the timer runs out
*/
/* eslint-enable max-len */
/* global GM_addValueChangeListener, jQuery, $, moment */
// SCRIPT INITIALISATION BEGINS \\
const SCRIPT_NAME = `${GM_info.script.name} V${GM_info.script.version || '???'}`;
const EVTPANE_CSS = [
"width: 720px;",
"height: auto;",
"margin: 5px auto 0px;",
"background: rgb(242, 239, 223);",
"border: 1px solid rgb(92, 13, 18);",
"padding: 3px;",
"font-size: 9pt;",
"text-align: center !important;",
];
const LAST_EVENT_TIMESTAMP_KEY = "lastEventTime";
const LAST_EVENT_NAME_KEY = "lastEventName";
const AUTO_OPEN_IN_BACKGROUND = true;
const PAGE_TITLE = `[$PERIOD.SHORT$ $EVENT.SHORT$] $STATUS$ E-Hentai`;
const HEADER_TEXT = `You $EVENT$ $PERIOD$ ago!`;
const EVENT_CHECKS = {
NEW_DAY: /dawn.+?new\s+day/ui,
RANDOM_FIGHT: /encountered\s+a\s+monster/ui,
};
const EVENT_LABELS = {
NEW_DAY: "woke to a new day",
RANDOM_FIGHT: "encountered a monster",
NO_EVENT: "have been bored since",
};
const EVENT_TITLES = {
NEW_DAY: "🌞",
RANDOM_FIGHT: "💥",
NO_EVENT: "❌",
};
const BUG_CHARS = Object.defineProperty([
'💀',
'💣',
'💔',
'💢',
'💥',
'❌',
'🛑',
'❗',
'🐛',
'🦟',
'🦗',
'🐜',
'🐝',
], 'toString', {
value() {
let s = '';
const bits = Array.from(this);
for (let i = 0; i < 5; i++) {
s += bits.splice(Math.floor(Math.random() * bits.length), 1);
}
return s;
},
});
// SCRIPT CORE BEGINS \\
((window, $, moment) => {
// eslint-disable-next-line no-extend-native
Set.prototype.addAll = Set.prototype.addAll || function addAll(iterable) {
Array.from(iterable).forEach(e => this.add(e));
};
const genid = () => ([1e7] + 1e3 + 4e3 + 8e3 + 1e11)
.repeat(2)
.replace(
/[018]/gu,
c => (c ^ crypto.getRandomValues(new Uint8Array(1))[0] & 15 >> c / 4).toString(16)
);
const SCRIPT_ID = genid();
const REGISTRY = new Set([SCRIPT_ID]);
const RADIO = new BroadcastChannel(SCRIPT_NAME);
const broadcast = (message, disguise) => RADIO.postMessage({
source: disguise || SCRIPT_ID,
event: message,
known: Array.from(REGISTRY.values()),
}); // So, you can lie about the source, for good reason! ...well, not GOOD reason.
RADIO.INITIAL_PING = 'PING';
RADIO.SET_SLAVE = 'SYNC';
RADIO.NEW_MASTER = 'EXCH';
RADIO.INSTANCE_GONE = 'GONE';
RADIO.TICK = 'EXEC';
RADIO.initialise = () => {
console.log(`${SCRIPT_ID} << ${RADIO.INITIAL_PING}`);
broadcast(RADIO.INITIAL_PING);
};
RADIO.slaveToMe = () => {
console.log(`${SCRIPT_ID} << ${RADIO.SET_SLAVE}`);
broadcast(RADIO.SET_SLAVE);
};
RADIO.switchMaster = to => {
console.log(`${SCRIPT_ID} << ${RADIO.NEW_MASTER} // ${to}`);
broadcast(RADIO.NEW_MASTER, to);
};
RADIO.unloadSelf = () => {
console.log(`${SCRIPT_ID} << ${RADIO.INSTANCE_GONE}`);
broadcast(RADIO.INSTANCE_GONE);
};
RADIO.runSlaves = () => {
console.log(`${SCRIPT_ID} << ${RADIO.TICK}`);
broadcast(RADIO.TICK);
};
let MASTER_ID = SCRIPT_ID;
let eventPane = $('#eventpane');
let header;
if (eventPane.length) {
eventPane.css('height', 'auto');
const eventLinks = eventPane.find('a[href]');
eventLinks.each((i, e) => {
const link = $(e);
e.addEventListener('click', () => true);
if (link.text().match(/\bfight\b/ui)) {
if (location.hash == "#debug") {
return;
}
if (location.hash == "#game") {
location.replace(e.href);
}
else {
GM.openInTab(e.href, AUTO_OPEN_IN_BACKGROUND);
}
link.hide();
}
});
const lines = eventPane.children('p, div');
header = lines.first();
}
else {
GM_addStyle(`#eventpane {\n${EVTPANE_CSS.map(e => `\t${e}`).join("\n")}\n}`);
eventPane = $('<div id="eventpane"></div>');
header = $('<div style="font-size:10pt; font-weight:bold; padding:0px; margin:12px auto 2px"></div>');
eventPane.append(header);
eventPane.append('<div style="margin-top: 10px;"></div>');
header.text(BUG_CHARS); // You shouldn't actually SEE this, so if you do...
const news = $('#newsinner'); | news.first().prepend(eventPane);
}
else if (gallery.length) {
gallery.after(eventPane);
}
}
if (!eventPane
.find('a[href]')
.filter((i, e) => (e.href || '').toLowerCase().includes('hentaiverse.org'))
.length
) {
eventPane.append('<p><a href="https://hentaiverse.org/">Enter the HentaiVerse</a></p>');
}
$('#nb a[href]').filter((i, e) => (e.href || '').toLowerCase().includes('hentaiverse.org'))
.parents('#nb > *')
.hide();
if (!moment) {
header.attr('title', 'Failed to load moment.js library').css('border-bottom', '2px dotted red');
return;
}
const lastEvent = () => moment(GM_getValue(LAST_EVENT_TIMESTAMP_KEY, 0) || Date.now().valueOf());
const expandTemplate = (tmpl, durationObj, eventKey) => {
const durationStr = durationObj.humanize();
return tmpl
.replace(/\$PERIOD\$/gu, durationStr)
.replace(
/\$PERIOD.SHORT\$/gu,
durationStr
.replace(/^a\s+few\s+/ui, "0 ")
.replace(/^an?\s+/ui, "1 ")
.replace(/^(\d+)\s+([dhms]).*$/u, "$1$2")
)
.replace(/\$EVENT\$/gu, EVENT_LABELS[eventKey])
.replace(/\$EVENT.SHORT\$/gu, EVENT_TITLES[eventKey])
.replace(/\$STATUS\$/gu, MASTER_ID == SCRIPT_ID ? '👑' : '⛓')
.replace(/^(.)(.+)$/u, (match, g1, g2) => g1.toUpperCase() + g2);
};
let start = lastEvent();
let eventKey = GM_getValue(LAST_EVENT_NAME_KEY, 'NO_EVENT');
const headerText = header.text();
console.log(`Retrieved event header: ${headerText}`);
let foundHeader = false;
for (const [
key,
value,
] of Object.entries(EVENT_CHECKS)) {
if (headerText.match(value)) {
start = moment();
eventKey = key;
foundHeader = true;
break;
}
}
GM_setValue(LAST_EVENT_TIMESTAMP_KEY, start.valueOf());
GM_setValue(LAST_EVENT_NAME_KEY, eventKey);
GM_addValueChangeListener(LAST_EVENT_TIMESTAMP_KEY, (key, oval, nval, remote) => {
if (!remote) {
return;
} // Only care about changes in other tabs
start = lastEvent();
});
GM_addValueChangeListener(LAST_EVENT_NAME_KEY, (key, oval, nval, remote) => {
if (!remote) {
return;
} // Only care about changes in other tabs
eventKey = GM_getValue(LAST_EVENT_NAME_KEY, eventKey);
});
const updateDisplay = () => {
console.time("Display update time");
const now = moment();
const period = moment.duration(now.diff(start));
header.text(expandTemplate(HEADER_TEXT, period, eventKey));
document.title = expandTemplate(PAGE_TITLE, period, eventKey);
if (period.asMinutes() >= 30 && !$('#hentaiverse-unclicker-reload').length) {
header.after([
'<div id="hentaiverse-unclicker-reload" style="margin-top: 10px;">',
'A new encounter is ready! ',
'<a href="javascript:window.location.reload()">',
'Click to reload!',
'</a></div>',
].join(''));
}
else if (period.asMinutes() < 30) {
$('#hentaiverse-unclicker-reload').remove();
}
console.timeEnd("Display update time");
// The new automatic reload ONLY reloads the master page.
// If you have more than one page, this means the master page will cycle through all of your
// open pages in the order of oldest to newest. If you only have one, it'll obviously not do that.
if (MASTER_ID == SCRIPT_ID && period.asDays() >= 1) {
if (!foundHeader) {
GM_setValue(LAST_EVENT_TIMESTAMP_KEY, moment().valueOf()); // Fix a nasty edge case I found
}
console.log("24 hours passed, automatic reload triggered");
location.reload();
}
};
const ticker = () => {
updateDisplay();
console.log("Transmitting update command to all slaves");
RADIO.runSlaves();
};
updateDisplay();
const timer = new moment.duration(30000).timer({
loop: true,
start: true,
}, ticker);
$(window).on('unload', () => {
if (REGISTRY.size > 1) { // Only if we're not alone
if (MASTER_ID == SCRIPT_ID) { // If we're the master and we're going away
REGISTRY.delete(SCRIPT_ID); // So we can't pick ourselves
// Send out an event with a fake source, to indicate to all slaved pages
// that they need to reset their master id
RADIO.switchMaster(REGISTRY.values().next().value);
}
else { // The master instance needs to remove us from the registry
RADIO.unloadSelf();
}
}
});
RADIO.addEventListener('message', msgevt => {
const msgobj = msgevt.data;
if (typeof msgobj != 'object') {
return;
}
const msgSrc = msgobj.source;
const msgEvt = msgobj.event;
const msgAll = msgobj.known;
console.log(`${msgSrc} >> ${msgEvt}`);
switch (msgEvt) {
case RADIO.INITIAL_PING:
if (MASTER_ID == SCRIPT_ID) {
console.log("Master instance pinged by new instance, registering");
REGISTRY.add(msgSrc);
console.dir(REGISTRY);
console.log("Slaving new instance to self");
RADIO.slaveToMe();
ticker();
}
else {
console.log("Pinged by new instance, ignored (not the master)");
}
break;
case RADIO.SET_SLAVE:
console.log("Return ping received, disabling local timer and slaving to master");
timer.stop();
MASTER_ID = msgSrc;
break;
case RADIO.NEW_MASTER: // The established master page unloaded, switch to the new one
console.log("The king is dead... long live the king.");
REGISTRY.delete(MASTER_ID); // Remove the now-dead page (if it was in there)
MASTER_ID = msgSrc;
REGISTRY.add(MASTER_ID);
if (MASTER_ID == SCRIPT_ID) { // Are WE the new master?
console.log("The prince has risen to power now!");
timer.start(); // If so, start our timer
REGISTRY.addAll(msgAll); // Load the registry details
ticker(); // Update everybody
}
console.dir(REGISTRY);
break;
case RADIO.INSTANCE_GONE:
REGISTRY.delete(msgSrc);
if (MASTER_ID == SCRIPT_ID) {
console.log("Instance terminated, removed from registry");
}
break;
case RADIO.TICK:
console.log("Received tick instruction, updating display");
updateDisplay();
break;
default:
console.error('Received unknown broadcast, this is probably a Bad Thing!');
break;
}
});
console.log("Sending inquisitive ping");
RADIO.initialise();
})(window, window.jQuery || window.$ || jQuery || $, moment);
// END SCRIPT \\ | const gallery = $('#nb');
if (news.length) { | random_line_split |
hverse-encounter-helper.user.js | /* eslint-disable max-len */
// ==UserScript==
// @name HentaiVerse Encounter Unclicker
// @namespace PrincessRTFM
// @version 3.2.1
// @description Massively improves the useability/interface of the HentaiVerse random encounters system; tracks the time since the last event (and what it was), automatically opens random encounters, synchronises display updates, safe to open in multiple tabs (and will update all tabs accordingly)
// @author Lilith
// @match https://e-hentai.org/*
// @updateURL https://gh.princessrtfm.com/js/monkey/hverse-encounter-helper.user.js
// @grant GM.openInTab
// @grant GM.notification
// @grant GM_addStyle
// @grant GM_getValue
// @grant GM_setValue
// @grant GM_addValueChangeListener
// @require https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js
// @require https://cdnjs.cloudflare.com/ajax/libs/moment.js/2.22.2/moment.min.js
// @require http://princessrtfm.com/js/lib/moment-timer.js
// ==/UserScript==
/*
CHANGELOG:
v1.0.0 - initial release, just removes onclick onhandler
v1.1.0 - added timer since page opened
v1.2.0 - auto-opens in background tab
v1.3.0 - updates page title with timer, opens in foreground tab
v1.3.1 - page title update timer capitalises the first character if it's a letter
v1.3.2 - page title update timer includes the suffix 'ago'
v1.3.3 - fixed the tab opening, actually foregrounds now
v1.4.0 - timer is always injected, an event pane will be created saying that nothing has happened if no event was detected
v1.5.0 - implemented persistent tracking of the time of the last event
v1.6.0 - implemented cross-tab communication to handle events in other tabs, removed the popup notification
v1.7.0 - now tracks the last event text as well as time, so the persistent tracking display makes more sense
v1.7.1 - fixed the bug where opening a page with no event would smash the persistent storage of the event text
v1.7.2 - properly rewrote the "dawn of a new day" header to include the timer
v1.8.0 - title text now includes a short description of the last event detected, updating cross-tab
v1.9.0 - reverted to background tabs for the automatic link opening - the rest has changed enough that foregrounding is too annoying; if `#game` is added to URL, opens in current tab
v1.10.0 - shorten the time-since label in the title
v1.11.0 - cleans up the eventpane contents from "dawn of a new day" events
v2.0.0 - now operates on a master/slave system with BroadcastChannel messages to keep everything synchronised; foundation built for slave displays to not calc updates
v2.0.1 - no longer operates on the hentaiverse pages, BroadcastChannels are not cross-origin so each domain gets one master instance
v3.0.0 - un-stupified the backend (organic code growth is bad, kids)
v3.1.0 - added an "enter the hentaiverse" link on the event pane if there isn't one already
v3.1.1 - fixed a typo, cleaned the file, and bumped the version (forgot to set to 3.1.0)
v3.2.0 - added a timer to reload the page (master page only) after 24 hours, for automated new day xp collection
v3.2.1 - fixed a nasty edge case bug causing an infinite reload loop
PLANNED:
[MINOR] Make the master page post a notification (via GM.notification) when the timer runs out
[MAJOR] Use AJAX to get the news page and update the eventpane with the new content when the timer runs out
*/
/* eslint-enable max-len */
/* global GM_addValueChangeListener, jQuery, $, moment */
// SCRIPT INITIALISATION BEGINS \\
const SCRIPT_NAME = `${GM_info.script.name} V${GM_info.script.version || '???'}`;
const EVTPANE_CSS = [
"width: 720px;",
"height: auto;",
"margin: 5px auto 0px;",
"background: rgb(242, 239, 223);",
"border: 1px solid rgb(92, 13, 18);",
"padding: 3px;",
"font-size: 9pt;",
"text-align: center !important;",
];
const LAST_EVENT_TIMESTAMP_KEY = "lastEventTime";
const LAST_EVENT_NAME_KEY = "lastEventName";
const AUTO_OPEN_IN_BACKGROUND = true;
const PAGE_TITLE = `[$PERIOD.SHORT$ $EVENT.SHORT$] $STATUS$ E-Hentai`;
const HEADER_TEXT = `You $EVENT$ $PERIOD$ ago!`;
const EVENT_CHECKS = {
NEW_DAY: /dawn.+?new\s+day/ui,
RANDOM_FIGHT: /encountered\s+a\s+monster/ui,
};
const EVENT_LABELS = {
NEW_DAY: "woke to a new day",
RANDOM_FIGHT: "encountered a monster",
NO_EVENT: "have been bored since",
};
const EVENT_TITLES = {
NEW_DAY: "🌞",
RANDOM_FIGHT: "💥",
NO_EVENT: "❌",
};
const BUG_CHARS = Object.defineProperty([
'💀',
'💣',
'💔',
'💢',
'💥',
'❌',
'🛑',
'❗',
'🐛',
'🦟',
'🦗',
'🐜',
'🐝',
], 'toString', {
value() {
let s = '';
const bits = Array. | this);
for (let i = 0; i < 5; i++) {
s += bits.splice(Math.floor(Math.random() * bits.length), 1);
}
return s;
},
});
// SCRIPT CORE BEGINS \\
((window, $, moment) => {
// eslint-disable-next-line no-extend-native
Set.prototype.addAll = Set.prototype.addAll || function addAll(iterable) {
Array.from(iterable).forEach(e => this.add(e));
};
const genid = () => ([1e7] + 1e3 + 4e3 + 8e3 + 1e11)
.repeat(2)
.replace(
/[018]/gu,
c => (c ^ crypto.getRandomValues(new Uint8Array(1))[0] & 15 >> c / 4).toString(16)
);
const SCRIPT_ID = genid();
const REGISTRY = new Set([SCRIPT_ID]);
const RADIO = new BroadcastChannel(SCRIPT_NAME);
const broadcast = (message, disguise) => RADIO.postMessage({
source: disguise || SCRIPT_ID,
event: message,
known: Array.from(REGISTRY.values()),
}); // So, you can lie about the source, for good reason! ...well, not GOOD reason.
RADIO.INITIAL_PING = 'PING';
RADIO.SET_SLAVE = 'SYNC';
RADIO.NEW_MASTER = 'EXCH';
RADIO.INSTANCE_GONE = 'GONE';
RADIO.TICK = 'EXEC';
RADIO.initialise = () => {
console.log(`${SCRIPT_ID} << ${RADIO.INITIAL_PING}`);
broadcast(RADIO.INITIAL_PING);
};
RADIO.slaveToMe = () => {
console.log(`${SCRIPT_ID} << ${RADIO.SET_SLAVE}`);
broadcast(RADIO.SET_SLAVE);
};
RADIO.switchMaster = to => {
console.log(`${SCRIPT_ID} << ${RADIO.NEW_MASTER} // ${to}`);
broadcast(RADIO.NEW_MASTER, to);
};
RADIO.unloadSelf = () => {
console.log(`${SCRIPT_ID} << ${RADIO.INSTANCE_GONE}`);
broadcast(RADIO.INSTANCE_GONE);
};
RADIO.runSlaves = () => {
console.log(`${SCRIPT_ID} << ${RADIO.TICK}`);
broadcast(RADIO.TICK);
};
let MASTER_ID = SCRIPT_ID;
let eventPane = $('#eventpane');
let header;
if (eventPane.length) {
eventPane.css('height', 'auto');
const eventLinks = eventPane.find('a[href]');
eventLinks.each((i, e) => {
const link = $(e);
e.addEventListener('click', () => true);
if (link.text().match(/\bfight\b/ui)) {
if (location.hash == "#debug") {
return;
}
if (location.hash == "#game") {
location.replace(e.href);
}
else {
GM.openInTab(e.href, AUTO_OPEN_IN_BACKGROUND);
}
link.hide();
}
});
const lines = eventPane.children('p, div');
header = lines.first();
}
else {
GM_addStyle(`#eventpane {\n${EVTPANE_CSS.map(e => `\t${e}`).join("\n")}\n}`);
eventPane = $('<div id="eventpane"></div>');
header = $('<div style="font-size:10pt; font-weight:bold; padding:0px; margin:12px auto 2px"></div>');
eventPane.append(header);
eventPane.append('<div style="margin-top: 10px;"></div>');
header.text(BUG_CHARS); // You shouldn't actually SEE this, so if you do...
const news = $('#newsinner');
const gallery = $('#nb');
if (news.length) {
news.first().prepend(eventPane);
}
else if (gallery.length) {
gallery.after(eventPane);
}
}
if (!eventPane
.find('a[href]')
.filter((i, e) => (e.href || '').toLowerCase().includes('hentaiverse.org'))
.length
) {
eventPane.append('<p><a href="https://hentaiverse.org/">Enter the HentaiVerse</a></p>');
}
$('#nb a[href]').filter((i, e) => (e.href || '').toLowerCase().includes('hentaiverse.org'))
.parents('#nb > *')
.hide();
if (!moment) {
header.attr('title', 'Failed to load moment.js library').css('border-bottom', '2px dotted red');
return;
}
const lastEvent = () => moment(GM_getValue(LAST_EVENT_TIMESTAMP_KEY, 0) || Date.now().valueOf());
const expandTemplate = (tmpl, durationObj, eventKey) => {
const durationStr = durationObj.humanize();
return tmpl
.replace(/\$PERIOD\$/gu, durationStr)
.replace(
/\$PERIOD.SHORT\$/gu,
durationStr
.replace(/^a\s+few\s+/ui, "0 ")
.replace(/^an?\s+/ui, "1 ")
.replace(/^(\d+)\s+([dhms]).*$/u, "$1$2")
)
.replace(/\$EVENT\$/gu, EVENT_LABELS[eventKey])
.replace(/\$EVENT.SHORT\$/gu, EVENT_TITLES[eventKey])
.replace(/\$STATUS\$/gu, MASTER_ID == SCRIPT_ID ? '👑' : '⛓')
.replace(/^(.)(.+)$/u, (match, g1, g2) => g1.toUpperCase() + g2);
};
let start = lastEvent();
let eventKey = GM_getValue(LAST_EVENT_NAME_KEY, 'NO_EVENT');
const headerText = header.text();
console.log(`Retrieved event header: ${headerText}`);
let foundHeader = false;
for (const [
key,
value,
] of Object.entries(EVENT_CHECKS)) {
if (headerText.match(value)) {
start = moment();
eventKey = key;
foundHeader = true;
break;
}
}
GM_setValue(LAST_EVENT_TIMESTAMP_KEY, start.valueOf());
GM_setValue(LAST_EVENT_NAME_KEY, eventKey);
GM_addValueChangeListener(LAST_EVENT_TIMESTAMP_KEY, (key, oval, nval, remote) => {
if (!remote) {
return;
} // Only care about changes in other tabs
start = lastEvent();
});
GM_addValueChangeListener(LAST_EVENT_NAME_KEY, (key, oval, nval, remote) => {
if (!remote) {
return;
} // Only care about changes in other tabs
eventKey = GM_getValue(LAST_EVENT_NAME_KEY, eventKey);
});
const updateDisplay = () => {
console.time("Display update time");
const now = moment();
const period = moment.duration(now.diff(start));
header.text(expandTemplate(HEADER_TEXT, period, eventKey));
document.title = expandTemplate(PAGE_TITLE, period, eventKey);
if (period.asMinutes() >= 30 && !$('#hentaiverse-unclicker-reload').length) {
header.after([
'<div id="hentaiverse-unclicker-reload" style="margin-top: 10px;">',
'A new encounter is ready! ',
'<a href="javascript:window.location.reload()">',
'Click to reload!',
'</a></div>',
].join(''));
}
else if (period.asMinutes() < 30) {
$('#hentaiverse-unclicker-reload').remove();
}
console.timeEnd("Display update time");
// The new automatic reload ONLY reloads the master page.
// If you have more than one page, this means the master page will cycle through all of your
// open pages in the order of oldest to newest. If you only have one, it'll obviously not do that.
if (MASTER_ID == SCRIPT_ID && period.asDays() >= 1) {
if (!foundHeader) {
GM_setValue(LAST_EVENT_TIMESTAMP_KEY, moment().valueOf()); // Fix a nasty edge case I found
}
console.log("24 hours passed, automatic reload triggered");
location.reload();
}
};
const ticker = () => {
updateDisplay();
console.log("Transmitting update command to all slaves");
RADIO.runSlaves();
};
updateDisplay();
const timer = new moment.duration(30000).timer({
loop: true,
start: true,
}, ticker);
$(window).on('unload', () => {
if (REGISTRY.size > 1) { // Only if we're not alone
if (MASTER_ID == SCRIPT_ID) { // If we're the master and we're going away
REGISTRY.delete(SCRIPT_ID); // So we can't pick ourselves
// Send out an event with a fake source, to indicate to all slaved pages
// that they need to reset their master id
RADIO.switchMaster(REGISTRY.values().next().value);
}
else { // The master instance needs to remove us from the registry
RADIO.unloadSelf();
}
}
});
RADIO.addEventListener('message', msgevt => {
const msgobj = msgevt.data;
if (typeof msgobj != 'object') {
return;
}
const msgSrc = msgobj.source;
const msgEvt = msgobj.event;
const msgAll = msgobj.known;
console.log(`${msgSrc} >> ${msgEvt}`);
switch (msgEvt) {
case RADIO.INITIAL_PING:
if (MASTER_ID == SCRIPT_ID) {
console.log("Master instance pinged by new instance, registering");
REGISTRY.add(msgSrc);
console.dir(REGISTRY);
console.log("Slaving new instance to self");
RADIO.slaveToMe();
ticker();
}
else {
console.log("Pinged by new instance, ignored (not the master)");
}
break;
case RADIO.SET_SLAVE:
console.log("Return ping received, disabling local timer and slaving to master");
timer.stop();
MASTER_ID = msgSrc;
break;
case RADIO.NEW_MASTER: // The established master page unloaded, switch to the new one
console.log("The king is dead... long live the king.");
REGISTRY.delete(MASTER_ID); // Remove the now-dead page (if it was in there)
MASTER_ID = msgSrc;
REGISTRY.add(MASTER_ID);
if (MASTER_ID == SCRIPT_ID) { // Are WE the new master?
console.log("The prince has risen to power now!");
timer.start(); // If so, start our timer
REGISTRY.addAll(msgAll); // Load the registry details
ticker(); // Update everybody
}
console.dir(REGISTRY);
break;
case RADIO.INSTANCE_GONE:
REGISTRY.delete(msgSrc);
if (MASTER_ID == SCRIPT_ID) {
console.log("Instance terminated, removed from registry");
}
break;
case RADIO.TICK:
console.log("Received tick instruction, updating display");
updateDisplay();
break;
default:
console.error('Received unknown broadcast, this is probably a Bad Thing!');
break;
}
});
console.log("Sending inquisitive ping");
RADIO.initialise();
})(window, window.jQuery || window.$ || jQuery || $, moment);
// END SCRIPT \\
| from( | identifier_name |
hverse-encounter-helper.user.js | /* eslint-disable max-len */
// ==UserScript==
// @name HentaiVerse Encounter Unclicker
// @namespace PrincessRTFM
// @version 3.2.1
// @description Massively improves the useability/interface of the HentaiVerse random encounters system; tracks the time since the last event (and what it was), automatically opens random encounters, synchronises display updates, safe to open in multiple tabs (and will update all tabs accordingly)
// @author Lilith
// @match https://e-hentai.org/*
// @updateURL https://gh.princessrtfm.com/js/monkey/hverse-encounter-helper.user.js
// @grant GM.openInTab
// @grant GM.notification
// @grant GM_addStyle
// @grant GM_getValue
// @grant GM_setValue
// @grant GM_addValueChangeListener
// @require https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js
// @require https://cdnjs.cloudflare.com/ajax/libs/moment.js/2.22.2/moment.min.js
// @require http://princessrtfm.com/js/lib/moment-timer.js
// ==/UserScript==
/*
CHANGELOG:
v1.0.0 - initial release, just removes onclick onhandler
v1.1.0 - added timer since page opened
v1.2.0 - auto-opens in background tab
v1.3.0 - updates page title with timer, opens in foreground tab
v1.3.1 - page title update timer capitalises the first character if it's a letter
v1.3.2 - page title update timer includes the suffix 'ago'
v1.3.3 - fixed the tab opening, actually foregrounds now
v1.4.0 - timer is always injected, an event pane will be created saying that nothing has happened if no event was detected
v1.5.0 - implemented persistent tracking of the time of the last event
v1.6.0 - implemented cross-tab communication to handle events in other tabs, removed the popup notification
v1.7.0 - now tracks the last event text as well as time, so the persistent tracking display makes more sense
v1.7.1 - fixed the bug where opening a page with no event would smash the persistent storage of the event text
v1.7.2 - properly rewrote the "dawn of a new day" header to include the timer
v1.8.0 - title text now includes a short description of the last event detected, updating cross-tab
v1.9.0 - reverted to background tabs for the automatic link opening - the rest has changed enough that foregrounding is too annoying; if `#game` is added to URL, opens in current tab
v1.10.0 - shorten the time-since label in the title
v1.11.0 - cleans up the eventpane contents from "dawn of a new day" events
v2.0.0 - now operates on a master/slave system with BroadcastChannel messages to keep everything synchronised; foundation built for slave displays to not calc updates
v2.0.1 - no longer operates on the hentaiverse pages, BroadcastChannels are not cross-origin so each domain gets one master instance
v3.0.0 - un-stupified the backend (organic code growth is bad, kids)
v3.1.0 - added an "enter the hentaiverse" link on the event pane if there isn't one already
v3.1.1 - fixed a typo, cleaned the file, and bumped the version (forgot to set to 3.1.0)
v3.2.0 - added a timer to reload the page (master page only) after 24 hours, for automated new day xp collection
v3.2.1 - fixed a nasty edge case bug causing an infinite reload loop
PLANNED:
[MINOR] Make the master page post a notification (via GM.notification) when the timer runs out
[MAJOR] Use AJAX to get the news page and update the eventpane with the new content when the timer runs out
*/
/* eslint-enable max-len */
/* global GM_addValueChangeListener, jQuery, $, moment */
// SCRIPT INITIALISATION BEGINS \\
const SCRIPT_NAME = `${GM_info.script.name} V${GM_info.script.version || '???'}`;
const EVTPANE_CSS = [
"width: 720px;",
"height: auto;",
"margin: 5px auto 0px;",
"background: rgb(242, 239, 223);",
"border: 1px solid rgb(92, 13, 18);",
"padding: 3px;",
"font-size: 9pt;",
"text-align: center !important;",
];
const LAST_EVENT_TIMESTAMP_KEY = "lastEventTime";
const LAST_EVENT_NAME_KEY = "lastEventName";
const AUTO_OPEN_IN_BACKGROUND = true;
const PAGE_TITLE = `[$PERIOD.SHORT$ $EVENT.SHORT$] $STATUS$ E-Hentai`;
const HEADER_TEXT = `You $EVENT$ $PERIOD$ ago!`;
const EVENT_CHECKS = {
NEW_DAY: /dawn.+?new\s+day/ui,
RANDOM_FIGHT: /encountered\s+a\s+monster/ui,
};
const EVENT_LABELS = {
NEW_DAY: "woke to a new day",
RANDOM_FIGHT: "encountered a monster",
NO_EVENT: "have been bored since",
};
const EVENT_TITLES = {
NEW_DAY: "🌞",
RANDOM_FIGHT: "💥",
NO_EVENT: "❌",
};
const BUG_CHARS = Object.defineProperty([
'💀',
'💣',
'💔',
'💢',
'💥',
'❌',
'🛑',
'❗',
'🐛',
'🦟',
'🦗',
'🐜',
'🐝',
], 'toString', {
value() {
let s = '';
const bits = Array.from(this);
for (let i = 0; i < 5; i++) {
s += bits.splice(Math.floor(Math.random() * bits.length), 1);
}
return s;
},
});
// SCRIPT CORE BEGINS \\
((window, $, moment) => {
// eslint-disable-next-line no-extend-native
Set.prototype.addAll = Set.prototype.addAll || function addAll(iterable) {
Array.from(iterable).forEach(e => this.add(e));
};
const genid = () => ([1e7] + 1e3 + 4e3 + 8e3 + 1e11)
.repeat(2)
.replace(
/[018]/gu,
c => (c ^ crypto.getRandomValues(new Uint8Array(1))[0] & 15 >> c / 4).toString(16)
);
const SCRIPT_ID = genid();
const REGISTRY = new Set([SCRIPT_ID]);
const RADIO = new BroadcastChannel(SCRIPT_NAME);
const broadcast = (message, disguise) => RADIO.postMessage({
source: disguise || SCRIPT_ID,
event: message,
known: Array.from(REGISTRY.values()),
}); // So, you can lie about the source, for good reason! ...well, not GOOD reason.
RADIO.INITIAL_PING = 'PING';
RADIO.SET_SLAVE = 'SYNC';
RADIO.NEW_MASTER = 'EXCH';
RADIO.INSTANCE_GONE = 'GONE';
RADIO.TICK = 'EXEC';
RADIO.initialise = () => {
console.log(`${SCRIPT_ID} << ${RADIO.INITIAL_PING}`);
broadcast(RADIO.INITIAL_PING);
};
RADIO.slaveToMe = () => {
console.log(`${SCRIPT_ID} << ${RADIO.SET_SLAVE}`);
broadcast(RADIO.SET_SLAVE);
};
RADIO.switchMaster = to => {
console.log(`${SCRIPT_ID} << ${RADIO.NEW_MASTER} // ${to}`);
broadcast(RADIO.NEW_MASTER, to);
};
RADIO.unloadSelf = () => {
console.log(`${SCRIPT_ID} << ${RADIO.INSTANCE_GONE}`);
broadcast(RADIO.INSTANCE_GONE);
};
RADIO.runSlaves = () => {
console.log(`${SCRIPT_ID} << ${RADIO.TICK}`);
broadcast(RADIO.TICK);
};
let MASTER_ID = SCRIPT_ID;
let eventPane = $('#eventpane');
let header;
if (eventPane.length) {
eventPane.css('height', 'auto');
const eventLinks = eventPane.find('a[href]');
eventLinks.each((i, e) => {
const link = $(e);
e.addEventListener('click', () => true);
if (link.text().match(/\bfight\b/ui)) {
if (location.hash == "#debug") {
return;
}
if (location.hash == "#game") {
location.replace(e.href);
}
else {
GM.openInTab(e.href, AUTO_OPEN_IN_BACKGROUND);
}
link.hide();
}
});
const lines = eventPane.children('p, div');
header = lines.first();
}
else {
GM_addStyle(`#eventpane {\n${EVTPANE_CSS.map(e => `\t${e}`).join("\n")}\n}`);
eventPane = $('<div id="eventpane"></div>');
header = $('<div style="font-size:10pt; font-weight:bold; padding:0px; margin:12px auto 2px"></div>');
eventPane.append(header);
eventPane.append('<div style="margin-top: 10px;"></div>');
header.text(BUG_CHARS); // You shouldn't actually SEE this, so if you do...
const news = $('#newsinner');
const gallery = $('#nb');
if (news.length) {
news.first().prepend(eventPane);
}
else if (gallery.length) {
gallery.after(eventPane);
}
}
if (!eventPane
.find('a[href]')
.filter((i, e) => (e.href || '').toLowerCase().includes('hentaiverse.org'))
.length
) {
eventPane.append('<p><a href="https://hentaiverse.org/">Enter the HentaiVerse</a></p>');
}
$('#nb a[href]').filter((i, e) => (e.href || '').toLowerCase().includes('hentaiverse.org'))
.parents('#nb > *')
.hide();
if (!moment) {
header.attr('title', 'Failed to load mome | LAST_EVENT_TIMESTAMP_KEY, 0) || Date.now().valueOf());
const expandTemplate = (tmpl, durationObj, eventKey) => {
const durationStr = durationObj.humanize();
return tmpl
.replace(/\$PERIOD\$/gu, durationStr)
.replace(
/\$PERIOD.SHORT\$/gu,
durationStr
.replace(/^a\s+few\s+/ui, "0 ")
.replace(/^an?\s+/ui, "1 ")
.replace(/^(\d+)\s+([dhms]).*$/u, "$1$2")
)
.replace(/\$EVENT\$/gu, EVENT_LABELS[eventKey])
.replace(/\$EVENT.SHORT\$/gu, EVENT_TITLES[eventKey])
.replace(/\$STATUS\$/gu, MASTER_ID == SCRIPT_ID ? '👑' : '⛓')
.replace(/^(.)(.+)$/u, (match, g1, g2) => g1.toUpperCase() + g2);
};
let start = lastEvent();
let eventKey = GM_getValue(LAST_EVENT_NAME_KEY, 'NO_EVENT');
const headerText = header.text();
console.log(`Retrieved event header: ${headerText}`);
let foundHeader = false;
for (const [
key,
value,
] of Object.entries(EVENT_CHECKS)) {
if (headerText.match(value)) {
start = moment();
eventKey = key;
foundHeader = true;
break;
}
}
GM_setValue(LAST_EVENT_TIMESTAMP_KEY, start.valueOf());
GM_setValue(LAST_EVENT_NAME_KEY, eventKey);
GM_addValueChangeListener(LAST_EVENT_TIMESTAMP_KEY, (key, oval, nval, remote) => {
if (!remote) {
return;
} // Only care about changes in other tabs
start = lastEvent();
});
GM_addValueChangeListener(LAST_EVENT_NAME_KEY, (key, oval, nval, remote) => {
if (!remote) {
return;
} // Only care about changes in other tabs
eventKey = GM_getValue(LAST_EVENT_NAME_KEY, eventKey);
});
const updateDisplay = () => {
console.time("Display update time");
const now = moment();
const period = moment.duration(now.diff(start));
header.text(expandTemplate(HEADER_TEXT, period, eventKey));
document.title = expandTemplate(PAGE_TITLE, period, eventKey);
if (period.asMinutes() >= 30 && !$('#hentaiverse-unclicker-reload').length) {
header.after([
'<div id="hentaiverse-unclicker-reload" style="margin-top: 10px;">',
'A new encounter is ready! ',
'<a href="javascript:window.location.reload()">',
'Click to reload!',
'</a></div>',
].join(''));
}
else if (period.asMinutes() < 30) {
$('#hentaiverse-unclicker-reload').remove();
}
console.timeEnd("Display update time");
// The new automatic reload ONLY reloads the master page.
// If you have more than one page, this means the master page will cycle through all of your
// open pages in the order of oldest to newest. If you only have one, it'll obviously not do that.
if (MASTER_ID == SCRIPT_ID && period.asDays() >= 1) {
if (!foundHeader) {
GM_setValue(LAST_EVENT_TIMESTAMP_KEY, moment().valueOf()); // Fix a nasty edge case I found
}
console.log("24 hours passed, automatic reload triggered");
location.reload();
}
};
// Master-only: refresh our own display, then broadcast a TICK so slaves refresh too.
const ticker = () => {
    updateDisplay();
    console.log("Transmitting update command to all slaves");
    RADIO.runSlaves();
};
// Initial paint, then refresh every 30 seconds via the moment-timer plugin.
updateDisplay();
const timer = new moment.duration(30000).timer({
    loop: true,
    start: true,
}, ticker);
// On unload: if we are the master, hand mastership to another registered
// instance; otherwise ask the master to drop us from its registry.
$(window).on('unload', () => {
    if (REGISTRY.size > 1) { // Only if we're not alone
        if (MASTER_ID == SCRIPT_ID) { // If we're the master and we're going away
            REGISTRY.delete(SCRIPT_ID); // So we can't pick ourselves
            // Send out an event with a fake source, to indicate to all slaved pages
            // that they need to reset their master id
            RADIO.switchMaster(REGISTRY.values().next().value);
        }
        else { // The master instance needs to remove us from the registry
            RADIO.unloadSelf();
        }
    }
});
// Master/slave protocol handler. Messages are {source, event, known} objects
// posted on the BroadcastChannel (see broadcast()).
RADIO.addEventListener('message', msgevt => {
    const msgobj = msgevt.data;
    if (typeof msgobj != 'object') {
        return;
    }
    const msgSrc = msgobj.source;
    const msgEvt = msgobj.event;
    const msgAll = msgobj.known;
    console.log(`${msgSrc} >> ${msgEvt}`);
    switch (msgEvt) {
        case RADIO.INITIAL_PING:
            // A new page announced itself; only the master registers and replies.
            if (MASTER_ID == SCRIPT_ID) {
                console.log("Master instance pinged by new instance, registering");
                REGISTRY.add(msgSrc);
                console.dir(REGISTRY);
                console.log("Slaving new instance to self");
                RADIO.slaveToMe();
                ticker();
            }
            else {
                console.log("Pinged by new instance, ignored (not the master)");
            }
            break;
        case RADIO.SET_SLAVE:
            // Master replied: stop our own timer and follow its ticks instead.
            console.log("Return ping received, disabling local timer and slaving to master");
            timer.stop();
            MASTER_ID = msgSrc;
            break;
        case RADIO.NEW_MASTER: // The established master page unloaded, switch to the new one
            console.log("The king is dead... long live the king.");
            REGISTRY.delete(MASTER_ID); // Remove the now-dead page (if it was in there)
            MASTER_ID = msgSrc;
            REGISTRY.add(MASTER_ID);
            if (MASTER_ID == SCRIPT_ID) { // Are WE the new master?
                console.log("The prince has risen to power now!");
                timer.start(); // If so, start our timer
                REGISTRY.addAll(msgAll); // Load the registry details
                ticker(); // Update everybody
            }
            console.dir(REGISTRY);
            break;
        case RADIO.INSTANCE_GONE:
            REGISTRY.delete(msgSrc);
            if (MASTER_ID == SCRIPT_ID) {
                console.log("Instance terminated, removed from registry");
            }
            break;
        case RADIO.TICK:
            console.log("Received tick instruction, updating display");
            updateDisplay();
            break;
        default:
            console.error('Received unknown broadcast, this is probably a Bad Thing!');
            break;
    }
});
console.log("Sending inquisitive ping");
RADIO.initialise();
})(window, window.jQuery || window.$ || jQuery || $, moment);
// END SCRIPT \\
| nt.js library').css('border-bottom', '2px dotted red');
return;
}
const lastEvent = () => moment(GM_getValue( | conditional_block |
hverse-encounter-helper.user.js | /* eslint-disable max-len */
// ==UserScript==
// @name HentaiVerse Encounter Unclicker
// @namespace PrincessRTFM
// @version 3.2.1
// @description  Massively improves the usability/interface of the HentaiVerse random encounters system; tracks the time since the last event (and what it was), automatically opens random encounters, synchronises display updates, safe to open in multiple tabs (and will update all tabs accordingly)
// @author Lilith
// @match https://e-hentai.org/*
// @updateURL https://gh.princessrtfm.com/js/monkey/hverse-encounter-helper.user.js
// @grant GM.openInTab
// @grant GM.notification
// @grant GM_addStyle
// @grant GM_getValue
// @grant GM_setValue
// @grant GM_addValueChangeListener
// @require https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js
// @require https://cdnjs.cloudflare.com/ajax/libs/moment.js/2.22.2/moment.min.js
// @require http://princessrtfm.com/js/lib/moment-timer.js
// ==/UserScript==
/*
CHANGELOG:
v1.0.0 - initial release, just removes onclick onhandler
v1.1.0 - added timer since page opened
v1.2.0 - auto-opens in background tab
v1.3.0 - updates page title with timer, opens in foreground tab
v1.3.1 - page title update timer capitalises the first character if it's a letter
v1.3.2 - page title update timer includes the suffix 'ago'
v1.3.3 - fixed the tab opening, actually foregrounds now
v1.4.0 - timer is always injected, an event pane will be created saying that nothing has happened if no event was detected
v1.5.0 - implemented persistent tracking of the time of the last event
v1.6.0 - implemented cross-tab communication to handle events in other tabs, removed the popup notification
v1.7.0 - now tracks the last event text as well as time, so the persistent tracking display makes more sense
v1.7.1 - fixed the bug where opening a page with no event would smash the persistent storage of the event text
v1.7.2 - properly rewrote the "dawn of a new day" header to include the timer
v1.8.0 - title text now includes a short description of the last event detected, updating cross-tab
v1.9.0 - reverted to background tabs for the automatic link opening - the rest has changed enough that foregrounding is too annoying; if `#game` is added to URL, opens in current tab
v1.10.0 - shorten the time-since label in the title
v1.11.0 - cleans up the eventpane contents from "dawn of a new day" events
v2.0.0 - now operates on a master/slave system with BroadcastChannel messages to keep everything synchronised; foundation built for slave displays to not calc updates
v2.0.1 - no longer operates on the hentaiverse pages, BroadcastChannels are not cross-origin so each domain gets one master instance
v3.0.0 - un-stupified the backend (organic code growth is bad, kids)
v3.1.0 - added an "enter the hentaiverse" link on the event pane if there isn't one already
v3.1.1 - fixed a typo, cleaned the file, and bumped the version (forgot to set to 3.1.0)
v3.2.0 - added a timer to reload the page (master page only) after 24 hours, for automated new day xp collection
v3.2.1 - fixed a nasty edge case bug causing an infinite reload loop
PLANNED:
[MINOR] Make the master page post a notification (via GM.notification) when the timer runs out
[MAJOR] Use AJAX to get the news page and update the eventpane with the new content when the timer runs out
*/
/* eslint-enable max-len */
/* global GM_addValueChangeListener, jQuery, $, moment */
// SCRIPT INITIALISATION BEGINS \\
// Display name; also used as the BroadcastChannel name, so all tabs on the
// same origin running the same script version share one channel.
const SCRIPT_NAME = `${GM_info.script.name} V${GM_info.script.version || '???'}`;
// CSS for the synthesised #eventpane, used when the page has none of its own.
const EVTPANE_CSS = [
    "width: 720px;",
    "height: auto;",
    "margin: 5px auto 0px;",
    "background: rgb(242, 239, 223);",
    "border: 1px solid rgb(92, 13, 18);",
    "padding: 3px;",
    "font-size: 9pt;",
    "text-align: center !important;",
];
// GM storage keys: when/what the last event was, shared across tabs.
const LAST_EVENT_TIMESTAMP_KEY = "lastEventTime";
const LAST_EVENT_NAME_KEY = "lastEventName";
const AUTO_OPEN_IN_BACKGROUND = true;
// Templates; the $NAME$ placeholders are filled in by expandTemplate().
const PAGE_TITLE = `[$PERIOD.SHORT$ $EVENT.SHORT$] $STATUS$ E-Hentai`;
const HEADER_TEXT = `You $EVENT$ $PERIOD$ ago!`;
// Patterns matched against the event header to classify what happened.
const EVENT_CHECKS = {
    NEW_DAY: /dawn.+?new\s+day/ui,
    RANDOM_FIGHT: /encountered\s+a\s+monster/ui,
};
// Long labels used in HEADER_TEXT ($EVENT$).
const EVENT_LABELS = {
    NEW_DAY: "woke to a new day",
    RANDOM_FIGHT: "encountered a monster",
    NO_EVENT: "have been bored since",
};
// Short emoji used in PAGE_TITLE ($EVENT.SHORT$).
const EVENT_TITLES = {
    NEW_DAY: "🌞",
    RANDOM_FIGHT: "💥",
    NO_EVENT: "❌",
};
const BUG_CHARS = Object.defineProperty([
'💀',
'💣',
'💔',
'💢',
'💥',
'❌',
'🛑',
'❗',
'🐛',
'🦟',
'🦗',
'🐜',
'🐝',
], 'toString', {
value() {
let s = '';
const bits = Array.from(thi | moment) => {
// eslint-disable-next-line no-extend-native
// Polyfill: Set#addAll adds every element of an iterable to the set.
Set.prototype.addAll = Set.prototype.addAll || function addAll(iterable) {
    Array.from(iterable).forEach(e => this.add(e));
};
// Random instance id: the numeric template is stringified, doubled, and every
// 0/1/8 digit is replaced with a crypto-random hex digit.
// NOTE(review): this resembles the well-known compact UUIDv4 snippet but
// without the `-` separators in the template; presumably uniqueness (not UUID
// format) is all that matters here — confirm before "fixing".
const genid = () => ([1e7] + 1e3 + 4e3 + 8e3 + 1e11)
    .repeat(2)
    .replace(
        /[018]/gu,
        c => (c ^ crypto.getRandomValues(new Uint8Array(1))[0] & 15 >> c / 4).toString(16)
    );
// This tab's unique id; every tab starts out knowing only about itself.
const SCRIPT_ID = genid();
const REGISTRY = new Set([SCRIPT_ID]);
// Cross-tab channel; all protocol messages below travel over it.
const RADIO = new BroadcastChannel(SCRIPT_NAME);
// Post a protocol message; `disguise` lets the sender spoof the source id
// (used by switchMaster to tell everyone who the NEW master is).
const broadcast = (message, disguise) => RADIO.postMessage({
    source: disguise || SCRIPT_ID,
    event: message,
    known: Array.from(REGISTRY.values()),
}); // So, you can lie about the source, for good reason! ...well, not GOOD reason.
// Protocol event names (attached to RADIO purely as a namespace).
RADIO.INITIAL_PING = 'PING';
RADIO.SET_SLAVE = 'SYNC';
RADIO.NEW_MASTER = 'EXCH';
RADIO.INSTANCE_GONE = 'GONE';
RADIO.TICK = 'EXEC';
// Announce ourselves to whatever master may exist.
RADIO.initialise = () => {
    console.log(`${SCRIPT_ID} << ${RADIO.INITIAL_PING}`);
    broadcast(RADIO.INITIAL_PING);
};
// Master -> everyone: slave yourselves to me.
RADIO.slaveToMe = () => {
    console.log(`${SCRIPT_ID} << ${RADIO.SET_SLAVE}`);
    broadcast(RADIO.SET_SLAVE);
};
// Departing master -> everyone: `to` is the new master (spoofed source).
RADIO.switchMaster = to => {
    console.log(`${SCRIPT_ID} << ${RADIO.NEW_MASTER} // ${to}`);
    broadcast(RADIO.NEW_MASTER, to);
};
// Departing slave -> master: remove me from the registry.
RADIO.unloadSelf = () => {
    console.log(`${SCRIPT_ID} << ${RADIO.INSTANCE_GONE}`);
    broadcast(RADIO.INSTANCE_GONE);
};
// Master -> slaves: refresh your displays now.
RADIO.runSlaves = () => {
    console.log(`${SCRIPT_ID} << ${RADIO.TICK}`);
    broadcast(RADIO.TICK);
};
// Until told otherwise, every tab considers itself the master.
let MASTER_ID = SCRIPT_ID;
let eventPane = $('#eventpane');
let header;
if (eventPane.length) {
    // The page already has an event pane: unclamp its height and process links.
    eventPane.css('height', 'auto');
    const eventLinks = eventPane.find('a[href]');
    eventLinks.each((i, e) => {
        const link = $(e);
        // NOTE(review): adding a listener that returns true does not remove an
        // existing onclick handler — presumably this neutralises it some other
        // way; confirm against the site's markup.
        e.addEventListener('click', () => true);
        if (link.text().match(/\bfight\b/ui)) {
            // #debug suppresses auto-opening; #game opens in this tab;
            // otherwise open the encounter in a (background) tab.
            if (location.hash == "#debug") {
                return;
            }
            if (location.hash == "#game") {
                location.replace(e.href);
            }
            else {
                GM.openInTab(e.href, AUTO_OPEN_IN_BACKGROUND);
            }
            link.hide();
        }
    });
    const lines = eventPane.children('p, div');
    header = lines.first();
}
else {
    // No event pane on this page: synthesise one and insert it near the news
    // block (or after the nav bar as a fallback).
    GM_addStyle(`#eventpane {\n${EVTPANE_CSS.map(e => `\t${e}`).join("\n")}\n}`);
    eventPane = $('<div id="eventpane"></div>');
    header = $('<div style="font-size:10pt; font-weight:bold; padding:0px; margin:12px auto 2px"></div>');
    eventPane.append(header);
    eventPane.append('<div style="margin-top: 10px;"></div>');
    header.text(BUG_CHARS); // You shouldn't actually SEE this, so if you do...
    const news = $('#newsinner');
    const gallery = $('#nb');
    if (news.length) {
        news.first().prepend(eventPane);
    }
    else if (gallery.length) {
        gallery.after(eventPane);
    }
}
// Ensure the pane always offers a link into the HentaiVerse.
if (!eventPane
    .find('a[href]')
    .filter((i, e) => (e.href || '').toLowerCase().includes('hentaiverse.org'))
    .length
) {
    eventPane.append('<p><a href="https://hentaiverse.org/">Enter the HentaiVerse</a></p>');
}
// Hide the nav-bar entry that duplicates the link we just guaranteed above.
$('#nb a[href]').filter((i, e) => (e.href || '').toLowerCase().includes('hentaiverse.org'))
    .parents('#nb > *')
    .hide();
// Without moment.js nothing below can run; flag the failure visually and bail.
if (!moment) {
    header.attr('title', 'Failed to load moment.js library').css('border-bottom', '2px dotted red');
    return;
}
// Timestamp of the last recorded event; falls back to "now" if storage holds 0.
const lastEvent = () => moment(GM_getValue(LAST_EVENT_TIMESTAMP_KEY, 0) || Date.now().valueOf());
// Fills the $PLACEHOLDER$ slots of a template with the current duration,
// event labels, and master/slave status, then capitalises the first character.
const expandTemplate = (tmpl, durationObj, eventKey) => {
    const human = durationObj.humanize();
    // Compact duration, e.g. "a few seconds" -> "0s", "an hour" -> "1h".
    const short = human
        .replace(/^a\s+few\s+/ui, "0 ")
        .replace(/^an?\s+/ui, "1 ")
        .replace(/^(\d+)\s+([dhms]).*$/u, "$1$2");
    const substitutions = [
        [/\$PERIOD\$/gu, human],
        [/\$PERIOD.SHORT\$/gu, short],
        [/\$EVENT\$/gu, EVENT_LABELS[eventKey]],
        [/\$EVENT.SHORT\$/gu, EVENT_TITLES[eventKey]],
        [/\$STATUS\$/gu, MASTER_ID == SCRIPT_ID ? '👑' : '⛓'],
    ];
    let result = tmpl;
    for (const [pattern, replacement] of substitutions) {
        result = result.replace(pattern, replacement);
    }
    // Upper-case the leading character (if any) of the finished string.
    return result.replace(/^(.)(.+)$/u, (match, g1, g2) => g1.toUpperCase() + g2);
};
// Working state: when the last event happened and which kind it was,
// seeded from persistent storage.
let start = lastEvent();
let eventKey = GM_getValue(LAST_EVENT_NAME_KEY, 'NO_EVENT');
const headerText = header.text();
console.log(`Retrieved event header: ${headerText}`);
// If the current page's header matches a known event pattern, this page IS
// the event — reset the clock to now and remember which event it was.
let foundHeader = false;
for (const [
    key,
    value,
] of Object.entries(EVENT_CHECKS)) {
    if (headerText.match(value)) {
        start = moment();
        eventKey = key;
        foundHeader = true;
        break;
    }
}
// Persist so every other tab (and future page loads) agree.
GM_setValue(LAST_EVENT_TIMESTAMP_KEY, start.valueOf());
GM_setValue(LAST_EVENT_NAME_KEY, eventKey);
// React to updates written by OTHER tabs (remote == true).
GM_addValueChangeListener(LAST_EVENT_TIMESTAMP_KEY, (key, oval, nval, remote) => {
    if (!remote) {
        return;
    } // Only care about changes in other tabs
    start = lastEvent();
});
GM_addValueChangeListener(LAST_EVENT_NAME_KEY, (key, oval, nval, remote) => {
    if (!remote) {
        return;
    } // Only care about changes in other tabs
    eventKey = GM_getValue(LAST_EVENT_NAME_KEY, eventKey);
});
// Recompute the header text and page title from the time since the last
// event; show/remove the 30-minute "reload" link; on the master page,
// force a reload after 24 hours for automatic new-day XP collection.
const updateDisplay = () => {
    console.time("Display update time");
    const now = moment();
    const period = moment.duration(now.diff(start));
    header.text(expandTemplate(HEADER_TEXT, period, eventKey));
    document.title = expandTemplate(PAGE_TITLE, period, eventKey);
    // After 30 minutes a new encounter may be available — offer a reload link.
    if (period.asMinutes() >= 30 && !$('#hentaiverse-unclicker-reload').length) {
        header.after([
            '<div id="hentaiverse-unclicker-reload" style="margin-top: 10px;">',
            'A new encounter is ready! ',
            '<a href="javascript:window.location.reload()">',
            'Click to reload!',
            '</a></div>',
        ].join(''));
    }
    else if (period.asMinutes() < 30) {
        $('#hentaiverse-unclicker-reload').remove();
    }
    console.timeEnd("Display update time");
    // The new automatic reload ONLY reloads the master page.
    // If you have more than one page, this means the master page will cycle through all of your
    // open pages in the order of oldest to newest. If you only have one, it'll obviously not do that.
    if (MASTER_ID == SCRIPT_ID && period.asDays() >= 1) {
        if (!foundHeader) {
            GM_setValue(LAST_EVENT_TIMESTAMP_KEY, moment().valueOf()); // Fix a nasty edge case I found
        }
        console.log("24 hours passed, automatic reload triggered");
        location.reload();
    }
};
// Master-only: refresh our own display, then broadcast a TICK to the slaves.
const ticker = () => {
    updateDisplay();
    console.log("Transmitting update command to all slaves");
    RADIO.runSlaves();
};
// Initial paint, then refresh every 30 seconds via the moment-timer plugin.
updateDisplay();
const timer = new moment.duration(30000).timer({
    loop: true,
    start: true,
}, ticker);
// On unload: a departing master hands mastership to another registered
// instance; a departing slave asks the master to forget it.
$(window).on('unload', () => {
    if (REGISTRY.size > 1) { // Only if we're not alone
        if (MASTER_ID == SCRIPT_ID) { // If we're the master and we're going away
            REGISTRY.delete(SCRIPT_ID); // So we can't pick ourselves
            // Send out an event with a fake source, to indicate to all slaved pages
            // that they need to reset their master id
            RADIO.switchMaster(REGISTRY.values().next().value);
        }
        else { // The master instance needs to remove us from the registry
            RADIO.unloadSelf();
        }
    }
});
// Master/slave protocol handler. Messages are {source, event, known} objects
// posted on the BroadcastChannel by broadcast().
RADIO.addEventListener('message', msgevt => {
    const msgobj = msgevt.data;
    if (typeof msgobj != 'object') {
        return;
    }
    const msgSrc = msgobj.source;
    const msgEvt = msgobj.event;
    const msgAll = msgobj.known;
    console.log(`${msgSrc} >> ${msgEvt}`);
    switch (msgEvt) {
        case RADIO.INITIAL_PING:
            // A new page announced itself; only the master registers and replies.
            if (MASTER_ID == SCRIPT_ID) {
                console.log("Master instance pinged by new instance, registering");
                REGISTRY.add(msgSrc);
                console.dir(REGISTRY);
                console.log("Slaving new instance to self");
                RADIO.slaveToMe();
                ticker();
            }
            else {
                console.log("Pinged by new instance, ignored (not the master)");
            }
            break;
        case RADIO.SET_SLAVE:
            // Master replied: stop our own timer and follow its ticks instead.
            console.log("Return ping received, disabling local timer and slaving to master");
            timer.stop();
            MASTER_ID = msgSrc;
            break;
        case RADIO.NEW_MASTER: // The established master page unloaded, switch to the new one
            console.log("The king is dead... long live the king.");
            REGISTRY.delete(MASTER_ID); // Remove the now-dead page (if it was in there)
            MASTER_ID = msgSrc;
            REGISTRY.add(MASTER_ID);
            if (MASTER_ID == SCRIPT_ID) { // Are WE the new master?
                console.log("The prince has risen to power now!");
                timer.start(); // If so, start our timer
                REGISTRY.addAll(msgAll); // Load the registry details
                ticker(); // Update everybody
            }
            console.dir(REGISTRY);
            break;
        case RADIO.INSTANCE_GONE:
            REGISTRY.delete(msgSrc);
            if (MASTER_ID == SCRIPT_ID) {
                console.log("Instance terminated, removed from registry");
            }
            break;
        case RADIO.TICK:
            console.log("Received tick instruction, updating display");
            updateDisplay();
            break;
        default:
            console.error('Received unknown broadcast, this is probably a Bad Thing!');
            break;
    }
});
console.log("Sending inquisitive ping");
RADIO.initialise();
})(window, window.jQuery || window.$ || jQuery || $, moment);
// END SCRIPT \\
| s);
for (let i = 0; i < 5; i++) {
s += bits.splice(Math.floor(Math.random() * bits.length), 1);
}
return s;
},
});
// SCRIPT CORE BEGINS \\
((window, $, | identifier_body |
experiment_types.go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha2
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
analyticsv1alpha2 "github.com/iter8-tools/iter8-istio/pkg/analytics/api/v1alpha2"
)
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Experiment contains the sections for --
// defining an experiment,
// showing experiment status,
// +k8s:openapi-gen=true
// +kubebuilder:subresource:status
// +kubebuilder:categories=all,iter8
// +kubebuilder:printcolumn:name="type",type="string",JSONPath=".status.experimentType",description="Type of experiment",format="byte"
// +kubebuilder:printcolumn:name="hosts",type="string",JSONPath=".status.effectiveHosts",description="Names of candidates",format="byte"
// +kubebuilder:printcolumn:name="phase",type="string",JSONPath=".status.phase",description="Phase of the experiment",format="byte"
// +kubebuilder:printcolumn:name="winner found",type="boolean",JSONPath=".status.assessment.winner.winning_version_found",description="Winner identified",format="byte"
// +kubebuilder:printcolumn:name="current best",type="string",JSONPath=".status.assessment.winner.name",description="Current best version",format="byte"
// +kubebuilder:printcolumn:name="confidence",priority=1,type="string",JSONPath=".status.assessment.winner.probability_of_winning_for_best_version",description="Confidence current best version will be the winner",format="float"
// +kubebuilder:printcolumn:name="status",type="string",JSONPath=".status.message",description="Detailed Status of the experiment",format="byte"
// +kubebuilder:printcolumn:name="baseline",priority=1,type="string",JSONPath=".spec.service.baseline",description="Name of baseline",format="byte"
// +kubebuilder:printcolumn:name="candidates",priority=1,type="string",JSONPath=".spec.service.candidates",description="Names of candidates",format="byte"
type Experiment struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
	// Spec holds the desired state of the experiment
	Spec ExperimentSpec `json:"spec"`
	// Status holds the observed state of the experiment
	// +optional
	Status ExperimentStatus `json:"status,omitempty"`
}
// ExperimentList contains a list of Experiment
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type ExperimentList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	// Items is the list of experiments
	Items []Experiment `json:"items"`
}
// ExperimentSpec defines the desired state of Experiment
type ExperimentSpec struct {
// Service is a reference to the service componenets that this experiment is targeting at
Service `json:"service"`
| // Noted that at most one reward metric is allowed
// If more than one reward criterion is included, the first would be used while others would be omitted
// +optional
Criteria []Criterion `json:"criteria,omitempty"`
// TrafficControl provides instructions on traffic management for an experiment
// +optional
TrafficControl *TrafficControl `json:"trafficControl,omitempty"`
// Endpoint of reaching analytics service
// default is http://iter8-analytics:8080
// +optional
AnalyticsEndpoint *string `json:"analyticsEndpoint,omitempty"`
// Duration specifies how often/many times the expriment should re-evaluate the assessment
// +optional
Duration *Duration `json:"duration,omitempty"`
// Cleanup indicates whether routing rules and deployment receiving no traffic should be deleted at the end of experiment
// +optional
Cleanup *bool `json:"cleanup,omitempty"`
// The metrics used in the experiment
// +optional
Metrics *Metrics `json:"metrics,omitempty"`
// User actions to override the current status of the experiment
// +optional
ManualOverride *ManualOverride `json:"manualOverride,omitempty"`
// Networking describes how traffic network should be configured for the experiment
// +optional
Networking *Networking `json:"networking,omitempty"`
}
// Service is a reference to the service that this experiment is targeting
type Service struct {
	// defines the object reference to the service
	*corev1.ObjectReference `json:",inline"`
	// Name of the baseline deployment
	Baseline string `json:"baseline"`
	// List of names of candidate deployments
	Candidates []string `json:"candidates"`
	// Port number exposed by internal services
	Port *int32 `json:"port,omitempty"`
}
// Host holds the name of a host and the gateway associated with it
type Host struct {
	// Name of the Host
	Name string `json:"name"`
	// The gateway associated with the host
	Gateway string `json:"gateway"`
}

// Criterion defines the criterion for assessing a target
type Criterion struct {
	// Name of metric used in the assessment
	Metric string `json:"metric"`
	// Threshold specifies the numerical value for a success criterion
	// Metric value above threshold violates the criterion
	// +optional
	Threshold *Threshold `json:"threshold,omitempty"`
	// IsReward indicates whether the metric is a reward metric or not
	// +optional
	IsReward *bool `json:"isReward,omitempty"`
}

// Threshold defines the value and type of a criterion threshold
type Threshold struct {
	// Type of threshold
	// relative: value of threshold specifies the relative amount of changes
	// absolute: value of threshold indicates an absolute value
	//+kubebuilder:validation:Enum={relative,absolute}
	Type string `json:"type"`
	// Value of threshold
	Value float32 `json:"value"`
	// Once a target metric violates this threshold, traffic to the target should be cut off or not
	// +optional
	CutoffTrafficOnViolation *bool `json:"cutoffTrafficOnViolation,omitempty"`
}
// Duration specifies how often/many times the experiment should re-evaluate the assessment
type Duration struct {
	// Interval specifies duration between iterations
	// default is 30s
	// +optional
	Interval *string `json:"interval,omitempty"`
	// MaxIterations indicates the amount of iterations
	// default is 100
	// +optional
	MaxIterations *int32 `json:"maxIterations,omitempty"`
}
// TrafficControl specifies constraints on traffic and the strategy used to update the traffic
type TrafficControl struct {
	// Strategy used to shift traffic
	// default is progressive
	// +kubebuilder:validation:Enum={progressive, top_2, uniform}
	// +optional
	Strategy *StrategyType `json:"strategy,omitempty"`
	// OnTermination determines traffic split status at the end of experiment
	// +kubebuilder:validation:Enum={to_winner,to_baseline,keep_last}
	// +optional
	OnTermination *OnTerminationType `json:"onTermination,omitempty"`
	// Only requests that fulfill the match section would be used in the experiment
	// Istio matching rules are used
	// +optional
	Match *Match `json:"match,omitempty"`
	// Percentage specifies the amount of traffic to the service that would be used in the experiment
	// default is 100
	// +optional
	Percentage *int32 `json:"percentage,omitempty"`
	// MaxIncrement is the upper limit of traffic increment for a target in one iteration
	// default is 2
	// +optional
	MaxIncrement *int32 `json:"maxIncrement,omitempty"`
	// RouterID refers to the id of the router used to handle traffic for the experiment
	// If it's not specified, the first entry of effective host will be used as the id
	// +optional
	RouterID *string `json:"routerID,omitempty"`
}
// Match contains matching criteria for requests
type Match struct {
	// Matching criteria for HTTP requests
	// +optional
	HTTP []*HTTPMatchRequest `json:"http,omitempty"`
}

// ManualOverride defines actions that the user can perform to an experiment
type ManualOverride struct {
	// Action to perform
	//+kubebuilder:validation:Enum={pause,resume,terminate}
	Action ActionType `json:"action"`
	// Traffic split status specification
	// Applied to action terminate only
	// example:
	//   reviews-v2:80
	//   reviews-v3:20
	// +optional
	TrafficSplit map[string]int32 `json:"trafficSplit,omitempty"`
}

// Networking describes how the traffic network should be configured for the experiment
type Networking struct {
	// id of the router
	// +optional
	ID *string `json:"id,omitempty"`
	// List of hosts used to receive external traffic
	// +optional
	Hosts []Host `json:"hosts,omitempty"`
}
// Metrics contains definitions for metrics used in the experiment
type Metrics struct {
	// List of counter metric definitions
	// +optional
	CounterMetrics []CounterMetric `json:"counter_metrics,omitempty"`
	// List of ratio metric definitions
	// +optional
	RatioMetrics []RatioMetric `json:"ratio_metrics,omitempty"`
}

// CounterMetric is the definition of a Counter Metric
type CounterMetric struct {
	// Name of metric
	Name string `json:"name" yaml:"name"`
	// Query template of this metric
	QueryTemplate string `json:"query_template" yaml:"query_template"`
	// Preferred direction of the metric value
	// +optional
	PreferredDirection *string `json:"preferred_direction,omitempty" yaml:"preferred_direction,omitempty"`
	// Unit of the metric value
	// +optional
	Unit *string `json:"unit,omitempty" yaml:"unit,omitempty"`
}

// RatioMetric is the definition of a Ratio Metric
type RatioMetric struct {
	// name of metric
	Name string `json:"name" yaml:"name"`
	// Counter metric used in numerator
	Numerator string `json:"numerator" yaml:"numerator"`
	// Counter metric used in denominator
	Denominator string `json:"denominator" yaml:"denominator"`
	// Boolean flag indicating if the value of this metric is always in the range 0 to 1
	// +optional
	ZeroToOne *bool `json:"zero_to_one,omitempty" yaml:"zero_to_one,omitempty"`
	// Preferred direction of the metric value
	// +optional
	PreferredDirection *string `json:"preferred_direction,omitempty" yaml:"preferred_direction,omitempty"`
}
// ExperimentStatus defines the observed state of Experiment
type ExperimentStatus struct {
	// List of conditions
	// +optional
	Conditions Conditions `json:"conditions,omitempty"`
	// InitTimestamp is the timestamp when the experiment is initialized
	// +optional
	InitTimestamp *metav1.Time `json:"initTimestamp,omitempty"`
	// StartTimestamp is the timestamp when the experiment starts
	// +optional
	StartTimestamp *metav1.Time `json:"startTimestamp,omitempty"`
	// EndTimestamp is the timestamp when experiment completes
	// +optional
	EndTimestamp *metav1.Time `json:"endTimestamp,omitempty"`
	// LastUpdateTime is the last time iteration has been updated
	// +optional
	LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"`
	// CurrentIteration is the current iteration number
	// +optional
	CurrentIteration *int32 `json:"currentIteration,omitempty"`
	// Assessment returned by the last analysis
	// +optional
	Assessment *Assessment `json:"assessment,omitempty"`
	// Phase marks the Phase the experiment is at
	// +optional
	Phase PhaseType `json:"phase,omitempty"`
	// Message specifies message to show in the kubectl printer
	// +optional
	Message *string `json:"message,omitempty"`
	// AnalysisState is the last recorded analysis state
	// +optional
	AnalysisState *runtime.RawExtension `json:"analysisState,omitempty"`
	// ExperimentType is the type of experiment
	ExperimentType string `json:"experimentType,omitempty"`
	// EffectiveHosts is the computed host list for the experiment.
	// List of spec.Service.Name and spec.Service.Hosts[0].name
	EffectiveHosts []string `json:"effectiveHosts,omitempty"`
}
// Conditions is a list of ExperimentConditions
type Conditions []*ExperimentCondition

// ExperimentCondition describes a condition of an experiment
type ExperimentCondition struct {
	// Type of the condition
	Type ExperimentConditionType `json:"type"`
	// Status of the condition
	Status corev1.ConditionStatus `json:"status"`
	// The time when this condition was last updated
	// +optional
	LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
	// Reason for the last update
	// +optional
	Reason *string `json:"reason,omitempty"`
	// Detailed explanation on the update
	// +optional
	Message *string `json:"message,omitempty"`
}
// Assessment details for each target
type Assessment struct {
	// Assessment details of baseline
	Baseline VersionAssessment `json:"baseline"`
	// Assessment details of each candidate
	Candidates []VersionAssessment `json:"candidates"`
	// Assessment for winner target if it exists
	Winner *WinnerAssessment `json:"winner,omitempty"`
}

// WinnerAssessment shows assessment details for the winner of an experiment
type WinnerAssessment struct {
	// name of winner version
	// +optional
	Name *string `json:"name,omitempty"`
	// Assessment details from analytics
	*analyticsv1alpha2.WinnerAssessment `json:",inline,omitempty"`
}
// VersionAssessment contains assessment details for each version
type VersionAssessment struct {
	// name of version
	Name string `json:"name"`
	// Weight of traffic
	Weight int32 `json:"weight"`
	// Assessment details from analytics
	analyticsv1alpha2.VersionAssessment `json:",inline"`
	// A flag that indicates whether traffic to this target should be cut off
	// +optional
	Rollback bool `json:"rollback,omitempty"`
} | // Criteria contains a list of Criterion for assessing the target service | random_line_split
Blockchain.go | package BLC
import (
"bytes"
"crypto/ecdsa"
"encoding/hex"
"fmt"
"github.com/boltdb/bolt"
"log"
"math/big"
"os"
"strconv"
)
// Blockchain is a handle to the chain: the hash of the newest block plus
// the BoltDB database that stores all blocks.
type Blockchain struct {
	Tip []byte   // BlockHash of top Block
	DB  *bolt.DB // A pointer to the database
}
func CreateBlockchainWithGenesisBlock(address string, nodeID string) |
// Convert command variables to Transaction Objects.
// from/to/amount are parallel slices: element i describes one transfer of
// amount[i] coins from from[i] to to[i]. Earlier transactions in the batch
// are passed to NewSimpleTransation so later ones can spend their outputs.
// NOTE: the misspelled name is kept for backward compatibility with callers.
func (blockchain *Blockchain) hanldeTransations(from []string, to []string, amount []string, nodeId string) []*Transaction {
	var txs []*Transaction
	utxoSet := &UTXOSet{blockchain}
	for i := 0; i < len(from); i++ {
		// Reject malformed amounts instead of silently treating them as 0
		// (strconv.Atoi returns 0 on error, which previously went unchecked).
		amountInt, err := strconv.Atoi(amount[i])
		if err != nil {
			log.Panic(err)
		}
		tx := NewSimpleTransation(from[i], to[i], int64(amountInt), utxoSet, txs, nodeId)
		txs = append(txs, tx)
	}
	return txs
}
// Package transactions and mine a new Block.
// A coinbase (mining reward) transaction is prepended, every non-coinbase
// transaction is verified, the current chain tip is read from the database,
// and a new block at height tip+1 is mined. The block is NOT persisted here;
// see SaveNewBlockToBlockchain.
// NOTE(review): this opens the database by name even though blockchain.DB
// exists — presumably to get a fresh handle per operation; confirm intent.
func (blockchain *Blockchain) MineNewBlock(originalTxs []*Transaction) *Block {
	// Reward of mining a block
	coinBaseTransaction := NewRewardTransacion()
	txs := []*Transaction{coinBaseTransaction}
	txs = append(txs, originalTxs...)
	// Verify transactions (the coinbase has no inputs to verify)
	for _, tx := range txs {
		if !tx.IsCoinBaseTransaction() {
			if blockchain.VerifityTransaction(tx, txs) == false {
				log.Panic("Verify transaction failed...")
			}
		}
	}
	DBName := fmt.Sprintf(DBName, os.Getenv("NODE_ID"))
	db, err := bolt.Open(DBName, 0600, nil)
	if err != nil {
		log.Panic(err)
	}
	defer db.Close()
	// Get the latest block: "l" holds the hash of the chain tip
	var block Block
	err = db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte(BlockBucketName))
		if b != nil {
			hash := b.Get([]byte("l"))
			blockBytes := b.Get(hash)
			gobDecode(blockBytes, &block)
		}
		return nil
	})
	if err != nil {
		log.Panic(err)
	}
	// Mine a new block on top of the current tip
	newBlock := NewBlock(txs, block.Height+1, block.BlockHash)
	return newBlock
}
// Save a block to the database: store it under its hash, repoint the "l"
// (latest) key at it, and update the in-memory Tip.
// Fix: bucket Put errors were previously discarded; they are now returned
// from the Update closure so the transaction rolls back and the error is
// surfaced via log.Panic, instead of silently corrupting the tip pointer.
func (blockchain *Blockchain) SaveNewBlockToBlockchain(newBlock *Block) {
	DBName := fmt.Sprintf(DBName, os.Getenv("NODE_ID"))
	db, err := bolt.Open(DBName, 0600, nil)
	if err != nil {
		log.Panic(err)
	}
	defer db.Close()
	err = db.Update(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte(BlockBucketName))
		if b != nil {
			// Store the serialized block keyed by its hash
			if err := b.Put(newBlock.BlockHash, gobEncode(newBlock)); err != nil {
				return err
			}
			// "l" always points at the hash of the newest block
			if err := b.Put([]byte("l"), newBlock.BlockHash); err != nil {
				return err
			}
			blockchain.Tip = newBlock.BlockHash
		}
		return nil
	})
	if err != nil {
		log.Panic(err)
	}
}
// Get Unspent transaction outputs (UTXOs) for an address.
// Unconfirmed transactions in txs are scanned first (newest to oldest), then
// every block from the tip back to genesis; caculate() accumulates spent
// output references and collects the outputs that remain unspent.
func (blc *Blockchain) getUTXOsByAddress(address string, txs []*Transaction) []*UTXO {
	var utxos []*UTXO
	// transaction hash (hex) -> indexes of outputs already spent by some input
	spentTxOutputMap := make(map[string][]int)
	// calculate UTXOs by querying txs (newest first, so spends are seen
	// before the outputs they consume)
	for i := len(txs) - 1; i >= 0; i-- {
		utxos = caculate(txs[i], address, spentTxOutputMap, utxos)
	}
	// calculate UTXOs by querying Blocks
	it := blc.Iterator()
	for {
		block := it.Next()
		for i := len(block.Transactions) - 1; i >= 0; i-- {
			utxos = caculate(block.Transactions[i], address, spentTxOutputMap, utxos)
		}
		hashInt := new(big.Int)
		hashInt.SetBytes(block.PrevBlockHash)
		// If current block is genesis block (all-zero previous hash), exit loop
		if big.NewInt(0).Cmp(hashInt) == 0 {
			break
		}
	}
	return utxos
}
// calculate utxos for one transaction (helper of getUTXOsByAddress; the
// misspelled name is kept for compatibility with existing callers).
// First it records which outputs this transaction's inputs spend (for the
// given address) into spentOutputMap; then every output payable to the
// address that is NOT recorded as spent is appended to utxos.
func caculate(tx *Transaction, address string, spentOutputMap map[string][]int, utxos []*UTXO) []*UTXO {
	// collect all inputs into spentOutputMap (coinbase txs have no real inputs)
	if !tx.IsCoinBaseTransaction() {
		for _, input := range tx.Inputs {
			// Strip the version byte and checksum to get the public key hash
			full_payload := Base58Decode([]byte(address))
			pubKeyHash := full_payload[1 : len(full_payload)-addressCheckSumLen]
			if input.UnlockWithAddress(pubKeyHash) {
				transactionHash := hex.EncodeToString(input.TransactionHash)
				spentOutputMap[transactionHash] = append(spentOutputMap[transactionHash], input.IndexOfOutputs)
			}
		}
	}
	// Traverse all outputs; unspent UTXOs = all outputs - spent outputs.
	// The label lets the inner loop skip the rest of the checks for an
	// output already proven spent.
outputsLoop:
	for index, output := range tx.Outputs {
		if output.UnlockWithAddress(address) {
			if len(spentOutputMap) != 0 {
				var isSpent bool
				for transactionHash, indexArray := range spentOutputMap { //143d,[]int{1}
					// iterate over the recorded indexes of already-spent outputs
					for _, i := range indexArray {
						if i == index && hex.EncodeToString(tx.TransactionHash) == transactionHash {
							isSpent = true // mark the current output as already spent
							continue outputsLoop
						}
					}
				}
				if !isSpent {
					utxo := &UTXO{tx.TransactionHash, index, output}
					utxos = append(utxos, utxo)
				}
			} else {
				utxo := &UTXO{tx.TransactionHash, index, output}
				utxos = append(utxos, utxo)
			}
		}
	}
	return utxos
}
// Find UTXOs which can be regarded as inputs in this transaction.
// Greedily accumulates the sender's UTXOs until `amount` is covered and
// returns (total gathered, map of tx-hash-hex -> output indexes to spend).
// If the balance is insufficient the process exits (message is in Chinese:
// "insufficient balance, cannot transfer").
func (bc *Blockchain) FindSpendableUTXOs(from string, amount int64, txs []*Transaction) (int64, map[string][]int) {
	var total int64
	spendableMap := make(map[string][]int)
	utxos := bc.getUTXOsByAddress(from, txs)
	for _, utxo := range utxos {
		total += utxo.Output.Value
		transactionHash := hex.EncodeToString(utxo.TransactionHash)
		spendableMap[transactionHash] = append(spendableMap[transactionHash], utxo.Index)
		// Stop as soon as the requested amount is covered
		if total >= amount {
			break
		}
	}
	if total < amount {
		fmt.Printf("%s,余额不足,无法转账。。", from)
		os.Exit(1)
	}
	return total, spendableMap
}
func (blc *Blockchain) Printchain() {
blockIterator := blc.Iterator()
for {
block := blockIterator.Next()
fmt.Println(block)
var hashInt big.Int
hashInt.SetBytes(block.PrevBlockHash)
if big.NewInt(0).Cmp(&hashInt) == 0 {
break
}
}
}
func (blockchain *Blockchain) Iterator() *BlockchainIterator {
return &BlockchainIterator{blockchain.Tip, blockchain.DB}
}
func DBExists(DBName string) bool {
if _, err := os.Stat(DBName); os.IsNotExist(err) {
return false
}
return true
}
func BlockchainObject(nodeID string) *Blockchain {
DBName := fmt.Sprintf(DBName, nodeID)
if DBExists(DBName) {
db, err := bolt.Open(DBName, 0600, nil)
if err != nil {
log.Panic(err)
}
defer db.Close()
var blockchain *Blockchain
err = db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte(BlockBucketName))
if b != nil {
hash := b.Get([]byte("l"))
blockchain = &Blockchain{hash, db}
}
return nil
})
if err != nil {
log.Panic(err)
}
return blockchain
} else {
fmt.Println("数据库不存在,无法获取BlockChain对象。。。")
return nil
}
}
func (bc *Blockchain) SignTransaction(tx *Transaction, privateKey ecdsa.PrivateKey, txs []*Transaction) {
if tx.IsCoinBaseTransaction() {
return
}
prevTransactionMap := make(map[string]*Transaction)
for _, input := range tx.Inputs {
transactionHash := hex.EncodeToString(input.TransactionHash)
prevTransactionMap[transactionHash] = bc.FindTransactionByTransactionHash(input.TransactionHash, txs)
}
tx.Sign(privateKey, prevTransactionMap)
}
func (bc *Blockchain) FindTransactionByTransactionHash(transactionHash []byte, txs []*Transaction) *Transaction {
for _, tx := range txs {
if bytes.Compare(tx.TransactionHash, transactionHash) == 0 {
return tx
}
}
iterator := bc.Iterator()
for {
block := iterator.Next()
for _, tx := range block.Transactions {
if bytes.Compare(tx.TransactionHash, transactionHash) == 0 {
return tx
}
}
bigInt := new(big.Int)
bigInt.SetBytes(block.PrevBlockHash)
if big.NewInt(0).Cmp(bigInt) == 0 {
break
}
}
return &Transaction{}
}
/*
验证交易的数字签名
*/
func (bc *Blockchain) VerifityTransaction(tx *Transaction, txs []*Transaction) bool {
//要想验证数字签名:私钥+数据 (tx的副本+之前的交易)
//2.获取该tx中的Input,引用之前的transaction中的未花费的output
prevTxs := make(map[string]*Transaction)
for _, input := range tx.Inputs {
transactionHash := hex.EncodeToString(input.TransactionHash)
prevTxs[transactionHash] = bc.FindTransactionByTransactionHash(input.TransactionHash, txs)
}
if len(prevTxs) == 0 {
fmt.Println("没找到对应交易")
} else {
//fmt.Println("preTxs___________________________________")
//fmt.Println(prevTxs)
}
//验证
return tx.VerifyTransaction(prevTxs)
//return true
}
func (bc *Blockchain) GetAllUTXOs() map[string]*UTXOArray {
iterator := bc.Iterator()
utxoMap := make(map[string]*UTXOArray)
//已花费的input map
inputMap := make(map[string][]*Input)
for {
block := iterator.Next()
for i := len(block.Transactions) - 1; i >= 0; i-- {
// collect inputs
tx := block.Transactions[i]
transactionHash := hex.EncodeToString(tx.TransactionHash)
utxoArray := &UTXOArray{[]*UTXO{}}
if !tx.IsCoinBaseTransaction() {
for _, input := range tx.Inputs {
transactionHash := hex.EncodeToString(input.TransactionHash)
inputMap[transactionHash] = append(inputMap[transactionHash], input)
}
}
//根据inputMap,遍历outputs 找出 UTXO
outputLoop:
for index, output := range tx.Outputs {
if len(inputMap) > 0 {
//isSpent := false
inputs := inputMap[transactionHash] //如果inputs 存在, 则对应的交易里面某笔output肯定已经被消费
for _, input := range inputs {
//判断input对应的是否当期的output
if index == input.IndexOfOutputs && input.UnlockWithAddress(output.PubKeyHash) {
//此笔output已被消费
//isSpent = true
continue outputLoop
}
}
//if isSpent == false {
//outputs 加进utxoMap
utxo := &UTXO{tx.TransactionHash, index, output}
utxoArray.UTXOs = append(utxoArray.UTXOs, utxo)
//}
} else {
//outputs 加进utxoMap
utxo := &UTXO{tx.TransactionHash, index, output}
utxoArray.UTXOs = append(utxoArray.UTXOs, utxo)
}
}
if len(utxoArray.UTXOs) > 0 {
utxoMap[transactionHash] = utxoArray
}
}
//退出条件
hashBigInt := new(big.Int)
hashBigInt.SetBytes(block.PrevBlockHash)
if big.NewInt(0).Cmp(hashBigInt) == 0 {
break
}
}
return utxoMap
}
func (bc *Blockchain) GetHeight() int64 {
return bc.Iterator().Next().Height
}
func (bc *Blockchain) getAllBlocksHash() [][]byte {
iterator := bc.Iterator()
var blocksHashes [][]byte
for {
block := iterator.Next()
blocksHashes = append(blocksHashes, block.BlockHash)
bigInt := new(big.Int)
bigInt.SetBytes(block.PrevBlockHash)
if big.NewInt(0).Cmp(bigInt) == 0 {
break
}
}
return blocksHashes
}
func (bc *Blockchain) GetBlockByHash(hash []byte) *Block {
var block Block
DBName := fmt.Sprintf(DBName, os.Getenv("NODE_ID"))
db, err := bolt.Open(DBName, 0600, nil)
if err != nil {
log.Panic(err)
}
defer db.Close()
err = db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte(BlockBucketName))
if b != nil {
blockBytes := b.Get(hash)
gobDecode(blockBytes, &block)
}
return nil
})
if err != nil {
log.Panic(err)
}
return &block
}
func (bc *Blockchain) AddBlockToChain(block *Block) {
DBName := fmt.Sprintf(DBName, os.Getenv("NODE_ID"))
db, err := bolt.Open(DBName, 0600, nil)
if err != nil {
log.Panic(err)
}
defer db.Close()
err = db.Update(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte(BlockBucketName))
if b != nil {
blockBytes := b.Get(block.BlockHash)
if blockBytes != nil {
return nil
}
err := b.Put(block.BlockHash, gobEncode(block))
if err != nil {
log.Panic(err)
}
lastBlockHash := b.Get([]byte("l"))
lastBlockBytes := b.Get(lastBlockHash)
var lastBlock Block
gobDecode(lastBlockBytes, &lastBlock)
if lastBlock.Height < block.Height {
b.Put([]byte("l"), block.BlockHash)
bc.Tip = block.BlockHash
}
}
return nil
})
if err != nil {
log.Panic(err)
}
}
| {
DBName := fmt.Sprintf(DBName, nodeID)
if DBExists(DBName) {
fmt.Println("Genesis block already exist!")
os.Exit(1)
}
fmt.Println("Creating genesis block....")
db, err := bolt.Open(DBName, 0600, nil)
if err != nil {
log.Panic(err)
}
defer db.Close()
err = db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucketIfNotExists([]byte(BlockBucketName))
if err != nil {
log.Panic(err)
}
if b != nil {
// Create the genesis block with a coinbase transaction
txCoinbase := NewCoinbaseTransacion(address)
genesisBlock := CreateGenesisBlock([]*Transaction{txCoinbase})
err := b.Put(genesisBlock.BlockHash, gobEncode(genesisBlock))
if err != nil {
log.Panic(err)
}
// Update Tip of blockchain
err = b.Put([]byte("l"), genesisBlock.BlockHash)
if err != nil {
log.Panic(err)
}
}
return nil
})
if err != nil {
log.Panic(err)
}
} | identifier_body |
Blockchain.go | package BLC
import (
"bytes"
"crypto/ecdsa"
"encoding/hex"
"fmt"
"github.com/boltdb/bolt"
"log"
"math/big"
"os"
"strconv"
)
type Blockchain struct {
Tip []byte // BlockHash of top Block
DB *bolt.DB // A pointer to the database
}
// CreateBlockchainWithGenesisBlock initialises a brand-new chain database for
// the given node: it creates the bolt DB file, the block bucket, a genesis
// block containing a single coinbase transaction paying `address`, and the
// "l" (last-hash) tip pointer. Exits the process if the DB already exists.
func CreateBlockchainWithGenesisBlock(address string, nodeID string) {
// The DB file name is per-node (DBName is a format string, e.g. "blockchain_%s.db").
DBName := fmt.Sprintf(DBName, nodeID)
if DBExists(DBName) {
fmt.Println("Genesis block already exist!")
os.Exit(1)
}
fmt.Println("Creating genesis block....")
db, err := bolt.Open(DBName, 0600, nil)
if err != nil {
log.Panic(err)
}
defer db.Close()
err = db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucketIfNotExists([]byte(BlockBucketName))
if err != nil {
log.Panic(err)
}
if b != nil {
// Create the genesis block with a coinbase transaction
txCoinbase := NewCoinbaseTransacion(address)
genesisBlock := CreateGenesisBlock([]*Transaction{txCoinbase})
// Store the serialised block under its own hash.
err := b.Put(genesisBlock.BlockHash, gobEncode(genesisBlock))
if err != nil {
log.Panic(err)
}
// Update Tip of blockchain
err = b.Put([]byte("l"), genesisBlock.BlockHash)
if err != nil {
log.Panic(err)
}
}
return nil
})
if err != nil {
log.Panic(err)
}
}
// Convert command variables to Transaction Objects
// hanldeTransations converts parallel command-line slices (from / to / amount)
// into Transaction objects. Each new transaction sees the ones built before it
// via `txs`, so several transfers in one command can chain correctly.
// Panics on a malformed amount string instead of silently treating it as 0
// (the previous version discarded the strconv.Atoi error).
func (blockchain *Blockchain) hanldeTransations(from []string, to []string, amount []string, nodeId string) []*Transaction {
	var txs []*Transaction
	utxoSet := &UTXOSet{blockchain}
	for i := 0; i < len(from); i++ {
		amountInt, err := strconv.Atoi(amount[i])
		if err != nil {
			// A non-numeric amount would otherwise become a 0-value transfer.
			log.Panic(err)
		}
		tx := NewSimpleTransation(from[i], to[i], int64(amountInt), utxoSet, txs, nodeId)
		txs = append(txs, tx)
	}
	return txs
}
// Package transactions and mine a new Block
// MineNewBlock packages the given transactions (prepending a fresh coinbase
// reward transaction), verifies every non-coinbase transaction's signatures,
// reads the current tip block from the node's DB, and mines a new block on
// top of it. The mined block is returned but NOT persisted here — callers
// must save it (e.g. via SaveNewBlockToBlockchain).
func (blockchain *Blockchain) MineNewBlock(originalTxs []*Transaction) *Block {
// Reward of mining a block
coinBaseTransaction := NewRewardTransacion()
txs := []*Transaction{coinBaseTransaction}
txs = append(txs, originalTxs...)
// Verify transactions
// Note: the full txs slice (coinbase included) is passed so verification can
// resolve references to transactions that are in this same batch.
for _, tx := range txs {
if !tx.IsCoinBaseTransaction() {
if blockchain.VerifityTransaction(tx, txs) == false {
log.Panic("Verify transaction failed...")
}
}
}
// The node's DB file is selected by the NODE_ID environment variable.
DBName := fmt.Sprintf(DBName, os.Getenv("NODE_ID"))
db, err := bolt.Open(DBName, 0600, nil)
if err != nil {
log.Panic(err)
}
defer db.Close()
// Get the latest block
var block Block
err = db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte(BlockBucketName))
if b != nil {
// "l" stores the hash of the top block; fetch and decode that block.
hash := b.Get([]byte("l"))
blockBytes := b.Get(hash)
gobDecode(blockBytes, &block)
}
return nil
})
if err != nil {
log.Panic(err)
}
// Mine a new block
newBlock := NewBlock(txs, block.Height+1, block.BlockHash)
return newBlock
}
// Save a block to the database
// SaveNewBlockToBlockchain persists a freshly mined block to the node's DB:
// stores the serialised block under its hash, moves the "l" tip pointer to it,
// and updates the in-memory Tip. Panics on any DB failure.
// Fix: the two b.Put error returns were previously ignored; they are now
// returned from the update closure so bolt rolls the transaction back and
// the failure surfaces via log.Panic.
func (blockchain *Blockchain) SaveNewBlockToBlockchain(newBlock *Block) {
	DBName := fmt.Sprintf(DBName, os.Getenv("NODE_ID"))
	db, err := bolt.Open(DBName, 0600, nil)
	if err != nil {
		log.Panic(err)
	}
	defer db.Close()
	err = db.Update(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte(BlockBucketName))
		if b != nil {
			if err := b.Put(newBlock.BlockHash, gobEncode(newBlock)); err != nil {
				return err
			}
			if err := b.Put([]byte("l"), newBlock.BlockHash); err != nil {
				return err
			}
			blockchain.Tip = newBlock.BlockHash
		}
		return nil
	})
	if err != nil {
		log.Panic(err)
	}
}
// Get Unspent transaction outputs(UTXOs)
// getUTXOsByAddress collects every unspent transaction output belonging to
// `address`. It first scans the not-yet-mined transactions in `txs` (newest
// first), then walks the chain from the tip down to the genesis block,
// accumulating spent-output bookkeeping in `spent` as it goes.
func (blc *Blockchain) getUTXOsByAddress(address string, txs []*Transaction) []*UTXO {
	var unspent []*UTXO
	spent := make(map[string][]int)
	// Pending (unmined) transactions, newest first.
	for idx := len(txs) - 1; idx >= 0; idx-- {
		unspent = caculate(txs[idx], address, spent, unspent)
	}
	// Mined blocks, tip first.
	iterator := blc.Iterator()
	for {
		current := iterator.Next()
		for idx := len(current.Transactions) - 1; idx >= 0; idx-- {
			unspent = caculate(current.Transactions[idx], address, spent, unspent)
		}
		// The genesis block's previous hash is all zero bytes.
		if new(big.Int).SetBytes(current.PrevBlockHash).Sign() == 0 {
			break
		}
	}
	return unspent
}
// calculate utxos
// caculate updates the UTXO bookkeeping for one transaction: it records every
// output this address has already spent (into spentOutputMap, keyed by the
// referenced transaction hash) and appends any of tx's outputs that belong to
// the address and are not marked spent onto utxos, which is returned.
// Fixes: the Base58 decode of the address was loop-invariant but recomputed
// for every input; the `isSpent` flag was dead (it could never be true at its
// check because the only assignment is followed by `continue outputsLoop`);
// the full-map scan is replaced by a direct lookup of this tx's own hash,
// which is the only key the original comparison could ever match.
func caculate(tx *Transaction, address string, spentOutputMap map[string][]int, utxos []*UTXO) []*UTXO {
	// Phase 1: record outputs already consumed by this address's inputs.
	if !tx.IsCoinBaseTransaction() {
		// Decode the address once; it does not change per input.
		fullPayload := Base58Decode([]byte(address))
		pubKeyHash := fullPayload[1 : len(fullPayload)-addressCheckSumLen]
		for _, input := range tx.Inputs {
			if input.UnlockWithAddress(pubKeyHash) {
				transactionHash := hex.EncodeToString(input.TransactionHash)
				spentOutputMap[transactionHash] = append(spentOutputMap[transactionHash], input.IndexOfOutputs)
			}
		}
	}
	// Phase 2: unspent outputs = this tx's outputs minus those marked spent.
	txHash := hex.EncodeToString(tx.TransactionHash)
outputsLoop:
	for index, output := range tx.Outputs {
		if !output.UnlockWithAddress(address) {
			continue
		}
		for _, spentIndex := range spentOutputMap[txHash] {
			if spentIndex == index {
				// This output is already consumed; skip it.
				continue outputsLoop
			}
		}
		utxos = append(utxos, &UTXO{tx.TransactionHash, index, output})
	}
	return utxos
}
// Find UTXOs which can be regarded as inputs in this transaction
// FindSpendableUTXOs gathers UTXOs owned by `from` until their combined value
// covers `amount`. It returns the total gathered and a map from transaction
// hash (hex) to the output indexes to spend. If the balance is insufficient
// the process prints a message and exits.
func (bc *Blockchain) FindSpendableUTXOs(from string, amount int64, txs []*Transaction) (int64, map[string][]int) {
	spendableMap := make(map[string][]int)
	var gathered int64
	for _, candidate := range bc.getUTXOsByAddress(from, txs) {
		gathered += candidate.Output.Value
		key := hex.EncodeToString(candidate.TransactionHash)
		spendableMap[key] = append(spendableMap[key], candidate.Index)
		if gathered >= amount {
			// Enough value collected; stop early.
			break
		}
	}
	if gathered < amount {
		fmt.Printf("%s,余额不足,无法转账。。", from)
		os.Exit(1)
	}
	return gathered, spendableMap
}
// Printchain prints every block, walking from the chain tip down to the
// genesis block (recognised by its all-zero previous hash).
func (blc *Blockchain) Printchain() {
	iterator := blc.Iterator()
	for {
		current := iterator.Next()
		fmt.Println(current)
		prev := new(big.Int).SetBytes(current.PrevBlockHash)
		if prev.Sign() == 0 {
			// Genesis reached — nothing before it.
			break
		}
	}
}
// Iterator returns a BlockchainIterator starting at the chain tip; each
// Next() call yields the current block and steps toward the genesis block.
func (blockchain *Blockchain) Iterator() *BlockchainIterator {
return &BlockchainIterator{blockchain.Tip, blockchain.DB}
}
func DBExists(DBName string) bool {
if _, err := os.Stat(DBName); os.IsNotExist(err) {
return false
}
return true
}
// BlockchainObject loads a Blockchain handle for the given node by reading
// the "l" tip pointer from the node's bolt DB. Returns nil (with a message)
// when the DB file does not exist or the bucket is missing.
func BlockchainObject(nodeID string) *Blockchain {
DBName := fmt.Sprintf(DBName, nodeID)
if DBExists(DBName) {
db, err := bolt.Open(DBName, 0600, nil)
if err != nil {
log.Panic(err)
}
// NOTE(review): this defer closes `db` when the function returns, yet the
// same handle is stored in the returned Blockchain below. Callers using
// blockchain.DB (e.g. via Iterator) would see a closed handle — confirm
// whether callers reopen the DB themselves before relying on this field.
defer db.Close()
var blockchain *Blockchain
err = db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte(BlockBucketName))
if b != nil {
// "l" holds the hash of the top block; that becomes the Tip.
hash := b.Get([]byte("l"))
blockchain = &Blockchain{hash, db}
}
return nil
})
if err != nil {
log.Panic(err)
}
// May be nil if the block bucket was absent.
return blockchain
} else {
fmt.Println("数据库不存在,无法获取BlockChain对象。。。")
return nil
}
}
// SignTransaction signs tx with the given private key. Coinbase transactions
// carry no inputs to sign and are skipped. For every input, the transaction
// whose output it references is looked up (first in `txs`, then in the chain)
// and collected into a map keyed by hex-encoded transaction hash, which
// tx.Sign uses to reconstruct the data being signed.
func (bc *Blockchain) SignTransaction(tx *Transaction, privateKey ecdsa.PrivateKey, txs []*Transaction) {
if tx.IsCoinBaseTransaction() {
return
}
prevTransactionMap := make(map[string]*Transaction)
for _, input := range tx.Inputs {
transactionHash := hex.EncodeToString(input.TransactionHash)
prevTransactionMap[transactionHash] = bc.FindTransactionByTransactionHash(input.TransactionHash, txs)
}
tx.Sign(privateKey, prevTransactionMap)
}
// FindTransactionByTransactionHash looks a transaction up by hash, first in
// the pending slice `txs`, then in every block from the tip back to genesis.
// When nothing matches, it returns an empty (zero-value) Transaction — not
// nil — which existing callers rely on.
// Idiom fix: bytes.Equal replaces bytes.Compare(...) == 0.
func (bc *Blockchain) FindTransactionByTransactionHash(transactionHash []byte, txs []*Transaction) *Transaction {
	// Pending (unmined) transactions take priority.
	for _, tx := range txs {
		if bytes.Equal(tx.TransactionHash, transactionHash) {
			return tx
		}
	}
	// Walk the chain from the tip down to the genesis block.
	iterator := bc.Iterator()
	for {
		block := iterator.Next()
		for _, tx := range block.Transactions {
			if bytes.Equal(tx.TransactionHash, transactionHash) {
				return tx
			}
		}
		// Genesis block has an all-zero previous hash.
		if new(big.Int).SetBytes(block.PrevBlockHash).Sign() == 0 {
			break
		}
	}
	return &Transaction{}
}
/*
验证交易的数字签名
*/
func (bc *Blockchain) VerifityTransaction(tx *Transaction, txs []*Transaction) bool {
//要想验证数字签名:私钥+数据 (tx的副本+之前的交易)
//2.获取该tx中的Input,引用之前的transaction中的未花费的output
prevTxs := make(map[string]*Transaction)
for _, input := range tx.Inputs {
transactionHash := hex.EncodeToString(input.TransactionHash)
prevTxs[transactionHash] = bc.FindTransactionByTransactionHash(input.TransactionHash, txs)
}
| fmt.Println("没找到对应交易")
} else {
//fmt.Println("preTxs___________________________________")
//fmt.Println(prevTxs)
}
//验证
return tx.VerifyTransaction(prevTxs)
//return true
}
// GetAllUTXOs scans the whole chain (tip to genesis) and returns every
// unspent transaction output, grouped per transaction: a map from hex-encoded
// transaction hash to the UTXOArray of that transaction's unspent outputs.
// The scan is order-dependent: inputs seen in newer blocks mark outputs of
// older transactions as spent before those transactions are visited.
func (bc *Blockchain) GetAllUTXOs() map[string]*UTXOArray {
iterator := bc.Iterator()
utxoMap := make(map[string]*UTXOArray)
// Map of spent inputs, keyed by the hash of the transaction they reference.
inputMap := make(map[string][]*Input)
for {
block := iterator.Next()
for i := len(block.Transactions) - 1; i >= 0; i-- {
// collect inputs
tx := block.Transactions[i]
// Hash of the transaction currently being examined.
transactionHash := hex.EncodeToString(tx.TransactionHash)
utxoArray := &UTXOArray{[]*UTXO{}}
if !tx.IsCoinBaseTransaction() {
for _, input := range tx.Inputs {
// NOTE: this inner transactionHash deliberately shadows the outer one —
// it is the hash of the transaction the input REFERENCES, not this tx.
transactionHash := hex.EncodeToString(input.TransactionHash)
inputMap[transactionHash] = append(inputMap[transactionHash], input)
}
}
// Using inputMap, walk this transaction's outputs and keep the unspent ones.
outputLoop:
for index, output := range tx.Outputs {
if len(inputMap) > 0 {
//isSpent := false
// Inputs (if any) that reference the current transaction: each one marks
// one of this transaction's outputs as spent.
inputs := inputMap[transactionHash] //如果inputs 存在, 则对应的交易里面某笔output肯定已经被消费
for _, input := range inputs {
// Does this input consume exactly the output at `index`?
if index == input.IndexOfOutputs && input.UnlockWithAddress(output.PubKeyHash) {
// Output already spent — skip it.
//isSpent = true
continue outputLoop
}
}
//if isSpent == false {
// Output survived the spent check: record it as a UTXO.
utxo := &UTXO{tx.TransactionHash, index, output}
utxoArray.UTXOs = append(utxoArray.UTXOs, utxo)
//}
} else {
// No spent inputs recorded at all — every output is unspent.
utxo := &UTXO{tx.TransactionHash, index, output}
utxoArray.UTXOs = append(utxoArray.UTXOs, utxo)
}
}
if len(utxoArray.UTXOs) > 0 {
utxoMap[transactionHash] = utxoArray
}
}
// Exit condition: the genesis block's previous hash is all zero bytes.
hashBigInt := new(big.Int)
hashBigInt.SetBytes(block.PrevBlockHash)
if big.NewInt(0).Cmp(hashBigInt) == 0 {
break
}
}
return utxoMap
}
// GetHeight returns the height of the chain's tip block (the first block an
// iterator yields).
func (bc *Blockchain) GetHeight() int64 {
return bc.Iterator().Next().Height
}
// getAllBlocksHash returns the hash of every block, ordered tip-first.
func (bc *Blockchain) getAllBlocksHash() [][]byte {
	var hashes [][]byte
	it := bc.Iterator()
	for {
		blk := it.Next()
		hashes = append(hashes, blk.BlockHash)
		if new(big.Int).SetBytes(blk.PrevBlockHash).Sign() == 0 {
			// Reached the genesis block.
			break
		}
	}
	return hashes
}
// GetBlockByHash loads and decodes the block stored under `hash` in this
// node's DB (selected via the NODE_ID environment variable). If the hash is
// unknown, the returned Block is the zero value (gobDecode is given a nil
// byte slice).
func (bc *Blockchain) GetBlockByHash(hash []byte) *Block {
var block Block
DBName := fmt.Sprintf(DBName, os.Getenv("NODE_ID"))
db, err := bolt.Open(DBName, 0600, nil)
if err != nil {
log.Panic(err)
}
defer db.Close()
err = db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte(BlockBucketName))
if b != nil {
// Blocks are keyed by their own hash.
blockBytes := b.Get(hash)
gobDecode(blockBytes, &block)
}
return nil
})
if err != nil {
log.Panic(err)
}
return &block
}
func (bc *Blockchain) AddBlockToChain(block *Block) {
DBName := fmt.Sprintf(DBName, os.Getenv("NODE_ID"))
db, err := bolt.Open(DBName, 0600, nil)
if err != nil {
log.Panic(err)
}
defer db.Close()
err = db.Update(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte(BlockBucketName))
if b != nil {
blockBytes := b.Get(block.BlockHash)
if blockBytes != nil {
return nil
}
err := b.Put(block.BlockHash, gobEncode(block))
if err != nil {
log.Panic(err)
}
lastBlockHash := b.Get([]byte("l"))
lastBlockBytes := b.Get(lastBlockHash)
var lastBlock Block
gobDecode(lastBlockBytes, &lastBlock)
if lastBlock.Height < block.Height {
b.Put([]byte("l"), block.BlockHash)
bc.Tip = block.BlockHash
}
}
return nil
})
if err != nil {
log.Panic(err)
}
} | if len(prevTxs) == 0 { | random_line_split |
Blockchain.go | package BLC
import (
"bytes"
"crypto/ecdsa"
"encoding/hex"
"fmt"
"github.com/boltdb/bolt"
"log"
"math/big"
"os"
"strconv"
)
type Blockchain struct {
Tip []byte // BlockHash of top Block
DB *bolt.DB // A pointer to the database
}
func CreateBlockchainWithGenesisBlock(address string, nodeID string) {
DBName := fmt.Sprintf(DBName, nodeID)
if DBExists(DBName) {
fmt.Println("Genesis block already exist!")
os.Exit(1)
}
fmt.Println("Creating genesis block....")
db, err := bolt.Open(DBName, 0600, nil)
if err != nil {
log.Panic(err)
}
defer db.Close()
err = db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucketIfNotExists([]byte(BlockBucketName))
if err != nil {
log.Panic(err)
}
if b != nil {
// Create the genesis block with a coinbase transaction
txCoinbase := NewCoinbaseTransacion(address)
genesisBlock := CreateGenesisBlock([]*Transaction{txCoinbase})
err := b.Put(genesisBlock.BlockHash, gobEncode(genesisBlock))
if err != nil {
log.Panic(err)
}
// Update Tip of blockchain
err = b.Put([]byte("l"), genesisBlock.BlockHash)
if err != nil {
log.Panic(err)
}
}
return nil
})
if err != nil {
log.Panic(err)
}
}
// Convert command variables to Transaction Objects
func (blockchain *Blockchain) hanldeTransations(from []string, to []string, amount []string, nodeId string) []*Transaction {
var txs []*Transaction
utxoSet := &UTXOSet{blockchain}
for i := 0; i < len(from); i++ {
amountInt, _ := strconv.Atoi(amount[i])
tx := NewSimpleTransation(from[i], to[i], int64(amountInt), utxoSet, txs, nodeId)
txs = append(txs, tx)
}
return txs
}
// Package transactions and mine a new Block
func (blockchain *Blockchain) MineNewBlock(originalTxs []*Transaction) *Block {
// Reward of mining a block
coinBaseTransaction := NewRewardTransacion()
txs := []*Transaction{coinBaseTransaction}
txs = append(txs, originalTxs...)
// Verify transactions
for _, tx := range txs {
if !tx.IsCoinBaseTransaction() {
if blockchain.VerifityTransaction(tx, txs) == false {
log.Panic("Verify transaction failed...")
}
}
}
DBName := fmt.Sprintf(DBName, os.Getenv("NODE_ID"))
db, err := bolt.Open(DBName, 0600, nil)
if err != nil {
log.Panic(err)
}
defer db.Close()
// Get the latest block
var block Block
err = db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte(BlockBucketName))
if b != nil {
hash := b.Get([]byte("l"))
blockBytes := b.Get(hash)
gobDecode(blockBytes, &block)
}
return nil
})
if err != nil {
log.Panic(err)
}
// Mine a new block
newBlock := NewBlock(txs, block.Height+1, block.BlockHash)
return newBlock
}
// Save a block to the database
func (blockchain *Blockchain) SaveNewBlockToBlockchain(newBlock *Block) {
DBName := fmt.Sprintf(DBName, os.Getenv("NODE_ID"))
db, err := bolt.Open(DBName, 0600, nil)
if err != nil {
log.Panic(err)
}
defer db.Close()
err = db.Update(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte(BlockBucketName))
if b != nil {
b.Put(newBlock.BlockHash, gobEncode(newBlock))
b.Put([]byte("l"), newBlock.BlockHash)
blockchain.Tip = newBlock.BlockHash
}
return nil
})
if err != nil {
log.Panic(err)
}
}
// Get Unspent transaction outputs(UTXOs)
func (blc *Blockchain) getUTXOsByAddress(address string, txs []*Transaction) []*UTXO {
var utxos []*UTXO
spentTxOutputMap := make(map[string][]int)
// calculate UTXOs by querying txs
for i := len(txs) - 1; i >= 0; i-- {
utxos = caculate(txs[i], address, spentTxOutputMap, utxos)
}
// calculate UTXOs by querying Blocks
it := blc.Iterator()
for {
block := it.Next()
for i := len(block.Transactions) - 1; i >= 0; i-- {
utxos = caculate(block.Transactions[i], address, spentTxOutputMap, utxos)
}
hashInt := new(big.Int)
hashInt.SetBytes(block.PrevBlockHash)
// If current block is genesis block, exit loop
if big.NewInt(0).Cmp(hashInt) == 0 {
break
}
}
return utxos
}
// calculate utxos
func caculate(tx *Transaction, address string, spentOutputMap map[string][]int, utxos []*UTXO) []*UTXO {
// collect all inputs into spentOutputMap
if !tx.IsCoinBaseTransaction() {
for _, input := range tx.Inputs {
full_payload := Base58Decode([]byte(address))
pubKeyHash := full_payload[1 : len(full_payload)-addressCheckSumLen]
if input.UnlockWithAddress(pubKeyHash) {
transactionHash := hex.EncodeToString(input.TransactionHash)
spentOutputMap[transactionHash] = append(spentOutputMap[transactionHash], input.IndexOfOutputs)
}
}
}
// Tranverse all outputs, unSpentUTXOs = all outputs - spent outputs
outputsLoop:
for index, output := range tx.Outputs {
if output.UnlockWithAddress(address) {
if len(spentOutputMap) != 0 {
var isSpent bool
for transactionHash, indexArray := range spentOutputMap { //143d,[]int{1}
//遍历 记录已经花费的下标的数组
for _, i := range indexArray {
if i == index && hex.EncodeToString(tx.TransactionHash) == transactionHash {
isSpent = true //标记当前的output是已经花费
continue outputsLoop
}
}
}
if !isSpent {
utxo := &UTXO{tx.TransactionHash, index, output}
utxos = append(utxos, utxo)
}
} else {
utxo := &UTXO{tx.TransactionHash, index, output}
utxos = append(utxos, utxo)
}
}
}
return utxos
}
// Find UTXOs which can be regarded as inputs in this transaction
func (bc *Blockchain) FindSpendableUTXOs(from string, amount int64, txs []*Transaction) (int64, map[string][]int) {
var total int64
spendableMap := make(map[string][]int)
utxos := bc.getUTXOsByAddress(from, txs)
for _, utxo := range utxos {
total += utxo.Output.Value
transactionHash := hex.EncodeToString(utxo.TransactionHash)
spendableMap[transactionHash] = append(spendableMap[transactionHash], utxo.Index)
if total >= amount {
break
}
}
if total < amount {
fmt.Printf("%s,余额不足,无法转账。。", from)
os.Exit(1)
}
return total, spendableMap
}
func (blc *Blockchain) Printchain() {
blockIterator := blc.Iterator()
for {
block := blockIterator.Next()
fmt.Println(block)
var hashInt big.Int
hashInt.SetBytes(block.PrevBlockHash)
if big.NewInt(0).Cmp(&hashInt) == 0 {
break
}
}
}
func (blockchain *Blockchain) Iterator() *BlockchainIterator {
return &BlockchainIterator{blockchain.Tip, blockchain.DB}
}
func DBExists(DBName string) bool {
if _, err := os.Stat(DBName); os.IsNotExist(err) {
return false
}
return true
}
func BlockchainObject(nodeID string) | ame := fmt.Sprintf(DBName, nodeID)
if DBExists(DBName) {
db, err := bolt.Open(DBName, 0600, nil)
if err != nil {
log.Panic(err)
}
defer db.Close()
var blockchain *Blockchain
err = db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte(BlockBucketName))
if b != nil {
hash := b.Get([]byte("l"))
blockchain = &Blockchain{hash, db}
}
return nil
})
if err != nil {
log.Panic(err)
}
return blockchain
} else {
fmt.Println("数据库不存在,无法获取BlockChain对象。。。")
return nil
}
}
func (bc *Blockchain) SignTransaction(tx *Transaction, privateKey ecdsa.PrivateKey, txs []*Transaction) {
if tx.IsCoinBaseTransaction() {
return
}
prevTransactionMap := make(map[string]*Transaction)
for _, input := range tx.Inputs {
transactionHash := hex.EncodeToString(input.TransactionHash)
prevTransactionMap[transactionHash] = bc.FindTransactionByTransactionHash(input.TransactionHash, txs)
}
tx.Sign(privateKey, prevTransactionMap)
}
func (bc *Blockchain) FindTransactionByTransactionHash(transactionHash []byte, txs []*Transaction) *Transaction {
for _, tx := range txs {
if bytes.Compare(tx.TransactionHash, transactionHash) == 0 {
return tx
}
}
iterator := bc.Iterator()
for {
block := iterator.Next()
for _, tx := range block.Transactions {
if bytes.Compare(tx.TransactionHash, transactionHash) == 0 {
return tx
}
}
bigInt := new(big.Int)
bigInt.SetBytes(block.PrevBlockHash)
if big.NewInt(0).Cmp(bigInt) == 0 {
break
}
}
return &Transaction{}
}
/*
验证交易的数字签名
*/
// VerifityTransaction checks the digital signatures of tx. It resolves each
// input's referenced transaction (first among the pending `txs`, then from
// the chain) into a map keyed by hex-encoded transaction hash and delegates
// the signature check to tx.VerifyTransaction.
// Cleanup: the empty `else` branch holding commented-out debug prints was
// dead code and has been removed; behaviour is unchanged.
func (bc *Blockchain) VerifityTransaction(tx *Transaction, txs []*Transaction) bool {
	prevTxs := make(map[string]*Transaction)
	for _, input := range tx.Inputs {
		transactionHash := hex.EncodeToString(input.TransactionHash)
		prevTxs[transactionHash] = bc.FindTransactionByTransactionHash(input.TransactionHash, txs)
	}
	if len(prevTxs) == 0 {
		// No referenced transactions found (only possible for an input-less tx).
		fmt.Println("没找到对应交易")
	}
	return tx.VerifyTransaction(prevTxs)
}
func (bc *Blockchain) GetAllUTXOs() map[string]*UTXOArray {
iterator := bc.Iterator()
utxoMap := make(map[string]*UTXOArray)
//已花费的input map
inputMap := make(map[string][]*Input)
for {
block := iterator.Next()
for i := len(block.Transactions) - 1; i >= 0; i-- {
// collect inputs
tx := block.Transactions[i]
transactionHash := hex.EncodeToString(tx.TransactionHash)
utxoArray := &UTXOArray{[]*UTXO{}}
if !tx.IsCoinBaseTransaction() {
for _, input := range tx.Inputs {
transactionHash := hex.EncodeToString(input.TransactionHash)
inputMap[transactionHash] = append(inputMap[transactionHash], input)
}
}
//根据inputMap,遍历outputs 找出 UTXO
outputLoop:
for index, output := range tx.Outputs {
if len(inputMap) > 0 {
//isSpent := false
inputs := inputMap[transactionHash] //如果inputs 存在, 则对应的交易里面某笔output肯定已经被消费
for _, input := range inputs {
//判断input对应的是否当期的output
if index == input.IndexOfOutputs && input.UnlockWithAddress(output.PubKeyHash) {
//此笔output已被消费
//isSpent = true
continue outputLoop
}
}
//if isSpent == false {
//outputs 加进utxoMap
utxo := &UTXO{tx.TransactionHash, index, output}
utxoArray.UTXOs = append(utxoArray.UTXOs, utxo)
//}
} else {
//outputs 加进utxoMap
utxo := &UTXO{tx.TransactionHash, index, output}
utxoArray.UTXOs = append(utxoArray.UTXOs, utxo)
}
}
if len(utxoArray.UTXOs) > 0 {
utxoMap[transactionHash] = utxoArray
}
}
//退出条件
hashBigInt := new(big.Int)
hashBigInt.SetBytes(block.PrevBlockHash)
if big.NewInt(0).Cmp(hashBigInt) == 0 {
break
}
}
return utxoMap
}
func (bc *Blockchain) GetHeight() int64 {
return bc.Iterator().Next().Height
}
func (bc *Blockchain) getAllBlocksHash() [][]byte {
iterator := bc.Iterator()
var blocksHashes [][]byte
for {
block := iterator.Next()
blocksHashes = append(blocksHashes, block.BlockHash)
bigInt := new(big.Int)
bigInt.SetBytes(block.PrevBlockHash)
if big.NewInt(0).Cmp(bigInt) == 0 {
break
}
}
return blocksHashes
}
func (bc *Blockchain) GetBlockByHash(hash []byte) *Block {
var block Block
DBName := fmt.Sprintf(DBName, os.Getenv("NODE_ID"))
db, err := bolt.Open(DBName, 0600, nil)
if err != nil {
log.Panic(err)
}
defer db.Close()
err = db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte(BlockBucketName))
if b != nil {
blockBytes := b.Get(hash)
gobDecode(blockBytes, &block)
}
return nil
})
if err != nil {
log.Panic(err)
}
return &block
}
// AddBlockToChain stores a block received from a peer into this node's DB.
// Duplicate blocks (hash already present) are ignored. The "l" tip pointer
// and the in-memory Tip advance only when the new block is higher than the
// current last block.
// Fix: the b.Put([]byte("l"), ...) error was previously ignored; all Put
// errors are now returned from the closure so bolt rolls the transaction
// back and the failure surfaces via log.Panic.
func (bc *Blockchain) AddBlockToChain(block *Block) {
	DBName := fmt.Sprintf(DBName, os.Getenv("NODE_ID"))
	db, err := bolt.Open(DBName, 0600, nil)
	if err != nil {
		log.Panic(err)
	}
	defer db.Close()
	err = db.Update(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte(BlockBucketName))
		if b == nil {
			return nil
		}
		// Already stored — nothing to do.
		if b.Get(block.BlockHash) != nil {
			return nil
		}
		if err := b.Put(block.BlockHash, gobEncode(block)); err != nil {
			return err
		}
		// Compare against the current last block to decide whether to move the tip.
		lastBlockBytes := b.Get(b.Get([]byte("l")))
		var lastBlock Block
		gobDecode(lastBlockBytes, &lastBlock)
		if lastBlock.Height < block.Height {
			if err := b.Put([]byte("l"), block.BlockHash); err != nil {
				return err
			}
			bc.Tip = block.BlockHash
		}
		return nil
	})
	if err != nil {
		log.Panic(err)
	}
}
| *Blockchain {
DBN | conditional_block |
Blockchain.go | package BLC
import (
"bytes"
"crypto/ecdsa"
"encoding/hex"
"fmt"
"github.com/boltdb/bolt"
"log"
"math/big"
"os"
"strconv"
)
type Blockchain struct {
Tip []byte // BlockHash of top Block
DB *bolt.DB // A pointer to the database
}
func CreateBlockchainWithGenesisBlock(address string, nodeID string) {
DBName := fmt.Sprintf(DBName, nodeID)
if DBExists(DBName) {
fmt.Println("Genesis block already exist!")
os.Exit(1)
}
fmt.Println("Creating genesis block....")
db, err := bolt.Open(DBName, 0600, nil)
if err != nil {
log.Panic(err)
}
defer db.Close()
err = db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucketIfNotExists([]byte(BlockBucketName))
if err != nil {
log.Panic(err)
}
if b != nil {
// Create the genesis block with a coinbase transaction
txCoinbase := NewCoinbaseTransacion(address)
genesisBlock := CreateGenesisBlock([]*Transaction{txCoinbase})
err := b.Put(genesisBlock.BlockHash, gobEncode(genesisBlock))
if err != nil {
log.Panic(err)
}
// Update Tip of blockchain
err = b.Put([]byte("l"), genesisBlock.BlockHash)
if err != nil {
log.Panic(err)
}
}
return nil
})
if err != nil {
log.Panic(err)
}
}
// Convert command variables to Transaction Objects
func (blockchain *Blockchain) hanldeTransations(from []string, to []string, amount []string, nodeId string) []*Transaction {
var txs []*Transaction
utxoSet := &UTXOSet{blockchain}
for i := 0; i < len(from); i++ {
amountInt, _ := strconv.Atoi(amount[i])
tx := NewSimpleTransation(from[i], to[i], int64(amountInt), utxoSet, txs, nodeId)
txs = append(txs, tx)
}
return txs
}
// Package transactions and mine a new Block
func (blockchain *Blockchain) MineNewBlock(originalTxs []*Transaction) *Block {
// Reward of mining a block
coinBaseTransaction := NewRewardTransacion()
txs := []*Transaction{coinBaseTransaction}
txs = append(txs, originalTxs...)
// Verify transactions
for _, tx := range txs {
if !tx.IsCoinBaseTransaction() {
if blockchain.VerifityTransaction(tx, txs) == false {
log.Panic("Verify transaction failed...")
}
}
}
DBName := fmt.Sprintf(DBName, os.Getenv("NODE_ID"))
db, err := bolt.Open(DBName, 0600, nil)
if err != nil {
log.Panic(err)
}
defer db.Close()
// Get the latest block
var block Block
err = db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte(BlockBucketName))
if b != nil {
hash := b.Get([]byte("l"))
blockBytes := b.Get(hash)
gobDecode(blockBytes, &block)
}
return nil
})
if err != nil {
log.Panic(err)
}
// Mine a new block
newBlock := NewBlock(txs, block.Height+1, block.BlockHash)
return newBlock
}
// Save a block to the database
func (blockchain *Blockchain) SaveNewBlockToBlockchain(newBlock *Block) {
DBName := fmt.Sprintf(DBName, os.Getenv("NODE_ID"))
db, err := bolt.Open(DBName, 0600, nil)
if err != nil {
log.Panic(err)
}
defer db.Close()
err = db.Update(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte(BlockBucketName))
if b != nil {
b.Put(newBlock.BlockHash, gobEncode(newBlock))
b.Put([]byte("l"), newBlock.BlockHash)
blockchain.Tip = newBlock.BlockHash
}
return nil
})
if err != nil {
log.Panic(err)
}
}
// Get Unspent transaction outputs(UTXOs)
func (blc *Blockchain) getUTXOsByAddress(address string, txs []*Transaction) []*UTXO {
var utxos []*UTXO
spentTxOutputMap := make(map[string][]int)
// calculate UTXOs by querying txs
for i := len(txs) - 1; i >= 0; i-- {
utxos = caculate(txs[i], address, spentTxOutputMap, utxos)
}
// calculate UTXOs by querying Blocks
it := blc.Iterator()
for {
block := it.Next()
for i := len(block.Transactions) - 1; i >= 0; i-- {
utxos = caculate(block.Transactions[i], address, spentTxOutputMap, utxos)
}
hashInt := new(big.Int)
hashInt.SetBytes(block.PrevBlockHash)
// If current block is genesis block, exit loop
if big.NewInt(0).Cmp(hashInt) == 0 {
break
}
}
return utxos
}
// calculate utxos
func caculate(tx *Transaction, address string, spentOutputMap map[string][]int, utxos []*UTXO) []*UTXO {
// collect all inputs into spentOutputMap
if !tx.IsCoinBaseTransaction() {
for _, input := range tx.Inputs {
full_payload := Base58Decode([]byte(address))
pubKeyHash := full_payload[1 : len(full_payload)-addressCheckSumLen]
if input.UnlockWithAddress(pubKeyHash) {
transactionHash := hex.EncodeToString(input.TransactionHash)
spentOutputMap[transactionHash] = append(spentOutputMap[transactionHash], input.IndexOfOutputs)
}
}
}
// Tranverse all outputs, unSpentUTXOs = all outputs - spent outputs
outputsLoop:
for index, output := range tx.Outputs {
if output.UnlockWithAddress(address) {
if len(spentOutputMap) != 0 {
var isSpent bool
for transactionHash, indexArray := range spentOutputMap { //143d,[]int{1}
//遍历 记录已经花费的下标的数组
for _, i := range indexArray {
if i == index && hex.EncodeToString(tx.TransactionHash) == transactionHash {
isSpent = true //标记当前的output是已经花费
continue outputsLoop
}
}
}
if !isSpent {
utxo := &UTXO{tx.TransactionHash, index, output}
utxos = append(utxos, utxo)
}
} else {
utxo := &UTXO{tx.TransactionHash, index, output}
utxos = append(utxos, utxo)
}
}
}
return utxos
}
// Find UTXOs which can be regarded as inputs in this transaction
func (bc *Blockchain) FindSpendableUTXOs(from string, amount int64, txs []*Transaction) (int64, map[string][]int) {
var total int64
spendableMap := make(map[string][]int)
utxos := bc.getUTXOsByAddress(from, txs)
for _, utxo := range utxos {
total += utxo.Output.Value
transactionHash := hex.EncodeToString(utxo.TransactionHash)
spendableMap[transactionHash] = append(spendableMap[transactionHash], utxo.Index)
if total >= amount {
break
}
}
if total < amount {
fmt.Printf("%s,余额不足,无法转账。。", from)
os.Exit(1)
}
return total, spendableMap
}
func (blc *Blockchain) Printchain() {
blockIterator := blc.Iterator()
for {
block := blockIterator.Next()
fmt.Println(block)
var hashInt big.Int
hashInt.SetBytes(block.PrevBlockHash)
if big.NewInt(0).Cmp(&hashInt) == 0 {
break
}
}
}
func (blockchain *Blockchain) Iterator() *BlockchainIterator {
return &BlockchainIterator{blockchain.Tip, blockchain.DB}
}
func DBExists(DBName string) bool {
if _, err := os.Stat(DBName); os.IsNotExist(err) {
return false
}
return true
}
func BlockchainObject(nodeID string) *Blockchain {
DBName := fmt.Sprintf(DBName, nodeID)
if DBExists(DBName) {
db, err := bolt.Open(DBName, 0600, nil)
if err != nil {
log.Panic(err)
}
defer db.Close()
var blockchain *Blockchain
err = db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte(BlockBucketName))
if b != nil {
hash := b.Get([]byte("l"))
blockchain = &Blockchain{hash, db}
}
return nil
})
if err != nil {
log.Panic(err)
}
return blockchain
} else {
fmt.Println("数据库不存在,无法获取BlockChain对象。。。")
return nil
}
}
func (bc *Blockchain) SignTransaction(tx *Transaction, privateKey ecdsa.PrivateKey, txs []*Transaction) {
if tx.IsCoinBaseTransaction() {
return
}
prevTransactionMap := make(map[string]*Transaction)
for _, input := range tx.Inputs {
transactionHash := hex.EncodeToString(input.TransactionHash)
prevTransactionMap[transactionHash] = bc.FindTransactionByTransactionHash(input.TransactionHash, txs)
}
tx.Sign(privateKey, prevTransactionMap)
}
func (bc *Blockchain) FindTransactionByTransactionHash(transactionHash []byte, txs []*Transaction) *Transaction {
for _, tx := range txs {
if bytes.Compare(tx.TransactionHash, transactionHash) == 0 {
return tx
}
}
iterator := bc.Iterator()
for {
block := iterator.Next()
for _, tx := range block.Transactions {
if bytes.Compare(tx.TransactionHash, transactionHash) == 0 {
return tx
}
}
bigInt := new(big.Int)
bigInt.SetBytes(block.PrevBlockHash)
if big.NewInt(0).Cmp(bigInt) == 0 {
break
}
}
return &Transaction{}
}
/*
验证交易的数字签名
*/
func (bc *Blockchain) VerifityTransaction(tx *Transaction, txs []*Transaction) bool {
//要想验证数字签名:私钥+数据 (tx的副本+之前的交易)
//2.获取该tx中的Input,引用之前的transaction中的未花费的output
prevTxs := make(map[string]*Transaction)
for _, input := range tx.Inputs {
transactionHash := hex.EncodeToString(input.TransactionHash)
prevTxs[transactionHash] = bc.FindTransactionByTransactionHash(input.TransactionHash, txs)
}
if len(prevTxs) == 0 {
fmt.Println("没找到对应交易")
} else {
//fmt.Println("preTxs___________________________________")
//fmt.Println(prevTxs)
}
//验证
return tx.VerifyTransaction(prevTxs)
//return true
}
func (bc *Blockchain) GetAllUTXOs() map[string]*UTXOArray {
iterator := bc.Iterator()
utxoMap := make(map[string]*UTXOArray)
//已花费的input map
inputMap := make(map[string][]*Input)
for {
block := iterator.Next()
for i := len(block.Transactions) - 1; i >= 0; i-- {
// collect inputs
tx := block.Transactions[i]
transactionHash := hex.EncodeToString(tx.TransactionHash)
utxoArray := &UTXOArray{[]*UTXO{}}
if !tx.IsCoinBaseTransaction() {
for _, input := range tx.Inputs {
transactionHash := hex.EncodeToString(input.TransactionHash)
inputMap[transactionHash] = append(inputMap[transactionHash], input)
}
}
//根据inputMap,遍历outputs 找出 UTXO
outputLoop:
for index, output := range tx.Outputs {
if len(inputMap) > 0 {
//isSpent := false
inputs := inputMap[transactionHash] //如果inputs 存在, 则对应的交易里面某笔output肯定已经被消费
for _, input := range inputs {
//判断input对应的是否当期的output
if index == input.IndexOfOutputs && input.UnlockWithAddress(output.PubKeyHash) {
//此笔output已被消费
//isSpent = true
continue outputLoop
}
}
//if isSpent == false {
//outputs 加进utxoMap
utxo := &UTXO{tx.TransactionHash, index, output}
utxoArray.UTXOs = append(utxoArray.UTXOs, utxo)
//}
} else {
//outputs 加进utxoMap
utxo := &UTXO{tx.TransactionHash, index, output}
utxoArray.UTXOs = append(utxoArray.UTXOs, utxo)
}
}
if len(utxoArray.UTXOs) > 0 {
utxoMap[transactionHash] = utxoArray
}
}
//退出条件
hashBigInt := new(big.Int)
hashBigInt.SetBytes(block.PrevBlockHash)
if big.NewInt(0).Cmp(hashBigInt) == 0 {
break
}
}
return utxoMap
}
func (bc *Blockchain) GetHeight() int64 {
return bc.Iterator().Next().Height
}
func (bc *Blockchain) getAllBlocksHash() [][]byte {
iterator := bc.Iterator()
var blocksHashes [][]byte
for {
block := iterator.Next()
blocksHashes = append(blocksHashes, block.BlockHash)
bigInt := new(big.Int)
bigInt.SetBytes(block.PrevBlockHash)
if big | ).Cmp(bigInt) == 0 {
break
}
}
return blocksHashes
}
func (bc *Blockchain) GetBlockByHash(hash []byte) *Block {
var block Block
DBName := fmt.Sprintf(DBName, os.Getenv("NODE_ID"))
db, err := bolt.Open(DBName, 0600, nil)
if err != nil {
log.Panic(err)
}
defer db.Close()
err = db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte(BlockBucketName))
if b != nil {
blockBytes := b.Get(hash)
gobDecode(blockBytes, &block)
}
return nil
})
if err != nil {
log.Panic(err)
}
return &block
}
func (bc *Blockchain) AddBlockToChain(block *Block) {
DBName := fmt.Sprintf(DBName, os.Getenv("NODE_ID"))
db, err := bolt.Open(DBName, 0600, nil)
if err != nil {
log.Panic(err)
}
defer db.Close()
err = db.Update(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte(BlockBucketName))
if b != nil {
blockBytes := b.Get(block.BlockHash)
if blockBytes != nil {
return nil
}
err := b.Put(block.BlockHash, gobEncode(block))
if err != nil {
log.Panic(err)
}
lastBlockHash := b.Get([]byte("l"))
lastBlockBytes := b.Get(lastBlockHash)
var lastBlock Block
gobDecode(lastBlockBytes, &lastBlock)
if lastBlock.Height < block.Height {
b.Put([]byte("l"), block.BlockHash)
bc.Tip = block.BlockHash
}
}
return nil
})
if err != nil {
log.Panic(err)
}
}
| .NewInt(0 | identifier_name |
channel.pb.go | // Copyright (c) 2018 NEC Laboratories Europe GmbH.
//
// Authors: Wenting Li <wenting.li@neclab.eu>
// Sergey Fedorov <sergey.fedorov@neclab.eu>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.24.0
// protoc v3.12.1
// source: channel.proto
package proto
import (
context "context"
proto "github.com/golang/protobuf/proto"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// This is a compile-time assertion that a sufficiently up-to-date version
// of the legacy proto package is being used.
const _ = proto.ProtoPackageIsVersion4
type Message struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Payload []byte `protobuf:"bytes,1,opt,name=payload,proto3" json:"payload,omitempty"`
}
func (x *Message) Reset() {
*x = Message{}
if protoimpl.UnsafeEnabled {
mi := &file_channel_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Message) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Message) ProtoMessage() {}
func (x *Message) ProtoReflect() protoreflect.Message {
mi := &file_channel_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Message.ProtoReflect.Descriptor instead.
func (*Message) Descriptor() ([]byte, []int) {
return file_channel_proto_rawDescGZIP(), []int{0}
}
func (x *Message) GetPayload() []byte {
if x != nil {
return x.Payload
}
return nil
}
var File_channel_proto protoreflect.FileDescriptor
var file_channel_proto_rawDesc = []byte{
0x0a, 0x0d, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12,
0x05, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x23, 0x0a, 0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67,
0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x01, 0x20, 0x01,
0x28, 0x0c, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x32, 0x6f, 0x0a, 0x07, 0x43,
0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x12, 0x32, 0x0a, 0x0a, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74,
0x43, 0x68, 0x61, 0x74, 0x12, 0x0e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4d, 0x65, 0x73,
0x73, 0x61, 0x67, 0x65, 0x1a, 0x0e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4d, 0x65, 0x73,
0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x30, 0x0a, 0x08, 0x50, 0x65,
0x65, 0x72, 0x43, 0x68, 0x61, 0x74, 0x12, 0x0e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4d,
0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x0e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4d,
0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x62, 0x06, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x33,
}
var (
file_channel_proto_rawDescOnce sync.Once
file_channel_proto_rawDescData = file_channel_proto_rawDesc
)
func file_channel_proto_rawDescGZIP() []byte {
file_channel_proto_rawDescOnce.Do(func() {
file_channel_proto_rawDescData = protoimpl.X.CompressGZIP(file_channel_proto_rawDescData)
})
return file_channel_proto_rawDescData
}
var file_channel_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
var file_channel_proto_goTypes = []interface{}{
(*Message)(nil), // 0: proto.Message
}
var file_channel_proto_depIdxs = []int32{
0, // 0: proto.Channel.ClientChat:input_type -> proto.Message
0, // 1: proto.Channel.PeerChat:input_type -> proto.Message
0, // 2: proto.Channel.ClientChat:output_type -> proto.Message
0, // 3: proto.Channel.PeerChat:output_type -> proto.Message
2, // [2:4] is the sub-list for method output_type
0, // [0:2] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
}
func init() { file_channel_proto_init() }
func file_channel_proto_init() {
if File_channel_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_channel_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Message); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_channel_proto_rawDesc,
NumEnums: 0,
NumMessages: 1,
NumExtensions: 0,
NumServices: 1,
},
GoTypes: file_channel_proto_goTypes,
DependencyIndexes: file_channel_proto_depIdxs,
MessageInfos: file_channel_proto_msgTypes, | }.Build()
File_channel_proto = out.File
file_channel_proto_rawDesc = nil
file_channel_proto_goTypes = nil
file_channel_proto_depIdxs = nil
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConnInterface
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion6
// ChannelClient is the client API for Channel service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type ChannelClient interface {
ClientChat(ctx context.Context, opts ...grpc.CallOption) (Channel_ClientChatClient, error)
PeerChat(ctx context.Context, opts ...grpc.CallOption) (Channel_PeerChatClient, error)
}
type channelClient struct {
cc grpc.ClientConnInterface
}
func NewChannelClient(cc grpc.ClientConnInterface) ChannelClient {
return &channelClient{cc}
}
func (c *channelClient) ClientChat(ctx context.Context, opts ...grpc.CallOption) (Channel_ClientChatClient, error) {
stream, err := c.cc.NewStream(ctx, &_Channel_serviceDesc.Streams[0], "/proto.Channel/ClientChat", opts...)
if err != nil {
return nil, err
}
x := &channelClientChatClient{stream}
return x, nil
}
type Channel_ClientChatClient interface {
Send(*Message) error
Recv() (*Message, error)
grpc.ClientStream
}
type channelClientChatClient struct {
grpc.ClientStream
}
func (x *channelClientChatClient) Send(m *Message) error {
return x.ClientStream.SendMsg(m)
}
func (x *channelClientChatClient) Recv() (*Message, error) {
m := new(Message)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
func (c *channelClient) PeerChat(ctx context.Context, opts ...grpc.CallOption) (Channel_PeerChatClient, error) {
stream, err := c.cc.NewStream(ctx, &_Channel_serviceDesc.Streams[1], "/proto.Channel/PeerChat", opts...)
if err != nil {
return nil, err
}
x := &channelPeerChatClient{stream}
return x, nil
}
type Channel_PeerChatClient interface {
Send(*Message) error
Recv() (*Message, error)
grpc.ClientStream
}
type channelPeerChatClient struct {
grpc.ClientStream
}
func (x *channelPeerChatClient) Send(m *Message) error {
return x.ClientStream.SendMsg(m)
}
func (x *channelPeerChatClient) Recv() (*Message, error) {
m := new(Message)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
// ChannelServer is the server API for Channel service.
type ChannelServer interface {
ClientChat(Channel_ClientChatServer) error
PeerChat(Channel_PeerChatServer) error
}
// UnimplementedChannelServer can be embedded to have forward compatible implementations.
type UnimplementedChannelServer struct {
}
func (*UnimplementedChannelServer) ClientChat(Channel_ClientChatServer) error {
return status.Errorf(codes.Unimplemented, "method ClientChat not implemented")
}
func (*UnimplementedChannelServer) PeerChat(Channel_PeerChatServer) error {
return status.Errorf(codes.Unimplemented, "method PeerChat not implemented")
}
func RegisterChannelServer(s *grpc.Server, srv ChannelServer) {
s.RegisterService(&_Channel_serviceDesc, srv)
}
func _Channel_ClientChat_Handler(srv interface{}, stream grpc.ServerStream) error {
return srv.(ChannelServer).ClientChat(&channelClientChatServer{stream})
}
type Channel_ClientChatServer interface {
Send(*Message) error
Recv() (*Message, error)
grpc.ServerStream
}
type channelClientChatServer struct {
grpc.ServerStream
}
func (x *channelClientChatServer) Send(m *Message) error {
return x.ServerStream.SendMsg(m)
}
func (x *channelClientChatServer) Recv() (*Message, error) {
m := new(Message)
if err := x.ServerStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
func _Channel_PeerChat_Handler(srv interface{}, stream grpc.ServerStream) error {
return srv.(ChannelServer).PeerChat(&channelPeerChatServer{stream})
}
type Channel_PeerChatServer interface {
Send(*Message) error
Recv() (*Message, error)
grpc.ServerStream
}
type channelPeerChatServer struct {
grpc.ServerStream
}
func (x *channelPeerChatServer) Send(m *Message) error {
return x.ServerStream.SendMsg(m)
}
func (x *channelPeerChatServer) Recv() (*Message, error) {
m := new(Message)
if err := x.ServerStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
var _Channel_serviceDesc = grpc.ServiceDesc{
ServiceName: "proto.Channel",
HandlerType: (*ChannelServer)(nil),
Methods: []grpc.MethodDesc{},
Streams: []grpc.StreamDesc{
{
StreamName: "ClientChat",
Handler: _Channel_ClientChat_Handler,
ServerStreams: true,
ClientStreams: true,
},
{
StreamName: "PeerChat",
Handler: _Channel_PeerChat_Handler,
ServerStreams: true,
ClientStreams: true,
},
},
Metadata: "channel.proto",
} | random_line_split | |
channel.pb.go | // Copyright (c) 2018 NEC Laboratories Europe GmbH.
//
// Authors: Wenting Li <wenting.li@neclab.eu>
// Sergey Fedorov <sergey.fedorov@neclab.eu>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.24.0
// protoc v3.12.1
// source: channel.proto
package proto
import (
context "context"
proto "github.com/golang/protobuf/proto"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// This is a compile-time assertion that a sufficiently up-to-date version
// of the legacy proto package is being used.
const _ = proto.ProtoPackageIsVersion4
type Message struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Payload []byte `protobuf:"bytes,1,opt,name=payload,proto3" json:"payload,omitempty"`
}
func (x *Message) Reset() {
*x = Message{}
if protoimpl.UnsafeEnabled {
mi := &file_channel_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Message) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Message) ProtoMessage() {}
func (x *Message) ProtoReflect() protoreflect.Message {
mi := &file_channel_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Message.ProtoReflect.Descriptor instead.
func (*Message) Descriptor() ([]byte, []int) {
return file_channel_proto_rawDescGZIP(), []int{0}
}
func (x *Message) GetPayload() []byte {
if x != nil {
return x.Payload
}
return nil
}
var File_channel_proto protoreflect.FileDescriptor
var file_channel_proto_rawDesc = []byte{
0x0a, 0x0d, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12,
0x05, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x23, 0x0a, 0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67,
0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x01, 0x20, 0x01,
0x28, 0x0c, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x32, 0x6f, 0x0a, 0x07, 0x43,
0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x12, 0x32, 0x0a, 0x0a, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74,
0x43, 0x68, 0x61, 0x74, 0x12, 0x0e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4d, 0x65, 0x73,
0x73, 0x61, 0x67, 0x65, 0x1a, 0x0e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4d, 0x65, 0x73,
0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x30, 0x0a, 0x08, 0x50, 0x65,
0x65, 0x72, 0x43, 0x68, 0x61, 0x74, 0x12, 0x0e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4d,
0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x0e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4d,
0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x62, 0x06, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x33,
}
var (
file_channel_proto_rawDescOnce sync.Once
file_channel_proto_rawDescData = file_channel_proto_rawDesc
)
func file_channel_proto_rawDescGZIP() []byte {
file_channel_proto_rawDescOnce.Do(func() {
file_channel_proto_rawDescData = protoimpl.X.CompressGZIP(file_channel_proto_rawDescData)
})
return file_channel_proto_rawDescData
}
var file_channel_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
var file_channel_proto_goTypes = []interface{}{
(*Message)(nil), // 0: proto.Message
}
var file_channel_proto_depIdxs = []int32{
0, // 0: proto.Channel.ClientChat:input_type -> proto.Message
0, // 1: proto.Channel.PeerChat:input_type -> proto.Message
0, // 2: proto.Channel.ClientChat:output_type -> proto.Message
0, // 3: proto.Channel.PeerChat:output_type -> proto.Message
2, // [2:4] is the sub-list for method output_type
0, // [0:2] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
}
func init() { file_channel_proto_init() }
func file_channel_proto_init() {
if File_channel_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_channel_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Message); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_channel_proto_rawDesc,
NumEnums: 0,
NumMessages: 1,
NumExtensions: 0,
NumServices: 1,
},
GoTypes: file_channel_proto_goTypes,
DependencyIndexes: file_channel_proto_depIdxs,
MessageInfos: file_channel_proto_msgTypes,
}.Build()
File_channel_proto = out.File
file_channel_proto_rawDesc = nil
file_channel_proto_goTypes = nil
file_channel_proto_depIdxs = nil
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConnInterface
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion6
// ChannelClient is the client API for Channel service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type ChannelClient interface {
ClientChat(ctx context.Context, opts ...grpc.CallOption) (Channel_ClientChatClient, error)
PeerChat(ctx context.Context, opts ...grpc.CallOption) (Channel_PeerChatClient, error)
}
type channelClient struct {
cc grpc.ClientConnInterface
}
func NewChannelClient(cc grpc.ClientConnInterface) ChannelClient {
return &channelClient{cc}
}
func (c *channelClient) ClientChat(ctx context.Context, opts ...grpc.CallOption) (Channel_ClientChatClient, error) {
stream, err := c.cc.NewStream(ctx, &_Channel_serviceDesc.Streams[0], "/proto.Channel/ClientChat", opts...)
if err != nil {
return nil, err
}
x := &channelClientChatClient{stream}
return x, nil
}
type Channel_ClientChatClient interface {
Send(*Message) error
Recv() (*Message, error)
grpc.ClientStream
}
type channelClientChatClient struct {
grpc.ClientStream
}
func (x *channelClientChatClient) Send(m *Message) error {
return x.ClientStream.SendMsg(m)
}
func (x *channelClientChatClient) Recv() (*Message, error) {
m := new(Message)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
func (c *channelClient) PeerChat(ctx context.Context, opts ...grpc.CallOption) (Channel_PeerChatClient, error) {
stream, err := c.cc.NewStream(ctx, &_Channel_serviceDesc.Streams[1], "/proto.Channel/PeerChat", opts...)
if err != nil {
return nil, err
}
x := &channelPeerChatClient{stream}
return x, nil
}
type Channel_PeerChatClient interface {
Send(*Message) error
Recv() (*Message, error)
grpc.ClientStream
}
type channelPeerChatClient struct {
grpc.ClientStream
}
func (x *channelPeerChatClient) Send(m *Message) error {
return x.ClientStream.SendMsg(m)
}
func (x *channelPeerChatClient) Recv() (*Message, error) {
m := new(Message)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
// ChannelServer is the server API for Channel service.
type ChannelServer interface {
ClientChat(Channel_ClientChatServer) error
PeerChat(Channel_PeerChatServer) error
}
// UnimplementedChannelServer can be embedded to have forward compatible implementations.
type UnimplementedChannelServer struct {
}
func (*UnimplementedChannelServer) ClientChat(Channel_ClientChatServer) error {
return status.Errorf(codes.Unimplemented, "method ClientChat not implemented")
}
func (*UnimplementedChannelServer) PeerChat(Channel_PeerChatServer) error {
return status.Errorf(codes.Unimplemented, "method PeerChat not implemented")
}
func RegisterChannelServer(s *grpc.Server, srv ChannelServer) {
s.RegisterService(&_Channel_serviceDesc, srv)
}
func _Channel_ClientChat_Handler(srv interface{}, stream grpc.ServerStream) error {
return srv.(ChannelServer).ClientChat(&channelClientChatServer{stream})
}
type Channel_ClientChatServer interface {
Send(*Message) error
Recv() (*Message, error)
grpc.ServerStream
}
type channelClientChatServer struct {
grpc.ServerStream
}
func (x *channelClientChatServer) Send(m *Message) error {
return x.ServerStream.SendMsg(m)
}
func (x *channelClientChatServer) Recv() (*Message, error) {
m := new(Message)
if err := x.ServerStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
func _Channel_PeerChat_Handler(srv interface{}, stream grpc.ServerStream) error |
type Channel_PeerChatServer interface {
Send(*Message) error
Recv() (*Message, error)
grpc.ServerStream
}
type channelPeerChatServer struct {
grpc.ServerStream
}
func (x *channelPeerChatServer) Send(m *Message) error {
return x.ServerStream.SendMsg(m)
}
func (x *channelPeerChatServer) Recv() (*Message, error) {
m := new(Message)
if err := x.ServerStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
var _Channel_serviceDesc = grpc.ServiceDesc{
ServiceName: "proto.Channel",
HandlerType: (*ChannelServer)(nil),
Methods: []grpc.MethodDesc{},
Streams: []grpc.StreamDesc{
{
StreamName: "ClientChat",
Handler: _Channel_ClientChat_Handler,
ServerStreams: true,
ClientStreams: true,
},
{
StreamName: "PeerChat",
Handler: _Channel_PeerChat_Handler,
ServerStreams: true,
ClientStreams: true,
},
},
Metadata: "channel.proto",
}
| {
return srv.(ChannelServer).PeerChat(&channelPeerChatServer{stream})
} | identifier_body |
channel.pb.go | // Copyright (c) 2018 NEC Laboratories Europe GmbH.
//
// Authors: Wenting Li <wenting.li@neclab.eu>
// Sergey Fedorov <sergey.fedorov@neclab.eu>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.24.0
// protoc v3.12.1
// source: channel.proto
package proto
import (
context "context"
proto "github.com/golang/protobuf/proto"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// This is a compile-time assertion that a sufficiently up-to-date version
// of the legacy proto package is being used.
const _ = proto.ProtoPackageIsVersion4
type Message struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Payload []byte `protobuf:"bytes,1,opt,name=payload,proto3" json:"payload,omitempty"`
}
func (x *Message) Reset() {
*x = Message{}
if protoimpl.UnsafeEnabled {
mi := &file_channel_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Message) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Message) ProtoMessage() {}
func (x *Message) ProtoReflect() protoreflect.Message {
mi := &file_channel_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Message.ProtoReflect.Descriptor instead.
func (*Message) Descriptor() ([]byte, []int) {
return file_channel_proto_rawDescGZIP(), []int{0}
}
func (x *Message) GetPayload() []byte {
if x != nil {
return x.Payload
}
return nil
}
var File_channel_proto protoreflect.FileDescriptor
var file_channel_proto_rawDesc = []byte{
0x0a, 0x0d, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12,
0x05, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x23, 0x0a, 0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67,
0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x01, 0x20, 0x01,
0x28, 0x0c, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x32, 0x6f, 0x0a, 0x07, 0x43,
0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x12, 0x32, 0x0a, 0x0a, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74,
0x43, 0x68, 0x61, 0x74, 0x12, 0x0e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4d, 0x65, 0x73,
0x73, 0x61, 0x67, 0x65, 0x1a, 0x0e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4d, 0x65, 0x73,
0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x30, 0x0a, 0x08, 0x50, 0x65,
0x65, 0x72, 0x43, 0x68, 0x61, 0x74, 0x12, 0x0e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4d,
0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x0e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4d,
0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x62, 0x06, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x33,
}
var (
file_channel_proto_rawDescOnce sync.Once
file_channel_proto_rawDescData = file_channel_proto_rawDesc
)
func file_channel_proto_rawDescGZIP() []byte {
file_channel_proto_rawDescOnce.Do(func() {
file_channel_proto_rawDescData = protoimpl.X.CompressGZIP(file_channel_proto_rawDescData)
})
return file_channel_proto_rawDescData
}
var file_channel_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
var file_channel_proto_goTypes = []interface{}{
(*Message)(nil), // 0: proto.Message
}
var file_channel_proto_depIdxs = []int32{
0, // 0: proto.Channel.ClientChat:input_type -> proto.Message
0, // 1: proto.Channel.PeerChat:input_type -> proto.Message
0, // 2: proto.Channel.ClientChat:output_type -> proto.Message
0, // 3: proto.Channel.PeerChat:output_type -> proto.Message
2, // [2:4] is the sub-list for method output_type
0, // [0:2] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
}
func init() { file_channel_proto_init() }
func file_channel_proto_init() {
if File_channel_proto != nil {
return
}
if !protoimpl.UnsafeEnabled |
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_channel_proto_rawDesc,
NumEnums: 0,
NumMessages: 1,
NumExtensions: 0,
NumServices: 1,
},
GoTypes: file_channel_proto_goTypes,
DependencyIndexes: file_channel_proto_depIdxs,
MessageInfos: file_channel_proto_msgTypes,
}.Build()
File_channel_proto = out.File
file_channel_proto_rawDesc = nil
file_channel_proto_goTypes = nil
file_channel_proto_depIdxs = nil
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConnInterface
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion6
// ChannelClient is the client API for Channel service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type ChannelClient interface {
ClientChat(ctx context.Context, opts ...grpc.CallOption) (Channel_ClientChatClient, error)
PeerChat(ctx context.Context, opts ...grpc.CallOption) (Channel_PeerChatClient, error)
}
type channelClient struct {
cc grpc.ClientConnInterface
}
func NewChannelClient(cc grpc.ClientConnInterface) ChannelClient {
return &channelClient{cc}
}
func (c *channelClient) ClientChat(ctx context.Context, opts ...grpc.CallOption) (Channel_ClientChatClient, error) {
stream, err := c.cc.NewStream(ctx, &_Channel_serviceDesc.Streams[0], "/proto.Channel/ClientChat", opts...)
if err != nil {
return nil, err
}
x := &channelClientChatClient{stream}
return x, nil
}
type Channel_ClientChatClient interface {
Send(*Message) error
Recv() (*Message, error)
grpc.ClientStream
}
type channelClientChatClient struct {
grpc.ClientStream
}
func (x *channelClientChatClient) Send(m *Message) error {
return x.ClientStream.SendMsg(m)
}
func (x *channelClientChatClient) Recv() (*Message, error) {
m := new(Message)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
func (c *channelClient) PeerChat(ctx context.Context, opts ...grpc.CallOption) (Channel_PeerChatClient, error) {
stream, err := c.cc.NewStream(ctx, &_Channel_serviceDesc.Streams[1], "/proto.Channel/PeerChat", opts...)
if err != nil {
return nil, err
}
x := &channelPeerChatClient{stream}
return x, nil
}
type Channel_PeerChatClient interface {
Send(*Message) error
Recv() (*Message, error)
grpc.ClientStream
}
type channelPeerChatClient struct {
grpc.ClientStream
}
func (x *channelPeerChatClient) Send(m *Message) error {
return x.ClientStream.SendMsg(m)
}
func (x *channelPeerChatClient) Recv() (*Message, error) {
m := new(Message)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
// ChannelServer is the server API for Channel service.
type ChannelServer interface {
ClientChat(Channel_ClientChatServer) error
PeerChat(Channel_PeerChatServer) error
}
// UnimplementedChannelServer can be embedded to have forward compatible implementations.
type UnimplementedChannelServer struct {
}
func (*UnimplementedChannelServer) ClientChat(Channel_ClientChatServer) error {
return status.Errorf(codes.Unimplemented, "method ClientChat not implemented")
}
func (*UnimplementedChannelServer) PeerChat(Channel_PeerChatServer) error {
return status.Errorf(codes.Unimplemented, "method PeerChat not implemented")
}
func RegisterChannelServer(s *grpc.Server, srv ChannelServer) {
s.RegisterService(&_Channel_serviceDesc, srv)
}
func _Channel_ClientChat_Handler(srv interface{}, stream grpc.ServerStream) error {
return srv.(ChannelServer).ClientChat(&channelClientChatServer{stream})
}
type Channel_ClientChatServer interface {
Send(*Message) error
Recv() (*Message, error)
grpc.ServerStream
}
type channelClientChatServer struct {
grpc.ServerStream
}
func (x *channelClientChatServer) Send(m *Message) error {
return x.ServerStream.SendMsg(m)
}
func (x *channelClientChatServer) Recv() (*Message, error) {
m := new(Message)
if err := x.ServerStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
func _Channel_PeerChat_Handler(srv interface{}, stream grpc.ServerStream) error {
return srv.(ChannelServer).PeerChat(&channelPeerChatServer{stream})
}
type Channel_PeerChatServer interface {
Send(*Message) error
Recv() (*Message, error)
grpc.ServerStream
}
type channelPeerChatServer struct {
grpc.ServerStream
}
func (x *channelPeerChatServer) Send(m *Message) error {
return x.ServerStream.SendMsg(m)
}
func (x *channelPeerChatServer) Recv() (*Message, error) {
m := new(Message)
if err := x.ServerStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
var _Channel_serviceDesc = grpc.ServiceDesc{
ServiceName: "proto.Channel",
HandlerType: (*ChannelServer)(nil),
Methods: []grpc.MethodDesc{},
Streams: []grpc.StreamDesc{
{
StreamName: "ClientChat",
Handler: _Channel_ClientChat_Handler,
ServerStreams: true,
ClientStreams: true,
},
{
StreamName: "PeerChat",
Handler: _Channel_PeerChat_Handler,
ServerStreams: true,
ClientStreams: true,
},
},
Metadata: "channel.proto",
}
| {
file_channel_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Message); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
} | conditional_block |
channel.pb.go | // Copyright (c) 2018 NEC Laboratories Europe GmbH.
//
// Authors: Wenting Li <wenting.li@neclab.eu>
// Sergey Fedorov <sergey.fedorov@neclab.eu>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.24.0
// protoc v3.12.1
// source: channel.proto
package proto
import (
context "context"
proto "github.com/golang/protobuf/proto"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// This is a compile-time assertion that a sufficiently up-to-date version
// of the legacy proto package is being used.
const _ = proto.ProtoPackageIsVersion4
type Message struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Payload []byte `protobuf:"bytes,1,opt,name=payload,proto3" json:"payload,omitempty"`
}
func (x *Message) Reset() {
*x = Message{}
if protoimpl.UnsafeEnabled {
mi := &file_channel_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Message) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Message) ProtoMessage() {}
func (x *Message) ProtoReflect() protoreflect.Message {
mi := &file_channel_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Message.ProtoReflect.Descriptor instead.
func (*Message) | () ([]byte, []int) {
return file_channel_proto_rawDescGZIP(), []int{0}
}
func (x *Message) GetPayload() []byte {
if x != nil {
return x.Payload
}
return nil
}
var File_channel_proto protoreflect.FileDescriptor
var file_channel_proto_rawDesc = []byte{
0x0a, 0x0d, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12,
0x05, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x23, 0x0a, 0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67,
0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x01, 0x20, 0x01,
0x28, 0x0c, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x32, 0x6f, 0x0a, 0x07, 0x43,
0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x12, 0x32, 0x0a, 0x0a, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74,
0x43, 0x68, 0x61, 0x74, 0x12, 0x0e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4d, 0x65, 0x73,
0x73, 0x61, 0x67, 0x65, 0x1a, 0x0e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4d, 0x65, 0x73,
0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x30, 0x0a, 0x08, 0x50, 0x65,
0x65, 0x72, 0x43, 0x68, 0x61, 0x74, 0x12, 0x0e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4d,
0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x0e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4d,
0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x62, 0x06, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x33,
}
var (
file_channel_proto_rawDescOnce sync.Once
file_channel_proto_rawDescData = file_channel_proto_rawDesc
)
func file_channel_proto_rawDescGZIP() []byte {
file_channel_proto_rawDescOnce.Do(func() {
file_channel_proto_rawDescData = protoimpl.X.CompressGZIP(file_channel_proto_rawDescData)
})
return file_channel_proto_rawDescData
}
var file_channel_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
var file_channel_proto_goTypes = []interface{}{
(*Message)(nil), // 0: proto.Message
}
var file_channel_proto_depIdxs = []int32{
0, // 0: proto.Channel.ClientChat:input_type -> proto.Message
0, // 1: proto.Channel.PeerChat:input_type -> proto.Message
0, // 2: proto.Channel.ClientChat:output_type -> proto.Message
0, // 3: proto.Channel.PeerChat:output_type -> proto.Message
2, // [2:4] is the sub-list for method output_type
0, // [0:2] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
}
func init() { file_channel_proto_init() }
func file_channel_proto_init() {
if File_channel_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_channel_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Message); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_channel_proto_rawDesc,
NumEnums: 0,
NumMessages: 1,
NumExtensions: 0,
NumServices: 1,
},
GoTypes: file_channel_proto_goTypes,
DependencyIndexes: file_channel_proto_depIdxs,
MessageInfos: file_channel_proto_msgTypes,
}.Build()
File_channel_proto = out.File
file_channel_proto_rawDesc = nil
file_channel_proto_goTypes = nil
file_channel_proto_depIdxs = nil
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConnInterface
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion6
// ChannelClient is the client API for Channel service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type ChannelClient interface {
ClientChat(ctx context.Context, opts ...grpc.CallOption) (Channel_ClientChatClient, error)
PeerChat(ctx context.Context, opts ...grpc.CallOption) (Channel_PeerChatClient, error)
}
type channelClient struct {
cc grpc.ClientConnInterface
}
func NewChannelClient(cc grpc.ClientConnInterface) ChannelClient {
return &channelClient{cc}
}
func (c *channelClient) ClientChat(ctx context.Context, opts ...grpc.CallOption) (Channel_ClientChatClient, error) {
stream, err := c.cc.NewStream(ctx, &_Channel_serviceDesc.Streams[0], "/proto.Channel/ClientChat", opts...)
if err != nil {
return nil, err
}
x := &channelClientChatClient{stream}
return x, nil
}
type Channel_ClientChatClient interface {
Send(*Message) error
Recv() (*Message, error)
grpc.ClientStream
}
type channelClientChatClient struct {
grpc.ClientStream
}
func (x *channelClientChatClient) Send(m *Message) error {
return x.ClientStream.SendMsg(m)
}
func (x *channelClientChatClient) Recv() (*Message, error) {
m := new(Message)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
func (c *channelClient) PeerChat(ctx context.Context, opts ...grpc.CallOption) (Channel_PeerChatClient, error) {
stream, err := c.cc.NewStream(ctx, &_Channel_serviceDesc.Streams[1], "/proto.Channel/PeerChat", opts...)
if err != nil {
return nil, err
}
x := &channelPeerChatClient{stream}
return x, nil
}
type Channel_PeerChatClient interface {
Send(*Message) error
Recv() (*Message, error)
grpc.ClientStream
}
type channelPeerChatClient struct {
grpc.ClientStream
}
func (x *channelPeerChatClient) Send(m *Message) error {
return x.ClientStream.SendMsg(m)
}
func (x *channelPeerChatClient) Recv() (*Message, error) {
m := new(Message)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
// ChannelServer is the server API for Channel service.
type ChannelServer interface {
ClientChat(Channel_ClientChatServer) error
PeerChat(Channel_PeerChatServer) error
}
// UnimplementedChannelServer can be embedded to have forward compatible implementations.
type UnimplementedChannelServer struct {
}
func (*UnimplementedChannelServer) ClientChat(Channel_ClientChatServer) error {
return status.Errorf(codes.Unimplemented, "method ClientChat not implemented")
}
func (*UnimplementedChannelServer) PeerChat(Channel_PeerChatServer) error {
return status.Errorf(codes.Unimplemented, "method PeerChat not implemented")
}
func RegisterChannelServer(s *grpc.Server, srv ChannelServer) {
s.RegisterService(&_Channel_serviceDesc, srv)
}
func _Channel_ClientChat_Handler(srv interface{}, stream grpc.ServerStream) error {
return srv.(ChannelServer).ClientChat(&channelClientChatServer{stream})
}
type Channel_ClientChatServer interface {
Send(*Message) error
Recv() (*Message, error)
grpc.ServerStream
}
type channelClientChatServer struct {
grpc.ServerStream
}
func (x *channelClientChatServer) Send(m *Message) error {
return x.ServerStream.SendMsg(m)
}
func (x *channelClientChatServer) Recv() (*Message, error) {
m := new(Message)
if err := x.ServerStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
func _Channel_PeerChat_Handler(srv interface{}, stream grpc.ServerStream) error {
return srv.(ChannelServer).PeerChat(&channelPeerChatServer{stream})
}
type Channel_PeerChatServer interface {
Send(*Message) error
Recv() (*Message, error)
grpc.ServerStream
}
type channelPeerChatServer struct {
grpc.ServerStream
}
func (x *channelPeerChatServer) Send(m *Message) error {
return x.ServerStream.SendMsg(m)
}
func (x *channelPeerChatServer) Recv() (*Message, error) {
m := new(Message)
if err := x.ServerStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
var _Channel_serviceDesc = grpc.ServiceDesc{
ServiceName: "proto.Channel",
HandlerType: (*ChannelServer)(nil),
Methods: []grpc.MethodDesc{},
Streams: []grpc.StreamDesc{
{
StreamName: "ClientChat",
Handler: _Channel_ClientChat_Handler,
ServerStreams: true,
ClientStreams: true,
},
{
StreamName: "PeerChat",
Handler: _Channel_PeerChat_Handler,
ServerStreams: true,
ClientStreams: true,
},
},
Metadata: "channel.proto",
}
| Descriptor | identifier_name |
ag.py | #!/bin/python3
import os
import re
import sys
import json
from pprint import pprint
from shorthand import *
from amyhead import *
#charset_namespace="[A-Za-z0-9_$]+" # in 'shorthand'
#parser_goal=re.compile('[ \t]*([0-9]+)[ \t](.+)')
token_item=r'([\n]|^)([ \t]*\~?[0-9]+|[ \t]*\~?include|[ \t]*\~?gonear)[ \t]+([^ \t][^\n]*)'
#token_item_1='[ \t]*([0-9]+)[ \t]([^\n]+)'
#token_item='[\t]*\[[ \t]*[\n](([^\n]+[\n])+)[ \t]*\][ \t]*([\n]|$)'
token_nodeopt=r'(-pull|-push)([ \t]+'+charset_namespace+r')*'
token_goalset = r'(^|[\n])[ \t]*' # start : 1
token_goalset+= r'('+charset_namespace+r')[ \t]+('+charset_namespace+r'|-)' # name,succ : 2
token_goalset+= r'(([ \t]+'+charset_namespace+r')*)' # pres : 2
token_goalset+= r'(([ \t]+'+token_nodeopt+r')*)' # pu** : 4
token_goalset+= r'[ \t]*' # tailing
token_goalset+= r'(([\n][\n]?[^\n]+)*)' # contentInSameNode : 2
token_goalset+= r'([\n]+[\n](?=[\n])|[\n]?[\n]?$)' # sep : 2
# '(?=...)'Matches if ... matches next, but doesn’t consume any of the string.
sts=re.compile(r'[ \t]+')
nodeopt=re.compile(r'('+token_nodeopt+r')')
class KWs:
def __init__(self,kwv):
self.data={}
self.__cnt=0
for kw in kwv:
self.__cnt+=1
tmp={"label":self.__cnt,"len":len(kw),"txt":kw}
self[kw]=tmp
self[self.__cnt]=tmp
def getKwData(self,i):
'''
isinstance(i)==int or isinstance(i)==str
'''
return self.data[i] if i in self.data else None
class Goal:
# a part of goalset
# is a sub-class of Goaltree and should not be use directly
parser_item=re.compile(token_item)
#kwv=KWs(['include','gonear'])
KW_include_label=-1
KW_include_txt="include"
KW_include_lentxt=len(KW_include_txt)
KW_gonear_label=-2
KW_gonear_txt="gonear"
KW_gonear_lentxt=len(KW_gonear_txt)
# node
def __init__(self):
self.constraints=[] # [ (int(label),item,negate?) ... ]
self.maxLabel=-1
self.including=False
self.arrangeNeeded=False
self.extendedView=None # inherit from Goaltree
def __eq__(self,rhs):
self.arrange()
if isinstance(rhs,self.__class__):
return self.constraints==rhs.constraints
else:
raise TypeError("unsupport: %s == %s"%(self.__class__,type(rhs)))
def __repr__(self):
return "[Goal:"+str(self.constraints)+"]"
def isSpecial(self):
# having constraints labels != 0
for c in self.constraints:
if c[0]!=0:
return 1
return 0
def flatten(self):
add=[]
delItSet=set()
rg=range(len(self.constraints))
for i in rg:
c=self.constraints[i]
if c[0]==self.__class__.KW_include_label:
src=c[1][1]
if len(src.sets)<=1 and len(src.pushs(""))==0 and len(src.pulls(""))==0:
cFinalGs=src[src.getFinals()[0]][0]
if len(cFinalGs)<=1 and cFinalGs[0].isSpecial()==0:
add+=cFinalGs[0].constraints
delItSet.add(i)
if len(add)!=0:
newCs=[ self.constraints[i] for i in rg if not i in delItSet ]
self.constraints=newCs
for c in add:
self.add(c[1],c[0],c[2],arrangeLater=True)
self.arrange()
return self
def arrange(self):
if self.arrangeNeeded!=False:
self.arrangeNeeded=False
self.constraints.sort(key=lambda x:(x[2],x[:2]))
tmpv=[]
tmp=0
for c in self.constraints:
if tmp==c: continue
tmpv.append(c)
tmp=c
self.constraints=tmpv
if len([ c[0] for c in tmpv if c[0]==-1])==0:
self.including=False
return self
def add(self,item,label=0,negate=False,arrangeLater=False):
# label must be an integer
self.constraints.append((label,item,negate))
if self.maxLabel<label: self.maxLabel=label
if arrangeLater==False: self.arrange()
else: self.arrangeNeeded=arrangeLater
def fromStr(self,s,cd='./',extView=None):
s=s.replace('\r','')
'''
character:'\r' is ommited
this function will NOT do the arrangement
make sure the lines of constraints:
labels are in increasing order
items of the same label are in lexicographical order
format:
each line: label item
([ \t]*\~?[0-9]+|include|gonear)
lines not match will be omitted
'''
# preserve
old=self.constraints
# clean
self.constraints=[]
self.maxLabel=-1
self.extendedView=extView
# start
lines=s.split('\n')
p=self.__class__.parser_item
rs=p.split(s)
#print('*'*11),pprint(rs) # debug
for i in range(1,len(rs),p.groups+1):
# not match , ([\n]|^) , [ \t]*\~?[0-9]+|KWs , [^\n]+
# start from 1 =>
# ([\n]|^) , [ \t]*[0-9]+|KWs , [^\n]+ , not match
isKW=False
negate=False
label=sts.sub('',rs[i+1])
if label[0]=='~':
negate=True
label=label[1:]
content=rs[i+2]
#print(rs[i],rs[i+1]) # debug
if label==self.__class__.KW_include_txt:
isKW=True
label=self.__class__.KW_include_label
self.including=True
tmp=Goaltree()
tmp.fromTxt(content,_cd=cd)
item=(content,tmp)
if label==self.__class__.KW_gonear_txt:
isKW=True
label=self.__class__.KW_gonear_label
tmp=None # TODO
item=(content,tmp)
if isKW==False:
item=content
label=int(label)
self.add(item,label,negate=negate,arrangeLater=True)
'''
for line in lines:
m=self.__class__.parser_item.match(line)
if isNone(m): continue
res=m.group(1,2)
self.add(res[1],int(res[0]),arrangeLater=True)
# TODO: need ORs
'''
return self
def toStr(self,labelMinLen=0):
length=max(len(str(self.maxLabel)),labelMinLen)
if self.including:
length=max(length,self.__class__.KW_include_lentxt)
rtv=""
tmpv=[]
for c in self.constraints:
useLen=length+c[2] # len of neg no usage
label=c[0]
content=c[1]
if label==self.__class__.KW_include_label:
useLen=0
label=self.__class__.KW_include_txt
content=c[1][0]
#if 0!=0: tmpv.append(c[1][1].toStr(labelMinLen=length).split('\n')[1:])
if label==self.__class__.KW_gonear_label:
useLen=0
label=self.__class__.KW_gonear_txt
content=c[1][0]
label=('~' if c[2] else '')+str(label)
tmpv.append("%*s\t%s"%(useLen,label,content))
rtv+='\n'.join(tmpv)
return rtv
def fromTxt(self,filename,_cd='./',extView=None):
cd=_cd+filename[:filename.rindex('/')+1] if '/' in filename else _cd
with open(_cd+filename,'rb') as f:
self.fromStr("".join(map(chr,f.read())),cd=cd,extView=extView)
return self
# TODO:
with open(_cd+filename+".learn",'rb') as f:
pass
def size(self):
rtv={"byname":0,"bygoal":1}
for c in self.constraints:
if c[0]=="include":
tmp=c[1][1].size()
for k,v in tmp.items(): rtv[k]+=v
return rtv
class Goaltree:
# lots of goalset
'''
definitions:
successor:
the next goalset to match after matching a goalset
closer the root(tree), closer the final goalset
'''
parser_set=re.compile(token_goalset)
def __init__(self):
self.sets={}
self.filename=None
self.extendedView=None
# an extendedView is an importlib.import_module() object
# this can only be used when using 'fromTxt'
# using 'fromStr' will remove (=None) previous extendedView from 'fromTxt'
# file name is '_cd' and 'filename' given to 'fromTxt' concatenate '.py'
# i.e. _cd+filename+".py"
## it is recommended to construct a hashtable ( key is tuple(*.outputs()) or you can specify other methods ) with timeout to prevent re-calculating same condition within the same goal to achive
self.learned={"nextgoal":{}}
self.isSuccsOf={}
# learn file is self.filename+".learn", self.filename will be set after self.fromTxt()
pass
def __repr__(self):
rtv='{Goaltree:\n'
tmp=[ (k,v) for k,v in self.sets.items() ]
tmp.sort()
for x in tmp:
rtv+="\t"+x[0]+":"+str(x[1])+",\n"
rtv+='\t-:0\n}'
return rtv
def __getitem__(self,k):
return self.sets[k] if k in self.sets else None
def newNode(self,goals=[],name="",successorName='-'):
node=(goals,successorName,set(),[''])
return name,node
def addNode(self,goalset=[],name="",successorName='-'):
# TODO
if name=="" or (name in self.sets): return 1
pass
def keys(self,notBelow=None,beforeKeys=set()):
def valid_prec(k):
precs=self.getPrecs(k)
return len(precs)==0 or len(precs&beforeKeys)!=0
if isNone(notBelow):
rtv=[k for k in self.sets if valid_prec(k)]
rtv.sort()
return rtv
else:
#rtv=[k for k in self.sets if not self.getSucc(k) in notBelow]
rtv=[k for k in self.sets if len(self.getSuccs(k)¬Below)==0 and valid_prec(k)]
rtv.sort()
return rtv
def getGoals(self,k):
# return a goalset
return self.sets[k][0] if k in self.sets else None
def getSucc(self,k):
return self.sets[k][1]
def _getSuccs(self,k):
rtvSet=set()
rtvStr=k
tmpsucc=self.getSucc(k)
while not ( tmpsucc=='-' or tmpsucc=='+' or (tmpsucc in rtvSet) ):
# rtv
rtvSet.add(tmpsucc)
rtvStr+='-'
rtvStr+=tmpsucc
# reversed succs
#if not tmpsucc in self.isSuccsOf: self.isSuccsOf[tmpsucc]=set() # is set before
self.isSuccsOf[tmpsucc].add(k)
# next
tmpsucc=self.getSucc(tmpsucc)
return rtvSet,rtvStr
def getSuccs(self,k):
return self.sets[k][2]
def getSuccsStr(self,k):
return self.sets[k][3][0]
def getPrecs(self,k):
return self.sets[k][4]
def getOpts(self,k):
rtv=dict(self.sets[k][5])
for i in rtv: rtv[i]=rtv[i][1]
return rtv
def getFinals(self):
return [ k for k in self.sets if self.getSucc(k)=='-' ]
def fromStr(self,s,cd='./',extView=None):
s=s.replace('\r','')
'''
\r\n , \n\r , \n -> \n
format: see self.fromTxt
'''
# unset filename
self.filename=None
self.extendedView=extView
#old=self.sets
p=self.__class__.parser_set
s=re.sub("(\n\r|\n|\r\n)[ \t]*(\n\r|\n|\r\n)","\n\n",s)
defined=set()
data=[]
rs=p.split(s) # cut via "\n\n\n"
#print(p.groups+1),print(rs[1:1+p.groups+1]),print(rs[1+(p.groups+1)*1:1+(p.groups+1)*2]),exit()
#print(rs[0]),exit() # debug
for i in range(1,len(rs),p.groups+1):
# rs[0] is "not match", omitted
# start from 1 =>
# (^|[\n]) , currName , succName , precNames , precName_last , pu** , pu**_last , (-pull|-push) , pu**_func_last , goals , others
# +0 , +1 , +2 , +3 , +4 , +5 , +6 , +7 , +8 , +9 , + >=10
#print(i,p.groups+1,rs[i-1]),print(rs[i:i+p.groups+1]) # debug
#if i>p.groups: exit() # debug
curr=rs[i+1]
if curr in defined:
raise TypeError("Error: '"+curr+"' is defined twice")
defined.add(curr)
#print(i,curr,defined) # debug
succ = rs[i+2]
prec = set(sts.split(rs[i+3])[1:]) # or
opts = {"-push":(set(),[],[]),"-pull":(set(),[],[])} # (fooNamesLookupForRepeated,fooContent)
for opt in nodeopt.split(rs[i+5])[1::nodeopt.groups+1]:
arr=sts.split(opt) # opt_type foo1 foo2 ...
dest=[k for k in opts if arr[0]==k] # opt_type
if len(dest)==0: raise TypeError("Error: "+arr[0]+" is not an option")
arr,dst=tuple(arr[1:]),opts[dest[0]]
if not (arr in dst[0]): # trim repeated combination
dst[0].add(arr)
dst[1].append([getattr(self.extendedView,f) for f in arr])
else: print("warning: permutation:",arr,"in",dest[0],"already exists in this node")
dst[2].append(arr)
gsv = re.split("[\n][ \t]*[\n]",rs[i+9]) # or
data.append((curr, ([ Goal().fromStr(gs,cd=cd,extView=self.extendedView).flatten() for gs in gsv ],succ,set(),[''],prec,opts) ))
# curr:( Goal()s , succ , succSet , succStrs , prec , opts)
#data.sort()
#print(defined),exit() # debug
#print(sorted(list(defined))) # debug
#pprint(data) # debug
self.sets=dict(data)
del data
'''
def getTarget(c):
tmp=c[1].split(":")[1] if ":" in c[1] else c[1]
return c[0],tmp,c[2]
for k in self.sets:
node=self.sets[k]
if node[1]=='-': continue
gs_node=node[0]
if len(gs_node)!=1: continue
gs_node=gs_node[0]
gs_node.arrange()
sn=set(gs_node.constraints)
succ=self.sets[node[1]]
gs_succ=succ[0]
for g in gs_succ:
if abs(len(g.constraints)-len(sn))>1: continue
ss=set(g.constraints)
delta=ss^sn
if len(delta)>2: continue
rem_sn,rem_ss=delta&sn,delta&ss
if len(rem_sn)!=1 or len(rem_ss)!=1: continue # no idea how to do
rem_sn,rem_ss=rem_sn.pop(),rem_ss.pop()
if not (":" in rem_sn[1] or ":" in rem_ss[1]): continue # not value
rem1_sn=re.split(r'[ \t]+',rem_sn[1])
rem1_ss=re.split(r'[ \t]+',rem_ss[1])
if len(rem1_sn)!=len(rem1_ss)!=1: continue
rem1_sn.sort(),rem1_ss.sort()
diff=[]
for i in range(len(rem1_sn)):
if rem1_sn[i]!=rem1_ss[i]:
diff.append((rem1_sn[i],rem1_ss[i]))
if len(diff)!=1 or diff[0]==diff[1]: continue
target=[ x[:x.index(":")] for x in diff ]
if target[0]!=target[1]: continue
vals=[ x[len(target[0])+1:] for x in diff ]
if not ',' in vals[0]: vals[0]=vals[0]+','+vals[0]
if not ',' in vals[1]: vals[1]=vals[1]+','+vals[1]
newNodes=[]
if vals[0]
print("?",gs_node),exit()
'''
self.isSuccsOf=dict([(k,set()) for k in self.sets])
for k,v in self.sets.items():
succSet,succStr=self._getSuccs(k)
v[2].update(succSet)
v[3][0]+=succStr
# basic keys
allKeys=set([k for k in self.sets])
for k in allKeys:
# all lower nodes
self.learned["nextgoal"][k]=dict([ (kk,(0.0-len(self.getSuccs(kk)))/len(allKeys)) for kk in allKeys-self.isSuccsOf[k] if kk!=k ])
self.learned["nextgoal"][""]=dict([ (k,(0.0-len(self.getSuccs(k)))/len(allKeys)) for k in allKeys if len(self.getPrecs(k))==0 ])
return self
def toStr(self,labelMinLen=0):
kv=[ k for k in self.sets ]
kv.sort()
rtv=""
tmpv=[]
for k in kv:
tmps=""
tmps+=k+'\t'+self.getSucc(k)
if len(self.getPrecs(k))!=0:
tmps+='\t'.join([""]+sorted([ kk for kk in self.getPrecs(k) ]))
opts=self.sets[k][5]
optstrs=[]
for opt in sorted([_ for _ in opts]):
if len(opts[opt][2])!=0:
optstrs.append('\t'.join([x for v in opts[opt][2]for x in[opt]+list(v)]))
tmps+='\t'.join([""]+optstrs)
tmpgsv=[ g.toStr(labelMinLen=labelMinLen) for g in self.getGoals(k) ]
tmpv.append('\n'.join([tmps,"\n\n".join(tmpgsv)]))
rtv+="\n\n\n".join(tmpv)
return rtv
def fromTxt(self,filename,_cd='./'):
'''
concept:
a block with a name is a set of Goal. that means reach one of them is a 'match', and can to further more (try the successor)
'''
'''
format prototype:
( none or more empty lines )
...
( none or more empty lines )
name successorName(if None, use '-')
# lines which cannot be parsed as <class: Goal>
label item
# lines which cannot be parsed as <class: Goal>
label item
...
label item
label item
( an empty line )
label item
label item
...
label item
label item
( two or more empty lines )
...
( two or more empty lines )
name successorName(if None, use '-')
label item
...
in regex:
'''
cd=_cd+filename[:filename.rindex('/')+1] if '/' in filename else _cd
filename=_cd+filename
try:
path=filename+".py"
if os.path.isfile(path):
spec = importlib.util.spec_from_file_location(filename,path)
self.extendedView = importlib.util.module_from_spec(spec)
spec.loader.exec_module(self.extendedView)
#print(inspect.getsource(self.extendedView)) # debug
except:
print("WARNING: file exists but it cannot be import:",path)
with open(filename,'rb') as f:
self.fromStr("".join(map(chr,f.read())),cd=cd,extView=self.extendedView)
self.filename=filename
return self
def size(self):
rtv={"byname":len(self.sets),"bygoal":0}
for _,d in self.sets.items():
arr=d[0]
for x in arr:
tmp= | return rtv
'''
learn file
a learn file records ordered goals of the successful paths
probably
'''
def loadNextGoalFile(self,filename=None):
# return True on error
# else False
if isNone(filename): filename=self.filename
if isNone(filename) or os.path.isfile(filename)==False: return True
with open(filename) as f:
self.learned["nextgoal"]=json.loads(f.read())["nextgoal"]
return False
def saveNextGoalFile(self,filename=None):
# filename
learnfile=""
if isNone(filename):
if isNone(self.filename):
t0=str(time.time())
t0+='0'*(t0.find('.')+8-len(t0))
self.filename=t0.replace('.','-')
learnfile+=self.filename+".learn"
else: learnfile+=filename
if os.path.isdir(learnfile): return True
with open(learnfile,"w") as f:
f.write(json.dumps(self.learned))
return False
def saveNextGoal(self,successSubgoalList):
'''
format of successSubgoalList is genSol()['nodes']
i.e. a list of successful paths
'''
# data
nextgoal=self.learned["nextgoal"]
for arr in successSubgoalList:
p=[""]+arr
for i in range(1,len(p)):
#print(p[i-1]) # debug
if not p[i-1] in nextgoal: nextgoal[ p[i-1] ]={}
curr=nextgoal[ p[i-1] ]
if not p[i] in curr: curr[ p[i] ]=0
curr[ p[i] ]+=1
return False
def wkeys(self,currentKey,notBelow=None,beforeKeys=set()):
'''
* weighted keys *
# ref-rtv
if isNone(notBelow):
rtv=[k for k in self.sets]
rtv.sort()
return rtv
else:
#rtv=[k for k in self.sets if not self.getSucc(k) in notBelow]
rtv=[k for k in self.sets if len(self.getSuccs(k)¬Below)==0]
rtv.sort()
return rtv
'''
# inter-func.
def valid_prec(k):
# control upper nodes to return
# True === valid , i.e. will be return from 'wkeys'
# it takes 'beforeKeys' to check if there's at least 1 presenting in 'precs'
precs=self.getPrecs(k)
return len(precs)==0 or len(precs&beforeKeys)!=0
if isNone(notBelow): notBelow=set()
if type(notBelow)!=set: notBelow=set(notBelow)
# data
#validKeys=[k for k in self.sets if len(self.getSuccs(k)¬Below)==0]
nextgoal=self.learned["nextgoal"]
target=nextgoal[currentKey] if currentKey in nextgoal else {}
rtv=[ (v,k) for k,v in target.items() if k in self.sets and len(self.getSuccs(k)¬Below)==0 and valid_prec(k) ]
#rtv+=[ (0,k) for k in self.sets if (not k in target) and len(self.getSuccs(k)¬Below)==0]
#rtv.sort(reverse=True) # leave it to caller
return rtv
def pulls(self,currentKey,notBelow=None,beforeKeys=set(),wkeys=None):
# return how other nodes can pull
# if 'wkeys' is not None: notBelow && beforeKeys will be omitted
# note: if wkeys is not sublist from self.wkeys, it might cause errors
if isNone(wkeys):
wkeys=self.wkeys(currentKey=currentKey,notBelow=notBelow,beforeKeys=beforeKeys)
wkeys.sort()
return [ hv for vk in wkeys for hv in self.getOpts(vk[1])["-pull"] ]
def pushs(self,currentKey):
# return how can the node push
rtv=[] if not currentKey in self.sets else self.getOpts(currentKey)["-push"]
return rtv
###########
| x.size()
for k,v in tmp.items(): rtv[k]+=v
| conditional_block |
ag.py | #!/bin/python3
import os
import re
import sys
import json
from pprint import pprint
from shorthand import *
from amyhead import *
#charset_namespace="[A-Za-z0-9_$]+" # in 'shorthand'
#parser_goal=re.compile('[ \t]*([0-9]+)[ \t](.+)')
token_item=r'([\n]|^)([ \t]*\~?[0-9]+|[ \t]*\~?include|[ \t]*\~?gonear)[ \t]+([^ \t][^\n]*)'
#token_item_1='[ \t]*([0-9]+)[ \t]([^\n]+)'
#token_item='[\t]*\[[ \t]*[\n](([^\n]+[\n])+)[ \t]*\][ \t]*([\n]|$)'
token_nodeopt=r'(-pull|-push)([ \t]+'+charset_namespace+r')*'
token_goalset = r'(^|[\n])[ \t]*' # start : 1
token_goalset+= r'('+charset_namespace+r')[ \t]+('+charset_namespace+r'|-)' # name,succ : 2
token_goalset+= r'(([ \t]+'+charset_namespace+r')*)' # pres : 2
token_goalset+= r'(([ \t]+'+token_nodeopt+r')*)' # pu** : 4
token_goalset+= r'[ \t]*' # tailing
token_goalset+= r'(([\n][\n]?[^\n]+)*)' # contentInSameNode : 2
token_goalset+= r'([\n]+[\n](?=[\n])|[\n]?[\n]?$)' # sep : 2
# '(?=...)'Matches if ... matches next, but doesn’t consume any of the string.
sts=re.compile(r'[ \t]+')
nodeopt=re.compile(r'('+token_nodeopt+r')')
class KWs:
def __init__(self,kwv):
self.data={}
self.__cnt=0
for kw in kwv:
self.__cnt+=1
tmp={"label":self.__cnt,"len":len(kw),"txt":kw}
self[kw]=tmp
self[self.__cnt]=tmp
def getKwData(self,i):
'''
isinstance(i)==int or isinstance(i)==str
'''
return self.data[i] if i in self.data else None
class Goal:
# a part of goalset
# is a sub-class of Goaltree and should not be use directly
parser_item=re.compile(token_item)
#kwv=KWs(['include','gonear'])
KW_include_label=-1
KW_include_txt="include"
KW_include_lentxt=len(KW_include_txt)
KW_gonear_label=-2
KW_gonear_txt="gonear"
KW_gonear_lentxt=len(KW_gonear_txt)
# node
def __init__(self):
self.constraints=[] # [ (int(label),item,negate?) ... ]
self.maxLabel=-1
self.including=False
self.arrangeNeeded=False
self.extendedView=None # inherit from Goaltree
def __eq__(self,rhs):
self.arrange()
if isinstance(rhs,self.__class__):
return self.constraints==rhs.constraints
else:
raise TypeError("unsupport: %s == %s"%(self.__class__,type(rhs)))
def __repr__(self):
return "[Goal:"+str(self.constraints)+"]"
def isSpecial(self):
# having constraints labels != 0
for c in self.constraints:
if c[0]!=0:
return 1
return 0
def flatten(self):
add=[]
delItSet=set()
rg=range(len(self.constraints))
for i in rg:
c=self.constraints[i]
if c[0]==self.__class__.KW_include_label:
src=c[1][1]
if len(src.sets)<=1 and len(src.pushs(""))==0 and len(src.pulls(""))==0:
cFinalGs=src[src.getFinals()[0]][0]
if len(cFinalGs)<=1 and cFinalGs[0].isSpecial()==0:
add+=cFinalGs[0].constraints
delItSet.add(i)
if len(add)!=0:
newCs=[ self.constraints[i] for i in rg if not i in delItSet ]
self.constraints=newCs
for c in add:
self.add(c[1],c[0],c[2],arrangeLater=True)
self.arrange()
return self
def arrange(self):
if self.arrangeNeeded!=False:
self.arrangeNeeded=False
self.constraints.sort(key=lambda x:(x[2],x[:2]))
tmpv=[]
tmp=0
for c in self.constraints:
if tmp==c: continue
tmpv.append(c)
tmp=c
self.constraints=tmpv
if len([ c[0] for c in tmpv if c[0]==-1])==0:
self.including=False
return self
def add(self,item,label=0,negate=False,arrangeLater=False):
# label must be an integer
self.constraints.append((label,item,negate))
if self.maxLabel<label: self.maxLabel=label
if arrangeLater==False: self.arrange()
else: self.arrangeNeeded=arrangeLater
def fromStr(self,s,cd='./',extView=None):
s=s.replace('\r','')
'''
character:'\r' is ommited
this function will NOT do the arrangement
make sure the lines of constraints:
labels are in increasing order
items of the same label are in lexicographical order
format:
each line: label item
([ \t]*\~?[0-9]+|include|gonear)
lines not match will be omitted
'''
# preserve
old=self.constraints
# clean
self.constraints=[]
self.maxLabel=-1
self.extendedView=extView
# start
lines=s.split('\n')
p=self.__class__.parser_item
rs=p.split(s)
#print('*'*11),pprint(rs) # debug
for i in range(1,len(rs),p.groups+1):
# not match , ([\n]|^) , [ \t]*\~?[0-9]+|KWs , [^\n]+
# start from 1 =>
# ([\n]|^) , [ \t]*[0-9]+|KWs , [^\n]+ , not match
isKW=False
negate=False
label=sts.sub('',rs[i+1])
if label[0]=='~':
negate=True
label=label[1:]
content=rs[i+2]
#print(rs[i],rs[i+1]) # debug
if label==self.__class__.KW_include_txt:
isKW=True
label=self.__class__.KW_include_label
self.including=True
tmp=Goaltree()
tmp.fromTxt(content,_cd=cd)
item=(content,tmp)
if label==self.__class__.KW_gonear_txt:
isKW=True
label=self.__class__.KW_gonear_label
tmp=None # TODO
item=(content,tmp)
if isKW==False:
item=content
label=int(label)
self.add(item,label,negate=negate,arrangeLater=True)
'''
for line in lines:
m=self.__class__.parser_item.match(line)
if isNone(m): continue
res=m.group(1,2)
self.add(res[1],int(res[0]),arrangeLater=True)
# TODO: need ORs
'''
return self
def toStr(self,labelMinLen=0):
length=max(len(str(self.maxLabel)),labelMinLen)
if self.including:
length=max(length,self.__class__.KW_include_lentxt)
rtv=""
tmpv=[]
for c in self.constraints:
useLen=length+c[2] # len of neg no usage
label=c[0]
content=c[1]
if label==self.__class__.KW_include_label:
useLen=0
label=self.__class__.KW_include_txt
content=c[1][0]
#if 0!=0: tmpv.append(c[1][1].toStr(labelMinLen=length).split('\n')[1:])
if label==self.__class__.KW_gonear_label:
useLen=0
label=self.__class__.KW_gonear_txt
content=c[1][0]
label=('~' if c[2] else '')+str(label)
tmpv.append("%*s\t%s"%(useLen,label,content))
rtv+='\n'.join(tmpv)
return rtv
def fromTxt(self,filename,_cd='./',extView=None):
cd=_cd+filename[:filename.rindex('/')+1] if '/' in filename else _cd
with open(_cd+filename,'rb') as f:
self.fromStr("".join(map(chr,f.read())),cd=cd,extView=extView)
return self
# TODO:
with open(_cd+filename+".learn",'rb') as f:
pass
def size(self):
rtv={"byname":0,"bygoal":1}
for c in self.constraints:
if c[0]=="include":
tmp=c[1][1].size()
for k,v in tmp.items(): rtv[k]+=v
return rtv
class Goaltree:
# lots of goalset
'''
definitions:
successor:
the next goalset to match after matching a goalset
closer the root(tree), closer the final goalset
'''
parser_set=re.compile(token_goalset)
def __init__(self):
self.sets={}
self.filename=None
self.extendedView=None
# an extendedView is an importlib.import_module() object
# this can only be used when using 'fromTxt'
# using 'fromStr' will remove (=None) previous extendedView from 'fromTxt'
# file name is '_cd' and 'filename' given to 'fromTxt' concatenate '.py'
# i.e. _cd+filename+".py"
## it is recommended to construct a hashtable ( key is tuple(*.outputs()) or you can specify other methods ) with timeout to prevent re-calculating same condition within the same goal to achive
self.learned={"nextgoal":{}}
self.isSuccsOf={}
# learn file is self.filename+".learn", self.filename will be set after self.fromTxt()
pass
def __repr__(self):
rtv='{Goaltree:\n'
tmp=[ (k,v) for k,v in self.sets.items() ]
tmp.sort()
for x in tmp:
rtv+="\t"+x[0]+":"+str(x[1])+",\n"
rtv+='\t-:0\n}'
return rtv
def __getitem__(self,k):
return self.sets[k] if k in self.sets else None
def newNode(self,goals=[],name="",successorName='-'):
node=(goals,successorName,set(),[''])
return name,node
def addNode(self,goalset=[],name="",successorName='-'):
# TODO
if name=="" or (name in self.sets): return 1
pass
def keys(self,notBelow=None,beforeKeys=set()):
def valid_prec(k):
precs=self.getPrecs(k)
return len(precs)==0 or len(precs&beforeKeys)!=0
if isNone(notBelow):
rtv=[k for k in self.sets if valid_prec(k)]
rtv.sort()
return rtv
else:
#rtv=[k for k in self.sets if not self.getSucc(k) in notBelow]
rtv=[k for k in self.sets if len(self.getSuccs(k)¬Below)==0 and valid_prec(k)]
rtv.sort()
return rtv
def getGoals(self,k):
# return a goalset
return self.sets[k][0] if k in self.sets else None
def getSucc(self,k):
return self.sets[k][1]
def _getSuccs(self,k):
rtvSet=set()
rtvStr=k
tmpsucc=self.getSucc(k)
while not ( tmpsucc=='-' or tmpsucc=='+' or (tmpsucc in rtvSet) ):
# rtv
rtvSet.add(tmpsucc)
rtvStr+='-'
rtvStr+=tmpsucc
# reversed succs
#if not tmpsucc in self.isSuccsOf: self.isSuccsOf[tmpsucc]=set() # is set before
self.isSuccsOf[tmpsucc].add(k)
# next
tmpsucc=self.getSucc(tmpsucc)
return rtvSet,rtvStr
def getSuccs(self,k):
return self.sets[k][2]
def getSuccsStr(self,k):
return self.sets[k][3][0]
def getPrecs(self,k):
return self.sets[k][4]
def getOpts(self,k):
rtv=dict(self.sets[k][5])
for i in rtv: rtv[i]=rtv[i][1]
return rtv
def getFinals(self):
retu | ef fromStr(self,s,cd='./',extView=None):
s=s.replace('\r','')
'''
\r\n , \n\r , \n -> \n
format: see self.fromTxt
'''
# unset filename
self.filename=None
self.extendedView=extView
#old=self.sets
p=self.__class__.parser_set
s=re.sub("(\n\r|\n|\r\n)[ \t]*(\n\r|\n|\r\n)","\n\n",s)
defined=set()
data=[]
rs=p.split(s) # cut via "\n\n\n"
#print(p.groups+1),print(rs[1:1+p.groups+1]),print(rs[1+(p.groups+1)*1:1+(p.groups+1)*2]),exit()
#print(rs[0]),exit() # debug
for i in range(1,len(rs),p.groups+1):
# rs[0] is "not match", omitted
# start from 1 =>
# (^|[\n]) , currName , succName , precNames , precName_last , pu** , pu**_last , (-pull|-push) , pu**_func_last , goals , others
# +0 , +1 , +2 , +3 , +4 , +5 , +6 , +7 , +8 , +9 , + >=10
#print(i,p.groups+1,rs[i-1]),print(rs[i:i+p.groups+1]) # debug
#if i>p.groups: exit() # debug
curr=rs[i+1]
if curr in defined:
raise TypeError("Error: '"+curr+"' is defined twice")
defined.add(curr)
#print(i,curr,defined) # debug
succ = rs[i+2]
prec = set(sts.split(rs[i+3])[1:]) # or
opts = {"-push":(set(),[],[]),"-pull":(set(),[],[])} # (fooNamesLookupForRepeated,fooContent)
for opt in nodeopt.split(rs[i+5])[1::nodeopt.groups+1]:
arr=sts.split(opt) # opt_type foo1 foo2 ...
dest=[k for k in opts if arr[0]==k] # opt_type
if len(dest)==0: raise TypeError("Error: "+arr[0]+" is not an option")
arr,dst=tuple(arr[1:]),opts[dest[0]]
if not (arr in dst[0]): # trim repeated combination
dst[0].add(arr)
dst[1].append([getattr(self.extendedView,f) for f in arr])
else: print("warning: permutation:",arr,"in",dest[0],"already exists in this node")
dst[2].append(arr)
gsv = re.split("[\n][ \t]*[\n]",rs[i+9]) # or
data.append((curr, ([ Goal().fromStr(gs,cd=cd,extView=self.extendedView).flatten() for gs in gsv ],succ,set(),[''],prec,opts) ))
# curr:( Goal()s , succ , succSet , succStrs , prec , opts)
#data.sort()
#print(defined),exit() # debug
#print(sorted(list(defined))) # debug
#pprint(data) # debug
self.sets=dict(data)
del data
'''
def getTarget(c):
tmp=c[1].split(":")[1] if ":" in c[1] else c[1]
return c[0],tmp,c[2]
for k in self.sets:
node=self.sets[k]
if node[1]=='-': continue
gs_node=node[0]
if len(gs_node)!=1: continue
gs_node=gs_node[0]
gs_node.arrange()
sn=set(gs_node.constraints)
succ=self.sets[node[1]]
gs_succ=succ[0]
for g in gs_succ:
if abs(len(g.constraints)-len(sn))>1: continue
ss=set(g.constraints)
delta=ss^sn
if len(delta)>2: continue
rem_sn,rem_ss=delta&sn,delta&ss
if len(rem_sn)!=1 or len(rem_ss)!=1: continue # no idea how to do
rem_sn,rem_ss=rem_sn.pop(),rem_ss.pop()
if not (":" in rem_sn[1] or ":" in rem_ss[1]): continue # not value
rem1_sn=re.split(r'[ \t]+',rem_sn[1])
rem1_ss=re.split(r'[ \t]+',rem_ss[1])
if len(rem1_sn)!=len(rem1_ss)!=1: continue
rem1_sn.sort(),rem1_ss.sort()
diff=[]
for i in range(len(rem1_sn)):
if rem1_sn[i]!=rem1_ss[i]:
diff.append((rem1_sn[i],rem1_ss[i]))
if len(diff)!=1 or diff[0]==diff[1]: continue
target=[ x[:x.index(":")] for x in diff ]
if target[0]!=target[1]: continue
vals=[ x[len(target[0])+1:] for x in diff ]
if not ',' in vals[0]: vals[0]=vals[0]+','+vals[0]
if not ',' in vals[1]: vals[1]=vals[1]+','+vals[1]
newNodes=[]
if vals[0]
print("?",gs_node),exit()
'''
self.isSuccsOf=dict([(k,set()) for k in self.sets])
for k,v in self.sets.items():
succSet,succStr=self._getSuccs(k)
v[2].update(succSet)
v[3][0]+=succStr
# basic keys
allKeys=set([k for k in self.sets])
for k in allKeys:
# all lower nodes
self.learned["nextgoal"][k]=dict([ (kk,(0.0-len(self.getSuccs(kk)))/len(allKeys)) for kk in allKeys-self.isSuccsOf[k] if kk!=k ])
self.learned["nextgoal"][""]=dict([ (k,(0.0-len(self.getSuccs(k)))/len(allKeys)) for k in allKeys if len(self.getPrecs(k))==0 ])
return self
def toStr(self,labelMinLen=0):
kv=[ k for k in self.sets ]
kv.sort()
rtv=""
tmpv=[]
for k in kv:
tmps=""
tmps+=k+'\t'+self.getSucc(k)
if len(self.getPrecs(k))!=0:
tmps+='\t'.join([""]+sorted([ kk for kk in self.getPrecs(k) ]))
opts=self.sets[k][5]
optstrs=[]
for opt in sorted([_ for _ in opts]):
if len(opts[opt][2])!=0:
optstrs.append('\t'.join([x for v in opts[opt][2]for x in[opt]+list(v)]))
tmps+='\t'.join([""]+optstrs)
tmpgsv=[ g.toStr(labelMinLen=labelMinLen) for g in self.getGoals(k) ]
tmpv.append('\n'.join([tmps,"\n\n".join(tmpgsv)]))
rtv+="\n\n\n".join(tmpv)
return rtv
def fromTxt(self,filename,_cd='./'):
'''
concept:
a block with a name is a set of Goal. that means reach one of them is a 'match', and can to further more (try the successor)
'''
'''
format prototype:
( none or more empty lines )
...
( none or more empty lines )
name successorName(if None, use '-')
# lines which cannot be parsed as <class: Goal>
label item
# lines which cannot be parsed as <class: Goal>
label item
...
label item
label item
( an empty line )
label item
label item
...
label item
label item
( two or more empty lines )
...
( two or more empty lines )
name successorName(if None, use '-')
label item
...
in regex:
'''
cd=_cd+filename[:filename.rindex('/')+1] if '/' in filename else _cd
filename=_cd+filename
try:
path=filename+".py"
if os.path.isfile(path):
spec = importlib.util.spec_from_file_location(filename,path)
self.extendedView = importlib.util.module_from_spec(spec)
spec.loader.exec_module(self.extendedView)
#print(inspect.getsource(self.extendedView)) # debug
except:
print("WARNING: file exists but it cannot be import:",path)
with open(filename,'rb') as f:
self.fromStr("".join(map(chr,f.read())),cd=cd,extView=self.extendedView)
self.filename=filename
return self
def size(self):
rtv={"byname":len(self.sets),"bygoal":0}
for _,d in self.sets.items():
arr=d[0]
for x in arr:
tmp=x.size()
for k,v in tmp.items(): rtv[k]+=v
return rtv
'''
learn file
a learn file records ordered goals of the successful paths
probably
'''
def loadNextGoalFile(self,filename=None):
# return True on error
# else False
if isNone(filename): filename=self.filename
if isNone(filename) or os.path.isfile(filename)==False: return True
with open(filename) as f:
self.learned["nextgoal"]=json.loads(f.read())["nextgoal"]
return False
def saveNextGoalFile(self,filename=None):
# filename
learnfile=""
if isNone(filename):
if isNone(self.filename):
t0=str(time.time())
t0+='0'*(t0.find('.')+8-len(t0))
self.filename=t0.replace('.','-')
learnfile+=self.filename+".learn"
else: learnfile+=filename
if os.path.isdir(learnfile): return True
with open(learnfile,"w") as f:
f.write(json.dumps(self.learned))
return False
def saveNextGoal(self,successSubgoalList):
'''
format of successSubgoalList is genSol()['nodes']
i.e. a list of successful paths
'''
# data
nextgoal=self.learned["nextgoal"]
for arr in successSubgoalList:
p=[""]+arr
for i in range(1,len(p)):
#print(p[i-1]) # debug
if not p[i-1] in nextgoal: nextgoal[ p[i-1] ]={}
curr=nextgoal[ p[i-1] ]
if not p[i] in curr: curr[ p[i] ]=0
curr[ p[i] ]+=1
return False
def wkeys(self,currentKey,notBelow=None,beforeKeys=set()):
'''
* weighted keys *
# ref-rtv
if isNone(notBelow):
rtv=[k for k in self.sets]
rtv.sort()
return rtv
else:
#rtv=[k for k in self.sets if not self.getSucc(k) in notBelow]
rtv=[k for k in self.sets if len(self.getSuccs(k)¬Below)==0]
rtv.sort()
return rtv
'''
# inter-func.
def valid_prec(k):
# control upper nodes to return
# True === valid , i.e. will be return from 'wkeys'
# it takes 'beforeKeys' to check if there's at least 1 presenting in 'precs'
precs=self.getPrecs(k)
return len(precs)==0 or len(precs&beforeKeys)!=0
if isNone(notBelow): notBelow=set()
if type(notBelow)!=set: notBelow=set(notBelow)
# data
#validKeys=[k for k in self.sets if len(self.getSuccs(k)¬Below)==0]
nextgoal=self.learned["nextgoal"]
target=nextgoal[currentKey] if currentKey in nextgoal else {}
rtv=[ (v,k) for k,v in target.items() if k in self.sets and len(self.getSuccs(k)¬Below)==0 and valid_prec(k) ]
#rtv+=[ (0,k) for k in self.sets if (not k in target) and len(self.getSuccs(k)¬Below)==0]
#rtv.sort(reverse=True) # leave it to caller
return rtv
def pulls(self,currentKey,notBelow=None,beforeKeys=set(),wkeys=None):
# return how other nodes can pull
# if 'wkeys' is not None: notBelow && beforeKeys will be omitted
# note: if wkeys is not sublist from self.wkeys, it might cause errors
if isNone(wkeys):
wkeys=self.wkeys(currentKey=currentKey,notBelow=notBelow,beforeKeys=beforeKeys)
wkeys.sort()
return [ hv for vk in wkeys for hv in self.getOpts(vk[1])["-pull"] ]
def pushs(self,currentKey):
# return how can the node push
rtv=[] if not currentKey in self.sets else self.getOpts(currentKey)["-push"]
return rtv
###########
| rn [ k for k in self.sets if self.getSucc(k)=='-' ]
d | identifier_body |
ag.py | #!/bin/python3
import os
import re
import sys
import json
from pprint import pprint
from shorthand import *
from amyhead import *
#charset_namespace="[A-Za-z0-9_$]+" # in 'shorthand'
#parser_goal=re.compile('[ \t]*([0-9]+)[ \t](.+)')
token_item=r'([\n]|^)([ \t]*\~?[0-9]+|[ \t]*\~?include|[ \t]*\~?gonear)[ \t]+([^ \t][^\n]*)'
#token_item_1='[ \t]*([0-9]+)[ \t]([^\n]+)'
#token_item='[\t]*\[[ \t]*[\n](([^\n]+[\n])+)[ \t]*\][ \t]*([\n]|$)'
token_nodeopt=r'(-pull|-push)([ \t]+'+charset_namespace+r')*'
token_goalset = r'(^|[\n])[ \t]*' # start : 1
token_goalset+= r'('+charset_namespace+r')[ \t]+('+charset_namespace+r'|-)' # name,succ : 2
token_goalset+= r'(([ \t]+'+charset_namespace+r')*)' # pres : 2
token_goalset+= r'(([ \t]+'+token_nodeopt+r')*)' # pu** : 4
token_goalset+= r'[ \t]*' # tailing
token_goalset+= r'(([\n][\n]?[^\n]+)*)' # contentInSameNode : 2
token_goalset+= r'([\n]+[\n](?=[\n])|[\n]?[\n]?$)' # sep : 2
# '(?=...)'Matches if ... matches next, but doesn’t consume any of the string.
sts=re.compile(r'[ \t]+')
nodeopt=re.compile(r'('+token_nodeopt+r')')
class KWs:
def __init__(self,kwv):
self.data={}
self.__cnt=0
for kw in kwv:
self.__cnt+=1
tmp={"label":self.__cnt,"len":len(kw),"txt":kw}
self[kw]=tmp
self[self.__cnt]=tmp
def getKwData(self,i):
'''
isinstance(i)==int or isinstance(i)==str
'''
return self.data[i] if i in self.data else None
class Goal:
# a part of goalset
# is a sub-class of Goaltree and should not be use directly
parser_item=re.compile(token_item)
#kwv=KWs(['include','gonear'])
KW_include_label=-1
KW_include_txt="include"
KW_include_lentxt=len(KW_include_txt)
KW_gonear_label=-2
KW_gonear_txt="gonear"
KW_gonear_lentxt=len(KW_gonear_txt)
# node
def __init__(self):
| self.constraints=[] # [ (int(label),item,negate?) ... ]
self.maxLabel=-1
self.including=False
self.arrangeNeeded=False
self.extendedView=None # inherit from Goaltree
def __eq__(self,rhs):
self.arrange()
if isinstance(rhs,self.__class__):
return self.constraints==rhs.constraints
else:
raise TypeError("unsupport: %s == %s"%(self.__class__,type(rhs)))
def __repr__(self):
return "[Goal:"+str(self.constraints)+"]"
def isSpecial(self):
# having constraints labels != 0
for c in self.constraints:
if c[0]!=0:
return 1
return 0
def flatten(self):
add=[]
delItSet=set()
rg=range(len(self.constraints))
for i in rg:
c=self.constraints[i]
if c[0]==self.__class__.KW_include_label:
src=c[1][1]
if len(src.sets)<=1 and len(src.pushs(""))==0 and len(src.pulls(""))==0:
cFinalGs=src[src.getFinals()[0]][0]
if len(cFinalGs)<=1 and cFinalGs[0].isSpecial()==0:
add+=cFinalGs[0].constraints
delItSet.add(i)
if len(add)!=0:
newCs=[ self.constraints[i] for i in rg if not i in delItSet ]
self.constraints=newCs
for c in add:
self.add(c[1],c[0],c[2],arrangeLater=True)
self.arrange()
return self
def arrange(self):
if self.arrangeNeeded!=False:
self.arrangeNeeded=False
self.constraints.sort(key=lambda x:(x[2],x[:2]))
tmpv=[]
tmp=0
for c in self.constraints:
if tmp==c: continue
tmpv.append(c)
tmp=c
self.constraints=tmpv
if len([ c[0] for c in tmpv if c[0]==-1])==0:
self.including=False
return self
def add(self,item,label=0,negate=False,arrangeLater=False):
# label must be an integer
self.constraints.append((label,item,negate))
if self.maxLabel<label: self.maxLabel=label
if arrangeLater==False: self.arrange()
else: self.arrangeNeeded=arrangeLater
def fromStr(self,s,cd='./',extView=None):
s=s.replace('\r','')
'''
character:'\r' is ommited
this function will NOT do the arrangement
make sure the lines of constraints:
labels are in increasing order
items of the same label are in lexicographical order
format:
each line: label item
([ \t]*\~?[0-9]+|include|gonear)
lines not match will be omitted
'''
# preserve
old=self.constraints
# clean
self.constraints=[]
self.maxLabel=-1
self.extendedView=extView
# start
lines=s.split('\n')
p=self.__class__.parser_item
rs=p.split(s)
#print('*'*11),pprint(rs) # debug
for i in range(1,len(rs),p.groups+1):
# not match , ([\n]|^) , [ \t]*\~?[0-9]+|KWs , [^\n]+
# start from 1 =>
# ([\n]|^) , [ \t]*[0-9]+|KWs , [^\n]+ , not match
isKW=False
negate=False
label=sts.sub('',rs[i+1])
if label[0]=='~':
negate=True
label=label[1:]
content=rs[i+2]
#print(rs[i],rs[i+1]) # debug
if label==self.__class__.KW_include_txt:
isKW=True
label=self.__class__.KW_include_label
self.including=True
tmp=Goaltree()
tmp.fromTxt(content,_cd=cd)
item=(content,tmp)
if label==self.__class__.KW_gonear_txt:
isKW=True
label=self.__class__.KW_gonear_label
tmp=None # TODO
item=(content,tmp)
if isKW==False:
item=content
label=int(label)
self.add(item,label,negate=negate,arrangeLater=True)
'''
for line in lines:
m=self.__class__.parser_item.match(line)
if isNone(m): continue
res=m.group(1,2)
self.add(res[1],int(res[0]),arrangeLater=True)
# TODO: need ORs
'''
return self
def toStr(self,labelMinLen=0):
length=max(len(str(self.maxLabel)),labelMinLen)
if self.including:
length=max(length,self.__class__.KW_include_lentxt)
rtv=""
tmpv=[]
for c in self.constraints:
useLen=length+c[2] # len of neg no usage
label=c[0]
content=c[1]
if label==self.__class__.KW_include_label:
useLen=0
label=self.__class__.KW_include_txt
content=c[1][0]
#if 0!=0: tmpv.append(c[1][1].toStr(labelMinLen=length).split('\n')[1:])
if label==self.__class__.KW_gonear_label:
useLen=0
label=self.__class__.KW_gonear_txt
content=c[1][0]
label=('~' if c[2] else '')+str(label)
tmpv.append("%*s\t%s"%(useLen,label,content))
rtv+='\n'.join(tmpv)
return rtv
def fromTxt(self,filename,_cd='./',extView=None):
cd=_cd+filename[:filename.rindex('/')+1] if '/' in filename else _cd
with open(_cd+filename,'rb') as f:
self.fromStr("".join(map(chr,f.read())),cd=cd,extView=extView)
return self
# TODO:
with open(_cd+filename+".learn",'rb') as f:
pass
def size(self):
rtv={"byname":0,"bygoal":1}
for c in self.constraints:
if c[0]=="include":
tmp=c[1][1].size()
for k,v in tmp.items(): rtv[k]+=v
return rtv
class Goaltree:
# lots of goalset
'''
definitions:
successor:
the next goalset to match after matching a goalset
closer the root(tree), closer the final goalset
'''
parser_set=re.compile(token_goalset)
def __init__(self):
self.sets={}
self.filename=None
self.extendedView=None
# an extendedView is an importlib.import_module() object
# this can only be used when using 'fromTxt'
# using 'fromStr' will remove (=None) previous extendedView from 'fromTxt'
# file name is '_cd' and 'filename' given to 'fromTxt' concatenate '.py'
# i.e. _cd+filename+".py"
## it is recommended to construct a hashtable ( key is tuple(*.outputs()) or you can specify other methods ) with timeout to prevent re-calculating same condition within the same goal to achive
self.learned={"nextgoal":{}}
self.isSuccsOf={}
# learn file is self.filename+".learn", self.filename will be set after self.fromTxt()
pass
def __repr__(self):
rtv='{Goaltree:\n'
tmp=[ (k,v) for k,v in self.sets.items() ]
tmp.sort()
for x in tmp:
rtv+="\t"+x[0]+":"+str(x[1])+",\n"
rtv+='\t-:0\n}'
return rtv
def __getitem__(self,k):
return self.sets[k] if k in self.sets else None
def newNode(self,goals=[],name="",successorName='-'):
node=(goals,successorName,set(),[''])
return name,node
def addNode(self,goalset=[],name="",successorName='-'):
# TODO
if name=="" or (name in self.sets): return 1
pass
def keys(self,notBelow=None,beforeKeys=set()):
def valid_prec(k):
precs=self.getPrecs(k)
return len(precs)==0 or len(precs&beforeKeys)!=0
if isNone(notBelow):
rtv=[k for k in self.sets if valid_prec(k)]
rtv.sort()
return rtv
else:
#rtv=[k for k in self.sets if not self.getSucc(k) in notBelow]
rtv=[k for k in self.sets if len(self.getSuccs(k)¬Below)==0 and valid_prec(k)]
rtv.sort()
return rtv
def getGoals(self,k):
# return a goalset
return self.sets[k][0] if k in self.sets else None
def getSucc(self,k):
return self.sets[k][1]
def _getSuccs(self,k):
rtvSet=set()
rtvStr=k
tmpsucc=self.getSucc(k)
while not ( tmpsucc=='-' or tmpsucc=='+' or (tmpsucc in rtvSet) ):
# rtv
rtvSet.add(tmpsucc)
rtvStr+='-'
rtvStr+=tmpsucc
# reversed succs
#if not tmpsucc in self.isSuccsOf: self.isSuccsOf[tmpsucc]=set() # is set before
self.isSuccsOf[tmpsucc].add(k)
# next
tmpsucc=self.getSucc(tmpsucc)
return rtvSet,rtvStr
def getSuccs(self,k):
return self.sets[k][2]
def getSuccsStr(self,k):
return self.sets[k][3][0]
def getPrecs(self,k):
return self.sets[k][4]
def getOpts(self,k):
rtv=dict(self.sets[k][5])
for i in rtv: rtv[i]=rtv[i][1]
return rtv
def getFinals(self):
return [ k for k in self.sets if self.getSucc(k)=='-' ]
def fromStr(self,s,cd='./',extView=None):
s=s.replace('\r','')
'''
\r\n , \n\r , \n -> \n
format: see self.fromTxt
'''
# unset filename
self.filename=None
self.extendedView=extView
#old=self.sets
p=self.__class__.parser_set
s=re.sub("(\n\r|\n|\r\n)[ \t]*(\n\r|\n|\r\n)","\n\n",s)
defined=set()
data=[]
rs=p.split(s) # cut via "\n\n\n"
#print(p.groups+1),print(rs[1:1+p.groups+1]),print(rs[1+(p.groups+1)*1:1+(p.groups+1)*2]),exit()
#print(rs[0]),exit() # debug
for i in range(1,len(rs),p.groups+1):
# rs[0] is "not match", omitted
# start from 1 =>
# (^|[\n]) , currName , succName , precNames , precName_last , pu** , pu**_last , (-pull|-push) , pu**_func_last , goals , others
# +0 , +1 , +2 , +3 , +4 , +5 , +6 , +7 , +8 , +9 , + >=10
#print(i,p.groups+1,rs[i-1]),print(rs[i:i+p.groups+1]) # debug
#if i>p.groups: exit() # debug
curr=rs[i+1]
if curr in defined:
raise TypeError("Error: '"+curr+"' is defined twice")
defined.add(curr)
#print(i,curr,defined) # debug
succ = rs[i+2]
prec = set(sts.split(rs[i+3])[1:]) # or
opts = {"-push":(set(),[],[]),"-pull":(set(),[],[])} # (fooNamesLookupForRepeated,fooContent)
for opt in nodeopt.split(rs[i+5])[1::nodeopt.groups+1]:
arr=sts.split(opt) # opt_type foo1 foo2 ...
dest=[k for k in opts if arr[0]==k] # opt_type
if len(dest)==0: raise TypeError("Error: "+arr[0]+" is not an option")
arr,dst=tuple(arr[1:]),opts[dest[0]]
if not (arr in dst[0]): # trim repeated combination
dst[0].add(arr)
dst[1].append([getattr(self.extendedView,f) for f in arr])
else: print("warning: permutation:",arr,"in",dest[0],"already exists in this node")
dst[2].append(arr)
gsv = re.split("[\n][ \t]*[\n]",rs[i+9]) # or
data.append((curr, ([ Goal().fromStr(gs,cd=cd,extView=self.extendedView).flatten() for gs in gsv ],succ,set(),[''],prec,opts) ))
# curr:( Goal()s , succ , succSet , succStrs , prec , opts)
#data.sort()
#print(defined),exit() # debug
#print(sorted(list(defined))) # debug
#pprint(data) # debug
self.sets=dict(data)
del data
'''
def getTarget(c):
tmp=c[1].split(":")[1] if ":" in c[1] else c[1]
return c[0],tmp,c[2]
for k in self.sets:
node=self.sets[k]
if node[1]=='-': continue
gs_node=node[0]
if len(gs_node)!=1: continue
gs_node=gs_node[0]
gs_node.arrange()
sn=set(gs_node.constraints)
succ=self.sets[node[1]]
gs_succ=succ[0]
for g in gs_succ:
if abs(len(g.constraints)-len(sn))>1: continue
ss=set(g.constraints)
delta=ss^sn
if len(delta)>2: continue
rem_sn,rem_ss=delta&sn,delta&ss
if len(rem_sn)!=1 or len(rem_ss)!=1: continue # no idea how to do
rem_sn,rem_ss=rem_sn.pop(),rem_ss.pop()
if not (":" in rem_sn[1] or ":" in rem_ss[1]): continue # not value
rem1_sn=re.split(r'[ \t]+',rem_sn[1])
rem1_ss=re.split(r'[ \t]+',rem_ss[1])
if len(rem1_sn)!=len(rem1_ss)!=1: continue
rem1_sn.sort(),rem1_ss.sort()
diff=[]
for i in range(len(rem1_sn)):
if rem1_sn[i]!=rem1_ss[i]:
diff.append((rem1_sn[i],rem1_ss[i]))
if len(diff)!=1 or diff[0]==diff[1]: continue
target=[ x[:x.index(":")] for x in diff ]
if target[0]!=target[1]: continue
vals=[ x[len(target[0])+1:] for x in diff ]
if not ',' in vals[0]: vals[0]=vals[0]+','+vals[0]
if not ',' in vals[1]: vals[1]=vals[1]+','+vals[1]
newNodes=[]
if vals[0]
print("?",gs_node),exit()
'''
self.isSuccsOf=dict([(k,set()) for k in self.sets])
for k,v in self.sets.items():
succSet,succStr=self._getSuccs(k)
v[2].update(succSet)
v[3][0]+=succStr
# basic keys
allKeys=set([k for k in self.sets])
for k in allKeys:
# all lower nodes
self.learned["nextgoal"][k]=dict([ (kk,(0.0-len(self.getSuccs(kk)))/len(allKeys)) for kk in allKeys-self.isSuccsOf[k] if kk!=k ])
self.learned["nextgoal"][""]=dict([ (k,(0.0-len(self.getSuccs(k)))/len(allKeys)) for k in allKeys if len(self.getPrecs(k))==0 ])
return self
def toStr(self,labelMinLen=0):
kv=[ k for k in self.sets ]
kv.sort()
rtv=""
tmpv=[]
for k in kv:
tmps=""
tmps+=k+'\t'+self.getSucc(k)
if len(self.getPrecs(k))!=0:
tmps+='\t'.join([""]+sorted([ kk for kk in self.getPrecs(k) ]))
opts=self.sets[k][5]
optstrs=[]
for opt in sorted([_ for _ in opts]):
if len(opts[opt][2])!=0:
optstrs.append('\t'.join([x for v in opts[opt][2]for x in[opt]+list(v)]))
tmps+='\t'.join([""]+optstrs)
tmpgsv=[ g.toStr(labelMinLen=labelMinLen) for g in self.getGoals(k) ]
tmpv.append('\n'.join([tmps,"\n\n".join(tmpgsv)]))
rtv+="\n\n\n".join(tmpv)
return rtv
def fromTxt(self,filename,_cd='./'):
'''
concept:
a block with a name is a set of Goal. that means reach one of them is a 'match', and can to further more (try the successor)
'''
'''
format prototype:
( none or more empty lines )
...
( none or more empty lines )
name successorName(if None, use '-')
# lines which cannot be parsed as <class: Goal>
label item
# lines which cannot be parsed as <class: Goal>
label item
...
label item
label item
( an empty line )
label item
label item
...
label item
label item
( two or more empty lines )
...
( two or more empty lines )
name successorName(if None, use '-')
label item
...
in regex:
'''
cd=_cd+filename[:filename.rindex('/')+1] if '/' in filename else _cd
filename=_cd+filename
try:
path=filename+".py"
if os.path.isfile(path):
spec = importlib.util.spec_from_file_location(filename,path)
self.extendedView = importlib.util.module_from_spec(spec)
spec.loader.exec_module(self.extendedView)
#print(inspect.getsource(self.extendedView)) # debug
except:
print("WARNING: file exists but it cannot be import:",path)
with open(filename,'rb') as f:
self.fromStr("".join(map(chr,f.read())),cd=cd,extView=self.extendedView)
self.filename=filename
return self
def size(self):
rtv={"byname":len(self.sets),"bygoal":0}
for _,d in self.sets.items():
arr=d[0]
for x in arr:
tmp=x.size()
for k,v in tmp.items(): rtv[k]+=v
return rtv
'''
learn file
a learn file records ordered goals of the successful paths
probably
'''
def loadNextGoalFile(self,filename=None):
# return True on error
# else False
if isNone(filename): filename=self.filename
if isNone(filename) or os.path.isfile(filename)==False: return True
with open(filename) as f:
self.learned["nextgoal"]=json.loads(f.read())["nextgoal"]
return False
def saveNextGoalFile(self,filename=None):
# filename
learnfile=""
if isNone(filename):
if isNone(self.filename):
t0=str(time.time())
t0+='0'*(t0.find('.')+8-len(t0))
self.filename=t0.replace('.','-')
learnfile+=self.filename+".learn"
else: learnfile+=filename
if os.path.isdir(learnfile): return True
with open(learnfile,"w") as f:
f.write(json.dumps(self.learned))
return False
def saveNextGoal(self,successSubgoalList):
'''
format of successSubgoalList is genSol()['nodes']
i.e. a list of successful paths
'''
# data
nextgoal=self.learned["nextgoal"]
for arr in successSubgoalList:
p=[""]+arr
for i in range(1,len(p)):
#print(p[i-1]) # debug
if not p[i-1] in nextgoal: nextgoal[ p[i-1] ]={}
curr=nextgoal[ p[i-1] ]
if not p[i] in curr: curr[ p[i] ]=0
curr[ p[i] ]+=1
return False
def wkeys(self,currentKey,notBelow=None,beforeKeys=set()):
'''
* weighted keys *
# ref-rtv
if isNone(notBelow):
rtv=[k for k in self.sets]
rtv.sort()
return rtv
else:
#rtv=[k for k in self.sets if not self.getSucc(k) in notBelow]
rtv=[k for k in self.sets if len(self.getSuccs(k)¬Below)==0]
rtv.sort()
return rtv
'''
# inter-func.
def valid_prec(k):
# control upper nodes to return
# True === valid , i.e. will be return from 'wkeys'
# it takes 'beforeKeys' to check if there's at least 1 presenting in 'precs'
precs=self.getPrecs(k)
return len(precs)==0 or len(precs&beforeKeys)!=0
if isNone(notBelow): notBelow=set()
if type(notBelow)!=set: notBelow=set(notBelow)
# data
#validKeys=[k for k in self.sets if len(self.getSuccs(k)¬Below)==0]
nextgoal=self.learned["nextgoal"]
target=nextgoal[currentKey] if currentKey in nextgoal else {}
rtv=[ (v,k) for k,v in target.items() if k in self.sets and len(self.getSuccs(k)¬Below)==0 and valid_prec(k) ]
#rtv+=[ (0,k) for k in self.sets if (not k in target) and len(self.getSuccs(k)¬Below)==0]
#rtv.sort(reverse=True) # leave it to caller
return rtv
def pulls(self,currentKey,notBelow=None,beforeKeys=set(),wkeys=None):
# return how other nodes can pull
# if 'wkeys' is not None: notBelow && beforeKeys will be omitted
# note: if wkeys is not sublist from self.wkeys, it might cause errors
if isNone(wkeys):
wkeys=self.wkeys(currentKey=currentKey,notBelow=notBelow,beforeKeys=beforeKeys)
wkeys.sort()
return [ hv for vk in wkeys for hv in self.getOpts(vk[1])["-pull"] ]
def pushs(self,currentKey):
# return how can the node push
rtv=[] if not currentKey in self.sets else self.getOpts(currentKey)["-push"]
return rtv
########### | random_line_split | |
ag.py | #!/bin/python3
import importlib.util
import json
import os
import re
import sys
import time

from pprint import pprint
from shorthand import *
from amyhead import *
#charset_namespace="[A-Za-z0-9_$]+" # in 'shorthand'
#parser_goal=re.compile('[ \t]*([0-9]+)[ \t](.+)')
token_item=r'([\n]|^)([ \t]*\~?[0-9]+|[ \t]*\~?include|[ \t]*\~?gonear)[ \t]+([^ \t][^\n]*)'
#token_item_1='[ \t]*([0-9]+)[ \t]([^\n]+)'
#token_item='[\t]*\[[ \t]*[\n](([^\n]+[\n])+)[ \t]*\][ \t]*([\n]|$)'
token_nodeopt=r'(-pull|-push)([ \t]+'+charset_namespace+r')*'
token_goalset = r'(^|[\n])[ \t]*' # start : 1
token_goalset+= r'('+charset_namespace+r')[ \t]+('+charset_namespace+r'|-)' # name,succ : 2
token_goalset+= r'(([ \t]+'+charset_namespace+r')*)' # pres : 2
token_goalset+= r'(([ \t]+'+token_nodeopt+r')*)' # pu** : 4
token_goalset+= r'[ \t]*' # tailing
token_goalset+= r'(([\n][\n]?[^\n]+)*)' # contentInSameNode : 2
token_goalset+= r'([\n]+[\n](?=[\n])|[\n]?[\n]?$)' # sep : 2
# '(?=...)'Matches if ... matches next, but doesn’t consume any of the string.
sts=re.compile(r'[ \t]+')
nodeopt=re.compile(r'('+token_nodeopt+r')')
class KWs:
    """Registry of keyword tokens, addressable both by text and by 1-based label.

    Each keyword maps to a dict {"label": int, "len": int, "txt": str}; the same
    dict is registered under the keyword string and under its numeric label.
    """
    def __init__(self,kwv):
        self.data={}
        self.__cnt=0
        for kw in kwv:
            self.__cnt+=1
            tmp={"label":self.__cnt,"len":len(kw),"txt":kw}
            # BUG FIX: the original wrote `self[kw]=tmp`, but KWs defines no
            # __setitem__, so construction raised TypeError. Store in self.data.
            self.data[kw]=tmp
            self.data[self.__cnt]=tmp
    def getKwData(self,i):
        '''
        Look up a keyword record by text (str) or label (int).
        Returns the record dict, or None when 'i' is unknown.
        '''
        return self.data[i] if i in self.data else None
class Goal:
    # A part of a goalset: one conjunction of labelled constraints.
    # It is a sub-part of Goaltree and should not be used directly.
    parser_item=re.compile(token_item)
    #kwv=KWs(['include','gonear'])
    # Negative labels are reserved for keyword constraints; ordinary constraints
    # use non-negative integer labels parsed from the text.
    KW_include_label=-1
    KW_include_txt="include"
    KW_include_lentxt=len(KW_include_txt)
    KW_gonear_label=-2
    KW_gonear_txt="gonear"
    KW_gonear_lentxt=len(KW_gonear_txt)
    # node
    def __init__(self):
        self.constraints=[] # [ (int(label),item,negate?) ... ]
        self.maxLabel=-1          # largest label seen, used for toStr() column width
        self.including=False      # True while an unresolved 'include' constraint exists
        self.arrangeNeeded=False  # lazy flag: sort/dedupe deferred until arrange()
        self.extendedView=None # inherited from Goaltree (importlib module or None)
    def __eq__(self,rhs):
        # Arranges self (sort + dedupe) before comparing.
        # NOTE(review): rhs is not arranged, and non-Goal rhs raises TypeError
        # instead of returning NotImplemented — confirm callers rely on this.
        self.arrange()
        if isinstance(rhs,self.__class__):
            return self.constraints==rhs.constraints
        else:
            raise TypeError("unsupport: %s == %s"%(self.__class__,type(rhs)))
    def __repr__(self):
        return "[Goal:"+str(self.constraints)+"]"
    def isSpecial(self):
        # Returns 1 if any constraint has a non-zero label (keyword or custom), else 0.
        for c in self.constraints:
            if c[0]!=0:
                return 1
        return 0
    def flatten(self):
        # Inline trivially simple 'include'd Goaltrees: a single-node tree with no
        # push/pull options and a single non-special final goal is replaced by
        # that goal's constraints, then the list is re-arranged.
        add=[]
        delItSet=set()
        rg=range(len(self.constraints))
        for i in rg:
            c=self.constraints[i]
            if c[0]==self.__class__.KW_include_label:
                src=c[1][1]   # the included Goaltree (item is (filename, Goaltree))
                if len(src.sets)<=1 and len(src.pushs(""))==0 and len(src.pulls(""))==0:
                    cFinalGs=src[src.getFinals()[0]][0]
                    if len(cFinalGs)<=1 and cFinalGs[0].isSpecial()==0:
                        add+=cFinalGs[0].constraints
                        delItSet.add(i)
        if len(add)!=0:
            newCs=[ self.constraints[i] for i in rg if not i in delItSet ]
            self.constraints=newCs
            for c in add:
                self.add(c[1],c[0],c[2],arrangeLater=True)
            self.arrange()
        return self
    def arrange(self):
        # Sort constraints (negated last, then by (label,item)) and drop adjacent
        # duplicates. No-op unless arrangeNeeded was set by add(...,arrangeLater=True).
        if self.arrangeNeeded!=False:
            self.arrangeNeeded=False
            self.constraints.sort(key=lambda x:(x[2],x[:2]))
            tmpv=[]
            tmp=0   # sentinel never equal to a constraint tuple
            for c in self.constraints:
                if tmp==c: continue
                tmpv.append(c)
                tmp=c
            self.constraints=tmpv
            # clear 'including' once no include-labelled constraint remains
            if len([ c[0] for c in tmpv if c[0]==-1])==0:
                self.including=False
        return self
    def add(self,item,label=0,negate=False,arrangeLater=False):
        # label must be an integer
        self.constraints.append((label,item,negate))
        if self.maxLabel<label: self.maxLabel=label
        if arrangeLater==False: self.arrange()
        else: self.arrangeNeeded=arrangeLater
    def fromStr(self,s,cd='./',extView=None):
        s=s.replace('\r','')
        '''
        Parse one goal block from text, one constraint per line.
        character:'\r' is omitted
        this function will NOT do the arrangement
        make sure the lines of constraints:
            labels are in increasing order
            items of the same label are in lexicographical order
        format:
            each line: label item
            ([ \t]*\~?[0-9]+|include|gonear)
            lines not matching are omitted
        '''
        # preserve
        old=self.constraints
        # clean
        self.constraints=[]
        self.maxLabel=-1
        self.extendedView=extView
        # start
        lines=s.split('\n')   # NOTE(review): unused since the regex-split path below
        p=self.__class__.parser_item
        rs=p.split(s)
        #print('*'*11),pprint(rs) # debug
        for i in range(1,len(rs),p.groups+1):
            # re.split layout: not-match , ([\n]|^) , [ \t]*\~?[0-9]+|KWs , [^\n]+
            # starting from 1 gives: ([\n]|^) , label-or-keyword , content , next not-match
            isKW=False
            negate=False
            label=sts.sub('',rs[i+1])   # strip whitespace from the label token
            if label[0]=='~':
                negate=True
                label=label[1:]
            content=rs[i+2]
            #print(rs[i],rs[i+1]) # debug
            if label==self.__class__.KW_include_txt:
                # 'include <file>': recursively load another Goaltree
                isKW=True
                label=self.__class__.KW_include_label
                self.including=True
                tmp=Goaltree()
                tmp.fromTxt(content,_cd=cd)
                item=(content,tmp)
            if label==self.__class__.KW_gonear_txt:
                isKW=True
                label=self.__class__.KW_gonear_label
                tmp=None # TODO
                item=(content,tmp)
            if isKW==False:
                item=content
                label=int(label)
            self.add(item,label,negate=negate,arrangeLater=True)
        '''
        for line in lines:
            m=self.__class__.parser_item.match(line)
            if isNone(m): continue
            res=m.group(1,2)
            self.add(res[1],int(res[0]),arrangeLater=True)
            # TODO: need ORs
        '''
        return self
    def toStr(self,labelMinLen=0):
        # Render constraints back to text, right-aligning numeric labels to a
        # common column width; keyword lines are emitted unpadded.
        length=max(len(str(self.maxLabel)),labelMinLen)
        if self.including:
            length=max(length,self.__class__.KW_include_lentxt)
        rtv=""
        tmpv=[]
        for c in self.constraints:
            useLen=length+c[2] # extra column for the '~' negation prefix
            label=c[0]
            content=c[1]
            if label==self.__class__.KW_include_label:
                useLen=0
                label=self.__class__.KW_include_txt
                content=c[1][0]
                #if 0!=0: tmpv.append(c[1][1].toStr(labelMinLen=length).split('\n')[1:])
            if label==self.__class__.KW_gonear_label:
                useLen=0
                label=self.__class__.KW_gonear_txt
                content=c[1][0]
            label=('~' if c[2] else '')+str(label)
            tmpv.append("%*s\t%s"%(useLen,label,content))
        rtv+='\n'.join(tmpv)
        return rtv
    def fromTxt(self,filename,_cd='./',extView=None):
        # Load a goal block from a file; 'cd' tracks the file's directory so
        # nested 'include' paths resolve relative to it.
        cd=_cd+filename[:filename.rindex('/')+1] if '/' in filename else _cd
        with open(_cd+filename,'rb') as f:
            self.fromStr("".join(map(chr,f.read())),cd=cd,extView=extView)
        return self
        # TODO:
        # NOTE(review): everything below is unreachable (after 'return self').
        with open(_cd+filename+".learn",'rb') as f:
            pass
    def size(self):
        # Count goals, recursing into included Goaltrees.
        # NOTE(review): labels are ints (KW_include_label == -1), so the string
        # comparison c[0]=="include" never matches and includes are never counted
        # — looks like a bug; confirm intent before changing.
        rtv={"byname":0,"bygoal":1}
        for c in self.constraints:
            if c[0]=="include":
                tmp=c[1][1].size()
                for k,v in tmp.items(): rtv[k]+=v
        return rtv
class Goaltree:
# lots of goalset
'''
definitions:
successor:
the next goalset to match after matching a goalset
closer the root(tree), closer the final goalset
'''
parser_set=re.compile(token_goalset)
def __init__(self):
self.sets={}
self.filename=None
self.extendedView=None
# an extendedView is an importlib.import_module() object
# this can only be used when using 'fromTxt'
# using 'fromStr' will remove (=None) previous extendedView from 'fromTxt'
# file name is '_cd' and 'filename' given to 'fromTxt' concatenate '.py'
# i.e. _cd+filename+".py"
## it is recommended to construct a hashtable ( key is tuple(*.outputs()) or you can specify other methods ) with timeout to prevent re-calculating same condition within the same goal to achive
self.learned={"nextgoal":{}}
self.isSuccsOf={}
# learn file is self.filename+".learn", self.filename will be set after self.fromTxt()
pass
def __repr__(self):
rtv='{Goaltree:\n'
tmp=[ (k,v) for k,v in self.sets.items() ]
tmp.sort()
for x in tmp:
rtv+="\t"+x[0]+":"+str(x[1])+",\n"
rtv+='\t-:0\n}'
return rtv
def __getitem__(self,k):
return self.sets[k] if k in self.sets else None
def newNode(self,goals=[],name="",successorName='-'):
node=(goals,successorName,set(),[''])
return name,node
def addNode(self,goalset=[],name="",successorName='-'):
# TODO
if name=="" or (name in self.sets): return 1
pass
def keys(self,notBelow=None,beforeKeys=set()):
    """Return sorted node names whose prerequisites are satisfied.

    notBelow: optional collection of node names; nodes whose successor chain
        touches any of them are excluded.
    beforeKeys: set of already-visited names; a node with prerequisites is
        valid only if at least one prerequisite is in beforeKeys.
    """
    def valid_prec(k):
        precs=self.getPrecs(k)
        return len(precs)==0 or len(precs&beforeKeys)!=0
    if isNone(notBelow):
        rtv=[k for k in self.sets if valid_prec(k)]
        rtv.sort()
        return rtv
    else:
        # BUG FIX: the source contained the mojibake `getSuccs(k)¬Below` —
        # an HTML-entity mangling of `&notBelow` (set intersection). Restored.
        # Also normalize notBelow to a set, consistent with wkeys().
        if type(notBelow)!=set: notBelow=set(notBelow)
        #rtv=[k for k in self.sets if not self.getSucc(k) in notBelow]
        rtv=[k for k in self.sets if len(self.getSuccs(k)&notBelow)==0 and valid_prec(k)]
        rtv.sort()
        return rtv
def getGoals(self,k):
# return a goalset
return self.sets[k][0] if k in self.sets else None
def getSucc(self,k):
return self.sets[k][1]
def _getSuccs(self,k):
rtvSet=set()
rtvStr=k
tmpsucc=self.getSucc(k)
while not ( tmpsucc=='-' or tmpsucc=='+' or (tmpsucc in rtvSet) ):
# rtv
rtvSet.add(tmpsucc)
rtvStr+='-'
rtvStr+=tmpsucc
# reversed succs
#if not tmpsucc in self.isSuccsOf: self.isSuccsOf[tmpsucc]=set() # is set before
self.isSuccsOf[tmpsucc].add(k)
# next
tmpsucc=self.getSucc(tmpsucc)
return rtvSet,rtvStr
def getS | f,k):
return self.sets[k][2]
def getSuccsStr(self,k):
return self.sets[k][3][0]
def getPrecs(self,k):
return self.sets[k][4]
def getOpts(self,k):
rtv=dict(self.sets[k][5])
for i in rtv: rtv[i]=rtv[i][1]
return rtv
def getFinals(self):
return [ k for k in self.sets if self.getSucc(k)=='-' ]
def fromStr(self,s,cd='./',extView=None):
s=s.replace('\r','')
'''
\r\n , \n\r , \n -> \n
format: see self.fromTxt
'''
# unset filename
self.filename=None
self.extendedView=extView
#old=self.sets
p=self.__class__.parser_set
s=re.sub("(\n\r|\n|\r\n)[ \t]*(\n\r|\n|\r\n)","\n\n",s)
defined=set()
data=[]
rs=p.split(s) # cut via "\n\n\n"
#print(p.groups+1),print(rs[1:1+p.groups+1]),print(rs[1+(p.groups+1)*1:1+(p.groups+1)*2]),exit()
#print(rs[0]),exit() # debug
for i in range(1,len(rs),p.groups+1):
# rs[0] is "not match", omitted
# start from 1 =>
# (^|[\n]) , currName , succName , precNames , precName_last , pu** , pu**_last , (-pull|-push) , pu**_func_last , goals , others
# +0 , +1 , +2 , +3 , +4 , +5 , +6 , +7 , +8 , +9 , + >=10
#print(i,p.groups+1,rs[i-1]),print(rs[i:i+p.groups+1]) # debug
#if i>p.groups: exit() # debug
curr=rs[i+1]
if curr in defined:
raise TypeError("Error: '"+curr+"' is defined twice")
defined.add(curr)
#print(i,curr,defined) # debug
succ = rs[i+2]
prec = set(sts.split(rs[i+3])[1:]) # or
opts = {"-push":(set(),[],[]),"-pull":(set(),[],[])} # (fooNamesLookupForRepeated,fooContent)
for opt in nodeopt.split(rs[i+5])[1::nodeopt.groups+1]:
arr=sts.split(opt) # opt_type foo1 foo2 ...
dest=[k for k in opts if arr[0]==k] # opt_type
if len(dest)==0: raise TypeError("Error: "+arr[0]+" is not an option")
arr,dst=tuple(arr[1:]),opts[dest[0]]
if not (arr in dst[0]): # trim repeated combination
dst[0].add(arr)
dst[1].append([getattr(self.extendedView,f) for f in arr])
else: print("warning: permutation:",arr,"in",dest[0],"already exists in this node")
dst[2].append(arr)
gsv = re.split("[\n][ \t]*[\n]",rs[i+9]) # or
data.append((curr, ([ Goal().fromStr(gs,cd=cd,extView=self.extendedView).flatten() for gs in gsv ],succ,set(),[''],prec,opts) ))
# curr:( Goal()s , succ , succSet , succStrs , prec , opts)
#data.sort()
#print(defined),exit() # debug
#print(sorted(list(defined))) # debug
#pprint(data) # debug
self.sets=dict(data)
del data
'''
def getTarget(c):
tmp=c[1].split(":")[1] if ":" in c[1] else c[1]
return c[0],tmp,c[2]
for k in self.sets:
node=self.sets[k]
if node[1]=='-': continue
gs_node=node[0]
if len(gs_node)!=1: continue
gs_node=gs_node[0]
gs_node.arrange()
sn=set(gs_node.constraints)
succ=self.sets[node[1]]
gs_succ=succ[0]
for g in gs_succ:
if abs(len(g.constraints)-len(sn))>1: continue
ss=set(g.constraints)
delta=ss^sn
if len(delta)>2: continue
rem_sn,rem_ss=delta&sn,delta&ss
if len(rem_sn)!=1 or len(rem_ss)!=1: continue # no idea how to do
rem_sn,rem_ss=rem_sn.pop(),rem_ss.pop()
if not (":" in rem_sn[1] or ":" in rem_ss[1]): continue # not value
rem1_sn=re.split(r'[ \t]+',rem_sn[1])
rem1_ss=re.split(r'[ \t]+',rem_ss[1])
if len(rem1_sn)!=len(rem1_ss)!=1: continue
rem1_sn.sort(),rem1_ss.sort()
diff=[]
for i in range(len(rem1_sn)):
if rem1_sn[i]!=rem1_ss[i]:
diff.append((rem1_sn[i],rem1_ss[i]))
if len(diff)!=1 or diff[0]==diff[1]: continue
target=[ x[:x.index(":")] for x in diff ]
if target[0]!=target[1]: continue
vals=[ x[len(target[0])+1:] for x in diff ]
if not ',' in vals[0]: vals[0]=vals[0]+','+vals[0]
if not ',' in vals[1]: vals[1]=vals[1]+','+vals[1]
newNodes=[]
if vals[0]
print("?",gs_node),exit()
'''
self.isSuccsOf=dict([(k,set()) for k in self.sets])
for k,v in self.sets.items():
succSet,succStr=self._getSuccs(k)
v[2].update(succSet)
v[3][0]+=succStr
# basic keys
allKeys=set([k for k in self.sets])
for k in allKeys:
# all lower nodes
self.learned["nextgoal"][k]=dict([ (kk,(0.0-len(self.getSuccs(kk)))/len(allKeys)) for kk in allKeys-self.isSuccsOf[k] if kk!=k ])
self.learned["nextgoal"][""]=dict([ (k,(0.0-len(self.getSuccs(k)))/len(allKeys)) for k in allKeys if len(self.getPrecs(k))==0 ])
return self
def toStr(self,labelMinLen=0):
    # Serialize the whole tree back to its text format:
    # per node a header line "name<TAB>succ[<TAB>precs...][<TAB>opts...]",
    # goals separated by one blank line, nodes separated by two blank lines.
    kv=[ k for k in self.sets ]
    kv.sort()
    rtv=""
    tmpv=[]
    for k in kv:
        tmps=""
        tmps+=k+'\t'+self.getSucc(k)
        if len(self.getPrecs(k))!=0:
            # leading "" makes join emit a separating tab before the first prec
            tmps+='\t'.join([""]+sorted([ kk for kk in self.getPrecs(k) ]))
        opts=self.sets[k][5]
        optstrs=[]
        for opt in sorted([_ for _ in opts]):
            if len(opts[opt][2])!=0:
                # opts[opt][2] holds the raw name tuples; re-emit as "-pull/-push name..."
                optstrs.append('\t'.join([x for v in opts[opt][2]for x in[opt]+list(v)]))
        tmps+='\t'.join([""]+optstrs)
        tmpgsv=[ g.toStr(labelMinLen=labelMinLen) for g in self.getGoals(k) ]
        tmpv.append('\n'.join([tmps,"\n\n".join(tmpgsv)]))
    rtv+="\n\n\n".join(tmpv)
    return rtv
def fromTxt(self,filename,_cd='./'):
    '''
    concept:
        a named block is a set of Goal; reaching any one of them is a 'match',
        after which the successor node can be attempted
    '''
    '''
    format prototype:
        ( none or more empty lines )
        ...
        name successorName(if None, use '-')
        # lines which cannot be parsed as <class: Goal>
        label item
        ...
        label item
        ( an empty line )
        label item
        ...
        ( two or more empty lines )
        name successorName(if None, use '-')
        label item
        ...
    in regex: see token_goalset at module level
    '''
    # resolve the file's directory so nested includes load relative to it
    cd=_cd+filename[:filename.rindex('/')+1] if '/' in filename else _cd
    filename=_cd+filename
    try:
        # optional sibling "<file>.py" provides the extendedView module
        # (push/pull callback functions looked up by name in fromStr)
        path=filename+".py"
        if os.path.isfile(path):
            spec = importlib.util.spec_from_file_location(filename,path)
            self.extendedView = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(self.extendedView)
            #print(inspect.getsource(self.extendedView)) # debug
    except:
        # NOTE(review): bare except — this also swallows a NameError when
        # importlib.util is not in scope; confirm it is imported.
        print("WARNING: file exists but it cannot be import:",path)
    with open(filename,'rb') as f:
        # bytes -> str via chr(): effectively latin-1 decoding of the file
        self.fromStr("".join(map(chr,f.read())),cd=cd,extView=self.extendedView)
    self.filename=filename
    return self
def size(self):
    """Tally tree size: "byname" counts nodes, "bygoal" sums goal counts
    (each goal recursively reports includes via Goal.size())."""
    totals={"byname":len(self.sets),"bygoal":0}
    for node in self.sets.values():
        for goal in node[0]:
            for key,count in goal.size().items():
                totals[key]+=count
    return totals
'''
learn file
a learn file records ordered goals of the successful paths
probably
'''
def loadNextGoalFile(self,filename=None):
    """Load the learned "nextgoal" table from a JSON file.

    Falls back to self.filename when no name is given.
    Returns True on error (no usable filename / file missing), False on success.
    """
    target=self.filename if isNone(filename) else filename
    if isNone(target):
        return True
    if not os.path.isfile(target):
        return True
    with open(target) as f:
        data=json.load(f)
    self.learned["nextgoal"]=data["nextgoal"]
    return False
def saveNextGoalFile(self,filename=None):
    # Persist self.learned as JSON.
    # filename: explicit target path; when None, "<self.filename>.learn" is used,
    #   and when self.filename is also None a timestamp-derived name is invented.
    # Returns True on error (target is a directory), False on success.
    learnfile=""
    if isNone(filename):
        if isNone(self.filename):
            # derive a filename from the current time, padded to 7 fractional
            # digits, with '.' replaced so it is filesystem-safe
            # NOTE(review): relies on 'time' being in scope — it is not in the
            # visible explicit imports; confirm a wildcard import provides it.
            t0=str(time.time())
            t0+='0'*(t0.find('.')+8-len(t0))
            self.filename=t0.replace('.','-')
        learnfile+=self.filename+".learn"
    else: learnfile+=filename
    if os.path.isdir(learnfile): return True
    with open(learnfile,"w") as f:
        f.write(json.dumps(self.learned))
    return False
def saveNextGoal(self,successSubgoalList):
    '''
    Accumulate goal-transition counts from successful paths.

    format of successSubgoalList is genSol()['nodes'], i.e. a list of
    successful paths; each path is prefixed with "" so the first real goal
    is counted as a transition from the empty start key.
    Always returns False.
    '''
    table=self.learned["nextgoal"]
    for path in successSubgoalList:
        steps=[""]+path
        for prev,curr in zip(steps,steps[1:]):
            bucket=table.setdefault(prev,{})
            bucket[curr]=bucket.get(curr,0)+1
    return False
def wkeys(self,currentKey,notBelow=None,beforeKeys=set()):
    '''
    * weighted keys *
    Return [(weight, key), ...] of candidate next goals learned for
    'currentKey', filtered like keys(): not below 'notBelow' and with at
    least one prerequisite in 'beforeKeys' (when prerequisites exist).
    # ref-rtv
    if isNone(notBelow):
        rtv=[k for k in self.sets]
        rtv.sort()
        return rtv
    else:
        #rtv=[k for k in self.sets if not self.getSucc(k) in notBelow]
        rtv=[k for k in self.sets if len(self.getSuccs(k)&notBelow)==0]
        rtv.sort()
        return rtv
    '''
    # BUG FIX: the source contained the mojibake `getSuccs(k)¬Below` in the
    # filters below — an HTML-entity mangling of `&notBelow` (set
    # intersection), which is invalid Python. Restored throughout.
    # inter-func.
    def valid_prec(k):
        # control upper nodes to return
        # True === valid , i.e. will be returned from 'wkeys'
        # it takes 'beforeKeys' to check if there's at least 1 present in 'precs'
        precs=self.getPrecs(k)
        return len(precs)==0 or len(precs&beforeKeys)!=0
    if isNone(notBelow): notBelow=set()
    if type(notBelow)!=set: notBelow=set(notBelow)
    # data
    #validKeys=[k for k in self.sets if len(self.getSuccs(k)&notBelow)==0]
    nextgoal=self.learned["nextgoal"]
    target=nextgoal[currentKey] if currentKey in nextgoal else {}
    rtv=[ (v,k) for k,v in target.items() if k in self.sets and len(self.getSuccs(k)&notBelow)==0 and valid_prec(k) ]
    #rtv+=[ (0,k) for k in self.sets if (not k in target) and len(self.getSuccs(k)&notBelow)==0]
    #rtv.sort(reverse=True) # leave it to caller
    return rtv
def pulls(self,currentKey,notBelow=None,beforeKeys=set(),wkeys=None):
    # Return the flattened "-pull" handler lists of candidate nodes, i.e.
    # how other nodes can pull from 'currentKey'.
    # If 'wkeys' is not None: notBelow && beforeKeys are ignored.
    # note: if wkeys is not a sublist of self.wkeys output, it might cause errors
    # (each entry must be a (weight, key) pair — vk[1] is the node name).
    if isNone(wkeys):
        wkeys=self.wkeys(currentKey=currentKey,notBelow=notBelow,beforeKeys=beforeKeys)
        wkeys.sort()
    return [ hv for vk in wkeys for hv in self.getOpts(vk[1])["-pull"] ]
def pushs(self,currentKey):
    """Return the "-push" handler lists registered on 'currentKey'
    (an empty list when the node is unknown)."""
    if currentKey in self.sets:
        return self.getOpts(currentKey)["-push"]
    return []
###########
| uccs(sel | identifier_name |
daemon.go | /*
* Copyright 2020 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package daemon
import (
"context"
"crypto/tls"
"crypto/x509"
"fmt"
"io/ioutil"
"net"
"net/http"
"os"
"runtime"
"sync"
"time"
"github.com/pkg/errors"
"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
"golang.org/x/sync/errgroup"
"golang.org/x/time/rate"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"d7y.io/dragonfly/v2/client/clientutil"
"d7y.io/dragonfly/v2/client/config"
"d7y.io/dragonfly/v2/client/daemon/gc"
"d7y.io/dragonfly/v2/client/daemon/peer"
"d7y.io/dragonfly/v2/client/daemon/proxy"
"d7y.io/dragonfly/v2/client/daemon/rpcserver"
"d7y.io/dragonfly/v2/client/daemon/storage"
"d7y.io/dragonfly/v2/client/daemon/upload"
logger "d7y.io/dragonfly/v2/internal/dflog"
"d7y.io/dragonfly/v2/internal/dfpath"
"d7y.io/dragonfly/v2/internal/idgen"
"d7y.io/dragonfly/v2/pkg/basic/dfnet"
"d7y.io/dragonfly/v2/pkg/rpc"
"d7y.io/dragonfly/v2/pkg/rpc/scheduler"
schedulerclient "d7y.io/dragonfly/v2/pkg/rpc/scheduler/client"
"d7y.io/dragonfly/v2/pkg/util/net/iputils"
)
// Daemon is the lifecycle interface of the dragonfly client daemon:
// Serve blocks while all sub-services run, Stop shuts them down, and the
// Export* accessors expose internals for embedding in custom binaries.
type Daemon interface {
	Serve() error
	Stop()
	// ExportTaskManager returns the underlay peer.TaskManager for downloading when embed dragonfly in custom binary
	ExportTaskManager() peer.TaskManager
	// ExportPeerHost returns the underlay scheduler.PeerHost for scheduling
	ExportPeerHost() *scheduler.PeerHost
}
// clientDaemon wires together all long-running subsystems of the peer daemon.
// It is the concrete implementation of Daemon (see the assertion below).
type clientDaemon struct {
	once *sync.Once // guards one-time teardown
	done chan bool  // signalled to stop background work

	schedPeerHost *scheduler.PeerHost // this peer's identity as reported to schedulers

	Option config.DaemonOption // full daemon configuration (copied in New)

	RPCManager     rpcserver.Server // download (unix) + peer (tcp) gRPC services
	UploadManager  upload.Manager   // serves pieces to other peers over HTTP
	ProxyManager   proxy.Manager    // optional HTTP(S) proxy / SNI hijack
	StorageManager storage.Manager  // local piece/task storage
	GCManager      gc.Manager       // periodic garbage collection
	PeerTaskManager peer.TaskManager  // schedules and runs peer tasks
	PieceManager    peer.PieceManager // downloads individual pieces
}
var _ Daemon = (*clientDaemon)(nil)
func New(opt *config.DaemonOption) (Daemon, error) {
host := &scheduler.PeerHost{
Uuid: idgen.UUIDString(),
Ip: opt.Host.AdvertiseIP,
RpcPort: int32(opt.Download.PeerGRPC.TCPListen.PortRange.Start),
DownPort: 0,
HostName: iputils.HostName,
SecurityDomain: opt.Host.SecurityDomain,
Location: opt.Host.Location,
Idc: opt.Host.IDC,
NetTopology: opt.Host.NetTopology,
}
var opts []grpc.DialOption
if opt.Options.Telemetry.Jaeger != "" {
opts = append(opts, grpc.WithChainUnaryInterceptor(otelgrpc.UnaryClientInterceptor()), grpc.WithChainStreamInterceptor(otelgrpc.StreamClientInterceptor()))
}
sched, err := schedulerclient.GetClientByAddr(opt.Scheduler.NetAddrs, opts...)
if err != nil {
return nil, errors.Wrap(err, "failed to get schedulers")
}
// Storage.Option.DataPath is same with Daemon DataDir
opt.Storage.DataPath = opt.DataDir
storageManager, err := storage.NewStorageManager(opt.Storage.StoreStrategy, &opt.Storage,
/* gc callback */
func(request storage.CommonTaskRequest) {
er := sched.LeaveTask(context.Background(), &scheduler.PeerTarget{
TaskId: request.TaskID,
PeerId: request.PeerID,
})
if er != nil {
logger.Errorf("step 4:leave task %s/%s, error: %v", request.TaskID, request.PeerID, er)
} else {
logger.Infof("step 4:leave task %s/%s state ok", request.TaskID, request.PeerID)
}
})
if err != nil {
return nil, err
}
pieceManager, err := peer.NewPieceManager(storageManager,
opt.Download.PieceDownloadTimeout,
peer.WithLimiter(rate.NewLimiter(opt.Download.TotalRateLimit.Limit, int(opt.Download.TotalRateLimit.Limit))),
peer.WithCalculateDigest(opt.Download.CalculateDigest),
)
if err != nil {
return nil, err
}
peerTaskManager, err := peer.NewPeerTaskManager(host, pieceManager, storageManager, sched, opt.Scheduler,
opt.Download.PerPeerRateLimit.Limit, opt.Storage.Multiplex)
if err != nil {
return nil, err
}
// TODO(jim): more server options
var downloadServerOption []grpc.ServerOption
if !opt.Download.DownloadGRPC.Security.Insecure {
tlsCredentials, err := loadGPRCTLSCredentials(opt.Download.DownloadGRPC.Security)
if err != nil {
return nil, err
}
downloadServerOption = append(downloadServerOption, grpc.Creds(tlsCredentials))
}
var peerServerOption []grpc.ServerOption
if !opt.Download.PeerGRPC.Security.Insecure {
tlsCredentials, err := loadGPRCTLSCredentials(opt.Download.PeerGRPC.Security)
if err != nil {
return nil, err
}
peerServerOption = append(peerServerOption, grpc.Creds(tlsCredentials))
}
rpcManager, err := rpcserver.New(host, peerTaskManager, storageManager, downloadServerOption, peerServerOption)
if err != nil {
return nil, err
}
var proxyManager proxy.Manager
proxyManager, err = proxy.NewProxyManager(host, peerTaskManager, opt.Proxy)
if err != nil {
return nil, err
}
uploadManager, err := upload.NewUploadManager(storageManager,
upload.WithLimiter(rate.NewLimiter(opt.Upload.RateLimit.Limit, int(opt.Upload.RateLimit.Limit))))
if err != nil {
return nil, err
}
return &clientDaemon{
once: &sync.Once{},
done: make(chan bool),
schedPeerHost: host,
Option: *opt,
RPCManager: rpcManager,
PeerTaskManager: peerTaskManager,
PieceManager: pieceManager,
ProxyManager: proxyManager,
UploadManager: uploadManager,
StorageManager: storageManager,
GCManager: gc.NewManager(opt.GCInterval.Duration),
}, nil
}
// loadGPRCTLSCredentials builds server-side gRPC transport credentials that
// enforce mutual TLS: the server certificate/key from opt.Cert/opt.Key, and
// client certificates verified against the CA in opt.CACert.
// An opt.TLSConfig provided by the caller is reused and only its certificate,
// client-auth mode and client-CA pool are overwritten.
// NOTE(review): "GPRC" in the name looks like a typo for "GRPC"; renaming
// would touch all callers, so it is only flagged here.
func loadGPRCTLSCredentials(opt config.SecurityOption) (credentials.TransportCredentials, error) {
	// Load certificate of the CA who signed client's certificate
	pemClientCA, err := ioutil.ReadFile(opt.CACert)
	if err != nil {
		return nil, err
	}
	certPool := x509.NewCertPool()
	if !certPool.AppendCertsFromPEM(pemClientCA) {
		return nil, fmt.Errorf("failed to add client CA's certificate")
	}
	// Load server's certificate and private key
	serverCert, err := tls.LoadX509KeyPair(opt.Cert, opt.Key)
	if err != nil {
		return nil, err
	}
	// Create the credentials and return it
	if opt.TLSConfig == nil {
		opt.TLSConfig = &tls.Config{
			Certificates: []tls.Certificate{serverCert},
			ClientAuth:   tls.RequireAndVerifyClientCert,
			ClientCAs:    certPool,
		}
	} else {
		// caller-supplied config: force the fields mTLS depends on
		opt.TLSConfig.Certificates = []tls.Certificate{serverCert}
		opt.TLSConfig.ClientAuth = tls.RequireAndVerifyClientCert
		opt.TLSConfig.ClientCAs = certPool
	}
	return credentials.NewTLS(opt.TLSConfig), nil
}
// prepareTCPListener opens a TCP listener described by opt, optionally inside
// another network namespace, and returns the listener plus the bound port.
// When withTLS is true and the option is not marked insecure, the listener is
// wrapped with TLS built from opt.Security (gRPC listeners pass withTLS=false
// because their TLS lives in the server options instead).
// NOTE(review): opt.TCPListen is dereferenced unconditionally for Namespace
// below, yet guarded with a nil check before ListenWithPortRange — if it can
// be nil this panics first; if it cannot, the guard can never fail and a nil
// listener with nil error is unreachable. Confirm which invariant holds.
func (*clientDaemon) prepareTCPListener(opt config.ListenOption, withTLS bool) (net.Listener, int, error) {
	if len(opt.TCPListen.Namespace) > 0 {
		// namespace switching affects the current OS thread only, so pin it
		runtime.LockOSThread()
		defer runtime.UnlockOSThread()
		recoverFunc, err := switchNetNamespace(opt.TCPListen.Namespace)
		if err != nil {
			logger.Errorf("failed to change net namespace: %v", err)
			return nil, -1, err
		}
		defer func() {
			err := recoverFunc()
			if err != nil {
				logger.Errorf("failed to recover net namespace: %v", err)
			}
		}()
	}
	var (
		ln   net.Listener
		port int
		err  error
	)
	if opt.TCPListen != nil {
		ln, port, err = rpc.ListenWithPortRange(opt.TCPListen.Listen, opt.TCPListen.PortRange.Start, opt.TCPListen.PortRange.End)
	}
	if err != nil {
		return nil, -1, err
	}
	// when use grpc, tls config is in server option
	if !withTLS || opt.Security.Insecure {
		return ln, port, nil
	}
	if opt.Security.Cert == "" || opt.Security.Key == "" {
		return nil, -1, errors.New("empty cert or key for tls")
	}
	// Create the TLS ClientOption with the CA pool and enable Client certificate validation
	if opt.Security.TLSConfig == nil {
		opt.Security.TLSConfig = &tls.Config{}
	}
	tlsConfig := opt.Security.TLSConfig
	if opt.Security.CACert != "" {
		// a CA cert enables mutual TLS: clients must present a cert signed by it
		caCert, err := ioutil.ReadFile(opt.Security.CACert)
		if err != nil {
			return nil, -1, err
		}
		caCertPool := x509.NewCertPool()
		caCertPool.AppendCertsFromPEM(caCert)
		tlsConfig.ClientCAs = caCertPool
		tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert
	}
	tlsConfig.Certificates = make([]tls.Certificate, 1)
	tlsConfig.Certificates[0], err = tls.LoadX509KeyPair(opt.Security.Cert, opt.Security.Key)
	if err != nil {
		return nil, -1, err
	}
	return tls.NewListener(ln, tlsConfig), port, nil
}
func (cd *clientDaemon) Serve() error {
cd.GCManager.Start()
// TODO remove this field, and use directly dfpath.DaemonSockPath
cd.Option.Download.DownloadGRPC.UnixListen.Socket = dfpath.DaemonSockPath
// prepare download service listen
if cd.Option.Download.DownloadGRPC.UnixListen == nil {
return errors.New("download grpc unix listen option is empty")
}
_ = os.Remove(cd.Option.Download.DownloadGRPC.UnixListen.Socket)
downloadListener, err := rpc.Listen(dfnet.NetAddr{
Type: dfnet.UNIX,
Addr: cd.Option.Download.DownloadGRPC.UnixListen.Socket,
})
if err != nil {
logger.Errorf("failed to listen for download grpc service: %v", err)
return err
}
// prepare peer service listen
if cd.Option.Download.PeerGRPC.TCPListen == nil {
return errors.New("peer grpc tcp listen option is empty")
}
peerListener, peerPort, err := cd.prepareTCPListener(cd.Option.Download.PeerGRPC, false)
if err != nil {
logger.Errorf("failed to listen for peer grpc service: %v", err)
return err
}
cd.schedPeerHost.RpcPort = int32(peerPort)
// prepare upload service listen
if cd.Option.Upload.TCPListen == nil {
return errors.New("upload tcp listen option is empty")
}
uploadListener, uploadPort, err := cd.prepareTCPListener(cd.Option.Upload.ListenOption, true)
if err != nil {
logger.Errorf("failed to listen for upload service: %v", err)
return err
}
cd.schedPeerHost.DownPort = int32(uploadPort)
g := errgroup.Group{}
// serve download grpc service
g.Go(func() error {
defer downloadListener.Close()
logger.Infof("serve download grpc at unix://%s", cd.Option.Download.DownloadGRPC.UnixListen.Socket)
if err := cd.RPCManager.ServeDownload(downloadListener); err != nil {
logger.Errorf("failed to serve for download grpc service: %v", err)
return err
}
return nil
})
// serve peer grpc service
g.Go(func() error {
defer peerListener.Close()
logger.Infof("serve peer grpc at %s://%s", peerListener.Addr().Network(), peerListener.Addr().String())
if err := cd.RPCManager.ServePeer(peerListener); err != nil {
logger.Errorf("failed to serve for peer grpc service: %v", err)
return err
}
return nil
})
if cd.ProxyManager.IsEnabled() {
// prepare proxy service listen
if cd.Option.Proxy.TCPListen == nil {
return errors.New("proxy tcp listen option is empty")
}
proxyListener, proxyPort, err := cd.prepareTCPListener(cd.Option.Proxy.ListenOption, true)
if err != nil {
logger.Errorf("failed to listen for proxy service: %v", err)
return err
}
// serve proxy service
g.Go(func() error {
defer proxyListener.Close()
logger.Infof("serve proxy at tcp://%s:%d", cd.Option.Proxy.TCPListen.Listen, proxyPort)
if err = cd.ProxyManager.Serve(proxyListener); err != nil && err != http.ErrServerClosed {
logger.Errorf("failed to serve for proxy service: %v", err)
return err
} else if err == http.ErrServerClosed {
logger.Infof("proxy service closed")
}
return nil
})
// serve proxy sni service
if cd.Option.Proxy.HijackHTTPS != nil && len(cd.Option.Proxy.HijackHTTPS.SNI) > 0 {
for _, opt := range cd.Option.Proxy.HijackHTTPS.SNI {
listener, port, err := cd.prepareTCPListener(config.ListenOption{
TCPListen: opt,
}, false)
if err != nil {
logger.Errorf("failed to listen for proxy sni service: %v", err)
return err
}
logger.Infof("serve proxy sni at tcp://%s:%d", opt.Listen, port)
g.Go(func() error {
err := cd.ProxyManager.ServeSNI(listener)
if err != nil {
logger.Errorf("failed to serve proxy sni service: %v", err)
}
return err
})
}
}
}
// serve upload service
g.Go(func() error {
defer uploadListener.Close()
logger.Infof("serve upload service at %s://%s", uploadListener.Addr().Network(), uploadListener.Addr().String())
if err := cd.UploadManager.Serve(uploadListener); err != nil && err != http.ErrServerClosed {
logger.Errorf("failed to serve for upload service: %v", err)
return err
} else if err == http.ErrServerClosed {
logger.Infof("upload service closed")
}
return nil
})
if cd.Option.AliveTime.Duration > 0 {
g.Go(func() error {
select {
case <-time.After(cd.Option.AliveTime.Duration):
var keepalives = []clientutil.KeepAlive{
cd.StorageManager,
cd.RPCManager,
}
var keep bool
for _, keepalive := range keepalives {
if keepalive.Alive(cd.Option.AliveTime.Duration) {
keep = true
}
}
if !keep {
cd.Stop()
logger.Infof("alive time reached, stop daemon")
}
case <-cd.done:
logger.Infof("peer host done, stop watch alive time")
}
return nil
})
}
werr := g.Wait()
cd.Stop()
return werr
}
func (cd *clientDaemon) Stop() {
cd.once.Do(func() {
close(cd.done)
cd.GCManager.Stop()
cd.RPCManager.Stop()
cd.UploadManager.Stop()
if cd.ProxyManager.IsEnabled() {
cd.ProxyManager.Stop()
}
if !cd.Option.KeepStorage {
logger.Infof("keep storage disabled")
cd.StorageManager.CleanUp()
}
})
}
func (cd *clientDaemon) ExportTaskManager() peer.TaskManager {
return cd.PeerTaskManager
}
func (cd *clientDaemon) ExportPeerHost() *scheduler.PeerHost {
return cd.schedPeerHost
}
| {
return ln, port, err
} | conditional_block |
daemon.go | /*
* Copyright 2020 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package daemon
import (
"context"
"crypto/tls"
"crypto/x509"
"fmt"
"io/ioutil"
"net"
"net/http"
"os"
"runtime"
"sync"
"time"
"github.com/pkg/errors"
"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
"golang.org/x/sync/errgroup"
"golang.org/x/time/rate"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"d7y.io/dragonfly/v2/client/clientutil"
"d7y.io/dragonfly/v2/client/config"
"d7y.io/dragonfly/v2/client/daemon/gc"
"d7y.io/dragonfly/v2/client/daemon/peer"
"d7y.io/dragonfly/v2/client/daemon/proxy"
"d7y.io/dragonfly/v2/client/daemon/rpcserver"
"d7y.io/dragonfly/v2/client/daemon/storage"
"d7y.io/dragonfly/v2/client/daemon/upload"
logger "d7y.io/dragonfly/v2/internal/dflog"
"d7y.io/dragonfly/v2/internal/dfpath"
"d7y.io/dragonfly/v2/internal/idgen"
"d7y.io/dragonfly/v2/pkg/basic/dfnet"
"d7y.io/dragonfly/v2/pkg/rpc"
"d7y.io/dragonfly/v2/pkg/rpc/scheduler"
schedulerclient "d7y.io/dragonfly/v2/pkg/rpc/scheduler/client"
"d7y.io/dragonfly/v2/pkg/util/net/iputils"
)
type Daemon interface {
Serve() error
Stop()
// ExportTaskManager returns the underlay peer.TaskManager for downloading when embed dragonfly in custom binary
ExportTaskManager() peer.TaskManager
// ExportPeerHost returns the underlay scheduler.PeerHost for scheduling
ExportPeerHost() *scheduler.PeerHost
}
type clientDaemon struct {
once *sync.Once
done chan bool
schedPeerHost *scheduler.PeerHost
Option config.DaemonOption
RPCManager rpcserver.Server
UploadManager upload.Manager
ProxyManager proxy.Manager
StorageManager storage.Manager
GCManager gc.Manager
PeerTaskManager peer.TaskManager
PieceManager peer.PieceManager
}
var _ Daemon = (*clientDaemon)(nil)
func New(opt *config.DaemonOption) (Daemon, error) {
host := &scheduler.PeerHost{
Uuid: idgen.UUIDString(),
Ip: opt.Host.AdvertiseIP,
RpcPort: int32(opt.Download.PeerGRPC.TCPListen.PortRange.Start),
DownPort: 0,
HostName: iputils.HostName,
SecurityDomain: opt.Host.SecurityDomain,
Location: opt.Host.Location,
Idc: opt.Host.IDC,
NetTopology: opt.Host.NetTopology,
}
var opts []grpc.DialOption
if opt.Options.Telemetry.Jaeger != "" {
opts = append(opts, grpc.WithChainUnaryInterceptor(otelgrpc.UnaryClientInterceptor()), grpc.WithChainStreamInterceptor(otelgrpc.StreamClientInterceptor()))
}
sched, err := schedulerclient.GetClientByAddr(opt.Scheduler.NetAddrs, opts...)
if err != nil {
return nil, errors.Wrap(err, "failed to get schedulers")
}
// Storage.Option.DataPath is same with Daemon DataDir
opt.Storage.DataPath = opt.DataDir
storageManager, err := storage.NewStorageManager(opt.Storage.StoreStrategy, &opt.Storage,
/* gc callback */
func(request storage.CommonTaskRequest) {
er := sched.LeaveTask(context.Background(), &scheduler.PeerTarget{
TaskId: request.TaskID,
PeerId: request.PeerID,
})
if er != nil {
logger.Errorf("step 4:leave task %s/%s, error: %v", request.TaskID, request.PeerID, er)
} else {
logger.Infof("step 4:leave task %s/%s state ok", request.TaskID, request.PeerID)
}
})
if err != nil {
return nil, err
}
pieceManager, err := peer.NewPieceManager(storageManager,
opt.Download.PieceDownloadTimeout,
peer.WithLimiter(rate.NewLimiter(opt.Download.TotalRateLimit.Limit, int(opt.Download.TotalRateLimit.Limit))),
peer.WithCalculateDigest(opt.Download.CalculateDigest),
)
if err != nil {
return nil, err
}
peerTaskManager, err := peer.NewPeerTaskManager(host, pieceManager, storageManager, sched, opt.Scheduler,
opt.Download.PerPeerRateLimit.Limit, opt.Storage.Multiplex)
if err != nil {
return nil, err
}
// TODO(jim): more server options
var downloadServerOption []grpc.ServerOption
if !opt.Download.DownloadGRPC.Security.Insecure {
tlsCredentials, err := loadGPRCTLSCredentials(opt.Download.DownloadGRPC.Security)
if err != nil {
return nil, err
}
downloadServerOption = append(downloadServerOption, grpc.Creds(tlsCredentials))
}
var peerServerOption []grpc.ServerOption
if !opt.Download.PeerGRPC.Security.Insecure {
tlsCredentials, err := loadGPRCTLSCredentials(opt.Download.PeerGRPC.Security)
if err != nil {
return nil, err
}
peerServerOption = append(peerServerOption, grpc.Creds(tlsCredentials))
}
rpcManager, err := rpcserver.New(host, peerTaskManager, storageManager, downloadServerOption, peerServerOption)
if err != nil {
return nil, err
}
var proxyManager proxy.Manager
proxyManager, err = proxy.NewProxyManager(host, peerTaskManager, opt.Proxy)
if err != nil {
return nil, err
}
uploadManager, err := upload.NewUploadManager(storageManager,
upload.WithLimiter(rate.NewLimiter(opt.Upload.RateLimit.Limit, int(opt.Upload.RateLimit.Limit))))
if err != nil {
return nil, err
}
return &clientDaemon{
once: &sync.Once{},
done: make(chan bool),
schedPeerHost: host,
Option: *opt,
RPCManager: rpcManager,
PeerTaskManager: peerTaskManager,
PieceManager: pieceManager,
ProxyManager: proxyManager,
UploadManager: uploadManager,
StorageManager: storageManager,
GCManager: gc.NewManager(opt.GCInterval.Duration),
}, nil
}
func loadGPRCTLSCredentials(opt config.SecurityOption) (credentials.TransportCredentials, error) {
// Load certificate of the CA who signed client's certificate
pemClientCA, err := ioutil.ReadFile(opt.CACert)
if err != nil {
return nil, err
}
certPool := x509.NewCertPool()
if !certPool.AppendCertsFromPEM(pemClientCA) {
return nil, fmt.Errorf("failed to add client CA's certificate")
}
// Load server's certificate and private key
serverCert, err := tls.LoadX509KeyPair(opt.Cert, opt.Key)
if err != nil {
return nil, err
}
// Create the credentials and return it
if opt.TLSConfig == nil {
opt.TLSConfig = &tls.Config{
Certificates: []tls.Certificate{serverCert},
ClientAuth: tls.RequireAndVerifyClientCert,
ClientCAs: certPool,
}
} else {
opt.TLSConfig.Certificates = []tls.Certificate{serverCert}
opt.TLSConfig.ClientAuth = tls.RequireAndVerifyClientCert
opt.TLSConfig.ClientCAs = certPool
}
return credentials.NewTLS(opt.TLSConfig), nil
}
func (*clientDaemon) prepareTCPListener(opt config.ListenOption, withTLS bool) (net.Listener, int, error) {
if len(opt.TCPListen.Namespace) > 0 {
runtime.LockOSThread()
defer runtime.UnlockOSThread()
recoverFunc, err := switchNetNamespace(opt.TCPListen.Namespace)
if err != nil {
logger.Errorf("failed to change net namespace: %v", err)
return nil, -1, err
}
defer func() {
err := recoverFunc()
if err != nil {
logger.Errorf("failed to recover net namespace: %v", err)
}
}()
}
var (
ln net.Listener
port int
err error
)
if opt.TCPListen != nil {
ln, port, err = rpc.ListenWithPortRange(opt.TCPListen.Listen, opt.TCPListen.PortRange.Start, opt.TCPListen.PortRange.End)
}
if err != nil {
return nil, -1, err
}
// when use grpc, tls config is in server option
if !withTLS || opt.Security.Insecure {
return ln, port, err
}
if opt.Security.Cert == "" || opt.Security.Key == "" {
return nil, -1, errors.New("empty cert or key for tls")
}
// Create the TLS ClientOption with the CA pool and enable Client certificate validation
if opt.Security.TLSConfig == nil {
opt.Security.TLSConfig = &tls.Config{}
}
tlsConfig := opt.Security.TLSConfig
if opt.Security.CACert != "" {
caCert, err := ioutil.ReadFile(opt.Security.CACert)
if err != nil {
return nil, -1, err
}
caCertPool := x509.NewCertPool()
caCertPool.AppendCertsFromPEM(caCert)
tlsConfig.ClientCAs = caCertPool
tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert
}
tlsConfig.Certificates = make([]tls.Certificate, 1)
tlsConfig.Certificates[0], err = tls.LoadX509KeyPair(opt.Security.Cert, opt.Security.Key)
if err != nil {
return nil, -1, err
}
return tls.NewListener(ln, tlsConfig), port, nil
}
func (cd *clientDaemon) Serve() error {
cd.GCManager.Start()
// TODO remove this field, and use directly dfpath.DaemonSockPath
cd.Option.Download.DownloadGRPC.UnixListen.Socket = dfpath.DaemonSockPath
// prepare download service listen
if cd.Option.Download.DownloadGRPC.UnixListen == nil {
return errors.New("download grpc unix listen option is empty")
}
_ = os.Remove(cd.Option.Download.DownloadGRPC.UnixListen.Socket)
downloadListener, err := rpc.Listen(dfnet.NetAddr{
Type: dfnet.UNIX,
Addr: cd.Option.Download.DownloadGRPC.UnixListen.Socket,
})
if err != nil {
logger.Errorf("failed to listen for download grpc service: %v", err)
return err
}
// prepare peer service listen
if cd.Option.Download.PeerGRPC.TCPListen == nil {
return errors.New("peer grpc tcp listen option is empty")
}
peerListener, peerPort, err := cd.prepareTCPListener(cd.Option.Download.PeerGRPC, false)
if err != nil {
logger.Errorf("failed to listen for peer grpc service: %v", err)
return err
}
cd.schedPeerHost.RpcPort = int32(peerPort)
// prepare upload service listen
if cd.Option.Upload.TCPListen == nil {
return errors.New("upload tcp listen option is empty")
}
uploadListener, uploadPort, err := cd.prepareTCPListener(cd.Option.Upload.ListenOption, true)
if err != nil {
logger.Errorf("failed to listen for upload service: %v", err)
return err
}
cd.schedPeerHost.DownPort = int32(uploadPort)
g := errgroup.Group{}
// serve download grpc service
g.Go(func() error {
defer downloadListener.Close()
logger.Infof("serve download grpc at unix://%s", cd.Option.Download.DownloadGRPC.UnixListen.Socket)
if err := cd.RPCManager.ServeDownload(downloadListener); err != nil {
logger.Errorf("failed to serve for download grpc service: %v", err)
return err
}
return nil
})
// serve peer grpc service
g.Go(func() error {
defer peerListener.Close()
logger.Infof("serve peer grpc at %s://%s", peerListener.Addr().Network(), peerListener.Addr().String())
if err := cd.RPCManager.ServePeer(peerListener); err != nil {
logger.Errorf("failed to serve for peer grpc service: %v", err)
return err
}
return nil
})
if cd.ProxyManager.IsEnabled() {
// prepare proxy service listen
if cd.Option.Proxy.TCPListen == nil {
return errors.New("proxy tcp listen option is empty")
}
proxyListener, proxyPort, err := cd.prepareTCPListener(cd.Option.Proxy.ListenOption, true)
if err != nil {
logger.Errorf("failed to listen for proxy service: %v", err)
return err
}
// serve proxy service
g.Go(func() error {
defer proxyListener.Close()
logger.Infof("serve proxy at tcp://%s:%d", cd.Option.Proxy.TCPListen.Listen, proxyPort)
if err = cd.ProxyManager.Serve(proxyListener); err != nil && err != http.ErrServerClosed {
logger.Errorf("failed to serve for proxy service: %v", err)
return err
} else if err == http.ErrServerClosed {
logger.Infof("proxy service closed")
}
return nil
})
// serve proxy sni service
if cd.Option.Proxy.HijackHTTPS != nil && len(cd.Option.Proxy.HijackHTTPS.SNI) > 0 {
for _, opt := range cd.Option.Proxy.HijackHTTPS.SNI {
listener, port, err := cd.prepareTCPListener(config.ListenOption{
TCPListen: opt,
}, false)
if err != nil {
logger.Errorf("failed to listen for proxy sni service: %v", err)
return err
}
logger.Infof("serve proxy sni at tcp://%s:%d", opt.Listen, port)
g.Go(func() error {
err := cd.ProxyManager.ServeSNI(listener)
if err != nil {
logger.Errorf("failed to serve proxy sni service: %v", err)
}
return err
})
}
}
}
// serve upload service
g.Go(func() error {
defer uploadListener.Close()
logger.Infof("serve upload service at %s://%s", uploadListener.Addr().Network(), uploadListener.Addr().String())
if err := cd.UploadManager.Serve(uploadListener); err != nil && err != http.ErrServerClosed {
logger.Errorf("failed to serve for upload service: %v", err)
return err
} else if err == http.ErrServerClosed {
logger.Infof("upload service closed")
}
return nil
})
if cd.Option.AliveTime.Duration > 0 {
g.Go(func() error {
select {
case <-time.After(cd.Option.AliveTime.Duration):
var keepalives = []clientutil.KeepAlive{
cd.StorageManager,
cd.RPCManager,
}
var keep bool
for _, keepalive := range keepalives {
if keepalive.Alive(cd.Option.AliveTime.Duration) {
keep = true
}
}
if !keep {
cd.Stop()
logger.Infof("alive time reached, stop daemon")
}
case <-cd.done:
logger.Infof("peer host done, stop watch alive time")
}
return nil
})
}
werr := g.Wait()
cd.Stop()
return werr
}
func (cd *clientDaemon) Stop() {
cd.once.Do(func() {
close(cd.done)
cd.GCManager.Stop()
cd.RPCManager.Stop()
cd.UploadManager.Stop()
if cd.ProxyManager.IsEnabled() {
cd.ProxyManager.Stop()
}
if !cd.Option.KeepStorage {
logger.Infof("keep storage disabled")
cd.StorageManager.CleanUp()
}
})
}
func (cd *clientDaemon) ExportTaskManager() peer.TaskManager |
func (cd *clientDaemon) ExportPeerHost() *scheduler.PeerHost {
return cd.schedPeerHost
}
| {
return cd.PeerTaskManager
} | identifier_body |
daemon.go | /*
* Copyright 2020 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package daemon
import (
"context"
"crypto/tls"
"crypto/x509"
"fmt"
"io/ioutil"
"net"
"net/http"
"os"
"runtime"
"sync"
"time"
"github.com/pkg/errors"
"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
"golang.org/x/sync/errgroup"
"golang.org/x/time/rate"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"d7y.io/dragonfly/v2/client/clientutil"
"d7y.io/dragonfly/v2/client/config"
"d7y.io/dragonfly/v2/client/daemon/gc"
"d7y.io/dragonfly/v2/client/daemon/peer"
"d7y.io/dragonfly/v2/client/daemon/proxy"
"d7y.io/dragonfly/v2/client/daemon/rpcserver"
"d7y.io/dragonfly/v2/client/daemon/storage"
"d7y.io/dragonfly/v2/client/daemon/upload"
logger "d7y.io/dragonfly/v2/internal/dflog"
"d7y.io/dragonfly/v2/internal/dfpath"
"d7y.io/dragonfly/v2/internal/idgen"
"d7y.io/dragonfly/v2/pkg/basic/dfnet"
"d7y.io/dragonfly/v2/pkg/rpc"
"d7y.io/dragonfly/v2/pkg/rpc/scheduler"
schedulerclient "d7y.io/dragonfly/v2/pkg/rpc/scheduler/client"
"d7y.io/dragonfly/v2/pkg/util/net/iputils"
)
type Daemon interface {
Serve() error
Stop()
// ExportTaskManager returns the underlay peer.TaskManager for downloading when embed dragonfly in custom binary
ExportTaskManager() peer.TaskManager
// ExportPeerHost returns the underlay scheduler.PeerHost for scheduling
ExportPeerHost() *scheduler.PeerHost
}
type clientDaemon struct {
once *sync.Once
done chan bool
schedPeerHost *scheduler.PeerHost
Option config.DaemonOption
RPCManager rpcserver.Server
UploadManager upload.Manager
ProxyManager proxy.Manager
StorageManager storage.Manager
GCManager gc.Manager
PeerTaskManager peer.TaskManager
PieceManager peer.PieceManager
}
var _ Daemon = (*clientDaemon)(nil)
func New(opt *config.DaemonOption) (Daemon, error) {
host := &scheduler.PeerHost{
Uuid: idgen.UUIDString(),
Ip: opt.Host.AdvertiseIP,
RpcPort: int32(opt.Download.PeerGRPC.TCPListen.PortRange.Start),
DownPort: 0,
HostName: iputils.HostName,
SecurityDomain: opt.Host.SecurityDomain,
Location: opt.Host.Location,
Idc: opt.Host.IDC,
NetTopology: opt.Host.NetTopology,
}
var opts []grpc.DialOption
if opt.Options.Telemetry.Jaeger != "" {
opts = append(opts, grpc.WithChainUnaryInterceptor(otelgrpc.UnaryClientInterceptor()), grpc.WithChainStreamInterceptor(otelgrpc.StreamClientInterceptor()))
}
sched, err := schedulerclient.GetClientByAddr(opt.Scheduler.NetAddrs, opts...)
if err != nil {
return nil, errors.Wrap(err, "failed to get schedulers")
}
// Storage.Option.DataPath is same with Daemon DataDir
opt.Storage.DataPath = opt.DataDir
storageManager, err := storage.NewStorageManager(opt.Storage.StoreStrategy, &opt.Storage,
/* gc callback */
func(request storage.CommonTaskRequest) {
er := sched.LeaveTask(context.Background(), &scheduler.PeerTarget{
TaskId: request.TaskID,
PeerId: request.PeerID,
})
if er != nil {
logger.Errorf("step 4:leave task %s/%s, error: %v", request.TaskID, request.PeerID, er)
} else {
logger.Infof("step 4:leave task %s/%s state ok", request.TaskID, request.PeerID)
}
})
if err != nil {
return nil, err
}
pieceManager, err := peer.NewPieceManager(storageManager,
opt.Download.PieceDownloadTimeout,
peer.WithLimiter(rate.NewLimiter(opt.Download.TotalRateLimit.Limit, int(opt.Download.TotalRateLimit.Limit))),
peer.WithCalculateDigest(opt.Download.CalculateDigest),
)
if err != nil {
return nil, err
}
peerTaskManager, err := peer.NewPeerTaskManager(host, pieceManager, storageManager, sched, opt.Scheduler,
opt.Download.PerPeerRateLimit.Limit, opt.Storage.Multiplex)
if err != nil {
return nil, err
}
// TODO(jim): more server options
var downloadServerOption []grpc.ServerOption
if !opt.Download.DownloadGRPC.Security.Insecure {
tlsCredentials, err := loadGPRCTLSCredentials(opt.Download.DownloadGRPC.Security)
if err != nil {
return nil, err
}
downloadServerOption = append(downloadServerOption, grpc.Creds(tlsCredentials))
}
var peerServerOption []grpc.ServerOption
if !opt.Download.PeerGRPC.Security.Insecure {
tlsCredentials, err := loadGPRCTLSCredentials(opt.Download.PeerGRPC.Security)
if err != nil {
return nil, err
}
peerServerOption = append(peerServerOption, grpc.Creds(tlsCredentials))
}
rpcManager, err := rpcserver.New(host, peerTaskManager, storageManager, downloadServerOption, peerServerOption)
if err != nil {
return nil, err
}
var proxyManager proxy.Manager
proxyManager, err = proxy.NewProxyManager(host, peerTaskManager, opt.Proxy)
if err != nil {
return nil, err
}
uploadManager, err := upload.NewUploadManager(storageManager,
upload.WithLimiter(rate.NewLimiter(opt.Upload.RateLimit.Limit, int(opt.Upload.RateLimit.Limit))))
if err != nil {
return nil, err
}
return &clientDaemon{ | schedPeerHost: host,
Option: *opt,
RPCManager: rpcManager,
PeerTaskManager: peerTaskManager,
PieceManager: pieceManager,
ProxyManager: proxyManager,
UploadManager: uploadManager,
StorageManager: storageManager,
GCManager: gc.NewManager(opt.GCInterval.Duration),
}, nil
}
func loadGPRCTLSCredentials(opt config.SecurityOption) (credentials.TransportCredentials, error) {
// Load certificate of the CA who signed client's certificate
pemClientCA, err := ioutil.ReadFile(opt.CACert)
if err != nil {
return nil, err
}
certPool := x509.NewCertPool()
if !certPool.AppendCertsFromPEM(pemClientCA) {
return nil, fmt.Errorf("failed to add client CA's certificate")
}
// Load server's certificate and private key
serverCert, err := tls.LoadX509KeyPair(opt.Cert, opt.Key)
if err != nil {
return nil, err
}
// Create the credentials and return it
if opt.TLSConfig == nil {
opt.TLSConfig = &tls.Config{
Certificates: []tls.Certificate{serverCert},
ClientAuth: tls.RequireAndVerifyClientCert,
ClientCAs: certPool,
}
} else {
opt.TLSConfig.Certificates = []tls.Certificate{serverCert}
opt.TLSConfig.ClientAuth = tls.RequireAndVerifyClientCert
opt.TLSConfig.ClientCAs = certPool
}
return credentials.NewTLS(opt.TLSConfig), nil
}
func (*clientDaemon) prepareTCPListener(opt config.ListenOption, withTLS bool) (net.Listener, int, error) {
if len(opt.TCPListen.Namespace) > 0 {
runtime.LockOSThread()
defer runtime.UnlockOSThread()
recoverFunc, err := switchNetNamespace(opt.TCPListen.Namespace)
if err != nil {
logger.Errorf("failed to change net namespace: %v", err)
return nil, -1, err
}
defer func() {
err := recoverFunc()
if err != nil {
logger.Errorf("failed to recover net namespace: %v", err)
}
}()
}
var (
ln net.Listener
port int
err error
)
if opt.TCPListen != nil {
ln, port, err = rpc.ListenWithPortRange(opt.TCPListen.Listen, opt.TCPListen.PortRange.Start, opt.TCPListen.PortRange.End)
}
if err != nil {
return nil, -1, err
}
// when use grpc, tls config is in server option
if !withTLS || opt.Security.Insecure {
return ln, port, err
}
if opt.Security.Cert == "" || opt.Security.Key == "" {
return nil, -1, errors.New("empty cert or key for tls")
}
// Create the TLS ClientOption with the CA pool and enable Client certificate validation
if opt.Security.TLSConfig == nil {
opt.Security.TLSConfig = &tls.Config{}
}
tlsConfig := opt.Security.TLSConfig
if opt.Security.CACert != "" {
caCert, err := ioutil.ReadFile(opt.Security.CACert)
if err != nil {
return nil, -1, err
}
caCertPool := x509.NewCertPool()
caCertPool.AppendCertsFromPEM(caCert)
tlsConfig.ClientCAs = caCertPool
tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert
}
tlsConfig.Certificates = make([]tls.Certificate, 1)
tlsConfig.Certificates[0], err = tls.LoadX509KeyPair(opt.Security.Cert, opt.Security.Key)
if err != nil {
return nil, -1, err
}
return tls.NewListener(ln, tlsConfig), port, nil
}
func (cd *clientDaemon) Serve() error {
cd.GCManager.Start()
// TODO remove this field, and use directly dfpath.DaemonSockPath
cd.Option.Download.DownloadGRPC.UnixListen.Socket = dfpath.DaemonSockPath
// prepare download service listen
if cd.Option.Download.DownloadGRPC.UnixListen == nil {
return errors.New("download grpc unix listen option is empty")
}
_ = os.Remove(cd.Option.Download.DownloadGRPC.UnixListen.Socket)
downloadListener, err := rpc.Listen(dfnet.NetAddr{
Type: dfnet.UNIX,
Addr: cd.Option.Download.DownloadGRPC.UnixListen.Socket,
})
if err != nil {
logger.Errorf("failed to listen for download grpc service: %v", err)
return err
}
// prepare peer service listen
if cd.Option.Download.PeerGRPC.TCPListen == nil {
return errors.New("peer grpc tcp listen option is empty")
}
peerListener, peerPort, err := cd.prepareTCPListener(cd.Option.Download.PeerGRPC, false)
if err != nil {
logger.Errorf("failed to listen for peer grpc service: %v", err)
return err
}
cd.schedPeerHost.RpcPort = int32(peerPort)
// prepare upload service listen
if cd.Option.Upload.TCPListen == nil {
return errors.New("upload tcp listen option is empty")
}
uploadListener, uploadPort, err := cd.prepareTCPListener(cd.Option.Upload.ListenOption, true)
if err != nil {
logger.Errorf("failed to listen for upload service: %v", err)
return err
}
cd.schedPeerHost.DownPort = int32(uploadPort)
g := errgroup.Group{}
// serve download grpc service
g.Go(func() error {
defer downloadListener.Close()
logger.Infof("serve download grpc at unix://%s", cd.Option.Download.DownloadGRPC.UnixListen.Socket)
if err := cd.RPCManager.ServeDownload(downloadListener); err != nil {
logger.Errorf("failed to serve for download grpc service: %v", err)
return err
}
return nil
})
// serve peer grpc service
g.Go(func() error {
defer peerListener.Close()
logger.Infof("serve peer grpc at %s://%s", peerListener.Addr().Network(), peerListener.Addr().String())
if err := cd.RPCManager.ServePeer(peerListener); err != nil {
logger.Errorf("failed to serve for peer grpc service: %v", err)
return err
}
return nil
})
if cd.ProxyManager.IsEnabled() {
// prepare proxy service listen
if cd.Option.Proxy.TCPListen == nil {
return errors.New("proxy tcp listen option is empty")
}
proxyListener, proxyPort, err := cd.prepareTCPListener(cd.Option.Proxy.ListenOption, true)
if err != nil {
logger.Errorf("failed to listen for proxy service: %v", err)
return err
}
// serve proxy service
g.Go(func() error {
defer proxyListener.Close()
logger.Infof("serve proxy at tcp://%s:%d", cd.Option.Proxy.TCPListen.Listen, proxyPort)
if err = cd.ProxyManager.Serve(proxyListener); err != nil && err != http.ErrServerClosed {
logger.Errorf("failed to serve for proxy service: %v", err)
return err
} else if err == http.ErrServerClosed {
logger.Infof("proxy service closed")
}
return nil
})
// serve proxy sni service
if cd.Option.Proxy.HijackHTTPS != nil && len(cd.Option.Proxy.HijackHTTPS.SNI) > 0 {
for _, opt := range cd.Option.Proxy.HijackHTTPS.SNI {
listener, port, err := cd.prepareTCPListener(config.ListenOption{
TCPListen: opt,
}, false)
if err != nil {
logger.Errorf("failed to listen for proxy sni service: %v", err)
return err
}
logger.Infof("serve proxy sni at tcp://%s:%d", opt.Listen, port)
g.Go(func() error {
err := cd.ProxyManager.ServeSNI(listener)
if err != nil {
logger.Errorf("failed to serve proxy sni service: %v", err)
}
return err
})
}
}
}
// serve upload service
g.Go(func() error {
defer uploadListener.Close()
logger.Infof("serve upload service at %s://%s", uploadListener.Addr().Network(), uploadListener.Addr().String())
if err := cd.UploadManager.Serve(uploadListener); err != nil && err != http.ErrServerClosed {
logger.Errorf("failed to serve for upload service: %v", err)
return err
} else if err == http.ErrServerClosed {
logger.Infof("upload service closed")
}
return nil
})
if cd.Option.AliveTime.Duration > 0 {
g.Go(func() error {
select {
case <-time.After(cd.Option.AliveTime.Duration):
var keepalives = []clientutil.KeepAlive{
cd.StorageManager,
cd.RPCManager,
}
var keep bool
for _, keepalive := range keepalives {
if keepalive.Alive(cd.Option.AliveTime.Duration) {
keep = true
}
}
if !keep {
cd.Stop()
logger.Infof("alive time reached, stop daemon")
}
case <-cd.done:
logger.Infof("peer host done, stop watch alive time")
}
return nil
})
}
werr := g.Wait()
cd.Stop()
return werr
}
func (cd *clientDaemon) Stop() {
cd.once.Do(func() {
close(cd.done)
cd.GCManager.Stop()
cd.RPCManager.Stop()
cd.UploadManager.Stop()
if cd.ProxyManager.IsEnabled() {
cd.ProxyManager.Stop()
}
if !cd.Option.KeepStorage {
logger.Infof("keep storage disabled")
cd.StorageManager.CleanUp()
}
})
}
func (cd *clientDaemon) ExportTaskManager() peer.TaskManager {
return cd.PeerTaskManager
}
func (cd *clientDaemon) ExportPeerHost() *scheduler.PeerHost {
return cd.schedPeerHost
} | once: &sync.Once{},
done: make(chan bool), | random_line_split |
daemon.go | /*
* Copyright 2020 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package daemon
import (
"context"
"crypto/tls"
"crypto/x509"
"fmt"
"io/ioutil"
"net"
"net/http"
"os"
"runtime"
"sync"
"time"
"github.com/pkg/errors"
"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
"golang.org/x/sync/errgroup"
"golang.org/x/time/rate"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"d7y.io/dragonfly/v2/client/clientutil"
"d7y.io/dragonfly/v2/client/config"
"d7y.io/dragonfly/v2/client/daemon/gc"
"d7y.io/dragonfly/v2/client/daemon/peer"
"d7y.io/dragonfly/v2/client/daemon/proxy"
"d7y.io/dragonfly/v2/client/daemon/rpcserver"
"d7y.io/dragonfly/v2/client/daemon/storage"
"d7y.io/dragonfly/v2/client/daemon/upload"
logger "d7y.io/dragonfly/v2/internal/dflog"
"d7y.io/dragonfly/v2/internal/dfpath"
"d7y.io/dragonfly/v2/internal/idgen"
"d7y.io/dragonfly/v2/pkg/basic/dfnet"
"d7y.io/dragonfly/v2/pkg/rpc"
"d7y.io/dragonfly/v2/pkg/rpc/scheduler"
schedulerclient "d7y.io/dragonfly/v2/pkg/rpc/scheduler/client"
"d7y.io/dragonfly/v2/pkg/util/net/iputils"
)
type Daemon interface {
Serve() error
Stop()
// ExportTaskManager returns the underlay peer.TaskManager for downloading when embed dragonfly in custom binary
ExportTaskManager() peer.TaskManager
// ExportPeerHost returns the underlay scheduler.PeerHost for scheduling
ExportPeerHost() *scheduler.PeerHost
}
type clientDaemon struct {
once *sync.Once
done chan bool
schedPeerHost *scheduler.PeerHost
Option config.DaemonOption
RPCManager rpcserver.Server
UploadManager upload.Manager
ProxyManager proxy.Manager
StorageManager storage.Manager
GCManager gc.Manager
PeerTaskManager peer.TaskManager
PieceManager peer.PieceManager
}
var _ Daemon = (*clientDaemon)(nil)
// New constructs a client Daemon from opt: it builds the scheduler peer-host
// descriptor, connects to the schedulers, then wires storage, piece/peer task
// managers, the rpc/proxy/upload servers and the GC manager together.
// It does not start any service; call Serve for that.
func New(opt *config.DaemonOption) (Daemon, error) {
	host := &scheduler.PeerHost{
		Uuid: idgen.UUIDString(),
		Ip:   opt.Host.AdvertiseIP,
		// RpcPort/DownPort are provisional; Serve replaces them with the
		// ports actually bound by the peer and upload listeners.
		RpcPort:        int32(opt.Download.PeerGRPC.TCPListen.PortRange.Start),
		DownPort:       0,
		HostName:       iputils.HostName,
		SecurityDomain: opt.Host.SecurityDomain,
		Location:       opt.Host.Location,
		Idc:            opt.Host.IDC,
		NetTopology:    opt.Host.NetTopology,
	}
	var opts []grpc.DialOption
	if opt.Options.Telemetry.Jaeger != "" {
		// Tracing interceptors are only attached when Jaeger is configured.
		opts = append(opts, grpc.WithChainUnaryInterceptor(otelgrpc.UnaryClientInterceptor()), grpc.WithChainStreamInterceptor(otelgrpc.StreamClientInterceptor()))
	}
	sched, err := schedulerclient.GetClientByAddr(opt.Scheduler.NetAddrs, opts...)
	if err != nil {
		return nil, errors.Wrap(err, "failed to get schedulers")
	}
	// Storage.Option.DataPath is same with Daemon DataDir
	opt.Storage.DataPath = opt.DataDir
	storageManager, err := storage.NewStorageManager(opt.Storage.StoreStrategy, &opt.Storage,
		/* gc callback */
		// When storage GC evicts a task, notify the scheduler that this
		// peer has left it (best-effort; failures are only logged).
		func(request storage.CommonTaskRequest) {
			er := sched.LeaveTask(context.Background(), &scheduler.PeerTarget{
				TaskId: request.TaskID,
				PeerId: request.PeerID,
			})
			if er != nil {
				logger.Errorf("step 4:leave task %s/%s, error: %v", request.TaskID, request.PeerID, er)
			} else {
				logger.Infof("step 4:leave task %s/%s state ok", request.TaskID, request.PeerID)
			}
		})
	if err != nil {
		return nil, err
	}
	pieceManager, err := peer.NewPieceManager(storageManager,
		opt.Download.PieceDownloadTimeout,
		// TotalRateLimit caps aggregate download bandwidth across all tasks.
		peer.WithLimiter(rate.NewLimiter(opt.Download.TotalRateLimit.Limit, int(opt.Download.TotalRateLimit.Limit))),
		peer.WithCalculateDigest(opt.Download.CalculateDigest),
	)
	if err != nil {
		return nil, err
	}
	peerTaskManager, err := peer.NewPeerTaskManager(host, pieceManager, storageManager, sched, opt.Scheduler,
		opt.Download.PerPeerRateLimit.Limit, opt.Storage.Multiplex)
	if err != nil {
		return nil, err
	}
	// TODO(jim): more server options
	var downloadServerOption []grpc.ServerOption
	if !opt.Download.DownloadGRPC.Security.Insecure {
		tlsCredentials, err := loadGPRCTLSCredentials(opt.Download.DownloadGRPC.Security)
		if err != nil {
			return nil, err
		}
		downloadServerOption = append(downloadServerOption, grpc.Creds(tlsCredentials))
	}
	var peerServerOption []grpc.ServerOption
	if !opt.Download.PeerGRPC.Security.Insecure {
		tlsCredentials, err := loadGPRCTLSCredentials(opt.Download.PeerGRPC.Security)
		if err != nil {
			return nil, err
		}
		peerServerOption = append(peerServerOption, grpc.Creds(tlsCredentials))
	}
	rpcManager, err := rpcserver.New(host, peerTaskManager, storageManager, downloadServerOption, peerServerOption)
	if err != nil {
		return nil, err
	}
	var proxyManager proxy.Manager
	proxyManager, err = proxy.NewProxyManager(host, peerTaskManager, opt.Proxy)
	if err != nil {
		return nil, err
	}
	uploadManager, err := upload.NewUploadManager(storageManager,
		upload.WithLimiter(rate.NewLimiter(opt.Upload.RateLimit.Limit, int(opt.Upload.RateLimit.Limit))))
	if err != nil {
		return nil, err
	}
	return &clientDaemon{
		once:            &sync.Once{},
		done:            make(chan bool),
		schedPeerHost:   host,
		Option:          *opt,
		RPCManager:      rpcManager,
		PeerTaskManager: peerTaskManager,
		PieceManager:    pieceManager,
		ProxyManager:    proxyManager,
		UploadManager:   uploadManager,
		StorageManager:  storageManager,
		GCManager:       gc.NewManager(opt.GCInterval.Duration),
	}, nil
}
func | (opt config.SecurityOption) (credentials.TransportCredentials, error) {
// Load certificate of the CA who signed client's certificate
pemClientCA, err := ioutil.ReadFile(opt.CACert)
if err != nil {
return nil, err
}
certPool := x509.NewCertPool()
if !certPool.AppendCertsFromPEM(pemClientCA) {
return nil, fmt.Errorf("failed to add client CA's certificate")
}
// Load server's certificate and private key
serverCert, err := tls.LoadX509KeyPair(opt.Cert, opt.Key)
if err != nil {
return nil, err
}
// Create the credentials and return it
if opt.TLSConfig == nil {
opt.TLSConfig = &tls.Config{
Certificates: []tls.Certificate{serverCert},
ClientAuth: tls.RequireAndVerifyClientCert,
ClientCAs: certPool,
}
} else {
opt.TLSConfig.Certificates = []tls.Certificate{serverCert}
opt.TLSConfig.ClientAuth = tls.RequireAndVerifyClientCert
opt.TLSConfig.ClientCAs = certPool
}
return credentials.NewTLS(opt.TLSConfig), nil
}
// prepareTCPListener opens a TCP listener described by opt, optionally inside
// another network namespace, and optionally wraps it with TLS (plain-TCP
// services only; gRPC services carry TLS in their server options instead).
// Returns the listener and the port actually bound.
func (*clientDaemon) prepareTCPListener(opt config.ListenOption, withTLS bool) (net.Listener, int, error) {
	// Fix: the original silently returned (nil, 0, nil) when TCPListen was
	// unset, handing callers a nil listener with no error. Fail fast instead.
	if opt.TCPListen == nil {
		return nil, -1, errors.New("empty tcp listen option")
	}
	if len(opt.TCPListen.Namespace) > 0 {
		// Switching net namespaces affects the current OS thread only, so pin
		// this goroutine to its thread until the namespace is restored.
		runtime.LockOSThread()
		defer runtime.UnlockOSThread()
		recoverFunc, err := switchNetNamespace(opt.TCPListen.Namespace)
		if err != nil {
			logger.Errorf("failed to change net namespace: %v", err)
			return nil, -1, err
		}
		defer func() {
			if err := recoverFunc(); err != nil {
				logger.Errorf("failed to recover net namespace: %v", err)
			}
		}()
	}
	ln, port, err := rpc.ListenWithPortRange(opt.TCPListen.Listen, opt.TCPListen.PortRange.Start, opt.TCPListen.PortRange.End)
	if err != nil {
		return nil, -1, err
	}
	// when use grpc, tls config is in server option
	if !withTLS || opt.Security.Insecure {
		return ln, port, nil
	}
	if opt.Security.Cert == "" || opt.Security.Key == "" {
		_ = ln.Close() // fix: don't leak the raw listener on the error path
		return nil, -1, errors.New("empty cert or key for tls")
	}
	// Create the TLS ClientOption with the CA pool and enable Client certificate validation
	if opt.Security.TLSConfig == nil {
		opt.Security.TLSConfig = &tls.Config{}
	}
	tlsConfig := opt.Security.TLSConfig
	if opt.Security.CACert != "" {
		caCert, err := ioutil.ReadFile(opt.Security.CACert)
		if err != nil {
			_ = ln.Close() // fix: don't leak the raw listener on the error path
			return nil, -1, err
		}
		caCertPool := x509.NewCertPool()
		caCertPool.AppendCertsFromPEM(caCert)
		tlsConfig.ClientCAs = caCertPool
		tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert
	}
	tlsConfig.Certificates = make([]tls.Certificate, 1)
	tlsConfig.Certificates[0], err = tls.LoadX509KeyPair(opt.Security.Cert, opt.Security.Key)
	if err != nil {
		_ = ln.Close() // fix: don't leak the raw listener on the error path
		return nil, -1, err
	}
	return tls.NewListener(ln, tlsConfig), port, nil
}
// Serve wires up every configured listener and runs all daemon services
// concurrently in an errgroup: download gRPC on a unix socket, peer gRPC on
// TCP, the optional proxy (+SNI listeners) and the upload HTTP service.
// It blocks until any service errors or the alive-time watchdog stops the
// daemon, then performs shutdown via Stop.
func (cd *clientDaemon) Serve() error {
	cd.GCManager.Start()
	// prepare download service listen
	// Fix: validate UnixListen BEFORE dereferencing it. The original assigned
	// UnixListen.Socket first, so a nil option would panic and the check below
	// was dead code.
	if cd.Option.Download.DownloadGRPC.UnixListen == nil {
		return errors.New("download grpc unix listen option is empty")
	}
	// TODO remove this field, and use directly dfpath.DaemonSockPath
	cd.Option.Download.DownloadGRPC.UnixListen.Socket = dfpath.DaemonSockPath
	// Remove a stale socket file left by a previous run; best-effort.
	_ = os.Remove(cd.Option.Download.DownloadGRPC.UnixListen.Socket)
	downloadListener, err := rpc.Listen(dfnet.NetAddr{
		Type: dfnet.UNIX,
		Addr: cd.Option.Download.DownloadGRPC.UnixListen.Socket,
	})
	if err != nil {
		logger.Errorf("failed to listen for download grpc service: %v", err)
		return err
	}
	// prepare peer service listen
	if cd.Option.Download.PeerGRPC.TCPListen == nil {
		_ = downloadListener.Close() // fix: don't leak earlier listeners on error
		return errors.New("peer grpc tcp listen option is empty")
	}
	peerListener, peerPort, err := cd.prepareTCPListener(cd.Option.Download.PeerGRPC, false)
	if err != nil {
		logger.Errorf("failed to listen for peer grpc service: %v", err)
		_ = downloadListener.Close() // fix: don't leak earlier listeners on error
		return err
	}
	// Publish the port actually bound (PortRange may pick any free port).
	cd.schedPeerHost.RpcPort = int32(peerPort)
	// prepare upload service listen
	if cd.Option.Upload.TCPListen == nil {
		_ = downloadListener.Close()
		_ = peerListener.Close()
		return errors.New("upload tcp listen option is empty")
	}
	uploadListener, uploadPort, err := cd.prepareTCPListener(cd.Option.Upload.ListenOption, true)
	if err != nil {
		logger.Errorf("failed to listen for upload service: %v", err)
		_ = downloadListener.Close()
		_ = peerListener.Close()
		return err
	}
	cd.schedPeerHost.DownPort = int32(uploadPort)
	g := errgroup.Group{}
	// serve download grpc service
	g.Go(func() error {
		defer downloadListener.Close()
		logger.Infof("serve download grpc at unix://%s", cd.Option.Download.DownloadGRPC.UnixListen.Socket)
		if err := cd.RPCManager.ServeDownload(downloadListener); err != nil {
			logger.Errorf("failed to serve for download grpc service: %v", err)
			return err
		}
		return nil
	})
	// serve peer grpc service
	g.Go(func() error {
		defer peerListener.Close()
		logger.Infof("serve peer grpc at %s://%s", peerListener.Addr().Network(), peerListener.Addr().String())
		if err := cd.RPCManager.ServePeer(peerListener); err != nil {
			logger.Errorf("failed to serve for peer grpc service: %v", err)
			return err
		}
		return nil
	})
	if cd.ProxyManager.IsEnabled() {
		// prepare proxy service listen
		if cd.Option.Proxy.TCPListen == nil {
			return errors.New("proxy tcp listen option is empty")
		}
		proxyListener, proxyPort, err := cd.prepareTCPListener(cd.Option.Proxy.ListenOption, true)
		if err != nil {
			logger.Errorf("failed to listen for proxy service: %v", err)
			return err
		}
		// serve proxy service
		g.Go(func() error {
			defer proxyListener.Close()
			logger.Infof("serve proxy at tcp://%s:%d", cd.Option.Proxy.TCPListen.Listen, proxyPort)
			// Fix: use a goroutine-local err (":=") instead of assigning the
			// captured outer variable from inside this goroutine.
			if err := cd.ProxyManager.Serve(proxyListener); err != nil && err != http.ErrServerClosed {
				logger.Errorf("failed to serve for proxy service: %v", err)
				return err
			} else if err == http.ErrServerClosed {
				logger.Infof("proxy service closed")
			}
			return nil
		})
		// serve proxy sni service
		if cd.Option.Proxy.HijackHTTPS != nil && len(cd.Option.Proxy.HijackHTTPS.SNI) > 0 {
			for _, opt := range cd.Option.Proxy.HijackHTTPS.SNI {
				listener, port, err := cd.prepareTCPListener(config.ListenOption{
					TCPListen: opt,
				}, false)
				if err != nil {
					logger.Errorf("failed to listen for proxy sni service: %v", err)
					return err
				}
				logger.Infof("serve proxy sni at tcp://%s:%d", opt.Listen, port)
				g.Go(func() error {
					err := cd.ProxyManager.ServeSNI(listener)
					if err != nil {
						logger.Errorf("failed to serve proxy sni service: %v", err)
					}
					return err
				})
			}
		}
	}
	// serve upload service
	g.Go(func() error {
		defer uploadListener.Close()
		logger.Infof("serve upload service at %s://%s", uploadListener.Addr().Network(), uploadListener.Addr().String())
		if err := cd.UploadManager.Serve(uploadListener); err != nil && err != http.ErrServerClosed {
			logger.Errorf("failed to serve for upload service: %v", err)
			return err
		} else if err == http.ErrServerClosed {
			logger.Infof("upload service closed")
		}
		return nil
	})
	if cd.Option.AliveTime.Duration > 0 {
		// Alive-time watchdog: after AliveTime elapses, stop the daemon unless
		// some component reports recent activity.
		g.Go(func() error {
			select {
			case <-time.After(cd.Option.AliveTime.Duration):
				var keepalives = []clientutil.KeepAlive{
					cd.StorageManager,
					cd.RPCManager,
				}
				var keep bool
				for _, keepalive := range keepalives {
					if keepalive.Alive(cd.Option.AliveTime.Duration) {
						keep = true
					}
				}
				if !keep {
					cd.Stop()
					logger.Infof("alive time reached, stop daemon")
				}
			case <-cd.done:
				logger.Infof("peer host done, stop watch alive time")
			}
			return nil
		})
	}
	werr := g.Wait()
	cd.Stop()
	return werr
}
// Stop shuts the daemon down exactly once (guarded by sync.Once): it closes
// the done channel to release watchers, stops GC, RPC and upload managers,
// stops the proxy when enabled, and finally cleans up storage unless the
// KeepStorage option was set.
func (cd *clientDaemon) Stop() {
	cd.once.Do(func() {
		close(cd.done)
		cd.GCManager.Stop()
		cd.RPCManager.Stop()
		cd.UploadManager.Stop()
		if cd.ProxyManager.IsEnabled() {
			cd.ProxyManager.Stop()
		}
		if !cd.Option.KeepStorage {
			logger.Infof("keep storage disabled")
			cd.StorageManager.CleanUp()
		}
	})
}
// ExportTaskManager returns the daemon's peer.TaskManager so embedders can
// trigger downloads directly.
func (cd *clientDaemon) ExportTaskManager() peer.TaskManager {
	return cd.PeerTaskManager
}
// ExportPeerHost returns the scheduler.PeerHost descriptor for this daemon;
// note Serve mutates its RpcPort/DownPort once listeners are bound.
func (cd *clientDaemon) ExportPeerHost() *scheduler.PeerHost {
	return cd.schedPeerHost
}
| loadGPRCTLSCredentials | identifier_name |
main.rs | extern crate bible_reference_rs;
extern crate chrono;
extern crate futures;
extern crate hyper;
extern crate postgres;
extern crate serde;
extern crate url;
#[macro_use]
extern crate serde_json;
mod models;
use bible_reference_rs::*;
use futures::future::{Future, FutureResult};
use hyper::service::{NewService, Service};
use hyper::{header, Body, Method, Request, Response, Server, StatusCode};
use models::*;
use postgres::{Connection, TlsMode};
use serde_json::Value;
use std::env;
use std::fmt;
const DEFAULT_URL: &'static str = "postgres://docker:docker@localhost:5432/bible";
/// Errors surfaced by the search service.
#[derive(Debug)]
enum ServiceError {
    /// The request carried no usable query input (missing or empty `q`).
    NoInput,
    /// Opening a PostgreSQL connection failed; payload is the driver error text.
    NoDatabaseConnection(String),
}
impl std::error::Error for ServiceError {}

impl fmt::Display for ServiceError {
    /// Render a human-readable description of the error.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Build the message first, then emit it with a single write.
        let message = match self {
            ServiceError::NoInput => String::from("No input provided"),
            ServiceError::NoDatabaseConnection(details) => format!("DB: {}", details),
        };
        write!(f, "{}", message)
    }
}
fn connect_db() -> Result<Connection, ServiceError> {
let url = env::var("DATABASE_URL").unwrap_or(String::from(DEFAULT_URL));
println!("Connecting: {}", &url);
match Connection::connect(url, TlsMode::None) {
Ok(connection) => Ok(connection),
Err(error) => {
println!("Connection: {}", error);
Err(ServiceError::NoDatabaseConnection(format!("{}", error)))
}
}
}
/// Fetch every verse of book `id` for each chapter in `chapters`, with each
/// row serialized to JSON by PostgreSQL's `row_to_json`.
/// NOTE(review): panics (`unwrap`) on query failure, matching the file's
/// other DB helpers.
fn verses_by_chapters(db: &Connection, id: i16, chapters: Vec<i16>) -> Vec<Value> {
    db.query(
        "SELECT row_to_json(rst_bible)
FROM rst_bible
WHERE book_id = $1 AND chapter = ANY($2)",
        &[&id, &chapters],
    ).unwrap()
    .iter()
    .map(|row| row.get(0))
    .collect()
}
/// Fetch the listed `verses` of one `chapter` in book `id`, each row
/// serialized to JSON by PostgreSQL's `row_to_json`.
/// NOTE(review): panics (`unwrap`) on query failure, matching the file's
/// other DB helpers.
fn verses_in_chapter_by_verses(
    db: &Connection,
    id: i16,
    chapter: i16,
    verses: Vec<i16>,
) -> Vec<Value> {
    db.query(
        "SELECT row_to_json(rst_bible)
FROM rst_bible
WHERE book_id = $1 AND chapter = $2 AND verse = ANY($3)",
        &[&id, &chapter, &verses],
    ).unwrap()
    .iter()
    .map(|row| row.get(0))
    .collect()
}
/// Resolve parsed `BibleReference`s to verse texts.
/// Each reference's book is looked up case-insensitively by name, alternate
/// name or abbreviation; unknown books are silently dropped. For every
/// matched book the referenced chapters/verses are fetched and returned as
/// `{ "reference": { "title", "alt" }, "texts": [...] }` objects.
fn fetch_results(db: &Connection, refs: Vec<BibleReference>) -> Vec<Value> {
    if refs.is_empty() {
        return vec![];
    }
    // Fix: prepare the book-lookup statement once. The original re-prepared
    // the identical statement inside the loop, once per reference.
    let statement = db
        .prepare(
            "SELECT id, book as title, alt, abbr
FROM rst_bible_books
WHERE book ~* $1 OR alt ~* $1 OR abbr ~* $1
LIMIT 1",
        ).unwrap();
    let valid: Vec<BookRef> = refs
        .iter()
        .flat_map(|r| {
            let rows = statement.query(&[&r.book]).unwrap();
            if rows.is_empty() {
                None
            } else {
                let row = rows.iter().next().unwrap();
                Some(BookRef {
                    id: row.get(0),
                    name: row.get(1),
                    alt: row.get(2),
                    locations: r.locations.clone(),
                })
            }
        }).collect();
    valid
        .iter()
        .map(|reference| {
            let book_id = reference.id;
            let book_title = &reference.name;
            let book_alt = &reference.alt;
            let texts = reference
                .locations
                .iter()
                .flat_map(
                    move |location| match (&location.chapters, &location.verses) {
                        // Fetch verses by chapters
                        (chapters, None) => {
                            let ch = chapters.into_iter().map(|v| *v as i16).collect();
                            Some(verses_by_chapters(&db, book_id, ch))
                        }
                        // Fetch verses by chapter and verses
                        (chapters, Some(verses)) if chapters.len() == 1 => {
                            let ch = chapters[0] as i16;
                            let vs = verses.into_iter().map(|v| *v as i16).collect();
                            Some(verses_in_chapter_by_verses(&db, book_id, ch, vs))
                        }
                        // Verse lists spanning multiple chapters are unsupported.
                        _ => None,
                    },
                ).collect::<Vec<_>>();
            json!({ "reference": { "title": book_title, "alt": book_alt }, "texts": texts })
        }).collect::<Vec<_>>()
}
/// Return the raw verse-reference strings scheduled for today's date
/// (UTC month/day) from the `rst_bible_daily` table.
fn fetch_daily_verses(db: &Connection) -> Vec<String> {
    use chrono::{Datelike, Utc};
    let now = Utc::now();
    let month = now.month() as i16;
    let day = now.day() as i16;
    db.query(
        "SELECT verses
FROM rst_bible_daily
WHERE month = $1 AND day = $2",
        &[&month, &day],
    ).unwrap()
    .iter()
    .map(|row| row.get(0))
    .collect()
}
/// Extract the `q` parameter from a URL query string.
/// Resolves to the value when present and non-empty, otherwise to
/// `ServiceError::NoInput`.
fn parse_query(query: Option<&str>) -> FutureResult<String, ServiceError> {
    use std::collections::HashMap;
    let raw = query.unwrap_or("");
    let args = url::form_urlencoded::parse(raw.as_bytes())
        .into_owned()
        .collect::<HashMap<String, String>>();
    // Empty strings count as "no input".
    let value = args
        .get("q")
        .map(|v| v.to_string())
        .filter(|s| !s.is_empty());
    match value {
        Some(v) => futures::future::ok(v),
        None => futures::future::err(ServiceError::NoInput),
    }
}
/// A paginated full-text search request parsed from the URL query string.
#[derive(Debug)]
struct SearchPaginate {
    // The search text (`q` parameter), guaranteed non-empty by the parser.
    text: String,
    // 1-based page number (`p` parameter); defaults to 1.
    page: i16,
}
/// Extract search text (`q`) and page number (`p`) from a URL query string.
/// `p` defaults to 1 when absent or unparsable; a missing/empty `q` resolves
/// to `ServiceError::NoInput`.
fn parse_query_paginate(query: Option<&str>) -> FutureResult<SearchPaginate, ServiceError> {
    use std::collections::HashMap;
    let raw = query.unwrap_or("");
    let args = url::form_urlencoded::parse(raw.as_bytes())
        .into_owned()
        .collect::<HashMap<String, String>>();
    let page = args
        .get("p")
        .map(|v| v.parse::<i16>().unwrap_or(1))
        .unwrap_or(1);
    // Fix: the original matched on the tuple (q, p) where the second pattern
    // always bound, so only q actually decided the outcome. Match q alone.
    match args
        .get("q")
        .map(|v| v.to_string())
        .filter(|s| !s.is_empty())
    {
        Some(text) => futures::future::ok(SearchPaginate { text, page }),
        None => futures::future::err(ServiceError::NoInput),
    }
}
// Verse Of the Day
/// Build the `/daily` response body: parse each stored daily reference,
/// resolve it to verse texts, drop references that resolve to nothing, and
/// wrap everything as `{ "results": [...] }`.
fn vod_response_body(db: &Connection) -> Body {
    let results = fetch_daily_verses(&db)
        .into_iter()
        .flat_map(|daily| {
            let refs = parse(daily.as_str());
            let results = fetch_results(&db, refs);
            // Skip daily entries whose references matched nothing.
            if results.is_empty() {
                None
            } else {
                Some(results)
            }
        }).flatten()
        .collect::<Vec<_>>();
    Body::from(json!({ "results": results }).to_string())
}
/// Resolve a reference-style query (e.g. "Gen 1:1") to a JSON response body
/// of the form `{ "results": [...] }`.
fn search_results(query: String, db: &Connection) -> FutureResult<Body, ServiceError> {
    let references = parse(query.as_str());
    let payload = json!({ "results": fetch_results(&db, references) }).to_string();
    futures::future::ok(Body::from(payload))
}
/// Case-insensitive full-text search over verse texts with 10-per-page
/// pagination. Returns (result rows as one JSON array, total page count).
fn fetch_search_results(text: String, page: i16, db: &Connection) -> (Vec<Value>, i64) {
    // Clamp nonsensical page numbers to the first page.
    let page = if page <= 0 { 1 } else { page };
    let count_rows = db
        .query(
            "SELECT COUNT(book_id)
FROM rst_bible
WHERE text ~* $1",
            &[&text],
        ).unwrap();
    // COUNT always yields one row; keep the defensive empty check anyway.
    if count_rows.is_empty() {
        return (vec![json!([])], 0);
    }
    let total: i64 = count_rows.get(0).get("count");
    let offset = ((page - 1) * 10) as i64;
    // Fix: LIMIT/OFFSET pagination without ORDER BY returns rows in an
    // unspecified order, so pages could overlap or skip rows between
    // requests. Order deterministically by book/chapter/verse.
    let rows = db
        .query(
            "SELECT row_to_json(t)
FROM (
SELECT v.book_id, v.text, v.chapter, v.verse, b.book as book_name, b.alt as book_alt from rst_bible v
LEFT OUTER JOIN rst_bible_books b on (v.book_id = b.id)
WHERE text ~* $1
ORDER BY v.book_id, v.chapter, v.verse
) t
LIMIT 10
OFFSET $2",
            &[&text, &offset],
        ).unwrap();
    let results = rows.into_iter().map(|r| r.get(0)).collect::<Vec<Value>>();
    (vec![json!(results)], (total as f64 / 10_f64).ceil() as i64)
}
/// Run a paginated text search and build the `/search` response body with a
/// `meta` block (echoed text, page, total pages) and the `results` array.
fn search_text(query: SearchPaginate, db: &Connection) -> FutureResult<Body, ServiceError> {
    let (values, total_pages) = fetch_search_results(query.text.clone(), query.page, db);
    let body = json!({
        "meta": { "text": query.text, "page": query.page, "total": total_pages },
        "results": values
    }).to_string();
    futures::future::ok(Body::from(body))
}
fn success_response(body: Body) -> FutureResult<Response<Body>, ServiceError> |
/// Zero-sized HTTP service type; hyper instantiates one per connection
/// through the `NewService` impl below.
struct SearchService;

impl NewService for SearchService {
    type ReqBody = Body;
    type ResBody = Body;
    type Error = ServiceError;
    type Service = SearchService;
    type Future = Box<Future<Item = Self::Service, Error = Self::Error> + Send>;
    type InitError = ServiceError;
    // The service is stateless, so construction can never fail: just hand
    // back a fresh SearchService.
    fn new_service(&self) -> Self::Future {
        Box::new(futures::future::ok(SearchService))
    }
}
impl Service for SearchService {
    type ReqBody = Body;
    type ResBody = Body;
    type Error = ServiceError;
    type Future = Box<Future<Item = Response<Self::ResBody>, Error = Self::Error> + Send>;
    /// Route a request: GET `/refs` (reference lookup), GET `/search`
    /// (paginated text search) and GET `/daily` (verse of the day); anything
    /// else is 404. A fresh DB connection is opened per request; connection
    /// failure short-circuits to 500.
    fn call(&mut self, request: Request<Self::ReqBody>) -> Self::Future {
        let db_connection = match connect_db() {
            Ok(db) => db,
            Err(_) => {
                return Box::new(futures::future::ok(
                    Response::builder()
                        .status(StatusCode::INTERNAL_SERVER_ERROR)
                        .body(Body::empty())
                        .unwrap(),
                ))
            }
        };
        match (request.method(), request.uri().path()) {
            (&Method::GET, "/refs") => Box::new(
                parse_query(request.uri().query())
                    .and_then(move |query| search_results(query, &db_connection))
                    .and_then(success_response)
                    // Any failure (missing/empty `q`, DB error) maps to 400.
                    .or_else(|_| {
                        futures::future::ok(
                            Response::builder()
                                .status(StatusCode::BAD_REQUEST)
                                .body(Body::empty())
                                .unwrap(),
                        )
                    }),
            ),
            (&Method::GET, "/search") => Box::new(
                parse_query_paginate(request.uri().query())
                    .and_then(move |query| search_text(query, &db_connection))
                    .and_then(success_response)
                    // Any failure (missing/empty `q`, DB error) maps to 400.
                    .or_else(|_| {
                        futures::future::ok(
                            Response::builder()
                                .status(StatusCode::BAD_REQUEST)
                                .body(Body::empty())
                                .unwrap(),
                        )
                    }),
            ),
            (&Method::GET, "/daily") => {
                Box::new(success_response(vod_response_body(&db_connection)))
            }
            _ => Box::new(futures::future::ok(
                Response::builder()
                    .status(StatusCode::NOT_FOUND)
                    .body(Body::empty())
                    .unwrap(),
            )),
        }
    }
}
/// Entry point: bind the HTTP server on 127.0.0.1:8080 and run it on the
/// hyper runtime until shutdown; server errors are printed to stderr.
fn main() {
    let addr = "127.0.0.1:8080".parse().unwrap();
    let server = Server::bind(&addr)
        .serve(SearchService)
        .map_err(|e| eprintln!("Server error: {}", e));
    println!("Listening {}", addr);
    hyper::rt::run(server);
}
#[cfg(test)]
mod tests {
    use super::*;
    // Integration test: requires a reachable database (DATABASE_URL or the
    // docker default) preloaded with the RST bible tables.
    #[test]
    fn test_fetch_chapter() {
        let db = connect_db().unwrap();
        let refs = parse("Быт 1");
        let verses = fetch_results(&db, refs);
        // One reference ("Genesis 1") should yield exactly one result bundle.
        assert_eq!(verses.len(), 1);
    }
}
| {
futures::future::ok(
Response::builder()
.header(header::CONTENT_TYPE, "application/json")
.header(header::ACCESS_CONTROL_ALLOW_ORIGIN, "*")
.header(header::ACCESS_CONTROL_ALLOW_METHODS, "GET")
.header(header::ACCESS_CONTROL_ALLOW_HEADERS, "Content-Type")
.body(body)
.unwrap(),
)
} | identifier_body |
main.rs | extern crate bible_reference_rs;
extern crate chrono;
extern crate futures;
extern crate hyper;
extern crate postgres;
extern crate serde;
extern crate url;
#[macro_use]
extern crate serde_json;
mod models;
use bible_reference_rs::*;
use futures::future::{Future, FutureResult};
use hyper::service::{NewService, Service};
use hyper::{header, Body, Method, Request, Response, Server, StatusCode};
use models::*;
use postgres::{Connection, TlsMode};
use serde_json::Value;
use std::env;
use std::fmt;
const DEFAULT_URL: &'static str = "postgres://docker:docker@localhost:5432/bible";
#[derive(Debug)]
enum ServiceError {
NoInput,
NoDatabaseConnection(String),
}
impl std::error::Error for ServiceError {}
impl fmt::Display for ServiceError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
ServiceError::NoInput => write!(f, "No input provided"),
ServiceError::NoDatabaseConnection(details) => write!(f, "DB: {}", details),
}
}
}
fn connect_db() -> Result<Connection, ServiceError> {
let url = env::var("DATABASE_URL").unwrap_or(String::from(DEFAULT_URL));
println!("Connecting: {}", &url);
match Connection::connect(url, TlsMode::None) {
Ok(connection) => Ok(connection),
Err(error) => {
println!("Connection: {}", error);
Err(ServiceError::NoDatabaseConnection(format!("{}", error)))
}
}
}
fn verses_by_chapters(db: &Connection, id: i16, chapters: Vec<i16>) -> Vec<Value> {
db.query(
"SELECT row_to_json(rst_bible)
FROM rst_bible
WHERE book_id = $1 AND chapter = ANY($2)",
&[&id, &chapters],
).unwrap()
.iter()
.map(|row| row.get(0))
.collect()
}
fn verses_in_chapter_by_verses(
db: &Connection,
id: i16,
chapter: i16,
verses: Vec<i16>,
) -> Vec<Value> {
db.query(
"SELECT row_to_json(rst_bible)
FROM rst_bible
WHERE book_id = $1 AND chapter = $2 AND verse = ANY($3)",
&[&id, &chapter, &verses],
).unwrap()
.iter()
.map(|row| row.get(0))
.collect()
}
fn fetch_results(db: &Connection, refs: Vec<BibleReference>) -> Vec<Value> {
if refs.is_empty() {
return vec![];
}
let valid: Vec<BookRef> = refs
.iter()
.flat_map(|r| {
let statement = db
.prepare(
"SELECT id, book as title, alt, abbr
FROM rst_bible_books
WHERE book ~* $1 OR alt ~* $1 OR abbr ~* $1
LIMIT 1",
).unwrap();
let rows = statement.query(&[&r.book]).unwrap();
if rows.is_empty() {
None
} else {
let row = rows.iter().next().unwrap();
Some(BookRef {
id: row.get(0),
name: row.get(1),
alt: row.get(2),
locations: r.locations.clone(),
})
}
}).collect();
valid
.iter()
.map(|reference| {
let book_id = reference.id;
let book_title = &reference.name;
let book_alt = &reference.alt;
let texts = reference
.locations
.iter()
.flat_map(
move |location| match (&location.chapters, &location.verses) {
// Fetch verses by chapters
(chapters, None) => {
let ch = chapters.into_iter().map(|v| *v as i16).collect();
Some(verses_by_chapters(&db, book_id, ch))
}
// Fetch verses by chapter and verses
(chapters, Some(verses)) if chapters.len() == 1 => {
let ch = chapters[0] as i16;
let vs = verses.into_iter().map(|v| *v as i16).collect();
Some(verses_in_chapter_by_verses(&db, book_id, ch, vs))
}
_ => None,
},
).collect::<Vec<_>>();
json!({ "reference": { "title": book_title, "alt": book_alt }, "texts": texts })
}).collect::<Vec<_>>()
}
fn fetch_daily_verses(db: &Connection) -> Vec<String> {
use chrono::{Datelike, Utc};
let now = Utc::now();
let month = now.month() as i16;
let day = now.day() as i16;
db.query(
"SELECT verses
FROM rst_bible_daily
WHERE month = $1 AND day = $2",
&[&month, &day],
).unwrap()
.iter()
.map(|row| row.get(0))
.collect()
}
fn parse_query(query: Option<&str>) -> FutureResult<String, ServiceError> {
use std::collections::HashMap;
let query = &query.unwrap_or("");
let args = url::form_urlencoded::parse(&query.as_bytes())
.into_owned()
.collect::<HashMap<String, String>>();
match args
.get("q")
.map(|v| v.to_string())
.filter(|s| !s.is_empty())
{
Some(value) => futures::future::ok(value),
None => futures::future::err(ServiceError::NoInput),
}
}
#[derive(Debug)]
struct SearchPaginate {
text: String,
page: i16,
}
fn parse_query_paginate(query: Option<&str>) -> FutureResult<SearchPaginate, ServiceError> {
use std::collections::HashMap;
let query = &query.unwrap_or("");
let args = url::form_urlencoded::parse(&query.as_bytes())
.into_owned()
.collect::<HashMap<String, String>>();
let q = args
.get("q")
.map(|v| v.to_string())
.filter(|s| !s.is_empty());
let p = args
.get("p")
.map(|v| v.parse::<i16>().unwrap_or(1))
.unwrap_or(1);
match (q, p) {
(Some(q), p) => futures::future::ok(SearchPaginate { text: q, page: p }),
_ => futures::future::err(ServiceError::NoInput),
}
}
// Verse Of the Day
fn vod_response_body(db: &Connection) -> Body {
let results = fetch_daily_verses(&db)
.into_iter()
.flat_map(|daily| {
let refs = parse(daily.as_str());
let results = fetch_results(&db, refs);
if results.is_empty() {
None
} else {
Some(results)
}
}).flatten()
.collect::<Vec<_>>();
Body::from(json!({ "results": results }).to_string())
}
fn search_results(query: String, db: &Connection) -> FutureResult<Body, ServiceError> {
let refs = parse(query.as_str());
futures::future::ok(Body::from(
json!({ "results": fetch_results(&db, refs) }).to_string(),
))
}
/// Case-insensitive full-text search over verse texts with 10-per-page
/// pagination. Returns (result rows as one JSON array, total page count).
fn fetch_search_results(text: String, page: i16, db: &Connection) -> (Vec<Value>, i64) {
    // Clamp nonsensical page numbers to the first page.
    let page = if page <= 0 { 1 } else { page };
    let count_rows = db
        .query(
            "SELECT COUNT(book_id)
FROM rst_bible
WHERE text ~* $1",
            &[&text],
        ).unwrap();
    // COUNT always yields one row; keep the defensive empty check anyway.
    if count_rows.is_empty() {
        return (vec![json!([])], 0);
    }
    let total: i64 = count_rows.get(0).get("count");
    let offset = ((page - 1) * 10) as i64;
    // Fix: LIMIT/OFFSET pagination without ORDER BY returns rows in an
    // unspecified order, so pages could overlap or skip rows between
    // requests. Order deterministically by book/chapter/verse.
    let rows = db
        .query(
            "SELECT row_to_json(t)
FROM (
SELECT v.book_id, v.text, v.chapter, v.verse, b.book as book_name, b.alt as book_alt from rst_bible v
LEFT OUTER JOIN rst_bible_books b on (v.book_id = b.id)
WHERE text ~* $1
ORDER BY v.book_id, v.chapter, v.verse
) t
LIMIT 10
OFFSET $2",
            &[&text, &offset],
        ).unwrap();
    let results = rows.into_iter().map(|r| r.get(0)).collect::<Vec<Value>>();
    (vec![json!(results)], (total as f64 / 10_f64).ceil() as i64)
}
fn | (query: SearchPaginate, db: &Connection) -> FutureResult<Body, ServiceError> {
let text = &query.text;
let results = fetch_search_results(text.to_string(), query.page, db);
futures::future::ok(Body::from(
json!({
"meta": { "text": text, "page": query.page, "total": results.1 },
"results": results.0
}).to_string(),
))
}
/// Wrap a JSON body in a 200 response with permissive CORS headers for
/// GET requests.
fn success_response(body: Body) -> FutureResult<Response<Body>, ServiceError> {
    let response = Response::builder()
        .header(header::CONTENT_TYPE, "application/json")
        .header(header::ACCESS_CONTROL_ALLOW_ORIGIN, "*")
        .header(header::ACCESS_CONTROL_ALLOW_METHODS, "GET")
        .header(header::ACCESS_CONTROL_ALLOW_HEADERS, "Content-Type")
        .body(body)
        .unwrap();
    futures::future::ok(response)
}
struct SearchService;
impl NewService for SearchService {
type ReqBody = Body;
type ResBody = Body;
type Error = ServiceError;
type Service = SearchService;
type Future = Box<Future<Item = Self::Service, Error = Self::Error> + Send>;
type InitError = ServiceError;
fn new_service(&self) -> Self::Future {
Box::new(futures::future::ok(SearchService))
}
}
impl Service for SearchService {
type ReqBody = Body;
type ResBody = Body;
type Error = ServiceError;
type Future = Box<Future<Item = Response<Self::ResBody>, Error = Self::Error> + Send>;
fn call(&mut self, request: Request<Self::ReqBody>) -> Self::Future {
let db_connection = match connect_db() {
Ok(db) => db,
Err(_) => {
return Box::new(futures::future::ok(
Response::builder()
.status(StatusCode::INTERNAL_SERVER_ERROR)
.body(Body::empty())
.unwrap(),
))
}
};
match (request.method(), request.uri().path()) {
(&Method::GET, "/refs") => Box::new(
parse_query(request.uri().query())
.and_then(move |query| search_results(query, &db_connection))
.and_then(success_response)
.or_else(|_| {
futures::future::ok(
Response::builder()
.status(StatusCode::BAD_REQUEST)
.body(Body::empty())
.unwrap(),
)
}),
),
(&Method::GET, "/search") => Box::new(
parse_query_paginate(request.uri().query())
.and_then(move |query| search_text(query, &db_connection))
.and_then(success_response)
.or_else(|_| {
futures::future::ok(
Response::builder()
.status(StatusCode::BAD_REQUEST)
.body(Body::empty())
.unwrap(),
)
}),
),
(&Method::GET, "/daily") => {
Box::new(success_response(vod_response_body(&db_connection)))
}
_ => Box::new(futures::future::ok(
Response::builder()
.status(StatusCode::NOT_FOUND)
.body(Body::empty())
.unwrap(),
)),
}
}
}
fn main() {
let addr = "127.0.0.1:8080".parse().unwrap();
let server = Server::bind(&addr)
.serve(SearchService)
.map_err(|e| eprintln!("Server error: {}", e));
println!("Listening {}", addr);
hyper::rt::run(server);
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_fetch_chapter() {
let db = connect_db().unwrap();
let refs = parse("Быт 1");
let verses = fetch_results(&db, refs);
assert_eq!(verses.len(), 1);
}
}
| search_text | identifier_name |
main.rs | extern crate bible_reference_rs;
extern crate chrono;
extern crate futures;
extern crate hyper;
extern crate postgres;
extern crate serde;
extern crate url;
#[macro_use]
extern crate serde_json;
mod models;
use bible_reference_rs::*;
use futures::future::{Future, FutureResult};
use hyper::service::{NewService, Service};
use hyper::{header, Body, Method, Request, Response, Server, StatusCode};
use models::*;
use postgres::{Connection, TlsMode};
use serde_json::Value;
use std::env;
use std::fmt;
const DEFAULT_URL: &'static str = "postgres://docker:docker@localhost:5432/bible";
#[derive(Debug)]
enum ServiceError {
NoInput,
NoDatabaseConnection(String),
}
impl std::error::Error for ServiceError {}
impl fmt::Display for ServiceError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
ServiceError::NoInput => write!(f, "No input provided"),
ServiceError::NoDatabaseConnection(details) => write!(f, "DB: {}", details),
}
}
}
fn connect_db() -> Result<Connection, ServiceError> {
let url = env::var("DATABASE_URL").unwrap_or(String::from(DEFAULT_URL));
println!("Connecting: {}", &url);
match Connection::connect(url, TlsMode::None) {
Ok(connection) => Ok(connection),
Err(error) => {
println!("Connection: {}", error);
Err(ServiceError::NoDatabaseConnection(format!("{}", error)))
}
}
}
fn verses_by_chapters(db: &Connection, id: i16, chapters: Vec<i16>) -> Vec<Value> {
db.query(
"SELECT row_to_json(rst_bible)
FROM rst_bible
WHERE book_id = $1 AND chapter = ANY($2)",
&[&id, &chapters],
).unwrap()
.iter()
.map(|row| row.get(0))
.collect()
}
fn verses_in_chapter_by_verses(
db: &Connection,
id: i16,
chapter: i16,
verses: Vec<i16>,
) -> Vec<Value> {
db.query(
"SELECT row_to_json(rst_bible)
FROM rst_bible
WHERE book_id = $1 AND chapter = $2 AND verse = ANY($3)",
&[&id, &chapter, &verses],
).unwrap()
.iter()
.map(|row| row.get(0))
.collect()
}
fn fetch_results(db: &Connection, refs: Vec<BibleReference>) -> Vec<Value> {
if refs.is_empty() {
return vec![];
}
let valid: Vec<BookRef> = refs
.iter()
.flat_map(|r| {
let statement = db
.prepare(
"SELECT id, book as title, alt, abbr
FROM rst_bible_books
WHERE book ~* $1 OR alt ~* $1 OR abbr ~* $1
LIMIT 1",
).unwrap();
let rows = statement.query(&[&r.book]).unwrap();
if rows.is_empty() {
None
} else {
let row = rows.iter().next().unwrap();
Some(BookRef {
id: row.get(0),
name: row.get(1),
alt: row.get(2),
locations: r.locations.clone(),
})
}
}).collect();
valid
.iter()
.map(|reference| {
let book_id = reference.id;
let book_title = &reference.name;
let book_alt = &reference.alt;
let texts = reference
.locations
.iter()
.flat_map(
move |location| match (&location.chapters, &location.verses) {
// Fetch verses by chapters
(chapters, None) => {
let ch = chapters.into_iter().map(|v| *v as i16).collect();
Some(verses_by_chapters(&db, book_id, ch))
}
// Fetch verses by chapter and verses
(chapters, Some(verses)) if chapters.len() == 1 => {
let ch = chapters[0] as i16;
let vs = verses.into_iter().map(|v| *v as i16).collect();
Some(verses_in_chapter_by_verses(&db, book_id, ch, vs))
}
_ => None,
},
).collect::<Vec<_>>();
json!({ "reference": { "title": book_title, "alt": book_alt }, "texts": texts })
}).collect::<Vec<_>>()
}
fn fetch_daily_verses(db: &Connection) -> Vec<String> {
use chrono::{Datelike, Utc};
let now = Utc::now();
let month = now.month() as i16;
let day = now.day() as i16;
db.query(
"SELECT verses
FROM rst_bible_daily
WHERE month = $1 AND day = $2",
&[&month, &day],
).unwrap()
.iter()
.map(|row| row.get(0))
.collect()
}
fn parse_query(query: Option<&str>) -> FutureResult<String, ServiceError> {
use std::collections::HashMap;
let query = &query.unwrap_or("");
let args = url::form_urlencoded::parse(&query.as_bytes())
.into_owned()
.collect::<HashMap<String, String>>();
match args
.get("q")
.map(|v| v.to_string())
.filter(|s| !s.is_empty())
{
Some(value) => futures::future::ok(value),
None => futures::future::err(ServiceError::NoInput),
}
}
#[derive(Debug)]
struct SearchPaginate {
text: String,
page: i16,
}
fn parse_query_paginate(query: Option<&str>) -> FutureResult<SearchPaginate, ServiceError> {
use std::collections::HashMap;
let query = &query.unwrap_or("");
let args = url::form_urlencoded::parse(&query.as_bytes())
.into_owned()
.collect::<HashMap<String, String>>();
let q = args
.get("q")
.map(|v| v.to_string())
.filter(|s| !s.is_empty());
let p = args
.get("p")
.map(|v| v.parse::<i16>().unwrap_or(1))
.unwrap_or(1);
match (q, p) {
(Some(q), p) => futures::future::ok(SearchPaginate { text: q, page: p }),
_ => futures::future::err(ServiceError::NoInput),
}
}
// Verse Of the Day
fn vod_response_body(db: &Connection) -> Body {
let results = fetch_daily_verses(&db)
.into_iter()
.flat_map(|daily| {
let refs = parse(daily.as_str());
let results = fetch_results(&db, refs);
if results.is_empty() {
None
} else {
Some(results)
}
}).flatten()
.collect::<Vec<_>>();
Body::from(json!({ "results": results }).to_string())
}
fn search_results(query: String, db: &Connection) -> FutureResult<Body, ServiceError> {
let refs = parse(query.as_str());
futures::future::ok(Body::from(
json!({ "results": fetch_results(&db, refs) }).to_string(),
))
}
fn fetch_search_results(text: String, page: i16, db: &Connection) -> (Vec<Value>, i64) {
let page = if page <= 0 { 1 } else { page };
let count_rows = db
.query(
"SELECT COUNT(book_id)
FROM rst_bible
WHERE text ~* $1",
&[&text],
).unwrap();
let mut total: i64 = 0;
if count_rows.is_empty() {
return (vec![json!([])], total);
} else {
total = count_rows.get(0).get("count");
}
let offset = ((page - 1) * 10) as i64;
let rows = db
.query(
"SELECT row_to_json(t)
FROM (
SELECT v.book_id, v.text, v.chapter, v.verse, b.book as book_name, b.alt as book_alt from rst_bible v
LEFT OUTER JOIN rst_bible_books b on (v.book_id = b.id)
WHERE text ~* $1
) t
LIMIT 10
OFFSET $2",
&[&text, &offset],
).unwrap();
let results = rows.into_iter().map(|r| r.get(0)).collect::<Vec<Value>>();
(vec![json!(results)], (total as f64 / 10_f64).ceil() as i64)
}
fn search_text(query: SearchPaginate, db: &Connection) -> FutureResult<Body, ServiceError> {
let text = &query.text;
let results = fetch_search_results(text.to_string(), query.page, db);
futures::future::ok(Body::from(
json!({
"meta": { "text": text, "page": query.page, "total": results.1 },
"results": results.0
}).to_string(),
))
}
fn success_response(body: Body) -> FutureResult<Response<Body>, ServiceError> {
futures::future::ok(
Response::builder()
.header(header::CONTENT_TYPE, "application/json")
.header(header::ACCESS_CONTROL_ALLOW_ORIGIN, "*") | .unwrap(),
)
}
struct SearchService;
impl NewService for SearchService {
type ReqBody = Body;
type ResBody = Body;
type Error = ServiceError;
type Service = SearchService;
type Future = Box<Future<Item = Self::Service, Error = Self::Error> + Send>;
type InitError = ServiceError;
fn new_service(&self) -> Self::Future {
Box::new(futures::future::ok(SearchService))
}
}
impl Service for SearchService {
type ReqBody = Body;
type ResBody = Body;
type Error = ServiceError;
type Future = Box<Future<Item = Response<Self::ResBody>, Error = Self::Error> + Send>;
fn call(&mut self, request: Request<Self::ReqBody>) -> Self::Future {
let db_connection = match connect_db() {
Ok(db) => db,
Err(_) => {
return Box::new(futures::future::ok(
Response::builder()
.status(StatusCode::INTERNAL_SERVER_ERROR)
.body(Body::empty())
.unwrap(),
))
}
};
match (request.method(), request.uri().path()) {
(&Method::GET, "/refs") => Box::new(
parse_query(request.uri().query())
.and_then(move |query| search_results(query, &db_connection))
.and_then(success_response)
.or_else(|_| {
futures::future::ok(
Response::builder()
.status(StatusCode::BAD_REQUEST)
.body(Body::empty())
.unwrap(),
)
}),
),
(&Method::GET, "/search") => Box::new(
parse_query_paginate(request.uri().query())
.and_then(move |query| search_text(query, &db_connection))
.and_then(success_response)
.or_else(|_| {
futures::future::ok(
Response::builder()
.status(StatusCode::BAD_REQUEST)
.body(Body::empty())
.unwrap(),
)
}),
),
(&Method::GET, "/daily") => {
Box::new(success_response(vod_response_body(&db_connection)))
}
_ => Box::new(futures::future::ok(
Response::builder()
.status(StatusCode::NOT_FOUND)
.body(Body::empty())
.unwrap(),
)),
}
}
}
fn main() {
let addr = "127.0.0.1:8080".parse().unwrap();
let server = Server::bind(&addr)
.serve(SearchService)
.map_err(|e| eprintln!("Server error: {}", e));
println!("Listening {}", addr);
hyper::rt::run(server);
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_fetch_chapter() {
let db = connect_db().unwrap();
let refs = parse("Быт 1");
let verses = fetch_results(&db, refs);
assert_eq!(verses.len(), 1);
}
} | .header(header::ACCESS_CONTROL_ALLOW_METHODS, "GET")
.header(header::ACCESS_CONTROL_ALLOW_HEADERS, "Content-Type")
.body(body) | random_line_split |
domain_randomization.py | # Copyright (c) 2020, Fabio Muratore, Honda Research Institute Europe GmbH, and
# Technical University of Darmstadt.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of Fabio Muratore, Honda Research Institute Europe GmbH,
# or Technical University of Darmstadt, nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL FABIO MURATORE, HONDA RESEARCH INSTITUTE EUROPE GMBH,
# OR TECHNICAL UNIVERSITY OF DARMSTADT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from random import randint
from typing import List, Mapping, Optional, Tuple, Union
import numpy as np
from init_args_serializer import Serializable
import pyrado
from pyrado.domain_randomization.domain_randomizer import DomainRandomizer
from pyrado.environment_wrappers.base import EnvWrapper
from pyrado.environment_wrappers.utils import all_envs, inner_env, remove_env
from pyrado.environments.base import Env
from pyrado.environments.sim_base import SimEnv
from pyrado.utils.input_output import completion_context, print_cbt
class DomainRandWrapper(EnvWrapper, Serializable):
"""Base class for environment wrappers which call a `DomainRandomizer` to randomize the domain parameters"""
def __init__(self, wrapped_env: Union[SimEnv, EnvWrapper], randomizer: Optional[DomainRandomizer]):
"""
Constructor
:param wrapped_env: environment to wrap
:param randomizer: `DomainRandomizer` object holding the probability distribution of all randomizable
domain parameters, pass `None` if you want to subclass wrapping another `DomainRandWrapper`
and use its randomizer
"""
if not isinstance(inner_env(wrapped_env), SimEnv):
raise pyrado.TypeErr(given=wrapped_env, expected_type=SimEnv)
if not isinstance(randomizer, DomainRandomizer) and randomizer is not None:
raise pyrado.TypeErr(given=randomizer, expected_type=DomainRandomizer)
Serializable._init(self, locals())
# Invoke EnvWrapper's constructor
super().__init__(wrapped_env)
self._randomizer = randomizer
@property
def randomizer(self) -> DomainRandomizer:
return self._randomizer
@randomizer.setter
def randomizer(self, randomizer: DomainRandomizer):
if not isinstance(randomizer, DomainRandomizer):
raise pyrado.TypeErr(given=randomizer, expected_type=DomainRandomizer)
self._randomizer = randomizer
class MetaDomainRandWrapper(DomainRandWrapper, Serializable):
"""
Domain randomization wrapper which wraps another `DomainRandWrapper` to adapt its parameters,
called domain distribution parameters.
"""
def __init__(self, wrapped_rand_env: DomainRandWrapper, dp_mapping: Mapping[int, Tuple[str, str]]):
"""
Constructor
:param wrapped_rand_env: randomized environment to wrap
:param dp_mapping: mapping from index of the numpy array (coming from the algorithm) to domain parameter name
(e.g. mass, length) and the domain distribution parameter (e.g. mean, std)
.. code-block:: python
# For the mapping arg use the this dict constructor
```
m = {0: ('name1', 'parameter_type1'), 1: ('name2', 'parameter_type2')}
```
"""
if not isinstance(wrapped_rand_env, DomainRandWrapper):
raise pyrado.TypeErr(given=wrapped_rand_env, expected_type=DomainRandWrapper)
Serializable._init(self, locals())
# Invoke the DomainRandWrapper's constructor
super().__init__(wrapped_rand_env, None) | def randomizer(self) -> DomainRandomizer:
# Forward to the wrapped DomainRandWrapper
return self._wrapped_env.randomizer
@randomizer.setter
def randomizer(self, dr: DomainRandomizer):
# Forward to the wrapped DomainRandWrapper
self._wrapped_env.randomizer = dr
def adapt_randomizer(self, domain_distr_param_values: np.ndarray):
# Check the input dimension and reshape if necessary
if domain_distr_param_values.ndim == 1:
pass
elif domain_distr_param_values.ndim == 2:
domain_distr_param_values = domain_distr_param_values.ravel()
else:
raise pyrado.ShapeErr(given=domain_distr_param_values, expected_match=(1,))
# Reconfigure the wrapped environment's DomainRandomizer
for i, value in enumerate(domain_distr_param_values):
dp_name, ddp_name = self.dp_mapping.get(i)
self._wrapped_env.randomizer.adapt_one_distr_param(dp_name, ddp_name, value)
class DomainRandWrapperLive(DomainRandWrapper, Serializable):
"""
Domain randomization wrapper which randomized the wrapped env at every reset.
Thus every rollout is done with different domain parameters.
"""
def reset(self, init_state: np.ndarray = None, domain_param: dict = None) -> np.ndarray:
if domain_param is None:
# No explicit specification of domain parameters, so randomizer is called to draw a parameter dict
self._randomizer.randomize(num_samples=1)
domain_param = self._randomizer.get_params(fmt="dict", dtype="numpy")
# Forward to EnvWrapper, which delegates to self._wrapped_env
return super().reset(init_state=init_state, domain_param=domain_param)
class DomainRandWrapperBuffer(DomainRandWrapper, Serializable):
"""
Domain randomization wrapper which randomized the wrapped env using a buffer of domain parameter sets.
At every call of the reset method this wrapper cycles through that buffer.
"""
def __init__(self, wrapped_env, randomizer: Optional[DomainRandomizer], selection: Optional[str] = "cyclic"):
"""
Constructor
:param wrapped_env: environment to wrap around
:param randomizer: `DomainRandomizer` object that manages the randomization. If `None`, the user has to set the
buffer manually, the circular reset however works the same way
:param selection: method to draw samples from the buffer, either cyclic or random
"""
if selection not in ["cyclic", "random"]:
raise pyrado.ValueErr(given=selection, eq_constraint="cyclic or random")
Serializable._init(self, locals())
# Invoke the DomainRandWrapper's constructor
super().__init__(wrapped_env, randomizer)
self._ring_idx = None
self._buffer = None
self.selection = selection
@property
def ring_idx(self) -> int:
"""Get the buffer's index."""
return self._ring_idx
@ring_idx.setter
def ring_idx(self, idx: int):
"""Set the buffer's index."""
if not (isinstance(idx, int) or not 0 <= idx < len(self._buffer)):
raise pyrado.ValueErr(given=idx, ge_constraint="0 (int)", l_constraint=len(self._buffer))
self._ring_idx = idx
@property
def selection(self) -> str:
"""Get the selection method."""
return self._selection
@selection.setter
def selection(self, selection: str):
"""Set the selection method."""
if selection not in ["cyclic", "random"]:
raise pyrado.ValueErr(given=selection, eq_constraint="cyclic or random")
self._selection = selection
def fill_buffer(self, num_domains: int):
"""
Fill the internal buffer with domains.
:param num_domains: number of randomized domain parameter sets to store in the buffer
"""
if self._randomizer is None:
raise pyrado.TypeErr(msg="The randomizer must not be None to call fill_buffer()!")
if not isinstance(num_domains, int) or num_domains < 0:
raise pyrado.ValueErr(given=num_domains, g_constraint="0 (int)")
self._randomizer.randomize(num_domains)
self._buffer = self._randomizer.get_params(-1, fmt="list", dtype="numpy")
self._ring_idx = 0
@property
def buffer(self):
"""Get the domain parameter buffer."""
return self._buffer
@buffer.setter
def buffer(self, buffer: Union[List[dict], dict]):
"""
Set the domain parameter buffer.
Depends on the way the buffer has been saved, see the `DomainRandomizer.get_params()` arguments.
:param buffer: list of dicts, each describing a domain ,or just one dict for one domain
"""
if not (isinstance(buffer, list) or isinstance(buffer, dict)):
raise pyrado.TypeErr(given=buffer, expected_type=[list, dict])
self._buffer = buffer
def reset(self, init_state: np.ndarray = None, domain_param: dict = None) -> np.ndarray:
if domain_param is None:
# No explicit specification of domain parameters, so randomizer is requested
if isinstance(self._buffer, dict):
# The buffer consists of one domain parameter set
domain_param = self._buffer
elif isinstance(self._buffer, list):
# The buffer consists of a list of domain parameter sets
domain_param = self._buffer[self._ring_idx] # first selection will be index 0
if self._selection == "cyclic":
self._ring_idx = (self._ring_idx + 1) % len(self._buffer)
elif self._selection == "random":
self._ring_idx = randint(0, len(self._buffer) - 1)
else:
raise pyrado.TypeErr(given=self._buffer, expected_type=[dict, list])
# Forward to EnvWrapper, which delegates to self._wrapped_env
return super().reset(init_state=init_state, domain_param=domain_param)
def _get_state(self, state_dict: dict):
super()._get_state(state_dict)
state_dict["buffer"] = self._buffer
state_dict["ring_idx"] = self._ring_idx
def _set_state(self, state_dict: dict, copying: bool = False):
super()._set_state(state_dict, copying)
self._buffer = state_dict["buffer"]
self._ring_idx = state_dict["ring_idx"]
def remove_all_dr_wrappers(env: Env, verbose: bool = False):
"""
Go through the environment chain and remove all wrappers of type `DomainRandWrapper` (and subclasses).
:param env: env chain with domain randomization wrappers
:param verbose: choose if status messages should be printed
:return: env chain without domain randomization wrappers
"""
while any(isinstance(subenv, DomainRandWrapper) for subenv in all_envs(env)):
if verbose:
with completion_context(
f"Found domain randomization wrapper of type {type(env).__name__}. Removing it now",
color="y",
bright=True,
):
env = remove_env(env, DomainRandWrapper)
else:
env = remove_env(env, DomainRandWrapper)
return env |
self.dp_mapping = dp_mapping
@property | random_line_split |
domain_randomization.py | # Copyright (c) 2020, Fabio Muratore, Honda Research Institute Europe GmbH, and
# Technical University of Darmstadt.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of Fabio Muratore, Honda Research Institute Europe GmbH,
# or Technical University of Darmstadt, nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL FABIO MURATORE, HONDA RESEARCH INSTITUTE EUROPE GMBH,
# OR TECHNICAL UNIVERSITY OF DARMSTADT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from random import randint
from typing import List, Mapping, Optional, Tuple, Union
import numpy as np
from init_args_serializer import Serializable
import pyrado
from pyrado.domain_randomization.domain_randomizer import DomainRandomizer
from pyrado.environment_wrappers.base import EnvWrapper
from pyrado.environment_wrappers.utils import all_envs, inner_env, remove_env
from pyrado.environments.base import Env
from pyrado.environments.sim_base import SimEnv
from pyrado.utils.input_output import completion_context, print_cbt
class DomainRandWrapper(EnvWrapper, Serializable):
"""Base class for environment wrappers which call a `DomainRandomizer` to randomize the domain parameters"""
def __init__(self, wrapped_env: Union[SimEnv, EnvWrapper], randomizer: Optional[DomainRandomizer]):
"""
Constructor
:param wrapped_env: environment to wrap
:param randomizer: `DomainRandomizer` object holding the probability distribution of all randomizable
domain parameters, pass `None` if you want to subclass wrapping another `DomainRandWrapper`
and use its randomizer
"""
if not isinstance(inner_env(wrapped_env), SimEnv):
raise pyrado.TypeErr(given=wrapped_env, expected_type=SimEnv)
if not isinstance(randomizer, DomainRandomizer) and randomizer is not None:
raise pyrado.TypeErr(given=randomizer, expected_type=DomainRandomizer)
Serializable._init(self, locals())
# Invoke EnvWrapper's constructor
super().__init__(wrapped_env)
self._randomizer = randomizer
@property
def randomizer(self) -> DomainRandomizer:
return self._randomizer
@randomizer.setter
def randomizer(self, randomizer: DomainRandomizer):
if not isinstance(randomizer, DomainRandomizer):
raise pyrado.TypeErr(given=randomizer, expected_type=DomainRandomizer)
self._randomizer = randomizer
class MetaDomainRandWrapper(DomainRandWrapper, Serializable):
"""
Domain randomization wrapper which wraps another `DomainRandWrapper` to adapt its parameters,
called domain distribution parameters.
"""
def __init__(self, wrapped_rand_env: DomainRandWrapper, dp_mapping: Mapping[int, Tuple[str, str]]):
"""
Constructor
:param wrapped_rand_env: randomized environment to wrap
:param dp_mapping: mapping from index of the numpy array (coming from the algorithm) to domain parameter name
(e.g. mass, length) and the domain distribution parameter (e.g. mean, std)
.. code-block:: python
# For the mapping arg use the this dict constructor
```
m = {0: ('name1', 'parameter_type1'), 1: ('name2', 'parameter_type2')}
```
"""
if not isinstance(wrapped_rand_env, DomainRandWrapper):
raise pyrado.TypeErr(given=wrapped_rand_env, expected_type=DomainRandWrapper)
Serializable._init(self, locals())
# Invoke the DomainRandWrapper's constructor
super().__init__(wrapped_rand_env, None)
self.dp_mapping = dp_mapping
@property
def randomizer(self) -> DomainRandomizer:
# Forward to the wrapped DomainRandWrapper
return self._wrapped_env.randomizer
@randomizer.setter
def randomizer(self, dr: DomainRandomizer):
# Forward to the wrapped DomainRandWrapper
self._wrapped_env.randomizer = dr
def adapt_randomizer(self, domain_distr_param_values: np.ndarray):
# Check the input dimension and reshape if necessary
if domain_distr_param_values.ndim == 1:
pass
elif domain_distr_param_values.ndim == 2:
domain_distr_param_values = domain_distr_param_values.ravel()
else:
raise pyrado.ShapeErr(given=domain_distr_param_values, expected_match=(1,))
# Reconfigure the wrapped environment's DomainRandomizer
for i, value in enumerate(domain_distr_param_values):
dp_name, ddp_name = self.dp_mapping.get(i)
self._wrapped_env.randomizer.adapt_one_distr_param(dp_name, ddp_name, value)
class DomainRandWrapperLive(DomainRandWrapper, Serializable):
"""
Domain randomization wrapper which randomized the wrapped env at every reset.
Thus every rollout is done with different domain parameters.
"""
def reset(self, init_state: np.ndarray = None, domain_param: dict = None) -> np.ndarray:
if domain_param is None:
# No explicit specification of domain parameters, so randomizer is called to draw a parameter dict
self._randomizer.randomize(num_samples=1)
domain_param = self._randomizer.get_params(fmt="dict", dtype="numpy")
# Forward to EnvWrapper, which delegates to self._wrapped_env
return super().reset(init_state=init_state, domain_param=domain_param)
class DomainRandWrapperBuffer(DomainRandWrapper, Serializable):
"""
Domain randomization wrapper which randomized the wrapped env using a buffer of domain parameter sets.
At every call of the reset method this wrapper cycles through that buffer.
"""
def __init__(self, wrapped_env, randomizer: Optional[DomainRandomizer], selection: Optional[str] = "cyclic"):
"""
Constructor
:param wrapped_env: environment to wrap around
:param randomizer: `DomainRandomizer` object that manages the randomization. If `None`, the user has to set the
buffer manually, the circular reset however works the same way
:param selection: method to draw samples from the buffer, either cyclic or random
"""
if selection not in ["cyclic", "random"]:
raise pyrado.ValueErr(given=selection, eq_constraint="cyclic or random")
Serializable._init(self, locals())
# Invoke the DomainRandWrapper's constructor
super().__init__(wrapped_env, randomizer)
self._ring_idx = None
self._buffer = None
self.selection = selection
@property
def ring_idx(self) -> int:
"""Get the buffer's index."""
return self._ring_idx
@ring_idx.setter
def ring_idx(self, idx: int):
"""Set the buffer's index."""
if not (isinstance(idx, int) or not 0 <= idx < len(self._buffer)):
raise pyrado.ValueErr(given=idx, ge_constraint="0 (int)", l_constraint=len(self._buffer))
self._ring_idx = idx
@property
def selection(self) -> str:
"""Get the selection method."""
return self._selection
@selection.setter
def selection(self, selection: str):
"""Set the selection method."""
if selection not in ["cyclic", "random"]:
raise pyrado.ValueErr(given=selection, eq_constraint="cyclic or random")
self._selection = selection
def fill_buffer(self, num_domains: int):
"""
Fill the internal buffer with domains.
:param num_domains: number of randomized domain parameter sets to store in the buffer
"""
if self._randomizer is None:
raise pyrado.TypeErr(msg="The randomizer must not be None to call fill_buffer()!")
if not isinstance(num_domains, int) or num_domains < 0:
raise pyrado.ValueErr(given=num_domains, g_constraint="0 (int)")
self._randomizer.randomize(num_domains)
self._buffer = self._randomizer.get_params(-1, fmt="list", dtype="numpy")
self._ring_idx = 0
@property
def buffer(self):
"""Get the domain parameter buffer."""
return self._buffer
@buffer.setter
def buffer(self, buffer: Union[List[dict], dict]):
"""
Set the domain parameter buffer.
Depends on the way the buffer has been saved, see the `DomainRandomizer.get_params()` arguments.
:param buffer: list of dicts, each describing a domain ,or just one dict for one domain
"""
if not (isinstance(buffer, list) or isinstance(buffer, dict)):
raise pyrado.TypeErr(given=buffer, expected_type=[list, dict])
self._buffer = buffer
def reset(self, init_state: np.ndarray = None, domain_param: dict = None) -> np.ndarray:
if domain_param is None:
# No explicit specification of domain parameters, so randomizer is requested
if isinstance(self._buffer, dict):
# The buffer consists of one domain parameter set
domain_param = self._buffer
elif isinstance(self._buffer, list):
# The buffer consists of a list of domain parameter sets
domain_param = self._buffer[self._ring_idx] # first selection will be index 0
if self._selection == "cyclic":
self._ring_idx = (self._ring_idx + 1) % len(self._buffer)
elif self._selection == "random":
self._ring_idx = randint(0, len(self._buffer) - 1)
else:
raise pyrado.TypeErr(given=self._buffer, expected_type=[dict, list])
# Forward to EnvWrapper, which delegates to self._wrapped_env
return super().reset(init_state=init_state, domain_param=domain_param)
def _get_state(self, state_dict: dict):
super()._get_state(state_dict)
state_dict["buffer"] = self._buffer
state_dict["ring_idx"] = self._ring_idx
def _set_state(self, state_dict: dict, copying: bool = False):
super()._set_state(state_dict, copying)
self._buffer = state_dict["buffer"]
self._ring_idx = state_dict["ring_idx"]
def remove_all_dr_wrappers(env: Env, verbose: bool = False):
"""
Go through the environment chain and remove all wrappers of type `DomainRandWrapper` (and subclasses).
:param env: env chain with domain randomization wrappers
:param verbose: choose if status messages should be printed
:return: env chain without domain randomization wrappers
"""
while any(isinstance(subenv, DomainRandWrapper) for subenv in all_envs(env)):
if verbose:
with completion_context(
f"Found domain randomization wrapper of type {type(env).__name__}. Removing it now",
color="y",
bright=True,
):
env = remove_env(env, DomainRandWrapper)
else:
|
return env
| env = remove_env(env, DomainRandWrapper) | conditional_block |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.