| repo_name | path | text |
|---|---|---|
Liuxg16/BrainMatrix
|
scala-package/core/src/test/scala/ml/dmlc/mxnet/SerializerSuite.scala
|
package ml.dmlc.mxnet
import ml.dmlc.mxnet.optimizer.SGD
import org.scalatest.{Matchers, BeforeAndAfterAll, FunSuite}
class SerializerSuite extends FunSuite with BeforeAndAfterAll with Matchers {
test("serialize and deserialize optimizer") {
val optimizer: Optimizer = new SGD(learningRate = 0.1f, momentum = 0.9f, wd = 0.0005f)
val optSerialized: String = Serializer.encodeBase64String(
Serializer.getSerializer.serialize(optimizer))
assert(optSerialized.length > 0)
val bytes = Serializer.decodeBase64String(optSerialized)
val optDeserialized = Serializer.getSerializer.deserialize[Optimizer](bytes)
assert(optDeserialized.isInstanceOf[SGD])
val sgd = optDeserialized.asInstanceOf[SGD]
val learningRate = classOf[SGD].getDeclaredField("learningRate")
learningRate.setAccessible(true)
assert(learningRate.get(sgd).asInstanceOf[Float] === 0.1f +- 1e-6f)
val momentum = classOf[SGD].getDeclaredField("momentum")
momentum.setAccessible(true)
assert(momentum.get(sgd).asInstanceOf[Float] === 0.9f +- 1e-6f)
val wd = classOf[SGD].getDeclaredField("wd")
wd.setAccessible(true)
assert(wd.get(sgd).asInstanceOf[Float] === 0.0005f +- 1e-6f)
}
}
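// A minimal persistence sketch built on the Serializer API exercised above.
// `OptimizerCheckpoint` is a hypothetical helper, not part of the repo; it
// only re-packages the encode/decode round trip shown in the test.
object OptimizerCheckpoint {
def save(optimizer: Optimizer): String =
Serializer.encodeBase64String(Serializer.getSerializer.serialize(optimizer))
def load(encoded: String): Optimizer =
Serializer.getSerializer.deserialize[Optimizer](Serializer.decodeBase64String(encoded))
}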
|
Liuxg16/BrainMatrix
|
scalakernel/src/main/java/thu/brainmatrix/char_rnn_symbol/Config.scala
|
package thu.brainmatrix.char_rnn_symbol
/**
* @author liuxianggen
* @date 20160718
* @brief provides global settings for char_rnn
*/
object Config {
val INPUT_FILE_NAME = "./seqData/input.txt"
// val INPUT_FILE_NAME = "./seqData/ptb.train.txt"
val VOCAB_FILE_NAME = "./seqData/vocab.txt"
val SEQ_LENGTH = 32
val UNKNOW_CHAR = '\0'
val DROPOUT = 0
val BATCH_SIZE = 32
val DIM_HIDDEN = 64
val DIM_EMBED = 64
val LSTM_N_LAYER = 3
val N_EPOCH = 2 // 21
val LEARNING_RATE = 0.001f
val MOMENTUM = 0f
val WEIGHT_DECAY = 0.000001f
val CLIP_GRADIENT = 1
val N_GPU = 0
val USE_GPU = true
val DATA_TRAIN_RATIO = 0.9
}
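// Hedged usage sketch: the constants above are consumed with a wildcard
// import, as InferCharModel.scala in this package does. Kept in comments
// because vals cannot live at the top level of a file.
// import Config._
// val batchShape = (BATCH_SIZE, SEQ_LENGTH) // (32, 32)
// val lstmSpec = (LSTM_N_LAYER, DIM_HIDDEN) // (3, 64)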
|
Liuxg16/BrainMatrix
|
scalakernel/src/main/java/thu/brainmatrix/util/Draw.scala
|
package thu.brainmatrix.util
import breeze.linalg._
import breeze.plot._
class Draw(val subplots:Int*){
val f = Figure()
var p:Plot = f.subplot(0)
def subplot(row:Int, col:Int, selected:Int){
this.p = f.subplot(row,col,selected)
}
def add_line[@specialized(Int, Float, Double) V,@specialized(Int, Float, Double) V1](x: Array[V],y:Array[V1],style:Char = '-'){
if(x.length == y.length){
val xa = DenseVector.create(x.map(_.toString().toDouble),0,1,x.length)//choose all
val ya = DenseVector.create(y.map(_.toString().toDouble),0,1,y.length)//choose all
this.p += plot(xa, ya,style)
}else{
throw new IllegalArgumentException("the data for the two axes are mismatched!")
}
}
def addInfo(xlabel:String, ylabel:String, title:String = null){
this.p.xlabel = xlabel
this.p.ylabel = ylabel
if(title!=null)
this.p.title = title
}
def add_hist[@specialized(Int, Float, Double) V](x: Array[V],n_hist:Int = 10){
this.p +=hist(x.map(_.toString().toDouble), n_hist)
}
def draw(){
// p.xlabel = "x axis"
// p.ylabel = "y axis"
f.saveas("lines.png")
}
}
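// Minimal usage sketch for the Draw wrapper above; `DrawExample` is a
// hypothetical demo object. It plots one line and writes "lines.png"
// through draw(), exactly as the methods above define.
object DrawExample {
def main(args: Array[String]) {
val d = new Draw()
d.add_line(Array(1, 2, 3), Array(2.0, 4.0, 6.0))
d.addInfo("x axis", "y axis", "a straight line")
d.draw()
}
}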
object Util {
def Util_plot[@specialized(Int, Float, Double) V,@specialized(Int, Float, Double) V1](x: Array[V],y:Array[V1]){
val f = Figure()
val p = f.subplot(0)
if(x.length == y.length){
val xa = DenseVector.create(x.map(_.toString().toDouble),0,1,x.length)//choose all
val ya = DenseVector.create(y.map(_.toString().toDouble),0,1,y.length)//choose all
p += plot(xa, ya)
p.xlabel = "x axis"
p.ylabel = "y axis"
f.saveas("lines.png")
}else{
throw new IllegalArgumentException("the data for the two axes are mismatched!")
}
}
def hist_test(){
val f = Figure()
val p = f.subplot(0)
val x = Array.fill[Float](1000)(0.7f)
x.indices.foreach(i => {
x(i) = scala.util.Random.nextFloat()
})
// val x = (0, 1000).map(_.toFloat/1000)
// x(3) = 0.08f
// x.foreach(print)
// val y = Array.range(0, 10)
// val xa = DenseVector.create(x.map(_.toString().toDouble),0,1,x.length)//choose all
// val ya = DenseVector.create(y.map(_.toString().toDouble),0,1,y.length)//choose all
// val g = breeze.stats.distributions.Gaussian(0,1)
// val gs = g.sample(100)
// gs.foreach(print(_))
// p += hist(g.sample(100000),10)
p +=hist(x,100)
f.saveas("lines.png")
}
def plot_test(){
val f = Figure()
val p2 = f.subplot(2,1,1)
val g = breeze.stats.distributions.Gaussian(0,1)
p2 += hist(g.sample(100000),100)
p2.title = "A normal distribution"
f.saveas("subplots.png")
}
def main(args:Array[String]){
// Util_plot(Array(1,2,3), Array(3f,40f,5f))
// hist_test()
plot_test()
}
}
|
Liuxg16/BrainMatrix
|
scalakernel/src/main/java/thu/brainmatrix/sae/AutoEncoderModel.scala
|
package thu.brainmatrix.sae
import scala.collection.mutable.ListBuffer
import scala.collection.immutable.Range
import thu.brainmatrix.NDArray
import thu.brainmatrix.Initializer
import thu.brainmatrix.Symbol
import thu.brainmatrix.DataIter
import thu.brainmatrix.Optimizer
import thu.brainmatrix.MAE
import thu.brainmatrix.IO
import thu.brainmatrix.optimizer.SGD
import org.slf4j.LoggerFactory
/*
*
* by liuxianggen
* 2016-05-11
* @param dims: the layer dimensions of the network, input layer first
* (the "data" input symbol is created inside the class)
*
*/
class AutoEncoderModel(val dims:Vector[Int],val sparse_penalty:Float=0,
pt_dropout:Float=0,ft_dropout:Float=0,input_act:String=null,
internal_act:String = "relu",output_act:String=null) extends AEModel {
val N = dims.length-1
val stacks = ListBuffer[Symbol]()
val data = Symbol.CreateVariable("data")
/*
* config each layer
*/
var decoder_act:String = null
var idropout = 0f
var odropout = 0f
var encoder_act:String = null
for(i <- 0 until N){
if(i==0){
decoder_act = input_act
idropout = 0f
}else{
decoder_act = internal_act
idropout = pt_dropout
}
if(this.N-1 == i){
encoder_act = output_act
odropout = 0f
}else{
encoder_act = internal_act
odropout = pt_dropout
}
val (istack,iargs,iargs_grad,iargs_mult,iauxs) = make_stack(i,data,dims(i),dims(i+1),sparse_penalty,idropout,odropout,
encoder_act,decoder_act)
// the key symbol of each layer
this.stacks.append(istack)
this.args.++=(iargs)
this.args_grad ++= iargs_grad
this.args_mult ++=iargs_mult
this.auxs ++=iauxs
}
/**
* encoder: the key symbol of the full forward (encoding) network of this autoencoder
* internals: the per-layer encoder symbols of the forward network
*/
val (encoder,internals) = make_encoder(this.data,dims,sparse_penalty,ft_dropout,internal_act,output_act)
val decoder = make_decoder(this.encoder,dims,sparse_penalty,ft_dropout,internal_act,input_act)
if(input_act=="softmax"){
this.loss = this.decoder
}else{
this.loss = Symbol.LinearRegressionOutput()(Map("data"->this.decoder,"label"->this.data))
}
def make_encoder(data:Symbol,dims:Vector[Int],sparse_penalty:Float=0f,dropout:Float = 0f,
internal_act:String = "relu",output_act:String = null):(Symbol,ListBuffer[Symbol])={
var x = data
val internals = ListBuffer[Symbol]()
val N = dims.length-1
for(i<-0 until N){
x = Symbol.FullyConnected(name="encoder_%d".format(i))(Map("data"->x,"num_hidden"->dims(i+1)))
if(internal_act!=null && i<N-1){
x = Symbol.Activation()(Map("data"->x,"act_type"->internal_act))
if(internal_act=="sigmod" && sparse_penalty!=0f){
x = Symbol.IdentityAttachKLSparseReg("sparse_encoder_%d".format(i))(Map("data"->x,"penalty"->sparse_penalty))
}
}else if(output_act!=null && i==N-1){
x = Symbol.Activation()(Map("data"->x,"act_type"->output_act))
if(output_act=="sigmod" && sparse_penalty!=0f){
x = Symbol.IdentityAttachKLSparseReg("sparse_encoder_%d".format(i))(Map("data"->x,"penalty"->sparse_penalty))
}
}
if(dropout!=0){
x = Symbol.Dropout()(Map("data"->x,"p"->dropout))
}
internals.append(x)
}
// internals.foreach { x => println(x.debugStr+"\n-------------------------\n") }
(x,internals)
}
def make_decoder(feature:Symbol,dims:Vector[Int],sparse_penalty:Float=0f,dropout:Float = 0f,
internal_act:String = "relu",input_act:String = null):Symbol = {
var x = feature
val internals = ListBuffer[Symbol]()
val N = dims.length-1
for(i<- Range(0,N).reverse){
x = Symbol.FullyConnected(name="decoder_%d".format(i))(Map("data"->x,"num_hidden"->dims(i)))
if(internal_act!=null && i>0){
x = Symbol.Activation()(Map("data"->x,"act_type"->internal_act))
if(internal_act=="sigmod" && sparse_penalty!=0f){
x = Symbol.IdentityAttachKLSparseReg("sparse_decoder_%d".format(i))(Map("data"->x,"penalty"->sparse_penalty))
}
}else if(input_act!=null && i==0){
x = Symbol.Activation()(Map("data"->x,"act_type"->input_act))
if(input_act=="sigmod" && sparse_penalty!=0f){
x = Symbol.IdentityAttachKLSparseReg("sparse_decoder_%d".format(i))(Map("data"->x,"penalty"->sparse_penalty))
}
}
if(dropout!=0 && i>0){
x = Symbol.Dropout()(Map("data"->x,"p"->dropout))
}
}
x
}
def make_stack(istack:Int ,data:Symbol,num_input:Int,num_hidden:Int,
sparse_penalty:Float=0f,idropout:Float = 0f,odropout:Float=0f,
encoder_act:String = "relu",decoder_act:String = "relu"):(Symbol,ListBuffer[(String,NDArray)],
ListBuffer[(String,NDArray)],ListBuffer[(String,Float)],ListBuffer[(String,NDArray)]) = {
var x = data
if(0f!=idropout){
x = Symbol.Dropout()(Map("data"->data,"p"->idropout))
}
x = Symbol.FullyConnected(name="encoder_%d".format(istack))(Map("data"->x,
"num_hidden"->num_hidden))
if(encoder_act!=null){
x = Symbol.Activation()(Map("data"->x,"act_type"->encoder_act))
if(encoder_act=="sigmod" && sparse_penalty!=0f){
x = Symbol.IdentityAttachKLSparseReg("sparse_encoder_%d"
.format(istack))(Map("data"->x,"penalty"->sparse_penalty))
}
}
if(0f!=odropout){
x = Symbol.Dropout()(Map("data"->x,"p"->idropout))
}
x = Symbol.FullyConnected(name="decoder_%d".format(istack))(Map("data"->x,
"num_hidden"->num_input))
if(decoder_act=="softmax"){
x = Symbol.SoftmaxOutput()(Map("data"->x,"label"->data,"prob_label"->true,"act_type"->decoder_act))
}else if(decoder_act != null){
x = Symbol.Activation()(Map("data"->x,"act_type"->decoder_act))
if(encoder_act=="sigmod" && sparse_penalty!=0f){
x = Symbol.IdentityAttachKLSparseReg("sparse_decoder_%d"
.format(istack))(Map("data"->x,"penalty"->sparse_penalty))
}
x = Symbol.LinearRegressionOutput()(Map("data"->x,"label"->data))
}else{
x = Symbol.LinearRegressionOutput()(Map("data"->x,"label"->data))
}
val args_t = ListBuffer(("encoder_%d_weight".format(istack),NDArray.empty(this.xpu,num_hidden,num_input)),
("encoder_%d_bias".format(istack),NDArray.empty(this.xpu, num_hidden)),
("decoder_%d_weight".format(istack), NDArray.empty(this.xpu,num_input, num_hidden)),
("decoder_%d_bias".format(istack),NDArray.empty(this.xpu,num_input)))
val args_grad_t = ListBuffer(("encoder_%d_weight".format(istack),NDArray.zeros(this.xpu,num_hidden,num_input)),
("encoder_%d_bias".format(istack),NDArray.zeros(this.xpu, num_hidden)),
("decoder_%d_weight".format(istack), NDArray.zeros(this.xpu,num_input, num_hidden)),
("decoder_%d_bias".format(istack),NDArray.zeros(this.xpu,num_input)))
val args_mult_t = ListBuffer(("encoder_%d_weight".format(istack),1.0f),
("encoder_%d_bias".format(istack),2.0f),
("decoder_%d_weight".format(istack),1.0f),
("decoder_%d_bias".format(istack),2.0f))
val auxs_t = ListBuffer[(String,NDArray)]()
if(encoder_act=="sigmod" && sparse_penalty!=0f){
auxs_t.append(("sparse_encoder_%d_moving_avg".format(istack),NDArray.ones(this.xpu,num_hidden)*0.5f))
}
if(encoder_act=="sigmod" && sparse_penalty!=0f){
auxs_t.append(("sparse_decoder_%d_moving_avg".format(istack),NDArray.ones(this.xpu,num_input)*0.5f))
}
val init_t = new thu.brainmatrix.Uniform(0.07f)
for((k,v) <- args_t){
init_t(k,v)
// println("------------------------")
// val tf = NDArray.mean(NDArray.abs(v))
// System.err.println(s"param:$k \t\t stat(mean):$tf")
}
(x,args_t,args_grad_t,args_mult_t,auxs_t)
}
def layerwise_pretrain(data_iter:DataIter,batch_Size:Int,n_iter:Int,optimizer:Optimizer){
// def l2_norm(){}
val solver = new Solver(optimizer)
solver.set_metric(new MAE())
solver.set_monitor(new Monitor(3))
for (i <- 0 until this.N){
var data_iter_i:DataIter = null
var X_i = ListBuffer[NDArray]()
if(i==0){
data_iter_i = data_iter
println(s"Pre-training layer $i...")
solver.solve_0(this.xpu, this.stacks(i), this.args, this.args_grad, this.auxs, data_iter_i, 0, n_iter, false)
}else{
X_i = AEModel.extract_feature(this.internals(i-1), this.args, this.auxs, data_iter, this.xpu).values.head
println(s"Pre-training layer $i...")
solver.solve(this.xpu, this.stacks(i), this.args, this.args_grad, this.auxs, X_i, 0, n_iter, false)
}
}
}
def finetune(data_iter:DataIter,batch_size:Int,n_iter:Int,optimizer:Optimizer){
val solver = new Solver(optimizer)
solver.set_metric(new MAE())
solver.set_monitor(new Monitor(3))
solver.solve_0(this.xpu,this.loss,this.args,this.args_grad,this.auxs,data_iter,0,n_iter,false)
}
def eval(data_iter:DataIter):Float = {
val X_data = AEModel.extract_feature(this.loss, this.args, this.auxs, data_iter, this.xpu).values.head
data_iter.reset()
var sum = 0f
for(x_data<-X_data){
val temp = NDArray.mean(NDArray.square(x_data-data_iter.next().data(0)))
sum += temp.toScalar
}
sum/(X_data.length)
}
}
object AutoEncoderModel{
private val logger = LoggerFactory.getLogger(classOf[AutoEncoderModel])
def main(args:Array[String]){
println("-----------------------AutoEncoder--------------------------------")
val batchSize=100
val iterNum = 10
val lr_init = 1f
val ae = new AutoEncoderModel(dims = Vector(784,200,50,20,10),pt_dropout=0.9f,internal_act="relu", output_act="relu")
//get dataIter
val trainDataIter = IO.MNISTIter(Map(
"image" -> "data/train-images-idx3-ubyte",
"label" -> "data/train-labels-idx1-ubyte",
"data_shape" -> "(784)",
"label_name" -> "sm_label",
"batch_size" -> batchSize.toString,
"shuffle" -> "1",
"flat" -> "1",
"silent" -> "0",
"seed" -> "10"))
// val trainDataIter:DataIter = null
ae.layerwise_pretrain(trainDataIter, batchSize, iterNum, optimizer=new SGD(learningRate = lr_init, momentum = 0f, wd = 0f))
println("Finetune ....")
ae.finetune(trainDataIter, batchSize, iterNum, optimizer=new SGD(learningRate = lr_init, momentum = 0f, wd = 0f))
println("Evaluation ......")
val training_error = ae.eval(trainDataIter)
println(s"training error:$training_error")
//get dataIter
val valDataIter = IO.MNISTIter(Map(
"image" -> "data/t10k-images-idx3-ubyte",
"label" -> "data/t10k-labels-idx1-ubyte",
"data_shape" -> "(784)",
"label_name" -> "sm_label",
"batch_size" -> batchSize.toString,
"shuffle" -> "1",
"flat" -> "1",
"silent" -> "0",
"seed" -> "10"))
val val_error = ae.eval(valDataIter)
println(s"val error:$val_error")
// println("validation error:")
println("-----------------------AutoEncoder--------------------------------")
}
}
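// Hedged follow-up sketch (kept in comments because `ae` and `trainDataIter`
// are locals of main above): after pretraining and finetuning, the top
// encoder's activations can be extracted as features with the same
// AEModel.extract_feature call that layerwise_pretrain and eval use.
// val features: ListBuffer[NDArray] =
//   AEModel.extract_feature(ae.internals.last, ae.args, ae.auxs, trainDataIter, ae.xpu).values.head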
|
Liuxg16/BrainMatrix
|
scalakernel/src/main/java/thu/brainmatrix/lstmSort/LstmSortSuite.scala
|
package thu.brainmatrix.lstmSort
import thu.brainmatrix.util.IOHelper
import thu.brainmatrix.lstmSort.ButketIo
class LstmSortSuite {
// test("test"){
def testTest{
val path_train = "./data/sort.train.txt"
val path_test = "./data/sort.valid.txt"
val batch_size = 100
val buckets = List(5)
val num_hidden = 300
val num_embed = 512
val num_lstm_layer = 2
val seqLen = 5
val num_epoch = 8
val learningRate = 0.1f
val momentum = 0.9f
// # a dict that contains the word and the index
val vocab = IOHelper.buildVocab("./data/sort.train.txt")
// println(vocab)
// initialize states for LSTM
val initC = for (l <- 0 until num_lstm_layer) yield (s"l${l}_init_c", (batch_size, num_hidden))
val initH = for (l <- 0 until num_lstm_layer) yield (s"l${l}_init_h", (batch_size, num_hidden))
val initStates = initC ++ initH
val dataTrain = new ButketIo.BucketSentenceIter(path_train, vocab, buckets,batch_size, initStates)
val batch = dataTrain.next()
// println(batch.data(0))
// println(batch.label(0))
}
}
|
Liuxg16/BrainMatrix
|
scalakernel/src/main/java/thu/brainmatrix/ml/GibbsSampling.scala
|
package thu.brainmatrix.ml
import scala.util.control.Breaks
import thu.brainmatrix.NDArray
import thu.brainmatrix.Context
import thu.brainmatrix.Shape
import thu.brainmatrix.Random
import thu.brainmatrix.util.mathTool
/**
*
* HMM via Gibbs sampling
* properties
* pi: the initial state distribution, (1,K)
* T: the transition probability matrix, (K,K)
* Obs_pi: the emission probabilities of the observations, (K,D)
* this model has K hidden states and D observed symbols
*
*/
class GibbsSampling(val pi:NDArray,val T:NDArray,val Obs_pi:NDArray) {
val ctx = Context.cpu(0)
var pi_est = NDArray.Normalize(NDArray.ones(pi.shape, ctx))
var T_est = NDArray.Normalize(NDArray.ones(this.T.shape, ctx))
var Obs_pi_est = NDArray.Normalize(NDArray.ones(this.Obs_pi.shape, ctx))
def getObservation(nSteps:Int):(Array[Int],Array[Int]) = {
val observations = Array.fill[Int](nSteps)(0)
val states = Array.fill[Int](nSteps)(0)
states(0) = mathTool.SampleByPro1D(this.pi)
observations(0) = mathTool.SampleByPro1D(this.Obs_pi.slice(states(0)))
for(t<-1 until nSteps){
states(t) = mathTool.SampleByPro1D(this.T.slice(states(t-1)))
observations(t) = mathTool.SampleByPro1D(this.Obs_pi.slice(states(t)))
}
(states,observations)
}
def simulation(nSteps:Int,nRep:Int,x:Array[Int]) :Array[Int] = {
// val observations = Array.fill[Int](nSteps)(0)
val states = Array.fill[Int](nSteps)(0)
val T_T = NDArray.transpose(this.T_est)
val obs_pi_T = NDArray.transpose(this.Obs_pi_est)
// P(y_t | y_{-t}, x, theta) ∝ T(y_{t-1}, y_t) * E(y_t, x_t) * T(y_t, y_{t+1})
for(t<- 0 until nSteps){
states(t) = mathTool.SampleByPro1D(this.pi_est)
// observations(t) = mathTool.SampleByPro1D(this.Obs_pi.slice(states(t)))
}
for(iter <- 0 until nRep){
for(t<-1 until nSteps-1){
val pyt = NDArray.Normalize(T_T.slice(states(t+1)) * obs_pi_T.slice(x(t))*this.T_est.slice(states(t-1)))
states(t) = mathTool.SampleByPro1D(pyt)
// observations(t) = mathTool.SampleByPro1D(this.Obs_pi.slice(states(t)))
}
}
states
}
def update(states:Array[Int],observations:Array[Int]) :Array[NDArray] = {
val ctx = Context.cpu(0)
val criterion = 0.5
val I = this.Obs_pi.shape(0) //states n
val K = this.Obs_pi.shape(1) // observations n
val e_ik = Array.fill[Array[Float]](I)(Array.fill[Float](K)(0f))
val Iy = Array.fill[Float](I)(0.0000001f)
val t_ij = Array.fill[Array[Float]](I)(Array.fill[Float](I)(0))
states zip observations foreach{case (s,o)=>{
e_ik(s)(o) += 1
Iy(s) += 1
}}
e_ik.indices.foreach(id => {
e_ik(id).indices.foreach { idx => e_ik(id)(idx) /= Iy(id) }
})
states.indices.take(states.length-1).foreach(i => {
t_ij(states(i))(states(i+1)) += 1f/Iy(states(i))
})
Array(NDArray.array(Iy.map(_/states.length),Shape(1,I),ctx),NDArray.array(t_ij.flatten,Shape(I,I),ctx),NDArray.array(e_ik.flatten, Shape(I,K), ctx))
}
def train(chainsNum:Int,x1:Array[Int]){
var done = false
var n = 0
while(!done && n<1000){
val y = simulation(chainsNum,3,x1)
val Array(pi1,t1,obspi1) = update(y,x1)
if(NDArray.norm(pi1-this.pi_est).toScalar<0.5 && NDArray.norm(t1-this.T_est).toScalar<0.5 && NDArray.norm(obspi1-this.Obs_pi_est).toScalar<0.5)
done = !done
println(obspi1)
// pi1.copyTo(this.pi_est)
t1.copyTo(this.T_est)
obspi1.copyTo(this.Obs_pi_est)
n += 1
}
}
def viterbiAlgorithm(pi_est:NDArray,T_est:NDArray,obs_pi_est_T:NDArray,x:Array[Int]):Array[Int] = {
val ctx = Context.cpu(0)
val nsamples = x.length
val nstates = T_est.shape(0)
val sobservations = obs_pi_est_T.shape(0)
val delta = NDArray.zeros(Shape(nsamples,nstates), ctx)
val phi = NDArray.zeros(Shape(nsamples,nstates), ctx)
val T_est_T = NDArray.transpose(T_est)
(pi_est*T_est.slice(x(0))).copyTo(delta.slice(0))
delta.slice(0)
for(t <-0 until nsamples-1){
val nda = pi_est*obs_pi_est_T.slice(x(t))
for(i<- 0 until nstates){
delta(t+1,i) += (NDArray.max(nda * T_est_T.slice(i))*obs_pi_est_T(x(t+1),i)).toScalar
}
val boardcast_nda = NDArray.concatenate(nda,nda,nda)
(NDArray.argmaxChannel(boardcast_nda* T_est_T).reshape(Array(1,nstates))).copyTo(phi.slice(t+1))
}
val y = Array.fill[Int](nsamples)(0)
y(nsamples-1) = NDArray.argmaxChannel(delta.slice(nsamples-1)).toScalar.toInt
for(t <- (nsamples-2 to 0 by -1)){
y(t) = NDArray.argmaxChannel(delta.slice(t)*T_est_T.slice(y(t+1))).toScalar.toInt
}
y
}
}
object GibbsSampling{
def main(args:Array[String]){
// test_homework(1000)
test_homework1
}
def test{
val ctx = Context.cpu(0)
val num_states = 3 // A,B,C
val num_obs = 3
val pi = NDArray.Normalize((NDArray.array(Array(0.1f,0.4f,0.5f),Shape(1,num_states),ctx)))
val obs_pi = NDArray.array(Array(0.5f,0.3f,0.2f,0.1f,0.6f,0.3f,0.0f,0.3f,0.7f),Shape(num_states,num_obs),ctx)
val T = NDArray.array(Array(0.7f,0.2f,0.1f,0.1f,0.6f,0.3f,0.4f,0.2f,0.4f),Shape(num_states,num_states),ctx)
val hmm = new HMM(pi,T,obs_pi)
val (y,x) = hmm.simulation(1000)
x.foreach(println)
val Array(pi1,t1,obspi1) = hmm.train(x)
println(s"pi:$pi1")
println(s"T:$t1")
println(s"obspi:$obspi1")
}
def test1{
val ctx = Context.cpu(0)
val num_states = 2 // A,B,C
val num_obs = 3
val pi = NDArray.Normalize((NDArray.array(Array(0.5f,0.5f),Shape(1,num_states),ctx)))
val obs_pi = NDArray.array(Array(0.7f,0.2f,0.1f,0.1f,0.6f,0.3f),Shape(num_states,num_obs),ctx)
val T = NDArray.array(Array(0.5f,0.5f,0.2f,0.8f),Shape(num_states,num_states),ctx)
val hmm = new HMM(pi,T,obs_pi)
val (y,x) = hmm.simulation(1000)
// val x = Array(2, 0, 0, 0, 0, 0, 0, 1, 0, 0)
// x.foreach(println)
hmm.train(x)
val Array(pi1,t1,obspi1) = hmm.train(x)
println(s"pi:$pi1")
println(s"T:$t1")
println(s"obspi:$obspi1")
}
def test_homework(num:Int){
val ctx = Context.cpu(0)
val num_states = 3 // A,B,C
val num_obs = 2
val pi = NDArray.Normalize((NDArray.array(Array(0.3f,0.3f,0.4f),Shape(1,num_states),ctx)))
val obs_pi = NDArray.array(Array(0.1f,0.9f,0.5f,0.5f,0.9f,0.1f),Shape(num_states,num_obs),ctx)
val T = NDArray.array(Array(0.8f,0.2f,0f,0.1f,0.7f,0.2f,0.1f,0f,0.9f),Shape(num_states,num_states),ctx)
val hmm = new HMM(pi,T,obs_pi)
val Ts = NDArray.zeros(Shape(num,num_states,num_states), ctx)
val obs_pis = NDArray.zeros(Shape(num,num_states,num_obs), ctx)
for (i<- 0 until num){
println(s"**************step $i****************")
val (y,x) = hmm.simulation(1000)
val res = hmm.train(x)
println(s"T:${res(1)}")
println(s"obs_pis:${res(2)}")
res(1).reshape(Array(1,num_states,num_states)).copyTo(Ts.slice(i))
res(2).reshape(Array(1,num_states,num_obs)).copyTo(obs_pis.slice(i))
}
println(s"T variance:"+NDArray.norm(Ts))
println(s"obs_pis variance :"+NDArray.norm(obs_pis))
// println(s"T:$t1")
// println(s"obspi:$obspi1")
}
def test_homework1{
val ctx = Context.cpu(0)
val num_states = 3 // A,B,C
val num_obs = 2
val pi = NDArray.Normalize((NDArray.array(Array(0.3f,0.3f,0.4f),Shape(1,num_states),ctx)))
val obs_pi = NDArray.array(Array(0.1f,0.9f,0.5f,0.5f,0.9f,0.1f),Shape(num_states,num_obs),ctx)
val T = NDArray.array(Array(0.8f,0.2f,0f,0.1f,0.7f,0.2f,0.1f,0f,0.9f),Shape(num_states,num_states),ctx)
val chainsNum = 1000
val gs = new GibbsSampling(pi,T,obs_pi)
val (y1,x1) = gs.getObservation(chainsNum)
gs.train(chainsNum,x1)
println(s"T:${NDArray.norm(gs.T_est-T)}")
println(s"obspi:${NDArray.norm(gs.Obs_pi_est-obs_pi)}")
val y = gs.simulation(chainsNum,3,x1)
// y.foreach(println)
val y_est = gs.viterbiAlgorithm(gs.pi_est,gs.T_est,NDArray.transpose(gs.Obs_pi_est),x1)
var error = 0f
y zip y_est foreach{case(yi,yie) =>{
error += math.abs(yi-yie)
}}
println(s"TASK 2 estimate Y, error:${error/y.length}")
}
}
|
Liuxg16/BrainMatrix
|
scalakernel/src/main/java/thu/brainmatrix/lstmSort/Network.scala
|
package thu.brainmatrix.lstmSort
import thu.brainmatrix._
object Network {
def lenet:Symbol = {
val data = Symbol.CreateVariable("data")
val label = Symbol.CreateVariable("softmax_label")
val conv1 = Symbol.Convolution()(Map("data" -> data, "name" -> "conv1",
"num_filter" -> 20, "kernel" -> (5, 5)/*, "stride" -> (2, 2)*/))
val act1 = Symbol.Activation()(Map("data" -> conv1, "name" -> "tanh1", "act_type" -> "tanh"))
val mp1 = Symbol.Pooling()(Map("data" -> act1, "name" -> "mp1",
"kernel" -> (2, 2), "stride" -> (2, 2), "pool_type" -> "max"))
//second conv
val conv2 = Symbol.Convolution()(Map("data" -> mp1, "name" -> "conv2", "num_filter" -> 50,
"kernel" -> (5, 5), "stride" -> (2, 2)))
val act2 = Symbol.Activation()(Map("data" -> conv2, "name" -> "tanh2", "act_type" -> "tanh"))
val mp2 = Symbol.Pooling()(Map("data" -> act2, "name" -> "mp2",
"kernel" -> (2, 2), "stride" -> (2, 2), "pool_type" -> "max"))
//first fullc
val fl = Symbol.Flatten()(Map("data" -> mp2, "name" -> "flatten"))
val fc1 = Symbol.FullyConnected()(Map("data" -> fl, "name" -> "fc1", "num_hidden" -> 500))
val act3 = Symbol.Activation()(Map("data" -> fc1, "name" -> "tanh3", "act_type" -> "tanh"))
//second fullc
val fc2 = Symbol.FullyConnected()(Map("data" -> act3, "name" -> "fc2", "num_hidden" -> 10))
//loss
val sm = Symbol.SoftmaxOutput()(Map("data" -> fc2,"label"->label, "name" -> "sm"))
val smce = Symbol.Softmax_cross_entropy(fc2, label)
val loss = Symbol.MakeLoss("makeloss")(Map("data"->smce))
loss
}
def lenet1:Symbol = {
val data = Symbol.CreateVariable("data")
val label = Symbol.CreateVariable("softmax_label")
val conv1 = Symbol.Convolution()(Map("data" -> data, "name" -> "conv1",
"num_filter" -> 20, "kernel" -> (5, 5)/*, "stride" -> (2, 2)*/))
val act1 = Symbol.Activation()(Map("data" -> conv1, "name" -> "tanh1", "act_type" -> "tanh"))
val mp1 = Symbol.Pooling()(Map("data" -> act1, "name" -> "mp1",
"kernel" -> (2, 2), "stride" -> (2, 2), "pool_type" -> "max"))
//second conv
val conv2 = Symbol.Convolution()(Map("data" -> mp1, "name" -> "conv2", "num_filter" -> 50,
"kernel" -> (5, 5), "stride" -> (2, 2)))
val act2 = Symbol.Activation()(Map("data" -> conv2, "name" -> "tanh2", "act_type" -> "tanh"))
val mp2 = Symbol.Pooling()(Map("data" -> act2, "name" -> "mp2",
"kernel" -> (2, 2), "stride" -> (2, 2), "pool_type" -> "max"))
//first fullc
val fl = Symbol.Flatten()(Map("data" -> mp2, "name" -> "flatten"))
val fc1 = Symbol.FullyConnected()(Map("data" -> fl, "name" -> "fc1", "num_hidden" -> 500))
val act3 = Symbol.Activation()(Map("data" -> fc1, "name" -> "tanh3", "act_type" -> "tanh"))
//second fullc
val fc2 = Symbol.FullyConnected()(Map("data" -> act3, "name" -> "fc2", "num_hidden" -> 10))
//loss
val sm = Symbol.SoftmaxOutput()(Map("data" -> fc2,"label"->label))
sm
}
def mlp():Symbol = {
val data = Symbol.CreateVariable("data")
val weight_1 = Symbol.CreateVariable("weight_1")
val weight_2 = Symbol.CreateVariable("weight_2")
val weight_3 = Symbol.CreateVariable("weight_3")
val label = Symbol.CreateVariable("softmax_label")
val fc1 = Symbol.FullyConnected()(Map("data" -> data, "name" -> "fc1", "weight"->weight_1,"no_bias"->true,"num_hidden" -> 128))
val act1 = Symbol.Activation()(Map("data" -> fc1, "name" -> "relu1", "act_type" -> "relu"))
val fc2 = Symbol.FullyConnected()(Map("data" -> act1, "name" -> "fc2", "num_hidden" -> 64))
val fc3 = Symbol.FullyConnected()(Map("data" -> data, "name" -> "fc3", "num_hidden" -> 10))
val sm = Symbol.SoftmaxOutput("sm")(Map("data" -> fc3))
val smce = Symbol.Softmax_cross_entropy(fc3, label)
// val loss = Symbol.MakeLoss("makeloss")(Map("data"->(smce+Symbol.sum(Symbol.square(weight_1))*0.0003f)))
val loss = Symbol.MakeLoss("makeloss")(Map("data"->smce))
Symbol.Group(loss,sm)
}
def mlp1():Symbol = {
val data = Symbol.CreateVariable("data")
val weight_1 = Symbol.CreateVariable("weight_1")
val weight_2 = Symbol.CreateVariable("weight_2")
val weight_3 = Symbol.CreateVariable("weight_3")
val label = Symbol.CreateVariable("softmax_label")
val fc1 = Symbol.FullyConnected()(Map("data" -> data, "name" -> "fc1", "weight"->weight_1,"no_bias"->true,"num_hidden" -> 128))
val act1 = Symbol.Activation()(Map("data" -> fc1, "name" -> "relu1", "act_type" -> "relu"))
val fc2 = Symbol.FullyConnected()(Map("data" -> act1, "name" -> "fc2", "num_hidden" -> 64))
val act2 = Symbol.Activation()(Map("data" -> fc2, "name" -> "relu1", "act_type" -> "relu"))
val fc3 = Symbol.FullyConnected()(Map("data" -> act2, "name" -> "fc3", "num_hidden" -> 10))
val sm = Symbol.SoftmaxOutput("sm")(Map("data" -> fc3,"label"->label))
val smce = Symbol.Softmax_cross_entropy(fc3, label)
val loss = Symbol.MakeLoss("makeloss")(Map("data"->smce))
Symbol.Group(loss,sm)
}
}
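// Hedged usage sketch: any symbol above can be trained through the
// FeedForward API, mirroring Main.scala in this repository (SGD would need
// to be imported from thu.brainmatrix.optimizer; data iterators are elided).
// val model = new FeedForward(Network.mlp1(), Context.cpu(), numEpoch = 5,
//   optimizer = new SGD(learningRate = 0.1f, momentum = 0.9f, wd = 0.0001f))
// model.fit(trainDataIter, valDataIter)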
|
Liuxg16/BrainMatrix
|
scalakernel/src/main/java/thu/brainmatrix/Main.scala
|
package thu.brainmatrix
import thu.brainmatrix.optimizer.SGD
import scala.collection.mutable.ListBuffer
object Main {
def main(args:Array[String]){
val batchSize = 100
val data = Symbol.CreateVariable("data")
// val flatten = Symbol.Flatten(Map("data" -> data, "name" -> "flatten"))
val fc1 = Symbol.FullyConnected()(Map("data" -> data, "name" -> "fc1", "num_hidden" -> 128))
val act1 = Symbol.Activation()(Map("data" -> fc1, "name" -> "relu1", "act_type" -> "relu"))
val fc2 = Symbol.FullyConnected()(Map("data" -> act1, "name" -> "fc2", "num_hidden" -> 64))
val act2 = Symbol.Activation()(Map("data" -> fc2, "name" -> "relu2", "act_type" -> "relu"))
val fc3 = Symbol.FullyConnected()(Map("data" -> act2, "name" -> "fc3", "num_hidden" -> 10))
val sm = Symbol.SoftmaxOutput("sm")(Map("data" -> fc3))
val numEpoch = 5
val model = new FeedForward(sm, Context.cpu(), numEpoch = numEpoch,
optimizer = new SGD(learningRate = 0.1f, momentum = 0.9f, wd = 0.0001f))
// get data
// "./scripts/get_mnist_data.sh" !
val trainDataIter = IO.MNISTIter(scala.collection.immutable.Map(
"image" -> "data/train-images-idx3-ubyte",
"label" -> "data/train-labels-idx1-ubyte",
"data_shape" -> "(784)",
"label_name" -> "sm_label",
"batch_size" -> batchSize.toString,
"shuffle" -> "1",
"flat" -> "1",
"silent" -> "0",
"seed" -> "10"))
println(trainDataIter.provideLabel)
val valDataIter = IO.MNISTIter(scala.collection.immutable.Map(
"image" -> "data/t10k-images-idx3-ubyte",
"label" -> "data/t10k-labels-idx1-ubyte",
"data_shape" -> "(784)",
"label_name" -> "sm_label",
"batch_size" -> batchSize.toString,
"shuffle" -> "1",
"flat" -> "1", "silent" -> "0"))
model.fit(trainDataIter, valDataIter)
println("Finish fit ...")
val probArrays = model.predict(valDataIter)
val prob = probArrays(0)
println("Finish predict ...")
valDataIter.reset()
val labels = ListBuffer.empty[NDArray]
while (valDataIter.hasNext) {
var evalData = valDataIter.next()
labels += evalData.label(0).copy()
}
val y = NDArray.concatenate(labels)
val py = NDArray.argmaxChannel(prob)
var numCorrect = 0
var numInst = 0
for ((labelElem, predElem) <- y.toArray zip py.toArray) {
if (labelElem == predElem) {
numCorrect += 1
}
numInst += 1
}
val acc = numCorrect.toFloat / numInst
println("Final accuracy = ")
println(acc)
}
}
|
Liuxg16/BrainMatrix
|
scalakernel/src/main/java/thu/brainmatrix/Random.scala
|
package thu.brainmatrix
import thu.brainmatrix.Base._
import thu.brainmatrix.NDArray.{randomGaussian, randomUniform, empty}
/**
* Random Number interface of brainmatrix.
* @author <NAME>
*/
object Random {
/**
* Generate uniform distribution in [low, high) with shape.
*
* @param low The lower bound of distribution.
* @param high The upper bound of distribution.
* @param shape Output shape of the NDArray generated.
* @param ctx Context of output NDArray, will use default context if not specified.
* @param out Output place holder
* @return The result NDArray with generated result.
*/
def uniform(low: Float,
high: Float,
shape: Shape = null,
ctx: Context = null,
out: NDArray = null): NDArray = {
var outCopy = out
if (outCopy != null) {
require(shape == null && ctx == null, "shape and ctx are not needed when out is specified.")
} else {
require(shape != null, "shape is required when out is not specified")
outCopy = empty(shape, ctx)
}
randomUniform(low, high, outCopy)
}
/**
* Generate normal (Gaussian) distribution N(mean, stdvar^2) with shape.
*
* @param loc The mean of the normal distribution.
* @param scale The standard deviation of normal distribution.
* @param shape Output shape of the NDArray generated.
* @param ctx Context of output NDArray, will use default context if not specified.
* @param out Output place holder
* @return The result NDArray with generated result.
*/
def normal(loc: Float,
scale: Float,
shape: Shape = null,
ctx: Context = null,
out: NDArray = null): NDArray = {
var outCopy = out
if (outCopy != null) {
require(shape == null && ctx == null, "shape and ctx are not needed when out is specified.")
} else {
require(shape != null, "shape is required when out is not specified")
outCopy = empty(shape, ctx)
}
randomGaussian(loc, scale, outCopy)
}
/**
* Seed the random number generators in brainmatrix.
*
* This seed will affect behavior of functions in this module,
* as well as results from executors that contains Random number
* such as Dropout operators.
*
* @param seedState The random number seed to set to all devices.
* @note The random number generator of brainmatrix is by default device specific.
* This means if you set the same seed, the random number sequence
* generated from GPU0 can be different from CPU.
*/
def seed(seedState: Int): Unit = {
checkCall(_LIB.mxRandomSeed(seedState))
}
}
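// Hedged usage sketch for the generators above; the shape and bounds are
// illustrative and `RandomExample` is a hypothetical demo object.
object RandomExample {
def main(args: Array[String]): Unit = {
Random.seed(42) // reproducible per device
val u = Random.uniform(0f, 1f, Shape(2, 3)) // uniform samples in [0, 1)
val n = Random.normal(0f, 1f, Shape(2, 3)) // N(0, 1) samples
println(u.toArray.mkString(", "))
println(n.toArray.mkString(", "))
}
}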
|
Liuxg16/BrainMatrix
|
scalakernel/src/main/java/thu/brainmatrix/nce_loss/Toy_softmax.scala
|
package thu.brainmatrix.nce_loss
import thu.brainmatrix._
import thu.brainmatrix.optimizer.SGD
import thu.brainmatrix.optimizer.Adam
import scala.collection.Set
/**
* @author liuxianggen
* @date 20160811
* @note the observed performance is strange
*/
object Toy_softmax {
def main(args:Array[String]){
training_DIY
}
def training_DIY{
val batch_size = 100
val feature_size = 100
val num_label = 6
val vocab_size = 10 // 10000
val learningRate = 0.001f // 0.01f
val numEpoch = 3
val dataTrain = new DataIter_(10000,batch_size,feature_size,vocab_size)
val dataTest = new DataIter_(1000,batch_size,feature_size,vocab_size)
// val dataTrain = IO.MNISTIter(scala.collection.immutable.Map(
// "image" -> "data/train-images-idx3-ubyte",
// "label" -> "data/train-labels-idx1-ubyte",
// "data_shape" -> "(1, 28, 28)",
// "label_name" -> "sm_label",
// "batch_size" -> batch_size.toString,
// "shuffle" -> "1",
// "flat" -> "0",
// "silent" -> "0",
// "seed" -> "10"))
//
// val dataTest = IO.MNISTIter(scala.collection.immutable.Map(
// "image" -> "data/t10k-images-idx3-ubyte",
// "label" -> "data/t10k-labels-idx1-ubyte",
// "data_shape" -> "(1, 28, 28)",
// "label_name" -> "sm_label",
// "batch_size" -> batch_size.toString,
// "shuffle" -> "1",
// "flat" -> "0", "silent" -> "0"))
val network = get_net(vocab_size)
val ctx = Context.cpu(0)
val datasAndLabels = dataTrain.provideData ++ dataTrain.provideLabel
val (argShapes, outputShapes, auxShapes) = network.inferShape(datasAndLabels)
val initializer = new Xavier(factorType = "in", magnitude = 2.34f)
val argNames = network.listArguments()
val argDict = argNames.zip(argShapes.map(NDArray.zeros(_, ctx))).toMap
val auxNames = network.listAuxiliaryStates()
val auxDict = auxNames.zip(auxShapes.map(NDArray.zeros(_, ctx))).toMap
//a collection that contains the ndarray of grad parameters
val gradDict = argNames.zip(argShapes).filter {
case (name, shape) =>
!datasAndLabels.contains(name)
}.map(x => x._1 -> NDArray.empty(x._2, ctx) ).toMap
argDict.foreach { case (name, ndArray) =>
if (!datasAndLabels.contains(name)) {
initializer.initWeight(name, ndArray)
}
}
val data = argDict("data")
val label = argDict("label")
val executor = network.bind(ctx, argDict, gradDict)
val opt = new SGD(learningRate = learningRate, momentum=0.9f, wd = 0.0f)
val paramsGrads = gradDict.toList.zipWithIndex.map { case ((name, grad), idx) =>
(idx, name, grad, opt.createState(idx, argDict(name)))
}
val evalMetric = new Accuracy()
val batchEndCallback = new Callback.Speedometer(batch_size, 50)
// val epochEndCallback = Utils.doCheckpoint(s"${incr.saveModelPath}/obama")
for (epoch <- 0 until numEpoch) {
// Training phase
val tic = System.currentTimeMillis
evalMetric.reset()
var nBatch = 0
var epochDone = false
// Iterate over training data.
dataTrain.reset()
while (!epochDone) {
var doReset = true
while (doReset && dataTrain.hasNext) {
val dataBatch = dataTrain.next()
data.set(dataBatch.data(0))
label.set(dataBatch.label(0))
executor.forward(isTrain = true)
executor.backward()
paramsGrads.foreach { case (idx, name, grad, optimState) =>
opt.update(idx, argDict(name), grad, optimState)
}
// evaluate at end, so out_cpu_array can lazy copy
evalMetric.update(dataBatch.label, executor.outputs)
nBatch += 1
batchEndCallback.invoke(epoch, nBatch, evalMetric)
dataBatch.dispose()
}
if (doReset) {
dataTrain.reset()
}
// this epoch is done
epochDone = true
}
var (name, value) = evalMetric.get
println(s"Epoch[$epoch] Train-$name=$value")
val toc = System.currentTimeMillis
println(s"Epoch[$epoch] Time cost=${toc - tic}")
//VALIDATION
evalMetric.reset()
dataTest.reset()
// TODO: make DataIter implement Iterator
while (dataTest.hasNext) {
val evalBatch = dataTest.next()
data.set(evalBatch.data(0))
label.set(evalBatch.label(0))
executor.forward(isTrain = false)
evalMetric.update(evalBatch.label, executor.outputs)
evalBatch.dispose()
}
val (name_eval, value_eval) = evalMetric.get
println(s"Epoch[$epoch] Validation-$name_eval=$value_eval")
// epochEndCallback.invoke(epoch, symbol, argDict, auxDict)
}
executor.dispose()
}
def training_model(){
val batch_size = 100
val vocab_size = 10000
val feature_size = 100
val num_label = 6
val data_train = new DataIter_(100000,batch_size,feature_size,vocab_size)
val data_test = new DataIter_(1000,batch_size,feature_size,vocab_size)
val network = get_net(vocab_size)
val devs = Context.gpu(0)
val models = new FeedForward(symbol = network,ctx = devs,
numEpoch = 8,optimizer = new SGD(learningRate = 0.05f,momentum=0.9f,wd = 0.0001f),
initializer = new Xavier(factorType = "in", magnitude = 2.34f))
models.fit(trainData = data_train,evalData = data_test,evalMetric = new Accuracy(),
kvStoreType = "local",epochEndCallback = null, batchEndCallback = new Callback.Speedometer(batch_size, 50))
}
def get_net(vocab_size:Int):Symbol = {
val data = Symbol.Variable("data")
val label = Symbol.Variable("label")
val embed = Symbol.FullyConnected()(Map("data" -> data, "num_hidden" -> 100))
// val act1 = Symbol.Activation(name = "relu1")(Map("data" -> embed, "act_type" -> "sigmoid"))
// val fc2 = Symbol.FullyConnected(name = "fc2")(Map("data" -> act1, "num_hidden" -> 100))
// val act2 = Symbol.Activation(name = "relu2")(Map("data" -> fc2, "act_type" -> "sigmoid"))
val pred = Symbol.FullyConnected()(Map("data" -> embed , "num_hidden" -> vocab_size))
val sm = Symbol.SoftmaxOutput("sm")(Map("data"->pred,"label"->label))
sm
}
}
/**
* @author liuxianggen
* @date 20150911
* @brief a synthetic data iterator for the toy softmax experiment
* @param count: the total number of samples
* @param batch_size: the number of samples per batch
* @param feature_size: the length of each feature vector
* @param vocab_size: the number of label classes
*/
class DataIter_(count:Int,batch_size:Int,feature_size:Int,vocab_size: Int) extends DataIter {
/**
* author liuxianggen
* brief generates one (feature, label) sample, where the feature is a sparse
* binary vector and the label is a deterministic function of it
* return: the feature array and its label
*/
def mock_sample :(Array[Float],Float) = {
val ret = Array.fill[Float](feature_size)(0f)
var rn = Set[Int]()
while(rn.size<3){
rn = rn + scala.util.Random.nextInt(feature_size-1)
}
var s = 0
rn.foreach { x => {
ret(x)= 1.0f
s *= feature_size
s += x
}}
(ret, (s % vocab_size).toFloat)
}
private var idx = 0
override def batchSize: Int = batch_size
/**
* the index of current batch
* @return
*/
override def getIndex(): IndexedSeq[Long] = IndexedSeq[Long]()
// The name and shape of data provided by this iterator
override def provideData: Map[String, Shape] = Map("data"->Shape(batch_size,feature_size))
/**
* get the number of padding examples
* in current batch
* @return number of padding examples in current batch
*/
override def getPad(): Int = 0
// The name and shape of label provided by this iterator
override def provideLabel: Map[String, Shape] = Map("label"->Shape(batch_size))
val datas = (0 until (count/batch_size)).map(x =>{
val mock_samples = (0 until batch_size).map(i =>{
mock_sample
}).toArray
val data_arr = mock_samples.map(_._1).foldLeft(Array[Float]())(_ ++ _)
val label = NDArray.array(mock_samples.map(_._2),Shape(batch_size))
val data =NDArray.array(data_arr,Shape(batch_size,feature_size))
(data,label)
}).toArray
// println(s"DataIter_ batches:${datas.length}")
/**
* incorrect template, kept for reference: returning the cached NDArrays
* directly would let a later DataBatch.dispose() free the shared buffers
*/
// override def next(): DataBatch = {
// val tempidx = idx
// idx += 1
// datas(tempidx)
// }
override def next(): DataBatch = {
val tempidx = idx
idx += 1
val (data,label) = datas(tempidx)
// new DataBatch(IndexedSeq(data),IndexedSeq(label),getIndex(),getPad())//error expression
new DataBatch(IndexedSeq(data.copy()),IndexedSeq(label.copy()),getIndex(),getPad())
}
override def reset(): Unit = {
idx = 0
}
override def hasNext: Boolean = idx < datas.length
/**
* get data of current batch
* @return the data of current batch
*/
override def getData(): IndexedSeq[NDArray] = IndexedSeq(datas(idx)._1)
/**
* Get label of current batch
* @return the label of current batch
*/
override def getLabel(): IndexedSeq[NDArray] = IndexedSeq(datas(idx)._2)
}
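// Hedged usage sketch for DataIter_ above; `DataIterExample` is a
// hypothetical demo object that consumes batches the same way training_DIY does.
object DataIterExample {
def main(args: Array[String]): Unit = {
val it = new DataIter_(1000, 100, 100, 10) // count, batch_size, feature_size, vocab_size
while (it.hasNext) {
val batch = it.next() // data: Shape(100, 100), label: Shape(100)
batch.dispose() // safe: next() hands out copies
}
it.reset()
}
}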
|
Liuxg16/BrainMatrix
|
scalakernel/src/main/java/thu/brainmatrix/char_rnn_symbol/InferCharModel.scala
|
package thu.brainmatrix.char_rnn_symbol
import thu.brainmatrix.Context
import thu.brainmatrix.NDArray
import thu.brainmatrix.Shape
import Config._
class InferCharModel(numLstmLayer: Int, n_alphabet: Int, numHidden: Int,
numEmbed: Int, argParams: Map[String, NDArray],
ctx: Context = Context.cpu(), dropout: Float = 0f) {
private val symbol = Lstm.LSTM_forward(numLstmLayer,SEQ_LENGTH, numHidden, numEmbed, n_alphabet, DROPOUT)
private val batchSize = 1
val initC = (for (l <- 0 until LSTM_N_LAYER)
yield (s"l${l}_init_c" -> Shape(batchSize, DIM_HIDDEN))).toMap
val initH = (for (l <- 0 until LSTM_N_LAYER)
yield (s"l${l}_init_h" -> Shape(batchSize, DIM_HIDDEN))).toMap
val dataShape = Map("data" -> Shape(batchSize,n_alphabet),"label" -> Shape(batchSize,1))
private val inputShape = initC ++ initH ++ dataShape
private val executor = symbol.simpleBind(ctx = ctx, shapeDict = inputShape)
for (key <- this.executor.argDict.keys) {
if (!inputShape.contains(key) && argParams.contains(key) && key != "softmax_label") {
argParams(key).copyTo(this.executor.argDict(key))
}
}
private var stateName = (Array[String]() /: (0 until numLstmLayer)) { (acc, i) =>
acc :+ s"l${i}_init_c" :+ s"l${i}_init_h"
}
private val statesDict = stateName.zip(this.executor.outputs.drop(1)).toMap
private val inputArr = NDArray.zeros(dataShape("data"))
def forward(inputData: NDArray, newSeq: Boolean = false): Array[Float] = {
if (newSeq) {
for (key <- this.statesDict.keys) {
this.executor.argDict(key).set(0f)
}
}
inputData.copyTo(this.executor.argDict("data"))
this.executor.forward()
for (key <- this.statesDict.keys) {
this.statesDict(key).copyTo(this.executor.argDict(key))
}
val prob = this.executor.outputs(0).toArray
prob
}
def dispose(){
this.executor.dispose()
}
}
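// Hedged usage sketch (kept in comments: `argParams` must come from a
// previously trained char-rnn checkpoint, and its loading is elided here).
// Feeds one one-hot encoded character and reads back the distribution over
// the next character; n_alphabet = 65 is an illustrative vocabulary size.
// val model = new InferCharModel(LSTM_N_LAYER, 65, DIM_HIDDEN, DIM_EMBED, argParams)
// val input = NDArray.zeros(Shape(1, 65)) // one-hot encoding of the current char
// val prob: Array[Float] = model.forward(input, newSeq = true)
// model.dispose()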
|
Liuxg16/BrainMatrix
|
scalakernel/src/main/java/thu/brainmatrix/io/FileIO.scala
|
package thu.brainmatrix.io
import org.apache.http.Header;
import org.apache.http.HttpResponse;
import java.io.BufferedInputStream
import java.io.BufferedOutputStream
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.net.URL;
import java.net.URLConnection;
import java.net.HttpURLConnection;
object FileIO {
/**
* Download a remote file and save it locally.
* @param remoteFilePath the path of the remote file
* @param localFilePath the path of the local file
*/
def downloadFile(remoteFilePath:String , localFilePath:String )
{
var urlfile:URL = null;
var httpUrl:HttpURLConnection = null;
var bis:BufferedInputStream = null;
var bos:BufferedOutputStream = null;
var f : File = new File(localFilePath);
try
{
urlfile = new URL(remoteFilePath);
// cast the URLConnection to an HttpURLConnection
httpUrl =(urlfile.openConnection()).asInstanceOf[HttpURLConnection]
httpUrl.connect();
bis = new BufferedInputStream(httpUrl.getInputStream());
bos = new BufferedOutputStream(new FileOutputStream(f));
var len : Int = 20480000;
var b:Array[Byte] = Array.fill[Byte](len)('\0')
len = bis.read(b)
while (len!= -1)
{
// println(len)
bos.write(b, 0, len);
len = bis.read(b)
}
bos.flush();
bis.close();
httpUrl.disconnect();
}
catch
{
case ex: Exception => {
ex.printStackTrace();
sys.exit(1)
}
}
finally
{
try
{
bis.close();
bos.close();
}
catch
{
case e:Exception =>
e.printStackTrace();
}
}
}
def main(args:Array[String]){
downloadFile("http://data.mxnet.io/data/cifar10/cifar10_val.rec","./data/cifar10_val.rec")
}
}
|
Liuxg16/BrainMatrix
|
scalakernel/src/main/java/thu/brainmatrix/utilSuite/OpencvSuite.scala
|
package thu.brainmatrix.utilSuite
//
//
//import org.opencv.core.Core
//import org.opencv.highgui.Highgui
//import org.opencv.imgproc.Imgproc
//import org.opencv.core.Mat
//import org.opencv.core.CvType
//import org.opencv.core.MatOfInt
//import org.opencv.core.MatOfFloat
//
//import scala.collection.mutable.ArrayBuffer
//
//
class OpencvSuite{
//
//
// test("opencv test"){
// System.loadLibrary( Core.NATIVE_LIBRARY_NAME );
//
// // read the image without changing its original information
// val m = Highgui.imread("./data/cat.jpg",Highgui.CV_LOAD_IMAGE_COLOR);
//
// // convert the image to grayscale
// val gray = new Mat(m.size(),CvType.CV_8UC1);
// Imgproc.cvtColor(m,gray,Imgproc.COLOR_RGB2GRAY);
//
// // compute the grayscale histogram
// val images = new java.util.ArrayList[Mat]()
//// var images = new ArrayBuffer[Mat](); //List<Mat>
// images.add(gray);
//
// val channels= new MatOfInt(0);
// val histSize = new MatOfInt(256);
// val ranges= new MatOfFloat(0,256);
// val hist = new Mat
// Imgproc.calcHist(images, channels, new Mat(), hist, histSize, ranges);
//
// // sum the elements of the Mat
// System.out.println(Core.sumElems(hist));
//
// // save the converted image
// Highgui.imwrite("output/cat.png",gray);
//
// }
//
//
//
//
//
}
|
Liuxg16/BrainMatrix
|
scala-package/core/src/main/scala/ml/dmlc/mxnet/Base.scala
|
package ml.dmlc.mxnet
import ml.dmlc.mxnet.util.NativeLibraryLoader
import org.slf4j.{LoggerFactory, Logger}
object Base {
private val logger: Logger = LoggerFactory.getLogger("MXNetJVM")
// type definitions
class RefInt(val value: Int = 0)
class RefLong(val value: Long = 0)
class RefFloat(val value: Float = 0)
class RefString(val value: String = null)
type MXUint = Int
type MXFloat = Float
type CPtrAddress = Long
type NDArrayHandle = CPtrAddress
type FunctionHandle = CPtrAddress
type DataIterHandle = CPtrAddress
type DataIterCreator = CPtrAddress
type KVStoreHandle = CPtrAddress
type ExecutorHandle = CPtrAddress
type SymbolHandle = CPtrAddress
type MXUintRef = RefInt
type MXFloatRef = RefFloat
type NDArrayHandleRef = RefLong
type FunctionHandleRef = RefLong
type DataIterHandleRef = RefLong
type DataIterCreatorRef = RefLong
type KVStoreHandleRef = RefLong
type ExecutorHandleRef = RefLong
type SymbolHandleRef = RefLong
try {
try {
tryLoadLibraryOS("mxnet-scala")
} catch {
case e: UnsatisfiedLinkError =>
logger.warn("MXNet Scala native library not found in path. " +
"Copying native library from the archive. " +
"Consider installing the library somewhere in the path " +
"(for Windows: PATH, for Linux: LD_LIBRARY_PATH), " +
"or specifying by Java cmd option -Djava.library.path=[lib path]." +
"Exception:", e)
NativeLibraryLoader.loadLibrary("mxnet-scala")
}
} catch {
case e: UnsatisfiedLinkError =>
logger.error("Couldn't find native library mxnet-scala")
throw e
}
val _LIB = new LibInfo
checkCall(_LIB.nativeLibInit())
// TODO: shutdown hook won't work on Windows
Runtime.getRuntime.addShutdownHook(new Thread() {
override def run(): Unit = {
notifyShutdown()
}
})
@throws(classOf[UnsatisfiedLinkError])
private def tryLoadLibraryOS(libname: String): Unit = {
try {
logger.info(s"Try loading $libname from native path.")
System.loadLibrary(libname)
} catch {
case e: UnsatisfiedLinkError =>
logger.warn("Failed to load from native path. Exception:", e)
val os = System.getProperty("os.name")
// ref: http://lopica.sourceforge.net/os.html
if (os.startsWith("Linux")) {
tryLoadLibraryXPU(libname, "linux-x86_64")
} else if (os.startsWith("Mac")) {
tryLoadLibraryXPU(libname, "osx-x86_64")
} else {
// TODO(yizhi) support windows later
throw new UnsatisfiedLinkError()
}
}
}
@throws(classOf[UnsatisfiedLinkError])
private def tryLoadLibraryXPU(libname: String, arch: String): Unit = {
try {
// try gpu first
logger.info(s"Try loading $libname-$arch-gpu from native path.")
System.loadLibrary(s"$libname-$arch-gpu")
} catch {
case e: UnsatisfiedLinkError =>
logger.info(s"Try loading $libname-$arch-cpu from native path.")
System.loadLibrary(s"$libname-$arch-cpu")
}
}
// helper function definitions
/**
* Check the return value of a C API call.
*
* This function raises an exception when an error occurs.
* Wrap every API call with this function.
* @param ret return value from API calls
*/
def checkCall(ret: Int): Unit = {
if (ret != 0) {
throw new MXNetError(_LIB.mxGetLastError())
}
}
// Notify MXNet about a shutdown
private def notifyShutdown(): Unit = {
checkCall(_LIB.mxNotifyShutdown())
}
// Convert ctypes returned doc string information into parameters docstring.
def ctypes2docstring(
argNames: Seq[String],
argTypes: Seq[String],
argDescs: Seq[String]): String = {
val params =
(argNames zip argTypes zip argDescs) map { case ((argName, argType), argDesc) =>
val desc = if (argDesc.isEmpty) "" else s"\n$argDesc"
s"$argName : $argType$desc"
}
s"Parameters\n----------\n${params.mkString("\n")}\n"
}
}
class MXNetError(val err: String) extends Exception(err)
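// Hedged sketch of the two helpers above (kept in comments; `_LIB` is
// package-visible, so this would live inside ml.dmlc.mxnet):
// Base.checkCall(Base._LIB.mxRandomSeed(0)) // a non-zero return raises MXNetError
// println(Base.ctypes2docstring(
//   Seq("data", "label"), Seq("NDArray", "NDArray"), Seq("input batch", "")))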
|
Liuxg16/BrainMatrix
|
scalakernel/src/main/java/thu/brainmatrix/LibInfo.scala
|
package thu.brainmatrix
import thu.brainmatrix.Base._
import scala.collection.mutable.{ArrayBuffer, ListBuffer}
/**
* JNI functions
* @author <NAME>
*/
class LibInfo {
@native def nativeLibInit(): Int
// NDArray
@native def mxNDArrayFree(handle: NDArrayHandle): Int
@native def mxGetLastError(): String
@native def mxNDArrayCreateNone(out: NDArrayHandleRef): Int
@native def mxNDArrayCreate(shape: Array[Int],
ndim: Int,
devType: Int,
devId: Int,
delayAlloc: Int,
out: NDArrayHandleRef): Int
@native def mxNDArrayWaitAll(): Int
@native def mxNDArrayWaitToRead(handle: NDArrayHandle): Int
@native def mxListFunctions(functions: ListBuffer[FunctionHandle]): Int
@native def mxFuncDescribe(handle: FunctionHandle,
nUsedVars: MXUintRef,
nScalars: MXUintRef,
nMutateVars: MXUintRef,
typeMask: Base.RefInt): Int
@native def mxFuncGetInfo(handle: FunctionHandle,
name: RefString,
desc: RefString,
numArgs: MXUintRef,
argNames: ListBuffer[String],
argTypes: ListBuffer[String],
argDescs: ListBuffer[String]): Int
@native def mxFuncInvoke(function: FunctionHandle,
useVars: Array[NDArrayHandle],
scalarArgs: Array[MXFloat],
mutateVars: Array[NDArrayHandle]): Int
@native def mxFuncInvokeEx(function: FunctionHandle,
useVars: Array[NDArrayHandle],
scalarArgs: Array[MXFloat],
mutateVars: Array[NDArrayHandle],
numParams: Int,
paramKeys: Array[Array[Byte]],
paramVals: Array[Array[Byte]]): Int
@native def mxNDArrayGetShape(handle: NDArrayHandle,
ndim: MXUintRef,
data: ArrayBuffer[Int]): Int
@native def mxNDArraySyncCopyToCPU(handle: NDArrayHandle,
data: Array[MXFloat],
size: Int): Int
@native def mxNDArraySlice(handle: NDArrayHandle,
start: MXUint,
end: MXUint,
sliceHandle: NDArrayHandleRef): Int
@native def mxNDArrayReshape(handle: NDArrayHandle,
nDim: Int,
dims: Array[Int],
reshapeHandle: NDArrayHandleRef): Int
@native def mxNDArraySyncCopyFromCPU(handle: NDArrayHandle,
source: Array[MXFloat],
size: Int): Int
@native def mxNDArrayLoad(fname: String,
outSize: MXUintRef,
handles: ArrayBuffer[NDArrayHandle],
outNameSize: MXUintRef,
names: ArrayBuffer[String]): Int
@native def mxNDArraySave(fname: String,
handles: Array[NDArrayHandle],
keys: Array[String]): Int
@native def mxNDArrayGetContext(handle: NDArrayHandle, devTypeId: Base.RefInt, devId: Base.RefInt): Int
@native def mxNDArraySaveRawBytes(handle: NDArrayHandle, buf: ArrayBuffer[Byte]): Int
@native def mxNDArrayLoadFromRawBytes(bytes: Array[Byte], handle: NDArrayHandleRef): Int
// KVStore Server
@native def mxInitPSEnv(keys: Array[String], values: Array[String]): Int
@native def mxKVStoreRunServer(handle: KVStoreHandle, controller: KVServerControllerCallback): Int
// KVStore
@native def mxKVStoreCreate(name: String, handle: KVStoreHandleRef): Int
@native def mxKVStoreInit(handle: KVStoreHandle,
len: MXUint,
keys: Array[Int],
values: Array[NDArrayHandle]): Int
@native def mxKVStorePush(handle: KVStoreHandle,
len: MXUint,
keys: Array[Int],
values: Array[NDArrayHandle],
priority: Int): Int
@native def mxKVStorePull(handle: KVStoreHandle,
len: MXUint,
keys: Array[Int],
outs: Array[NDArrayHandle],
priority: Int): Int
@native def mxKVStoreSetUpdater(handle: KVStoreHandle, updaterFunc: MXKVStoreUpdater): Int
@native def mxKVStoreIsWorkerNode(isWorker: RefInt): Int
@native def mxKVStoreGetType(handle: KVStoreHandle, kvType: RefString): Int
@native def mxKVStoreSendCommmandToServers(handle: KVStoreHandle,
head: Int, body: String): Int
@native def mxKVStoreBarrier(handle: KVStoreHandle): Int
@native def mxKVStoreGetGroupSize(handle: KVStoreHandle, size: RefInt): Int
@native def mxKVStoreGetRank(handle: KVStoreHandle, size: RefInt): Int
@native def mxKVStoreFree(handle: KVStoreHandle): Int
// DataIter Funcs
@native def mxListDataIters(handles: ListBuffer[DataIterCreator]): Int
@native def mxDataIterCreateIter(handle: DataIterCreator,
keys: Array[String],
vals: Array[String],
out: DataIterHandleRef): Int
@native def mxDataIterGetIterInfo(creator: DataIterCreator,
name: RefString,
description: RefString,
argNames: ListBuffer[String],
argTypeInfos: ListBuffer[String],
argDescriptions: ListBuffer[String]): Int
@native def mxDataIterFree(handle: DataIterHandle): Int
@native def mxDataIterBeforeFirst(handle: DataIterHandle): Int
@native def mxDataIterNext(handle: DataIterHandle, out: RefInt): Int
@native def mxDataIterGetLabel(handle: DataIterHandle,
out: NDArrayHandleRef): Int
@native def mxDataIterGetData(handle: DataIterHandle,
out: NDArrayHandleRef): Int
@native def mxDataIterGetIndex(handle: DataIterHandle,
outIndex: ListBuffer[Long],
outSize: RefLong): Int
@native def mxDataIterGetPadNum(handle: DataIterHandle,
out: MXUintRef): Int
// Executors
@native def mxExecutorOutputs(handle: ExecutorHandle, outputs: ArrayBuffer[NDArrayHandle]): Int
@native def mxExecutorFree(handle: ExecutorHandle): Int
@native def mxExecutorForward(handle: ExecutorHandle, isTrain: Int): Int
@native def mxExecutorBackward(handle: ExecutorHandle,
grads: Array[NDArrayHandle]): Int
@native def mxExecutorPrint(handle: ExecutorHandle, debugStr: RefString): Int
@native def mxExecutorSetMonitorCallback(handle: ExecutorHandle, callback: MXMonitorCallback): Int
// Symbols
@native def mxSymbolListAtomicSymbolCreators(symbolList: ListBuffer[SymbolHandle]): Int
@native def mxSymbolGetAtomicSymbolInfo(handle: SymbolHandle,
name: RefString,
desc: RefString,
numArgs: MXUintRef,
argNames: ListBuffer[String],
argTypes: ListBuffer[String],
argDescs: ListBuffer[String],
keyVarNumArgs: RefString): Int
@native def mxSymbolCreateAtomicSymbol(handle: SymbolHandle,
paramKeys: Array[String],
paramVals: Array[String],
symHandleRef: SymbolHandleRef): Int
@native def mxSymbolSetAttr(handle: SymbolHandle, key: String, value: String): Int
@native def mxSymbolCompose(handle: SymbolHandle,
name: String,
keys: Array[String],
args: Array[SymbolHandle]): Int
@native def mxSymbolCreateVariable(name: String, out: SymbolHandleRef): Int
@native def mxSymbolGetAttr(handle: SymbolHandle,
key: String,
ret: RefString,
success: RefInt): Int
@native def mxSymbolListArguments(handle: SymbolHandle,
arguments: ArrayBuffer[String]): Int
@native def mxSymbolCopy(handle: SymbolHandle, clonedHandle: SymbolHandleRef): Int
@native def mxSymbolListAuxiliaryStates(handle: SymbolHandle,
arguments: ArrayBuffer[String]): Int
@native def mxSymbolListOutputs(handle: SymbolHandle,
outputs: ArrayBuffer[String]): Int
@native def mxSymbolCreateGroup(handles: Array[SymbolHandle], out: SymbolHandleRef): Int
@native def mxSymbolPrint(handle: SymbolHandle, str: RefString): Int
@native def mxSymbolGetInternals(handle: SymbolHandle, out: SymbolHandleRef): Int
@native def mxSymbolInferType(handle: SymbolHandle,
keys: Array[String],
sdata: Array[Int],
argTypeData: ListBuffer[Int],
outTypeData: ListBuffer[Int],
auxTypeData: ListBuffer[Int],
complete: RefInt): Int
@native def mxSymbolInferShape(handle: SymbolHandle,
numArgs: MXUint,
keys: Array[String],
argIndPtr: Array[MXUint],
argShapeData: Array[MXUint],
inShapeData: ListBuffer[Array[Int]],
outShapeData: ListBuffer[Array[Int]],
auxShapeData: ListBuffer[Array[Int]],
complete: RefInt): Int
@native def mxSymbolGetOutput(handle: SymbolHandle, index: Int, out: SymbolHandleRef): Int
@native def mxSymbolSaveToJSON(handle: SymbolHandle, out: RefString): Int
@native def mxSymbolCreateFromJSON(json: String, handle: SymbolHandleRef): Int
// scalastyle:off parameterNum
@native def mxExecutorBindX(handle: SymbolHandle,
deviceTypeId: Int,
deviceID: Int,
numCtx: Int,
ctxMapKeys: Array[String],
ctxMapDevTypes: Array[Int],
ctxMapDevIDs: Array[Int],
numArgs: Int,
argsHandle: Array[NDArrayHandle],
argsGradHandle: Array[NDArrayHandle],
reqsArray: Array[Int],
auxArgsHandle: Array[NDArrayHandle],
out: ExecutorHandleRef): Int
@native def mxExecutorBindEX(handle: SymbolHandle,
deviceTypeId: Int,
deviceID: Int,
numCtx: Int,
ctxMapKeys: Array[String],
ctxMapDevTypes: Array[Int],
ctxMapDevIDs: Array[Int],
numArgs: Int,
argsHandle: Array[NDArrayHandle],
argsGradHandle: Array[NDArrayHandle],
reqsArray: Array[Int],
auxArgsHandle: Array[NDArrayHandle],
sharedExec: ExecutorHandle,
out: ExecutorHandleRef): Int
// scalastyle:on parameterNum
@native def mxSymbolSaveToFile(handle: SymbolHandle, fname: String): Int
@native def mxSymbolCreateFromFile(fname: String, handle: SymbolHandleRef): Int
@native def mxSymbolFree(handle: SymbolHandle): Int
// Random
@native def mxRandomSeed(seed: Int): Int
@native def mxNotifyShutdown(): Int
/**
* by liuxianggen
* 2016-3-9
*/
@native def mxScalaOpListArguments(handle: SymbolHandle,arguments: ArrayBuffer[String]):Int
/**
* by liuxianggen
* 2016-4-9
*/
@native def mxScalaOpListAuxiliaryStates(handle: SymbolHandle,arguments: ArrayBuffer[String]):Int
/**
* by liuxianggen
* 2016-3-9
*/
@native def mxScalaOpInit(handle:OperatorPropertyHandle,
paramKeys: Array[String],paramVals: Array[String]):Int
@native def mxScalaOpPrintParam(handle:OperatorPropertyHandle):Int
@native def mxScalaCreateOperatorProperty(handle:ScalaSymbolHandle,opHandleRef:OperatorPropertyHandleRef):Int
/**
* @author liuxianggen
* @date 20160707
* @brief get the return of NumVisibleOutputs on op
* @param OperatorPropertyHandle
* @param MXUintRef
* @return the NumVisibleOutputs
* @note
*/
@native def mxScalaOpNumVisibleOutputs(handle:OperatorPropertyHandle,num: MXUintRef):Int
// @native def mxScalaSymbolInferShape(handle: ScalaSymbolHandle,
// numArgs: MXUint,
// keys: Array[String],
// argIndPtr: Array[MXUint],
// argShapeData: Array[MXUint],
// inShapeData: ListBuffer[Array[Int]],
// outShapeData: ListBuffer[Array[Int]],
// auxShapeData: ListBuffer[Array[Int]],
// complete: RefInt): Int
@native def mxScalaOPCopy(handle:OperatorPropertyHandle,opHandleRef:OperatorPropertyHandleRef):Int
@native def mxScalaToStaticGraph(handleref:StaticGraphHandleRef,arg_node_sg:Array[Int],heads_source:Array[Int],heads_index:Array[Int],nods_opHandles:Array[OperatorPropertyHandle],nods_name_len:Int,nods_name:Array[String],
nods_inputs_len_arr:Array[Int] ,nods_inputs_source_ids:Array[Int],nods_inputs_indexs:Array[Int],nods_backward_source_ids:Array[Int],nods_attr_len_arr:Array[Int],nods_attr_len_arr_len:Int,nods_attrs_keys:Array[String],nods_attrs_values:Array[String]):Int
/**
* @author liuxianggen
* @date 20160724
 * @brief all the global information needed for shape inference is passed in here
 * @param handle: the id of the StaticGraph
 * @param num_arg_nodes: the number of all arg_nodes, which are always variables
 * @param numArgs: the number of input arg_nodes
 * @param keys: an array containing the ids of the input arg_nodes
 * @param argIndPtr: an array containing the shape sizes of the input arg_nodes, in the conventional order
 * @param argShapeData: an array containing the shapes of the input arg_nodes, in the conventional order
 * @param inShapeData: the input shapes of the symbol, written by jni
 * @param outShapeData: the output shapes of the symbol, written by jni
 * @param auxShapeData: the auxiliary shapes of the symbol, written by jni
 * @param complete: a completion flag, written by jni
* @return
* @example
* @note
*/
@native def mxScalaSGInferShape(handle:StaticGraphHandle, num_arg_nodes:MXUint, numArgs: MXUint,keys: Array[MXUint],argIndPtr: Array[MXUint],argShapeData: Array[MXUint],
inShapeData: ListBuffer[Array[Int]],outShapeData: ListBuffer[Array[Int]],auxShapeData: ListBuffer[Array[Int]],complete: RefInt):Int
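// Illustrative call sketch (not part of the original source; see StaticGraph.inferShape for the real caller).
// Assume one known input variable at arg_node position 0 with shape (32, 784) -- hypothetical numbers:
//   val inShapes, outShapes, auxShapes = scala.collection.mutable.ListBuffer.empty[Array[Int]]
//   val complete = new RefInt
//   checkCall(_LIB.mxScalaSGInferShape(graph.handle, graph.arg_nodes.size, 1,
//     Array(0), Array(0, 2), Array(32, 784), inShapes, outShapes, auxShapes, complete))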
@native def mxScalaExecutorBindX(handle: StaticGraphHandle,
deviceTypeId: Int,
deviceID: Int,
numCtx: Int,
ctxMapKeys: Array[String],
ctxMapDevTypes: Array[Int],
ctxMapDevIDs: Array[Int],
numArgs: Int,
argsHandle: Array[NDArrayHandle],
argsGradHandle: Array[NDArrayHandle],
reqsArray: Array[Int],
auxArgsHandle: Array[NDArrayHandle],
out: ExecutorHandleRef): Int
/**
* NDArray operators
* by liuxianggen
* 2016-4-4
*
*/
@native def mxNDArrayGetData(handle: NDArrayHandle,data_result: MXFloatRef, index: MXUint): Int // has a bug, take care
@native def mxNDArraySetData(handle: NDArrayHandle,data_source: MXFloat, index: MXUint): Int
/**
* by liuxianggen
* 20160729
*/
@native def mxScalaSymbolSaveToFile(handle: StaticGraphHandle, fname: String): Int
@native def mxScalaSymbolCreateFromFile(fname: String, handle: StaticGraphHandleRef): Int
@native def mxStaticGraphFree(handle: StaticGraphHandle): Int
@native def mxStaticGraphSaveToJSON(handle: StaticGraphHandle, out: RefString): Int
}
|
Liuxg16/BrainMatrix
|
scalakernel/src/main/java/thu/brainmatrix/synapse_symbol/Model.scala
|
package thu.brainmatrix.synapse_symbol
import thu.brainmatrix.NDArray
import thu.brainmatrix.Context
import thu.brainmatrix.Shape
import thu.brainmatrix.Symbol
class Model(val ctx:Context) {
var modules = Vector[Module]();
var indices = Vector[Array[Int]]();
var variables:Array[String] = Array[String]()
var varNumber :Int = 0;
var initialMap = Map[String,NDArray]()
var symbolMap = Map[String,NDArray]()
var initialVector = Vector[NDArray]();
var initialName = Vector[String]()
var model_sym:Symbol = null
def addModule(module:Module){
//add modules
this.modules :+= (module);
// set indices in each module
module.setIndices(this.varNumber);
// update the number of variable number
this.varNumber += module.getVarNumber();
//add initial numbers
for(i <- 0 until module.getInitialY().length){
initialVector :+= (module.getInitialY()(i));
initialName :+= module.getInitialVar()(i)
}
// add the variable indices
this.indices :+= (module.getVarIndices());
this.symbolMap ++= module.getSymbolMap()
//add initial numbers
this.initialMap ++= module.getInitial()
}
def update():Symbol = {
val t_onehot = Symbol.CreateVariable("t_onehot")
val y = (for(i<- 0 until this.varNumber) yield {
Symbol.CreateVariable(s"y$i")
}).toArray
// val y = Array.fill[Symbol](this.varNumber)(Symbol.CreateVariable("y0"))
var yDot:Array[Symbol] = y
for(i <- 0 until this.modules.length){
yDot = this.modules(i).update(t_onehot, y, yDot,this.modules(i).getVarIndices());
}
this.model_sym = Symbol.Group(yDot:_*)
this.model_sym
}
def getInitialMap(): Map[String,NDArray] = {
val vec =
this.initialVector zip this.initialName map{case(x,y)=>{
(y->x)
}}
vec.toMap
}
def getInitialY():Array[NDArray] = {
// flatten the per-module variable indices and gather the matching initial values
val flatIndices = this.indices.flatten
flatIndices.map(this.initialVector(_)).toArray
}
def printIndices(){
for(i <- 0 until this.indices.length){
for(j<- 0 until this.indices(i).length){
System.out.print(this.indices(i)(j)+" ");
}
System.out.println();
}
}
def printVarsName(){
for(i <- 0 until this.indices.length){
var module = this.modules(i);
for(j<- 0 until module.getVarsName().length){
System.out.print(module.getVarsName()(j) + " ");
}
System.out.println();
}
}
}
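// Usage sketch (illustrative, not part of the original source; Dendrite is one concrete Module in this repo):
//   val model = new Model(Context.cpu())
//   model.addModule(new Dendrite(Context.cpu(), "d0"))
//   val sym = model.update()        // grouped symbol holding the dy/dt expression of every state variable
//   val y0 = model.getInitialY()    // initial values, ordered by the collected variable indices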
|
Liuxg16/BrainMatrix
|
scala-package/spark/src/main/scala/ml/dmlc/mxnet/spark/io/LongLivingDataBatch.scala
|
package ml.dmlc.mxnet.spark.io
import ml.dmlc.mxnet.{NDArray, DataBatch}
/**
 * Disposed only when 'disposeForce' is called
 * @author <NAME>
 */
class LongLivingDataBatch(
override val data: IndexedSeq[NDArray],
override val label: IndexedSeq[NDArray],
override val index: IndexedSeq[Long],
override val pad: Int) extends DataBatch(data, label, index, pad) {
override def dispose(): Unit = {}
def disposeForce(): Unit = super.dispose()
}
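// Design note (illustrative usage, not part of the original source): dispose() is a no-op so the
// batch survives iterators that eagerly dispose their batches; release the memory explicitly:
//   val batch = new LongLivingDataBatch(data, label, index, pad)
//   batch.dispose()       // intentionally does nothing
//   batch.disposeForce()  // actually frees the underlying NDArrays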
|
Liuxg16/BrainMatrix
|
scalakernel/src/main/java/thu/brainmatrix/Shape.scala
|
<filename>scalakernel/src/main/java/thu/brainmatrix/Shape.scala
package thu.brainmatrix
/**
* Shape of [[NDArray]] or other data
* @author <NAME>
*/
class Shape(dims: Traversable[Int]) extends Serializable {
private val shape = dims.toVector
def this(dims: Int*) = {
this(dims.toVector)
}
def apply(dim: Int): Int = shape(dim)
def size: Int = shape.size
def length: Int = shape.length
def drop(dim: Int): Shape = new Shape(shape.drop(dim))
def slice(from: Int, end: Int): Shape = new Shape(shape.slice(from, end))
def product: Int = shape.product
def head: Int = shape.head
def ++(other: Shape): Shape = new Shape(shape ++ other.shape)
def toArray: Array[Int] = shape.toArray
def toVector: Vector[Int] = shape
override def toString(): String = s"(${shape.mkString(",")})"
override def equals(o: Any): Boolean = o match {
case that: Shape =>
that != null && that.shape.sameElements(shape)
case _ => false
}
override def hashCode(): Int = {
shape.hashCode()
}
}
object Shape {
def apply(dims: Int *): Shape = new Shape(dims: _*)
def apply(dims: Traversable[Int]): Shape = new Shape(dims)
}
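// Usage sketch (illustrative, not part of the original source):
//   val s = Shape(2, 3, 4)
//   s.product                     // 24
//   s.drop(1)                     // (3,4)
//   s ++ Shape(5)                 // (2,3,4,5)
//   s == Shape(Vector(2, 3, 4))   // true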
|
Liuxg16/BrainMatrix
|
scala-package/core/src/main/scala/ml/dmlc/mxnet/KVStoreServer.scala
|
<reponame>Liuxg16/BrainMatrix
package ml.dmlc.mxnet
import ml.dmlc.mxnet.Base._
import org.slf4j.{Logger, LoggerFactory}
/**
* Server node for the key value store
* @author <NAME>
*/
class KVStoreServer(private val kvStore: KVStore) {
private val logger: Logger = LoggerFactory.getLogger(classOf[KVStoreServer])
private val handle: KVStoreHandle = kvStore.handle
private val controller = new KVServerControllerCallback {
override def invoke(cmdId: Int, cmdBody: String): Unit = {
logger.debug("Receive cmdId {}, cmdBody: {}", cmdId, cmdBody)
if (cmdId == 0) {
val optimizer = Serializer.getSerializer.deserialize[Optimizer](
Serializer.decodeBase64String(cmdBody))
kvStore.setOptimizer(optimizer)
} else {
logger.warn(s"Server ${kvStore.rank}, unknown command ($cmdId, $cmdBody)")
}
}
}
// run the server, whose behavior is like
// while receive(x):
// if is_command x: controller(x)
// else if is_key_value x: updater(x)
def run(): Unit = {
checkCall(_LIB.mxKVStoreRunServer(handle, controller))
}
}
object KVStoreServer {
// Start server/scheduler according to env variables
def start(): Unit = {
val isWorker = new RefInt
checkCall(_LIB.mxKVStoreIsWorkerNode(isWorker))
require(isWorker.value == 0, "cannot start kv-store server on worker node")
val kvStore = KVStore.create("dist")
val server = new KVStoreServer(kvStore)
server.run()
}
def init(env: Map[String, String]): Unit = {
val keys = env.keys.toArray
val vals = env.values.toArray
checkCall(_LIB.mxInitPSEnv(keys, vals))
}
}
trait KVServerControllerCallback {
def invoke(cmdId: Int, cmdBody: String): Unit
}
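// Usage sketch (illustrative, not part of the original source; the env keys below are assumptions):
//   KVStoreServer.init(Map("DMLC_ROLE" -> "server", "DMLC_PS_ROOT_URI" -> "127.0.0.1"))
//   KVStoreServer.start()  // blocks in the request loop described above run()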
|
Liuxg16/BrainMatrix
|
scalakernel/src/test/java/thu/brainmatrix/suite/TestNetwork.scala
|
package thu.brainmatrix.suite
import thu.brainmatrix.Base._
import thu.brainmatrix.StaticGraph
import thu.brainmatrix.Symbol
import thu.brainmatrix.NDArray
import thu.brainmatrix.Executor
import scala.collection.mutable.ArrayBuffer
import scala.collection.mutable.ListBuffer
import thu.brainmatrix.FeedForward
import thu.brainmatrix.Symbol
import thu.brainmatrix.Shape
import thu.brainmatrix.optimizer.SGD
import thu.brainmatrix.Context
import thu.brainmatrix.IO
import thu.brainmatrix.Random
import thu.brainmatrix.Context.ctx2Array
/**
* 2016-4-1
*/
object TestNetwork {
def main(args:Array[String]){
// simpleNNTest
// simpleNN_model
// simpleNNTest_mxnet
// simpleNNBackwardTest
// simpleNNBackwardTest_2
// simpleNNTrainingTest
// simpleBindingTest
// mlp_test
bindTest
}
def simpleNNForwardTest{
// val dataS = Symbol.CreateVariable("data")
//
// val kwargs_type = Map("name" -> "fc1", "num_hidden" -> "12")
// val sb:Symbol = Symbol.Create("FullyConnected",kwargs_type)
// val kwargs_symbol:Map[String,Symbol] = Map("data"->dataS)
// sb.Compose(kwargs_symbol, "fc1")
//
//// val sm= Symbol.Create("softmaxOutput", kwargs)
//
//// var out_graph= new StaticGraph()
// sb.ToStaticGraph()
// println(sb.staticGraph.debug)
// println("\n---------------------------------------------------")
//// checkCall(out_graph.ToStaticGraph)
// val kwargs_shape = Map("data"->Shape(200,15))
//
// val (argShapes, outShapes, auxShapes) = sb.inferShape1(sb.staticGraph,kwargs_shape)
// argShapes.foreach {println}
// outShapes.foreach {println}
//
//
// val data = NDArray.ones(Shape(200,15))
// val weight = NDArray.ones(Shape(12,15))//according to inferShape function
// val bias = NDArray.ones(Shape(12))//according to inferShape function
//// val label = NDArray.ones(Shape(200,12))
//
// val data_grad = NDArray.ones(Shape(200,15))
// val weight_grad = NDArray.ones(Shape(12,15))//according to inferShape function
// val bias_grad = NDArray.ones(Shape(12))//according to inferShape function
//
// val in_args: Array[NDArray] = Array(data, weight, bias)
// val arg_grad_store: Array[NDArray] = Array(data_grad, weight_grad, bias_grad)
// val grad_req_type: Array[Int] = Array(1,1,1)
//
//
// val ctxMapKeys = ArrayBuffer.empty[String]
// val ctxMapDevTypes = ArrayBuffer.empty[Int]
// val ctxMapDevIDs = ArrayBuffer.empty[Int]
//
// val execHandle = new ExecutorHandleRef
//
// println("---------------------binding-----------------------")
// checkCall(_LIB.mxScalaExecutorBindX(sb.staticGraph.handle,
// 1,//1
// 0,//0
// ctxMapKeys.size,//0
// ctxMapKeys.toArray,//null
// ctxMapDevTypes.toArray,//null
// ctxMapDevIDs.toArray,//null
// in_args.size,
// in_args.map(_.handle),
// arg_grad_store.map(_.handle),
// grad_req_type,
// new Array[NDArrayHandle](0),
// execHandle))
//
// println("---------------------executor-----------------------")
// val executor = new Executor(execHandle.value, null)
// println("---------------------froward-----------------------")
// executor.forward()
// println("---------------------output-----------------------")
// executor.outputs.foreach {println}
//
}
// succeed!
// def simpleNNBackwardTest{
// val dataS = Symbol.CreateVariable("data")
//
// val kwargs_type = Map("name" -> "fc1", "num_hidden" -> "6")
// val sb:Symbol = Symbol.Create("FullyConnected",kwargs_type)
// val kwargs_symbol:Map[String,Symbol] = Map("data"->dataS)
// sb.Compose(kwargs_symbol, "fc1")
//
//// val sm= Symbol.Create("softmaxOutput", kwargs)
//
// var out_graph= new StaticGraph()
// sb.ToStaticGraph()
// println(sb.staticGraph.debug)
// println("\n---------------------------------------------------")
//// checkCall(out_graph.ToStaticGraph)
// val kwargs_shape = Map("data"->Shape(15,10))
//
// val (argShapes, outShapes, auxShapes) = sb.inferShape1(sb.staticGraph,kwargs_shape)
// argShapes.foreach {println}
// outShapes.foreach {println}
//
//
// val data = NDArray.ones(Shape(15,10))
// val weight = NDArray.ones(Shape(6,10))//according to inferShape function
// val bias = NDArray.ones(Shape(6))//according to inferShape function
//// val label = NDArray.ones(Shape(200,12))
//
// val data_grad = NDArray.ones(Shape(15,10))
// val weight_grad = NDArray.ones(Shape(6,10))//according to inferShape function
// val bias_grad = NDArray.ones(Shape(6))//according to inferShape function
//
// val in_args: Array[NDArray] = Array(data, weight, bias)
// val arg_grad_store: Array[NDArray] = Array(data_grad, weight_grad, bias_grad)
//// val arg_grad_store: Array[NDArray] = Array(new NDArray(0), weight_grad, bias_grad)
// val grad_req_type: Array[Int] = Array(0,1,1)
//
//
// val ctxMapKeys = ArrayBuffer.empty[String]
// val ctxMapDevTypes = ArrayBuffer.empty[Int]
// val ctxMapDevIDs = ArrayBuffer.empty[Int]
//
// val execHandle = new ExecutorHandleRef
//
// println("---------------------binding-----------------------")
// checkCall(_LIB.mxScalaExecutorBindX(out_graph.handle,
// 1,//1
// 0,//0
// ctxMapKeys.size,//0
// ctxMapKeys.toArray,//null
// ctxMapDevTypes.toArray,//null
// ctxMapDevIDs.toArray,//null
// in_args.size,
// in_args.map(_.handle),
// arg_grad_store.map(_.handle),
// grad_req_type,
// new Array[NDArrayHandle](0),
// execHandle))
//
// println("---------------------executor-----------------------")
// val executor = new Executor(execHandle.value, null)
// println("---------------------froward-----------------------")
// executor.forward()
// println("---------------------output-----------------------")
//// executor.outputs.foreach {println}
// println(executor.outputs(0))
//
// println("---------------------backward-----------------------")
// val outGrad = Random.uniform(-10f, 10f, Shape(15,5))
// executor.backward(Array(outGrad))
// println(outGrad)
// println(data_grad)
////
// }
// succeed!
def simpleNNTrainingTest{
val num_instance = 10
val input_dim = 15
val dataS = Symbol.CreateVariable("data")
val hidden_1 = 6
val kwargs_type = Map("name" -> "fc1", "num_hidden" -> (""+hidden_1))
val sb:Symbol = Symbol.Create("FullyConnected",kwargs_type)
val kwargs_symbol:Map[String,Symbol] = Map("data"->dataS)
sb.Compose(kwargs_symbol, "fc1")
// val sm= Symbol.Create("softmaxOutput", kwargs)
sb.ToStaticGraph()
println(sb.staticGraph.debug)
sb.staticGraph.ToStaticGraph
// checkCall(out_graph.ToStaticGraph)
val kwargs_shape = Map("data"->Shape(num_instance,input_dim))
val (argShapes, outShapes, auxShapes) = sb.inferShape(kwargs_shape)
argShapes.foreach {println}
outShapes.foreach {println}
val data = NDArray.ones(Shape(num_instance,input_dim))
val weight = NDArray.ones(Shape(hidden_1,input_dim))//according to inferShape function
val bias = NDArray.ones(Shape(hidden_1))//according to inferShape function
println("\n---------------------------------------------------")
val data_grad = NDArray.ones(Shape(num_instance,input_dim))
val weight_grad = NDArray.ones(Shape(hidden_1,input_dim))//according to inferShape function
val bias_grad = NDArray.ones(Shape(hidden_1))//according to inferShape function
val in_args: Array[NDArray] = Array(data, weight, bias)
val arg_grad_store: Array[NDArray] = Array(data_grad, weight_grad, bias_grad)
// val arg_grad_store: Array[NDArray] = Array(new NDArray(0), weight_grad, bias_grad)
val grad_req_type: Array[Int] = Array(0,1,1)
val ctxMapKeys = ArrayBuffer.empty[String]
val ctxMapDevTypes = ArrayBuffer.empty[Int]
val ctxMapDevIDs = ArrayBuffer.empty[Int]
val execHandle = new ExecutorHandleRef
println("---------------------binding-----------------------")
checkCall(_LIB.mxScalaExecutorBindX(sb.staticGraph.handle,
1,//1
0,//0
ctxMapKeys.size,//0
ctxMapKeys.toArray,//null
ctxMapDevTypes.toArray,//null
ctxMapDevIDs.toArray,//null
in_args.size,
in_args.map(_.handle),
arg_grad_store.map(_.handle),
grad_req_type,
new Array[NDArrayHandle](0),
execHandle))
println("---------------------executor-----------------------")
val executor = new Executor(execHandle.value, null)
println("---------------------froward-----------------------")
executor.forward()
println("---------------------output-----------------------")
// executor.outputs.foreach {println}
println(executor.outputs(0))
println("---------------------backward-----------------------")
val outGrad = Random.uniform(-10f, 10f, Shape(num_instance,hidden_1))
executor.backward(Array(outGrad))
println(outGrad)
println(data_grad)
println(weight_grad)
//
}
// succeed!
def simpleBindingTest{
val num_instance = 10
val input_dim = 15
val dataS = Symbol.CreateVariable("data")
val hidden_1 = 6
val kwargs_type = Map("name" -> "fc1", "num_hidden" -> (""+hidden_1))
val sb:Symbol = Symbol.Create("FullyConnected",kwargs_type)
val kwargs_symbol:Map[String,Symbol] = Map("data"->dataS)
sb.Compose(kwargs_symbol, "fc1")
// val sm= Symbol.Create("softmaxOutput", kwargs)
sb.ToStaticGraph()
println(sb.staticGraph.debug)
sb.staticGraph.ToStaticGraph
// val argNDArrays = (argShapes) map { case shape =>
// // TODO: NDArray dtype
// NDArray.zeros(shape, ctx)
// }
// checkCall(out_graph.ToStaticGraph)
val kwargs_shape = Map("data"->Shape(num_instance,input_dim))
val (argShapes, _, auxShapes) = sb.inferShape(kwargs_shape)
argShapes.foreach {println}
require(argShapes != null, "Input node is not complete")
// alloc space
val argNDArrays = (argShapes) map { case shape =>
// TODO: NDArray dtype
NDArray.ones(shape)
}
val gradNDArrays =(argShapes zipWithIndex) map { case (shape,idx) =>
// TODO: NDArray dtype
if(idx!=0 &&idx !=argShapes.size-1 ){
NDArray.ones(shape)
}else{
new NDArray(0)
}
}
val data = NDArray.ones(Shape(num_instance,input_dim))
val weight = NDArray.ones(Shape(hidden_1,input_dim))//according to inferShape function
val bias = NDArray.ones(Shape(hidden_1))//according to inferShape function
println("\n---------------------------------------------------")
val data_grad = NDArray.ones(Shape(num_instance,input_dim))
val weight_grad = NDArray.ones(Shape(hidden_1,input_dim))//according to inferShape function
val bias_grad = NDArray.ones(Shape(hidden_1))//according to inferShape function
val in_args: Array[NDArray] = Array(data, weight, bias)
val arg_grad_store: Array[NDArray] = Array(data_grad, weight_grad, bias_grad)
// val arg_grad_store: Array[NDArray] = Array(new NDArray(0), weight_grad, bias_grad)
val grad_req_type: Array[Int] = Array(1,1,1)
val ctxMapKeys = ArrayBuffer.empty[String]
val ctxMapDevTypes = ArrayBuffer.empty[Int]
val ctxMapDevIDs = ArrayBuffer.empty[Int]
val execHandle = new ExecutorHandleRef
println("---------------------binding-----------------------")
checkCall(_LIB.mxScalaExecutorBindX(sb.staticGraph.handle,
1,//1
0,//0
ctxMapKeys.size,//0
ctxMapKeys.toArray,//null
ctxMapDevTypes.toArray,//null
ctxMapDevIDs.toArray,//null
// in_args.size,
// in_args.map(_.handle),
// arg_grad_store.map(_.handle),
argNDArrays.size,
argNDArrays.map(_.handle).toArray,
gradNDArrays.map{_.handle}.toArray,
grad_req_type,
new Array[NDArrayHandle](0),
execHandle))
println("---------------------executor-----------------------")
val executor = new Executor(execHandle.value, null)
println("---------------------froward-----------------------")
executor.forward()
println("---------------------output-----------------------")
// executor.outputs.foreach {println}
println(executor.outputs(0))
println("---------------------backward-----------------------")
val outGrad = Random.uniform(-10f, 10f, Shape(num_instance,hidden_1))
executor.backward(Array(outGrad))
println(outGrad)
println(data_grad)
println(weight_grad)
//
}
/**
*
*by liuxianggen
* 2016-4-5
*
*/
def simpleNNBackwardTest_2{
val dataS = Symbol.CreateVariable("data")
val sb:Symbol = Symbol.Create("FullyConnected",Map("name" -> "fc1", "num_hidden" -> "5"))
sb.Compose(Map("data"->dataS) , "fc1")
val sb1:Symbol = Symbol.Create("FullyConnected",Map("name" -> "fc2", "num_hidden" -> "5"))
sb1.Compose(Map("data"->sb) , "fc2")
// val act1:Symbol = Symbol.Create("Activation",Map("name" -> "relu2", "act_type" -> "relu"))
// act1.Compose(Map("data"->sb1) , "relu2")
//
val kwargs_type_sm = Map("name" -> "sm")
val sm= Symbol.Create("SoftmaxOutput",Map("grad_scale"->"1"))
sm.Compose(Map("data" -> sb1), "sm")
//
//// val act = Symbol.Create("Activation",Map("name" -> "act", "act_type" -> "relu"))
//// act.Compose(Map("data"->sb),"act")
//
//
// sm.ToStaticGraph()
// println(sb.staticGraph.debug)
// sb.staticGraph.ToStaticGraph
// println("\n---------------------------------------------------")
// val kwargs_shape = Map("data"->Shape(15,10))
// val (argShapes, outShapes, auxShapes) = sm.inferShape1(out_graph,kwargs_shape)
// argShapes.foreach {println}
// outShapes.foreach {println}
// val num=15
// val label = NDArray.zeros(Shape(num))
// for(i <- 0 until num){
// val temp = (i/3).floor
// println(temp)
// label(i) = temp
// println(label)
// }
//
//
val num_instance = 15
val input_dim = 10
val data = NDArray.rangeRows(0, num_instance, input_dim)//num_instance,10
// val data = Random.uniform(-10f, 10f, Shape(num_instance,input_dim))
val label = NDArray.range(0,5,3)
// val data =NDArray.ones(Shape(num_instance,10))
// val label = NDArray.range(num_instance)
//
// for (i <- 0 until num_instance) {
// for (j <- 0 until input_dim) {
// data(i, j) = i * 1.0f + (scala.util.Random.nextFloat - 0.5f)
// }
//
// }
println(data)
println(label)
val weight = NDArray.ones(Shape(5,10))//according to inferShape function
val bias = NDArray.ones(Shape(5))//according to inferShape function
val weight1 = NDArray.ones(Shape(5,5))//according to inferShape function
val bias1 = NDArray.ones(Shape(5))//according to inferShape function
var data_grad = NDArray.ones(Shape(num_instance,10))
var weight_grad = NDArray.ones(Shape(5,10))//according to inferShape function
var bias_grad = NDArray.ones(Shape(5))//according to inferShape function
var weight_grad1 = NDArray.ones(Shape(5,5))//according to inferShape function
var bias_grad1 = NDArray.ones(Shape(5))//according to inferShape function
var label_grad = NDArray.ones(Shape(num_instance))
val in_args: Array[NDArray] = Array(data, weight, bias,weight1, bias1,label)
// var arg_grad_store: Array[NDArray] = Array(data_grad, weight_grad, bias_grad,label_grad)
// val in_args: Array[NDArray] = Array(data, weight, bias)
// val arg_grad_store: Array[NDArray] = Array(data_grad, weight_grad, bias_grad)
val arg_grad_store: Array[NDArray] = Array(new NDArray(0), weight_grad, bias_grad,weight_grad1, bias_grad1,new NDArray(0))
val grad_req_type: Array[Int] = Array(0,1,1,1,1,0)
// val executor = sm.bindHelper(in_args, arg_grad_store, grad_req_type)
//
// println("---------------------froward-----------------------")
//// executor.forward()
// println("---------------------output-----------------------")
//// executor.outputs.foreach {println}
//// println(executor.outputs(0))
// println("---------------------backward-----------------------")
// val outGrad = Random.uniform(-10f, 10f, Shape(15,6))
// executor.backward()
// checkCall(_LIB.mxExecutorBackward(executor.handle, Array(outGrad.handle)))
// executor.backward()
// println(data)
// println(label)
// for(i<- 0 until 10){
// println("epoch:"+i)
// executor.forward()
// executor.backward()
// println(executor.outputs(0))
// val acc: Float = output_accuracy(executor.outputs(0), label)
// Console.println("Accuracy: " + acc)
// println(arg_grad_store(2))
//// println(in_args(2))
// for (j <- 1 to 4) {
// arg_grad_store(j) *= 5*1e-3f
// in_args(j) -= arg_grad_store(j)
//
// }
// }
//// executor.forward()
//// executor.backward()
//// println(outGrad)
//// println(data_grad)
//// println(weight_grad)
//
// executor.dispose()
}
def simpleNNTest_mxnet{
val datas = Symbol.CreateVariable("data")
val fc1 = Symbol.FullyConnected()(Map("data" -> datas, "name" -> "fc1", "num_hidden" -> 10))
//val kwargs_shape = scala.collection.immutable.Map("data"->Shape(200,15))
//val (arg,out,aux) = sm.inferShape(kwargs_shape)
// println(sm.listArguments())
// ArrayBuffer(data, fc1_weight, fc1_bias, sm_label)
// arg.foreach { println}
// Shape(200, 15)
//Shape(10, 15)
//Shape(10)
//Shape(200)
val data = NDArray.ones(Shape(200,15))
val weight = NDArray.ones(Shape(12,15))//according to inferShape function
val bias = NDArray.ones(Shape(12))//according to inferShape function
// val label = NDArray.ones(Shape(200))
val data_grad = NDArray.ones(Shape(200,15))
val weight_grad = NDArray.ones(Shape(12,15))//according to inferShape function
val bias_grad = NDArray.ones(Shape(12))//according to inferShape function
val in_args: Array[NDArray] = Array(data, weight, bias)
// val in_args: Array[NDArray] = Array(data, weight, bias)
val arg_grad_store: Array[NDArray] = Array(data_grad,weight_grad, bias_grad)
val grad_req_type: Array[Int] = Array(1,1,1)
val ctxMapKeys = ArrayBuffer.empty[String]
val ctxMapDevTypes = ArrayBuffer.empty[Int]
val ctxMapDevIDs = ArrayBuffer.empty[Int]
//
//
//
//// println(bias.toString())
val execHandle = new ExecutorHandleRef
//
// println("---------------------binding-----------------------")
//// LIB.mxExecutorBind(out_graph.handle,1, 0, in_args.length, in_args.map(_.handle), arg_grad_store.map(_.handle),
//// grad_req_type, 0, new Array[NDArrayHandle](0), out)
checkCall(_LIB.mxExecutorBindX(fc1.handle,
1,//1
0,//0
ctxMapKeys.size,//0
ctxMapKeys.toArray,//null
ctxMapDevTypes.toArray,//null
ctxMapDevIDs.toArray,//null
in_args.size,
in_args.map(_.handle),
arg_grad_store.map(_.handle),
grad_req_type,
new Array[NDArrayHandle](0),
execHandle))
//
////
//
// println("---------------------executor-----------------------")
// val executor = new Executor(execHandle.value, fc1)
// println("---------------------froward-----------------------")
// executor.forward()
// println("---------------------output-----------------------")
// executor.outputs.foreach {println}
//
//
}
def mlp_test(): Unit = {
val nh1:Int = 20
val nh2:Int = 10
val input_dim = 28
val num_instance = 20
val input = Symbol.CreateVariable("input")
val fc1 = Symbol.FullyConnected()(Map("data" -> input, "name" -> "fc1", "num_hidden" -> nh1))
val relu1 = Symbol.Activation()(Map("data" -> fc1, "act_type" -> "relu"))
// relu1.listArguments().foreach {println}
val fc2 = Symbol.FullyConnected()(Map("data" -> relu1, "name" -> "fc2", "num_hidden" -> nh2))
// fc2.listArguments().foreach(println)
val output = Symbol.SoftmaxOutput()(Map("data" -> fc2, "name" -> "out"))
// output.listArguments().foreach(println)
val (arg,out,aux) = output.inferShape(scala.collection.immutable.Map("input"->Shape(num_instance, input_dim)))
//
arg.foreach { println}
println("---------------------------------------------------------")
out.foreach { println}
val arr_x = NDArray.zeros(num_instance, input_dim)//128,28
val arr_y = NDArray.zeros(num_instance)//128
for (i <- 0 until num_instance) {
for (j <- 0 until input_dim) {
arr_x(i, j) = i % 10 * 1.0f + (scala.util.Random.nextFloat - 0.5f)
}
arr_y(i) = i % 10
}
// Console.println(arr_x)
val arr_W1 = Random.normal(0f, 1f, Shape(nh1, input_dim))//
val arr_b1 = NDArray.zeros(nh1)
val arr_W2 = Random.normal(0f, 1f, Shape(nh2, nh1))
val arr_b2 = NDArray.zeros(nh2)
//
val arr_W1_g = NDArray.zeros(nh1, input_dim)
val arr_b1_g = NDArray.zeros(nh1)
val arr_W2_g = NDArray.zeros(nh2, nh1)
val arr_b2_g = NDArray.zeros(nh2)
//
val in_args: Array[NDArray] = Array(arr_x, arr_W1, arr_b1, arr_W2, arr_b2, arr_y)
val arg_grad_store: Array[NDArray] = Array(NDArray.zeros(1), arr_W1_g, arr_b1_g, arr_W2_g, arr_b2_g, NDArray.zeros(1))
val grad_req_type: Array[Int] = Array(0, 1, 1, 1, 1, 0)
// val executor = output.bind(in_args, arg_grad_store, grad_req_type)
val ctxMapKeys = ArrayBuffer.empty[String]
val ctxMapDevTypes = ArrayBuffer.empty[Int]
val ctxMapDevIDs = ArrayBuffer.empty[Int]
//
//
//
//// println(bias.toString())
val execHandle = new ExecutorHandleRef
checkCall(_LIB.mxExecutorBindX(output.handle,
1,//1
0,//0
ctxMapKeys.size,//0
ctxMapKeys.toArray,//null
ctxMapDevTypes.toArray,//null
ctxMapDevIDs.toArray,//null
in_args.size,
in_args.map(_.handle),
arg_grad_store.map(_.handle),
grad_req_type,
new Array[NDArrayHandle](0),
execHandle))
val executor = new Executor(execHandle.value, output) // the bind above was done on output, not fc1
Console.println("Training ...")
// val max_iters = 12001
// val learning_rate = 0.00001f
val max_iters = 20
val learning_rate = 0.0001f
val grad = NDArray.ones(Shape(3,5))
for (iter <- 0 until max_iters+1) {
executor.forward(true)
if (iter % 1 == 0) {
Console.println("epoch " + iter)
val acc: Float = output_accuracy(executor.outputs(0), arr_y)
Console.println("Accuracy: " + acc)
}
executor.backward(grad)
for (i <- 1 to 4) {
arg_grad_store(i) *= learning_rate
in_args(i) -= arg_grad_store(i)
}
}
}
def output_accuracy(pred: NDArray, target: NDArray): Float = {
val num_instance = pred.shape(0)
val num_class = pred.shape(1)
val eps = 1e-6
var right = 0
for (i <- 0 until num_instance) {
// arg-max over the class scores of instance i
// (the original loop hardcoded 5 classes; use the actual class count instead)
var mx_p = pred(i, 0)
var p_y: Float = 0
for(j <- 0 until num_class){
if(pred(i,j) > mx_p){
mx_p = pred(i,j)
p_y = j
}
}
if(scala.math.abs(p_y - target(i)) < eps) right += 1
}
right * 1.0f / num_instance
}
def bindTest{
val shape = Shape(10, 5)
val lhs = Symbol.CreateVariable("lhs")
val rhs = Symbol.CreateVariable("rhs")
val ret = lhs + rhs
println(ret.listArguments())
// require(ret.listArguments().toArray == Array("lhs", "rhs"))
val lhsArr = Random.uniform(-10f, 10f, shape)
val rhsArr = Random.uniform(-10f, 10f, shape)
val lhsGrad = NDArray.empty(shape)
val rhsGrad = NDArray.empty(shape)
val ctxMapKeys = ArrayBuffer.empty[String]
val ctxMapDevTypes = ArrayBuffer.empty[Int]
val ctxMapDevIDs = ArrayBuffer.empty[Int]
val args = Array(lhsArr, rhsArr)
val argsGrad = Array(lhsGrad, rhsGrad)
val execHandle = new ExecutorHandleRef
checkCall(_LIB.mxExecutorBindX(ret.handle,
1,//1
0,//0
ctxMapKeys.size,//0
ctxMapKeys.toArray,//null
ctxMapDevTypes.toArray,//null
ctxMapDevIDs.toArray,//null
args.size,
args.map(_.handle),
argsGrad.map(_.handle),
Array(1,1),
new Array[NDArrayHandle](0),
execHandle))
val executor = new Executor(execHandle.value, ret)
val exec3 = ret.bind(Context.cpu(), args = Seq(lhsArr, rhsArr))
executor.forward()
exec3.forward()
val out1 = lhsArr + rhsArr
val out2 = executor.outputs(0)
// test gradient
val outGrad = NDArray.ones(shape)
executor.backward(Array(outGrad))
println(lhsGrad-outGrad)
}
}
|
Liuxg16/BrainMatrix
|
scalakernel/src/main/java/thu/brainmatrix/nce_loss/nce.scala
|
<gh_stars>0
package thu.brainmatrix.nce_loss
import thu.brainmatrix._
class NceAccuracy extends EvalMetric("NceAccuracy") {
override def update(labels: IndexedSeq[NDArray], preds: IndexedSeq[NDArray]): Unit = {
val label = NDArray.argmaxChannel(labels(1))
val pred = NDArray.argmaxChannel(preds(0))
for ((labelElem, predElem) <- label.toArray zip pred.toArray) {
if (math.abs(labelElem - predElem)<1e-6) {
// println(s"labelElem:$labelElem,predElem:$predElem")
this.sumMetric += 1
}
}
this.numInst += pred.shape(0)
pred.dispose()
label.dispose()
}
}
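// Usage sketch (illustrative, not part of the original source): pass the metric to FeedForward.fit,
// the same way other suites in this dump do, e.g.
//   model.fit(trainData, evalData, new NceAccuracy())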
|
Liuxg16/BrainMatrix
|
scala-package/core/src/main/scala/ml/dmlc/mxnet/optimizer/RMSProp.scala
|
package ml.dmlc.mxnet.optimizer
import ml.dmlc.mxnet.{NDArray, Optimizer, LRScheduler}
import ml.dmlc.mxnet.NDArrayConversions._
/**
 * RMSProp optimizer of Tieleman &amp; Hinton, 2012; the implementation follows
 * Eq(38) - Eq(45) of http://arxiv.org/pdf/1308.0850v5.pdf (<NAME>, 2013).
*
* @author <NAME>, <NAME>
*
* @param learningRate Float, Step size.
 * @param gamma1 Float, decay factor for the moving averages of the gradient and the squared gradient.
 * @param gamma2 Float, momentum factor for the moving average of the update (delta).
 * @param rescaleGradient Float, rescaling factor of gradient.
 * @param wd Float, L2 regularization coefficient added to all the weights
* @param clipGradient Float, clip gradient in range [-clip_gradient, clip_gradient]
* @param lrScheduler The learning rate scheduler
*/
class RMSProp(val learningRate: Float = 0.002f, val rescaleGradient: Float = 1.0f,
val gamma1: Float = 0.95f, val gamma2: Float = 0.9f, val wd: Float = 0.0f,
val lrScheduler: LRScheduler = null, val clipGradient: Float = 0f) extends Optimizer {
/**
* Update the parameters.
 * @param index A unique integer key used to index the parameters
 * @param weight weight ndarray
 * @param grad grad ndarray
 * @param state NDArray or other objects returned by createState
* The auxiliary state used in optimization.
*/
override def update(index: Int, weight: NDArray, grad: NDArray, state: AnyRef): Unit = {
val lr = this.learningRate * lrScale.getOrElse(index, 1f)
val (n, g, delta) = state.asInstanceOf[(NDArray, NDArray, NDArray)]
val wd = getWd(index, this.wd)
var resdGrad = grad * this.rescaleGrad
if (clipGradient != 0f) {
val oldResdGrad = resdGrad
resdGrad = NDArray.clip(resdGrad, -clipGradient, clipGradient)
oldResdGrad.dispose()
}
// n := (1 - gamma1) * grad^2 + gamma1 * n, the moving average of the squared gradient
val nUpdated = ((1 - this.gamma1) * (resdGrad * resdGrad) + this.gamma1 * n)
.disposeDepsExcept(resdGrad, n)
n.set(nUpdated)
nUpdated.dispose()
// g := (1 - gamma1) * grad + gamma1 * g, the moving average of the gradient
val gUpdated = ((1 - this.gamma1) * resdGrad + this.gamma1 * g)
.disposeDepsExcept(resdGrad, g)
g.set(gUpdated)
gUpdated.dispose()
// delta := gamma2 * delta - lr * (grad / sqrt(n - g^2 + eps) + wd * weight)
val deltaUpdated =
(this.gamma2 * delta - lr * (resdGrad / NDArray.sqrt(n - g * g + 1e-4f) + wd * weight))
.disposeDepsExcept(delta, resdGrad, n, g, weight)
delta.set(deltaUpdated)
deltaUpdated.dispose()
// apply the update
weight += delta
resdGrad.dispose()
}
override def createState(index: Int, weight: NDArray): (NDArray, NDArray, NDArray) = {
(NDArray.zeros(weight.shape, weight.context), // n
NDArray.zeros(weight.shape, weight.context), // g
NDArray.zeros(weight.shape, weight.context)) // delta
}
// Dispose the state it created
override def disposeState(state: AnyRef): Unit = {
if (state != null) {
val (n, g, delta) = state.asInstanceOf[(NDArray, NDArray, NDArray)]
n.dispose()
g.dispose()
delta.dispose()
}
}
}
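// Usage sketch (illustrative, not part of the original source; parameter values are arbitrary):
//   val opt = new RMSProp(learningRate = 0.002f, gamma1 = 0.95f, gamma2 = 0.9f)
//   val state = opt.createState(0, weight)  // (n, g, delta), all zero-initialized
//   opt.update(0, weight, grad, state)      // one in-place optimization step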
|
Liuxg16/BrainMatrix
|
scalakernel/src/main/java/thu/brainmatrix/char_rnn_symbol/lstmSuite.scala
|
<reponame>Liuxg16/BrainMatrix
package thu.brainmatrix.char_rnn_symbol
import thu.brainmatrix.char_rnn_symbol.Config._
import scala.io.Source
import thu.brainmatrix.FeedForward
import thu.brainmatrix.Symbol
import thu.brainmatrix.Context
import thu.brainmatrix.Shape
import thu.brainmatrix.optimizer.SGD
import thu.brainmatrix.NDArray
import thu.brainmatrix.Context.ctx2Array
import thu.brainmatrix.char_rnn_symbol.seq_IO
class lstmSuite {
// test("mlp proccess text data"){
def testmlpprocess{
val vocab = seq_IO.build_vocabulary(INPUT_FILE_NAME, VOCAB_FILE_NAME)
val n_alphabet = vocab.size
// val lstm = Lstm.LSTM(LSTM_N_LAYER, SEQ_LENGTH, DIM_HIDDEN, DIM_EMBED, n_alphabet, DROPOUT)
val data = Symbol.CreateVariable("data")
val label = Symbol.CreateVariable("sp_label")
val fc1 = Symbol.FullyConnected()(Map("data" -> data, "name" -> "fc1", "num_hidden" -> 128))
val act1 = Symbol.Activation()(Map("data" -> fc1, "name" -> "relu1", "act_type" -> "relu"))
val fc2 = Symbol.FullyConnected()(Map("data" -> act1, "name" -> "fc2", "num_hidden" -> 64))
val act2 = Symbol.Activation()(Map("data" -> fc2, "name" -> "relu2", "act_type" -> "relu"))
val fc3 = Symbol.FullyConnected()(Map("data" -> act2, "name" -> "fc3", "num_hidden" -> 24))
val linearRO = Symbol.LinearRegressionOutput()(Map("data"->fc3,"label"->label))
// SoftmaxOutput(Map("data" -> fc3, "name" -> "sm"))
// println(linearRO.debug())
val source = Source.fromFile(INPUT_FILE_NAME)
val seq_input = source.mkString
val len_train = math.round(seq_input.length()*DATA_TRAIN_RATIO).toInt
val text_train = seq_input.take(len_train)
val text_val = seq_input.drop(len_train)
val traindata = seq_IO.Str2Char_NDArrayIterator(text = text_train,labelName = "sp_label",vocab = vocab,batch_size = BATCH_SIZE,seq_len = SEQ_LENGTH)
val modelBase = new FeedForward(linearRO, Context.cpu(), numEpoch = N_EPOCH,
optimizer = new SGD(learningRate = LEARNING_RATE, momentum = MOMENTUM, wd = WEIGHT_DECAY))
// modelBase.fit(traindata, traindata,new ReconsAccuracy())
}
// test("vocab reverse"){
def testvocab{
val vocab = seq_IO.build_vocabulary(INPUT_FILE_NAME, VOCAB_FILE_NAME)
var bacov = for((k,v)<- vocab) yield (v,k)
bacov = bacov.updated(5, '?')
assert(bacov(5)=='?')
// println(bacov)
}
// test("data&label"){
def testdataandlabel{
val vocab = seq_IO.build_vocabulary(INPUT_FILE_NAME, VOCAB_FILE_NAME)
var bacov = for((k,v)<- vocab) yield (v,k)
bacov = bacov.updated(bacov.size-1, '?')
val n_alphabet = vocab.size
val lstm = Lstm.LSTMNet(LSTM_N_LAYER, SEQ_LENGTH, DIM_HIDDEN, DIM_EMBED, n_alphabet, DROPOUT)
val source = Source.fromFile(INPUT_FILE_NAME)
val seq_input = source.mkString
val len_train = math.round(seq_input.length()*DATA_TRAIN_RATIO).toInt
val text_train = seq_input.take(len_train)
val text_val = seq_input.drop(len_train)
val traindata = seq_IO.lstmDataIter(text = text_train,labelName = "label",vocab = vocab,batch_size = BATCH_SIZE,seq_len = SEQ_LENGTH)
for(i<- 0 until 17) traindata.next()
for(j<-0 until 2){
val databatch = traindata.next()
val data = databatch.data
val label = databatch.label
val dataText = data.map(x =>bacov(x(0,0).toInt)).mkString
val labelText = label.map(x =>bacov(x(0).toInt)).mkString
// println("------------------------------")
// println(dataText)
// println("------------------------------")
// println(labelText)
}
}
// test("lstm_vec_DataIter"){
def testlstmvecdataiter{
val vocab = seq_IO.build_vocabulary(INPUT_FILE_NAME, VOCAB_FILE_NAME)
var bacov = for((k,v)<- vocab) yield (v,k)
bacov = bacov.updated(bacov.size-1, '?')
println(bacov)
val n_alphabet = vocab.size
val lstm = Lstm.LSTMNet(LSTM_N_LAYER, SEQ_LENGTH, DIM_HIDDEN, DIM_EMBED, n_alphabet, DROPOUT)
val source = Source.fromFile(INPUT_FILE_NAME)
val seq_input = source.mkString
val len_train = math.round(seq_input.length()*DATA_TRAIN_RATIO).toInt
val text_train = seq_input.take(len_train)
val text_val = seq_input.drop(len_train)
val traindata = seq_IO.lstm_vec_DataIter(text = text_train,labelName = "label",vocab = vocab,batch_size = BATCH_SIZE,seq_len = SEQ_LENGTH,vocab_len = n_alphabet)
for(i<- 0 until 19) traindata.next()
for(j<-0 until 19){
val databatch = traindata.next()
val data = databatch.data
val label = databatch.label
val dataText = data.map(x =>{
// val temp =
bacov(NDArray.argmaxChannel(x).toArray(0).toInt)
}).mkString
// println("------------------------------")
val labelText = label.map(x =>bacov(x(0).toInt)).mkString
// println(dataText)
// println("------------------------------")
// println(labelText)
}
}
// test("RNN_OneHot_DataIter"){
def testrnnonehot{
val vocab = seq_IO.build_vocabulary(INPUT_FILE_NAME, VOCAB_FILE_NAME)
var bacov = for((k,v)<- vocab) yield (v,k)
bacov = bacov.updated(bacov.size-1, '?')
println(bacov)
val n_alphabet = vocab.size
val lstm = Lstm.LSTM(LSTM_N_LAYER, SEQ_LENGTH, DIM_HIDDEN, DIM_EMBED, n_alphabet, DROPOUT)
val source = Source.fromFile(INPUT_FILE_NAME)
val seq_input = source.mkString
val len_train = math.round(seq_input.length()*DATA_TRAIN_RATIO).toInt
val text_train = seq_input.take(len_train)
val text_val = seq_input.drop(len_train)
val traindata = seq_IO.RNN_OneHot_DataIter(text = text_train,labelName = "label",vocab = vocab,batch_size = BATCH_SIZE,seq_len = SEQ_LENGTH)
// for(i<- 0 until 19) traindata.next()
// for(j<-0 until 19){
val databatch = traindata.next()
val data = databatch.data(0)
val label = databatch.label(0)
var data_text = ""
for(i<-0 until BATCH_SIZE){
val seq = NDArray.array(data.slice(i).toArray,Shape(SEQ_LENGTH,n_alphabet))
val a = NDArray.argmaxChannel(seq)
data_text += a.toArray.map(x => bacov(x.toInt)).foldRight("")(_+_)
}
// val temp =
// bacov(NDArray.argmaxChannel(x).toArray(0).toInt)
// }).mkString
// println("------------------------------------------------------------")
// val labelText = label.toArray.map(x => bacov(x.toInt)).foldRight("")(_+_)
////
// println(data_text)
// println("-------------------------------------------------------------")
// println(labelText)
// }
}
// test("2layer-lstm") {
def test2layerlstm{
val vocab = seq_IO.build_vocabulary(INPUT_FILE_NAME, VOCAB_FILE_NAME)
val n_alphabet = vocab.size
val lstm = Lstm.LSTMNet(LSTM_N_LAYER, SEQ_LENGTH, DIM_HIDDEN, DIM_EMBED, n_alphabet, DROPOUT)
val source = Source.fromFile(INPUT_FILE_NAME)
val seq_input = source.mkString
val len_train = math.round(seq_input.length()*DATA_TRAIN_RATIO).toInt
val text_train = seq_input.take(len_train)
val text_val = seq_input.drop(len_train)
val traindata = seq_IO.lstmDataIter(text = text_train,labelName = "label",vocab = vocab,batch_size = BATCH_SIZE,seq_len = SEQ_LENGTH)
val aux_input= Map("_l0_init_h"->Shape(16,64),"_l0_init_c"->Shape(16,64),"_l1_init_h"->Shape(16,64),"_l1_init_c"->Shape(16,64)) ++ traindata.provideData ++ traindata.provideLabel
//val map_infer = for((x,y)<-aux_input) yield (x,Random.uniform(0f, 0.1f, y))
// val executor = lstm.simpleBind(ctx = Context.cpu(),gradReq = "write",shapeDict = aux_input)
//
// executor.forward(true)
// val out0 = executor.outputs(0)
// val out15 = executor.outputs(29)
// val out2 = executor.outputs(SEQ_LENGTH-1)
// println(out0)
// println(out15)
// println("----------------------------------------------")
// println(out2)
//// executor.backward()
//// println("----------------------------------------------")
//// println(executor.gradArrays(0))
// println("end...")
}
// test("1 layer-lstm") {
def test1layerlstm{
val vocab = seq_IO.build_vocabulary(INPUT_FILE_NAME, VOCAB_FILE_NAME)
val n_alphabet = vocab.size
val lstm = Lstm.LSTMNet(LSTM_N_LAYER, SEQ_LENGTH, DIM_HIDDEN, DIM_EMBED, n_alphabet, DROPOUT)
// lstm.listArguments().foreach {println}
// println(lstm.debug())
val source = Source.fromFile(INPUT_FILE_NAME)
val seq_input = source.mkString
val len_train = math.round(seq_input.length()*DATA_TRAIN_RATIO).toInt
val text_train = seq_input.take(len_train)
val text_val = seq_input.drop(len_train)
val traindata = seq_IO.lstm_vec_DataIter(text = text_train,labelName = "label",vocab = vocab,batch_size = BATCH_SIZE,seq_len = SEQ_LENGTH,vocab_len = n_alphabet)
val aux_input= Map("_l0_init_h"->Shape(BATCH_SIZE,DIM_HIDDEN),"_l0_init_c"->Shape(BATCH_SIZE,DIM_HIDDEN),"_l1_init_h"->Shape(BATCH_SIZE,DIM_HIDDEN),"_l1_init_c"->Shape(BATCH_SIZE,DIM_HIDDEN)) ++ traindata.provideData ++ traindata.provideLabel
//val map_infer = for((x,y)<-aux_input) yield (x,Random.uniform(0f, 0.1f, y))
// println(aux_input)
// val executor = lstm.simpleBind(ctx = Context.cpu(),gradReq = "write",shapeDict = aux_input)
//
// executor.forward(true)
// val out0 = executor.outputs(0)
// val out15 = executor.outputs(29)
// val out2 = executor.outputs(SEQ_LENGTH-1)
// println(out0)
// println(out15)
// println("----------------------------------------------")
// println(out2)
// executor.backward()
// println("----------------------------------------------")
//// (executor.gradArrays).foreach {println}
// println("end...")
}
// test("inspect file"){
def testinspect{
val source = Source.fromFile(INPUT_FILE_NAME)
val vocab = seq_IO.build_vocabulary(INPUT_FILE_NAME, VOCAB_FILE_NAME)
val seq_input = source.mkString.map(vocab)
// println(seq_input.take(100))
}
// test("check params"){
def checkparams{
// val pretrained = NDArray.load2Map(s"./model/charLSTM.params_${N_EPOCH}")
// println(pretrained.keys)
// println(pretrained("argParams::_pred_0_weight"))
}
// test("debugTraining"){
def debugtrain{
val vocab = seq_IO.build_vocabulary(INPUT_FILE_NAME, VOCAB_FILE_NAME)
val n_alphabet = vocab.size
val lstm = Lstm.LSTM(LSTM_N_LAYER, SEQ_LENGTH, DIM_HIDDEN, DIM_EMBED, n_alphabet, DROPOUT)
// lstm.listArguments().foreach {println}
val shapeInfer = Map("_l0_init_h"->Shape(BATCH_SIZE,DIM_HIDDEN),"_l0_init_c"->Shape(BATCH_SIZE,DIM_HIDDEN),"_l1_init_h"->Shape(BATCH_SIZE,DIM_HIDDEN),
"_l1_init_c"->Shape(BATCH_SIZE,DIM_HIDDEN),"data"->Shape(BATCH_SIZE,SEQ_LENGTH,n_alphabet),"label"->Shape(BATCH_SIZE,SEQ_LENGTH))
// val (a,b,c) = lstm.inferShape(shapeInfer)
// val exe = lstm.simpleBind(Context.defaultCtx,shapeDict=shapeInfer)
// a.foreach(println)
// b.foreach {println}
}
}
|
Liuxg16/BrainMatrix
|
scalakernel/src/test/java/thu/brainmatrix/suite/PrimarySuite.scala
|
<gh_stars>0
package thu.brainmatrix.suite
import org.scalatest.{BeforeAndAfterAll, FunSuite}
import thu.brainmatrix.rnn.Utils
import thu.brainmatrix.NDArray
import thu.brainmatrix.Shape
import breeze.linalg._
import breeze.plot._
class PrimarySuite extends FunSuite with BeforeAndAfterAll{
// test("plot"){
def testplot{
val f = Figure()
val p = f.subplot(0)
val x = linspace(0.0,1.0)
p += plot(x, x :^ 2.0)
p += plot(x, x :^ 3.0, '.')
p.xlabel = "x axis"
p.ylabel = "y axis"
// f.saveas("lines.png") // save current figure as a .png, eps and pdf also supported
}
// test("plot1"){
def testplot1{
val f = Figure()
val p = f.subplot(0)
val x = linspace(0.0,1.0)
val xx = Array(2d,3d,4d,5d,6d)
val xxx = DenseVector.create(xx, 0, 1,3)
// xxx.data.foreach {println}
p += plot(xxx, xxx :^ 2.0)
// p += plot(x, x :^ 3.0, '.')
// p.xlabel = "x axis"
// p.ylabel = "y axis"
// f.saveas("lines.png") // save current figure as a .png, eps and pdf also supported
}
/**
* generates the indices of the list
*/
test("List:indices"){
val buckets = List(2,3,4)
val a = buckets.indices
// println(a)
}
/**
* find is useless!!!
*/
test("find"){
val arr = Array(1,2,3,4,4,5)
// arr.find(x => x%2==0).foreach(println)
}
/*
* regenerates a list with the same elements in a different order
*/
test("Random:shuffle"){
val plan = Array(1,2,3,4)
// println(scala.util.Random.shuffle(plan.toList))
}
test("perplexity"){
val a = NDArray.diag(Shape(2,3))
// println(a)
val b = NDArray.ones(Shape(2,3))*2
val c = Utils.perplexity(a,b)
// println(c)
}
/*
* returns an iterator over groups of elements
* @param size
* the number of elements per group
*/
test("grouped"){
val arr = Array(2,3,4,5,3,4,6,7)
val a = arr.grouped(5)
// a.next().foreach { print}
}
test("reduce"){
val arrs = Array(Array(1,2,3,4),Array(6,7,8),Array(6,7,8))
val ret = arrs.reduce(_++_)
// ret.foreach {println}
}
test("foldLeft"){
val arrs = Array(Array(1,2,3,4),Array(6,7,8))
val ret = arrs.foldLeft(Array[Int]())(_++_)
// ret.foreach {println}
}
test("collection:Set"){
var rn = Set[Int]()
rn = rn + 2
rn = rn + 1
rn = rn + 2
// println(rn)
}
test("val a: IndexedSeq[Int]"){
val a = 2 % 10 +: (0 until 10).map(_ => scala.util.Random.nextInt(90 -1))
// println(a.toArray.length)
}
test("sorted"){
val a = Array(2,7,3,51,7)
// a.sorted.foreach(println)
}
}
|
Liuxg16/BrainMatrix
|
scalakernel/src/main/java/thu/brainmatrix/synapse_symbol/Dendrite.scala
|
package thu.brainmatrix.synapse_symbol
import thu.brainmatrix.NDArray
import thu.brainmatrix.Symbol
import thu.brainmatrix.Context
import thu.brainmatrix.Shape
class Dendrite(val ctx: Context = Context.defaultCtx,val name:String) extends Module{
val onenda = NDArray.ones(Config.SHAPE,ctx)
override var variable_table = Array[String]("postVm")
override var variableindices = Array(-1)
var tmp_symbol :Symbol = null
override def getSymbol() = this.tmp_symbol
//connectivity
var synapses = Vector[Synapse]();
// symbol graph
var gK = Symbol.CreateVariable(s"gK_$name")
var Vk = Symbol.CreateVariable(s"Vk_$name") // reversal potential for K channel
var Cm = Symbol.CreateVariable(s"Cm_$name")
var postVm = Symbol.CreateVariable(s"postVm_$name")
// var currentinput = Symbol.CreateVariable(s"currentinput")//no use
// parameters
var currentinput_nda = NDArray.zeros(Config.SHAPE,ctx);
var gK_nda :NDArray = onenda //
var Vk_nda :NDArray = onenda * -70f; // reversal potential for K channel
var Cm_nda :NDArray = onenda * 10; // membrane capacitance
// variables
var postVm_nda = onenda * -70f;
var y_postVm_nda = onenda * -70f;
// def set(gK:NDArray, Vk:NDArray,Cm:NDArray,postVm:NDArray){
// this.gK = gK;
// this.Vk = Vk;
// this.Cm = Cm;
// this.postVm = postVm;
// }
def getSynapses(idx:Int) :Synapse = {
return synapses(idx);
}
def addSynapse(s:Synapse){
s.dendrite = this
synapses = synapses.:+(s)
}
override def getSymbolMap():Map[String,NDArray] = {
Map(s"gK_$name"->gK_nda,s"Vk_$name"->Vk_nda,s"Cm_$name"->Cm_nda,s"postVm_$name"->y_postVm_nda)
}
override def getInitial(map : Map[String,NDArray]=null): Map[String,NDArray] = {
if(map==null)
Map(s"y${this.variableindices(0)}"->this.postVm_nda)
else {
map
}
}
override def getInitialY():Array[NDArray] = {
Array(this.y_postVm_nda)
}
override def getInitialVar():Array[String] = {
Array(s"y${this.variableindices(0)}")
}
override def update(t_onehot: Symbol, y:Array[Symbol],yDot:Array[ Symbol],indices:Array[Int]):Array[Symbol] = {
this.postVm = y(indices(0));
this.tmp_symbol = this.postVm*Config.one_s
var postI = Config.one_s // input current term (constant here)
var tEPSC = Config.zero_s
// sum the excitatory post-synaptic currents of all attached synapses
for(i<- 0 until this.synapses.length){
tEPSC += this.synapses(i).EPSC;
}
// membrane equation: dV/dt = -(tEPSC + postI + gK * (V - Vk)) / Cm
val d_postVm = (tEPSC+postI+this.gK*(this.postVm-this.Vk))/this.Cm*(-1);
yDot(indices(0)) = d_postVm;
yDot
}
}
|
Liuxg16/BrainMatrix
|
scalakernel/src/main/java/thu/brainmatrix/StaticGraph.scala
|
package thu.brainmatrix
import thu.brainmatrix.Base._
import scala.collection.mutable.{ArrayBuffer,ListBuffer}
import scala.collection.mutable.LinkedHashMap
import scala.Vector
/**
 * 2016-3-14
 * by liuxianggen
 * its function is the same as the StaticGraph in the mxnet c++ part:
 * a struct that gets converted to the mxnet c++ side over jni
 *
 * note:
 * 1. a finalize function is needed (defined below)
 *
 */
class StaticGraph(){
private var disposed = false
override protected def finalize(): Unit = {
dispose()
}
/**
* Release the native memory.
* The object shall never be used after it is disposed.
*/
def dispose(): Unit = {
if (!disposed) {
if (handle != 0) checkCall(_LIB.mxStaticGraphFree(handle))
disposed = true
}
}
var arg_nodes:Vector[Int] = Vector()
var heads:Vector[DataEntry] = Vector()
var nodes:Vector[Node] = Vector()
var handle: StaticGraphHandle = _
def reset{
this.arg_nodes = Vector()
this.heads = Vector()
this.nodes = Vector()
this.handle = 0
}
def debug:String = {
var s = "-----------StaticGraph debug information ----------------------------\n"
s += "arg_nodes:\n"
// s= "length:" + this.arg_nodes.length
this.arg_nodes.foreach { n => s += " " + n }
s += "\nheads:\n"
this.heads.foreach { x => s += x.Info }
s += "\nnodes:\n"
this.nodes.foreach{
x => {
val sourceid = x.inputs.map(_.source_id.toString()+" ")
s += "\nname:" + x.name+"\n\t" +"is_backward:" + x.backward_source_id+ "\n\tinputs_e_source_id" +sourceid.foldLeft(" ")(_+_)
}
}
s += "\n-----------StaticGraph debug information ----------------------------\n"
s
}
/**
* 2016-3-25
* by liuxianggen
*/
def ToStaticGraph:Int = {
// println("-----------------------StaticGraph Info--------------------------------")
/**
* MXNET:
* DataEntry:source_id, index
*
* struct Node {
/*! \brief wrapped operator property */
std::unique_ptr<OperatorProperty> op;
/*! \brief name of the node */
std::string name;
/*! \brief inputs (node_id, index) for of the nodes*/
std::vector<DataEntry> inputs;
/*!
* \brief If this field is nonnegative, this indicates this
* Node is corresponds to a Backward Operation of Operator.
* backward_source_id will points to the corresponding Forward Node.
*
* For normal node, this field is -1.
* When the node is a Backward node, the op field will be nullptr
*/
int32_t backward_source_id;
/*! \brief additional attributes about the node */
std::map<std::string, std::string> attr;
*
*/
//for arg_nodes
val arg_node_sg:Array[Int] = this.arg_nodes.toArray//
//for heads
val heads1:Vector[(Int,Int)] = heads.map { x => (x.source_id,x.index)}
val (heads_source_V:Vector[Int],heads_index_V:Vector[Int]) = heads1.unzip
val heads_source :Array[Int]= heads_source_V.toArray//
val heads_index :Array[Int]= heads_index_V.toArray//
//for nodes
val nods3:Vector[(OperatorPropertyRef,String,Vector[DataEntry])] = nodes.map{x => (x.opRef, x.name, x.inputs)}
val nods45 = nodes.map { x => (x.backward_source_id,x.attr ) }
val (nods_opRef,nods_name_V,nods_inputs):(Vector[OperatorPropertyRef],Vector[String],Vector[Vector[DataEntry]])= nods3.unzip3//
// val nods_opHandles :Array[OperatorPropertyHandle]= nods_opRef.map({_.value.handle}).toArray
val OperatorPropertyHandleref = new OperatorPropertyHandleRef
var nods_opHandles_V :Vector[OperatorPropertyHandle]= nods_opRef.map( x => {
if(x.value==null)
// x.value.handle
OperatorPropertyHandleref.value
else
x.value.handle
})
nods_opHandles_V :+= OperatorPropertyHandleref.value
val nods_opHandles = nods_opHandles_V.toArray//
val nods_name :Array[String]= nods_name_V.toArray//
// println("nods_name:")
// println(nods_name.length)
// nods_name.foreach {println}
/**
*
* nods_inputs is actually a Vector[Vector[DataEntry]],
* which is too complicated to pass to c++ over JNI,
* so flatten it into two arrays, one of source_ids and one of indexes,
* plus a per-node length array: nods_inputs(i) spans len(i) entries of each array
*
*/
// println("-------------------------------------------------------")
// println("inputs:")
val nods_inputs_len_arr :Array[Int] = nods_inputs.map { _ .length}.toArray//
// nods_inputs.foreach(x => {
// print("len:"+x.length+" ")
// x.foreach(y => print("\nindex:"+y.index + " source_id:"+y.source_id))
// println
// })
val nods_inputsM = nods_inputs.flatten
val nods_inputs_source_ids:Array[Int] = nods_inputsM.map { _.source_id}.toArray//
val nods_inputs_indexs:Array[Int] = nods_inputsM.map { _.index }.toArray//
/**
* nods_atts:Array[Map[String,String]]
*/
val (nods_backward_source_ids_V:Vector[Int],nods_attrs) = nods45.unzip//
val nods_backward_source_ids = nods_backward_source_ids_V.toArray
val nods_attr_len_arr:Array[Int] = nods_attrs.map( _.size).toArray//
val nods_attr_len_arr_len = nods_attr_len_arr.foldLeft(0)(_ + _)
val nods_attrs_keys:Array[String] = (nods_attrs.map(x => { x.keys}).flatten).toArray//
val nods_attrs_values:Array[String] = (nods_attrs.map(x => { x.values}).flatten).toArray//
/**
*
* (Array[Int],Array[Int],Array[Int],Array[OperatorPropertyHandle],Int,Array[String],
* Array[Int] ,Array[Int],Array[Int],Array[Int],Array[Int],Array[String],Array[String])
*
*/
val handleref:StaticGraphHandleRef = new StaticGraphHandleRef
val ret = _LIB.mxScalaToStaticGraph(handleref,arg_node_sg,heads_source,heads_index,nods_opHandles,nods_name.length,nods_name,nods_inputs_len_arr,nods_inputs_source_ids,nods_inputs_indexs,
nods_backward_source_ids,nods_attr_len_arr,nods_attr_len_arr_len,nods_attrs_keys,nods_attrs_values)
this.handle = handleref.value
// println("-----------------------StaticGraph Info--------------------------------")
ret
}
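// Illustrative call (not part of the original source); the native handle is filled in as a side effect:
//   val g = someSymbol.staticGraph   // hypothetical: a graph already built by Symbol.ToStaticGraph()
//   checkCall(g.ToStaticGraph)       // flattens nodes/heads/inputs into arrays and creates the native graph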
/**
 * @author liuxianggen
 * @date 20160724
 * @brief checks which kwargs name true variable nodes and returns kv (name -> shape)
 * together with keys_arr, the position of each matched variable in arg_node order.
 * Important: the index is the position within arg_nodes, not among all nodes.
 * example:
 * nodes: 1,2,3,4,5,6
 * arg_nodes: 1,3,5,6
 * kwargs: Map("data"->Vector(2,3)) where "data" is node(3)'s name; node(3) is the 2nd entry of arg_nodes,
 * so the method returns:
 * kv = Map("data"->Vector(2,3))
 * keys_arr = ArrayBuffer(1) (0-based position, as computed in the loop below)
 * @param
 * @return
 * @example
 * @note
 */
def identifyVar(kwargs: Map[String, Shape]):(LinkedHashMap[String, Shape],ArrayBuffer[Int])= {
val keys_arr = ArrayBuffer.empty[Int]
val kv =scala.collection.mutable.LinkedHashMap[String,Shape]()
val varNodeName = this.arg_nodes.map{ this.nodes(_) }.map {_.name}
for(i <- 0 until varNodeName.length){
if(kwargs.contains(varNodeName(i))){
keys_arr += i
kv(varNodeName(i)) = kwargs.getOrElse(varNodeName(i),Shape())
}
}
// val v = kwargs.filter(kv => {
// varNodeName.contains(kv._1)})
// v
(kv,keys_arr)
}
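	/**
	 * A minimal usage sketch for identifyVar (illustrative only; the variable
	 * name "data" and its shape are assumptions):
	 *
	 *   val (kv, keysArr) = sym.identifyVar(Map("data" -> Shape(2, 3)))
	 *   // kv      == LinkedHashMap("data" -> Shape(2, 3))
	 *   // keysArr == ArrayBuffer(i), where i is the zero-based position of the
	 *   //            node named "data" inside arg_nodes
	 */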
	/**
	 * @author liuxianggen
	 * @date 20160724
	 * @brief transforms kwargs into the flat arrays that JNI can recognize;
	 *        the debug block marked comments1 below can be re-enabled when needed
	 * @param
	 * @return
	 * @example
	 * @note
	 */
def inferShape(kwargs:Map[String,Shape],inShapeData: ListBuffer[Array[Int]],outShapeData: ListBuffer[Array[Int]],auxShapeData: ListBuffer[Array[Int]],complete: Base.RefInt){
this.ToStaticGraph
val (kv,keys_arr) = this.identifyVar(kwargs)
val indPtr = ArrayBuffer(0)
var sdata = ArrayBuffer.empty[Int]
kv.foreach { case (key, shape) =>
// keys += key
sdata = sdata ++ shape.toVector
indPtr += sdata.size
}
// comments1
// println("----------------------parameter--------------------------------")
// println(indPtr.size-1)
// println(indPtr)
// println(keys_arr)
// println(sdata)
// kv.foreach(println)
// println("---------------------------------------------------------------")
_LIB.mxScalaSGInferShape(this.handle, this.arg_nodes.size, indPtr.size - 1,keys_arr.toArray, indPtr.toArray, sdata.toArray, inShapeData, outShapeData, auxShapeData, complete)
}
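	/**
	 * A minimal usage sketch (the variable name "data" and its shape are
	 * assumptions; the ListBuffers are filled in place by the JNI call):
	 *
	 *   val inShapes  = ListBuffer.empty[Array[Int]]
	 *   val outShapes = ListBuffer.empty[Array[Int]]
	 *   val auxShapes = ListBuffer.empty[Array[Int]]
	 *   val complete  = new Base.RefInt
	 *   sym.inferShape(Map("data" -> Shape(2, 3)), inShapes, outShapes, auxShapes, complete)
	 *   // on success complete.value is non-zero and outShapes holds the output dims
	 */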
def printOperator{
this.nodes.foreach { x => {
if(x.opRef.value!=null){
println(x.name+" operator name:")
println(x.opRef.value.opName)
(x.opRef.value.printParam())
}
} }
}
def bind(in_args:Array[NDArray],arg_grad_store:Array[NDArray],grad_req_type:Array[Int],
auxNDArrays:Array[NDArray] = new Array[NDArray](0)):ExecutorHandleRef = {
val ctxMapKeys = ArrayBuffer.empty[String]
val ctxMapDevTypes = ArrayBuffer.empty[Int]
val ctxMapDevIDs = ArrayBuffer.empty[Int]
val execHandle = new ExecutorHandleRef
if(this.handle == 0){
System.err.println("bind error! handle == 0")
}else{
checkCall(_LIB.mxScalaExecutorBindX(this.handle,
1,//1
0,//0
ctxMapKeys.size,//0
ctxMapKeys.toArray,//null
ctxMapDevTypes.toArray,//null
ctxMapDevIDs.toArray,//null
in_args.size,
in_args.map(_.handle),
arg_grad_store.map(_.handle),
grad_req_type,
auxNDArrays.map(_.handle),
// new Array[NDArrayHandle](0),
execHandle))
}
execHandle
}
def bind(in_argsh:Array[NDArrayHandle],arg_grad_storeh:Array[NDArrayHandle],
grad_req_type:Array[Int]):Executor = {
val ctxMapKeys = ArrayBuffer.empty[String]
val ctxMapDevTypes = ArrayBuffer.empty[Int]
val ctxMapDevIDs = ArrayBuffer.empty[Int]
val execHandle = new ExecutorHandleRef
println("---------------------binding-----------------------")
if(this.handle == 0){
System.err.println("bind error! handle == 0")
}else{
// in_args.foreach{x => println(x.shape)}
// println("---------------------------------------------------------")
// arg_grad_store.foreach{x => println(x.shape)}
checkCall(_LIB.mxScalaExecutorBindX(this.handle,
1,//1
0,//0
ctxMapKeys.size,//0
ctxMapKeys.toArray,//null
ctxMapDevTypes.toArray,//null
ctxMapDevIDs.toArray,//null
in_argsh.size,
in_argsh,
arg_grad_storeh,
grad_req_type,
new Array[NDArrayHandle](0),
execHandle))
}
println("---------------------binding succeed!-----------------------")
new Executor(execHandle.value,null)
}
def bind(deviceTypeid:Int,
deviceID:Int,
numCtx: Int,
ctxMapKeys: Array[String],
ctxMapDevTypes: Array[Int],
ctxMapDevIDs: Array[Int],
numArgs: Int,
argsHandle: Array[NDArrayHandle],
argsGradHandle: Array[NDArrayHandle],
reqsArray: Array[Int],
auxArgsHandle: Array[NDArrayHandle]):ExecutorHandleRef = {
val execHandle = new ExecutorHandleRef
if(this.handle == 0){
throw new java.lang.Error("bind error! handle == 0")
}else{
// in_args.foreach{x => println(x.shape)}
// println("---------------------------------------------------------")
// arg_grad_store.foreach{x => println(x.shape)}
checkCall(_LIB.mxScalaExecutorBindX(this.handle,
deviceTypeid,//1
deviceID,//0
numCtx,//0
ctxMapKeys,//null
ctxMapDevTypes,//null
ctxMapDevIDs,//null
numArgs,
argsHandle,
argsGradHandle,
reqsArray,
auxArgsHandle,
execHandle))
}
execHandle
}
def saveToFile(fname: String){
this.ToStaticGraph
checkCall(_LIB.mxScalaSymbolSaveToFile(this.handle,fname))
}
}
|
Liuxg16/BrainMatrix
|
scalakernel/src/main/java/thu/brainmatrix/ml/HMM.scala
|
package thu.brainmatrix.ml
import scala.util.control.Breaks
import thu.brainmatrix.NDArray
import thu.brainmatrix.Context
import thu.brainmatrix.Shape
import thu.brainmatrix.Random
import thu.brainmatrix.util.mathTool
/**
 *
 * properties
 *   pi:     the initial state distribution, (1,K)
 *   T:      the transition probability matrix, (K,K)
 *   Obs_pi: the emission probabilities of the observations, (K,D)
 * the model has K distinct hidden states and D observable symbols
 *
 */
class HMM(val pi:NDArray,val T:NDArray,val Obs_pi:NDArray) {
	def simulation(nSteps:Int):(Array[Int],Array[Int]) = {
		val observations = Array.fill[Int](nSteps)(0)
		val states = Array.fill[Int](nSteps)(0)
		// sample the initial state and its observation
		states(0) = mathTool.SampleByPro1D(this.pi)
		observations(0) = mathTool.SampleByPro1D(this.Obs_pi.slice(states(0)))
		for(t<-1 until nSteps){
			states(t) = mathTool.SampleByPro1D(this.T.slice(states(t-1)))
			observations(t) = mathTool.SampleByPro1D(this.Obs_pi.slice(states(t)))
		}
		(states,observations)
	}
def train(observations:Array[Int]):Array[NDArray] = {
val ctx = Context.cpu(0)
val criterion = 0.5
val obs_T = NDArray.transpose(this.Obs_pi)
var pi_est = NDArray.Normalize(NDArray.ones(this.pi.shape,ctx))
var T_est = NDArray.Normalize(NDArray.ones(this.T.shape, ctx))
// var obs_pi_est_T = NDArray.transpose(NDArray.array(Array(0.3f,0.3f,0.4f,0.2f,0.5f,0.3f,0.3f,0.3f,0.4f),this.Obs_pi.shape,ctx))
var obs_pi_est_T = NDArray.Normalize(NDArray.transpose(Random.uniform(0, 1, this.Obs_pi.shape,ctx)))
val nsamples = observations.length
val nstates = this.pi.size
val nhiddenstates = this.Obs_pi.shape(1)
var iter = 0
var done:Boolean = false
while(!done){
val alpha = NDArray.zeros(Shape(nsamples,nstates),ctx)
val alpha_theta = NDArray.zeros(Shape(nsamples,nstates),ctx) // model probability
val alpha_real = NDArray.zeros(Shape(nsamples,nstates),ctx) // estimated probability
val c = Array.fill[Float](nsamples)(0f)
// calculate alpha_0
val alpha_0 = pi_est * obs_pi_est_T.slice(observations(0))
c(0) = 1f/NDArray.sum(alpha_0).toScalar
// and normalize
(alpha_0*c(0)).copyTo(alpha.slice(0))
(this.pi * obs_T.slice(observations(0))).copyTo(alpha_theta.slice(0))
alpha_0.copyTo(alpha_real.slice(0))
// println(this.pi * obs_T.slice(observations(0)))
// println(alpha_theta.slice(0))
for(t <- 1 until nsamples){
// \alpha_{t}(i) = P(x_1\cdots,x_t,y_t=i|\theta) = \Sigma_j \{\alpha_{t-1}(j)t_{j,i}\} e_{i,x_t}
val alpha_t = NDArray.dot(alpha.slice(t-1),T_est) * obs_pi_est_T.slice(observations(t))
c(t) = 1f/NDArray.sum(alpha_t).toScalar
(alpha_t*c(t)).copyTo(alpha.slice(t))
val alpha_theta_tmp = NDArray.dot(alpha_theta.slice(t-1),this.T) * obs_T.slice(observations(t))
val max = 1f/(NDArray.max(alpha_theta_tmp).toScalar)
(alpha_theta_tmp*max).copyTo(alpha_theta.slice(t))
(NDArray.dot(alpha_real.slice(t-1),T_est) * obs_pi_est_T.slice(observations(t))*max).copyTo(alpha_real.slice(t))
// println(alpha_theta.slice(t))
// println(alpha_real.slice(t))
alpha_theta_tmp.dispose()
alpha_t.dispose()
}
			// beta_t(i) = P(x_{t+1},\cdots,x_T | y_t = i, \theta)
val beta = NDArray.zeros(Shape(nsamples,nstates),ctx)
(NDArray.ones(Shape(1,nstates),ctx)*c(nsamples-1)).copyTo(beta.slice(nsamples-1))
// update beta backwards from end of sequence
for(t<- (1 until nsamples).reverse ){
val beta_t_minus = NDArray.dot(obs_pi_est_T.slice(observations(t))*beta.slice(t),NDArray.transpose(T_est))
(beta_t_minus*c(t-1)).copyTo(beta.slice(t-1))
beta_t_minus.dispose()
}
// \xi_t(i,j)
// val xi = NDArray.zeros(Shape(nsamples,nstates,nstates),ctx)
val xi = Array.fill[NDArray](nsamples)(NDArray.zeros(Shape(nstates,nstates),ctx))
for(t<- (0 until nsamples-1)){
// val denom = NDArray.dot(NDArray.dot(alpha.slice(t), T_est)*obs_pi_est_T.slice(observations(t+1)),NDArray.transpose(beta.slice(t+1))).toScalar
val denom = (NDArray.sum(NDArray.dot(alpha.slice(t),T_est) * obs_pi_est_T.slice(observations(t+1)) *beta.slice(t+1))).toScalar
// println(denom-denom1)
for(i <- 0 until nstates){
val numer =T_est.slice(i) * obs_pi_est_T.slice(observations(t+1)) *beta.slice(t+1) * alpha(t,i)
(numer/denom).copyTo(xi(t).slice(i))
// tmp += numer
numer.dispose()
}
// xi(t) /= NDArray.sum(tmp).toScalar
}
var gamma_arr = xi.map(xij => {
(0 until nstates).map{i =>{
// sum_gamma1(i) += NDArray.sum(xij.slice(i)).toScalar
NDArray.sum(xij.slice(i)).toScalar
}}
}).flatten
var gamma = NDArray.array(gamma_arr, Shape(nsamples,nstates), ctx)
//
val newpi = gamma.slice(0)
var gamma_t = NDArray.transpose(gamma)
val newT = xi.reduceRight(_+_)
var sum_gamma = (0 until nstates).map(i => NDArray.sum(gamma_t.slice(i)).toScalar).toArray
// println(sum_gamma)
(0 until nstates).map(i => {
newT.slice(i) /= sum_gamma(i)
})
val tmp1 = alpha.slice(nsamples-1)*beta.slice(nsamples-1)
(tmp1/NDArray.sum(tmp1).toScalar).copyTo(gamma.slice(nsamples-1))
//beta
NDArray.transpose(gamma).copyTo(gamma_t)
sum_gamma = (0 until nstates).map(i => NDArray.sum(gamma_t.slice(i)).toScalar).toArray
val sum_gamma_nda = NDArray.array(sum_gamma, Shape(1,nstates), ctx)
val newObs_pi_T = NDArray.zeros(obs_pi_est_T.shape,ctx)
observations.indices.foreach(id =>{
val obs = observations(id)
newObs_pi_T.slice(obs) += gamma.slice(id)
})
(0 until nhiddenstates).map(id=>{
newObs_pi_T.slice(id) /= sum_gamma_nda
})
// println(newpi)
// println(newT)
// println(newObs_pi_T)
// println(alpha_real.slice(nsamples-1))
// println(alpha_theta.slice(nsamples-1))
// if(NDArray.norm(pi_est-newpi).toScalar<criterion && NDArray.norm(T_est-newT).toScalar<criterion && NDArray.norm(obs_pi_est_T-newObs_pi_T).toScalar<criterion)
if(math.abs(NDArray.sum(alpha_theta.slice(nsamples-1)-alpha_real.slice(nsamples-1)).toScalar) < criterion || iter>100)
done = !done
newObs_pi_T.copyTo(obs_pi_est_T)
newpi.copyTo(pi_est)
newT.copyTo(T_est)
alpha_real.dispose()
alpha_theta.dispose()
alpha_0.dispose()
alpha.dispose()
beta.dispose()
gamma.dispose()
gamma_t.dispose()
xi.foreach(_.dispose())
iter += 1
}
Array(pi_est,T_est,obs_pi_est_T)
}
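	/**
	 * The re-estimation above follows the standard Baum-Welch (EM) updates:
	 *   pi'(i)  = gamma_0(i)
	 *   T'(i,j) = sum_t xi_t(i,j) / sum_t gamma_t(i)
	 *   b'(i,k) = sum over t with x_t = k of gamma_t(i) / sum_t gamma_t(i)
	 * where gamma_t(i) = P(y_t = i | x, theta) and
	 *       xi_t(i,j)  = P(y_t = i, y_{t+1} = j | x, theta),
	 * both computed from the scaled forward (alpha) and backward (beta) passes.
	 */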
def viterbiAlgorithm(pi_est:NDArray,T_est:NDArray,obs_pi_est_T:NDArray,x:Array[Int]):Array[Int] = {
val ctx = Context.cpu(0)
val nsamples = x.length
val nstates = T_est.shape(0)
val sobservations = obs_pi_est_T.shape(0)
val delta = NDArray.zeros(Shape(nsamples,nstates), ctx)
val phi = NDArray.zeros(Shape(nsamples,nstates), ctx)
val T_est_T = NDArray.transpose(T_est)
		// delta_0(i) = pi(i) * b_i(x_0); note obs_pi_est_T is (observation, state)
		(pi_est*obs_pi_est_T.slice(x(0))).copyTo(delta.slice(0))
		for(t <-0 until nsamples-1){
			// Viterbi recursion: delta_{t+1}(i) = max_j { delta_t(j) * T(j,i) } * b_i(x_{t+1})
			val nda = delta.slice(t)
			for(i<- 0 until nstates){
				delta(t+1,i) += (NDArray.max(nda * T_est_T.slice(i))*obs_pi_est_T(x(t+1),i)).toScalar
			}
			val broadcast_nda = NDArray.concatenate(nda,nda,nda) // note: assumes nstates == 3
			(NDArray.argmaxChannel(broadcast_nda* T_est_T).reshape(Array(1,nstates))).copyTo(phi.slice(t+1))
		}
}
val y = Array.fill[Int](nsamples)(0)
y(nsamples-1) = NDArray.argmaxChannel(delta.slice(nsamples-1)).toScalar.toInt
for(t <- (nsamples-2 to 0 by -1)){
y(t) = NDArray.argmaxChannel(delta.slice(t)*T_est_T.slice(y(t+1))).toScalar.toInt
}
y
}
}
object HMM{
def main(args:Array[String]){
// test_homework(1000)
test_homework1
}
def test{
val ctx = Context.cpu(0)
val num_states = 3 // A,B,C
val num_obs = 3
val pi = NDArray.Normalize((NDArray.array(Array(0.1f,0.4f,0.5f),Shape(1,num_states),ctx)))
val obs_pi = NDArray.array(Array(0.5f,0.3f,0.2f,0.1f,0.6f,0.3f,0.0f,0.3f,0.7f),Shape(num_states,num_obs),ctx)
val T = NDArray.array(Array(0.7f,0.2f,0.1f,0.1f,0.6f,0.3f,0.4f,0.2f,0.4f),Shape(num_states,num_states),ctx)
val hmm = new HMM(pi,T,obs_pi)
val (y,x) = hmm.simulation(1000)
x.foreach(println)
val Array(pi1,t1,obspi1) = hmm.train(x)
println(s"pi:$pi1")
println(s"T:$t1")
println(s"obspi:$obspi1")
}
def test1{
val ctx = Context.cpu(0)
val num_states = 2 // A,B,C
val num_obs = 3
val pi = NDArray.Normalize((NDArray.array(Array(0.5f,0.5f),Shape(1,num_states),ctx)))
val obs_pi = NDArray.array(Array(0.7f,0.2f,0.1f,0.1f,0.6f,0.3f),Shape(num_states,num_obs),ctx)
val T = NDArray.array(Array(0.5f,0.5f,0.2f,0.8f),Shape(num_states,num_states),ctx)
val hmm = new HMM(pi,T,obs_pi)
val (y,x) = hmm.simulation(1000)
// val x = Array(2, 0, 0, 0, 0, 0, 0, 1, 0, 0)
// x.foreach(println)
hmm.train(x)
val Array(pi1,t1,obspi1) = hmm.train(x)
println(s"pi:$pi1")
println(s"T:$t1")
println(s"obspi:$obspi1")
}
def test_homework(num:Int){
val ctx = Context.cpu(0)
val num_states = 3 // A,B,C
val num_obs = 2
val pi = NDArray.Normalize((NDArray.array(Array(0.3f,0.3f,0.4f),Shape(1,num_states),ctx)))
val obs_pi = NDArray.array(Array(0.1f,0.9f,0.5f,0.5f,0.9f,0.1f),Shape(num_states,num_obs),ctx)
val T = NDArray.array(Array(0.8f,0.2f,0f,0.1f,0.7f,0.2f,0.1f,0f,0.9f),Shape(num_states,num_states),ctx)
val hmm = new HMM(pi,T,obs_pi)
val Ts = NDArray.zeros(Shape(num,num_states,num_states), ctx)
val obs_pis = NDArray.zeros(Shape(num,num_states,num_obs), ctx)
for (i<- 0 until num){
println(s"**************step $i****************")
val (y,x) = hmm.simulation(10000)
val res = hmm.train(x)
println(s"T:${res(1)}")
println(s"obs_pis:${res(2)}")
res(1).reshape(Array(1,num_states,num_states)).copyTo(Ts.slice(i))
res(2).reshape(Array(1,num_states,num_obs)).copyTo(obs_pis.slice(i))
}
println(s"T variance:"+NDArray.norm(Ts))
println(s"obs_pis variance :"+NDArray.norm(obs_pis))
// println(s"T:$t1")
// println(s"obspi:$obspi1")
}
def test_homework1{
val ctx = Context.cpu(0)
val num_states = 3 // A,B,C
val num_obs = 2
val pi = NDArray.Normalize((NDArray.array(Array(0.3f,0.3f,0.4f),Shape(1,num_states),ctx)))
val obs_pi = NDArray.array(Array(0.1f,0.9f,0.5f,0.5f,0.9f,0.1f),Shape(num_states,num_obs),ctx)
val T = NDArray.array(Array(0.8f,0.2f,0f,0.1f,0.7f,0.2f,0.1f,0f,0.9f),Shape(num_states,num_states),ctx)
val hmm = new HMM(pi,T,obs_pi)
val (y,x) = hmm.simulation(10000)
// x.foreach(println)
val Array(pi1,t1,obspi1) = hmm.train(x)
val y_est = hmm.viterbiAlgorithm(pi,T,NDArray.transpose(obs_pi),x)
var error = 0f
y zip y_est foreach{case(yi,yie) =>{
error += math.abs(yi-yie)
}}
println(s"TASK 2 estimate Y, error:${error/y.length}")
println("TASK 3, estimate model:")
// println(s"pi:$pi1")
println(s"T:${NDArray.norm(t1-T)}")
println(s"obspi:${NDArray.norm(obs_pi-obspi1)}")
}
}
|
Liuxg16/BrainMatrix
|
scalakernel/src/main/java/thu/brainmatrix/visualization/LeNet.scala
|
package thu.brainmatrix.visualization
import thu.brainmatrix.Symbol
/**
* @author <NAME>
*/
object LeNet {
def getSymbol(numClasses: Int = 10): Symbol = {
val data = Symbol.CreateVariable("data")
val conv1 = Symbol.Convolution()(Map("data" -> data, "name" -> "conv1",
"num_filter" -> 20, "kernel" -> (5, 5)/*, "stride" -> (2, 2)*/))
val act1 = Symbol.Activation()(Map("data" -> conv1, "name" -> "tanh1", "act_type" -> "tanh"))
val mp1 = Symbol.Pooling()(Map("data" -> act1, "name" -> "mp1",
"kernel" -> (2, 2), "stride" -> (2, 2), "pool_type" -> "max"))
//second conv
val conv2 = Symbol.Convolution()(Map("data" -> mp1, "name" -> "conv2", "num_filter" -> 50,
"kernel" -> (5, 5), "stride" -> (2, 2)))
val act2 = Symbol.Activation()(Map("data" -> conv2, "name" -> "tanh2", "act_type" -> "tanh"))
val mp2 = Symbol.Pooling()(Map("data" -> act2, "name" -> "mp2",
"kernel" -> (2, 2), "stride" -> (2, 2), "pool_type" -> "max"))
//first fullc
val fl = Symbol.Flatten()(Map("data" -> mp2, "name" -> "flatten"))
val fc1 = Symbol.FullyConnected()(Map("data" -> fl, "name" -> "fc1", "num_hidden" -> 500))
val act3 = Symbol.Activation()(Map("data" -> fc1, "name" -> "tanh3", "act_type" -> "tanh"))
//second fullc
val fc2 = Symbol.FullyConnected()(Map("data" -> act3, "name" -> "fc2", "num_hidden" -> 30))
//loss
val softmax = Symbol.SoftmaxOutput()(Map("data" -> fc2, "name" -> "sm"))
softmax
}
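  /**
   * A minimal sketch of how the symbol above might be rendered with the repo's
   * Visualization helper (the title, output path and input shape are assumptions):
   *
   *   val net = LeNet.getSymbol(numClasses = 10)
   *   val dot = Visualization.plotNetwork(symbol = net, title = "lenet",
   *     shape = Map("data" -> Shape(1, 1, 28, 28)),
   *     nodeAttrs = Map("shape" -> "rect", "fixedsize" -> "false"))
   *   dot.render(engine = "dot", format = "pdf", fileName = "lenet", path = ".")
   */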
}
|
Liuxg16/BrainMatrix
|
scalakernel/src/main/java/thu/brainmatrix/Monitor.scala
|
<filename>scalakernel/src/main/java/thu/brainmatrix/Monitor.scala
package thu.brainmatrix
import thu.brainmatrix.Base.NDArrayHandle
import org.slf4j.LoggerFactory
import scala.collection.mutable
/**
* Monitor outputs, weights, and gradients for debugging.
*
* @author <NAME>, <NAME>
*
* @param interval Number of batches between printing.
 * @param statFunc A function that computes statistics of tensors.
 *                 Takes an NDArray and returns an NDArray. Defaults to
 *                 the 2-norm scaled by 1/sqrt(size), i.e. ||x|| / sqrt(size(x)).
*/
class Monitor(protected val interval: Int, protected var statFunc: (NDArray) => NDArray = null) {
private val logger = LoggerFactory.getLogger(classOf[Monitor])
if (statFunc == null) {
statFunc = (x: NDArray) => {
NDArray.norm(x) / math.sqrt(x.size.toDouble).toFloat
}
}
private var activated: Boolean = false
private var queue = new mutable.Queue[(Int, String, NDArray)]
private var step: Int = 0
private var exes = new mutable.Queue[Executor]
val statHelper: MXMonitorCallback = new MXMonitorCallback {
override def invoke(name: String, arr: NDArrayHandle): Unit = {
// wrapper for executor callback
if (activated) {
val array = new NDArray(arr, writable = false)
val elem = (step, name, statFunc(array))
queue += elem
}
}
}
/**
* Install callback to executor.
* Supports installing to multiple exes
* @param exe the Executor (returned by symbol.bind) to install to.
*/
def install(exe: Executor): Unit = {
exe.setMonitorCallback(statHelper)
exes += exe
}
/**
* Start collecting stats for current batch.
* Call before forward
*/
def tic(): Unit = {
if (step % interval == 0) {
exes.foreach { exe =>
exe.argArrays.foreach(_.waitToRead())
}
queue = new mutable.Queue[(Int, String, NDArray)]
activated = true
}
step += 1
}
/**
* End collecting for current batch and return results.
* Call after computation of current batch.
*/
def toc(): mutable.Queue[(Int, String, String)] = {
if (activated) {
exes.foreach { exe =>
exe.argArrays.foreach(_.waitToRead())
}
exes.foreach { exe =>
(exe.symbol.listArguments() zip exe.argArrays).foreach { case (name, array) =>
val elem = (step, name, statFunc(array))
queue += elem
}
}
activated = false
val res = new mutable.Queue[(Int, String, String)]
queue.foreach { q =>
val (n, k, v) = q
if (v.shape == Shape(1)) {
res += ((n, k, v.toScalar.toString))
} else {
res += ((n, k, s"[${v.toArray.mkString(",")}]"))
}
}
queue = new mutable.Queue[(Int, String, NDArray)]
res
} else {
new mutable.Queue[(Int, String, String)]
}
}
/**
* End collecting and print results
*/
def tocPrint(): Unit = {
val res = toc()
res.foreach { case (n, k, v) =>
logger.info(s"Batch: $n $k $v")
}
}
}
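/**
 * A minimal usage sketch of the tic/toc protocol above (the executor "exe"
 * and the interval value are assumptions):
 *
 *   val monitor = new Monitor(interval = 100)
 *   monitor.install(exe)   // exe is an Executor returned by symbol.bind
 *   monitor.tic()          // call before forward
 *   exe.forward()
 *   monitor.tocPrint()     // call after the batch; logs the collected stats
 */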
trait MXMonitorCallback {
def invoke(name: String, arr: NDArrayHandle): Unit
}
|
Liuxg16/BrainMatrix
|
scalakernel/src/main/java/thu/brainmatrix/synapse/Model.scala
|
package thu.brainmatrix.synapse
import thu.brainmatrix.NDArray
import thu.brainmatrix.Context
import thu.brainmatrix.Shape
class Model(val ctx:Context) {
var modules = Vector[Module]();
var indices = Vector[Array[Int]]();
var variables:Array[String] = Array[String]()
var varNumber :Int = 0;
var initialVector = Vector[NDArray]();
def addModule(module:Module){
//add modules
this.modules :+= (module);
//add initial numbers
for(i <- 0 until module.getInitial().length){
initialVector :+= (module.getInitial()(i));
}
// set indices in each module
module.setIndices(this.varNumber);
// update the number of variable number
this.varNumber += module.getVarNumber();
// add the variable indices
this.indices :+= (module.getVarIndices());
}
def update(t: NDArray,y:Array[NDArray]):Array[NDArray] = {
var yDot:Array[NDArray] = y.map { x => x.copy() }
for(i <- 0 until this.modules.length){
// println(s"lemonman3$i")
yDot = this.modules(i).update(t, y, yDot,this.modules(i).getVarIndices());
}
// println("lemonman3")
yDot
}
def getInitial():Array[NDArray] = {
this.initialVector.toArray
// var temp = Array.fill[NDArray](this.initialVector.length)(NDArray());
// for(i <- 0 until this.initialVector.length){
// temp(i) = this.initialVector(i);
// }
//
// return temp;
}
def printIndices(){
for(i <- 0 until this.indices.length){
for(j<- 0 until this.indices(i).length){
System.out.print(this.indices(i)(j)+" ");
}
System.out.println();
}
}
def printVarsName(){
for(i <- 0 until this.indices.length){
var module = this.modules(i);
for(j<- 0 until module.getVarsName().length){
System.out.print(module.getVarsName()(j) + " ");
}
System.out.println();
}
}
}
|
Liuxg16/BrainMatrix
|
scalakernel/src/main/java/thu/brainmatrix/LRScheduler.scala
|
<gh_stars>0
package thu.brainmatrix
import org.slf4j.LoggerFactory
/**
* Learning rate scheduler, which adaptively changes the learning rate
* based on the training progress.
* @author <NAME>
*/
abstract class LRScheduler(var baseLR: Float = 0.01f) {
/**
* Base class of a learning rate scheduler
*
   * The training progress is represented by `num_update`, which can be roughly
   * viewed as the number of minibatches executed so far. Its value is
   * non-decreasing and increases by at most one between consecutive calls.
   *
   * The exact value is an upper bound on the number of updates applied to
   * any weight/index.
*
* @param numUpdate Int, the maximal number of updates applied to a weight.
*/
def apply(numUpdate: Int): Float
}
/**
* Class for reducing learning rate in factor
*
* Assume the weight has been updated by n times, then the learning rate will
* be base_lr * factor^^(floor(n/step))
*
* @param step Int, schedule learning rate after n updates
* @param factor Float, the factor for reducing the learning rate
*
*/
class FactorScheduler(protected var step: Int, protected var factor: Float) extends LRScheduler {
protected var count: Int = 0
private val logger = LoggerFactory.getLogger(classOf[FactorScheduler])
  require(step >= 1, "Schedule step must be greater than or equal to 1")
  require(factor < 1.0, "Factor must be less than 1 to make the learning rate decrease")
def apply(numUpdate: Int): Float = {
if (numUpdate > this.count + this.step) {
this.count += this.step
this.baseLR *= this.factor
this.logger.info(s"Update$numUpdate: Change learning rate to ${this.baseLR}")
}
this.baseLR
}
}
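/**
 * A small worked example of the schedule above (the numbers are illustrative):
 * with baseLR = 0.01, step = 1000 and factor = 0.9, the learning rate stays at
 * 0.01 for roughly the first 1000 updates, then drops to 0.01 * 0.9 = 0.009,
 * then to 0.0081, and so on, i.e. lr(n) ~ 0.01 * 0.9^(floor(n / 1000)):
 *
 *   val scheduler = new FactorScheduler(step = 1000, factor = 0.9f)
 *   // call once per minibatch with a non-decreasing numUpdate;
 *   // each call reduces the learning rate at most once
 *   val lr = scheduler(numUpdate)
 */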
|
Liuxg16/BrainMatrix
|
scalakernel/src/main/java/thu/brainmatrix/synapse_symbol/Config.scala
|
package thu.brainmatrix.synapse_symbol
import thu.brainmatrix.Shape
import thu.brainmatrix.NDArray
import thu.brainmatrix.Symbol
import thu.brainmatrix.Context
object Config {
final val NUMBER = 1000
final val SHAPE = Shape(1,NUMBER)
final val SPIKENUM = 10
final val one_s = Symbol.CreateVariable("one_s")
final val zero_s = Symbol.CreateVariable("zero_s")
final val spikes_ones_s = Symbol.CreateVariable("spikes_ones_s")
final val CTX = Context.cpu(0)
final val onenda = NDArray.ones(SHAPE, CTX)
final val zerosnda = NDArray.zeros(SHAPE, CTX)
final val spikes_ones_nda = NDArray.ones(Shape(SPIKENUM,1), CTX)
final val MAP = Map("one_s"->onenda,"zero_s"->zerosnda,"spikes_ones_s"->spikes_ones_nda)
}
|
Liuxg16/BrainMatrix
|
scala-package/core/src/test/scala/ml/dmlc/mxnet/ShapeSuite.scala
|
<filename>scala-package/core/src/test/scala/ml/dmlc/mxnet/ShapeSuite.scala
package ml.dmlc.mxnet
import org.scalatest.{BeforeAndAfterAll, FunSuite}
class ShapeSuite extends FunSuite with BeforeAndAfterAll {
test("to string") {
val s = Shape(1, 2, 3)
assert(s.toString === "(1,2,3)")
}
test("equals") {
assert(Shape(1, 2, 3) === Shape(1, 2, 3))
assert(Shape(1, 2) != Shape(1, 2, 3))
}
}
|
Liuxg16/BrainMatrix
|
scalakernel/src/main/java/thu/brainmatrix/synapse/Axon.scala
|
package thu.brainmatrix.synapse
import thu.brainmatrix.NDArray
import thu.brainmatrix.Context
import thu.brainmatrix.Shape
class Axon(val ctx: Context = Context.defaultCtx) extends Module {
override var variable_table = Array[String]("preVm")
override var variableindices = Array(-1)
//connectivity
var synapses = Vector[Synapse]();
var input :Input = null
val onenda = NDArray.ones(Config.SHAPE,ctx)
//parameters
var gK :NDArray = onenda;
var Vk :NDArray = - onenda* 70;
	var Cm :NDArray = onenda * 10; // membrane capacitance
var SensorIn:NDArray = onenda * 2;
//others
var freeSensor:NDArray = onenda * 0f
// variables
var preVm: NDArray = onenda * -70f
def setValue(gK: NDArray,Vk: NDArray,Cm: NDArray,SensorIn: NDArray,preVm: NDArray){
this.gK = gK;
this.Vk = Vk;
this.Cm = Cm;
this.SensorIn = SensorIn;
this.preVm = preVm;
}
def getSynapses(idx:Int):Synapse = {
synapses(idx)
}
def addSynapse(s:Synapse){
s.axon = this;
synapses = synapses.:+(s);
}
def addSpikeInput(input:Input){
this.input = input;
}
override def getInitial():Array[NDArray] = {
Array(this.preVm)
}
	/**
	 * indices: the state-variable indices that this module needs;
	 * all operations are vectorized over the population
	 */
override def update(t: NDArray, y:Array[NDArray],yDot:Array[ NDArray],indices:Array[Int]):Array[NDArray] = {
this.preVm = y(indices(0))
val input = this.input.getinput(t);
// val input = NDArray.zeros(Config.SHAPE, ctx)
// println(this.preVm.shape)
// println(input.context)
val d_preVm = - (input+this.gK*(this.preVm-this.Vk))/this.Cm;
// Sensor can diffuse between synapses
this.freeSensor = this.SensorIn;
for(i <- 0 until this.synapses.length){
this.freeSensor = this.freeSensor - this.synapses(i).preSensor;
}
yDot(indices(0))=d_preVm;
input.dispose()
yDot
}
}
|
Liuxg16/BrainMatrix
|
scalakernel/src/test/java/thu/brainmatrix/suite/ExecutorSuite.scala
|
<reponame>Liuxg16/BrainMatrix<filename>scalakernel/src/test/java/thu/brainmatrix/suite/ExecutorSuite.scala
package thu.brainmatrix.suite
import thu.brainmatrix.Symbol
import thu.brainmatrix.Random
import thu.brainmatrix.NDArray
import thu.brainmatrix.Context
import thu.brainmatrix.Shape
import org.scalatest.{BeforeAndAfterAll, FunSuite}
class ExecutorSuite extends FunSuite with BeforeAndAfterAll {
test("bind") {
val shape = Shape(10, 3)
val lhs = Symbol.Variable("lhs")
val rhs = Symbol.Variable("rhs")
val ret1 = lhs + rhs
    val ret = ret1 / 4 + rhs
assert(ret.listArguments().toArray === Array("lhs", "rhs"))
// println(ret.debug())
val lhsArr = NDArray.ones(shape)
val rhsArr = NDArray.ones(shape)*2
val lhsGrad = NDArray.zeros(shape)
val rhsGrad = NDArray.empty(shape)
val executor = ret.easy_bind(ctx = Context.cpu(), args = Map("lhs"->lhsArr, "rhs"->rhsArr),
argsGrad = Map("lhs"->lhsGrad, "rhs"-> rhsGrad))
//
executor.forward()
//
// val out1 = lhsArr + rhsArr
val out2 = executor.outputs(0)
//
//
// // test gradient
// val outGrad = NDArray.ones(shape)
// val (lhsGrad2, rhsGrad2) = (outGrad, outGrad)
// executor.backward(Array(outGrad))
//
// println(out2)
}
}
|
Liuxg16/BrainMatrix
|
scala-package/core/src/main/scala/ml/dmlc/mxnet/optimizer/AdaDelta.scala
|
package ml.dmlc.mxnet.optimizer
import ml.dmlc.mxnet.{NDArray, Optimizer}
import ml.dmlc.mxnet.NDArrayConversions._
/**
* AdaDelta optimizer as described in <NAME>, 2012.
* http://arxiv.org/abs/1212.5701
*
* @author <NAME>, <NAME>
*
* @param rho Decay rate for both squared gradients and delta x.
 * @param epsilon The constant as described in the paper
 * @param rescaleGradient rescaling factor of gradient.
 * @param clipGradient clip gradient in range [-clip_gradient, clip_gradient]
 * @param wd L2 regularization coefficient added to all the weights
*/
class AdaDelta(var rho: Float = 0.05f, val rescaleGradient: Float = 1.0f,
val epsilon: Float = 1e-8f, val wd: Float = 0.0f,
val clipGradient: Float = 0f) extends Optimizer {
/**
* Update the parameters.
   * @param index A unique integer key used to index the parameters
* @param weight weight ndarray
* @param grad grad ndarray
* @param state NDArray or other objects returned by initState
* The auxiliary state used in optimization.
*/
override def update(index: Int, weight: NDArray, grad: NDArray, state: AnyRef): Unit = {
var resdGrad = grad * this.rescaleGrad
if (clipGradient != 0f) {
val oldResdGrad = resdGrad
resdGrad = NDArray.clip(resdGrad, -clipGradient, clipGradient)
oldResdGrad.dispose()
}
val (accG, accDelta) = state.asInstanceOf[(NDArray, NDArray)]
val newAccG = (this.rho * accG + (1.0f - this.rho) *
resdGrad * resdGrad).disposeDepsExcept(accG, resdGrad)
accG.set(newAccG)
val currentDelta = (
NDArray.sqrt(accDelta + this.epsilon) /
NDArray.sqrt(accG + this.epsilon) * resdGrad).disposeDepsExcept(accDelta, accG, resdGrad)
val newAccDelta = (this.rho * accDelta +
(1.0f - this.rho) * currentDelta * currentDelta).disposeDepsExcept(accDelta, currentDelta)
accDelta.set(newAccDelta)
weight *= (1 - this.wd)
weight -= currentDelta
newAccG.dispose()
newAccDelta.dispose()
resdGrad.dispose()
currentDelta.dispose()
}
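  /**
   * The update above implements the AdaDelta recurrences from the paper
   * referenced in the class comment:
   *   E[g^2]_t  = rho * E[g^2]_{t-1} + (1 - rho) * g_t^2
   *   dx_t      = sqrt(E[dx^2]_{t-1} + eps) / sqrt(E[g^2]_t + eps) * g_t
   *   E[dx^2]_t = rho * E[dx^2]_{t-1} + (1 - rho) * dx_t^2
   *   w_t       = (1 - wd) * w_{t-1} - dx_t
   */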
override def createState(index: Int, weight: NDArray): (NDArray, NDArray) = {
(NDArray.zeros(weight.shape, weight.context), // accumulated g
NDArray.zeros(weight.shape, weight.context)) // accumulated delta
}
// Dispose the state it created
override def disposeState(state: AnyRef): Unit = {
if (state != null) {
val (g, delta) = state.asInstanceOf[(NDArray, NDArray)]
g.dispose()
delta.dispose()
}
}
}
|
Liuxg16/BrainMatrix
|
scalakernel/src/main/java/thu/brainmatrix/synapse/Config.scala
|
<gh_stars>0
package thu.brainmatrix.synapse
import thu.brainmatrix.Shape
object Config {
final val NUMBER = 300
final val SHAPE = Shape(1,NUMBER)
}
|
Liuxg16/BrainMatrix
|
scalakernel/src/main/java/thu/brainmatrix/synapse_symbol/Input.scala
|
<filename>scalakernel/src/main/java/thu/brainmatrix/synapse_symbol/Input.scala
package thu.brainmatrix.synapse_symbol
import thu.brainmatrix.NDArray
import thu.brainmatrix.Symbol
import thu.brainmatrix.Context
import thu.brainmatrix.Shape
/**
* starttime,endtime,dt,rate,time_last: num_inputs *1
*
*/
class Input(val name:String)(ctx:Context) {
// parameters
// variable
/***
* current:matrix(spikeNum,num_inputs)
* input0
* input1
* ...
*/
val ctx_cpu = Context.cpu(0)
val num_inputs = Config.NUMBER
var current_nda:NDArray = null
val current = Symbol.CreateVariable(s"current_$name")
def initial(rate:Int){
// this.current_nda = NDArray.zeros(Shape(Config.SPIKENUM,num_inputs), ctx_cpu)
val current_tmp = NDArray.zeros(Shape(Config.SPIKENUM,num_inputs), ctx_cpu)
var spikeingI = NDArray.ones(Config.SHAPE, ctx_cpu) * -30f
for(i<- 10 until (Config.SPIKENUM-20) by Math.round(1000/(rate)).toInt){
for(j<- 0 until 15){
// for(k<- 0 until num_inputs){
// this.current_nda(k,i+j) = -30f
// }
spikeingI.copyTo(current_tmp.slice(i+j))
}
}
this.current_nda = NDArray.transpose(current_tmp.copyTo(ctx))
current_tmp.dispose()
}
// (NUMBER,SPIKENUM) => (number)
def getinput(t_onehot:Symbol):Symbol = {
val I = Symbol.Sum("sum")(Map("data"->t_onehot * this.current,"axis"->1))
// val I = Symbol.Dot(t_onehot * this.current,Config.spikes_ones_s , 1)
Symbol.Reshape("reshape")(Map("data"->I,"target_shape" -> s"(1,${Config.NUMBER})"))
}
}
|
Liuxg16/BrainMatrix
|
scalakernel/src/main/java/thu/brainmatrix/visualization/SynapseVis.scala
|
<reponame>Liuxg16/BrainMatrix<filename>scalakernel/src/main/java/thu/brainmatrix/visualization/SynapseVis.scala
package thu.brainmatrix.visualization
import thu.brainmatrix.synapse_symbol._
import thu.brainmatrix.Shape
import scala.util.parsing.json._
import thu.brainmatrix.Symbol
import thu.brainmatrix.Visualization
object SynapseVis {
def main(args: Array[String]): Unit = {
val leis = new ExampleVis
leis.net
val ctx = Config.CTX
val xpreinput1 = new Input("input1")(ctx);
xpreinput1.initial(3)
// create an axon
val xaxon1 = new Axon(ctx,"axon1");
xaxon1.addSpikeInput(xpreinput1);
// create a dendrite
val xdendrite1 = new Dendrite(ctx,"Dendrite1");
		// create a synapse
val xsynapse1 = new Synapse(ctx,"Synapse1");
xaxon1.addSynapse(xsynapse1);
xdendrite1.addSynapse(xsynapse1);
// input with higher input rates
val xpreinput2 = new Input("input2")(ctx);
xpreinput2.initial(5)
val xaxon2 = new Axon(ctx,"axon2");
xaxon2.addSpikeInput(xpreinput2);
val xdendrite2 = new Dendrite(ctx,"Dendrite2");
val xsynapse2 = new Synapse(ctx,"Synapse2");
xaxon2.addSynapse(xsynapse2);
xdendrite2.addSynapse(xsynapse2);
		// create a model
val model = new Model(ctx);
//
model.addModule(xaxon1);
model.addModule(xaxon2);
model.addModule(xsynapse1);
model.addModule(xsynapse2);
model.addModule(xdendrite1);
model.addModule(xdendrite2);
val (sym, shape) = (model.update(),Shape(1, 1, 28, 28))
val dot = Visualization.plotNetwork(symbol = sym,
title = leis.net, shape = Map("data" -> shape),
nodeAttrs = Map("shape" -> "rect", "fixedsize" -> "false"))
dot.render(engine = "dot", format = "pdf", fileName = leis.net, path = leis.outDir)
}
}
|
Liuxg16/BrainMatrix
|
scalakernel/src/main/java/thu/brainmatrix/util/IOHelper.scala
|
package thu.brainmatrix.util
import scala.io.Source
import thu.brainmatrix._
import scala.collection.immutable.Set
object IOHelper {
def read_content(path:String):String = {
val content = Source.fromFile(path).mkString
content.replaceAll("\n"," <eos> ")
}
  // Build a vocabulary of the words that appear in the content
def buildVocab(path: String): Map[String, Int] = {
val content = read_content(path)
var words = content.split(" ")
var vocab = words.filter { _.length()>0 }.toSet
// words.foreach {println}
val vocabs = vocab.toArray.sorted
var idx = 1 // 0 is left for zero padding
var theVocab = Map[String, Int]()
for (word <- vocabs) {
if (!theVocab.contains(word)) {
theVocab = theVocab + (word -> idx)
idx += 1
}
}
theVocab
}
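  /**
   * A minimal usage sketch (the path is a placeholder):
   *
   *   val vocab: Map[String, Int] = IOHelper.buildVocab("./seqData/input.txt")
   *   // every distinct word maps to an index >= 1; index 0 is reserved for padding
   */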
def doCheckpoint(prefix: String): EpochEndCallback = new EpochEndCallback {
override def invoke(epoch: Int, symbol: Symbol,
argParams: Map[String, NDArray],
auxStates: Map[String, NDArray]): Unit = {
Model.saveCheckpoint(prefix, epoch + 1, symbol, argParams, auxStates)
}
}
}
|
Liuxg16/BrainMatrix
|
scalakernel/src/main/java/thu/brainmatrix/lstmbyguo/CharRNN.scala
|
package thu.brainmatrix.lstmbyguo
import java.io.File
import java.io.FileNotFoundException
import scala.collection.immutable.Set
import scala.io.Source
import thu.brainmatrix.NDArray
import thu.brainmatrix.Random
import scala.util.control.Breaks
import java.io.PrintWriter
import java.io.FileWriter
import thu.brainmatrix.Shape
/*
 *@author guoshen
 *@date 2016/7/21
 *@ introduction: a model that generates text at the character level with a vanilla RNN.
 * */
class CharRNN {
}
object CharRNN {
  private val inputfilepath: String = "./seqData/inputs.txt" // absolute path of the input data file
  private val outputfilepath: String = "./seqData/outputs.txt"
  private val matrixfilepath: String = "./seqData/matrixs.txt"
var outputfile = new File(outputfilepath)
  // outputfile.deleteOnExit() // delete the stale output file
outputfile.createNewFile()
var matrixfile = new File(matrixfilepath)
matrixfile.createNewFile()
  /**
   * @author guoshen
   * @date 2016/7/21
   * @brief
   * weighted sampling from a probability distribution, as follows:
   * suppose the distribution is pro = [0.2, 0.3, 0.5];
   * compute the cumulative-sum array sum = [0.2, 0.5, 1.0],
   * then draw a random number rand in [0, 1] and compare it against sum in order,
   * picking the first entry with sum(i) >= rand
   * and returning that entry's index i
   */
def plusproform(pro: NDArray): NDArray = {
var sum: Array[Float] = NDArray.zeros(pro.shape).toArray
var temp_sum: Float = 0
for (i <- 0 until pro.size) {
temp_sum += pro(i)
sum(i) = temp_sum
}
var rand = Math.random().toFloat
var res = -1
val loop = new Breaks
loop.breakable {
for (i <- 0 until sum.length) {
if (rand <= sum(i)) { res = i; loop.break() }
}
}
NDArray.array(Array(res), Shape(1, 1))
}
def sample(h: NDArray, seed_ix: Int, n: Int, vocab_size: Int, Wxh: NDArray, Whh: NDArray, Why: NDArray, bh: NDArray, by: NDArray): Array[Int] = {
var x = NDArray.zeros(vocab_size, 1)
    x(seed_ix) = 1 // x is the one-hot character vector of the seed character
    // println("seed:" + seed_ix + " and x : " + x)
var ixes: Array[Int] = Array()
var temph: NDArray = h
for (t <- 0 until n) {
temph = NDArray.tanh(NDArray.dot(Wxh, x) + NDArray.dot(Whh, temph) + bh)
var y = NDArray.dot(Why, temph) + by
var expy = NDArray.exp(y)
var p = expy / NDArray.sum(expy).toScalar
var temp_p = NDArray.array(p.toArray, Shape(1, vocab_size))
      var ix: NDArray = plusproform(temp_p) // sample the next character from the distribution p (argmaxChannel would give greedy decoding instead)
x = NDArray.zeros(vocab_size, 1)
// println("ix : " + ix(0))
x(ix(0).toInt) = 1
ixes = ixes :+ (ix(0).toInt)
}
ixes
}
def lossfunction(inputs: Array[Int],
targets: Array[Int],
hprev: NDArray,
vocab_size: Int,
Wxh: NDArray, Whh: NDArray, Why: NDArray,
bh: NDArray, by: NDArray): (Double, NDArray, NDArray, NDArray, NDArray, NDArray, NDArray) = {
val len = inputs.length
var xs, hs, ys, ps: Array[NDArray] = new Array(len + 1)
// println("len:" + len + ",length:" + xs.length)
hs(0) = hprev
var loss: Double = 0
    /* forward pass
     * here the forward pass is fed with characters taken from the text,
     * while in sample() every input after the seed is generated by the model itself */
for (t <- 1 to len) {
xs(t) = NDArray.zeros(vocab_size, 1)
      xs(t)(inputs(t - 1)) = 1 // one-hot encode xs(t) from the t-th character of inputs
hs(t) = NDArray.tanh(NDArray.dot(Wxh, xs(t)) + NDArray.dot(Whh, hs(t - 1)) + bh)
ys(t) = NDArray.dot(Why, hs(t)) + by
var expys = NDArray.exp(ys(t))
      ps(t) = expys / NDArray.sum(expys).toScalar // softmax: the probability of each vocabulary character being the next character
// println(s"啊哈$t hehe:$hehe")
loss += -scala.math.log(ps(t).toArray(targets(t - 1))) //这是交叉熵
// println("for内loss:" + loss)
// println("哦吼" + t)
}
println("loss: " + loss)
/* backward pass*/
var dWxh = NDArray.zeros(Wxh.shape)
var dWhh = NDArray.zeros(Whh.shape)
var dWhy = NDArray.zeros(Why.shape)
var dbh = NDArray.zeros(bh.shape)
var dby = NDArray.zeros(by.shape)
var dhnext = NDArray.zeros(hs(1).shape)
for (t <- 0 until len) {
var time = len - t
var dy = NDArray.copy(ps(time))
      dy(targets(time - 1)) -= 1 // softmax cross-entropy gradient: subtract 1 at the target index
dWhy += NDArray.dot(dy, NDArray.transpose(hs(time)))
dby += dy
var dh = NDArray.dot(NDArray.transpose(Why), dy) + dhnext
      var dhraw = (NDArray.ones(hs(time).shape) - hs(time) * hs(time)) * dh // backprop through tanh: tanh'(z) = 1 - tanh(z)^2
dbh += dhraw
dWxh += NDArray.dot(dhraw, NDArray.transpose(xs(time)))
dWhh += NDArray.dot(dhraw, NDArray.transpose(hs(time - 1)))
dhnext = NDArray.dot(NDArray.transpose(Whh), dhraw)
}
var parameterlist: Array[NDArray] = Array(dWxh, dWhh, dWhy, dbh, dby)
    for (i <- 0 until parameterlist.length) { // clip the gradients to [-5, 5] to keep the parameter updates bounded
parameterlist(i) = NDArray.clip(parameterlist(i), -5, 5)
}
(loss, dWxh, dWhh, dWhy, dbh, dby, hs(len))
}
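  /**
   * The loss function above is truncated BPTT for the vanilla RNN
   *   h_t = tanh(Wxh * x_t + Whh * h_{t-1} + bh),  y_t = Why * h_t + by,
   * trained with softmax cross-entropy: dy subtracts 1 at the target index,
   * and the tanh derivative enters the backward pass as (1 - h_t^2).
   */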
def main(args: Array[String]): Unit = {
var data: String = ""
var chars: Array[Char] = Array()
    var data_size, vocab_size = 0; // data_size is the length of the input text, vocab_size the size of the character vocabulary
try {
      val tempdata = Source.fromFile(new File(inputfilepath)).getLines().toList // read the whole text file, one list element per line
      var set: Set[Char] = Set() // collect the distinct characters of data into a set
for (i <- tempdata) {
set = set.++(i.toSet)
data += i + '\n'
}
      chars = (set.+('\n')).toArray // small bug: this is wrong when the input text contains no newline character
vocab_size = chars.length; data_size = data.length()
} catch {
case e: FileNotFoundException => { println("File Not Found Exception") } // TODO: handle error
}
    var char_to_ix: Map[Char, Int] = Map() // given a character, look up its index
    var ix_to_char: Map[Int, Char] = Map() // given an index, look up its character
for (index <- 0 until vocab_size) {
char_to_ix += (chars(index) -> index)
ix_to_char += (index -> chars(index))
}
// println(char_to_ix)
// println(ix_to_char)
    val hidden_size = 1500 // number of hidden units
    val seq_length = 25 // number of characters per training sample
    var learning_rate = 1e-1.toFloat // learning rate
var Wxh = Random.uniform(0.toFloat, 0.01.toFloat, Shape(hidden_size, vocab_size))
var Wxh2 = Random.uniform(0.toFloat, 0.01.toFloat, Shape(hidden_size, vocab_size))
var Whh = Random.uniform(0.toFloat, 0.01.toFloat, Shape(hidden_size, hidden_size))
var Why = Random.uniform(0.toFloat, 0.01.toFloat, Shape(vocab_size, hidden_size))
var bh = NDArray.zeros(hidden_size, 1)
var by = NDArray.zeros(vocab_size, 1)
    var n: Int = 0 // n is the iteration counter
    var p: Int = 0 // p is the pointer to the start of the current input window
var mWxh = NDArray.zeros(Wxh.shape)
var mWxh2 = NDArray.zeros(Wxh2.shape)
var mWhh = NDArray.zeros(Whh.shape)
var mWhy = NDArray.zeros(Why.shape)
var mbh = NDArray.zeros(bh.shape)
var mby = NDArray.zeros(by.shape)
var smooth_loss = -scala.math.log(1.0 / vocab_size) * seq_length
var hprev = NDArray.zeros(hidden_size, 1)
while (n <= 1000) {
if (p + seq_length + 1 >= data_size) {
        p = 0; hprev = NDArray.zeros(hidden_size, 1) // the whole text has been traversed; reset the RNN state
}
var inputs: Array[Int] = Array(); var targets: Array[Int] = Array()
      for (index <- p until p + seq_length) {
        inputs = inputs :+ (char_to_ix.apply(data(scala.math.min(index, data_size - 1)))) // apply(key) => value
        targets = targets :+ (char_to_ix.apply(data(scala.math.min(index + 1, data_size - 1))))
      }
var sample_ix: Array[Int] = Array()
if (n % 100 == 0) {
        sample_ix = sample(hprev, inputs(0), 200, vocab_size, Wxh, Whh, Why, bh, by) // generate a 200-character sample; the length is configurable
var str = ""
for (ixs <- sample_ix) str += ix_to_char(ixs)
val writer = new FileWriter(outputfile, true)
writer.write("\n\n********************\n\n" + str)
writer.close()
}
var (loss, dWxh, dWhh, dWhy, dbh, dby, temp_hprev) = lossfunction(inputs, targets, hprev, vocab_size, Wxh, Whh, Why, bh, by)
hprev = temp_hprev
smooth_loss = smooth_loss * 0.999 + loss * 0.001
if (n % 100 == 0) {
printf("迭代次数:%d,smooth_loss:%f\n", n, smooth_loss)
val writer = new FileWriter(matrixfilepath, true)
writer.write("\n\n********************\n\n" + Wxh)
writer.write("\n\n********************\n\n" + Whh)
writer.write("\n\n********************\n\n" + Why)
writer.close()
}
var zips = Array(Array(Wxh, dWxh, mWxh), Array(Whh, dWhh, mWhh), Array(Why, dWhy, mWhy), Array(bh, dbh, mbh), Array(by, dby, mby))
val little = 1e-8.toFloat
      // Adagrad update: m += g * g; w -= lr * g / sqrt(m + eps)
for (i <- 0 until zips.length) {
zips(i)(2) += zips(i)(1) * zips(i)(1)
zips(i)(0) += -zips(i)(1) * learning_rate / NDArray.sqrt(zips(i)(2) + NDArray.ones(zips(i)(2).shape) * little)
}
p += seq_length
n += 1
println("第" + n + "轮结束~")
}
}
}
|
Liuxg16/BrainMatrix
|
scalakernel/src/main/java/thu/brainmatrix/rnn/RnnModel.scala
|
package thu.brainmatrix.rnn
import thu.brainmatrix.Context
import thu.brainmatrix.NDArray
import thu.brainmatrix.Shape
import thu.brainmatrix.Symbol
object RnnModel {
class LSTMInferenceModel(numLstmLayer: Int, inputSize: Int, numHidden: Int,
numEmbed: Int, numLabel: Int, argParams: Map[String, NDArray],
ctx: Context = Context.cpu(), dropout: Float = 0f) {
private val sym = Lstm.lstmInferenceSymbol(numLstmLayer,
inputSize,
numHidden,
numEmbed,
numLabel,
dropout)
private val batchSize = 1
private val initC = (for (l <- 0 until numLstmLayer)
yield (s"l${l}_init_c" -> Shape(batchSize, numHidden))).toMap
private val initH = (for (l <- 0 until numLstmLayer)
yield (s"l${l}_init_h" -> Shape(batchSize, numHidden))).toMap
private val dataShape = Map("data" -> Shape(batchSize))
private val inputShape = initC ++ initH ++ dataShape
private val executor = sym.simpleBind(ctx = ctx, shapeDict = inputShape)
for (key <- this.executor.argDict.keys) {
if (!inputShape.contains(key) && argParams.contains(key) && key != "softmax_label") {
argParams(key).copyTo(this.executor.argDict(key))
}
}
private var stateName = (Array[String]() /: (0 until numLstmLayer)) { (acc, i) =>
acc :+ s"l${i}_init_c" :+ s"l${i}_init_h"
}
private val statesDict = stateName.zip(this.executor.outputs.drop(1)).toMap
private val inputArr = NDArray.zeros(dataShape("data"))
def forward(inputData: NDArray, newSeq: Boolean = false): Array[Float] = {
      if (newSeq) {
for (key <- this.statesDict.keys) {
this.executor.argDict(key).set(0f)
}
}
inputData.copyTo(this.executor.argDict("data"))
this.executor.forward()
for (key <- this.statesDict.keys) {
this.statesDict(key).copyTo(this.executor.argDict(key))
}
val prob = this.executor.outputs(0).toArray
prob
}
}
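    /**
     * A minimal inference sketch (vocabSize, startIdx and params are assumptions;
     * argParams would normally come from a saved checkpoint):
     *
     *   val model = new LSTMInferenceModel(numLstmLayer = 2, inputSize = vocabSize,
     *     numHidden = 256, numEmbed = 128, numLabel = vocabSize, argParams = params)
     *   val input = NDArray.zeros(Shape(1))
     *   input.set(Array(startIdx.toFloat))
     *   val prob = model.forward(input, newSeq = true) // distribution over the vocabulary
     */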
}
|
Liuxg16/BrainMatrix
|
scalakernel/src/main/java/thu/brainmatrix/synapse/Input.scala
|
package thu.brainmatrix.synapse
import thu.brainmatrix.NDArray
import thu.brainmatrix.Context
import thu.brainmatrix.Shape
/**
* starttime,endtime,dt,rate,time_last: num_inputs *1
*
*/
class Input(var spikeNum:Int)(ctx:Context) {
// parameters
// variable
/***
* current:matrix(spikeNum,num_inputs)
* input0
* input1
* ...
*/
val ctx_cpu = Context.cpu(0)
val num_inputs = Config.NUMBER
// val deltaT = (endtime-starttime)/spikeNum
var current:NDArray = null
def initial(rate:Int){
this.current = NDArray.zeros(Shape(spikeNum,num_inputs), ctx_cpu)
var spikeingI = - NDArray.ones(Config.SHAPE, ctx_cpu) * 30f
// val dt = (endtime-starttime)/spikeNum
for(i<- 10 until (spikeNum-20) by Math.round(1000/(rate)).toInt){
for(j<- 0 until 15){
spikeingI.copyTo(this.current.slice(i+j))
// NDArray.setColumnSlice(this.current,, i+j)
}
}
// val arrNda = NDArray.array(arr, Shape(spikeNum,1), ctx)
// (0 until this.num_inputs).foreach(i => NDArray.setColumnSlice(this.current, arrNda, i))
}
def getinput(t:NDArray):NDArray = {
// var ttemp = NDArray.zeros(Shape(num_inputs,1), ctx_cpu)
// println("lemonman-input")
// t.waitToRead()
// t.copyTo(ttemp)
val len = t.shape(1)
// println(ttemp.shape)
// println(ttemp)
//
val tt = (0 until len).map{i =>
this.current(t(0,i).toInt,i)
}.toArray
// println("lemonman-input")
NDArray.array(tt, Config.SHAPE, ctx)
}
}
|
Liuxg16/BrainMatrix
|
scala-package/core/src/main/scala/ml/dmlc/mxnet/KVStore.scala
|
<filename>scala-package/core/src/main/scala/ml/dmlc/mxnet/KVStore.scala
package ml.dmlc.mxnet
import ml.dmlc.mxnet.Base._
import org.slf4j.{LoggerFactory, Logger}
/**
* Key value store interface of MXNet for parameter synchronization.
* @author <NAME>
*/
object KVStore {
/**
* Create a new KVStore. <br />
* <b>
* WARNING: it is your responsibility to clear this object through dispose().
* NEVER rely on the GC strategy
* </b>
*
* @param name : {'local', 'dist'}
* The type of KVStore
* - local works for multiple devices on a single machine (single process)
* - dist works for multi-machines (multiple processes)
* @return The created KVStore
*/
def create(name: String = "local"): KVStore = {
val handle = new KVStoreHandleRef
checkCall(_LIB.mxKVStoreCreate(name, handle))
new KVStore(handle.value)
}
}
// scalastyle:off finalize
class KVStore(private[mxnet] val handle: KVStoreHandle) {
private val logger: Logger = LoggerFactory.getLogger(classOf[KVStore])
private var updaterFunc: MXKVStoreUpdater = null
private var disposed = false
override protected def finalize(): Unit = {
dispose()
}
/**
* Release the native memory.
* The object shall never be used after it is disposed.
*/
def dispose(): Unit = {
if (!disposed) {
_LIB.mxKVStoreFree(handle)
disposed = true
}
}
/**
* Initialize a single or a sequence of key-value pairs into the store.
* For each key, one must init it before push and pull.
* Only worker 0's (rank == 0) data are used.
* This function returns after data have been initialized successfully
*
* @param keys The keys.
* @param values The values.
*/
def init(keys: Array[Int], values: Array[NDArray]): Unit = {
require(keys.length == values.length, "len(keys) != len(values)")
val valuePtrs = values.map(_.handle)
checkCall(_LIB.mxKVStoreInit(handle, keys.length, keys, valuePtrs))
}
def init(key: Int, value: NDArray): Unit = {
init(Array(key), Array(value))
}
/**
* Push a single or a sequence of key-value pairs into the store.
* Data consistency:
* 1. this function returns after adding an operator to the engine.
* 2. push is always called after all previous push and pull on the same key are finished
* 3. there is no synchronization between workers. One can use _barrier() to sync all workers
*
* @param keys Keys
* @param values According values
* @param priority
* The priority of the push operation.
* The higher the priority, the faster this action is likely
* to be executed before other push actions.
*/
def push(keys: Array[Int], values: Array[NDArray], priority: Int): Unit = {
require(keys.length == values.length, "len(keys) != len(values)")
val valuePtrs = values.map(_.handle)
checkCall(_LIB.mxKVStorePush(handle, keys.length, keys, valuePtrs, priority))
}
def push(keys: Array[Int], values: Array[NDArray]): Unit = push(keys, values, 0)
def push(key: Int, value: NDArray, priority: Int = 0): Unit = {
push(Array(key), Array(value), priority)
}
def push(key: Int, values: Array[NDArray], priority: Int): Unit = {
val keys = Array.fill(values.length)(key)
push(keys, values, priority)
}
def push(key: Int, values: Array[NDArray]): Unit = {
push(key, values, 0)
}
/**
* Pull a single value or a sequence of values from the store.
*
* Data consistency:
* 1. this function returns after adding an operator to the engine. But any
* further read on out will be blocked until it is finished.
* 2. pull is always called after all previous push and pull on the same key are finished
* 3. It pulls the newest value from the store.
* @param keys Keys
* @param outs According values
* @param priority
* The priority of the push operation.
* The higher the priority, the faster this action is likely
* to be executed before other push actions.
*/
def pull(keys: Array[Int], outs: Array[NDArray], priority: Int): Unit = {
require(keys.length == outs.length, "len(keys) != len(outs)")
val outPtrs = outs.map(_.handle)
checkCall(_LIB.mxKVStorePull(handle, keys.length, keys, outPtrs, priority))
}
def pull(keys: Array[Int], outs: Array[NDArray]): Unit = pull(keys, outs, 0)
def pull(key: Int, out: NDArray, priority: Int = 0): Unit = {
pull(Array(key), Array(out), priority)
}
def pull(key: Int, outs: Array[NDArray], priority: Int): Unit = {
val keys = Array.fill(outs.length)(key)
pull(keys, outs, priority)
}
def pull(key: Int, outs: Array[NDArray]): Unit = {
pull(key, outs, 0)
}
// Get the type of this kvstore
def `type`: String = {
val kvType = new RefString
checkCall(_LIB.mxKVStoreGetType(handle, kvType))
kvType.value
}
/**
* Get the number of worker nodes
* @return The number of worker nodes
*/
def numWorkers: Int = {
val size = new RefInt
checkCall(_LIB.mxKVStoreGetGroupSize(handle, size))
size.value
}
/**
* Get the rank of this worker node
* @return The rank of this node, which is in [0, get_num_workers())
*/
def rank: Int = {
val rank = new RefInt
checkCall(_LIB.mxKVStoreGetRank(handle, rank))
rank.value
}
/**
* Register an optimizer to the store
* If there are multiple machines, this process (should be a worker node)
* will pack this optimizer and send it to all servers. It returns after
* this action is done.
*
* @param optimizer the optimizer
*/
def setOptimizer(optimizer: Optimizer): Unit = {
val isWorker = new RefInt
checkCall(_LIB.mxKVStoreIsWorkerNode(isWorker))
if (`type`.contains("dist") && isWorker.value != 0) {
val optSerialized = Serializer.getSerializer.serialize(optimizer)
val cmd = Serializer.encodeBase64String(optSerialized)
logger.debug("Send optimizer to server: {}", cmd)
sendCommandToServers(0, cmd)
} else {
setUpdater(Optimizer.getUpdater(optimizer))
}
}
/**
* Set a push updater into the store.
*
* This function only changes the local store. Use setOptimizer for
* multi-machines.
*
* @param updater the updater function
*/
def setUpdater(updater: MXKVStoreUpdater): Unit = {
this.updaterFunc = updater
checkCall(_LIB.mxKVStoreSetUpdater(handle, updaterFunc))
}
/**
* Global barrier among all worker nodes
*
* For example, assume there are n machines, we want to let machine 0 first
* init the values, and then pull the inited value to all machines. Before
* pulling, we can place a barrier to guarantee that the initialization is
* finished.
*/
def barrier() {
checkCall(_LIB.mxKVStoreBarrier(handle))
}
/**
* Send a command to all server nodes
*
* Send a command to all server nodes, which will make each server node run
* KVStoreServer.controller
*
* This function returns after the command has been executed in all server nodes
*
* @param head the head of the command
* @param body the body of the command
*/
private def sendCommandToServers(head: Int, body: String): Unit = {
checkCall(_LIB.mxKVStoreSendCommmandToServers(handle, head, body))
}
}
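/**
 * A minimal single-process usage sketch of the init/push/pull cycle above
 * (the key and shape are illustrative):
 *
 *   val kv = KVStore.create("local")
 *   val shape = Shape(2, 3)
 *   kv.init(3, NDArray.ones(shape))      // a key must be initialized before push/pull
 *   kv.push(3, NDArray.ones(shape) * 4)
 *   val out = NDArray.zeros(shape)
 *   kv.pull(3, out)                      // out now holds the stored value
 *   kv.dispose()                         // the caller must free the native memory
 */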
// scalastyle:on finalize
|
Liuxg16/BrainMatrix
|
scalakernel/src/test/java/thu/brainmatrix/suite/SymbolSuite.scala
|
<reponame>Liuxg16/BrainMatrix<filename>scalakernel/src/test/java/thu/brainmatrix/suite/SymbolSuite.scala
package thu.brainmatrix.suite
import thu.brainmatrix.Base._
import thu.brainmatrix._
import thu.brainmatrix.optimizer.SGD
import scala.collection.mutable.Stack
import scala.collection.mutable.ArrayBuffer
import scala.Vector
import org.scalatest.{BeforeAndAfterAll, FunSuite}
/**
* 2016-3-22
* by liuxianggen
*/
class SymbolSuite extends FunSuite with BeforeAndAfterAll{
/**
* author: liuxianggen
* 2017-1-11
*
*/
test("LinearRegressionOutput"){
val ctx = Context.cpu(0)
val batchSize = 5
val dataShape = Shape(batchSize, 1, 4, 4)
val data = Symbol.CreateVariable("data")
val label = Symbol.CreateVariable("label")
val net = Symbol.LinearRegressionOutput()(Map("data"->data,"label"->label))
val dDataShape = Map("data" -> dataShape)
val dLabelShape = Map("label" ->dataShape)
val (dArgShapes, _, dAuxShapes) = net.inferShape(dDataShape ++ dLabelShape)
val dArgNames = net.listArguments()
val dArgDict = dArgNames.zip(
dArgShapes.map(NDArray.ones(_, ctx))).toMap
val dGradDict = (dArgNames.zip(dArgShapes)).filter { case (name, shape) =>
!dLabelShape.contains(name)
}.map(x => x._1 -> NDArray.empty(x._2, ctx) ).toMap
val gAuxNames = net.listAuxiliaryStates()
val gAuxDict = gAuxNames.zip(dAuxShapes.map(NDArray.empty(_, ctx))).toMap
dArgDict("data").set(NDArray.ones(dataShape, ctx)*4)
val executor =net.bind(ctx, dArgDict, dGradDict, "write", gAuxDict, null, null)
executor.forward()
val out2 = executor.outputs(0).slice(0)
// println(out2.reshape(out2.shape.toArray.takeRight(2)))
executor.backward()
// println(dGradDict("data").slice(0).reshape(out2.shape.toArray.takeRight(2)))
}
/**
* author: liuxianggen
* 2017-1-11
*
*/
test("BatchNorm"){
val ctx = Context.cpu(0)
val batchSize = 5
val kernel_num = 9
val ngf = 3
// val iShape = Shape(ngf * 4, 4, 4)
val oShape = Shape(ngf,8,8)
val dataShape = Shape(batchSize, kernel_num, 4, 4)
val stride = (2,2)
val targetShape = (oShape(oShape.length - 2), oShape(oShape.length - 1))
val data = Symbol.CreateVariable("data")
val net = Symbol.BatchNorm("bn")(Map("data" -> data,"fix_gamma" -> true, "eps" -> 1e-12))
val dDataShape = Map("data" -> dataShape)
val dLabelShape = Map("dloss_label" -> Shape(batchSize))
val (dArgShapes, _, dAuxShapes) = net.inferShape(dDataShape ++ dLabelShape)
val dArgNames = net.listArguments()
val dArgDict = dArgNames.zip(
dArgShapes.map(NDArray.zeros(_, ctx))).toMap
val dGradDict = (dArgNames.zip(dArgShapes)).filter { case (name, shape) =>
!dLabelShape.contains(name)
}.map(x => x._1 -> NDArray.empty(x._2, ctx) ).toMap
// println(dArgShapes)
// println(dAuxShapes)
//
// dArgNames.foreach { x => println(x) }
// net.listAuxiliaryStates().foreach(println)
val gAuxNames = net.listAuxiliaryStates()
val gAuxDict = gAuxNames.zip(dAuxShapes.map(NDArray.empty(_, ctx))).toMap
dArgDict("data").set(Random.normal(0, 1.0f, dataShape, ctx))
val executor =net.bind(ctx, dArgDict, dGradDict, "write", gAuxDict, null, null)
// executor.forward()
// val out2 = executor.outputs(0)
// println(out2.shape)
}
/**
* 2017-01-10
* Deconvolution
* author: liuxianggen
* "data" -> (batchSize,1,r,c)
* "num_filter" -> N_F
* "target_shape" -> (r1,c1)
*
* outputshape => (batchSize,num_filter,r1,c1)
* kernelShape => decon_weight
*/
test("Deconvolution"){
val ctx = Context.cpu(0)
val batchSize = 5
val ngf = 3
// val iShape = Shape(ngf * 4, 4, 4)
val oShape = Shape(ngf,8,8)
val dataShape = Shape(batchSize, 1, 4, 4)
val kernelShape = Shape(3,3)
val stride = (2,2)
val targetShape = (oShape(oShape.length - 2), oShape(oShape.length - 1))
val data = Symbol.CreateVariable("data")
val net = Symbol.Deconvolution("decon")(Map(
"data" -> data,
"kernel" -> s"$kernelShape",
"stride" -> s"$stride",
"target_shape" -> s"$targetShape",
"num_filter" -> oShape(0),
"no_bias" -> true))
val dDataShape = Map("data" -> dataShape)
val dLabelShape = Map("dloss_label" -> Shape(batchSize))
val (dArgShapes, _, dAuxShapes) = net.inferShape(dDataShape ++ dLabelShape)
val dArgNames = net.listArguments()
val dArgDict = dArgNames.zip(
dArgShapes.map(NDArray.ones(_, ctx))).toMap
val dGradDict = (dArgNames.zip(dArgShapes)).filter { case (name, shape) =>
!dLabelShape.contains(name)
}.map(x => x._1 -> NDArray.empty(x._2, ctx) ).toMap
// println(dArgShapes)
val executor = net.easy_bind(ctx = Context.cpu(0), args = dArgDict)
executor.forward()
val out2 = executor.outputs(0).slice(0)
// println(out2.reshape(out2.shape.toArray.takeRight(3)))
}
/**
* test gradient
*/
test("sig gradient"){
val rhs = Symbol.CreateVariable("rhs")
val lhs = Symbol.CreateVariable("lhs")
val dot = Symbol.FullyConnected("dot")(Map("data"->rhs,"weight"->lhs,"no_bias"->true,"num_hidden"->4))
val res = Symbol.Activation("sig")(Map("data"->dot,"act_type" -> "sigmoid"))
val lshape = Shape(4,3)
val rshape = Shape(2,3)
// res.listArguments().foreach(println)
val (a,b,c) = res.inferShape(Map("rhs"->rshape))
// a.foreach {x => println(x)}
val rhsArr = NDArray.array(Array(1,0,1,-2,1,0),rshape)
val lhsArr = NDArray.array(Array(1,2,3,4,5,6,1,2,3,4,5,6),lshape)
// println("rhsArr:"+rhsArr)
// println("lhsArr:"+lhsArr)
// println("sigmoid rhsArr:"+NDArray.sigmod(rhsArr))
// println("sigmoid lhsArr:"+NDArray.sigmod(lhsArr))
val rhsArr_g = NDArray.zeros(rshape)
val lhsArr_g = NDArray.zeros(lshape)
val executor = res.easy_bind(ctx = Context.cpu(), args = Map("lhs"->lhsArr,"rhs"->rhsArr),argsGrad = Map("lhs"->lhsArr_g,"rhs"->rhsArr_g))
executor.forward(isTrain=true)
val out2 = executor.outputs(0)
// println(out2)
val error = NDArray.array(Array(2,0,0,0,0,-1,0,0),Shape(2,4))
// val error = NDArray.ones(Shape(2,4))
// println("errro:"+error)
executor.backward(error)
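// reference check via the chain rule: with z = data * weight.T and sigmoid s,
// the input gradient is dL/d(data) = (error ⊙ s(z)(1 - s(z))) * weight;
// the two lines below rebuild it by hand for comparison against gradArrays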
val resarr = NDArray.dot(rhsArr,NDArray.transpose(lhsArr))
val temp = error*(NDArray.sigmod(resarr)*(NDArray.sigmod(resarr)*(-1)+1))
// println("sigmoid gradient:" + NDArray.dot(temp, lhsArr))
// println("-------------------------------")
// executor.gradArrays.foreach {println}
}
test("mul gradient"){
val rhs = Symbol.CreateVariable("rhs")
val lhs = Symbol.CreateVariable("lhs")
// val res = Symbol.FullyConnected("dot")(Map("data"->rhs,"weight"->lhs,"no_bias"->true,"num_hidden"->4))
val res = rhs * lhs
val lshape = Shape(2,3)
// res.listArguments().foreach(println)
// val (a,b,c) = res.inferShape(Map("rhs"->lshape))
// a.foreach {x => println(x)}
val rhsArr = NDArray.array(Array(10,0,1,-2,1,0),lshape)
val lhsArr = NDArray.array(Array(1,2,3,4,5,6),lshape)
// println("rhsArr:"+rhsArr)
// println("lhsArr:"+lhsArr)
val rhsArr_g = NDArray.zeros(lshape)
val lhsArr_g = NDArray.zeros(lshape)
val executor = res.easy_bind(ctx = Context.cpu(), args = Map("lhs"->lhsArr,"rhs"->rhsArr),argsGrad = Map("lhs"->lhsArr_g,"rhs"->rhsArr_g))
executor.forward(isTrain=true)
val out2 = executor.outputs(0)
// println(out2)
val error = NDArray.array(Array(2,0,0,0,-1,0),Shape(2,3))
// val error = NDArray.ones(Shape(2,4))
// println("errro:"+error)
executor.backward(error)
// println("-------------------------------")
// executor.gradArrays.foreach {println}
}
test("add gradient"){
val rhs = Symbol.CreateVariable("rhs")
val lhs = Symbol.CreateVariable("lhs")
// val res = Symbol.FullyConnected("dot")(Map("data"->rhs,"weight"->lhs,"no_bias"->true,"num_hidden"->4))
val res = rhs + lhs * 2
val lshape = Shape(2,3)
// res.listArguments().foreach(println)
// val (a,b,c) = res.inferShape(Map("rhs"->lshape))
// a.foreach {x => println(x)}
val rhsArr = NDArray.array(Array(1,0,1,-2,1,0),lshape)
val lhsArr = NDArray.array(Array(1,2,3,4,5,6),lshape)
// println("rhsArr:"+rhsArr)
// println("lhsArr:"+lhsArr)
val rhsArr_g = NDArray.zeros(lshape)
val lhsArr_g = NDArray.zeros(lshape)
val executor = res.easy_bind(ctx = Context.cpu(), args = Map("lhs"->lhsArr,"rhs"->rhsArr),argsGrad = Map("lhs"->lhsArr_g,"rhs"->rhsArr_g))
executor.forward(isTrain=true)
val out2 = executor.outputs(0)
// println(out2)
val error = NDArray.diag(Shape(2,3))
// val error = NDArray.ones(Shape(2,4))
// println("errro:"+error)
executor.backward(error)
// println("-------------------------------")
// executor.gradArrays.foreach {println}
}
test("dot gradient"){
val rhs = Symbol.CreateVariable("rhs")
val lhs = Symbol.CreateVariable("lhs")
val res = Symbol.FullyConnected("dot")(Map("data"->rhs,"weight"->Symbol.transpose(lhs),"no_bias"->true,"num_hidden"->4))
// val lshape = Shape(4,3)
val lshape = Shape(3,4)
val rshape = Shape(2,3)
// res.listArguments().foreach(println)
// val (a,b,c) = res.inferShape(Map("rhs"->rshape))
// a.foreach {x => println(x)}
val rhsArr = NDArray.array(Array(1,0,1,-2,1,0),rshape)
val lhsArr = NDArray.array(Array(1,2,3,4,5,6,1,2,3,4,5,6),lshape)
// println("rhsArr:"+rhsArr)
// println("lhsArr:"+lhsArr)
val rhsArr_g = NDArray.zeros(rshape)
val lhsArr_g = NDArray.zeros(lshape)
// println("ddd")
val executor = res.easy_bind(ctx = Context.cpu(), args = Map("lhs"->lhsArr,"rhs"->rhsArr),argsGrad = Map("lhs"->lhsArr_g,"rhs"->rhsArr_g))
executor.forward(isTrain=true)
val out2 = executor.outputs(0)
// println(out2)
val error = NDArray.array(Array(2,0,0,0,0,-1,0,0),Shape(2,4))
// val error = NDArray.ones(Shape(2,4))
// println("errro:"+error)
executor.backward(error)
// println("-------------------------------")
// executor.gradArrays.foreach {println}
}
/**
* operation
*/
/**
* square
*/
test("square"){
val shape = Shape(3, 4)
val lhs = Symbol.CreateVariable("lhs")
val res = Symbol.square(lhs)
val lhsArr = NDArray.ones(shape)*2
lhsArr(1,1) = 4
val executor = res.easy_bind(ctx = Context.cpu(), args = Map("lhs"->lhsArr))
executor.forward()
val out2 = executor.outputs(0)
// println(out2)
}
/**
* by liuxianggen
* 20160825
* there are two steps:
* 1. softmax
* 2. sum{-log(p(label(i)))}
* Calculate cross_entropy(lhs, one_hot(rhs))
Parameters
----------
lhs : Symbol
Left symbolic input to the function
rhs : Symbol
Right symbolic input to the function
*/
test("Softmax_cross_entropy"){
val shape = Shape(4,2)
val lhs = Symbol.CreateVariable("lhs")
val weight = Symbol.CreateVariable("weight")
val fully = Symbol.FullyConnected("f")(Map("data"->lhs,"weight"-> weight,"no_bias"-> true,"num_hidden"->4))
val rhs = Symbol.CreateVariable("rhs")
val sum = Symbol.Softmax_cross_entropy(fully,rhs)
// val sum = Symbol.sum(fully)
val weightArr = Random.normal(0f,1f,Shape(4,2))
val lhsArr = NDArray.array(Array(1f,2f,3f,1f),Shape(2,2),Context.defaultCtx)
val rhsArr = NDArray.ones(Shape(2),Context.defaultCtx)
val weightArr_g = NDArray.zeros(Shape(4,2))
val gradDict = Map("weight"->weightArr_g)
val executor = sum.easy_bind(ctx = Context.cpu(), args = Map("lhs"->lhsArr,"rhs"->rhsArr,"weight"->weightArr),argsGrad = gradDict)
println("num:"+executor.outputs.length)
var out2 = executor.outputs(0)
// println(out2)
val gradarr = NDArray.array(Array(1f),Shape(1),Context.defaultCtx)
executor.backward(executor.outputs(0))
// executor.gradArrays.foreach {println}
}
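/**
* a plain-Scala reference for the two steps above (a hedged sketch, independent of
* the Symbol API): softmax over each row of logits, then the summed negative
* log-probability of the labelled class
*/
def refSoftmaxCrossEntropy(logits: Array[Array[Double]], labels: Array[Int]): Double = {
logits.zip(labels).map { case (row, label) =>
val m = row.max // subtract the row max for numerical stability
val exps = row.map(v => math.exp(v - m))
-math.log(exps(label) / exps.sum) // cross-entropy of the labelled class
}.sum
}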
/**
* network backward
*/
test("for-back-network"){
val shape = Shape(4,2)
val lhs = Symbol.CreateVariable("lhs")
val weight = Symbol.CreateVariable("weight")
val fully = Symbol.FullyConnected("f")(Map("data"->lhs,"weight"-> weight,"no_bias"-> true,"num_hidden"->10))
val act1 = Symbol.Activation()(Map("data" -> fully, "name" -> "relu1", "act_type" -> "relu"))
val fc2 = Symbol.FullyConnected()(Map("data" -> act1, "name" -> "fc2", "num_hidden" -> 64))
// val rhs = Symbol.CreateVariable("rhs")
// val sum = Symbol.Softmax_cross_entropy(lhs,rhs)
val sum = Symbol.sum(fully)
val lhsArr = NDArray.array(Array(1f,2f,3f,10f),Shape(2,2))
val weightArr = NDArray.zeros(Shape(2,2))
// val executor = sum.easy_bind(ctx = Context.cpu(), args = Map("lhs"->lhsArr,"weight"->weightArr))
//// println(sum.staticGraph.debug)
// executor.forward(isTrain = true)
// var out2 = executor.outputs(0).copy()
//// println(out2)
// val gradarr = NDArray.array(Array(1f),Shape(1))
// executor.backward(gradarr)
// executor.gradArrays.foreach {println}
}
test("Sum") {
val shape = Shape(10, 3, 4)
val lhs = Symbol.CreateVariable("lhs")
val sum = Symbol.Sum("sum")(Map("data"->lhs))
val lhsArr = NDArray.ones(shape)
lhsArr(1,1) = 4
val executor = sum.easy_bind(ctx = Context.cpu(), args = Map("lhs"->lhsArr))
executor.forward()
val out2 = executor.outputs(0)
// println(out2.shape)
}
/*
* symbol assignment
*/
test("symbol assigment") {
val shape = Shape(3, 4)
val lhs = Symbol.CreateVariable("lhs")
var data = lhs+1
var data1 = data
data1 += lhs
val res = Symbol.Group(data,data1)
val lhsArr = NDArray.ones(shape)
lhsArr(1,1) = 4
val executor = res.easy_bind(ctx = Context.cpu(), args = Map("lhs"->lhsArr))
executor.forward()
val out2 = executor.outputs(0)
// println(out2)
// println(executor.outputs(1))
}
test("broadcast_plus"){
val lhs = Symbol.CreateVariable("lhs")
val rhs = Symbol.CreateVariable("rhs")
val ret = Symbol.broadcast_minus(lhs,rhs)
val lhsArr = NDArray.ones(Shape(4,2))*2
val rhsArr = NDArray.ones(Shape(1,2))
// val executor = ret.easy_bind(ctx = Context.cpu(), args = Map("lhs"->lhsArr,"rhs"->rhsArr))
// executor.forward()
// val out2 = executor.outputs(0)
// println(out2)
}
test("reshape:(4,3)"){
val label = Symbol.CreateVariable("label")
val inputs = Symbol.Reshape()(Map("data" -> label, "shape" -> "(-1,-1,6)"))
val shape = Shape(3, 4)
val lhsArr = NDArray.ones(shape)
// val executor = inputs.easy_bind(ctx = Context.cpu(), args = Map("label"->lhsArr))
// executor.forward()
// val out2 = executor.outputs(0)
// println(out2.shape)
}
test("reshape"){
val label = Symbol.CreateVariable("label")
val inputs = Symbol.Reshape()(Map("data" -> label, "target_shape" -> "(0,)"))
val shape = Shape(10, 4)
val lhsArr = NDArray.ones(shape)
val executor = inputs.easy_bind(ctx = Context.cpu(), args = Map("label"->lhsArr))
executor.forward()
val out2 = executor.outputs(0)
// println(out2.shape)
}
test("SliceChannel"){
val shape = Shape(10, 4, 3)
val data = Symbol.CreateVariable("data")
val inputs = Symbol.SliceChannel()(Array(data),Map("num_outputs" -> 4, "squeeze_axis" -> true))
val lhsArr = NDArray.ones(shape)
val executor = inputs.easy_bind(ctx = Context.cpu(), args = Map("data"->lhsArr))
executor.forward()
val out2 = executor.outputs(0)
// println(out2.shape)
}
test("abs") {
val shape = Shape(10, 3)
val lhs = Symbol.CreateVariable("lhs")
val lhs_abs = Symbol.abs(lhs)
val ret =lhs_abs-lhs
assert(ret.listArguments().toArray === Array("lhs"))
val lhsArr = NDArray.zeros(shape)-NDArray.ones(shape)
val executor = ret.easy_bind(ctx = Context.cpu(), args = Map("lhs"->lhsArr))
executor.forward()
val out1 = lhsArr*2
val out2 = executor.outputs(0)
// println(out2)
}
test("Activation"){
val lhs = Symbol.CreateVariable("lhs")
val s = Symbol.Activation("ss")(Map("data"->lhs,"act_type"->"tanh"))
}
test("concat") {
val shape = Shape(10, 3)
val lhs = Symbol.CreateVariable("lhs")
val concat0=Symbol.Concat("concat0")(Array(lhs))
assert(concat0.listArguments().toArray === Array("lhs"))
val lhsArr = NDArray.ones(shape)
val executor = concat0.easy_bind(ctx = Context.cpu(), args = Map("lhs"->lhsArr))
executor.forward()
val out2 = executor.outputs(0)
// println(out2)
}
/**
*
*
*/
test("ElementWiseSum"){
val lhs = Symbol.CreateVariable("lhs")
val rhs = Symbol.CreateVariable("rhs")
val ret = Symbol.ElementWiseSum("ElementWiseSum1")(Array(lhs,rhs))
val shape = Shape(10, 3)
val lhsArr = NDArray.ones(shape)
val rhsArr = NDArray.ones(shape)*2
val executor = ret.easy_bind(ctx = Context.cpu(), args = Map("lhs"->lhsArr,"rhs"->rhsArr))
executor.forward()
val out2 = executor.outputs(0)
// println(out2)
}
/**
* @author liuxianggen
* @date 20160726
* @brief here you can test the symbol SoftmaxOutput operation and inspect its loss output and gradient;
* for more information please refer to the definition of SoftmaxOutput
* @note
*/
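// for reference (a standard result, not verified against this implementation):
// the backward pass of softmax-with-cross-entropy reduces to p - one_hot(label),
// where p is the softmax output; scaling conventions may differ per library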
test("softmax Operation"){
val data = Symbol.CreateVariable("data")
val label = Symbol.CreateVariable("label")
val batch_size = 10
val num_input = 6
val hidden = 100
val shape = Shape(batch_size, num_input)
val fully = Symbol.FullyConnected("fc1")(Map("data"->data,"num_hidden"->hidden))
val ret = Symbol.SoftmaxOutput("softmax")(Map("data" -> fully,"label"->label))
// ret.listArguments().foreach(println)
val (a,b,c) = ret.inferShape(Map("data"->shape,"label"->Shape(batch_size)))
// a.foreach {println}
val dataArr = NDArray.ones(shape)
val fc1_weight = NDArray.ones(Shape(hidden,num_input))
val fc1_bias = NDArray.ones(Shape(hidden))
val labelArr = NDArray.ones(Shape(batch_size))*3
val executor = ret.easy_bind(ctx = Context.cpu(), args = Map("data"->dataArr,"fc1_weight"->fc1_weight,"fc1_bias"->fc1_bias,"label"->labelArr))
executor.forward(isTrain=true)
val out2 = executor.outputs(0)
// println(out2)
executor.backward()
// executor.gradArrays.foreach {println}
}
/**
* @author liuxianggen
* @date 20160726
* @brief here you can test the symbol SoftmaxOutput operation and inspect its loss output and the expected input format;
* for more information please refer to the definition of SoftmaxOutput
* @note
*/
test("softmax Operation simple"){
val data = Symbol.CreateVariable("data")
val label = Symbol.CreateVariable("label")
val batch_size = 10
val num_input = 3
val shape = Shape(batch_size, num_input)
val ret = Symbol.SoftmaxOutput("softmax")(Map("data" -> data,"label"->label))
// ret.listArguments().foreach(println)
val (a,b,c) = ret.inferShape(Map("data"->shape))
// a.foreach {println}
val dataArr = NDArray.ones(shape)
dataArr(1,1) = 2
// println(math.exp(2)/(math.exp(1)*2+math.exp(2)))
val labelArr = NDArray.ones(Shape(batch_size))*3
val executor = ret.easy_bind(ctx = Context.cpu(), args = Map("data"->dataArr,"label"->labelArr))
executor.forward(isTrain=true)
val out2 = executor.outputs(0)
// println(out2)
// executor.backward()
// executor.gradArrays.foreach {println}
}
/**
*
*/
test("operation:*"){
val lhs = Symbol.CreateVariable("lhs")
val rhs = Symbol.CreateVariable("rhs")
val ret = lhs*rhs
val shape = Shape(10, 3)
ret.inferShape(Map("lhs"->shape,"rhs"->shape))
val lhsArr = NDArray.ones(shape)
val rhsArr = NDArray.ones(shape)*8
lhsArr(1,1) = 12
val executor = ret.easy_bind(ctx = Context.cpu(), args = Map("lhs"->lhsArr,"rhs"->rhsArr))
executor.forward()
val out2 = executor.outputs(0)
// println(out2)
}
/**
* exploratory test: probing the behavior of the embedding operation
*/
test("embeding"){
val data = Symbol.CreateVariable("data")
val embedWeight = Symbol.CreateVariable("embed_W")
val embed = Symbol.Embedding("embed")(Map("data" -> data, "input_dim" -> 30,
"weight" -> embedWeight, "output_dim" -> 5))
val shape = Shape(3, 2)
// val (a,b,c) = embed.inferShape(Map("data"->shape))
// a.foreach {println}
// b.foreach(println)
val dataarr = NDArray.diag(Shape(2,3))
// dataarr(0,2) = 4
val embedWeightarr = NDArray.ones(Shape(30,5))
// lhsArr(1,1) = 12
val executor = embed.easy_bind(ctx = Context.cpu(), args = Map("data"->dataarr,"embed_W"->embedWeightarr))
executor.forward()
val out2 = executor.outputs(0)
// println(out2)
}
/**
* member functions
*/
test("listAuxTest"){
val data = Symbol.CreateVariable("data")
val conv1 = Symbol.Convolution()(Map("data" -> data, "name" -> "conv1",
"num_filter" -> 32, "kernel" -> (3, 3), "stride" -> (2, 2)))
conv1.listAuxiliaryStates().foreach(println)
// println("listAuxTest end ")
}
def main1(args:Array[String]):Unit = {
println("<-----------TEST Symbol Part------------>")
// createTest
// createVariableTest
// composeTest
// ToStaticGraphTest
// unzipTest
// mapTest
// foldLeftTest
// SetAttrTest
// DFSVisitTest
// stackTest
// inferShapeTest
// inferShape_plusTest_fc1
// ToStaticGraphTest_2
// inferShape_plusTest_fc12
// operatorIntegrateTest
// simpleBindTest
listAuxTest
// listArguments_
}
/**
* 2016-3-21
* test create function
* succeed!
*/
def createTest{
// def Create(op: OperatorPropertyRef): Symbol
val operatorName = "Activation"
val kwargs = Map("name" -> "relu1", "act_type" -> "relu")
val sb:Symbol = Symbol.Create(operatorName,kwargs)
sb.heads_.foreach { x => {
// println("the op of heads:")
(x.source.value.opRef.value.printParam())}
}
}
/**
* 2016-3-21
* succeed!
*/
def createVariableTest{
val name = "input"
val sb = Symbol.CreateVariable(name)
sb.heads_.foreach { x => {
println("the name of heads:")
println(x.source.value.name)}
}
}
/**
* 2016-3-20
* succeed!
*/
def composeTest{
// def Compose(kwargs: Map[String, Symbol], name: String) {
val dataS = Symbol.CreateVariable("data")
val weightS = Symbol.CreateVariable("weight")
val biasS = Symbol.CreateVariable("bias")
val sb:Symbol = Symbol.Create("FullyConnected")
val kwargs:Map[String,Symbol] = Map("data"->dataS,"weight"->weightS,"bias"->biasS)
sb.Compose(kwargs, "FullyConnectedS")
sb.heads_(0).source.value.inputs.foreach { x => println(x.Info) }
// println(sb.is_atomic())//true
}
/**
* 2016-3-23
*/
def ToStaticGraphTest{
// def ToStaticGraph(out_graph: StaticGraph) {
val sgref = new StaticGraphHandleRef
val sg:StaticGraph = new StaticGraph()
val dataS = Symbol.CreateVariable("data")
val weightS = Symbol.CreateVariable("weight")
val biasS = Symbol.CreateVariable("bias")
val sb:Symbol = Symbol.Create("FullyConnected")
// val kwargs:Map[String,Symbol] = Map("data"->dataS,"weight"->weightS,"bias"->biasS)
val kwargs:Map[String,Symbol] = Map("data"->dataS)
sb.Compose(kwargs, "FullyConnectedS")
val weightS1 = Symbol.CreateVariable("weight1")
val biasS1 = Symbol.CreateVariable("bias1")
val sb1:Symbol = Symbol.Create("FullyConnected")
val kwargs1:Map[String,Symbol] = Map("data"->sb)
sb1.Compose(kwargs1, "FullyConnectedS1")
// sb1.ToStaticGraph(sg)
println(sg.debug)
}
def ToStaticGraphTest_2{
val dataS = Symbol.CreateVariable("data")
val kwargs_type = Map("name" -> "fc2", "num_hidden" -> "10")
val sb:Symbol = Symbol.Create("FullyConnected",kwargs_type)
val kwargs_symbol:Map[String,Symbol] = Map("data"->dataS)
sb.Compose(kwargs_symbol, "FullyConnectedS")
// var out_graph= new StaticGraph()
// sb.ToStaticGraph(out_graph)
// println(out_graph.debug)
println("\n---------------------------------------------------")
}
/**
* 2016-3-23
* test dfs
*/
def DFSVisitTest{
val dataS = Symbol.CreateVariable("data")
val weightS = Symbol.CreateVariable("weight")
val biasS = Symbol.CreateVariable("bias")
val sb:Symbol = Symbol.Create("FullyConnected")
val kwargs:Map[String,Symbol] = Map("data"->dataS,"weight"->weightS,"bias"->biasS)
sb.Compose(kwargs, "FullyConnectedS")
sb.DFSVisit { noderef => {
println("node:")
println(noderef.value.name)
}
}
}
/**
* 2016-3-23
*/
def stackTest{
var stack: Stack[(String, Int)] = Stack()
stack.push(("a",1),("b",2),("c",3))
stack.update(0, ("c",0))
while (!stack.isEmpty) {
println(stack.pop())
}
}
/**
* 2016-3-25
*/
def unzipTest{
val ve = Vector((1,"a"),(3,"v"))
val (a,b) = ve.unzip
a.foreach(print)
}
def mapTest{
val m:scala.collection.mutable.Map[String,Int] =scala.collection.mutable.Map()
m += ("a"->1,"b"->2)
val (ms,mi) = m.unzip
println(ms)
println(m)
}
def foldLeftTest{
val arr = Array(1,2,3,4,5,5)
println(arr.foldLeft(0)(_ + _))
}
/**
* 2016-3-25
* the inferShape function will call ToStaticGraph(g), so the StaticGraph needs to be
* converted from Java to C++ first
*/
def inferShapeTest{
val dataS = Symbol.CreateVariable("data")
val weightS = Symbol.CreateVariable("weight")
val biasS = Symbol.CreateVariable("bias")
val kwargs_type = Map("name" -> "fc2", "num_hidden" -> "10")
val sb:Symbol = Symbol.Create("FullyConnected",kwargs_type)
val kwargs_symbol:Map[String,Symbol] = Map("data"->dataS)
sb.Compose(kwargs_symbol, "FullyConnectedS")
val kwargs_shape = Map("data"->Shape(200,15))
val keys = ArrayBuffer.empty[String]
val indPtr = ArrayBuffer(0)
val sdata = ArrayBuffer.empty[Int]
kwargs_shape.foreach { case (key, shape) =>
keys += key
sdata ++= shape.toVector
indPtr += sdata.size
}
println("keys:")
keys.foreach {println}
println("\nsdata:")
sdata.foreach(println)
println("\nindPtr:"+indPtr)
println("\n---------------------------------------------------")
// val (argShapes, _, auxShapes) = sb.inferShape(keys.toArray, indPtr.toArray, sdata.toArray)
}
/**
* 2016-3-25
* the inferShape function will call ToStaticGraph(g), so the StaticGraph needs to be
* converted from Java to C++ first
*/
def inferShape_plusTest_fc12{
val dataS = Symbol.CreateVariable("data")
val kwargs_type = Map("name" -> "fc1", "num_hidden" -> "12")
val sb:Symbol = Symbol.Create("FullyConnected",kwargs_type)
val kwargs_symbol:Map[String,Symbol] = Map("data"->dataS)
sb.Compose(kwargs_symbol, "fc1")
val kwargs_type1 = Map("name" -> "fc2", "num_hidden" -> "10")
val sb1:Symbol = Symbol.Create("FullyConnected",kwargs_type1)
val kwargs_symbol1:Map[String,Symbol] = Map("data"->sb)
sb1.Compose(kwargs_symbol1, "fc2")
sb1.ToStaticGraph()
println(sb1.staticGraph.debug)
println("\n---------------------------------------------------")
val kwargs_shape = Map("data"->Shape(200,15))
//
// val (argShapes, _, auxShapes) = sb1.inferShape1(sb1.staticGraph,kwargs_shape)
// argShapes.foreach {println}
}
/**
* 2016-3-25
* the inferShape function will call ToStaticGraph(g), so the StaticGraph needs to be
* converted from Java to C++ first
*/
def inferShape_plusTest_fc1{
val dataS = Symbol.CreateVariable("data")
val kwargs_type = Map("name" -> "fc1", "num_hidden" -> "12")
val sb:Symbol = Symbol.Create("FullyConnected",kwargs_type)
val kwargs_symbol:Map[String,Symbol] = Map("data"->dataS)
sb.Compose(kwargs_symbol, "fc1")
// var out_graph= new StaticGraph()
sb.ToStaticGraph()
println(sb.staticGraph.debug)
println("\n---------------------------------------------------")
val kwargs_shape = Map("data"->Shape(200,15))
// val (argShapes, outShapes, auxShapes) = sb.inferShape1(sb.staticGraph,kwargs_shape)
// argShapes.foreach {println}
// outShapes.foreach {println}
}
/**
* 2016-3-24
* by liuxianggen
* not sure
*/
def SetAttrTest(){
val dataS = Symbol.CreateVariable("data")
val weightS = Symbol.CreateVariable("weight")
val biasS = Symbol.CreateVariable("bias")
val sb:Symbol = Symbol.Create("FullyConnected")
val kwargs:Map[String,Symbol] = Map("data"->dataS,"weight"->weightS,"bias"->biasS)
sb.Compose(kwargs, "FullyConnectedS")
sb.SetAttr("name", "FullyConnected")
sb.SetAttr("hidden", "10")
}
/**
*
*by liuxianggen
* 2016-4-5
* succeed!!
*/
def operatorIntegrateTest{
val num_instance = 15
val input_dim = 10
val hidden_1 =5
val hidden_2 =5
val dataS = Symbol.CreateVariable("data")
val fc1 = Symbol.FullyConnected()(Map("name" -> "fc1", "num_hidden" -> hidden_1 ,"data"->dataS))
val fc2 = Symbol.FullyConnected()(Map("name" -> "fc2", "num_hidden" -> hidden_2 ,"data"->fc1))
val sm = Symbol.SoftmaxOutput()(Map("name" -> "sm","grad_scale"->"1","data"->fc2))
val data = NDArray.rangeRows(0, num_instance, input_dim)//num_instance,10
val label = NDArray.range(0,5,3)
println(data)
println(label)
val weight = NDArray.ones(Shape(5,10))//according to inferShape function
val bias = NDArray.ones(Shape(5))//according to inferShape function
val weight1 = NDArray.ones(Shape(5,5))//according to inferShape function
val bias1 = NDArray.ones(Shape(5))//according to inferShape function
var data_grad = NDArray.ones(Shape(num_instance,10))
var weight_grad = NDArray.ones(Shape(5,10))//according to inferShape function
var bias_grad = NDArray.ones(Shape(5))//according to inferShape function
var weight_grad1 = NDArray.ones(Shape(5,5))//according to inferShape function
var bias_grad1 = NDArray.ones(Shape(5))//according to inferShape function
var label_grad = NDArray.ones(Shape(num_instance))
val in_args: Array[NDArray] = Array(data, weight, bias,weight1, bias1,label)
// var arg_grad_store: Array[NDArray] = Array(data_grad, weight_grad, bias_grad,label_grad)
// val in_args: Array[NDArray] = Array(data, weight, bias)
// val arg_grad_store: Array[NDArray] = Array(data_grad, weight_grad, bias_grad)
val arg_grad_store: Array[NDArray] = Array(new NDArray(0), weight_grad, bias_grad,weight_grad1, bias_grad1,new NDArray(0))
val grad_req_type: Array[Int] = Array(0,1,1,1,1,0)
// var out_graph= new StaticGraph()
// sm.ToStaticGraph(out_graph)
// println(out_graph.debug)
// out_graph.ToStaticGraph
// val executor = out_graph.bind(in_args, arg_grad_store, grad_req_type)
// val executor = sm.bindHelper(in_args, arg_grad_store,grad_req_type)
// println("---------------------froward-----------------------")
//// executor.forward()
// println("---------------------output-----------------------")
//// executor.outputs.foreach {println}
//// println(executor.outputs(0))
// println("---------------------backward-----------------------")
// val outGrad = Random.uniform(-10f, 10f, Shape(15,6))
// executor.backward()
// checkCall(_LIB.mxExecutorBackward(executor.handle, Array(outGrad.handle)))
// executor.backward()
// println(data)
// println(label)
//
// for(i<- 0 until 10){
// println("epoch:"+i)
// executor.forward()
// executor.backward()
// println(executor.outputs(0))
// val acc: Float = mathTool.output_accuracy(executor.outputs(0), label)
// Console.println("Accuracy: " + acc)
// println(arg_grad_store(2))
//// println(in_args(2))
// for (j <- 1 to 4) {
// arg_grad_store(j) *= 5*1e-3f
// in_args(j) -= arg_grad_store(j)
//
// }
// }
//// executor.forward()
//// executor.backward()
//// println(outGrad)
//// println(data_grad)
//// println(weight_grad)
//
// executor.dispose()
}
def simpleBindTest{
import thu.brainmatrix.Context
val batchSize = 100
val data = Symbol.CreateVariable("data")
val conv1 = Symbol.Convolution()(Map("data" -> data, "name" -> "conv1",
"num_filter" -> 32, "kernel" -> (3, 3), "stride" -> (2, 2)))
val bn1 = Symbol.BatchNorm()(Map("data" -> conv1, "name" -> "bn1"))
val act1 = Symbol.Activation()(Map("data" -> bn1, "name" -> "relu1", "act_type" -> "relu"))
val mp1 = Symbol.Pooling()(Map("data" -> act1, "name" -> "mp1",
"kernel" -> (2, 2), "stride" -> (2, 2), "pool_type" -> "max"))
val conv2 = Symbol.Convolution()(Map("data" -> mp1, "name" -> "conv2", "num_filter" -> 32,
"kernel" -> (3, 3), "stride" -> (2, 2)))
val bn2 = Symbol.BatchNorm()(Map("data" -> conv2, "name" -> "bn2"))
val act2 = Symbol.Activation()(Map("data" -> bn2, "name" -> "relu2", "act_type" -> "relu"))
val mp2 = Symbol.Pooling()(Map("data" -> act2, "name" -> "mp2",
"kernel" -> (2, 2), "stride" -> (2, 2), "pool_type" -> "max"))
val fl = Symbol.Flatten()(Map("data" -> mp2, "name" -> "flatten"))
val fc2 = Symbol.FullyConnected()(Map("data" -> fl, "name" -> "fc2", "num_hidden" -> 10))
val softmax = Symbol.SoftmaxOutput()(Map("data" -> fc2, "name" -> "sm"))
softmax.listArguments().foreach(println)
val dataShapes = Map("data" -> Shape(100,1,28, 28))
val dataShapes_ =collection.immutable.Map(dataShapes.toList: _*)
softmax.simpleBind(Context.cpu(), "write", shapeDict = dataShapes_)
}
def listArguments_{
val data = Symbol.CreateVariable("data")
val conv1 = Symbol.Convolution()(Map("data" -> data, "name" -> "conv1",
"num_filter" -> 32, "kernel" -> (3, 3), "stride" -> (2, 2)))
val bn1 = Symbol.BatchNorm()(Map("data" -> conv1, "name" -> "bn1"))
val act1 = Symbol.Activation()(Map("data" -> bn1, "name" -> "relu1", "act_type" -> "relu"))
val mp1 = Symbol.Pooling()(Map("data" -> act1, "name" -> "mp1",
"kernel" -> (2, 2), "stride" -> (2, 2), "pool_type" -> "max"))
val conv2 = Symbol.Convolution()(Map("data" -> mp1, "name" -> "conv2", "num_filter" -> 32,
"kernel" -> (3, 3), "stride" -> (2, 2)))
val bn2 = Symbol.BatchNorm()(Map("data" -> conv2, "name" -> "bn2"))
val act2 = Symbol.Activation()(Map("data" -> bn2, "name" -> "relu2", "act_type" -> "relu"))
val mp2 = Symbol.Pooling()(Map("data" -> act2, "name" -> "mp2",
"kernel" -> (2, 2), "stride" -> (2, 2), "pool_type" -> "max"))
val fl = Symbol.Flatten()(Map("data" -> mp2, "name" -> "flatten"))
val fc2 = Symbol.FullyConnected()(Map("data" -> fl, "name" -> "fc2", "num_hidden" -> 10))
val softmax = Symbol.SoftmaxOutput()(Map("data" -> fc2, "name" -> "sm"))
softmax.listArguments().foreach(println)
}
def listAuxTest{
val data = Symbol.CreateVariable("data")
val conv1 = Symbol.Convolution()(Map("data" -> data, "name" -> "conv1",
"num_filter" -> 32, "kernel" -> (3, 3), "stride" -> (2, 2)))
conv1.listAuxiliaryStates().foreach(println)
}
}
|
Liuxg16/BrainMatrix
|
scala-package/core/src/test/scala/ml/dmlc/mxnet/AttrScopeSuite.scala
|
<gh_stars>100-1000
package ml.dmlc.mxnet
import org.scalatest.{BeforeAndAfterAll, FunSuite}
class AttrScopeSuite extends FunSuite with BeforeAndAfterAll {
test("attr basic") {
val (data, gdata) =
AttrScope(Map("group" -> "4", "data" -> "great")).withScope {
val data = Symbol.Variable("data", attr = Map("dtype" -> "data", "group" -> "1"))
val gdata = Symbol.Variable("data2")
(data, gdata)
}
assert(gdata.attr("group").get === "4")
assert(data.attr("group").get === "1")
val exceedScopeData = Symbol.Variable("data3")
assert(exceedScopeData.attr("group") === None, "No group attr in global attr scope")
}
}
|
Liuxg16/BrainMatrix
|
scalakernel/src/test/java/thu/brainmatrix/suite/IOSuite.scala
|
package thu.brainmatrix.suite
import scala.io.Source
import thu.brainmatrix.IO
import thu.brainmatrix.Shape
import thu.brainmatrix.DataIter
import thu.brainmatrix.DataBatch
import thu.brainmatrix.NDArray
import thu.brainmatrix.io.NDArrayIter
import org.scalatest.{BeforeAndAfterAll, FunSuite}
import thu.brainmatrix.util.CVTool
/**
* @author liuxianggen
* @date 20160712
* @brief test some functions related IO module
* @param
* @return
* @example
* @note
*/
class IOSuite extends FunSuite with BeforeAndAfterAll{
test("cifar dataset") {
val batchSize = 100
val trainDataIter = IO.ImageRecordIter(Map(
"path_imgrec" -> "data/cifar10_val.rec",
"label_width" -> "1",
"data_shape" -> "(3,28,28)",
"shuffle" -> "1",
"batch_size" -> batchSize.toString))
val data = takeIterElement(trainDataIter, 30).data.head.slice(0)
assert(data.shape === Shape(1,3,28,28))
// println(NDArray.max(data))
// CVTool.saveRGBImage(data.copy(), "./output/cifar.jpg")
}
test("mnist dataset") {
}
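/** returns the idx-th batch of the iterator, counting from a fresh reset */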
def takeIterElement(iter: DataIter, idx: Int): DataBatch = {
iter.reset()
var n = 0
while (n < idx - 1) {
iter.next()
n += 1
}
iter.next()
}
test("readCorpus"){
val fileName = "./seqData/input.txt"
var dict = Map[String,Int]()
val source = Source.fromFile(fileName)
val lineIter = source.getLines()
for(l<- lineIter){
val words = l.split("\\s+")
words.map(w => {
dict = dict.updated(w, dict.getOrElse(w,0)+1)
})
}
// println(dict.size)
}
/**
* @author liuxianggen
* @date 20160718
* @brief this is the encoder of INPUT_FILE: each char is assigned an id,
* which increases as its frequency decreases. For example:
* input file:
* I love you
* vocab:O->1,I->2,l->3...
* @param
* @return
* @example
* @note
*/
test("genVocab"){
val fileName = "./seqData/input1.txt"
var dict = Map[Char,Int]()
val source = Source.fromFile(fileName)
val lineIter = source.getLines()
for(l<- lineIter){
l.map(w => {
dict = dict.updated(w, dict.getOrElse(w,0)+1)
})
}
// println(dict)
}
/**
* @author liuxianggen
* @date 20160719
* @brief test the construction of NDArrayIter
* @param
* @return
* @example
* @note
*/
test("test NDArrayIter") {
val shape0 = Shape(1000, 2, 2)
val data = IndexedSeq(NDArray.ones(shape0), NDArray.zeros(shape0))
val shape1 = Shape(1000, 1)
val label = IndexedSeq(NDArray.ones(shape1))
val batchData0 = NDArray.ones(Shape(128, 2, 2))
val batchData1 = NDArray.zeros(Shape(128, 2, 2))
val batchLabel = NDArray.ones(Shape(128, 1))
// test pad
val dataIter0 = new NDArrayIter(data, label, 128, false, "pad")
var batchCount = 0
val nBatch0 = 8
while(dataIter0.hasNext) {
val tBatch = dataIter0.next()
batchCount += 1
assert(tBatch.data(0).toArray === batchData0.toArray)
assert(tBatch.data(1).toArray === batchData1.toArray)
assert(tBatch.label(0).toArray === batchLabel.toArray)
}
assert(batchCount === nBatch0)
// test discard
val dataIter1 = new NDArrayIter(data, label, 128, false, "discard") // the remainder is discarded
val nBatch1 = 7
batchCount = 0
while(dataIter1.hasNext) {
val tBatch = dataIter1.next()
batchCount += 1
assert(tBatch.data(0).toArray === batchData0.toArray)
assert(tBatch.data(1).toArray === batchData1.toArray)
assert(tBatch.label(0).toArray === batchLabel.toArray)
}
assert(batchCount === nBatch1)
}
}
|
Liuxg16/BrainMatrix
|
scala-package/spark/src/main/scala/ml/dmlc/mxnet/spark/MXNDArray.scala
|
package ml.dmlc.mxnet.spark
import ml.dmlc.mxnet.NDArray
/**
* A wrapper to serialize & deserialize [[ml.dmlc.mxnet.NDArray]] in spark jobs
* @author <NAME>
*/
class MXNDArray(@transient private var ndArray: NDArray) extends Serializable {
require(ndArray != null)
private val arrayBytes: Array[Byte] = ndArray.serialize()
def get: NDArray = {
if (ndArray == null) {
ndArray = NDArray.deserialize(arrayBytes)
}
ndArray
}
}
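/*
* usage sketch (hypothetical): the wrapper serializes eagerly at construction, so
* only arrayBytes travels with the Spark closure, and `get` lazily rebuilds the
* native NDArray on the executor side, e.g.
* val wrapped = MXNDArray(NDArray.ones(Shape(2, 2)))
* val restored: NDArray = wrapped.get
*/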
object MXNDArray {
def apply(ndArray: NDArray): MXNDArray = new MXNDArray(ndArray)
}
|
Liuxg16/BrainMatrix
|
scalakernel/src/main/java/thu/brainmatrix/synapse_symbol/Axon.scala
|
package thu.brainmatrix.synapse_symbol
import thu.brainmatrix.NDArray
import thu.brainmatrix.Symbol
import thu.brainmatrix.Context
import thu.brainmatrix.Shape
import thu.brainmatrix.Executor
class Axon(val ctx: Context = Context.defaultCtx,val name:String) extends Module {
override var variable_table = Array[String]("preVm")
override var variableindices = Array(-1)
//connectivity
var synapses = Vector[Synapse]();
var input :Input = null
var input_s:Symbol = null
override def getSymbol() = this.input_s
// graphic model
val gK = Symbol.CreateVariable(s"gK_$name")
val Vk = Symbol.CreateVariable(s"Vk_$name")
val Cm = Symbol.CreateVariable(s"Cm_$name")
val SensorIn = Symbol.CreateVariable(s"SensorIn_$name")
var preVm = Symbol.CreateVariable(s"preVm_$name")
var freeSensor = Symbol.CreateVariable(s"freeSensor_$name")
val onenda = NDArray.ones(Config.SHAPE,ctx)
//parameters
var gK_nda :NDArray = onenda;
var Vk_nda :NDArray = onenda* -70f;
var Cm_nda :NDArray = onenda * 10f; // membrane capacitance
var SensorIn_nda:NDArray = onenda * 2;
//others
var freeSensor_nda:NDArray = onenda * 0f
// variables
var preVm_nda: NDArray = onenda * -70f
var y_preVm_nda: NDArray = onenda * -70f
override def getSymbolMap():Map[String,NDArray] = {
Map(s"gK_$name"->gK_nda,s"Vk_$name"->Vk_nda,s"Cm_$name"->Cm_nda,s"SensorIn_$name"->SensorIn_nda,
s"preVm_$name"->y_preVm_nda,s"freeSensor_$name"->freeSensor_nda,s"current_${this.input.name}"->this.input.current_nda)
}
// def setValue(gK: NDArray,Vk: NDArray,Cm: NDArray,SensorIn: NDArray,preVm: NDArray){
//
// this.gK_nda = gK;
// this.Vk_nda = Vk;
// this.Cm_nda = Cm;
// this.SensorIn_nda = SensorIn;
// this.preVm_nda = preVm;
// }
def getSynapses(idx:Int):Synapse = {
synapses(idx)
}
def addSynapse(s:Synapse){
s.axon = this;
synapses = synapses.:+(s);
}
def addSpikeInput(input:Input){
this.input = input;
}
override def getInitialY():Array[NDArray] = {
Array(this.y_preVm_nda)
}
override def getInitialVar():Array[String] = {
Array(s"y${this.variableindices(0)}")
}
override def getInitial(map : Map[String,NDArray]): Map[String,NDArray] = {
Map(s"y${this.variableindices(0)}"->this.y_preVm_nda)
}
/**
* indices: the variable indices that this module needs
* vector operations
*/
override def update(t_onehot: Symbol, y:Array[Symbol],yDot:Array[ Symbol],indices:Array[Int]):Array[Symbol] = {
this.preVm = y(indices(0))
this.input_s = this.input.getinput(t_onehot);
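// leaky-integrator membrane equation, as modeled here:
// dVm/dt = -(I_in + gK * (Vm - Vk)) / Cm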
val d_preVm = ((input_s+this.gK*(this.preVm-this.Vk))/this.Cm)*(-1f);
// Sensor can diffuse between synapses
this.freeSensor = this.SensorIn;
for(i <- 0 until this.synapses.length){
this.freeSensor = this.freeSensor - this.synapses(i).preSensor;
// println("dddddddddddd")
}
yDot(indices(0)) = d_preVm;
yDot
}
}
|
Liuxg16/BrainMatrix
|
scalakernel/src/main/java/thu/brainmatrix/synapse_symbol/Example.scala
|
<gh_stars>0
package thu.brainmatrix.synapse_symbol
import thu.brainmatrix.Base
import thu.brainmatrix.NDArray
import thu.brainmatrix.Context
import thu.brainmatrix.Shape
import thu.brainmatrix.util.Draw
object Example {
def main(args:Array[String]){
Base.welcome
test()
}
def test(){
val starttime = System.currentTimeMillis()
val ctx = Config.CTX
val steps_num:Int =Config.SPIKENUM
// create an input source
// presynaptic spikes
val xpreinput1 = new Input("input1")(ctx);
xpreinput1.initial(3)
// create an axon
val xaxon1 = new Axon(ctx,"axon1");
xaxon1.addSpikeInput(xpreinput1);
// create a dendrite
val xdendrite1 = new Dendrite(ctx,"Dendrite1");
// create a synapse
val xsynapse1 = new Synapse(ctx,"Synapse1");
xaxon1.addSynapse(xsynapse1);
xdendrite1.addSynapse(xsynapse1);
// input with higher input rates
val xpreinput2 = new Input("input2")(ctx);
xpreinput2.initial(5)
val xaxon2 = new Axon(ctx,"axon2");
xaxon2.addSpikeInput(xpreinput2);
val xdendrite2 = new Dendrite(ctx,"Dendrite2");
val xsynapse2 = new Synapse(ctx,"Synapse2");
xaxon2.addSynapse(xsynapse2);
xdendrite2.addSynapse(xsynapse2);
// create a model
val model = new Model(ctx);
//
model.addModule(xaxon1);
model.addModule(xaxon2);
model.addModule(xsynapse1);
// model.addModule(xsynapse2);
model.addModule(xdendrite1);
// model.addModule(xdendrite2);
val y0 = model.getInitialY()
// create a engine
val engine = new Engine(ctx,model = model)
engine.build()
engine.plot()
val t0 = NDArray.ones(Config.SHAPE, ctx)
val h = NDArray.ones(Config.SHAPE, ctx)
val (t,y) = engine.run(t0, y0, h,steps_num-1);
val elapsetime = System.currentTimeMillis() - starttime
println(s"elapsed time:$elapsetime")
engine.dispose()
model.indices.flatten.foreach(println)
val draw = new Draw()
val tslice0arr = NDArray.transpose(t).slice(0).toArray
t.dispose()
val yrec = y.map { x => NDArray.transpose(x).slice(0).toArray }
y.foreach { x => x.dispose() }
draw.subplot(4,4,0)
draw.add_line(tslice0arr, yrec(xaxon1.getResindex("preVm")))
draw.add_line(tslice0arr, yrec(xaxon2.getResindex("preVm")))
draw.addInfo("preVM", "time(ms)", "presynaptic Vm(mV)")
draw.subplot(4,4,1)
draw.add_line(tslice0arr, yrec(xdendrite1.getResindex("postVm")))
draw.add_line(tslice0arr, yrec(xdendrite2.getResindex("postVm")))
draw.addInfo("postVm", "time(ms)", "postsynaptic Vm(mV)")
draw.subplot(4,4,2)
val preCa1 = yrec(xsynapse1.getResindex("preCa"))
val preCa2 = yrec(xsynapse2.getResindex("preCa"))
draw.add_line(tslice0arr, preCa1)
draw.add_line(tslice0arr, preCa2)
draw.addInfo("presynaptic [Ca]i (uM)", "time(ms)", "presynaptic [Ca]i (uM)")
draw.subplot(4,4,3)
val Sensor1 = yrec(xsynapse1.getResindex("preSensor"))
val Sensor2 = yrec(xsynapse2.getResindex("preSensor"))
draw.add_line(tslice0arr, Sensor1)
draw.add_line(tslice0arr, Sensor2)
draw.addInfo("presynaptic [Sensor]i", "time(ms)", "presynaptic [Sensor]i")
draw.subplot(4,4,4)
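// release probability as modeled here: the elementwise product of the presynaptic
// [Ca]i trace and the free-sensor trace at each time step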
val Pr1 = preCa1 zip Sensor1 map{x => x._1 * x._2}
val Pr2 = preCa2 zip Sensor2 map{x => x._1 * x._2}
draw.add_line(tslice0arr, Pr1)
draw.add_line(tslice0arr, Pr2)
draw.addInfo("probability of release", "time(ms)")
draw.subplot(4,4,5)
draw.add_line(tslice0arr, yrec(xsynapse1.getResindex("preCaBuff")))
draw.add_line(tslice0arr, yrec(xsynapse2.getResindex("preCaBuff")))
draw.addInfo("presynaptic Ca buffer", "time(ms)")
draw.subplot(4,4,6)
draw.add_line(tslice0arr, yrec(xsynapse1.getResindex("aPreCDK")))
draw.add_line(tslice0arr, yrec(xsynapse2.getResindex("aPreCDK")))
draw.addInfo("aPreCDK", "time(ms)")
draw.subplot(4,4,7)
draw.add_line(tslice0arr, yrec(xsynapse1.getResindex("aPreTrkB")))
draw.add_line(tslice0arr, yrec(xsynapse2.getResindex("aPreTrkB")))
draw.addInfo("presynaptic TrkB", "time(ms)")
draw.subplot(4,4,8)
draw.add_line(tslice0arr, yrec(xsynapse1.getResindex("preNR2B")))
draw.add_line(tslice0arr, yrec(xsynapse2.getResindex("preNR2B")))
draw.addInfo("presynaptic NR2B", "time(ms)")
draw.subplot(4,4,9)
draw.add_line(tslice0arr, yrec(xsynapse1.getResindex("preMg")))
draw.add_line(tslice0arr, yrec(xsynapse2.getResindex("preMg")))
draw.addInfo("presynaptic [Mg]i (uM)", "time(ms)")
draw.subplot(4,4,10)
draw.add_line(tslice0arr, yrec(xsynapse1.getResindex("preAbeta")))
draw.add_line(tslice0arr, yrec(xsynapse2.getResindex("preAbeta")))
draw.addInfo("presynaptic Abeta", "time(ms)")
draw.subplot(4,4,11)
draw.add_line(tslice0arr, yrec(xsynapse1.getResindex("qNMDAR")))
draw.add_line(tslice0arr, yrec(xsynapse2.getResindex("qNMDAR")))
draw.addInfo("postsynaptic NMDAR", "time(ms)")
draw.subplot(4,4,12)
draw.add_line(tslice0arr, yrec(xsynapse1.getResindex("postCa")))
draw.add_line(tslice0arr, yrec(xsynapse2.getResindex("postCa")))
draw.addInfo("postsynaptic [Ca]i", "time(ms)")
draw.subplot(4,4,13)
draw.add_line(tslice0arr, yrec(xsynapse1.getResindex("postCaBuff")))
draw.add_line(tslice0arr, yrec(xsynapse2.getResindex("postCaBuff")))
draw.addInfo("postsynaptic Ca buffer", "time(ms)")
draw.subplot(4,4,14)
draw.add_line(tslice0arr, yrec(xsynapse1.getResindex("aPostCN")))
draw.add_line(tslice0arr, yrec(xsynapse2.getResindex("aPostCN")))
draw.addInfo("postsynaptic CN", "time(ms)")
draw.subplot(4,4,15)
draw.add_line(tslice0arr, yrec(xsynapse1.getResindex("aPostTrkB")))
draw.add_line(tslice0arr, yrec(xsynapse2.getResindex("aPostTrkB")))
draw.addInfo("post TrkB", "time(ms)")
//
draw.draw()
}
}
|
Liuxg16/BrainMatrix
|
scalakernel/src/main/java/thu/brainmatrix/ladder/Solver.scala
|
<reponame>Liuxg16/BrainMatrix
package thu.brainmatrix.ladder
import scala.collection.mutable.ListBuffer
import org.slf4j.LoggerFactory
import thu.brainmatrix.Optimizer
import thu.brainmatrix.optimizer.SGD
import thu.brainmatrix.EvalMetric
import thu.brainmatrix.NDArray
import thu.brainmatrix.Context
import thu.brainmatrix.Base
import thu.brainmatrix.Symbol
import thu.brainmatrix.Shape
import thu.brainmatrix.DataIter
import thu.brainmatrix.MXKVStoreUpdater
class Solver(var optimizer:Optimizer = new SGD()) {
private val logger = LoggerFactory.getLogger(classOf[Solver])
var updater = Optimizer.getUpdater(this.optimizer)
var metric :EvalMetric = null
var monitor:Monitor = null
def set_metric(metric:EvalMetric){
this.metric = metric
}
def set_monitor(monitor:Monitor){
this.monitor = monitor
}
def solve(xpu:Context,sym:Symbol,args:ListBuffer[(String,NDArray)],
args_grad:ListBuffer[(String,NDArray)],auxs:ListBuffer[(String,NDArray)],
X_i:ListBuffer[NDArray],begin_iter:Int=0,end_iter:Int,debug:Boolean=false){
val input_desc:Map[String,Shape] = Map("data"->X_i.head.shape)
val input_names = input_desc.keys
val input_buffs = input_desc.map(x => NDArray.empty(x._2, xpu))
val args_t = args.toMap ++ input_names.zip(input_buffs).toMap
val output_names = sym.listOutputs()
if(debug){
logger.info("need to code in details")
}
val exe = sym.easy_bind(xpu,args=args_t,argsGrad = args_grad.toMap,auxStates = auxs.toMap)
// println("----------------------------")
// println(sym.debugStr)
// println(exe.debugStr)
require(sym.listArguments().length == exe.gradArrays.length, "mismatch error in Solver.solve")
var update_dict = sym.listArguments().zip(exe.gradArrays).toMap
update_dict = update_dict.-(sym.listArguments()(0))
// sym.listArguments().foreach(println)
// println(update_dict.length)
//
val batch_size = input_buffs.head.shape(0)
this.optimizer.setRescaleGrad(1.0f/batch_size)
/**
* output_dict: output info as (String, NDArray) pairs
* output_buff: fresh buffers backing output_dict
* internal_dict: internal nodes that are not outputs
*/
var output_dict = ListBuffer[(String,NDArray)]()
var output_buff = ListBuffer[(String,NDArray)]()
var internal_dict = input_names.zip(input_buffs).toMap
for((key,arr)<-sym.listOutputs().zip(exe.outputs)){
if(output_names.contains(key)){
output_dict :+= (key,arr)
output_buff :+= (key,NDArray.empty(arr.shape,Context.defaultCtx))
}else{
internal_dict += (key->arr)
}
}
val output_buff_m = output_buff.toMap
/**
* training start....
*
*/
for(i<- begin_iter until end_iter){
// println(s"------------------------$i-----")
/**
* update the input training data
*/
X_i(i).copyTo(input_buffs.head)
exe.forward(isTrain=true)
/**
* internal node info: internal_dict
*/
if(this.monitor!=null){
this.monitor.forward_end(i, internal_dict)
}
/***
* backup the output info
*/
for(key<-output_dict){
key._2.copyTo(output_buff_m(key._1))
}
exe.backward()
// println(s"------------------------$i-----")
// println(sym.debugStr)
// println(exe.debugStr)
updateParams(args_t,update_dict,this.updater)
// println(s"------------------------$i-----")
if(this.metric!=null){
// println(input_buffs.last.shape)
// println(output_buff_m(output_names(0)).shape)
this.metric.update(Array(input_buffs.last), Array(output_buff_m(output_names(0))))
}
if(this.monitor !=null){
this.monitor.backward_end(i,args_t,update_dict,this.metric)
}
exe.outputs(0).waitToRead()
}
//
}
def solve_0(xpu:Context,sym:Symbol,arg:ListBuffer[(String,NDArray)],
args_grad:ListBuffer[(String,NDArray)],auxs:ListBuffer[(String,NDArray)],
data_iter:DataIter,begin_iter:Int=0,end_iter:Int,debug:Boolean=false){
// if(this.monitor !=null){
// this.monitor.backward_end(0,arg.toMap,args_grad.toMap,this.metric)
// }
// val input_desc:Map[String,Base.Shape] = data_iter.provideData ++ data_iter.provideLabel
val input_desc:Map[String,Shape] = data_iter.provideData
val input_names = input_desc.keys
val input_buffs = input_desc.map(x => NDArray.empty(x._2, xpu))
val args_t = arg.toMap ++ input_names.zip(input_buffs).toMap
val output_names = sym.listOutputs()
if(debug){
logger.info("need to code in details")
}
println("----------------------------")
println(sym.listArguments())
println("listAuxiliaryStates:"+sym.listAuxiliaryStates().foreach(println))
val exe = sym.easy_bind(xpu,args=args_t,argsGrad = args_grad.toMap,auxStates = auxs.toMap)
// println("----------------------------")
// println(sym.debugStr)
// println(exe.debugStr)
require(sym.listArguments().length == exe.gradArrays.length, "mismatch error in Solver.solve_0")
var update_dict = sym.listArguments().zip(exe.gradArrays).toMap
update_dict = update_dict.-(sym.listArguments()(0))
// sym.listArguments().foreach(println)
// println(update_dict.length)
//
val batch_size = input_buffs.head.shape(0)
this.optimizer.setRescaleGrad(1.0f/batch_size)
/**
* output_dict: output info as (String, NDArray) pairs
* output_buff: fresh buffers backing output_dict
* internal_dict: internal nodes that are not outputs
*/
var output_dict = ListBuffer[(String,NDArray)]()
var output_buff = ListBuffer[(String,NDArray)]()
var internal_dict = input_names.zip(input_buffs).toMap
for((key,arr)<-sym.listOutputs().zip(exe.outputs)){
if(output_names.contains(key)){
output_dict :+= (key,arr)
output_buff :+= (key,NDArray.empty(arr.shape,Context.defaultCtx))
}else{
internal_dict += (key->arr)
}
}
val output_buff_m = output_buff.toMap
data_iter.reset()
/**
* training start....
*
*/
for(i<- begin_iter until end_iter){
// println(s"------------------------$i-----")
val batch = data_iter.next()
/**
* update the input training data
*/
for((data,buff)<-batch.data.zip(input_buffs)){
data.copyTo(buff)
}
exe.forward(isTrain=true)
/**
* internal node info: internal_dict
*/
if(this.monitor!=null){
this.monitor.forward_end(i, internal_dict)
}
/***
* backup the output info
*/
for(key<-output_dict){
key._2.copyTo(output_buff_m(key._1))
}
exe.backward()
// println(s"------------------------$i-----")
// println(sym.debugStr)
// println(exe.debugStr)
updateParams(args_t,update_dict,this.updater)
// println(s"------------------------$i-----")
if(this.metric!=null){
// println(input_buffs.last.shape)
// println(output_buff_m(output_names(0)).shape)
this.metric.update(Array(input_buffs.last), Array(output_buff_m(output_names(0))))
}
if(this.monitor !=null){
this.monitor.backward_end(i,args_t,update_dict,this.metric)
}
exe.outputs(0).waitToRead()
}
}
// Perform update of param_arrays from grad_arrays not on kvstore
private def updateParams(paramMap: Map[String, NDArray],
gradMap: Map[String, NDArray],
updater: MXKVStoreUpdater,
numDevice: Int=1) {
var idx = 0
for(key<-gradMap.keys){
if(paramMap(key)!=null ){
if(!key.equals("data") && !key.equals("input")){
updater.update(numDevice+idx, gradMap(key), paramMap(key))
idx +=1
}
}else{
throw new java.lang.UnknownError("mismatch error: no parameter found for gradient key!")
}
}
}
}
/**
* a class to monitor the process
* @param interval: interval for each print
*/
class Monitor(val interval:Int){
private val logger = LoggerFactory.getLogger(classOf[Monitor])
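/** mean absolute value of an NDArray, used as a cheap summary statistic */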
def stat(x:NDArray):Float = {
NDArray.mean(NDArray.abs(x)).toScalar
}
def forward_end(i:Int,internals:Map[String,NDArray]){
if(i%this.interval==0){
for(key<- internals.keys){
val arr = internals(key)
val mean = this.stat(arr)
logger.info(s"Iter:$i param:$key \t\t stat(mean):$mean")
System.err.println(s"Iter:$i param:$key \t\t stat(mean):$mean")
}
}
}
def backward_end(i:Int,args:Map[String,NDArray],grads:Map[String,NDArray],metric:EvalMetric){
if(i%this.interval==0){
for(key<- grads.keys){
val arr = grads(key)
val mean_args = this.stat(args(key))
val mean_grad = this.stat(arr)
System.err.println(s"Iter:$i param:$key \t\t stat(mean):$mean_args \t\t grad_stat:$mean_grad")
}
}
if(i%this.interval==0 && metric !=null){
val metricValue = (metric.get._2)
System.err.println(s"Iter:$i \tmetric:$metricValue")
metric.reset()
}
}
}
|
Liuxg16/BrainMatrix
|
scalakernel/src/test/java/thu/brainmatrix/suite/TestStaticGraph.scala
|
package thu.brainmatrix.suite
import thu.brainmatrix.Base._
import thu.brainmatrix.Symbol
import thu.brainmatrix.StaticGraph
import scala.Vector
import thu.brainmatrix.Shape
/**
*
* 2016-3-25
* by liuxianggen
* as the object name says
*
*/
object staticGraphTest {
def main(args:Array[String]){
// identifyTest
toStaticGraphTest
// handleTest
}
def toStaticGraphTest{
val dataS = Symbol.CreateVariable("data")
val kwargs_type = Map("name" -> "fc2", "num_hidden" -> "10")
val sb:Symbol = Symbol.Create("FullyConnected",kwargs_type)
val kwargs_symbol:Map[String,Symbol] = Map("data"->dataS)
sb.Compose(kwargs_symbol, "FullyConnectedS")
// var out_graph= new StaticGraph()
sb.ToStaticGraph()
println(sb.staticGraph.debug)
println("--------------------------------------------")
// sb.staticGraph.ToStaticGraph
// sb.staticGraph.printOperator
}
def OperatorTest{
val dataS = Symbol.CreateVariable("data")
val kwargs_type = Map("name" -> "fc2", "num_hidden" -> "10")
val sb:Symbol = Symbol.Create("FullyConnected",kwargs_type)
val kwargs_symbol:Map[String,Symbol] = Map("data"->dataS)
sb.Compose(kwargs_symbol, "FullyConnectedS")
// var out_graph= new StaticGraph()
sb.ToStaticGraph()
// println(sg.debug)
println("--------------------------------------------")
sb.staticGraph.printOperator
}
/**
* 2016-3-23
*/
def identifyTest{
// def ToStaticGraph(out_graph: StaticGraph) {
val sg:StaticGraph = new StaticGraph()
val dataS = Symbol.CreateVariable("data1")
// val weightS = Symbol.CreateVariable("weight")
// val biasS = Symbol.CreateVariable("bias")
val sb:Symbol = Symbol.Create("FullyConnected")
// val kwargs:Map[String,Symbol] = Map("data"->dataS,"weight"->weightS,"bias"->biasS)
val kwargs:Map[String,Symbol] = Map("data"->dataS)
sb.Compose(kwargs, "FullyConnectedS")
// val weightS1 = Symbol.CreateVariable("weight1")
// val biasS1 = Symbol.CreateVariable("bias1")
val sb1:Symbol = Symbol.Create("FullyConnected")
val kwargs1:Map[String,Symbol] = Map("data"->sb)
sb1.Compose(kwargs1, "FullyConnectedS1")
sb1.ToStaticGraph()
val kwargs_ :Map[String,Shape] = Map("data"->Shape(10,20),"data1"->Shape(2,4))
val (a, b) = sb1.staticGraph.identifyVar(kwargs_)
a.foreach {println}
}
/**
* by liuxianggen
* 2016-4-4
*/
def handleTest{
val sg = new StaticGraph()
println(sg.handle)
}
}
|
Liuxg16/BrainMatrix
|
scalakernel/src/main/java/thu/brainmatrix/char_rnn_symbol/seq_IO.scala
|
<reponame>Liuxg16/BrainMatrix<filename>scalakernel/src/main/java/thu/brainmatrix/char_rnn_symbol/seq_IO.scala
package thu.brainmatrix.char_rnn_symbol
import thu.brainmatrix.NDArray
import thu.brainmatrix.Context
import thu.brainmatrix.io.NDArrayLSTMIter
import thu.brainmatrix.Shape
import scala.io.Source
import scala.math
import java.io.File
import java.io.PrintWriter
object seq_IO {
/**
* @author liuxianggen
* @date 20160718
* @brief this is the encoder of INPUT_FILE: each char is assigned an id,
* which increases as its frequency decreases. For example:
* input file:
* I love you
* vocab:O->1,I->2,l->3...
* @param inputFileName
* @return vocab_final, a map whose maximum size is max_vocab (10000 by default)
* @example
* @note
*/
def build_vocabulary(inputFileName:String,vocabFileName:String,max_vocab:Int=10000):Map[Char,Int] = {
val vocabfile = new File(vocabFileName)
var vocab_final = Map[Char,Int]()
if(vocabfile.isFile()){
// println(s"INFO:Using $vocabFileName,while vocabulary already exists")
val source = Source.fromFile(vocabfile)
val line = source.mkString
(line.zipWithIndex).map(x=>{
vocab_final = vocab_final ++ Map(x._1->(x._2))
})
}else{
//if the vocab file does not exist, generate one now
var dict = Map[Char,Int]()
val source = Source.fromFile(inputFileName)
val lineIter = source.mkString
lineIter.map(w => {
dict = dict.updated(w, dict.getOrElse(w,0)+1)
})
var vocab = dict.toList sortBy(_._2)
// println("------------------")
// println(vocab)
vocab = vocab.take(math.min(max_vocab,vocab.length)).reverse // in decreasing order of frequency
//write to the vocabfile
val out = new PrintWriter(vocabfile)
vocab.map(x => {
out.print(x._1)
})
out.close()
(vocab.zipWithIndex) map(x=>{
vocab_final = vocab_final ++ Map(x._1._1->(x._2))
})
}
vocab_final = vocab_final ++ Map(Config.UNKNOW_CHAR->vocab_final.size)
vocab_final
}
def char_idx(vocab: Map[Char,Int], c: Char): Int = {
if (vocab.contains(c))
vocab(c)
else
vocab(Config.UNKNOW_CHAR)
}
def Str2Char_NDArrayIterator(text:String,labelName:String = "label",vocab:Map[Char,Int],batch_size:Int,seq_len:Int,ctx:Context = Context.defaultCtx):NDArrayLSTMIter = {
//calculate the number of sequences after dropping the first char
val num_seq_len = math.floor((text.length()-1)/seq_len).toInt
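// worked example: with text.length == 100 and seq_len == 32, (100 - 1) / 32 = 3
// full sequences survive; labels are the same stream shifted left by one char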
//map to index of the char
var array_train = text.map {vocab(_).toFloat}.toArray
var array_label = array_train.drop(1).take(num_seq_len*seq_len)
array_train = array_train.take(num_seq_len*seq_len)
val NDA_train = NDArray.array(array_train, Shape(num_seq_len,seq_len),ctx)
val NDA_label = NDArray.array(array_label,Shape(num_seq_len,seq_len),ctx)
val dataIter = new NDArrayLSTMIter(IndexedSeq(NDA_train),"data",IndexedSeq(NDA_label),labelName, batch_size, false, "discard") // the remainder is discarded
// println(s"length:${dataIter}")
// println(s"provideData:${dataIter.provideData}")//(32,24)
// println(s"provideData:${dataIter.provideLabel}")//(32,24)
dataIter
}
def lstmDataIter(text:String,inputName:String = "data",labelName:String = "label",vocab:Map[Char,Int],batch_size:Int,seq_len:Int,ctx:Context = Context.defaultCtx):NDArrayLSTMIter = {
//calculate the number of sequences after dropping the first char
val num_seq_len_temp = math.floor((text.length()-1)/seq_len).toInt
val num_batch = math.floor(num_seq_len_temp/batch_size).toInt
val num_seq = num_batch*batch_size
val num_char = num_batch*batch_size*seq_len
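// layout is time-major: map_train(t)(s) holds the t-th char of sequence s, and
// map_label(t)(s) holds the char one step ahead of it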
//map to index of the char
var array_train = text.map {vocab(_).toFloat}.toArray
array_train = array_train.take(num_char+1)
val map_train = (0 until seq_len).map(x => Array.fill[Float](num_seq)(0f)).toArray
val map_label = (0 until seq_len).map(x => Array.fill[Float](num_seq)(0f)).toArray
(0 until num_char).map(x =>{
val id = x%seq_len
map_train(id)(x/seq_len) = array_train(x)
map_label(id)(x/seq_len) = array_train(x+1)
}
)
// val init_state_map = Map("_l0_init_h"->NDArray.zeros(Shape(32,64),ctx),"_l0_init_c"->NDArray.zeros(Shape(32,64),ctx),"_l1_init_h"->NDArray.zeros(Shape(32,64),ctx),"_l1_init_c"->NDArray.zeros(Shape(32,64),ctx))
// val NDA_train = NDArray.array(array_train, Shape(num_seq_len,seq_len),ctx)
// val NDA_label = NDArray.array(array_label,Shape(num_seq_len,seq_len),ctx)
val dataIter = new NDArrayLSTMIter(map_train.map(NDArray.array(_,Shape(num_seq,1))).toIndexedSeq,inputName,map_label.map(NDArray.array(_,Shape(num_seq))).toIndexedSeq,labelName, batch_size, false, "discard") // the remainder is discarded
// println(s"provideData:${dataIter.provideLabel}")//(32,24)
dataIter
}
def RNN_OneHot_DataIter(text:String,inputName:String = "data",labelName:String = "label",vocab:Map[Char,Int],batch_size:Int,seq_len:Int,ctx:Context = Context.defaultCtx):NDArrayLSTMIter = {
//calculate the number of sequences after dropping the first char
val num_seq_len_temp = math.floor((text.length()-1)/seq_len).toInt
val num_batch = math.floor(num_seq_len_temp/batch_size).toInt
val num_seq = num_batch*batch_size
val num_char = num_batch*batch_size*seq_len
//map to index of the char
var array_train = text.map {vocab(_).toFloat}.toArray
val label_arr = NDArray.array(array_train.take(num_char+1).drop(1),Shape(num_seq,seq_len))
array_train = array_train.take(num_char)
val tarin_arr = NDArray.zeros(Shape(num_seq,seq_len,vocab.size), ctx)
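// fill the one-hot tensor: entry (sequence, time-step, char-id) is set to 1 for
// each input character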
(0 until num_char).map(x =>{
val id = x%seq_len
tarin_arr(x/seq_len,id,array_train(x).toInt) = 1
}
)
val dataIter = new NDArrayLSTMIter(IndexedSeq(tarin_arr),inputName,IndexedSeq(label_arr),labelName, batch_size, false, "discard") // the remainder is discarded
// println(s"provideData:${dataIter.provideData}")//(32,24)
dataIter
}
def lstm_vec_DataIter(text:String,inputName:String = "data",labelName:String = "label",vocab:Map[Char,Int],batch_size:Int,seq_len:Int,vocab_len:Int,ctx:Context = Context.defaultCtx):NDArrayLSTMIter = {
//calculate the number of sequences after dropping the first char
val num_seq_len_temp = math.floor((text.length()-1)/seq_len).toInt
val num_batch = math.floor(num_seq_len_temp/batch_size).toInt
val num_seq = num_batch*batch_size
val num_char = num_batch*batch_size*seq_len
//map to index of the char
var array_train = text.map {vocab(_)}.toArray
array_train = array_train.take(num_char+1)
val map_train = (0 until seq_len).map(x => NDArray.zeros(Shape(num_seq,vocab_len), ctx)).toArray
val map_label = (0 until seq_len).map(x => NDArray.zeros(Shape(num_seq), ctx)).toArray
(0 until num_char).map(x =>{
val id = x%seq_len
map_train(id)(x/seq_len,array_train(x)) = 1
map_label(id)(x/seq_len) = array_train(x+1)
}
)
// val init_state_map = Map("_l0_init_h"->NDArray.zeros(Shape(32,64),ctx),"_l0_init_c"->NDArray.zeros(Shape(32,64),ctx),"_l1_init_h"->NDArray.zeros(Shape(32,64),ctx),"_l1_init_c"->NDArray.zeros(Shape(32,64),ctx))
// val NDA_train = NDArray.array(array_train, Shape(num_seq_len,seq_len),ctx)
// val NDA_label = NDArray.array(array_label,Shape(num_seq_len,seq_len),ctx)
val dataIter = new NDArrayLSTMIter(map_train.toIndexedSeq,inputName,map_label.toIndexedSeq,labelName, batch_size, false, "discard") // the remainder is discarded
// println(s"provideData:${dataIter.provideLabel}")//(32,24)
dataIter
}
def SampleDataIter(text:String,inputName:String = "data",labelName:String = "label",vocab:Map[Char,Int],batch_size:Int,seq_len:Int,ctx:Context = Context.defaultCtx):NDArrayLSTMIter = {
//calculate the number of sequences after dropping the first char
val num_seq_len_temp = math.floor((text.length()-1)/seq_len).toInt
val num_batch = 1
val num_seq = num_batch*batch_size
val num_char = num_batch*batch_size*seq_len
// map each char to its vocabulary index
var array_train = text.map {vocab(_).toFloat}.toArray
array_train = array_train.take(num_char+1)
val map_train = (0 until seq_len).map(x => (s"${inputName}_$x",Array.fill[Float](num_seq)(0f))).toMap
val map_label = (0 until seq_len).map(x => (s"${labelName}_$x",Array.fill[Float](num_seq)(0f))).toMap
(0 until num_char).foreach(x =>{
val id = x%seq_len
val arr_train = map_train.getOrElse(s"${inputName}_$id",Array.fill[Float](num_seq)(0f))
val arr_label = map_label.getOrElse(s"${labelName}_$id",Array.fill[Float](num_seq)(0f))
arr_train(x/seq_len) = array_train(x)
arr_label(x/seq_len) = array_train(x+1)
}
)
// val init_state_map = Map("_l0_init_h"->NDArray.zeros(Shape(32,64),ctx),"_l0_init_c"->NDArray.zeros(Shape(32,64),ctx),"_l1_init_h"->NDArray.zeros(Shape(32,64),ctx),"_l1_init_c"->NDArray.zeros(Shape(32,64),ctx))
// val NDA_train = NDArray.array(array_train, Shape(num_seq_len,seq_len),ctx)
// val NDA_label = NDArray.array(array_label,Shape(num_seq_len,seq_len),ctx)
val dataIter = new NDArrayLSTMIter((0 until seq_len).map(i => NDArray.array(map_train(s"${inputName}_$i"),Shape(num_seq,1))).toIndexedSeq,inputName,(0 until seq_len).map(i => NDArray.array(map_label(s"${labelName}_$i"),Shape(num_seq))).toIndexedSeq,labelName, batch_size, false, "pad")// the rest will be padded; index explicitly so the per-step order does not depend on Map iteration order
// println(s"provideData:${dataIter.provideData}")//(32,24)
dataIter
}
def main(args:Array[String]){
//test build_vocabulary
val vocab = build_vocabulary("./seqData/input.txt","./seqData/vocab.txt")
// vocab.foreach(println)
// println(vocab.values)
}
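/**
 * Hedged usage sketch (illustrative only, not in the original source): how the
 * iterators above are typically wired together; assumes the seqData files exist.
 */
def usage_sketch(): Unit = {
val vocab = build_vocabulary("./seqData/input.txt", "./seqData/vocab.txt")
val text = scala.io.Source.fromFile("./seqData/input.txt").mkString
// one-hot iterator: data (num_seq, seq_len, vocab.size), label (num_seq, seq_len)
val iter = RNN_OneHot_DataIter(text = text, vocab = vocab, batch_size = 32, seq_len = 32)
while (iter.hasNext) {
val batch = iter.next()
// consume batch.data(0) / batch.label(0) here
batch.dispose()
}
}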
}
|
Liuxg16/BrainMatrix
|
scalakernel/src/main/java/thu/brainmatrix/lstmSort/ModelTraining.scala
|
<reponame>Liuxg16/BrainMatrix
package thu.brainmatrix.lstmSort
import thu.brainmatrix._
import org.kohsuke.args4j.{CmdLineParser, Option}
import org.slf4j.LoggerFactory
import scala.collection.JavaConverters._
import thu.brainmatrix.optimizer.Adam
import thu.brainmatrix.util.IOHelper
import thu.brainmatrix.rnn.Utils
object ModelTraining {
def main(args:Array[String]){
val path_train = "./data/sort.train.txt"
val path_test = "./data/sort.valid.txt"
val saveModelPath = "./model"
val batch_size = 100
val buckets = List(5)
val num_hidden = 300
val num_embed = 512
val num_lstm_layer = 2
val seqLen = 5
val num_epoch = 1
val learningRate = 0.01f
val momentum = 0.9
val ctx = Context.gpu(0)
// a map from each word to its index
val vocab = IOHelper.buildVocab("./data/sort.train.txt")
println(vocab)
val symbol = Lstm.bi_lstmUnroll(num_lstm_layer, seqLen, vocab.size,
numHidden = num_hidden, numEmbed = num_embed,
numLabel = vocab.size)
// initialize states for LSTM
val initC = for (l <- 0 until num_lstm_layer) yield (s"l${l}_init_c", (batch_size, num_hidden))
val initH = for (l <- 0 until num_lstm_layer) yield (s"l${l}_init_h", (batch_size, num_hidden))
val initStates = initC ++ initH
// treat '\n' as the sequence separator for training
val dataTrain = new ButketIo.BucketSentenceIter(path_train, vocab, buckets,
batch_size, initStates)
val dataTest = new ButketIo.BucketSentenceIter(path_test, vocab, buckets,
batch_size, initStates)
val datasAndLabels = dataTrain.provideData ++ dataTrain.provideLabel
val (argShapes, outputShapes, auxShapes) = symbol.inferShape(datasAndLabels)
val initializer = new Xavier(factorType = "in", magnitude = 2.34f)
val argNames = symbol.listArguments()
val argDict = argNames.zip(argShapes.map(NDArray.zeros(_, ctx))).toMap
val auxNames = symbol.listAuxiliaryStates()
val auxDict = auxNames.zip(auxShapes.map(NDArray.zeros(_, ctx))).toMap
val gradDict = argNames.zip(argShapes).filter { case (name, shape) =>
!datasAndLabels.contains(name)
}.map(x => x._1 -> NDArray.empty(x._2, ctx) ).toMap
argDict.foreach { case (name, ndArray) =>
if (!datasAndLabels.contains(name)) {
initializer.initWeight(name, ndArray)
}
}
val data = argDict("data")
val label = argDict("softmax_label")
val executor = symbol.bind(ctx, argDict, gradDict)
val opt = new Adam(learningRate = learningRate, wd = 0.0001f)
val paramsGrads = gradDict.toList.zipWithIndex.map { case ((name, grad), idx) =>
(idx, name, grad, opt.createState(idx, argDict(name)))
}
val evalMetric = new CustomMetric(accuracy1, "accuracy")
val batchEndCallback = new Callback.Speedometer(batch_size, 50)
val epochEndCallback = Utils.doCheckpoint(s"${saveModelPath}/lstmSort")
for (epoch <- 0 until num_epoch) {
// Training phase
val tic = System.currentTimeMillis
evalMetric.reset()
var nBatch = 0
var epochDone = false
// Iterate over training data.
dataTrain.reset()
while (!epochDone) {
var doReset = true
while (doReset && dataTrain.hasNext) {
val dataBatch = dataTrain.next()
data.set(dataBatch.data(0))
label.set(dataBatch.label(0))
executor.forward(isTrain = true)
executor.backward()
paramsGrads.foreach { case (idx, name, grad, optimState) =>
opt.update(idx, argDict(name), grad, optimState)
}
// evaluate at end, so out_cpu_array can lazy copy
evalMetric.update(dataBatch.label, executor.outputs)
dataBatch.dispose()
nBatch += 1
batchEndCallback.invoke(epoch, nBatch, evalMetric)
}
if (doReset) {
dataTrain.reset()
}
// this epoch is done
epochDone = true
}
val (name, value) = evalMetric.get
println(s"Epoch[$epoch] Train-$name=$value")
val toc = System.currentTimeMillis
println(s"Epoch[$epoch] Time cost=${toc - tic}")
//VALIDATION
evalMetric.reset()
dataTest.reset()
// TODO: make DataIter implement Iterator
while (dataTest.hasNext) {
val evalBatch = dataTest.next()
data.set(evalBatch.data(0))
label.set(evalBatch.label(0))
executor.forward(isTrain = false)
evalMetric.update(evalBatch.label, executor.outputs)
evalBatch.dispose()
}
val (name_eval, value_eval) = evalMetric.get
println(s"Epoch[$epoch] Validation-$name_eval=$value_eval")
epochEndCallback.invoke(epoch, symbol, argDict, auxDict)
}
executor.dispose()
println("ends...")
}
// Evaluation
def perplexity(label: NDArray, pred: NDArray): Float = {
val shape = label.shape
val size = shape(0) * shape(1)
val labelT = {
val tmp = label.toArray.grouped(shape(1)).toArray
val result = Array.fill[Float](size)(0f)
var idx = 0
for (i <- 0 until shape(1)) {
for (j <- 0 until shape(0)) {
result(idx) = tmp(j)(i)
idx += 1
}
}
result
}
var loss = 0f
// pred is row-major: one row of class probabilities per instance,
// so group by the row length shape(1) and iterate over the shape(0) rows
val predArray = pred.toArray.grouped(pred.shape(1)).toArray
for (i <- 0 until pred.shape(0)) {
loss += -Math.log(Math.max(1e-10, predArray(i)(labelT(i).toInt)).toFloat).toFloat
}
loss / size
}
// Evaluation
def accuracy1(label: NDArray, pred: NDArray): Float = {
var sumMetric = 0f
val shape = label.shape
val size = shape(0) * shape(1)
val labelT = {
val tmp = label.toArray.grouped(shape(1)).toArray
val result = Array.fill[Float](size)(0f)
var idx = 0
for (i <- 0 until shape(1)) {
for (j <- 0 until shape(0)) {
result(idx) = tmp(j)(i)
idx += 1
}
}
result
}
val predLabel = NDArray.argmaxChannel(pred)
for ((labelElem, predElem) <- labelT zip predLabel.toArray) {
if (math.abs(labelElem - predElem)<1e-6) {
// println(s"labelElem:$labelElem,predElem:$predElem")
sumMetric += 1
}
}
predLabel.dispose()
sumMetric/(label.size)
}
}
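// Hedged sketch (illustrative, not part of the original file): a tiny check of
// accuracy1 above. With a batch size of 1 the label transposition is the identity,
// so both positions should match and the expected result is 1.0.
object AccuracySketch {
def main(args: Array[String]): Unit = {
val label = NDArray.array(Array(1f, 0f), Shape(1, 2)) // true class per position
val pred = NDArray.array(Array(0.2f, 0.8f, 0.9f, 0.1f), Shape(2, 2)) // one probability row per position
println(ModelTraining.accuracy1(label, pred)) // expected: 1.0
}
}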
|
Liuxg16/BrainMatrix
|
scalakernel/src/main/java/thu/brainmatrix/char_rnn_symbol/Training.scala
|
<filename>scalakernel/src/main/java/thu/brainmatrix/char_rnn_symbol/Training.scala
package thu.brainmatrix.char_rnn_symbol
import thu.brainmatrix.NDArray
import thu.brainmatrix.Base
//import thu.brainmatrix.ReshapeAccuracy
import thu.brainmatrix.Shape
import thu.brainmatrix.Accuracy
import thu.brainmatrix.Context
import thu.brainmatrix.FeedForward
import thu.brainmatrix.optimizer.Adam
import thu.brainmatrix.Xavier
import thu.brainmatrix.Symbol
import thu.brainmatrix.Model
import thu.brainmatrix.Callback
import thu.brainmatrix.util.mathTool
import thu.brainmatrix.CustomMetric
import thu.brainmatrix.io.NDArrayLSTMIter
import thu.brainmatrix.EpochEndCallback
import Config._
import scala.io.Source
import scala.collection.mutable.ListBuffer
object Training {
def main(args:Array[String]){
// sampleCharLSTM
// trainCharLSTM
// train_vec_CharLSTM
// trainCharRNN
train_vec_CharLSTM_lxg
}
def trainCharLSTM{
val vocab = seq_IO.build_vocabulary(INPUT_FILE_NAME, VOCAB_FILE_NAME)
val n_alphabet = vocab.size
val lstm = Lstm.LSTMNet(LSTM_N_LAYER, SEQ_LENGTH, DIM_HIDDEN, DIM_EMBED, n_alphabet, DROPOUT)
// lstm.listArguments().foreach {println}
// println(lstm.debug())
val source = Source.fromFile(INPUT_FILE_NAME)
val seq_input = source.mkString
val len_train = math.round(seq_input.length()*DATA_TRAIN_RATIO).toInt
val text_train = seq_input.take(len_train)
val text_val = seq_input.drop(len_train)
val traindata = seq_IO.lstmDataIter(text = text_train,labelName = "label",vocab = vocab,batch_size = BATCH_SIZE,seq_len = SEQ_LENGTH)
val valdata = seq_IO.lstmDataIter(text = text_val,labelName = "label",vocab = vocab,batch_size = BATCH_SIZE,seq_len = SEQ_LENGTH)
Base.INPUTSHAPE_AUXILIARY = Map("_l0_init_h"->Shape(BATCH_SIZE,DIM_HIDDEN),"_l0_init_c"->Shape(BATCH_SIZE,DIM_HIDDEN),"_l1_init_h"->Shape(BATCH_SIZE,DIM_HIDDEN),"_l1_init_c"->Shape(BATCH_SIZE,DIM_HIDDEN))
// val modelBase = new FeedForward(lstm, Context.cpu(), numEpoch = N_EPOCH,optimizer = new SGD(learningRate = LEARNING_RATE, momentum = MOMENTUM, wd = WEIGHT_DECAY),name = "lstm")
//// modelBase.fit(traindata, traindata,new ReconsAccuracy())
// modelBase.fit(traindata,valdata,new Accuracy())
// modelBase.saveModelParams(s"./model/charLSTM.params_${N_EPOCH}")
}
def train_vec_CharLSTM{
val vocab = seq_IO.build_vocabulary(INPUT_FILE_NAME, VOCAB_FILE_NAME)
val n_alphabet = vocab.size
val lstm = Lstm.LSTMNet(LSTM_N_LAYER, SEQ_LENGTH, DIM_HIDDEN, DIM_EMBED, n_alphabet, DROPOUT)
// lstm.listArguments().foreach {println}
// println(lstm.debug())
val source = Source.fromFile(INPUT_FILE_NAME)
val seq_input = source.mkString
val len_train = math.round(seq_input.length()*DATA_TRAIN_RATIO).toInt
val text_train = seq_input.take(len_train)
val text_val = seq_input.drop(len_train)
val traindata = seq_IO.lstm_vec_DataIter(text = text_train,labelName = "label",vocab = vocab,batch_size = BATCH_SIZE,seq_len = SEQ_LENGTH,vocab_len = n_alphabet)
val valdata = seq_IO.lstm_vec_DataIter(text = text_val,labelName = "label",vocab = vocab,batch_size = BATCH_SIZE,seq_len = SEQ_LENGTH,vocab_len = n_alphabet)
// for(j<-0 until 80){
// val databatch = traindata.next()
// val label1 = databatch.label(0)
// println(label1)
// }
Base.INPUTSHAPE_AUXILIARY = Map("_l0_init_h"->Shape(BATCH_SIZE,DIM_HIDDEN),"_l0_init_c"->Shape(BATCH_SIZE,DIM_HIDDEN),"_l1_init_h"->Shape(BATCH_SIZE,DIM_HIDDEN),"_l1_init_c"->Shape(BATCH_SIZE,DIM_HIDDEN))
// val modelBase = new FeedForward(lstm, Context.cpu(), numEpoch = N_EPOCH,optimizer = new SGD(learningRate = LEARNING_RATE, momentum = MOMENTUM, wd = WEIGHT_DECAY),name = "lstm")
// modelBase.fit(traindata,valdata,new Accuracy())
// modelBase.saveModelParams(s"./model/charLSTM.params_${N_EPOCH}")
}
def train_vec_CharLSTM_lxg{
val vocab = seq_IO.build_vocabulary(INPUT_FILE_NAME, VOCAB_FILE_NAME)
val n_alphabet = vocab.size
val lstm = Lstm.LSTM(LSTM_N_LAYER, SEQ_LENGTH, DIM_HIDDEN, DIM_EMBED, n_alphabet, DROPOUT)
// lstm.listArguments().foreach {println}
// println(lstm.debug())
val source = Source.fromFile(INPUT_FILE_NAME)
val seq_input = source.mkString
val len_train = math.round(seq_input.length()*DATA_TRAIN_RATIO).toInt
val text_train = seq_input.take(len_train)
val text_val = seq_input.drop(len_train)
val traindata = seq_IO.RNN_OneHot_DataIter(text = text_train,labelName = "label",vocab = vocab,batch_size = BATCH_SIZE,seq_len = SEQ_LENGTH)
val valdata = seq_IO.RNN_OneHot_DataIter(text = text_val,labelName = "label",vocab = vocab,batch_size = BATCH_SIZE,seq_len = SEQ_LENGTH)
val h = (0 until LSTM_N_LAYER).map(idx =>
(s"_l${idx}_init_h",Shape(BATCH_SIZE,DIM_HIDDEN))
).toMap
val c = (0 until LSTM_N_LAYER).map(idx =>
(s"_l${idx}_init_c",Shape(BATCH_SIZE,DIM_HIDDEN))
).toMap
val ctx = if (N_GPU == -1) Context.cpu() else Context.gpu(N_GPU)
val datasAndLabels = traindata.provideData ++ traindata.provideLabel ++ h ++ c
val (argShapes, outputShapes, auxShapes) = lstm.inferShape(datasAndLabels)
val initializer = new Xavier(factorType = "in", magnitude = 2.34f)
val argNames = lstm.listArguments()
val argDict = argNames.zip(argShapes.map(NDArray.zeros(_, ctx))).toMap
val auxNames = lstm.listAuxiliaryStates()
val auxDict = auxNames.zip(auxShapes.map(NDArray.zeros(_, ctx))).toMap
val gradDict = argNames.zip(argShapes).filter { case (name, shape) =>
!datasAndLabels.contains(name)
}.map(x => x._1 -> NDArray.empty(x._2, ctx) ).toMap
argDict.foreach { case (name, ndArray) =>
if (!datasAndLabels.contains(name)) {
initializer.initWeight(name, ndArray)
}
}
val data = argDict("data")
val label = argDict("label")
val executor = lstm.bind(ctx, argDict, gradDict)
val opt = new Adam(learningRate = LEARNING_RATE, wd = 0.0001f)
val paramsGrads = gradDict.toList.zipWithIndex.map { case ((name, grad), idx) =>
(idx, name, grad, opt.createState(idx, argDict(name)))
}
val evalMetric = new CustomMetric(mathTool.perplexity, "perplexity")
val batchEndCallback = new Callback.Speedometer(BATCH_SIZE, 50)
val epochEndCallback = doCheckpoint("./model/obama")
for (epoch <- 0 until N_EPOCH) {
// Training phase
val tic = System.currentTimeMillis
evalMetric.reset()
var nBatch = 0
var epochDone = false
// Iterate over training data.
traindata.reset()
while (!epochDone) {
var doReset = true
while (doReset && traindata.hasNext) {
val dataBatch = traindata.next()
data.set(dataBatch.data(0))
label.set(dataBatch.label(0))
executor.forward(isTrain = true)
executor.backward()
paramsGrads.foreach { case (idx, name, grad, optimState) =>
opt.update(idx, argDict(name), grad, optimState)
}
// evaluate at end, so out_cpu_array can lazy copy
evalMetric.update(dataBatch.label, executor.outputs)
nBatch += 1
batchEndCallback.invoke(epoch, nBatch, evalMetric)
}
if (doReset) {
traindata.reset()
}
// this epoch is done
epochDone = true
}
val (name, value) = evalMetric.get
println(s"Epoch[$epoch] Train-$name=$value")
val toc = System.currentTimeMillis
println(s"Epoch[$epoch] Time cost=${toc - tic}")
epochEndCallback.invoke(epoch, lstm, argDict, auxDict)
}
executor.dispose()
}
def trainCharRNN{
val vocab = seq_IO.build_vocabulary(INPUT_FILE_NAME, VOCAB_FILE_NAME)
val n_alphabet = vocab.size
val lstm = Lstm.LSTM(LSTM_N_LAYER, SEQ_LENGTH, DIM_HIDDEN, DIM_EMBED, n_alphabet, DROPOUT)
// lstm.listArguments().foreach {println}
// println(lstm.debug())
val source = Source.fromFile(INPUT_FILE_NAME)
val seq_input = source.mkString
val len_train = math.round(seq_input.length()*DATA_TRAIN_RATIO).toInt
val text_train = seq_input.take(len_train)
val text_val = seq_input.drop(len_train)
val traindata = seq_IO.RNN_OneHot_DataIter(text = text_train,labelName = "label",vocab = vocab,batch_size = BATCH_SIZE,seq_len = SEQ_LENGTH)
val valdata = seq_IO.RNN_OneHot_DataIter(text = text_val,labelName = "label",vocab = vocab,batch_size = BATCH_SIZE,seq_len = SEQ_LENGTH)
val h = (0 until LSTM_N_LAYER).map(idx =>{
(s"_l${idx}_init_h",Shape(BATCH_SIZE,DIM_HIDDEN))
}).toMap
val c = (0 until LSTM_N_LAYER).map(idx =>{
(s"_l${idx}_init_c",Shape(BATCH_SIZE,DIM_HIDDEN))
}).toMap
Base.INPUTSHAPE_AUXILIARY = h ++ c
// val modelBase = new FeedForward(lstm, Context.defaultCtx, numEpoch = N_EPOCH,optimizer = new SGD(learningRate = LEARNING_RATE, momentum = MOMENTUM, wd = WEIGHT_DECAY),name = "lstm")
//// modelBase.fit(traindata, traindata,new ReconsAccuracy())
// modelBase.fit(traindata,valdata,new ReshapeAccuracy())
// modelBase.saveModelParams(s"./model/charLSTM.params_${N_EPOCH}")
}
def doCheckpoint(prefix: String): EpochEndCallback = new EpochEndCallback {
override def invoke(epoch: Int, symbol: Symbol,
argParams: Map[String, NDArray],
auxStates: Map[String, NDArray]): Unit = {
Model.saveCheckpoint(prefix, epoch + 1, symbol, argParams, auxStates)
}
}
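// Hedged note (not in the original): doCheckpoint wraps Model.saveCheckpoint in an
// EpochEndCallback so a training loop can persist the symbol and parameters once per
// epoch, as train_vec_CharLSTM_lxg does via epochEndCallback.invoke(epoch, lstm, argDict, auxDict).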
}
|
Liuxg16/BrainMatrix
|
scalakernel/src/main/java/thu/brainmatrix/nce_loss/Toy_nce.scala
|
package thu.brainmatrix.nce_loss
import thu.brainmatrix._
import thu.brainmatrix.optimizer.SGD
import scala.collection.Set
/**
* @author liuxianggen
* @date 20160811
* @brief
* @return
* @example
* @note the performance is so strange!!!
*/
object Toy_nce {
def main(args:Array[String]){
training_DIY
}
def training_DIY{
val batch_size = 128
val vocab_size = 10000
val feature_size = 100
val num_label = 6
val learningRate = 8f//8f=> 95.53%
val numEpoch = 2
val dataTrain = new DataIter_nce(100000,batch_size,feature_size,vocab_size,num_label)
val dataTest = new DataIter_nce(1000,batch_size,feature_size,vocab_size,num_label)
val network = get_net(vocab_size,num_label)
val ctx = Context.cpu(0)
val datasAndLabels = dataTrain.provideData ++ dataTrain.provideLabel
val (argShapes, outputShapes, auxShapes) = network.inferShape(datasAndLabels)
val initializer = new Xavier(factorType = "in", magnitude = 2.34f)
val argNames = network.listArguments()
val argDict = argNames.zip(argShapes.map(NDArray.zeros(_, ctx))).toMap
val auxNames = network.listAuxiliaryStates()
val auxDict = auxNames.zip(auxShapes.map(NDArray.zeros(_, ctx))).toMap
// a map from each trainable parameter name to the NDArray holding its gradient
val gradDict = argNames.zip(argShapes).filter {
case (name, shape) =>
!datasAndLabels.contains(name)
}.map(x => x._1 -> NDArray.empty(x._2, ctx) ).toMap
argDict.foreach { case (name, ndArray) =>
if (!datasAndLabels.contains(name)) {
initializer.initWeight(name, ndArray)
}
}
val data = argDict("data")
val label = argDict("label")
val executor = network.bind(ctx, argDict, gradDict)
val opt = new SGD(learningRate = learningRate, momentum=0.9f, wd = 0.0f)
val paramsGrads = gradDict.toList.zipWithIndex.map { case ((name, grad), idx) =>
(idx, name, grad, opt.createState(idx, argDict(name)))
}
val evalMetric = new NceAccuracy()
val batchEndCallback = new Callback.Speedometer(batch_size, 50)
// val epochEndCallback = Utils.doCheckpoint(s"${incr.saveModelPath}/obama")
for (epoch <- 0 until numEpoch) {
// Training phase
val tic = System.currentTimeMillis
evalMetric.reset()
var nBatch = 0
var epochDone = false
// Iterate over training data.
dataTrain.reset()
while (!epochDone) {
var doReset = true
while (doReset && dataTrain.hasNext) {
val dataBatch = dataTrain.next()
data.set(dataBatch.data(0))
label.set(dataBatch.label(0))
executor.forward(isTrain = true)
executor.backward()
paramsGrads.foreach { case (idx, name, grad, optimState) =>
opt.update(idx, argDict(name), grad, optimState)
}
// evaluate at end, so out_cpu_array can lazy copy
evalMetric.update(dataBatch.label, executor.outputs)
nBatch += 1
batchEndCallback.invoke(epoch, nBatch, evalMetric)
dataBatch.dispose()
}
if (doReset) {
dataTrain.reset()
}
// this epoch is done
epochDone = true
}
var (name, value) = evalMetric.get
println(s"Epoch[$epoch] Train-$name=$value")
val toc = System.currentTimeMillis
println(s"Epoch[$epoch] Time cost=${toc - tic}")
//VALIDATION
evalMetric.reset()
dataTest.reset()
// TODO: make DataIter implement Iterator
while (dataTest.hasNext) {
val evalBatch = dataTest.next()
data.set(evalBatch.data(0))
label.set(evalBatch.label(0))
executor.forward(isTrain = false)
evalMetric.update(evalBatch.label, executor.outputs)
evalBatch.dispose()
}
val (name_eval, value_eval) = evalMetric.get
println(s"Epoch[$epoch] Validation-$name_eval=$value_eval")
// epochEndCallback.invoke(epoch, symbol, argDict, auxDict)
}
executor.dispose()
}
def training_model{
val batch_size = 128
val vocab_size = 1000
val feature_size = 100
val num_label = 6
val data_train = new DataIter_nce(10000,batch_size,feature_size,vocab_size,num_label)
val data_test = new DataIter_nce(1000,batch_size,feature_size,vocab_size,num_label)
val network = get_net(vocab_size,num_label)
val devs = Context.gpu(0)
val models = new FeedForward(symbol = network,ctx = devs,
numEpoch = 8,optimizer = new SGD(learningRate = 0.05f,momentum=0.9f,wd = 0.0001f),
initializer = new Xavier(factorType = "in", magnitude = 2.34f))
models.fit(trainData = data_train,evalData = data_test,evalMetric = new Accuracy(),
kvStoreType = "local",epochEndCallback = null, batchEndCallback = new Callback.Speedometer(batch_size, 50))
}
def get_net(vocab_size:Int,num_label:Int):Symbol = {
val data = Symbol.Variable("data")
val label = Symbol.Variable("label")
val label_weight = Symbol.Variable("label_weight")
val embed_weight = Symbol.Variable("embed_weight")
var pred = Symbol.FullyConnected()(Map("data" -> data, "num_hidden" -> 100))
// pred = Symbol.FullyConnected()(Map("data" -> pred, "num_hidden" -> vocab_size))
nce_loss(pred,label,label_weight,embed_weight,vocab_size,100,num_label)
}
def nce_loss(data:Symbol,label:Symbol,label_weight:Symbol,embed_weight:Symbol,vocab_size:Int,num_hidden:Int,num_label:Int) :Symbol = {
val label_embed = Symbol.Embedding("label_embed")(Map("data" -> label, "input_dim" -> vocab_size,
"weight" -> embed_weight, "output_dim" -> num_hidden))
val hidden = Symbol.Reshape()(Map("data"->data, "shape" -> s"(-1,1,$num_hidden)"))
val pred = Symbol.broadcast_mul(hidden,label_embed)
val pred1 = Symbol.Sum("sum")(Map("data"->pred,"axis"->2))
Symbol.LogisticRegressionOutput("lro")(Map("data"->pred1,"label"->label_weight))
}
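// Hedged reading of nce_loss above (comments only, not in the original): label carries
// the true id followed by num_label-1 sampled negative ids; label_embed looks up one
// num_hidden-dim vector per candidate id, broadcast_mul with the reshaped hidden vector
// followed by Sum over axis 2 yields one logit per candidate, and
// LogisticRegressionOutput fits those logits against label_weight
// (1 for the true id, 0 for the negatives).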
}
/**
* @author liuxianggen
* @date 20150911
* @brief a mock data iterator for NCE training
* @param count: the total number of samples to generate
* @param batch_size: the number of samples per batch
* @param feature_size: the dimension of each feature vector
* @param vocab_size: the number of classes
* @param num_label: the number of candidates per sample (one true id plus num_label-1 negatives)
* @return
* @example
* @note
*/
class DataIter_nce(count:Int,batch_size:Int,feature_size:Int,vocab_size: Int,num_label:Int) extends DataIter {
/**
* author liuxianggen
* brief generates one mock sample: a 3-hot feature vector together with a learnable label,
* since the label is a deterministic function of the active feature positions
* return:
* (data, label, label_weight): label holds the true id followed by num_label-1 random
* negative ids, and label_weight marks the true id with 1 and the negatives with 0
*/
def mock_sample :(Array[Float],Array[Float],Array[Float]) = {
val ret = Array.fill[Float](feature_size)(0f)
var rn = Set[Int]()
while(rn.size<3){
rn = rn + scala.util.Random.nextInt(feature_size-1)
}
var s = 0
rn.foreach { x => {
ret(x)= 1.0f
s *= feature_size
s += x
}}
val label = (s % vocab_size).toFloat +: (0 until num_label-1).map(_ => scala.util.Random.nextInt(vocab_size -1).toFloat)
val label_weight = 1f +: Array.fill[Float](num_label-1)(0f)
(ret, label.toArray,label_weight)
}
private var idx = 0
override def batchSize: Int = batch_size
/**
* the index of current batch
* @return
*/
override def getIndex(): IndexedSeq[Long] = IndexedSeq[Long]()
// The name and shape of data provided by this iterator
override def provideData: Map[String, Shape] = Map("data"->Shape(batch_size,feature_size))
/**
* get the number of padding examples
* in current batch
* @return number of padding examples in current batch
*/
override def getPad(): Int = 0
// The name and shape of label provided by this iterator
override def provideLabel: Map[String, Shape] = Map("label"->Shape(batch_size,num_label),"label_weight"->Shape(batch_size,num_label))
val datas = (0 until (count/batch_size)).map(x =>{
val mock_samples = (0 until batch_size).map(i =>{
mock_sample
}).toArray
val data_arr = mock_samples.map(_._1).foldLeft(Array[Float]())(_ ++ _)
val label_arr = mock_samples.map(_._2).foldLeft(Array[Float]())(_ ++ _)
val label_weight_arr = mock_samples.map(_._3).foldLeft(Array[Float]())(_ ++ _)
val data =NDArray.array(data_arr,Shape(batch_size,feature_size))
val label = NDArray.array(label_arr,Shape(batch_size,num_label))
val label_weight = NDArray.array(label_weight_arr,Shape(batch_size,num_label))
(data,label,label_weight)
}).toArray
// println(s"DataIter_ batches:${datas.length}")
/**
* wrong template (kept for reference): see the corrected next() below, which copies the NDArrays
*/
// override def next(): DataBatch = {
// val tempidx = idx
// idx += 1
// datas(tempidx)
// }
override def next(): DataBatch = {
val tempidx = idx
idx += 1
val (data,label,label_weight) = datas(tempidx)
// new DataBatch(IndexedSeq(data),IndexedSeq(label),getIndex(),getPad())//error expression
new DataBatch(IndexedSeq(data.copy()),IndexedSeq(label.copy(),label_weight.copy()),getIndex(),getPad())
}
override def reset(): Unit = {
idx = 0
}
override def hasNext: Boolean = {
if (idx < datas.length) true else false
}
/**
* get data of current batch
* @return the data of current batch
*/
override def getData(): IndexedSeq[NDArray] = IndexedSeq(datas(idx)._1)
/**
* Get label of current batch
* @return the label of current batch
*/
override def getLabel(): IndexedSeq[NDArray] = IndexedSeq(datas(idx)._2, datas(idx)._3)
}
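// Hedged usage sketch (illustrative, not part of the original file): inspecting one
// mock batch produced by DataIter_nce. Shapes follow provideData/provideLabel above.
object DataIterNceSketch {
def main(args: Array[String]): Unit = {
val it = new DataIter_nce(count = 256, batch_size = 128, feature_size = 100, vocab_size = 10000, num_label = 6)
val batch = it.next()
println(batch.data(0).shape) // (128,100): 3-hot feature vectors
println(batch.label(0).shape) // (128,6): the true id followed by 5 sampled negative ids
println(batch.label(1).shape) // (128,6): label_weight, 1 for the true id and 0 otherwise
batch.dispose()
}
}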
|
Liuxg16/BrainMatrix
|
scalakernel/src/test/java/thu/brainmatrix/suite/NDArraySuite.scala
|
package thu.brainmatrix.suite
import thu.brainmatrix.NDArray
import thu.brainmatrix.Random
import thu.brainmatrix.Shape
import thu.brainmatrix.Context
import scala.Vector
import org.scalatest.{ BeforeAndAfterAll, FunSuite }
/**
* by liuxianggen,guoshen
* 2016-8-19
* to test the operations of NDArray
*/
class NDArraySuite extends FunSuite with BeforeAndAfterAll {
/**
* 2016-12-23
*
*/
/**
* 2016-12-10
*
*/
test("NDArray.concatenate"){
val ctx = Context.cpu(0)
val nda = NDArray.ones(Shape(2,3),ctx)
// println(nda)
// println(NDArray.concatenate(nda,nda))
}
/**
* 2016-12-10
*
*/
test("NDArray.argmaxChannel"){
val ctx = Context.cpu(0)
val e_ik = Array.fill[Array[Float]](3)(Array.fill[Float](4)(0f))
var n = 0;
val arr = e_ik.map(e_i => e_i.map(eij =>{
n += 1
eij+ 2*(n%2)+ n
}))
val nda = NDArray.array(arr.flatten,Shape(4,3), ctx)
// println(nda)
// println(NDArray.argmaxChannel(nda).shape)
}
/**
* 2016-12-10
* test an NDArray operator of my own
* this function cannot be used on GPU
*/
test("NDArray.array"){
val ctx = Context.cpu(0)
val e_ik = Array.fill[Array[Float]](3)(Array.fill[Float](4)(0f))
var n = 0;
val arr = e_ik.map(e_i => e_i.map(eij =>{
n += 1
eij+n
}))
// println(NDArray.array(arr.flatten,Shape(4,3), ctx))
}
/**
* 2016-12-10
* test an NDArray operator of my own
* this function cannot be used on GPU
*/
test("Normalize"){
val ctx = Context.cpu(0)
val nda = NDArray.ones(Shape(2,3))*4
// println(NDArray.Normalize(nda))
}
/**
* 2016-11-29
* test an NDArray operator of my own
* this function cannot be used on GPU
*/
test("Random-uniform"){
val ctx = Context.cpu(0)
import thu.brainmatrix.Random
val nda = Random.uniform(0,1, Shape(3,4), ctx, null)
// println(nda)
}
/**
* 2016-11-30
*/
test("one-hot"){
val ctx = Context.cpu(0)
val indices = NDArray.range(0,4)+1.8f
// println(indices)
val out = NDArray.zeros(Shape(4,4), ctx)
NDArray.onehotEncode(indices, out)
// println(out)
}
/**
* 2016-12-2
*/
test("one-hot-bigger"){
val ctx = Context.cpu(0)
val indices = NDArray.range(4,8)+0.5f
// println(indices)
val out = NDArray.ones(Shape(4,10), ctx)
val out1 = NDArray.ones(Shape(4,10), ctx)*9
NDArray.onehotEncode(indices, out)
// println(out * out1)
}
/**
* 2016-11-10
* test an NDArray operator of my own
* this function cannot be used on GPU
*/
test("run gpu"){
val ctx = Context.cpu(0)
// val ctxg= Context.gpu(0)
var nda1 = NDArray.ones(ctx, 10,10)*2
var nda2 = NDArray.ones(ctx, 10,10)*3
var n=0
// while(n<1000){
// var j=0
// println(n)
// while(j<10000){
// var nda3 = NDArray.exp(-nda1)*NDArray.sigmod(nda1)
// var nda4 = NDArray.ones(ctx, 10,10)/nda3
// var nda5 = NDArray.ones(ctx, 10,10)/nda4
// var nda6 = NDArray.ones(ctx, 10,10)/nda5
// var nda7 = NDArray.ones(ctx, 10,10)/nda6
// var nda8 = NDArray.ones(ctx, 10,10)/nda7
// var nda9 = NDArray.ones(ctx, 10,10)/nda8
// var nda10 = NDArray.ones(ctx, 10,10)/nda9
// var nda11 = NDArray.ones(ctx, 10,10)/nda10
// var nda12 = NDArray.ones(ctx, 10,10)/nda11
// var nda13 = NDArray.ones(ctx, 10,10)/nda12
// var nda14 = NDArray.ones(ctx, 10,10)/nda13
// var nda15 = NDArray.ones(ctx, 10,10)/nda14
// var nda16 = NDArray.ones(ctx, 10,10)/nda15
// var nda17 = NDArray.ones(ctx, 10,10)/nda16
// var nda18 = NDArray.ones(ctx, 10,10)/nda17
// var nda19 = NDArray.ones(ctx, 10,10)/nda18
// var nda20 = NDArray.ones(ctx, 10,10)/nda19
//
// j += 1
// }
// n += 1
// }
// println(nda2)
}
/**
* 2016-11-10
* test an NDArray operator of my own
* this function cannot be used on GPU
*/
test("copy"){
val ctx = Context.cpu(0)
val ctxg= Context.gpu(0)
val nda1 = NDArray.ones(ctx, 1,3)*2
val nda2 = NDArray.ones(ctx, 1,3)*3
var tt = nda1 *3
// tt += NDArray.ones(ctx, 1,3)*3
// nda1.copyTo(nda2.slice(1))
// println(nda1)
}
/**
* test the computational order
*/
test("arithmetic assosiation "){
val ctx = Context.cpu(0)
val nda1 = NDArray.ones(ctx, 2,3)*2
val nda2 = NDArray.ones(ctx, 2,3)*3
// println(nda1 - nda1 * nda2)
// println(nda1 -( nda1 * nda2))
// println(nda1 * nda1 - nda2)
}
/**
* 2016-11-10
* test an NDArray operator of my own
* this function cannot be used on GPU
*/
test("integate_lxg"){
val ctx = Context.cpu(0)
val nda1 = NDArray.ones(ctx, 2,3)
val nda2 = NDArray.ones(ctx, 2,3)
// println(nda2)
// println(NDArray.integate_lxg(nda2,nda1))
}
/**
* 2016-11-10
* test an NDArray operator of my own
*
*/
test("setslice_lxg"){
// val ctx = Context.gpu(0)
// val nda1 = NDArray.ones(ctx, 9,14) * 10
// val nda2 = NDArray.ones(ctx, 9, 1)
// println(nda1)
// println(nda2)
// NDArray.setColumnSlice(nda1,nda2,0)
// println(nda1)
// println(nda2)
}
test("dot0"){
val arr = NDArray.ones(5, 4)
// println(arr)
val arr1 = NDArray.ones(4, 1)
val res = NDArray.dot(arr, arr1)
// println(res)
}
test("transpose"){
val arr = NDArray.range(0, 5, 4)
// println(arr)
val arrr = NDArray.transpose(arr)
// println(arrr)
}
test("reshape"){
val arr = NDArray.range(0, 5, 4)
// println(arr)
val arrr = arr.reshape(Array(5,4))
// println(arrr)
}
test("+"){
val arr = NDArray.range(0, 5, 4)
val arrr = NDArray.array(arr.toArray,Shape(2,10))
}
test("toarray"){
val arr = NDArray.zeros(Shape(2,2))
val arrr = NDArray.ones(Shape(2,1))
}
test("toString"){
val av = NDArray.ones(Shape(2,3,4))
av(1,1,1) =2
// println(av)
}
test("load2map"){
// val pretrained = NDArray.load2Map("./model/charLSTM.params_6")
// println(pretrained.head)
}
test("save NDArray"){
val nda = Map("data"->NDArray.ones(2,3))
NDArray.save("./model/test", nda)
}
test("slice"){
val ind = NDArray.ones(Shape(4,3,2))
// println(ind)
ind(1,1,1) = 3
// println(ind.slice(1).slice(0))
}
test("copyto"){
// val ctx = Context.gpu(0)
// val ind = NDArray.ones(Shape(4,3),ctx)
// val ind2 = NDArray.zeros(Shape(4,3))
// println(ind.copyTo(ctx))
}
test("argmaxChannelTest") {
val nmArr = Random.normal(0f, 1f, Shape(4, 8))
// println(nmArr)
val py = NDArray.argmaxChannel(nmArr)
// println(py)
}
def main1(args: Array[String]) {
TestSet
// TestSetloop
// TestSize
// TestListArrayFunc
// TestRange
// ndarrayOperationTest
// argmaxChannelTest
// meanTest
}
def TestSet {
val num_instance = 15
val input_dim = 10
val data = NDArray.ones(Shape(15, 10))
val label = NDArray.zeros(Shape(num_instance))
for (i <- 0 until num_instance) {
for (j <- 0 until input_dim) {
data(i, j) = i % 5 * 1.0f + (scala.util.Random.nextFloat - 0.5f)
}
label(i) = i % 5
println(label(i))
}
println(label)
}
def TestSize() {
var label = NDArray.zeros(Shape(15, 12))
println(label.size)
}
def TestSetloop {
var label = NDArray.zeros(Shape(15))
for (i <- 0 until 15) {
val temp = (i / 5).floor
println(temp)
label(i) = temp
}
println(label)
}
def TestListArrayFunc {
val lhsArr = Random.uniform(-10f, 10f, Shape(3, 4))
}
def TestRange {
// val arr = NDArray.range(0,10)
// val arr = NDArray.rangeRows(0, 10, 5)
val arr = NDArray.range(0, 10, 3)
println(arr)
}
def ndarrayOperationTest {
val lhs = NDArray.ones(Shape(3, 4))
val rhs = NDArray.ones(Shape(3, 4))
val sum = lhs + rhs
println(sum)
}
def meanTest {
val arr = Random.uniform(0, 10, Shape(4, 5))
print(NDArray.mean(arr))
}
def TestTan {
val Pi = scala.math.Pi.toFloat
val h = NDArray.tan(NDArray.array(Array(0, Pi / 4, Pi / 2, 3 * Pi / 4), Shape(1, 4)))
println(h)
}
def TestTanh {
val Pi = scala.math.Pi.toFloat
val h = NDArray.tanh(NDArray.array(Array(-1, 0, 1, 2), Shape(1, 4)))
println(h)
}
def TestTranspose {
val pre = NDArray.array(Array(1, 2, 3, 4, 5, 6), Shape(1, 6))
val after = NDArray.transpose(pre)
println(pre)
println(after)
}
}
|
Liuxg16/BrainMatrix
|
scalakernel/src/test/java/thu/brainmatrix/suite/Toy_sofrmaxSuite.scala
|
<filename>scalakernel/src/test/java/thu/brainmatrix/suite/Toy_sofrmaxSuite.scala
package thu.brainmatrix.suite
import org.scalatest.{BeforeAndAfterAll, FunSuite}
import thu.brainmatrix.nce_loss.DataIter_
import thu.brainmatrix.nce_loss.DataIter_nce
import thu.brainmatrix.Shape
class Toy_sofrmaxSuite extends FunSuite with BeforeAndAfterAll{
test("dataIter_:dispose()"){
val dataiter_ = new DataIter_(200,32,24,50)
var batch = dataiter_.next()
//println(batch.data(0))
//println(batch.label(0))
batch.dispose()
//println("------------------------------------")
dataiter_.next()
dataiter_.next()
dataiter_.next()
dataiter_.next()
var batch1 = dataiter_.next()
// println(batch1.label(0))
//println("------------------------------------")
dataiter_.reset()
batch1 = dataiter_.next()
// println(batch1.data(0))
// println(batch1.label(0))
}
test("testData"){
val dataiter_ = new DataIter_(100000,128,100,10000)
// println(dataiter_.next().label(0))
}
test("testData_nce"){
val batch_size = 128
val vocab_size = 100
val feature_size = 100
val num_label = 6
val data_train = new DataIter_nce(10000,batch_size,feature_size,vocab_size,num_label)
val batch = data_train.next()
assert(batch.label(0).shape==Shape(128,6))
}
}
|
Liuxg16/BrainMatrix
|
scalakernel/src/main/java/thu/brainmatrix/NameManager.scala
|
<reponame>Liuxg16/BrainMatrix
package thu.brainmatrix
import scala.collection.mutable
/**
* NameManager to do automatic naming.
* User can also inherit this object to change naming behavior.
* @author <NAME>
*/
class NameManager {
val counter: mutable.Map[String, Int] = mutable.HashMap.empty[String, Int]
/**
* Get the canonical name for a symbol.
* This is default implementation.
* When user specified a name,
* the user specified name will be used.
* When user did not, we will automatically generate a name based on hint string.
*
* @param name : The name user specified.
* @param hint : A hint string, which can be used to generate name.
* @return A canonical name for the user.
*/
def get(name: Option[String], hint: String): String = {
name.getOrElse {
if (!counter.contains(hint)) {
counter(hint) = 0
}
val generatedName = s"$hint${counter(hint)}"
counter(hint) += 1
generatedName
}
}
def withScope[T](body: => T): T = {
val oldManager = NameManager.current
NameManager.setCurrentManager(this)
try {
body
} finally {
NameManager.setCurrentManager(oldManager)
}
}
}
object NameManager {
private var _current = new NameManager()
def current: NameManager = _current
private def setCurrentManager(manager: NameManager): Unit = {
_current = manager
}
}
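// Hedged usage sketch (illustrative, not part of the original file): automatic
// naming and scoping with the NameManager above.
object NameManagerSketch {
def main(args: Array[String]): Unit = {
val nm = new NameManager
println(nm.get(None, "fc")) // "fc0": generated from the hint plus a per-hint counter
println(nm.get(None, "fc")) // "fc1": the counter advances for each generated name
println(nm.get(Some("myfc"), "fc")) // "myfc": a user-specified name always wins
nm.withScope {
// inside the block this manager is installed as NameManager.current
assert(NameManager.current eq nm)
}
}
}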
|
Liuxg16/BrainMatrix
|
scalakernel/src/main/java/thu/brainmatrix/Symbol.scala
|
package thu.brainmatrix
import thu.brainmatrix.Base._
import org.slf4j.{Logger, LoggerFactory}
import scala.collection.mutable.Stack
import scala.collection.mutable.ListBuffer
import scala.collection.mutable.ArrayBuffer
import scala.Vector
/**
* Symbolic configuration API of brainmatrix. <br />
* <b>
* WARNING: it is your responsibility to clear this object through dispose().
* NEVER rely on the GC strategy
* </b>
* @author <NAME>
*/
// scalastyle:off finalize
class Symbol private(private[brainmatrix] val handle: SymbolHandle) {
private val logger: Logger = LoggerFactory.getLogger(classOf[Symbol])
private var disposed = false
override protected def finalize(): Unit = {
this.staticGraph.dispose()
}
//global variable for symbol graph
var heads_ : Vector[DataEntry] = Vector()
var staticGraph = new StaticGraph()
def setStaticGraph(sg:StaticGraph){
this.staticGraph = sg
}
/**
* Release the native memory.
* The object shall never be used after it is disposed.
*/
def dispose(): Unit = {
if (!disposed) {
this.staticGraph.dispose()
disposed = true
}
}
// 2015-3-6
/**
* by liuxianggen
* iterative depth-first search over the symbol graph: fvisit is called on each node
* in post-order, i.e. only after all of the node's inputs have been visited
*/
private[brainmatrix] def DFSVisit(fvisit: (NodeRef) => Unit): Unit = {
var res: Vector[NodeRef] = Vector()
var stack: Stack[(NodeRef, Int)] = Stack()
var visited: Set[NodeRef] = Set()
heads_.map(head => {
val ptr = head.source
if (!visited.contains(ptr)) {
stack.push((head.source,0))
visited += (ptr)
}
// stack.foreach(println)
while (!stack.isEmpty) {
var back: (NodeRef, Int) = stack.top
// println("back:"+back._1.value.name)
// check whether all of its inputs have been visited
if (back._2 == (back._1.value.inputs.length)) {
res = res :+ back._1
fvisit(back._1)
stack.pop
} else {
// not all inputs have been visited yet: advance to the next input
var inputs: Vector[DataEntry] = back._1.value.inputs
var input: DataEntry = inputs(back._2)
stack.update(0, (back._1,back._2+1))
// back = (back._1,back._2+1)
val ptr = input.source
//add un-visited node to stack and visited
if (!visited.contains(ptr)) {
stack.push((input.source, 0))
visited += ptr
}
}
}
})
}
def is_atomic(): Boolean = {
return heads_(0).source.value.is_atomic
}
def NumVisibleOutputs(): Int = {
1
}
def NumOutputs():Int = {
heads_.length
}
/**
* 2016-3-15
* by liuxianggen
* finds each variable argument and links it with the corresponding input
*/
def Compose(kwargs: Map[String, Symbol], name: String) {
// the name of this
heads_(0).source.value.name = name
var nmatched: Int = 0
// atomic symbol do not have place holder for all the arguments
if (this.is_atomic()) {
// println(heads_(0).source.value.opRef.value.opName)
val req_args: Vector[String] = heads_(0).source.value.opRef.value.ListArguments
// println(" && ")
// req_args.foreach {println}
(0 until req_args.length).map(i => {
val iter: Symbol = kwargs.getOrElse(req_args(i), null)
if (iter != null) {
// added by liuxianggen; not present in the brainmatrix C++ version
// iter.heads_(0).source.value.backward_source_node = heads_(0).source
heads_(0).source.value.inputs :+= iter.heads_(0)
nmatched += 1
} else {
val noderef = new NodeRef()
val node = new Node(new OperatorPropertyRef, name+"_"+req_args(i))
// added by liuxianggen; not present in the brainmatrix C++ version
// node.backward_source_node = heads_(0).source
noderef.value = node
heads_(0).source.value.inputs :+= new DataEntry(noderef, 0)
if (heads_(0).source.value.attr.size != 0)
heads_(0).source.value.inputs(i).source.value.attr = heads_(0).source.value.attr
}
})
if (nmatched != kwargs.size)
heads_(0).source.value.inputs = Vector()
} else {
System.err.println("should not execute in there of compose! ")
// find all the arguments positions
var (dup_args, max_dup) = this.FindDuplicateArgs
if (max_dup > 1) {
/**
* operations for kvstores
*/
}
this.DFSVisit { noderef =>
{
/**
* this part is for the in-place algorithm
* need complete
*
*/
// (0 until noderef.value.inputs.size).map(i =>{
// val e:DataEntry = noderef.value.inputs(i)
// if(e.source.value.is_variable()){
// /*
// * translate from:
// * auto iter = kwargs.find(e->source->name);
// * if (iter != kwargs.end()) {...
// */
// if(kwargs.contains(e.source.value.name)){
// var target = kwargs(e.source.value.name).heads_(0)
// }
// }
// })
}
}
}
}
/**
* by liuxianggen
* 2016-7-2
*
* for the operations:
* arithmetric
*
*/
def Compose(args: Array[Symbol],name: String) {
require(!heads_(0).source.value.is_variable(),"Variable cannot be composed!")
heads_(0).source.value.name = name
for(i <- 0 until args.length){
require(args(i).NumOutputs()==1,s"Argument $i is a tuple with more than one element; a scalar is required")
}
if(this.is_atomic()){
val req_args :Vector[String]= heads_(0).source.value.opRef.value.ListArguments
// println("--------------------------")
// println(req_args)
// println("--------------------------")
require(args.length==req_args.length,"mismatch of arguments, requires: "+req_args.length+", provided: "+args.length)
heads_(0).source.value.reset_inputs()
for(i <- 0 until args.length){
heads_(0).source.value.inputs :+= args(i).heads_(0)
}
for(i<-args.length until req_args.length){
val noderef = new NodeRef()
val node = new Node(new OperatorPropertyRef, Symbol.DefaultVarName(name,req_args(i)))
// added by liuxianggen; not present in the brainmatrix C++ version
// node.backward_source_node = heads_(0).source
noderef.value = node
heads_(0).source.value.inputs :+= new DataEntry(noderef, 0)
if (heads_(0).source.value.attr.size != 0)
heads_(0).source.value.inputs(i).source.value.attr = heads_(0).source.value.attr
}
}
}
/**
* @author lxg
* @date 20160706
* @brief get the index-th symbol from this grouped symbol
* @param index
* @return symbol
* @note
*/
def get(index:Int):Symbol = {
require(index<this.heads_.length,"the index overcome the length of group size!!")
val s = new Symbol((new SymbolHandleRef).value)
s.heads_ :+= this.heads_(index)
s
}
/**
* 2016-3-15
* by liuxianggen
* find the maximum duplication count among the variable arguments
*
*/
private def FindDuplicateArgs: (Map[String, Int], Int) = {
import scala.collection.mutable.Map
var out = Map[String, Int]()
var max_dup: Int = 1;
this.DFSVisit { noderef =>
{
if (noderef.value.is_variable)
if (out.contains(noderef.value.name)) {
out(noderef.value.name) += 1
max_dup = Math.max(max_dup, out(noderef.value.name))
} else
out(noderef.value.name) = 1
}
}
(out.toMap, max_dup)
}
/**
* 2016-3-14
* by liuxianggen
* the key function that converts this symbol graph into a static graph
*/
def ToStaticGraph() {
var node_order: Vector[NodeRef] = Vector()
var node_index: Map[NodeRef, Int] = Map()
// this.staticGraph.arg_nodes = Vector()
// this.staticGraph.nodes = Vector()
this.staticGraph.reset
this.DFSVisit { noderef =>
{
var nid: Int = node_index.size
node_index += (noderef -> nid)
if (noderef.value.is_variable()) {
this.staticGraph.arg_nodes :+= nid
}
node_order :+= noderef
}
}
//setup nodes
/**
* different from the C++ version: in Scala the node is constructed first
*/
(0 until node_order.size).map(nid => {
val ophandle = new OperatorPropertyRef
var node: Node = new Node(ophandle)
if (node_order(nid).value.opRef.value != null) {
node.opRef.value = node_order(nid).value.opRef.value.Copy()
this.staticGraph.nodes :+= node
} else {
this.staticGraph.nodes :+= node
}
if (node_order(nid).value.backward_source_node.value != null) {
this.staticGraph.nodes(nid).backward_source_id = node_index(node_order(nid).value.backward_source_node)
} else {
this.staticGraph.nodes(nid).backward_source_id = -1
}
if (node_order(nid).value.attr != null) {
this.staticGraph.nodes(nid).attr = node_order(nid).value.attr
}
this.staticGraph.nodes(nid).name = node_order(nid).value.name
/*
* out_graph.nodes(nid).inputs.clear
*/
this.staticGraph.nodes(nid).inputs = Vector()
(node_order(nid).value.inputs).map(src => {
var e: DataEntry = new DataEntry(new NodeRef, src.index)
e.source_id = node_index(src.source)
this.staticGraph.nodes(nid).inputs :+= e
})
})
this.staticGraph.heads = Vector()
this.heads_.foreach { head =>
{
var e: DataEntry = new DataEntry(new NodeRef, head.index)
e.source_id = node_index(head.source)
this.staticGraph.heads :+= e
}
}
}
def debug():String = {
this.ToStaticGraph()
this.staticGraph.debug
}
/**
* @author liuxianggen
* @date 20160708
* @brief given the shapes of the inputs, get the full shape info for this symbol graph
* the shape info includes:
* inShapeData: shapes of all the arg symbols, in order
* outShapeData: shapes of all the heads in heads_ of the symbol; more than one head means this symbol is a group
* auxShapeData: shapes of the auxiliary states (still to be clarified)
* @param kwargs: maps the name of each input to its shape, such as Map("data" -> Shape(1, 3, 4, 5))
* @return as described above
* @note: when this is a group, this function finds every head and returns the info for each of them
* @example:
* sym.inferShape(Map("data" -> Shape(1, 3, inputSize._1, inputSize._2)))
*/
def inferShape(kwargs:Map[String,Shape]): (Seq[Shape], Seq[Shape], Seq[Shape]) = {
val inShapeData = ListBuffer.empty[Array[Int]]
val outShapeData = ListBuffer.empty[Array[Int]]
val auxShapeData = ListBuffer.empty[Array[Int]]
val complete = new RefInt
this.ToStaticGraph()
this.staticGraph.inferShape(kwargs,inShapeData, outShapeData, auxShapeData, complete)
if (complete.value != 0) {
(inShapeData.map(Shape(_)), outShapeData.map(Shape(_)), auxShapeData.map(Shape(_)))
} else {
(null, null, null)
}
}
/**
* 2016-3-23
* by liuxianggen
*/
def SetAttr(key:String,value:String){
val node:NodeRef = heads_(0).source
heads_.foreach { e => {
require(node == e.source,"error")
// if(node == e.source)
// println("True")
}
}
if(node.value.attr.size == 0){
node.value.attr = scala.collection.mutable.Map[String,String]()
}
node.value.attr(key) = value
}
// Set the attribute of the symbol.
def setAttr(attr: Map[String, String]): Unit = {
attr.foreach { case (key, value) =>
SetAttr(key,value)
}
}
def +(other: Symbol): Symbol = Symbol.createFromListedSymbols("_Plus")(Array(this, other))
def +[@specialized(Int, Float, Double) V](other: V): Symbol = {
Symbol.createFromListedSymbols("_PlusScalar")(Array(this), Map("scalar" -> other.toString))
}
def -(other: Symbol): Symbol = Symbol.createFromListedSymbols("_Minus")(Array(this, other))
def -[@specialized(Int, Float, Double) V](other: V): Symbol = {
Symbol.createFromListedSymbols("_MinusScalar")(Array(this), Map("scalar" -> other.toString))
}
def *(other: Symbol): Symbol = Symbol.createFromListedSymbols("_Mul")(Array(this, other))
def *[@specialized(Int, Float, Double) V](other: V): Symbol = {
Symbol.createFromListedSymbols("_MulScalar")(Array(this), Map("scalar" -> other.toString))
}
def /(other: Symbol): Symbol = Symbol.createFromListedSymbols("_Div")(Array(this, other))
def /[@specialized(Int, Float, Double) V](other: V): Symbol = {
Symbol.createFromListedSymbols("_DivScalar")(Array(this), Map("scalar" -> other.toString))
}
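// Hedged usage sketch (comments only, not in the original): the operators above
// compose new symbols through createFromListedSymbols, e.g.
// val a = Symbol.Variable("a")
// val b = Symbol.Variable("b")
// val c = a + b * 2f // builds "_MulScalar" for b * 2f, then "_Plus" for the sum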
//need to change
override def clone(): Symbol = {
val clonedHandle = new SymbolHandleRef
checkCall(_LIB.mxSymbolCopy(handle, clonedHandle))
new Symbol(clonedHandle.value)
}
// def get(index: Int): Symbol = {
// val newHandle = new SymbolHandleRef
// checkCall(_LIB.mxSymbolGetOutput(handle, index, newHandle))
// new Symbol(handle = newHandle.value)
// }
def get(name: String): Symbol = {
var index: Int = -1
for ((output, i) <- listOutputs().view.zipWithIndex) {
if (output == name) {
require(index == -1, s"There are multiple outputs with name $name")
index = i
}
}
require(index >= 0, s"Cannot find output that matches name $name")
get(index)
}
/**
* Infer the type of outputs and arguments of given known types of arguments.
* Tuple of Nones is returned if there is not enough information passed in.
* An error will be raised if there is inconsistency found in the known types passed in.
* @param args Provide type of arguments in a positional way. Unknown type can be marked as null
* @return
* argTypes : list of numpy.dtype or None
* List of types of arguments.
* The order is in the same order as list_arguments()
* outTypes : list of numpy.dtype or None
* List of types of outputs.
* The order is in the same order as list_outputs()
* auxTypes : list of numpy.dtype or None
* List of types of outputs.
* The order is in the same order as list_auxiliary()
*/
def inferType(args: Class[_ >: Float with Int with Double]*)
: (Seq[Class[_ >: Float with Int with Double]],
Seq[Class[_ >: Float with Int with Double]],
Seq[Class[_ >: Float with Int with Double]]) = {
val sdata: Array[Int] = args.map(NDArray.DTYPE_NATIVE_TO_MX.getOrElse(_, -1)).toArray
inferType(null, sdata)
}
/**
* Infer the type of outputs and arguments of given known types of arguments.
* Tuple of Nones is returned if there is not enough information passed in.
* An error will be raised if there is inconsistency found in the known types passed in.
* @param kwargs Provide keyword arguments of known types.
* @return
* argTypes : list of numpy.dtype or None
* List of types of arguments.
* The order is in the same order as list_arguments()
* outTypes : list of numpy.dtype or None
* List of types of outputs.
* The order is in the same order as list_outputs()
* auxTypes : list of numpy.dtype or None
* List of types of outputs.
* The order is in the same order as list_auxiliary()
*/
def inferType(kwargs: Map[String, Class[_ >: Float with Int with Double]])
: (Seq[Class[_ >: Float with Int with Double]],
Seq[Class[_ >: Float with Int with Double]],
Seq[Class[_ >: Float with Int with Double]]) = {
val filteredArgs = kwargs.filter { case (key, value) =>
NDArray.DTYPE_NATIVE_TO_MX.contains(value)
}
val keys = filteredArgs.keys.toArray
val sdata = filteredArgs.values.map(NDArray.DTYPE_NATIVE_TO_MX(_)).toArray
inferType(keys, sdata)
}
private def inferType(keys: Array[String], values: Array[Int])
: (Seq[Class[_ >: Float with Int with Double]],
Seq[Class[_ >: Float with Int with Double]],
Seq[Class[_ >: Float with Int with Double]]) = {
val argTypeData = ListBuffer.empty[Int]
val outTypeData = ListBuffer.empty[Int]
val auxTypeData = ListBuffer.empty[Int]
val complete = new RefInt
checkCall(_LIB.mxSymbolInferType(
handle, keys, values, argTypeData, outTypeData, auxTypeData, complete))
if (complete.value != 0) {
(argTypeData.map(NDArray.DTYPE_MX_TO_NATIVE),
outTypeData.map(NDArray.DTYPE_MX_TO_NATIVE),
auxTypeData.map(NDArray.DTYPE_MX_TO_NATIVE))
} else {
(null, null, null)
}
}
/**
* Infer the shape of outputs and arguments of given known shapes of arguments.
* User can either pass in the known shapes in positional way or keyword argument way.
* Tuple of Nones is returned if there is not enough information passed in.
* An error will be raised if there is inconsistency found in the known shapes passed in.
* @param args Provide shape of arguments in a positional way.
* Unknown shape can be marked as None
* @return
* argShapes List of shapes of arguments. The order is in the same order as list_arguments()
* outShapes List of shapes of outputs. The order is in the same order as list_outputs()
* auxShapes List of shapes of outputs. The order is in the same order as list_auxiliary()
*/
// def inferShape(args: Shape*): (Seq[Shape], Seq[Shape], Seq[Shape]) = {
// val keys: Array[String] = null
// val indPtr = ArrayBuffer(0)
// val sdata = ArrayBuffer.empty[Int]
// args.foreach { shape =>
// if (shape != null) {
// sdata ++= shape.toVector
// indPtr += sdata.size
// }
// }
// inferShape(keys, indPtr.toArray, sdata.toArray)
// }
/**
* Infer the shape of outputs and arguments of given known shapes of arguments.
* User can either pass in the known shapes in positional way or keyword argument way.
* Tuple of Nones is returned if there is not enough information passed in.
* An error will be raised if there is inconsistency found in the known shapes passed in.
* @param kwargs Provide keyword arguments of known shapes.
* @return
* argShapes List of shapes of arguments. The order is in the same order as list_arguments()
* outShapes List of shapes of outputs. The order is in the same order as list_outputs()
* auxShapes List of shapes of outputs. The order is in the same order as list_auxiliary()
*/
// def inferShape(kwargs: Map[String, Shape]): (Seq[Shape], Seq[Shape], Seq[Shape]) = {
// val keys = ArrayBuffer.empty[String]
// val indPtr = ArrayBuffer(0)
// val sdata = ArrayBuffer.empty[Int]
// kwargs.foreach { case (key, shape) =>
// keys += key
// sdata ++= shape.toVector
// indPtr += sdata.size
// }
// inferShape(keys.toArray, indPtr.toArray, sdata.toArray)
// }
//
// def inferShape(keys: Array[String], indPtr: Array[Int], values: Array[Int])
// : (Seq[Shape], Seq[Shape], Seq[Shape]) = {
// val argShapeData = ListBuffer.empty[Array[Int]]
// val outShapeData = ListBuffer.empty[Array[Int]]
// val auxShapeData = ListBuffer.empty[Array[Int]]
// val complete = new RefInt
//
// checkCall(_LIB.mxSymbolInferShape(handle, indPtr.size - 1, keys, indPtr, values,
// argShapeData, outShapeData, auxShapeData, complete))
// if (complete.value != 0) {
// (argShapeData.map(s => Shape(s)),
// outShapeData.map(s => Shape(s)),
// auxShapeData.map(s => Shape(s)))
// } else {
// (null, null, null)
// }
// }
/**
* Get attribute string from the symbol, this function only works for non-grouped symbol.
* @param key The key to get attribute from.
* @return value The attribute value of the key, or None if the attribute does not exist.
*/
// def attr(key: String): Option[String] = {
// val ret = new RefString
// val success = new RefInt
// checkCall(_LIB.mxSymbolGetAttr(handle, key, ret, success))
// if (success.value != 0) {
// Option(ret.value)
// } else {
// None
// }
// }
/**
* Invoke symbol as function on inputs.
* @param name resulting symbol name
* @param symbols provide named symbols
* @return the resulting symbol
*/
def apply(name: String, symbols: Map[String, Symbol]): Symbol = {
val s = clone()
s.compose(name, symbols)
s
}
/**
* Get a debug string.
* @return Debug string of the symbol.
*/
def debugStr: String = {
val str = new RefString
checkCall(_LIB.mxSymbolPrint(handle, str))
str.value
}
// Set the attribute of the symbol.
// private def setAttr(attr: Map[String, String]): Unit = {
// attr.foreach { case (key, value) =>
// checkCall(_LIB.mxSymbolSetAttr(handle, key, value))
// }
// }
/**
* Save symbol into file.
* You can also use pickle to do the job if you only work in Python.
* The advantage of load/save is that the file is language agnostic.
* This means a file saved using save can be loaded by other language bindings of mxnet.
* You also get the benefit of being able to directly load/save from cloud storage (S3, HDFS)
*
* @param fname The name of the file
* - s3://my-bucket/path/my-s3-symbol
* - hdfs://my-bucket/path/my-hdfs-symbol
* - /path-to/my-local-symbol
* @see Symbol.load : Used to load symbol from file.
*/
def save(fname: String): Unit = {
this.ToStaticGraph()
this.staticGraph.saveToFile(fname)
}
/**
* Compose symbol on inputs.
* This call mutates the current symbol.
* @param name resulting symbol name
* @param symbols provide positional arguments
* @return the resulting symbol
*/
private def compose(name: String, symbols: Array[Symbol]): Unit = {
val args = symbols.map(_.handle)
checkCall(_LIB.mxSymbolCompose(handle, name, null, args))
}
private def compose(name: String, symbols: Map[String, Symbol]): Unit = {
val keys = symbols.keys.toArray
val args = symbols.values.map(_.handle).toArray
checkCall(_LIB.mxSymbolCompose(handle, name, keys, args))
}
/**
* Bind current symbol to get an executor, allocate all the ndarrays needed.
* Allows specifying data types.
* This function asks the user to pass in the NDArrays they would like to bind to,
* and it automatically allocates the NDArrays
* for arguments and auxiliary states that the user did not specify explicitly.
*
* @param ctx The device context the generated executor to run on.
* @param gradReq {'write', 'add', 'null'}, or list of str or dict of str to str, optional
* Specifies how we should update the gradient to the args_grad.
* - 'write' means the gradient is written to the specified args_grad NDArray each time.
* - 'add' means the gradient is added to the specified NDArray each time.
* - 'null' means no action is taken, and the gradient may not be calculated.
* @param typeDict Input type dictionary, name->dtype
* @param shapeDict Input shape dictionary, name->shape
* @return The generated Executor
*/
def simpleBind(ctx: Context, gradReq: String = "write",
shapeDict: Map[String, Shape],
typeDict: Map[String, Class[_ >: Float with Int with Double]] = null): Executor = {
val types =
if (typeDict == null) listArguments().map((_, classOf[Float])).toMap
else typeDict
val (argShapes, _, auxShapes) = inferShape(shapeDict)
// val (argTypes, _, auxTypes) = inferType(types)
// require(argShapes != null && argTypes != null, "Input node is not complete")
require(argShapes != null, "Input node is not complete")
// alloc space
val argNDArrays = (argShapes) map { case (shape) =>
// TODO: NDArray dtype
NDArray.zeros(shape, ctx)
}
val gradNDArrays =
if (gradReq != "null") {
((listArguments() zip argShapes) flatMap { case (name, shape) =>
if (!(name.endsWith("data") || name.endsWith("label"))) {
// TODO: NDArray dtype
Map(name -> NDArray.zeros(shape, ctx))
} else {
Map.empty[String, NDArray]
}
}).toMap
} else {
null
}
val auxNDArrays = (auxShapes) map { case (shape) =>
// TODO: NDArray dtype
NDArray.zeros(shape, ctx)
}
bind(ctx, argNDArrays, gradNDArrays, gradReq, auxNDArrays, null, null)
}
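// Hedged usage sketch (comments only, not in the original): simpleBind infers all
// shapes from shapeDict, zero-allocates every argument and auxiliary NDArray, and
// skips gradient buffers for names ending in "data" or "label", e.g.
// val exec = net.simpleBind(Context.cpu(), "write", shapeDict = Map("data" -> Shape(32, 100)))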
/**
* Bind current symbol to get an executor.
*
* @param ctx Context The device context the generated executor to run on.
* @param args Input arguments to the symbol.
* - If type is list of NDArray, the position is in the same order of list_arguments.
* - If type is dict of str to NDArray, then it maps the name of arguments
* to the corresponding NDArray.
* - In either case, all the arguments must be provided.
* @param argsGrad When specified, args_grad provide NDArrays to hold
* the result of gradient value in backward.
* - If type is list of NDArray,
* the position is in the same order of list_arguments.
* - If type is dict of str to NDArray, then it maps the name of arguments
* to the corresponding NDArray.
* - When the type is dict of str to NDArray, users only need to provide the dict
* for needed argument gradient.
* Only the specified argument gradient will be calculated.
* @param gradReq {'write', 'add', 'null'}, or list of str or dict of str to str, optional
* Specifies how we should update the gradient to the args_grad.
* - 'write' means the gradient is written to the specified args_grad NDArray each time.
* - 'add' means the gradient is added to the specified NDArray each time.
* - 'null' means no action is taken, and the gradient may not be calculated.
* @param auxStates Input auxiliary states to the symbol, only need to specify when
* list_auxiliary_states is not empty.
* - If type is list of NDArray,
* the position is in the same order of listAuxiliaryStates
* - If type is dict of str to NDArray, then it maps the name of auxiliary_states
* to the corresponding NDArray,
* - In either case, all the auxiliary_states need to be provided.
* @param group2ctx The dict mapping the ``ctx_group`` attribute to the context assignment.
* @param sharedExec Executor to share memory with.
* - This is intended for runtime reshaping, variable length sequences, etc.
* - The returned executor shares state with shared_exec,
* and should not be used in parallel with it.
* @return The generated Executor
* @note
* Auxiliary states are special states of symbols that do not correspond to an argument
* and do not have a gradient, but are still useful for specific operations.
* A common example of auxiliary state is the moving_mean and moving_variance in BatchNorm.
* Most operators do not have auxiliary states, and this parameter can be safely ignored.
*
* Users can skip computing some gradients by passing a dict for args_grad and
* only specifying the gradients they are interested in.
*/
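// The bind overloads below enumerate every combination of Seq/Map inputs for
// args, argsGrad, gradReq(s), and auxStates; all of them delegate to the
// private bindHelper.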
def bind(ctx: Context, args: Seq[NDArray], argsGrad: Seq[NDArray],
gradReq: String, auxStates: Seq[NDArray],
group2ctx: Map[String, Context], sharedExec: Executor): Executor = {
val symbolArguments = listArguments()
bindHelper(ctx, symbolArguments, args, argsGrad,
Seq.fill(symbolArguments.size)(gradReq), auxStates, group2ctx, sharedExec)
}
def bind(ctx: Context, args: Seq[NDArray], argsGrad: Seq[NDArray],
gradReq: String, auxStates: Map[String, NDArray],
group2ctx: Map[String, Context], sharedExec: Executor): Executor = {
val symbolArguments = listArguments()
bindHelper(ctx, symbolArguments, args, argsGrad,
Seq.fill(symbolArguments.size)(gradReq), auxStates, group2ctx, sharedExec)
}
def bind(ctx: Context, args: Seq[NDArray], argsGrad: Map[String, NDArray],
gradReq: String, auxStates: Seq[NDArray],
group2ctx: Map[String, Context], sharedExec: Executor): Executor = {
val symbolArguments = listArguments()
bindHelper(ctx, symbolArguments, args, argsGrad,
Seq.fill(symbolArguments.size)(gradReq), auxStates, group2ctx, sharedExec)
}
def bind(ctx: Context, args: Seq[NDArray], argsGrad: Map[String, NDArray],
gradReq: String, auxStates: Map[String, NDArray],
group2ctx: Map[String, Context], sharedExec: Executor): Executor = {
val symbolArguments = listArguments()
bindHelper(ctx, symbolArguments, args, argsGrad,
Seq.fill(symbolArguments.size)(gradReq), auxStates, group2ctx, sharedExec)
}
def bind(ctx: Context, args: Map[String, NDArray], argsGrad: Seq[NDArray],
gradReq: String, auxStates: Seq[NDArray],
group2ctx: Map[String, Context], sharedExec: Executor): Executor = {
val symbolArguments = listArguments()
bindHelper(ctx, symbolArguments, args, argsGrad,
Seq.fill(symbolArguments.size)(gradReq), auxStates, group2ctx, sharedExec)
}
def bind(ctx: Context, args: Map[String, NDArray], argsGrad: Seq[NDArray],
gradReq: String, auxStates: Map[String, NDArray],
group2ctx: Map[String, Context], sharedExec: Executor): Executor = {
val symbolArguments = listArguments()
bindHelper(ctx, symbolArguments, args, argsGrad,
Seq.fill(symbolArguments.size)(gradReq), auxStates, group2ctx, sharedExec)
}
def bind(ctx: Context, args: Map[String, NDArray], argsGrad: Map[String, NDArray],
gradReq: String, auxStates: Seq[NDArray],
group2ctx: Map[String, Context], sharedExec: Executor): Executor = {
val symbolArguments = listArguments()
bindHelper(ctx, symbolArguments, args, argsGrad,
Seq.fill(symbolArguments.size)(gradReq), auxStates, group2ctx, sharedExec)
}
def bind(ctx: Context, args: Map[String, NDArray], argsGrad: Map[String, NDArray],
gradReq: String, auxStates: Map[String, NDArray],
group2ctx: Map[String, Context], sharedExec: Executor): Executor = {
val symbolArguments = listArguments()
bindHelper(ctx, symbolArguments, args, argsGrad,
Seq.fill(symbolArguments.size)(gradReq), auxStates, group2ctx, sharedExec)
}
def bind(ctx: Context, args: Seq[NDArray], argsGrad: Seq[NDArray],
gradsReq: Seq[String], auxStates: Seq[NDArray],
group2ctx: Map[String, Context], sharedExec: Executor): Executor = {
val symbolArguments = listArguments()
bindHelper(ctx, symbolArguments, args, argsGrad, gradsReq, auxStates, group2ctx,
sharedExec)
}
def bind(ctx: Context, args: Seq[NDArray], argsGrad: Seq[NDArray],
gradsReq: Seq[String], auxStates: Map[String, NDArray],
group2ctx: Map[String, Context], sharedExec: Executor): Executor = {
val symbolArguments = listArguments()
bindHelper(ctx, symbolArguments, args, argsGrad, gradsReq, auxStates, group2ctx,
sharedExec)
}
def bind(ctx: Context, args: Seq[NDArray], argsGrad: Map[String, NDArray],
gradsReq: Seq[String], auxStates: Seq[NDArray],
group2ctx: Map[String, Context], sharedExec: Executor): Executor = {
val symbolArguments = listArguments()
bindHelper(ctx, symbolArguments, args, argsGrad, gradsReq, auxStates, group2ctx,
sharedExec)
}
def bind(ctx: Context, args: Seq[NDArray], argsGrad: Map[String, NDArray],
gradsReq: Seq[String], auxStates: Map[String, NDArray],
group2ctx: Map[String, Context], sharedExec: Executor): Executor = {
val symbolArguments = listArguments()
bindHelper(ctx, symbolArguments, args, argsGrad, gradsReq, auxStates, group2ctx,
sharedExec)
}
def bind(ctx: Context, args: Map[String, NDArray], argsGrad: Seq[NDArray],
gradsReq: Seq[String], auxStates: Seq[NDArray],
group2ctx: Map[String, Context], sharedExec: Executor): Executor = {
val symbolArguments = listArguments()
bindHelper(ctx, symbolArguments, args, argsGrad, gradsReq, auxStates, group2ctx,
sharedExec)
}
def bind(ctx: Context, args: Map[String, NDArray], argsGrad: Seq[NDArray],
gradsReq: Seq[String], auxStates: Map[String, NDArray],
group2ctx: Map[String, Context], sharedExec: Executor): Executor = {
val symbolArguments = listArguments()
bindHelper(ctx, symbolArguments, args, argsGrad, gradsReq, auxStates, group2ctx,
sharedExec)
}
def bind(ctx: Context, args: Map[String, NDArray], argsGrad: Map[String, NDArray],
gradsReq: Seq[String], auxStates: Seq[NDArray],
group2ctx: Map[String, Context], sharedExec: Executor): Executor = {
val symbolArguments = listArguments()
bindHelper(ctx, symbolArguments, args, argsGrad, gradsReq, auxStates, group2ctx,
sharedExec)
}
def bind(ctx: Context, args: Map[String, NDArray], argsGrad: Map[String, NDArray],
gradsReq: Seq[String], auxStates: Map[String, NDArray],
group2ctx: Map[String, Context], sharedExec: Executor): Executor = {
val symbolArguments = listArguments()
bindHelper(ctx, symbolArguments, args, argsGrad, gradsReq, auxStates, group2ctx,
sharedExec)
}
def bind(ctx: Context, args: Seq[NDArray], argsGrad: Seq[NDArray],
gradsReq: Map[String, String], auxStates: Seq[NDArray],
group2ctx: Map[String, Context], sharedExec: Executor): Executor = {
val symbolArguments = listArguments()
bindHelper(ctx, symbolArguments, args, argsGrad, gradsReq, auxStates, group2ctx,
sharedExec)
}
def bind(ctx: Context, args: Seq[NDArray], argsGrad: Seq[NDArray],
gradsReq: Map[String, String], auxStates: Map[String, NDArray],
group2ctx: Map[String, Context], sharedExec: Executor): Executor = {
val symbolArguments = listArguments()
bindHelper(ctx, symbolArguments, args, argsGrad, gradsReq, auxStates, group2ctx,
sharedExec)
}
def bind(ctx: Context, args: Seq[NDArray], argsGrad: Map[String, NDArray],
gradsReq: Map[String, String], auxStates: Seq[NDArray],
group2ctx: Map[String, Context], sharedExec: Executor): Executor = {
val symbolArguments = listArguments()
bindHelper(ctx, symbolArguments, args, argsGrad, gradsReq, auxStates, group2ctx,
sharedExec)
}
def bind(ctx: Context, args: Seq[NDArray], argsGrad: Map[String, NDArray],
gradsReq: Map[String, String], auxStates: Map[String, NDArray],
group2ctx: Map[String, Context], sharedExec: Executor): Executor = {
val symbolArguments = listArguments()
bindHelper(ctx, symbolArguments, args, argsGrad, gradsReq, auxStates, group2ctx,
sharedExec)
}
def bind(ctx: Context, args: Map[String, NDArray], argsGrad: Seq[NDArray],
gradsReq: Map[String, String], auxStates: Seq[NDArray],
group2ctx: Map[String, Context], sharedExec: Executor): Executor = {
val symbolArguments = listArguments()
bindHelper(ctx, symbolArguments, args, argsGrad, gradsReq, auxStates, group2ctx,
sharedExec)
}
def bind(ctx: Context, args: Map[String, NDArray], argsGrad: Seq[NDArray],
gradsReq: Map[String, String], auxStates: Map[String, NDArray],
group2ctx: Map[String, Context], sharedExec: Executor): Executor = {
val symbolArguments = listArguments()
bindHelper(ctx, symbolArguments, args, argsGrad, gradsReq, auxStates, group2ctx,
sharedExec)
}
def bind(ctx: Context, args: Map[String, NDArray], argsGrad: Map[String, NDArray],
gradsReq: Map[String, String], auxStates: Seq[NDArray],
group2ctx: Map[String, Context], sharedExec: Executor): Executor = {
val symbolArguments = listArguments()
bindHelper(ctx, symbolArguments, args, argsGrad, gradsReq, auxStates, group2ctx,
sharedExec)
}
def bind(ctx: Context, args: Map[String, NDArray], argsGrad: Map[String, NDArray],
gradsReq: Map[String, String], auxStates: Map[String, NDArray],
group2ctx: Map[String, Context], sharedExec: Executor): Executor = {
val symbolArguments = listArguments()
bindHelper(ctx, symbolArguments, args, argsGrad, gradsReq, auxStates, group2ctx,
sharedExec)
}
def bind(ctx: Context, args: Seq[NDArray], argsGrad: Seq[NDArray]): Executor = {
bind(ctx, args, argsGrad, "write", Nil, null, null)
}
def bind(ctx: Context, args: Map[String, NDArray], argsGrad: Map[String, NDArray]): Executor = {
bind(ctx, args, argsGrad, "write", Nil, null, null)
}
def bind(ctx: Context, args: Map[String, NDArray], argsGrad: Seq[NDArray]): Executor = {
bind(ctx, args, argsGrad, "write", Nil, null, null)
}
def bind(ctx: Context, args: Seq[NDArray], argsGrad: Map[String, NDArray]): Executor = {
bind(ctx, args, argsGrad, "write", Nil, null, null)
}
def bind(ctx: Context, args: Seq[NDArray]): Executor = {
val symbolArguments = listArguments()
bindHelper(ctx, symbolArguments, args, null,
Seq.fill(symbolArguments.size)("write"), Nil, null, null)
}
def bind(ctx: Context, args: Map[String, NDArray]): Executor = {
val symbolArguments = listArguments()
bindHelper(ctx, symbolArguments, args, null,
Seq.fill(symbolArguments.size)("write"), Nil, null, null)
}
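// A minimal usage sketch of bind with explicitly provided arrays (hypothetical
// names, assuming Symbol supports element-wise +); gradients of a and b are
// written on each backward pass:
//   val a = Symbol.Variable("a")
//   val b = Symbol.Variable("b")
//   val c = a + b
//   val exec = c.bind(Context.cpu(),
//     Map("a" -> NDArray.ones(Shape(2, 2)), "b" -> NDArray.ones(Shape(2, 2))),
//     Map("a" -> NDArray.zeros(Shape(2, 2)), "b" -> NDArray.zeros(Shape(2, 2))))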
private def bindHelper(ctx: Context, symbolArguments: Seq[String],
args: Iterable[_], argsGrad: Iterable[_],
gradsReq: Iterable[_], auxStates: Iterable[_],
group2ctx: Map[String, Context], sharedExec: Executor): Executor = {
require(args != null && !args.isInstanceOf[Set[_]])
require(argsGrad == null || !argsGrad.isInstanceOf[Set[_]])
require(auxStates == null || !auxStates.isInstanceOf[Set[_]])
require(gradsReq != null && !gradsReq.isInstanceOf[Set[_]])
val (argsHandle, argsNDArray) =
if (args.isInstanceOf[Seq[_]]) {
Symbol.getNDArrayInputs("args", args.asInstanceOf[Seq[NDArray]],
symbolArguments, allowMissing = false)
} else {
Symbol.getNDArrayInputs("args", args.asInstanceOf[Map[String, NDArray]],
symbolArguments, allowMissing = false)
}
// setup args gradient
val (argsGradHandle, argsGradNDArray) =
if (argsGrad == null) {
(Array.fill[NDArrayHandle](args.size)(0L), null)
} else if (argsGrad.isInstanceOf[Seq[_]]) {
Symbol.getNDArrayInputs("args_grad", argsGrad.asInstanceOf[Seq[NDArray]],
symbolArguments, allowMissing = true)
} else {
Symbol.getNDArrayInputs("args_grad", argsGrad.asInstanceOf[Map[String, NDArray]],
symbolArguments, allowMissing = true)
}
val (auxArgsHandle, auxStatesNDArray) =
if (auxStates == null) {
Symbol.getNDArrayInputs("aux_states", Nil, listAuxiliaryStates(), allowMissing = false)
} else if (auxStates.isInstanceOf[Seq[_]]) {
Symbol.getNDArrayInputs("aux_states", auxStates.asInstanceOf[Seq[NDArray]],
listAuxiliaryStates(), allowMissing = false)
} else {
Symbol.getNDArrayInputs("aux_states", auxStates.asInstanceOf[Map[String, NDArray]],
listAuxiliaryStates(), allowMissing = false)
}
// setup requirements
val reqsArray =
if (gradsReq.isInstanceOf[Seq[_]]) {
gradsReq.asInstanceOf[Seq[String]].map { req =>
require(Symbol.bindReqMap.contains(req), s"grad_req must be in ${Symbol.bindReqMap}")
Symbol.bindReqMap(req)
}.toArray
} else {
val gradsReqMap = gradsReq.asInstanceOf[Map[String, String]]
symbolArguments.map { req =>
val value = gradsReqMap.getOrElse(req, "null")
require(Symbol.bindReqMap.contains(value), s"grad_req must be in ${Symbol.bindReqMap}")
Symbol.bindReqMap(value)
}.toArray
}
val ctxMapKeys = ArrayBuffer.empty[String]
val ctxMapDevTypes = ArrayBuffer.empty[Int]
val ctxMapDevIDs = ArrayBuffer.empty[Int]
if (group2ctx != null) {
group2ctx.foreach { case (key, value) =>
ctxMapKeys += key
ctxMapDevTypes += value.deviceTypeid
ctxMapDevIDs += value.deviceId
}
}
val sharedHandle = if (sharedExec != null) sharedExec.handle else 0L
// println("*********************************")
// println("args:")
// println(argsHandle.length)
// println("size:")
// argsHandle.foreach(x => {
// println(new NDArray(x).shape)
// })
//
// println("argsGrad:")
// println(auxArgsHandle.length)
//// println("size:")
//// gradNDArraysHandles.foreach(x => {
//// println(new NDArray(x).shape)
//// })
// auxArgsHandle.foreach(println)
//
// println("!!!")
// reqsArray.foreach(println)
//
// if(auxArgsHandle!=null){
// println("auxArgs:")
// println(auxArgsHandle.length)
// println("size:")
// auxArgsHandle.foreach(x => {
// println(new NDArray(x).shape)
// })
// }
this.ToStaticGraph()
this.staticGraph.ToStaticGraph
val execRef = this.staticGraph.bind(ctx.deviceTypeid,
ctx.deviceId,
ctxMapKeys.size,
ctxMapKeys.toArray,
ctxMapDevTypes.toArray,
ctxMapDevIDs.toArray,
args.size,
argsHandle,
argsGradHandle,
reqsArray,
auxArgsHandle)
val executor = new Executor(execRef.value, this)
executor.argArrays = argsNDArray
executor.gradArrays = argsGradNDArray
executor.auxArrays = auxStatesNDArray
executor._ctx = new Context(ctx.deviceType, ctx.deviceId)
executor._gradsReq = gradsReq
executor._group2ctx =
if (group2ctx == null) null
else group2ctx.map { case (key, value) =>
(key -> new Context(value.deviceType, value.deviceId))
}.toMap
executor
}
def easy_bind(ctx: Context = Context.defaultCtx, args: Map[String, NDArray],
argsGrad: Map[String, NDArray] = null, auxStates: Map[String, NDArray] = null,
group2ctx: Map[String, Context] = null, gradReq: String = "write"): Executor = {
val (argHandle,argNDArrays) = Symbol.getNDArrayInputs("args",args,listArguments(),false)
val gradMap =
if(argsGrad==null){
((listArguments() zip argNDArrays) flatMap { case (name, argArr) =>
if (!(name.endsWith("data") || name.endsWith("label"))) {
// TODO: NDArray dtype
Map(name -> NDArray.zeros(argArr.shape, ctx))
} else {
Map.empty[String, NDArray]
}
}).toMap
}else
argsGrad
val (gradNDArraysHandles, gradNDArrays) = Symbol.getNDArrayInputs("args_grad", gradMap, listArguments(), true)
// auxiliary states (auxStates is always a Map here, so no Seq branch is needed)
val (auxArgsHandle, auxStatesNDArray) =
if (auxStates == null) {
Symbol.getNDArrayInputs("aux_states", Nil, listAuxiliaryStates(), allowMissing = false)
} else {
Symbol.getNDArrayInputs("aux_states", auxStates, listAuxiliaryStates(), allowMissing = false)
}
// gradient requirements: anything other than "write" disables gradient updates
val gradReqArrays =
if (gradReq == "write") Array.fill(gradNDArrays.length)("write")
else Array.fill(gradNDArrays.length)("null")
val reqsArray: Array[Int] = gradReqArrays.map(Symbol.bindReqMap)
val ctxMapKeys = ArrayBuffer.empty[String]
val ctxMapDevTypes = ArrayBuffer.empty[Int]
val ctxMapDevIDs = ArrayBuffer.empty[Int]
if (group2ctx != null) {
group2ctx.foreach { case (key, value) =>
ctxMapKeys += key
ctxMapDevTypes += value.deviceTypeid
ctxMapDevIDs += value.deviceId
}
}
// println("*********************************")
// println("args:")
// println(argHandle.length)
// println("size:")
// argHandle.foreach(x => {
// println(new NDArray(x).shape)
// })
//
// println("argsGrad:")
// println(gradNDArraysHandles.length)
//// println("size:")
//// gradNDArraysHandles.foreach(x => {
//// println(new NDArray(x).shape)
//// })
// gradNDArraysHandles.foreach(println)
//
// println("!!!")
// reqsArray.foreach(println)
//
// if(auxArgsHandle!=null){
// println("auxArgs:")
// println(auxArgsHandle.length)
// println("size:")
// auxArgsHandle.foreach(x => {
// println(new NDArray(x).shape)
// })
// }
this.ToStaticGraph()
this.staticGraph.ToStaticGraph
val execRef = this.staticGraph.bind(ctx.deviceTypeid,
ctx.deviceId,
ctxMapKeys.size,
ctxMapKeys.toArray,
ctxMapDevTypes.toArray,
ctxMapDevIDs.toArray,
argNDArrays.size,
argHandle,
gradNDArraysHandles,
reqsArray,
auxArgsHandle)
val executor = new Executor(execRef.value, this)
executor.argArrays = argNDArrays
executor.gradArrays = gradNDArrays
executor.auxArrays = auxStatesNDArray
executor
}
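// A minimal usage sketch (hypothetical names): gradient arrays for
// non-data/label arguments are allocated automatically when argsGrad is null.
//   val exec = net.easy_bind(Context.cpu(),
//     args = Map("data" -> dataArr, "fc1_weight" -> weightArr))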
/**
* Save the symbol into a JSON string.
* @see Symbol.loadJson : used to load a symbol from a JSON string.
*/
def toJson: String = {
val jsonStr = new RefString
this.ToStaticGraph()
this.staticGraph.ToStaticGraph
checkCall(_LIB.mxStaticGraphSaveToJSON(this.staticGraph.handle,jsonStr))
jsonStr.value
}
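// Round-trip sketch: serialize a symbol to JSON and load it back via the
// companion object (see Symbol.loadJson below; "net" is hypothetical):
//   val json = net.toJson
//   val restored = Symbol.loadJson(json)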
/**
* list all the arguments of this symbol
*/
def listArguments(): Array[String] = {
val arr = Stack[String]()
if (this.is_atomic()) {
heads_(0).source.value.opRef.value.ListArguments.toArray
} else {
this.DFSVisit { x =>
if (x.value.is_variable()) {
arr.push(x.value.name)
}
}
arr.reverse.toArray
}
}
/**
* List all auxiliary states in the symbol.
* @return The names of the auxiliary states.
* @note
* Auxiliary states are special states of symbols that do not correspond to an argument
* and do not have a gradient, but are still useful for specific operations.
* A common example of auxiliary state is the moving_mean and moving_variance in BatchNorm.
* Most operators do not have auxiliary states.
*/
def listAuxiliaryStates(): Seq[String] = {
// the atomic case must return the operator's own list; in the original code
// its result was discarded and an empty Seq was returned instead
if (this.is_atomic()) {
heads_(0).source.value.opRef.value.ListAuxiliaryStates()
} else {
val aarr = Stack[String]()
this.DFSVisit { x =>
if (x.value.opRef.value != null) {
val aux_args = x.value.opRef.value.ListAuxiliaryStates()
if (aux_args.length > 0) {
val hname = x.value.name
aux_args.foreach(a => aarr.push(hname + "_" + a))
}
}
}
aarr.reverse
}
}
/**
* @author lxg
* @date 20161230
* @brief Get a new grouped symbol whose output contains all the internal
* outputs of this symbol.
* @return The internals of the symbol.
*/
def getInternals():Symbol = {
val s = new Symbol((new SymbolHandleRef).value)
this.heads_.foreach { s.heads_ :+= _ }
var nout = 0
this.DFSVisit { nodeRef => {
val node = nodeRef.value
if(node.is_variable()){
nout = 1
}
else if(node.is_backward()){
nout = node.backward_source_node.value.inputs.size
}
else {
nout = node.opRef.value.NumVisibleOutputs()
}
for(i <- 0 until nout){
s.heads_ :+= new DataEntry(nodeRef, i)
}}
}
s
}
/**
* @author liuxianggen
* @date 20160708
* @brief List the names of all the outputs of this symbol.
* @return Vector[String]: the names of all the outputs
*/
def listOutputs(): Vector[String] = {
var res: Vector[String] = Vector[String]()
this.heads_.map { head =>
{
if (head.source.value.is_variable()) {
res :+= head.source.value.name
} else {
var rname: String = null
// the output of node is the corresponding input of its backward node, so,,,
if (head.source.value.is_backward()) {
rname = head.source.value.backward_source_node.value.opRef.value.ListArguments(head.index)
} else {
rname = head.source.value.opRef.value.ListOutputs(head.index)
}
val hname = head.source.value.name
if (hname.length() == 0)
res :+= rname
else
res :+= (hname + "_" + rname)
}
}
}
res
}
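// Example sketch: an output head named "sm" typically reports "sm_output",
// assuming the underlying operator lists a single output called "output"
// ("fc" is a hypothetical upstream symbol):
//   val sm = Symbol.SoftmaxOutput("sm")(Map("data" -> fc))
//   sm.listOutputs()  // Vector("sm_output")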
}
// scalastyle:on finalize
object Symbol {
private type SymbolCreateNamedFunc = Map[String, Any] => Symbol
private val logger = LoggerFactory.getLogger(classOf[Symbol])
private val functions: Map[String, SymbolFunction] = initSymbolModule()
private val bindReqMap = Map("null" -> 0, "write" -> 1, "add" -> 3)
// TODO: _CrossDeviceCopy
def pow(sym1: Symbol, sym2: Symbol): Symbol = {
Symbol.createFromListedSymbols("_Power")(Array(sym1, sym2))
}
def pow[@specialized(Int, Float, Double) V](sym: Symbol, number: V): Symbol = {
Symbol.createFromListedSymbols("_PowerScalar")(Array(sym), Map("scalar" -> number.toString))
}
def pow[@specialized(Int, Float, Double) V](number: V, sym: Symbol): Symbol = {
Symbol.createFromListedSymbols("_RPowerScalar")(Array(sym), Map("scalar" -> number.toString))
}
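// Usage sketch: all three overloads build a power expression ("x" is a
// hypothetical variable):
//   val x = Symbol.Variable("x")
//   val y1 = Symbol.pow(x, x)   // element-wise x ** x
//   val y2 = Symbol.pow(x, 2f)  // x ** 2
//   val y3 = Symbol.pow(2f, x)  // 2 ** x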
/**
* Take absolute value of the src
* @param src Source symbolic input to the function
*/
def abs(src: Symbol): Symbol = {
createFromListedSymbols("abs")(Array(src))
}
/**
* Take sign value of the src
* @param src Source symbolic input to the function
*/
def sign(src: Symbol): Symbol = {
createFromListedSymbols("sign")(Array(src))
}
/**
* Take round value of the src
* @param src Source input to the function
*/
def round(src: Symbol): Symbol = {
createFromListedSymbols("round")(Array(src))
}
/**
* Take ceil value of the src
* src Source input to the function
*/
def ceil(src: Symbol): Symbol = {
createFromListedSymbols("ceil")(Array(src))
}
/**
* Take floor value of the src
* @param src Source input to the function
*/
def floor(src: Symbol): Symbol = {
createFromListedSymbols("floor")(Array(src))
}
/**
* Take square of the src
* @param src Source symbolic input to the function
*/
def square(src: Symbol): Symbol = {
createFromListedSymbols("square")(Array(src))
}
/**
* Take sum of the src
* @param src Source symbolic input to the function
*/
def sum(src: Symbol): Symbol = {
createFromListedSymbols("sum")(Array(src))
}
/**
* Take sqrt of the src
* src Source symbolic input to the function
*/
def sqrt(src: Symbol): Symbol = {
createFromListedSymbols("sqrt")(Array(src))
}
/**
* Take rsqrt of the src
* @param src Source symbolic input to the function
*/
def rsqrt(src: Symbol): Symbol = {
createFromListedSymbols("rsqrt")(Array(src))
}
/**
* Take exp of the src
* @param src Source symbolic input to the function
*/
def exp(src: Symbol): Symbol = {
createFromListedSymbols("exp")(Array(src))
}
/**
* Take log of the src
* @param src Source symbolic input to the function
*/
def log(src: Symbol): Symbol = {
createFromListedSymbols("log")(Array(src))
}
/**
* Take cos of the src
* @param src Source symbolic input to the function
*/
def cos(src: Symbol): Symbol = {
createFromListedSymbols("cos")(Array(src))
}
/**
* Take sin of the src
* @param src Source symbolic input to the function
*/
def sin(src: Symbol): Symbol = {
createFromListedSymbols("sin")(Array(src))
}
/**
* Return transpose of the src
* @param src Source symbolic input to the function
*/
def transpose(src: Symbol): Symbol = {
createFromListedSymbols("transpose")(Array(src))
}
def max(left: Symbol, right: Symbol): Symbol = {
createFromListedSymbols("_Maximum")(Array(left, right))
}
def max[@specialized(Int, Float, Double) V](left: Symbol, right: V): Symbol = {
createFromListedSymbols("_MaximumScalar")(Array(left), Map("scalar" -> right.toString))
}
def max[@specialized(Int, Float, Double) V](left: V, right: Symbol): Symbol = {
createFromListedSymbols("_MaximumScalar")(Array(right), Map("scalar" -> left.toString))
}
def min(left: Symbol, right: Symbol): Symbol = {
createFromListedSymbols("_Minimum")(Array(left, right))
}
def min[@specialized(Int, Float, Double) V](left: Symbol, right: V): Symbol = {
createFromListedSymbols("_MinimumScalar")(Array(left), Map("scalar" -> right.toString))
}
def min[@specialized(Int, Float, Double) V](left: V, right: Symbol): Symbol = {
createFromListedSymbols("_MinimumScalar")(Array(right), Map("scalar" -> left.toString))
}
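// Usage sketch: element-wise max/min against a symbol or a scalar, e.g. a
// ReLU6-style clip ("x" is hypothetical):
//   val clipped = Symbol.min(Symbol.max(x, 0f), 6f)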
def Dot(lhs: Symbol, rhs: Symbol, hiddenSize: Int): Symbol = {
Symbol.FullyConnected("Dot")(Map("data" -> lhs, "weight" -> Symbol.transpose(rhs),
"num_hidden" -> hiddenSize, "no_bias" -> true))
}
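// Dot expresses lhs * rhs.T as a bias-free FullyConnected layer, so hiddenSize
// must match the leading dimension of rhs. Usage sketch (hypothetical names):
//   val score = Symbol.Dot(query, keys, hiddenSize = numKeys)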
/**
lhs add rhs with broadcast
Parameters
----------
lhs : Symbol
Left symbolic input to the function
rhs : Symbol
Right symbolic input to the function
*/
def broadcast_plus(left: Symbol, right: Symbol): Symbol = {
createFromListedSymbols("broadcast_plus")(Array(left, right))
}
/**
lhs minus rhs with broadcast
Parameters
----------
lhs : Symbol
Left symbolic input to the function
rhs : Symbol
Right symbolic input to the function
*/
def broadcast_minus(left: Symbol, right: Symbol): Symbol = {
createFromListedSymbols("broadcast_minus")(Array(left, right))
}
/**
lhs multiple rhs with broadcast
Parameters
----------
lhs : Symbol
Left symbolic input to the function
rhs : Symbol
Right symbolic input to the function
*/
def broadcast_mul(left: Symbol, right: Symbol): Symbol = {
createFromListedSymbols("broadcast_mul")(Array(left, right))
}
/**
lhs divide rhs with broadcast
Parameters
----------
lhs : Symbol
Left symbolic input to the function
rhs : Symbol
Right symbolic input to the function
*/
def broadcast_div(left: Symbol, right: Symbol): Symbol = {
createFromListedSymbols("broadcast_div")(Array(left, right))
}
/**
lhs power rhs with broadcast
Parameters
----------
lhs : Symbol
Left symbolic input to the function
rhs : Symbol
Right symbolic input to the function
*/
def broadcast_power(left: Symbol, right: Symbol): Symbol = {
createFromListedSymbols("broadcast_power")(Array(left, right))
}
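// Usage sketch: broadcasting follows numpy-style rules. For example, a
// per-channel bias of shape (10, 1, 4) can be added to data of shape
// (10, 3, 4) (hypothetical symbols):
//   val out = Symbol.broadcast_plus(data, bias)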
/**
Sum the src along the given axis and return a Symbol. Follows numpy semantics.
Parameters
----------
src : Symbol
Symbolic input to the function
axis : Shape(tuple), optional, default=()
Same as numpy. The axes to perform the reduction. If left empty, a global reduction is performed.
keepdims : boolean, optional, default=False
Same as numpy. If keepdims is set to true, the reduced axis is left in the result as a dimension with size one.
@example:
val sum = Symbol.Sum("sum")(Map("data" -> lhs, "axis" -> 2))
if lhs.shape = (10,3,4)
no axis => (1)
axis = 0 => (3,4)
axis = 1 => (10,4)
axis = 2 => (10,3)
axis = 3 => error: axes must be within the range (ndim of the source = 3)
*/
def Sum(name: String = null, attr: Map[String, String] = null): SymbolCreateNamedFunc = {
createFromNamedSymbolsNoCheck("sum", name, attr)
}
/**
* 2016-2-29
* there are two tasks:
* 1. create symbolBase
* 2. initialize the op
*/
def Create(operator: String, kwargs: Map[String, String] = null): Symbol = {
val opref = new OperatorPropertyRef
val op = OperatorProperty(operator)
if (kwargs == null) {
System.err.println(s"the Symbol: $operator has no type to set, may be wrong")
}
// guard against a null kwargs map before stripping the "name" key
val params = if (kwargs == null) Map.empty[String, String] else kwargs
val paramkeys = (params - "name").keys.toArray
val paramvals = (params - "name").values.toArray
op.Init(paramkeys, paramvals)
opref.value = op
val node = new Node(opref, "")
val nret: Int = op.NumVisibleOutputs()
val sb: Symbol = new Symbol((new SymbolHandleRef).value)
val noderef = new NodeRef()
noderef.value = node
(0 until nret).foreach { i =>
sb.heads_ :+= new DataEntry(noderef, i)
}
sb
}
def Variable(name: String): Symbol = {
val sb: Symbol = new Symbol((new SymbolHandleRef).value)
val opref = new OperatorPropertyRef
val node = new Node(opref, name)
val noderef = new NodeRef()
noderef.value = node
sb.heads_ :+= new DataEntry(noderef, 0);
sb
}
def CreateVariable(name: String): Symbol = {
val sb: Symbol = new Symbol((new SymbolHandleRef).value)
val opref = new OperatorPropertyRef
val node = new Node(opref, name)
val noderef = new NodeRef()
noderef.value = node
sb.heads_ :+= new DataEntry(noderef, 0);
sb
}
/**
* Create a symbol that groups symbols together.
* @param symbols List of symbols to be grouped.
* @return The created group symbol.
*/
def Group(symbols:Symbol*):Symbol = {
val ret = new Symbol((new SymbolHandleRef).value)
symbols.foreach { s => ret.heads_ = ret.heads_ ++ s.heads_ }
ret
}
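// Usage sketch: group two loss heads so a single executor computes both
// (hypothetical symbols):
//   val net = Symbol.Group(loss1, loss2)
//   net.listOutputs()  // outputs of loss1 followed by outputs of loss2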
/**
* by liuxianggen
* 2016-3-9
*/
def CreateAtomicSymbol_mx(opName: String):OperatorProperty = {
val op = OperatorProperty(opName)
op
}
/**
* 2016-3-15
*/
private def DefaultVarName(op_name: String, arg_name: String): String = {
if (op_name.size == 0)
arg_name
else
op_name + "_" + arg_name
}
/**
* Get output from a symbol and pass 0 gradient back
*
* Parameters
* ----------
* data : Symbol. Input data.
*/
def BlockGrad(name: String = null, attr: Map[String, String] = null): SymbolCreateNamedFunc = {
createFromNamedSymbolsNoCheck("BlockGrad", name, attr)
}
/**
* Crop the 2nd and 3rd dims of the input data, either to the size given by h_w
* or to the width and height of the second input symbol.
*
* Parameters
* ----------
* num_args : int, required.
* Number of inputs for crop.
* If it equals one, then we use h_w for the crop height and width;
* if it equals two,
* then we use the height and width of the second input symbol,
* which we call crop_like here
* offset : Shape(tuple), optional, default=(0, 0), crop offset coordinate: (y, x)
* h_w : Shape(tuple), optional, default=(0, 0), crop height and width: (h, w)
* center_crop : boolean, optional, default=False.
* If set to true, center cropping is used;
* otherwise, cropping uses the shape of crop_like
*/
def Crop(name: String = null, attr: Map[String, String] = null)(
inputs: Array[Symbol], params: Map[String, Any] = null): Symbol = {
createFromListedSymbolsNoCheck("Crop", name, attr)(inputs, params)
}
/**
* Apply dropout to input
*
* Parameters
* ----------
* data : Symbol. Input data to dropout.
* p : float, optional, default=0.5. Fraction of the input that gets dropped out at training time
*/
def Dropout(name: String = null, attr: Map[String, String] = null): SymbolCreateNamedFunc = {
createFromNamedSymbolsNoCheck("Dropout", name, attr)
}
/**
* Apply a sparse regularization to the output a sigmoid activation function.
*
* Parameters
* ----------
* data : Symbol. Input data.
* sparseness_target : float, optional, default=0.1. The sparseness target
* penalty : float, optional, default=0.001. The tradeoff parameter for the sparseness penalty
* momentum : float, optional, default=0.9. The momentum for running average
*/
def IdentityAttachKLSparseReg(name: String = null,
attr: Map[String, String] = null): SymbolCreateNamedFunc = {
createFromNamedSymbolsNoCheck("IdentityAttachKLSparseReg", name, attr)
}
/**
* Apply activation function to input.
*
* Parameters
* ----------
* data : Symbol. Input data to activation function.
* act_type : {'elu', 'leaky', 'prelu', 'rrelu'},optional, default='leaky'
* Activation function to be applied.
* slope : float, optional, default=0.25. Init slope for the activation. (For leaky and elu only)
* lower_bound : float, optional, default=0.125. Lower bound of random slope. (For rrelu only)
* upper_bound : float, optional, default=0.334. Upper bound of random slope. (For rrelu only)
*/
def LeakyReLU(name: String = null, attr: Map[String, String] = null): SymbolCreateNamedFunc = {
createFromNamedSymbolsNoCheck("LeakyReLU", name, attr)
}
/**
* Apply local response normalization (LRN) to the input.
*
* Parameters
* ----------
* data : Symbol. Input data to the normalization.
* alpha : float, optional, default=0.0001,
* value of the alpha variance scaling parameter in the normalization formula
* beta : float, optional, default=0.75,
* value of the beta power parameter in the normalization formula
* knorm : float, optional, default=2, value of the k parameter in the normalization formula
* nsize : int (non-negative), required, normalization window width in elements.
*/
def LRN(name: String = null, attr: Map[String, String] = null): SymbolCreateNamedFunc = {
createFromNamedSymbolsNoCheck("LRN", name, attr)
}
/**
* Use mean absolute error regression for final output, this is used on final output of a net.
*
* Parameters
* ----------
* data : Symbol. Input data to function.
* label : Symbol. Input label to function.
* grad_scale : float, optional, default=1. Scale the gradient by a float factor
*/
def MAERegressionOutput(name: String = null,
attr: Map[String, String] = null): SymbolCreateNamedFunc = {
createFromNamedSymbolsNoCheck("MAERegressionOutput", name, attr)
}
/**
* Reshape input to target shape
*
* Parameters
* ----------
* data : Symbol. Input data to reshape.
* target_shape : Shape(tuple), required. Target new shape. One and only one dim can be 0,
* in which case it will be inferred from the rest of the dims
* note
* ---------
* (neg_idx) < (0): one and only one dim can be inferred, such as -1
* example
* val inputs = Symbol.Reshape()(Map("data" -> label, "shape" -> "(-1,-1,6)"))
* if the shape of the input is (10,3,2):
* dim = -1 => infer this dimension automatically
* dim = 0 => delete this dimension
* dim = 1 => set 1
* dim = 2 => set 2
*/
def Reshape(name: String = null, attr: Map[String, String] = null): SymbolCreateNamedFunc = {
createFromNamedSymbolsNoCheck("Reshape", name, attr)
}
/**
* Slice channel into many outputs with equally divided channel
*
* Parameters
* ----------
* num_outputs : int, required. Number of outputs to be sliced.
*
* note:
* if you get the error "new and old shape do not match total elements", add an "axis" parameter:
* data3_slice = mx.symbol.SliceChannel(data = data_sym3, num_outputs = 5, axis = 0)
*/
def SliceChannel(name: String = null, attr: Map[String, String] = null)(
inputs: Array[Symbol], params: Map[String, Any] = null): Symbol = {
createFromListedSymbolsNoCheck("SliceChannel", name, attr)(inputs, params)
}
/**
* Apply softmax activation to input.
* This is intended for internal layers. For output (loss layer) please use SoftmaxOutput.
* If type=instance,
* this operator will compute a softmax for each instance in the batch; this is the default mode.
* If type=channel,
* this operator will compute a num_channel-class softmax at each position of each instance;
* this can be used for fully convolutional network, image segmentation, etc.
*
* Parameters
* ----------
* data : Symbol. Input data to activation function.
* type : {'channel', 'instance'},optional, default='instance'. Softmax Mode.
* If set to instance,
* this operator will compute a softmax for each instance in the batch;
* this is the default mode.
* If set to channel,
* this operator will compute a num_channel-class softmax
* at each position of each instance;
* this can be used for fully convolutional network, image segmentation, etc.
*/
def SoftmaxActivation(name: String = null,
attr: Map[String, String] = null): SymbolCreateNamedFunc = {
createFromNamedSymbolsNoCheck("SoftmaxActivation", name, attr)
}
/**
* Apply matrix multiplication to input then add a bias.
*
* Parameters
* ----------
* data : Symbol. Input data to the FullyConnectedOp.
* weight : Symbol. Weight matrix.
* bias : Symbol. Bias parameter.
* num_hidden : int, required. Number of hidden nodes of the output.
* no_bias : boolean, optional, default=False. Whether to disable bias parameter.
*/
def FullyConnected(name: String = null,
attr: Map[String, String] = null): SymbolCreateNamedFunc = {
createFromNamedSymbolsNoCheck("FullyConnected", name, attr)
}
/**
* Apply activation function to input.
* Softmax Activation is only available with CUDNN on GPU and will be computed
* at each location across channels if the input is 4D.
*
* Parameters
* ----------
* data : Symbol. Input data to activation function.
* act_type : {'relu', 'sigmoid', 'softrelu', 'tanh'}, required.
* Activation function to be applied.
*/
def Activation(name: String = null, attr: Map[String, String] = null): SymbolCreateNamedFunc = {
createFromNamedSymbolsNoCheck("Activation", name, attr)
}
/**
* Apply convolution to input then add a bias.
*
* Parameters
* ----------
* data : Symbol. Input data to the ConvolutionOp.
* weight : Symbol. Weight matrix.
* bias : Symbol. Bias parameter.
* kernel : Shape(tuple), required. Convolution kernel size: (y, x)
* stride : Shape(tuple), optional, default=(1, 1). Convolution stride: (y, x)
* dilate : Shape(tuple), optional, default=(1, 1). Convolution dilate: (y, x)
* pad : Shape(tuple), optional, default=(0, 0). Pad for convolution: (y, x)
* num_filter : int (non-negative), required. Convolution filter(channel) number
* num_group : int (non-negative), optional, default=1
* Number of groups partition.
* This option is not supported by CuDNN,
* you can use SliceChannel to num_group,
* apply convolution and concat instead to achieve the same need.
* workspace : long (non-negative), optional, default=512. Tmp workspace for convolution (MB).
* no_bias : boolean, optional, default=False. Whether to disable bias parameter.
*
*
*/
def Convolution(name: String = null, attr: Map[String, String] = null): SymbolCreateNamedFunc = {
createFromNamedSymbolsNoCheck("Convolution", name, attr)
}
/**
* Apply deconvolution to input then add a bias.
*
* Parameters
* ----------
* data : Symbol. Input data to the DeconvolutionOp.
* weight : Symbol. Weight matrix.
* bias : Symbol. Bias parameter.
* kernel : Shape(tuple), required, deconvolution kernel size: (y, x)
* stride : Shape(tuple), optional, default=(1, 1), deconvolution stride: (y, x)
* pad : Shape(tuple), optional, default=(0, 0), pad for deconvolution: (y, x)
* num_filter : int (non-negative), required, deconvolution filter(channel) number
* num_group : int (non-negative), optional, default=1, number of groups partition
* workspace : long (non-negative), optional, default=512. Tmp workspace for deconvolution (MB)
* no_bias : boolean, optional, default=True. Whether to disable bias parameter.
*/
def Deconvolution(name: String = null,
attr: Map[String, String] = null): SymbolCreateNamedFunc = {
createFromNamedSymbolsNoCheck("Deconvolution", name, attr)
}
/**
* Perform spatial pooling on inputs.
*
* Parameters
* ----------
* data : Symbol. Input data to the pooling operator.
* kernel : Shape(tuple), required, pooling kernel size: (y, x)
* pool_type : {'avg', 'max', 'sum'}, required. Pooling type to be applied.
* stride : Shape(tuple), optional, default=(1, 1), stride for pooling (y, x)
* pad : Shape(tuple), optional, default=(0, 0), pad for pooling: (y, x)
*/
def Pooling(name: String = null, attr: Map[String, String] = null): SymbolCreateNamedFunc = {
createFromNamedSymbolsNoCheck("Pooling", name, attr)
}
/**
* Flatten input
* Parameters
* ----------
* data : Symbol. Input data to flatten.
*
* example: if input(batchSize,a,b,c)
* output: (batchSize,a*b*c)
*
*/
def Flatten(name: String = null, attr: Map[String, String] = null): SymbolCreateNamedFunc = {
createFromNamedSymbolsNoCheck("Flatten", name, attr)
}
/**
* Perform a softmax transformation on input, backprop with logloss.
*
* Parameters
* ----------
* data : Symbol. Input data to softmax.
* label : Symbol. Label data.
* grad_scale : float, optional, default=1. Scale the gradient by a float factor
* ignore_label : float, optional, default=-1.
* the ignore_label will not work in backward,
* and this is only used when multi_output=true
* multi_output : boolean, optional, default=False.
* If set to true, for a (n,k,x_1,..,x_n) dimensional input tensor,
* softmax will generate n*x_1*...*x_n outputs, each with k classes
* use_ignore : boolean, optional, default=False.
* If set to true,
* the ignore_label value will not contribute to the backward gradient
*/
def SoftmaxOutput(name: String = null,
attr: Map[String, String] = null): SymbolCreateNamedFunc = {
createFromNamedSymbolsNoCheck("SoftmaxOutput", name, attr)
}
/**
* Cast array to a different data type.
* Parameters
* ----------
* data : Symbol, Input data to cast function.
* dtype : {Int, Double, Short, Float}, required, Target data type.
*/
def Cast(name: String = null, attr: Map[String, String] = null): SymbolCreateNamedFunc = {
createFromNamedSymbolsNoCheck("Cast", name, attr)
}
/**
* Perform an elementwise sum over all the inputs.
*
* Parameters
* ----------
* num_args : int, required. Number of inputs to be sum.
*/
def ElementWiseSum(name: String = null,
attr: Map[String, String] = null)(
symbols: Array[Symbol], params: Map[String, Any] = null): Symbol = {
createFromListedSymbolsNoCheck("ElementWiseSum", name, attr)(symbols, params)
}
/**
* Apply batch normalization to input.
*
* Parameters
* ----------
* data : Symbol, Input data to batch normalization
* eps : float, optional, default=0.001, Epsilon to prevent div 0
* momentum : float, optional, default=0.9, Momentum for moving average
* fix_gamma : boolean, optional, default=True, Fix gamma while training
*/
def BatchNorm(name: String = null, attr: Map[String, String] = null): SymbolCreateNamedFunc = {
createFromNamedSymbolsNoCheck("BatchNorm", name, attr)
}
/**
* Perform nearest neighbor/bilinear up sampling to inputs
*
* Parameters
* ----------
* data : Symbol[]. Array of tensors to upsample
* scale : int (non-negative), required. Up sampling scale
* num_filter : int (non-negative), optional, default=0.
* Input filter. Only used by nearest sample_type.
* sample_type : {'bilinear', 'nearest'}, required, upsampling method
* multi_input_mode : {'concat', 'sum'},optional, default='concat'
* How to handle multiple input.
* concat means concatenate upsampled images along the channel dimension.
* sum means add all images together,
* only available for nearest neighbor upsampling.
* num_args : int, required. Number of inputs to be upsampled.
* For nearest neighbor upsampling, this can be 1-N;
* the size of the output will be (scale*h_0, scale*w_0)
* and all other inputs will be upsampled to the same size.
* For bilinear upsampling this must be 2; 1 input and 1 weight.
*/
def UpSampling(name: String = null, attr: Map[String, String] = null)(
inputs: Array[Symbol], params: Map[String, Any] = null): Symbol = {
createFromListedSymbolsNoCheck("UpSampling", name, attr)(inputs, params)
}
/**
* Perform a feature concat on the channel dim (dim 1) over all the inputs.
*
* Parameters
* ----------
* data : Symbol[]. List of tensors to concatenate
* num_args : int, required. Number of inputs to be concatenated.
* dim : int, optional, default='1'. the dimension to be concatenated.
*
* example
* val concat0=Symbol.Concat("concat0")(Array(lhs,rhs),Map("dim"->0))
* if the shape of lhs and rhs are both (10,3,2)
dim = 0 => (20,3,2)
dim = 1 => (10,6,2)
dim = 2 => (10,3,4)
*
*
*/
def Concat(name: String = null, attr: Map[String, String] = null)(
inputs: Array[Symbol], params: Map[String, Any] = null): Symbol = {
createFromListedSymbolsNoCheck("Concat", name, attr)(inputs, params)
}
/**
* Use Logistic regression for final output, this is used on final output of a net.
* Logistic regression is suitable for binary classification or probability prediction tasks.
* Parameters
* ----------
* data : Symbol. Input data to function.
* label : Symbol. Input label to function.
* grad_scale : float, optional, default=1. Scale the gradient by a float factor
*/
def LogisticRegressionOutput(name: String = null,
attr: Map[String, String] = null): SymbolCreateNamedFunc = {
createFromNamedSymbolsNoCheck("LogisticRegressionOutput", name, attr)
}
/**
* Use linear regression for final output, this is used on final output of a net.
* Parameters
* ----------
* data : Symbol. Input data to function.
* label : Symbol. Input label to function.
* grad_scale : float, optional, default=1. Scale the gradient by a float factor
*
* note:
* E = \frac{1}{2N}\sum_{i,j}(x_{i,j}-label_{i,j})^2
*
*/
def LinearRegressionOutput(name: String = null,
attr: Map[String, String] = null): SymbolCreateNamedFunc = {
createFromNamedSymbolsNoCheck("LinearRegressionOutput", name, attr)
}
/**
* Apply swapaxis to input.
*
* Parameters
* ----------
* data : Symbol. Input data to the SwapAxisOp.
* dim1 : int (non-negative), default=0, the first axis to be swapped.
* dim2 : int (non-negative), default=0, the second axis to be swapped.
*/
def SwapAxis(name: String = null, attr: Map[String, String] = null): SymbolCreateNamedFunc = {
createFromNamedSymbolsNoCheck("SwapAxis", name, attr)
}
/**
* Get embedding for one-hot input
*
* Parameters
* ----------
* data : Symbol, Input data to the EmbeddingOp.
* weight : Symbol, Embedding weight matrix.
* input_dim : int, input dim of one-hot encoding
* output_dim : int, output dim of embedding
*/
def Embedding(name: String = null, attr: Map[String, String] = null): SymbolCreateNamedFunc = {
createFromNamedSymbolsNoCheck("Embedding", name, attr)
}
/**
* Perform Smooth L1 on inputs.
*
* Parameters
* ----------
* data : Symbol. Input data to the smooth_l1 operator.
* scalar : Float, required.
*/
def SmoothL1(name: String = null, attr: Map[String, String] = null): SymbolCreateNamedFunc = {
createFromNamedSymbolsNoCheck("smooth_l1", name, attr)
}
/**
* Special layer for propagating loss
*
* Parameters
* ----------
* data : Symbol, Input data to the MakeLossOp.
* grad_scale : float, optional, default=1.
* Gradient scale as a supplement to unary and binary operators
*/
def MakeLoss(name: String = null, attr: Map[String, String] = null): SymbolCreateNamedFunc = {
createFromNamedSymbolsNoCheck("MakeLoss", name, attr)
}
/**
* by liuxianggen
* 20160825
* There are two steps:
* 1. softmax
* 2. sum{log(p_{label(i)})}
* Calculate cross_entropy(lhs, one_hot(rhs))
Parameters
----------
lhs : Symbol
Left symbolic input to the function
rhs : Symbol
Right symbolic input to the function
*/
def Softmax_cross_entropy(left: Symbol, right: Symbol): Symbol = {
createFromListedSymbols("softmax_cross_entropy")(Array(left, right))
}
// List and add all the atomic symbol functions to current module.
private def initSymbolModule(): Map[String, SymbolFunction] = {
val symbolList = ListBuffer.empty[SymbolHandle]
checkCall(_LIB.mxSymbolListAtomicSymbolCreators(symbolList))
symbolList.map(makeAtomicSymbolFunction).toMap
}
// Create an atomic symbol function by handle and function name.
private def makeAtomicSymbolFunction(handle: SymbolHandle): (String, SymbolFunction) = {
val name = new RefString
val desc = new RefString
val keyVarNumArgs = new RefString
val numArgs = new MXUintRef
val argNames = ListBuffer.empty[String]
val argTypes = ListBuffer.empty[String]
val argDescs = ListBuffer.empty[String]
checkCall(_LIB.mxSymbolGetAtomicSymbolInfo(
handle, name, desc, numArgs, argNames, argTypes, argDescs, keyVarNumArgs))
val paramStr = ctypes2docstring(argNames, argTypes, argDescs)
val docStr = s"${name.value}\n${desc.value}\n\n$paramStr\n"
// println("Atomic Symbol function defination:\n{}", docStr)
(name.value, new SymbolFunction(handle, keyVarNumArgs.value))
}
/**
* Create a symbol from positional (listed) input symbols for the given operator.
* The parameters listed below can be passed in as keyword arguments.
* @param symbols Symbol parameters passed to create the resulting symbol
* @param paramKwargs Key-value parameters passed to create the resulting symbol
* @param attr Attributes set to the resulting symbol
* @return the resulting symbol
*/
def createFromListedSymbols(
operator: String, name: String = null, attr: Map[String, String] = null)(
symbols: Array[Symbol], paramKwargs: Map[String, String] = null): Symbol = {
val function = functions(operator)
require(function != null, s"invalid operator name $operator")
val params = if (paramKwargs == null) Map.empty[String, String] else paramKwargs
// operators accepting a variable number of inputs need the input count
// passed explicitly via keyVarNumArgs
val addKeyVarNumArgs = (function.keyVarNumArgs != null
&& !function.keyVarNumArgs.isEmpty
&& !params.contains(function.keyVarNumArgs))
val params1: scala.collection.mutable.Map[String, String] = (
if (addKeyVarNumArgs) {
scala.collection.mutable.Map(function.keyVarNumArgs -> symbols.length.toString)
} else {
scala.collection.mutable.Map.empty[String, String]
}
) ++ params
val s = Create(operator,params1.toMap)
val attrAll = AttrScope.current.get(Option(attr))
s.setAttr(attrAll)
val hint = operator.toLowerCase
val managedName = NameManager.current.get(Option(name), hint)
s.Compose(symbols,managedName)
s
}
/**
* Create a symbol from named input symbols for the given operator.
* The parameters listed below can be passed in as keyword arguments.
* @param symbols Named symbol parameters passed to create the resulting symbol
* @param paramKwargs Key-value parameters passed to create the resulting symbol
* @param attr Attributes set to the resulting symbol
* @return the resulting symbol
*/
def createFromNamedSymbols(
operator: String, name: String = null, attr: Map[String, String] = null)(
symbols: Map[String, Symbol], paramKwargs: Map[String, String] = null): Symbol = {
val function = functions(operator)
require(function != null, s"invalid operator name $operator")
// operators with a variable number of arguments cannot be composed from named symbols
require(function.keyVarNumArgs == null || function.keyVarNumArgs.isEmpty,
"This function supports a variable number of Symbol arguments.\n" +
"Please pass all the input Symbols via positional arguments instead of keyword arguments.")
val params = if (paramKwargs == null) Map.empty[String, String] else paramKwargs
val s = Create(operator,params)
val attrAll = AttrScope.current.get(Option(attr))
s.setAttr(attrAll)
val hint = operator.toLowerCase
val managedName = NameManager.current.get(Option(name), hint)
s.Compose(symbols,managedName)
s
}
// a more friendly interface for creating symbols
// all values except symbols in kwargs will be cast to String using its toString() method
def createFromNamedSymbolsNoCheck(
operator: String, name: String = null, attr: Map[String, String] = null)(
kwargs: Map[String, Any]): Symbol = {
val symbolArgs = kwargs.filter { case (key, value) =>
value.isInstanceOf[Symbol]
}.map { case (key, value) =>
(key, value.asInstanceOf[Symbol])
}
val strArgs = kwargs.filter { case (key, value) =>
!value.isInstanceOf[Symbol]
}.map { case (key, value) =>
(key, value.toString)
}
createFromNamedSymbols(operator, name, attr)(symbolArgs, strArgs)
}
// a more friendly interface for creating symbols
// all values except symbols in kwargs will be cast to String using its toString() method
def createFromListedSymbolsNoCheck(
operator: String, name: String = null, attr: Map[String, String] = null)(
symbols: Array[Symbol], kwargs: Map[String, Any] = null): Symbol = {
val args =
if (kwargs == null) null
else kwargs.map { case (key, value) => (key, value.toString) }
createFromListedSymbols(operator, name, attr)(symbols, args)
}
/**
* Helper function to get ndarray lists handles from various inputs.
* @param argKey The name of argument, used for error message.
* @param args list of NDArray or dict of str to NDArray
* Input arguments to the symbols.
* If type is list of NDArray, the position is in the same order of arg_names.
* If type is dict of str to NDArray, then it maps the name of arguments
* to the corresponding NDArray
* @param argNames List of argument names.
* @param allowMissing Whether missing argument is allowed.
* When allowed, the missing handle will be set to None(null)
* @return The positional list of NDArrayHandles generated from input.
*/
private def getNDArrayInputs(argKey: String, args: Seq[NDArray], argNames: Seq[String],
allowMissing: Boolean): (Array[NDArrayHandle], Array[NDArray]) = {
require(args.length == argNames.length, s"Length of $argKey does not match the number of arguments")
val argHandles = args.map(_.handle)
(argHandles.toArray, args.toArray)
}
private def getNDArrayInputs(argKey: String, args: Map[String, NDArray], argNames: Seq[String],
allowMissing: Boolean): (Array[NDArrayHandle], Array[NDArray]) = {
val argArrays = ArrayBuffer.empty[NDArray]
val argHandles = ArrayBuffer.empty[NDArrayHandle]
argNames.foreach { name =>
args.get(name) match {
case Some(narr) =>
argArrays += narr
argHandles += narr.handle
case None =>
require(allowMissing, s"Must specify all the arguments in $argKey")
argArrays += null
argHandles += 0L
}
}
(argHandles.toArray, argArrays.toArray)
}
/**
* Load symbol from a JSON file.
*
* You can also use pickle to do the job if you only work in Python.
* The advantage of load/save is that the file is language agnostic.
* This means a file saved using save can be loaded by other language bindings of brainmatrix.
* You also get the benefit of being able to directly load/save from cloud storage (S3, HDFS).
*
* @param fname The name of the file, examples:
* - `s3://my-bucket/path/my-s3-symbol`
* - `hdfs://my-bucket/path/my-hdfs-symbol`
* - `/path-to/my-local-symbol`
* @return The loaded symbol.
* @see Symbol.save : Used to save symbol into file.
*/
def load(fname: String): Symbol = {
val handle = new SymbolHandleRef
checkCall(_LIB.mxSymbolCreateFromFile(fname, handle))
new Symbol(handle.value)
}
/**
* Load symbol from a JSON string.
* @param json A JSON string.
* @return The loaded symbol.
* @see Symbol.toJson : used to save a symbol into a JSON string.
*/
def loadJson(json: String): Symbol = {
val handle = new SymbolHandleRef
checkCall(_LIB.mxSymbolCreateFromJSON(json, handle))
new Symbol(handle.value)
}
/**
* author: yangxiaoer
* 2017-2-10
*
*/
def loadSymFormFile(fname:String): Symbol = {
val handleRef = new StaticGraphHandleRef
checkCall(_LIB.mxScalaSymbolCreateFromFile(fname, handleRef))
val sg = new StaticGraph()
sg.handle = handleRef.value
val symHandle = new SymbolHandleRef
val s =new Symbol(symHandle.value)
s.staticGraph = sg
s
}
}
private case class SymbolFunction(handle: SymbolHandle, keyVarNumArgs: String)
object SymbolConversions {
implicit def int2Scalar(x: Int): SymbolConversions[Int] = new SymbolConversions(x)
implicit def double2Scalar(x: Double): SymbolConversions[Double] = new SymbolConversions(x)
implicit def float2Scalar(x: Float): SymbolConversions[Float] = new SymbolConversions(x)
}
class SymbolConversions[@specialized(Int, Float, Double) V](val value: V) {
def +(other: Symbol): Symbol = {
other + value
}
def -(other: Symbol): Symbol = {
Symbol.createFromListedSymbols("_RMinusScalar")(
Array(other), Map("scalar" -> value.toString))
}
def *(other: Symbol): Symbol = {
// fixed: this previously returned other + value, which made scalar * symbol
// behave like addition
other * value
}
def /(other: Symbol): Symbol = {
Symbol.createFromListedSymbols("_RDivScalar")(
Array(other), Map("scalar" -> value.toString))
}
}
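// Usage sketch: with the implicit conversions above in scope, a scalar can
// appear on the left-hand side of arithmetic with a Symbol:
//   import SymbolConversions._
//   val x = Symbol.Variable("x")
//   val y = 1f - x  // expands to new SymbolConversions(1f) - x, i.e. _RMinusScalar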
class NodeRef { var value: Node = null }
class DataEntryRef { var value: DataEntry = null }
class OperatorPropertyRef { var value: OperatorProperty = null }
class MapRef { var value: Map[String, String] = null }
class Node(val opRef: OperatorPropertyRef, var name: String = null) {
// brief: operator of this node (opRef)
// brief: name of the node (name)
// brief: inputs to this node
// as a struct, initialization is very important
var inputs: Vector[DataEntry] = Vector[DataEntry]()
var backward_source_node: NodeRef = new NodeRef()
var attr: scala.collection.mutable.Map[String,String] = scala.collection.mutable.Map()
var backward_source_id: Int = -1
def is_atomic: Boolean = {
return (inputs.length == 0 && opRef.value != null)
}
def is_variable(): Boolean = {
return (opRef.value == null && this.backward_source_node.value == null)
}
def is_backward(): Boolean = {
//if there is backward node
return (backward_source_node.value != null)
}
def reset_inputs(){
this.inputs = Vector[DataEntry]()
}
}
class DataEntry(var source: NodeRef, var index: Int) {
var source_id: Int = -1
//brief the source of the node of this data
// val source:NodeRef
//brief index of output from the source
// val index:Int
def Info:String = {
var s = "\tDataEntry:"+index
if(source.value != null)
s += "\n node name:" + source.value.name
if(this.source_id != -1)
s += "\n source_id:" + this.source_id
s
}
}
|
Liuxg16/BrainMatrix
|
scalakernel/src/main/java/thu/brainmatrix/cnn/TestTraining_gpu.scala
|
<reponame>Liuxg16/BrainMatrix
//import thu.brainmatrix.optimizer.SGD
package thu.brainmatrix.cnn
import scala.collection.mutable.ListBuffer
import thu.brainmatrix.Context
import thu.brainmatrix.NDArray
import thu.brainmatrix.optimizer.SGD
import thu.brainmatrix.IO
import thu.brainmatrix.Context.ctx2Array
import thu.brainmatrix.Symbol
import thu.brainmatrix.FeedForward
/**
* by liuxiangen
* 2016-4-5
*/
object TestTraining_gpu {
def main(args:Array[String]){
/**
* for validation
*/
// val lrs = Array(0.00000001,0.0000001,0.000001,0.00001,0.0001,0.001,0.01,0.02,0.03,
// 0.04,0.05,0.06,0.07,0.08,0.09,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1,10,100)//0.1
// val momentums = Array(0.01,0.5,0.1,0.5,0.7,0.8,0.83,0.86,0.88,0.9,0.93,0.96,0.99,1)//0.9
// val wds = Array(1e-6,1e-5,1e-4,1e-3,1e-2,1e-1)
// Array.range(0, 26).map(i => {
// (0 to 13).map(j =>{
// (0 to 5).map(k =>
// train_lenet(lrs(i).toFloat,momentums(j).toFloat,wds(k).toFloat,1)
// )
// })
//
// })
// train_lenet(0.1f,0.9f,0.0001f,1)
Training_mlp
}
def train_lenet(lr:Float,mom:Float,wdd:Float,epochs:Int){
println("----------------validation--------------------")
println("lr: " + lr +"mom: " + mom +"wdd: " + wdd )
val batchSize = 100
val data = Symbol.CreateVariable("data")
val conv1 = Symbol.Convolution()(Map("data" -> data, "name" -> "conv1",
"num_filter" -> 20, "kernel" -> (5, 5)/*, "stride" -> (2, 2)*/))
val act1 = Symbol.Activation()(Map("data" -> conv1, "name" -> "tanh1", "act_type" -> "tanh"))
val mp1 = Symbol.Pooling()(Map("data" -> act1, "name" -> "mp1",
"kernel" -> (2, 2), "stride" -> (2, 2), "pool_type" -> "max"))
//second conv
val conv2 = Symbol.Convolution()(Map("data" -> mp1, "name" -> "conv2", "num_filter" -> 50,
"kernel" -> (5, 5), "stride" -> (2, 2)))
val act2 = Symbol.Activation()(Map("data" -> conv2, "name" -> "tanh2", "act_type" -> "tanh"))
val mp2 = Symbol.Pooling()(Map("data" -> act2, "name" -> "mp2",
"kernel" -> (2, 2), "stride" -> (2, 2), "pool_type" -> "max"))
//first fullc
val fl = Symbol.Flatten()(Map("data" -> mp2, "name" -> "flatten"))
val fc1 = Symbol.FullyConnected()(Map("data" -> fl, "name" -> "fc1", "num_hidden" -> 500))
val act3 = Symbol.Activation()(Map("data" -> fc1, "name" -> "tanh3", "act_type" -> "tanh"))
//second fullc
val fc2 = Symbol.FullyConnected()(Map("data" -> act3, "name" -> "fc2", "num_hidden" -> 10))
//loss
val softmax = Symbol.SoftmaxOutput()(Map("data" -> fc2, "name" -> "sm"))
val numEpoch = epochs
val modelBase = new FeedForward(softmax,Context.gpu(), numEpoch = numEpoch,
optimizer = new SGD(learningRate = lr, momentum = mom, wd = wdd))
val trainDataIter = IO.MNISTIter(scala.collection.immutable.Map(
"image" -> "data/train-images-idx3-ubyte",
"label" -> "data/train-labels-idx1-ubyte",
"data_shape" -> "(1, 28, 28)",
"label_name" -> "sm_label",
"batch_size" -> batchSize.toString,
"shuffle" -> "1",
"flat" -> "0",
"silent" -> "0",
"seed" -> "10"))
val valDataIter = IO.MNISTIter(scala.collection.immutable.Map(
"image" -> "data/t10k-images-idx3-ubyte",
"label" -> "data/t10k-labels-idx1-ubyte",
"data_shape" -> "(1, 28, 28)",
"label_name" -> "sm_label",
"batch_size" -> batchSize.toString,
"shuffle" -> "1",
"flat" -> "0", "silent" -> "0"))
modelBase.fit(trainData = trainDataIter,evalData = valDataIter)
println("Finish fit ...")
val probArrays = modelBase.predict(data = valDataIter)
val prob = probArrays(0)
println("Finish predict ...")
valDataIter.reset()
val labels = ListBuffer.empty[NDArray]
var evalData = valDataIter.next()
while (evalData != null) {
labels += evalData.label(0).copy()
evalData = valDataIter.next()
}
val y = NDArray.concatenate(labels)
val py = NDArray.argmaxChannel(prob)
var numCorrect = 0
var numInst = 0
for ((labelElem, predElem) <- y.toArray zip py.toArray) {
if (labelElem == predElem) {
numCorrect += 1
}
numInst += 1
}
val acc = numCorrect.toFloat / numInst
    println(s"Final accuracy = $acc")
}
// def Alex_mnist{
// val batchSize = 100
// val input_data = mx.symbol.Variable(name="data")
//// stage 1
// val conv1 = mx.symbol.Convolution(data=input_data, kernel=(11, 11), stride=(4, 4), num_filter=96)
// val relu1 = mx.symbol.Activation(data=conv1, act_type="relu")
// val pool1 = mx.symbol.Pooling(data=relu1, pool_type="max", kernel=(3, 3), stride=(2,2))
// val lrn1 = mx.symbol.LRN(data=pool1, alpha=0.0001, beta=0.75, knorm=1, nsize=5)
//// # stage 2
// val conv2 = mx.symbol.Convolution(data=lrn1, kernel=(5, 5), pad=(2, 2), num_filter=256)
// val relu2 = mx.symbol.Activation(data=conv2, act_type="relu")
// val pool2 = mx.symbol.Pooling(data=relu2, kernel=(3, 3), stride=(2, 2), pool_type="max")
// val lrn2 = mx.symbol.LRN(data=pool2, alpha=0.0001, beta=0.75, knorm=1, nsize=5)
//// # stage 3
// val conv3 = mx.symbol.Convolution(data=lrn2, kernel=(3, 3), pad=(1, 1), num_filter=384)
// val relu3 = mx.symbol.Activation(data=conv3, act_type="relu")
// val conv4 = mx.symbol.Convolution(data=relu3, kernel=(3, 3), pad=(1, 1), num_filter=384)
// val relu4 = mx.symbol.Activation(data=conv4, act_type="relu")
// val conv5 = mx.symbol.Convolution(data=relu4, kernel=(3, 3), pad=(1, 1), num_filter=256)
// val relu5 = mx.symbol.Activation(data=conv5, act_type="relu")
// val pool3 = mx.symbol.Pooling(data=relu5, kernel=(3, 3), stride=(2, 2), pool_type="max")
//// # stage 4
// val flatten = mx.symbol.Flatten(data=pool3)
// val fc1 = mx.symbol.FullyConnected(data=flatten, num_hidden=4096)
// val relu6 = mx.symbol.Activation(data=fc1, act_type="relu")
// val dropout1 = mx.symbol.Dropout(data=relu6, p=0.5)
//// # stage 5
// val fc2 = mx.symbol.FullyConnected(data=dropout1, num_hidden=4096)
// val relu7 = mx.symbol.Activation(data=fc2, act_type="relu")
// val dropout2 = mx.symbol.Dropout(data=relu7, p=0.5)
//// # stage 6
// val fc3 = mx.symbol.FullyConnected(data=dropout2, num_hidden=num_classes)
// val softmax = mx.symbol.SoftmaxOutput(data=fc3, name="softmax")
// }
//
def Training_mlp{
val batchSize = 100
val data = Symbol.CreateVariable("data")
// val flatten = Symbol.Flatten(Map("data" -> data, "name" -> "flatten"))
val fc1 = Symbol.FullyConnected()(Map("data" -> data, "name" -> "fc1", "num_hidden" -> 128))
val act1 = Symbol.Activation()(Map("data" -> fc1, "name" -> "relu1", "act_type" -> "relu"))
val fc2 = Symbol.FullyConnected()(Map("data" -> act1, "name" -> "fc2", "num_hidden" -> 64))
val act2 = Symbol.Activation()(Map("data" -> fc2, "name" -> "relu2", "act_type" -> "relu"))
val fc3 = Symbol.FullyConnected()(Map("data" -> act2, "name" -> "fc3", "num_hidden" -> 10))
val sm = Symbol.SoftmaxOutput("sm")(Map("data" -> fc3))
val numEpoch = 50
val model = new FeedForward(sm, Context.gpu(), numEpoch = numEpoch,
optimizer = new SGD(learningRate = 0.1f, momentum = 0.9f, wd = 0.0001f))
// get data
// "./scripts/get_mnist_data.sh" !
val trainDataIter = IO.MNISTIter(scala.collection.immutable.Map(
"image" -> "data/train-images-idx3-ubyte",
"label" -> "data/train-labels-idx1-ubyte",
"data_shape" -> "(784)",
"label_name" -> "sm_label",
"batch_size" -> batchSize.toString,
"shuffle" -> "1",
"flat" -> "1",
"silent" -> "0",
"seed" -> "10"))
println(trainDataIter.provideLabel)
val valDataIter = IO.MNISTIter(scala.collection.immutable.Map(
"image" -> "data/t10k-images-idx3-ubyte",
"label" -> "data/t10k-labels-idx1-ubyte",
"data_shape" -> "(784)",
"label_name" -> "sm_label",
"batch_size" -> batchSize.toString,
"shuffle" -> "1",
"flat" -> "1", "silent" -> "0"))
model.fit(trainDataIter, valDataIter)
println("Finish fit ...")
val probArrays = model.predict(valDataIter)
val prob = probArrays(0)
println("Finish predict ...")
valDataIter.reset()
val labels = ListBuffer.empty[NDArray]
while (valDataIter.hasNext) {
      val evalData = valDataIter.next()
labels += evalData.label(0).copy()
}
val y = NDArray.concatenate(labels)
val py = NDArray.argmaxChannel(prob)
var numCorrect = 0
var numInst = 0
for ((labelElem, predElem) <- y.toArray zip py.toArray) {
if (labelElem == predElem) {
numCorrect += 1
}
numInst += 1
}
val acc = numCorrect.toFloat / numInst
    println(s"Final accuracy = $acc")
}
def testCNN{
val batchSize = 100
val data = Symbol.CreateVariable("data")
val conv1 = Symbol.Convolution()(Map("data" -> data, "name" -> "conv1",
"num_filter" -> 32, "kernel" -> (3, 3), "stride" -> (2, 2)))
val bn1 = Symbol.BatchNorm()(Map("data" -> conv1, "name" -> "bn1"))
val act1 = Symbol.Activation()(Map("data" -> bn1, "name" -> "relu1", "act_type" -> "relu"))
val mp1 = Symbol.Pooling()(Map("data" -> act1, "name" -> "mp1",
"kernel" -> (2, 2), "stride" -> (2, 2), "pool_type" -> "max"))
val conv2 = Symbol.Convolution()(Map("data" -> mp1, "name" -> "conv2", "num_filter" -> 32,
"kernel" -> (3, 3), "stride" -> (2, 2)))
val bn2 = Symbol.BatchNorm()(Map("data" -> conv2, "name" -> "bn2"))
val act2 = Symbol.Activation()(Map("data" -> bn2, "name" -> "relu2", "act_type" -> "relu"))
val mp2 = Symbol.Pooling()(Map("data" -> act2, "name" -> "mp2",
"kernel" -> (2, 2), "stride" -> (2, 2), "pool_type" -> "max"))
val fl = Symbol.Flatten()(Map("data" -> mp2, "name" -> "flatten"))
val fc2 = Symbol.FullyConnected()(Map("data" -> fl, "name" -> "fc2", "num_hidden" -> 10))
val softmax = Symbol.SoftmaxOutput()(Map("data" -> fc2, "name" -> "sm"))
val numEpoch = 1
val modelBase = new FeedForward(softmax, Context.cpu(), numEpoch = numEpoch,
optimizer = new SGD(learningRate = 0.1f, momentum = 0.9f, wd = 0.0001f))
val trainDataIter = IO.MNISTIter(scala.collection.immutable.Map(
"image" -> "data/train-images-idx3-ubyte",
"label" -> "data/train-labels-idx1-ubyte",
"data_shape" -> "(1, 28, 28)",
"label_name" -> "sm_label",
"batch_size" -> batchSize.toString,
"shuffle" -> "1",
"flat" -> "0",
"silent" -> "0",
"seed" -> "10"))
val valDataIter = IO.MNISTIter(scala.collection.immutable.Map(
"image" -> "data/t10k-images-idx3-ubyte",
"label" -> "data/t10k-labels-idx1-ubyte",
"data_shape" -> "(1, 28, 28)",
"label_name" -> "sm_label",
"batch_size" -> batchSize.toString,
"shuffle" -> "1",
"flat" -> "0", "silent" -> "0"))
modelBase.fit(trainData = trainDataIter,evalData = valDataIter)
// println("Finish fit ...")
//
// val probArrays = modelBase.predict(data = valDataIter)
//
// val prob = probArrays(0)
// println("Finish predict ...")
//
// valDataIter.reset()
// val labels = ListBuffer.empty[NDArray]
// var evalData = valDataIter.next()
// while (evalData != null) {
// labels += evalData.label(0).copy()
// evalData = valDataIter.next()
// }
// val y = NDArray.concatenate(labels)
//
// val py = NDArray.argmaxChannel(prob)
//
// var numCorrect = 0
// var numInst = 0
// for ((labelElem, predElem) <- y.toArray zip py.toArray) {
// if (labelElem == predElem) {
// numCorrect += 1
// }
// numInst += 1
// }
// val acc = numCorrect.toFloat / numInst
// println("Final accuracy = ")
// println(acc)
}
def testCNN1{
// symbol net
val batchSize = 100
val data = Symbol.CreateVariable("data")
val conv1 = Symbol.Convolution()(Map("data" -> data, "name" -> "conv1",
"num_filter" -> 32, "kernel" -> (3, 3), "stride" -> (2, 2)))
val bn1 = Symbol.BatchNorm()(Map("data" -> conv1, "name" -> "bn1"))
val act1 = Symbol.Activation()(Map("data" -> bn1, "name" -> "relu1", "act_type" -> "relu"))
val mp1 = Symbol.Pooling()(Map("data" -> act1, "name" -> "mp1",
"kernel" -> (2, 2), "stride" -> (2, 2), "pool_type" -> "max"))
val conv2 = Symbol.Convolution()(Map("data" -> mp1, "name" -> "conv2", "num_filter" -> 32,
"kernel" -> (3, 3), "stride" -> (2, 2)))
val bn2 = Symbol.BatchNorm()(Map("data" -> conv2, "name" -> "bn2"))
val act2 = Symbol.Activation()(Map("data" -> bn2, "name" -> "relu2", "act_type" -> "relu"))
val mp2 = Symbol.Pooling()(Map("data" -> act2, "name" -> "mp2",
"kernel" -> (2, 2), "stride" -> (2, 2), "pool_type" -> "max"))
val fl = Symbol.Flatten()(Map("data" -> mp2, "name" -> "flatten"))
val fc2 = Symbol.FullyConnected()(Map("data" -> fl, "name" -> "fc2", "num_hidden" -> 10))
val softmax = Symbol.SoftmaxOutput()(Map("data" -> fc2, "name" -> "sm"))
// val (a,b,c) = softmax.inferShape(Map("data"->Vector(32,1,48,48)))
// a.foreach(println)
// println("------------------------------------------------------------")
// b.foreach {println}
//------------------------------------------------------
//Vector(100, 1, 48, 48)
//Vector(32, 1, 3, 3)
//Vector(32)
//Vector(32)
//Vector(32)
//Vector(32, 32, 3, 3)
//Vector(32)
//Vector(32)
//Vector(32)
//Vector(10, 288)
//Vector(10)
//Vector(100)
//------------------------------------------------------------
//Vector(100, 10)
val numEpoch = 2
val modelBase = new FeedForward(softmax, Context.cpu(), numEpoch = numEpoch,
optimizer = new SGD(learningRate = 0.1f, momentum = 0.9f, wd = 0.0001f))
val trainDataIter = IO.MNISTIter(scala.collection.immutable.Map(
"image" -> "data/train-images-idx3-ubyte",
"label" -> "data/train-labels-idx1-ubyte",
"data_shape" -> "(1, 28, 28)",
"label_name" -> "sm_label",
"batch_size" -> batchSize.toString,
"shuffle" -> "1",
"flat" -> "0",
"silent" -> "0",
"seed" -> "10"))
val valDataIter = IO.MNISTIter(scala.collection.immutable.Map(
"image" -> "data/t10k-images-idx3-ubyte",
"label" -> "data/t10k-labels-idx1-ubyte",
"data_shape" -> "(1, 28, 28)",
"label_name" -> "sm_label",
"batch_size" -> batchSize.toString,
"shuffle" -> "1",
"flat" -> "0", "silent" -> "0"))
modelBase.fit(trainData = trainDataIter,evalData = valDataIter)
println("Finish fit ...")
// val prob = probArrays(0)
// println("Finish predict ...")
//
// valDataIter.reset()
// val labels = ListBuffer.empty[NDArray]
// var evalData = valDataIter.next()
// while (evalData != null) {
// labels += evalData.label(0).copy()
// evalData = valDataIter.next()
// }
// val y = NDArray.concatenate(labels)
//
// val py = NDArray.argmaxChannel(prob)
//
//// println(y.shape)
//// println(py.shape)
//
// var numCorrect = 0
// var numInst = 0
// for ((labelElem, predElem) <- y.toArray zip py.toArray) {
// if (labelElem == predElem) {
// numCorrect += 1
// }
// numInst += 1
// }
// val acc = numCorrect.toFloat / numInst
// println("Final accuracy = ")
// println(acc)
}
}
|
Liuxg16/BrainMatrix
|
scalakernel/src/main/java/thu/brainmatrix/Callback.scala
|
<gh_stars>0
package thu.brainmatrix
import org.slf4j.{Logger, LoggerFactory}
/**
 * Callback functions that can be used to track training status during an epoch.
* @author <NAME>
*/
object Callback {
class Speedometer(val batchSize: Int, val frequent: Int = 50) extends BatchEndCallback {
private val logger: Logger = LoggerFactory.getLogger(classOf[Speedometer])
private var init = false
private var tic: Long = 0L
private var lastCount: Int = 0
override def invoke(epoch: Int, count: Int, evalMetric: EvalMetric): Unit = {
if (lastCount > count) {
init = false
}
lastCount = count
if (init) {
if (count % frequent == 0) {
val speed = frequent.toDouble * batchSize / (System.currentTimeMillis - tic) * 1000
if (evalMetric != null) {
val (name, value) = evalMetric.get
println("Epoch[%d] Batch [%d]\tSpeed: %.2f samples/sec\tTrain-%s=%f".format(
epoch, count, speed, name, value))
} else {
println("Iter[%d] Batch [%d]\tSpeed: %.2f samples/sec".format(epoch, count, speed))
}
tic = System.currentTimeMillis
}
} else {
init = true
tic = System.currentTimeMillis
}
}
}
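  // Usage sketch: attach a Speedometer as a batch-end callback so throughput is
  // logged every `frequent` batches. Whether FeedForward.fit accepts a
  // batchEndCallback parameter in this repo is an assumption here.
  //   val speedometer = new Callback.Speedometer(batchSize = 100, frequent = 50)
  //   model.fit(trainDataIter, valDataIter, batchEndCallback = speedometer)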
}
|
Liuxg16/BrainMatrix
|
scalakernel/src/main/java/thu/brainmatrix/util/mathTool.scala
|
package thu.brainmatrix.util
import thu.brainmatrix.NDArray
import thu.brainmatrix.Shape
import scala.util.control.Breaks
//import org.opencv.core.Core;
//import org.opencv.core.CvType;
//import org.opencv.core.Mat;
//import org.opencv.core.MatOfDouble
//import org.opencv.highgui.Highgui;
object mathTool {
// Evaluation
def perplexity(label: NDArray, pred: NDArray): Float = {
val shape = label.shape
val size = shape(0) * shape(1)
val labelT = {
val tmp = label.toArray.grouped(shape(1)).toArray
val result = Array.fill[Float](size)(0f)
var idx = 0
for (i <- 0 until shape(1)) {
for (j <- 0 until shape(0)) {
result(idx) = tmp(j)(i)
idx += 1
}
}
result
}
var loss = 0f
val predArray = pred.toArray.grouped(pred.shape(1)).toArray
for (i <- 0 until pred.shape(0)) {
loss += -Math.log(Math.max(1e-10, predArray(i)(labelT(i).toInt)).toFloat).toFloat
}
loss / size
}
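  // Usage note: `label` has shape (batchSize, seqLen) and `pred` has shape
  // (batchSize * seqLen, vocabSize); the return value is the mean negative
  // log-likelihood, so taking exp of it gives the conventional perplexity.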
def output_accuracy(pred: NDArray, target: NDArray): Float = {
val num_instance = pred.shape(0)
val eps = 1e-6
var right = 0
for (i <- 0 until num_instance) {
var mx_p = pred(i, 0)
var p_y: Float = 0
for(j <- 0 until 5){
if(pred(i,j) > mx_p){
mx_p = pred(i,j)
p_y = j
}
}
if(scala.math.abs(p_y - target(i)) < eps) right += 1
}
right * 1.0f / num_instance
}
  /**
   * @author guoshen
   * @date 2016/7/21
   * @brief
   * Weighted probability sampling. The main idea:
   * given a probability distribution pro = [0.2, 0.3, 0.5],
   * compute the cumulative-sum array sum = [0.2, 0.5, 1.0],
   * then draw a random x in [0, 1] and compare it with the entries of sum
   * in order; pick the first entry with sum(i) >= x and return its index i.
   * @source http://blog.csdn.net/blueyyc/article/details/51538885
   */
def SampleByPro1D(pro: NDArray): Int = {
var require_flag = true
pro.shape.toVector match {
case Vector(x,y) => if(x==1 ||y==1) require_flag = true
case Vector(x) => require_flag = true
case _ => require_flag = false
}
    if(!require_flag)
      throw new Exception("SampleByPro1D expects a 1-D probability NDArray!")
val proArr = pro.toArray
// require(pro.shape.length==1 || pro.shape)
var sum: Array[Float] = NDArray.zeros(pro.shape).toArray
var temp_sum: Float = 0
for (i <- 0 until proArr.size) {
temp_sum += proArr(i)
sum(i) = temp_sum
}
var rand = Math.random().toFloat
var res = 0
val loop = new Breaks
loop.breakable {
for (i <- 0 until sum.length) {
if (rand <= sum(i)) { res = i; loop.break() }
}
}
res
}
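  // Usage sketch: sampling an index from a 1-D categorical distribution.
  // Assumes an NDArray.array(values, shape) factory exists; with
  // pro = [0.2, 0.3, 0.5], index 2 is returned about half of the time.
  //   val pro = NDArray.array(Array(0.2f, 0.3f, 0.5f), Shape(3))
  //   val idx = mathTool.SampleByPro1D(pro)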
  /**
   * @author guoshen
   * @date 2016/7/21
   * @brief
   * Row-wise weighted probability sampling: applies the 1-D cumulative-sum
   * sampling above to each row of a 2-D probability matrix and returns one
   * sampled index per row.
   * @source http://blog.csdn.net/blueyyc/article/details/51538885
   */
def SampleByPro2D(pro: NDArray): Array[Int] = {
var require_flag = true
var (rows,cols) = (0,0)
pro.shape.toVector match {
case Vector(x,y) =>{
require_flag = true
rows = x
cols = y
}
case _ => require_flag = false
}
    if(!require_flag)
      throw new Exception("SampleByPro2D expects a 2-D probability NDArray!")
val sample_arr = for(i <- 0 until rows) yield{
val proi = pro.slice(i)
SampleByPro1D(proi)
}
require(sample_arr.length==rows,s"required:$rows, found:${sample_arr.length}")
sample_arr.toArray
}
// /**
// * arr: an arrary of one dimension
// * return the mat with the shape:rows x cols
// *
// */
// def ArrayToMat(arr:Array[Float],rows:Int,cols:Int):Mat={
// System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
// val m:Mat = Mat.eye(cols, rows, CvType.CV_8UC1);
// for(i<-0 until cols;j<-0 until rows){
// m.put(i,j,arr(j+i*rows))
// }
// m
// }
//
// /**
// * arr: an arrary of one dimension
// * return the mat with the shape:rows x cols
// *
// */
// def NDArrayToMat(nda:NDArray):Mat={
// if(nda.shape.length!=2)
// throw new java.lang.UnsupportedOperationException("This function only surport two dimension NDArray");
// val arr= nda.toArray
// ArrayToMat(arr,nda.shape(0),nda.shape(1))
// }
//
// def showNDArray(nda:NDArray,name:String){
// val mat = NDArrayToMat(nda)
// Highgui.imwrite(name+".png", mat);
// }
//
  /**
   * Author: Liuxianggen
   * date: 2016-11-10
   * TODO: not yet implemented
   */
def times[T](arr:Array[T]){
???
}
def main(args:Array[String]){
// val mat = ArrayToMat(Array(230,230,230,5,6,230),3,2)
// val mat = NDArrayToMat(NDArray.ones(3,5)*240)
// Highgui.imwrite("image.png", mat);
// println("mat:"+mat.dump())
}
}
|
Liuxg16/BrainMatrix
|
scala-package/core/src/main/scala/ml/dmlc/mxnet/Executor.scala
|
<gh_stars>1-10
package ml.dmlc.mxnet
import ml.dmlc.mxnet.Base._
import org.slf4j.{Logger, LoggerFactory}
import scala.collection.mutable.ArrayBuffer
object Executor {
// Get the dictionary given name and ndarray pairs.
private[mxnet] def getDict(names: Seq[String],
ndarrays: Seq[NDArray]): Map[String, NDArray] = {
require(names.toSet.size == names.length, "Duplicate names detected")
(names zip ndarrays).toMap
}
/**
* Get input slice from the input shape.
* @param batchSize The number of samples in a mini-batch.
* @param workLoadList The list of work load for different devices, in the same order as ctx
* @return The split slices to get a specific slice.
* @throws IllegalArgumentException
   * If there are too many splits such that some slices would be empty.
*/
private[mxnet] def splitInputSlice(batchSize: Int,
workLoadList: Seq[Float]): Array[(Int, Int)] = {
val totalWorkLoad = workLoadList.sum
val batchNumList = workLoadList.map(workLoad =>
math.round(workLoad * batchSize / totalWorkLoad)).toArray
val batchNumSum = batchNumList.sum
if (batchNumSum < batchSize) {
batchNumList(batchNumList.length-1) += batchSize - batchNumSum
}
val slices = ArrayBuffer.empty[(Int, Int)]
var end = 0
batchNumList.foreach(batchNum => {
val begin = math.min(end, batchSize)
end = math.min(begin + batchNum, batchSize)
require(begin < end, "Too many slices such that some splits are empty")
slices.append((begin, end))
})
slices.toArray
}
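  // Worked example: splitInputSlice(10, Seq(1f, 1f, 2f)) computes per-device
  // batch counts round(2.5) = 3, 3 and round(5.0) = 5; the min-clamp in the
  // loop then trims the last slice, yielding Array((0, 3), (3, 6), (6, 10)).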
/**
* Check the argument names of symbol.
* This function checks the duplication of arguments in Symbol.
* The check is done for feedforward net for now.
* @param symbol The network configuration
*/
private[mxnet] def checkArguments(symbol: Symbol): Unit = {
val argNames = symbol.listArguments()
    require(argNames.toSet.size == argNames.length,
      "Found duplicated argument names; " +
        "please make the weight names unique (using the name argument), " +
        s"arguments are $argNames")
val auxNames = symbol.listAuxiliaryStates()
    require(auxNames.toSet.size == auxNames.length,
      "Found duplicated auxiliary param names; " +
        "please make the weight names unique (using the name argument), " +
        s"arguments are $auxNames")
}
// Load a list of arrays into a list of arrays
private[mxnet] def loadGeneral(data: Seq[NDArray], targets: Seq[NDArray]): Unit = {
(data zip targets).foreach { case (dSrc, dTarget) =>
dSrc.copyTo(dTarget)
}
}
// Load a list of arrays into a list of arrays specified by slices
private[mxnet] def loadGeneralMulti(data: Seq[NDArray],
targets: Seq[Array[(Int, Int, NDArray)]]): Unit = {
for ((src, dTargets) <- data zip targets) {
for ((start, end, dst) <- dTargets) {
val sliced = src.slice(start, end)
sliced.copyTo(dst)
sliced.dispose()
}
}
}
// Load data into sliced arrays
private[mxnet] def loadDataMulti(batch: DataBatch,
targets: Seq[Array[(Int, Int, NDArray)]]): Unit = {
loadGeneralMulti(batch.data, targets)
}
private[mxnet] def loadData(batch: DataBatch, targets: Seq[NDArray]): Unit = {
loadGeneral(batch.data, targets)
}
// Load label into sliced arrays
private[mxnet] def loadLabelMulti(batch: DataBatch,
targets: Seq[Array[(Int, Int, NDArray)]]): Unit = {
loadGeneralMulti(batch.label, targets)
}
private[mxnet] def loadLabel(batch: DataBatch, targets: Seq[NDArray]): Unit = {
loadGeneral(batch.label, targets)
}
}
/**
* Symbolic Executor component of MXNet <br />
* <b>
* WARNING: it is your responsibility to clear this object through dispose().
* NEVER rely on the GC strategy
* </b>
*
* @author <NAME>
*
* Constructor: please use Symbol.bind and Symbol.simpleBind instead.
* @param handle ExecutorHandle generated by calling Bind
* @param symbol
* @see Symbol.bind : to create executor
*/
// scalastyle:off finalize
class Executor private[mxnet](private[mxnet] val handle: ExecutorHandle,
private[mxnet] val symbol: Symbol) {
private[mxnet] var argArrays: Array[NDArray] = null
private[mxnet] var gradArrays: Array[NDArray] = null
private[mxnet] var auxArrays: Array[NDArray] = null
val outputs: Array[NDArray] = getOutputs
protected var _argDict: Map[String, NDArray] = null
protected var _auxDict: Map[String, NDArray] = null
protected var monitorCallback: MXMonitorCallback = null
private[mxnet] var _ctx: Context = null
private[mxnet] var _gradsReq: Iterable[_] = null
private[mxnet] var _group2ctx: Map[String, Context] = null
private var disposed = false
override protected def finalize(): Unit = {
dispose()
}
def dispose(): Unit = {
if (!disposed) {
outputs.foreach(_.dispose())
_LIB.mxExecutorFree(handle)
disposed = true
}
}
/**
* Return a new executor with the same symbol and shared memory,
* but different input/output shapes.
* For runtime reshaping, variable length sequences, etc.
* The returned executor shares state with the current one,
* and cannot be used in parallel with it.
* @param partialShaping Whether to allow changing the shape of unspecified arguments.
   * @param allowUpSizing Whether to allow allocating new ndarrays that are larger than the original.
* @param kwargs Map of string to Shape.
* - new shape for arguments.
* @return
* executor A new executor that shares memory with this.
*/
def reshape(partialShaping: Boolean = false, allowUpSizing: Boolean = false,
kwargs: Map[String, Shape]): Executor = {
val (argShapes, _, auxShapes) = this.symbol.inferShape(kwargs)
require(argShapes != null, "Insufficient argument shapes provided.")
var newArgDict = Map[String, NDArray]()
var newGradDict = Map[String, NDArray]()
this.symbol.listArguments().zipWithIndex.foreach { case (name, i) =>
val newShape = argShapes(i)
val arr = this.argArrays(i)
val dArr = if (this.gradArrays == null) null else this.gradArrays(i)
if (partialShaping || kwargs.contains(name) || newShape.equals(arr.shape)) {
if (newShape.product > arr.shape.product) {
require(allowUpSizing, s"New shape of arg:$name larger than original. " +
"First making a big executor and then down sizing it " +
"is more efficient than the reverse." +
"If you really want to up size, set allowUpSizing = true " +
"to enable allocation of new arrays.")
newArgDict = newArgDict + (name -> NDArray.empty(newShape, arr.context))
if (dArr != null) {
newGradDict = newGradDict + (name -> NDArray.empty(newShape, dArr.context))
}
} else {
newArgDict = newArgDict + (name -> arr.reshape(newShape.toArray))
if (dArr != null) {
newGradDict = newGradDict + (name -> dArr.reshape(newShape.toArray))
}
}
} else {
import java.lang.AssertionError
throw new AssertionError(s"Shape of unspecified array arg:$name changed." +
"This can cause the new executor to not share parameters " +
"with the old one. Please check for error in network." +
"If this is intended, set partialShaping = true to suppress this warning.")
}
}
var newAuxDict = Map[String, NDArray]()
val zip3 = (this.symbol.listAuxiliaryStates, auxShapes, this.auxArrays).zipped
zip3.foreach { case (name, newShape, arr) =>
if (partialShaping || newShape.equals(arr.shape)) {
if (newShape.product > arr.shape.product) {
require(allowUpSizing, s"New shape of aux:$name larger than original. " +
"First making a big executor and then down sizing it " +
"is more efficient than the reverse." +
"If you really want to up size, set allowUpSizing = true " +
"to enable allocation of new arrays.")
newAuxDict = newAuxDict + (name -> NDArray.empty(newShape, arr.context))
} else {
newAuxDict = newAuxDict + (name -> arr.reshape(newShape.toArray))
}
} else {
import java.lang.AssertionError
throw new AssertionError(s"Shape of unspecified array aux:$name changed." +
"This can cause the new executor to not share parameters " +
"with the old one. Please check for error in network." +
"If this is intended, set partialShaping = true to suppress this warning.")
}
}
if (this._gradsReq.isInstanceOf[Seq[_]]) {
this.symbol.bind(this._ctx,
newArgDict,
newGradDict,
this._gradsReq.asInstanceOf[Seq[String]],
newAuxDict,
this._group2ctx,
this)
} else {
this.symbol.bind(this._ctx,
newArgDict,
newGradDict,
this._gradsReq.asInstanceOf[Map[String, String]],
newAuxDict,
this._group2ctx,
this)
}
}
/**
* list all the output ndarray
   * @return A list of NDArrays bound to the heads of the executor.
*/
private def getOutputs: Array[NDArray] = {
val ndHandles = ArrayBuffer[NDArrayHandle]()
checkCall(_LIB.mxExecutorOutputs(handle, ndHandles))
ndHandles.toArray.map(new NDArray(_))
}
/**
   * Calculate the outputs specified by the bound symbol.
   * @param isTrain whether this forward pass is run in training mode.
* @param kwargs Additional specification of input arguments.
*/
def forward(isTrain: Boolean, kwargs: (String, NDArray)*): Unit = {
kwargs.foreach { case (name, array) =>
require(argDict.contains(name), s"Unknown argument $name")
array.copyTo(argDict(name))
}
checkCall(_LIB.mxExecutorForward(handle, if (isTrain) 1 else 0))
}
def forward(): Unit = {
forward(isTrain = false)
}
/**
* Do backward pass to get the gradient of arguments.
* @param outGrads Gradient on the outputs to be propagated back.
* This parameter is only needed when bind is called
* on outputs that are not a loss function.
*/
def backward(outGrads: Array[NDArray]): Unit = {
require(outGrads != null)
val ndArrayPtrs = outGrads.map(_.handle)
checkCall(_LIB.mxExecutorBackward(handle, ndArrayPtrs))
}
def backward(outGrad: NDArray): Unit = {
require(outGrad != null)
backward(Array(outGrad))
}
def backward(): Unit = {
backward(Array.empty[NDArray])
}
/**
* Install callback.
* @param callback Takes a string and an NDArrayHandle.
*/
def setMonitorCallback(callback: MXMonitorCallback): Unit = {
monitorCallback = callback
checkCall(_LIB.mxExecutorSetMonitorCallback(handle, monitorCallback))
}
/**
   * Get dictionary representation of argument arrays.
* @return The dictionary that maps name of arguments to NDArrays.
* @throws IllegalArgumentException if there are duplicated names in the arguments.
*/
def argDict: Map[String, NDArray] = {
if (_argDict == null) {
_argDict = Executor.getDict(symbol.listArguments(), argArrays)
}
_argDict
}
/**
* Get dictionary representation of auxiliary states arrays.
* @return The dictionary that maps name of auxiliary states to NDArrays.
* @throws IllegalArgumentException if there are duplicated names in the auxiliary states.
*/
def auxDict: Map[String, NDArray] = {
if (_auxDict == null) {
_auxDict = Executor.getDict(symbol.listAuxiliaryStates(), auxArrays)
}
_auxDict
}
/**
* Copy parameters from arg_params, aux_params into executor's internal array.
* @param argParams : dict of name to NDArray of arguments
* @param auxParams : dict of name to NDArray of auxiliary states.
* @param allowExtraParams
* Whether allow extra parameters that are not needed by symbol
   * If this is true, no error will be thrown when arg_params or aux_params
   * contain extra parameters that are not needed by the executor.
* @throws IllegalArgumentException
* If there is additional parameters in the dict but allow_extra_params=False
*/
def copyParamsFrom(argParams: Map[String, NDArray],
auxParams: Map[String, NDArray],
allowExtraParams: Boolean = false): Unit = {
argParams.foreach { case (name, array) =>
if (argDict.contains(name)) {
array.copyTo(argDict(name))
} else {
        require(allowExtraParams, s"Found name $name that is not in the arguments")
}
}
if (auxParams != null) {
auxParams.foreach { case (name, array) =>
if (auxDict.contains(name)) {
array.copyTo(auxDict(name))
} else {
          require(allowExtraParams, s"Found name $name that is not in the auxiliary states")
}
}
}
}
def copyParamsFrom(argParams: Map[String, NDArray], allowExtraParams: Boolean): Unit = {
copyParamsFrom(argParams, null, allowExtraParams)
}
def copyParamsFrom(argParams: Map[String, NDArray]): Unit = {
copyParamsFrom(argParams, allowExtraParams = false)
}
/**
* Get a debug string about internal execution plan.
* @return Debug string of the executor.
*/
def debugStr: String = {
val str = new RefString
checkCall(_LIB.mxExecutorPrint(handle, str))
str.value
}
}
// scalastyle:on finalize
/**
* Helper class to manage multiple executors for data parallelism.
* @author <NAME>
* @param symbol output symbol
* @param ctx devices to run on
* @param paramNames Name of all trainable parameters of the network.
* @param argNames Name of all arguments of the network.
* @param auxNames Name of all auxiliary states of the network.
* @param trainData Training data iterator.
* @param workLoadList The list of work load for different devices, in the same order as ctx
* @param logger When not specified, default logger will be used.
*/
class DataParallelExecutorManager(symbol: Symbol,
ctx: Array[Context],
paramNames: Seq[String],
argNames: Seq[String],
private val auxNames: Seq[String],
trainData: DataIter,
private var workLoadList: Seq[Float] = null,
logger: Logger = DataParallelExecutorManager.logger) {
// preparation
private val numDevice = ctx.length
logger.info(s"Start training with [${ctx.mkString(",")}]")
// make sure the architecture is valid
Executor.checkArguments(symbol)
if (workLoadList == null) {
workLoadList = Seq.fill(numDevice)(1f)
}
require(workLoadList.size == numDevice, "Invalid settings for work load.")
private val slices = Executor.splitInputSlice(trainData.batchSize, workLoadList)
private val trainExecs =
ctx.zipWithIndex.map { case (context, i) =>
val dataShapes =
trainData.provideData.map { case (name: String, shape: Shape) =>
(name, Shape(slices(i)._2 - slices(i)._1) ++ shape.drop(1))
}
symbol.simpleBind(context, "write", shapeDict = dataShapes)
}
// data structure
private val dataNames = trainData.provideData.map(_._1).toArray
private val labelNames = trainData.provideLabel.map(_._1).toArray
private val dataArrays =
dataNames.map { name =>
trainExecs.zipWithIndex.map { case (exec, i) =>
val slice = slices(i)
(slice._1, slice._2, exec.argDict(name))
}
}
private val labelArrays =
labelNames.map { name =>
trainExecs.zipWithIndex.map { case (exec, i) =>
val slice = slices(i)
(slice._1, slice._2, exec.argDict(name))
}
}
private val paramIdx = (0 until argNames.length).filter { i =>
paramNames.contains(argNames(i))
}
private[mxnet] val _paramNames = paramIdx.map(argNames(_))
private[mxnet] val paramArrays = paramIdx.map { i =>
trainExecs.map(_.argArrays(i))
}.toArray
private[mxnet] val gradArrays = paramIdx.map { i =>
trainExecs.map(_.gradArrays(i))
}.toArray
private val auxArrays = (0 until auxNames.length).map { i =>
trainExecs.map(_.auxArrays(i))
}.toArray
private val batchSize = trainData.batchSize
private val outputShapes: Array[Shape] = trainExecs(0).outputs.map { x: NDArray =>
Shape(batchSize) ++ x.shape.drop(1)
}
private[mxnet] val cpuOutputArrays = outputShapes.map(NDArray.zeros(_))
/**
* Release the related executors.
* The object shall never be used after it is disposed.
*/
def dispose(): Unit = {
trainExecs.foreach(_.dispose())
}
// Install monitor on all executors
def installMonitor(monitor: Monitor): Unit = {
trainExecs.foreach(monitor.install)
}
/**
* Set parameter and aux values
* @param argParams source parameter arrays
* @param auxParams source aux arrays
*/
def setParams(argParams: Map[String, NDArray], auxParams: Map[String, NDArray]): Unit = {
trainExecs.foreach(_.copyParamsFrom(argParams, auxParams))
}
/**
* Copy data from each executor to `arg_params` and `aux_params`
* @param argParams target parameter arrays
* @param auxParams target aux arrays
   * @note This function updates the NDArrays in arg_params and aux_params in place.
*/
def copyTo(argParams: Map[String, NDArray], auxParams: Map[String, NDArray]): Unit = {
for ((name, block) <- _paramNames zip paramArrays) {
val weight = block.map(_.copyTo(Context.cpu())).reduce(_ + _) / block.length
weight.copyTo(argParams(name))
}
for ((name, block) <- auxNames zip auxArrays) {
val weight = block.map(_.copyTo(Context.cpu())).reduce(_ + _) / block.length
weight.copyTo(auxParams(name))
}
}
// load data and labels into arrays
def loadDataBatch(dataBatch: DataBatch): Unit = {
Executor.loadDataMulti(dataBatch, dataArrays)
Executor.loadLabelMulti(dataBatch, labelArrays)
}
// Perform a forward pass on each executor
def forward(isTrain: Boolean = false): Unit = {
for ((texec, islice) <- trainExecs zip slices) {
texec.forward(isTrain)
for ((cpuOut, devOut) <- cpuOutputArrays zip texec.outputs) {
devOut.copyTo(cpuOut.slice(islice))
}
}
}
// Perform a backward pass on each executor
def backward(): Unit = {
trainExecs.foreach(_.backward())
}
}
object DataParallelExecutorManager {
private val logger = LoggerFactory.getLogger(classOf[Model])
}
|
Liuxg16/BrainMatrix
|
scalakernel/src/test/java/thu/brainmatrix/suite/TestModel.scala
|
<gh_stars>0
package thu.brainmatrix.suite
object TestModel {
def main(args:Array[String]){
}
}
|
Liuxg16/BrainMatrix
|
scalakernel/src/main/java/thu/brainmatrix/sae/AEModel.scala
|
<filename>scalakernel/src/main/java/thu/brainmatrix/sae/AEModel.scala
package thu.brainmatrix.sae
import thu.brainmatrix.Symbol
import thu.brainmatrix.NDArray
import thu.brainmatrix.Base._
import thu.brainmatrix.Context
import java.io.FileNotFoundException
import org.slf4j.LoggerFactory
import scala.collection.mutable.ListBuffer
import thu.brainmatrix.DataIter
class AEModel(val xpu: Context = Context.defaultCtx) {
var loss:Symbol = null
    /**
     * The following four fields are lists of (key, value) tuples,
     * each pairing a parameter name with its value.
     */
var args = ListBuffer[(String,NDArray)]()
var args_grad = ListBuffer[(String,NDArray)]()
var args_mult = ListBuffer[(String,Float)]()
var auxs = ListBuffer[(String,NDArray)]()
def save(fname:String){
AEModel.logger.info("save model!")
}
def load(fname:String){
AEModel.logger.info("load model!")
}
}
object AEModel{
private val logger = LoggerFactory.getLogger(classOf[AEModel])
def extract_feature(sym:Symbol, args:ListBuffer[(String,NDArray)],auxs:ListBuffer[(String,NDArray)],data_iter:DataIter,xpu:Context =Context.cpu())
:Map[String,ListBuffer[NDArray]] = {
val input_buffs = data_iter.provideData.map{
x => NDArray.empty(x._2,xpu)
}
val input_names = data_iter.provideData.map(_._1)
val args_ef = args.toMap ++ input_names.zip(input_buffs).toMap
val exe = sym.easy_bind(xpu, args = args_ef, auxStates = auxs.toMap)
var output_buffs:Array[NDArray] =null
var outputs = Array.fill[ListBuffer[NDArray]](exe.outputs.length)(ListBuffer[NDArray]())
data_iter.reset()
var dataBatch = data_iter.next()
while (dataBatch != null) {
for ((data,buff)<- dataBatch.data.zip(input_buffs)){
data.copyTo(buff)
}
exe.forward(isTrain=false)
if(output_buffs==null){
output_buffs = exe.outputs.map(x => {
NDArray.empty(x.shape, ctx=Context.defaultCtx)
})
}else{
for((out,buff)<-outputs.zip(output_buffs)){
out.append(buff)
}
}
for((out,buff)<-exe.outputs.zip(output_buffs)){
out.copyTo(buff)
}
dataBatch = data_iter.next()
}
for((out,buff)<-outputs.zip(output_buffs)){
out.append(buff)
}
sym.listOutputs().zip(outputs).toMap
}
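  // Usage sketch (assumes trained `args`/`auxs` buffers and a DataIter `iter`;
  // `encoderSym` is a hypothetical feature-extraction symbol):
  //   val feats = AEModel.extract_feature(encoderSym, args, auxs, iter, Context.cpu())
  //   // feats maps each output name of `encoderSym` to its per-batch feature NDArrays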
def main(args:Array[String]){
AEModel.logger.warn("FileNotFoundException ?")
throw new FileNotFoundException("FileNotFoundException!")
println("test!")
}
}
//
//# pylint: skip-file
//import mxnet as mx
//import numpy as np
//import logging
//from solver import Solver, Monitor
//try:
// import cPickle as pickle
//except:
// import pickle
//
//
//def extract_feature(sym, args, auxs, data_iter, N, xpu=mx.cpu()):
// input_buffs = [mx.nd.empty(shape, ctx=xpu) for k, shape in data_iter.provide_data]
// input_names = [k for k, shape in data_iter.provide_data]
// args = dict(args, **dict(zip(input_names, input_buffs)))
// exe = sym.bind(xpu, args=args, aux_states=auxs)
// outputs = [[] for i in exe.outputs]
// output_buffs = None
//
// data_iter.hard_reset()
// for batch in data_iter:
// for data, buff in zip(batch.data, input_buffs):
// data.copyto(buff)
// exe.forward(is_train=False)
// if output_buffs is None:
// output_buffs = [mx.nd.empty(i.shape, ctx=mx.cpu()) for i in exe.outputs]
// else:
// for out, buff in zip(outputs, output_buffs):
// out.append(buff.asnumpy())
// for out, buff in zip(exe.outputs, output_buffs):
// out.copyto(buff)
// for out, buff in zip(outputs, output_buffs):
// out.append(buff.asnumpy())
// outputs = [np.concatenate(i, axis=0)[:N] for i in outputs]
// return dict(zip(sym.list_outputs(), outputs))
//
//class MXModel(object):
// def __init__(self, xpu=mx.cpu(), *args, **kwargs):
// self.xpu = xpu
// self.loss = None
// self.args = {}
// self.args_grad = {}
// self.args_mult = {}
// self.auxs = {}
// self.setup(*args, **kwargs)
//
// def save(self, fname):
// args_save = {key: v.asnumpy() for key, v in self.args.items()}
// with open(fname, 'w') as fout:
// pickle.dump(args_save, fout)
//
// def load(self, fname):
// with open(fname) as fin:
// args_save = pickle.load(fin)
// for key, v in args_save.items():
// if key in self.args:
// self.args[key][:] = v
//
// def setup(self, *args, **kwargs):
// raise NotImplementedError("must override this")
|
Liuxg16/BrainMatrix
|
scalakernel/src/main/java/thu/brainmatrix/Imperative/MLP.scala
|
<filename>scalakernel/src/main/java/thu/brainmatrix/Imperative/MLP.scala<gh_stars>0
package thu.brainmatrix.Imperative
import scala.collection.mutable.ListBuffer
import thu.brainmatrix.Context
import thu.brainmatrix.NDArray
import thu.brainmatrix.optimizer.SGD
import thu.brainmatrix.IO
import thu.brainmatrix.Context.ctx2Array
import thu.brainmatrix.Symbol
import thu.brainmatrix.FeedForward
import thu.brainmatrix.Shape
import thu.brainmatrix.Random
import thu.brainmatrix.DataBatch
object MLP {
val batchSize = 100
val inputSize = 784
val hiddenSize = 40
val classSize:Int = 10
def mlp_entroy(implicit ctx:Context){
val trainDataIter = IO.MNISTIter(scala.collection.immutable.Map(
"image" -> "data/train-images-idx3-ubyte",
"label" -> "data/train-labels-idx1-ubyte",
"data_shape" -> "(1, 784)",
"label_name" -> "sm_label",
"batch_size" -> batchSize.toString,
"shuffle" -> "1",
"flat" -> "1",
"silent" -> "0",
"seed" -> "10"))
val rates = Array(0.00000001f)//succeed!!
// val rates = Array(0.00000001f,0.000001f,0.001f,0.04f,0.9f,3f,10f,50f,100f,1000f,10000f,100000f,1000000f)
var max = 0f
rates.foreach(rate => {
val mlp = new MLP(batchSize,inputSize,hiddenSize,classSize)
var n = 0
var dataBatch = trainDataIter.next()
for(k<-0 to 0){
while(trainDataIter.hasNext && n<100){
n += 1
// dataBatch = trainDataIter.next()
mlp.forward(dataBatch)
mlp.update(rate)
//
println(mlp.outputs(2))
// println(mlp.outputs(4))
// println(mlp.U_nda)
// if(n%10 == 0)
val error = mlp.error(dataBatch.label(0))
if(max<error)
max = error
print(error+" ")
}
}
println(rate)
mlp.dispose()
})
println(s"max:$max")
// println(trainDataIter.getData()(0).shape)
}
def main(args:Array[String]){
implicit val ctx = Context.cpu(0)
mlp_entroy
}
}
class MLP(val batchSize:Int, val inputSize:Int,val hiddenSize:Int,val classSize:Int)(implicit ctx:Context){
val eps = 1e-8
val data = Symbol.CreateVariable("data")
val W = Symbol.CreateVariable("W")
val U = Symbol.CreateVariable("U")
val label = Symbol.CreateVariable("label")
val h = Symbol.FullyConnected("h")(Map("data" -> data, "num_hidden" -> hiddenSize,"weight"->W,"no_bias"->true))
val h_act1 = Symbol.Activation("h_act1")(Map("data" -> h, "act_type" -> "sigmoid"))
val z = Symbol.FullyConnected("z")(Map("data" -> h_act1, "num_hidden" -> classSize,"weight"->U,"no_bias"->true))
val y = Symbol.SoftmaxActivation("y")(Map("data"->z))
val d_z = y - label //(n,10)
// val d_U = Symbol.FullyConnected("h")(Map("data" -> h, "num_hidden" -> hiddenSize,"weight"->(d_z),"no_bias"->true))
val d_U = Symbol.Dot(Symbol.transpose(d_z),h_act1,hiddenSize) //(10,n),(n,hn)=>(10,hiddenSize)
val d_h_act1 = Symbol.Dot(d_z,U,hiddenSize) //(n,10),(10,hn)=>(num,hn)
val d_h = d_h_act1 * h_act1* (h_act1-1)*(-1)
val d_W = Symbol.Dot(Symbol.transpose(d_h),data,inputSize) //(hn,num),(num,inputSize)=>(hn,inputSize)
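  // The gradient symbols above implement manual backprop for this one-hidden-layer
  // softmax MLP: dL/dz = y - label (softmax + cross entropy), dL/dU = dL/dz^T . h_act1,
  // dL/dh = (dL/dz . U) * h_act1 * (1 - h_act1) (the sigmoid derivative),
  // and dL/dW = dL/dh^T . data.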
val out = Symbol.Group(y,d_W,d_U,h,h_act1)
val data_nda =Random.uniform(0,1, Shape(batchSize,inputSize), ctx)
val W_nda = Random.uniform(0,1,Shape(hiddenSize,inputSize), ctx)*1e-8f
val U_nda = Random.uniform(0,1, Shape(classSize,hiddenSize), ctx)*1e-8f
val label_nda = Random.uniform(0,1, Shape(batchSize,classSize), ctx)
// println(W_nda)
// gradient
val data_nda_g =Random.uniform(0,1, Shape(batchSize,inputSize), ctx)
val W_nda_g = NDArray.zeros(Shape(hiddenSize,inputSize), ctx)
val U_nda_g = NDArray.zeros(Shape(classSize,hiddenSize), ctx)
val label_nda_g = Random.uniform(0,1, Shape(batchSize,classSize), ctx)
val in_args = Map("data"->data_nda,"W"->W_nda,"U"->U_nda,"label"->label_nda)
val arg_grad_store = Map("data"->data_nda_g,"W"->W_nda_g,"U"->U_nda_g,"label"->label_nda_g)
val executor = out.easy_bind(ctx,in_args, arg_grad_store)
def forward(batch:DataBatch){
assert(batch.data(0).shape(1)== inputSize)
batch.data(0).copyTo(data_nda)
NDArray.onehotEncode(batch.label(0),label_nda)
// println(label_nda)
// batch.label(0).copyTo(label_nda)
// println(label_nda)
executor.forward(true)
// val h = NDArray.sigmod(NDArray.dot(W, data))
// val z = NDArray.sigmod(NDArray.dot(U, h))
// var expy = NDArray.exp(z)
// p(t) = expy / (NDArray.sum(expy).toScalar)
// println("hehe:" + p(t).toArray(targets(t - 1)))
    // loss += -scala.math.log(p(t).toArray(targets(t - 1))) // loss function: cross entropy
}
// def backward(){
//
//// executor.backward()
//// println(W_nda)
// }
def update(learningRate:Float = 0.9f){
W_nda -= this.outputs(1) *learningRate
U_nda -= this.outputs(2) * learningRate
//
//
// W_nda_g
// W_nda -= W_nda_g
// U_nda_g *= learningRate
// U_nda -= U_nda_g
// println(U_nda_g.slice(0))
// println((U_nda.slice(0)))
// arg_grad_store("W") *= learningRate
// in_args("W") -= arg_grad_store("W")
// arg_grad_store("U") *= learningRate
// in_args("U") += arg_grad_store("U")
}
def error(label:NDArray):Float = {
val label_pred = NDArray.argmaxChannel(executor.outputs(0))
// println(label_pred)
var right = 0
val num_instance = label_pred.shape(0)
for (i <- 0 until num_instance) {
if(scala.math.abs(label_pred(i) - label(i)) < this.eps)
right += 1
}
right * 1.0f / num_instance
}
def output_accuracy(pred: NDArray, target: NDArray): Float = {
val num_instance = pred.shape(0)
val eps = 1e-6
var right = 0
for (i <- 0 until num_instance) {
var mx_p = pred(i, 0)
var p_y: Float = 0
for(j <- 0 until 5){
if(pred(i,j) > mx_p){
mx_p = pred(i,j)
p_y = j
}
}
if(scala.math.abs(p_y - target(i)) < eps) right += 1
}
right * 1.0f / num_instance
}
def outputs = executor.outputs
def dispose(){
executor.dispose()
}
}
class MLP_auto(val batchSize:Int, val inputSize:Int,val hiddenSize:Int,val classSize:Int)(implicit ctx:Context){
val eps = 1e-8
val data = Symbol.CreateVariable("data")
val W = Symbol.CreateVariable("W")
val U = Symbol.CreateVariable("U")
val label = Symbol.CreateVariable("label")
val h = Symbol.FullyConnected("h")(Map("data" -> data, "num_hidden" -> hiddenSize,"weight"->W,"no_bias"->true))
val h_act1 = Symbol.Activation()(Map("data" -> h, "name" -> "h_act1", "act_type" -> "sigmoid"))
val z = Symbol.FullyConnected("z")(Map("data" -> h_act1, "num_hidden" -> classSize,"weight"->U,"no_bias"->true))
val y = Symbol.SoftmaxActivation("y")(Map("data"->z))
val ysoft = Symbol.SoftmaxOutput("ysoft")(Map("data"->z,"label"->label))
val data_nda =Random.uniform(0,1, Shape(batchSize,inputSize), ctx)
val W_nda = Random.uniform(0,1,Shape(hiddenSize,inputSize), ctx)
val U_nda = Random.uniform(0,1, Shape(classSize,hiddenSize), ctx)
val label_nda = Random.uniform(0,1, Shape(batchSize), ctx)
// println(W_nda)
// gradient
val data_nda_g =Random.uniform(0,1, Shape(batchSize,inputSize), ctx)
val W_nda_g = NDArray.zeros(Shape(hiddenSize,inputSize), ctx)
val U_nda_g = NDArray.zeros(Shape(classSize,hiddenSize), ctx)
val label_nda_g = Random.uniform(0,1, Shape(batchSize), ctx)
val in_args = Map("data"->data_nda,"W"->W_nda,"U"->U_nda,"label"->label_nda)
val arg_grad_store = Map("data"->data_nda_g,"W"->W_nda_g,"U"->U_nda_g,"label"->label_nda_g)
val executor = ysoft.easy_bind(ctx,in_args, arg_grad_store)
def forward(batch:DataBatch){
assert(batch.data(0).shape(1)== inputSize)
batch.data(0).copyTo(data_nda)
batch.label(0).copyTo(label_nda)
// println(label_nda)
executor.forward(true)
// val h = NDArray.sigmod(NDArray.dot(W, data))
// val z = NDArray.sigmod(NDArray.dot(U, h))
// var expy = NDArray.exp(z)
// p(t) = expy / (NDArray.sum(expy).toScalar)
// println("hehe:" + p(t).toArray(targets(t - 1)))
    // loss += -scala.math.log(p(t).toArray(targets(t - 1))) // loss function: cross entropy
}
def backward(){
executor.backward()
// println(W_nda)
}
def update(learningRate:Float = 0.9f){
// println(W_nda_g)
// println(in_args("W"))
W_nda_g *= learningRate
W_nda -= W_nda_g
U_nda_g *= learningRate
U_nda -= U_nda_g
println(U_nda_g.slice(0))
println((U_nda.slice(0)))
// arg_grad_store("W") *= learningRate
// in_args("W") -= arg_grad_store("W")
// arg_grad_store("U") *= learningRate
// in_args("U") += arg_grad_store("U")
}
def error(label:NDArray):Float = {
val label_pred = NDArray.argmaxChannel(executor.outputs(0))
// println(label_pred)
var right = 0
val num_instance = label_pred.shape(0)
for (i <- 0 until num_instance) {
if(scala.math.abs(label_pred(i) - label(i)) < this.eps)
right += 1
}
right * 1.0f / num_instance
}
def output_accuracy(pred: NDArray, target: NDArray): Float = {
val num_instance = pred.shape(0)
val eps = 1e-6
var right = 0
for (i <- 0 until num_instance) {
var mx_p = pred(i, 0)
var p_y: Float = 0
for(j <- 0 until 5){
if(pred(i,j) > mx_p){
mx_p = pred(i,j)
p_y = j
}
}
if(scala.math.abs(p_y - target(i)) < eps) right += 1
}
right * 1.0f / num_instance
}
def outputs = executor.outputs
def dispose(){
executor.dispose()
}
}
|
Liuxg16/BrainMatrix
|
scalakernel/src/main/java/thu/brainmatrix/Visualization.scala
|
<reponame>Liuxg16/BrainMatrix
package thu.brainmatrix
import scala.util.parsing.json._
import java.io.File
import java.io.PrintWriter
import scala.collection.mutable.ArrayBuffer
/**
* @author <NAME>
*/
object Visualization {
/**
   * A simplified implementation of the Python Graphviz library's functionality
* based on: https://github.com/xflr6/graphviz/tree/master/graphviz
*/
class Dot(name: String) {
// http://www.graphviz.org/cgi-bin/man?dot
private val ENGINES = Set(
"dot", "neato", "twopi", "circo", "fdp", "sfdp", "patchwork", "osage"
)
// http://www.graphviz.org/doc/info/output.html
private val FORMATS = Set(
"bmp",
"canon", "dot", "gv", "xdot", "xdot1.2", "xdot1.4",
"cgimage",
"cmap",
"eps",
"exr",
"fig",
"gd", "gd2",
"gif",
"gtk",
"ico",
"imap", "cmapx",
"imap_np", "cmapx_np",
"ismap",
"jp2",
"jpg", "jpeg", "jpe",
"pct", "pict",
"pdf",
"pic",
"plain", "plain-ext",
"png",
"pov",
"ps",
"ps2",
"psd",
"sgi",
"svg", "svgz",
"tga",
"tif", "tiff",
"tk",
"vml", "vmlz",
"vrml",
"wbmp",
"webp",
"xlib",
"x11"
)
private val _head = "digraph %s{".format(name)
private val _node = "\t%s %s"
private val _edge = "\t\t%s -> %s %s"
private val _tail = "}"
private val _body = ArrayBuffer[String]()
private def attribute(label: String = null, attrs: Map[String, String]): String = {
if (label != null) {
s"[label=$label ${("" /: attrs){ (acc, elem) => s"$acc ${elem._1}=${elem._2}"}}]"
}
else {
s"[${("" /: attrs){ (acc, elem) => s"$acc ${elem._1}=${elem._2}"}}]"
}
}
/**
* Create a node.
* @param name Unique identifier for the node inside the source.
* @param label Caption to be displayed (defaults to the node name).
* @param attrs Any additional node attributes (must be strings).
*/
def node(name: String, label: String = null, attrs: Map[String, String]): Unit = {
_body += _node.format(name, attribute(label, attrs))
}
/**
* Create an edge between two nodes.
* @param tailName Start node identifier.
* @param headName End node identifier.
* @param label Caption to be displayed near the edge.
* @param attrs Any additional edge attributes (must be strings).
*/
def edge(tailName: String, headName: String,
label: String = null, attrs: Map[String, String]): Unit = {
_body += _edge.format(tailName, headName, attribute(label, attrs))
}
private def save(filename: String, directory: String): String = {
val path = s"$directory${File.separator}$filename"
val writer = new PrintWriter(path)
try {
// scalastyle:off println
writer.println(s"${this._head}")
this._body.toArray.foreach { line => writer.println(s"$line") }
writer.println(s"${this._tail}")
writer.flush()
// scalastyle:off println
} finally {
writer.close()
}
path
}
private def command(engine: String, format: String, filepath: String): String = {
      require(ENGINES.contains(engine), s"unknown engine: $engine")
      require(FORMATS.contains(format), s"unknown format: $format")
s"$engine -T${format} -O $filepath"
}
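    // e.g. command("dot", "pdf", "plot.gv") returns "dot -Tpdf -O plot.gv"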
/**
* Render file with Graphviz engine into format.
     * @param engine The layout command used for rendering ('dot', 'neato', ...).
* @param format The output format used for rendering ('pdf', 'png', ...).
* @param fileName Name of the DOT source file to render.
* @param path Path to save the Dot source file.
*/
def render(engine: String = "dot", format: String = "pdf",
fileName: String, path: String): Unit = {
val filePath = this.save(fileName, path)
val args = command(engine, format, filePath)
import sys.process._
try {
args !
} catch { case _ : Throwable =>
        val errorMsg = s"""failed to execute "$args", """ +
          """make sure the Graphviz executables are on your system's path"""
throw new RuntimeException(errorMsg)
}
}
}
/**
* convert shape string to list, internal use only
* @param str shape string
* @return list of string to represent shape
*/
def str2Tuple(str: String): List[String] = {
val re = """\d+""".r
re.findAllIn(str).toList
}
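  // e.g. str2Tuple("(5, 5)") == List("5", "5")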
/**
* convert symbol to Dot object for visualization
* @param symbol symbol to be visualized
* @param title title of the dot graph
* @param shape Map of shapes, str -> shape, given input shapes
* @param nodeAttrs Map of node's attributes
* for example:
   * nodeAttrs = Map("shape" -> "oval", "fixedsize" -> "false")
* means to plot the network in "oval"
* @param hideWeights
* if true (default) then inputs with names like `*_weight`
* or `*_bias` will be hidden
* @return Dot object of symbol
*/
def plotNetwork(symbol: Symbol,
title: String = "plot", shape: Map[String, Shape] = null,
nodeAttrs: Map[String, String] = Map[String, String](),
hideWeights: Boolean = true): Dot = {
val (drawShape, shapeDict) = {
if (shape == null) (false, null)
else {
val internals = symbol.getInternals()
val (_, outShapes, _) = internals.inferShape(shape)
        require(outShapes != null, "Input shape is incomplete")
val shapeDict = internals.listOutputs().zip(outShapes).toMap
(true, shapeDict++shape)
}
}
println(shapeDict)
val conf = JSON.parseFull(symbol.toJson) match {
case None => null
case Some(map) => map.asInstanceOf[Map[String, Any]]
}
require(conf != null)
require(conf.contains("nodes"))
val nodes = conf("nodes").asInstanceOf[List[Any]]
// default attributes of node
val nodeAttr = scala.collection.mutable.Map("shape" -> "box", "fixedsize" -> "true",
"width" -> "1.3", "height" -> "0.8034", "style" -> "filled")
// merge the dict provided by user and the default one
nodeAttrs.foreach { case (k, v) => nodeAttr(k) = v }
val dot = new Dot(name = title)
// color map
val cm = List(""""#8dd3c7"""", """"#fb8072"""", """"#ffffb3"""",
""""#bebada"""", """"#80b1d3"""", """"#fdb462"""",
""""#b3de69"""", """"#fccde5"""")
// Internal helper to figure out if node should be hidden with hide_weights
def looksLikeWeight(name: String): Boolean = {
if (name.endsWith("_weight") || name.endsWith("_bias")) true
else false
}
// make nodes
val hiddenNodes = scala.collection.mutable.Set[String]()
nodes.foreach { node =>
val params = node.asInstanceOf[Map[String, Any]]
val op = params("op").asInstanceOf[String]
val name = params("name").asInstanceOf[String]
val attrs = params("param").asInstanceOf[Map[String, String]]
// val attrs = {
// if (params.contains("attr")) params("attr").asInstanceOf[Map[String, String]]
// else Map[String, String]()
// }
// input data
val attr = nodeAttr.clone()
var label = op
var continue = false
op match {
case "null" => {
if (looksLikeWeight(name)) {
if (hideWeights) hiddenNodes.add(name)
continue = true
}
attr("shape") = "oval" // inputs get their own shape
label = name
attr("fillcolor") = cm(0)
}
case "Convolution" => {
val kernel = str2Tuple(attrs("kernel"))
val stride = if (attrs.contains("stride")) str2Tuple(attrs("stride")) else List(1)
label =
s""""Convolution\\n${kernel(0)}x${kernel(1)}/${stride(0)}, ${attrs("num_filter")}""""
attr("fillcolor") = cm(1)
}
case "FullyConnected" => {
label = s""""FullyConnected\\n${attrs("num_hidden")}""""
attr("fillcolor") = cm(1)
}
case "BatchNorm" => attr("fillcolor") = cm(3)
case "Activation" | "LeakyReLU" => {
label = s""""${op}\\n${attrs("act_type")}""""
attr("fillcolor") = cm(2)
}
case "Pooling" => {
val kernel = str2Tuple(attrs("kernel"))
val stride = if (attrs.contains("stride")) str2Tuple(attrs("stride")) else List(1)
label =
s""""Pooling\\n${attrs("pool_type")}, ${kernel(0)}x${kernel(1)}/${stride(0)}""""
attr("fillcolor") = cm(4)
}
case "Concat" | "Flatten" | "Reshape" => attr("fillcolor") = cm(5)
case "Softmax" => attr("fillcolor") = cm(6)
case _ => attr("fillcolor") = cm(7)
}
if (!continue) dot.node(name = name , label, attr.toMap)
}
// add edges
nodes.foreach { node =>
val params = node.asInstanceOf[Map[String, Any]]
val op = params("op").asInstanceOf[String]
val name = params("name").asInstanceOf[String]
// val attrs_params = params("param").asInstanceOf[Map[String, Any]]
// println(attrs_params)
if (op != "null") {
        // println(params)
val inputs = params("inputs").asInstanceOf[List[List[Double]]]
for (item <- inputs) {
val inputNode = nodes(item(0).toInt).asInstanceOf[Map[String, Any]]
val inputName = inputNode("name").asInstanceOf[String]
if (!hiddenNodes.contains(inputName)) {
val attrs = scala.collection.mutable.Map("dir" -> "back", "arrowtail" -> "open")
// add shapes
if (drawShape) {
val key = {
if (inputNode("op").asInstanceOf[String] != "null") s"${inputName}_output"
else inputName
}
val shape = shapeDict(key).toArray.drop(1)
val label = s""""${shape.mkString("x")}""""
attrs("label") = label
}
dot.edge(tailName = name, headName = inputName, attrs = attrs.toMap)
}
}
}
}
dot
}
}
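// A minimal usage sketch (illustrative only; the network below and any
// rendering step for the returned Dot are assumptions, not part of this file):
//
//   val net: Symbol = ...  // a composed network, e.g. conv -> pool -> fc -> softmax
//   val dot = plotNetwork(net,
//     title = "lenet",
//     shape = Map("data" -> Shape(1, 1, 28, 28)))
//   // the returned Dot can then be written out as a Graphviz .dot file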
|
Liuxg16/BrainMatrix
|
scalakernel/src/main/java/thu/brainmatrix/synapse/Engine.scala
|
<filename>scalakernel/src/main/java/thu/brainmatrix/synapse/Engine.scala<gh_stars>0
package thu.brainmatrix.synapse
import thu.brainmatrix.util.RK4
import thu.brainmatrix.NDArray
import thu.brainmatrix.Context
import thu.brainmatrix.Shape
class Engine(ctx:Context = Context.defaultCtx) {
def run(model:Model,t0:NDArray, y0:Array[NDArray], h:NDArray, stepSize:Int):(NDArray,Array[NDArray]) = {
val rk4 = new RK4(model.update)
val (t, y) = rk4.solve(t0, y0, h, stepSize)(ctx)
(t,y)
}
}
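// A minimal usage sketch (the concrete Model and the shapes are illustrative):
//
//   val engine = new Engine()
//   val t0 = NDArray.zeros(Shape(1))           // integration start time
//   val y0 = Array(NDArray.ones(Shape(1)))     // initial state
//   val h  = NDArray.ones(Shape(1)) * 0.01f    // RK4 step size
//   val (t, y) = engine.run(model, t0, y0, h, stepSize = 100)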
|
Liuxg16/BrainMatrix
|
scala-package/core/src/test/scala/ml/dmlc/mxnet/CheckUtils.scala
|
package ml.dmlc.mxnet
object CheckUtils {
def reldiff(a: NDArray, b: NDArray): Float = {
val diff = NDArray.sum(NDArray.abs(a - b)).toScalar
val norm = NDArray.sum(NDArray.abs(a)).toScalar
diff / norm
}
def reldiff(a: Array[Float], b: Array[Float]): Float = {
val diff =
(a zip b).map { case (aElem, bElem) => Math.abs(aElem - bElem) }.sum
    val norm: Float = a.map(x => Math.abs(x)).sum
diff / norm
}
}
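// Example: reldiff is the L1 distance normalised by the L1 norm of `a`, e.g.
//   CheckUtils.reldiff(Array(1f, 2f, 3f), Array(1f, 2f, 3.1f))
//   // == 0.1f / 6f ≈ 0.0167f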
|
Liuxg16/BrainMatrix
|
scalakernel/src/main/java/thu/brainmatrix/lstmbyguo/Test.scala
|
<gh_stars>0
package thu.brainmatrix.lstmbyguo
import thu.brainmatrix.NDArray
import java.io.File
import java.io.FileWriter
import thu.brainmatrix.Shape
class Test {
}
object Test {
private val matrixfilepath: String = "./seqData/test.txt"
var matrixfile = new File(matrixfilepath)
  def test_transpose(src: NDArray) {
    val shapes = src.shape
    val head = shapes.apply(0)
    val tail = shapes.apply(1)
    println(shapes)
    val res = NDArray.zeros(tail, head).toArray
    val tempsrc = src.toArray
    // manually transpose the row-major buffer: element (i, j) of src lands at (j, i)
    for (i <- 0 until head; j <- 0 until tail) {
      res(j * head + i) = tempsrc(i * tail + j)
    }
    println(res.mkString(", "))
  }
def main(args: Array[String]): Unit = {
var test: NDArray = NDArray.ones(Shape(5, 6))
for (i <- 0 to 4) {
test.slice(i) *= i
}
println(NDArray.transpose(test))
println(test)
// println(NDArray.transpose(test))
// var test2:NDArray = NDArray.ones(Shape(3,4))
// println("--------------------------\n" + test.reshape(Array(2, 3)))
// if (matrixfile.exists()) {
// matrixfile.delete()
// }
// matrixfile.createNewFile()
// var n = 0
// while (n < 10) {
// n += 1
// val writer = new FileWriter(matrixfilepath, true)
// writer.write("" + "\n" + NDArray.ones(2, 3) + "\n")
// writer.close()
// }
// println("ren zha")
}
}
|
Liuxg16/BrainMatrix
|
scalakernel/src/main/java/thu/brainmatrix/char_rnn_symbol/SampleChar.scala
|
<reponame>Liuxg16/BrainMatrix
package thu.brainmatrix.char_rnn_symbol
import Config._
import thu.brainmatrix.Base
import thu.brainmatrix.FeedForward
import thu.brainmatrix.Context
import thu.brainmatrix.io.NDArrayLSTMIter
import thu.brainmatrix.optimizer.SGD
import thu.brainmatrix.Model
import scala.io.Source
import thu.brainmatrix.NDArray
import scala.collection.mutable.ListBuffer
import thu.brainmatrix.util.mathTool
import thu.brainmatrix.Shape
import scala.util.Random
object SampleChar {
def main(args:Array[String]){
sampleChar_vec_feather
}
def sampleChar_vec_feather{
val ctx = Context.cpu(0)
val vocab = seq_IO.build_vocabulary(INPUT_FILE_NAME, VOCAB_FILE_NAME)
    val bacov = for ((k, v) <- vocab) yield (v, k)
    val revertVocab = bacov.updated(bacov.size - 1, '?')
    println(bacov.size)
val n_alphabet = vocab.size
// load from check-point
val (_, argParams, _) = Model.loadCheckpoint("./model/obama", Config.N_EPOCH)
val model = new InferCharModel(LSTM_N_LAYER, n_alphabet,DIM_HIDDEN, DIM_EMBED,argParams,ctx,DROPOUT)
val seqLength = 100
val inputNdarray = NDArray.zeros(1,n_alphabet)
// val revertVocab = Utils.makeRevertVocab(vocab)
// Feel free to change the starter sentence
var output = "hello"
val randomSample = true
var newSentence = true
val ignoreLength = output.length()
for (i <- 0 until seqLength) {
if (i <= ignoreLength - 1) makeInput(output(i), vocab, inputNdarray)
else makeInput(output.takeRight(1)(0), vocab, inputNdarray)
val prob = model.forward(inputNdarray, newSentence)
newSentence = false
val nextChar = makeOutput(prob, revertVocab, randomSample)
if (nextChar == Config.UNKNOW_CHAR) newSentence = true
if (i >= ignoreLength) output = output :+ nextChar
}
    // Let's see what the model has learned from the characters in Obama's speeches.
println(output)
model.dispose()
println("*----------------------------------------------*")
}
// make input from char
def makeInput(char: Char, vocab: Map[Char, Int], arr: NDArray): Unit = {
val idx = vocab(char)
val tmp = NDArray.zeros(arr.shape)
tmp(0,idx) = 1
arr.set(tmp)
}
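  // Example: with vocab('h') == 3 and a 1 x n_alphabet buffer, makeInput('h', vocab, arr)
  // leaves arr as the one-hot row [0, 0, 0, 1, 0, ...].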
  // produce the next char either by random sampling or, when sample is false,
  // by taking the highest-probability char
def makeOutput(prob: Array[Float], vocab: Map[Int, Char],
sample: Boolean = false, temperature: Float = 1f): Char = {
var idx = -1
    val char = if (!sample) {
idx = ((-1f, -1) /: prob.zipWithIndex) { (max, elem) =>
if (max._1 < elem._1) elem else max
}._2
if (vocab.contains(idx)) vocab(idx)
else Config.UNKNOW_CHAR
} else {
val fixDict = Array[Char]() ++ (0 until vocab.size).map(i => vocab(i))
var scaleProb = prob.map(x => if (x < 1e-6) 1e-6 else if (x > 1 - 1e-6) 1 - 1e-6 else x)
var rescale = scaleProb.map(x => Math.exp(Math.log(x) / temperature).toFloat)
val sum = rescale.sum.toFloat
rescale = rescale.map(_ / sum)
choice(fixDict, rescale)
}
char
}
// helper function for random sample
  def cdf(weights: Array[Float]): Array[Float] = {
    val total = weights.sum
    // running cumulative sums, normalised so that the last entry is 1.0
    weights.scanLeft(0f)(_ + _).tail.map(_ / total)
  }
  def choice(population: Array[Char], weights: Array[Float]): Char = {
    assert(population.length == weights.length)
    val cdfVals = cdf(weights)
    val x = Random.nextFloat()
    // pick the first index whose cumulative weight covers the random draw
    val idx = cdfVals.indexWhere(_ >= x)
    population(if (idx >= 0) idx else 0)
  }
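  // Example (illustrative weights): cdf(Array(1f, 1f, 2f)) == Array(0.25f, 0.5f, 1f),
  // so choice(Array('a', 'b', 'c'), Array(1f, 1f, 2f)) returns 'c' with probability 0.5.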
// val ctx_cpu = Context.cpu(0)
// val map_train = NDArray.zeros(Shape(BATCH_SIZE,SEQ_LENGTH,n_alphabet), ctx_cpu)
// for(i<- 0 until BATCH_SIZE){
// for(j<- 0 until SEQ_LENGTH)
// map_train(i,j,j) = 1
// }
//
//// map_train(0)(0,0) = 0
//// map_train(0)(0,5) = 1
// var text_arr = ListBuffer[NDArray]()
// text_arr += NDArray.argmaxChannel(map_train)
//// text_arr.foreach {println}
//// for(i<-0 until SEQ_LENGTH*40){
// val dataIter = new NDArrayLSTMIter(data = IndexedSeq(map_train),dataName = "data",IndexedSeq(NDArray.zeros(Shape(BATCH_SIZE))),"label",
// dataBatchSize = BATCH_SIZE, shuffle = false,lastBatchHandle = "pad")//the rest will discard
//
// data.set(dataIter.next().data(0))
// executor.forward
// val probArrays = executor.outputs
//// println(probArrays(i%SEQ_LENGTH))
//
// val outputArr = probArrays.map{ x => x.copyTo(ctx_cpu) }
// println("-----------------")
// val outcharInt = mathTool.SampleByPro2D(outputArr(0)).map(_.toFloat)
//// val outchar = NDArray.array(outcharInt,Shape(BATCH_SIZE,SEQ_LENGTH))
//// text_arr += outchar
//// val temp = NDArray.zeros(Shape(BATCH_SIZE,n_alphabet))
////// println(outchar)
//// for(j<-0 until BATCH_SIZE){
//// temp(j,outchar(j).toInt) = 1
//// }
//// temp.copyTo(map_train((i+1)%SEQ_LENGTH))
//// }
//
// println("--------------")
// var s = ""
// val a = outcharInt.map(x => bacov(x.toInt))
// a.foreach { x => s += x }
// println(s)
//
//
def sampleChar_id_feather{
val vocab = seq_IO.build_vocabulary(INPUT_FILE_NAME, VOCAB_FILE_NAME)
var bacov = for((k,v)<- vocab) yield (v,k)
bacov = bacov.updated(bacov.size-1, '?')
println(bacov)
val n_alphabet = vocab.size
val lstm = Lstm.lstmGenerator(LSTM_N_LAYER, SEQ_LENGTH, DIM_HIDDEN, DIM_EMBED, n_alphabet, DROPOUT)
    Base.INPUTSHAPE_AUXILIARY = Map(
      "_l0_init_h" -> Shape(BATCH_SIZE, DIM_HIDDEN),
      "_l0_init_c" -> Shape(BATCH_SIZE, DIM_HIDDEN),
      "_l1_init_h" -> Shape(BATCH_SIZE, DIM_HIDDEN),
      "_l1_init_c" -> Shape(BATCH_SIZE, DIM_HIDDEN))
    val modelBase = new FeedForward(lstm, Context.cpu(), numEpoch = N_EPOCH,
      optimizer = new SGD(learningRate = LEARNING_RATE, momentum = MOMENTUM, wd = WEIGHT_DECAY))
// modelBase.loadModelParams(s"./model/charLSTM.params_${N_EPOCH}")
// lstm.listArguments().foreach {println}
val source = Source.fromFile(INPUT_FILE_NAME)
val seq_input = source.mkString
val len_train = math.round(seq_input.length()*DATA_TRAIN_RATIO).toInt
val text_train = seq_input.take(len_train)
val inputName = "data"
val labelName = "label"
val map_train = (0 until SEQ_LENGTH).map(x => (NDArray.ones(Shape(BATCH_SIZE,1))*10))
var text_arr = ListBuffer[NDArray]()
text_arr += map_train(0)
for(i<-0 until SEQ_LENGTH-1){
val dataIter = new NDArrayLSTMIter(data = map_train,dataName = inputName,IndexedSeq(NDArray.zeros(Shape(BATCH_SIZE,1))),"label",
dataBatchSize = BATCH_SIZE, shuffle = false,lastBatchHandle = "pad")//the rest will discard
// val traindata = seq_IO.SampleDataIter(text = text_train,labelName = "label",vocab = vocab,batch_size = BATCH_SIZE,seq_len = SEQ_LENGTH)
val probArrays = modelBase.predict(data = dataIter)
val outcharInt = mathTool.SampleByPro2D(probArrays(i)).map(_.toFloat)
val outchar = NDArray.array(outcharInt,Shape(BATCH_SIZE,1))
text_arr += outchar
outchar.copyTo(map_train(i+1))
}
for(i<-0 until SEQ_LENGTH-1){
val dataIter = new NDArrayLSTMIter(data = map_train,dataName = inputName,IndexedSeq(NDArray.zeros(Shape(BATCH_SIZE,1))),"label",
dataBatchSize = BATCH_SIZE, shuffle = false,lastBatchHandle = "pad")//the rest will discard
// val traindata = seq_IO.SampleDataIter(text = text_train,labelName = "label",vocab = vocab,batch_size = BATCH_SIZE,seq_len = SEQ_LENGTH)
val probArrays = modelBase.predict(data = dataIter)
val outcharInt = mathTool.SampleByPro2D(probArrays(i)).map(_.toFloat)
val outchar = NDArray.array(outcharInt,Shape(BATCH_SIZE,1))
text_arr += outchar
for(j<- 0 until SEQ_LENGTH-1){
map_train(j+1).copyTo(map_train(j))
}
outchar.copyTo(map_train(SEQ_LENGTH-1))
}
// text_arr.foreach {println}
// println("--------------")
var texts = for(j<-0 until BATCH_SIZE) yield new StringBuilder
for(i<-0 until text_arr.length){
for((c,s)<-NDAtoChar(bacov,text_arr(i)).zip(texts)){
s += c
}
}
for((s,idx)<-texts.zipWithIndex){
println(s"\ntext $idx th:")
println(s)
}
println("\nends...")
println(text_arr.length)
}
def NDAtoChar(vocab:Map[Int,Char],nda:NDArray):Array[Char] = {
nda.toArray.map(x => vocab(x.toInt))
}
}
|
Liuxg16/BrainMatrix
|
scala-package/core/src/test/scala/ml/dmlc/mxnet/KVStoreSuite.scala
|
<gh_stars>100-1000
package ml.dmlc.mxnet
import org.scalatest.{BeforeAndAfterAll, FunSuite}
class KVStoreSuite extends FunSuite with BeforeAndAfterAll {
test("init and pull") {
val kv = KVStore.create()
val shape = Shape(2, 1)
val ndArray = NDArray.zeros(shape)
kv.init(3, NDArray.ones(shape))
kv.pull(3, ndArray)
assert(ndArray.toArray === Array(1f, 1f))
}
test("push and pull") {
val kv = KVStore.create()
val shape = Shape(2, 1)
val ndArray = NDArray.zeros(shape)
kv.init(3, NDArray.ones(shape))
kv.push(3, NDArray.ones(shape) * 4)
kv.pull(3, ndArray)
assert(ndArray.toArray === Array(4f, 4f))
}
test("updater runs when push") {
val kv = KVStore.create()
val updater = new MXKVStoreUpdater {
override def update(key: Int, input: NDArray, stored: NDArray): Unit = {
// scalastyle:off println
println(s"update on key $key")
// scalastyle:on println
stored += input * 2
}
override def dispose(): Unit = {}
}
kv.setUpdater(updater)
val shape = Shape(2, 1)
val ndArray = NDArray.zeros(shape)
kv.init(3, NDArray.ones(shape) * 4)
kv.pull(3, ndArray)
assert(ndArray.toArray === Array(4f, 4f))
kv.push(3, NDArray.ones(shape))
kv.pull(3, ndArray)
assert(ndArray.toArray === Array(6f, 6f))
}
test("get type") {
val kv = KVStore.create("local")
assert(kv.`type` === "local")
}
test("get numWorkers and rank") {
val kv = KVStore.create("local")
assert(kv.numWorkers === 1)
assert(kv.rank === 0)
}
}
|
Liuxg16/BrainMatrix
|
scalakernel/src/main/java/thu/brainmatrix/synapse/Module.scala
|
package thu.brainmatrix.synapse
import thu.brainmatrix.NDArray
abstract class Module {
var variable_table:Array[String]
var variableindices:Array[Int]
def getInitial(): Array[NDArray] = {
Array.fill[NDArray](variable_table.length)(null)
}
def setIndices(indices:Array[Int]){
this.variableindices=indices;
}
  def setIndices(startIndex: Int) {
    // assign consecutive indices starting at startIndex
    this.variableindices = Array.tabulate(this.variable_table.length)(startIndex + _)
  }
def getVarIndices():Array[Int] = {
this.variableindices;
}
def getVarNumber():Int = {
this.variable_table.length;
}
def getVarsName():Array[String] = {
this.variable_table;
}
  /**
   * @param name name of the variable to look up
   * @return the index of the variable if found, or -1 if no such name exists
   */
  def getResindex(name: String): Int = {
    val i = this.variable_table.indexOf(name)
    if (i >= 0) this.variableindices(i) else -1
  }
def update(t: NDArray, y:Array[NDArray],yDot:Array[NDArray],indices:Array[Int]):Array[NDArray] = {
Array.fill[NDArray](y.length)(null)
}
}
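// A minimal sketch of a concrete Module (names and the equation are
// illustrative, not part of this codebase):
//
//   class LeakyIntegrator extends Module {
//     var variable_table = Array("v")
//     var variableindices: Array[Int] = _
//     override def update(t: NDArray, y: Array[NDArray], yDot: Array[NDArray],
//                         indices: Array[Int]): Array[NDArray] = {
//       val i = getResindex("v")
//       yDot(i) = y(i) * -0.1f   // dv/dt = -0.1 v
//       yDot
//     }
//   }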
|
benhutchison/cats-collections
|
scalacheck/src/main/scala/cats/collections/arbitrary/all.scala
|
<reponame>benhutchison/cats-collections
package cats.collections.arbitrary
trait AllArbitrary
extends ArbitrarySet
with ArbitraryMap
with ArbitraryISet
with CogenInstances
|
benhutchison/cats-collections
|
core/src/main/scala/cats/collections/ISet.scala
|
package cats.collections
import cats._
/**
 * An intensional set: rather than enumerating its elements, as an
 * extensional set does, it is defined by a predicate that serves as a
 * test for membership.
 */
abstract class ISet[-A] extends scala.Function1[A, Boolean] { self =>
/**
* returns a set which is the union of this set and another
*/
def union[B <: A](other: ISet[B]): ISet[B] = ISet(a => apply(a) || other(a))
/**
* returns a set which is the union of this set and another
*/
def |[B <: A](other: ISet[B]): ISet[B] = ISet(a => apply(a) || other(a))
/**
* returns a set which is the intersection of this set and another
*/
def intersection[B <: A](other: ISet[B]): ISet[B] = ISet(a => apply(a) && other(a))
/**
* returns a set which is the intersection of this set and another
*/
def &[B <: A](other: ISet[B]): ISet[B] = ISet(a => apply(a) && other(a))
/**
* Returns true if the value is a member of the set.
*/
def contains(a: A): Boolean = apply(a)
/**
   * Returns the set which is the difference of another set removed from this set
*/
def diff[B <: A](remove: ISet[B]): ISet[B] = ISet(a => apply(a) && !remove(a))
/**
   * Returns the set which is the difference of another set removed from this set
*/
def -[B <: A](remove: ISet[B]): ISet[B] = ISet(a => apply(a) && !remove(a))
/**
* Return the set of all As which are not in this set.
*/
def negate: ISet[A] = ISet(a => !apply(a))
/**
* Return the set of all As which are not in this set.
*/
def unary_!(): ISet[A] = negate
}
object ISet extends ISetInstances {
def apply[A](f: A => Boolean): ISet[A] = new ISet[A] {
def apply(a: A) = f(a)
}
def empty: ISet[Any] = apply(_ => false)
}
trait ISetInstances {
implicit def isetMonoid[A]: Monoid[ISet[A]] = new Monoid[ISet[A]] {
override def empty: ISet[A] = ISet.empty
override def combine(l: ISet[A], r: ISet[A]): ISet[A] = l union r
}
implicit val isetInstance: MonoidK[ISet] = new MonoidK[ISet] {
override def empty[A]: ISet[A] = ISet.empty
override def combineK[A](l: ISet[A], r: ISet[A]): ISet[A] = l union r
}
}
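// Example: intensional sets compose by combining their membership predicates.
//   val evens = ISet[Int](_ % 2 == 0)
//   val small = ISet[Int](_ < 10)
//   (evens & small)(4)    // true
//   (evens & small)(14)   // false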
|
benhutchison/cats-collections
|
scalacheck/src/main/scala/cats/collections/arbitrary/package.scala
|
<filename>scalacheck/src/main/scala/cats/collections/arbitrary/package.scala
package cats.collections
package object arbitrary {
object all extends AllArbitrary
object set extends ArbitrarySet
object map extends ArbitraryMap
object iset extends ArbitraryISet
object cogen extends CogenInstances
}
|
benhutchison/cats-collections
|
tests/src/test/scala/cats/collections/HeapSpec.scala
|
package cats.collections
package tests
import cats.tests.CatsSuite
/**
* Created by nperez on 3/28/16.
*/
class HeapSpec extends CatsSuite {
test("sorted")(
forAll { (xs: scala.List[Int]) =>
val set = xs.toSet
val heap = set.foldLeft(Heap.empty[Int])((h, i) => h.add(i))
val exp = set.toList
heap.toList should be(exp.sorted)
})
}
|
benhutchison/cats-collections
|
docs/build.sbt
|
<reponame>benhutchison/cats-collections
import microsites._
name := "cats-collections-docs"
lazy val docsMappingsAPIDir = settingKey[String]("Name of subdirectory in site target directory for api docs")
enablePlugins(MicrositesPlugin)
ghpagesNoJekyll := false
micrositeName := "cats-collections"
micrositeDescription := "pure functional data structures for Scala"
micrositeBaseUrl := "/cats-collections/"
micrositeHomepage := "http://typelevel.org/cats-collections/"
micrositeGithubOwner := "typelevel"
micrositeGithubRepo := "cats-collections"
micrositeExtraMdFiles := Map(
file("README.md") -> ExtraMdFileConfig(
"index.md",
"docs",
Map("title" -> "Home", "layout" -> "docs")
)
)
micrositePalette := Map(
"brand-primary" -> "#5B5988",
"brand-secondary" -> "#292E53",
"brand-tertiary" -> "#222749",
"gray-dark" -> "#49494B",
"gray" -> "#7B7B7E",
"gray-light" -> "#E5E5E6",
"gray-lighter" -> "#F4F3F4",
"white-color" -> "#FFFFFF")
includeFilter in Jekyll := (includeFilter in makeSite).value
fork in tut := true
git.remoteRepo := "<EMAIL>:typelevel/cats-collections.git"
scalacOptions := Seq(
"-feature",
"-deprecation",
"-encoding", "utf8",
"-language:postfixOps",
"-language:higherKinds",
"-language:implicitConversions",
"-unchecked",
"-Xcheckinit",
"-Xfuture",
"-Xlint",
"-Ywarn-dead-code",
"-Ywarn-value-discard",
"-Xfuture",
"-nowarn")
|
benhutchison/cats-collections
|
core/src/main/scala/cats/collections/Set.scala
|
<reponame>benhutchison/cats-collections<filename>core/src/main/scala/cats/collections/Set.scala
package cats.collections
import java.util.NoSuchElementException
import scala.annotation.tailrec
import scala.collection.immutable.List
import cats._
import cats.implicits._
import cats.collections.compat.Factory
/**
* An immutable, ordered, extensional set
*
* This data-structure maintains balance using the
* [AVL](https://en.wikipedia.org/wiki/AVL_tree) algorithm.
*/
sealed abstract class AvlSet[A] {
import AvlSet._
/**
* The number of items in the Set.
* O(1)
*/
val size: Int
/**
* Returns `true` if the Set is the empty Set.
* O(1)
*/
def isEmpty: Boolean
/**
* Map a function on all values of the set
*/
def map[B: Order](f: A => B): AvlSet[B] =
foldLeft[AvlSet[B]](empty)((s,a) => s + f(a))
  /**
   * Map a function over all values of the set and union the results
   */
def flatMap[B: Order](f: A => AvlSet[B]): AvlSet[B] =
foldLeft[AvlSet[B]](empty)((s,a) => s ++ f(a))
/**
* Returns None if the set is empty, otherwise returns the minimum
* element.
* O(log n)
*/
def min: Option[A] = {
@tailrec def loop(sub: AvlSet[A], x: A): A = sub match {
case Branch(a, l, _) => loop(l, a)
case _ => x
}
this match {
case Branch(a, l, _) => Some(loop(l, a))
case _ => None
}
}
/**
* Returns `None` if the set is empty, otherwise returns the maximum
* element.
* O(log n)
*/
def max: Option[A] = {
@tailrec def loop(sub: AvlSet[A], x: A): A = sub match {
case Branch(a, _, r) => loop(r, a)
case _ => x
}
this match {
case Branch(a, _, r) => Some(loop(r, a))
case _ => None
}
}
/**
* Applies a function to each element, in ascending order
* O(n)
*/
def foreach(f: A => Unit): Unit = this match {
case Branch(v, l, r) => l.foreach(f); f(v); r.foreach(f)
case _ =>
}
/**
* fold the elements together from min to max, using the passed
* seed, and accumulator function.
* O(n)
*/
def foldLeft[B](z: B)(f: (B, A) => B): B = this match {
case Branch(v, l, r) => r.foldLeft(f(l.foldLeft(z)(f), v))(f)
case _ => z
}
/**
* fold the elements together from min to max, using the passed
* seed, and accumulator function.
* O(n)
*/
def foldRight[B](z: Eval[B])(f: (A, Eval[B]) => Eval[B]): Eval[B] = this match {
case Branch(v, l, r) => l.foldRight(f(v, r.foldRight(z)(f)))(f)
case _ => z
}
/**
* Find the minimum element matching the given predicate. Returns
* None if there is no element matching the predicate.
* O(log n)
*/
def find(pred: A => Boolean): Option[A] = this match {
case Branch(v, l, r) =>
l.find(pred) orElse (if(pred(v)) Some(v) else r.find(pred))
case _ => None
}
/**
* Returns `true` if the given element is in the set.
* O(log n)
*/
def contains(x: A)(implicit order: Order[A]): Boolean = this match {
case Branch(a, l, r) => order.compare(x, a) match {
case 0 => true
case o if o < 0 => l.contains(x)
case _ => r.contains(x)
}
case _ => false
}
/**
   * Adds the given element to the set if it is not already present.
* O(log n)
*/
def add(x: A)(implicit order: Order[A]): Branch[A] =
(this match {
case Branch(a, l, r) => order.compare(x, a) match {
case 0 => Branch(x, l, r)
case o if o < 0 => Branch(a, l.add(x), r)
case _ => Branch(a, l, r.add(x))
}
case _ => Branch(x, AvlSet.empty, AvlSet.empty)
}).balance
/**
   * Adds the given element to the set if it is not already present.
* O(log n)
*/
def +(x: A)(implicit order: Order[A]): AvlSet[A] = add(x)
/**
* Return a set which does not contain the given element.
* O(log n)
*/
def remove(x: A)(implicit order: Order[A]): AvlSet[A] =
this match {
case Branch(a, l, r) =>
order.compare(x, a) match {
case 0 => r.min match {
case None => l
case Some(v) => Branch(v,l,r.remove(v)).balance
}
case o if o < 0 => Branch(a, l.remove(x), r).balance
case _ => Branch(a, l, r.remove(x)).balance
}
case _ => AvlSet.empty
}
// STU: this is used by Map, not sure what to do about this
private[collections] def removef[B](x: B, f: A => B)(implicit B: Order[B]): AvlSet[A] =
this match {
case Branch(a, l, r) =>
B.compare(x, f(a)) match {
case 0 => r.min match {
case None => l
case Some(v) =>
Branch(v,l,r.removef(f(v), f)).balance
}
case o if o < 0 => Branch(a, l.removef(x, f), r).balance
case _ => Branch(a, l, r.removef(x, f)).balance
}
case _ => AvlSet.empty
}
/**
* Return a set containing the union of elements with this set and
* the given set.
* O(n log n)
*/
def union(another: AvlSet[A])(implicit order: Order[A]): AvlSet[A] = another.foldLeft(this)(_ + _)
/**
* Return a set containing the union of elements with this set and
* the given set.
* O(n log n)
*/
def |(another: AvlSet[A])(implicit order: Order[A]): AvlSet[A] = this union another
/**
* Return a set containing the intersection of elements with this set and
* the given set.
* O(n log n)
*/
def intersect(another: AvlSet[A])(implicit order: Order[A]): AvlSet[A] = {
def _intersect(small: AvlSet[A], large: AvlSet[A]): AvlSet[A] =
small.foldLeft[AvlSet[A]](empty)((t,a) => if(large.contains(a)) t + a else t)
if (this.size < another.size)
_intersect(this, another)
else
_intersect(another, this)
}
/**
* Return a set containing the intersection of elements with this set and
* the given set.
* O(n log n)
*/
def &(another: AvlSet[A])(implicit order: Order[A]): AvlSet[A] = this intersect another
/**
* Return a set containing the union of elements with this set and
* the given set.
* O(n log n)
*/
def ++(another: AvlSet[A])(implicit order: Order[A]): AvlSet[A] = this union another
/**
* Return a set that has any elements appearing in the removals set removed
* O(n log n)
*/
def diff(removals: AvlSet[A])(implicit order: Order[A]): AvlSet[A] =
removals.foldLeft(this)(_ remove _)
/**
* Return a set that has any elements appearing in the removals set removed
* O(n log n)
*/
def -(removals: AvlSet[A])(implicit order: Order[A]): AvlSet[A] =
removals.foldLeft(this)(_ remove _)
/**
* Return an ISet (intentional set) with the same members as this set
*/
def iset(implicit order: Order[A]): ISet[A] = ISet(contains)
/**
* Converts this set into a Scala collection
* O(n)
*/
def to[Col[_]](implicit cbf: Factory[A, Col[A]]): Col[A] = {
val builder = cbf.newBuilder
this.foreach(builder += _)
builder.result()
}
/**
* Return the sorted list of elements.
* O(n)
*/
def toList: List[A] = to[List]
/**
* Return a Scala set containing the elements in the set
* O(n)
*/
def toScalaSet: Set[A] = to[Set]
def toIterator: Iterator[A] = new Iterator[A] {
var stack: List[Either[A, AvlSet[A]]] = List(Right(AvlSet.this))
@tailrec override def hasNext: Boolean = stack match {
case Nil => false
case Left(_) :: _ => true
case Right(Branch(_, _, _)) :: _ => true
case _ :: ss =>
stack = ss
hasNext
}
@tailrec override def next(): A = stack match {
case Nil => throw new NoSuchElementException()
case Left(v) :: ss =>
stack = ss
v
case Right(Branch(v, l, r)) :: ss =>
stack = Right(l) :: Left(v) :: Right(r) :: ss
next()
case _ :: ss =>
stack = ss
next()
}
}
override def toString: String =
"Set(" + Foldable[List].intercalate(toList.map(_.toString), ",") + ")"
// So yeah. we had to make a decision, either we have to make this
// structure Key/Value pairs even when we don't always need a value
// (in the case of a Set), or we have to have separate structures
// for Set and Map, or we have to have a function like this one,
  // that only really makes sense for Map. I chose this one. This
// function makes it so that we can find things in the tree just
// based on a Key, when the set is really storing a Key value pair.
// The name was chosen so that it would be convenient for nobody to
// remember.
private[collections] def _getkv[B](f: A => B, b: B)(implicit B: Order[B]): Option[A] = {
@tailrec def go(t: AvlSet[A]): Option[A] = t match {
case Branch(v,l,r) =>
B.compare(b, f(v)) match {
case 0 => Some(v)
case x if x < 0 => go(l)
case _ => go(r)
}
case _ => None
}
go(this)
}
private[collections] def updateKey[K,V](key: K, value: V)(implicit order: Order[K], ev: A =:= (K,V), V: Semigroup[V]): AvlSet[A] = {
(this match {
case Branch(a, l, r) => order.compare(key, ev(a)._1) match {
case 0 =>
val (k,v) = ev(a)
Branch((k -> V.combine(v,value)).asInstanceOf[A], l, r)
case o if o < 0 => Branch(a, l.updateKey(key, value), r)
case _ => Branch(a, l, r.updateKey(key,value))
}
case _ => Branch((key -> value).asInstanceOf[A], AvlSet.empty, AvlSet.empty)
}).balance
}
private[collections] val height: Int
}
object AvlSet extends AvlSetInstances {
/**
* Create a set with the given elements.
*/
def apply[A: Order](as: A*): AvlSet[A] =
as.foldLeft[AvlSet[A]](empty)(_ + _)
def fromList[A: Order](as: List[A]): AvlSet[A] =
as.foldLeft[AvlSet[A]](empty)(_ + _)
/**
* The empty set.
*/
def empty[A]: AvlSet[A] = BTNil()
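  // Example: sets are built by repeated insertion and iterate in sorted order,
  //   AvlSet(3, 1, 2).toList == List(1, 2, 3)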
private[collections] case class Branch[A](value: A,
left: AvlSet[A],
right: AvlSet[A]) extends AvlSet[A] {
val size = left.size + right.size + 1
val height = java.lang.Math.max(left.height, right.height) + 1
override def isEmpty: Boolean = false
    // Determine the direction that the tree should be rotated,
    // given the allowed amount of imbalance.
    // Returns 1 when a right rotation is called for (left side too tall).
    // Returns -1 when a left rotation is called for (right side too tall).
    // Returns 0 when the tree is within the allowance.
private def rotation(l: Int, r: Int, allow: Int): Int =
if(l - r > allow ) 1
else if(r - l > allow) -1
else 0
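    // e.g. rotation(2, 0, 1) == 1   (left two taller: rotate right)
    //      rotation(0, 2, 1) == -1  (right two taller: rotate left)
    //      rotation(1, 0, 1) == 0   (imbalance within the allowance)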
private[collections] def balance: Branch[A] = {
val r = rotation(left.height, right.height, 1)
if(r == 0) this
else if(r > 0) {
left match {
case Branch(lv,ll,lr) =>
if(rotation(ll.height, lr.height, 0) < 0) {
val Branch(lrv,lrl,lrr) = lr
Branch(lrv,Branch(lv, ll, lrl), Branch(value, lrr, right))
} else {
Branch(lv, ll, Branch(value, lr, right))
}
case _ => this
}
} else {
right match {
case Branch(rv,rl,rr) =>
if(rotation(rl.height, rr.height, 0) > 0) {
val Branch(rlv,rll,rlr) = rl
Branch(rlv, Branch(value, left, rll), Branch(rv, rlr, rr))
} else {
Branch(rv, Branch(value, left, rl), rr)
}
case _ => this
}
}
}
}
private[collections] case object BTNil extends AvlSet[Nothing] {
override def isEmpty: Boolean = true
def apply[A](): AvlSet[A] = this.asInstanceOf[AvlSet[A]]
def unapply[A](a: AvlSet[A]): Boolean = a.isEmpty
override val size: Int = 0
override val height: Int = 0
}
}
trait AvlSetInstances {
implicit def eqSet[A: Eq]: Eq[AvlSet[A]] = new Eq[AvlSet[A]] {
override def eqv(x: AvlSet[A], y: AvlSet[A]): Boolean =
iteratorEq(x.toIterator, y.toIterator)
}
}
|
benhutchison/cats-collections
|
scalacheck/src/main/scala/cats/collections/arbitrary/ArbitraryISet.scala
|
<gh_stars>0
package cats.collections
package arbitrary
import org.scalacheck.{Gen, Arbitrary}
import cats.Order
trait ArbitraryISet {
import set._
def isetGen[A: Arbitrary: Order]: Gen[ISet[A]] =
setGen.map(_.iset)
implicit def isetArbitrary[A: Arbitrary: Order]: Arbitrary[ISet[A]] =
Arbitrary(isetGen[A])
}
|
benhutchison/cats-collections
|
core/src/main/scala/cats/collections/Heap.scala
|
<gh_stars>0
/**
* Created by nperez on 3/28/16.
*/
package cats.collections
import cats._
/**
 * `Heap` is a purely functional binary heap. Binary heaps are not common in the functional space, especially because
 * their implementation depends on mutable arrays in order to gain performance. This functional binary heap is based
 * on <NAME>'s paper and it does support the basic operations on a heap without compromising performance.
 *
 * It is important to note that we can, in fact, create the binary heap in order O(n) from a `List` using the
 * function `heapify`.
*/
sealed abstract class Heap[A] {
import Heap._
/**
* Internal representation of the min value to avoid deconstruction of `min: Option[A]` since min is heavily used.
*/
private[collections] val min: A
/**
* Returns min value on the heap.
*/
def getMin: Option[A]
private[collections] def left: Heap[A]
private[collections] def right: Heap[A]
/**
* Returns the size of the heap.
*/
def size: Int
/**
* Returns the height of the heap.
*/
def height: Int
/**
* Verifies if the heap is empty.
*/
def isEmpty: Boolean
/**
* Insert a new element into the heap.
* Order O(log n)
*/
def add(x: A)(implicit order: Order[A]): Heap[A] =
if (isEmpty)
Heap(x, Leaf(), Leaf())
    else if (left.size < (1 << left.height) - 1)   // left subtree incomplete: a full tree of height h holds 2^h - 1 nodes
      bubbleUp(min, left.add(x), right)
    else if (right.size < (1 << right.height) - 1) // right subtree incomplete
      bubbleUp(min, left, right.add(x))
else if (right.height < left.height)
bubbleUp(min, left, right.add(x))
else
bubbleUp(min, left.add(x), right)
/**
* Build a heap using a list.
* Order O(n)
*/
def heapify(a: List[A])(implicit order: Order[A]): Heap[A] = {
def loop(i: Int, xs: scala.List[A]): Heap[A] =
if (i < xs.length) {
bubbleDown(xs(i), loop(2 * i + 1, xs), loop(2 * i + 2, xs))
}
else {
Leaf()
}
loop(0, a)
}
/**
* Remove the min element from the heap (the root).
* Order O(log n)
*/
def remove(implicit order: Order[A]): Heap[A] = this match {
case Leaf() => Leaf()
case Branch(_, l, r, _, _) => bubbleRootDown(mergeChildren(l, r))
}
/**
* Returns a sorted list of the elements within the heap.
*/
def toList(implicit order: Order[A]): List[A] = this match {
case Leaf() => Nil
case Branch(m, _, _, _, _) => m :: remove.toList
}
/**
* Alias for add
*/
def +(x: A)(implicit order: Order[A]): Heap[A] = add(x)
/**
* Alias for remove
*/
def --(implicit order: Order[A]): Heap[A] = remove
}
object Heap {
def empty[A]: Heap[A] = Leaf()
def apply[A](x: A): Heap[A] = Branch(x, empty, empty, 1, 1)
def apply[A](x: A, l: Heap[A], r: Heap[A]): Heap[A] =
Branch(x, l, r, l.size + r.size + 1, scala.math.max(l.height, r.height) + 1)
private[collections] case class Branch[A](min: A, left: Heap[A], right: Heap[A], size: Int, height: Int) extends Heap[A] {
override def isEmpty: Boolean = false
override def getMin: Option[A] = Some(min)
}
private[collections] case object Leaf extends Heap[Option[Nothing]] {
def apply[A](): Heap[A] = this.asInstanceOf[Heap[A]]
def unapply[A](heap: Heap[A]): Boolean = heap.isEmpty
override def size: Int = 0
override def height: Int = 0
override def left: Heap[Option[Nothing]] = Leaf
override def right: Heap[Option[Nothing]] = Leaf
override def isEmpty: Boolean = true
override def getMin: Option[Option[Nothing]] = None
override private[collections] val min: Option[Nothing] = None
}
private[collections] def bubbleUp[A](x: A, l: Heap[A], r: Heap[A])(implicit order: Order[A]): Heap[A] = (l, r) match {
case (Branch(y, lt, rt, _, _), _) if order.gt(x , y) =>
Heap(y, Heap(x, lt, rt), r)
case (_, Branch(z, lt, rt, _, _)) if order.gt(x , z) =>
Heap(z, l, Heap(x, lt, rt))
case (_, _) => Heap(x, l, r)
}
private[collections] def bubbleDown[A](x: A, l: Heap[A], r: Heap[A])(implicit order: Order[A]): Heap[A] = (l, r) match {
case (Branch(y, _, _, _, _), Branch(z, lt, rt, _, _))
if (order.lt(z , y) && order.gt(x , z)) => Heap(z, l, bubbleDown(x, lt, rt))
case (Branch(y, lt, rt, _, _), _)
if order.gt(x , y) => Heap(y, bubbleDown(x, lt, rt), r)
case (_, _) => Heap(x, l, r)
}
private[collections] def bubbleRootDown[A](h: Heap[A])(implicit order: Order[A]): Heap[A] =
if (h.isEmpty) {
Leaf()
}
else {
bubbleDown(h.min, h.left, h.right)
}
private[collections] def mergeChildren[A](l: Heap[A], r: Heap[A]): Heap[A] =
if (l.isEmpty && r.isEmpty) {
Leaf()
}
    else if (l.size < (1 << l.height) - 1) {
floatLeft(l.min, mergeChildren(l.left, l.right), r)
}
    else if (r.size < (1 << r.height) - 1) {
floatRight(r.min, l, mergeChildren(r.left, r.right))
}
else if (r.height < l.height) {
floatLeft(l.min, mergeChildren(l.left, l.right), r)
}
else {
floatRight(r.min, l, mergeChildren(r.left, r.right))
}
private[collections] def floatLeft[A](x: A, l: Heap[A], r: Heap[A]): Heap[A] = l match {
case Branch(y, lt, rt, _, _) => Heap(y, Heap(x, lt, rt), r)
case _ => Heap(x, l, r)
}
private[collections] def floatRight[A](x: A, l: Heap[A], r: Heap[A]): Heap[A] = r match {
case Branch(y, lt, rt, _, _) => Heap(y, l, Heap(x, lt, rt))
case _ => Heap(x, l, r)
}
implicit def toShowable[A](implicit s: Show[A], order: Order[A]): Show[Heap[A]] = new Show[Heap[A]] {
override def show(f: Heap[A]): String = f.toList match {
case Nil => "[]"
case h :: t => t.foldLeft("[" + s.show(h))((acc, r) => acc + ", " + s.show(r)) + "]"
}
}
}
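// Example: the minimum sits at the root, so draining the heap yields a sorted list.
//   val h = List(5, 1, 4).foldLeft(Heap.empty[Int])(_ + _)
//   h.getMin   // Some(1)
//   h.toList   // List(1, 4, 5)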
|
benhutchison/cats-collections
|
core/src/main/scala/cats/collections/Discrete.scala
|
<gh_stars>0
package cats.collections
/**
* Represent discrete operations that can be performed on A
*/
trait Discrete[A] {
/**
* Return the successor of x.
*/
def succ(x: A): A
/**
* Returns the predecessor of x.
*/
def pred(x: A): A
/**
* Returns true if x and y are consecutive.
*/
def adj(x: A, y: A): Boolean = succ(x) == y
}
object Discrete {
implicit val intDiscrete: Discrete[Int] = new Discrete[Int] {
override def succ(x: Int): Int = x + 1
override def pred(x: Int): Int = x - 1
}
implicit val bigIntDiscrete: Discrete[BigInt] = new Discrete[BigInt] {
override def succ(x: BigInt): BigInt = x + 1
override def pred(x: BigInt): BigInt = x - 1
}
}
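// Example: succ/pred induce an adjacency test on any discrete domain.
//   Discrete.intDiscrete.adj(3, 4)   // true
//   Discrete.intDiscrete.adj(3, 5)   // false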
|
benhutchison/cats-collections
|
core/src/main/scala/cats/collections/Streaming.scala
|
package cats.collections
import scala.annotation.tailrec
import cats._, cats.Eval._
import cats.implicits._
/**
* `Streaming[A]` represents a stream of values. A stream can be
* thought of as a collection, with two key differences:
*
* 1. It may be infinite; it does not necessarily have a finite
* length. For this reason, there is no `.length` method.
*
* 2. It may be lazy. In other words, the entire stream may not be in
* memory. In this case, each "step" of the stream has
* instructions for producing the next step.
*
* Streams are not necessarily lazy: they use `Eval[Streaming[A]]` to
 * represent a tail that may (or may not) be lazy. If `now[A]` is used
* for each tail, then `Streaming[A]` will behave similarly to
* `List[A]`. If `Later[A]` is used for each tail, then `Streaming[A]`
* will behave similarly to `scala.Stream[A]` (i.e. it will
* lazily-compute the tail, and will memoize the result to improve the
* performance of repeated traversals). If `always[A]` is used for
* each tail, the result will be a lazy stream which does not memoize
* results (saving space at the cost of potentially-repeated
* calculations).
*
* Since `Streaming[A]` has been compared to `scala.Stream[A]` it is
* worth noting some key differences between the two types:
*
* 1. When the entire stream is known ahead of time, `Streaming[A]`
* can represent it more efficiently, using `now[A]`, rather than
* allocating a list of closures.
*
* 2. `Streaming[A]` does not memoize by default. This protects
* against cases where a reference to head will prevent the entire
* stream from being garbage collected, and is a better default.
* A stream can be memoized later using the `.memoize` method.
*
* 3. `Streaming[A]` does not inherit from the standard collections,
* meaning a wide variety of methods which are dangerous on
* streams (`.length`, `.apply`, etc.) are not present.
*
* 4. `scala.Stream[A]` requires an immediate value for `.head`. This
* means that operations like `.filter` will block until a
* matching value is found, or the stream is exhausted (which
* could be never in the case of an infinite stream). By contrast,
* `Streaming[A]` values can be totally lazy (and can be
* lazily-constructed using `Streaming.defer()`), so methods like
* `.filter` are completely lazy.
*
* 5. The use of `Eval[Streaming[A]]` to represent the "tail" of the
* stream means that streams can be lazily (and safely)
* constructed with `Foldable#foldRight`, and that `.map` and
* `.flatMap` operations over the tail will be safely trampolined.
*/
@deprecated("Streaming is obsolete. Use either fs2, Monix, or iteratees.", "cats-collections 0.7.0")
sealed abstract class Streaming[A] extends Product with Serializable { lhs =>
import Streaming.{Empty, Wait, Cons}
/**
* Deconstruct a stream into a head and tail (if available).
*
* This method will evaluate the stream until it finds a head and
* tail, or until the stream is exhausted. The head will be
* evaluated, whereas the tail will remain (potentially) lazy within
* Eval.
*/
@deprecated("Streaming is obsolete. Use either fs2, Monix, or iteratees.", "cats-collections 0.7.0")
def uncons: Option[(A, Eval[Streaming[A]])] = {
@tailrec def unroll(s: Streaming[A]): Option[(A, Eval[Streaming[A]])] =
s match {
case Empty() => None
case Wait(lt) => unroll(lt.value)
case Cons(a, lt) => Some((a, lt))
}
unroll(this)
}
/**
* Lazily transform the stream given a function `f`.
*/
@deprecated("Streaming is obsolete. Use either fs2, Monix, or iteratees.", "cats-collections 0.7.0")
def map[B](f: A => B): Streaming[B] =
this match {
case Empty() => Empty()
case Wait(lt) => Wait(lt.map(_.map(f)))
case Cons(a, lt) => Cons(f(a), lt.map(_.map(f)))
}
/**
* Eagerly fold the stream to a single value from the left.
*/
@deprecated("Streaming is obsolete. Use either fs2, Monix, or iteratees.", "cats-collections 0.7.0")
def foldLeft[B](b: B)(f: (B, A) => B): B = {
@tailrec def unroll(s: Streaming[A], b: B): B =
s match {
case Empty() => b
case Wait(lt) => unroll(lt.value, b)
case Cons(a, lt) => unroll(lt.value, f(b, a))
}
unroll(this, b)
}
/**
* Lazily fold the stream to a single value from the right.
*/
@deprecated("Streaming is obsolete. Use either fs2, Monix, or iteratees.", "cats-collections 0.7.0")
def foldRight[B](b: Eval[B])(f: (A, Eval[B]) => Eval[B]): Eval[B] =
this match {
case Empty() => b
case Wait(lt) => lt.flatMap(_.foldRight(b)(f))
case Cons(a, lt) => f(a, lt.flatMap(_.foldRight(b)(f)))
}
/**
* Lazily concatenate two streams.
*/
@deprecated("Streaming is obsolete. Use either fs2, Monix, or iteratees.", "cats-collections 0.7.0")
def ++(rhs: Streaming[A]): Streaming[A] =
this match {
case Empty() => rhs
case Wait(lt) => Wait(lt.map(_ ++ rhs))
case Cons(a, lt) => Cons(a, lt.map(_ ++ rhs))
}
/**
* Lazily concatenate two streams.
*
* In this case the evaluation of the second stream may be deferred.
*/
@deprecated("Streaming is obsolete. Use either fs2, Monix, or iteratees.", "cats-collections 0.7.0")
def ++(rhs: Eval[Streaming[A]]): Streaming[A] =
this match {
case Empty() => Wait(rhs)
case Wait(lt) => Wait(lt.map(_ ++ rhs))
case Cons(a, lt) => Cons(a, lt.map(_ ++ rhs))
}
/**
* Lazily zip two streams together, using the given function `f` to
* produce output values.
*
* The length of the result will be the shorter of the two
* arguments.
*
* The expression:
*
* (lhs zipMap rhs)(f)
*
* is equivalent to (but more efficient than):
*
* (lhs zip rhs).map { case (a, b) => f(a, b) }
*/
@deprecated("Streaming is obsolete. Use either fs2, Monix, or iteratees.", "cats-collections 0.7.0")
def zipMap[B, C](rhs: Streaming[B])(f: (A, B) => C): Streaming[C] =
(lhs, rhs) match {
case (Cons(a, lta), Cons(b, ltb)) =>
Cons(f(a, b), for { ta <- lta; tb <- ltb } yield (ta zipMap tb)(f))
case (Empty(), _) =>
Empty()
case (_, Empty()) =>
Empty()
case (Wait(lta), s) =>
Wait(lta.map(_.zipMap(s)(f)))
case (s, Wait(ltb)) =>
Wait(ltb.map(s.zipMap(_)(f)))
}
/**
* Zip two streams together, using the given function `f` to produce
* the output values.
*
* Unlike zipMap, the length of the result will be the *longer* of
* the two input streams. The functions `g` and `h` will be used in
* this case to produce valid `C` values.
*
* The expression:
*
* (lhs izipMap rhs)(f, g, h)
*
* is equivalent to (but more efficient than):
*
* (lhs izip rhs).map {
* case Ior.Both(a, b) => f(a, b)
* case Ior.Left(a) => g(a)
* case Ior.Right(b) => h(b)
* }
*/
@deprecated("Streaming is obsolete. Use either fs2, Monix, or iteratees.", "cats-collections 0.7.0")
def izipMap[B, C](rhs: Streaming[B])(f: (A, B) => C, g: A => C, h: B => C): Streaming[C] =
(lhs, rhs) match {
case (Cons(a, lta), Cons(b, ltb)) =>
Cons(f(a, b), for { ta <- lta; tb <- ltb } yield (ta izipMap tb)(f, g, h))
case (Wait(lta), tb) =>
Wait(lta.map(_.izipMap(tb)(f, g, h)))
case (ta, Wait(ltb)) =>
Wait(ltb.map(ta.izipMap(_)(f, g, h)))
case (Empty(), tb) =>
tb.map(h)
case (ta, Empty()) =>
ta.map(g)
}
/**
* Return true if every element of the stream satisfies the
* predicate, false otherwise.
*/
@deprecated("Streaming is obsolete. Use either fs2, Monix, or iteratees.", "cats-collections 0.7.0")
def forall(f: A => Boolean): Boolean = {
@tailrec def unroll(s: Streaming[A]): Boolean =
s match {
case Empty() => true
case Wait(lt) => unroll(lt.value)
case Cons(a, lt) => if (f(a)) unroll(lt.value) else false
}
unroll(this)
}
/**
* Provide a list of elements in the stream.
*
* This will evaluate the stream immediately, and will hang in the
* case of infinite streams.
*/
@deprecated("Streaming is obsolete. Use either fs2, Monix, or iteratees.", "cats-collections 0.7.0")
def toList: List[A] = foldLeft[List[A]](List.empty)((as,a) => a :: as).reverse
}
@deprecated("Streaming is obsolete. Use either fs2, Monix, or iteratees.", "cats-collections 0.7.0")
object Streaming extends StreamingInstances {
/**
* Concrete Streaming[A] types:
*
* - Empty(): an empty stream.
* - Cons(a, tail): a non-empty stream containing (at least) `a`.
* - Wait(tail): a deferred stream.
*
* Cons represents a lazy, possibly infinite stream of values.
* Eval[_] is used to represent possible laziness (via now, later,
* and always). The head of `Cons` is eager -- a lazy head can be
* represented using `Wait(always(...))` or `Wait(Later(...))`.
*/
@deprecated("Streaming is obsolete. Use either fs2, Monix, or iteratees.", "cats-collections 0.7.0")
final case class Empty[A]() extends Streaming[A]
@deprecated("Streaming is obsolete. Use either fs2, Monix, or iteratees.", "cats-collections 0.7.0")
final case class Wait[A](next: Eval[Streaming[A]]) extends Streaming[A]
@deprecated("Streaming is obsolete. Use either fs2, Monix, or iteratees.", "cats-collections 0.7.0")
final case class Cons[A](a: A, tail: Eval[Streaming[A]]) extends Streaming[A]
@deprecated("Streaming is obsolete. Use either fs2, Monix, or iteratees.", "cats-collections 0.7.0")
def unfold[A,B](b: B)(f: B => Option[(A,B)]): Streaming[A] = f(b) match {
case None => Streaming.empty
case Some((a,b)) => Streaming.cons(a, defer(unfold(b)(f)))
}
/**
* Create an empty stream of type A.
*/
@deprecated("Streaming is obsolete. Use either fs2, Monix, or iteratees.", "cats-collections 0.7.0")
def empty[A]: Streaming[A] =
Empty()
/**
* Create a stream consisting of a single value.
*/
@deprecated("Streaming is obsolete. Use either fs2, Monix, or iteratees.", "cats-collections 0.7.0")
def apply[A](a: A): Streaming[A] =
Cons(a, now(Empty()))
/**
* Prepend a value to a stream.
*/
@deprecated("Streaming is obsolete. Use either fs2, Monix, or iteratees.", "cats-collections 0.7.0")
def cons[A](a: A, s: Streaming[A]): Streaming[A] =
Cons(a, now(s))
/**
* Prepend a value to an Eval[Streaming[A]].
*/
@deprecated("Streaming is obsolete. Use either fs2, Monix, or iteratees.", "cats-collections 0.7.0")
def cons[A](a: A, ls: Eval[Streaming[A]]): Streaming[A] =
Cons(a, ls)
/**
* Defer stream creation.
*
* Given an expression which creates a stream, this method defers
* that creation, allowing the head (if any) to be lazy.
*/
@deprecated("Streaming is obsolete. Use either fs2, Monix, or iteratees.", "cats-collections 0.7.0")
def defer[A](s: => Streaming[A]): Streaming[A] =
wait(always(s))
/**
* Create a stream from an `Eval[Streaming[A]]` value.
*
* Given an expression which creates a stream, this method defers
* that creation, allowing the head (if any) to be lazy.
*/
@deprecated("Streaming is obsolete. Use either fs2, Monix, or iteratees.", "cats-collections 0.7.0")
def wait[A](ls: Eval[Streaming[A]]): Streaming[A] =
Wait(ls)
/**
* Create a stream from an iterator.
*
* The stream will be created lazily, to support potentially large
* (or infinite) iterators. Iterators passed to this method should
* not be used elsewhere -- doing so will result in problems.
*
* The use case for this method is code like .fromIterable, which
* creates an iterator for the express purpose of calling this
* method.
*/
@deprecated("Streaming is obsolete. Use either fs2, Monix, or iteratees.", "cats-collections 0.7.0")
def fromIteratorUnsafe[A](it: scala.collection.Iterator[A]): Streaming[A] =
if (it.hasNext) Cons(it.next, Later(fromIteratorUnsafe(it))) else Empty()
/**
* Produce a stream given an "unfolding" function.
*
* None represents an empty stream. Some(a) represents an initial
* element, and we can compute the tail (if any) via f(a).
*/
@deprecated("Streaming is obsolete. Use either fs2, Monix, or iteratees.", "cats-collections 0.7.0")
def unfold[A](o: Option[A])(f: A => Option[A]): Streaming[A] =
o match {
case None => Empty()
case Some(a) => Cons(a, always(unfold(f(a))(f)))
}
}
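// Example: unfold builds a lazy stream; only forced elements are computed.
//   val nats = Streaming.unfold(0)(n => Some((n, n + 1)))   // 0, 1, 2, ...
//   nats.uncons.map(_._1)   // Some(0), without realising the infinite tail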
private[collections] sealed trait StreamingInstances {
@deprecated("Streaming is obsolete. Use either fs2, Monix, or iteratees.", "cats-collections 0.7.0")
implicit def streamEq[A: Eq]: Eq[Streaming[A]] =
new Eq[Streaming[A]] {
def eqv(x: Streaming[A], y: Streaming[A]): Boolean =
(x izipMap y)(_ === _, _ => false, _ => false)
.forall(identity)
}
}
|
tfellison/tips-service
|
app/utilities/TimeUtils.scala
|
<filename>app/utilities/TimeUtils.scala
package utilities
import java.text.SimpleDateFormat
import java.util.Date
import java.util.TimeZone
/**
* Provides utilities for working with time values
*
* @author tellison
*/
object TimeUtils {
/**
* Returns the UTC current timestamp at millisecond resolution in ISO 8601 format
*
* @return Current UTC timestamp at millisecond resolution in ISO 8601 format
*/
def getCurrentTimestampUTC : String = {
val timeZone = TimeZone.getTimeZone("UTC")
val dateFormat = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'")
dateFormat.setTimeZone(timeZone)
    dateFormat.format(new Date())
  }
}
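// Example output (illustrative):
//   TimeUtils.getCurrentTimestampUTC   // "2016-05-04T17:03:26.012Z"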
}
|
tfellison/tips-service
|
app/persistence/TipsRepo.scala
|
<reponame>tfellison/tips-service
package persistence
import javax.inject.Inject
import play.api.libs.json.{JsObject, Json}
import play.modules.reactivemongo.ReactiveMongoApi
import play.modules.reactivemongo.json._
import play.modules.reactivemongo.json.collection.JSONCollection
import reactivemongo.api.ReadPreference
import reactivemongo.api.commands.WriteResult
import reactivemongo.bson.{BSONDocument, BSONObjectID}
import scala.concurrent.{ExecutionContext, Future}
/**
* Provides facility for executing standard CRUD operations against tips collection in database
*
* @author tfellison
*/
class TipsRepo @Inject() (reactiveMongoApi: ReactiveMongoApi) {
def collection = reactiveMongoApi.db.collection[JSONCollection]("tips")
/**
* Insert a new document into the database
*
* @param document Document to be saved
* @param ec ExecutionContext used to execute the operation
* @return Result of the write operation
*/
def save(document: BSONDocument)(implicit ec: ExecutionContext): Future[WriteResult] = {
collection.update(BSONDocument("id" -> document.get("id").getOrElse(BSONObjectID.generate)), document, upsert = true)
}
/**
* Find and return a document from the database
*
* @param selector Selector used to identify the document
* @param ec ExecutionContext used to execute the operation
* @return JsObject representation of the found document
*/
def find(selector: BSONDocument)(implicit ec: ExecutionContext): Future[Option[JsObject]] = {
collection.find(selector).one[JsObject]
}
/**
* Update an existing document
*
* @param selector Selector used to identify the document
* @param update Modifications to be made to the document
* @param ec ExecutionContext used to execute the operation
* @return Result of the write operation
*/
def update(selector: BSONDocument, update: BSONDocument)(implicit ec: ExecutionContext): Future[WriteResult] = {
collection.update(selector, update)
}
/**
* Drop an existing document from the database
*
* @param document Document to be dropped
* @param ec ExecutionContext used to execute the operation
* @return Result of the write operation
*/
def remove(document: BSONDocument)(implicit ec: ExecutionContext): Future[WriteResult] = {
collection.remove(document)
}
/**
* Find all documents in the database and return them in a list
*
* @param ec ExecutionContext used to execute the operation
* @return List containing all documents in the database
*/
def findAll()(implicit ec: ExecutionContext): Future[List[JsObject]] = {
val queryBuilder = collection.find(Json.obj())
val cursor = queryBuilder.cursor[JsObject](ReadPreference.Primary)
cursor.collect[List]()
}
/**
   * Drop the entire collection of documents
*
* @param ec ExecutionContext used to execute the operation
* @return void
*/
def dropAll()(implicit ec: ExecutionContext): Future[Unit] = Future {
collection.drop()
}
}
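// A minimal usage sketch (assumes an injected ReactiveMongoApi and an implicit
// ExecutionContext in scope):
//
//   val repo = new TipsRepo(reactiveMongoApi)
//   repo.find(BSONDocument("id" -> "abc-123"))   // Future[Option[JsObject]]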
|
tfellison/tips-service
|
app/controllers/TipsController.scala
|
<gh_stars>0
package controllers
import javax.inject.Inject
import java.util.UUID
import play.api.libs.concurrent.Execution.Implicits.defaultContext
import play.api.libs.json._
import play.api.mvc._
import play.modules.reactivemongo.{MongoController, ReactiveMongoApi, ReactiveMongoComponents}
import reactivemongo.api.commands.WriteResult
import reactivemongo.bson._
import persistence.TipsRepo
import models._
import utilities.TimeUtils
import scala.concurrent.Future
/**
* This controller defines actions to handle HTTP requests made to the tips-service API.
*
* @param reactiveMongoApi API object used to communicate with tips collection in MongoDB
*/
class TipsController @Inject()(val reactiveMongoApi: ReactiveMongoApi) extends Controller with MongoController with ReactiveMongoComponents {
val InvalidInputMessage = "Operation failed: Request body appeared to contain valid JSON, but the structure was incorrect for the specified operation."
val IdNotFoundMessage = "Specified ID not found."
def tipsRepo = new TipsRepo(reactiveMongoApi)
/**
* Create and store a new tip
*
* @return Simple JSON document containing ID of newly created tip
*/
def createTip(): Action[JsValue] = Action.async(BodyParsers.parse.json) { implicit request =>
request.body.validate[CreateTipInput] match {
case success: JsSuccess[CreateTipInput] => {
val timestamp = TimeUtils.getCurrentTimestampUTC.toString
val id = UUID.randomUUID.toString
tipsRepo.save(BSONDocument(
TipFieldNames.Id -> id,
TipFieldNames.Submitter -> success.get.submitter,
TipFieldNames.CreatedTime -> timestamp,
TipFieldNames.LastUpdatedTime -> timestamp,
TipFieldNames.Message -> success.get.message,
TipFieldNames.Comments -> Array[String]()
)).map(result => Created(Json.obj("id" -> id)))
}
case JsError(error) => scala.concurrent.Future { BadRequest(Json.obj("result" -> InvalidInputMessage)) }
}
}
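  // Example request body for create-tip (illustrative values):
  //   { "submitter": "alice", "message": "Try the daily special." }
  // On success the response is: { "id": "<generated UUID>" }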
/**
* Fetch an existing tip from the database
*
* @return Tip indicated by the provided ID
*/
def fetchTip(): Action[JsValue] = Action.async(BodyParsers.parse.json) { implicit request =>
request.body.validate[FetchTipInput] match {
case success: JsSuccess[FetchTipInput] => {
tipsRepo.find(BSONDocument(TipFieldNames.Id -> BSONString(success.get.id))).map(tip => Ok(if (tip.isDefined) Json.toJson(tip) else Json.obj("result" -> IdNotFoundMessage)))
}
case JsError(error) => scala.concurrent.Future { BadRequest(Json.obj("result" -> InvalidInputMessage)) }
}
}
/**
* Update the message of an existing tip
*
* @return Simple JSON document indicating whether operation was successful
*/
def updateTip(): Action[JsValue] = Action.async(BodyParsers.parse.json) { implicit request =>
request.body.validate[UpdateTipInput] match {
case success: JsSuccess[UpdateTipInput] => {
tipsRepo.update(BSONDocument(TipFieldNames.Id -> BSONString(success.get.id)),
BSONDocument("$set" -> BSONDocument(
TipFieldNames.LastUpdatedTime -> TimeUtils.getCurrentTimestampUTC.toString,
TipFieldNames.Message -> success.get.message
))).map(result => Ok(if (result.n > 0) Json.obj("result" -> s"Operation successful: Message updated for tip ${success.get.id}.") else Json.obj("result" -> IdNotFoundMessage)))
}
case JsError(error) => scala.concurrent.Future { BadRequest(Json.obj("result" -> InvalidInputMessage)) }
}
}
/**
* Delete an existing tip
*
* @return Simple JSON document indicating whether operation was successful
*/
def deleteTip(): Action[JsValue] = Action.async(BodyParsers.parse.json) { implicit request =>
request.body.validate[DeleteTipInput] match {
case success: JsSuccess[DeleteTipInput] => {
tipsRepo.remove(BSONDocument(TipFieldNames.Id -> BSONString(success.get.id))).map(result => Ok(if (result.n > 0) Json.obj("result" -> s"Operation successful: Tip ${success.get.id} deleted.") else Json.obj("result" -> IdNotFoundMessage)))
}
case JsError(error) => scala.concurrent.Future { BadRequest(Json.obj("result" -> InvalidInputMessage)) }
}
}
/**
* Add a comment to an existing tip
*
* @return Simple JSON document indicating whether operation was successful
*/
def addComment(): Action[JsValue] = Action.async(BodyParsers.parse.json) { implicit request =>
request.body.validate[AddCommentInput] match {
case success: JsSuccess[AddCommentInput] => {
tipsRepo.update(BSONDocument(TipFieldNames.Id -> BSONString(success.get.id)),
BSONDocument("$set" -> BSONDocument(TipFieldNames.LastUpdatedTime -> TimeUtils.getCurrentTimestampUTC.toString),
"$push" -> BSONDocument(TipFieldNames.Comments -> success.get.comment)
)).map(result => Ok(if (result.n > 0) Json.obj("result" -> s"Operation successful: Comment added to tip ${success.get.id}.") else Json.obj("result" -> IdNotFoundMessage)))
}
case JsError(error) => scala.concurrent.Future { BadRequest(Json.obj("result" -> InvalidInputMessage)) }
}
}
/**
* Fetch all existing tips from the database
*
* @return JSON array of all existing tips
*/
def fetchAllTips(): Action[AnyContent] = Action.async { implicit request =>
tipsRepo.findAll().map(tips => Ok(Json.toJson(tips)))
}
/**
* Drop the entire collection of tips from the database
*
* @return Simple JSON document confirming all tips have been deleted
*/
def deleteAllTips(): Action[AnyContent] = Action.async { implicit request =>
tipsRepo.dropAll().map(result => Ok(Json.obj("result" -> "Operation successful: Tips collection cleared.")))
}
}
|
tfellison/tips-service
|
test/IntegrationSpec.scala
|
import org.junit.runner.RunWith
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
import play.api.test.WithBrowser
/**
* Test basic application functionality using a headless browser
*
* @author tfellison
*/
@RunWith(classOf[JUnitRunner])
class IntegrationSpec extends Specification {
"Application" should {
"respond to index requests from browser by confirming ready status" in new WithBrowser {
browser.goTo("http://localhost:" + port)
browser.pageSource must be equalTo "Application ready..."
}
"connect to database and clear existing data" in new WithBrowser {
browser.goTo("http://localhost:" + port + "/api/tips/delete-all-tips")
browser.pageSource must contain("Tips collection cleared.")
}
"fetch empty result set from database" in new WithBrowser {
browser.goTo("http://localhost:" + port + "/api/tips/fetch-all-tips")
browser.pageSource must be equalTo "[]"
}
}
}
|
tfellison/tips-service
|
app/controllers/DefaultController.scala
|
<reponame>tfellison/tips-service<gh_stars>0
package controllers
import javax.inject.Inject
import play.api.mvc._
/**
* Defines actions to handle requests not associated with a specific API
*
 * @author tfellison
*/
class DefaultController @Inject() extends Controller {
/**
* Handles requests made to application root
*
* @return Response indicating application is ready
*/
def index = Action {
Ok("Application ready...")
}
/**
* Catches requests made to any path not associated with a valid operation
*
   * @return Response indicating the specified route is not defined
*/
def catchAll(path: String) = Action {
Ok(s"The specified route is not defined: $path")
}
}
|
tfellison/tips-service
|
app/models/InputStructures.scala
|
package models
import play.api.libs.json._
import play.api.libs.functional.syntax._
/**
* Defines formats of expected valid inputs to service operations
*
* @author tfellison
*/
/**
* Expected input format for create-tip operation
*
* @param submitter Name of user submitting the tip
* @param message Message that makes up the textual body of the tip
*/
case class CreateTipInput(submitter: String, message: String)
object CreateTipInput {
implicit val reads: Reads[CreateTipInput] = (
(JsPath \ "submitter").read[String] and
(JsPath \ "message").read[String])(CreateTipInput.apply _)
}
/**
* Expected input format for fetch-tip operation
*
* @param id Identifier of the tip to fetch
*/
case class FetchTipInput(id: String)
object FetchTipInput {
implicit val reads: Reads[FetchTipInput] = (JsPath \ "id").read[String].map(FetchTipInput.apply _)
}
/**
* Expected input format for delete-tip operation
*
 * @param id Identifier of the tip to delete
*/
case class DeleteTipInput(id: String)
object DeleteTipInput {
implicit val reads: Reads[DeleteTipInput] = (JsPath \ "id").read[String].map(DeleteTipInput.apply _)
}
/**
* Expected input format for the update-tip operation
*
* @param id Identifier of the tip to update
* @param message New message to make up the textual body of the tip
*/
case class UpdateTipInput(id: String, message: String)
object UpdateTipInput {
implicit val reads: Reads[UpdateTipInput] = (
(JsPath \ "id").read[String] and
(JsPath \ "message").read[String])(UpdateTipInput.apply _)
}
/**
* Expected input format for the add-comment operation
*
* @param id Identifier of the tip to which to add the comment
* @param comment Textual body of the comment
*/
case class AddCommentInput(id: String, comment: String)
object AddCommentInput {
implicit val reads: Reads[AddCommentInput] = (
(JsPath \ "id").read[String] and
(JsPath \ "comment").read[String])(AddCommentInput.apply _)
}
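/**
 * Usage sketch (hypothetical, not part of the original file): validating a raw
 * JSON payload against the CreateTipInput Reads defined above. Valid input is
 * unwrapped as a JsSuccess; anything else surfaces as a JsError.
 */
object InputStructuresExample {
  def demo(): Unit = {
    val payload = Json.parse("""{"submitter": "alice", "message": "Try the new API"}""")
    payload.validate[CreateTipInput] match {
      case JsSuccess(input, _) => println(s"${input.submitter}: ${input.message}")
      case JsError(errors)     => println(s"Invalid input: $errors")
    }
  }
}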
|
tfellison/tips-service
|
app/models/PersistedStructures.scala
|
package models
/**
* Contains structures/formats for representing objects persisted in database
*
* @author tfellison
*/
/** Field names of a tip as persisted in a database document */
object TipFieldNames {
val Id = "id"
val Submitter = "submitter"
val CreatedTime = "createdTime"
val LastUpdatedTime = "lastUpdatedTime"
val Message = "message"
val Comments = "comments"
}
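// Shape sketch (inferred from the controller code elsewhere in this repo; an
// illustration, not a schema): a tip document as persisted in MongoDB, keyed by
// the field names above.
//
//   {
//     "id": "42",
//     "submitter": "alice",
//     "createdTime": "...",
//     "lastUpdatedTime": "...",
//     "message": "Try the new API",
//     "comments": ["Nice tip!"]
//   }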
|
tcheuer/LearningAkka-Chapter2General
|
src/main/scala/com/stringReverse/Interface/Interface.scala
|
package com.stringReverse.Interface
import akka.actor.{ActorSystem, Props}
import akka.pattern.ask
import akka.util.Timeout
import com.stringReverse.Actors.StringReverse
import com.stringReverse.messages.ReversibleString
import scala.concurrent.Future
import scala.concurrent.duration._
/**
* Interface for interacting with the reverse string actor
*
* @author <NAME> on 3/7/16
*
*
*/
class Interface {
/** Returns a future string which will contain the reverse of the string passed to it */
def revString(inputString: String): Future[String] = {
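    // Note: a fresh ActorSystem and actor are created on every call; callers that
    // reverse many strings may prefer to reuse a single system.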
val actorSystem = ActorSystem("stringReversing")
val stringReverseActor = actorSystem.actorOf(Props[StringReverse],"sRevActor")
implicit val timeout = Timeout(5 seconds)
val future = ask(stringReverseActor, ReversibleString(inputString)).mapTo[String]
future
}
}
|
tcheuer/LearningAkka-Chapter2General
|
src/test/scala/StringReverseSpec.scala
|
import akka.actor.ActorSystem
import akka.testkit.TestActorRef
import akka.util.Timeout
import com.stringReverse.Actors.StringReverse
import com.stringReverse.messages.ReversibleString
import org.scalatest.{FunSpecLike, Matchers}
import akka.pattern.ask
import scala.concurrent.Await
import scala.concurrent.duration._
/**
* @author <NAME> on 3/7/17.
*/
class StringReverseSpec extends FunSpecLike with Matchers {
implicit val system = ActorSystem()
implicit val timeout = Timeout(5 seconds)
val testString = "Hello, world."
describe("StringReverser") {
describe("given ReversibleString") {
it("should return the reversed string in a message") {
val actorRef = TestActorRef(new StringReverse)
val future = ask(actorRef, ReversibleString(testString)).mapTo[String]
val result = Await.result(future, 1 second)
result should equal(testString.reverse)
}
}
describe("given anything else") {
it("should return a failure message") {
val actorRef = TestActorRef(new StringReverse)
val future = ask(actorRef, "Yo").mapTo[String]
val result = Await.result(future, 1 second)
result should equal("ERROR: Unknown Message")
}
}
}
describe("running through a list of strings and passing them individually"){
it("should successfully reverse each individual string"){
val stringList: Seq[String] = Seq("Word1", "Word2", "What's Up", "Backwordz")
val actorRef = TestActorRef(new StringReverse)
stringList.foreach( (i: String) => {
val future = ask(actorRef, ReversibleString(i)).mapTo[String]
val result = Await.result(future, 1 second)
result should equal(i.reverse)
})
}
}
}
|
tcheuer/LearningAkka-Chapter2General
|
src/main/scala/com/stringReverse/Actors/stringReverse.scala
|
package com.stringReverse.Actors
import akka.actor.Actor
import com.stringReverse.messages.{ReversibleString, ReversedString}
import akka.event.Logging
/**
* Akka actor which will reverse a string.
*
 * This actor receives a ReversibleString message, reverses its contents, and
 * replies to the sender with the reversed String.
*
* @author <NAME> on 3/7/17.
*/
class StringReverse extends Actor{
val log = Logging(context.system, this)
override def receive = {
case ReversibleString(passedString) =>
log.info("Received String - {} , Returned String - {}", passedString, passedString.reverse)
println(passedString.reverse)
sender() ! passedString.reverse
    case _ =>
      // Unexpected message type: log it and reply with an error marker so that
      // ask-based callers still receive a String response.
      log.info("Unknown Message Received")
      sender() ! "ERROR: Unknown Message"
}
}
|
tcheuer/LearningAkka-Chapter2General
|
src/main/scala/com/stringReverse/messages/messages.scala
|
package com.stringReverse.messages
/**
* Created by pmitdev1 on 3/7/17.
*/
case class ReversibleString(passedString: String)
case class ReversedString(returnString: String)
|
tcheuer/LearningAkka-Chapter2General
|
src/main/scala/com/stringReverse/Main.scala
|
package com.stringReverse
import akka.util.Timeout
import com.stringReverse.Interface.Interface
import scala.concurrent.Await
import scala.concurrent.duration._
object Main {
def main(args: Array[String]): Unit = {
val toReverse = "Hello, world!"
val revObj = new Interface()
implicit val timeout = Timeout(5 seconds)
val future = revObj.revString(toReverse)
val result = Await.result(future , 1 second)
println("In main result: " + result)
System.exit(0)
}
}
|
inigo/gpx-parser
|
src/test/scala/net/surguy/gpxparser/GpxParserSpec.scala
|
package net.surguy.gpxparser
import java.time.Instant
import org.specs2.mutable.Specification
class GpxParserSpec extends Specification {
val parser = new GpxParser()
"Reading components of a GPX file" should {
"parse a trkpt" in {
parser.parsePoint(<trkpt lat="50.987654321" lon="-1.123456789"><ele>58.0</ele><time>2015-07-26T10:23:47Z</time></trkpt>) mustEqual
TrackPoint(Coordinate(50.987654321, -1.123456789), 58D, Instant.parse("2015-07-26T10:23:47Z"))
}
"parse metadata" in {
parser.parseGpxMetadata(<gpx version="1.1" creator="Runkeeper - http://www.runkeeper.com"/>) mustEqual
GpxMetadata(version = "1.1", creator = "Runkeeper - http://www.runkeeper.com")
}
"parse tracks" in {
val track = parser.parseTrack(
<trk>
<name><![CDATA[Running 7/26/15 10:23 am]]></name>
<time>2015-07-26T10:23:47Z</time>
<trkseg>
<trkpt lat="50.987654321" lon="-1.123456789"><ele>58.0</ele><time>2015-07-26T10:23:47Z</time></trkpt>
<trkpt lat="50.987654325" lon="-1.123456780"><ele>58.0</ele><time>2015-07-26T10:23:48Z</time></trkpt>
</trkseg>
</trk>)
track.name mustEqual "Running 7/26/15 10:23 am"
track.startTime mustEqual Instant.parse("2015-07-26T10:23:47Z")
track.points.map(_.time) mustEqual Seq(Instant.parse("2015-07-26T10:23:47Z"), Instant.parse("2015-07-26T10:23:48Z"))
}
}
"Reading a complete GPX file" should {
"return a valid Gpx object" in {
val gpx = parser.parse(this.getClass.getResourceAsStream("/runkeeper.gpx"))
gpx.metadata.creator mustEqual "Runkeeper - http://www.runkeeper.com"
gpx.tracks.map(_.name) mustEqual Seq("Running 7/26/15 10:23 am")
gpx.tracks.head.points must haveSize(5648)
}
}
}
|
inigo/gpx-parser
|
build.sbt
|
name := """gpx-parser"""
version := "1.0"
scalaVersion := "2.11.7"
libraryDependencies += "org.scala-lang.modules" % "scala-xml_2.11" % "1.0.4"
libraryDependencies += "org.specs2" % "specs2-core_2.11" % "3.6.3" % "test"
|
inigo/gpx-parser
|
src/main/scala/net/surguy/gpxparser/GpxParser.scala
|
package net.surguy.gpxparser
import java.io.InputStream
import java.time.Instant
import scala.language.postfixOps
import scala.xml.{Elem, XML}
/**
* Parse a cut-down version of the GPX file format, as produced by Runkeeper.
*
* @author <NAME>
*/
class GpxParser {
def parse(xml: String) = parseGpx(XML.loadString(xml))
def parse(xmlStream: InputStream) = parseGpx(XML.load(xmlStream))
def parseGpx(gpx: Elem): Gpx = Gpx(parseGpxMetadata(gpx), (gpx \ "trk").collect{ case e: Elem => parseTrack(e) } )
private[gpxparser] def parseGpxMetadata(gpx: Elem): GpxMetadata = GpxMetadata(gpx \ "@version" text, gpx \ "@creator" text)
private[gpxparser] def parseTrack(trk: Elem): Track = {
Track(trk \ "name" text, Instant.parse(trk \ "time" text),
(trk \ "trkseg" \ "trkpt").collect{ case e: Elem => parsePoint(e) })
}
// <trkpt lat="51.752529000" lon="-1.281438000"><ele>58.0</ele><time>2015-07-26T10:23:47Z</time></trkpt>
private[gpxparser] def parsePoint(trkpt: Elem): TrackPoint = {
TrackPoint(Coordinate((trkpt \ "@lat" text).toDouble, (trkpt \ "@lon" text).toDouble),
(trkpt \ "ele" text).toDouble, Instant.parse(trkpt \ "time" text))
}
}
case class Gpx(metadata: GpxMetadata, tracks: Seq[Track])
case class GpxMetadata(version: String, creator: String)
case class Track(name: String, startTime: Instant, points: Seq[TrackPoint])
case class TrackPoint(location: Coordinate, elevationInMeters: Double, time: Instant)
case class Coordinate(lat: Double, long: Double)
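/**
 * Usage sketch (hypothetical, not part of the original file): feeding the parser
 * a minimal single-point GPX document. The values mirror those in the tests.
 */
object GpxParserExample {
  def main(args: Array[String]): Unit = {
    val gpx = new GpxParser().parse(
      """<gpx version="1.1" creator="Runkeeper - http://www.runkeeper.com">
        |  <trk>
        |    <name>Morning run</name>
        |    <time>2015-07-26T10:23:47Z</time>
        |    <trkseg>
        |      <trkpt lat="51.752529" lon="-1.281438"><ele>58.0</ele><time>2015-07-26T10:23:47Z</time></trkpt>
        |    </trkseg>
        |  </trk>
        |</gpx>""".stripMargin)
    println(gpx.metadata.creator)        // Runkeeper - http://www.runkeeper.com
    println(gpx.tracks.head.points.head) // TrackPoint(Coordinate(51.752529,-1.281438),58.0,2015-07-26T10:23:47Z)
  }
}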
|
retronym/scala-sandbox
|
lessons/src/main/scala/retronym/lessons/collections/VectorAverage.scala
|
package retronym.lessons.collections
import _root_.org.spex.Specification
import org.specs.matcher.Matcher
import scala.Stream
import scalaz.Equal
object VectorAverage extends Specification {
def zipAll[A](streams: List[Stream[A]]): Stream[List[A]] = {
if (streams.exists(_.isEmpty)) Stream.empty
else Stream.cons(streams.map(_.head), zipAll(streams.map(_.tail)))
}
def zipAllWith[A, B](streams: List[Stream[A]], f: List[A] => B): Stream[B] = {
zipAll(streams).map(f)
}
def averageList(vals: List[Double]) = {
vals.reduceLeft(_ + _) / vals.length
}
def average(vectors: List[List[Double]]): List[Double] = {
    val maxLength = vectors.map(_.length).reduceLeft(Math.max _)
val streams = vectors.map(_.toStream.append(Stream.const(0.0)))
zipAllWith(streams, averageList _).take(maxLength).force
}
"VectorAverage" should {
"avergage" in {
println(classOf[BigInt].getName, classOf[BigInt].getProtectionDomain.getCodeSource)
val input = List(List(1.0, 1.5, 2.0), List(3.0, 1.5))
implicit val eqMilli = FixedEqual.EqualApproxDouble(0.001)
import FixedEqual.EqualSeq
average(input) must be_Equal(List(2.0, 1.5, 1.0))
}
}
}
case class be_Equal[T](expected: T)(implicit eq: Equal[_ >: T]) extends Matcher[T] {
override def apply(actual: => T) = {
(eq.equal(expected, actual), "matched", "expected: " + expected)
}
}
object FixedEqual {
import scalaz.Equal.equal
implicit def EqualSeq[A](implicit e: Equal[A]): Equal[Seq[A]] = equal[Seq[A]]((a1, a2) => a1.equalsWith(a2)(e.equal _))
def EqualApproxDouble(tolerance: Double): Equal[Double] = equal[Double]((a1, a2) => Math.abs(a1 - a2) <= tolerance)
}
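// Worked example (mirrors the "average" expectation in the spec above): shorter
// vectors are zero-padded before zipping, so
//   average(List(List(1.0, 1.5, 2.0), List(3.0, 1.5)))
//     == List((1.0 + 3.0) / 2, (1.5 + 1.5) / 2, (2.0 + 0.0) / 2)
//     == List(2.0, 1.5, 1.0)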
|