idx
int64
0
41.2k
question
stringlengths
74
4.04k
target
stringlengths
7
750
30,000
/**
 * Computes the local decision function for regression training purposes,
 * without the bias term.
 *
 * @param v the index of the vector to evaluate
 * @return the sum of (alpha_i - alpha*_i) * K(v, i) over all vectors with
 *         differing alpha pairs
 */
protected double decisionFunctionR(int v) {
    double acc = 0;
    final int n = vecs.size();
    for (int k = 0; k < n; k++) {
        final double diff = alphas[k] - alpha_s[k];
        if (alphas[k] != alpha_s[k])  // skip non-support vectors
            acc += diff * kEval(v, k);
    }
    return acc;
}
Returns the local decision function for regression training purposes without the bias term
30,001
/**
 * Sets the epsilon for the epsilon-insensitive loss used during regression;
 * predictions within epsilon of the target are treated as correct. Has no
 * impact on classification.
 *
 * @param epsilon the loss band width, must be in (0, infinity)
 */
public void setEpsilon(double epsilon) {
    final boolean invalid = Double.isNaN(epsilon) || Double.isInfinite(epsilon) || epsilon <= 0;
    if (invalid)
        throw new IllegalArgumentException("epsilon must be in (0, infty), not " + epsilon);
    this.epsilon = epsilon;
}
Sets the epsilon for the epsilon insensitive loss when performing regression. This variable has no impact during classification problems. For regression problems, any predicted value that is within epsilon of the target will be treated as correct. Increasing epsilon usually decreases the number of support vectors, but may reduce the accuracy of the model.
30,002
/**
 * Sets the maximum error allowed for a point outside the initial training set
 * to be accepted as an inlier.
 *
 * @param maxPointError the maximum allowed error, must be finite and non-negative
 */
public void setMaxPointError(double maxPointError) {
    final boolean bad = maxPointError < 0 || Double.isInfinite(maxPointError) || Double.isNaN(maxPointError);
    if (bad)
        throw new ArithmeticException("The error must be a positive value, not " + maxPointError);
    this.maxPointError = maxPointError;
}
Each data point not in the initial training set will be tested against the model. If a data point's error is sufficiently small, it will be added to the set of inliers.
30,003
/**
 * Returns the probability that the given data point belongs to class 1,
 * computed as e^f / (e^f + e^-f).
 *
 * @param x the data point to evaluate
 * @return the class-1 probability in [0, 1]
 */
protected double P(DataPoint x) {
    final double fx = F(x);
    final double ePos = Math.exp(fx);
    final double eNeg = Math.exp(-fx);
    // when e^fx overflows (and e^-fx is negligible) the ratio is numerically 1
    if (Double.isInfinite(ePos) && ePos > 0 && eNeg < 1e-15)
        return 1.0;
    return ePos / (ePos + eNeg);
}
Returns the probability that a given data point belongs to class 1
30,004
/**
 * Computes the logistic loss log(1 + e^(-y*pred)), with clamping to avoid
 * overflow for large exponents.
 *
 * @param pred the predicted score
 * @param y the true label in {-1, +1}
 * @return the logistic loss value
 */
public static double loss(double pred, double y) {
    final double z = -y * pred;
    if (z >= 30)
        return z;   // log(1+e^z) ~= z for large z
    if (z <= -30)
        return 0;   // log(1+e^z) ~= 0 for very negative z
    return log(1 + exp(z));
}
Computes the logistic loss
30,005
/**
 * Computes the first derivative of the logistic loss with respect to the
 * prediction: -y / (1 + e^(y*pred)), with clamping for extreme inputs.
 *
 * @param pred the predicted score
 * @param y the true label in {-1, +1}
 * @return the derivative of the logistic loss
 */
public static double deriv(double pred, double y) {
    final double x = y * pred;
    if (x >= 30)
        return 0;   // loss is flat when confidently correct
    else if (x <= -30)
        // BUG FIX: previously returned y; the limit of -y/(1+exp(x)) as
        // x -> -infinity is -y, so returning y flipped the gradient sign
        return -y;
    return -y / (1 + exp(y * pred));
}
Computes the first derivative of the logistic loss
30,006
/**
 * Computes the second derivative of the logistic loss: p(1-p) where
 * p = 1/(1+e^(y*pred)); it vanishes for extreme margins in either direction.
 *
 * @param pred the predicted score
 * @param y the true label in {-1, +1}
 * @return the second derivative of the logistic loss
 */
public static double deriv2(double pred, double y) {
    final double x = y * pred;
    if (x >= 30 || x <= -30)
        return 0;  // sigmoid saturates, curvature is numerically zero
    final double p = 1 / (1 + exp(y * pred));
    return p * (1 - p);
}
Computes the second derivative of the logistic loss
30,007
/**
 * Walks the tree from the root down toward a leaf, corrupting the path taken
 * whenever the current node splits on feature {@code j}: the chosen child
 * index is shifted by a uniformly random amount (mod child count). Stops
 * early if the selected path has been disabled.
 *
 * @param model the tree to walk
 * @param dp the data point to route down the tree
 * @param j the feature index whose splits should be corrupted
 * @param rand the source of randomness for the corrupted path choice
 * @return the node reached: a leaf, or the last node before a disabled path
 */
private TreeNodeVisitor walkCorruptedPath(TreeLearner model, DataPoint dp, int j, Random rand) {
    TreeNodeVisitor curNode = model.getTreeNodeVisitor();
    while (!curNode.isLeaf()) {
        int path = curNode.getPath(dp);
        int numChild = curNode.childrenCount();
        if (curNode.featuresUsed().contains(j)) {
            // corrupt: move to a random sibling (may wrap around to itself)
            path = (path + rand.nextInt(numChild)) % numChild;
        }
        if (curNode.isPathDisabled(path))
            break;
        else
            curNode = curNode.getChild(path);
    }
    return curNode;
}
walks the tree down to a leaf node adding corruption for a specific feature
30,008
/**
 * Sets the r regularization parameter of AROW; larger values reduce how much
 * the model changes on each update.
 *
 * @param r the regularization constant, must be in (0, infinity)
 */
public void setR(double r) {
    if (Double.isNaN(r) || Double.isInfinite(r) || r <= 0)
        // FIX: corrected "postive" typo in the exception message
        throw new IllegalArgumentException("r must be a positive constant, not " + r);
    this.r = r;
}
Sets the r parameter of AROW which controls the regularization . Larger values reduce the change in the model on each update .
30,009
/**
 * Performs sampling with replacement over {@code sampleCounts.length} indices,
 * storing in each slot the number of times that index was drawn. The array is
 * zeroed first.
 *
 * @param sampleCounts the output array of per-index draw counts
 * @param samples the total number of draws to perform
 * @param rand the source of randomness
 */
public static void sampleWithReplacement(int[] sampleCounts, int samples, Random rand) {
    Arrays.fill(sampleCounts, 0);
    int drawn = 0;
    while (drawn < samples) {
        sampleCounts[rand.nextInt(sampleCounts.length)]++;
        drawn++;
    }
}
Performs sampling with replacement over the data point indices, storing in the given array the number of times each index was sampled.
30,010
/**
 * Sets the fraction of the data set used at each iteration; SGB can use a
 * fraction of the data to reduce overfitting and add randomness.
 *
 * @param trainingProportion the proportion in (0, 1]
 */
public void setTrainingProportion(double trainingProportion) {
    final boolean outOfRange = trainingProportion > 1 || trainingProportion <= 0 || Double.isNaN(trainingProportion);
    if (outOfRange)
        throw new ArithmeticException("Training Proportion is invalid");
    this.trainingProportion = trainingProportion;
}
The GB version uses the whole data set at each iteration . SGB can use a fraction of the data set at each iteration in order to reduce overfitting and add randomness .
30,011
/**
 * Returns a function approximating the derivative of the squared error
 * sum_i (c*h(x_i) - y_i)^2 with respect to the constant c multiplied onto the
 * Regressor's output. Algebraically the returned value equals the backward
 * finite difference f(c) - f(c - eps), expanded so only one pass over the
 * data is needed; the missing 1/eps division only scales the result, which is
 * harmless if callers only use its sign/root (NOTE(review): confirm against
 * the caller's line search).
 *
 * @param backingResidsList the data set of residual targets
 * @param h the Regressor whose output is scaled by the constant
 * @return a function of the constant c approximating d/dc of the squared error
 */
private Function1D getDerivativeFunc(final RegressionDataSet backingResidsList, final Regressor h) {
    final Function1D fhPrime = (double x) -> {
        double c1 = x;       // the constant multiplier being solved for
        double eps = 1e-5;   // finite-difference step size
        double c1Pc2 = c1 * 2 - eps;  // = c1 + (c1 - eps)
        double result = 0;
        for (int i = 0; i < backingResidsList.size(); i++) {
            double hEst = h.regress(backingResidsList.getDataPoint(i));
            double target = backingResidsList.getTargetValue(i);
            // accumulate h * ((2c - eps) h - 2t), the expanded difference of squares
            result += hEst * (c1Pc2 * hEst - 2 * target);
        }
        return result * eps;  // equals f(c) - f(c - eps) for f = squared error
    };
    return fhPrime;
}
Returns a function object that approximates the derivative of the squared error of the Regressor as a function of the constant factor multiplied on the Regressor's output.
30,012
/**
 * Loads a data set from the ARFF file at the given location.
 * NOTE(review): returns {@code null} (after logging) when the file is not
 * found rather than propagating the error — callers must null-check. Also
 * uses FileReader, which reads in the platform default charset — confirm
 * ARFF files are expected in that encoding.
 *
 * @param file the ARFF file to read
 * @return the loaded data set, or {@code null} if the file could not be found
 */
public static SimpleDataSet loadArffFile(File file) {
    try {
        return loadArffFile(new FileReader(file));
    } catch (FileNotFoundException ex) {
        Logger.getLogger(ARFFLoader.class.getName()).log(Level.SEVERE, null, ex);
        return null;
    }
}
Uses the given file path to load a data set from an ARFF file .
30,013
/**
 * Trims whitespace from both ends of the string and removes a single leading
 * and/or trailing quote character (' or ") if present.
 *
 * @param in the raw name token
 * @return the cleaned name
 */
private static String nameTrim(String in) {
    in = in.trim();
    if (in.startsWith("'") || in.startsWith("\""))
        in = in.substring(1);
    // BUG FIX: the trailing check previously tested startsWith("\"") instead
    // of endsWith("\""), so double-quoted names kept their closing quote
    if (in.endsWith("'") || in.endsWith("\""))
        in = in.substring(0, in.length() - 1);
    return in.trim();
}
Removes the quotes at the end and front of a string if there are any as well as spaces at the front and end
30,014
/**
 * Sets the rate at which input is incorporated at each iteration of the SOM
 * algorithm.
 *
 * @param initialLearningRate the learning rate, must be a positive finite value
 */
public void setInitialLearningRate(double initialLearningRate) {
    final boolean bad = Double.isInfinite(initialLearningRate) || Double.isNaN(initialLearningRate) || initialLearningRate <= 0;
    if (bad)
        throw new ArithmeticException("Learning rate must be a positive constant, not " + initialLearningRate);
    this.initialLearningRate = initialLearningRate;
}
Sets the rate at which input is incorporated at each iteration of the SOM algorithm
30,015
/**
 * Sets the probability value stored for the given category.
 *
 * @param cat the index of the category to set
 * @param prob the value to store, must be finite and non-negative
 * @throws IndexOutOfBoundsException if the category index is invalid
 * @throws ArithmeticException if the probability value is invalid
 */
public void setProb(int cat, double prob) {
    // BUG FIX: was 'cat > probabilities.length', which let cat == length (and
    // negative cat) slip past the explicit check and fail later with a bare
    // ArrayIndexOutOfBoundsException lacking the informative message
    if (cat >= probabilities.length || cat < 0)
        throw new IndexOutOfBoundsException("There are only " + probabilities.length + " posibilties, " + cat + " is invalid");
    else if (prob < 0 || Double.isInfinite(prob) || Double.isNaN(prob))
        throw new ArithmeticException("Only zero and positive values are valid, not " + prob);
    probabilities[cat] = prob;
}
Sets the probability that a sample belongs to a given category .
30,016
/**
 * Returns the category with the highest current probability value; ties are
 * broken in favor of the lowest index.
 *
 * @return the index of the most likely category
 */
public int mostLikely() {
    int best = 0;
    for (int i = 1; i < probabilities.length; i++)
        if (probabilities[i] > probabilities[best])
            best = i;
    return best;
}
Returns the category that is the most likely according to the current probability values
30,017
/**
 * Sets the weak learner used for classification; if it also implements
 * Regressor it is set as the regression weak learner as well.
 *
 * @param weakL the weak classifier to use, may not be null
 */
public void setWeakLearner(Classifier weakL) {
    if (weakL == null)
        throw new NullPointerException();
    this.weakL = weakL;
    // reuse the same learner for regression when possible
    if (weakL instanceof Regressor)
        this.weakR = (Regressor) weakL;
}
Sets the weak learner used for classification . If it also supports regressions that will be set as well .
30,018
/**
 * Sets the weak learner used for regression; if it also implements Classifier
 * it is set as the classification weak learner as well.
 *
 * @param weakR the weak regressor to use, may not be null
 */
public void setWeakLearner(Regressor weakR) {
    if (weakR == null)
        throw new NullPointerException();
    this.weakR = weakR;
    // reuse the same learner for classification when possible
    if (weakR instanceof Classifier)
        this.weakL = (Classifier) weakR;
}
Sets the weak learner used for regressions . If it also supports classification that will be set as well .
30,019
/**
 * Alters the square matrix A in place so that it is in upper Hessenberg form,
 * applying a Householder reflection per column from both the left and the
 * right.
 *
 * @param A the square matrix to reduce (overwritten with the result)
 * @param threadpool thread pool for the rank-1 updates, or null for
 *        single-threaded execution
 * @throws ArithmeticException if A is not square
 */
public static void hess(Matrix A, ExecutorService threadpool) {
    if (!A.isSquare())
        throw new ArithmeticException("Only square matrices can be converted to Upper Hessenberg form");
    int m = A.rows();
    DenseVector columnUpdateTmp = new DenseVector(m);
    double[] vk = new double[m];  // Householder vector for the current column
    double[] subMatrixUpdateTmp = new double[m];
    double tmp;
    for (int i = 0; i < m - 2; i++) {
        // build the Householder vector from column i, rows i+1 .. m-1
        double s = 0.0;
        double sigh = A.get(i + 1, i);
        vk[i + 1] = sigh;
        s += sigh * sigh;
        sigh = sigh > 0 ? 1 : -1;  // sign choice to avoid cancellation
        for (int j = i + 2; j < m; j++) {
            tmp = A.get(j, i);
            vk[j] = tmp;
            s += tmp * tmp;
        }
        double s1 = -sigh * Math.sqrt(s);
        // renormalize after shifting the leading entry by s1
        s -= vk[i + 1] * vk[i + 1];
        vk[i + 1] -= s1;
        s += vk[i + 1] * vk[i + 1];
        double s1Inv = 1.0 / Math.sqrt(s);
        for (int j = i + 1; j < m; j++)
            vk[j] *= s1Inv;
        // apply the reflection from the left: subA -= 2 v (v^T subA)
        Matrix subA = new SubMatrix(A, i + 1, i, m, m);
        DenseVector vVec = new DenseVector(vk, i + 1, m);
        Vec tmpV = new DenseVector(subMatrixUpdateTmp, i, m);
        tmpV.zeroOut();
        vVec.multiply(subA, tmpV);
        if (threadpool == null)
            OuterProductUpdate(subA, vVec, tmpV, -2.0);
        else
            OuterProductUpdate(subA, vVec, tmpV, -2.0, threadpool);
        // the eliminated column reduces to (s1, 0, ..., 0)
        A.set(i + 1, i, s1);
        for (int j = i + 2; j < m; j++)
            A.set(j, i, 0.0);
        // apply the reflection from the right: subA -= 2 (subA v) v^T
        subA = new SubMatrix(A, 0, i + 1, m, m);
        columnUpdateTmp.zeroOut();
        subA.multiply(vVec, 1.0, columnUpdateTmp);
        if (threadpool == null)
            OuterProductUpdate(subA, columnUpdateTmp, vVec, -2.0);
        else
            OuterProductUpdate(subA, columnUpdateTmp, vVec, -2.0, threadpool);
    }
}
Alters the matrix A such that it is in upper Hessenberg form .
30,020
/**
 * Automatically selects a kernel function for the data set, trading execution
 * time against accuracy based on the sample count.
 *
 * @param dataPoints the data the kernel will be used on
 * @return the selected kernel function
 */
public static KernelFunction autoKernel(Vec dataPoints) {
    final int n = dataPoints.length();
    if (n < 30)
        return GaussKF.getInstance();        // small data: favor accuracy
    if (n < 1000)
        return EpanechnikovKF.getInstance(); // medium data: balanced choice
    return UniformKF.getInstance();          // large data: cheapest kernel
}
Automatically selects a good Kernel function for the data set that balances Execution time and accuracy
30,021
/**
 * Computes the leave-one-out PDF estimate at x, excluding the sample at
 * index j from the kernel sum. Binary search restricts the sum to the samples
 * within the kernel's cutoff bandwidth of x.
 *
 * @param x the point to evaluate the density at
 * @param j the sample index to leave out (a negative value excludes nothing)
 * @return the leave-one-out kernel density estimate at x
 */
private double pdf(double x, int j) {
    // locate the index range of samples within h * cutoff of x
    int from = Arrays.binarySearch(X, x - h * k.cutOff());
    int to = Arrays.binarySearch(X, x + h * k.cutOff());
    from = from < 0 ? -from - 1 : from;  // convert insertion points to indices
    to = to < 0 ? -to - 1 : to;
    // fast path: unweighted uniform kernel is just a scaled count
    // NOTE(review): this path ignores j, so it is not leave-one-out — confirm
    // callers never rely on exclusion when using a UniformKF without weights
    if (weights.length == 0 && k instanceof UniformKF)
        return (to - from) * 0.5 / (sumOFWeights * h);
    double sum = 0;
    for (int i = Math.max(0, from); i < Math.min(X.length, to + 1); i++)
        if (i != j)
            sum += k.k((x - X[i]) / h) * getWeight(i);
    return sum / (sumOFWeights * h);
}
Computes the Leave One Out PDF of the estimator
30,022
public static double loss ( double pred , double y , double eps ) { final double x = Math . abs ( pred - y ) ; return Math . max ( 0 , x - eps ) ; }
Computes the ε-insensitive loss
30,023
/**
 * Computes the first derivative of the epsilon-insensitive loss with respect
 * to the prediction: the sign of the error outside the band, 0 inside it.
 *
 * @param pred the predicted value
 * @param y the true value
 * @param eps the width of the insensitive band
 * @return -1, 0, or +1
 */
public static double deriv(double pred, double y, double eps) {
    final double diff = pred - y;
    if (eps < Math.abs(diff))
        return Math.signum(diff);
    return 0;
}
Computes the first derivative of the ε-insensitive loss
30,024
/**
 * Generates a new data set with {@code samples} points per class, where the
 * number of classes is the product of all configured dimension sizes.
 *
 * @param samples the number of points to generate for each class
 * @return the generated data set
 */
public SimpleDataSet generateData(int samples) {
    // the class label space is the cross product of all dimension values
    int totalClasses = 1;
    for (int d : dimensions)
        totalClasses *= d;
    catDataInfo = new CategoricalData[]{new CategoricalData(totalClasses)};
    List<DataPoint> dataPoints = new ArrayList<DataPoint>(totalClasses * samples);
    // single-element array used as a mutable class counter shared with addSamples
    int[] curClassPointer = new int[1];
    for (int i = 0; i < dimensions[0]; i++) {
        int[] curDim = new int[dimensions.length];
        curDim[0] = i;
        addSamples(curClassPointer, 0, samples, dataPoints, curDim);
    }
    return new SimpleDataSet(dataPoints);
}
Generates a new data set .
30,025
/**
 * Sets the minimum learning rate that will be returned.
 *
 * @param min the minimum rate, must be a positive finite value
 */
public void setMinRate(double min) {
    final boolean bad = min <= 0 || Double.isNaN(min) || Double.isInfinite(min);
    if (bad)
        throw new RuntimeException("minRate should be positive, not " + min);
    this.min = min;
}
Sets the minimum learning rate to return
30,026
/**
 * Computes the digamma function of the input using a closed-form
 * approximation, with the reflection formula for negative arguments.
 *
 * @param x the value to evaluate at
 * @return an approximation of digamma(x); NaN at 0 and the negative integers
 */
public static double digamma(double x) {
    if (x == 0)
        return Double.NaN;  // pole
    if (x < 0) {
        if (Math.rint(x) == x)
            return Double.NaN;  // poles at the negative integers
        // reflection formula: psi(x) = psi(1 - x) - pi / tan(pi x)
        return digamma(1 - x) - PI / tan(PI * x);
    }
    final double xp2 = x + 2;
    return log(xp2) - (6 * x + 13) / (12 * xp2 * xp2) - (2 * x + 1) / (x * x + x);
}
Computes the digamma function of the input
30,027
/**
 * After clustering, sorts the recorded merges by merge distance (the NN-chain
 * finds the correct merges but not their ordering), rewrites the merges array
 * in that order, then selects the cluster count in [lowK, highK] whose merge
 * distance deviates most (in standard deviations) from the running mean, and
 * finally fills in the per-point cluster designations.
 *
 * @param mergedDistance the distance at which each merge occurred
 * @param merge_kept the cluster kept by each merge (parallel to mergedDistance)
 * @param merge_removed the cluster removed by each merge (parallel to mergedDistance)
 * @param lowK the minimum number of clusters to consider
 * @param N the total number of data points
 * @param highK the maximum number of clusters to consider
 * @param designations output array of cluster assignments
 */
private void fixMergeOrderAndAssign(double[] mergedDistance, IntList merge_kept, IntList merge_removed, int lowK, final int N, int highK, int[] designations) {
    // sort all three parallel lists by merge distance
    IndexTable it = new IndexTable(mergedDistance);
    it.apply(merge_kept);
    it.apply(merge_removed);
    it.apply(mergedDistance);
    // write the ordered merge pairs into the merges array, filling from the back
    for (int i = 0; i < it.length(); i++) {
        merges[merges.length - i * 2 - 1] = merge_removed.get(i);
        merges[merges.length - i * 2 - 2] = merge_kept.get(i);
    }
    // pick the k whose merge distance is the biggest outlier vs the running stats
    OnLineStatistics distChange = new OnLineStatistics();
    double maxStndDevs = Double.MIN_VALUE;
    int clusterSize = lowK;
    for (int i = 0; i < mergedDistance.length; i++) {
        distChange.add(mergedDistance[i]);
        int curK = N - i;  // clusters remaining after merge i
        if (curK >= lowK && curK <= highK) {
            double stndDevs = (mergedDistance[i] - distChange.getMean()) / distChange.getStandardDeviation();
            if (stndDevs > maxStndDevs) {
                maxStndDevs = stndDevs;
                clusterSize = curK;
            }
        }
    }
    PriorityHAC.assignClusterDesignations(designations, clusterSize, merges);
}
After clustering we need to fix up the merge order - since the NNchain only gets the merges correct not their ordering . This also figures out what number of clusters to use
30,028
/**
 * Waits for all parties to reach this barrier. Threads race up a binary tree
 * of locks: a thread that acquires a node waits there until the barrier
 * condition flips, then wakes that node's children; a thread that loses moves
 * up to the parent. The thread that loses at the root is the last arrival —
 * it flips the condition and starts the wake-up wave from the root.
 *
 * @param ID the caller's unique id, used to pick its starting leaf node
 * @throws InterruptedException if interrupted while waiting
 */
public void await(int ID) throws InterruptedException {
    if (parties == 1)
        return;  // single party: nothing to wait for
    final boolean startCondition = competitionCondition;
    int competingFor = (locks.length * 2 - 1 - ID) / 2;  // starting (leaf) node for this id
    while (competingFor >= 0) {
        final Lock node = locks[competingFor];
        if (node.tryLock()) {
            // won this node: park until the barrier condition flips
            synchronized (node) {
                while (competitionCondition == startCondition)
                    node.wait();
            }
            node.unlock();
            // propagate the release wave to both children
            wakeUpTarget(competingFor * 2 + 1);
            wakeUpTarget(competingFor * 2 + 2);
            return;
        } else {
            if (competingFor == 0)
                break;  // lost at the root: we are the last arrival
            competingFor = (competingFor - 1) / 2;  // lost here: compete at the parent
        }
    }
    // last thread in: flip the condition and start waking from the root
    competitionCondition = !competitionCondition;
    wakeUpTarget(0);
}
Waits for all threads to reach this barrier .
30,029
/**
 * Sets the shrinkage term used for the line search.
 *
 * @param beta the shrinkage factor, must be strictly inside (0, 1)
 */
public void setBeta(double beta) {
    final boolean outOfRange = beta <= 0 || beta >= 1 || Double.isNaN(beta);
    if (outOfRange)
        throw new IllegalArgumentException("shrinkage term must be in (0, 1), not " + beta);
    this.beta = beta;
}
Sets the shrinkage term used for the line search .
30,030
/**
 * Creates and returns an unmodifiable view of the given double array,
 * requiring only a small object allocation.
 *
 * @param array the backing array
 * @param length the number of leading elements exposed by the view
 * @return an unmodifiable List view over the array
 */
public static List<Double> unmodifiableView(double[] array, int length) {
    final List<Double> backing = view(array, length);
    return Collections.unmodifiableList(backing);
}
Creates and returns an unmodifiable view of the given double array that requires only a small object allocation.
30,031
/**
 * Creates and returns a view of the given double array, requiring only a
 * small object allocation. Changes to the list are reflected in the array
 * until a modification forces the capacity to grow.
 *
 * @param array the backing array
 * @param length the number of leading elements exposed, in [0, array.length]
 * @return a DoubleList view over the array
 */
public static DoubleList view(double[] array, int length) {
    final boolean badLength = length > array.length || length < 0;
    if (badLength)
        throw new IllegalArgumentException("length must be non-negative and no more than the size of the array(" + array.length + "), not " + length);
    return new DoubleList(array, length);
}
Creates and returns a view of the given double array that requires only a small object allocation . Changes to the list will be reflected in the array up to a point . If the modification would require increasing the capacity of the array a new array will be allocated - at which point operations will no longer be reflected in the original array .
30,032
/**
 * Updates the online statistics at the given feature index for every
 * candidate lambda value.
 *
 * @param lambdas the candidate lambda values
 * @param stats per-lambda, per-feature statistics accumulators
 * @param indx the feature index being updated
 * @param val the raw observed value
 * @param mins per-feature minimums used by the transform
 * @param weight the weight of the observation
 */
private void updateStats(final List<Double> lambdas, OnLineStatistics[][] stats, int indx, double val, double[] mins, double weight) {
    final int count = lambdas.size();
    for (int pos = 0; pos < count; pos++)
        stats[pos][indx].add(transform(val, lambdas.get(pos), mins[indx]), weight);
}
Updates the online stats for each value of lambda
30,033
/**
 * Increments the value at the given global index by delegating to the
 * sub-vector that owns it; implemented directly for performance.
 *
 * @param index the global index to increment
 * @param val the amount to add
 */
public void increment(int index, double val) {
    final int base = getBaseIndex(index);
    final int localIndex = index - lengthSums[base];
    vecs[base].increment(localIndex, val);
}
Increments the value stored at the given index; implemented directly only for performance reasons.
30,034
/**
 * Sets the positive additive coefficient.
 *
 * @param c the coefficient, must be a positive finite value
 */
public void setC(double c) {
    final boolean bad = c <= 0 || Double.isNaN(c) || Double.isInfinite(c);
    if (bad)
        throw new IllegalArgumentException("coefficient must be in (0, Inf), not " + c);
    this.c = c;
}
Sets the positive additive coefficient
30,035
/**
 * Adds the constant c to the main diagonal entries of A over the index
 * range [start, to).
 *
 * @param A the matrix to update in place
 * @param start the first diagonal index (inclusive)
 * @param to the last diagonal index (exclusive)
 * @param c the constant to add
 */
public static void addDiag(Matrix A, int start, int to, double c) {
    for (int pos = start; pos < to; pos++)
        A.increment(pos, pos, c);
}
Updates the values along the main diagonal of the matrix by adding a constant to them
30,036
/**
 * Fills columns [from, to) of row i of the matrix with the given value.
 *
 * @param A the matrix to update in place
 * @param i the row to fill
 * @param from the first column (inclusive)
 * @param to the last column (exclusive)
 * @param val the value to store
 */
public static void fillRow(Matrix A, int i, int from, int to, double val) {
    for (int col = from; col < to; col++)
        A.set(i, col, val);
}
Fills the values in a row of the matrix
30,037
/**
 * Records that element value e currently lives at heap index i in the bounded
 * index store, growing the store as needed. Newly added slots are marked -1.
 *
 * @param e the element value used as the store index
 * @param i the heap index to record for that value
 */
private void indexArrayStore(int e, int i) {
    // BUG FIX: was 'length < e', which failed to grow when length == e and
    // then threw ArrayIndexOutOfBoundsException on the store below
    if (valueIndexStore.length <= e) {
        int oldLength = valueIndexStore.length;
        valueIndexStore = Arrays.copyOf(valueIndexStore, e + 2);
        Arrays.fill(valueIndexStore, oldLength, valueIndexStore.length, -1);
    }
    valueIndexStore[e] = i;
}
Sets the given index to use the specific value
30,038
/**
 * Restores the heap property upward from the given index, swapping with the
 * parent until the value is correctly ordered or the root is reached.
 *
 * @param i the heap index to sift up from
 */
private void heapifyUp(int i) {
    for (int p = parent(i); i != 0 && cmp(i, p) < 0; p = parent(i)) {
        swapHeapValues(p, i);
        i = p;
    }
}
Heapify up from the given index in the heap and make sure everything is correct . Stops when the child value is in correct order with its parent .
30,039
/**
 * Swaps the values stored at two heap indices, keeping the reverse
 * value-to-index bookkeeping (hash map or bounded array) in sync.
 *
 * @param i the first heap index
 * @param j the second heap index
 */
private void swapHeapValues(int i, int j) {
    // update the reverse mapping before moving the values themselves
    if (fastValueRemove == Mode.HASH) {
        valueIndexMap.put(heap[i], j);
        valueIndexMap.put(heap[j], i);
    } else if (fastValueRemove == Mode.BOUNDED) {
        valueIndexStore[heap[i]] = j;
        valueIndexStore[heap[j]] = i;
    }
    final int held = heap[i];
    heap[i] = heap[j];
    heap[j] = held;
}
Swaps the values stored in the heap for the given indices
30,040
/**
 * Removes the node at the given heap index, moving the right-most leaf into
 * its place, updating the value-index bookkeeping, and re-heapifying.
 *
 * @param i the heap index to remove
 * @return the value that was stored at that index
 */
protected int removeHeapNode(int i) {
    final int removed = heap[i];
    final int lastSlot = --size;
    heap[i] = heap[lastSlot];  // fill the hole with the last element
    heap[lastSlot] = 0;
    if (fastValueRemove == Mode.HASH) {
        valueIndexMap.remove(removed);
        if (size != 0)
            valueIndexMap.put(heap[i], i);
    } else if (fastValueRemove == Mode.BOUNDED) {
        valueIndexStore[removed] = -1;  // mark the slot unused
    }
    heapDown(i);
    return removed;
}
Removes the node specified from the heap
30,041
/**
 * Sets the maximum allowed 2-norm for a single neuron's weights.
 *
 * @param maxNorm the norm limit, must be a positive finite value
 */
public void setMaxNorm(double maxNorm) {
    final boolean bad = Double.isNaN(maxNorm) || Double.isInfinite(maxNorm) || maxNorm <= 0;
    if (bad)
        throw new IllegalArgumentException("The maximum norm must be a positive constant, not " + maxNorm);
    this.maxNorm = maxNorm;
}
Sets the maximum allowed 2-norm for a single neuron's weights
30,042
/**
 * Computes the Huber loss: quadratic when |y - pred| is within c, linear
 * beyond it.
 *
 * @param pred the predicted value
 * @param y the true value
 * @param c the transition point between the quadratic and linear regions
 * @return the Huber loss value
 */
public static double loss(double pred, double y, double c) {
    final double r = y - pred;
    final double absR = Math.abs(r);
    if (absR <= c)
        return 0.5 * r * r;             // quadratic region
    return c * (absR - c / 2);          // linear region
}
Computes the HuberLoss loss
30,043
/**
 * Computes the first derivative of the Huber loss with respect to the
 * prediction: the raw error inside the quadratic region, +/- c outside it.
 *
 * @param pred the predicted value
 * @param y the true value
 * @param c the transition point between the quadratic and linear regions
 * @return the derivative value
 */
public static double deriv(double pred, double y, double c) {
    final double r = pred - y;
    return Math.abs(r) <= c ? r : c * Math.signum(r);
}
Computes the first derivative of the HuberLoss loss
30,044
/**
 * Solves the linear system A x = b using the stored factor L: forward
 * substitution followed by back substitution.
 *
 * @param b the right-hand side vector
 * @return the solution vector x
 */
public Vec solve(Vec b) {
    final Vec y = forwardSub(L, b);
    return backSub(L, y);
}
Solves the linear system of equations A x = b
30,045
/**
 * Solves the linear system A x = B for each column of B using the stored
 * factor L: forward substitution followed by back substitution.
 *
 * @param B the right-hand side matrix
 * @return the solution matrix x
 */
public Matrix solve(Matrix B) {
    final Matrix y = forwardSub(L, B);
    return backSub(L, y);
}
Solves the linear system of equations A x = B
30,046
/**
 * Computes the determinant as the product of the diagonal entries of L.
 * NOTE(review): this is det(L) of the stored factor — confirm the class's
 * factorization convention for how this relates to det(A).
 *
 * @return the product of the diagonal of L
 */
public double getDet() {
    double det = 1;
    final int n = L.rows();
    for (int r = 0; r < n; r++)
        det *= L.get(r, r);
    return det;
}
Computes the determinant of A
30,047
/**
 * Applies L2 regularization by shrinking every weight vector by the factor
 * (1 - eta_t * lambda0); does nothing when lambda0 is not positive.
 *
 * @param eta_t the current learning rate
 */
private void applyL2Reg(final double eta_t) {
    if (lambda0 <= 0)
        return;  // L2 regularization disabled
    final double shrink = 1 - eta_t * lambda0;
    for (Vec w : ws)
        w.mutableMultiply(shrink);
}
Applies L2 regularization to the model
30,048
/**
 * Applies lazy, cumulative L1 regularization to the weight vectors. l1U
 * accumulates the total L1 penalty owed per weight, and l1Q[k][i] records how
 * much penalty weight i of vector k has already paid, so only the features
 * present in the current input x pay their outstanding difference. Weights
 * are shrunk toward zero but clamped so they never cross it.
 *
 * @param eta_t the current learning rate
 * @param x the (sparse) input whose non-zero indices get regularized
 */
private void applyL1Reg(final double eta_t, Vec x) {
    if (lambda1 > 0) {
        l1U += eta_t * lambda1;  // total penalty accrued so far
        for (int k = 0; k < ws.length; k++) {
            final Vec w_k = ws[k];
            final double[] l1Q_k = l1Q[k];
            for (IndexValue iv : x) {
                final int i = iv.getIndex();
                final double z = w_k.get(i);
                double newW_i = 0;
                // shrink toward zero by the outstanding penalty, clamped at zero
                if (z > 0)
                    newW_i = Math.max(0, z - (l1U + l1Q_k[i]));
                else if (z < 0)
                    newW_i = Math.min(0, z + (l1U - l1Q_k[i]));
                l1Q_k[i] += (newW_i - z);  // record the penalty actually applied
                w_k.set(i, newW_i);
            }
        }
    }
}
Applies L1 regularization to the model
30,049
/**
 * Projects the given vector through the random projection matrix and packs
 * the signs of the projected coordinates into {@code slotsPerEntry} ints
 * (one bit per coordinate, most significant bit first), written into
 * projLocation starting at {@code slot}.
 *
 * @param vec the vector to project
 * @param slot the starting position in projLocation to write to
 * @param projLocation the output array receiving the packed sign bits
 * @param projected scratch vector that receives the raw projection
 */
private void projectVector(Vec vec, int slot, int[] projLocation, Vec projected) {
    randProjMatrix.multiply(vec, 1.0, projected);
    int pos = 0;                    // which output int is being filled
    int bitsLeft = Integer.SIZE;    // bits remaining in the current int
    int curVal = 0;
    while (pos < slotsPerEntry) {
        while (bitsLeft > 0) {
            curVal <<= 1;
            // bit is 1 iff the projected coordinate is non-negative
            if (projected.get(pos * Integer.SIZE + (Integer.SIZE - bitsLeft)) >= 0)
                curVal |= 1;
            bitsLeft--;
        }
        projLocation[slot + pos] = curVal;
        curVal = 0;
        bitsLeft = Integer.SIZE;
        pos++;
    }
}
Projects a given vector into the array of integers .
30,050
/**
 * Performs pruning of the tree starting from the given root node using the
 * requested method.
 *
 * @param root the root of the tree to prune
 * @param method the pruning strategy to apply
 * @param testSet the held-out data used to evaluate prunes
 */
public static void prune(TreeNodeVisitor root, PruningMethod method, ClassificationDataSet testSet) {
    if (method == PruningMethod.NONE)
        return;
    if (method == PruningMethod.REDUCED_ERROR) {
        pruneReduceError(null, -1, root, testSet);
        return;
    }
    if (method == PruningMethod.ERROR_BASED) {
        pruneErrorBased(null, -1, root, testSet, 0.25);
        return;
    }
    throw new RuntimeException("BUG: please report");
}
Performs pruning starting from the root node of a tree
30,051
/**
 * Recursively performs reduced-error pruning: the test set is split among the
 * children by the paths they follow, children are pruned bottom-up, and a
 * node that has become a leaf is removed when its parent classifies that
 * node's share of the test set at least as well as the node itself.
 *
 * @param parent the parent of the current node, or null at the root
 * @param pathFollowed the path index taken from parent to reach current
 * @param current the node being considered for pruning
 * @param testSet the portion of the held-out data that reaches this node
 * @return the number of nodes pruned in this subtree
 */
private static int pruneReduceError(TreeNodeVisitor parent, int pathFollowed, TreeNodeVisitor current, ClassificationDataSet testSet) {
    if (current == null)
        return 0;
    int nodesPruned = 0;
    if (!current.isLeaf()) {
        // split the test points among the children by the paths they follow
        int numSplits = current.childrenCount();
        List<ClassificationDataSet> splits = new ArrayList<>(numSplits);
        IntList hadMissing = new IntList();
        double[] fracs = new double[numSplits];
        double wSum = 0;
        for (int i = 0; i < numSplits; i++)
            splits.add(testSet.emptyClone());
        for (int i = 0; i < testSet.size(); i++) {
            double w_i = testSet.getWeight(i);
            int path = current.getPath(testSet.getDataPoint(i));
            if (path >= 0) {
                splits.get(path).addDataPoint(testSet.getDataPoint(i), testSet.getDataPointCategory(i), w_i);
                wSum += w_i;
                fracs[path] += w_i;
            } else
                hadMissing.add(i);  // path undecidable due to missing values
        }
        // points with missing values are distributed proportionally to path weight
        for (int i = 0; i < numSplits; i++)
            fracs[i] /= wSum + 1e-15;
        if (!hadMissing.isEmpty())
            DecisionStump.distributMissing(splits, fracs, testSet, hadMissing);
        // prune each child's subtree recursively
        for (int i = numSplits - 1; i >= 0; i--)
            nodesPruned += pruneReduceError(current, i, current.getChild(i), splits.get(i));
    }
    if (current.isLeaf() && parent != null) {
        // compare parent vs this leaf on the same test points, by weight
        double childCorrect = 0;
        double parrentCorrect = 0;
        for (int i = 0; i < testSet.size(); i++) {
            DataPoint dp = testSet.getDataPoint(i);
            int truth = testSet.getDataPointCategory(i);
            if (current.localClassify(dp).mostLikely() == truth)
                childCorrect += testSet.getWeight(i);
            if (parent.localClassify(dp).mostLikely() == truth)
                parrentCorrect += testSet.getWeight(i);
        }
        if (parrentCorrect >= childCorrect) {
            // the parent does at least as well: cut this branch off
            parent.disablePath(pathFollowed);
            return nodesPruned + 1;
        }
        return nodesPruned;
    }
    return nodesPruned;
}
Performs pruning to reduce error on the testing set
30,052
/**
 * Sets the scale of the Levy distribution, also caching its logarithm.
 *
 * @param scale the scale, must be a positive finite value
 */
public void setScale(double scale) {
    final boolean bad = scale <= 0 || Double.isNaN(scale) || Double.isInfinite(scale);
    if (bad)
        throw new ArithmeticException("Scale must be a positive value, not " + scale);
    this.scale = scale;
    this.logScale = log(scale);  // cached for repeated density evaluations
}
Sets the scale of the Levy distribution
30,053
/**
 * Sets the location of the Levy distribution.
 *
 * @param location the location, must be a finite real number
 */
public void setLocation(double location) {
    final boolean notReal = Double.isNaN(location) || Double.isInfinite(location);
    if (notReal)
        throw new ArithmeticException("location must be a real number");
    this.location = location;
}
Sets location of the Levy distribution .
30,054
/**
 * SGD training procedure shared by each version of the CPM sub-problem.
 * Maintains K hyperplanes (the rows of W plus the biases b): negative
 * examples are pushed away from every hyperplane violating the margin, while
 * each positive example is assigned (via ASSIGN) to one hyperplane that is
 * pulled toward it. All weights decay by (1 - 1/t) each step.
 *
 * @param D the training data
 * @param W the K hyperplane weight vectors, updated in place
 * @param b the K bias terms, updated in place
 * @param sign_mul +1 or -1, flips which class is treated as positive
 * @param parallel unused in this method body
 */
private void sgdTrain(ClassificationDataSet D, MatrixOfVecs W, Vec b, int sign_mul, boolean parallel) {
    IntList order = new IntList(D.size());
    ListUtils.addRange(order, 0, D.size(), 1);
    final double lambda_adj = lambda / (D.size() * epochs);
    int[] owned = new int[K];  // how many positives each hyperplane currently owns
    int assigned_positive_instances = 0;
    int[] assignments = new int[D.size()];  // owning hyperplane per point, -1 = none yet
    Arrays.fill(assignments, -1);
    Vec dots = new DenseVector(W.rows());
    long t = 0;  // global step counter
    for (int epoch = 0; epoch < epochs; epoch++) {
        Collections.shuffle(order);  // visit points in a fresh random order each epoch
        for (int i : order) {
            t++;
            double eta = 1 / (lambda_adj * t);  // decaying learning rate
            Vec x_i = D.getDataPoint(i).getNumericalValues();
            int y_i = (D.getDataPointCategory(i) * 2 - 1) * sign_mul;  // label mapped to {-1, +1}
            b.copyTo(dots);
            W.multiply(x_i, 1.0, dots);  // dots = W x_i + b
            if (y_i == -1) {
                // negative example: push away every hyperplane violating the margin
                for (int k = 0; k < K; k++)
                    if (dots.get(k) > -1) {
                        W.getRowView(k).mutableSubtract(eta, x_i);
                        b.increment(k, -eta);
                    }
            } else {
                // positive example: find the best-responding hyperplane
                int k_true_max = 0;
                for (int k = 1; k < dots.length(); k++)
                    if (dots.get(k) > dots.get(k_true_max))
                        k_true_max = k;
                if (dots.get(k_true_max) < 1) {
                    // margin violated: choose an owner and pull it toward the point
                    int z = ASSIGN(dots, i, k_true_max, owned, assignments, assigned_positive_instances);
                    W.getRowView(z).mutableAdd(eta, x_i);
                    b.increment(z, eta);
                    if (assignments[i] < 0)
                        assigned_positive_instances++;
                    else
                        owned[assignments[i]]--;  // the point moved from a previous owner
                    owned[z]++;
                    assignments[i] = z;
                }
            }
            // weight decay from the regularization term
            W.mutableMultiply(1 - 1.0 / t);
            b.mutableMultiply(1 - 1.0 / t);
        }
    }
}
Training procedure that can be applied to each version of the CPM sub - problem .
30,055
/**
 * Sets the learning rate to use.
 *
 * @param learningRate the learning rate, must be a positive finite value
 */
public void setLearningRate(double learningRate) {
    final boolean bad = Double.isInfinite(learningRate) || Double.isNaN(learningRate) || learningRate <= 0;
    if (bad)
        throw new IllegalArgumentException("Learning rate must be positive, not " + learningRate);
    this.learningRate = learningRate;
}
Sets the learning rate to use
30,056
/**
 * Sets the magnitude a coefficient must reach for regularization to no longer
 * be applied to it. Note that an infinite threshold is deliberately allowed.
 *
 * @param threshold the threshold, must be positive and not NaN
 */
public void setThreshold(double threshold) {
    final boolean bad = Double.isNaN(threshold) || threshold <= 0;
    if (bad)
        throw new IllegalArgumentException("Threshold must be positive, not " + threshold);
    this.threshold = threshold;
}
Sets the threshold for a coefficient value to avoid regularization. Once a coefficient reaches this magnitude, regularization will not be applied.
30,057
/**
 * Sets the gravity regularization parameter that weighs down the coefficient
 * values; larger values impose stronger regularization and more sparsity.
 *
 * @param gravity the gravity term, must be a positive finite value
 */
public void setGravity(double gravity) {
    final boolean bad = Double.isInfinite(gravity) || Double.isNaN(gravity) || gravity <= 0;
    if (bad)
        throw new IllegalArgumentException("Gravity must be positive, not " + gravity);
    this.gravity = gravity;
}
Sets the gravity regularization parameter that weighs down the coefficient values . Larger gravity values impose stronger regularization and encourage greater sparsity .
30,058
/**
 * Performs the sparse (lazy) update of the weight vector for one example:
 * each feature present in x takes a gradient step and then pays, in one
 * batch, the gravity/truncation penalty for all the K-sized periods it was
 * skipped. t[j] tracks the last period boundary at which feature j was
 * updated.
 *
 * @param x the (sparse) input vector
 * @param y the true target value
 * @param yHat the predicted value for x
 */
private void performUpdate(final Vec x, final double y, final double yHat) {
    for (IndexValue iv : x) {
        final int j = iv.getIndex();
        // T(...) applies the truncation/shrinkage after the gradient step; the
        // penalty is scaled by the number of whole K-periods since the last update
        w.set(j, T(w.get(j) + 2 * learningRate * (y - yHat) * iv.getValue(), ((time - t[j]) / K) * gravity * learningRate, threshold));
        t[j] += ((time - t[j]) / K) * K;  // advance to the latest period boundary
    }
}
Performs the sparse update of the weight vector
30,059
/**
 * One step of Sequential Forward Selection: trials each remaining available
 * feature, keeps the one with the best cross-validation score, and accepts it
 * when it improves (or essentially ties) the best score so far, or when the
 * minimum feature count has not yet been reached.
 *
 * @param available the features not yet selected
 * @param dataSet the full data set
 * @param catToRemove working set of categorical features to strip before scoring
 * @param numToRemove working set of numerical features to strip before scoring
 * @param catSelecteed the categorical features selected so far (updated on accept)
 * @param numSelected the numerical features selected so far (updated on accept)
 * @param evaluater the classifier or regressor used for scoring
 * @param folds the number of cross-validation folds
 * @param rand the source of randomness for cross validation
 * @param PbestScore single-element array holding the best score so far, updated in place
 * @param minFeatures the minimum number of features that must be selected
 * @return the feature that was added, or -1 if no feature was added
 */
static protected int SFSSelectFeature(Set<Integer> available, DataSet dataSet, Set<Integer> catToRemove, Set<Integer> numToRemove, Set<Integer> catSelecteed, Set<Integer> numSelected, Object evaluater, int folds, Random rand, double[] PbestScore, int minFeatures) {
    int nCat = dataSet.getNumCategoricalVars();
    int curBest = -1;
    double curBestScore = Double.POSITIVE_INFINITY;
    // trial each candidate: removeFeature/addFeature toggle the feature in the
    // removal sets (assumed from usage — removeFeature keeps the feature for
    // the trial, addFeature undoes it; TODO confirm against their definitions)
    for (int feature : available) {
        removeFeature(feature, nCat, catToRemove, numToRemove);
        DataSet workOn = dataSet.shallowClone();
        RemoveAttributeTransform remove = new RemoveAttributeTransform(workOn, catToRemove, numToRemove);
        workOn.applyTransform(remove);
        double score = getScore(workOn, evaluater, folds, rand);
        if (score < curBestScore) {
            curBestScore = score;
            curBest = feature;
        }
        addFeature(feature, nCat, catToRemove, numToRemove);  // undo the trial
    }
    // already at a (near) perfect score with enough features: stop selecting
    if (curBestScore <= 1e-14 && PbestScore[0] <= 1e-14 && catSelecteed.size() + numSelected.size() >= minFeatures)
        return -1;
    // accept if improved, still below the feature minimum, or essentially tied
    if (curBestScore < PbestScore[0] || catSelecteed.size() + numSelected.size() < minFeatures || Math.abs(PbestScore[0] - curBestScore) < 1e-3) {
        PbestScore[0] = curBestScore;
        addFeature(curBest, nCat, catSelecteed, numSelected);
        removeFeature(curBest, nCat, catToRemove, numToRemove);
        available.remove(curBest);
        return curBest;
    } else
        return -1;
}
Attempts to add one feature to the list of features while increasing or maintaining the current accuracy
30,060
/**
 * Scores a learner on a data set by cross validation: error rate for
 * classification data, mean error for regression data.
 *
 * @param workOn the data set to evaluate on
 * @param evaluater the Classifier or Regressor to evaluate
 * @param folds the number of cross-validation folds
 * @param rand the source of randomness for fold creation
 * @return the cross-validation score, or +infinity for unsupported data sets
 */
protected static double getScore(DataSet workOn, Object evaluater, int folds, Random rand) {
    if (workOn instanceof ClassificationDataSet) {
        ClassificationModelEvaluation eval = new ClassificationModelEvaluation((Classifier) evaluater, (ClassificationDataSet) workOn);
        eval.evaluateCrossValidation(folds, rand);
        return eval.getErrorRate();
    }
    if (workOn instanceof RegressionDataSet) {
        RegressionModelEvaluation eval = new RegressionModelEvaluation((Regressor) evaluater, (RegressionDataSet) workOn);
        eval.evaluateCrossValidation(folds, rand);
        return eval.getMeanError();
    }
    return Double.POSITIVE_INFINITY;
}
The score function for a data set and a learner by cross validation of a classifier
30,061
/**
 * Sets the minimum prior observation count needed for an attribute
 * combination to have enough support to be included in the final estimate.
 *
 * @param m the minimum count, must be finite and non-negative (zero allowed)
 */
public void setM(double m) {
    final boolean bad = m < 0 || Double.isInfinite(m) || Double.isNaN(m);
    if (bad)
        throw new ArithmeticException("The minimum count must be a non negative number");
    this.m = m;
}
Sets the minimum prior observation value needed for an attribute combination to have enough support to be included in the final estimate .
30,062
/**
 * Unwraps every layer of VecPaired wrapping and returns the final base
 * vector.
 *
 * @param b the (possibly wrapped) vector
 * @return the innermost non-VecPaired vector
 */
public static Vec extractTrueVec(Vec b) {
    Vec current = b;
    while (current instanceof VecPaired)
        current = ((VecPaired) current).getVector();
    return current;
}
This method is used assuming multiple VecPaired are used together . The implementation of the vector may have logic to handle the case that the other vector is of the same type . This will go through every layer of VecPaired to return the final base vector .
30,063
/**
 * Adds the given word's value into the sparse vector, allocating a new index
 * for previously unseen words and updating the per-term document frequency.
 * Designed for concurrent use: an index of -1 marks a word mid-insertion by
 * another thread, in which case this call gives up and returns false.
 *
 * @param word the token to add
 * @param vec the document vector to store the value in
 * @param value the value to store for this word
 * @return true if the word was recorded, false if a race with another thread was lost
 */
private boolean addWord(String word, SparseVector vec, Integer value) {
    Integer indx = wordIndex.get(word);
    if (indx == null) {
        // unseen word: putIfAbsent(-1) acts as a claim; only the winner allocates
        Integer index_for_new_word;
        if ((index_for_new_word = wordIndex.putIfAbsent(word, -1)) == null) {
            index_for_new_word = currentLength.getAndIncrement();
            wordIndex.put(word, index_for_new_word);
        }
        if (index_for_new_word < 0)
            return false;  // another thread is still inserting this word
        AtomicInteger termCount = new AtomicInteger(0), tmp = null;
        tmp = termDocumentFrequencys.putIfAbsent(index_for_new_word, termCount);
        if (tmp != null)
            termCount = tmp;  // adopt the counter another thread published first
        termCount.incrementAndGet();
        int newLen = Math.max(index_for_new_word + 1, vec.length());
        vec.setLength(newLen);
        vec.set(index_for_new_word, value);
    } else {
        if (indx < 0)
            return false;  // insertion by another thread still in progress
        AtomicInteger toInc = termDocumentFrequencys.get(indx);
        if (toInc == null) {
            // counter not published yet: create one or adopt the concurrent winner
            // NOTE(review): when putIfAbsent succeeds with an initial value of 1,
            // the incrementAndGet below makes it 2 for a single occurrence —
            // confirm whether this double count is intended
            toInc = termDocumentFrequencys.putIfAbsent(indx, new AtomicInteger(1));
            if (toInc == null)
                toInc = termDocumentFrequencys.get(indx);
        }
        toInc.incrementAndGet();
        if (vec.length() <= indx)
            vec.setLength(indx + 1);
        vec.set(indx, value);
    }
    return true;
}
Does the work to add a given word to the sparse vector. May not succeed under race conditions when two or more threads are trying to add a word at the same time.
30,064
/**
 * Sets the average rate at which the event occurs in a unit of time.
 *
 * @param lambda the rate, must be a positive finite value
 */
public void setLambda(double lambda) {
    final boolean bad = Double.isNaN(lambda) || lambda <= 0 || Double.isInfinite(lambda);
    if (bad)
        throw new IllegalArgumentException("lambda must be positive, not " + lambda);
    this.lambda = lambda;
}
Sets the average rate of the event occurring in a unit of time
30,065
/**
 * Returns the index of the median, shifted upward past any run of equal
 * pivot-feature values so all ties fall on one side of a split.
 *
 * @param data the indices (into allVecs) under consideration
 * @param pivot the feature to compare on
 * @return the adjusted median index
 */
public int getMedianIndex(final List<Integer> data, int pivot) {
    int median = data.size() / 2;
    // advance while the next element has an identical pivot value
    while (median < data.size() - 1
            && allVecs.get(data.get(median)).get(pivot) == allVecs.get(data.get(median + 1)).get(pivot))
        median++;
    return median;
}
Returns the index for the median, adjusted in case multiple features have the same value.
30,066
/**
 * Sets the cardinality of the distribution, defining the maximum number of
 * items that Zipf can return. Infinity is an accepted value; the stored value
 * is rounded up to a whole number.
 *
 * @param cardinality the maximum rank; must be non-negative and not NaN
 * @throws IllegalArgumentException if {@code cardinality} is negative or NaN
 */
public void setCardinality(double cardinality)
{
    final boolean bad = Double.isNaN(cardinality) || cardinality < 0;
    if (bad)
        throw new IllegalArgumentException("Cardinality must be a positive integer or infinity, not " + cardinality);
    this.cardinality = Math.ceil(cardinality);
    fixCache();
}
Sets the cardinality of the distribution defining the maximum number of items that Zipf can return .
30,067
/**
 * Sets the skewness of the distribution. Lower values spread the probability
 * mass out; higher values concentrate it on the lowest ranks.
 *
 * @param skew the skew parameter; must be a finite, strictly positive value
 * @throws IllegalArgumentException if {@code skew} is NaN, infinite, or not positive
 */
public void setSkew(double skew)
{
    final boolean invalid = Double.isNaN(skew) || Double.isInfinite(skew) || skew <= 0;
    if (invalid)
        throw new IllegalArgumentException("Skew must be a positive value, not " + skew);
    this.skew = skew;
    fixCache();
}
Sets the skewness of the distribution . Lower values spread out the probability distribution while higher values concentrate on the lowest ranks .
30,068
/**
 * Write out the given data point to the output stream.
 * <p>
 * The point is first serialized into a thread-local byte buffer; the shared
 * stream is only touched (under synchronization) once the local buffer reaches
 * {@code LOCAL_BUFFER_SIZE}, amortizing lock contention across many writes.
 * NOTE(review): points still sitting in a thread-local buffer are not yet on
 * the stream — presumably a flush/finish step elsewhere drains them; confirm.
 *
 * @param weight the weight of the data point
 * @param dp     the data point to serialize
 * @param label  the label/target value associated with the point
 * @throws IOException if writing to the underlying stream fails
 */
public void writePoint ( double weight , DataPoint dp , double label ) throws IOException { ByteArrayOutputStream baos = local_baos . get ( ) ; pointToBytes ( weight , dp , label , baos ) ; if ( baos . size ( ) >= LOCAL_BUFFER_SIZE ) synchronized ( out ) { baos . writeTo ( out ) ; baos . reset ( ) ; } }
Write out the given data point to the output stream
30,069
/**
 * Increments the array to contain the representation of the next combination
 * of values in the polynomial.
 * <p>
 * Works like positional counting with a bounded digit sum: the lowest position
 * is bumped, and while the running count exceeds {@code max} the current
 * position is zeroed (its contribution removed from the count) and the carry
 * propagates to the next position.
 *
 * @param setTo    the degree-per-position array to advance in place
 * @param max      the maximum allowed sum of all positions
 * @param curCount the current sum of {@code setTo} before incrementing
 * @return the updated sum of {@code setTo} after the increment/carries
 */
private int increment ( int [ ] setTo , int max , int curCount ) { setTo [ 0 ] ++ ; curCount ++ ; if ( curCount <= max ) return curCount ; int carryPos = 0 ; while ( carryPos < setTo . length - 1 && curCount > max ) { curCount -= setTo [ carryPos ] ; setTo [ carryPos ] = 0 ; setTo [ ++ carryPos ] ++ ; curCount ++ ; } return curCount ; }
Increments the array to contain representation of the next combination of values in the polynomial
30,070
/**
 * Finds the parameter object with the given name, or throws an exception if a
 * parameter with the given name does not exist. The lookup is delegated to the
 * base classifier when one is set, otherwise to the base regressor.
 *
 * @param name the name of the parameter to find
 * @return the matching {@link Parameter}, never {@code null}
 * @throws IllegalArgumentException if no parameter with that name exists
 */
protected Parameter getParameterByName(String name) throws IllegalArgumentException
{
    final Parameterized source = (baseClassifier != null)
            ? (Parameterized) baseClassifier
            : (Parameterized) baseRegressor;
    final Parameter param = source.getParameter(name);
    if (param == null)
        throw new IllegalArgumentException("Parameter " + name + " does not exist");
    return param;
}
Finds the parameter object with the given name or throws an exception if a parameter with the given name does not exist .
30,071
/**
 * Computes the raw (pre-threshold) margin score for the given data point by
 * evaluating the kernel sum over all stored support vectors weighted by their
 * alpha coefficients.
 *
 * @param x the input vector to score
 * @return the kernel-weighted sum, i.e. the decision value before any bias or sign
 */
private double getPreScore ( Vec x ) { return k . evalSum ( vecs , accelCache , alpha . getBackingArray ( ) , x , 0 , alpha . size ( ) ) ; }
Computes the margin score for the given data point
30,072
/**
 * Sets the covariance matrix for this distribution.
 * <p>
 * The Cholesky factor L is retained (for sampling — TODO confirm its use at
 * call sites). When the LUP determinant is NaN or nearly zero the matrix is
 * treated as singular, and an SVD-based pseudo-determinant / pseudo-inverse is
 * used instead of the exact inverse so that degenerate covariances still give
 * a usable (pseudo) log-PDF constant.
 *
 * @param covMatrix the covariance matrix; must be square and match the mean's length
 * @throws ArithmeticException if the matrix is not square or disagrees with the mean
 */
public void setCovariance ( Matrix covMatrix ) { if ( ! covMatrix . isSquare ( ) ) throw new ArithmeticException ( "Covariance matrix must be square" ) ; else if ( covMatrix . rows ( ) != this . mean . length ( ) ) throw new ArithmeticException ( "Covariance matrix does not agree with the mean" ) ; CholeskyDecomposition cd = new CholeskyDecomposition ( covMatrix . clone ( ) ) ; L = cd . getLT ( ) ; L . mutableTranspose ( ) ; LUPDecomposition lup = new LUPDecomposition ( covMatrix . clone ( ) ) ; int k = mean . length ( ) ; double det = lup . det ( ) ; if ( Double . isNaN ( det ) || det < 1e-10 ) { SingularValueDecomposition svd = new SingularValueDecomposition ( covMatrix . clone ( ) ) ; this . logPDFConst = 0.5 * log ( svd . getPseudoDet ( ) * pow ( 2 * PI , svd . getRank ( ) ) ) ; this . invCovariance = svd . getPseudoInverse ( ) ; } else { this . logPDFConst = ( - k * log ( 2 * PI ) - log ( det ) ) * 0.5 ; this . invCovariance = lup . solve ( Matrix . eye ( k ) ) ; } }
Sets the covariance matrix for this distribution .
30,073
/**
 * Performs the actual work of PAM (Partitioning Around Medoids).
 * <p>
 * Each iteration has two phases: (1) assign every point to its nearest medoid
 * (parallelized over the data, accumulating squared distances and a count of
 * changed assignments), then (2) for each cluster, scan every point as a
 * candidate medoid and keep the one minimizing the sum of squared distances to
 * its cluster members. Iterates until no assignment changes or the iteration
 * limit is hit.
 *
 * @param data        the data set to cluster
 * @param doInit      if {@code true}, train the metric, build the acceleration
 *                    cache, and seed the initial medoids; otherwise use
 *                    {@code cacheAccel} as-is
 * @param medioids    in/out array of medoid indices into the data set
 * @param assignments out array mapping each point to its cluster id
 * @param cacheAccel  pre-built distance acceleration cache (used when
 *                    {@code doInit} is {@code false})
 * @param parallel    whether to use multiple threads
 * @return the total sum of squared point-to-medoid distances at termination
 */
protected double cluster ( DataSet data , boolean doInit , int [ ] medioids , int [ ] assignments , List < Double > cacheAccel , boolean parallel ) { DoubleAdder totalDistance = new DoubleAdder ( ) ; LongAdder changes = new LongAdder ( ) ; Arrays . fill ( assignments , - 1 ) ; int [ ] bestMedCand = new int [ medioids . length ] ; double [ ] bestMedCandDist = new double [ medioids . length ] ; List < Vec > X = data . getDataVectors ( ) ; final List < Double > accel ; if ( doInit ) { TrainableDistanceMetric . trainIfNeeded ( dm , data ) ; accel = dm . getAccelerationCache ( X ) ; selectIntialPoints ( data , medioids , dm , accel , rand , seedSelection ) ; } else accel = cacheAccel ; int iter = 0 ; do { changes . reset ( ) ; totalDistance . reset ( ) ; ParallelUtils . run ( parallel , data . size ( ) , ( start , end ) -> { for ( int i = start ; i < end ; i ++ ) { int assignment = 0 ; double minDist = dm . dist ( medioids [ 0 ] , i , X , accel ) ; for ( int k = 1 ; k < medioids . length ; k ++ ) { double dist = dm . dist ( medioids [ k ] , i , X , accel ) ; if ( dist < minDist ) { minDist = dist ; assignment = k ; } } if ( assignments [ i ] != assignment ) { changes . increment ( ) ; assignments [ i ] = assignment ; } totalDistance . add ( minDist * minDist ) ; } } ) ; Arrays . fill ( bestMedCandDist , Double . MAX_VALUE ) ; for ( int i = 0 ; i < data . size ( ) ; i ++ ) { double thisCandidateDistance ; final int clusterID = assignments [ i ] ; final int medCandadate = i ; final int ii = i ; thisCandidateDistance = ParallelUtils . range ( data . size ( ) , parallel ) . filter ( j -> j != ii && assignments [ j ] == clusterID ) . mapToDouble ( j -> Math . pow ( dm . dist ( medCandadate , j , X , accel ) , 2 ) ) . sum ( ) ; if ( thisCandidateDistance < bestMedCandDist [ clusterID ] ) { bestMedCand [ clusterID ] = i ; bestMedCandDist [ clusterID ] = thisCandidateDistance ; } } System . arraycopy ( bestMedCand , 0 , medioids , 0 , medioids . length ) ; } while ( changes . 
sum ( ) > 0 && iter ++ < iterLimit ) ; return totalDistance . sum ( ) ; }
Performs the actual work of PAM .
30,074
/**
 * Sets the decay rate used by AdaDelta. Lower values focus more on the current
 * gradient; higher values incorporate a longer history.
 *
 * @param rho the decay rate, strictly inside (0, 1)
 * @throws IllegalArgumentException if {@code rho} is NaN or outside (0, 1)
 */
public void setRho(double rho)
{
    final boolean outOfRange = Double.isNaN(rho) || rho <= 0 || rho >= 1;
    if (outOfRange)
        throw new IllegalArgumentException("Rho must be in (0, 1)");
    this.rho = rho;
}
Sets the decay rate used by AdaDelta . Lower values focus more on the current gradient where higher values incorporate a longer history .
30,075
/**
 * Sets the smoothing parameter value to use; must be in the range (0, 1].
 * Larger values make the statistics adapt faster to change, smaller values
 * adapt slower.
 *
 * @param smoothing the smoothing factor in (0, 1]
 * @throws IllegalArgumentException if {@code smoothing} is NaN or outside (0, 1]
 */
public void setSmoothing(double smoothing)
{
    final boolean outOfRange = Double.isNaN(smoothing) || smoothing <= 0 || smoothing > 1;
    if (outOfRange)
        throw new IllegalArgumentException("Smoothing must be in (0, 1], not " + smoothing);
    this.smoothing = smoothing;
}
Sets the smoothing parameter value to use . Must be in the range (0, 1] . Changing this value will impact how quickly the statistics adapt to changes , with larger values increasing the rate of change and smaller values decreasing it .
30,076
/**
 * Adds the given data point to the exponentially-weighted statistics.
 * <p>
 * A NaN mean marks the "no data yet" state, so the first observation seeds the
 * mean and zeroes the variance. Otherwise both statistics decay by
 * (1 - smoothing); note the variance update deliberately uses the OLD mean
 * before the mean itself is refreshed — the two statements must stay in this
 * order.
 *
 * @param x the new observation to fold into the running mean and variance
 */
public void add ( double x ) { if ( Double . isNaN ( mean ) ) { mean = x ; variance = 0 ; } else { variance = ( 1 - smoothing ) * ( variance + smoothing * Math . pow ( x - mean , 2 ) ) ; mean = ( 1 - smoothing ) * mean + smoothing * x ; } }
Adds the given data point to the statistics
30,077
/**
 * Sets the minimum and maximum values at the same time. This is useful when
 * setting them one at a time could conflict with the previously stored values.
 *
 * @param min the new minimum, strictly less than {@code max}
 * @param max the new maximum, strictly greater than {@code min}
 * @throws IllegalArgumentException if {@code min >= max}
 */
public void setMinMax(int min, int max)
{
    if (max > min)
    {
        this.min = min;
        this.max = max;
        return;
    }
    throw new IllegalArgumentException("The input minimum (" + min + ") must be less than the given max (" + max + ")");
}
Sets the minimum and maximum values at the same time ; this is useful when setting them one at a time may have caused a conflict with the previous values
30,078
/**
 * Set the aggressiveness parameter. Increasing the value of this parameter
 * increases the aggressiveness of the algorithm; it essentially performs a
 * type of regularization on the updates.
 *
 * @param C the aggressiveness constant; must be a finite, strictly positive value
 * @throws IllegalArgumentException if {@code C} is NaN, infinite, or not positive
 */
public void setC(double C)
{
    if (Double.isNaN(C) || Double.isInfinite(C) || C <= 0)
        // fixed message typo: "postive" -> "positive"
        throw new IllegalArgumentException("C must be a positive constant, not " + C);
    this.C = C;
}
Set the aggressiveness parameter . Increasing the value of this parameter increases the aggressiveness of the algorithm . It must be a positive value . This parameter essentially performs a type of regularization on the updates
30,079
/**
 * Guesses the distribution to use for the regularization parameter, searching
 * a log-uniform range of [2^-3, 2^3] scaled by 1 / n^2 where n is the data set
 * size.
 *
 * @param d the data set to guess the regularization search range for
 * @return a {@link LogUniform} distribution over plausible regularization values
 */
public static Distribution guessRegularization(DataSet d)
{
    final double n = d.size();
    final double nSquared = n * n;
    return new LogUniform(Math.pow(2, -3) / nSquared, Math.pow(2, 3) / nSquared);
}
Guesses the distribution to use for the Regularization parameter
30,080
/**
 * Obtains the reverse order comparator for the given comparator.
 * <p>
 * The argument order is swapped instead of negating the result: negation
 * ({@code -cmp.compare(o1, o2)}) silently breaks when the wrapped comparator
 * returns {@code Integer.MIN_VALUE}, since {@code -Integer.MIN_VALUE}
 * overflows back to {@code Integer.MIN_VALUE}.
 *
 * @param <T> the type being compared
 * @param cmp the comparator to reverse; must not be {@code null}
 * @return a comparator imposing the reverse ordering of {@code cmp}
 */
public static < T > Comparator < T > getReverse ( final Comparator < T > cmp ) { return ( T o1 , T o2 ) -> cmp . compare ( o2 , o1 ) ; }
Obtains the reverse order comparator
30,081
/**
 * Resets the index table to the identity mapping, so returned indices are in
 * linear order — i.e. the original input would be visited in its original
 * order instead of sorted order.
 */
public void reset()
{
    final int n = index.size();
    for (int pos = 0; pos < n; pos++)
        index.set(pos, pos);
}
Resets the index table so that the returned indices are in linear order meaning the original input would be returned in its original order instead of sorted order .
30,082
/**
 * Adjusts this index table to contain the sorted index order for the given
 * list, using the table's default comparator.
 *
 * @param <T>  the (comparable) element type of the list
 * @param list the list to compute a sorted index order for
 */
public < T extends Comparable < T > > void sort ( List < T > list ) { sort ( list , defaultComp ) ; }
Adjusts this index table to contain the sorted index order for the given list
30,083
/**
 * Adjusts this index table to contain the reverse sorted index order for the
 * given list, by delegating to {@code sort} with the reversed default
 * comparator.
 *
 * @param <T>  the (comparable) element type of the list
 * @param list the list to compute a reverse-sorted index order for
 */
public < T extends Comparable < T > > void sortR ( List < T > list ) { sort ( list , getReverse ( defaultComp ) ) ; }
Adjusts this index table to contain the reverse sorted index order for the given list
30,084
/**
 * Sets up the index table based on the given list and comparator.
 * <p>
 * The table is grown with identity entries if the list is longer than the
 * current table. If the table is LONGER than the list, only the prefix of
 * matching size is sorted by the comparator — the table is first put back in
 * linear order so leftover indices from a previous, larger sort cannot leak
 * into the prefix. The list's size is remembered in {@code prevSize}.
 *
 * @param <T>  the element type of the list
 * @param list the list to compute a sorted index order for
 * @param cmp  the comparator defining the sort order
 */
public < T > void sort ( List < T > list , Comparator < T > cmp ) { if ( index . size ( ) < list . size ( ) ) for ( int i = index . size ( ) ; i < list . size ( ) ; i ++ ) index . add ( i ) ; if ( list . size ( ) == index . size ( ) ) Collections . sort ( index , new IndexViewCompList ( list , cmp ) ) ; else { Collections . sort ( index ) ; Collections . sort ( index . subList ( 0 , list . size ( ) ) , new IndexViewCompList ( list , cmp ) ) ; } prevSize = list . size ( ) ; }
Sets up the index table based on the given list of the same size and comparator .
30,085
/**
 * Converts this dataset into one meant for classification problems. The given
 * categorical feature index is removed from the data and made the target
 * variable for the classification problem.
 *
 * @param index the categorical feature index to make the class label
 * @return a new {@link ClassificationDataSet} backed by this data
 * @throws IllegalArgumentException if {@code index} is negative, if there are
 *         no categorical variables, or if {@code index} is out of range
 */
public ClassificationDataSet asClassificationDataSet(int index)
{
    if (index < 0)
        throw new IllegalArgumentException("Index must be a non-negative value");
    if (getNumCategoricalVars() == 0)
        throw new IllegalArgumentException("Dataset has no categorical variables, can not create classification dataset");
    if (index >= getNumCategoricalVars())
        throw new IllegalArgumentException("Index " + index + " is larger than number of categorical features " + getNumCategoricalVars());
    return new ClassificationDataSet(this, index);
}
Converts this dataset into one meant for classification problems . The given categorical feature index is removed from the data and made the target variable for the classification problem .
30,086
/**
 * Converts this dataset into one meant for regression problems. The given
 * numeric feature index is removed from the data and made the target variable
 * for the regression problem.
 *
 * @param index the numeric feature index to make the regression target
 * @return a new {@link RegressionDataSet} backed by this data, with per-point
 *         weights carried over
 * @throws IllegalArgumentException if {@code index} is negative, if there are
 *         no numeric variables, or if {@code index} is out of range
 */
public RegressionDataSet asRegressionDataSet(int index)
{
    if (index < 0)
        throw new IllegalArgumentException("Index must be a non-negative value");
    else if (getNumNumericalVars() == 0)
        throw new IllegalArgumentException("Dataset has no numeric variables, can not create regression dataset");
    else if (index >= getNumNumericalVars())
        // fixed message typo: "i larger" -> "is larger"
        throw new IllegalArgumentException("Index " + index + " is larger than number of numeric features " + getNumNumericalVars());
    RegressionDataSet rds = new RegressionDataSet(this.datapoints.toList(), index);
    // the constructor does not copy weights, so carry them over explicitly
    for (int i = 0; i < size(); i++)
        rds.setWeight(i, this.getWeight(i));
    return rds;
}
Converts this dataset into one meant for regression problems . The given numeric feature index is removed from the data and made the target variable for the regression problem .
30,087
/**
 * Sets the reflection constant, which must be greater than 0.
 *
 * @param reflection the reflection constant; must be finite and strictly positive
 * @throws ArithmeticException if {@code reflection} is NaN, infinite, or not positive
 */
public void setReflection(double reflection)
{
    final boolean invalid = Double.isNaN(reflection) || Double.isInfinite(reflection) || reflection <= 0;
    if (invalid)
        throw new ArithmeticException("Reflection constant must be > 0, not " + reflection);
    this.reflection = reflection;
}
Sets the reflection constant which must be greater than 0
30,088
/**
 * Sets the expansion constant, which must be greater than both 1 and the
 * current reflection constant.
 *
 * @param expansion the expansion constant
 * @throws ArithmeticException if {@code expansion} is NaN, infinite, not
 *         greater than 1, or not greater than the reflection constant
 */
public void setExpansion(double expansion)
{
    if (expansion <= 1 || Double.isNaN(expansion) || Double.isInfinite(expansion))
        throw new ArithmeticException("Expansion constant must be > 1, not " + expansion);
    else if (expansion <= reflection)
        // fixed inverted message: the guard requires expansion > reflection
        throw new ArithmeticException("Expansion constant must be greater than the reflection constant");
    this.expansion = expansion;
}
Sets the expansion constant , which must be greater than both 1 and the reflection constant
30,089
/**
 * Returns a new matrix that is the transpose of this matrix; this matrix is
 * left unchanged.
 *
 * @return a freshly allocated transpose of this matrix
 */
public Matrix transpose()
{
    final Matrix result = new DenseMatrix(cols(), rows());
    transpose(result);
    return result;
}
Returns a new matrix that is the transpose of this matrix .
30,090
/**
 * Copies the values of this matrix into the other matrix of the same
 * dimensions, row by row.
 *
 * @param other the destination matrix; must have identical dimensions
 * @throws ArithmeticException if the two matrices differ in rows or columns
 */
public void copyTo(Matrix other)
{
    final boolean sameShape = this.rows() == other.rows() && this.cols() == other.cols();
    if (!sameShape)
        throw new ArithmeticException("Matrices are not of the same dimension");
    for (int row = 0; row < rows(); row++)
        this.getRowView(row).copyTo(other.getRowView(row));
}
Copies the values of this matrix into the other matrix of the same dimensions
30,091
/**
 * Hints that the caller is about to access many kernel values for a specific
 * row, so the row's cache can be pinned aside and the LRU overhead avoided.
 * Passing a negative index clears the pinned row and must be done when the
 * caller is finished. May be called repeatedly with different rows.
 * <p>
 * Only active in {@code CacheMode.ROWS}; a freshly created row cache is filled
 * with NaN to mark not-yet-computed entries, and {@code putIfAbsentAndGet}
 * resolves races where two threads create the same row concurrently.
 *
 * @param r the row index to pin, or a negative value to release the pinned row
 */
protected void accessingRow ( int r ) { if ( r < 0 ) { specific_row_cache_row = - 1 ; specific_row_cache_values = null ; return ; } if ( cacheMode == CacheMode . ROWS ) { double [ ] cache = partialCache . get ( r ) ; if ( cache == null ) { cache = new double [ vecs . size ( ) ] ; Arrays . fill ( cache , Double . NaN ) ; double [ ] cache_missed = partialCache . putIfAbsentAndGet ( r , cache ) ; if ( cache_missed != null ) cache = cache_missed ; } specific_row_cache_values = cache ; specific_row_cache_row = r ; } }
This method allows the caller to hint that they are about to access many kernel values for a specific row . The row may be selected out from the cache into its own location to avoid excess LRU overhead . Giving a negative index indicates that we are done with the row and removes it . This method may be called multiple times with different row values . But when done accessing a specific row a negative value should be passed in .
30,092
/**
 * Internal kernel eval source; increments the evaluation counter on every
 * call. Only call directly if you KNOW you will not be re-using the resulting
 * value and intentionally wish to skip the caching system.
 *
 * @param a index of the first vector
 * @param b index of the second vector
 * @return the kernel value k(a, b)
 */
protected double k ( int a , int b ) { evalCount ++ ; return kernel . eval ( a , b , vecs , accelCache ) ; }
Internal kernel eval source . Only call directly if you KNOW you will not be re-using the resulting value and intentionally wish to skip the caching system
30,093
/**
 * Sparsifies the SVM by removing the vectors with alpha = 0 from the dataset.
 * <p>
 * Support vectors (non-zero alpha) are compacted to the front of {@code vecs}
 * by swapping; when an acceleration cache exists, the cache entries (accSize
 * doubles per vector) are swapped in lockstep so cache positions keep matching
 * vector positions. Finally both {@code vecs} and {@code alphas} are trimmed
 * to the support-vector count.
 */
protected void sparsify ( ) { final int N = vecs . size ( ) ; int accSize = accelCache == null ? 0 : accelCache . size ( ) / N ; int svCount = 0 ; for ( int i = 0 ; i < N ; i ++ ) if ( alphas [ i ] != 0 ) { ListUtils . swap ( vecs , svCount , i ) ; if ( accelCache != null ) for ( int j = i * accSize ; j < ( i + 1 ) * accSize ; j ++ ) ListUtils . swap ( accelCache , svCount * accSize + j - i * accSize , j ) ; alphas [ svCount ++ ] = alphas [ i ] ; } vecs = new ArrayList < Vec > ( vecs . subList ( 0 , svCount ) ) ; alphas = Arrays . copyOfRange ( alphas , 0 , svCount ) ; }
Sparsifies the SVM by removing the vectors with alpha = 0 from the dataset .
30,094
/**
 * Sets the regularization term, where larger values indicate a larger
 * regularization penalty.
 *
 * @param lambda the regularization strength; must be a finite, strictly positive value
 * @throws IllegalArgumentException if {@code lambda} is NaN, infinite, or not positive
 */
@Parameter.WarmParameter(prefLowToHigh = false)
public void setLambda(double lambda)
{
    final boolean invalid = Double.isNaN(lambda) || Double.isInfinite(lambda) || lambda <= 0;
    if (invalid)
        throw new IllegalArgumentException("Regularization term lambda must be a positive value, not " + lambda);
    this.lambda = lambda;
}
Sets the regularization term where larger values indicate a larger regularization penalty .
30,095
/**
 * Creates a new data point to be added to the data set. The arguments will be
 * used directly; modifying them after will affect the data set.
 *
 * @param numerical  the numeric values; length must match this set's numeric feature count
 * @param categories the categorical values; length must match this set's
 *                   categorical header (negative entries denote missing values)
 * @param val        the target regression value for the new point
 * @throws RuntimeException if the numeric or categorical arity does not match,
 *                          or a non-negative category value is invalid for its header
 */
public void addDataPoint(Vec numerical, int[] categories, double val)
{
    if (numerical.length() != numNumerVals)
        throw new RuntimeException("Data point does not contain enough numerical data points");
    // fixed self-comparison bug: must compare against this data set's header,
    // not the argument array against itself (which was always false)
    if (categories.length != this.categories.length)
        throw new RuntimeException("Data point does not contain enough categorical data points");
    for (int i = 0; i < categories.length; i++)
        if (!this.categories[i].isValidCategory(categories[i]) && categories[i] >= 0) // negative = missing, allowed
            throw new RuntimeException("Category value given is invalid");
    DataPoint dp = new DataPoint(numerical, categories, this.categories);
    addDataPoint(dp, val);
}
Creates a new data point to be added to the data set . The arguments will be used directly ; modifying them after will affect the data set .
30,096
/**
 * Returns the i'th data point in the data set paired with its target regressor
 * value. Modifying the DataPointPair will affect the data set.
 *
 * @param i the index of the data point to retrieve
 * @return the data point and its target value, backed by this data set
 */
public DataPointPair < Double > getDataPointPair ( int i ) { return new DataPointPair < > ( getDataPoint ( i ) , targets . get ( i ) ) ; }
Returns the i'th data point in the data set paired with its target regressor value . Modifying the DataPointPair will affect the data set .
30,097
/**
 * Returns a mapping from the numeric indices in the transformed space back to
 * their original indices.
 *
 * @return a new map from transformed numeric index to original numeric index
 */
public Map<Integer, Integer> getReverseNumericMap()
{
    final Map<Integer, Integer> map = new HashMap<Integer, Integer>();
    int newIndex = 0;
    for (int originalIndex : numIndexMap)
        map.put(newIndex++, originalIndex);
    return map;
}
Returns a mapping from the numeric indices in the transformed space back to their original indices
30,098
/**
 * Returns a mapping from the nominal indices in the transformed space back to
 * their original indices.
 *
 * @return a new map from transformed nominal index to original nominal index
 */
public Map<Integer, Integer> getReverseNominalMap()
{
    final Map<Integer, Integer> map = new HashMap<Integer, Integer>();
    int newIndex = 0;
    for (int originalIndex : catIndexMap)
        map.put(newIndex++, originalIndex);
    return map;
}
Returns a mapping from the nominal indices in the transformed space back to their original indices
30,099
/**
 * Sets up the Remove Attribute Transform: validates the removal sets, then
 * builds the index maps from the transformed feature space back to the
 * original feature indices and clones the surviving categorical headers.
 *
 * @param dataSet             the data set the transform will be applied to
 * @param categoricalToRemove the set of categorical feature indices to drop
 * @param numericalToRemove   the set of numeric feature indices to drop
 * @throws RuntimeException if any index to remove does not exist in the data set
 */
protected final void setUp(DataSet dataSet, Set<Integer> categoricalToRemove, Set<Integer> numericalToRemove)
{
    for (int i : categoricalToRemove)
        if (i >= dataSet.getNumCategoricalVars())
            throw new RuntimeException("The data set does not have a categorical value " + i + " to remove");
    for (int i : numericalToRemove)
        if (i >= dataSet.getNumNumericalVars())
            // fixed message typo: "numercal" -> "numerical"
            throw new RuntimeException("The data set does not have a numerical value " + i + " to remove");
    catIndexMap = new int[dataSet.getNumCategoricalVars() - categoricalToRemove.size()];
    newCatHeader = new CategoricalData[catIndexMap.length];
    numIndexMap = new int[dataSet.getNumNumericalVars() - numericalToRemove.size()];
    int k = 0;
    for (int i = 0; i < dataSet.getNumCategoricalVars(); i++)
    {
        if (categoricalToRemove.contains(i))
            continue;
        // clone headers so later edits to the transform don't mutate the source data set
        newCatHeader[k] = dataSet.getCategories()[i].clone();
        catIndexMap[k++] = i;
    }
    k = 0;
    for (int i = 0; i < dataSet.getNumNumericalVars(); i++)
    {
        if (numericalToRemove.contains(i))
            continue;
        numIndexMap[k++] = i;
    }
}
Sets up the Remove Attribute Transform properly