idx int64 0 41.2k | question stringlengths 74 4.04k | target stringlengths 7 750 |
|---|---|---|
29,900 | public Vec feedfoward ( Vec x ) { Vec a_lprev = x ; for ( int l = 0 ; l < layersActivation . size ( ) ; l ++ ) { Vec z_l = new DenseVector ( layerSizes [ l + 1 ] ) ; z_l . zeroOut ( ) ; W . get ( l ) . multiply ( a_lprev , 1.0 , z_l ) ; final Vec B_l = B . get ( l ) ; z_l . mutableAdd ( B_l ) ; layersActivation . get ( l ) . activate ( z_l , z_l ) ; a_lprev = z_l ; } return a_lprev ; } | Feeds the given singular pattern through the network and computes its activations |
29,901 | private static void applyDropout ( final Matrix X , final int randThresh , final Random rand , ExecutorService ex ) { if ( ex == null ) { for ( int i = 0 ; i < X . rows ( ) ; i ++ ) for ( int j = 0 ; j < X . cols ( ) ; j ++ ) if ( rand . nextInt ( ) < randThresh ) X . set ( i , j , 0.0 ) ; } else { final CountDownLatch latch = new CountDownLatch ( SystemInfo . LogicalCores ) ; for ( int id = 0 ; id < SystemInfo . LogicalCores ; id ++ ) { final int ID = id ; ex . submit ( new Runnable ( ) { public void run ( ) { for ( int i = ID ; i < X . rows ( ) ; i += SystemInfo . LogicalCores ) for ( int j = 0 ; j < X . cols ( ) ; j ++ ) if ( rand . nextInt ( ) < randThresh ) X . set ( i , j , 0.0 ) ; latch . countDown ( ) ; } } ) ; } try { latch . await ( ) ; } catch ( InterruptedException ex1 ) { Logger . getLogger ( SGDNetworkTrainer . class . getName ( ) ) . log ( Level . SEVERE , null , ex1 ) ; } } } | Applies dropout to the given matrix |
29,902 | private DataPoint getPredVecR ( DataPoint data ) { Vec w = new DenseVector ( baseRegressors . size ( ) ) ; for ( int i = 0 ; i < baseRegressors . size ( ) ; i ++ ) w . set ( i , baseRegressors . get ( i ) . regress ( data ) ) ; return new DataPoint ( w ) ; } | Gets the predicted vector wrapped in a new DataPoint from a data point assuming we are doing regression |
29,903 | public static int getNextPow2TwinPrime ( int m ) { int pos = Arrays . binarySearch ( twinPrimesP2 , m + 1 ) ; if ( pos >= 0 ) return twinPrimesP2 [ pos ] ; else return twinPrimesP2 [ - pos - 1 ] ; } | Gets the next twin prime that is near a power of 2 and greater than or equal to the given value |
29,904 | public void replaceNumericFeatures ( List < Vec > newNumericFeatures ) { if ( this . size ( ) != newNumericFeatures . size ( ) ) throw new RuntimeException ( "Input list does not have the same not of dataums as the dataset" ) ; for ( int i = 0 ; i < newNumericFeatures . size ( ) ; i ++ ) { DataPoint dp_i = getDataPoint ( i ) ; setDataPoint ( i , new DataPoint ( newNumericFeatures . get ( i ) , dp_i . getCategoricalValues ( ) , dp_i . getCategoricalData ( ) ) ) ; } this . numNumerVals = getDataPoint ( 0 ) . numNumericalValues ( ) ; if ( this . numericalVariableNames != null ) this . numericalVariableNames . clear ( ) ; } | This method will replace every numeric feature in this dataset with a Vec object from the given list . All vecs in the given list must be of the same size . |
29,905 | protected void base_add ( DataPoint dp , double weight ) { datapoints . addDataPoint ( dp ) ; setWeight ( size ( ) - 1 , weight ) ; } | Adds a new datapoint to this set . This method is protected as not all datasets will be satisfied by adding just a data point . |
29,906 | public Iterator < DataPoint > getDataPointIterator ( ) { Iterator < DataPoint > iteData = new Iterator < DataPoint > ( ) { int cur = 0 ; int to = size ( ) ; public boolean hasNext ( ) { return cur < to ; } public DataPoint next ( ) { return getDataPoint ( cur ++ ) ; } public void remove ( ) { throw new UnsupportedOperationException ( "This operation is not supported for DataSet" ) ; } } ; return iteData ; } | Returns an iterator that will iterate over all data points in the set . The behavior is not defined if one attempts to modify the data set while being iterated . |
29,907 | public Type getMissingDropped ( ) { List < Integer > hasNoMissing = new IntList ( ) ; for ( int i = 0 ; i < size ( ) ; i ++ ) { DataPoint dp = getDataPoint ( i ) ; boolean missing = dp . getNumericalValues ( ) . countNaNs ( ) > 0 ; for ( int c : dp . getCategoricalValues ( ) ) if ( c < 0 ) missing = true ; if ( ! missing ) hasNoMissing . add ( i ) ; } return getSubset ( hasNoMissing ) ; } | This method returns a dataset that is a subset of this dataset where only the rows that have no missing values are kept . The new dataset is backed by this dataset . |
29,908 | public List < Type > randomSplit ( Random rand , double ... splits ) { if ( splits . length < 1 ) throw new IllegalArgumentException ( "Input array of split fractions must be non-empty" ) ; IntList randOrder = new IntList ( size ( ) ) ; ListUtils . addRange ( randOrder , 0 , size ( ) , 1 ) ; Collections . shuffle ( randOrder , rand ) ; int [ ] stops = new int [ splits . length ] ; double sum = 0 ; for ( int i = 0 ; i < splits . length ; i ++ ) { sum += splits [ i ] ; if ( sum >= 1.001 ) throw new IllegalArgumentException ( "Input splits sum is greater than 1 by index " + i + " reaching a sum of " + sum ) ; stops [ i ] = ( int ) Math . round ( sum * randOrder . size ( ) ) ; } List < Type > datasets = new ArrayList < > ( splits . length ) ; int prev = 0 ; for ( int i = 0 ; i < stops . length ; i ++ ) { datasets . add ( getSubset ( randOrder . subList ( prev , stops [ i ] ) ) ) ; prev = stops [ i ] ; } return datasets ; } | Splits the dataset randomly into proportionally sized partitions . |
29,909 | public List < DataPoint > getDataPoints ( ) { List < DataPoint > list = new ArrayList < > ( size ( ) ) ; for ( int i = 0 ; i < size ( ) ; i ++ ) list . add ( getDataPoint ( i ) ) ; return list ; } | Creates a list containing the same DataPoints in this set . They are soft copies in the same order as this data set . However altering this list will have no effect on DataSet . Altering the DataPoints in the list will affect the DataPoints in this DataSet . |
29,910 | public List < Vec > getDataVectors ( ) { List < Vec > vecs = new ArrayList < > ( size ( ) ) ; for ( int i = 0 ; i < size ( ) ; i ++ ) vecs . add ( getDataPoint ( i ) . getNumericalValues ( ) ) ; return vecs ; } | Creates a list of the vectors values for each data point in the correct order . |
29,911 | public void setWeight ( int i , double w ) { if ( i >= size ( ) || i < 0 ) throw new IndexOutOfBoundsException ( "Dataset has only " + size ( ) + " members, can't access index " + i ) ; else if ( Double . isNaN ( w ) || Double . isInfinite ( w ) || w < 0 ) throw new ArithmeticException ( "Invalid weight assignment of " + w ) ; if ( w == 1 && weights == null ) return ; if ( weights == null ) { weights = new double [ size ( ) ] ; Arrays . fill ( weights , 1.0 ) ; } if ( weights . length <= i ) weights = Arrays . copyOfRange ( weights , 0 , Math . max ( weights . length * 2 , i + 1 ) ) ; weights [ i ] = w ; } | Sets the weight of a given datapoint within this data set . |
29,912 | public double getWeight ( int i ) { if ( i >= size ( ) || i < 0 ) throw new IndexOutOfBoundsException ( "Dataset has only " + size ( ) + " members, can't access index " + i ) ; if ( weights == null ) return 1 ; else if ( weights . length <= i ) return 1 ; else return weights [ i ] ; } | Returns the weight of the specified data point |
29,913 | public Vec getDataWeights ( ) { final int N = this . size ( ) ; if ( N == 0 ) return new DenseVector ( 0 ) ; double weight = getWeight ( 0 ) ; double [ ] weights_copy = null ; for ( int i = 1 ; i < N ; i ++ ) { double w_i = getWeight ( i ) ; if ( weights_copy != null || weight != w_i ) { if ( weights_copy == null ) { weights_copy = new double [ N ] ; Arrays . fill ( weights_copy , 0 , i , weight ) ; } weights_copy [ i ] = w_i ; } } if ( weights_copy == null ) return new ConstantVector ( weight , size ( ) ) ; else return new DenseVector ( weights_copy ) ; } | This method returns the weight of each data point in a single Vector . When all data points have the same weight this will return a vector that uses fixed memory instead of allocating a full double backed array . |
29,914 | public < Type extends DataSet > OnLineStatistics [ ] evaluateFeatureImportance ( DataSet < Type > data , TreeFeatureImportanceInference imp ) { OnLineStatistics [ ] importances = new OnLineStatistics [ data . getNumFeatures ( ) ] ; for ( int i = 0 ; i < importances . length ; i ++ ) importances [ i ] = new OnLineStatistics ( ) ; for ( ExtraTree tree : forrest ) { double [ ] feats = imp . getImportanceStats ( tree , data ) ; for ( int i = 0 ; i < importances . length ; i ++ ) importances [ i ] . add ( feats [ i ] ) ; } return importances ; } | Measures the statistics of feature importance from the trees in this forest . |
29,915 | @ WarmParameter ( prefLowToHigh = true ) public void setC ( double C ) { if ( C <= 0 || Double . isInfinite ( C ) || Double . isNaN ( C ) ) throw new IllegalArgumentException ( "Regularization term C must be a positive value, not " + C ) ; this . C = C ; } | Sets the regularization term where smaller values indicate a larger regularization penalty . |
29,916 | private double getM_Bar_for_w0 ( int n , int l , List < Vec > columnsOfX , double [ ] col_neg_class_sum , double col_neg_class_sum_bias ) { final double D_part_i = 0.5 ; double M_bar = 0 ; for ( int j = 0 ; j < n ; j ++ ) { final double w_j = 0 ; double delta_j_L = - columnsOfX . get ( j ) . sum ( ) * 0.5 ; delta_j_L = + C * ( delta_j_L + col_neg_class_sum [ j ] ) ; double deltaS_j_fw ; deltaS_j_fw = signum ( delta_j_L ) * max ( abs ( delta_j_L ) - alpha , 0 ) ; M_bar += abs ( deltaS_j_fw ) ; } if ( useBias ) { double delta_j_L = 0 ; for ( int i = 0 ; i < l ; i ++ ) delta_j_L += - D_part_i ; delta_j_L = C * ( delta_j_L + col_neg_class_sum_bias ) ; double deltaS_j_fw = delta_j_L ; M_bar += abs ( deltaS_j_fw ) ; } return M_bar ; } | When we perform a warm start we want to train to the same point that we would have if we had not done a warm start . But our stopping point is based on the initial relative error . To get around that this method computes what the error would have been for the zero weight vector |
29,917 | public void add ( double x , double weight ) { if ( weight < 0 ) throw new ArithmeticException ( "Can not add a negative weight" ) ; else if ( weight == 0 ) return ; double n1 = n ; n += weight ; double delta = x - mean ; double delta_n = delta * weight / n ; double delta_n2 = delta_n * delta_n ; double term1 = delta * delta_n * n1 ; mean += delta_n ; m4 += term1 * delta_n2 * ( n * n - 3 * n + 3 ) + 6 * delta_n2 * m2 - 4 * delta_n * m3 ; m3 += term1 * delta_n * ( n - 2 ) - 3 * delta_n * m2 ; m2 += weight * delta * ( x - mean ) ; if ( min == null ) min = max = x ; else { min = Math . min ( min , x ) ; max = Math . max ( max , x ) ; } } | Adds a data sample the the counts with the provided weight of influence . |
29,918 | public void setBurnIn ( double burnIn ) { if ( Double . isNaN ( burnIn ) || burnIn < 0 || burnIn >= 1 ) throw new IllegalArgumentException ( "BurnInFraction must be in [0, 1), not " + burnIn ) ; this . burnIn = burnIn ; } | Sets the burn in fraction . SBP averages the intermediate solutions from each step as the final solution . The intermediate steps of SBP are highly correlated and the beginning solutions are usually not as meaningful toward the converged solution . To overcome this issue a certain fraction of the iterations are not averaged into the final solution making them the burn in fraction . A value of 0 . 25 would then be ignoring the initial 25% of solutions . |
29,919 | public boolean add ( int e ) { if ( e < 0 || e >= has . length ) throw new IllegalArgumentException ( "Input must be in range [0, " + has . length + ") not " + e ) ; else if ( contains ( e ) ) return false ; else { if ( nnz == 0 ) { first = e ; next [ e ] = prev [ e ] = STOP ; } else { prev [ first ] = e ; next [ e ] = first ; prev [ e ] = STOP ; first = e ; } nnz ++ ; return has [ e ] = true ; } } | Adds a new integer into the set |
29,920 | public static List < Integer > unmodifiableView ( int [ ] array , int length ) { return Collections . unmodifiableList ( view ( array , length ) ) ; } | Creates and returns an unmodifiable view of the given int array that requires only a small object allocation . |
29,921 | public static IntList view ( int [ ] array , int length ) { if ( length > array . length || length < 0 ) throw new IllegalArgumentException ( "length must be non-negative and no more than the size of the array(" + array . length + "), not " + length ) ; return new IntList ( array , length ) ; } | Creates and returns a view of the given int array that requires only a small object allocation . Changes to the list will be reflected in the array up to a point . If the modification would require increasing the capacity of the array a new array will be allocated - at which point operations will no longer be reflected in the original array . |
29,922 | public static IntList range ( int start , int end , int step ) { IntList l = new IntList ( ( end - start ) / step + 1 ) ; for ( int i = start ; i < end ; i ++ ) l . add ( i ) ; return l ; } | Returns a new IntList containing values in the given range |
29,923 | public int dataPointToCord ( DataPointPair < Integer > dataPoint , int targetClass , int [ ] cord ) { if ( cord . length != getDimensionSize ( ) ) throw new ArithmeticException ( "Storage space and CPT dimension miss match" ) ; DataPoint dp = dataPoint . getDataPoint ( ) ; int skipVal = - 1 ; for ( int i = 0 ; i < dimSize . length ; i ++ ) { if ( realIndexToCatIndex [ i ] == targetClass ) { if ( targetClass == dp . numCategoricalValues ( ) ) skipVal = dataPoint . getPair ( ) ; else skipVal = dp . getCategoricalValue ( realIndexToCatIndex [ i ] ) ; } if ( realIndexToCatIndex [ i ] == predictingIndex ) cord [ i ] = dataPoint . getPair ( ) ; else cord [ i ] = dp . getCategoricalValue ( realIndexToCatIndex [ i ] ) ; } return skipVal ; } | Converts a data point pair into a coordinate . The paired value contains the value for the predicting index . Though this value will not be used if the predicting class of the original data set was not used to make the table . |
29,924 | public double query ( int targetClass , int targetValue , int [ ] cord ) { double sumVal = 0 ; double targetVal = 0 ; int realTargetIndex = catIndexToRealIndex [ targetClass ] ; CategoricalData queryData = valid . get ( targetClass ) ; for ( int i = 0 ; i < queryData . getNumOfCategories ( ) ; i ++ ) { cord [ realTargetIndex ] = i ; double tmp = countArray [ cordToIndex ( cord ) ] ; sumVal += tmp ; if ( i == targetValue ) targetVal = tmp ; } return targetVal / sumVal ; } | Queries the CPT for the probability of the target class occurring with the specified value given the class values of the other attributes |
29,925 | public static Map < String , Parameter > toParameterMap ( List < Parameter > params ) { Map < String , Parameter > map = new HashMap < String , Parameter > ( params . size ( ) ) ; for ( Parameter param : params ) { if ( map . put ( param . getASCIIName ( ) , param ) != null ) throw new RuntimeException ( "Name collision, two parameters use the name '" + param . getASCIIName ( ) + "'" ) ; if ( ! param . getName ( ) . equals ( param . getASCIIName ( ) ) ) if ( map . put ( param . getName ( ) , param ) != null ) throw new RuntimeException ( "Name collision, two parameters use the name '" + param . getName ( ) + "'" ) ; } return map ; } | Creates a map of all possible parameter names to their corresponding object . No two parameters may have the same name . |
29,926 | private static String spaceCamelCase ( String in ) { StringBuilder sb = new StringBuilder ( in . length ( ) + 5 ) ; for ( int i = 0 ; i < in . length ( ) ; i ++ ) { char c = in . charAt ( i ) ; if ( Character . isUpperCase ( c ) ) sb . append ( ' ' ) ; sb . append ( c ) ; } return sb . toString ( ) . trim ( ) ; } | Returns a version of the same string that has spaces inserted before each capital letter |
29,927 | public static Distribution guessNumberOfBins ( DataSet data ) { if ( data . size ( ) < 20 ) return new UniformDiscrete ( 2 , data . size ( ) - 1 ) ; else if ( data . size ( ) >= 1000000 ) return new LogUniform ( 50 , 1000 ) ; int sqrt = ( int ) Math . sqrt ( data . size ( ) ) ; return new UniformDiscrete ( Math . max ( sqrt / 3 , 2 ) , Math . min ( sqrt * 3 , data . size ( ) - 1 ) ) ; } | Attempts to guess the number of bins to use |
29,928 | private SingularValueDecomposition getSVD ( DataSet dataSet ) { Matrix cov = covarianceMatrix ( meanVector ( dataSet ) , dataSet ) ; for ( int i = 0 ; i < cov . rows ( ) ; i ++ ) for ( int j = 0 ; j < i ; j ++ ) cov . set ( j , i , cov . get ( i , j ) ) ; EigenValueDecomposition evd = new EigenValueDecomposition ( cov ) ; evd . sortByEigenValue ( new Comparator < Double > ( ) { public int compare ( Double o1 , Double o2 ) { return - Double . compare ( o1 , o2 ) ; } } ) ; return new SingularValueDecomposition ( evd . getVRaw ( ) , evd . getVRaw ( ) , evd . getRealEigenvalues ( ) ) ; } | Gets a SVD for the covariance matrix of the data set |
29,929 | public double backwardNaive ( int n , double ... args ) { double term = getA ( n , args ) / getB ( n , args ) ; for ( n = n - 1 ; n > 0 ; n -- ) { term = getA ( n , args ) / ( getB ( n , args ) + term ) ; } return term + getB ( 0 , args ) ; } | Approximates the continued fraction using a naive approximation |
29,930 | public double lentz ( double ... args ) { double f_n = getB ( 0 , args ) ; if ( f_n == 0.0 ) f_n = 1e-30 ; double c_n , c_0 = f_n ; double d_n , d_0 = 0 ; double delta = 0 ; int j = 0 ; while ( Math . abs ( delta - 1 ) > 1e-15 ) { j ++ ; d_n = getB ( j , args ) + getA ( j , args ) * d_0 ; if ( d_n == 0.0 ) d_n = 1e-30 ; c_n = getB ( j , args ) + getA ( j , args ) / c_0 ; if ( c_n == 0.0 ) c_n = 1e-30 ; d_n = 1 / d_n ; delta = c_n * d_n ; f_n *= delta ; d_0 = d_n ; c_0 = c_n ; } return f_n ; } | Uses Thompson and Barnett s modified Lentz s algorithm create an approximation that should be accurate to full precision . |
29,931 | public static List < List < DataPoint > > createClusterListFromAssignmentArray ( int [ ] assignments , DataSet dataSet ) { List < List < DataPoint > > clusterings = new ArrayList < > ( ) ; for ( int i = 0 ; i < dataSet . size ( ) ; i ++ ) { while ( clusterings . size ( ) <= assignments [ i ] ) clusterings . add ( new ArrayList < > ( ) ) ; if ( assignments [ i ] >= 0 ) clusterings . get ( assignments [ i ] ) . add ( dataSet . getDataPoint ( i ) ) ; } return clusterings ; } | Convenient helper method . A list of lists to represent a cluster may be desirable . In such a case this method will take in an array of cluster assignments and return a list of lists . |
29,932 | public static List < DataPoint > getDatapointsFromCluster ( int c , int [ ] assignments , DataSet dataSet , int [ ] indexFrom ) { List < DataPoint > list = new ArrayList < > ( ) ; int pos = 0 ; for ( int i = 0 ; i < dataSet . size ( ) ; i ++ ) if ( assignments [ i ] == c ) { list . add ( dataSet . getDataPoint ( i ) ) ; if ( indexFrom != null ) indexFrom [ pos ++ ] = i ; } return list ; } | Gets a list of the datapoints in a data set that belong to the indicated cluster |
29,933 | public Complex add ( Complex c ) { Complex ret = new Complex ( real , imag ) ; ret . mutableAdd ( c ) ; return ret ; } | Creates a new complex number containing the resulting addition of this and another |
29,934 | public Complex subtract ( Complex c ) { Complex ret = new Complex ( real , imag ) ; ret . mutableSubtract ( c ) ; return ret ; } | Creates a new complex number containing the resulting subtracting another from this one |
29,935 | public static void cMul ( double a , double b , double c , double d , double [ ] results ) { results [ 0 ] = a * c - b * d ; results [ 1 ] = b * c + a * d ; } | Performs a complex multiplication |
29,936 | public void mutableMultiply ( double c , double d ) { double newR = this . real * c - this . imag * d ; double newI = this . imag * c + this . real * d ; this . real = newR ; this . imag = newI ; } | Alters this complex number as if a multiplication of another complex number was performed . |
29,937 | public Complex multiply ( Complex c ) { Complex ret = new Complex ( real , imag ) ; ret . mutableMultiply ( c ) ; return ret ; } | Creates a new complex number containing the resulting multiplication between this and another |
29,938 | public void mutableDivide ( double c , double d ) { final double [ ] r = new double [ 2 ] ; cDiv ( real , imag , c , d , r ) ; this . real = r [ 0 ] ; this . imag = r [ 1 ] ; } | Alters this complex number as if a division by another complex number was performed . |
29,939 | public Complex divide ( Complex c ) { Complex ret = new Complex ( real , imag ) ; ret . mutableDivide ( c ) ; return ret ; } | Creates a new complex number containing the resulting division of this by another |
29,940 | public void setDelta ( double delta ) { if ( delta <= 0 || delta >= 1 || Double . isNaN ( delta ) ) throw new IllegalArgumentException ( "delta must be in (0,1), not " + delta ) ; this . delta = delta ; } | Sets the upper bound on the false positive rate for detecting concept drifts |
29,941 | private void compress ( ) { ListIterator < OnLineStatistics > listIter = windows . listIterator ( ) ; double lastSizeSeen = - Double . MAX_VALUE ; int lastSizeCount = 0 ; while ( listIter . hasNext ( ) ) { OnLineStatistics window = listIter . next ( ) ; double n = window . getSumOfWeights ( ) ; if ( n == lastSizeSeen ) { if ( ++ lastSizeCount > M ) { listIter . previous ( ) ; window . add ( listIter . previous ( ) ) ; listIter . remove ( ) ; if ( listIter . hasNext ( ) ) listIter . next ( ) ; lastSizeSeen = window . getSumOfWeights ( ) ; lastSizeCount = 1 ; } } else { lastSizeSeen = n ; lastSizeCount = 1 ; } } } | Compresses the current window |
29,942 | private void computeSubClusterSplit ( final int [ ] [ ] subDesignation , int originalCluster , List < DataPoint > listOfDataPointsInCluster , DataSet fullDataSet , int [ ] fullDesignations , final int [ ] [ ] originalPositions , final double [ ] splitEvaluation , PriorityQueue < Integer > clusterToSplit , boolean parallel ) { subDesignation [ originalCluster ] = new int [ listOfDataPointsInCluster . size ( ) ] ; int pos = 0 ; for ( int i = 0 ; i < fullDataSet . size ( ) ; i ++ ) { if ( fullDesignations [ i ] != originalCluster ) continue ; originalPositions [ originalCluster ] [ pos ++ ] = i ; } SimpleDataSet dpSubC1DataSet = new SimpleDataSet ( listOfDataPointsInCluster ) ; try { baseClusterer . cluster ( dpSubC1DataSet , 2 , parallel , subDesignation [ originalCluster ] ) ; splitEvaluation [ originalCluster ] = clusterEvaluation . evaluate ( subDesignation [ originalCluster ] , dpSubC1DataSet ) ; clusterToSplit . add ( originalCluster ) ; } catch ( ClusterFailureException ex ) { splitEvaluation [ originalCluster ] = Double . POSITIVE_INFINITY ; } } | Takes the data set and computes the clustering of a sub cluster and stores its information and places the result in the queue |
29,943 | public static double sampleCorCoeff ( Vec xData , Vec yData ) { if ( yData . length ( ) != xData . length ( ) ) throw new ArithmeticException ( "X and Y data sets must have the same length" ) ; double xMean = xData . mean ( ) ; double yMean = yData . mean ( ) ; double topSum = 0 ; for ( int i = 0 ; i < xData . length ( ) ; i ++ ) { topSum += ( xData . get ( i ) - xMean ) * ( yData . get ( i ) - yMean ) ; } return topSum / ( ( xData . length ( ) - 1 ) * xData . standardDeviation ( ) * yData . standardDeviation ( ) ) ; } | Computes the sample correlation coefficient for two data sets X and Y . The lengths of X and Y must be the same and each element in X should correspond to the element in Y . |
29,944 | public void setRange ( double A , double B ) { if ( A == B ) throw new RuntimeException ( "Values must be different" ) ; else if ( B > A ) { double tmp = A ; A = B ; B = tmp ; } this . A = A ; this . B = B ; } | Sets the min and max value to scale the data to . If given in the wrong order this method will swap them |
29,945 | private double queryWork ( Vec x , Set < Integer > validIndecies , SparseVector logProd ) { if ( originalVecs == null ) throw new UntrainedModelException ( "Model has not yet been created, queries can not be perfomed" ) ; double logH = 0 ; for ( int i = 0 ; i < sortedDimVals . length ; i ++ ) { double [ ] X = sortedDimVals [ i ] ; double h = bandwidth [ i ] ; logH += log ( h ) ; double xi = x . get ( i ) ; int from = Arrays . binarySearch ( X , xi - h * k . cutOff ( ) ) ; int to = Arrays . binarySearch ( X , xi + h * k . cutOff ( ) ) ; from = from < 0 ? - from - 1 : from ; to = to < 0 ? - to - 1 : to ; Set < Integer > subIndecies = new IntSet ( ) ; for ( int j = max ( 0 , from ) ; j < min ( X . length , to + 1 ) ; j ++ ) { int trueIndex = sortedIndexVals [ i ] [ j ] ; if ( i == 0 ) { validIndecies . add ( trueIndex ) ; logProd . set ( trueIndex , log ( k . k ( ( xi - X [ j ] ) / h ) ) ) ; } else if ( validIndecies . contains ( trueIndex ) ) { logProd . increment ( trueIndex , log ( k . k ( ( xi - X [ j ] ) / h ) ) ) ; subIndecies . add ( trueIndex ) ; } } if ( i > 0 ) { validIndecies . retainAll ( subIndecies ) ; if ( validIndecies . isEmpty ( ) ) break ; } } return logH ; } | Performs the main work for performing a density query . |
29,946 | static private < T > void fillList ( final int listsToAdd , Stack < List < T > > reusableLists , List < List < T > > aSplit ) { for ( int j = 0 ; j < listsToAdd ; j ++ ) if ( reusableLists . isEmpty ( ) ) aSplit . add ( new ArrayList < > ( ) ) ; else aSplit . add ( reusableLists . pop ( ) ) ; } | Add lists to a list of lists |
29,947 | private void batch_insert ( Collection < Integer > set , boolean parallel ) { for ( int i : set ) store [ size ++ ] = i ; if ( parallel ) Arrays . parallelSort ( store , 0 , size ) ; else Arrays . sort ( store , 0 , size ) ; } | more efficient insertion of many items by placing them all into the backing store and then doing one large sort . |
29,948 | public void setMinMax ( double min , double max ) { if ( min <= 0 || Double . isNaN ( min ) || Double . isInfinite ( min ) ) throw new IllegalArgumentException ( "min value must be positive, not " + min ) ; else if ( min >= max || Double . isNaN ( max ) || Double . isInfinite ( max ) ) throw new IllegalArgumentException ( "max (" + max + ") must be larger than min (" + min + ")" ) ; this . max = max ; this . min = min ; this . logMax = Math . log ( max ) ; this . logMin = Math . log ( min ) ; this . logDiff = logMax - logMin ; this . diff = max - min ; } | Sets the minimum and maximum values for this distribution |
29,949 | public static void write ( ClassificationDataSet data , OutputStream os ) { PrintWriter writer = new PrintWriter ( os ) ; for ( int i = 0 ; i < data . size ( ) ; i ++ ) { int pred = data . getDataPointCategory ( i ) ; Vec vals = data . getDataPoint ( i ) . getNumericalValues ( ) ; writer . write ( pred + " " ) ; for ( IndexValue iv : vals ) { double val = iv . getValue ( ) ; if ( Math . rint ( val ) == val ) writer . write ( ( iv . getIndex ( ) + 1 ) + ":" + ( long ) val + " " ) ; else writer . write ( ( iv . getIndex ( ) + 1 ) + ":" + val + " " ) ; } writer . write ( "\n" ) ; } writer . flush ( ) ; writer . close ( ) ; } | Writes out the given classification data set as a LIBSVM data file |
29,950 | protected static double eq24 ( final double beta_i , final double gN , final double gP , final double U ) { double vi = 0 ; if ( beta_i == 0 ) { if ( gN >= 0 ) vi = gN ; else if ( gP <= 0 ) vi = - gP ; } else { if ( beta_i < 0 ) { if ( beta_i > - U || ( beta_i == - U && gN <= 0 ) ) vi = Math . abs ( gN ) ; } else { if ( beta_i < U || ( beta_i == U && gP >= 0 ) ) vi = Math . abs ( gP ) ; } } return vi ; } | returns the result of evaluation equation 24 of an individual index |
29,951 | public int getSplittingAttribute ( ) { if ( splittingAttribute < catAttributes . length ) return numNumericFeatures + splittingAttribute ; int numerAttribute = splittingAttribute - catAttributes . length ; return numerAttribute ; } | Returns the attribute that this stump has decided to use to compute results . Numeric features start from 0 and categorical features start from the number of numeric features . |
29,952 | protected double getGain ( ImpurityScore origScore , ClassificationDataSet source , List < IntList > aSplit ) { ImpurityScore [ ] scores = getSplitScores ( source , aSplit ) ; return ImpurityScore . gain ( origScore , scores ) ; } | From the score for the original set that is being split this computes the gain as the improvement in classification from the original split . |
29,953 | public int whichPath ( DataPoint data ) { int paths = getNumberOfPaths ( ) ; if ( paths < 0 ) return paths ; else if ( paths == 1 ) return 0 ; else if ( splittingAttribute < catAttributes . length ) return data . getCategoricalValue ( splittingAttribute ) ; int numerAttribute = splittingAttribute - catAttributes . length ; double val = data . getNumericalValues ( ) . get ( numerAttribute ) ; if ( Double . isNaN ( val ) ) return - 1 ; if ( results != null ) { int pos = Collections . binarySearch ( boundries , val ) ; pos = pos < 0 ? - pos - 1 : pos ; return owners . get ( pos ) ; } else { if ( regressionResults . length == 1 ) return 0 ; else if ( val <= regressionResults [ 2 ] ) return 0 ; else return 1 ; } } | Determines which split path this data point would follow from this decision stump . Works for both classification and regression . |
29,954 | public CategoricalResults result ( int i ) { if ( i < 0 || i >= getNumberOfPaths ( ) ) throw new IndexOutOfBoundsException ( "Invalid path, can to return a result for path " + i ) ; return results [ i ] ; } | Returns the categorical result of the i th path . |
29,955 | public List < ClassificationDataSet > trainC ( ClassificationDataSet dataPoints , Set < Integer > options ) { return trainC ( dataPoints , options , false ) ; } | This is a helper function that does the work of training this stump . It may be called directly by other classes that are creating decision trees to avoid redundant repackaging of lists . |
29,956 | static protected < T > void distributMissing ( List < ClassificationDataSet > splits , double [ ] fracs , ClassificationDataSet source , IntList hadMissing ) { for ( int i : hadMissing ) { DataPoint dp = source . getDataPoint ( i ) ; for ( int j = 0 ; j < fracs . length ; j ++ ) { double nw = fracs [ j ] * source . getWeight ( i ) ; if ( Double . isNaN ( nw ) ) continue ; if ( nw <= 1e-13 ) continue ; splits . get ( j ) . addDataPoint ( dp , source . getDataPointCategory ( i ) , nw ) ; } } } | Distributes a list of datapoints that had missing values to each split re - weighted by the indicated fractions |
29,957 | public void setMaxTokenLength ( int maxTokenLength ) { if ( maxTokenLength < 1 ) throw new IllegalArgumentException ( "Max token length must be positive, not " + maxTokenLength ) ; if ( maxTokenLength <= minTokenLength ) throw new IllegalArgumentException ( "Max token length must be larger than the min token length" ) ; this . maxTokenLength = maxTokenLength ; } | Sets the maximum allowed length for any token . Any token discovered exceeding the length will not be accepted and skipped over . The default is unbounded . |
29,958 | public void setMinTokenLength ( int minTokenLength ) { if ( minTokenLength < 0 ) throw new IllegalArgumentException ( "Minimum token length must be non negative, not " + minTokenLength ) ; if ( minTokenLength > maxTokenLength ) throw new IllegalArgumentException ( "Minimum token length can not exced the maximum token length" ) ; this . minTokenLength = minTokenLength ; } | Sets the minimum allowed token length . Any token discovered shorter than the minimum length will not be accepted and skipped over . The default is 0 . |
29,959 | public void addNewKernelPoint ( ) { KernelPoint source = points . get ( 0 ) ; KernelPoint toAdd = new KernelPoint ( k , errorTolerance ) ; toAdd . setMaxBudget ( maxBudget ) ; toAdd . setBudgetStrategy ( budgetStrategy ) ; standardMove ( toAdd , source ) ; toAdd . kernelAccel = source . kernelAccel ; toAdd . vecs = source . vecs ; toAdd . alpha = new DoubleList ( source . alpha . size ( ) ) ; for ( int i = 0 ; i < source . alpha . size ( ) ; i ++ ) toAdd . alpha . add ( 0.0 ) ; points . add ( toAdd ) ; } | Adds a new Kernel Point to the internal list this object represents . The new Kernel Point will be equivalent to creating a new KernelPoint directly . |
29,960 | private void standardMove ( KernelPoint destination , KernelPoint source ) { destination . InvK = source . InvK ; destination . InvKExpanded = source . InvKExpanded ; destination . K = source . K ; destination . KExpanded = source . KExpanded ; } | Updates the gram matrix storage of the destination to point at the exact same objects as the ones from the source . |
29,961 | public List < Vec > getRawBasisVecs ( ) { List < Vec > vecs = new ArrayList < Vec > ( getBasisSize ( ) ) ; vecs . addAll ( this . points . get ( 0 ) . vecs ) ; return vecs ; } | Returns a list of the raw vectors being used by the kernel points . Altering these vectors will alter the same vectors used by these objects and will cause inconsistent results .
29,962 | private void addMissingZeros ( ) { for ( int i = 0 ; i < points . size ( ) ; i ++ ) while ( points . get ( i ) . alpha . size ( ) < this . points . get ( 0 ) . vecs . size ( ) ) points . get ( i ) . alpha . add ( 0.0 ) ; } | Adds zeros to all alpha vecs that are not of the same length as the vec list |
29,963 | private void updateAverage ( ) { if ( t == last_t || t < burnIn ) return ; else if ( last_t < burnIn ) { for ( int i = 0 ; i < alphaAveraged . size ( ) ; i ++ ) alphaAveraged . set ( i , alphas . get ( i ) ) ; } double w = t - last_t ; for ( int i = 0 ; i < alphaAveraged . size ( ) ; i ++ ) { double delta = alphas . getD ( i ) - alphaAveraged . getD ( i ) ; alphaAveraged . set ( i , alphaAveraged . getD ( i ) + delta * w / t ) ; } last_t = t ; } | Updates the average model to reflect the current time average |
29,964 | public void setSigma ( double sigma ) { if ( sigma <= 0 || Double . isNaN ( sigma ) || Double . isInfinite ( sigma ) ) throw new IllegalArgumentException ( "Sigma must be a positive constant, not " + sigma ) ; this . sigma = sigma ; this . sigmaSqrd2Inv = 0.5 / ( sigma * sigma ) ; } | Sets the kernel width parameter which must be a positive value . Larger values indicate a larger width |
29,965 | public void setMaxScaled ( double maxFeature ) { if ( Double . isNaN ( maxFeature ) ) throw new ArithmeticException ( "NaN is not a valid feature value" ) ; else if ( maxFeature > 1 ) throw new ArithmeticException ( "Maximum possible feature value is 1, can not use " + maxFeature ) ; else if ( maxFeature <= minScaled ) throw new ArithmeticException ( "Maximum feature value must be learger than the minimum" ) ; this . maxScaled = maxFeature ; } | Sets the maximum value of any feature after scaling is applied . This value can be no greater than 1 . |
29,966 | public void setMinScaled ( double minFeature ) { if ( Double . isNaN ( minFeature ) ) throw new ArithmeticException ( "NaN is not a valid feature value" ) ; else if ( minFeature < - 1 ) throw new ArithmeticException ( "Minimum possible feature value is -1, can not use " + minFeature ) ; else if ( minFeature >= maxScaled ) throw new ArithmeticException ( "Minimum feature value must be smaller than the maximum" ) ; this . minScaled = minFeature ; } | Sets the minimum value of any feature after scaling is applied . This value can be no smaller than - 1 |
29,967 | public void setOmega ( double omega ) { if ( omega <= 0 || Double . isNaN ( omega ) || Double . isInfinite ( omega ) ) throw new ArithmeticException ( "omega must be positive, not " + omega ) ; this . omega = omega ; this . cnst = Math . sqrt ( Math . pow ( 2 , 1 / omega ) - 1 ) ; } | Sets the omega parameter value which controls the shape of the kernel |
29,968 | public void setSigma ( double sigma ) { if ( sigma <= 0 || Double . isNaN ( sigma ) || Double . isInfinite ( sigma ) ) throw new ArithmeticException ( "sigma must be positive, not " + sigma ) ; this . sigma = sigma ; } | Sets the sigma parameter value which controls the width of the kernel |
29,969 | private static Vec getColumn ( Matrix x ) { Vec t ; for ( int i = 0 ; i < x . cols ( ) ; i ++ ) { t = x . getColumn ( i ) ; if ( t . dot ( t ) > 0 ) return t ; } throw new ArithmeticException ( "Matrix is essentially zero" ) ; } | Returns the first non zero column |
29,970 | private void doWarmStartIfNotNull ( Object warmSolution ) throws FailedToFitException { if ( warmSolution != null ) { if ( warmSolution instanceof SimpleWeightVectorModel ) { SimpleWeightVectorModel warm = ( SimpleWeightVectorModel ) warmSolution ; if ( warm . numWeightsVecs ( ) != ws . length ) throw new FailedToFitException ( "Warm solution has " + warm . numWeightsVecs ( ) + " weight vectors instead of " + ws . length ) ; for ( int i = 0 ; i < ws . length ; i ++ ) { warm . getRawWeight ( i ) . copyTo ( ws [ i ] ) ; if ( useBiasTerm ) bs [ i ] = warm . getBias ( i ) ; } } else throw new FailedToFitException ( "Can not warm warm from " + warmSolution . getClass ( ) . getCanonicalName ( ) ) ; } } | Performs a warm start if the given object is of the appropriate class . Nothing happens if the input is null .
29,971 | public static < T > List < T > mergedView ( final List < T > left , final List < T > right ) { List < T > merged = new AbstractList < T > ( ) { public T get ( int index ) { if ( index < left . size ( ) ) return left . get ( index ) ; else if ( index - left . size ( ) < right . size ( ) ) return right . get ( index - left . size ( ) ) ; else throw new IndexOutOfBoundsException ( "List of lengt " + size ( ) + " has no index " + index ) ; } public int size ( ) { return left . size ( ) + right . size ( ) ; } } ; return merged ; } | Returns a new unmodifiable view that is the merging of two lists |
29,972 | public static < T > List < T > collectFutures ( Collection < Future < T > > futures ) throws ExecutionException , InterruptedException { ArrayList < T > collected = new ArrayList < T > ( futures . size ( ) ) ; for ( Future < T > future : futures ) collected . add ( future . get ( ) ) ; return collected ; } | Collects all future values in a collection into a list and returns said list . This method will block until all future objects are collected . |
29,973 | public static IntList range ( int start , int to , int step ) { if ( to < start ) throw new RuntimeException ( "starting index " + start + " must be less than or equal to ending index" + to ) ; else if ( step < 1 ) throw new RuntimeException ( "Step size must be a positive integer, not " + step ) ; IntList toRet = new IntList ( ( to - start ) / step ) ; for ( int i = start ; i < to ; i += step ) toRet . add ( i ) ; return toRet ; } | Returns a list of integers with values in the given range |
29,974 | protected double invCdfRootFinding ( double p , double tol ) { if ( p < 0 || p > 1 ) throw new ArithmeticException ( "Value of p must be in the range [0,1], not " + p ) ; if ( min ( ) >= Integer . MIN_VALUE ) if ( p <= cdf ( min ( ) ) ) return min ( ) ; if ( max ( ) < Integer . MAX_VALUE ) if ( p > cdf ( max ( ) - 1 ) ) return max ( ) ; Function1D cdfInterpolated = ( double x ) -> { double query = x ; if ( Math . rint ( query ) == query ) return cdf ( ( int ) query ) - p ; double larger = query + 1 ; double diff = larger - query ; return cdf ( query ) * diff + cdf ( larger ) * ( 1 - diff ) - p ; } ; double a = Double . isInfinite ( min ( ) ) ? Integer . MIN_VALUE * .95 : min ( ) ; double b = Double . isInfinite ( max ( ) ) ? Integer . MAX_VALUE * .95 : max ( ) ; double toRet = Zeroin . root ( tol , a , b , cdfInterpolated ) ; return Math . round ( toRet ) ; } | Helper method that computes the inverse CDF by performing root - finding on the CDF of the function . This provides a convenient default method for any invCdfRootFinding implementation but may not be as fast or accurate as possible . |
29,975 | public void setMomentum ( double momentum ) { if ( momentum <= 0 || momentum >= 1 || Double . isNaN ( momentum ) ) throw new IllegalArgumentException ( "Momentum must be in (0,1) not " + momentum ) ; this . momentum = momentum ; } | Sets the momentum for accumulating gradients . |
29,976 | public static double logPdf ( double x , double mu , double sigma ) { return - 0.5 * log ( 2 * PI ) - log ( sigma ) + - pow ( x - mu , 2 ) / ( 2 * sigma * sigma ) ; } | Computes the log probability of a given value |
29,977 | public void setEta ( double eta ) { if ( Double . isNaN ( eta ) || Double . isInfinite ( eta ) || eta <= 0 ) throw new ArithmeticException ( "convergence parameter must be a positive value" ) ; this . eta = eta ; } | Sets the learning rate used during training |
29,978 | public void setEpsilon ( double eps ) { if ( eps < 0 || Double . isInfinite ( eps ) || Double . isNaN ( eps ) ) throw new ArithmeticException ( "Regularization must be a positive value" ) ; this . eps = eps ; } | Sets the regularization to apply to the diagonal of the scatter matrix when creating each new metric .
29,979 | private int threshHoldExtractCluster ( List < Integer > orderedFile , int [ ] designations ) { int clustersFound = 0 ; OnLineStatistics stats = new OnLineStatistics ( ) ; for ( double r : reach_d ) if ( ! Double . isInfinite ( r ) ) stats . add ( r ) ; double thresh = stats . getMean ( ) + stats . getStandardDeviation ( ) ; for ( int i = 0 ; i < orderedFile . size ( ) ; i ++ ) { if ( reach_d [ orderedFile . get ( i ) ] >= thresh ) continue ; while ( i < orderedFile . size ( ) && reach_d [ orderedFile . get ( i ) ] < thresh ) designations [ i ++ ] = clustersFound ; while ( i + 1 < orderedFile . size ( ) && reach_d [ orderedFile . get ( i ) ] < reach_d [ orderedFile . get ( i + 1 ) ] ) designations [ i ++ ] = clustersFound ; clustersFound ++ ; } return clustersFound ; } | Finds clusters by segmenting the reachability plot with a line that is the mean reachability distance plus one standard deviation
29,980 | public void setK ( final int K ) { if ( K < 2 ) throw new IllegalArgumentException ( "At least 2 topics must be learned" ) ; this . K = K ; gammaLocal = new ThreadLocal < Vec > ( ) { protected Vec initialValue ( ) { return new DenseVector ( K ) ; } } ; logThetaLocal = new ThreadLocal < Vec > ( ) { protected Vec initialValue ( ) { return new DenseVector ( K ) ; } } ; expLogThetaLocal = new ThreadLocal < Vec > ( ) { protected Vec initialValue ( ) { return new DenseVector ( K ) ; } } ; lambda = null ; } | Sets the number of topics that LDA will try to learn |
29,981 | public void setTau0 ( double tau0 ) { if ( tau0 <= 0 || Double . isInfinite ( tau0 ) || Double . isNaN ( tau0 ) ) throw new IllegalArgumentException ( "Eta must be a positive constant, not " + tau0 ) ; this . tau0 = tau0 ; } | A learning rate constant to control the influence of early iterations on the solution . Larger values reduce the influence of earlier iterations smaller values increase the weight of earlier iterations . |
29,982 | public void setKappa ( double kappa ) { if ( kappa < 0.5 || kappa > 1.0 || Double . isNaN ( kappa ) ) throw new IllegalArgumentException ( "Kapp must be in [0.5, 1], not " + kappa ) ; this . kappa = kappa ; } | The forgetfulness factor in the learning rate . Larger values increase the rate at which old information is forgotten |
29,983 | public Vec getTopicVec ( int k ) { return new ScaledVector ( 1.0 / lambda . get ( k ) . sum ( ) , lambda . get ( k ) ) ; } | Returns the topic vector for a given topic . The vector should not be altered and is scaled so that the sum of all term weights sums to one . |
29,984 | public void model ( DataSet dataSet , int topics , ExecutorService ex ) { if ( ex == null ) ex = new FakeExecutor ( ) ; setK ( topics ) ; setD ( dataSet . size ( ) ) ; setVocabSize ( dataSet . getNumNumericalVars ( ) ) ; final List < Vec > docs = dataSet . getDataVectors ( ) ; for ( int epoch = 0 ; epoch < epochs ; epoch ++ ) { Collections . shuffle ( docs ) ; for ( int i = 0 ; i < D ; i += miniBatchSize ) { int to = Math . min ( i + miniBatchSize , D ) ; update ( docs . subList ( i , to ) , ex ) ; } } } | Fits the LDA model against the given data set |
29,985 | private void prepareGammaTheta ( Vec gamma_i , Vec eLogTheta_i , Vec expLogTheta_i , Random rand ) { final double lambdaInv = ( W * K ) / ( D * 100.0 ) ; for ( int j = 0 ; j < gamma_i . length ( ) ; j ++ ) gamma_i . set ( j , sampleExpoDist ( lambdaInv , rand . nextDouble ( ) ) + eta ) ; expandPsiMinusPsiSum ( gamma_i , gamma_i . sum ( ) , eLogTheta_i ) ; for ( int j = 0 ; j < eLogTheta_i . length ( ) ; j ++ ) expLogTheta_i . set ( j , FastMath . exp ( eLogTheta_i . get ( j ) ) ) ; } | Prepares gamma and the associated theta expectations so that the iterative updates to them can begin .
29,986 | public void addNode ( N node ) { if ( ! nodes . containsKey ( node ) ) nodes . put ( node , new Pair < HashSet < N > , HashSet < N > > ( new HashSet < N > ( ) , new HashSet < N > ( ) ) ) ; } | Adds a new node to the graph |
29,987 | public Set < N > getParents ( N n ) { Pair < HashSet < N > , HashSet < N > > p = nodes . get ( n ) ; if ( p == null ) return null ; return p . getIncoming ( ) ; } | Returns the set of all parents of the requested node or null if the node does not exist in the graph |
29,988 | public Set < N > getChildren ( N n ) { Pair < HashSet < N > , HashSet < N > > p = nodes . get ( n ) ; if ( p == null ) return null ; return p . getOutgoing ( ) ; } | Returns the set of all children of the requested node or null if the node does not exist in the graph . |
29,989 | public void removeNode ( N node ) { Pair < HashSet < N > , HashSet < N > > p = nodes . remove ( node ) ; if ( p == null ) return ; HashSet < N > incomingNodes = p . getIncoming ( ) ; for ( N incomingNode : incomingNodes ) nodes . get ( incomingNode ) . getOutgoing ( ) . remove ( node ) ; } | Removes the specified node from the graph . If the node was not in the graph no change occurs
29,990 | public void depends ( int parent , int child ) { dag . addNode ( child ) ; dag . addNode ( parent ) ; dag . addEdge ( parent , child ) ; } | Adds a dependency relationship between two variables that will be in the network . The integer value corresponds to the index of the i th categorical variable where the class target's value is the number of categorical variables .
29,991 | public void setTau ( double tau ) { if ( tau <= 0 || Double . isInfinite ( tau ) || Double . isNaN ( tau ) ) throw new IllegalArgumentException ( "tau must be a positive constant, not " + tau ) ; this . tau = tau ; } | Controls the rate early in time but has a decreasing impact on the rate returned as time goes forward . Larger values of τ dampen the initial rates returned while lower values let the initial rates start higher .
29,992 | public double regress ( DataPoint dp ) { TreeNodeVisitor node = this ; while ( ! node . isLeaf ( ) ) { int path = node . getPath ( dp ) ; if ( path < 0 ) { double sum = 0 ; double resultSum = 0 ; for ( int child = 0 ; child < childrenCount ( ) ; child ++ ) { if ( node . isPathDisabled ( child ) ) continue ; double child_result = node . getChild ( child ) . regress ( dp ) ; sum += node . getPathWeight ( child ) ; resultSum += node . getPathWeight ( child ) * child_result ; } if ( sum == 0 ) break ; if ( sum < 1.0 - 1e-5 ) resultSum /= ( sum + 1e-6 ) ; return resultSum ; } if ( node . isPathDisabled ( path ) ) break ; node = node . getChild ( path ) ; } return node . localRegress ( dp ) ; } | Performs regression on the given data point by following it down the tree until it finds the correct terminal node . |
29,993 | public final double updateAndGet ( DoubleUnaryOperator updateFunction ) { double prev , next ; do { prev = get ( ) ; next = updateFunction . applyAsDouble ( prev ) ; } while ( ! compareAndSet ( prev , next ) ) ; return next ; } | Atomically updates the current value with the results of applying the given function returning the updated value . The function should be side - effect - free since it may be re - applied when attempted updates fail due to contention among threads . |
29,994 | public final double getAndAccumulate ( double x , DoubleBinaryOperator accumulatorFunction ) { double prev , next ; do { prev = get ( ) ; next = accumulatorFunction . applyAsDouble ( prev , x ) ; } while ( ! compareAndSet ( prev , next ) ) ; return prev ; } | Atomically updates the current value with the results of applying the given function to the current and given values returning the previous value . The function should be side - effect - free since it may be re - applied when attempted updates fail due to contention among threads . The function is applied with the current value as its first argument and the given update as the second argument . |
29,995 | public void applyTo ( List < String > list ) { for ( int i = 0 ; i < list . size ( ) ; i ++ ) list . set ( i , stem ( list . get ( i ) ) ) ; } | Replaces each value in the list with the stemmed version of the word |
29,996 | public void applyTo ( String [ ] arr ) { for ( int i = 0 ; i < arr . length ; i ++ ) arr [ i ] = stem ( arr [ i ] ) ; } | Replaces each value in the array with the stemmed version of the word |
29,997 | private void updateSetsLabeled ( int i1 , final double a1 , final double C ) { final double y_i = label [ i1 ] ; I1 [ i1 ] = a1 == 0 && y_i == 1 ; I2 [ i1 ] = a1 == C && y_i == - 1 ; I3 [ i1 ] = a1 == C && y_i == 1 ; I4 [ i1 ] = a1 == 0 && y_i == - 1 ; } | Updates the index sets |
29,998 | private void updateThreshold ( int i ) { double Fi = fcache [ i ] ; double F_tilde_i = b_low ; if ( I0_b [ i ] || I2 [ i ] ) F_tilde_i = Fi + epsilon ; else if ( I0_a [ i ] || I1 [ i ] ) F_tilde_i = Fi - epsilon ; double F_bar_i = b_up ; if ( I0_a [ i ] || I3 [ i ] ) F_bar_i = Fi - epsilon ; else if ( I0_b [ i ] || I1 [ i ] ) F_bar_i = Fi + epsilon ; if ( b_low < F_tilde_i ) { b_low = F_tilde_i ; i_low = i ; } if ( b_up > F_bar_i ) { b_up = F_bar_i ; i_up = i ; } } | Updates the threshold for regression based off of using only i1 i2 and indices in I_0 |
29,999 | protected double decisionFunction ( int v ) { double sum = 0 ; for ( int i = 0 ; i < vecs . size ( ) ; i ++ ) if ( alphas [ i ] > 0 ) sum += alphas [ i ] * label [ i ] * kEval ( v , i ) ; return sum ; } | Returns the local decision function for classification training purposes without the bias term |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.