idx
int64
0
165k
question
stringlengths
73
4.15k
target
stringlengths
5
918
len_question
int64
21
890
len_target
int64
3
255
29,900
private void batch_insert ( Collection < Integer > set , boolean parallel ) { for ( int i : set ) store [ size ++ ] = i ; if ( parallel ) Arrays . parallelSort ( store , 0 , size ) ; else Arrays . sort ( store , 0 , size ) ; }
more efficient insertion of many items by placing them all into the backing store and then doing one large sort .
62
21
29,901
public void setMinMax ( double min , double max ) { if ( min <= 0 || Double . isNaN ( min ) || Double . isInfinite ( min ) ) throw new IllegalArgumentException ( "min value must be positive, not " + min ) ; else if ( min >= max || Double . isNaN ( max ) || Double . isInfinite ( max ) ) throw new IllegalArgumentException ( "max (" + max + ") must be larger than min (" + min + ")" ) ; this . max = max ; this . min = min ; this . logMax = Math . log ( max ) ; this . logMin = Math . log ( min ) ; this . logDiff = logMax - logMin ; this . diff = max - min ; }
Sets the minimum and maximum values for this distribution
166
10
29,902
public static void write ( ClassificationDataSet data , OutputStream os ) { PrintWriter writer = new PrintWriter ( os ) ; for ( int i = 0 ; i < data . size ( ) ; i ++ ) { int pred = data . getDataPointCategory ( i ) ; Vec vals = data . getDataPoint ( i ) . getNumericalValues ( ) ; writer . write ( pred + " " ) ; for ( IndexValue iv : vals ) { double val = iv . getValue ( ) ; if ( Math . rint ( val ) == val ) //cast to long before writting to save space writer . write ( ( iv . getIndex ( ) + 1 ) + ":" + ( long ) val + " " ) ; //+1 b/c 1 based indexing else writer . write ( ( iv . getIndex ( ) + 1 ) + ":" + val + " " ) ; //+1 b/c 1 based indexing } writer . write ( "\n" ) ; } writer . flush ( ) ; writer . close ( ) ; }
Writes out the given classification data set as a LIBSVM data file
227
15
29,903
// Evaluates "equation 24" -- the per-coordinate optimality/violation measure -- for one index.
// beta_i : current coefficient value, constrained to [-U, U]
// gN, gP : the left ("negative") and right ("positive") directional gradients at beta_i
// U      : the box constraint bound
// Returns v_i, the magnitude of the violation for this coordinate (0 when the coordinate
// is already optimal, e.g. beta_i pinned at a bound with a gradient pointing outward).
// NOTE(review): the case analysis follows section 6.2.2 of the referenced paper (see the
// inline comments); confirm against the original publication before changing any boundary.
protected static double eq24 ( final double beta_i , final double gN , final double gP , final double U ) { //6.2.2 double vi = 0 ; //Used as "other" value if ( beta_i == 0 ) //if beta_i = 0 ... { //if beta_i = 0 and g'n(beta_i) >= 0 if ( gN >= 0 ) vi = gN ; else if ( gP <= 0 ) //if beta_i = 0 and g'p(beta_i) <= 0 vi = - gP ; } else //beta_i is non zero { //Two cases //if beta_i in (−U, 0), or //beta_i = −U and g'n(beta_i) <= 0 //then v_i = |g'n| //if beta_i in (0,U), or //beta_i = U and g'p(βi) >= 0 //then v_i = |g'p| if ( beta_i < 0 ) //first set of cases { if ( beta_i > - U || ( beta_i == - U && gN <= 0 ) ) vi = Math . abs ( gN ) ; } else //second case { if ( beta_i < U || ( beta_i == U && gP >= 0 ) ) vi = Math . abs ( gP ) ; } } return vi ; }
returns the result of evaluation equation 24 of an individual index
307
12
29,904
public int getSplittingAttribute ( ) { //TODO refactor the splittingAttribute to just be in this order already if ( splittingAttribute < catAttributes . length ) //categorical feature return numNumericFeatures + splittingAttribute ; //else, is Numerical attribute int numerAttribute = splittingAttribute - catAttributes . length ; return numerAttribute ; }
Returns the attribute that this stump has decided to use to compute results . Numeric features start from 0 and categorical features start from the number of numeric features .
74
32
29,905
protected double getGain ( ImpurityScore origScore , ClassificationDataSet source , List < IntList > aSplit ) { ImpurityScore [ ] scores = getSplitScores ( source , aSplit ) ; return ImpurityScore . gain ( origScore , scores ) ; }
From the score for the original set that is being split this computes the gain as the improvement in classification from the original split .
58
26
29,906
// Determines which split path the given data point follows from this decision stump.
// Return values: a negative number when the stump is untrained (getNumberOfPaths() < 0),
// -1 when the tested numeric value is missing (NaN), otherwise a path index in [0, paths).
// Routing: a single path (entropy zero) always returns 0; categorical splitting attributes
// route directly by the point's category value; numeric attributes are routed either by a
// binary search over the learned boundaries (classification case, results != null) or by a
// threshold test against regressionResults[2] (regression case).
public int whichPath ( DataPoint data ) { int paths = getNumberOfPaths ( ) ; if ( paths < 0 ) return paths ; //Not trained else if ( paths == 1 ) //ONLY one option, entropy was zero return 0 ; else if ( splittingAttribute < catAttributes . length ) //Same for classification and regression return data . getCategoricalValue ( splittingAttribute ) ; //else, is Numerical attribute - but regression or classification? int numerAttribute = splittingAttribute - catAttributes . length ; double val = data . getNumericalValues ( ) . get ( numerAttribute ) ; if ( Double . isNaN ( val ) ) return - 1 ; //missing if ( results != null ) //Categorical! { int pos = Collections . binarySearch ( boundries , val ) ; pos = pos < 0 ? - pos - 1 : pos ; return owners . get ( pos ) ; } else //Regression! It is trained, it would have been grabed at the top if not { if ( regressionResults . length == 1 ) return 0 ; else if ( val <= regressionResults [ 2 ] ) return 0 ; else return 1 ; } }
Determines which split path this data point would follow from this decision stump . Works for both classification and regression .
245
23
29,907
/**
 * Returns the categorical result of the i'th path.
 *
 * @param i the path index to get the result for
 * @return the classification result stored for that path
 * @throws IndexOutOfBoundsException if i is not a valid path index
 */
public CategoricalResults result(int i)
{
    if (i < 0 || i >= getNumberOfPaths())
        // message fix: was the garbled "can to return a result"
        throw new IndexOutOfBoundsException("Invalid path, can not return a result for path " + i);
    return results[i];
}
Returns the categorical result of the i th path .
59
11
29,908
public List < ClassificationDataSet > trainC ( ClassificationDataSet dataPoints , Set < Integer > options ) { return trainC ( dataPoints , options , false ) ; }
This is a helper function that does the work of training this stump . It may be called directly by other classes that are creating decision trees to avoid redundant repackaging of lists .
36
36
29,909
static protected < T > void distributMissing ( List < ClassificationDataSet > splits , double [ ] fracs , ClassificationDataSet source , IntList hadMissing ) { for ( int i : hadMissing ) { DataPoint dp = source . getDataPoint ( i ) ; for ( int j = 0 ; j < fracs . length ; j ++ ) { double nw = fracs [ j ] * source . getWeight ( i ) ; if ( Double . isNaN ( nw ) ) //happens when no weight is available continue ; if ( nw <= 1e-13 ) continue ; splits . get ( j ) . addDataPoint ( dp , source . getDataPointCategory ( i ) , nw ) ; } } }
Distributes a list of datapoints that had missing values to each split re - weighted by the indicated fractions
159
23
29,910
public void setMaxTokenLength ( int maxTokenLength ) { if ( maxTokenLength < 1 ) throw new IllegalArgumentException ( "Max token length must be positive, not " + maxTokenLength ) ; if ( maxTokenLength <= minTokenLength ) throw new IllegalArgumentException ( "Max token length must be larger than the min token length" ) ; this . maxTokenLength = maxTokenLength ; }
Sets the maximum allowed length for any token . Any token discovered exceeding the length will not be accepted and skipped over . The default is unbounded .
87
30
29,911
public void setMinTokenLength ( int minTokenLength ) { if ( minTokenLength < 0 ) throw new IllegalArgumentException ( "Minimum token length must be non negative, not " + minTokenLength ) ; if ( minTokenLength > maxTokenLength ) throw new IllegalArgumentException ( "Minimum token length can not exced the maximum token length" ) ; this . minTokenLength = minTokenLength ; }
Sets the minimum allowed token length . Any token discovered shorter than the minimum length will not be accepted and skipped over . The default is 0 .
88
29
29,912
public void addNewKernelPoint ( ) { KernelPoint source = points . get ( 0 ) ; KernelPoint toAdd = new KernelPoint ( k , errorTolerance ) ; toAdd . setMaxBudget ( maxBudget ) ; toAdd . setBudgetStrategy ( budgetStrategy ) ; standardMove ( toAdd , source ) ; toAdd . kernelAccel = source . kernelAccel ; toAdd . vecs = source . vecs ; toAdd . alpha = new DoubleList ( source . alpha . size ( ) ) ; for ( int i = 0 ; i < source . alpha . size ( ) ; i ++ ) toAdd . alpha . ( 0.0 ) ; points . add ( toAdd ) ; }
Adds a new Kernel Point to the internal list this object represents . The new Kernel Point will be equivalent to creating a new KernelPoint directly .
155
28
29,913
private void standardMove ( KernelPoint destination , KernelPoint source ) { destination . InvK = source . InvK ; destination . InvKExpanded = source . InvKExpanded ; destination . K = source . K ; destination . KExpanded = source . KExpanded ; }
Updates the gram matrix storage of the destination to point at the exact same objects as the ones from the source .
59
23
29,914
public List < Vec > getRawBasisVecs ( ) { List < Vec > vecs = new ArrayList < Vec > ( getBasisSize ( ) ) ; vecs . addAll ( this . points . get ( 0 ) . vecs ) ; return vecs ; }
Returns a list of the raw vectors being used by the kernel points . Altering this vectors will alter the same vectors used by these objects and will cause inconsistent results .
61
33
29,915
private void addMissingZeros ( ) { //go back and add 0s for the onces we missed for ( int i = 0 ; i < points . size ( ) ; i ++ ) while ( points . get ( i ) . alpha . size ( ) < this . points . get ( 0 ) . vecs . size ( ) ) points . get ( i ) . alpha . add ( 0.0 ) ; }
Adds zeros to all alpha vecs that are not of the same length as the vec list
88
19
29,916
private void updateAverage ( ) { if ( t == last_t || t < burnIn ) return ; else if ( last_t < burnIn ) //first update since done burning { for ( int i = 0 ; i < alphaAveraged . size ( ) ; i ++ ) alphaAveraged . set ( i , alphas . get ( i ) ) ; } double w = t - last_t ; //time elapsed for ( int i = 0 ; i < alphaAveraged . size ( ) ; i ++ ) { double delta = alphas . getD ( i ) - alphaAveraged . getD ( i ) ; alphaAveraged . set ( i , alphaAveraged . getD ( i ) + delta * w / t ) ; } last_t = t ; //average done }
Updates the average model to reflect the current time average
174
11
29,917
public void setSigma ( double sigma ) { if ( sigma <= 0 || Double . isNaN ( sigma ) || Double . isInfinite ( sigma ) ) throw new IllegalArgumentException ( "Sigma must be a positive constant, not " + sigma ) ; this . sigma = sigma ; this . sigmaSqrd2Inv = 0.5 / ( sigma * sigma ) ; }
Sets the kernel width parameter which must be a positive value . Larger values indicate a larger width
92
20
29,918
/**
 * Sets the maximum value of any feature after scaling is applied. This value
 * can be no greater than 1, and must be larger than the minimum scaled value.
 *
 * @param maxFeature the maximum post-scaling feature value
 * @throws ArithmeticException if the value is NaN, greater than 1, or not
 *         larger than the current minimum
 */
public void setMaxScaled(double maxFeature)
{
    if (Double.isNaN(maxFeature))
        throw new ArithmeticException("NaN is not a valid feature value");
    else if (maxFeature > 1)
        throw new ArithmeticException("Maximum possible feature value is 1, can not use " + maxFeature);
    else if (maxFeature <= minScaled)
        // typo fix: message said "learger"
        throw new ArithmeticException("Maximum feature value must be larger than the minimum");
    this.maxScaled = maxFeature;
}
Sets the maximum value of any feature after scaling is applied . This value can be no greater than 1 .
113
22
29,919
public void setMinScaled ( double minFeature ) { if ( Double . isNaN ( minFeature ) ) throw new ArithmeticException ( "NaN is not a valid feature value" ) ; else if ( minFeature < - 1 ) throw new ArithmeticException ( "Minimum possible feature value is -1, can not use " + minFeature ) ; else if ( minFeature >= maxScaled ) throw new ArithmeticException ( "Minimum feature value must be smaller than the maximum" ) ; this . minScaled = minFeature ; }
Sets the minimum value of any feature after scaling is applied . This value can be no smaller than - 1
114
22
29,920
public void setOmega ( double omega ) { if ( omega <= 0 || Double . isNaN ( omega ) || Double . isInfinite ( omega ) ) throw new ArithmeticException ( "omega must be positive, not " + omega ) ; this . omega = omega ; this . cnst = Math . sqrt ( Math . pow ( 2 , 1 / omega ) - 1 ) ; }
Sets the omega parameter value which controls the shape of the kernel
85
13
29,921
public void setSigma ( double sigma ) { if ( sigma <= 0 || Double . isNaN ( sigma ) || Double . isInfinite ( sigma ) ) throw new ArithmeticException ( "sigma must be positive, not " + sigma ) ; this . sigma = sigma ; }
Sets the sigma parameter value which controls the width of the kernel
67
14
29,922
private static Vec getColumn ( Matrix x ) { Vec t ; for ( int i = 0 ; i < x . cols ( ) ; i ++ ) { t = x . getColumn ( i ) ; if ( t . dot ( t ) > 0 ) return t ; } throw new ArithmeticException ( "Matrix is essentially zero" ) ; }
Returns the first non zero column
73
6
29,923
private void doWarmStartIfNotNull ( Object warmSolution ) throws FailedToFitException { if ( warmSolution != null ) { if ( warmSolution instanceof SimpleWeightVectorModel ) { SimpleWeightVectorModel warm = ( SimpleWeightVectorModel ) warmSolution ; if ( warm . numWeightsVecs ( ) != ws . length ) throw new FailedToFitException ( "Warm solution has " + warm . numWeightsVecs ( ) + " weight vectors instead of " + ws . length ) ; for ( int i = 0 ; i < ws . length ; i ++ ) { warm . getRawWeight ( i ) . copyTo ( ws [ i ] ) ; if ( useBiasTerm ) bs [ i ] = warm . getBias ( i ) ; } } else throw new FailedToFitException ( "Can not warm warm from " + warmSolution . getClass ( ) . getCanonicalName ( ) ) ; } }
Performs a warm start if the given object is of the appropriate class . Nothing happens if input it null .
207
22
29,924
/**
 * Returns a new unmodifiable view that is the merging of the two given
 * lists. The view reflects changes made to the underlying lists.
 *
 * @param <T> the element type of the lists
 * @param left the list making up the front of the view
 * @param right the list making up the back of the view
 * @return an unmodifiable list view of left followed by right
 */
public static <T> List<T> mergedView(final List<T> left, final List<T> right)
{
    List<T> merged = new AbstractList<T>()
    {
        @Override
        public T get(int index)
        {
            if (index < left.size())
                return left.get(index);
            else if (index - left.size() < right.size())
                return right.get(index - left.size());
            else // typo fix: message said "lengt"
                throw new IndexOutOfBoundsException("List of length " + size() + " has no index " + index);
        }

        @Override
        public int size()
        {
            return left.size() + right.size();
        }
    };
    return merged;
}
Returns a new unmodifiable view that is the merging of two lists
162
14
29,925
public static < T > List < T > collectFutures ( Collection < Future < T > > futures ) throws ExecutionException , InterruptedException { ArrayList < T > collected = new ArrayList < T > ( futures . size ( ) ) ; for ( Future < T > future : futures ) collected . ( future . get ( ) ) ; return collected ; }
Collects all future values in a collection into a list and returns said list . This method will block until all future objects are collected .
76
27
29,926
public static IntList range ( int start , int to , int step ) { if ( to < start ) throw new RuntimeException ( "starting index " + start + " must be less than or equal to ending index" + to ) ; else if ( step < 1 ) throw new RuntimeException ( "Step size must be a positive integer, not " + step ) ; IntList toRet = new IntList ( ( to - start ) / step ) ; for ( int i = start ; i < to ; i += step ) toRet . ( i ) ; return toRet ; }
Returns a list of integers with values in the given range
121
11
29,927
protected double invCdfRootFinding ( double p , double tol ) { if ( p < 0 || p > 1 ) throw new ArithmeticException ( "Value of p must be in the range [0,1], not " + p ) ; //two special case checks, as they can cause a failure to get a positive and negative value on the ends, which means we can't do a search for the root //Special case check, p < min value if ( min ( ) >= Integer . MIN_VALUE ) if ( p <= cdf ( min ( ) ) ) return min ( ) ; //special case check, p >= max value if ( max ( ) < Integer . MAX_VALUE ) if ( p > cdf ( max ( ) - 1 ) ) return max ( ) ; //stewpwise nature fo discrete can cause problems for search, so we will use a smoothed cdf to pass in // double toRet= invCdf(p, ); //Lets use an interpolated version of the CDF so that our numerical methods will behave better Function1D cdfInterpolated = ( double x ) -> { double query = x ; //if it happens to fall on an int we just compute the regular value if ( Math . rint ( query ) == query ) return cdf ( ( int ) query ) - p ; //else, interpolate double larger = query + 1 ; double diff = larger - query ; return cdf ( query ) * diff + cdf ( larger ) * ( 1 - diff ) - p ; } ; double a = Double . isInfinite ( min ( ) ) ? Integer . MIN_VALUE * .95 : min ( ) ; double b = Double . isInfinite ( max ( ) ) ? Integer . MAX_VALUE * .95 : max ( ) ; double toRet = Zeroin . root ( tol , a , b , cdfInterpolated ) ; return Math . round ( toRet ) ; }
Helper method that computes the inverse CDF by performing root - finding on the CDF of the function . This provides a convenient default method for any invCdfRootFinding implementation but may not be as fast or accurate as possible .
412
47
29,928
public void setMomentum ( double momentum ) { if ( momentum <= 0 || momentum >= 1 || Double . isNaN ( momentum ) ) throw new IllegalArgumentException ( "Momentum must be in (0,1) not " + momentum ) ; this . momentum = momentum ; }
Sets the momentum for accumulating gradients .
62
9
29,929
/**
 * Computes the log of the normal distribution's probability density at x.
 *
 * @param x the point to evaluate at
 * @param mu the mean of the distribution
 * @param sigma the standard deviation of the distribution
 * @return log N(x; mu, sigma)
 */
public static double logPdf(double x, double mu, double sigma)
{
    final double z = x - mu;
    // log(1/(sigma*sqrt(2*pi))) - (x-mu)^2 / (2 sigma^2), written out termwise
    return -0.5 * log(2 * PI) - log(sigma) - pow(z, 2) / (2 * sigma * sigma);
}
Computes the log probability of a given value
58
9
29,930
public void setEta ( double eta ) { if ( Double . isNaN ( eta ) || Double . isInfinite ( eta ) || eta <= 0 ) throw new ArithmeticException ( "convergence parameter must be a positive value" ) ; this . eta = eta ; }
Sets the learning rate used during training
66
8
29,931
public void setEpsilon ( double eps ) { if ( eps < 0 || Double . isInfinite ( eps ) || Double . isNaN ( eps ) ) throw new ArithmeticException ( "Regularization must be a positive value" ) ; this . eps = eps ; }
Sets the regularization to apply the the diagonal of the scatter matrix when creating each new metric .
65
20
29,932
private int threshHoldExtractCluster ( List < Integer > orderedFile , int [ ] designations ) { int clustersFound = 0 ; OnLineStatistics stats = new OnLineStatistics ( ) ; for ( double r : reach_d ) if ( ! Double . isInfinite ( r ) ) stats . add ( r ) ; double thresh = stats . getMean ( ) + stats . getStandardDeviation ( ) ; for ( int i = 0 ; i < orderedFile . size ( ) ; i ++ ) { if ( reach_d [ orderedFile . get ( i ) ] >= thresh ) continue ; //Everything in between is part of the cluster while ( i < orderedFile . size ( ) && reach_d [ orderedFile . get ( i ) ] < thresh ) designations [ i ++ ] = clustersFound ; //Climb up to the top of the hill, everything we climbed over is part of the cluster while ( i + 1 < orderedFile . size ( ) && reach_d [ orderedFile . get ( i ) ] < reach_d [ orderedFile . get ( i + 1 ) ] ) designations [ i ++ ] = clustersFound ; clustersFound ++ ; } return clustersFound ; }
Finds clusters by segmenting the reachability plot with a threshold line set at the mean reachability distance plus one standard deviation .
259
21
29,933
public void setK ( final int K ) { if ( K < 2 ) throw new IllegalArgumentException ( "At least 2 topics must be learned" ) ; this . K = K ; gammaLocal = new ThreadLocal < Vec > ( ) { @ Override protected Vec initialValue ( ) { return new DenseVector ( K ) ; } } ; logThetaLocal = new ThreadLocal < Vec > ( ) { @ Override protected Vec initialValue ( ) { return new DenseVector ( K ) ; } } ; expLogThetaLocal = new ThreadLocal < Vec > ( ) { @ Override protected Vec initialValue ( ) { return new DenseVector ( K ) ; } } ; lambda = null ; }
Sets the number of topics that LDA will try to learn
152
13
29,934
/**
 * Sets the learning-rate constant tau0 that controls the influence of early
 * iterations on the solution. Larger values reduce the influence of earlier
 * iterations; smaller values increase their weight.
 *
 * @param tau0 the positive constant to use
 * @throws IllegalArgumentException if tau0 is not a positive finite value
 */
public void setTau0(double tau0)
{
    if (tau0 <= 0 || Double.isInfinite(tau0) || Double.isNaN(tau0))
        // message fix: it previously named the wrong parameter ("Eta")
        throw new IllegalArgumentException("tau0 must be a positive constant, not " + tau0);
    this.tau0 = tau0;
}
A learning rate constant to control the influence of early iterations on the solution . Larger values reduce the influence of earlier iterations smaller values increase the weight of earlier iterations .
78
33
29,935
/**
 * Sets the forgetfulness factor in the learning rate. Larger values increase
 * the rate at which old information is forgotten.
 *
 * @param kappa the forgetfulness factor in [0.5, 1]
 * @throws IllegalArgumentException if kappa is NaN or outside [0.5, 1]
 */
public void setKappa(double kappa)
{
    if (kappa < 0.5 || kappa > 1.0 || Double.isNaN(kappa))
        // typo fix: message said "Kapp"
        throw new IllegalArgumentException("Kappa must be in [0.5, 1], not " + kappa);
    this.kappa = kappa;
}
The forgetfulness factor in the learning rate . Larger values increase the rate at which old information is forgotten
73
21
29,936
public Vec getTopicVec ( int k ) { return new ScaledVector ( 1.0 / lambda . get ( k ) . sum ( ) , lambda . get ( k ) ) ; }
Returns the topic vector for a given topic . The vector should not be altered and is scaled so that the sum of all term weights sums to one .
41
30
29,937
public void model ( DataSet dataSet , int topics , ExecutorService ex ) { if ( ex == null ) ex = new FakeExecutor ( ) ; //Use notation same as original paper setK ( topics ) ; setD ( dataSet . size ( ) ) ; setVocabSize ( dataSet . getNumNumericalVars ( ) ) ; final List < Vec > docs = dataSet . getDataVectors ( ) ; for ( int epoch = 0 ; epoch < epochs ; epoch ++ ) { Collections . shuffle ( docs ) ; for ( int i = 0 ; i < D ; i += miniBatchSize ) { int to = Math . min ( i + miniBatchSize , D ) ; update ( docs . subList ( i , to ) , ex ) ; } } }
Fits the LDA model against the given data set
172
11
29,938
private void prepareGammaTheta ( Vec gamma_i , Vec eLogTheta_i , Vec expLogTheta_i , Random rand ) { final double lambdaInv = ( W * K ) / ( D * 100.0 ) ; for ( int j = 0 ; j < gamma_i . length ( ) ; j ++ ) gamma_i . set ( j , sampleExpoDist ( lambdaInv , rand . nextDouble ( ) ) + eta ) ; expandPsiMinusPsiSum ( gamma_i , gamma_i . sum ( ) , eLogTheta_i ) ; for ( int j = 0 ; j < eLogTheta_i . length ( ) ; j ++ ) expLogTheta_i . set ( j , FastMath . exp ( eLogTheta_i . get ( j ) ) ) ; }
Prepares gamma and the associated theta expectations are initialized so that the iterative updates to them can begin .
183
22
29,939
public void addNode ( N node ) { if ( ! nodes . containsKey ( node ) ) nodes . put ( node , new Pair < HashSet < N > , HashSet < N > > ( new HashSet < N > ( ) , new HashSet < N > ( ) ) ) ; }
Adds a new node to the graph
63
7
29,940
public Set < N > getParents ( N n ) { Pair < HashSet < N > , HashSet < N > > p = nodes . get ( n ) ; if ( p == null ) return null ; return p . getIncoming ( ) ; }
Returns the set of all parents of the requested node or null if the node does not exist in the graph
54
21
29,941
public Set < N > getChildren ( N n ) { Pair < HashSet < N > , HashSet < N > > p = nodes . get ( n ) ; if ( p == null ) return null ; return p . getOutgoing ( ) ; }
Returns the set of all children of the requested node or null if the node does not exist in the graph .
54
22
29,942
public void removeNode ( N node ) { Pair < HashSet < N > , HashSet < N > > p = nodes . remove ( node ) ; if ( p == null ) return ; //Outgoing edges we can ignore removint he node drops them. We need to avoid dangling incoming edges to this node we have removed HashSet < N > incomingNodes = p . getIncoming ( ) ; for ( N incomingNode : incomingNodes ) nodes . get ( incomingNode ) . getOutgoing ( ) . remove ( node ) ; }
Removes the specified node from the graph . If the node was not in the graph , no change occurs .
115
20
29,943
public void depends ( int parent , int child ) { dag . addNode ( child ) ; dag . addNode ( parent ) ; dag . addEdge ( parent , child ) ; }
Adds a dependency relationship between two variables that will be in the network . The integer value corresponds to the index of the i th categorical variable , where the class target's value is the number of categorical variables .
38
43
29,944
public void setTau ( double tau ) { if ( tau <= 0 || Double . isInfinite ( tau ) || Double . isNaN ( tau ) ) throw new IllegalArgumentException ( "tau must be a positive constant, not " + tau ) ; this . tau = tau ; }
Controls the rate early in time but has a decreasing impact on the rate returned as time goes forward . Larger values of &tau ; dampen the initial rates returned while lower values let the initial rates start higher .
70
45
29,945
public double regress ( DataPoint dp ) { TreeNodeVisitor node = this ; while ( ! node . isLeaf ( ) ) { int path = node . getPath ( dp ) ; if ( path < 0 ) //missing value case { double sum = 0 ; double resultSum = 0 ; for ( int child = 0 ; child < childrenCount ( ) ; child ++ ) { if ( node . isPathDisabled ( child ) ) continue ; double child_result = node . getChild ( child ) . regress ( dp ) ; sum += node . getPathWeight ( child ) ; resultSum += node . getPathWeight ( child ) * child_result ; } if ( sum == 0 ) //all paths disabled? break ; //break out and do local classify if ( sum < 1.0 - 1e-5 ) //re-normalize our result resultSum /= ( sum + 1e-6 ) ; return resultSum ; } if ( node . isPathDisabled ( path ) ) //if missing value makes path < 0, return local regression dec break ; node = node . getChild ( path ) ; } return node . localRegress ( dp ) ; }
Performs regression on the given data point by following it down the tree until it finds the correct terminal node .
251
22
29,946
public final double updateAndGet ( DoubleUnaryOperator updateFunction ) { double prev , next ; do { prev = get ( ) ; next = updateFunction . applyAsDouble ( prev ) ; } while ( ! compareAndSet ( prev , next ) ) ; return next ; }
Atomically updates the current value with the results of applying the given function returning the updated value . The function should be side - effect - free since it may be re - applied when attempted updates fail due to contention among threads .
59
46
29,947
public final double getAndAccumulate ( double x , DoubleBinaryOperator accumulatorFunction ) { double prev , next ; do { prev = get ( ) ; next = accumulatorFunction . applyAsDouble ( prev , x ) ; } while ( ! compareAndSet ( prev , next ) ) ; return prev ; }
Atomically updates the current value with the results of applying the given function to the current and given values returning the previous value . The function should be side - effect - free since it may be re - applied when attempted updates fail due to contention among threads . The function is applied with the current value as its first argument and the given update as the second argument .
68
73
29,948
public void applyTo ( List < String > list ) { for ( int i = 0 ; i < list . size ( ) ; i ++ ) list . set ( i , stem ( list . get ( i ) ) ) ; }
Replaces each value in the list with the stemmed version of the word
48
14
29,949
public void applyTo ( String [ ] arr ) { for ( int i = 0 ; i < arr . length ; i ++ ) arr [ i ] = stem ( arr [ i ] ) ; }
Replaces each value in the array with the stemmed version of the word
41
14
29,950
private void updateSetsLabeled ( int i1 , final double a1 , final double C ) { final double y_i = label [ i1 ] ; I1 [ i1 ] = a1 == 0 && y_i == 1 ; I2 [ i1 ] = a1 == C && y_i == - 1 ; I3 [ i1 ] = a1 == C && y_i == 1 ; I4 [ i1 ] = a1 == 0 && y_i == - 1 ; }
Updates the index sets
109
5
29,951
private void updateThreshold ( int i ) { double Fi = fcache [ i ] ; double F_tilde_i = b_low ; if ( I0_b [ i ] || I2 [ i ] ) F_tilde_i = Fi + epsilon ; else if ( I0_a [ i ] || I1 [ i ] ) F_tilde_i = Fi - epsilon ; double F_bar_i = b_up ; if ( I0_a [ i ] || I3 [ i ] ) F_bar_i = Fi - epsilon ; else if ( I0_b [ i ] || I1 [ i ] ) F_bar_i = Fi + epsilon ; //update the bounds if ( b_low < F_tilde_i ) { b_low = F_tilde_i ; i_low = i ; } if ( b_up > F_bar_i ) { b_up = F_bar_i ; i_up = i ; } }
Updates the threshold for regression based off of using only i1 i2 and indices in I_0
225
21
29,952
protected double decisionFunction ( int v ) { double sum = 0 ; for ( int i = 0 ; i < vecs . size ( ) ; i ++ ) if ( alphas [ i ] > 0 ) sum += alphas [ i ] * label [ i ] * kEval ( v , i ) ; return sum ; }
Returns the local decision function for classification training purposes without the bias term
69
13
29,953
protected double decisionFunctionR ( int v ) { double sum = 0 ; for ( int i = 0 ; i < vecs . size ( ) ; i ++ ) if ( alphas [ i ] != alpha_s [ i ] ) //multipler would be zero sum += ( alphas [ i ] - alpha_s [ i ] ) * kEval ( v , i ) ; return sum ; }
Returns the local decision function for regression training purposes without the bias term
85
13
29,954
public void setEpsilon ( double epsilon ) { if ( Double . isNaN ( epsilon ) || Double . isInfinite ( epsilon ) || epsilon <= 0 ) throw new IllegalArgumentException ( "epsilon must be in (0, infty), not " + epsilon ) ; this . epsilon = epsilon ; }
Sets the epsilon for the epsilon insensitive loss when performing regression . This variable has no impact during classification problems . For regression problems any predicated value that is within the epsilon of the target will be treated as correct . Increasing epsilon usually decreases the number of support vectors but may reduce the accuracy of the model
82
68
29,955
public void setMaxPointError ( double maxPointError ) { if ( maxPointError < 0 || Double . isInfinite ( maxPointError ) || Double . isNaN ( maxPointError ) ) throw new ArithmeticException ( "The error must be a positive value, not " + maxPointError ) ; this . maxPointError = maxPointError ; }
Each data point not in the initial training set will be tested against . If a data points error is sufficiently small it will be added to the set of inliers .
77
34
29,956
protected double P ( DataPoint x ) { /** * F(x) * e * p(x) = --------------- * F(x) - F(x) * e + e */ double fx = F ( x ) ; double efx = Math . exp ( fx ) ; double enfx = Math . exp ( - fx ) ; if ( Double . isInfinite ( efx ) && efx > 0 && enfx < 1e-15 ) //Well classified point could return a Infinity which turns into NaN return 1.0 ; return efx / ( efx + enfx ) ; }
Returns the probability that a given data point belongs to class 1
130
12
29,957
public static double loss ( double pred , double y ) { final double x = - y * pred ; if ( x >= 30 ) //as x -> inf, L(x) -> x. At 30 exp(x) is O(10^13), getting unstable. L(x)-x at this value is O(10^-14), also avoids exp and log ops return x ; else if ( x <= - 30 ) return 0 ; return log ( 1 + exp ( x ) ) ; }
Computes the logistic loss
104
6
29,958
/**
 * Computes the first derivative of the logistic loss with respect to the
 * prediction, for a ±1 target.
 *
 * @param pred the model's raw prediction
 * @param y the true label, -1 or +1
 * @return the first derivative of the logistic loss
 */
public static double deriv(double pred, double y)
{
    final double margin = y * pred;
    if (margin >= 30) // confidently correct: gradient vanishes
        return 0;
    if (margin <= -30) // confidently wrong: gradient saturates
        return y;
    return -y / (1 + exp(margin));
}
Computes the first derivative of the logistic loss
56
10
29,959
/**
 * Computes the second derivative of the logistic loss with respect to the
 * prediction, for a ±1 target.
 *
 * @param pred the model's raw prediction
 * @param y the true label, -1 or +1
 * @return the second derivative of the logistic loss
 */
public static double deriv2(double pred, double y)
{
    final double margin = y * pred;
    // The sigmoid saturates in both directions, so the curvature is
    // numerically zero outside [-30, 30].
    if (margin >= 30 || margin <= -30)
        return 0;
    final double p = 1 / (1 + exp(margin));
    return p * (1 - p);
}
Computes the second derivative of the logistic loss
68
10
29,960
private TreeNodeVisitor walkCorruptedPath ( TreeLearner model , DataPoint dp , int j , Random rand ) { TreeNodeVisitor curNode = model . getTreeNodeVisitor ( ) ; while ( ! curNode . isLeaf ( ) ) { int path = curNode . getPath ( dp ) ; int numChild = curNode . childrenCount ( ) ; if ( curNode . featuresUsed ( ) . contains ( j ) ) //corrupt the feature! { //this gets us a random OTHER path, wont be the same b/c we would need to wrap around 1 farther path = ( path + rand . nextInt ( numChild ) ) % numChild ; } if ( curNode . isPathDisabled ( path ) ) break ; else curNode = curNode . getChild ( path ) ; } return curNode ; }
walks the tree down to a leaf node adding corruption for a specific feature
181
15
29,961
public void setR ( double r ) { if ( Double . isNaN ( r ) || Double . isInfinite ( r ) || r <= 0 ) throw new IllegalArgumentException ( "r must be a postive constant, not " + r ) ; this . r = r ; }
Sets the r parameter of AROW which controls the regularization . Larger values reduce the change in the model on each update .
62
27
29,962
/**
 * Performs sampling with replacement over the index range
 * [0, sampleCounts.length), storing in sampleCounts[i] the number of times
 * index i was drawn. The counts array is zeroed before sampling.
 *
 * @param sampleCounts the array to store the per-index draw counts in
 * @param samples the total number of draws to perform
 * @param rand the source of randomness
 */
static public void sampleWithReplacement(int[] sampleCounts, int samples, Random rand)
{
    Arrays.fill(sampleCounts, 0);
    int remaining = samples;
    while (remaining-- > 0)
        sampleCounts[rand.nextInt(sampleCounts.length)]++;
}
Performs the sampling based on the number of data points , storing the per - index counts in the provided array .
67
23
29,963
public void setTrainingProportion ( double trainingProportion ) { //+- Inf case captured in >1 <= 0 case if ( trainingProportion > 1 || trainingProportion <= 0 || Double . isNaN ( trainingProportion ) ) throw new ArithmeticException ( "Training Proportion is invalid" ) ; this . trainingProportion = trainingProportion ; }
The GB version uses the whole data set at each iteration . SGB can use a fraction of the data set at each iteration in order to reduce overfitting and add randomness .
76
36
29,964
private Function1D getDerivativeFunc ( final RegressionDataSet backingResidsList , final Regressor h ) { final Function1D fhPrime = ( double x ) -> { double c1 = x ; //c2=c1-eps double eps = 1e-5 ; double c1Pc2 = c1 * 2 - eps ; //c1+c2 = c1+c1-eps double result = 0 ; /* * Computing the estimate of the derivative directly, f'(x) approx = f(x)-f(x-eps) * * hEst is the output of the new regressor, target is the true residual target value * * So we have several * (hEst_i c1 - target)^2 - (hEst_i c2 -target)^2 //4 muls, 3 subs * Where c2 = c1-eps * Which simplifies to * (c1 - c2) hEst ((c1 + c2) hEst - 2 target) * = * eps hEst (c1Pc2 hEst - 2 target)//3 muls, 1 sub, 1 shift (mul by 2) * * because eps is on the outside and independent of each * individual summation, we can move it out and do the eps * multiplicatio ont he final result. Reducing us to * * 2 muls, 1 sub, 1 shift (mul by 2) * * per loop * * Which reduce computation, and allows us to get the result * in one pass of the data */ for ( int i = 0 ; i < backingResidsList . size ( ) ; i ++ ) { double hEst = h . regress ( backingResidsList . getDataPoint ( i ) ) ; double target = backingResidsList . getTargetValue ( i ) ; result += hEst * ( c1Pc2 * hEst - 2 * target ) ; } return result * eps ; } ; return fhPrime ; }
Returns a function object that approximates the derivative of the squared error of the Regressor as a function of the constant factor multiplied on the Regressor s output .
432
32
29,965
public static SimpleDataSet loadArffFile ( File file ) { try { return loadArffFile ( new FileReader ( file ) ) ; } catch ( FileNotFoundException ex ) { Logger . getLogger ( ARFFLoader . class . getName ( ) ) . log ( Level . SEVERE , null , ex ) ; return null ; } }
Uses the given file path to load a data set from an ARFF file .
79
17
29,966
private static String nameTrim ( String in ) { in = in . trim ( ) ; if ( in . startsWith ( "'" ) || in . startsWith ( "\"" ) ) in = in . substring ( 1 ) ; if ( in . endsWith ( "'" ) || in . startsWith ( "\"" ) ) in = in . substring ( 0 , in . length ( ) - 1 ) ; return in . trim ( ) ; }
Removes the quotes at the end and front of a string if there are any as well as spaces at the front and end
97
25
29,967
public void setInitialLearningRate ( double initialLearningRate ) { if ( Double . isInfinite ( initialLearningRate ) || Double . isNaN ( initialLearningRate ) || initialLearningRate <= 0 ) throw new ArithmeticException ( "Learning rate must be a positive constant, not " + initialLearningRate ) ; this . initialLearningRate = initialLearningRate ; }
Sets the rate at which input is incorporated at each iteration of the SOM algorithm
77
16
29,968
public void setProb ( int cat , double prob ) { if ( cat > probabilities . length ) throw new IndexOutOfBoundsException ( "There are only " + probabilities . length + " posibilties, " + cat + " is invalid" ) ; else if ( prob < 0 || Double . isInfinite ( prob ) || Double . isNaN ( prob ) ) throw new ArithmeticException ( "Only zero and positive values are valid, not " + prob ) ; probabilities [ cat ] = prob ; }
Sets the probability that a sample belongs to a given category .
110
13
29,969
public int mostLikely ( ) { int top = 0 ; for ( int i = 1 ; i < probabilities . length ; i ++ ) { if ( probabilities [ i ] > probabilities [ top ] ) top = i ; } return top ; }
Returns the category that is the most likely according to the current probability values
51
14
29,970
public void setWeakLearner ( Classifier weakL ) { if ( weakL == null ) throw new NullPointerException ( ) ; this . weakL = weakL ; if ( weakL instanceof Regressor ) this . weakR = ( Regressor ) weakL ; }
Sets the weak learner used for classification . If it also supports regressions that will be set as well .
59
23
29,971
public void setWeakLearner ( Regressor weakR ) { if ( weakR == null ) throw new NullPointerException ( ) ; this . weakR = weakR ; if ( weakR instanceof Classifier ) this . weakL = ( Classifier ) weakR ; }
Sets the weak learner used for regressions . If it also supports classification that will be set as well .
59
23
29,972
public static void hess ( Matrix A , ExecutorService threadpool ) { if ( ! A . isSquare ( ) ) throw new ArithmeticException ( "Only square matrices can be converted to Upper Hessenberg form" ) ; int m = A . rows ( ) ; /** * Space used to store the vector for updating the columns of A */ DenseVector columnUpdateTmp = new DenseVector ( m ) ; double [ ] vk = new double [ m ] ; /** * Space used for updating the sub matrix at step i */ double [ ] subMatrixUpdateTmp = new double [ m ] ; double tmp ; //Used for temp values for ( int i = 0 ; i < m - 2 ; i ++ ) { //Holds the norm, sqrt{a_i^2 + ... + a_m^2} double s = 0.0 ; //First step of the loop done outside to do extra bit double sigh = A . get ( i + 1 , i ) ; //Holds the multiplication factor vk [ i + 1 ] = sigh ; s += sigh * sigh ; sigh = sigh > 0 ? 1 : - 1 ; //Sign dosnt change the squaring, so we do it first for ( int j = i + 2 ; j < m ; j ++ ) { tmp = A . get ( j , i ) ; vk [ j ] = tmp ; s += tmp * tmp ; } double s1 = - sigh * Math . sqrt ( s ) ; //Now re use s to quickly get the norm of vk, since it will be almost the same vector s -= vk [ i + 1 ] * vk [ i + 1 ] ; vk [ i + 1 ] -= s1 ; s += vk [ i + 1 ] * vk [ i + 1 ] ; double s1Inv = 1.0 / Math . sqrt ( s ) ; //Re use to store the norm of vk. Do the inverse to multiply quickly instead of divide for ( int j = i + 1 ; j < m ; j ++ ) vk [ j ] *= s1Inv ; //Update sub sub matrix A[i+1:m, i:m] //NOTE: The first column that will be altered can be done ourslves, since we know the value set (s1) and that all below it will ber zero Matrix subA = new SubMatrix ( A , i + 1 , i , m , m ) ; DenseVector vVec = new DenseVector ( vk , i + 1 , m ) ; Vec tmpV = new DenseVector ( subMatrixUpdateTmp , i , m ) ; tmpV . zeroOut ( ) ; vVec . multiply ( subA , tmpV ) ; if ( threadpool == null ) OuterProductUpdate ( subA , vVec , tmpV , - 2.0 ) ; else OuterProductUpdate ( subA , vVec , tmpV , - 2.0 , threadpool ) ; //Zero out ourselves after. 
//TODO implement so we dont compute the first row A . set ( i + 1 , i , s1 ) ; for ( int j = i + 2 ; j < m ; j ++ ) A . set ( j , i , 0.0 ) ; //Update the columns of A[0:m, i+1:m] subA = new SubMatrix ( A , 0 , i + 1 , m , m ) ; columnUpdateTmp . zeroOut ( ) ; subA . multiply ( vVec , 1.0 , columnUpdateTmp ) ; if ( threadpool == null ) OuterProductUpdate ( subA , columnUpdateTmp , vVec , - 2.0 ) ; else OuterProductUpdate ( subA , columnUpdateTmp , vVec , - 2.0 , threadpool ) ; } }
Alters the matrix A such that it is in upper Hessenberg form .
818
15
29,973
public static KernelFunction autoKernel ( Vec dataPoints ) { if ( dataPoints . length ( ) < 30 ) return GaussKF . getInstance ( ) ; else if ( dataPoints . length ( ) < 1000 ) return EpanechnikovKF . getInstance ( ) ; else //For very large data sets, Uniform is FAST and just as accurate return UniformKF . getInstance ( ) ; }
Automatically selects a good Kernel function for the data set that balances Execution time and accuracy
88
17
29,974
private double pdf ( double x , int j ) { /* * n * ===== /x - x \ * 1 \ | i| * f(x) = --- > K|------| * n h / \ h / * ===== * i = 1 * */ //Only values within a certain range will have an effect on the result, so we will skip to that range! int from = Arrays . binarySearch ( X , x - h * k . cutOff ( ) ) ; int to = Arrays . binarySearch ( X , x + h * k . cutOff ( ) ) ; //Mostly likely the exact value of x is not in the list, so it returns the inseration points from = from < 0 ? - from - 1 : from ; to = to < 0 ? - to - 1 : to ; //Univariate opt, if uniform weights, the sum is just the number of elements divide by half if ( weights . length == 0 && k instanceof UniformKF ) return ( to - from ) * 0.5 / ( sumOFWeights * h ) ; double sum = 0 ; for ( int i = Math . max ( 0 , from ) ; i < Math . min ( X . length , to + 1 ) ; i ++ ) if ( i != j ) sum += k . k ( ( x - X [ i ] ) / h ) * getWeight ( i ) ; return sum / ( sumOFWeights * h ) ; }
Computes the Leave One Out PDF of the estimator
307
11
29,975
public static double loss ( double pred , double y , double eps ) { final double x = Math . abs ( pred - y ) ; return Math . max ( 0 , x - eps ) ; }
Computes the &epsilon ; - insensitive loss
43
11
29,976
public static double deriv ( double pred , double y , double eps ) { final double x = pred - y ; if ( eps < Math . abs ( x ) ) return Math . signum ( x ) ; else return 0 ; }
Computes the first derivative of the &epsilon ; - insensitive loss
50
15
29,977
public SimpleDataSet generateData ( int samples ) { int totalClasses = 1 ; for ( int d : dimensions ) totalClasses *= ; catDataInfo = new CategoricalData [ ] { new CategoricalData ( totalClasses ) } ; List < DataPoint > dataPoints = new ArrayList < DataPoint > ( totalClasses * samples ) ; int [ ] curClassPointer = new int [ 1 ] ; for ( int i = 0 ; i < dimensions [ 0 ] ; i ++ ) { int [ ] curDim = new int [ dimensions . length ] ; curDim [ 0 ] = i ; addSamples ( curClassPointer , 0 , samples , dataPoints , curDim ) ; } return new SimpleDataSet ( dataPoints ) ; }
Generates a new data set .
164
7
29,978
public void setMinRate ( double min ) { if ( min <= 0 || Double . isNaN ( min ) || Double . isInfinite ( min ) ) throw new RuntimeException ( "minRate should be positive, not " + min ) ; this . min = min ; }
Sets the minimum learning rate to return
59
8
29,979
public static double digamma ( double x ) { if ( x == 0 ) return Double . NaN ; //complex infinity else if ( x < 0 ) //digamma(1-x) == digamma(x)+pi/tan(pi*x), to make x positive { if ( Math . rint ( x ) == x ) return Double . NaN ; //the zeros are complex infinity return digamma ( 1 - x ) - PI / tan ( PI * x ) ; } /* * shift over 2 values to the left and use truncated approximation * log(x+2)-1/(2 (x+2))-1/(12 (x+2)^2) -1/x-1/(x+1), * the x+2 and x and x+1 are grouped sepratly */ double xp2 = x + 2 ; return log ( xp2 ) - ( 6 * x + 13 ) / ( 12 * xp2 * xp2 ) - ( 2 * x + 1 ) / ( x * x + x ) ; }
Computes the digamma function of the input
227
10
29,980
private void fixMergeOrderAndAssign ( double [ ] mergedDistance , IntList merge_kept , IntList merge_removed , int lowK , final int N , int highK , int [ ] designations ) { //Now that we are done clustering, we need to re-order the merges so that the smallest distances are mergered first IndexTable it = new IndexTable ( mergedDistance ) ; it . apply ( merge_kept ) ; it . apply ( merge_removed ) ; it . apply ( mergedDistance ) ; for ( int i = 0 ; i < it . length ( ) ; i ++ ) { merges [ merges . length - i * 2 - 1 ] = merge_removed . get ( i ) ; merges [ merges . length - i * 2 - 2 ] = merge_kept . get ( i ) ; } //Now lets figure out a guess at the cluster size /* * Keep track of the average dist when merging, mark when it becomes abnormaly large as a guess at K */ OnLineStatistics distChange = new OnLineStatistics ( ) ; double maxStndDevs = Double . MIN_VALUE ; /** * How many clusters to return */ int clusterSize = lowK ; for ( int i = 0 ; i < mergedDistance . length ; i ++ ) { //Keep track of the changes in cluster size, and mark if this one was abnormall large distChange . add ( mergedDistance [ i ] ) ; int curK = N - i ; if ( curK >= lowK && curK <= highK ) //In the cluster window? { double stndDevs = ( mergedDistance [ i ] - distChange . getMean ( ) ) / distChange . getStandardDeviation ( ) ; if ( stndDevs > maxStndDevs ) { maxStndDevs = stndDevs ; clusterSize = curK ; } } } PriorityHAC . assignClusterDesignations ( designations , clusterSize , merges ) ; }
After clustering we need to fix up the merge order - since the NNchain only gets the merges correct not their ordering . This also figures out what number of clusters to use
425
37
29,981
public void await ( int ID ) throws InterruptedException { if ( parties == 1 ) //what are you doing?! return ; final boolean startCondition = competitionCondition ; int competingFor = ( locks . length * 2 - 1 - ID ) / 2 ; while ( competingFor >= 0 ) { final Lock node = locks [ competingFor ] ; if ( node . tryLock ( ) ) //we lose, must wait { synchronized ( node ) //ignore warning, its correct. We are using the lock both for competition AND to do an internal wait { while ( competitionCondition == startCondition ) node . wait ( ) ; } node . unlock ( ) ; wakeUpTarget ( competingFor * 2 + 1 ) ; wakeUpTarget ( competingFor * 2 + 2 ) ; return ; } else //we win, comete for another round! { if ( competingFor == 0 ) break ; //we have won the last round! competingFor = ( competingFor - 1 ) / 2 ; } } //We won! Inform the losers competitionCondition = ! competitionCondition ; wakeUpTarget ( 0 ) ; //biggest loser }
Waits for all threads to reach this barrier .
229
10
29,982
public void setBeta ( double beta ) { if ( beta <= 0 || beta >= 1 || Double . isNaN ( beta ) ) throw new IllegalArgumentException ( "shrinkage term must be in (0, 1), not " + beta ) ; this . beta = beta ; }
Sets the shrinkage term used for the line search .
61
12
29,983
public static List < Double > unmodifiableView ( double [ ] array , int length ) { return Collections . unmodifiableList ( view ( array , length ) ) ; }
Creates an returns an unmodifiable view of the given double array that requires only a small object allocation .
37
22
29,984
public static DoubleList view ( double [ ] array , int length ) { if ( length > array . length || length < 0 ) throw new IllegalArgumentException ( "length must be non-negative and no more than the size of the array(" + array . length + "), not " + length ) ; return new DoubleList ( array , length ) ; }
Creates and returns a view of the given double array that requires only a small object allocation . Changes to the list will be reflected in the array up to a point . If the modification would require increasing the capacity of the array a new array will be allocated - at which point operations will no longer be reflected in the original array .
75
66
29,985
private void updateStats ( final List < Double > lambdas , OnLineStatistics [ ] [ ] stats , int indx , double val , double [ ] mins , double weight ) { for ( int k = 0 ; k < lambdas . size ( ) ; k ++ ) stats [ k ] [ indx ] . ( transform ( val , lambdas . get ( k ) , mins [ indx ] ) , weight ) ; }
Updates the online stats for each value of lambda
93
10
29,986
@ Override public void increment ( int index , double val ) { int baseIndex = getBaseIndex ( index ) ; vecs [ baseIndex ] . increment ( index - lengthSums [ baseIndex ] , val ) ; }
The following are implemented only for performance reasons
48
8
29,987
public void setC ( double c ) { if ( c <= 0 || Double . isNaN ( c ) || Double . isInfinite ( c ) ) throw new IllegalArgumentException ( "coefficient must be in (0, Inf), not " + c ) ; this . c = c ; }
Sets the positive additive coefficient
64
6
29,988
public static void addDiag ( Matrix A , int start , int to , double c ) { for ( int i = start ; i < to ; i ++ ) A . increment ( i , i , ) ; }
Updates the values along the main diagonal of the matrix by adding a constant to them
45
17
29,989
public static void fillRow ( Matrix A , int i , int from , int to , double val ) { for ( int j = from ; j < to ; j ++ ) A . set ( i , j , val ) ; }
Fills the values in a row of the matrix
48
10
29,990
private void indexArrayStore ( int e , int i ) { if ( valueIndexStore . length < e ) { int oldLength = valueIndexStore . length ; valueIndexStore = Arrays . copyOf ( valueIndexStore , e + 2 ) ; Arrays . fill ( valueIndexStore , oldLength , valueIndexStore . length , - 1 ) ; } valueIndexStore [ e ] = i ; }
Sets the given index to use the specific value
86
10
29,991
private void heapifyUp ( int i ) { int iP = parent ( i ) ; while ( i != 0 && cmp ( i , iP ) < 0 ) //Should not be greater then our parent { swapHeapValues ( iP , i ) ; i = iP ; iP = parent ( i ) ; } }
Heapify up from the given index in the heap and make sure everything is correct . Stops when the child value is in correct order with its parent .
66
32
29,992
private void swapHeapValues ( int i , int j ) { if ( fastValueRemove == Mode . HASH ) { valueIndexMap . put ( heap [ i ] , j ) ; valueIndexMap . put ( heap [ j ] , i ) ; } else if ( fastValueRemove == Mode . BOUNDED ) { //Already in the array, so just need to set valueIndexStore [ heap [ i ] ] = j ; valueIndexStore [ heap [ j ] ] = i ; } int tmp = heap [ i ] ; heap [ i ] = heap [ j ] ; heap [ j ] = tmp ; }
Swaps the values stored in the heap for the given indices
131
12
29,993
protected int removeHeapNode ( int i ) { int val = heap [ i ] ; int rightMost = -- size ; heap [ i ] = heap [ rightMost ] ; heap [ rightMost ] = 0 ; if ( fastValueRemove == Mode . HASH ) { valueIndexMap . remove ( val ) ; if ( size != 0 ) valueIndexMap . put ( heap [ i ] , i ) ; } else if ( fastValueRemove == Mode . BOUNDED ) { valueIndexStore [ val ] = - 1 ; } heapDown ( i ) ; return val ; }
Removes the node specified from the heap
122
8
29,994
public void setMaxNorm ( double maxNorm ) { if ( Double . isNaN ( maxNorm ) || Double . isInfinite ( maxNorm ) || maxNorm <= 0 ) throw new IllegalArgumentException ( "The maximum norm must be a positive constant, not " + maxNorm ) ; this . maxNorm = maxNorm ; }
Sets the maximum allowed 2 norm for a single neuron s weights
71
13
29,995
public static double loss ( double pred , double y , double c ) { final double x = y - pred ; if ( Math . abs ( x ) <= c ) return x * x * 0.5 ; else return c * ( Math . abs ( x ) - c / 2 ) ; }
Computes the HuberLoss loss
61
8
29,996
public static double deriv ( double pred , double y , double c ) { double x = pred - y ; if ( Math . abs ( x ) <= c ) return x ; else return c * Math . signum ( x ) ; }
Computes the first derivative of the HuberLoss loss
49
12
29,997
public Vec solve ( Vec b ) { //Solve A x = L L^T x = b, for x //First solve L y = b Vec y = forwardSub ( L , b ) ; //Sole L^T x = y Vec x = backSub ( L , y ) ; return x ; }
Solves the linear system of equations A x = b
66
11
29,998
public Matrix solve ( Matrix B ) { //Solve A x = L L^T x = b, for x //First solve L y = b Matrix y = forwardSub ( L , B ) ; //Sole L^T x = y Matrix x = backSub ( L , y ) ; return x ; }
Solves the linear system of equations A x = B
66
11
29,999
public double getDet ( ) { double det = 1 ; for ( int i = 0 ; i < L . rows ( ) ; i ++ ) det *= L . get ( i , i ) ; return det ; }
Computes the determinant of A
46
7