idx
int64
0
165k
question
stringlengths
73
4.15k
target
stringlengths
5
918
len_question
int64
21
890
len_target
int64
3
255
28,800
static public DHistogram [ ] initialHist ( Frame fr , int ncols , int nbins , DHistogram hs [ ] , int min_rows , boolean doGrpSplit , boolean isBinom ) { Vec vecs [ ] = fr . vecs ( ) ; for ( int c = 0 ; c < ncols ; c ++ ) { Vec v = vecs [ c ] ; final float minIn = ( float ) Math . max ( v . min ( ) , - Float . MAX_VALUE ) ; // inclusive vector min final float maxIn = ( float ) Math . min ( v . max ( ) , Float . MAX_VALUE ) ; // inclusive vector max final float maxEx = find_maxEx ( maxIn , v . isInt ( ) ? 1 : 0 ) ; // smallest exclusive max final long vlen = v . length ( ) ; hs [ c ] = v . naCnt ( ) == vlen || v . min ( ) == v . max ( ) ? null : make ( fr . _names [ c ] , nbins , ( byte ) ( v . isEnum ( ) ? 2 : ( v . isInt ( ) ? 1 : 0 ) ) , minIn , maxEx , vlen , min_rows , doGrpSplit , isBinom ) ; } return hs ; }
The initial histogram bins are setup from the Vec rollups .
290
13
28,801
public boolean isConstantResponse ( ) { double m = Double . NaN ; for ( int b = 0 ; b < _bins . length ; b ++ ) { if ( _bins [ b ] == 0 ) continue ; if ( var ( b ) > 1e-14 ) return false ; double mean = mean ( b ) ; if ( mean != m ) if ( Double . isNaN ( m ) ) m = mean ; else if ( Math . abs ( m - mean ) > 1e-6 ) return false ; } return true ; }
Check for a constant response variable
118
6
28,802
static void lockCloud ( ) { if ( _cloudLocked ) return ; // Fast-path cutout synchronized ( Paxos . class ) { while ( ! _commonKnowledge ) try { Paxos . class . wait ( ) ; } catch ( InterruptedException ie ) { } _cloudLocked = true ; } }
change cloud shape - the distributed writes will be in the wrong place .
68
14
28,803
protected Vec [ ] [ ] makeTemplates ( ) { Vec anyVec = dataset . anyVec ( ) ; final long [ ] [ ] espcPerSplit = computeEspcPerSplit ( anyVec . _espc , anyVec . length ( ) ) ; final int num = dataset . numCols ( ) ; // number of columns in input frame final int nsplits = espcPerSplit . length ; // number of splits final String [ ] [ ] domains = dataset . domains ( ) ; // domains final boolean [ ] uuids = dataset . uuids ( ) ; final byte [ ] times = dataset . times ( ) ; Vec [ ] [ ] t = new Vec [ nsplits ] [ /*num*/ ] ; // resulting vectors for all for ( int i = 0 ; i < nsplits ; i ++ ) { // vectors for j-th split t [ i ] = new Vec ( Vec . newKey ( ) , espcPerSplit [ i /*-th split*/ ] ) . makeZeros ( num , domains , uuids , times ) ; } return t ; }
Create a templates for vector composing output frame
238
8
28,804
public static PSetupGuess guessSetup ( byte [ ] bits ) { InputStream is = new ByteArrayInputStream ( bits ) ; XlsParser p = new XlsParser ( ) ; CustomInspectDataOut dout = new CustomInspectDataOut ( ) ; try { p . streamParse ( is , dout ) ; } catch ( Exception e ) { } return new PSetupGuess ( new ParserSetup ( ParserType . XLS , CsvParser . AUTO_SEP , dout . _ncols , dout . _header , dout . _header ? dout . data ( ) [ 0 ] : null , false ) , dout . _nlines , dout . _invalidLines , dout . data ( ) , dout . _nlines > dout . _invalidLines , null ) ; }
Try to parse the bits as svm light format return SVMParser instance if the input is in svm light format null otherwise .
188
27
28,805
public static Frame mmul ( Frame x , Frame y ) { MatrixMulJob mmj = new MatrixMulJob ( Key . make ( "mmul" + ++ cnt ) , Key . make ( "mmulProgress" ) , x , y ) ; mmj . fork ( ) . _fjtask . join ( ) ; DKV . remove ( mmj . _dstKey ) ; // do not leave garbage in KV mmj . _z . reloadVecs ( ) ; return mmj . _z ; }
to be invoked from R expression
115
6
28,806
public T invokeOnAllNodes ( ) { H2O cloud = H2O . CLOUD ; Key [ ] args = new Key [ cloud . size ( ) ] ; String skey = "RunOnAll" + Key . rand ( ) ; for ( int i = 0 ; i < args . length ; ++ i ) args [ i ] = Key . make ( skey , ( byte ) 0 , Key . DFJ_INTERNAL_USER , cloud . _memary [ i ] ) ; invoke ( args ) ; for ( Key arg : args ) DKV . remove ( arg ) ; return self ( ) ; }
Invokes the task on all nodes
134
7
28,807
@ Override public boolean block ( ) throws InterruptedException { while ( ! isDone ( ) ) { try { get ( ) ; } catch ( ExecutionException eex ) { // skip the execution part Throwable tex = eex . getCause ( ) ; if ( tex instanceof Error ) throw ( Error ) tex ; if ( tex instanceof DistributedException ) throw ( DistributedException ) tex ; if ( tex instanceof JobCancelledException ) throw ( JobCancelledException ) tex ; throw new RuntimeException ( tex ) ; } catch ( CancellationException cex ) { Log . errRTExcept ( cex ) ; } } return true ; }
deadlock is otherwise all threads would block on waits .
140
11
28,808
private final void dcompute ( ) { // Work to do the distribution // Split out the keys into disjointly-homed sets of keys. // Find the split point. First find the range of home-indices. H2O cloud = H2O . CLOUD ; int lo = cloud . _memary . length , hi = - 1 ; for ( Key k : _keys ) { int i = k . home ( cloud ) ; if ( i < lo ) lo = i ; if ( i > hi ) hi = i ; // lo <= home(keys) <= hi } // Classic fork/join, but on CPUs. // Split into 3 arrays of keys: lo keys, hi keys and self keys final ArrayList < Key > locals = new ArrayList < Key > ( ) ; final ArrayList < Key > lokeys = new ArrayList < Key > ( ) ; final ArrayList < Key > hikeys = new ArrayList < Key > ( ) ; int self_idx = cloud . nidx ( H2O . SELF ) ; int mid = ( lo + hi ) >>> 1 ; // Mid-point for ( Key k : _keys ) { int idx = k . home ( cloud ) ; if ( idx == self_idx ) locals . add ( k ) ; else if ( idx < mid ) lokeys . add ( k ) ; else hikeys . add ( k ) ; } // Launch off 2 tasks for the other sets of keys, and get a place-holder // for results to block on. _lo = remote_compute ( lokeys ) ; _hi = remote_compute ( hikeys ) ; // Setup for local recursion: just use the local keys. if ( locals . size ( ) != 0 ) { // Shortcut for no local work _local = clone ( ) ; // 'this' is completer for '_local', so awaits _local completion _local . _is_local = true ; _local . _keys = locals . toArray ( new Key [ locals . size ( ) ] ) ; // Keys, including local keys (if any) _local . init ( ) ; // One-time top-level init H2O . submitTask ( _local ) ; // Begin normal execution on a FJ thread } else { tryComplete ( ) ; // No local work, so just immediate tryComplete } }
Override to specify local work
507
5
28,809
private final void donCompletion ( CountedCompleter caller ) { // Distributed completion assert _lo == null || _lo . isDone ( ) ; assert _hi == null || _hi . isDone ( ) ; // Fold up results from left & right subtrees if ( _lo != null ) reduce2 ( _lo . get ( ) ) ; if ( _hi != null ) reduce2 ( _hi . get ( ) ) ; if ( _local != null ) reduce2 ( _local ) ; // Note: in theory (valid semantics) we could push these "over the wire" // and block for them as we're blocking for the top-level initial split. // However, that would require sending "isDone" flags over the wire also. // MUCH simpler to just block for them all now, and send over the empty set // of not-yet-blocked things. if ( _local != null && _local . _fs != null ) _local . _fs . blockForPending ( ) ; // Block on all other pending tasks, also _keys = null ; // Do not return _keys over wire if ( _top_level ) postGlobal ( ) ; }
Override for local completion
249
4
28,810
protected JsonObject argumentsToJson ( ) { JsonObject result = new JsonObject ( ) ; for ( Argument a : _arguments ) { if ( a . specified ( ) ) result . addProperty ( a . _name , a . originalValue ( ) ) ; } return result ; }
Returns a json object containing all arguments specified to the page .
64
12
28,811
@ Override protected void init ( ) { super . init ( ) ; assert 0 <= ntrees && ntrees < 1000000 ; // Sanity check // Not enough rows to run if ( source . numRows ( ) - response . naCnt ( ) <= 0 ) throw new IllegalArgumentException ( "Dataset contains too many NAs!" ) ; if ( ! classification && ( ! ( response . isEnum ( ) || response . isInt ( ) ) ) ) throw new IllegalArgumentException ( "Classification cannot be performed on a float column!" ) ; if ( classification ) { if ( 0.0f > sample_rate || sample_rate > 1.0f ) throw new IllegalArgumentException ( "Sampling rate must be in [0,1] but found " + sample_rate ) ; } if ( regression ) throw new IllegalArgumentException ( "SpeeDRF does not currently support regression." ) ; }
Put here all precondition verification
202
7
28,812
public static void build ( final Key jobKey , final Key modelKey , final DRFParams drfParams , final Data localData , int ntrees , int numSplitFeatures , int [ ] rowsPerChunks ) { Timer t_alltrees = new Timer ( ) ; Tree [ ] trees = new Tree [ ntrees ] ; Log . info ( Log . Tag . Sys . RANDF , "Building " + ntrees + " trees" ) ; Log . info ( Log . Tag . Sys . RANDF , "Number of split features: " + numSplitFeatures ) ; Log . info ( Log . Tag . Sys . RANDF , "Starting RF computation with " + localData . rows ( ) + " rows " ) ; Random rnd = Utils . getRNG ( localData . seed ( ) + ROOT_SEED_ADD ) ; Sampling sampler = createSampler ( drfParams , rowsPerChunks ) ; byte producerId = ( byte ) H2O . SELF . index ( ) ; for ( int i = 0 ; i < ntrees ; ++ i ) { long treeSeed = rnd . nextLong ( ) + TREE_SEED_INIT ; // make sure that enough bits is initialized trees [ i ] = new Tree ( jobKey , modelKey , localData , producerId , drfParams . max_depth , drfParams . stat_type , numSplitFeatures , treeSeed , i , drfParams . _exclusiveSplitLimit , sampler , drfParams . _verbose , drfParams . regression , ! drfParams . _useNonLocalData , ( ( SpeeDRFModel ) UKV . get ( modelKey ) ) . score_pojo ) ; } Log . info ( "Invoking the tree build tasks on all nodes." ) ; DRemoteTask . invokeAll ( trees ) ; Log . info ( Log . Tag . Sys . RANDF , "All trees (" + ntrees + ") done in " + t_alltrees ) ; }
Build random forest for data stored on this node .
453
10
28,813
static void listJobs ( ) throws Exception { HttpClient client = new HttpClient ( ) ; GetMethod get = new GetMethod ( URL + "/Jobs.json" ) ; int status = client . executeMethod ( get ) ; if ( status != 200 ) throw new Exception ( get . getStatusText ( ) ) ; Gson gson = new Gson ( ) ; JobsRes res = gson . fromJson ( new InputStreamReader ( get . getResponseBodyAsStream ( ) ) , JobsRes . class ) ; System . out . println ( "Running jobs:" ) ; for ( Job job : res . jobs ) System . out . println ( job . description + " " + job . destination_key ) ; get . releaseConnection ( ) ; }
Lists jobs currently running .
163
6
28,814
static void exportModel ( ) throws Exception { HttpClient client = new HttpClient ( ) ; GetMethod get = new GetMethod ( URL + "/2/ExportModel.json?model=MyInitialNeuralNet" ) ; int status = client . executeMethod ( get ) ; if ( status != 200 ) throw new Exception ( get . getStatusText ( ) ) ; JsonObject response = ( JsonObject ) new JsonParser ( ) . parse ( new InputStreamReader ( get . getResponseBodyAsStream ( ) ) ) ; JsonElement model = response . get ( "model" ) ; JsonWriter writer = new JsonWriter ( new FileWriter ( JSON_FILE ) ) ; writer . setLenient ( true ) ; writer . setIndent ( " " ) ; Streams . write ( model , writer ) ; writer . close ( ) ; get . releaseConnection ( ) ; }
Exports a model to a JSON file .
192
9
28,815
public static void importModel ( ) throws Exception { // Upload file to H2O HttpClient client = new HttpClient ( ) ; PostMethod post = new PostMethod ( URL + "/Upload.json?key=" + JSON_FILE . getName ( ) ) ; Part [ ] parts = { new FilePart ( JSON_FILE . getName ( ) , JSON_FILE ) } ; post . setRequestEntity ( new MultipartRequestEntity ( parts , post . getParams ( ) ) ) ; if ( 200 != client . executeMethod ( post ) ) throw new RuntimeException ( "Request failed: " + post . getStatusLine ( ) ) ; post . releaseConnection ( ) ; // Parse the key into a model GetMethod get = new GetMethod ( URL + "/2/ImportModel.json?" // + "destination_key=MyImportedNeuralNet&" // + "type=NeuralNetModel&" // + "json=" + JSON_FILE . getName ( ) ) ; if ( 200 != client . executeMethod ( get ) ) throw new RuntimeException ( "Request failed: " + get . getStatusLine ( ) ) ; get . releaseConnection ( ) ; }
Imports a model from a JSON file .
255
9
28,816
private void checkAndLimitFeatureUsedPerSplit ( Frame fr ) { int validCols = fr . numCols ( ) - 1 ; // for classIdx column if ( validCols < _rfParams . num_split_features ) { Log . info ( Log . Tag . Sys . RANDF , "Limiting features from " + _rfParams . num_split_features + " to " + validCols + " because there are no more valid columns in the dataset" ) ; _rfParams . num_split_features = validCols ; } }
Check that we have proper number of valid columns vs . features selected if not cap
124
16
28,817
private long getChunkId ( final Frame fr ) { Key [ ] keys = new Key [ fr . anyVec ( ) . nChunks ( ) ] ; for ( int i = 0 ; i < fr . anyVec ( ) . nChunks ( ) ; ++ i ) { keys [ i ] = fr . anyVec ( ) . chunkKey ( i ) ; } for ( int i = 0 ; i < keys . length ; ++ i ) { if ( keys [ i ] . home ( ) ) return i ; } return - 99999 ; //throw new Error("No key on this node"); }
Return chunk index of the first chunk on this node . Used to identify the trees built here .
131
19
28,818
public static String JSON2HTML ( String name ) { if ( name . length ( ) < 1 ) return name ; if ( name == "row" ) { return name . substring ( 0 , 1 ) . toUpperCase ( ) + name . replace ( "_" , " " ) . substring ( 1 ) ; } return name . substring ( 0 , 1 ) + name . replace ( "_" , " " ) . substring ( 1 ) ; }
Returns the name of the JSON property pretty printed . That is spaces instead of underscores and capital first letter .
98
21
28,819
public static PSetupGuess guessSetup ( byte [ ] bytes ) { // find the last eof int i = bytes . length - 1 ; while ( i > 0 && bytes [ i ] != ' ' ) -- i ; assert i >= 0 ; InputStream is = new ByteArrayInputStream ( Arrays . copyOf ( bytes , i ) ) ; SVMLightParser p = new SVMLightParser ( new ParserSetup ( ParserType . SVMLight , CsvParser . AUTO_SEP , false ) ) ; InspectDataOut dout = new InspectDataOut ( ) ; try { p . streamParse ( is , dout ) ; } catch ( Exception e ) { throw new RuntimeException ( e ) ; } return new PSetupGuess ( new ParserSetup ( ParserType . SVMLight , CsvParser . AUTO_SEP , dout . _ncols , false , null , false ) , dout . _nlines , dout . _invalidLines , dout . data ( ) , dout . _ncols > 0 && dout . _nlines > 0 && dout . _nlines > dout . _invalidLines , dout . errors ( ) ) ; }
Try to parse the bytes as svm light format return SVMParser instance if the input is in svm light format null otherwise .
270
27
28,820
public static Unsafe getUnsafe ( ) { // Not on bootclasspath if ( UtilUnsafe . class . getClassLoader ( ) == null ) return Unsafe . getUnsafe ( ) ; try { final Field fld = Unsafe . class . getDeclaredField ( "theUnsafe" ) ; fld . setAccessible ( true ) ; return ( Unsafe ) fld . get ( UtilUnsafe . class ) ; } catch ( Exception e ) { throw new RuntimeException ( "Could not obtain access to sun.misc.Unsafe" , e ) ; } }
Fetch the Unsafe . Use With Caution .
126
11
28,821
protected void bprop ( float target ) { assert ( target != missing_real_value ) ; if ( params . loss != Loss . MeanSquare ) throw new UnsupportedOperationException ( "Regression is only implemented for MeanSquare error." ) ; final int row = 0 ; // Computing partial derivative: dE/dnet = dE/dy * dy/dnet = dE/dy * 1 final float g = target - _a . get ( row ) ; //for MSE -dMSE/dy = target-y float m = momentum ( ) ; float r = _minfo . adaDelta ( ) ? 0 : rate ( _minfo . get_processed_total ( ) ) * ( 1f - m ) ; bprop ( row , g , r , m ) ; }
Backpropagation for regression
170
6
28,822
public double score_interpreter ( final HashMap < String , Comparable > row ) { double score = _initialScore ; for ( int i = 0 ; i < _rules . length ; i ++ ) score += _rules [ i ] . score ( row . get ( _colNames [ i ] ) ) ; return score ; }
Use the rule interpreter
71
4
28,823
public static String getName ( String pname , DataTypes type , StringBuilder sb ) { String jname = xml2jname ( pname ) ; // Emit the code to do the load return jname ; }
to emit it at runtime .
47
6
28,824
private boolean set_cache ( long cache ) { while ( true ) { // Spin till get it long old = _cache ; // Read once at the start if ( ! H2O . larger ( cloud ( cache ) , cloud ( old ) ) ) // Rolling backwards? // Attempt to set for an older Cloud. Blow out with a failure; caller // should retry on a new Cloud. return false ; assert cloud ( cache ) != cloud ( old ) || cache == old ; if ( old == cache ) return true ; // Fast-path cutout if ( _cacheUpdater . compareAndSet ( this , old , cache ) ) return true ; // Can fail if the cache is really old, and just got updated to a version // which is still not the latest, and we are trying to update it again. } }
Update the cache but only to strictly newer Clouds
172
9
28,825
public long cloud_info ( H2O cloud ) { long x = _cache ; // See if cached for this Cloud. This should be the 99% fast case. if ( cloud ( x ) == cloud . _idx ) return x ; // Cache missed! Probaby it just needs (atomic) updating. // But we might be holding the stale cloud... // Figure out home Node in this Cloud char home = ( char ) D ( 0 ) ; // Figure out what replica # I am, if any int desired = desired ( x ) ; int replica = - 1 ; for ( int i = 0 ; i < desired ; i ++ ) { int idx = D ( i ) ; if ( idx >= 0 && cloud . _memary [ idx ] == H2O . SELF ) { replica = i ; break ; } } long cache = build_cache ( cloud . _idx , home , replica , desired ) ; set_cache ( cache ) ; // Attempt to upgrade cache, but ignore failure return cache ; // Return the magic word for this Cloud }
Return the info word for this Cloud . Use the cache if possible
224
13
28,826
static public Key make ( byte [ ] kb , byte rf ) { if ( rf == - 1 ) throw new IllegalArgumentException ( ) ; Key key = new Key ( kb ) ; Key key2 = H2O . getk ( key ) ; // Get the interned version, if any if ( key2 != null ) // There is one! Return it instead return key2 ; // Set the cache with desired replication factor, and a fake cloud index H2O cloud = H2O . CLOUD ; // Read once key . _cache = build_cache ( cloud . _idx - 1 , 0 , 0 , rf ) ; key . cloud_info ( cloud ) ; // Now compute & cache the real data return key ; }
Make new Keys . Optimistically attempt interning but no guarantee .
159
13
28,827
static public String rand ( ) { UUID uid = UUID . randomUUID ( ) ; long l1 = uid . getLeastSignificantBits ( ) ; long l2 = uid . getMostSignificantBits ( ) ; return "_" + Long . toHexString ( l1 ) + Long . toHexString ( l2 ) ; }
A random string useful as a Key name or partial Key suffix .
81
13
28,828
static public Key make ( String s , byte rf , byte systemType , H2ONode ... replicas ) { return make ( decodeKeyName ( s ) , rf , systemType , replicas ) ; }
If the addresses are not specified returns a key with no home information .
46
14
28,829
static public Key make ( byte [ ] kb , byte rf , byte systemType , H2ONode ... replicas ) { // no more than 3 replicas allowed to be stored in the key assert 0 <= replicas . length && replicas . length <= 3 ; assert systemType < 32 ; // only system keys allowed // Key byte layout is: // 0 - systemType, from 0-31 // 1 - replica-count, plus up to 3 bits for ip4 vs ip6 // 2-n - zero, one, two or 3 IP4 (4+2 bytes) or IP6 (16+2 bytes) addresses // 2-5- 4 bytes of chunk#, or -1 for masters // n+ - repeat of the original kb AutoBuffer ab = new AutoBuffer ( ) ; ab . put1 ( systemType ) . put1 ( replicas . length ) ; for ( H2ONode h2o : replicas ) h2o . write ( ) ; ab . put4 ( - 1 ) ; ab . putA1 ( kb , kb . length ) ; return make ( Arrays . copyOf ( ab . buf ( ) , ab . position ( ) ) , rf ) ; }
Make a Key which is homed to specific nodes .
254
11
28,830
final public static Key makeSystem ( String s ) { byte [ ] kb = decodeKeyName ( s ) ; byte [ ] kb2 = new byte [ kb . length + 1 ] ; System . arraycopy ( kb , 0 , kb2 , 1 , kb . length ) ; kb2 [ 0 ] = Key . BUILT_IN_KEY ; return Key . make ( kb2 ) ; }
Hide a user key by turning it into a system key of type HIDDEN_USER_KEY
84
20
28,831
protected void decorateActiveStep ( final TutorStep step , StringBuilder sb ) { sb . append ( "<h4>" ) . append ( step . summary ( ) ) . append ( "</h4>" ) ; sb . append ( step . content ( ) ) ; }
Shows the active workflow step
61
6
28,832
@ Override public void reduce ( JStackCollectorTask that ) { if ( _result == null ) _result = that . _result ; else for ( int i = 0 ; i < _result . length ; ++ i ) if ( _result [ i ] == null ) _result [ i ] = that . _result [ i ] ; }
for each node in the cloud it contains all threads stack traces
73
12
28,833
public float [ ] predict ( Map < String , Double > row , double data [ ] , float preds [ ] ) { return predict ( map ( row , data ) , preds ) ; }
Does the mapping lookup for every row no allocation
41
9
28,834
private void emitLogHeader ( Context context , String mapredTaskId ) throws IOException , InterruptedException { Configuration conf = context . getConfiguration ( ) ; Text textId = new Text ( mapredTaskId ) ; for ( Map . Entry < String , String > entry : conf ) { StringBuilder sb = new StringBuilder ( ) ; sb . append ( entry . getKey ( ) ) ; sb . append ( "=" ) ; sb . append ( entry . getValue ( ) ) ; context . write ( textId , new Text ( sb . toString ( ) ) ) ; } context . write ( textId , new Text ( "----- Properties -----" ) ) ; String [ ] plist = { "mapred.local.dir" , "mapred.child.java.opts" , } ; for ( String k : plist ) { String v = conf . get ( k ) ; if ( v == null ) { v = "(null)" ; } context . write ( textId , new Text ( k + " " + v ) ) ; } String userDir = System . getProperty ( "user.dir" ) ; context . write ( textId , new Text ( "user.dir " + userDir ) ) ; try { java . net . InetAddress localMachine = java . net . InetAddress . getLocalHost ( ) ; context . write ( textId , new Text ( "hostname " + localMachine . getHostName ( ) ) ) ; } catch ( java . net . UnknownHostException uhe ) { // handle exception } }
Emit a bunch of logging output at the beginning of the map task .
337
15
28,835
@ Override protected Response serve ( ) { if ( src_key == null ) return RequestServer . _http404 . serve ( ) ; Vec v = src_key . anyVec ( ) ; if ( v . isEnum ( ) ) { map = Arrays . asList ( v . domain ( ) ) . indexOf ( str ) ; } else if ( v . masterVec ( ) != null && v . masterVec ( ) . isEnum ( ) ) { map = Arrays . asList ( v . masterVec ( ) . domain ( ) ) . indexOf ( str ) ; } else { map = - 1 ; } return Response . done ( this ) ; }
Just validate the frame and fill in the summary bits
148
10
28,836
static public Value DputIfMatch ( Key key , Value val , Value old , Futures fs ) { return DputIfMatch ( key , val , old , fs , false ) ; }
to consume .
40
3
28,837
static public void write_barrier ( ) { for ( H2ONode h2o : H2O . CLOUD . _memary ) for ( RPC rpc : h2o . tasks ( ) ) if ( rpc . _dt instanceof TaskPutKey || rpc . _dt instanceof Atomic ) rpc . get ( ) ; }
Used to order successive writes .
76
6
28,838
static public Value get ( Key key , int len , int priority ) { while ( true ) { // Read the Cloud once per put-attempt, to keep a consistent snapshot. H2O cloud = H2O . CLOUD ; Value val = H2O . get ( key ) ; // Hit in local cache? if ( val != null ) { if ( len > val . _max ) len = val . _max ; // See if we have enough data cached locally if ( len == 0 || val . rawMem ( ) != null || val . rawPOJO ( ) != null || val . isPersisted ( ) ) return val ; assert ! key . home ( ) ; // Master must have *something*; we got nothing & need to fetch } // While in theory we could read from any replica, we always need to // inform the home-node that his copy has been Shared... in case it // changes and he needs to issue an invalidate. For now, always and only // fetch from the Home node. H2ONode home = cloud . _memary [ key . home ( cloud ) ] ; // If we missed in the cache AND we are the home node, then there is // no V for this K (or we have a disk failure). if ( home == H2O . SELF ) return null ; // Pending write to same key from this node? Take that write instead. // Moral equivalent of "peeking into the cpu store buffer". Can happen, // e.g., because a prior 'put' of a null (i.e. a remove) is still mid- // send to the remote, so the local get has missed above, but a remote // get still might 'win' because the remote 'remove' is still in-progress. for ( RPC < ? > rpc : home . tasks ( ) ) if ( rpc . _dt instanceof TaskPutKey ) { assert rpc . _target == home ; TaskPutKey tpk = ( TaskPutKey ) rpc . _dt ; Key k = tpk . _key ; if ( k != null && key . equals ( k ) ) return tpk . _xval ; } return TaskGetKey . get ( home , key , priority ) ; } }
User - Weak - Get a Key from the distributed cloud .
479
12
28,839
private void zipDir ( String dir2zip , ZipOutputStream zos ) throws IOException { try { //create a new File object based on the directory we have to zip. File zipDir = new File ( dir2zip ) ; //get a listing of the directory content String [ ] dirList = zipDir . list ( ) ; byte [ ] readBuffer = new byte [ 4096 ] ; int bytesIn = 0 ; //loop through dirList, and zip the files for ( int i = 0 ; i < dirList . length ; i ++ ) { File f = new File ( zipDir , dirList [ i ] ) ; if ( f . isDirectory ( ) ) { //if the File object is a directory, call this //function again to add its content recursively String filePath = f . getPath ( ) ; zipDir ( filePath , zos ) ; //loop again continue ; } //if we reached here, the File object f was not a directory //create a FileInputStream on top of f FileInputStream fis = new FileInputStream ( f ) ; // create a new zip entry ZipEntry anEntry = new ZipEntry ( f . getPath ( ) ) ; anEntry . setTime ( f . lastModified ( ) ) ; //place the zip entry in the ZipOutputStream object zos . putNextEntry ( anEntry ) ; //now write the content of the file to the ZipOutputStream boolean stopEarlyBecauseTooMuchData = false ; while ( ( bytesIn = fis . read ( readBuffer ) ) != - 1 ) { zos . write ( readBuffer , 0 , bytesIn ) ; if ( baos . size ( ) > MAX_SIZE ) { stopEarlyBecauseTooMuchData = true ; break ; } } //close the Stream fis . close ( ) ; zos . closeEntry ( ) ; if ( stopEarlyBecauseTooMuchData ) { Log . warn ( "LogCollectorTask stopEarlyBecauseTooMuchData" ) ; break ; } } } catch ( Exception e ) { //handle exception } }
here is the code for the method
435
7
28,840
@ Override public float [ ] scoreKey ( Object modelKey , String [ ] colNames , String domains [ ] [ ] , double [ ] row ) { Key key = ( Key ) modelKey ; String sk = key . toString ( ) ; Value v = DKV . get ( key ) ; if ( v == null ) throw new IllegalArgumentException ( "Key " + sk + " not found!" ) ; try { return scoreModel ( v . get ( ) , colNames , domains , row ) ; } catch ( Throwable t ) { Log . err ( t ) ; throw new IllegalArgumentException ( "Key " + sk + " is not a Model key" ) ; } }
All - in - one call to lookup a model map the columns and score
147
15
28,841
void incr1 ( int b , double y , double yy ) { Utils . AtomicDoubleArray . add ( _sums , b , y ) ; Utils . AtomicDoubleArray . add ( _ssqs , b , yy ) ; }
Same except square done by caller
54
6
28,842
protected final String checkArguments ( Properties args , RequestType type ) { // Why the following lines duplicate lines from Request#92 - handling query? // reset all arguments for ( Argument arg : _arguments ) arg . reset ( ) ; // return query if in query mode if ( type == RequestType . query ) return buildQuery ( args , type ) ; /* // Check that for each actual input argument from the user, there is some // request argument that this method is expecting. //*/ if ( H2O . OPT_ARGS . check_rest_params && ! ( this instanceof GridSearch ) && ! ( this instanceof HTTP500 ) ) { Enumeration en = args . propertyNames ( ) ; while ( en . hasMoreElements ( ) ) { boolean found = false ; String key = ( String ) en . nextElement ( ) ; for ( Argument arg : _arguments ) { if ( arg . _name . equals ( key ) ) { found = true ; break ; } } if ( ! found ) { return jsonError ( "Request specifies the argument '" + key + "' but it is not a valid parameter for this query " + this . getClass ( ) . getName ( ) ) . toString ( ) ; } } } // check the arguments now for ( Argument arg : _arguments ) { if ( ! arg . disabled ( ) ) { try { arg . check ( RequestQueries . this , args . getProperty ( arg . _name , "" ) ) ; queryArgumentValueSet ( arg , args ) ; } catch ( IllegalArgumentException e ) { if ( type == RequestType . json ) return jsonError ( "Argument '" + arg . _name + "' error: " + e . getMessage ( ) ) . toString ( ) ; else return buildQuery ( args , type ) ; } } } return null ; }
Checks the given arguments .
395
6
28,843
static public void basic_packet_handling ( AutoBuffer ab ) throws java . io . IOException { // Randomly drop 1/10th of the packets, as-if broken network. Dropped // packets are timeline recorded before dropping - and we still will // respond to timelines and suicide packets. int drop = H2O . OPT_ARGS . random_udp_drop != null && RANDOM_UDP_DROP . nextInt ( 5 ) == 0 ? 2 : 0 ; // Record the last time we heard from any given Node TimeLine . record_recv ( ab , false , drop ) ; ab . _h2o . _last_heard_from = System . currentTimeMillis ( ) ; // Snapshots are handled *IN THIS THREAD*, to prevent more UDP packets from // being handled during the dump. Also works for packets from outside the // Cloud... because we use Timelines to diagnose Paxos failures. int ctrl = ab . getCtrl ( ) ; ab . getPort ( ) ; // skip the port bytes if ( ctrl == UDP . udp . timeline . ordinal ( ) ) { UDP . udp . timeline . _udp . call ( ab ) ; return ; } // Suicide packet? Short-n-sweet... if ( ctrl == UDP . udp . rebooted . ordinal ( ) ) UDPRebooted . checkForSuicide ( ctrl , ab ) ; // Drop the packet. if ( drop != 0 ) return ; // Get the Cloud we are operating under for this packet H2O cloud = H2O . CLOUD ; // Check cloud membership; stale ex-members are "fail-stop" - we mostly // ignore packets from them (except paxos packets). boolean is_member = cloud . contains ( ab . _h2o ) ; // Paxos stateless packets & ACKs just fire immediately in a worker // thread. Dups are handled by these packet handlers directly. No // current membership check required for Paxos packets if ( UDP . udp . UDPS [ ctrl ] . _paxos || is_member ) { H2O . submitTask ( new FJPacket ( ab , ctrl ) ) ; return ; } // Some non-Paxos packet from a non-member. Probably should record & complain. // Filter unknown-packet-reports. In bad situations of poisoned Paxos // voting we can get a LOT of these packets/sec, flooding the console. _unknown_packets_per_sec ++ ; long timediff = ab . _h2o . 
_last_heard_from - _unknown_packet_time ; if ( timediff > 1000 ) { Log . warn ( "UDP packets from outside the cloud: " + _unknown_packets_per_sec + "/sec, last one from " + ab . _h2o + " @ " + new Date ( ) ) ; _unknown_packets_per_sec = 0 ; _unknown_packet_time = ab . _h2o . _last_heard_from ; } ab . close ( ) ; }
- Timeline record it
663
4
28,844
void push ( int slots ) { assert 0 <= slots && slots < 1000 ; int len = _d . length ; _sp += slots ; while ( _sp > len ) { _key = Arrays . copyOf ( _key , len << 1 ) ; _ary = Arrays . copyOf ( _ary , len << 1 ) ; _d = Arrays . copyOf ( _d , len << 1 ) ; _fcn = Arrays . copyOf ( _fcn , len <<= 1 ) ; _str = Arrays . copyOf ( _str , len << 1 ) ; } }
Push k empty slots
127
4
28,845
void push_slot ( int d , int n ) { assert d == 0 ; // Should use a fcn's closure for d>1 int idx = _display [ _tod - d ] + n ; push ( 1 ) ; _ary [ _sp - 1 ] = addRef ( _ary [ idx ] ) ; _d [ _sp - 1 ] = _d [ idx ] ; _fcn [ _sp - 1 ] = addRef ( _fcn [ idx ] ) ; _str [ _sp - 1 ] = _str [ idx ] ; assert _ary [ 0 ] == null || check_refcnt ( _ary [ 0 ] . anyVec ( ) ) ; }
Copy from display offset d nth slot
153
8
28,846
void tos_into_slot ( int d , int n , String id ) { // In a copy-on-modify language, only update the local scope, or return val assert d == 0 || ( d == 1 && _display [ _tod ] == n + 1 ) ; int idx = _display [ _tod - d ] + n ; // Temporary solution to kill a UDF from global name space. Needs to fix in the future. if ( _tod == 0 ) ASTOp . removeUDF ( id ) ; subRef ( _ary [ idx ] , _key [ idx ] ) ; subRef ( _fcn [ idx ] ) ; Frame fr = _ary [ _sp - 1 ] ; _ary [ idx ] = fr == null ? null : addRef ( new Frame ( fr ) ) ; _d [ idx ] = _d [ _sp - 1 ] ; _str [ idx ] = _str [ _sp - 1 ] ; _fcn [ idx ] = addRef ( _fcn [ _sp - 1 ] ) ; _key [ idx ] = d == 0 && fr != null ? id : null ; // Temporary solution to add a UDF to global name space. Needs to fix in the future. if ( _tod == 0 && _fcn [ _sp - 1 ] != null ) ASTOp . putUDF ( _fcn [ _sp - 1 ] , id ) ; assert _ary [ 0 ] == null || check_refcnt ( _ary [ 0 ] . anyVec ( ) ) ; }
Copy from TOS into a slot . Does NOT pop results .
342
13
28,847
void tos_into_slot ( int idx , String id ) { subRef ( _ary [ idx ] , _key [ idx ] ) ; subRef ( _fcn [ idx ] ) ; Frame fr = _ary [ _sp - 1 ] ; _ary [ idx ] = fr == null ? null : addRef ( new Frame ( fr ) ) ; _d [ idx ] = _d [ _sp - 1 ] ; _fcn [ idx ] = addRef ( _fcn [ _sp - 1 ] ) ; _str [ idx ] = _str [ _sp - 1 ] ; _key [ idx ] = fr != null ? id : null ; assert _ary [ 0 ] == null || check_refcnt ( _ary [ 0 ] . anyVec ( ) ) ; }
Copy from TOS into a slot using absolute index .
179
11
28,848
public Frame popXAry ( ) { Frame fr = popAry ( ) ; for ( Vec vec : fr . vecs ( ) ) { popVec ( vec ) ; if ( vec . masterVec ( ) != null ) popVec ( vec . masterVec ( ) ) ; } return fr ; }
Assumption is that this Frame will get pushed again shortly .
68
12
28,849
public void poppush ( int n , Frame ary , String key ) { addRef ( ary ) ; for ( int i = 0 ; i < n ; i ++ ) { assert _sp > 0 ; _sp -- ; _fcn [ _sp ] = subRef ( _fcn [ _sp ] ) ; _ary [ _sp ] = subRef ( _ary [ _sp ] , _key [ _sp ] ) ; } push ( 1 ) ; _ary [ _sp - 1 ] = ary ; _key [ _sp - 1 ] = key ; assert check_all_refcnts ( ) ; }
Replace a function invocation with its result
136
9
28,850
public Futures subRef ( Vec vec , Futures fs ) { assert fs != null : "Future should not be null!" ; if ( vec . masterVec ( ) != null ) subRef ( vec . masterVec ( ) , fs ) ; int cnt = _refcnt . get ( vec ) . _val - 1 ; if ( cnt > 0 ) { _refcnt . put ( vec , new IcedInt ( cnt ) ) ; } else { UKV . remove ( vec . _key , fs ) ; _refcnt . remove ( vec ) ; } return fs ; }
Subtract reference count .
130
6
28,851
public S fillFrom ( Properties parms ) { // Get passed-in fields, assign into Schema Class clz = getClass ( ) ; for ( String key : parms . stringPropertyNames ( ) ) { try { Field f = clz . getDeclaredField ( key ) ; // No such field error, if parm is junk int mods = f . getModifiers ( ) ; if ( Modifier . isTransient ( mods ) || Modifier . isStatic ( mods ) ) // Attempting to set a transient or static; treat same as junk fieldname throw new IllegalArgumentException ( "Unknown argument " + key ) ; // Only support a single annotation which is an API, and is required API api = ( API ) f . getAnnotations ( ) [ 0 ] ; // Must have one of these set to be an input field if ( api . validation ( ) . length ( ) == 0 && api . values ( ) . length ( ) == 0 && api . dependsOn ( ) . length == 0 ) throw new IllegalArgumentException ( "Attempting to set output field " + key ) ; // Primitive parse by field type f . set ( this , parse ( parms . getProperty ( key ) , f . getType ( ) ) ) ; } catch ( NoSuchFieldException nsfe ) { // Convert missing-field to IAE throw new IllegalArgumentException ( "Unknown argument " + key ) ; } catch ( ArrayIndexOutOfBoundsException aioobe ) { // Come here if missing annotation throw new RuntimeException ( "Broken internal schema; missing API annotation: " + key ) ; } catch ( IllegalAccessException iae ) { // Come here if field is final or private throw new RuntimeException ( "Broken internal schema; cannot be private nor final: " + key ) ; } } // Here every thing in 'parms' was set into some field - so we have already // checked for unknown or extra parms. // Confirm required fields are set do { for ( Field f : clz . getDeclaredFields ( ) ) { int mods = f . getModifiers ( ) ; if ( Modifier . isTransient ( mods ) || Modifier . isStatic ( mods ) ) continue ; // Ignore transient & static API api = ( API ) f . getAnnotations ( ) [ 0 ] ; if ( api . validation ( ) . 
length ( ) > 0 ) { // TODO: execute "validation language" in the BackEnd, which includes a "required check", if any if ( parms . getProperty ( f . getName ( ) ) == null ) throw new IllegalArgumentException ( "Required field " + f . getName ( ) + " not specified" ) ; } } clz = clz . getSuperclass ( ) ; } while ( Iced . class . isAssignableFrom ( clz . getSuperclass ( ) ) ) ; return ( S ) this ; }
private . Input fields get filled here so must not be final .
623
13
28,852
public final boolean isNA ( long i ) { long x = i - ( _start > 0 ? _start : 0 ) ; if ( 0 <= x && x < _len ) return isNA0 ( ( int ) x ) ; throw new ArrayIndexOutOfBoundsException ( getClass ( ) . getSimpleName ( ) + " " + _start + " <= " + i + " < " + ( _start + _len ) ) ; }
Fetch the missing - status the slow way .
96
10
28,853
public final long set0 ( int idx , long l ) { setWrite ( ) ; if ( _chk2 . set_impl ( idx , l ) ) return l ; ( _chk2 = inflate_impl ( new NewChunk ( this ) ) ) . set_impl ( idx , l ) ; return l ; }
Set a long element in a chunk given a 0 - based chunk local index .
74
16
28,854
public final double set0 ( int idx , double d ) { setWrite ( ) ; if ( _chk2 . set_impl ( idx , d ) ) return d ; ( _chk2 = inflate_impl ( new NewChunk ( this ) ) ) . set_impl ( idx , d ) ; return d ; }
Set a double element in a chunk given a 0 - based chunk local index .
74
16
28,855
public final float set0 ( int idx , float f ) { setWrite ( ) ; if ( _chk2 . set_impl ( idx , f ) ) return f ; ( _chk2 = inflate_impl ( new NewChunk ( this ) ) ) . set_impl ( idx , f ) ; return f ; }
Set a floating element in a chunk given a 0 - based chunk local index .
74
16
28,856
public final boolean setNA0 ( int idx ) { setWrite ( ) ; if ( _chk2 . setNA_impl ( idx ) ) return true ; ( _chk2 = inflate_impl ( new NewChunk ( this ) ) ) . setNA_impl ( idx ) ; return true ; }
Set the element in a chunk as missing given a 0 - based chunk local index .
70
17
28,857
protected Frame scoreImpl ( Frame adaptFrm ) { if ( isSupervised ( ) ) { int ridx = adaptFrm . find ( responseName ( ) ) ; assert ridx == - 1 : "Adapted frame should not contain response in scoring method!" ; assert nfeatures ( ) == adaptFrm . numCols ( ) : "Number of model features " + nfeatures ( ) + " != number of test set columns: " + adaptFrm . numCols ( ) ; assert adaptFrm . vecs ( ) . length == nfeatures ( ) : "Scoring data set contains wrong number of columns: " + adaptFrm . vecs ( ) . length + " instead of " + nfeatures ( ) ; } // Create a new vector for response // If the model produces a classification/enum, copy the domain into the // result vector. int nc = nclasses ( ) ; Vec [ ] newVecs = new Vec [ ] { adaptFrm . anyVec ( ) . makeZero ( classNames ( ) ) } ; if ( nc > 1 ) newVecs = Utils . join ( newVecs , adaptFrm . anyVec ( ) . makeZeros ( nc ) ) ; String [ ] names = new String [ newVecs . length ] ; names [ 0 ] = "predict" ; for ( int i = 1 ; i < names . length ; ++ i ) names [ i ] = classNames ( ) [ i - 1 ] ; final int num_features = nfeatures ( ) ; new MRTask2 ( ) { @ Override public void map ( Chunk chks [ ] ) { double tmp [ ] = new double [ num_features ] ; // We do not need the last field representing response float preds [ ] = new float [ nclasses ( ) == 1 ? 1 : nclasses ( ) + 1 ] ; int len = chks [ 0 ] . _len ; for ( int row = 0 ; row < len ; row ++ ) { float p [ ] = score0 ( chks , row , tmp , preds ) ; for ( int c = 0 ; c < preds . length ; c ++ ) chks [ num_features + c ] . set0 ( row , p [ c ] ) ; } } } . doAll ( Utils . join ( adaptFrm . vecs ( ) , newVecs ) ) ; // Return just the output columns return new Frame ( names , newVecs ) ; }
Score already adapted frame .
534
5
28,858
public final float [ ] score ( Frame fr , boolean exact , int row ) { double tmp [ ] = new double [ fr . numCols ( ) ] ; for ( int i = 0 ; i < tmp . length ; i ++ ) tmp [ i ] = fr . vecs ( ) [ i ] . at ( row ) ; return score ( fr . names ( ) , fr . domains ( ) , exact , tmp ) ; }
Single row scoring on a compatible Frame .
91
8
28,859
public final float [ ] score ( String names [ ] , String domains [ ] [ ] , boolean exact , double row [ ] ) { return score ( adapt ( names , domains , exact ) , row , new float [ nclasses ( ) ] ) ; }
Single row scoring on a compatible set of data . Fairly expensive to adapt .
53
16
28,860
protected SB toJavaSuper ( SB sb ) { sb . nl ( ) ; sb . ii ( 1 ) ; sb . i ( ) . p ( "public String[] getNames() { return NAMES; } " ) . nl ( ) ; sb . i ( ) . p ( "public String[][] getDomainValues() { return DOMAINS; }" ) . nl ( ) ; String uuid = this . uniqueId != null ? this . uniqueId . getId ( ) : this . _key . toString ( ) ; sb . i ( ) . p ( "public String getUUID() { return " ) . ps ( uuid ) . p ( "; }" ) . nl ( ) ; return sb ; }
Generate implementation for super class .
167
7
28,861
private SB toJavaPredict ( SB ccsb , SB fileCtxSb ) { // ccsb = classContext ccsb . nl ( ) ; ccsb . p ( " // Pass in data in a double[], pre-aligned to the Model's requirements." ) . nl ( ) ; ccsb . p ( " // Jam predictions into the preds[] array; preds[0] is reserved for the" ) . nl ( ) ; ccsb . p ( " // main prediction (class for classifiers or value for regression)," ) . nl ( ) ; ccsb . p ( " // and remaining columns hold a probability distribution for classifiers." ) . nl ( ) ; ccsb . p ( " public final float[] predict( double[] data, float[] preds) { preds = predict( data, preds, " + toJavaDefaultMaxIters ( ) + "); return preds; }" ) . nl ( ) ; // ccsb.p(" public final float[] predict( double[] data, float[] preds) { return predict( data, preds, "+toJavaDefaultMaxIters()+"); }").nl(); ccsb . p ( " public final float[] predict( double[] data, float[] preds, int maxIters ) {" ) . nl ( ) ; SB classCtxSb = new SB ( ) ; toJavaPredictBody ( ccsb . ii ( 1 ) , classCtxSb , fileCtxSb ) ; ccsb . di ( 1 ) ; ccsb . p ( " return preds;" ) . nl ( ) ; ccsb . p ( " }" ) . nl ( ) ; ccsb . p ( classCtxSb ) ; return ccsb ; }
Wrapper around the main predict call including the signature and return value
396
13
28,862
protected final void emptyLTrash ( ) { if ( _lVecTrash . isEmpty ( ) ) return ; Futures fs = new Futures ( ) ; cleanupTrash ( _lVecTrash , fs ) ; fs . blockForPending ( ) ; }
User call which empty local trash of vectors .
60
9
28,863
@ Override protected void registered ( RequestServer . API_VERSION ver ) { super . registered ( ver ) ; for ( Argument arg : _arguments ) { if ( arg . _name . equals ( "activation" ) || arg . _name . equals ( "initial_weight_distribution" ) || arg . _name . equals ( "expert_mode" ) || arg . _name . equals ( "adaptive_rate" ) || arg . _name . equals ( "replicate_training_data" ) || arg . _name . equals ( "balance_classes" ) || arg . _name . equals ( "n_folds" ) || arg . _name . equals ( "autoencoder" ) || arg . _name . equals ( "checkpoint" ) ) { arg . setRefreshOnChange ( ) ; } } }
Helper to specify which arguments trigger a refresh on change
182
10
28,864
private DataInfo prepareDataInfo ( ) { final boolean del_enum_resp = classification && ! response . isEnum ( ) ; final Frame train = FrameTask . DataInfo . prepareFrame ( source , autoencoder ? null : response , ignored_cols , classification , ignore_const_cols , true /*drop >20% NA cols*/ ) ; final DataInfo dinfo = new FrameTask . DataInfo ( train , autoencoder ? 0 : 1 , true , autoencoder || use_all_factor_levels , //use all FactorLevels for auto-encoder autoencoder ? DataInfo . TransformType . NORMALIZE : DataInfo . TransformType . STANDARDIZE , //transform predictors classification ? DataInfo . TransformType . NONE : DataInfo . TransformType . STANDARDIZE ) ; //transform response if ( ! autoencoder ) { final Vec resp = dinfo . _adaptedFrame . lastVec ( ) ; //convention from DataInfo: response is the last Vec assert ( ! classification ^ resp . isEnum ( ) ) : "Must have enum response for classification!" ; //either regression or enum response if ( del_enum_resp ) ltrash ( resp ) ; } return dinfo ; }
Helper to create a DataInfo object from the source and response
271
12
28,865
Frame updateFrame ( Frame target , Frame src ) { if ( src != target ) ltrash ( src ) ; return src ; }
Helper to update a Frame and adding it to the local trash at the same time
28
16
28,866
private void lock_data ( ) { source . read_lock ( self ( ) ) ; if ( validation != null && source . _key != null && validation . _key != null && ! source . _key . equals ( validation . _key ) ) validation . read_lock ( self ( ) ) ; }
Lock the input datasets against deletes
65
7
28,867
private void unlock_data ( ) { source . unlock ( self ( ) ) ; if ( validation != null && source . _key != null && validation . _key != null && ! source . _key . equals ( validation . _key ) ) validation . unlock ( self ( ) ) ; }
Release the lock for the input datasets
61
7
28,868
private Frame reBalance ( final Frame fr , boolean local ) { int chunks = ( int ) Math . min ( 4 * H2O . NUMCPUS * ( local ? 1 : H2O . CLOUD . size ( ) ) , fr . numRows ( ) ) ; if ( fr . anyVec ( ) . nChunks ( ) > chunks && ! reproducible ) { Log . info ( "Dataset already contains " + fr . anyVec ( ) . nChunks ( ) + " chunks. No need to rebalance." ) ; return fr ; } else if ( reproducible ) { Log . warn ( "Reproducibility enforced - using only 1 thread - can be slow." ) ; chunks = 1 ; } if ( ! quiet_mode ) Log . info ( "ReBalancing dataset into (at least) " + chunks + " chunks." ) ; // return MRUtils.shuffleAndBalance(fr, chunks, seed, local, shuffle_training_data); String snewKey = fr . _key != null ? ( fr . _key . toString ( ) + ".balanced" ) : Key . rand ( ) ; Key newKey = Key . makeSystem ( snewKey ) ; RebalanceDataSet rb = new RebalanceDataSet ( fr , newKey , chunks ) ; H2O . submitTask ( rb ) ; rb . join ( ) ; return UKV . get ( newKey ) ; }
Rebalance a frame for load balancing
312
8
28,869
private static float computeRowUsageFraction ( final long numRows , final long train_samples_per_iteration , final boolean replicate_training_data ) { float rowUsageFraction = ( float ) train_samples_per_iteration / numRows ; if ( replicate_training_data ) rowUsageFraction /= H2O . CLOUD . size ( ) ; assert ( rowUsageFraction > 0 ) ; return rowUsageFraction ; }
Compute the fraction of rows that need to be used for training during one iteration
102
16
28,870
public static void crossValidate ( Job . ValidatedJob job ) { if ( job . state != Job . JobState . RUNNING ) return ; //don't do cross-validation if the full model builder failed if ( job . validation != null ) throw new IllegalArgumentException ( "Cannot provide validation dataset and n_folds > 0 at the same time." ) ; if ( job . n_folds <= 1 ) throw new IllegalArgumentException ( "n_folds must be >= 2 for cross-validation." ) ; final String basename = job . destination_key . toString ( ) ; long [ ] offsets = new long [ job . n_folds + 1 ] ; Frame [ ] cv_preds = new Frame [ job . n_folds ] ; try { for ( int i = 0 ; i < job . n_folds ; ++ i ) { if ( job . state != Job . JobState . RUNNING ) break ; Key [ ] destkeys = new Key [ ] { Key . make ( basename + "_xval" + i + "_train" ) , Key . make ( basename + "_xval" + i + "_holdout" ) } ; NFoldFrameExtractor nffe = new NFoldFrameExtractor ( job . source , job . n_folds , i , destkeys , Key . make ( ) /*key used for locking only*/ ) ; H2O . submitTask ( nffe ) ; Frame [ ] splits = nffe . getResult ( ) ; // Cross-validate individual splits try { job . crossValidate ( splits , cv_preds , offsets , i ) ; //this removes the enum-ified response! job . _cv_count ++ ; } finally { // clean-up the results if ( ! job . keep_cross_validation_splits ) for ( Frame f : splits ) f . delete ( ) ; } } if ( job . state != Job . JobState . RUNNING ) return ; final int resp_idx = job . source . find ( job . _responseName ) ; Vec response = job . source . vecs ( ) [ resp_idx ] ; boolean put_back = UKV . get ( job . response . _key ) == null ; // In the case of rebalance, rebalance response will be deleted if ( put_back ) { job . response = response ; if ( job . classification ) job . response = job . response . toEnum ( ) ; DKV . put ( job . response . _key , job . response ) ; //put enum-ified response back to K-V store } ( ( Model ) UKV . get ( job . destination_key ) ) . scoreCrossValidation ( job , job . 
source , response , cv_preds , offsets ) ; if ( put_back ) UKV . remove ( job . response . _key ) ; } finally { // clean-up prediction frames for splits for ( Frame f : cv_preds ) if ( f != null ) f . delete ( ) ; } }
Cross - Validate a ValidatedJob
658
8
28,871
protected static List < water . ModelMetrics > fetchAll ( ) { return new ArrayList < water . ModelMetrics > ( H2O . KeySnapshot . globalSnapshot ( ) . fetchAll ( water . ModelMetrics . class ) . values ( ) ) ; }
Fetch all ModelMetrics from the KV store .
59
12
28,872
private Response serveOneOrAll ( List < water . ModelMetrics > list ) { JsonArray metricsArray = new JsonArray ( ) ; for ( water . ModelMetrics metrics : list ) { JsonObject metricsJson = metrics . toJSON ( ) ; metricsArray . add ( metricsJson ) ; } JsonObject result = new JsonObject ( ) ; result . add ( "metrics" , metricsArray ) ; return Response . done ( result ) ; }
For one or more water . ModelMetrics from the KV store return Response containing a map of them .
102
22
28,873
public static void scoreTree ( double data [ ] , float preds [ ] , CompressedTree [ ] ts ) { for ( int c = 0 ; c < ts . length ; c ++ ) if ( ts [ c ] != null ) preds [ ts . length == 1 ? 0 : c + 1 ] += ts [ c ] . score ( data ) ; }
Score given tree on the row of data .
77
9
28,874
public static Request registerRequest ( Request req ) { assert req . supportedVersions ( ) . length > 0 ; for ( API_VERSION ver : req . supportedVersions ( ) ) { String href = req . href ( ver ) ; assert ( ! _requests . containsKey ( href ) ) : "Request with href " + href + " already registered" ; _requests . put ( href , req ) ; req . registered ( ver ) ; } return req ; }
Registers the request with the request server .
97
9
28,875
public static void start ( ) { new Thread ( new Runnable ( ) { @ Override public void run ( ) { while ( true ) { try { // Try to get the NanoHTTP daemon started SERVER = new RequestServer ( H2O . _apiSocket ) ; break ; } catch ( Exception ioe ) { Log . err ( Sys . HTTPD , "Launching NanoHTTP server got " , ioe ) ; try { Thread . sleep ( 1000 ) ; } catch ( InterruptedException e ) { } // prevent denial-of-service } } } } , "Request Server launcher" ) . start ( ) ; }
Keep spinning until we get to launch the NanoHTTPD
135
12
28,876
public final T invoke ( Key key ) { RPC < Atomic < T >> rpc = fork ( key ) ; return ( T ) ( rpc == null ? this : rpc . get ( ) ) ; // Block for it }
Block until it completes even if run remotely
48
8
28,877
public static String [ ] concat ( String [ ] ... aa ) { int l = 0 ; for ( String [ ] a : aa ) l += a . length ; String [ ] r = new String [ l ] ; l = 0 ; for ( String [ ] a : aa ) { System . arraycopy ( a , 0 , r , l , a . length ) ; l += a . length ; } return r ; }
Concatenate given list of arrays into one long array .
91
13
28,878
public static Frame parse ( File file ) { Key fkey = NFSFileVec . make ( file ) ; Key dest = Key . make ( file . getName ( ) ) ; Frame frame = ParseDataset2 . parse ( dest , new Key [ ] { fkey } ) ; return frame ; }
Parse a dataset into a Frame .
67
8
28,879
public static Frame create ( String [ ] headers , double [ ] [ ] rows ) { Futures fs = new Futures ( ) ; Vec [ ] vecs = new Vec [ rows [ 0 ] . length ] ; Key keys [ ] = new Vec . VectorGroup ( ) . addVecs ( vecs . length ) ; for ( int c = 0 ; c < vecs . length ; c ++ ) { AppendableVec vec = new AppendableVec ( keys [ c ] ) ; NewChunk chunk = new NewChunk ( vec , 0 ) ; for ( int r = 0 ; r < rows . length ; r ++ ) chunk . addNum ( rows [ r ] [ c ] ) ; chunk . close ( 0 , fs ) ; vecs [ c ] = vec . close ( fs ) ; } fs . blockForPending ( ) ; return new Frame ( headers , vecs ) ; }
Creates a frame programmatically .
194
7
28,880
@ Override public Value chunkIdx ( int cidx ) { final long nchk = nChunks ( ) ; assert 0 <= cidx && cidx < nchk ; Key dkey = chunkKey ( cidx ) ; Value val1 = DKV . get ( dkey ) ; // Check for an existing one... will fetch data as needed if ( val1 != null ) return val1 ; // Found an existing one? // Lazily create a DVec for this chunk int len = ( int ) ( cidx < nchk - 1 ? CHUNK_SZ : ( _len - chunk2StartElem ( cidx ) ) ) ; // DVec is just the raw file data with a null-compression scheme Value val2 = new Value ( dkey , len , null , TypeMap . C1NCHUNK , _be ) ; val2 . setdsk ( ) ; // It is already on disk. // If not-home, then block till the Key is everywhere. Most calls here are // from the parser loading a text file, and the parser splits the work such // that most puts here are on home - so this is a simple speed optimization: // do not make a Futures nor block on it on home. Futures fs = dkey . home ( ) ? null : new Futures ( ) ; // Atomically insert: fails on a race, but then return the old version Value val3 = DKV . DputIfMatch ( dkey , val2 , null , fs ) ; if ( ! dkey . home ( ) && fs != null ) fs . blockForPending ( ) ; return val3 == null ? val2 : val3 ; }
Touching the DVec will force the file load .
366
11
28,881
private static void summarizeAndEnhanceFrame ( FrameSummary summary , Frame frame , boolean find_compatible_models , Map < String , Model > all_models , Map < String , Set < String > > all_models_cols ) { UniqueId unique_id = frame . getUniqueId ( ) ; summary . id = unique_id . getId ( ) ; summary . key = unique_id . getKey ( ) ; summary . creation_epoch_time_millis = unique_id . getCreationEpochTimeMillis ( ) ; summary . column_names = frame . _names ; summary . is_raw_frame = frame . isRawData ( ) ; if ( find_compatible_models ) { Map < String , Model > compatible_models = findCompatibleModels ( frame , all_models , all_models_cols ) ; summary . compatible_models = compatible_models . keySet ( ) ; } }
Summarize fields in water . fvec . Frame .
200
12
28,882
private Response serveOneOrAll ( Map < String , Frame > framesMap ) { // returns empty sets if !this.find_compatible_models Pair < Map < String , Model > , Map < String , Set < String > > > models_info = fetchModels ( ) ; Map < String , Model > all_models = models_info . getFirst ( ) ; Map < String , Set < String > > all_models_cols = models_info . getSecond ( ) ; Map < String , FrameSummary > frameSummaries = Frames . generateFrameSummaries ( null , framesMap , find_compatible_models , all_models , all_models_cols ) ; Map resultsMap = new LinkedHashMap ( ) ; resultsMap . put ( "frames" , frameSummaries ) ; // If find_compatible_models then include a map of the Model summaries. Should we put this on a separate switch? if ( this . find_compatible_models ) { Set < String > all_referenced_models = new TreeSet < String > ( ) ; for ( Map . Entry < String , FrameSummary > entry : frameSummaries . entrySet ( ) ) { FrameSummary summary = entry . getValue ( ) ; all_referenced_models . addAll ( summary . compatible_models ) ; } Map < String , ModelSummary > modelSummaries = Models . generateModelSummaries ( all_referenced_models , all_models , false , null , null ) ; resultsMap . put ( "models" , modelSummaries ) ; } // TODO: temporary hack to get things going String json = gson . toJson ( resultsMap ) ; JsonObject result = gson . fromJson ( json , JsonElement . class ) . getAsJsonObject ( ) ; return Response . done ( result ) ; }
For one or more Frame from the KV store summarize and enhance them and Response containing a map of them .
402
25
28,883
public static Vec compose ( TransfVec origVec , int [ ] [ ] transfMap , String [ ] domain , boolean keepOrig ) { // Do a mapping from INT -> ENUM -> this vector ENUM int [ ] [ ] domMap = Utils . compose ( new int [ ] [ ] { origVec . _values , origVec . _indexes } , transfMap ) ; Vec result = origVec . masterVec ( ) . makeTransf ( domMap [ 0 ] , domMap [ 1 ] , domain ) ; ; if ( ! keepOrig ) DKV . remove ( origVec . _key ) ; return result ; }
Compose given origVector with given transformation . Always returns a new vector . Original vector is kept if keepOrig is true .
141
25
28,884
public static Response redirect ( Request req , Key src_key ) { return Response . redirect ( req , "/2/Inspector" , "src_key" , src_key . toString ( ) ) ; }
Called from some other page to redirect that other page to this page .
46
15
28,885
public void addr ( NewChunk nc ) { long [ ] tmpl = _ls ; _ls = nc . _ls ; nc . _ls = tmpl ; int [ ] tmpi = _xs ; _xs = nc . _xs ; nc . _xs = tmpi ; tmpi = _id ; _id = nc . _id ; nc . _id = tmpi ; double [ ] tmpd = _ds ; _ds = nc . _ds ; nc . _ds = tmpd ; int tmp = _sparseLen ; _sparseLen = nc . _sparseLen ; nc . _sparseLen = tmp ; tmp = _len ; _len = nc . _len ; nc . _len = tmp ; add ( nc ) ; }
PREpend all of nc onto the current NewChunk . Kill nc .
178
17
28,886
void append2 ( long l , int x ) { if ( _id == null || l != 0 ) { if ( _ls == null || _sparseLen == _ls . length ) { append2slow ( ) ; // again call append2 since calling append2slow might have changed things (eg might have switched to sparse and l could be 0) append2 ( l , x ) ; return ; } _ls [ _sparseLen ] = l ; _xs [ _sparseLen ] = x ; if ( _id != null ) _id [ _sparseLen ] = _len ; _sparseLen ++ ; } _len ++ ; assert _sparseLen <= _len ; }
Fast - path append long data
147
6
28,887
static public void put ( Key key , Value val , Futures fs ) { assert ! val . isLockable ( ) ; Value res = DKV . put ( key , val , fs ) ; assert res == null || ! res . isLockable ( ) ; }
have to use the Lockable interface for all updates .
56
11
28,888
static public void put ( Key key , Freezable fr ) { if ( fr == null ) UKV . remove ( key ) ; else UKV . put ( key , new Value ( key , fr ) ) ; }
Also allow auto - serialization
46
6
28,889
void remove_task_tracking ( int task ) { RPC . RPCCall rpc = _work . get ( task ) ; if ( rpc == null ) return ; // Already stopped tracking // Atomically attempt to remove the 'dt'. If we win, we are the sole // thread running the dt.onAckAck. Also helps GC: the 'dt' is done (sent // to client and we received the ACKACK), but the rpc might need to stick // around a long time - and the dt might be big. DTask dt = rpc . _dt ; // The existing DTask, if any if ( dt != null && RPC . RPCCall . CAS_DT . compareAndSet ( rpc , dt , null ) ) { assert rpc . _computed : "Still not done #" + task + " " + dt . getClass ( ) + " from " + rpc . _client ; AckAckTimeOutThread . PENDING . remove ( rpc ) ; dt . onAckAck ( ) ; // One-time call on stop-tracking } // Roll-up as many done RPCs as we can, into the _removed_task_ids list while ( true ) { int t = _removed_task_ids . get ( ) ; // Last already-removed ID RPC . RPCCall rpc2 = _work . get ( t + 1 ) ; // RPC of 1st not-removed ID if ( rpc2 == null || rpc2 . _dt != null || ! _removed_task_ids . compareAndSet ( t , t + 1 ) ) break ; // Stop when we hit in-progress tasks _work . remove ( t + 1 ) ; // Else we can remove the tracking now } }
Stop tracking a remote task because we got an ACKACK .
389
13
28,890
public static Frame frame ( String [ ] names , double [ ] ... rows ) { assert names == null || names . length == rows [ 0 ] . length ; Futures fs = new Futures ( ) ; Vec [ ] vecs = new Vec [ rows [ 0 ] . length ] ; Key keys [ ] = Vec . VectorGroup . VG_LEN1 . addVecs ( vecs . length ) ; for ( int c = 0 ; c < vecs . length ; c ++ ) { AppendableVec vec = new AppendableVec ( keys [ c ] ) ; NewChunk chunk = new NewChunk ( vec , 0 ) ; for ( int r = 0 ; r < rows . length ; r ++ ) chunk . addNum ( rows [ r ] [ c ] ) ; chunk . close ( 0 , fs ) ; vecs [ c ] = vec . close ( fs ) ; } fs . blockForPending ( ) ; return new Frame ( names , vecs ) ; }
Create a new frame based on given row data .
212
10
28,891
public static Frame parseFrame ( Key okey , File ... files ) { assert files . length > 0 : "Ups. No files to parse!" ; for ( File f : files ) if ( ! f . exists ( ) ) throw new RuntimeException ( "File not found " + f ) ; // Create output key if not specified if ( okey == null ) okey = Key . make ( files [ 0 ] . getName ( ) ) ; Key [ ] fkeys = new Key [ files . length ] ; int cnt = 0 ; for ( File f : files ) fkeys [ cnt ++ ] = NFSFileVec . make ( ) ; return parseFrame ( okey , fkeys ) ; }
Parse given file into the form of frame represented by the given key .
151
15
28,892
private void processCustomDimensionParameters ( @ SuppressWarnings ( "rawtypes" ) GoogleAnalyticsRequest request , List < NameValuePair > postParms ) { Map < String , String > customDimParms = new HashMap < String , String > ( ) ; for ( String defaultCustomDimKey : defaultRequest . customDimentions ( ) . keySet ( ) ) { customDimParms . put ( defaultCustomDimKey , defaultRequest . customDimentions ( ) . get ( defaultCustomDimKey ) ) ; } @ SuppressWarnings ( "unchecked" ) Map < String , String > requestCustomDims = request . customDimentions ( ) ; for ( String requestCustomDimKey : requestCustomDims . keySet ( ) ) { customDimParms . put ( requestCustomDimKey , requestCustomDims . get ( requestCustomDimKey ) ) ; } for ( String key : customDimParms . keySet ( ) ) { postParms . add ( new BasicNameValuePair ( key , customDimParms . get ( key ) ) ) ; } }
Processes the custom dimensions and adds the values to list of parameters which would be posted to GA .
239
20
28,893
private void processCustomMetricParameters ( @ SuppressWarnings ( "rawtypes" ) GoogleAnalyticsRequest request , List < NameValuePair > postParms ) { Map < String , String > customMetricParms = new HashMap < String , String > ( ) ; for ( String defaultCustomMetricKey : defaultRequest . custommMetrics ( ) . keySet ( ) ) { customMetricParms . put ( defaultCustomMetricKey , defaultRequest . custommMetrics ( ) . get ( defaultCustomMetricKey ) ) ; } @ SuppressWarnings ( "unchecked" ) Map < String , String > requestCustomMetrics = request . custommMetrics ( ) ; for ( String requestCustomDimKey : requestCustomMetrics . keySet ( ) ) { customMetricParms . put ( requestCustomDimKey , requestCustomMetrics . get ( requestCustomDimKey ) ) ; } for ( String key : customMetricParms . keySet ( ) ) { postParms . add ( new BasicNameValuePair ( key , customMetricParms . get ( key ) ) ) ; } }
Processes the custom metrics and adds the values to list of parameters which would be posted to GA .
247
20
28,894
public static int clen ( int values , int bpv ) { int len = ( values * bpv ) >> 3 ; return values * bpv % 8 == 0 ? len : len + 1 ; }
Returns compressed len of the given array length if the value if represented by bpv - bits .
45
20
28,895
public static Value get ( H2ONode target , Key key , int priority ) { RPC < TaskGetKey > rpc , old ; while ( true ) { // Repeat until we get a unique TGK installed per key // Do we have an old TaskGetKey in-progress? rpc = TGKS . get ( key ) ; if ( rpc != null && rpc . _dt . _priority >= priority ) break ; old = rpc ; // Make a new TGK. rpc = new RPC ( target , new TaskGetKey ( key , priority ) , 1.0f ) ; if ( TGKS . putIfMatchUnlocked ( key , rpc , old ) == old ) { rpc . setTaskNum ( ) . call ( ) ; // Start the op break ; // Successful install of a fresh RPC } } Value val = rpc . get ( ) . _val ; // Block for, then fetch out the result TGKS . putIfMatchUnlocked ( key , null , rpc ) ; // Clear from cache return val ; }
Get a value from a named remote node
224
8
28,896
protected String build ( Response response ) { StringBuilder sb = new StringBuilder ( ) ; sb . append ( "<div class='container'>" ) ; sb . append ( "<div class='row-fluid'>" ) ; sb . append ( "<div class='span12'>" ) ; sb . append ( buildJSONResponseBox ( response ) ) ; if ( response . _status == Response . Status . done ) response . toJava ( sb ) ; sb . append ( buildResponseHeader ( response ) ) ; Builder builder = response . getBuilderFor ( ROOT_OBJECT ) ; if ( builder == null ) { sb . append ( "<h3>" + name ( ) + "</h3>" ) ; builder = OBJECT_BUILDER ; } for ( String h : response . getHeaders ( ) ) sb . append ( h ) ; if ( response . _response == null ) { boolean done = response . _req . toHTML ( sb ) ; if ( ! done ) { JsonParser parser = new JsonParser ( ) ; String json = new String ( response . _req . writeJSON ( new AutoBuffer ( ) ) . buf ( ) ) ; JsonObject o = ( JsonObject ) parser . parse ( json ) ; sb . append ( builder . build ( response , o , "" ) ) ; } } else sb . append ( builder . build ( response , response . _response , "" ) ) ; sb . append ( "</div></div></div>" ) ; return sb . toString ( ) ; }
Builds the HTML for the given response .
340
9
28,897
public void addExternalJars ( File file ) throws IllegalAccessException , InvocationTargetException , MalformedURLException { assert file . exists ( ) : "Unable to find external file: " + file . getAbsolutePath ( ) ; if ( file . isDirectory ( ) ) { for ( File f : file . listFiles ( ) ) addExternalJars ( ) ; } else if ( file . getName ( ) . endsWith ( ".jar" ) ) { Log . POST ( 22 , "before (in addExternalJars) invoke _addUrl " + file . toURI ( ) . toURL ( ) ) ; _addUrl . invoke ( _systemLoader , file . toURI ( ) . toURL ( ) ) ; Log . POST ( 22 , "after (in addExternalJars) invoke _addUrl " + file . toURI ( ) . toURL ( ) ) ; } }
Adds all jars in given directory to the classpath .
195
11
28,898
private void extractInternalFiles ( ) throws IOException { Enumeration entries = _h2oJar . entries ( ) ; while ( entries . hasMoreElements ( ) ) { ZipEntry e = ( ZipEntry ) entries . nextElement ( ) ; String name = e . getName ( ) ; if ( e . isDirectory ( ) ) continue ; // mkdirs() will handle these if ( ! name . endsWith ( ".jar" ) ) continue ; // extract the entry File out = internalFile ( name ) ; out . getParentFile ( ) . mkdirs ( ) ; try { FileOutputStream fos = new FileOutputStream ( out ) ; BufferedInputStream is = new BufferedInputStream ( _h2oJar . getInputStream ( e ) ) ; BufferedOutputStream os = new BufferedOutputStream ( fos ) ; int read ; byte [ ] buffer = new byte [ 4096 ] ; while ( ( read = is . read ( buffer ) ) != - 1 ) os . write ( buffer , 0 , read ) ; os . flush ( ) ; fos . getFD ( ) . sync ( ) ; // Force the output; throws SyncFailedException if full os . close ( ) ; is . close ( ) ; } catch ( FileNotFoundException ex ) { // Expected FNF if 2 H2O instances are attempting to unpack in the same directory } catch ( IOException ex ) { Log . die ( "Unable to extract file " + name + " because of " + ex + ". Make sure that directory " + _parentDir + " contains at least 50MB of free space to unpack H2O libraries." ) ; throw ex ; // dead code } } }
Extracts the libraries from the jar file to the given local path .
363
14
28,899
@ Override public synchronized Class loadClass ( String name , boolean resolve ) throws ClassNotFoundException { assert ! name . equals ( Weaver . class . getName ( ) ) ; Class z = loadClass2 ( name ) ; // Do all the work in here if ( resolve ) resolveClass ( z ) ; // Resolve here instead in the work method return z ; }
search THEN the System or parent loader .
77
8