idx
int64
0
41.2k
question
stringlengths
74
4.04k
target
stringlengths
7
750
28,800
public static Attributes lookupAttributes ( LdapOperations ldapOperations , Name dn , String [ ] attributes ) { return loopForAllAttributeValues ( ldapOperations , dn , attributes ) . getCollectedAttributes ( ) ; }
Lookup all values for the specified attributes looping through the results incrementally if necessary .
28,801
public Object getObject ( ) throws Exception { if ( converterConfigList == null ) { throw new FactoryBeanNotInitializedException ( "converterConfigList has not been set" ) ; } ConverterManagerImpl result = new ConverterManagerImpl ( ) ; for ( ConverterConfig converterConfig : converterConfigList ) { if ( converterConfig . fromClasses == null || converterConfig . toClasses == null || converterConfig . converter == null ) { throw new FactoryBeanNotInitializedException ( String . format ( "All of fromClasses, toClasses and converter must be specified in bean %1$s" , converterConfig . toString ( ) ) ) ; } for ( Class < ? > fromClass : converterConfig . fromClasses ) { for ( Class < ? > toClass : converterConfig . toClasses ) { if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( String . format ( "Adding converter from %1$s to %2$s" , fromClass , toClass ) ) ; } result . addConverter ( fromClass , converterConfig . syntax , toClass , converterConfig . converter ) ; } } } return result ; }
Creates a ConverterManagerImpl populating it with Converter instances from the converterConfigList property .
28,802
public void setAttribute ( Attribute attribute ) { if ( ! updateMode ) { originalAttrs . put ( attribute ) ; } else { updatedAttrs . put ( attribute ) ; } }
Set the supplied attribute .
28,803
public LdapContext getInnermostDelegateLdapContext ( ) { final LdapContext delegateLdapContext = this . getDelegateLdapContext ( ) ; if ( delegateLdapContext instanceof DelegatingLdapContext ) { return ( ( DelegatingLdapContext ) delegateLdapContext ) . getInnermostDelegateLdapContext ( ) ; } return delegateLdapContext ; }
Recursivley inspect delegates until a non - delegating ldap context is found .
28,804
protected DirContext getContext ( DirContextType dirContextType ) { final DirContext dirContext ; try { dirContext = ( DirContext ) this . keyedObjectPool . borrowObject ( dirContextType ) ; } catch ( Exception e ) { throw new DataAccessResourceFailureException ( "Failed to borrow DirContext from pool." , e ) ; } if ( dirContext instanceof LdapContext ) { return new DelegatingLdapContext ( this . keyedObjectPool , ( LdapContext ) dirContext , dirContextType ) ; } return new DelegatingDirContext ( this . keyedObjectPool , dirContext , dirContextType ) ; }
Gets a DirContext of the specified type from the keyed object pool .
28,805
public static String filterEncode ( String value ) { if ( value == null ) return null ; StringBuilder encodedValue = new StringBuilder ( value . length ( ) * 2 ) ; int length = value . length ( ) ; for ( int i = 0 ; i < length ; i ++ ) { char c = value . charAt ( i ) ; if ( c < FILTER_ESCAPE_TABLE . length ) { encodedValue . append ( FILTER_ESCAPE_TABLE [ c ] ) ; } else { encodedValue . append ( c ) ; } } return encodedValue . toString ( ) ; }
Escape a value for use in a filter .
28,806
public static String nameEncode ( String value ) { if ( value == null ) return null ; StringBuilder encodedValue = new StringBuilder ( value . length ( ) * 2 ) ; int length = value . length ( ) ; int last = length - 1 ; for ( int i = 0 ; i < length ; i ++ ) { char c = value . charAt ( i ) ; if ( c == ' ' && ( i == 0 || i == last ) ) { encodedValue . append ( "\\ " ) ; continue ; } if ( c < NAME_ESCAPE_TABLE . length ) { String esc = NAME_ESCAPE_TABLE [ c ] ; if ( esc != null ) { encodedValue . append ( esc ) ; continue ; } } encodedValue . append ( c ) ; } return encodedValue . toString ( ) ; }
LDAP Encodes a value for use with a DN . Escapes for LDAP not JNDI!
28,807
static public String nameDecode ( String value ) throws BadLdapGrammarException { if ( value == null ) return null ; StringBuilder decoded = new StringBuilder ( value . length ( ) ) ; int i = 0 ; while ( i < value . length ( ) ) { char currentChar = value . charAt ( i ) ; if ( currentChar == '\\' ) { if ( value . length ( ) <= i + 1 ) { throw new BadLdapGrammarException ( "Unexpected end of value " + "unterminated '\\'" ) ; } else { char nextChar = value . charAt ( i + 1 ) ; if ( nextChar == ',' || nextChar == '=' || nextChar == '+' || nextChar == '<' || nextChar == '>' || nextChar == '#' || nextChar == ';' || nextChar == '\\' || nextChar == '\"' || nextChar == ' ' ) { decoded . append ( nextChar ) ; i += 2 ; } else { if ( value . length ( ) <= i + 2 ) { throw new BadLdapGrammarException ( "Unexpected end of value " + "expected special or hex, found '" + nextChar + "'" ) ; } else { String hexString = "" + nextChar + value . charAt ( i + 2 ) ; decoded . append ( ( char ) Integer . parseInt ( hexString , HEX ) ) ; i += 3 ; } } } } else { decoded . append ( currentChar ) ; i ++ ; } } return decoded . toString ( ) ; }
Decodes a value . Converts escaped chars to ordinary chars .
28,808
public static String printBase64Binary ( byte [ ] val ) { Assert . notNull ( val , "val must not be null!" ) ; String encoded = DatatypeConverter . printBase64Binary ( val ) ; int length = encoded . length ( ) ; StringBuilder sb = new StringBuilder ( length + length / RFC2849_MAX_BASE64_CHARS_PER_LINE ) ; for ( int i = 0 , len = length ; i < len ; i ++ ) { sb . append ( encoded . charAt ( i ) ) ; if ( ( i + 1 ) % RFC2849_MAX_BASE64_CHARS_PER_LINE == 0 ) { sb . append ( '\n' ) ; sb . append ( ' ' ) ; } } return sb . toString ( ) ; }
Converts an array of bytes into a Base64 encoded string according to the rules for converting LDAP Attributes in RFC2849 .
28,809
public static byte [ ] parseBase64Binary ( String val ) { Assert . notNull ( val , "val must not be null!" ) ; int length = val . length ( ) ; StringBuilder sb = new StringBuilder ( length ) ; for ( int i = 0 , len = length ; i < len ; i ++ ) { char c = val . charAt ( i ) ; if ( c == '\n' ) { if ( i + 1 < len && val . charAt ( i + 1 ) == ' ' ) { i ++ ; } continue ; } sb . append ( c ) ; } return DatatypeConverter . parseBase64Binary ( sb . toString ( ) ) ; }
Converts the Base64 encoded string argument into an array of bytes .
28,810
protected Object invokeMethod ( String method , Class < ? > clazz , Object control ) { Method actualMethod = ReflectionUtils . findMethod ( clazz , method ) ; return ReflectionUtils . invokeMethod ( actualMethod , control ) ; }
Utility method for invoking a method on a Control .
28,811
public Set < User > findAllMembers ( Iterable < Name > absoluteIds ) { return Sets . newLinkedHashSet ( userRepo . findAll ( toRelativeIds ( absoluteIds ) ) ) ; }
This method expects absolute DNs of group members . In order to find the actual users the DNs need to have the base LDAP path removed .
28,812
private User updateUserStandard ( LdapName originalId , User existingUser ) { User savedUser = userRepo . save ( existingUser ) ; if ( ! originalId . equals ( savedUser . getId ( ) ) ) { LdapName oldMemberDn = toAbsoluteDn ( originalId ) ; LdapName newMemberDn = toAbsoluteDn ( savedUser . getId ( ) ) ; Collection < Group > groups = groupRepo . findByMember ( oldMemberDn ) ; updateGroupReferences ( groups , oldMemberDn , newMemberDn ) ; } return savedUser ; }
Update the user and - if its id changed - update all group references to the user .
28,813
protected ModificationItem getCompensatingModificationItem ( Attributes originalAttributes , ModificationItem modificationItem ) { Attribute modificationAttribute = modificationItem . getAttribute ( ) ; Attribute originalAttribute = originalAttributes . get ( modificationAttribute . getID ( ) ) ; if ( modificationItem . getModificationOp ( ) == DirContext . REMOVE_ATTRIBUTE ) { if ( modificationAttribute . size ( ) == 0 ) { return new ModificationItem ( DirContext . ADD_ATTRIBUTE , ( Attribute ) originalAttribute . clone ( ) ) ; } else { return new ModificationItem ( DirContext . ADD_ATTRIBUTE , ( Attribute ) modificationAttribute . clone ( ) ) ; } } else if ( modificationItem . getModificationOp ( ) == DirContext . REPLACE_ATTRIBUTE ) { if ( originalAttribute != null ) { return new ModificationItem ( DirContext . REPLACE_ATTRIBUTE , ( Attribute ) originalAttribute . clone ( ) ) ; } else { return new ModificationItem ( DirContext . REMOVE_ATTRIBUTE , new BasicAttribute ( modificationAttribute . getID ( ) ) ) ; } } else { if ( originalAttribute == null ) { return new ModificationItem ( DirContext . REMOVE_ATTRIBUTE , new BasicAttribute ( modificationAttribute . getID ( ) ) ) ; } else { return new ModificationItem ( DirContext . REPLACE_ATTRIBUTE , ( Attribute ) originalAttribute . clone ( ) ) ; } } }
Get a ModificationItem to use for rollback of the supplied modification .
28,814
public int compare ( Object o1 , Object o2 ) { List list1 = ( List ) o1 ; List list2 = ( List ) o2 ; for ( int i = 0 ; i < list1 . size ( ) ; i ++ ) { if ( list2 . size ( ) > i ) { Comparable component1 = ( Comparable ) list1 . get ( i ) ; Comparable component2 = ( Comparable ) list2 . get ( i ) ; int componentsCompared = component1 . compareTo ( component2 ) ; if ( componentsCompared != 0 ) { return componentsCompared ; } } else { return 1 ; } } if ( list2 . size ( ) > list1 . size ( ) ) { return - 1 ; } else { return 0 ; } }
Compare two lists of Comparable objects .
28,815
public String getSqlWithValues ( ) { final StringBuilder sb = new StringBuilder ( ) ; final String statementQuery = getStatementQuery ( ) ; int currentParameter = 0 ; for ( int pos = 0 ; pos < statementQuery . length ( ) ; pos ++ ) { char character = statementQuery . charAt ( pos ) ; if ( statementQuery . charAt ( pos ) == '?' && currentParameter <= parameterValues . size ( ) ) { Value value = parameterValues . get ( currentParameter ) ; sb . append ( value != null ? value . toString ( ) : new Value ( ) . toString ( ) ) ; currentParameter ++ ; } else { sb . append ( character ) ; } } return sb . toString ( ) ; }
Generates the query for the prepared statement with all parameter placeholders replaced with the actual parameter values
28,816
protected static void doLogElapsed ( int connectionId , long timeElapsedNanos , Category category , String prepared , String sql , String url ) { doLog ( connectionId , timeElapsedNanos , category , prepared , sql , url ) ; }
this is an internal method called by logElapsed
28,817
protected static void doLog ( int connectionId , long elapsedNanos , Category category , String prepared , String sql , String url ) { if ( logger == null ) { initialize ( ) ; if ( logger == null ) { return ; } } final String format = P6SpyOptions . getActiveInstance ( ) . getDateformat ( ) ; final String stringNow ; if ( format == null ) { stringNow = Long . toString ( System . currentTimeMillis ( ) ) ; } else { stringNow = new SimpleDateFormat ( format ) . format ( new java . util . Date ( ) ) . trim ( ) ; } logger . logSQL ( connectionId , stringNow , TimeUnit . NANOSECONDS . toMillis ( elapsedNanos ) , category , prepared , sql , url ) ; final boolean stackTrace = P6SpyOptions . getActiveInstance ( ) . getStackTrace ( ) ; if ( stackTrace ) { final String stackTraceClass = P6SpyOptions . getActiveInstance ( ) . getStackTraceClass ( ) ; Exception e = new Exception ( ) ; if ( stackTraceClass != null ) { StringWriter sw = new StringWriter ( ) ; PrintWriter pw = new PrintWriter ( sw ) ; e . printStackTrace ( pw ) ; String stack = sw . toString ( ) ; if ( stack . indexOf ( stackTraceClass ) == - 1 ) { e = null ; } } if ( e != null ) { logger . logException ( e ) ; } } }
Writes log information provided .
28,818
private static boolean meetsThresholdRequirement ( long timeTaken ) { final P6LogLoadableOptions opts = P6LogOptions . getActiveInstance ( ) ; long executionThreshold = null != opts ? opts . getExecutionThreshold ( ) : 0 ; return executionThreshold <= 0 || TimeUnit . NANOSECONDS . toMillis ( timeTaken ) > executionThreshold ; }
on whether on not it has taken greater than x amount of time .
28,819
protected synchronized void bindDataSource ( ) throws SQLException { if ( null != realDataSource ) { return ; } final P6SpyLoadableOptions options = P6SpyOptions . getActiveInstance ( ) ; if ( rdsName == null ) { rdsName = options . getRealDataSource ( ) ; } if ( rdsName == null ) { throw new SQLException ( "P6DataSource: no value for Real Data Source Name, cannot perform jndi lookup" ) ; } Hashtable < String , String > env = null ; String factory ; if ( ( factory = options . getJNDIContextFactory ( ) ) != null ) { env = new Hashtable < String , String > ( ) ; env . put ( Context . INITIAL_CONTEXT_FACTORY , factory ) ; String url = options . getJNDIContextProviderURL ( ) ; if ( url != null ) { env . put ( Context . PROVIDER_URL , url ) ; } String custom = options . getJNDIContextCustom ( ) ; if ( custom != null ) { env . putAll ( parseDelimitedString ( custom ) ) ; } } InitialContext ctx ; try { if ( env != null ) { ctx = new InitialContext ( env ) ; } else { ctx = new InitialContext ( ) ; } realDataSource = ( CommonDataSource ) ctx . lookup ( rdsName ) ; } catch ( NamingException e ) { throw new SQLException ( "P6DataSource: naming exception during jndi lookup of Real Data Source Name of '" + rdsName + "'. " + e . getMessage ( ) , e ) ; } Map < String , String > props = parseDelimitedString ( options . getRealDataSourceProperties ( ) ) ; if ( props != null ) { setDataSourceProperties ( props ) ; } if ( realDataSource == null ) { throw new SQLException ( "P6DataSource: jndi lookup for Real Data Source Name of '" + rdsName + "' failed, cannot bind named data source." ) ; } }
Binds the JNDI DataSource to proxy .
28,820
public void generateLogMessage ( ) { if ( lastRowLogged != currRow ) { P6LogQuery . log ( Category . RESULTSET , this ) ; resultMap . clear ( ) ; lastRowLogged = currRow ; } }
Generates log message with column values accessed if the row s column values have not already been logged .
28,821
public void setExclude ( String exclude ) { optionsRepository . setSet ( String . class , EXCLUDE_LIST , exclude ) ; optionsRepository . set ( String . class , EXCLUDE , P6Util . joinNullSafe ( optionsRepository . getSet ( String . class , EXCLUDE_LIST ) , "," ) ) ; optionsRepository . setOrUnSet ( Pattern . class , INCLUDE_EXCLUDE_PATTERN , computeIncludeExcludePattern ( ) , defaults . get ( INCLUDE_EXCLUDE_PATTERN ) ) ; }
JMX exposed API
28,822
protected Response serve ( ) { invoke ( ) ; String s = ParseTime . getTimezone ( ) . getID ( ) ; JsonObject response = new JsonObject ( ) ; response . addProperty ( "tz" , s ) ; return Response . done ( response ) ; }
Reply value is the current setting
28,823
public V get ( ) { Thread cThr = Thread . currentThread ( ) ; int priority = ( cThr instanceof FJWThr ) ? ( ( FJWThr ) cThr ) . _priority : - 1 ; assert _dt . priority ( ) > priority || ( ( _dt instanceof DRemoteTask || _dt instanceof MRTask2 ) ) : "*** Attempting to block on task (" + _dt . getClass ( ) + ") with equal or lower priority. Can lead to deadlock! " + _dt . priority ( ) + " <= " + priority ; if ( _done ) return result ( ) ; try { try { ForkJoinPool . managedBlock ( this ) ; } catch ( InterruptedException e ) { } } catch ( Throwable t ) { throw new RuntimeException ( t ) ; } if ( _done ) return result ( ) ; assert isCancelled ( ) ; return null ; }
null for canceled tasks including those where the target dies .
28,824
protected int response ( AutoBuffer ab ) { assert _tasknum == ab . getTask ( ) ; if ( _done ) return ab . close ( ) ; int flag = ab . getFlag ( ) ; if ( flag == SERVER_TCP_SEND ) return ab . close ( ) ; assert flag == SERVER_UDP_SEND ; synchronized ( this ) { if ( _done ) return ab . close ( ) ; UDPTimeOutThread . PENDING . remove ( this ) ; _dt . read ( ab ) ; _size_rez = ab . size ( ) ; ab . close ( ) ; _dt . onAck ( ) ; _done = true ; ab . _h2o . taskRemove ( _tasknum ) ; notifyAll ( ) ; } doAllCompletions ( ) ; return 0 ; }
Install it as The Answer packet and wake up anybody waiting on an answer .
28,825
private void accum_all ( Chunk chks [ ] , Chunk wrks , int nnids [ ] ) { final DHistogram hcs [ ] [ ] = _hcs ; int nh [ ] = new int [ hcs . length + 1 ] ; for ( int i : nnids ) if ( i >= 0 ) nh [ i + 1 ] ++ ; for ( int i = 0 ; i < hcs . length ; i ++ ) nh [ i + 1 ] += nh [ i ] ; int rows [ ] = new int [ nnids . length ] ; for ( int row = 0 ; row < nnids . length ; row ++ ) if ( nnids [ row ] >= 0 ) rows [ nh [ nnids [ row ] ] ++ ] = row ; accum_all2 ( chks , wrks , nh , rows ) ; }
histograms once - per - NID but requires pre - sorting the rows by NID .
28,826
private void accum_all2 ( Chunk chks [ ] , Chunk wrks , int nh [ ] , int [ ] rows ) { final DHistogram hcs [ ] [ ] = _hcs ; int bins [ ] = new int [ nbins ] ; double sums [ ] = new double [ nbins ] ; double ssqs [ ] = new double [ nbins ] ; for ( int c = 0 ; c < _ncols ; c ++ ) { Chunk chk = chks [ c ] ; for ( int n = 0 ; n < hcs . length ; n ++ ) { final DRealHistogram rh = ( ( DRealHistogram ) hcs [ n ] [ c ] ) ; if ( rh == null ) continue ; final int lo = n == 0 ? 0 : nh [ n - 1 ] ; final int hi = nh [ n ] ; float min = rh . _min2 ; float max = rh . _maxIn ; if ( rh . _bins . length >= bins . length ) { bins = new int [ rh . _bins . length ] ; sums = new double [ rh . _bins . length ] ; ssqs = new double [ rh . _bins . length ] ; } for ( int xrow = lo ; xrow < hi ; xrow ++ ) { int row = rows [ xrow ] ; float col_data = ( float ) chk . at0 ( row ) ; if ( col_data < min ) min = col_data ; if ( col_data > max ) max = col_data ; int b = rh . bin ( col_data ) ; bins [ b ] ++ ; double resp = wrks . at0 ( row ) ; sums [ b ] += resp ; ssqs [ b ] += resp * resp ; } rh . setMin ( min ) ; rh . setMax ( max ) ; for ( int b = 0 ; b < rh . _bins . length ; b ++ ) { if ( bins [ b ] != 0 ) { Utils . AtomicIntArray . add ( rh . _bins , b , bins [ b ] ) ; bins [ b ] = 0 ; } if ( ssqs [ b ] != 0 ) { rh . incr1 ( b , sums [ b ] , ssqs [ b ] ) ; sums [ b ] = ssqs [ b ] = 0 ; } } } } }
For all columns for all NIDs for all ROWS ...
28,827
public String setAndServe ( String offset ) { _offset . reset ( ) ; _offset . check ( null , offset ) ; _view . reset ( ) ; _view . check ( null , "20" ) ; _filter . reset ( ) ; return new Gson ( ) . toJson ( serve ( ) . _response ) ; }
Used by tests
28,828
public int addArgument ( String str , String next ) { int i = commandLineArgs . length ; int consumed = 1 ; commandLineArgs = Arrays . copyOf ( commandLineArgs , i + 1 ) ; if ( str . startsWith ( "-" ) ) { int startOffset = ( str . startsWith ( "--" ) ) ? 2 : 1 ; String arg = "" ; String opt ; boolean flag = false ; int eqPos = str . indexOf ( "=" ) ; if ( eqPos > 0 || ( next != null && ! next . startsWith ( "-" ) ) ) { if ( eqPos > 0 ) { opt = str . substring ( startOffset , eqPos ) ; arg = str . substring ( eqPos + 1 ) ; } else { opt = str . substring ( startOffset ) ; arg = next ; consumed = 2 ; } } else { flag = true ; opt = str . substring ( startOffset ) ; } commandLineArgs [ i ] = new Entry ( opt , arg , flag , i ) ; return consumed ; } else { commandLineArgs [ i ] = new Entry ( "" , str , true , i ) ; return consumed ; } }
Add a new argument to this command line . The argument will be parsed and add at the end of the list . Bindings have the following format - name = value if value is empty the binding is treated as an option . Options have the form - name . All other strings are treated as values .
28,829
private int extract ( Arg arg , Field [ ] fields ) { int count = 0 ; for ( Field field : fields ) { String name = field . getName ( ) ; Class cl = field . getType ( ) ; String opt = getValue ( name ) ; try { if ( cl . isPrimitive ( ) ) { if ( cl == Boolean . TYPE ) { boolean curval = field . getBoolean ( arg ) ; boolean xval = curval ; if ( opt != null ) xval = ! curval ; if ( "1" . equals ( opt ) || "true" . equals ( opt ) ) xval = true ; if ( "0" . equals ( opt ) || "false" . equals ( opt ) ) xval = false ; if ( opt != null ) field . setBoolean ( arg , xval ) ; } else if ( opt == null || opt . length ( ) == 0 ) continue ; else if ( cl == Integer . TYPE ) field . setInt ( arg , Integer . parseInt ( opt ) ) ; else if ( cl == Float . TYPE ) field . setFloat ( arg , Float . parseFloat ( opt ) ) ; else if ( cl == Double . TYPE ) field . setDouble ( arg , Double . parseDouble ( opt ) ) ; else if ( cl == Long . TYPE ) field . setLong ( arg , Long . parseLong ( opt ) ) ; else continue ; count ++ ; } else if ( cl == String . class ) { if ( opt != null ) { field . set ( arg , opt ) ; count ++ ; } } } catch ( Exception e ) { Log . err ( "Argument failed with " , e ) ; } } Arrays . sort ( commandLineArgs ) ; for ( int i = 0 ; i < commandLineArgs . length ; i ++ ) commandLineArgs [ i ] . position = i ; return count ; }
Extracts bindings and options ; and sets appropriate fields in the CommandLineArgument object .
28,830
private void parse ( String [ ] s ) { commandLineArgs = new Entry [ 0 ] ; for ( int i = 0 ; i < s . length ; ) { String next = ( i + 1 < s . length ) ? s [ i + 1 ] : null ; i += addArgument ( s [ i ] , next ) ; } }
Parse the command line arguments and extracts options . The current implementation allows the same command line instance to parse several argument lists the results will be merged .
28,831
public float progress ( ) { Freezable f = UKV . get ( destination_key ) ; if ( f instanceof Progress ) return ( ( Progress ) f ) . progress ( ) ; return 0 ; }
Return progress of this job .
28,832
public static Job [ ] all ( ) { List list = UKV . get ( LIST ) ; Job [ ] jobs = new Job [ list == null ? 0 : list . _jobs . length ] ; int j = 0 ; for ( int i = 0 ; i < jobs . length ; i ++ ) { Job job = UKV . get ( list . _jobs [ i ] ) ; if ( job != null ) jobs [ j ++ ] = job ; } if ( j < jobs . length ) jobs = Arrays . copyOf ( jobs , j ) ; return jobs ; }
Returns a list of all jobs in a system .
28,833
public static boolean isRunning ( Key job_key ) { Job j = UKV . get ( job_key ) ; assert j != null : "Job should be always in DKV!" ; return j . isRunning ( ) ; }
Check if given job is running .
28,834
public void remove ( ) { end_time = System . currentTimeMillis ( ) ; if ( state == JobState . RUNNING ) state = JobState . DONE ; replaceByJobHandle ( ) ; }
Marks job as finished and records job end time .
28,835
public static Job findJobByDest ( final Key destKey ) { Job job = null ; for ( Job current : Job . all ( ) ) { if ( current . dest ( ) . equals ( destKey ) ) { job = current ; break ; } } return job ; }
Finds a job with given dest key or returns null
28,836
public Job fork ( ) { init ( ) ; H2OCountedCompleter task = new H2OCountedCompleter ( ) { public void compute2 ( ) { try { try { Job . this . exec ( ) ; Job . this . remove ( ) ; } catch ( Throwable t ) { if ( ! ( t instanceof ExpectedExceptionForDebug ) ) Log . err ( t ) ; Job . this . cancel ( t ) ; } } finally { tryComplete ( ) ; } } } ; start ( task ) ; H2O . submitTask ( task ) ; return this ; }
Forks computation of this job .
28,837
public static void waitUntilJobEnded ( Key jobkey , int pollingIntervalMillis ) { while ( true ) { if ( Job . isEnded ( jobkey ) ) { return ; } try { Thread . sleep ( pollingIntervalMillis ) ; } catch ( Exception ignore ) { } } }
Block synchronously waiting for a job to end success or not .
28,838
public static < T extends FrameJob > T hygiene ( T job ) { job . source = null ; return job ; }
Hygienic method to prevent accidental capture of non desired values .
28,839
protected Split ltSplit ( int col , Data d , int [ ] dist , int distWeight , Random rand ) { final int [ ] distL = new int [ d . classes ( ) ] , distR = dist . clone ( ) ; final double upperBoundReduction = upperBoundReduction ( d . classes ( ) ) ; double maxReduction = - 1 ; int bestSplit = - 1 ; int totL = 0 , totR = 0 ; int classL = 0 , classR = 0 ; for ( int e : distR ) { totR += e ; if ( e != 0 ) classR ++ ; } for ( int i = 0 ; i < _columnDists [ col ] . length - 1 ; ++ i ) { int [ ] cdis = _columnDists [ col ] [ i ] ; for ( int j = 0 ; j < distL . length ; ++ j ) { int v = cdis [ j ] ; if ( v == 0 ) continue ; totL += v ; totR -= v ; if ( distL [ j ] == 0 ) classL ++ ; distL [ j ] += v ; distR [ j ] -= v ; if ( distR [ j ] == 0 ) classR -- ; } if ( totL == 0 ) continue ; if ( totR == 0 ) continue ; double eL = 0 , eR = 0 ; if ( classL > 1 ) for ( int e : distL ) eL += gain ( e , totL ) ; if ( classR > 1 ) for ( int e : distR ) eR += gain ( e , totR ) ; double eReduction = upperBoundReduction - ( ( eL * totL + eR * totR ) / ( totL + totR ) ) ; if ( eReduction == maxReduction ) { } else if ( eReduction > maxReduction ) { bestSplit = i ; maxReduction = eReduction ; } } return bestSplit == - 1 ? Split . impossible ( Utils . maxIndex ( dist , _random ) ) : Split . split ( col , bestSplit , maxReduction ) ; }
LessThenEqual splits s
28,840
public boolean inetAddressOnNetwork ( InetAddress ia ) { int i = ( _o1 << 24 ) | ( _o2 << 16 ) | ( _o3 << 8 ) | ( _o4 << 0 ) ; byte [ ] barr = ia . getAddress ( ) ; if ( barr . length != 4 ) { return false ; } int j = ( ( ( int ) barr [ 0 ] & 0xff ) << 24 ) | ( ( ( int ) barr [ 1 ] & 0xff ) << 16 ) | ( ( ( int ) barr [ 2 ] & 0xff ) << 8 ) | ( ( ( int ) barr [ 3 ] & 0xff ) << 0 ) ; long mask1 = ( ( long ) 1 << ( 32 - _bits ) ) ; long mask2 = mask1 - 1 ; long mask3 = ~ mask2 ; int mask4 = ( int ) ( mask3 & 0xffffffff ) ; if ( ( i & mask4 ) == ( j & mask4 ) ) { return true ; } return false ; }
Test if an internet address lives on this user specified network .
28,841
public Lockable write_lock ( Key job_key ) { Log . debug ( Log . Tag . Sys . LOCKS , "write-lock " + _key + " by job " + job_key ) ; return ( ( PriorWriteLock ) new PriorWriteLock ( job_key ) . invoke ( _key ) ) . _old ; }
Write - lock this returns OLD guy
28,842
public T delete_and_lock ( Key job_key ) { Lockable old = write_lock ( job_key ) ; if ( old != null ) { Log . debug ( Log . Tag . Sys . LOCKS , "lock-then-clear " + _key + " by job " + job_key ) ; old . delete_impl ( new Futures ( ) ) . blockForPending ( ) ; } return ( T ) this ; }
Write - lock this delete any old thing returns NEW guy
28,843
public static void delete ( Key k , Key job_key ) { if ( k == null ) return ; Value val = DKV . get ( k ) ; if ( val == null ) return ; if ( ! val . isLockable ( ) ) UKV . remove ( k ) ; else ( ( Lockable ) val . get ( ) ) . delete ( job_key , 0.0f ) ; }
Write - lock & delete k . Will fail if k is locked by anybody other than job_key
28,844
public void delete ( Key job_key , float dummy ) { if ( _key != null ) { Log . debug ( Log . Tag . Sys . LOCKS , "lock-then-delete " + _key + " by job " + job_key ) ; new PriorWriteLock ( job_key ) . invoke ( _key ) ; } Futures fs = new Futures ( ) ; delete_impl ( fs ) ; if ( _key != null ) DKV . remove ( _key , fs ) ; fs . blockForPending ( ) ; }
Will fail if locked by anybody other than job_key
28,845
public void update ( Key job_key ) { Log . debug ( Log . Tag . Sys . LOCKS , "update write-locked " + _key + " by job " + job_key ) ; new Update ( job_key ) . invoke ( _key ) ; }
Atomically set a new version of self
28,846
public void unlock ( Key job_key ) { if ( _key != null ) { Log . debug ( Log . Tag . Sys . LOCKS , "unlock " + _key + " by job " + job_key ) ; new Unlock ( job_key ) . invoke ( _key ) ; } }
Atomically set a new version of self & unlock .
28,847
static public DHistogram [ ] initialHist ( Frame fr , int ncols , int nbins , DHistogram hs [ ] , int min_rows , boolean doGrpSplit , boolean isBinom ) { Vec vecs [ ] = fr . vecs ( ) ; for ( int c = 0 ; c < ncols ; c ++ ) { Vec v = vecs [ c ] ; final float minIn = ( float ) Math . max ( v . min ( ) , - Float . MAX_VALUE ) ; final float maxIn = ( float ) Math . min ( v . max ( ) , Float . MAX_VALUE ) ; final float maxEx = find_maxEx ( maxIn , v . isInt ( ) ? 1 : 0 ) ; final long vlen = v . length ( ) ; hs [ c ] = v . naCnt ( ) == vlen || v . min ( ) == v . max ( ) ? null : make ( fr . _names [ c ] , nbins , ( byte ) ( v . isEnum ( ) ? 2 : ( v . isInt ( ) ? 1 : 0 ) ) , minIn , maxEx , vlen , min_rows , doGrpSplit , isBinom ) ; } return hs ; }
The initial histogram bins are setup from the Vec rollups .
28,848
public boolean isConstantResponse ( ) { double m = Double . NaN ; for ( int b = 0 ; b < _bins . length ; b ++ ) { if ( _bins [ b ] == 0 ) continue ; if ( var ( b ) > 1e-14 ) return false ; double mean = mean ( b ) ; if ( mean != m ) if ( Double . isNaN ( m ) ) m = mean ; else if ( Math . abs ( m - mean ) > 1e-6 ) return false ; } return true ; }
Check for a constant response variable
28,849
static void lockCloud ( ) { if ( _cloudLocked ) return ; synchronized ( Paxos . class ) { while ( ! _commonKnowledge ) try { Paxos . class . wait ( ) ; } catch ( InterruptedException ie ) { } _cloudLocked = true ; } }
change cloud shape - the distributed writes will be in the wrong place .
28,850
protected Vec [ ] [ ] makeTemplates ( ) { Vec anyVec = dataset . anyVec ( ) ; final long [ ] [ ] espcPerSplit = computeEspcPerSplit ( anyVec . _espc , anyVec . length ( ) ) ; final int num = dataset . numCols ( ) ; final int nsplits = espcPerSplit . length ; final String [ ] [ ] domains = dataset . domains ( ) ; final boolean [ ] uuids = dataset . uuids ( ) ; final byte [ ] times = dataset . times ( ) ; Vec [ ] [ ] t = new Vec [ nsplits ] [ ] ; for ( int i = 0 ; i < nsplits ; i ++ ) { t [ i ] = new Vec ( Vec . newKey ( ) , espcPerSplit [ i ] ) . makeZeros ( num , domains , uuids , times ) ; } return t ; }
Create a templates for vector composing output frame
28,851
public static PSetupGuess guessSetup ( byte [ ] bits ) { InputStream is = new ByteArrayInputStream ( bits ) ; XlsParser p = new XlsParser ( ) ; CustomInspectDataOut dout = new CustomInspectDataOut ( ) ; try { p . streamParse ( is , dout ) ; } catch ( Exception e ) { } return new PSetupGuess ( new ParserSetup ( ParserType . XLS , CsvParser . AUTO_SEP , dout . _ncols , dout . _header , dout . _header ? dout . data ( ) [ 0 ] : null , false ) , dout . _nlines , dout . _invalidLines , dout . data ( ) , dout . _nlines > dout . _invalidLines , null ) ; }
Try to parse the bits as svm light format return SVMParser instance if the input is in svm light format null otherwise .
28,852
public static Frame mmul ( Frame x , Frame y ) { MatrixMulJob mmj = new MatrixMulJob ( Key . make ( "mmul" + ++ cnt ) , Key . make ( "mmulProgress" ) , x , y ) ; mmj . fork ( ) . _fjtask . join ( ) ; DKV . remove ( mmj . _dstKey ) ; mmj . _z . reloadVecs ( ) ; return mmj . _z ; }
to be invoked from R expression
28,853
public T invokeOnAllNodes ( ) { H2O cloud = H2O . CLOUD ; Key [ ] args = new Key [ cloud . size ( ) ] ; String skey = "RunOnAll" + Key . rand ( ) ; for ( int i = 0 ; i < args . length ; ++ i ) args [ i ] = Key . make ( skey , ( byte ) 0 , Key . DFJ_INTERNAL_USER , cloud . _memary [ i ] ) ; invoke ( args ) ; for ( Key arg : args ) DKV . remove ( arg ) ; return self ( ) ; }
Invokes the task on all nodes
28,854
public boolean block ( ) throws InterruptedException { while ( ! isDone ( ) ) { try { get ( ) ; } catch ( ExecutionException eex ) { Throwable tex = eex . getCause ( ) ; if ( tex instanceof Error ) throw ( Error ) tex ; if ( tex instanceof DistributedException ) throw ( DistributedException ) tex ; if ( tex instanceof JobCancelledException ) throw ( JobCancelledException ) tex ; throw new RuntimeException ( tex ) ; } catch ( CancellationException cex ) { Log . errRTExcept ( cex ) ; } } return true ; }
deadlock is otherwise all threads would block on waits .
28,855
private final void dcompute ( ) { H2O cloud = H2O . CLOUD ; int lo = cloud . _memary . length , hi = - 1 ; for ( Key k : _keys ) { int i = k . home ( cloud ) ; if ( i < lo ) lo = i ; if ( i > hi ) hi = i ; } final ArrayList < Key > locals = new ArrayList < Key > ( ) ; final ArrayList < Key > lokeys = new ArrayList < Key > ( ) ; final ArrayList < Key > hikeys = new ArrayList < Key > ( ) ; int self_idx = cloud . nidx ( H2O . SELF ) ; int mid = ( lo + hi ) >>> 1 ; for ( Key k : _keys ) { int idx = k . home ( cloud ) ; if ( idx == self_idx ) locals . add ( k ) ; else if ( idx < mid ) lokeys . add ( k ) ; else hikeys . add ( k ) ; } _lo = remote_compute ( lokeys ) ; _hi = remote_compute ( hikeys ) ; if ( locals . size ( ) != 0 ) { _local = clone ( ) ; _local . _is_local = true ; _local . _keys = locals . toArray ( new Key [ locals . size ( ) ] ) ; _local . init ( ) ; H2O . submitTask ( _local ) ; } else { tryComplete ( ) ; } }
Override to specify local work
28,856
private final void donCompletion ( CountedCompleter caller ) { assert _lo == null || _lo . isDone ( ) ; assert _hi == null || _hi . isDone ( ) ; if ( _lo != null ) reduce2 ( _lo . get ( ) ) ; if ( _hi != null ) reduce2 ( _hi . get ( ) ) ; if ( _local != null ) reduce2 ( _local ) ; if ( _local != null && _local . _fs != null ) _local . _fs . blockForPending ( ) ; _keys = null ; if ( _top_level ) postGlobal ( ) ; }
Override for local completion
28,857
protected JsonObject argumentsToJson ( ) { JsonObject result = new JsonObject ( ) ; for ( Argument a : _arguments ) { if ( a . specified ( ) ) result . addProperty ( a . _name , a . originalValue ( ) ) ; } return result ; }
Returns a json object containing all arguments specified to the page .
28,858
protected void init ( ) { super . init ( ) ; assert 0 <= ntrees && ntrees < 1000000 ; if ( source . numRows ( ) - response . naCnt ( ) <= 0 ) throw new IllegalArgumentException ( "Dataset contains too many NAs!" ) ; if ( ! classification && ( ! ( response . isEnum ( ) || response . isInt ( ) ) ) ) throw new IllegalArgumentException ( "Classification cannot be performed on a float column!" ) ; if ( classification ) { if ( 0.0f > sample_rate || sample_rate > 1.0f ) throw new IllegalArgumentException ( "Sampling rate must be in [0,1] but found " + sample_rate ) ; } if ( regression ) throw new IllegalArgumentException ( "SpeeDRF does not currently support regression." ) ; }
Put here all precondition verification
28,859
public static void build ( final Key jobKey , final Key modelKey , final DRFParams drfParams , final Data localData , int ntrees , int numSplitFeatures , int [ ] rowsPerChunks ) { Timer t_alltrees = new Timer ( ) ; Tree [ ] trees = new Tree [ ntrees ] ; Log . info ( Log . Tag . Sys . RANDF , "Building " + ntrees + " trees" ) ; Log . info ( Log . Tag . Sys . RANDF , "Number of split features: " + numSplitFeatures ) ; Log . info ( Log . Tag . Sys . RANDF , "Starting RF computation with " + localData . rows ( ) + " rows " ) ; Random rnd = Utils . getRNG ( localData . seed ( ) + ROOT_SEED_ADD ) ; Sampling sampler = createSampler ( drfParams , rowsPerChunks ) ; byte producerId = ( byte ) H2O . SELF . index ( ) ; for ( int i = 0 ; i < ntrees ; ++ i ) { long treeSeed = rnd . nextLong ( ) + TREE_SEED_INIT ; trees [ i ] = new Tree ( jobKey , modelKey , localData , producerId , drfParams . max_depth , drfParams . stat_type , numSplitFeatures , treeSeed , i , drfParams . _exclusiveSplitLimit , sampler , drfParams . _verbose , drfParams . regression , ! drfParams . _useNonLocalData , ( ( SpeeDRFModel ) UKV . get ( modelKey ) ) . score_pojo ) ; } Log . info ( "Invoking the tree build tasks on all nodes." ) ; DRemoteTask . invokeAll ( trees ) ; Log . info ( Log . Tag . Sys . RANDF , "All trees (" + ntrees + ") done in " + t_alltrees ) ; }
Build random forest for data stored on this node .
28,860
static void listJobs ( ) throws Exception { HttpClient client = new HttpClient ( ) ; GetMethod get = new GetMethod ( URL + "/Jobs.json" ) ; int status = client . executeMethod ( get ) ; if ( status != 200 ) throw new Exception ( get . getStatusText ( ) ) ; Gson gson = new Gson ( ) ; JobsRes res = gson . fromJson ( new InputStreamReader ( get . getResponseBodyAsStream ( ) ) , JobsRes . class ) ; System . out . println ( "Running jobs:" ) ; for ( Job job : res . jobs ) System . out . println ( job . description + " " + job . destination_key ) ; get . releaseConnection ( ) ; }
Lists jobs currently running .
28,861
static void exportModel ( ) throws Exception { HttpClient client = new HttpClient ( ) ; GetMethod get = new GetMethod ( URL + "/2/ExportModel.json?model=MyInitialNeuralNet" ) ; int status = client . executeMethod ( get ) ; if ( status != 200 ) throw new Exception ( get . getStatusText ( ) ) ; JsonObject response = ( JsonObject ) new JsonParser ( ) . parse ( new InputStreamReader ( get . getResponseBodyAsStream ( ) ) ) ; JsonElement model = response . get ( "model" ) ; JsonWriter writer = new JsonWriter ( new FileWriter ( JSON_FILE ) ) ; writer . setLenient ( true ) ; writer . setIndent ( " " ) ; Streams . write ( model , writer ) ; writer . close ( ) ; get . releaseConnection ( ) ; }
Exports a model to a JSON file .
28,862
public static void importModel ( ) throws Exception { HttpClient client = new HttpClient ( ) ; PostMethod post = new PostMethod ( URL + "/Upload.json?key=" + JSON_FILE . getName ( ) ) ; Part [ ] parts = { new FilePart ( JSON_FILE . getName ( ) , JSON_FILE ) } ; post . setRequestEntity ( new MultipartRequestEntity ( parts , post . getParams ( ) ) ) ; if ( 200 != client . executeMethod ( post ) ) throw new RuntimeException ( "Request failed: " + post . getStatusLine ( ) ) ; post . releaseConnection ( ) ; GetMethod get = new GetMethod ( URL + "/2/ImportModel.json?" + "destination_key=MyImportedNeuralNet&" + "type=NeuralNetModel&" + "json=" + JSON_FILE . getName ( ) ) ; if ( 200 != client . executeMethod ( get ) ) throw new RuntimeException ( "Request failed: " + get . getStatusLine ( ) ) ; get . releaseConnection ( ) ; }
Imports a model from a JSON file .
28,863
private void checkAndLimitFeatureUsedPerSplit ( Frame fr ) { int validCols = fr . numCols ( ) - 1 ; if ( validCols < _rfParams . num_split_features ) { Log . info ( Log . Tag . Sys . RANDF , "Limiting features from " + _rfParams . num_split_features + " to " + validCols + " because there are no more valid columns in the dataset" ) ; _rfParams . num_split_features = validCols ; } }
Check that we have proper number of valid columns vs . features selected if not cap
28,864
private long getChunkId ( final Frame fr ) { Key [ ] keys = new Key [ fr . anyVec ( ) . nChunks ( ) ] ; for ( int i = 0 ; i < fr . anyVec ( ) . nChunks ( ) ; ++ i ) { keys [ i ] = fr . anyVec ( ) . chunkKey ( i ) ; } for ( int i = 0 ; i < keys . length ; ++ i ) { if ( keys [ i ] . home ( ) ) return i ; } return - 99999 ; }
Return chunk index of the first chunk on this node . Used to identify the trees built here .
28,865
public static String JSON2HTML ( String name ) { if ( name . length ( ) < 1 ) return name ; if ( name == "row" ) { return name . substring ( 0 , 1 ) . toUpperCase ( ) + name . replace ( "_" , " " ) . substring ( 1 ) ; } return name . substring ( 0 , 1 ) + name . replace ( "_" , " " ) . substring ( 1 ) ; }
Returns the name of the JSON property pretty printed . That is spaces instead of underscores and capital first letter .
28,866
public static PSetupGuess guessSetup ( byte [ ] bytes ) { int i = bytes . length - 1 ; while ( i > 0 && bytes [ i ] != '\n' ) -- i ; assert i >= 0 ; InputStream is = new ByteArrayInputStream ( Arrays . copyOf ( bytes , i ) ) ; SVMLightParser p = new SVMLightParser ( new ParserSetup ( ParserType . SVMLight , CsvParser . AUTO_SEP , false ) ) ; InspectDataOut dout = new InspectDataOut ( ) ; try { p . streamParse ( is , dout ) ; } catch ( Exception e ) { throw new RuntimeException ( e ) ; } return new PSetupGuess ( new ParserSetup ( ParserType . SVMLight , CsvParser . AUTO_SEP , dout . _ncols , false , null , false ) , dout . _nlines , dout . _invalidLines , dout . data ( ) , dout . _ncols > 0 && dout . _nlines > 0 && dout . _nlines > dout . _invalidLines , dout . errors ( ) ) ; }
Try to parse the bytes as svm light format return SVMParser instance if the input is in svm light format null otherwise .
28,867
public static Unsafe getUnsafe ( ) { if ( UtilUnsafe . class . getClassLoader ( ) == null ) return Unsafe . getUnsafe ( ) ; try { final Field fld = Unsafe . class . getDeclaredField ( "theUnsafe" ) ; fld . setAccessible ( true ) ; return ( Unsafe ) fld . get ( UtilUnsafe . class ) ; } catch ( Exception e ) { throw new RuntimeException ( "Could not obtain access to sun.misc.Unsafe" , e ) ; } }
Fetch the Unsafe . Use With Caution .
28,868
protected void bprop ( float target ) { assert ( target != missing_real_value ) ; if ( params . loss != Loss . MeanSquare ) throw new UnsupportedOperationException ( "Regression is only implemented for MeanSquare error." ) ; final int row = 0 ; final float g = target - _a . get ( row ) ; float m = momentum ( ) ; float r = _minfo . adaDelta ( ) ? 0 : rate ( _minfo . get_processed_total ( ) ) * ( 1f - m ) ; bprop ( row , g , r , m ) ; }
Backpropagation for regression
28,869
public double score_interpreter ( final HashMap < String , Comparable > row ) { double score = _initialScore ; for ( int i = 0 ; i < _rules . length ; i ++ ) score += _rules [ i ] . score ( row . get ( _colNames [ i ] ) ) ; return score ; }
Use the rule interpreter
28,870
public static String getName ( String pname , DataTypes type , StringBuilder sb ) { String jname = xml2jname ( pname ) ; return jname ; }
to emit it at runtime .
28,871
private boolean set_cache ( long cache ) { while ( true ) { long old = _cache ; if ( ! H2O . larger ( cloud ( cache ) , cloud ( old ) ) ) return false ; assert cloud ( cache ) != cloud ( old ) || cache == old ; if ( old == cache ) return true ; if ( _cacheUpdater . compareAndSet ( this , old , cache ) ) return true ; } }
Update the cache but only to strictly newer Clouds
28,872
public long cloud_info ( H2O cloud ) { long x = _cache ; if ( cloud ( x ) == cloud . _idx ) return x ; char home = ( char ) D ( 0 ) ; int desired = desired ( x ) ; int replica = - 1 ; for ( int i = 0 ; i < desired ; i ++ ) { int idx = D ( i ) ; if ( idx >= 0 && cloud . _memary [ idx ] == H2O . SELF ) { replica = i ; break ; } } long cache = build_cache ( cloud . _idx , home , replica , desired ) ; set_cache ( cache ) ; return cache ; }
Return the info word for this Cloud . Use the cache if possible
28,873
static public Key make ( byte [ ] kb , byte rf ) { if ( rf == - 1 ) throw new IllegalArgumentException ( ) ; Key key = new Key ( kb ) ; Key key2 = H2O . getk ( key ) ; if ( key2 != null ) return key2 ; H2O cloud = H2O . CLOUD ; key . _cache = build_cache ( cloud . _idx - 1 , 0 , 0 , rf ) ; key . cloud_info ( cloud ) ; return key ; }
Make new Keys . Optimistically attempt interning but no guarantee .
28,874
static public String rand ( ) { UUID uid = UUID . randomUUID ( ) ; long l1 = uid . getLeastSignificantBits ( ) ; long l2 = uid . getMostSignificantBits ( ) ; return "_" + Long . toHexString ( l1 ) + Long . toHexString ( l2 ) ; }
A random string useful as a Key name or partial Key suffix .
28,875
static public Key make ( String s , byte rf , byte systemType , H2ONode ... replicas ) { return make ( decodeKeyName ( s ) , rf , systemType , replicas ) ; }
If the addresses are not specified returns a key with no home information .
28,876
static public Key make ( byte [ ] kb , byte rf , byte systemType , H2ONode ... replicas ) { assert 0 <= replicas . length && replicas . length <= 3 ; assert systemType < 32 ; AutoBuffer ab = new AutoBuffer ( ) ; ab . put1 ( systemType ) . put1 ( replicas . length ) ; for ( H2ONode h2o : replicas ) h2o . write ( ab ) ; ab . put4 ( - 1 ) ; ab . putA1 ( kb , kb . length ) ; return make ( Arrays . copyOf ( ab . buf ( ) , ab . position ( ) ) , rf ) ; }
Make a Key which is homed to specific nodes .
28,877
final public static Key makeSystem ( String s ) { byte [ ] kb = decodeKeyName ( s ) ; byte [ ] kb2 = new byte [ kb . length + 1 ] ; System . arraycopy ( kb , 0 , kb2 , 1 , kb . length ) ; kb2 [ 0 ] = Key . BUILT_IN_KEY ; return Key . make ( kb2 ) ; }
Hide a user key by turning it into a system key of type HIDDEN_USER_KEY
28,878
protected void decorateActiveStep ( final TutorStep step , StringBuilder sb ) { sb . append ( "<h4>" ) . append ( step . summary ( ) ) . append ( "</h4>" ) ; sb . append ( step . content ( ) ) ; }
Shows the active workflow step
28,879
public void reduce ( JStackCollectorTask that ) { if ( _result == null ) _result = that . _result ; else for ( int i = 0 ; i < _result . length ; ++ i ) if ( _result [ i ] == null ) _result [ i ] = that . _result [ i ] ; }
for each node in the cloud it contains all threads stack traces
28,880
public float [ ] predict ( Map < String , Double > row , double data [ ] , float preds [ ] ) { return predict ( map ( row , data ) , preds ) ; }
Does the mapping lookup for every row no allocation
28,881
private void emitLogHeader ( Context context , String mapredTaskId ) throws IOException , InterruptedException { Configuration conf = context . getConfiguration ( ) ; Text textId = new Text ( mapredTaskId ) ; for ( Map . Entry < String , String > entry : conf ) { StringBuilder sb = new StringBuilder ( ) ; sb . append ( entry . getKey ( ) ) ; sb . append ( "=" ) ; sb . append ( entry . getValue ( ) ) ; context . write ( textId , new Text ( sb . toString ( ) ) ) ; } context . write ( textId , new Text ( "----- Properties -----" ) ) ; String [ ] plist = { "mapred.local.dir" , "mapred.child.java.opts" , } ; for ( String k : plist ) { String v = conf . get ( k ) ; if ( v == null ) { v = "(null)" ; } context . write ( textId , new Text ( k + " " + v ) ) ; } String userDir = System . getProperty ( "user.dir" ) ; context . write ( textId , new Text ( "user.dir " + userDir ) ) ; try { java . net . InetAddress localMachine = java . net . InetAddress . getLocalHost ( ) ; context . write ( textId , new Text ( "hostname " + localMachine . getHostName ( ) ) ) ; } catch ( java . net . UnknownHostException uhe ) { } }
Emit a bunch of logging output at the beginning of the map task .
28,882
protected Response serve ( ) { if ( src_key == null ) return RequestServer . _http404 . serve ( ) ; Vec v = src_key . anyVec ( ) ; if ( v . isEnum ( ) ) { map = Arrays . asList ( v . domain ( ) ) . indexOf ( str ) ; } else if ( v . masterVec ( ) != null && v . masterVec ( ) . isEnum ( ) ) { map = Arrays . asList ( v . masterVec ( ) . domain ( ) ) . indexOf ( str ) ; } else { map = - 1 ; } return Response . done ( this ) ; }
Just validate the frame and fill in the summary bits
28,883
static public Value DputIfMatch ( Key key , Value val , Value old , Futures fs ) { return DputIfMatch ( key , val , old , fs , false ) ; }
to consume .
28,884
static public void write_barrier ( ) { for ( H2ONode h2o : H2O . CLOUD . _memary ) for ( RPC rpc : h2o . tasks ( ) ) if ( rpc . _dt instanceof TaskPutKey || rpc . _dt instanceof Atomic ) rpc . get ( ) ; }
Used to order successive writes .
28,885
static public Value get ( Key key , int len , int priority ) { while ( true ) { H2O cloud = H2O . CLOUD ; Value val = H2O . get ( key ) ; if ( val != null ) { if ( len > val . _max ) len = val . _max ; if ( len == 0 || val . rawMem ( ) != null || val . rawPOJO ( ) != null || val . isPersisted ( ) ) return val ; assert ! key . home ( ) ; } H2ONode home = cloud . _memary [ key . home ( cloud ) ] ; if ( home == H2O . SELF ) return null ; for ( RPC < ? > rpc : home . tasks ( ) ) if ( rpc . _dt instanceof TaskPutKey ) { assert rpc . _target == home ; TaskPutKey tpk = ( TaskPutKey ) rpc . _dt ; Key k = tpk . _key ; if ( k != null && key . equals ( k ) ) return tpk . _xval ; } return TaskGetKey . get ( home , key , priority ) ; } }
User - Weak - Get a Key from the distributed cloud .
28,886
private void zipDir ( String dir2zip , ZipOutputStream zos ) throws IOException { try { File zipDir = new File ( dir2zip ) ; String [ ] dirList = zipDir . list ( ) ; byte [ ] readBuffer = new byte [ 4096 ] ; int bytesIn = 0 ; for ( int i = 0 ; i < dirList . length ; i ++ ) { File f = new File ( zipDir , dirList [ i ] ) ; if ( f . isDirectory ( ) ) { String filePath = f . getPath ( ) ; zipDir ( filePath , zos ) ; continue ; } FileInputStream fis = new FileInputStream ( f ) ; ZipEntry anEntry = new ZipEntry ( f . getPath ( ) ) ; anEntry . setTime ( f . lastModified ( ) ) ; zos . putNextEntry ( anEntry ) ; boolean stopEarlyBecauseTooMuchData = false ; while ( ( bytesIn = fis . read ( readBuffer ) ) != - 1 ) { zos . write ( readBuffer , 0 , bytesIn ) ; if ( baos . size ( ) > MAX_SIZE ) { stopEarlyBecauseTooMuchData = true ; break ; } } fis . close ( ) ; zos . closeEntry ( ) ; if ( stopEarlyBecauseTooMuchData ) { Log . warn ( "LogCollectorTask stopEarlyBecauseTooMuchData" ) ; break ; } } } catch ( Exception e ) { } }
here is the code for the method
28,887
public float [ ] scoreKey ( Object modelKey , String [ ] colNames , String domains [ ] [ ] , double [ ] row ) { Key key = ( Key ) modelKey ; String sk = key . toString ( ) ; Value v = DKV . get ( key ) ; if ( v == null ) throw new IllegalArgumentException ( "Key " + sk + " not found!" ) ; try { return scoreModel ( v . get ( ) , colNames , domains , row ) ; } catch ( Throwable t ) { Log . err ( t ) ; throw new IllegalArgumentException ( "Key " + sk + " is not a Model key" ) ; } }
All - in - one call to lookup a model map the columns and score
28,888
void incr1 ( int b , double y , double yy ) { Utils . AtomicDoubleArray . add ( _sums , b , y ) ; Utils . AtomicDoubleArray . add ( _ssqs , b , yy ) ; }
Same except square done by caller
28,889
protected final String checkArguments ( Properties args , RequestType type ) { for ( Argument arg : _arguments ) arg . reset ( ) ; if ( type == RequestType . query ) return buildQuery ( args , type ) ; if ( H2O . OPT_ARGS . check_rest_params && ! ( this instanceof GridSearch ) && ! ( this instanceof HTTP500 ) ) { Enumeration en = args . propertyNames ( ) ; while ( en . hasMoreElements ( ) ) { boolean found = false ; String key = ( String ) en . nextElement ( ) ; for ( Argument arg : _arguments ) { if ( arg . _name . equals ( key ) ) { found = true ; break ; } } if ( ! found ) { return jsonError ( "Request specifies the argument '" + key + "' but it is not a valid parameter for this query " + this . getClass ( ) . getName ( ) ) . toString ( ) ; } } } for ( Argument arg : _arguments ) { if ( ! arg . disabled ( ) ) { try { arg . check ( RequestQueries . this , args . getProperty ( arg . _name , "" ) ) ; queryArgumentValueSet ( arg , args ) ; } catch ( IllegalArgumentException e ) { if ( type == RequestType . json ) return jsonError ( "Argument '" + arg . _name + "' error: " + e . getMessage ( ) ) . toString ( ) ; else return buildQuery ( args , type ) ; } } } return null ; }
Checks the given arguments .
28,890
static public void basic_packet_handling ( AutoBuffer ab ) throws java . io . IOException { int drop = H2O . OPT_ARGS . random_udp_drop != null && RANDOM_UDP_DROP . nextInt ( 5 ) == 0 ? 2 : 0 ; TimeLine . record_recv ( ab , false , drop ) ; ab . _h2o . _last_heard_from = System . currentTimeMillis ( ) ; int ctrl = ab . getCtrl ( ) ; ab . getPort ( ) ; if ( ctrl == UDP . udp . timeline . ordinal ( ) ) { UDP . udp . timeline . _udp . call ( ab ) ; return ; } if ( ctrl == UDP . udp . rebooted . ordinal ( ) ) UDPRebooted . checkForSuicide ( ctrl , ab ) ; if ( drop != 0 ) return ; H2O cloud = H2O . CLOUD ; boolean is_member = cloud . contains ( ab . _h2o ) ; if ( UDP . udp . UDPS [ ctrl ] . _paxos || is_member ) { H2O . submitTask ( new FJPacket ( ab , ctrl ) ) ; return ; } _unknown_packets_per_sec ++ ; long timediff = ab . _h2o . _last_heard_from - _unknown_packet_time ; if ( timediff > 1000 ) { Log . warn ( "UDP packets from outside the cloud: " + _unknown_packets_per_sec + "/sec, last one from " + ab . _h2o + " @ " + new Date ( ) ) ; _unknown_packets_per_sec = 0 ; _unknown_packet_time = ab . _h2o . _last_heard_from ; } ab . close ( ) ; }
- Timeline record it
28,891
void push ( int slots ) { assert 0 <= slots && slots < 1000 ; int len = _d . length ; _sp += slots ; while ( _sp > len ) { _key = Arrays . copyOf ( _key , len << 1 ) ; _ary = Arrays . copyOf ( _ary , len << 1 ) ; _d = Arrays . copyOf ( _d , len << 1 ) ; _fcn = Arrays . copyOf ( _fcn , len <<= 1 ) ; _str = Arrays . copyOf ( _str , len << 1 ) ; } }
Push k empty slots
28,892
void push_slot ( int d , int n ) { assert d == 0 ; int idx = _display [ _tod - d ] + n ; push ( 1 ) ; _ary [ _sp - 1 ] = addRef ( _ary [ idx ] ) ; _d [ _sp - 1 ] = _d [ idx ] ; _fcn [ _sp - 1 ] = addRef ( _fcn [ idx ] ) ; _str [ _sp - 1 ] = _str [ idx ] ; assert _ary [ 0 ] == null || check_refcnt ( _ary [ 0 ] . anyVec ( ) ) ; }
Copy from display offset d nth slot
28,893
void tos_into_slot ( int d , int n , String id ) { assert d == 0 || ( d == 1 && _display [ _tod ] == n + 1 ) ; int idx = _display [ _tod - d ] + n ; if ( _tod == 0 ) ASTOp . removeUDF ( id ) ; subRef ( _ary [ idx ] , _key [ idx ] ) ; subRef ( _fcn [ idx ] ) ; Frame fr = _ary [ _sp - 1 ] ; _ary [ idx ] = fr == null ? null : addRef ( new Frame ( fr ) ) ; _d [ idx ] = _d [ _sp - 1 ] ; _str [ idx ] = _str [ _sp - 1 ] ; _fcn [ idx ] = addRef ( _fcn [ _sp - 1 ] ) ; _key [ idx ] = d == 0 && fr != null ? id : null ; if ( _tod == 0 && _fcn [ _sp - 1 ] != null ) ASTOp . putUDF ( _fcn [ _sp - 1 ] , id ) ; assert _ary [ 0 ] == null || check_refcnt ( _ary [ 0 ] . anyVec ( ) ) ; }
Copy from TOS into a slot . Does NOT pop results .
28,894
void tos_into_slot ( int idx , String id ) { subRef ( _ary [ idx ] , _key [ idx ] ) ; subRef ( _fcn [ idx ] ) ; Frame fr = _ary [ _sp - 1 ] ; _ary [ idx ] = fr == null ? null : addRef ( new Frame ( fr ) ) ; _d [ idx ] = _d [ _sp - 1 ] ; _fcn [ idx ] = addRef ( _fcn [ _sp - 1 ] ) ; _str [ idx ] = _str [ _sp - 1 ] ; _key [ idx ] = fr != null ? id : null ; assert _ary [ 0 ] == null || check_refcnt ( _ary [ 0 ] . anyVec ( ) ) ; }
Copy from TOS into a slot using absolute index .
28,895
public Frame popXAry ( ) { Frame fr = popAry ( ) ; for ( Vec vec : fr . vecs ( ) ) { popVec ( vec ) ; if ( vec . masterVec ( ) != null ) popVec ( vec . masterVec ( ) ) ; } return fr ; }
Assumption is that this Frame will get pushed again shortly .
28,896
public void poppush ( int n , Frame ary , String key ) { addRef ( ary ) ; for ( int i = 0 ; i < n ; i ++ ) { assert _sp > 0 ; _sp -- ; _fcn [ _sp ] = subRef ( _fcn [ _sp ] ) ; _ary [ _sp ] = subRef ( _ary [ _sp ] , _key [ _sp ] ) ; } push ( 1 ) ; _ary [ _sp - 1 ] = ary ; _key [ _sp - 1 ] = key ; assert check_all_refcnts ( ) ; }
Replace a function invocation with it s result
28,897
public Futures subRef ( Vec vec , Futures fs ) { assert fs != null : "Future should not be null!" ; if ( vec . masterVec ( ) != null ) subRef ( vec . masterVec ( ) , fs ) ; int cnt = _refcnt . get ( vec ) . _val - 1 ; if ( cnt > 0 ) { _refcnt . put ( vec , new IcedInt ( cnt ) ) ; } else { UKV . remove ( vec . _key , fs ) ; _refcnt . remove ( vec ) ; } return fs ; }
Subtract reference count .
28,898
public S fillFrom ( Properties parms ) { Class clz = getClass ( ) ; for ( String key : parms . stringPropertyNames ( ) ) { try { Field f = clz . getDeclaredField ( key ) ; int mods = f . getModifiers ( ) ; if ( Modifier . isTransient ( mods ) || Modifier . isStatic ( mods ) ) throw new IllegalArgumentException ( "Unknown argument " + key ) ; API api = ( API ) f . getAnnotations ( ) [ 0 ] ; if ( api . validation ( ) . length ( ) == 0 && api . values ( ) . length ( ) == 0 && api . dependsOn ( ) . length == 0 ) throw new IllegalArgumentException ( "Attempting to set output field " + key ) ; f . set ( this , parse ( parms . getProperty ( key ) , f . getType ( ) ) ) ; } catch ( NoSuchFieldException nsfe ) { throw new IllegalArgumentException ( "Unknown argument " + key ) ; } catch ( ArrayIndexOutOfBoundsException aioobe ) { throw new RuntimeException ( "Broken internal schema; missing API annotation: " + key ) ; } catch ( IllegalAccessException iae ) { throw new RuntimeException ( "Broken internal schema; cannot be private nor final: " + key ) ; } } do { for ( Field f : clz . getDeclaredFields ( ) ) { int mods = f . getModifiers ( ) ; if ( Modifier . isTransient ( mods ) || Modifier . isStatic ( mods ) ) continue ; API api = ( API ) f . getAnnotations ( ) [ 0 ] ; if ( api . validation ( ) . length ( ) > 0 ) { if ( parms . getProperty ( f . getName ( ) ) == null ) throw new IllegalArgumentException ( "Required field " + f . getName ( ) + " not specified" ) ; } } clz = clz . getSuperclass ( ) ; } while ( Iced . class . isAssignableFrom ( clz . getSuperclass ( ) ) ) ; return ( S ) this ; }
private . Input fields get filled here so must not be final .
28,899
public final boolean isNA ( long i ) { long x = i - ( _start > 0 ? _start : 0 ) ; if ( 0 <= x && x < _len ) return isNA0 ( ( int ) x ) ; throw new ArrayIndexOutOfBoundsException ( getClass ( ) . getSimpleName ( ) + " " + _start + " <= " + i + " < " + ( _start + _len ) ) ; }
Fetch the missing - status the slow way .