idx int64 0 165k | question stringlengths 73 4.15k | target stringlengths 5 918 | len_question int64 21 890 | len_target int64 3 255 |
|---|---|---|---|---|
22,500 | public static String addNamedOutput ( Job job , String namedOutput , OutputFormat outputFormat , Class < ? > keyClass , Class < ? > valueClass ) throws FileNotFoundException , IOException , URISyntaxException { checkNamedOutputName ( job , namedOutput , true ) ; Configuration conf = job . getConfiguration ( ) ; String uniqueName = UUID . randomUUID ( ) . toString ( ) + ' ' + "out-format.dat" ; InstancesDistributor . distribute ( outputFormat , uniqueName , conf ) ; conf . set ( MULTIPLE_OUTPUTS , conf . get ( MULTIPLE_OUTPUTS , "" ) + " " + namedOutput ) ; conf . set ( MO_PREFIX + namedOutput + FORMAT_INSTANCE_FILE , uniqueName ) ; conf . setClass ( MO_PREFIX + namedOutput + KEY , keyClass , Object . class ) ; conf . setClass ( MO_PREFIX + namedOutput + VALUE , valueClass , Object . class ) ; return uniqueName ; } | Adds a named output for the job . Returns the instance file that has been created . | 236 | 17 |
22,501 | @ SuppressWarnings ( "unchecked" ) public < K , V > void write ( String namedOutput , K key , V value , String baseOutputPath ) throws IOException , InterruptedException { checkNamedOutputName ( context , namedOutput , false ) ; checkBaseOutputPath ( baseOutputPath ) ; if ( ! namedOutputs . contains ( namedOutput ) ) { throw new IllegalArgumentException ( "Undefined named output '" + namedOutput + "'" ) ; } getRecordWriter ( baseOutputPath ) . write ( key , value ) ; } | Write key and value to baseOutputPath using the namedOutput . | 122 | 13 |
22,502 | public void close ( ) throws IOException , InterruptedException { for ( OutputContext outputContext : this . outputContexts . values ( ) ) { outputContext . recordWriter . close ( outputContext . taskAttemptContext ) ; outputContext . outputCommitter . commitTask ( outputContext . taskAttemptContext ) ; // This is a trick for Hadoop 2.0 where there is extra business logic in commitJob() JobContext jContext ; try { jContext = JobContextFactory . get ( outputContext . taskAttemptContext . getConfiguration ( ) , new JobID ( ) ) ; } catch ( Exception e ) { throw new IOException ( e ) ; } try { Class cl = Class . forName ( OutputCommitter . class . getName ( ) ) ; Method method = cl . getMethod ( "commitJob" , Class . forName ( JobContext . class . getName ( ) ) ) ; if ( method != null ) { method . invoke ( outputContext . outputCommitter , jContext ) ; } } catch ( Exception e ) { // Hadoop 2.0 : do nothing // we need to call commitJob as a trick, but the trick itself may throw an IOException. // it doesn't mean that something went wrong. // If there was something really wrong it would have failed before. } outputContext . outputCommitter . cleanupJob ( outputContext . jobContext ) ; } } | Closes all the opened outputs . | 294 | 7 |
22,503 | private WhiteSpace getDatatypeWhiteSpace ( ) { Grammar currGr = this . getCurrentGrammar ( ) ; if ( currGr . isSchemaInformed ( ) && currGr . getNumberOfEvents ( ) > 0 ) { Production prod = currGr . getProduction ( 0 ) ; if ( prod . getEvent ( ) . getEventType ( ) == EventType . CHARACTERS ) { Characters ch = ( Characters ) prod . getEvent ( ) ; return ch . getDatatype ( ) . getWhiteSpace ( ) ; } } return null ; } | returns null if no CH datatype is available or schema - less | 127 | 15 |
22,504 | public void skip ( long n ) throws IOException { if ( capacity == 0 ) { // aligned while ( n != 0 ) { n -= istream . skip ( n ) ; } } else { // not aligned, grrr for ( int i = 0 ; i < n ; n ++ ) { readBits ( 8 ) ; } } } | Skip n bytes | 73 | 3 |
22,505 | public int readBits ( int n ) throws IOException { assert ( n > 0 ) ; int result ; if ( n <= capacity ) { // buffer already holds all necessary bits result = ( buffer >> ( capacity -= n ) ) & ( 0xff >> ( BUFFER_CAPACITY - n ) ) ; } else if ( capacity == 0 && n == BUFFER_CAPACITY ) { // possible to read direct byte, nothing else to do result = readDirectByte ( ) ; } else { // get as many bits from buffer as possible result = buffer & ( 0xff >> ( BUFFER_CAPACITY - capacity ) ) ; n -= capacity ; capacity = 0 ; // possibly read whole bytes while ( n > 7 ) { if ( capacity == 0 ) { readBuffer ( ) ; } result = ( result << BUFFER_CAPACITY ) | buffer ; n -= BUFFER_CAPACITY ; capacity = 0 ; } // read the rest of the bits if ( n > 0 ) { if ( capacity == 0 ) { readBuffer ( ) ; } result = ( result << n ) | ( buffer >> ( capacity = ( BUFFER_CAPACITY - n ) ) ) ; } } return result ; } | Read the next n bits and return the result as an integer . | 256 | 13 |
22,506 | public void readFields ( ITuple tuple , Deserializer [ ] customDeserializers ) throws IOException { readFields ( tuple , readSchema , customDeserializers ) ; } | Read fields using the specified readSchema in the constructor . | 41 | 12 |
22,507 | @ Override public final void mutate ( Context context ) throws MutagenException { // Perform the mutation performMutation ( context ) ; int version = getResultingState ( ) . getID ( ) ; String change = getChangeSummary ( ) ; if ( change == null ) { change = "" ; } String changeHash = md5String ( change ) ; // The straightforward way, without locking try { MutationBatch batch = getKeyspace ( ) . prepareMutationBatch ( ) ; batch . withRow ( CassandraSubject . VERSION_CF , CassandraSubject . ROW_KEY ) . putColumn ( CassandraSubject . VERSION_COLUMN , version ) ; batch . withRow ( CassandraSubject . VERSION_CF , String . format ( "%08d" , version ) ) . putColumn ( "change" , change ) . putColumn ( "hash" , changeHash ) ; batch . execute ( ) ; } catch ( ConnectionException e ) { throw new MutagenException ( "Could not update \"schema_version\" " + "column family to state " + version + "; schema is now out of sync with recorded version" , e ) ; } // TAF: Why does this fail with a StaleLockException? Do we need to use a // separate lock table? 
// // Attempt to acquire a lock to update the version // ColumnPrefixDistributedRowLock<String> lock = // new ColumnPrefixDistributedRowLock<String>(getKeyspace(), // CassandraSubject.VERSION_CF,CassandraSubject.VERSION_COLUMN) // .withBackoff(new BoundedExponentialBackoff(250, 10000, 10)) // .expireLockAfter(1, TimeUnit.SECONDS) //// .failOnStaleLock(false); // .failOnStaleLock(true); // // try { // lock.acquire(); // } // catch (StaleLockException e) { // // Won't happen // throw new MutagenException("Could not update "+ // "\"schema_version\" column family to state "+version+ // " because lock expired",e); // } // catch (BusyLockException e) { // throw new MutagenException("Could not update "+ // "\"schema_version\" column family to state "+version+ // " because another client is updating the recorded version",e); // } // catch (Exception e) { // if (e instanceof RuntimeException) { // throw (RuntimeException)e; // } // else { // throw new MutagenException("Could not update "+ // "\"schema_version\" column family to state "+version+ // " because a write lock could not be obtained",e); // } // } // finally { // try { // MutationBatch batch=getKeyspace().prepareMutationBatch(); // batch.withRow(CassandraSubject.VERSION_CF, // CassandraSubject.ROW_KEY) // .putColumn(CassandraSubject.VERSION_COLUMN,version); // // // Release and update // lock.releaseWithMutation(batch); // } // catch (Exception e) { // if (e instanceof RuntimeException) { // throw (RuntimeException)e; // } // else { // throw new MutagenException("Could not update "+ // "\"schema_version\" column family to state "+version+ // "; schema is now out of sync with recorded version",e); // } // } // } } | Performs the actual mutation and then updates the recorded schema version | 735 | 12 |
22,508 | public static String toHex ( byte [ ] bytes ) { StringBuilder hexString = new StringBuilder ( ) ; for ( int i = 0 ; i < bytes . length ; i ++ ) { String hex = Integer . toHexString ( 0xFF & bytes [ i ] ) ; if ( hex . length ( ) == 1 ) { hexString . append ( ' ' ) ; } hexString . append ( hex ) ; } return hexString . toString ( ) ; } | Encode a byte array as a hexadecimal string | 101 | 12 |
22,509 | public static List < String > getDeployed ( String url , String token ) throws Exception { List < String > deployed = new ArrayList < String > ( ) ; HttpClient client = httpClient ( ) ; HttpGet get = new HttpGet ( url + "/system/deployment/list" ) ; addAuthHeader ( token , get ) ; HttpResponse resp = client . execute ( get ) ; if ( resp . getStatusLine ( ) . getStatusCode ( ) == HttpStatus . SC_OK ) { List < String > respList = new Gson ( ) . fromJson ( EntityUtils . toString ( resp . getEntity ( ) ) , new TypeToken < List < String > > ( ) { } . getType ( ) ) ; if ( respList != null ) { deployed . addAll ( respList ) ; } } return deployed ; } | Retrieves a list of Cadmium wars that are deployed . | 188 | 14 |
22,510 | public static void undeploy ( String url , String warName , String token ) throws Exception { HttpClient client = httpClient ( ) ; HttpPost del = new HttpPost ( url + "/system/undeploy" ) ; addAuthHeader ( token , del ) ; del . addHeader ( "Content-Type" , MediaType . APPLICATION_JSON ) ; UndeployRequest req = new UndeployRequest ( ) ; req . setWarName ( warName ) ; del . setEntity ( new StringEntity ( new Gson ( ) . toJson ( req ) , "UTF-8" ) ) ; HttpResponse resp = client . execute ( del ) ; if ( resp . getStatusLine ( ) . getStatusCode ( ) == HttpStatus . SC_OK ) { String respStr = EntityUtils . toString ( resp . getEntity ( ) ) ; if ( ! respStr . equals ( "ok" ) ) { throw new Exception ( "Failed to undeploy " + warName ) ; } else { System . out . println ( "Undeployment of " + warName + " successful" ) ; } } else { System . err . println ( "Failed to undeploy " + warName ) ; System . err . println ( resp . getStatusLine ( ) . getStatusCode ( ) + ": " + EntityUtils . toString ( resp . getEntity ( ) ) ) ; } } | Sends the undeploy command to a Cadmium - Deployer war . | 308 | 16 |
22,511 | public void set ( int bit , boolean value ) { int bite = byteForBit ( bit ) ; ensureSpace ( bite + 1 ) ; int bitOnByte = bitOnByte ( bit , bite ) ; if ( value ) { bits [ bite ] = byteBitSet ( bitOnByte , bits [ bite ] ) ; } else { bits [ bite ] = byteBitUnset ( bitOnByte , bits [ bite ] ) ; } } | Sets or unsets a bit . The smaller allowed bit is 0 | 92 | 14 |
22,512 | public boolean isSet ( int bit ) { int bite = byteForBit ( bit ) ; if ( bite >= bits . length || bits . length == 0 ) { return false ; } int bitOnByte = bitOnByte ( bit , bite ) ; return ( ( 1 << bitOnByte ) & bits [ bite ] ) != 0 ; } | Returns the value of a given bit . False is returned for unexisting bits . | 71 | 16 |
22,513 | public void ser ( DataOutput out ) throws IOException { if ( bits . length == 0 ) { out . writeByte ( 0 ) ; return ; } // removing trailing empty bytes. int bytesToWrite ; for ( bytesToWrite = bits . length ; bytesToWrite > 1 && bits [ bytesToWrite - 1 ] == 0 ; bytesToWrite -- ) ; // Writing first bytes, with the rightmost bit set for ( int i = 0 ; i < ( bytesToWrite - 1 ) ; i ++ ) { out . writeByte ( ( bits [ i ] | 1 ) ) ; } // Writing the last byte, with the rightmost bit unset out . writeByte ( ( bits [ bytesToWrite - 1 ] & ~ 1 ) ) ; } | Serializes the bit field to the data output . It uses one byte per each 7 bits . If the rightmost bit of the read byte is set that means that there are more bytes to consume . The latest byte has the rightmost bit unset . | 158 | 51 |
22,514 | public int deser ( byte [ ] bytes , int start ) throws IOException { int idx = 0 ; byte current ; do { current = bytes [ start + idx ] ; ensureSpace ( idx + 1 ) ; // The last bit must be clear bits [ idx ] = ( byte ) ( current & ~ 1 ) ; idx ++ ; } while ( ( current & 1 ) != 0 ) ; // clear the remaining bytes. for ( int i = idx ; i < bits . length ; i ++ ) { bits [ i ] = 0 ; } return idx ; } | Deserialize a BitField serialized from a byte array . Return the number of bytes consumed . | 123 | 20 |
22,515 | protected void ensureSpace ( int bytes ) { if ( bits . length < bytes ) { bits = Arrays . copyOf ( bits , bytes ) ; } } | Ensures a minimum size for the backing byte array | 33 | 11 |
22,516 | @ Override public TypeDescription addTypeDescription ( TypeDescription definition ) { if ( definition != null && definition . getTag ( ) != null ) { tagsDefined . add ( definition . getTag ( ) ) ; } return super . addTypeDescription ( definition ) ; } | Overridden to capture what tags are defined specially . | 57 | 10 |
22,517 | @ Override protected Construct getConstructor ( Node node ) { Construct construct = super . getConstructor ( node ) ; logger . trace ( "getting constructor for node {} Tag {} = {}" , new Object [ ] { node , node . getTag ( ) , construct } ) ; if ( construct instanceof ConstructYamlObject && ! tagsDefined . contains ( node . getTag ( ) ) ) { try { node . getTag ( ) . getClassName ( ) ; } catch ( YAMLException e ) { node . setUseClassConstructor ( true ) ; String value = null ; if ( node . getNodeId ( ) == NodeId . scalar ) { value = ( ( ScalarNode ) node ) . getValue ( ) ; } node . setTag ( resolver . resolve ( node . getNodeId ( ) , value , true ) ) ; construct = super . getConstructor ( node ) ; try { resolveType ( node ) ; } catch ( ClassNotFoundException e1 ) { logger . debug ( "Could not find class." , e1 ) ; } } } logger . trace ( "returning constructor for node {} type {} Tag {} = {}" , new Object [ ] { node , node . getType ( ) , node . getTag ( ) , construct } ) ; return construct ; } | Overridden to fetch constructor even if tag is not mapped . | 281 | 12 |
22,518 | private void resolveType ( Node node ) throws ClassNotFoundException { String typeName = node . getTag ( ) . getClassName ( ) ; if ( typeName . equals ( "int" ) ) { node . setType ( Integer . TYPE ) ; } else if ( typeName . equals ( "float" ) ) { node . setType ( Float . TYPE ) ; } else if ( typeName . equals ( "double" ) ) { node . setType ( Double . TYPE ) ; } else if ( typeName . equals ( "bool" ) ) { node . setType ( Boolean . TYPE ) ; } else if ( typeName . equals ( "date" ) ) { node . setType ( Date . class ) ; } else if ( typeName . equals ( "seq" ) ) { node . setType ( List . class ) ; } else if ( typeName . equals ( "str" ) ) { node . setType ( String . class ) ; } else if ( typeName . equals ( "map" ) ) { node . setType ( Map . class ) ; } else { node . setType ( getClassForName ( node . getTag ( ) . getClassName ( ) ) ) ; } } | Resolves the type of a node after the tag gets re - resolved . | 259 | 15 |
22,519 | protected UUID getRequestIdFrom ( Request request , Response response ) { return optUuid ( response . getHeader ( OTHeaders . REQUEST_ID ) ) ; } | Provides a hook whereby an alternate source can be provided for grabbing the requestId | 38 | 16 |
22,520 | public static LoggerConfig [ ] setLogLevel ( String loggerName , String level ) { if ( StringUtils . isBlank ( loggerName ) ) { loggerName = ch . qos . logback . classic . Logger . ROOT_LOGGER_NAME ; } LoggerContext context = ( LoggerContext ) LoggerFactory . getILoggerFactory ( ) ; log . debug ( "Setting {} to level {}" , loggerName , level ) ; ch . qos . logback . classic . Logger logger = null ; try { logger = context . getLogger ( loggerName ) ; if ( logger != null ) { if ( level . equals ( "null" ) || level . equals ( "none" ) ) { logger . setLevel ( null ) ; } else { logger . setLevel ( Level . toLevel ( level ) ) ; } logger = context . getLogger ( loggerName ) ; return new LoggerConfig [ ] { new LoggerConfig ( logger . getName ( ) , logger . getLevel ( ) + "" ) } ; } return new LoggerConfig [ ] { } ; } catch ( Throwable t ) { log . warn ( "Failed to change log level for logger " + loggerName + " to level " + level , t ) ; return new LoggerConfig [ ] { } ; } } | Updates a logger with a given name to the given level . | 285 | 13 |
22,521 | public int decodeNBitUnsignedInteger ( int n ) throws IOException { assert ( n >= 0 ) ; int bitsRead = 0 ; int result = 0 ; while ( bitsRead < n ) { // result = (result << 8) | is.read(); result += ( decode ( ) << bitsRead ) ; bitsRead += 8 ; } return result ; } | Decodes and returns an n - bit unsigned integer using the minimum number of bytes required for n bits . | 76 | 21 |
22,522 | public Set < String > configureJob ( Job job ) throws FileNotFoundException , IOException , TupleMRException { Set < String > instanceFiles = new HashSet < String > ( ) ; for ( Output output : getNamedOutputs ( ) ) { try { if ( output . isDefault ) { instanceFiles . add ( PangoolMultipleOutputs . setDefaultNamedOutput ( job , output . outputFormat , output . keyClass , output . valueClass ) ) ; } else { instanceFiles . add ( PangoolMultipleOutputs . addNamedOutput ( job , output . name , output . outputFormat , output . keyClass , output . valueClass ) ) ; } } catch ( URISyntaxException e1 ) { throw new TupleMRException ( e1 ) ; } for ( Map . Entry < String , String > contextKeyValue : output . specificContext . entrySet ( ) ) { PangoolMultipleOutputs . addNamedOutputContext ( job , output . name , contextKeyValue . getKey ( ) , contextKeyValue . getValue ( ) ) ; } } return instanceFiles ; } | Use this method for configuring a Job instance according to the named outputs specs that has been specified . Returns the instance files that have been created . | 244 | 29 |
22,523 | public boolean canCheckWar ( String warName , String url , HttpClient client ) { HttpOptions opt = new HttpOptions ( url + "/" + warName ) ; try { HttpResponse response = client . execute ( opt ) ; if ( response . getStatusLine ( ) . getStatusCode ( ) == HttpStatus . SC_OK ) { Header allowHeader [ ] = response . getHeaders ( "Allow" ) ; for ( Header allow : allowHeader ) { List < String > values = Arrays . asList ( allow . getValue ( ) . toUpperCase ( ) . split ( "," ) ) ; if ( values . contains ( "GET" ) ) { return true ; } } } EntityUtils . consumeQuietly ( response . getEntity ( ) ) ; } catch ( Exception e ) { log . warn ( "Failed to check if endpoint exists." , e ) ; } finally { opt . releaseConnection ( ) ; } return false ; } | Checks via an http options request that the endpoint exists to check for deployment state . | 210 | 17 |
22,524 | public void encodeBinary ( byte [ ] b ) throws IOException { encodeUnsignedInteger ( b . length ) ; encode ( b , 0 , b . length ) ; } | Encode a binary value as a length - prefixed sequence of octets . | 37 | 16 |
22,525 | public void encodeString ( final String s ) throws IOException { final int lenChars = s . length ( ) ; final int lenCharacters = s . codePointCount ( 0 , lenChars ) ; encodeUnsignedInteger ( lenCharacters ) ; encodeStringOnly ( s ) ; } | Encode a string as a length - prefixed sequence of UCS codepoints each of which is encoded as an integer . Look for codepoints of more than 16 bits that are represented as UTF - 16 surrogate pairs in Java . | 60 | 49 |
22,526 | public void encodeInteger ( int n ) throws IOException { // signalize sign if ( n < 0 ) { encodeBoolean ( true ) ; // For negative values, the Unsigned Integer holds the // magnitude of the value minus 1 encodeUnsignedInteger ( ( - n ) - 1 ) ; } else { encodeBoolean ( false ) ; encodeUnsignedInteger ( n ) ; } } | Encode an arbitrary precision integer using a sign bit followed by a sequence of octets . The most significant bit of the last octet is set to zero to indicate sequence termination . Only seven bits per octet are used to store the integer s value . | 81 | 51 |
22,527 | public void encodeUnsignedInteger ( int n ) throws IOException { if ( n < 0 ) { throw new UnsupportedOperationException ( ) ; } if ( n < 128 ) { // write byte as is encode ( n ) ; } else { final int n7BitBlocks = MethodsBag . numberOf7BitBlocksToRepresent ( n ) ; switch ( n7BitBlocks ) { case 5 : encode ( 128 | n ) ; n = n >>> 7 ; case 4 : encode ( 128 | n ) ; n = n >>> 7 ; case 3 : encode ( 128 | n ) ; n = n >>> 7 ; case 2 : encode ( 128 | n ) ; n = n >>> 7 ; case 1 : // 0 .. 7 (last byte) encode ( 0 | n ) ; } } } | Encode an arbitrary precision non negative integer using a sequence of octets . The most significant bit of the last octet is set to zero to indicate sequence termination . Only seven bits per octet are used to store the integer s value . | 166 | 48 |
22,528 | public void encodeFloat ( FloatValue fv ) throws IOException { // encode mantissa and exponent encodeIntegerValue ( fv . getMantissa ( ) ) ; encodeIntegerValue ( fv . getExponent ( ) ) ; } | Encode a Float represented as two consecutive Integers . The first Integer represents the mantissa of the floating point number and the second Integer represents the 10 - based exponent of the floating point number | 50 | 38 |
22,529 | private void addAddressHelper ( InternetAddressSet set , String address ) { if ( address . contains ( "," ) || address . contains ( ";" ) ) { String [ ] addresses = address . split ( "[,;]" ) ; for ( String a : addresses ) { set . add ( a ) ; } } else { set . add ( address ) ; } } | Checks if the addresses need to be split either on or ; | 77 | 13 |
22,530 | public void simplify ( ) { // remove all addresses from the cc and bcc that are in the to address set. ccSet . removeAll ( toSet ) ; bccSet . removeAll ( toSet ) ; // remove all address from the bcc set that are in the cc set. bccSet . removeAll ( ccSet ) ; } | Simplifies this email by removing duplicate pieces of information . The standard implementation removes duplicate recipient emails in the to cc and bcc sets . | 73 | 28 |
22,531 | protected void populate ( MimeMessage message ) throws MessagingException { // add all of the to addresses. message . addRecipients ( Message . RecipientType . TO , toSet . toInternetAddressArray ( ) ) ; message . addRecipients ( Message . RecipientType . CC , ccSet . toInternetAddressArray ( ) ) ; message . addRecipients ( Message . RecipientType . BCC , bccSet . toInternetAddressArray ( ) ) ; message . setFrom ( from ) ; if ( replyTo != null ) { message . setReplyTo ( new InternetAddress [ ] { replyTo } ) ; } if ( subject != null ) { message . setSubject ( subject ) ; } } | Populates a mime message with the recipient addresses from address reply to address and the subject . | 152 | 19 |
22,532 | public static AttachLogFilter attach ( Filter < ILoggingEvent > filter , String configKey ) { return new AttachLogFilter ( filter , configKey ) ; } | Create an attach log filter | 36 | 5 |
22,533 | public static void enableThriftSerialization ( Configuration conf ) { String ser = conf . get ( "io.serializations" ) . trim ( ) ; if ( ser . length ( ) != 0 ) { ser += "," ; } //Adding the Thrift serialization ser += ThriftSerialization . class . getName ( ) ; conf . set ( "io.serializations" , ser ) ; } | Enables Thrift Serialization support in Hadoop . | 85 | 12 |
22,534 | public static void main ( String [ ] args ) { try { jCommander = new JCommander ( ) ; jCommander . setProgramName ( "cadmium" ) ; HelpCommand helpCommand = new HelpCommand ( ) ; jCommander . addCommand ( "help" , helpCommand ) ; Map < String , CliCommand > commands = wireCommands ( jCommander ) ; try { jCommander . parse ( args ) ; } catch ( ParameterException pe ) { System . err . println ( pe . getMessage ( ) ) ; System . exit ( 1 ) ; } String commandName = jCommander . getParsedCommand ( ) ; if ( commandName == null ) { System . out . println ( "Please use one of the following commands:" ) ; for ( String command : jCommander . getCommands ( ) . keySet ( ) ) { String desc = jCommander . getCommands ( ) . get ( command ) . getObjects ( ) . get ( 0 ) . getClass ( ) . getAnnotation ( Parameters . class ) . commandDescription ( ) ; System . out . format ( " %16s -%s\n" , command , desc ) ; } } else if ( commandName . equals ( "help" ) ) { if ( helpCommand . subCommand == null || helpCommand . subCommand . size ( ) == 0 ) { jCommander . usage ( ) ; return ; } else { JCommander subCommander = jCommander . getCommands ( ) . get ( helpCommand . subCommand . get ( 0 ) ) ; if ( subCommander == null ) { System . out . println ( "Unknown sub command " + commandName ) ; return ; } subCommander . usage ( ) ; return ; } } else if ( commands . containsKey ( commandName ) ) { CliCommand command = commands . get ( commandName ) ; if ( command instanceof AuthorizedOnly ) { setupSsh ( ( ( AuthorizedOnly ) command ) . isAuthQuiet ( ) ) ; setupAuth ( ( AuthorizedOnly ) command ) ; } command . execute ( ) ; } } catch ( Exception e ) { System . err . println ( "Error: " + e . getMessage ( ) ) ; logger . debug ( "Cli Failed" , e ) ; e . printStackTrace ( ) ; System . exit ( 1 ) ; } } | The main entry point to Cadmium cli . | 515 | 11 |
22,535 | private static void setupSsh ( boolean noPrompt ) { File sshDir = new File ( System . getProperty ( "user.home" ) , ".ssh" ) ; if ( sshDir . exists ( ) ) { GitService . setupLocalSsh ( sshDir . getAbsolutePath ( ) , noPrompt ) ; } } | Sets up the ssh configuration that git will use to communicate with the remote git repositories . | 72 | 18 |
22,536 | public static void emptyMatrix ( byte [ ] [ ] matrix , int maxX , int maxY ) { for ( int i = 0 ; i < maxX ; i ++ ) { for ( int j = 0 ; j < maxY ; j ++ ) { matrix [ i ] [ j ] = 0 ; } } } | It is not very efficient but it is simple enough | 67 | 10 |
22,537 | static public byte [ ] decode ( String encoded ) { if ( encoded == null ) return null ; int lengthData = encoded . length ( ) ; if ( lengthData % 2 != 0 ) return null ; char [ ] binaryData = encoded . toCharArray ( ) ; int lengthDecode = lengthData / 2 ; byte [ ] decodedData = new byte [ lengthDecode ] ; byte temp1 , temp2 ; char tempChar ; for ( int i = 0 ; i < lengthDecode ; i ++ ) { tempChar = binaryData [ i * 2 ] ; temp1 = ( tempChar < BASELENGTH ) ? hexNumberTable [ tempChar ] : - 1 ; if ( temp1 == - 1 ) return null ; tempChar = binaryData [ i * 2 + 1 ] ; temp2 = ( tempChar < BASELENGTH ) ? hexNumberTable [ tempChar ] : - 1 ; if ( temp2 == - 1 ) return null ; decodedData [ i ] = ( byte ) ( ( temp1 << 4 ) | temp2 ) ; } return decodedData ; } | Decode hex string to a byte array | 230 | 8 |
22,538 | public static DateTimeValue parse ( Calendar cal , DateTimeType type ) { int sYear = 0 ; int sMonthDay = 0 ; int sTime = 0 ; int sFractionalSecs = 0 ; boolean sPresenceTimezone = false ; int sTimezone ; switch ( type ) { case gYear : // gYear Year, [Time-Zone] case gYearMonth : // gYearMonth Year, MonthDay, [TimeZone] case date : // date Year, MonthDay, [TimeZone] sYear = cal . get ( Calendar . YEAR ) ; sMonthDay = getMonthDay ( cal ) ; break ; case dateTime : // dateTime Year, MonthDay, Time, [FractionalSecs], // [TimeZone] sYear = cal . get ( Calendar . YEAR ) ; sMonthDay = getMonthDay ( cal ) ; // Note: *no* break; case time : // time Time, [FractionalSecs], [TimeZone] sTime = getTime ( cal ) ; sFractionalSecs = cal . get ( Calendar . MILLISECOND ) ; break ; case gMonth : // gMonth MonthDay, [TimeZone] case gMonthDay : // gMonthDay MonthDay, [TimeZone] case gDay : // gDay MonthDay, [TimeZone] sMonthDay = getMonthDay ( cal ) ; break ; default : throw new UnsupportedOperationException ( ) ; } // [TimeZone] sTimezone = getTimeZoneInMinutesOffset ( cal ) ; if ( sTimezone != 0 ) { sPresenceTimezone = true ; } return new DateTimeValue ( type , sYear , sMonthDay , sTime , sFractionalSecs , sPresenceTimezone , sTimezone ) ; } | Encode Date - Time as a sequence of values representing the individual components of the Date - Time . | 384 | 20 |
22,539 | protected static void setMonthDay ( int monthDay , Calendar cal ) { // monthDay = month * 32 + day; int month = monthDay / MONTH_MULTIPLICATOR ; cal . set ( Calendar . MONTH , month - 1 ) ; int day = monthDay - month * MONTH_MULTIPLICATOR ; cal . set ( Calendar . DAY_OF_MONTH , day ) ; } | Sets month and day of the given calendar making use of of the monthDay representation defined in EXI format | 90 | 22 |
22,540 | protected static void setTime ( int time , Calendar cal ) { // ((Hour * 64) + Minutes) * 64 + seconds int hour = time / ( 64 * 64 ) ; time -= hour * ( 64 * 64 ) ; int minute = time / 64 ; time -= minute * 64 ; // second cal . set ( Calendar . HOUR_OF_DAY , hour ) ; cal . set ( Calendar . MINUTE , minute ) ; cal . set ( Calendar . SECOND , time ) ; } | Sets hour minute and second of the given calendar making use of of the time representation defined in EXI format | 103 | 22 |
22,541 | private static void addPortMapping ( Integer insecurePort , Integer securePort ) { TO_SECURE_PORT_MAP . put ( insecurePort , securePort ) ; TO_INSECURE_PORT_MAP . put ( securePort , insecurePort ) ; } | Adds an entry to the secure and insecure port map . | 55 | 11 |
22,542 | public static int getDefaultPort ( String protocol ) { if ( HTTP_PROTOCOL . equals ( protocol ) ) { return DEFAULT_HTTP_PORT ; } else if ( HTTPS_PROTOCOL . equals ( protocol ) ) { return DEFAULT_HTTPS_PORT ; } else { throw new IllegalArgumentException ( "No known default for " + protocol ) ; } } | Returns the default port for the specified protocol . | 82 | 9 |
22,543 | public static int mapPort ( Map < Integer , Integer > mapping , int port ) { Integer mappedPort = mapping . get ( port ) ; if ( mappedPort == null ) throw new RuntimeException ( "Could not map port " + port ) ; return mappedPort ; } | Looks up a corresponding port number from a port mapping . | 56 | 11 |
22,544 | public String secureUrl ( HttpServletRequest request , HttpServletResponse response ) throws IOException { String protocol = getProtocol ( request ) ; if ( protocol . equalsIgnoreCase ( HTTP_PROTOCOL ) ) { int port = mapPort ( TO_SECURE_PORT_MAP , getPort ( request ) ) ; try { URI newUri = changeProtocolAndPort ( HTTPS_PROTOCOL , port == DEFAULT_HTTPS_PORT ? - 1 : port , request ) ; return newUri . toString ( ) ; } catch ( URISyntaxException e ) { throw new IllegalStateException ( "Failed to create URI." , e ) ; } } else { throw new UnsupportedProtocolException ( "Cannot build secure url for " + protocol ) ; } } | Returns the secure version of the original URL for the request . | 175 | 12 |
22,545 | public String insecureUrl ( HttpServletRequest request , HttpServletResponse response ) throws IOException { String protocol = getProtocol ( request ) ; if ( protocol . equalsIgnoreCase ( HTTPS_PROTOCOL ) ) { int port = mapPort ( TO_INSECURE_PORT_MAP , getPort ( request ) ) ; try { return changeProtocolAndPort ( HTTP_PROTOCOL , port == DEFAULT_HTTP_PORT ? - 1 : port , request ) . toString ( ) ; } catch ( URISyntaxException e ) { throw new IllegalStateException ( "Failed to create URI." , e ) ; } } else { throw new UnsupportedProtocolException ( "Cannot build insecure url for " + protocol ) ; } } | Returns the insecure version of the original URL for the request . | 166 | 12 |
22,546 | @ Override public void makeSecure ( HttpServletRequest request , HttpServletResponse response ) throws IOException { response . setStatus ( HttpServletResponse . SC_MOVED_PERMANENTLY ) ; response . setHeader ( "Location" , secureUrl ( request , response ) ) ; response . getOutputStream ( ) . flush ( ) ; response . getOutputStream ( ) . close ( ) ; } | Sends a moved perminately redirect to the secure form of the request URL . | 92 | 17 |
22,547 | @ Override public void makeInsecure ( HttpServletRequest request , HttpServletResponse response ) throws IOException { response . setStatus ( HttpServletResponse . SC_MOVED_PERMANENTLY ) ; response . setHeader ( "Location" , insecureUrl ( request , response ) ) ; response . getOutputStream ( ) . flush ( ) ; response . getOutputStream ( ) . close ( ) ; } | Sends a moved perminately redirect to the insecure form of the request URL . | 93 | 17 |
22,548 | public void init ( Configuration conf , Path generatedModel ) throws IOException , InterruptedException { FileSystem fileSystem = FileSystem . get ( conf ) ; for ( Category category : Category . values ( ) ) { wordCountPerCategory . put ( category , new HashMap < String , Integer > ( ) ) ; // init token count } // Use a HashSet to calculate the total vocabulary size Set < String > vocabulary = new HashSet < String > ( ) ; // Read tuples from generate job for ( FileStatus fileStatus : fileSystem . globStatus ( generatedModel ) ) { TupleFile . Reader reader = new TupleFile . Reader ( fileSystem , conf , fileStatus . getPath ( ) ) ; Tuple tuple = new Tuple ( reader . getSchema ( ) ) ; while ( reader . next ( tuple ) ) { // Read Tuple Integer count = ( Integer ) tuple . get ( "count" ) ; Category category = ( Category ) tuple . get ( "category" ) ; String word = tuple . get ( "word" ) . toString ( ) ; vocabulary . add ( word ) ; tokensPerCategory . put ( category , MapUtils . getInteger ( tokensPerCategory , category , 0 ) + count ) ; wordCountPerCategory . get ( category ) . put ( word , count ) ; } reader . close ( ) ; } V = vocabulary . size ( ) ; } | Read the Naive Bayes Model from HDFS | 295 | 10 |
22,549 | public Category classify ( String text ) { StringTokenizer itr = new StringTokenizer ( text ) ; Map < Category , Double > scorePerCategory = new HashMap < Category , Double > ( ) ; double bestScore = Double . NEGATIVE_INFINITY ; Category bestCategory = null ; while ( itr . hasMoreTokens ( ) ) { String token = NaiveBayesGenerate . normalizeWord ( itr . nextToken ( ) ) ; for ( Category category : Category . values ( ) ) { int count = MapUtils . getInteger ( wordCountPerCategory . get ( category ) , token , 0 ) + 1 ; double wordScore = Math . log ( count / ( double ) ( tokensPerCategory . get ( category ) + V ) ) ; double totalScore = MapUtils . getDouble ( scorePerCategory , category , 0. ) + wordScore ; if ( totalScore > bestScore ) { bestScore = totalScore ; bestCategory = category ; } scorePerCategory . put ( category , totalScore ) ; } } return bestCategory ; } | Naive Bayes Text Classification with Add - 1 Smoothing | 228 | 13 |
22,550 | public static int numberOf7BitBlocksToRepresent ( final long l ) { if ( l < 0xffffffff ) { return numberOf7BitBlocksToRepresent ( ( int ) l ) ; } // 35 bits else if ( l < 0x800000000 L ) { return 5 ; } // 42 bits else if ( l < 0x40000000000 L ) { return 6 ; } // 49 bits else if ( l < 0x2000000000000 L ) { return 7 ; } // 56 bits else if ( l < 0x100000000000000 L ) { return 8 ; } // 63 bits else if ( l < 0x8000000000000000 L ) { return 9 ; } // 70 bits else { // long, 64 bits return 10 ; } } | Returns the least number of 7 bit - blocks that is needed to represent the parameter l . Returns 1 if parameter l is 0 . | 156 | 26 |
22,551 | public static MimeBodyPart newMultipartBodyPart ( Multipart multipart ) throws MessagingException { MimeBodyPart mimeBodyPart = new MimeBodyPart ( ) ; mimeBodyPart . setContent ( multipart ) ; return mimeBodyPart ; } | Creates a body part for a multipart . | 60 | 10 |
22,552 | public static MimeBodyPart newHtmlAttachmentBodyPart ( URL contentUrl , String contentId ) throws MessagingException { MimeBodyPart mimeBodyPart = new MimeBodyPart ( ) ; mimeBodyPart . setDataHandler ( new DataHandler ( contentUrl ) ) ; if ( contentId != null ) { mimeBodyPart . setHeader ( "Content-ID" , contentId ) ; } return mimeBodyPart ; } | Creates a body part for an attachment that is used by an html body part . | 96 | 17 |
22,553 | public static String fileNameForUrl ( URL contentUrl ) { String fileName = null ; Matcher matcher = FILE_NAME_PATTERN . matcher ( contentUrl . getPath ( ) ) ; if ( matcher . find ( ) ) { fileName = matcher . group ( 1 ) ; } return fileName ; } | Returns the content disposition file name for a url . If a file name cannot be parsed from this url then null is returned . | 71 | 25 |
22,554 | private void initCommonAndGroupSchemaSerialization ( ) { //TODO Should SerializationInfo contain Configuration ? commonSerializers = getSerializers ( commonSchema , null ) ; commonDeserializers = getDeserializers ( commonSchema , commonSchema , null ) ; groupSerializers = getSerializers ( groupSchema , null ) ; groupDeserializers = getDeserializers ( groupSchema , groupSchema , null ) ; } | These serializers have been defined by the user in an OBJECT field | 97 | 14 |
22,555 | private Field checkFieldInAllSchemas ( String name ) throws TupleMRException { Field field = null ; for ( int i = 0 ; i < mrConfig . getIntermediateSchemas ( ) . size ( ) ; i ++ ) { Field fieldInSource = checkFieldInSchema ( name , i ) ; if ( field == null ) { field = fieldInSource ; } else if ( field . getType ( ) != fieldInSource . getType ( ) || field . getObjectClass ( ) != fieldInSource . getObjectClass ( ) ) { throw new TupleMRException ( "The type for field '" + name + "' is not the same in all the sources" ) ; } else if ( fieldInSource . isNullable ( ) ) { // IMPORTANT CASE. Nullable fields must be returned when present nullable and non nullable fields mixed field = fieldInSource ; } } return field ; } | Checks that the field with the given name is in all schemas and selects a representative field that will be used for serializing . In the case of having a mixture of fields some of them nullable and some others non nullable a nullable Field will be returned . | 202 | 55 |
22,556 | public File resolveMavenArtifact ( String artifact ) throws ArtifactResolutionException { // NOTE: This page on Aether (https://docs.sonatype.org/display/AETHER/Home) states that // the plexus container uses the context class loader. ClassLoader oldContext = Thread . currentThread ( ) . getContextClassLoader ( ) ; try { Thread . currentThread ( ) . setContextClassLoader ( this . getClass ( ) . getClassLoader ( ) ) ; RepositorySystem repoSystem = newRepositorySystem ( ) ; RepositorySystemSession session = newSession ( repoSystem ) ; Artifact artifactObj = new DefaultArtifact ( artifact ) ; RemoteRepository repo = new RemoteRepository ( "cadmium-central" , "default" , remoteMavenRepository ) ; // TODO: we should remove the snapshot policy in production mode. repo . setPolicy ( true , new RepositoryPolicy ( true , RepositoryPolicy . UPDATE_POLICY_ALWAYS , RepositoryPolicy . CHECKSUM_POLICY_WARN ) ) ; repo . setPolicy ( false , new RepositoryPolicy ( true , RepositoryPolicy . UPDATE_POLICY_DAILY , RepositoryPolicy . CHECKSUM_POLICY_WARN ) ) ; ArtifactRequest artifactRequest = new ArtifactRequest ( ) ; artifactRequest . setArtifact ( artifactObj ) ; artifactRequest . addRepository ( repo ) ; ArtifactResult artifactResult = repoSystem . resolveArtifact ( session , artifactRequest ) ; artifactObj = artifactResult . getArtifact ( ) ; return artifactObj . getFile ( ) ; } finally { Thread . currentThread ( ) . setContextClassLoader ( oldContext ) ; } } | Fetches a maven artifact and returns a File Object that points to its location . | 366 | 18 |
22,557 | protected RepositorySystemSession newSession ( RepositorySystem system ) { MavenRepositorySystemSession session = new MavenRepositorySystemSession ( ) ; LocalRepository localRepo = new LocalRepository ( localRepository ) ; session . setLocalRepositoryManager ( system . newLocalRepositoryManager ( localRepo ) ) ; return session ; } | Creates a new RepositorySystemSession . | 75 | 9 |
22,558 | private static Path locateFileInCache ( Configuration conf , String filename ) throws IOException { return new Path ( getInstancesFolder ( FileSystem . get ( conf ) , conf ) , filename ) ; } | Locates a file in the temporal folder | 42 | 8 |
22,559 | public static File getWritableDirectoryWithFailovers ( String ... directories ) throws FileNotFoundException { File logDir = null ; for ( String directory : directories ) { if ( directory != null ) { try { logDir = ensureDirectoryWriteable ( new File ( directory ) ) ; } catch ( FileNotFoundException e ) { log . debug ( "Failed to get writeable directory: " + directory , e ) ; continue ; } break ; } } if ( logDir == null ) { throw new FileNotFoundException ( "Could not get a writeable directory!" ) ; } return logDir ; } | Gets the first writable directory that exists or can be created . | 128 | 14 |
22,560 | public static File ensureDirectoryWriteable ( File logDir ) throws FileNotFoundException { try { FileUtils . forceMkdir ( logDir ) ; } catch ( IOException e ) { log . debug ( "Failed to create directory " + logDir , e ) ; throw new FileNotFoundException ( "Failed to create directory: " + logDir + " IOException: " + e . getMessage ( ) ) ; } if ( ! logDir . canWrite ( ) ) { log . debug ( "Init param log dir cannot be used!" ) ; throw new FileNotFoundException ( "Directory is not writable: " + logDir ) ; } return logDir ; } | Try s to create a directory and ensures that it is writable . | 146 | 14 |
22,561 | @ Override public int getPartition ( DatumWrapper < ITuple > key , NullWritable value , int numPartitions ) { if ( numPartitions == 1 ) { // in this case the schema is not checked if it's valid return 0 ; } else { ITuple tuple = key . datum ( ) ; String sourceName = tuple . getSchema ( ) . getName ( ) ; Integer schemaId = tupleMRConfig . getSchemaIdByName ( sourceName ) ; if ( schemaId == null ) { throw new RuntimeException ( "Schema name '" + sourceName + "' is unknown. Known schemas are : " + tupleMRConfig . getIntermediateSchemaNames ( ) ) ; } int [ ] fieldsToPartition = serInfo . getPartitionFieldsIndexes ( ) . get ( schemaId ) ; if ( fieldsToPartition . length == 0 ) { throw new RuntimeException ( "Fields to partition is 0. Something has been wrongly configured." ) ; } return ( partialHashCode ( tuple , fieldsToPartition ) & Integer . MAX_VALUE ) % numPartitions ; } } | to perform hashCode of strings | 242 | 6 |
22,562 | public int partialHashCode ( ITuple tuple , int [ ] fields ) { int result = 0 ; for ( int field : fields ) { Object o = tuple . get ( field ) ; if ( o == null ) { // nulls don't account for hashcode continue ; } int hashCode ; if ( o instanceof String ) { // since String.hashCode() != Utf8.hashCode() HELPER_UTF8 . set ( ( String ) o ) ; hashCode = HELPER_UTF8 . hashCode ( ) ; } else if ( o instanceof Text ) { HELPER_UTF8 . set ( ( Text ) o ) ; hashCode = HELPER_UTF8 . hashCode ( ) ; } else if ( o instanceof byte [ ] ) { hashCode = hashBytes ( ( byte [ ] ) o , 0 , ( ( byte [ ] ) o ) . length ) ; } else if ( o instanceof ByteBuffer ) { ByteBuffer buffer = ( ByteBuffer ) o ; int offset = buffer . arrayOffset ( ) + buffer . position ( ) ; int length = buffer . limit ( ) - buffer . position ( ) ; hashCode = hashBytes ( buffer . array ( ) , offset , length ) ; } else { hashCode = o . hashCode ( ) ; } result = result * 31 + hashCode ; } return result ; } | Calculates a combined hashCode using the specified number of fields . | 287 | 15 |
22,563 | @ Override public void init ( FilterConfig config ) throws ServletException { if ( config . getInitParameter ( "ignorePrefix" ) != null ) { ignorePath = config . getInitParameter ( "ignorePrefix" ) ; } } | Configures the ignore prefix . | 52 | 6 |
22,564 | @ SuppressWarnings ( "rawtypes" ) @ Override public int compare ( ITuple w1 , ITuple w2 ) { int schemaId1 = tupleMRConf . getSchemaIdByName ( w1 . getSchema ( ) . getName ( ) ) ; int schemaId2 = tupleMRConf . getSchemaIdByName ( w2 . getSchema ( ) . getName ( ) ) ; int [ ] indexes1 = serInfo . getGroupSchemaIndexTranslation ( schemaId1 ) ; int [ ] indexes2 = serInfo . getGroupSchemaIndexTranslation ( schemaId2 ) ; Serializer [ ] serializers = serInfo . getGroupSchemaSerializers ( ) ; return compare ( w1 . getSchema ( ) , groupCriteria , w1 , indexes1 , w2 , indexes2 , serializers ) ; } | Never called in MapRed jobs . Just for completion and test purposes | 186 | 13 |
22,565 | public void encodeNBitUnsignedInteger ( int b , int n ) throws IOException { if ( b < 0 || n < 0 ) { throw new IllegalArgumentException ( "Encode negative value as unsigned integer is invalid!" ) ; } assert ( b >= 0 ) ; assert ( n >= 0 ) ; ostream . writeBits ( b , n ) ; } | Encode n - bit unsigned integer . The n least significant bits of parameter b starting with the most significant i . e . from left to right . | 78 | 30 |
22,566 | public static Schema subSetOf ( Schema schema , String ... subSetFields ) { return subSetOf ( "subSetSchema" + ( COUNTER ++ ) , schema , subSetFields ) ; } | Creates a subset of the input Schema exactly with the fields whose names are specified . The name of the schema is auto - generated with a static counter . | 47 | 32 |
22,567 | public static Schema subSetOf ( String newName , Schema schema , String ... subSetFields ) { List < Field > newSchema = new ArrayList < Field > ( ) ; for ( String subSetField : subSetFields ) { newSchema . add ( schema . getField ( subSetField ) ) ; } return new Schema ( newName , newSchema ) ; } | Creates a subset of the input Schema exactly with the fields whose names are specified . The name of the schema is also specified as a parameter . | 86 | 30 |
22,568 | public static Schema superSetOf ( Schema schema , Field ... newFields ) { return superSetOf ( "superSetSchema" + ( COUNTER ++ ) , schema , newFields ) ; } | Creates a superset of the input Schema taking all the Fields in the input schema and adding some new ones . The new fields are fully specified in a Field class . The name of the schema is auto - generated with a static counter . | 45 | 49 |
22,569 | public static Schema superSetOf ( String newName , Schema schema , Field ... newFields ) { List < Field > newSchema = new ArrayList < Field > ( ) ; newSchema . addAll ( schema . getFields ( ) ) ; for ( Field newField : newFields ) { newSchema . add ( newField ) ; } return new Schema ( newName , newSchema ) ; } | Creates a superset of the input Schema taking all the Fields in the input schema and adding some new ones . The new fields are fully specified in a Field class . The name of the schema is also specified as a parameter . | 92 | 47 |
22,570 | @ PostConstruct public void setupScheduler ( ) { for ( SchedulerTask task : tasks ) { if ( task . getDelay ( ) > 0 && task . getInterval ( ) > 0 ) { executor . scheduleWithFixedDelay ( task . getTask ( ) , task . getDelay ( ) , task . getInterval ( ) , task . getTimeUnit ( ) ) ; } else if ( task . isImmediate ( ) && task . getInterval ( ) > 0 ) { executor . scheduleWithFixedDelay ( task . getTask ( ) , 0l , task . getInterval ( ) , task . getTimeUnit ( ) ) ; } else if ( task . getDelay ( ) > 0 ) { executor . schedule ( task . getTask ( ) , task . getDelay ( ) , task . getTimeUnit ( ) ) ; } else { executor . execute ( task . getTask ( ) ) ; } } } | Schedules all tasks injected in by guice . | 208 | 11 |
22,571 | public File getAlternateContentDirectory ( String userAgent ) { Configuration config = liveConfig ; for ( AlternateContent configuration : config . configs ) { try { if ( configuration . compiledPattern . matcher ( userAgent ) . matches ( ) ) { return new File ( config . metaDir , configuration . getContentDirectory ( ) ) ; } } catch ( Exception e ) { logger . warn ( "Failed to process config: " + configuration . getPattern ( ) + "->" + configuration . getContentDirectory ( ) , e ) ; } } return null ; } | Iterates through AlternateContent objects trying to match against their pre compiled pattern . | 119 | 15 |
22,572 | public void addIntermediateSchema ( Schema schema ) throws TupleMRException { if ( schemaAlreadyExists ( schema . getName ( ) ) ) { throw new TupleMRException ( "There's a schema with that name '" + schema . getName ( ) + "'" ) ; } schemas . add ( schema ) ; } | Adds a Map - output schema . Tuples emitted by TupleMapper will use one of the schemas added by this method . Schemas added in consecutive calls to this method must be named differently . | 77 | 42 |
22,573 | public void setOrderBy ( OrderBy ordering ) throws TupleMRException { failIfNull ( ordering , "OrderBy can't be null" ) ; failIfEmpty ( ordering . getElements ( ) , "OrderBy can't be empty" ) ; failIfEmpty ( schemas , "Need to specify source schemas" ) ; failIfEmpty ( groupByFields , "Need to specify group by fields" ) ; if ( schemas . size ( ) == 1 ) { if ( ordering . getSchemaOrderIndex ( ) != null ) { throw new TupleMRException ( "Not able to use source order when just one source specified" ) ; } } Schema firstSchema = schemas . get ( 0 ) ; for ( SortElement sortElement : ordering . getElements ( ) ) { if ( ! fieldPresentInAllSchemas ( sortElement . getName ( ) ) ) { throw new TupleMRException ( "Can't sort by field '" + sortElement . getName ( ) + "' . Not present in all sources" ) ; } if ( ! fieldSameTypeInAllSources ( sortElement . getName ( ) ) ) { throw new TupleMRException ( "Can't sort by field '" + sortElement . getName ( ) + "' since its type differs among sources" ) ; } if ( sortElement . getCustomComparator ( ) != null ) { Field field = firstSchema . getField ( sortElement . getName ( ) ) ; if ( field . getType ( ) != Type . OBJECT ) { throw new TupleMRException ( "Not allowed to specify custom comparator for type=" + field . getType ( ) ) ; } } } // group by fields need to be a prefix of sort by fields for ( String groupField : groupByFields ) { if ( ! ordering . containsBeforeSchemaOrder ( groupField ) ) { throw new TupleMRException ( "Group by field '" + groupField + "' is not present in common order by before source order" ) ; } } this . commonOrderBy = ordering ; } | Sets the criteria to sort the tuples by . In a multi - schema scenario all the fields defined in the specified ordering must be present in every intermediate schema defined . | 455 | 34 |
22,574 | public void setSpecificOrderBy ( String schemaName , OrderBy ordering ) throws TupleMRException { // TODO failIfNull ( schemaName , "Not able to set specific orderBy for null source" ) ; if ( ! schemaAlreadyExists ( schemaName ) ) { throw new TupleMRException ( "Unknown source '" + schemaName + "' in specific OrderBy" ) ; } failIfNull ( ordering , "Not able to set null criteria for source '" + schemaName + "'" ) ; failIfEmpty ( ordering . getElements ( ) , "Can't set empty ordering" ) ; failIfNull ( commonOrderBy , "Not able to set specific order with no previous common OrderBy" ) ; if ( commonOrderBy . getSchemaOrderIndex ( ) == null ) { throw new TupleMRException ( "Need to specify source order in common OrderBy when using specific OrderBy" ) ; } if ( ordering . getSchemaOrderIndex ( ) != null ) { throw new TupleMRException ( "Not allowed to set source order in specific order" ) ; } Schema schema = getSchemaByName ( schemaName ) ; Map < String , String > aliases = fieldAliases . get ( schema . getName ( ) ) ; for ( SortElement e : ordering . getElements ( ) ) { if ( ! Schema . containsFieldUsingAlias ( schema , e . getName ( ) , aliases ) ) { throw new TupleMRException ( "Source '" + schemaName + "' doesn't contain field '" + e . getName ( ) ) ; } if ( e . getCustomComparator ( ) != null ) { Field field = schema . getField ( e . getName ( ) ) ; if ( field == null ) { field = schema . getField ( aliases . get ( e . getName ( ) ) ) ; } if ( field . getType ( ) != Type . OBJECT ) { throw new TupleMRException ( "Not allowed to set custom comparator for type=" + field . getType ( ) ) ; } } } for ( SortElement e : ordering . getElements ( ) ) { if ( commonOrderBy . containsFieldName ( e . getName ( ) ) ) { throw new TupleMRException ( "Common sort by already contains sorting for field '" + e . getName ( ) ) ; } } this . specificsOrderBy . 
put ( schemaName , ordering ) ; } | Sets how tuples from the specific schemaName will be sorted after being sorted by commonOrderBy and schemaOrder | 534 | 23 |
22,575 | private void initComparators ( ) { TupleMRConfigBuilder . initializeComparators ( context . getHadoopContext ( ) . getConfiguration ( ) , tupleMRConfig ) ; customComparators = new RawComparator < ? > [ maxDepth + 1 ] ; for ( int i = minDepth ; i <= maxDepth ; i ++ ) { SortElement element = tupleMRConfig . getCommonCriteria ( ) . getElements ( ) . get ( i ) ; if ( element . getCustomComparator ( ) != null ) { customComparators [ i ] = element . getCustomComparator ( ) ; } } } | Initialize the custom comparators . Creates a quick access array for the custom comparators . | 132 | 19 |
22,576 | @ SuppressWarnings ( { "unchecked" , "rawtypes" } ) public Record toRecord ( ITuple tuple , Record reuse ) throws IOException { Record record = reuse ; if ( record == null ) { record = new Record ( avroSchema ) ; } if ( schemaValidation && ! tuple . getSchema ( ) . equals ( pangoolSchema ) ) { throw new IOException ( "Tuple '" + tuple + "' " + "contains schema not expected." + "Expected schema '" + pangoolSchema + " and actual: " + tuple . getSchema ( ) ) ; } for ( int i = 0 ; i < pangoolSchema . getFields ( ) . size ( ) ; i ++ ) { Object obj = tuple . get ( i ) ; Field field = pangoolSchema . getField ( i ) ; if ( obj == null ) { throw new IOException ( "Field '" + field . getName ( ) + "' can't be null in tuple:" + tuple ) ; } switch ( field . getType ( ) ) { case INT : case LONG : case FLOAT : case BOOLEAN : case DOUBLE : case BYTES : record . put ( i , obj ) ; //optimistic break ; case OBJECT : Serializer customSer = customSerializers [ i ] ; DataOutputBuffer buffer = buffers [ i ] ; buffer . reset ( ) ; if ( customSer != null ) { customSer . open ( buffer ) ; customSer . serialize ( obj ) ; customSer . close ( ) ; //TODO is this safe ? } else { hadoopSer . ser ( obj , buffer ) ; } //TODO this byteBuffer instances should be cached and reused ByteBuffer byteBuffer = ByteBuffer . wrap ( buffer . getData ( ) , 0 , buffer . getLength ( ) ) ; record . put ( i , byteBuffer ) ; break ; case ENUM : record . put ( i , obj . toString ( ) ) ; break ; case STRING : record . put ( i , new Utf8 ( obj . toString ( ) ) ) ; //could be directly String ? break ; default : throw new IOException ( "Not correspondence to Avro type from Pangool type " + field . getType ( ) ) ; } } return record ; } | Moves data between a Tuple and an Avro Record | 505 | 12 |
22,577 | public Properties appendToDefaultProperties ( File configFile ) { if ( defaultProperties != null && configFile . canRead ( ) ) { defaultProperties = appendProperties ( defaultProperties , configFile ) ; } return defaultProperties ; } | Adds properties from file to the default properties if the file exists . | 53 | 13 |
22,578 | public Properties appendProperties ( Properties properties , File configFile ) { if ( ! configFile . exists ( ) ) { return properties ; } return reader . appendProperties ( properties , configFile , log ) ; } | Add new properties to an existing Properties object | 45 | 8 |
22,579 | public Properties getSystemProperties ( ) { Properties properties = new Properties ( ) ; properties . putAll ( System . getenv ( ) ) ; properties . putAll ( System . getProperties ( ) ) ; return properties ; } | Reads in the system properties | 48 | 5 |
22,580 | public Properties getPropertiesByContext ( ServletContext context , String path ) { return reader . getProperties ( context , path , log ) ; } | Read in properties based on a ServletContext and a path to a config file | 32 | 16 |
22,581 | public synchronized void persistProperties ( Properties properties , File propsFile , String message ) { Properties toWrite = new Properties ( ) ; for ( String key : properties . stringPropertyNames ( ) ) { if ( System . getProperties ( ) . containsKey ( key ) && ! properties . getProperty ( key ) . equals ( System . getProperty ( key ) ) ) { toWrite . setProperty ( key , properties . getProperty ( key ) ) ; } else if ( System . getenv ( ) . containsKey ( key ) && ! properties . getProperty ( key ) . equals ( System . getenv ( key ) ) ) { toWrite . setProperty ( key , properties . getProperty ( key ) ) ; } else if ( ! System . getProperties ( ) . containsKey ( key ) && ! System . getenv ( ) . containsKey ( key ) ) { toWrite . setProperty ( key , properties . getProperty ( key ) ) ; } } writer . persistProperties ( toWrite , propsFile , message , log ) ; } | Persist properties that are not system env or other system properties in a thread synchronized manner | 221 | 17 |
22,582 | public void makeConfigParserLive ( ) { if ( stagedConfigParser != null ) { notifyListeners ( listeners , stagedConfigParser , log ) ; liveConfigParser = stagedConfigParser ; latch . countDown ( ) ; } } | This notifies all registered listeners then make a staged configuration live . | 48 | 13 |
22,583 | private static Class < ? > [ ] getListenerGenericTypes ( Class < ? > listenerClass , Logger log ) { List < Class < ? > > configClasses = new ArrayList < Class < ? > > ( ) ; Type [ ] typeVars = listenerClass . getGenericInterfaces ( ) ; if ( typeVars != null ) { for ( Type interfaceClass : typeVars ) { if ( interfaceClass instanceof ParameterizedType ) { if ( ( ( ParameterizedType ) interfaceClass ) . getRawType ( ) instanceof Class ) { if ( ConfigurationListener . class . isAssignableFrom ( ( Class < ? > ) ( ( ParameterizedType ) interfaceClass ) . getRawType ( ) ) ) { ParameterizedType pType = ( ParameterizedType ) interfaceClass ; Type [ ] typeArgs = pType . getActualTypeArguments ( ) ; if ( typeArgs != null && typeArgs . length == 1 && typeArgs [ 0 ] instanceof Class ) { Class < ? > type = ( Class < ? > ) typeArgs [ 0 ] ; if ( type . isAnnotationPresent ( CadmiumConfig . class ) ) { log . debug ( "Adding " + type + " to the configuration types interesting to " + listenerClass ) ; configClasses . add ( type ) ; } } } } } } } return configClasses . toArray ( new Class < ? > [ ] { } ) ; } | Gets a list of classes that the listenerClass is interesting in listening to . | 311 | 16 |
22,584 | protected void doSanityCheck ( ) throws EXIException { // Self-contained elements do not work with re-ordered if ( fidelityOptions . isFidelityEnabled ( FidelityOptions . FEATURE_SC ) && ( codingMode == CodingMode . COMPRESSION || codingMode == CodingMode . PRE_COMPRESSION ) ) { throw new EXIException ( "(Pre-)Compression and selfContained elements cannot work together" ) ; } if ( ! this . grammar . isSchemaInformed ( ) ) { this . maximumNumberOfBuiltInElementGrammars = - 1 ; this . maximumNumberOfBuiltInProductions = - 1 ; this . grammarLearningDisabled = false ; // TODO warn user? } // blockSize in NON compression mode? Just ignore it! // canonical EXI (http://www.w3.org/TR/exi-c14n/) if ( this . getEncodingOptions ( ) . isOptionEnabled ( EncodingOptions . CANONICAL_EXI ) ) { updateFactoryAccordingCanonicalEXI ( ) ; } } | some consistency and sanity checks | 232 | 5 |
22,585 | static public int zipDirectory ( final Configuration conf , final ZipOutputStream zos , final String baseName , final String root , final Path itemToZip ) throws IOException { LOG . info ( String . format ( "zipDirectory: %s %s %s" , baseName , root , itemToZip ) ) ; LocalFileSystem localFs = FileSystem . getLocal ( conf ) ; int count = 0 ; final FileStatus itemStatus = localFs . getFileStatus ( itemToZip ) ; if ( itemStatus . isDir ( ) ) { final FileStatus [ ] statai = localFs . listStatus ( itemToZip ) ; // Add a directory entry to the zip file final String zipDirName = relativePathForZipEntry ( itemToZip . toUri ( ) . getPath ( ) , baseName , root ) ; final ZipEntry dirZipEntry = new ZipEntry ( zipDirName + Path . SEPARATOR_CHAR ) ; LOG . info ( String . format ( "Adding directory %s to zip" , zipDirName ) ) ; zos . putNextEntry ( dirZipEntry ) ; zos . closeEntry ( ) ; count ++ ; if ( statai == null || statai . length == 0 ) { LOG . info ( String . format ( "Skipping empty directory %s" , itemToZip ) ) ; return count ; } for ( FileStatus status : statai ) { count += zipDirectory ( conf , zos , baseName , root , status . getPath ( ) ) ; } LOG . info ( String . format ( "Wrote %d entries for directory %s" , count , itemToZip ) ) ; return count ; } final String inZipPath = relativePathForZipEntry ( itemToZip . toUri ( ) . getPath ( ) , baseName , root ) ; if ( inZipPath . length ( ) == 0 ) { LOG . warn ( String . format ( "Skipping empty zip file path for %s (%s %s)" , itemToZip , root , baseName ) ) ; return 0 ; } // Take empty files in case the place holder is needed FSDataInputStream in = null ; try { in = localFs . open ( itemToZip ) ; final ZipEntry ze = new ZipEntry ( inZipPath ) ; ze . setTime ( itemStatus . getModificationTime ( ) ) ; // Comments confuse looking at the zip file // ze.setComment(itemToZip.toString()); zos . putNextEntry ( ze ) ; IOUtils . copyBytes ( in , zos , conf , false ) ; zos . closeEntry ( ) ; LOG . info ( String . 
format ( "Wrote %d entries for file %s" , count , itemToZip ) ) ; return 1 ; } finally { in . close ( ) ; } } | Write a file to a zip output stream removing leading path name components from the actual file name when creating the zip file entry . | 608 | 25 |
22,586 | public static void addListener ( Document doc , Element root ) { Element listener = doc . createElement ( "listener" ) ; Element listenerClass = doc . createElement ( "listener-class" ) ; listener . appendChild ( listenerClass ) ; listenerClass . appendChild ( doc . createTextNode ( "org.apache.shiro.web.env.EnvironmentLoaderListener" ) ) ; addRelativeTo ( root , listener , "listener" , true ) ; } | Adds a shiro environment listener to load the shiro config file . | 102 | 14 |
22,587 | public static void addContextParam ( Document doc , Element root ) { Element ctxParam = doc . createElement ( "context-param" ) ; Element paramName = doc . createElement ( "param-name" ) ; paramName . appendChild ( doc . createTextNode ( "shiroConfigLocations" ) ) ; ctxParam . appendChild ( paramName ) ; Element paramValue = doc . createElement ( "param-value" ) ; paramValue . appendChild ( doc . createTextNode ( "file:" + new File ( System . getProperty ( "com.meltmedia.cadmium.contentRoot" ) , "shiro.ini" ) . getAbsoluteFile ( ) . getAbsolutePath ( ) ) ) ; ctxParam . appendChild ( paramValue ) ; addRelativeTo ( root , ctxParam , "listener" , false ) ; } | Adds a context parameter to a web . xml file to override where the shiro config location is to be loaded from . The location loaded from will be represented by the com . meltmedia . cadmium . contentRoot system property . | 192 | 47 |
22,588 | public static void addEnvContextParam ( Document doc , Element root ) { Element ctxParam = doc . createElement ( "context-param" ) ; Element paramName = doc . createElement ( "param-name" ) ; paramName . appendChild ( doc . createTextNode ( "shiroEnvironmentClass" ) ) ; ctxParam . appendChild ( paramName ) ; Element paramValue = doc . createElement ( "param-value" ) ; paramValue . appendChild ( doc . createTextNode ( "com.meltmedia.cadmium.servlets.shiro.WebEnvironment" ) ) ; ctxParam . appendChild ( paramValue ) ; addRelativeTo ( root , ctxParam , "listener" , false ) ; } | Adds a context parameter to a web . xml file to override where the shiro environment class . | 164 | 19 |
22,589 | public static void addFilter ( Document doc , Element root ) { Element filter = doc . createElement ( "filter" ) ; Element filterName = doc . createElement ( "filter-name" ) ; filterName . appendChild ( doc . createTextNode ( "ShiroFilter" ) ) ; filter . appendChild ( filterName ) ; Element filterClass = doc . createElement ( "filter-class" ) ; filterClass . appendChild ( doc . createTextNode ( "org.apache.shiro.web.servlet.ShiroFilter" ) ) ; filter . appendChild ( filterClass ) ; addRelativeTo ( root , filter , "filter" , true ) ; } | Adds the shiro filter to a web . xml file . | 145 | 12 |
22,590 | public static void addFilterMapping ( Document doc , Element root ) { Element filterMapping = doc . createElement ( "filter-mapping" ) ; Element filterName = doc . createElement ( "filter-name" ) ; filterName . appendChild ( doc . createTextNode ( "ShiroFilter" ) ) ; filterMapping . appendChild ( filterName ) ; Element urlPattern = doc . createElement ( "url-pattern" ) ; urlPattern . appendChild ( doc . createTextNode ( "/*" ) ) ; filterMapping . appendChild ( urlPattern ) ; addDispatchers ( doc , filterMapping , "REQUEST" , "FORWARD" , "INCLUDE" , "ERROR" ) ; addRelativeTo ( root , filterMapping , "filter-mapping" , true ) ; } | Adds the filter mapping for the shiro filter to a web . xml file . | 180 | 16 |
22,591 | public static void addDispatchers ( Document doc , Element filterMapping , String ... names ) { if ( names != null ) { for ( String name : names ) { Element dispatcher = doc . createElement ( "dispatcher" ) ; dispatcher . appendChild ( doc . createTextNode ( name ) ) ; filterMapping . appendChild ( dispatcher ) ; } } } | Adds dispatchers for each item in the vargs parameter names to a filter mapping element of a web . xml file . | 79 | 25 |
22,592 | public static void storeXmlDocument ( ZipOutputStream outZip , ZipEntry jbossWeb , Document doc ) throws IOException , TransformerFactoryConfigurationError , TransformerConfigurationException , TransformerException { jbossWeb = new ZipEntry ( jbossWeb . getName ( ) ) ; outZip . putNextEntry ( jbossWeb ) ; TransformerFactory tFactory = TransformerFactory . newInstance ( ) ; Transformer transformer = tFactory . newTransformer ( ) ; transformer . setOutputProperty ( OutputKeys . INDENT , "yes" ) ; transformer . setOutputProperty ( "{http://xml.apache.org/xslt}indent-amount" , "2" ) ; DOMSource source = new DOMSource ( doc ) ; StreamResult result = new StreamResult ( outZip ) ; transformer . transform ( source , result ) ; outZip . closeEntry ( ) ; } | Writes a xml document to a zip file with an entry specified by the jbossWeb parameter . | 187 | 20 |
22,593 | public static void removeNodesByTagName ( Element doc , String tagname ) { NodeList nodes = doc . getElementsByTagName ( tagname ) ; for ( int i = 0 ; i < nodes . getLength ( ) ; i ++ ) { Node n = nodes . item ( i ) ; doc . removeChild ( n ) ; } } | Removes elements by a specified tag name from the xml Element passed in . | 75 | 15 |
22,594 | public static void storeProperties ( ZipOutputStream outZip , ZipEntry cadmiumPropertiesEntry , Properties cadmiumProps , List < String > newWarNames ) throws IOException { ZipEntry newCadmiumEntry = new ZipEntry ( cadmiumPropertiesEntry . getName ( ) ) ; outZip . putNextEntry ( newCadmiumEntry ) ; cadmiumProps . store ( outZip , "Initial git properties for " + newWarNames . get ( 0 ) ) ; outZip . closeEntry ( ) ; } | Adds a properties file to a war . | 119 | 8 |
22,595 | public static String getWarName ( ServletContext context ) { String [ ] pathSegments = context . getRealPath ( "/WEB-INF/web.xml" ) . split ( "/" ) ; String warName = pathSegments [ pathSegments . length - 3 ] ; if ( ! warName . endsWith ( ".war" ) ) { URL webXml = WarUtils . class . getClassLoader ( ) . getResource ( "/cadmium-version.properties" ) ; if ( webXml != null ) { String urlString = webXml . toString ( ) . substring ( 0 , webXml . toString ( ) . length ( ) - "/WEB-INF/classes/cadmium-version.properties" . length ( ) ) ; File warFile = null ; if ( webXml . getProtocol ( ) . equalsIgnoreCase ( "file" ) ) { warFile = new File ( urlString . substring ( 5 ) ) ; } else if ( webXml . getProtocol ( ) . equalsIgnoreCase ( "vfszip" ) ) { warFile = new File ( urlString . substring ( 7 ) ) ; } else if ( webXml . getProtocol ( ) . equalsIgnoreCase ( "vfsfile" ) ) { warFile = new File ( urlString . substring ( 8 ) ) ; } else if ( webXml . getProtocol ( ) . equalsIgnoreCase ( "vfs" ) && System . getProperty ( JBOSS_7_DEPLOY_DIR ) != null ) { String path = urlString . substring ( "vfs:/" . length ( ) ) ; String deployDir = System . getProperty ( JBOSS_7_DEPLOY_DIR ) ; warFile = new File ( deployDir , path ) ; } if ( warFile != null ) { warName = warFile . getName ( ) ; } } } return warName ; } | Gets the currently deployed war file name from the ServletContext . | 432 | 14 |
22,596 | public final int decodeUnsignedInteger ( ) throws IOException { // 0XXXXXXX ... 1XXXXXXX 1XXXXXXX int result = decode ( ) ; // < 128: just one byte, optimal case // ELSE: multiple bytes... if ( result >= 128 ) { result = ( result & 127 ) ; int mShift = 7 ; int b ; do { // 1. Read the next octet b = decode ( ) ; // 2. Multiply the value of the unsigned number represented by // the 7 least significant // bits of the octet by the current multiplier and add the // result to the current value. result += ( b & 127 ) << mShift ; // 3. Multiply the multiplier by 128 mShift += 7 ; // 4. If the most significant bit of the octet was 1, go back to // step 1 } while ( b >= 128 ) ; } return result ; } | Decode an arbitrary precision non negative integer using a sequence of octets . The most significant bit of the last octet is set to zero to indicate sequence termination . Only seven bits per octet are used to store the integer s value . | 187 | 48 |
22,597 | public DateTimeValue decodeDateTimeValue ( DateTimeType type ) throws IOException { int year = 0 , monthDay = 0 , time = 0 , fractionalSecs = 0 ; switch ( type ) { case gYear : // Year, [Time-Zone] year = decodeInteger ( ) + DateTimeValue . YEAR_OFFSET ; break ; case gYearMonth : // Year, MonthDay, [TimeZone] case date : // Year, MonthDay, [TimeZone] year = decodeInteger ( ) + DateTimeValue . YEAR_OFFSET ; monthDay = decodeNBitUnsignedInteger ( DateTimeValue . NUMBER_BITS_MONTHDAY ) ; break ; case dateTime : // Year, MonthDay, Time, [FractionalSecs], [TimeZone] // e.g. "0001-01-01T00:00:00.111+00:33"; year = decodeInteger ( ) + DateTimeValue . YEAR_OFFSET ; monthDay = decodeNBitUnsignedInteger ( DateTimeValue . NUMBER_BITS_MONTHDAY ) ; // Note: *no* break; case time : // Time, [FractionalSecs], [TimeZone] // e.g. "12:34:56.135" time = decodeNBitUnsignedInteger ( DateTimeValue . NUMBER_BITS_TIME ) ; boolean presenceFractionalSecs = decodeBoolean ( ) ; fractionalSecs = presenceFractionalSecs ? decodeUnsignedInteger ( ) : 0 ; break ; case gMonth : // MonthDay, [TimeZone] // e.g. "--12" case gMonthDay : // MonthDay, [TimeZone] // e.g. "--01-28" case gDay : // MonthDay, [TimeZone] // "---16"; monthDay = decodeNBitUnsignedInteger ( DateTimeValue . NUMBER_BITS_MONTHDAY ) ; break ; default : throw new UnsupportedOperationException ( ) ; } boolean presenceTimezone = decodeBoolean ( ) ; int timeZone = presenceTimezone ? decodeNBitUnsignedInteger ( DateTimeValue . NUMBER_BITS_TIMEZONE ) - DateTimeValue . TIMEZONE_OFFSET_IN_MINUTES : 0 ; return new DateTimeValue ( type , year , monthDay , time , fractionalSecs , presenceTimezone , timeZone ) ; } | Decode Date - Time as sequence of values representing the individual components of the Date - Time . | 521 | 19 |
22,598 | public Set < String > configureJob ( Job job ) throws FileNotFoundException , IOException { Set < String > instanceFiles = new HashSet < String > ( ) ; for ( Map . Entry < Path , List < Input > > entry : multiInputs . entrySet ( ) ) { for ( int inputId = 0 ; inputId < entry . getValue ( ) . size ( ) ; inputId ++ ) { Input input = entry . getValue ( ) . get ( inputId ) ; instanceFiles . addAll ( PangoolMultipleInputs . addInputPath ( job , input . path , input . inputFormat , input . inputProcessor , input . specificContext , inputId ) ) ; } } return instanceFiles ; } | Use this method for configuring a Job instance according to the multiple input specs that have been specified . Returns the instance files created . | 155 | 26 |
22,599 | public static void waitForToken ( String siteUri , String token , Long since , Long timeout ) throws Exception { if ( ! siteUri . endsWith ( "/system/history" ) ) { siteUri += "/system/history" ; } siteUri += "/" + token ; if ( since != null ) { siteUri += "/" + since ; } HttpClient httpClient = httpClient ( ) ; HttpGet get = new HttpGet ( siteUri ) ; Long currentTime = System . currentTimeMillis ( ) ; Long timeoutTime = currentTime + timeout ; do { currentTime = System . currentTimeMillis ( ) ; HttpResponse resp = httpClient . execute ( get ) ; if ( resp . getStatusLine ( ) . getStatusCode ( ) == HttpStatus . SC_OK ) { String response = EntityUtils . toString ( resp . getEntity ( ) ) ; if ( response != null && response . trim ( ) . equalsIgnoreCase ( "true" ) ) { return ; } else { Thread . sleep ( 1000l ) ; } } else { String errorResponse = EntityUtils . toString ( resp . getEntity ( ) ) ; if ( errorResponse != null ) { throw new Exception ( errorResponse . trim ( ) ) ; } else { throw new Exception ( "Command failed!" ) ; } } } while ( currentTime < timeoutTime ) ; if ( currentTime >= timeoutTime ) { throw new Exception ( "Timed out waiting for command to complete!" ) ; } } | Waits until a timeout is reached or a token shows up in the history of a site as finished or failed . | 328 | 23 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.