signature stringlengths 43 39.1k | implementation stringlengths 0 450k |
|---|---|
public class EJBFactoryHome { /** * Create wrapper instance of the type of wrappers managed by this
* home . < p >
* This method provides a wrapper factory capability .
* @ param id The < code > BeanId < / code > to associate with the wrapper
* @ return < code > EJSWrapper < / code > instance whose most specific
* type is the type of wrappers managed by this home < p > */
@ Override public EJSWrapperCommon internalCreateWrapper ( BeanId beanId ) throws CreateException , RemoteException , CSIException { } } | if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) Tr . entry ( tc , "internalCreateWrapper : " + beanId ) ; // Creating the wrappers for an EJBFactory is fairly simple ; there is
// only one remote wrapper , which is really just a wrapper around the
// HomeOfHomes .
// A different instance of the wrapper class ( EJBFactory ) is requred
// for every binding , to insure it is associated with the correct
// cluster , for proper WLM routing . The ' primary key ' of an EJBFactory
// is the J2EEName of either the applicaton or module , and is used
// to obtain the proper cluster identity .
// A special EJSWrapperCommon constructor is used , that processes
// just the one wrapper , and avoids the requirement of passing
// BeanMetaData . There really is no BMD for the EJBFactory , as it
// just represents the HomeOfHomes .
EJBFactoryImpl ejbFactory = new EJBFactoryImpl ( ivEJBLinkResolver ) ; J2EEName factoryKey = ( J2EEName ) beanId . getPrimaryKey ( ) ; Object cluster = ivContainer . getEJBRuntime ( ) . getClusterIdentity ( factoryKey ) ; EJSWrapperCommon wrappers = new EJSWrapperCommon ( ejbFactory , beanId , cluster , ivContainer ) ; if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) Tr . exit ( tc , "internalCreateWrapper : " + wrappers ) ; return wrappers ; |
public class AbstractCacheConfig { /** * Add a configuration for a { @ link javax . cache . event . CacheEntryListener } .
* @ param cacheEntryListenerConfiguration the { @ link CacheEntryListenerConfiguration }
* @ return the { @ link CacheConfig }
* @ throws IllegalArgumentException if the same CacheEntryListenerConfiguration
* is used more than once */
@ Override public CacheConfiguration < K , V > addCacheEntryListenerConfiguration ( CacheEntryListenerConfiguration < K , V > cacheEntryListenerConfiguration ) { } } | checkNotNull ( cacheEntryListenerConfiguration , "CacheEntryListenerConfiguration can't be null" ) ; if ( ! getListenerConfigurations ( ) . add ( cacheEntryListenerConfiguration ) ) { throw new IllegalArgumentException ( "A CacheEntryListenerConfiguration can " + "be registered only once" ) ; } return this ; |
public class WordBuilder { /** * Appends { @ code num } copies of a symbol to the contents of the internal storage .
* @ param num
* the number of copies
* @ param symbol
* the symbol
* @ return { @ code this } */
public WordBuilder < I > repeatAppend ( int num , I symbol ) { } } | if ( num == 0 ) { return this ; } ensureAdditionalCapacity ( num ) ; if ( symbol == null ) { length += num ; } else { for ( int i = num ; i > 0 ; i -- ) { array [ length ++ ] = symbol ; } } return this ; |
public class ElasticsearchClusterConfigStatusMarshaller { /** * Marshall the given parameter object . */
public void marshall ( ElasticsearchClusterConfigStatus elasticsearchClusterConfigStatus , ProtocolMarshaller protocolMarshaller ) { } } | if ( elasticsearchClusterConfigStatus == null ) { throw new SdkClientException ( "Invalid argument passed to marshall(...)" ) ; } try { protocolMarshaller . marshall ( elasticsearchClusterConfigStatus . getOptions ( ) , OPTIONS_BINDING ) ; protocolMarshaller . marshall ( elasticsearchClusterConfigStatus . getStatus ( ) , STATUS_BINDING ) ; } catch ( Exception e ) { throw new SdkClientException ( "Unable to marshall request to JSON: " + e . getMessage ( ) , e ) ; } |
public class BooleanConstraint { /** * A factory method for creating { @ link BooleanConstraint } s from an arbitrary
* propositional logic formula ( wff ) . Allowed connectives are
* < code > ^ < / code > ( and ) , < code > v < / code > ( or ) , < code > ~ < / code > ( not ) , < code > - & gt ; < / code > ( implies ) , < code > & lt ; - & gt ; < / code > ( iff ) } .
* Atoms in the formula should be named " xN " where
* x is in [ a - z ] and N is in { 1 . . < code > scope . length < / code > } . The signature
* of the formula must contain all and only the variables in the scope .
* The conversion to CNF is provided by the propositional logic < code > CNFTransformer < / code > class
* of the < code > aima - java < / code > library ( see < a href = " http : / / aima - java . googlecode . com " > aima - java . googlecode . com < / a > ) .
* < br >
* Note : the given wff must be composed of < i > binary < / i > clauses ( i . e . , all parantheses must be made explicit ) .
* For example , the following wff
* < br >
* < code > ( x1 ^ x2 ) ^ ( x2 v ~ x3 ^ x4 ) ^ ( ~ x1 v x3 ) ^ ( x2 v ~ x3 ^ ~ x4 ) < / code >
* < br >
* must be input as
* < br >
* < code > ( ( ( ( x1 ^ x2 ) ^ ( x2 v ( ~ x3 ^ x4 ) ) ) ^ ( ~ x1 v x3 ) ) ^ ( x2 v ( ~ x3 ^ ~ x4 ) ) ) < / code >
* < br >
* or as
* < br >
* < code > ( ( ( x1 ^ x2 ) ^ ( x2 v ( ~ x3 ^ x4 ) ) ) ^ ( ( ~ x1 v x3 ) ^ ( x2 v ( ~ x3 ^ ~ x4 ) ) ) ) < / code >
* < br >
* @ param scope The { @ link BooleanVariable } s referred to in the formula .
* @ param wff An arbitrary propositional logic formula .
* @ return One or more { @ link BooleanConstraint } s representing the given formula in CNF . */
public static BooleanConstraint [ ] createBooleanConstraints ( BooleanVariable [ ] scope , String wff ) { } } | try { wff = wff . replace ( "~" , "NOT " ) ; wff = wff . replace ( "-" , "=" ) ; wff = wff . replace ( "v" , "OR" ) ; wff = wff . replace ( "^" , "AND" ) ; MetaCSPLogging . getLogger ( BooleanConstraint . class ) . finest ( "Converting WFF: " + wff ) ; Converter < Symbol > sConv = new Converter < Symbol > ( ) ; PEParser parser = new PEParser ( ) ; Sentence s = ( Sentence ) parser . parse ( wff ) ; Set < Sentence > clauses = new CNFClauseGatherer ( ) . getClausesFrom ( new CNFTransformer ( ) . transform ( s ) ) ; // List < Symbol > signature = sConv . setToList ( new SymbolClassifier ( ) . getSymbolsIn ( s ) ) ;
Vector < BooleanConstraint > cons = new Vector < BooleanConstraint > ( ) ; for ( Sentence cl : clauses ) { HashMap < BooleanVariable , Boolean > newClause = new HashMap < BooleanVariable , Boolean > ( ) ; List < Symbol > positiveSymbols = sConv . setToList ( new SymbolClassifier ( ) . getPositiveSymbolsIn ( cl ) ) ; List < Symbol > negativeSymbols = sConv . setToList ( new SymbolClassifier ( ) . getNegativeSymbolsIn ( cl ) ) ; try { for ( Symbol ps : positiveSymbols ) { BooleanVariable bv = scope [ Integer . parseInt ( ps . toString ( ) . substring ( 1 ) ) - 1 ] ; newClause . put ( bv , true ) ; } for ( Symbol ns : negativeSymbols ) { BooleanVariable bv = scope [ Integer . parseInt ( ns . toString ( ) . substring ( 1 ) ) - 1 ] ; if ( newClause . containsKey ( bv ) ) newClause . remove ( bv ) ; else newClause . put ( bv , false ) ; } } catch ( ArrayIndexOutOfBoundsException e ) { throw new Error ( "Variable numbering in WFF must be within scope" ) ; } catch ( NumberFormatException e ) { throw new Error ( "Variables in WFF must be in the format [a-z][1-MAXINT]" ) ; } if ( newClause . size ( ) > 0 ) { BooleanVariable [ ] relevantVars = new BooleanVariable [ newClause . size ( ) ] ; boolean [ ] positive = new boolean [ newClause . size ( ) ] ; int counter = 0 ; for ( Entry < BooleanVariable , Boolean > ent : newClause . entrySet ( ) ) { relevantVars [ counter ] = ent . getKey ( ) ; positive [ counter ++ ] = ent . getValue ( ) ; } BooleanConstraint bc = new BooleanConstraint ( relevantVars , positive ) ; MetaCSPLogging . getLogger ( BooleanConstraint . class ) . finest ( "Created constraint " + bc ) ; boolean subsumed = false ; for ( BooleanConstraint otherBc : cons ) { if ( bc . isEquivalent ( otherBc ) ) { subsumed = true ; break ; } } if ( ! subsumed ) cons . add ( bc ) ; } } MetaCSPLogging . getLogger ( BooleanConstraint . class ) . finest ( "CNF(WFF): " + cons ) ; return cons . toArray ( new BooleanConstraint [ cons . size ( ) ] ) ; } catch ( java . lang . 
RuntimeException e ) { throw new Error ( "Malformed BooleanConstraint - allowed logical connectives:\n\t^ : AND\n\tv : OR\n\t-> : implication\n\t<-> : iff\n\t~ : NOT" ) ; } |
public class Variables { /** * Get an instance of the { @ link Variables } stack from the given { @ link GraphRewrite } event context . */
public static Variables instance ( GraphRewrite event ) { } } | Variables instance = ( Variables ) event . getRewriteContext ( ) . get ( Variables . class ) ; if ( instance == null ) { instance = new Variables ( ) ; event . getRewriteContext ( ) . put ( Variables . class , instance ) ; } return instance ; |
public class QuorumImpl { /** * Determines if the quorum is present for the given member collection , caches the result and publishes an event under
* the { @ link # quorumName } topic if there was a change in presence .
* < strong > This method is not thread safe and should not be called concurrently . < / strong >
* @ param members the members for which the presence is determined */
void update ( Collection < Member > members ) { } } | QuorumState previousQuorumState = quorumState ; QuorumState newQuorumState = QuorumState . ABSENT ; try { boolean present = quorumFunction . apply ( members ) ; newQuorumState = present ? QuorumState . PRESENT : QuorumState . ABSENT ; } catch ( Exception e ) { ILogger logger = nodeEngine . getLogger ( QuorumService . class ) ; logger . severe ( "Quorum function of quorum: " + quorumName + " failed! Quorum status is set to " + newQuorumState , e ) ; } quorumState = newQuorumState ; if ( previousQuorumState != newQuorumState ) { createAndPublishEvent ( members , newQuorumState == QuorumState . PRESENT ) ; } |
public class DocPretty { /** * Print list . , with separators */
protected void print ( List < ? extends DocTree > list , String sep ) throws IOException { } } | if ( list . isEmpty ( ) ) return ; boolean first = true ; for ( DocTree t : list ) { if ( ! first ) print ( sep ) ; print ( t ) ; first = false ; } |
public class SearchIndex { /** * Retrieves the root of the indexing aggregate for
* < code > removedUUIDs < / code > and puts it into < code > map < / code > .
* @ param removedNodeIds
* the UUIDs of removed nodes .
* @ param map
* aggregate roots are collected in this map . Key = UUID ,
* value = NodeState . */
protected void retrieveAggregateRoot ( final Set < String > removedNodeIds , final Map < String , NodeData > map ) { } } | if ( indexingConfig != null ) { AggregateRule [ ] aggregateRules = indexingConfig . getAggregateRules ( ) ; if ( aggregateRules == null ) { return ; } long time = 0 ; if ( log . isDebugEnabled ( ) ) { time = System . currentTimeMillis ( ) ; } int found = SecurityHelper . doPrivilegedAction ( new PrivilegedAction < Integer > ( ) { public Integer run ( ) { int found = 0 ; try { CachingMultiIndexReader reader = indexRegister . getDefaultIndex ( ) . getIndexReader ( ) ; try { Term aggregateUUIDs = new Term ( FieldNames . AGGREGATED_NODE_UUID , "" ) ; TermDocs tDocs = reader . termDocs ( ) ; try { ItemDataConsumer ism = getContext ( ) . getItemStateManager ( ) ; for ( Iterator < String > it = removedNodeIds . iterator ( ) ; it . hasNext ( ) ; ) { String id = it . next ( ) ; aggregateUUIDs = aggregateUUIDs . createTerm ( id ) ; tDocs . seek ( aggregateUUIDs ) ; while ( tDocs . next ( ) ) { Document doc = reader . document ( tDocs . doc ( ) , FieldSelectors . UUID ) ; String uuid = doc . get ( FieldNames . UUID ) ; ItemData itd = ism . getItemData ( uuid ) ; if ( itd == null ) { continue ; } if ( ! itd . isNode ( ) ) { throw new RepositoryException ( "Item with id:" + uuid + " is not a node" ) ; } map . put ( uuid , ( NodeData ) itd ) ; found ++ ; } } } finally { tDocs . close ( ) ; } } finally { reader . release ( ) ; } } catch ( Exception e ) { log . warn ( "Exception while retrieving aggregate roots" , e ) ; } return found ; } } ) ; if ( log . isDebugEnabled ( ) ) { time = System . currentTimeMillis ( ) - time ; log . debug ( "Retrieved {} aggregate roots in {} ms." , new Integer ( found ) , new Long ( time ) ) ; } } |
public class CodeGenerator { /** * generate import code .
* @ param code the code */
private void genImportCode ( ClassCode code ) { } } | code . importClass ( "java.util.*" ) ; code . importClass ( "java.io.IOException" ) ; code . importClass ( "java.lang.reflect.*" ) ; code . importClass ( "com.baidu.bjf.remoting.protobuf.code.*" ) ; code . importClass ( "com.baidu.bjf.remoting.protobuf.utils.*" ) ; code . importClass ( "com.baidu.bjf.remoting.protobuf.*" ) ; code . importClass ( "com.google.protobuf.*" ) ; if ( ! StringUtils . isEmpty ( getPackage ( ) ) ) { code . importClass ( ClassHelper . getInternalName ( cls . getCanonicalName ( ) ) ) ; } |
public class LibratoWriter { /** * Send given metrics to the Graphite server . */
@ Override public void write ( Iterable < QueryResult > results ) { } } | logger . debug ( "Export to '{}', proxy {} metrics {}" , url , proxy , results ) ; List < QueryResult > counters = new ArrayList < QueryResult > ( ) ; List < QueryResult > gauges = new ArrayList < QueryResult > ( ) ; for ( QueryResult result : results ) { if ( METRIC_TYPE_GAUGE . equals ( result . getType ( ) ) ) { gauges . add ( result ) ; } else if ( METRIC_TYPE_COUNTER . equals ( result . getType ( ) ) ) { counters . add ( result ) ; } else if ( null == result . getType ( ) ) { logger . info ( "Unspecified type for result {}, export it as counter" ) ; counters . add ( result ) ; } else { logger . info ( "Unsupported metric type '{}' for result {}, export it as counter" , result . getType ( ) , result ) ; counters . add ( result ) ; } } HttpURLConnection urlConnection = null ; try { if ( proxy == null ) { urlConnection = ( HttpURLConnection ) url . openConnection ( ) ; } else { urlConnection = ( HttpURLConnection ) url . openConnection ( proxy ) ; } urlConnection . setRequestMethod ( "POST" ) ; urlConnection . setDoInput ( true ) ; urlConnection . setDoOutput ( true ) ; urlConnection . setReadTimeout ( libratoApiTimeoutInMillis ) ; urlConnection . setRequestProperty ( "content-type" , "application/json; charset=utf-8" ) ; urlConnection . setRequestProperty ( "Authorization" , "Basic " + basicAuthentication ) ; urlConnection . setRequestProperty ( "User-Agent" , httpUserAgent ) ; serialize ( counters , gauges , urlConnection . getOutputStream ( ) ) ; int responseCode = urlConnection . getResponseCode ( ) ; if ( responseCode != 200 ) { exceptionCounter . incrementAndGet ( ) ; logger . warn ( "Failure {}:'{}' to send result to Librato server '{}' with proxy {}, user {}" , responseCode , urlConnection . getResponseMessage ( ) , url , proxy , user ) ; } if ( logger . isTraceEnabled ( ) ) { IoUtils2 . copy ( urlConnection . getInputStream ( ) , System . 
out ) ; } } catch ( Exception e ) { exceptionCounter . incrementAndGet ( ) ; logger . warn ( "Failure to send result to Librato server '{}' with proxy {}, user {}" , url , proxy , user , e ) ; } finally { if ( urlConnection != null ) { try { InputStream in = urlConnection . getInputStream ( ) ; IoUtils2 . copy ( in , IoUtils2 . nullOutputStream ( ) ) ; IoUtils2 . closeQuietly ( in ) ; InputStream err = urlConnection . getErrorStream ( ) ; if ( err != null ) { IoUtils2 . copy ( err , IoUtils2 . nullOutputStream ( ) ) ; IoUtils2 . closeQuietly ( err ) ; } } catch ( IOException e ) { logger . warn ( "Exception flushing http connection" , e ) ; } } } |
public class MapTileTransitionModel { /** * Get the new transition type from two transitions .
* @ param a The inner transition .
* @ param b The outer transition .
* @ param ox The horizontal offset to update .
* @ param oy The vertical offset to update .
* @ return The new transition type , < code > null < / code > if none . */
private static TransitionType getTransition ( TransitionType a , TransitionType b , int ox , int oy ) { } } | final TransitionType type = getTransitionHorizontalVertical ( a , b , ox , oy ) ; if ( type != null ) { return type ; } return getTransitionDiagonals ( a , b , ox , oy ) ; |
public class Faker { /** * Set a random check or uncheck state
* @ param view */
public void fillWithCheckState ( CompoundButton view ) { } } | validateNotNullableView ( view ) ; validateIfIsACompoundButton ( view ) ; view . setChecked ( new Random ( ) . nextBoolean ( ) ) ; |
public class CmsFrameset { /** * Returns the html for the " preferences " button depending on the current users permissions and
* the default workplace settings . < p >
* @ return the html for the " preferences " button */
public String getPreferencesButton ( ) { } } | int buttonStyle = getSettings ( ) . getUserSettings ( ) . getWorkplaceButtonStyle ( ) ; if ( ! getCms ( ) . getRequestContext ( ) . getCurrentUser ( ) . isManaged ( ) ) { return button ( "../commons/preferences.jsp" , "body" , "preferences.png" , Messages . GUI_BUTTON_PREFERENCES_0 , buttonStyle ) ; } else { return button ( null , null , "preferences_in.png" , Messages . GUI_BUTTON_PREFERENCES_0 , buttonStyle ) ; } |
public class AbcGrammar { /** * major : : = " maj " [ " o " [ " r " ] ] */
Rule Major ( ) { } } | return Sequence ( IgnoreCase ( "maj" ) , Optional ( Sequence ( IgnoreCase ( "o" ) , Optional ( IgnoreCase ( "r" ) ) ) ) ) . label ( Major ) . suppressSubnodes ( ) ; |
public class CmsNotification { /** * Sends a new notification , that will be removed automatically . < p >
* @ param type the notification type
* @ param message the message */
public void send ( Type type , final String message ) { } } | final CmsNotificationMessage notificationMessage = new CmsNotificationMessage ( Mode . NORMAL , type , message ) ; m_messages . add ( notificationMessage ) ; if ( hasWidget ( ) ) { m_widget . addMessage ( notificationMessage ) ; } Timer timer = new Timer ( ) { @ Override public void run ( ) { removeMessage ( notificationMessage ) ; } } ; timer . schedule ( 4000 * ( type == Type . NORMAL ? 1 : 2 ) ) ; |
public class aaakcdaccount { /** * Use this API to fetch all the aaakcdaccount resources that are configured on netscaler . */
public static aaakcdaccount [ ] get ( nitro_service service ) throws Exception { } } | aaakcdaccount obj = new aaakcdaccount ( ) ; aaakcdaccount [ ] response = ( aaakcdaccount [ ] ) obj . get_resources ( service ) ; return response ; |
public class WiresDockingControlImpl { /** * Reurn the closer magnet
* @ param shape
* @ param parent
* @ param allowOverlap should allow overlapping docked shape or not
* @ return closer magnet or null if none are available */
private WiresMagnet getCloserMagnet ( final WiresShape shape , final WiresContainer parent , final boolean allowOverlap ) { } } | final WiresShape parentShape = ( WiresShape ) parent ; final MagnetManager . Magnets magnets = parentShape . getMagnets ( ) ; final Point2D shapeLocation = shape . getComputedLocation ( ) ; final Point2D shapeCenter = Geometry . findCenter ( shape . getPath ( ) . getBoundingBox ( ) ) ; final double shapeX = shapeCenter . getX ( ) + shapeLocation . getX ( ) ; final double shapeY = shapeCenter . getX ( ) + shapeLocation . getY ( ) ; int magnetIndex = - 1 ; Double minDistance = null ; // not considering the zero magnet , that is the center .
for ( int i = 1 ; i < magnets . size ( ) ; i ++ ) { final WiresMagnet magnet = magnets . getMagnet ( i ) ; // skip magnet that has shape over it
if ( allowOverlap || ! hasShapeOnMagnet ( magnet , parentShape ) ) { final double magnetX = magnet . getControl ( ) . getLocation ( ) . getX ( ) ; final double magnetY = magnet . getControl ( ) . getLocation ( ) . getY ( ) ; final double distance = Geometry . distance ( magnetX , magnetY , shapeX , shapeY ) ; // getting shorter distance
if ( ( minDistance == null ) || ( distance < minDistance ) ) { minDistance = distance ; magnetIndex = i ; } } } return ( magnetIndex > 0 ? magnets . getMagnet ( magnetIndex ) : null ) ; |
public class BELScriptLexer { /** * $ ANTLR start " T _ _ 63" */
public final void mT__63 ( ) throws RecognitionException { } } | try { int _type = T__63 ; int _channel = DEFAULT_TOKEN_CHANNEL ; // BELScript . g : 50:7 : ( ' sec ' )
// BELScript . g : 50:9 : ' sec '
{ match ( "sec" ) ; } state . type = _type ; state . channel = _channel ; } finally { } |
public class Util { /** * Depending on the type of the < code > reader < / code > this method either
* closes or releases the reader . The reader is released if it implements
* { @ link ReleaseableIndexReader } .
* @ param reader the index reader to close or release .
* @ throws IOException if an error occurs while closing or releasing the
* index reader . */
public static void closeOrRelease ( final IndexReader reader ) throws IOException { } } | if ( reader instanceof ReleaseableIndexReader ) { ( ( ReleaseableIndexReader ) reader ) . release ( ) ; } else { reader . close ( ) ; } |
public class SoyNodeCompiler { /** * Interprets the given expressions as the arguments of a { @ code range ( . . . ) } expression in a
* { @ code foreach } loop . */
private CompiledForeachRangeArgs calculateRangeArgs ( ForNode forNode , Scope scope ) { } } | RangeArgs rangeArgs = RangeArgs . createFromNode ( forNode ) . get ( ) ; ForNonemptyNode nonEmptyNode = ( ForNonemptyNode ) forNode . getChild ( 0 ) ; ImmutableList . Builder < Statement > initStatements = ImmutableList . builder ( ) ; Expression startExpression = computeRangeValue ( SyntheticVarName . foreachLoopRangeStart ( nonEmptyNode ) , rangeArgs . start ( ) , 0 , scope , initStatements ) ; Expression stepExpression = computeRangeValue ( SyntheticVarName . foreachLoopRangeStep ( nonEmptyNode ) , rangeArgs . increment ( ) , 1 , scope , initStatements ) ; Expression endExpression = computeRangeValue ( SyntheticVarName . foreachLoopRangeEnd ( nonEmptyNode ) , Optional . of ( rangeArgs . limit ( ) ) , Integer . MAX_VALUE , scope , initStatements ) ; return new AutoValue_SoyNodeCompiler_CompiledForeachRangeArgs ( startExpression , endExpression , stepExpression , initStatements . build ( ) ) ; |
public class CheckpointCoordinator { /** * Shuts down the checkpoint coordinator .
* < p > After this method has been called , the coordinator does not accept
* and further messages and cannot trigger any further checkpoints . */
public void shutdown ( JobStatus jobStatus ) throws Exception { } } | synchronized ( lock ) { if ( ! shutdown ) { shutdown = true ; LOG . info ( "Stopping checkpoint coordinator for job {}." , job ) ; periodicScheduling = false ; triggerRequestQueued = false ; // shut down the hooks
MasterHooks . close ( masterHooks . values ( ) , LOG ) ; masterHooks . clear ( ) ; // shut down the thread that handles the timeouts and pending triggers
timer . shutdownNow ( ) ; // clear and discard all pending checkpoints
for ( PendingCheckpoint pending : pendingCheckpoints . values ( ) ) { pending . abort ( CheckpointFailureReason . CHECKPOINT_COORDINATOR_SHUTDOWN ) ; } pendingCheckpoints . clear ( ) ; completedCheckpointStore . shutdown ( jobStatus ) ; checkpointIdCounter . shutdown ( jobStatus ) ; } } |
public class EC2MetadataUtils { /** * Returns the temporary security credentials ( AccessKeyId , SecretAccessKey ,
* SessionToken , and Expiration ) associated with the IAM roles on the
* instance . */
public static Map < String , IAMSecurityCredential > getIAMSecurityCredentials ( ) { } } | Map < String , IAMSecurityCredential > credentialsInfoMap = new HashMap < String , IAMSecurityCredential > ( ) ; List < String > credentials = getItems ( EC2_METADATA_ROOT + "/iam/security-credentials" ) ; if ( null != credentials ) { for ( String credential : credentials ) { String json = getData ( EC2_METADATA_ROOT + "/iam/security-credentials/" + credential ) ; try { IAMSecurityCredential credentialInfo = mapper . readValue ( json , IAMSecurityCredential . class ) ; credentialsInfoMap . put ( credential , credentialInfo ) ; } catch ( Exception e ) { log . warn ( "Unable to process the credential (" + credential + "). " + e . getMessage ( ) , e ) ; } } } return credentialsInfoMap ; |
public class AbstractConversionTable { /** * Remove the current type conversion . */
@ SuppressWarnings ( "unchecked" ) protected void removeCurrentTypeConversion ( ) { } } | final IStructuredSelection selection = this . list . getStructuredSelection ( ) ; final String [ ] types = new String [ selection . size ( ) ] ; final Iterator < ConversionMapping > iter = selection . iterator ( ) ; int i = 0 ; while ( iter . hasNext ( ) ) { types [ i ] = iter . next ( ) . getSource ( ) ; i ++ ; } removeTypeConversions ( types ) ; |
public class PrimitiveEntrySizeCalculator { /** * Returns a sun . misc . Unsafe . Suitable for use in a 3rd party package .
* Replace with a simple call to Unsafe . getUnsafe when integrating
* into a jdk .
* @ return a sun . misc . Unsafe */
static Unsafe getUnsafe ( ) { } } | try { return Unsafe . getUnsafe ( ) ; } catch ( SecurityException tryReflectionInstead ) { } try { return java . security . AccessController . doPrivileged ( new java . security . PrivilegedExceptionAction < Unsafe > ( ) { public Unsafe run ( ) throws Exception { Class < Unsafe > k = Unsafe . class ; for ( java . lang . reflect . Field f : k . getDeclaredFields ( ) ) { f . setAccessible ( true ) ; Object x = f . get ( null ) ; if ( k . isInstance ( x ) ) return k . cast ( x ) ; } throw new NoSuchFieldError ( "the Unsafe" ) ; } } ) ; } catch ( java . security . PrivilegedActionException e ) { throw new RuntimeException ( "Could not initialize intrinsics" , e . getCause ( ) ) ; } |
public class WTableRowRenderer { /** * The preparePaintComponent method has been overridden to ensure that expanded row renderers have been correctly
* initialised .
* Expanded row renderers are lazily instantiated and added to the shared structure as needed . This means for the
* first use of a renderer , it will not have been part of the WComponent tree , and would not have had its
* preparePaintComponent called . We therefore add the renderer to the tree here , and manually call its preparePaint .
* @ param request the Request being responded to . */
@ Override protected void preparePaintComponent ( final Request request ) { } } | super . preparePaintComponent ( request ) ; Class < ? extends WComponent > rowRendererClass = getRowRendererClass ( ) ; if ( rowRendererClass != null && ! expandedRenderers . containsKey ( rowRendererClass ) ) { getExpandedTreeNodeRenderer ( rowRendererClass ) . preparePaint ( request ) ; } |
public class BundleUtils { /** * Returns a optional { @ link android . os . Parcelable } value . In other words , returns the value mapped by key if it exists and is a { @ link android . os . Parcelable } .
* The bundle argument is allowed to be { @ code null } . If the bundle is null , this method returns null .
* @ param bundle a bundle . If the bundle is null , this method will return null .
* @ param key a key for the value .
* @ param fallback fallback value .
* @ return a { @ link android . os . Parcelable } value if exists , null otherwise .
* @ see android . os . Bundle # getParcelable ( String ) */
@ Nullable public static < T extends Parcelable > T optParcelable ( @ Nullable Bundle bundle , @ Nullable String key , @ Nullable T fallback ) { } } | if ( bundle == null ) { return fallback ; } return bundle . getParcelable ( key ) ; |
public class Commands { /** * Constructs a list of string parameters for a metadata call .
* < p > The number of items is equal to the number of arguments once
* the command line ( the { @ code line } parameter ) has been parsed ,
* typically three ( catalog , schema , table name ) .
* < p > Parses the command line , and assumes that the the first word is
* a compound identifier . If the compound identifier has fewer parts
* than required , fills from the right .
* < p > The result is a mutable list of strings .
* @ param line Command line
* @ param paramName Name of parameter being read from command line
* @ param defaultValues Default values for each component of parameter
* @ return Mutable list of strings */
private List < Object > buildMetadataArgs ( String line , String paramName , String [ ] defaultValues ) { } } | final List < Object > list = new ArrayList < > ( ) ; final String [ ] [ ] ret = sqlLine . splitCompound ( line ) ; String [ ] compound ; if ( ret == null || ret . length != 2 ) { if ( defaultValues [ defaultValues . length - 1 ] == null ) { throw new IllegalArgumentException ( sqlLine . loc ( "arg-usage" , ret == null || ret . length == 0 ? "" : ret [ 0 ] [ 0 ] , paramName ) ) ; } compound = new String [ 0 ] ; } else { compound = ret [ 1 ] ; } if ( compound . length <= defaultValues . length ) { list . addAll ( Arrays . asList ( defaultValues ) . subList ( 0 , defaultValues . length - compound . length ) ) ; list . addAll ( Arrays . asList ( compound ) ) ; } else { list . addAll ( Arrays . asList ( compound ) . subList ( 0 , defaultValues . length ) ) ; } return list ; |
public class RedisCache { /** * Removes the entry for a key only if currently mapped to a given value .
* This is equivalent to
* < pre >
* if ( map . containsKey ( key ) & amp ; & amp ; map . get ( key ) . equals ( value ) ) {
* map . remove ( key ) ;
* return true ;
* } else return false ; < / pre >
* except that the action is performed atomically .
* @ param key key with which the specified value is associated
* @ param value value expected to be associated with the specified key
* @ return < tt > true < / tt > if the value was removed
* @ throws UnsupportedOperationException if the < tt > remove < / tt > operation
* is not supported by this map
* @ throws ClassCastException if the key or value is of an inappropriate
* type for this map
* ( < a href = " . . / Collection . html # optional - restrictions " > optional < / a > )
* @ throws NullPointerException if the specified key or value is null ,
* and this map does not permit null keys or values
* ( < a href = " . . / Collection . html # optional - restrictions " > optional < / a > ) */
@ Override public boolean remove ( Object key , Object value ) { } } | return rMap . remove ( key , value ) ; |
public class Streams { /** * Create a sliding view over this Stream
* < pre >
* { @ code
* List < List < Integer > > list = Streams . sliding ( Stream . of ( 1,2,3,4,5,6)
* , 2,1)
* . collect ( CyclopsCollectors . toList ( ) ) ;
* assertThat ( list . getValue ( 0 ) , hasItems ( 1,2 ) ) ;
* assertThat ( list . getValue ( 1 ) , hasItems ( 2,3 ) ) ;
* < / pre >
* @ param windowSize
* Size of sliding window
* @ return Stream with sliding view over monad */
public final static < T > Stream < Streamable < T > > window ( final Stream < T > stream , final int windowSize , final int increment ) { } } | final Iterator < T > it = stream . iterator ( ) ; final Mutable < PersistentList < T > > list = Mutable . of ( Seq . empty ( ) ) ; return Streams . stream ( new Iterator < Streamable < T > > ( ) { @ Override public boolean hasNext ( ) { return it . hasNext ( ) ; } @ Override public Streamable < T > next ( ) { for ( int i = 0 ; i < increment && list . get ( ) . size ( ) > 0 ; i ++ ) list . mutate ( var -> var . removeAt ( 0 ) ) ; for ( ; list . get ( ) . size ( ) < windowSize && it . hasNext ( ) ; ) { if ( it . hasNext ( ) ) { list . mutate ( var -> var . insertAt ( Math . max ( 0 , var . size ( ) ) , it . next ( ) ) ) ; } } return Streamable . fromIterable ( list . get ( ) ) ; } } ) ; |
public class UnsafeBuffer { @ Override public short getShort ( final int index ) { } } | boundsCheck ( index , Bits . SHORT_SIZE_IN_BYTES ) ; short bits = MEM . getShort ( byteArray , addressOffset + index ) ; if ( NATIVE_BYTE_ORDER != PROTOCOL_BYTE_ORDER ) { bits = Short . reverseBytes ( bits ) ; } return bits ; |
public class JournalUpgrader { /** * Reads a journal via
* { @ code java - cp \
* assembly / server / target / alluxio - assembly - server - < ALLUXIO - VERSION > - jar - with - dependencies . jar \
* alluxio . master . journal . JournalUpgrader - master BlockMaster } .
* @ param args arguments passed to the tool */
public static void main ( String [ ] args ) { } } | if ( ! parseInputArgs ( args ) ) { usage ( ) ; System . exit ( EXIT_FAILED ) ; } if ( sHelp ) { usage ( ) ; System . exit ( EXIT_SUCCEEDED ) ; } List < String > masters = new ArrayList < > ( ) ; for ( MasterFactory factory : ServiceUtils . getMasterServiceLoader ( ) ) { masters . add ( factory . getName ( ) ) ; } for ( String master : masters ) { Upgrader upgrader = new Upgrader ( master , new InstancedConfiguration ( ConfigurationUtils . defaults ( ) ) ) ; try { upgrader . upgrade ( ) ; } catch ( IOException e ) { LOG . error ( "Failed to upgrade the journal for {}." , master , e ) ; System . exit ( EXIT_FAILED ) ; } } |
public class AbcGrammar {
    /**
     * Grammar rule for the title information field:
     * ifield-title ::= %5B.%54.%3A *WSP tex-text-ifield %5D
     * <p>
     * Example: <tt>[T:second version]</tt>
     */
    Rule IfieldTitle() {
        // "[T:" literal, optional whitespace (suppressed from the parse tree),
        // the title text, then the closing "]".
        return Sequence(String("[T:"), ZeroOrMore(WSP()).suppressNode(), TexTextIfield(), String("]")).label(IfieldTitle);
    }
}
public class GitHubConnector {
    /**
     * Stages, commits and pushes all local changes to GitHub.
     * Must be called after {@link #createGitRepository()}.
     *
     * @throws IOException if the local repository has not been created yet,
     *         or if any git operation fails
     */
    public void pushAllChangesToGit() throws IOException {
        if (localRepository == null) {
            throw new IOException("Git has not been created, call createGitRepositoryFirst");
        }
        try {
            // Resolve the committer identity from the authenticated GitHub user.
            UserService userService = new UserService();
            userService.getClient().setOAuth2Token(oAuthToken);
            User user = userService.getUser();
            String name = user.getLogin();
            String email = user.getEmail();
            if (email == null) {
                // This is the e-mail address used by GitHub on web commits where
                // the user's mail is private. See: https://github.com/settings/emails
                email = name + "@users.noreply.github.com";
            }
            // Stage everything, commit, then push with authentication attached.
            localRepository.add().addFilepattern(".").call();
            localRepository.commit().setMessage("Initial commit").setCommitter(name, email).call();
            PushCommand pushCommand = localRepository.push();
            addAuth(pushCommand);
            pushCommand.call();
        } catch (GitAPIException e) {
            throw new IOException("Error pushing changes to GitHub", e);
        }
    }
}
public class ReleaseHostsRequest { /** * The IDs of the Dedicated Hosts to release .
* @ return The IDs of the Dedicated Hosts to release . */
public java . util . List < String > getHostIds ( ) { } } | if ( hostIds == null ) { hostIds = new com . amazonaws . internal . SdkInternalList < String > ( ) ; } return hostIds ; |
public class WaitForState { /** * Waits for the element to be present . The provided wait time will be used
* and if the element isn ' t present after that time , it will fail , and log
* the issue with a screenshot for traceability and added debugging support .
* @ param seconds - how many seconds to wait for */
public void present ( double seconds ) { } } | try { double timeTook = elementPresent ( seconds ) ; checkPresent ( seconds , timeTook ) ; } catch ( TimeoutException e ) { checkPresent ( seconds , seconds ) ; } |
public class CmsFavoriteEntry {
    /**
     * Prepares the CmsObject for jumping to this favorite location, and returns the appropriate URL.
     *
     * @param cms the CmsObject to initialize for jumping to the favorite
     * @return the link for the favorite location, or null for unknown favorite types
     * @throws CmsException if something goes wrong
     */
    public String updateContextAndGetFavoriteUrl(CmsObject cms) throws CmsException {
        // Ignore expiration so favorites pointing at expired resources still resolve.
        CmsResourceFilter filter = CmsResourceFilter.IGNORE_EXPIRATION;
        CmsProject project = null;
        switch (getType()) {
            case explorerFolder:
                CmsResource folder = cms.readResource(getStructureId(), filter);
                project = cms.readProject(getProjectId());
                // Switch the request context to the favorite's site/project
                // before building the explorer link.
                cms.getRequestContext().setSiteRoot(getSiteRoot());
                cms.getRequestContext().setCurrentProject(project);
                String explorerLink = CmsVaadinUtils.getWorkplaceLink()
                    + "#!" + CmsFileExplorerConfiguration.APP_ID + "/" + getProjectId()
                    + "!!" + getSiteRoot() + "!!" + cms.getSitePath(folder);
                return explorerLink;
            case page:
                project = cms.readProject(getProjectId());
                CmsResource target = cms.readResource(getStructureId(), filter);
                CmsResource detailContent = null;
                String link = null;
                cms.getRequestContext().setCurrentProject(project);
                cms.getRequestContext().setSiteRoot(getSiteRoot());
                if (getDetailId() != null) {
                    // Detail pages combine the detail content with its container page.
                    detailContent = cms.readResource(getDetailId());
                    link = OpenCms.getLinkManager().substituteLinkForUnknownTarget(
                        cms, cms.getSitePath(detailContent), cms.getSitePath(target), false);
                } else {
                    link = OpenCms.getLinkManager().substituteLink(cms, target);
                }
                return link;
            default:
                return null;
        }
    }
}
public class IdScriptableObject {
    /**
     * Base implementation for id-based function calls; subclasses that define
     * id-functions must override this. 'thisObj' will be null if invoked as a
     * constructor, in which case an instance of Scriptable should be returned.
     * This default recognizes no function ids and always signals an error.
     */
    @Override
    public Object execIdCall(IdFunctionObject f, Context cx, Scriptable scope, Scriptable thisObj, Object[] args) {
        throw f.unknown();
    }
}
public class UpdateTaskRequestMarshaller {
    /**
     * Marshall the given parameter object into the protocol marshaller.
     *
     * @param updateTaskRequest the request to marshall; must not be null
     * @param protocolMarshaller destination marshaller
     * @throws SdkClientException if the request is null or marshalling fails
     */
    public void marshall(UpdateTaskRequest updateTaskRequest, ProtocolMarshaller protocolMarshaller) {
        if (updateTaskRequest == null) {
            throw new SdkClientException("Invalid argument passed to marshall(...)");
        }
        try {
            protocolMarshaller.marshall(updateTaskRequest.getTaskArn(), TASKARN_BINDING);
            protocolMarshaller.marshall(updateTaskRequest.getOptions(), OPTIONS_BINDING);
            protocolMarshaller.marshall(updateTaskRequest.getName(), NAME_BINDING);
        } catch (Exception e) {
            // Wrap any low-level failure in the SDK's client exception type.
            throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
        }
    }
}
public class ConcatVector {
    /**
     * Sets a component to a set of sparse indices, each with a value.
     * Index/value pairs are stored interleaved in a single double[] buffer.
     *
     * @param component the index of the component to set
     * @param indices the indices of the vector to give values to
     * @param values their values
     */
    public void setSparseComponent(int component, int[] indices, double[] values) {
        // Grow the component arrays if this slot doesn't exist yet.
        if (component >= pointers.length) {
            increaseSizeTo(component + 1);
        }
        assert (indices.length == values.length);
        if (indices.length == 0) {
            // NOTE(review): an empty input stores a double[2] ({0.0, 0.0}),
            // i.e. one implicit entry (index 0, value 0.0) rather than an
            // empty buffer -- presumably intentional; confirm against the
            // sparse-read code before changing.
            pointers[component] = new double[2];
            sparse[component] = true;
            copyOnWrite[component] = false;
        } else {
            // Interleave storage: even slots hold indices, odd slots hold values.
            double[] sparseInfo = new double[indices.length * 2];
            for (int i = 0; i < indices.length; i++) {
                sparseInfo[i * 2] = indices[i];
                sparseInfo[(i * 2) + 1] = values[i];
            }
            pointers[component] = sparseInfo;
            sparse[component] = true;
            copyOnWrite[component] = false;
        }
    }
}
public class TemporalProperty { /** * Sets the value of the property at the specified time .
* For internal and test use only . < p / >
* Values are assumed to be set in chronological order .
* @ param effectiveFrom
* @ param value */
void set ( Instant effectiveFrom , T value ) { } } | T oldValue = get ( ) ; values . put ( effectiveFrom , value ) ; notifyListeners ( oldValue , value ) ; |
public class techsupport {
    /**
     * <pre>
     * Use this operation to generate technical support archive.
     * </pre>
     *
     * @param client the nitro service client used to issue the request
     * @param resource the techsupport resource to run the operation on
     * @return the result of the "start" operation
     * @throws Exception if the underlying nitro operation fails
     */
    public static techsupport[] start(nitro_service client, techsupport resource) throws Exception {
        return ((techsupport[]) resource.perform_operation(client, "start"));
    }
}
public class RPCHelper {
    /**
     * Registers the given method of {@code instance} as an RPC endpoint on the
     * RSB local server. Each incoming event invokes the method on a pooled
     * thread, bounded by RPC_TIMEOUT; a returned Future is additionally
     * resolved (also bounded by RPC_TIMEOUT) so callers receive the final
     * payload rather than a Future.
     *
     * @param method the method to expose; its name becomes the RPC name
     * @param instance the object the method is invoked on
     * @param server the RSB local server to register on
     * @throws CouldNotPerformException if registration fails (an
     *         already-registered method is logged and skipped instead)
     */
    public static <I, T extends I> void registerMethod(final Method method, final T instance, final RSBLocalServer server) throws CouldNotPerformException {
        // NOTE(review): commented-out method-invocation counting debug code
        // removed here for clarity (it tracked per-method call counts and
        // periodically printed them).
        final Logger logger = LoggerFactory.getLogger(instance.getClass());
        logger.debug("Register Method[" + method.getName() + "] on Scope[" + server.getScope() + "].");
        try {
            server.addMethod(method.getName(), event -> {
                try {
                    if (event == null) {
                        throw new NotAvailableException("event");
                    }
                    Object result;
                    Class<?> payloadType;
                    Future<Object> resultFuture = null;
                    try {
                        // Encapsulate invocation to detect method invocation stall via timeout.
                        // TODO: please check via benchmark if this causes a performance issue
                        // compared to direct invocation. Related to openbase/jul#46 Validate
                        // performance of method invocation encapsulation.
                        resultFuture = GlobalCachedExecutorService.submit(() -> {
                            if (event.getData() == null) {
                                return method.invoke(instance);
                            } else {
                                return method.invoke(instance, event.getData());
                            }
                        });
                        result = resultFuture.get(RPC_TIMEOUT, TimeUnit.MILLISECONDS);
                        // Implementation of Future support by resolving the result to
                        // reach the inner future object.
                        if (result instanceof Future) {
                            try {
                                result = ((Future) result).get(RPC_TIMEOUT, TimeUnit.MILLISECONDS);
                            } catch (final TimeoutException ex) {
                                ((Future) result).cancel(true);
                                throw ex;
                            }
                        }
                    } catch (final TimeoutException ex) {
                        // Cancel the still-running task before reporting the stall.
                        if (resultFuture != null && !resultFuture.isDone()) {
                            resultFuture.cancel(true);
                        }
                        throw new CouldNotPerformException("Remote task was canceled!", ex);
                    }
                    // Void payload when the method returned nothing.
                    if (result == null) {
                        payloadType = Void.class;
                    } else {
                        payloadType = result.getClass();
                    }
                    Event returnEvent = new Event(payloadType, result);
                    returnEvent.getMetaData().setUserTime(USER_TIME_KEY, System.nanoTime());
                    return returnEvent;
                } catch (InterruptedException ex) {
                    // Preserve the interrupt flag; a Void event is returned below.
                    Thread.currentThread().interrupt();
                } catch (CouldNotPerformException | IllegalArgumentException | ExecutionException | CancellationException | RejectedExecutionException ex) {
                    final CouldNotPerformException exx = new CouldNotPerformException("Could not invoke Method[" + method.getReturnType().getClass().getSimpleName() + " " + method.getName() + "(" + eventDataToArgumentString(event) + ")] of " + instance + "!", ex);
                    // Don't spam the log during shutdown.
                    if (!(ExceptionProcessor.getInitialCause(ex) instanceof ShutdownInProgressException)) {
                        ExceptionPrinter.printHistoryAndReturnThrowable(exx, logger);
                    }
                    throw new UserCodeException(exx);
                }
                return new Event(Void.class);
            });
        } catch (CouldNotPerformException ex) {
            if (ex.getCause() instanceof InvalidStateException) {
                // Method was already registered -- log at debug level and skip.
                ExceptionPrinter.printHistory("Skip Method[" + method.getName() + "] registration on Scope[" + server.getScope() + "] of " + instance + " because message was already registered!", ex, logger, LogLevel.DEBUG);
            } else {
                throw new CouldNotPerformException("Could not register Method[" + method.getName() + "] on Scope[" + server.getScope() + "] of " + instance + "!", ex);
            }
        }
    }
}
public class StandardBullhornData {
    /**
     * Makes the "entity" api call for getting multiple entities.
     * HTTP Method: GET
     *
     * @param type the entity class to fetch
     * @param idList the entity ids to fetch
     * @param fieldSet the fields to populate on the returned entities
     * @param params optional entity parameters
     * @param <L> list wrapper type for the entities
     * @param <T> entity type
     * @return a list wrapper with the fetched entities; empty when the call fails
     */
    protected <L extends ListWrapper<T>, T extends BullhornEntity> L handleGetMultipleEntities(Class<T> type, Set<Integer> idList, Set<String> fieldSet, EntityParams params) {
        // Comma-separated id list for the REST uri.
        String ids = idList.stream().map(id -> String.valueOf(id)).collect(Collectors.joining(","));
        Map<String, String> uriVariables = restUriVariablesFactory.getUriVariablesForGetMultiple(BullhornEntityInfo.getTypesRestEntityName(type), ids, fieldSet, params);
        String url = restUrlFactory.assembleEntityUrl(params);
        try {
            String response = this.performGetRequest(url, String.class, uriVariables);
            try {
                return restJsonConverter.jsonToEntityDoNotUnwrapRoot(response, BullhornEntityInfo.getTypesListWrapperType(type));
            } catch (RestMappingException onlyOneEntityWasReturned) {
                // The API returns a bare entity (not a list) when only one
                // matched; wrap it in a single-element list wrapper.
                List<T> list = new ArrayList<T>();
                list.add(restJsonConverter.jsonToEntityUnwrapRoot(response, type));
                return (L) new StandardListWrapper<T>(list);
            }
        } catch (RestApiException noneReturned) {
            // Call failed / nothing returned: degrade to an empty wrapper.
            List<T> list = new ArrayList<T>();
            return (L) new StandardListWrapper<T>(list);
        }
    }
}
public class JobMasterId { /** * If the given uuid is null , this returns null , otherwise a JobMasterId that
* corresponds to the UUID , via { @ link # JobMasterId ( UUID ) } . */
public static JobMasterId fromUuidOrNull ( @ Nullable UUID uuid ) { } } | return uuid == null ? null : new JobMasterId ( uuid ) ; |
public class Cells {
    /**
     * Returns the {@code Byte[]} value of the {@link Cell} (associated to
     * {@code table}) whose name is cellName, or null if this Cells object
     * contains no cell whose name is cellName.
     *
     * @param nameSpace the name of the owning table
     * @param cellName the name of the Cell we want to retrieve from this Cells object.
     * @return the {@code Byte[]} value of the matching cell, or null if absent
     */
    public Byte[] getBytes(String nameSpace, String cellName) {
        // Typed delegation to the generic value lookup.
        return getValue(nameSpace, cellName, Byte[].class);
    }
}
public class DbRemoteConfigLoader { /** * 覆盖本地application . yml文件
* @ param content 文件内容 */
private void overrideLocalCanalConfig ( String content ) { } } | try ( OutputStreamWriter writer = new OutputStreamWriter ( new FileOutputStream ( CommonUtils . getConfPath ( ) + "application.yml" ) , StandardCharsets . UTF_8 ) ) { writer . write ( content ) ; writer . flush ( ) ; } catch ( Exception e ) { logger . error ( e . getMessage ( ) , e ) ; } |
public class SlotManager {
    /**
     * Starts the slot manager with the given leader id and resource manager actions.
     *
     * @param newResourceManagerId to use for communication with the task managers
     * @param newMainThreadExecutor to use to run code in the ResourceManager's main thread
     * @param newResourceActions to use for resource (de-)allocations
     */
    public void start(ResourceManagerId newResourceManagerId, Executor newMainThreadExecutor, ResourceActions newResourceActions) {
        LOG.info("Starting the SlotManager.");
        this.resourceManagerId = Preconditions.checkNotNull(newResourceManagerId);
        mainThreadExecutor = Preconditions.checkNotNull(newMainThreadExecutor);
        resourceActions = Preconditions.checkNotNull(newResourceActions);
        started = true;
        // Periodic checks run on the scheduler but execute their logic via the
        // main-thread executor so all state access stays on the main thread.
        taskManagerTimeoutCheck = scheduledExecutor.scheduleWithFixedDelay(
            () -> mainThreadExecutor.execute(() -> checkTaskManagerTimeouts()),
            0L, taskManagerTimeout.toMilliseconds(), TimeUnit.MILLISECONDS);
        slotRequestTimeoutCheck = scheduledExecutor.scheduleWithFixedDelay(
            () -> mainThreadExecutor.execute(() -> checkSlotRequestTimeouts()),
            0L, slotRequestTimeout.toMilliseconds(), TimeUnit.MILLISECONDS);
    }
}
public class ICUResourceBundle {
    /**
     * Fills some of the keys array with the keys on the path to this resource
     * object. Writes the top-level key into index 0 and increments from there.
     *
     * @param keys destination array; slots [0, depth) are written
     * @param depth must be {@link #getResDepth()}
     */
    private void getResPathKeys(String[] keys, int depth) {
        ICUResourceBundle b = this;
        while (depth > 0) {
            // Walk up the containment chain, writing keys from deepest to root.
            keys[--depth] = b.key;
            b = b.container;
            // After advancing: the grand-container is null exactly when we have
            // consumed the full depth (i.e. b is now the top-level bundle).
            assert (depth == 0) == (b.container == null);
        }
    }
}
public class StandardBullhornData { /** * Makes the " entity " api call for getting entities .
* HTTP Method : GET
* @ param type
* @ param id
* @ param fieldSet
* @ param params optional entity parameters
* @ return */
protected < T extends BullhornEntity > T handleGetEntity ( Class < T > type , Integer id , Set < String > fieldSet , EntityParams params ) { } } | Map < String , String > uriVariables = restUriVariablesFactory . getUriVariablesForEntity ( BullhornEntityInfo . getTypesRestEntityName ( type ) , id , fieldSet , params ) ; String url = restUrlFactory . assembleEntityUrl ( params ) ; String jsonString = this . performGetRequest ( url , String . class , uriVariables ) ; return restJsonConverter . jsonToEntityUnwrapRoot ( jsonString , type ) ; |
public class MarkupEngine { /** * Process the header of the file .
* @ param context the parser context */
private void processDefaultHeader ( ParserContext context ) { } } | for ( String line : context . getFileLines ( ) ) { if ( hasHeaderSeparator ( line ) ) { break ; } processHeaderLine ( line , context . getDocumentModel ( ) ) ; } |
public class GVRCursorController { /** * Get the latest { @ link MotionEvent } processed by the
* { @ link GVRCursorController } if there is one ( not all
* { @ link GVRCursorController } s report { @ link MotionEvent } s )
* Note that this function also returns a null . To get every
* { @ link MotionEvent } reported by the { @ link GVRCursorController } use the
* { @ link IControllerEvent } or the { @ link ISensorEvents } listener to
* query for the { @ link MotionEvent } whenever a a callback is made .
* The { @ link MotionEvent } would be valid for the lifetime of that callback
* and would be recycled and reset to null on completion . Make use to the
* { @ link MotionEvent # obtain ( MotionEvent ) } to clone a copy of the
* { @ link MotionEvent } .
* @ return the latest { @ link MotionEvent } processed by the
* { @ link GVRCursorController } or null . */
public MotionEvent getMotionEvent ( ) { } } | synchronized ( eventLock ) { if ( processedMotionEvent . isEmpty ( ) ) { return null ; } else { return MotionEvent . obtain ( processedMotionEvent . get ( processedMotionEvent . size ( ) - 1 ) ) ; } } |
public class CmsJspContentAccessValueWrapper { /** * Factory method to create a new XML content value wrapper . < p >
* In case either parameter is < code > null < / code > , the { @ link # NULL _ VALUE _ WRAPPER } is returned . < p >
* @ param cms the current users OpenCms context
* @ param value the value to warp
* @ param parentValue the parent value , required to set the null value info
* @ param valueName the value path name
* @ return a new content value wrapper instance , or < code > null < / code > if any parameter is < code > null < / code > */
public static CmsJspContentAccessValueWrapper createWrapper ( CmsObject cms , I_CmsXmlContentValue value , I_CmsXmlContentValue parentValue , String valueName ) { } } | if ( ( value != null ) && ( cms != null ) ) { return new CmsJspContentAccessValueWrapper ( cms , value ) ; } if ( ( parentValue != null ) && ( valueName != null ) && ( cms != null ) ) { CmsJspContentAccessValueWrapper wrapper = new CmsJspContentAccessValueWrapper ( ) ; wrapper . m_nullValueInfo = new NullValueInfo ( parentValue , valueName ) ; wrapper . m_cms = cms ; return wrapper ; } // if no value is available ,
return NULL_VALUE_WRAPPER ; |
public class Operation { /** * Executes the operation using the parent { @ link Resource } ' s { @ link OkHttpClient } ,
* sending any given { @ link Params } and automatically deserializes the result into
* an instance of the given generic type reference .
* @ param typeRef The generic { @ link TypeReference } of the result .
* @ param < T > The type of the result to return .
* @ return An instance of the type T deserialized from the response body .
* @ throws IOException when encountering general network communications problem .
* @ throws OmiseException when receiving an error object from the Omise API . */
public < T extends OmiseObject > T returns ( TypeReference < T > typeRef ) throws IOException , OmiseException { } } | InputStream stream = preprocess ( roundtrip ( ) ) ; if ( stream == null ) { return null ; } try { return serializer . deserialize ( stream , typeRef ) ; } finally { stream . close ( ) ; } |
public class GeneratorRegistry { /** * Returns the PathMapping for the generated resource
* @ param bundle
* the bundle
* @ param path
* the generated path
* @ param rsReader
* the resource reader handler
* @ return the PathMapping for the generated resource */
public List < PathMapping > getGeneratedPathMappings ( JoinableResourceBundle bundle , String path , ResourceReaderHandler rsReader ) { } } | List < PathMapping > pathMappings = null ; ResourceGenerator resourceGenerator = getResourceGenerator ( path ) ; if ( resourceGenerator instanceof PathMappingProvider ) { pathMappings = ( ( PathMappingProvider ) resourceGenerator ) . getPathMappings ( bundle , path , rsReader ) ; } return pathMappings ; |
public class LUDecompositionBase_ZDRM {
    /**
     * Writes the lower triangular matrix into the specified matrix.
     *
     * @param lower Where the lower triangular matrix is written to.
     * @return the populated lower triangular matrix (may be a reallocated instance)
     */
    @Override
    public ZMatrixRMaj getLower(ZMatrixRMaj lower) {
        int numRows = LU.numRows;
        int numCols = LU.numRows < LU.numCols ? LU.numRows : LU.numCols;
        // Ensure the output is correctly sized with zeros above the diagonal.
        lower = UtilDecompositons_ZDRM.checkZerosUT(lower, numRows, numCols);
        for (int i = 0; i < numCols; i++) {
            // Unit diagonal (1 + 0i) by LU convention; L's data lies below it.
            lower.set(i, i, 1.0, 0.0);
            for (int j = 0; j < i; j++) {
                int indexLU = LU.getIndex(i, j);
                int indexL = lower.getIndex(i, j);
                // Complex entries are stored interleaved: real then imaginary.
                double real = LU.data[indexLU];
                double imaginary = LU.data[indexLU + 1];
                lower.data[indexL] = real;
                lower.data[indexL + 1] = imaginary;
            }
        }
        // For tall matrices, copy the remaining rows below the square block.
        if (numRows > numCols) {
            for (int i = numCols; i < numRows; i++) {
                for (int j = 0; j < numCols; j++) {
                    int indexLU = LU.getIndex(i, j);
                    int indexL = lower.getIndex(i, j);
                    double real = LU.data[indexLU];
                    double imaginary = LU.data[indexLU + 1];
                    lower.data[indexL] = real;
                    lower.data[indexL + 1] = imaginary;
                }
            }
        }
        return lower;
    }
}
public class RsaUtil { /** * 根据私钥字符串获取私钥
* Only RSAPrivate ( Crt ) KeySpec and PKCS8EncodedKeySpec supported for RSA private keys
* @ param privateKey
* @ return */
public PrivateKey getPrivateKey_PKCS8Encoded ( byte [ ] keyBytes ) { } } | try { PKCS8EncodedKeySpec spec = new PKCS8EncodedKeySpec ( keyBytes ) ; KeyFactory keyFactory = KeyFactory . getInstance ( algorithm ) ; return keyFactory . generatePrivate ( spec ) ; } catch ( Exception e ) { log . error ( e . getMessage ( ) , e ) ; } return null ; |
public class Neighbour {
    /**
     * Sends the proxy subscriptions to this Neighbour.
     * Pulls all the subscriptions that have been sent to this Bus and builds
     * a message that will be sent to this Neighbour.
     *
     * @exception SIResourceException if sending the message fails
     */
    void sendResetProxySubscriptions() throws SIResourceException {
        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
            SibTr.entry(tc, "sendResetProxySubscriptions");
        // Generate the subscription message that will be sent to this neighbour.
        SubscriptionMessage message = iBusGroup.generateResetSubscriptionMessage();
        if (message != null)
            // Send to the Neighbour.
            sendToNeighbour(message, null);
        else if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled())
            // Nothing to reset; trace for diagnostics only.
            SibTr.debug(tc, "No subscriptions to forward to ME " + iMEUuid.toString());
        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
            SibTr.exit(tc, "sendResetProxySubscriptions");
    }
}
public class RObjectsPanel {
    /**
     * Handler for the "add" action (GUI-builder generated hook): lets the user
     * pick one or more .R / .Rdata files, sources or loads each into the R
     * session, then refreshes the panel.
     *
     * @param evt the triggering action event (unused)
     */
    private void _addActionPerformed(java.awt.event.ActionEvent evt) {
        // GEN-FIRST:event__addActionPerformed
        JFileChooser fc = new JFileChooser();
        fc.setMultiSelectionEnabled(true);
        fc.setFileSelectionMode(JFileChooser.FILES_ONLY);
        // Only offer directories (for navigation) and R object files.
        fc.setFileFilter(new FileFilter() {
            @Override
            public boolean accept(File f) {
                return f.isDirectory() || f.getName().endsWith(".R") || f.getName().endsWith(".Rdata");
            }

            @Override
            public String getDescription() {
                return "R object file";
            }
        });
        if (fc.showOpenDialog(this) == JFileChooser.APPROVE_OPTION && fc.getSelectedFiles() != null) {
            File[] files = fc.getSelectedFiles();
            for (File file : files) {
                if (file.getName().endsWith(".R")) {
                    // Source R scripts.
                    if (R != null) {
                        R.source(file);
                    }
                } else if (file.getName().endsWith(".Rdata")) {
                    // Load saved R workspaces.
                    if (R != null) {
                        R.load(file);
                    }
                } else {
                    Log.Out.println("Not loading/sourcing " + file.getName());
                }
            }
        }
        // Refresh the displayed object list regardless of what was chosen.
        update();
    }
}
public class Bytes {
    /**
     * Encode to uppercase hexadecimal.
     *
     * @param b the source byte array
     * @param off offset of the first byte to encode
     * @param len number of bytes to encode
     * @return the uppercase hex representation of the selected range
     */
    public static String toHexUpper(byte[] b, int off, int len) {
        // Delegate to the shared encoder with the uppercase digit table.
        return toHex(b, off, len, HEX_UPPER_CHAR);
    }
}
public class RecurrenceIteratorFactory { /** * Creates a recurrence iterator from an RRULE .
* @ param rrule the recurrence rule
* @ param dtStart the start date of the series
* @ param tzid the timezone that the start date is in , as well as the
* timezone to iterate in
* @ return the iterator */
public static RecurrenceIterator createRecurrenceIterator ( Recurrence rrule , DateValue dtStart , TimeZone tzid ) { } } | Frequency freq = rrule . getFrequency ( ) ; /* * If the given RRULE is malformed and does not have a frequency
* specified , default to " yearly " . */
if ( freq == null ) { freq = Frequency . YEARLY ; } DayOfWeek wkst = rrule . getWorkweekStarts ( ) ; ICalDate until = rrule . getUntil ( ) ; DateValue untilUtc = ( until == null ) ? null : Google2445Utils . convert ( until , tzid ) ; int count = toInt ( rrule . getCount ( ) ) ; int interval = toInt ( rrule . getInterval ( ) ) ; ByDay [ ] byDay = rrule . getByDay ( ) . toArray ( new ByDay [ 0 ] ) ; int [ ] byMonth = toIntArray ( rrule . getByMonth ( ) ) ; int [ ] byMonthDay = toIntArray ( rrule . getByMonthDay ( ) ) ; int [ ] byWeekNo = toIntArray ( rrule . getByWeekNo ( ) ) ; int [ ] byYearDay = toIntArray ( rrule . getByYearDay ( ) ) ; int [ ] bySetPos = toIntArray ( rrule . getBySetPos ( ) ) ; int [ ] byHour = toIntArray ( rrule . getByHour ( ) ) ; int [ ] byMinute = toIntArray ( rrule . getByMinute ( ) ) ; int [ ] bySecond = toIntArray ( rrule . getBySecond ( ) ) ; if ( interval <= 0 ) { interval = 1 ; } if ( wkst == null ) { wkst = DayOfWeek . MONDAY ; } // optimize out BYSETPOS where possible
if ( bySetPos . length > 0 ) { switch ( freq ) { case HOURLY : if ( byHour . length > 0 && byMinute . length <= 1 && bySecond . length <= 1 ) { byHour = filterBySetPos ( byHour , bySetPos ) ; } /* * Handling bySetPos for rules that are more frequent than daily
* tends to lead to large amounts of processor being used before
* other work limiting features can kick in since there many
* seconds between dtStart and where the year limit kicks in .
* There are no known use cases for the use of bySetPos with
* hourly minutely and secondly rules so we just ignore it . */
bySetPos = NO_INTS ; break ; case MINUTELY : if ( byMinute . length > 0 && bySecond . length <= 1 ) { byMinute = filterBySetPos ( byMinute , bySetPos ) ; } // see bySetPos handling comment above
bySetPos = NO_INTS ; break ; case SECONDLY : if ( bySecond . length > 0 ) { bySecond = filterBySetPos ( bySecond , bySetPos ) ; } // see bySetPos handling comment above
bySetPos = NO_INTS ; break ; default : } } DateValue start = dtStart ; if ( bySetPos . length > 0 ) { /* * Roll back until the beginning of the period to make sure that any
* positive indices are indexed properly . The actual iterator
* implementation is responsible for anything < dtStart . */
switch ( freq ) { case YEARLY : start = ( dtStart instanceof TimeValue ) ? new DateTimeValueImpl ( start . year ( ) , 1 , 1 , 0 , 0 , 0 ) : new DateValueImpl ( start . year ( ) , 1 , 1 ) ; break ; case MONTHLY : start = ( dtStart instanceof TimeValue ) ? new DateTimeValueImpl ( start . year ( ) , start . month ( ) , 1 , 0 , 0 , 0 ) : new DateValueImpl ( start . year ( ) , start . month ( ) , 1 ) ; break ; case WEEKLY : int d = ( 7 + wkst . ordinal ( ) - TimeUtils . dayOfWeek ( dtStart ) . getCalendarConstant ( ) ) % 7 ; start = TimeUtils . add ( dtStart , new DateValueImpl ( 0 , 0 , - d ) ) ; break ; default : break ; } } /* * Recurrences are implemented as a sequence of periodic generators .
* First a year is generated , and then months , and within months , days . */
ThrottledGenerator yearGenerator = Generators . serialYearGenerator ( freq == Frequency . YEARLY ? interval : 1 , dtStart ) ; Generator monthGenerator = null ; Generator dayGenerator = null ; Generator secondGenerator = null ; Generator minuteGenerator = null ; Generator hourGenerator = null ; /* * When multiple generators are specified for a period , they act as a
* union operator . We could have multiple generators ( say , for day ) and
* then run each and merge the results , but some generators are more
* efficient than others . So to avoid generating 53 Sundays and throwing
* away all but 1 for RRULE : FREQ = YEARLY ; BYDAY = TU ; BYWEEKNO = 1 , we
* reimplement some of the more prolific generators as filters . */
// TODO ( msamuel ) : don ' t need a list here
// Build the per-frequency generators and filters for the recurrence iterator.
List<Predicate<? super DateValue>> filters = new ArrayList<Predicate<? super DateValue>>();
switch (freq) {
    case SECONDLY:
        if (bySecond.length == 0 || interval != 1) {
            secondGenerator = Generators.serialSecondGenerator(interval, dtStart);
            if (bySecond.length > 0) {
                filters.add(Filters.bySecondFilter(bySecond));
            }
        }
        break;
    case MINUTELY:
        if (byMinute.length == 0 || interval != 1) {
            minuteGenerator = Generators.serialMinuteGenerator(interval, dtStart);
            if (byMinute.length > 0) {
                filters.add(Filters.byMinuteFilter(byMinute));
            }
        }
        break;
    case HOURLY:
        if (byHour.length == 0 || interval != 1) {
            hourGenerator = Generators.serialHourGenerator(interval, dtStart);
            if (byHour.length > 0) {
                // FIX: was Filters.byHourFilter(bySecond) — the hourly filter must be
                // built from the BYHOUR values, mirroring the SECONDLY/MINUTELY cases.
                filters.add(Filters.byHourFilter(byHour));
            }
        }
        break;
    case DAILY:
        break;
    case WEEKLY:
        /*
         * Week is not considered a period because a week may span multiple
         * months and/or years. There are no week generators, so a filter is
         * used to make sure that FREQ=WEEKLY;INTERVAL=2 only generates
         * dates within the proper week.
         */
        if (byDay.length > 0) {
            dayGenerator = Generators.byDayGenerator(byDay, false, start);
            byDay = NO_DAYS;
            if (interval > 1) {
                filters.add(Filters.weekIntervalFilter(interval, wkst, dtStart));
            }
        } else {
            dayGenerator = Generators.serialDayGenerator(interval * 7, dtStart);
        }
        break;
    case YEARLY:
        if (byYearDay.length > 0) {
            /*
             * BYYEARDAY is a COMMA-separated list of days of the year; valid
             * values are 1 to 366 or -366 to -1 (e.g. -1 is December 31st).
             */
            dayGenerator = Generators.byYearDayGenerator(byYearDay, start);
            break;
        }
        //$FALL-THROUGH$
    case MONTHLY:
        if (byMonthDay.length > 0) {
            /*
             * BYMONTHDAY is a COMMA-separated list of days of the month; valid
             * values are 1 to 31 or -31 to -1 (e.g. -10 is the tenth-to-last day).
             */
            dayGenerator = Generators.byMonthDayGenerator(byMonthDay, start);
            byMonthDay = NO_INTS;
        } else if (byWeekNo.length > 0 && Frequency.YEARLY == freq) {
            // BYWEEKNO lists ordinals of weeks of the year; only valid for YEARLY rules.
            dayGenerator = Generators.byWeekNoGenerator(byWeekNo, wkst, start);
            byWeekNo = NO_INTS;
        } else if (byDay.length > 0) {
            /*
             * Each BYDAY value may carry a positive (n) or negative (-n) ordinal,
             * selecting the nth occurrence of that day within the MONTHLY or
             * YEARLY rule (e.g. -1MO is the last Monday of the month). Without a
             * modifier it means every such day within the frequency.
             */
            dayGenerator = Generators.byDayGenerator(byDay, Frequency.YEARLY == freq && byMonth.length == 0, start);
            byDay = NO_DAYS;
        } else {
            if (Frequency.YEARLY == freq) {
                monthGenerator = Generators.byMonthGenerator(new int[] { dtStart.month() }, start);
            }
            dayGenerator = Generators.byMonthDayGenerator(new int[] { dtStart.day() }, start);
        }
        break;
}
// Generator inference for any period not set above.
if (secondGenerator == null) {
    secondGenerator = Generators.bySecondGenerator(bySecond, start);
}
if (minuteGenerator == null) {
    if (byMinute.length == 0 && freq.compareTo(Frequency.MINUTELY) < 0) {
        minuteGenerator = Generators.serialMinuteGenerator(1, dtStart);
    } else {
        minuteGenerator = Generators.byMinuteGenerator(byMinute, start);
    }
}
if (hourGenerator == null) {
    if (byHour.length == 0 && freq.compareTo(Frequency.HOURLY) < 0) {
        hourGenerator = Generators.serialHourGenerator(1, dtStart);
    } else {
        hourGenerator = Generators.byHourGenerator(byHour, start);
    }
}
if (dayGenerator == null) {
    boolean dailyOrMoreOften = freq.compareTo(Frequency.DAILY) <= 0;
    if (byMonthDay.length > 0) {
        dayGenerator = Generators.byMonthDayGenerator(byMonthDay, start);
        byMonthDay = NO_INTS;
    } else if (byDay.length > 0) {
        dayGenerator = Generators.byDayGenerator(byDay, Frequency.YEARLY == freq, start);
        byDay = NO_DAYS;
    } else if (dailyOrMoreOften) {
        dayGenerator = Generators.serialDayGenerator(Frequency.DAILY == freq ? interval : 1, dtStart);
    } else {
        dayGenerator = Generators.byMonthDayGenerator(new int[] { dtStart.day() }, start);
    }
}
// Any BYDAY/BYMONTHDAY values not consumed by a generator become filters.
if (byDay.length > 0) {
    filters.add(Filters.byDayFilter(byDay, Frequency.YEARLY == freq, wkst));
    byDay = NO_DAYS;
}
if (byMonthDay.length > 0) {
    filters.add(Filters.byMonthDayFilter(byMonthDay));
}
// Generator inference common to all periods.
if (byMonth.length > 0) {
    monthGenerator = Generators.byMonthGenerator(byMonth, start);
} else if (monthGenerator == null) {
    monthGenerator = Generators.serialMonthGenerator(freq == Frequency.MONTHLY ? interval : 1, dtStart);
}
/*
 * The condition tells the iterator when to halt. The condition is
 * exclusive, so the date that triggers it will not be included.
 */
Predicate<DateValue> condition;
boolean canShortcutAdvance = true;
if (count != 0) {
    condition = Conditions.countCondition(count);
    /*
     * Can't shortcut because the count condition must see every generated
     * instance.
     * TODO(msamuel): if count is large, we might try predicting the end date
     * so that the COUNT condition can be converted to an UNTIL condition.
     */
    canShortcutAdvance = false;
} else if (untilUtc != null) {
    if ((untilUtc instanceof TimeValue) != (dtStart instanceof TimeValue)) {
        // TODO(msamuel): warn — UNTIL and DTSTART disagree on having a time component.
        if (dtStart instanceof TimeValue) {
            untilUtc = TimeUtils.dayStart(untilUtc);
        } else {
            untilUtc = TimeUtils.toDateValue(untilUtc);
        }
    }
    condition = Conditions.untilCondition(untilUtc);
} else {
    condition = Predicates.<DateValue>alwaysTrue();
}
// Combine filters into a single predicate.
Predicate<? super DateValue> filter;
switch (filters.size()) {
    case 0:
        filter = Predicates.<DateValue>alwaysTrue();
        break;
    case 1:
        filter = filters.get(0);
        break;
    default:
        filter = Predicates.and(filters);
        break;
}
Generator instanceGenerator;
if (bySetPos.length > 0) {
    instanceGenerator = InstanceGenerators.bySetPosInstanceGenerator(bySetPos, freq, wkst, filter, yearGenerator, monthGenerator, dayGenerator, hourGenerator, minuteGenerator, secondGenerator);
} else {
    instanceGenerator = InstanceGenerators.serialInstanceGenerator(filter, yearGenerator, monthGenerator, dayGenerator, hourGenerator, minuteGenerator, secondGenerator);
}
return new RRuleIteratorImpl(dtStart, tzid, condition, instanceGenerator, yearGenerator, monthGenerator, dayGenerator, hourGenerator, minuteGenerator, secondGenerator, canShortcutAdvance);
public class SQLService {
    /**
     * Collects the column names of a result set, in column order.
     *
     * @param rsmd result set metadata to inspect
     * @return array of column names, one entry per column
     * @throws Exception if the metadata cannot be read
     */
    private String[] getColumnNames(ResultSetMetaData rsmd) throws Exception {
        int columnCount = rsmd.getColumnCount();
        ArrayList<String> columnNames = new ArrayList<String>(columnCount);
        // JDBC column indices are 1-based.
        for (int col = 1; col <= columnCount; col++) {
            columnNames.add(rsmd.getColumnName(col));
        }
        return columnNames.toArray(new String[0]);
    }
}
public class Log { /** * Log throwable with message and stack trace according to the logging level
* settings , as in log4j and slf4j . This is probably not what you expect .
* @ param msg
* @ param t
* @ deprecated Use { @ link # warn ( Throwable , String , Object . . . ) } to clarify that
* you want the stack trace printed according to log severity
* settings or explicitly call toString ( ) if you want to use the
* Throwable as an argument to
* { @ link String # format ( String , Object . . . ) } . */
@ Deprecated public void warn ( String msg , Throwable t ) { } } | log . warn ( msg , t ) ; |
public class HTMLParser { /** * / * ( non - Javadoc )
* @ see spark . protocol . parser . ResultParser # parse ( spark . api . Command , java . io . InputStream , spark . protocol . ProtocolCommand . ResultType ) */
@ Override public Result parse ( Command cmd , InputStream input , ResultType type ) throws SparqlException { } } | BufferedReader br = null ; try { // Expected type should already be validated by ResultFactory , check anyways .
if ( type != null && type != ResultType . ASK ) { throw new SparqlException ( "Unexpected result type; expected " + type + " but found ASK." ) ; } char [ ] buf = new char [ BUFFER_LEN ] ; // TODO : Find the actual encoding from the server response , if available .
br = new BufferedReader ( new InputStreamReader ( input , UTF8 ) ) ; int len = fill ( buf , br ) ; String s = new String ( buf , 0 , len ) . trim ( ) ; logger . debug ( "Read '{}' from text/html ASK result" , s ) ; boolean result ; if ( s . equalsIgnoreCase ( "true" ) ) { result = true ; } else if ( s . equalsIgnoreCase ( "false" ) ) { result = false ; } else { logger . warn ( "Unexpected boolean value read from text/html ASK result: '{}'" , s ) ; result = false ; } if ( logger . isWarnEnabled ( ) && br . read ( ) >= 0 ) { logger . warn ( "Unexpected input found after boolean value" ) ; } return new BooleanResultImpl ( cmd , result ) ; } catch ( IOException e ) { throw new SparqlException ( "Error reading from server input" , e ) ; } finally { try { if ( br != null ) br . close ( ) ; else input . close ( ) ; } catch ( IOException e ) { // Don ' t re - throw because that could mask another exception ( or prevent an otherwise usable
// result from being returned ) .
logger . warn ( "Error closing server input" , e ) ; } } |
public class DimensionValuesWithAttributes { /** * The attribute that applies to a specific < code > Dimension < / code > .
* @ param attributes
* The attribute that applies to a specific < code > Dimension < / code > .
* @ return Returns a reference to this object so that method calls can be chained together . */
public DimensionValuesWithAttributes withAttributes ( java . util . Map < String , String > attributes ) { } } | setAttributes ( attributes ) ; return this ; |
public class HeartbeatImpl { /** * received a join from this server . Used to update external IPs . */
private void joinSelf ( ServerHeartbeat server , String extAddress , int extPort , String address , int port , String serverHash ) { } } | if ( server != _serverSelf ) { throw new IllegalStateException ( L . l ( "Invalid self: {0} vs {1}" , server , _serverSelf ) ) ; } if ( ! serverHash . equals ( _serverSelf . getMachineHash ( ) ) ) { throw new IllegalStateException ( L . l ( "Invalid server hash {0} against {1}:{2}({3})" , _serverSelf , address , port , serverHash ) ) ; } if ( port != _serverSelf . port ( ) ) { throw new IllegalStateException ( L . l ( "Invalid server port {0} against {1}:{2}({3})" , _serverSelf , address , port , serverHash ) ) ; } boolean isSSL = false ; ServerHeartbeat extServer = getCluster ( ) . createServer ( extAddress , extPort , isSSL ) ; server . setExternalServer ( extServer ) ; server . setMachineHash ( serverHash ) ; server . getRack ( ) . update ( server . getUpdate ( ) ) ; heartbeatStart ( server ) ; updateHubHeartbeatSelf ( ) ; // _ podService . updatePodsFromHeartbeat ( ) ;
log . fine ( "join self " + server ) ; |
public class InjectorBuilder { /** * Iterator through all elements of the current module and write the output of the
* ElementVisitor to the logger at debug level . ' null ' responses are ignored
* @ param visitor
* @ deprecated Use forEachElement ( visitor , message - & gt ; LOG . debug ( message ) ) ; instead */
@ Deprecated public InjectorBuilder traceEachElement ( ElementVisitor < String > visitor ) { } } | return forEachElement ( visitor , message -> LOG . debug ( message ) ) ; |
public class StopTransformJobRequestMarshaller { /** * Marshall the given parameter object . */
public void marshall ( StopTransformJobRequest stopTransformJobRequest , ProtocolMarshaller protocolMarshaller ) { } } | if ( stopTransformJobRequest == null ) { throw new SdkClientException ( "Invalid argument passed to marshall(...)" ) ; } try { protocolMarshaller . marshall ( stopTransformJobRequest . getTransformJobName ( ) , TRANSFORMJOBNAME_BINDING ) ; } catch ( Exception e ) { throw new SdkClientException ( "Unable to marshall request to JSON: " + e . getMessage ( ) , e ) ; } |
public class TasksImpl { /** * Reactivates a task , allowing it to run again even if its retry count has been exhausted .
* Reactivation makes a task eligible to be retried again up to its maximum retry count . The task ' s state is changed to active . As the task is no longer in the completed state , any previous exit code or failure information is no longer available after reactivation . Each time a task is reactivated , its retry count is reset to 0 . Reactivation will fail for tasks that are not completed or that previously completed successfully ( with an exit code of 0 ) . Additionally , it will fail if the job has completed ( or is terminating or deleting ) .
* @ param jobId The ID of the job containing the task .
* @ param taskId The ID of the task to reactivate .
* @ param taskReactivateOptions Additional parameters for the operation
* @ throws IllegalArgumentException thrown if parameters fail the validation
* @ throws BatchErrorException thrown if the request is rejected by server
* @ throws RuntimeException all other wrapped checked exceptions if the request fails to be sent */
public void reactivate ( String jobId , String taskId , TaskReactivateOptions taskReactivateOptions ) { } } | reactivateWithServiceResponseAsync ( jobId , taskId , taskReactivateOptions ) . toBlocking ( ) . single ( ) . body ( ) ; |
public class EhcacheCachingProvider { /** * { @ inheritDoc } */
@ Override public CacheManager getCacheManager ( URI uri , ClassLoader classLoader , Properties properties ) { } } | uri = uri == null ? getDefaultURI ( ) : uri ; classLoader = classLoader == null ? getDefaultClassLoader ( ) : classLoader ; properties = properties == null ? new Properties ( ) : cloneProperties ( properties ) ; if ( URI_DEFAULT . equals ( uri ) ) { URI override = DefaultConfigurationResolver . resolveConfigURI ( properties ) ; if ( override != null ) { uri = override ; } } return getCacheManager ( new ConfigSupplier ( uri , classLoader ) , properties ) ; |
public class WatchTimeout { /** * Returns a reasonable timeout duration for a watch request .
* @ param expectedTimeoutMillis timeout duration that a user wants to use , in milliseconds
* @ param bufferMillis buffer duration which needs to be added , in milliseconds
* @ return timeout duration in milliseconds , between the specified { @ code bufferMillis } and
* the { @ link # MAX _ MILLIS } . */
public static long makeReasonable ( long expectedTimeoutMillis , long bufferMillis ) { } } | checkArgument ( expectedTimeoutMillis > 0 , "expectedTimeoutMillis: %s (expected: > 0)" , expectedTimeoutMillis ) ; checkArgument ( bufferMillis >= 0 , "bufferMillis: %s (expected: > 0)" , bufferMillis ) ; final long timeout = Math . min ( expectedTimeoutMillis , MAX_MILLIS ) ; if ( bufferMillis == 0 ) { return timeout ; } if ( timeout > MAX_MILLIS - bufferMillis ) { return MAX_MILLIS ; } else { return bufferMillis + timeout ; } |
public class RoleManager { /** * Sets the { @ link java . awt . Color Color } of the selected { @ link net . dv8tion . jda . core . entities . Role Role } .
* @ param color
* The new color for the selected { @ link net . dv8tion . jda . core . entities . Role Role }
* @ return RoleManager for chaining convenience */
@ CheckReturnValue public RoleManager setColor ( Color color ) { } } | this . color = color == null ? Role . DEFAULT_COLOR_RAW : color . getRGB ( ) ; set |= COLOR ; return this ; |
public class SampleRace { /** * Animated path
* @ since 6.0.2 */
private MilestoneManager getAnimatedPathManager ( final MilestoneLister pMilestoneLister ) { } } | final Paint slicePaint = getStrokePaint ( COLOR_POLYLINE_ANIMATED , LINE_WIDTH_BIG ) ; return new MilestoneManager ( pMilestoneLister , new MilestoneLineDisplayer ( slicePaint ) ) ; |
public class AgentsAlreadyRunningAssessmentException { /** * < b > NOTE : < / b > This method appends the values to the existing list ( if any ) . Use
* { @ link # setAgents ( java . util . Collection ) } or { @ link # withAgents ( java . util . Collection ) } if you want to override the
* existing values .
* @ param agents
* @ return Returns a reference to this object so that method calls can be chained together . */
public AgentsAlreadyRunningAssessmentException withAgents ( AgentAlreadyRunningAssessment ... agents ) { } } | if ( this . agents == null ) { setAgents ( new java . util . ArrayList < AgentAlreadyRunningAssessment > ( agents . length ) ) ; } for ( AgentAlreadyRunningAssessment ele : agents ) { this . agents . add ( ele ) ; } return this ; |
public class ADocWithTaxesPayments { /** * < p > Setter for payByDate . < / p >
* @ param pPayByDate reference */
public final void setPayByDate ( final Date pPayByDate ) { } } | if ( pPayByDate == null ) { this . payByDate = null ; } else { this . payByDate = new Date ( pPayByDate . getTime ( ) ) ; } |
public class OutputPanel { /** * This method initializes this */
private void initialize ( ) { } } | this . setLayout ( new BorderLayout ( ) ) ; this . setName ( Constant . messages . getString ( "output.panel.title" ) ) ; // ZAP : i18n
if ( Model . getSingleton ( ) . getOptionsParam ( ) . getViewParam ( ) . getWmUiHandlingOption ( ) == 0 ) { this . setSize ( 243 , 119 ) ; } // ZAP : Added Output ( doc ) icon
this . setIcon ( new ImageIcon ( OutputPanel . class . getResource ( "/resource/icon/16/172.png" ) ) ) ; // ' doc ' icon
this . setDefaultAccelerator ( View . getSingleton ( ) . getMenuShortcutKeyStroke ( KeyEvent . VK_O , KeyEvent . SHIFT_DOWN_MASK , false ) ) ; this . setMnemonic ( Constant . messages . getChar ( "output.panel.mnemonic" ) ) ; this . add ( getMainPanel ( ) , BorderLayout . CENTER ) ; this . setShowByDefault ( true ) ; |
public class JobConf { /** * Get the key class for the map output data . If it is not set , use the
* ( final ) output key class . This allows the map output key class to be
* different than the final output key class .
* @ return the map output key class . */
public Class < ? > getMapOutputKeyClass ( ) { } } | Class < ? > retv = getClass ( "mapred.mapoutput.key.class" , null , Object . class ) ; if ( retv == null ) { retv = getOutputKeyClass ( ) ; } return retv ; |
public class Ifc2x3tc1PackageImpl { /** * < ! - - begin - user - doc - - >
* < ! - - end - user - doc - - >
* @ generated */
public EClass getIfcWasteTerminalType ( ) { } } | if ( ifcWasteTerminalTypeEClass == null ) { ifcWasteTerminalTypeEClass = ( EClass ) EPackage . Registry . INSTANCE . getEPackage ( Ifc2x3tc1Package . eNS_URI ) . getEClassifiers ( ) . get ( 643 ) ; } return ifcWasteTerminalTypeEClass ; |
public class AmazonEC2Client { /** * Gets information about the route table propagations for the specified transit gateway route table .
* @ param getTransitGatewayRouteTablePropagationsRequest
* @ return Result of the GetTransitGatewayRouteTablePropagations operation returned by the service .
* @ sample AmazonEC2 . GetTransitGatewayRouteTablePropagations
* @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / ec2-2016-11-15 / GetTransitGatewayRouteTablePropagations "
* target = " _ top " > AWS API Documentation < / a > */
@ Override public GetTransitGatewayRouteTablePropagationsResult getTransitGatewayRouteTablePropagations ( GetTransitGatewayRouteTablePropagationsRequest request ) { } } | request = beforeClientExecution ( request ) ; return executeGetTransitGatewayRouteTablePropagations ( request ) ; |
public class ASTHelpers { /** * Returns the modifiers tree of the given class , method , or variable declaration . */
@ Nullable public static ModifiersTree getModifiers ( Tree tree ) { } } | if ( tree instanceof ClassTree ) { return ( ( ClassTree ) tree ) . getModifiers ( ) ; } else if ( tree instanceof MethodTree ) { return ( ( MethodTree ) tree ) . getModifiers ( ) ; } else if ( tree instanceof VariableTree ) { return ( ( VariableTree ) tree ) . getModifiers ( ) ; } else { return null ; } |
public class ScoreKeeper { /** * Based on the given array of ScoreKeeper and stopping criteria should we stop early ? */
public static boolean stopEarly ( ScoreKeeper [ ] sk , int k , boolean classification , StoppingMetric criterion , double rel_improvement , String what , boolean verbose ) { } } | if ( k == 0 ) return false ; int len = sk . length - 1 ; // how many " full " / " conservative " scoring events we have ( skip the first )
if ( len < 2 * k ) return false ; // need at least k for SMA and another k to tell whether the model got better or not
if ( criterion == StoppingMetric . AUTO ) { criterion = classification ? StoppingMetric . logloss : StoppingMetric . deviance ; } boolean moreIsBetter = moreIsBetter ( criterion ) ; boolean hasLowerBound = hasLowerBound ( criterion ) ; double movingAvg [ ] = new double [ k + 1 ] ; // need one moving average value for the last k + 1 scoring events
double lastBeforeK = moreIsBetter ? - Double . MAX_VALUE : Double . MAX_VALUE ; double bestInLastK = moreIsBetter ? - Double . MAX_VALUE : Double . MAX_VALUE ; for ( int i = 0 ; i < movingAvg . length ; ++ i ) { movingAvg [ i ] = 0 ; // compute k + 1 simple moving averages of window size k
// need to go back 2 * k steps
// Example : 20 scoring events , k = 3
// need to go back from idx 19 to idx 14
// movingAvg [ 0 ] is based on scoring events indices 14,15,16 < - reference
// movingAvg [ 1 ] is based on scoring events indices 15,16,17 < - first " new " smooth score
// movingAvg [ 2 ] is based on scoring events indices 16,17,18 < - second " new " smooth score
// movingAvg [ 3 ] is based on scoring events indices 17,18,19 < - third " new " smooth score
// Example : 18 scoring events , k = 2
// need to go back from idx 17 to idx 14
// movingAvg [ 0 ] is based on scoring events indices 14,15 < - reference
// movingAvg [ 1 ] is based on scoring events indices 15,16 < - first " new " smooth score
// movingAvg [ 2 ] is based on scoring events indices 16,17 < - second " new " smooth score
// Example : 18 scoring events , k = 1
// need to go back from idx 17 to idx 16
// movingAvg [ 0 ] is based on scoring event index 16 < - reference
// movingAvg [ 1 ] is based on scoring event index 17 < - first " new " score
int startIdx = sk . length - 2 * k + i ; for ( int j = 0 ; j < k ; ++ j ) { ScoreKeeper skj = sk [ startIdx + j ] ; double val ; switch ( criterion ) { case AUC : val = skj . _AUC ; break ; case MSE : val = skj . _mse ; break ; case RMSE : val = skj . _rmse ; break ; case MAE : val = skj . _mae ; break ; case RMSLE : val = skj . _rmsle ; break ; case deviance : val = skj . _mean_residual_deviance ; break ; case logloss : val = skj . _logloss ; break ; case misclassification : val = skj . _classError ; break ; case mean_per_class_error : val = skj . _mean_per_class_error ; break ; case lift_top_group : val = skj . _lift ; break ; /* case r2:
val = skj . _ r2;
break ; */
case custom : case custom_increasing : val = skj . _custom_metric ; break ; default : throw H2O . unimpl ( "Undefined stopping criterion." ) ; } movingAvg [ i ] += val ; } movingAvg [ i ] /= k ; if ( Double . isNaN ( movingAvg [ i ] ) ) return false ; if ( i == 0 ) lastBeforeK = movingAvg [ i ] ; else bestInLastK = moreIsBetter ? Math . max ( movingAvg [ i ] , bestInLastK ) : Math . min ( movingAvg [ i ] , bestInLastK ) ; } // zero - crossing could be for residual deviance or r ^ 2 - > mark it not yet converged , avoid division by 0 or weird relative improvements math below
if ( Math . signum ( ArrayUtils . maxValue ( movingAvg ) ) != Math . signum ( ArrayUtils . minValue ( movingAvg ) ) ) return false ; if ( Math . signum ( bestInLastK ) != Math . signum ( lastBeforeK ) ) return false ; assert ( lastBeforeK != Double . MAX_VALUE ) ; assert ( bestInLastK != Double . MAX_VALUE ) ; if ( verbose ) Log . info ( "Windowed averages (window size " + k + ") of " + what + " " + ( k + 1 ) + " " + criterion . toString ( ) + " metrics: " + Arrays . toString ( movingAvg ) ) ; if ( lastBeforeK == 0 && ! moreIsBetter && hasLowerBound ) // eg . deviance - less better and can be negative
return true ; double ratio = bestInLastK / lastBeforeK ; if ( Double . isNaN ( ratio ) ) return false ; boolean improved = moreIsBetter ? ratio > 1 + rel_improvement : ratio < 1 - rel_improvement ; if ( verbose ) Log . info ( "Checking convergence with " + criterion . toString ( ) + " metric: " + lastBeforeK + " --> " + bestInLastK + ( improved ? " (still improving)." : " (converged)." ) ) ; return ! improved ; |
public class ReplaceIamInstanceProfileAssociationRequest { /** * This method is intended for internal use only . Returns the marshaled request configured with additional
* parameters to enable operation dry - run . */
@ Override public Request < ReplaceIamInstanceProfileAssociationRequest > getDryRunRequest ( ) { } } | Request < ReplaceIamInstanceProfileAssociationRequest > request = new ReplaceIamInstanceProfileAssociationRequestMarshaller ( ) . marshall ( this ) ; request . addParameter ( "DryRun" , Boolean . toString ( true ) ) ; return request ; |
public class Ifc2x3tc1PackageImpl { /** * < ! - - begin - user - doc - - >
* < ! - - end - user - doc - - >
* @ generated */
public EClass getIfcFaceOuterBound ( ) { } } | if ( ifcFaceOuterBoundEClass == null ) { ifcFaceOuterBoundEClass = ( EClass ) EPackage . Registry . INSTANCE . getEPackage ( Ifc2x3tc1Package . eNS_URI ) . getEClassifiers ( ) . get ( 223 ) ; } return ifcFaceOuterBoundEClass ; |
public class XmlIOFactoryUtil {
    /**
     * Loads a class from the thread-context classloader.
     * If not found, the classloader of the {@code context} class specified is used.
     * If the flag {@code checkParent} is true, the classloader's parents are included
     * in the lookup. Returns null when the class cannot be resolved.
     */
    static Class<?> loadClass(String className, Class<?> context, boolean checkParent) {
        try {
            return Thread.currentThread().getContextClassLoader().loadClass(className);
        } catch (ClassNotFoundException e) {
            if (context == null) {
                return null;
            }
            // Fall back to the context class' loader chain.
            for (ClassLoader loader = context.getClassLoader();
                 loader != null;
                 loader = checkParent ? loader.getParent() : null) {
                try {
                    return loader.loadClass(className);
                } catch (ClassNotFoundException notFound) {
                    // try the parent (or stop when checkParent is false)
                }
            }
            return null;
        }
    }
}
public class RequestUtil { /** * Checks if the input stream of the given request is nor isn ' t consumed . This method is backwards - compatible with Servlet 2 . x ,
* as in Servlet 3 . x there is a " isFinished " method .
* @ param request a { @ code HttpServletRequest } , never { @ code null }
* @ return { @ code true } if the request stream has been consumed , { @ code false } otherwise . */
public static boolean streamNotConsumed ( HttpServletRequest request ) { } } | try { ServletInputStream servletInputStream = request . getInputStream ( ) ; // in servlet > = 3.0 , available will throw an exception ( while previously it didn ' t )
return request . getContentLength ( ) != 0 && servletInputStream . available ( ) > 0 ; } catch ( IOException e ) { return false ; } |
public class TermStatementUpdate { /** * Adds labels to the item
* @ param labels
* the labels to add */
protected void processLabels ( List < MonolingualTextValue > labels ) { } } | for ( MonolingualTextValue label : labels ) { String lang = label . getLanguageCode ( ) ; NameWithUpdate currentValue = newLabels . get ( lang ) ; if ( currentValue == null || ! currentValue . value . equals ( label ) ) { newLabels . put ( lang , new NameWithUpdate ( label , true ) ) ; // Delete any alias that matches the new label
AliasesWithUpdate currentAliases = newAliases . get ( lang ) ; if ( currentAliases != null && currentAliases . aliases . contains ( label ) ) { deleteAlias ( label ) ; } } } |
public class SimpleAttributeDefinition { /** * Marshalls the value from the given { @ code resourceModel } as an xml attribute , if it
* { @ link # isMarshallable ( org . jboss . dmr . ModelNode , boolean ) is marshallable } .
* @ param resourceModel the model , a non - null node of { @ link org . jboss . dmr . ModelType # OBJECT } .
* @ param marshallDefault { @ code true } if the value should be marshalled even if it matches the default value
* @ param writer stream writer to use for writing the attribute
* @ throws javax . xml . stream . XMLStreamException if { @ code writer } throws an exception */
public void marshallAsAttribute ( final ModelNode resourceModel , final boolean marshallDefault , final XMLStreamWriter writer ) throws XMLStreamException { } } | getMarshaller ( ) . marshallAsAttribute ( this , resourceModel , marshallDefault , writer ) ; |
public class Query { /** * Set an array parameter . < br >
* For example :
* < pre >
* createQuery ( " SELECT * FROM user WHERE id IN ( : ids ) " )
* . addParameter ( " ids " , 4 , 5 , 6)
* . executeAndFetch ( . . . )
* < / pre >
* will generate the query : < code > SELECT * FROM user WHERE id IN ( 4,5,6 ) < / code > < br >
* < br >
* It is not possible to use array parameters with a batch < code > PreparedStatement < / code > :
* since the text query passed to the < code > PreparedStatement < / code > depends on the number of parameters in the array ,
* array parameters are incompatible with batch mode . < br >
* < br >
* If the values array is empty , < code > null < / code > will be set to the array parameter :
* < code > SELECT * FROM user WHERE id IN ( NULL ) < / code >
* @ throws NullPointerException if values parameter is null */
public Query addParameter ( String name , final Object ... values ) { } } | if ( values == null ) { throw new NullPointerException ( "Array parameter cannot be null" ) ; } addParameterInternal ( name , new ParameterSetter ( values . length ) { @ Override public void setParameter ( int paramIdx , PreparedStatement statement ) throws SQLException { if ( values . length == 0 ) { getConnection ( ) . getSql2o ( ) . getQuirks ( ) . setParameter ( statement , paramIdx , ( Object ) null ) ; } else { for ( Object value : values ) { getConnection ( ) . getSql2o ( ) . getQuirks ( ) . setParameter ( statement , paramIdx ++ , value ) ; } } } } ) ; return this ; |
public class GraphHelper { /** * Remove the specified AtlasVertex from the graph .
* @ param vertex */
public void removeVertex ( AtlasVertex vertex ) { } } | String vertexString = null ; if ( LOG . isDebugEnabled ( ) ) { vertexString = string ( vertex ) ; LOG . debug ( "Removing {}" , vertexString ) ; } graph . removeVertex ( vertex ) ; if ( LOG . isDebugEnabled ( ) ) { LOG . info ( "Removed {}" , vertexString ) ; } |
public class NotificationsHandlerFactory { /** * Return a handler for the system event
* @ param queueName our queue
* @ param pr jms properties
* @ return NotificationsHandler
* @ throws NotificationException */
private static NotificationsHandler getHandler ( final String queueName , final Properties pr ) throws NotificationException { } } | if ( handler != null ) { return handler ; } synchronized ( synchit ) { handler = new JmsNotificationsHandlerImpl ( queueName , pr ) ; } return handler ; |
public class MathoidConverter { /** * Returns true if the Mathoid service is reachable , otherwise false .
* @ return */
public boolean isReachable ( ) { } } | try { URL url = new URL ( mathoidConfig . getUrl ( ) + "/mml" ) ; SimpleClientHttpRequestFactory factory = new SimpleClientHttpRequestFactory ( ) ; ClientHttpRequest req = factory . createRequest ( url . toURI ( ) , HttpMethod . POST ) ; req . execute ( ) ; return true ; } catch ( Exception e ) { return false ; } |
public class AsyncHttpJoinConverter { /** * Convert an input record to a future object where an output record will be filled in sometime later
* Sequence :
* Convert input ( DI ) to an http request
* Send http request asynchronously , and registers an http callback
* Create an { @ link CompletableFuture } object . When the callback is invoked , this future object is filled in by an output record which is converted from http response .
* Return the future object . */
@ Override public final CompletableFuture < DO > convertRecordAsync ( SO outputSchema , DI inputRecord , WorkUnitState workUnitState ) throws DataConversionException { } } | // Convert DI to HttpOperation
HttpOperation operation = generateHttpOperation ( inputRecord , workUnitState ) ; BufferedRecord < GenericRecord > bufferedRecord = new BufferedRecord < > ( operation , WriteCallback . EMPTY ) ; // Convert HttpOperation to RQ
Queue < BufferedRecord < GenericRecord > > buffer = new LinkedBlockingDeque < > ( ) ; buffer . add ( bufferedRecord ) ; AsyncRequest < GenericRecord , RQ > request = this . requestBuilder . buildRequest ( buffer ) ; RQ rawRequest = request . getRawRequest ( ) ; // Execute query and get response
AsyncHttpJoinConverterContext context = new AsyncHttpJoinConverterContext ( this , outputSchema , inputRecord , request ) ; try { httpClient . sendAsyncRequest ( rawRequest , context . getCallback ( ) ) ; } catch ( IOException e ) { throw new DataConversionException ( e ) ; } return context . future ; |
public class CurvedArrow { /** * Draws the arrow on the indicated graphics environment .
* @ param g
* the graphics to draw this arrow upon */
public void draw ( Graphics2D g ) { } } | if ( needsRefresh ) refreshCurve ( ) ; g . draw ( curve ) ; // Draws the main part of the arrow .
drawArrow ( g , end , control ) ; // Draws the arrow head .
drawText ( g ) ; |
public class JMSServices { /** * Sends a JMS text message to a local queue .
* @ param queueName
* local queues are based on logical queue names
* @ param message
* the message string
* @ param delaySeconds
* 0 for immediate
* @ throws ServiceLocatorException */
public void sendTextMessage ( String queueName , String message , int delaySeconds ) throws NamingException , JMSException , ServiceLocatorException { } } | sendTextMessage ( null , queueName , message , delaySeconds , null ) ; |
public class DistinguishedNameParser { /** * see http : / / www . unicode . org for UTF - 8 bit distribution table */
private char getUTF8 ( ) { } } | int res = getByte ( pos ) ; pos ++ ; // FIXME tmp
if ( res < 128 ) { // one byte : 0-7F
return ( char ) res ; } else if ( res >= 192 && res <= 247 ) { int count ; if ( res <= 223 ) { // two bytes : C0 - DF
count = 1 ; res = res & 0x1F ; } else if ( res <= 239 ) { // three bytes : E0 - EF
count = 2 ; res = res & 0x0F ; } else { // four bytes : F0 - F7
count = 3 ; res = res & 0x07 ; } int b ; for ( int i = 0 ; i < count ; i ++ ) { pos ++ ; if ( pos == length || chars [ pos ] != '\\' ) { return 0x3F ; // FIXME failed to decode UTF - 8 char - return ' ? '
} pos ++ ; b = getByte ( pos ) ; pos ++ ; // FIXME tmp
if ( ( b & 0xC0 ) != 0x80 ) { return 0x3F ; // FIXME failed to decode UTF - 8 char - return ' ? '
} res = ( res << 6 ) + ( b & 0x3F ) ; } return ( char ) res ; } else { return 0x3F ; // FIXME failed to decode UTF - 8 char - return ' ? '
} |
public class Tree { /** * Actually build the tree */
@ Override public void compute2 ( ) { } } | if ( Job . isRunning ( _jobKey ) ) { Timer timer = new Timer ( ) ; _stats [ 0 ] = new ThreadLocal < hex . singlenoderf . Statistic > ( ) ; _stats [ 1 ] = new ThreadLocal < hex . singlenoderf . Statistic > ( ) ; Data d = _sampler . sample ( _data , _seed , _modelKey , _local_mode ) ; hex . singlenoderf . Statistic left = getStatistic ( 0 , d , _seed , _exclusiveSplitLimit ) ; // calculate the split
for ( Row r : d ) left . addQ ( r , _regression ) ; if ( ! _regression ) left . applyClassWeights ( ) ; // Weight the distributions
hex . singlenoderf . Statistic . Split spl = left . split ( d , false ) ; if ( spl . isLeafNode ( ) ) { if ( _regression ) { float av = d . computeAverage ( ) ; _tree = new LeafNode ( - 1 , d . rows ( ) , av ) ; } else { _tree = new LeafNode ( _data . unmapClass ( spl . _split ) , d . rows ( ) , - 1 ) ; } } else { _tree = new FJBuild ( spl , d , 0 , _seed ) . compute ( ) ; } _stats = null ; // GC
if ( _jobKey != null && ! Job . isRunning ( _jobKey ) ) throw new Job . JobCancelledException ( ) ; // Atomically improve the Model as well
Key tkey = toKey ( ) ; Key dtreeKey = null ; if ( _score_pojo ) dtreeKey = toCompressedKey ( ) ; appendKey ( _modelKey , tkey , dtreeKey , _verbose > 10 ? _tree . toString ( new StringBuilder ( "" ) , Integer . MAX_VALUE ) . toString ( ) : "" , _data_id ) ; // appendKey ( _ modelKey , tkey , _ verbose > 10 ? _ tree . toString ( new StringBuilder ( " " ) , Integer . MAX _ VALUE ) . toString ( ) : " " , _ data _ id ) ;
StringBuilder sb = new StringBuilder ( "[RF] Tree : " ) . append ( _data_id + 1 ) ; sb . append ( " d=" ) . append ( _tree . depth ( ) ) . append ( " leaves=" ) . append ( _tree . leaves ( ) ) . append ( " done in " ) . append ( timer ) . append ( '\n' ) ; Log . info ( sb . toString ( ) ) ; if ( _verbose > 10 ) { // Log . info ( Sys . RANDF , _ tree . toString ( sb , Integer . MAX _ VALUE ) . toString ( ) ) ;
// Log . info ( Sys . RANDF , _ tree . toJava ( sb , Integer . MAX _ VALUE ) . toString ( ) ) ;
} } else throw new Job . JobCancelledException ( ) ; // Wait for completion
tryComplete ( ) ; |
public class DatabaseTableConfig { /** * Extract the field types from the fieldConfigs if they have not already been configured . */
public void extractFieldTypes ( DatabaseType databaseType ) throws SQLException { } } | if ( fieldTypes == null ) { if ( fieldConfigs == null ) { fieldTypes = extractFieldTypes ( databaseType , dataClass , tableName ) ; } else { fieldTypes = convertFieldConfigs ( databaseType , tableName , fieldConfigs ) ; } } |
public class CommerceWarehousePersistenceImpl { /** * Returns a range of all the commerce warehouses where groupId = & # 63 ; and active = & # 63 ; and primary = & # 63 ; .
* Useful when paginating results . Returns a maximum of < code > end - start < / code > instances . < code > start < / code > and < code > end < / code > are not primary keys , they are indexes in the result set . Thus , < code > 0 < / code > refers to the first result in the set . Setting both < code > start < / code > and < code > end < / code > to { @ link QueryUtil # ALL _ POS } will return the full result set . If < code > orderByComparator < / code > is specified , then the query will include the given ORDER BY logic . If < code > orderByComparator < / code > is absent and pagination is required ( < code > start < / code > and < code > end < / code > are not { @ link QueryUtil # ALL _ POS } ) , then the query will include the default ORDER BY logic from { @ link CommerceWarehouseModelImpl } . If both < code > orderByComparator < / code > and pagination are absent , for performance reasons , the query will not have an ORDER BY clause and the returned result set will be sorted on by the primary key in an ascending order .
* @ param groupId the group ID
* @ param active the active
* @ param primary the primary
* @ param start the lower bound of the range of commerce warehouses
* @ param end the upper bound of the range of commerce warehouses ( not inclusive )
* @ return the range of matching commerce warehouses */
@ Override public List < CommerceWarehouse > findByG_A_P ( long groupId , boolean active , boolean primary , int start , int end ) { } } | return findByG_A_P ( groupId , active , primary , start , end , null ) ; |
public class DockerImageComboStepFileCallable { /** * Try to clean as much as we can without throwing errors . */
private void invokeCleanup ( DockerClient client , List < String > builtImages , @ Nonnull Set < String > containers ) { } } | PrintStream llog = taskListener . getLogger ( ) ; if ( cleanupDangling ) { for ( String containerId : containers ) { try { client . removeImageCmd ( containerId ) . exec ( ) ; llog . printf ( "Removed dangling layer image %s.%n" , containerId ) ; LOG . debug ( "Removed dangling layer image '{}'" , containerId ) ; } catch ( NotFoundException | ConflictException ex ) { // ignore
} catch ( Throwable ex ) { taskListener . error ( "Can't remove dangling layer image " + containerId + "." ) ; LOG . error ( "Can't remove dangling layer image " + containerId , ex ) ; } } } if ( ! cleanup ) { llog . println ( "Skipping cleanup." ) ; return ; } else { llog . println ( "Running cleanup..." ) ; } for ( String image : builtImages ) { if ( isNotEmpty ( image ) ) { llog . println ( "Removing built image " + image ) ; try { client . removeImageCmd ( image ) . exec ( ) ; } catch ( NotFoundException ex ) { llog . println ( "Image doesn't exist." ) ; } catch ( Throwable ex ) { taskListener . error ( "Can't remove image" + ex . getMessage ( ) ) ; // ignore as it cleanup
} } } |
public class TargetHostsBuilder { /** * get target hosts from line by line .
* @ param sourcePath
* the source path
* @ param sourceType
* the source type
* @ return the list
* @ throws TargetHostsLoadException
* the target hosts load exception */
@ Override public List < String > setTargetHostsFromLineByLineText ( String sourcePath , HostsSourceType sourceType ) throws TargetHostsLoadException { } } | List < String > targetHosts = new ArrayList < String > ( ) ; try { String content = getContentFromPath ( sourcePath , sourceType ) ; targetHosts = setTargetHostsFromString ( content ) ; } catch ( IOException e ) { throw new TargetHostsLoadException ( "IEException when reading " + sourcePath , e ) ; } return targetHosts ; |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.