signature stringlengths 43 39.1k | implementation stringlengths 0 450k |
|---|---|
public class CmsContextMenu { /** * Adds new item to context menu root with given caption . < p >
* @ param caption the caption
* @ return reference to newly added item */
public ContextMenuItem addItem ( String caption ) { } } | ContextMenuItemState itemState = getState ( ) . addChild ( caption , getNextId ( ) ) ; ContextMenuItem item = new ContextMenuItem ( null , itemState ) ; m_items . put ( itemState . getId ( ) , item ) ; return item ; |
public class ClassLister { /** * Returns all the classnames that were found for this superclass .
* @ param superclassthe superclass to return the derived classes for
* @ returnthe classnames of the derived classes */
public String [ ] getClassnames ( Class superclass ) { } } | List < String > list ; list = m_ListNames . get ( superclass . getName ( ) ) ; if ( list == null ) return new String [ 0 ] ; else return list . toArray ( new String [ list . size ( ) ] ) ; |
public class IntHashSet { /** * IntHashSet specialised variant of { this # containsAll ( Collection ) } .
* @ param other int hash set to compare against .
* @ return true if every element in other is in this . */
public boolean containsAll ( final IntHashSet other ) { } } | for ( final int value : other . values ) { if ( value != MISSING_VALUE && ! contains ( value ) ) { return false ; } } return ! other . containsMissingValue || this . containsMissingValue ; |
public class BundleCameraNumericJacobian { /** * Computes Jacobian for intrinsic parameters
* @ param camX 3D point in camera reference frame
* @ param camY 3D point in camera reference frame
* @ param camZ 3D point in camera reference frame
* @ param calibX ( Output ) Partial of projected x ' relative to calibration parameters . length N
* @ param calibY ( Output ) Partial of projected y ' relative to calibration parameters . length N */
public void jacobianIntrinsics ( double camX , double camY , double camZ , @ Nonnull double calibX [ ] , @ Nonnull double calibY [ ] ) { } } | funcIntrinsic . X . set ( camX , camY , camZ ) ; jacobian . reshape ( 2 , numIntrinsic ) ; numericalIntrinsic . process ( intrinsic , jacobian ) ; for ( int i = 0 ; i < numIntrinsic ; i ++ ) { calibX [ i ] = jacobian . data [ i ] ; calibY [ i ] = jacobian . data [ i + numIntrinsic ] ; } // make sure its intrinsic parameters have not been modified
model . setIntrinsic ( intrinsic , 0 ) ; |
public class SubclassDynamicTypeBuilder { /** * Applies this builder ' s constructor strategy to the given instrumented type .
* @ param instrumentedType The instrumented type to apply the constructor onto .
* @ return The instrumented type with the constructor strategy applied onto . */
private InstrumentedType applyConstructorStrategy ( InstrumentedType instrumentedType ) { } } | if ( ! instrumentedType . isInterface ( ) ) { for ( MethodDescription . Token token : constructorStrategy . extractConstructors ( instrumentedType ) ) { instrumentedType = instrumentedType . withMethod ( token ) ; } } return instrumentedType ; |
public class ObjectFactory { /** * Create an instance of { @ link JAXBElement } { @ code < } { @ link SyncActivity } { @ code > } } */
@ XmlElementDecl ( namespace = "http://schema.intuit.com/finance/v3" , name = "SyncActivity" , substitutionHeadNamespace = "http://schema.intuit.com/finance/v3" , substitutionHeadName = "IntuitObject" ) public JAXBElement < SyncActivity > createSyncActivity ( SyncActivity value ) { } } | return new JAXBElement < SyncActivity > ( _SyncActivity_QNAME , SyncActivity . class , null , value ) ; |
public class CPTaxCategoryPersistenceImpl { /** * Removes the cp tax category with the primary key from the database . Also notifies the appropriate model listeners .
* @ param primaryKey the primary key of the cp tax category
* @ return the cp tax category that was removed
* @ throws NoSuchCPTaxCategoryException if a cp tax category with the primary key could not be found */
@ Override public CPTaxCategory remove ( Serializable primaryKey ) throws NoSuchCPTaxCategoryException { } } | Session session = null ; try { session = openSession ( ) ; CPTaxCategory cpTaxCategory = ( CPTaxCategory ) session . get ( CPTaxCategoryImpl . class , primaryKey ) ; if ( cpTaxCategory == null ) { if ( _log . isDebugEnabled ( ) ) { _log . debug ( _NO_SUCH_ENTITY_WITH_PRIMARY_KEY + primaryKey ) ; } throw new NoSuchCPTaxCategoryException ( _NO_SUCH_ENTITY_WITH_PRIMARY_KEY + primaryKey ) ; } return remove ( cpTaxCategory ) ; } catch ( NoSuchCPTaxCategoryException nsee ) { throw nsee ; } catch ( Exception e ) { throw processException ( e ) ; } finally { closeSession ( session ) ; } |
public class Controller { /** * Returns all of this Controller ' s child Routers */
@ NonNull public final List < Router > getChildRouters ( ) { } } | List < Router > routers = new ArrayList < > ( childRouters . size ( ) ) ; routers . addAll ( childRouters ) ; return routers ; |
public class MyfacesLogger { /** * Log a message , with no arguments .
* If the logger is currently enabled for the given message
* level then the given message is forwarded to all the
* registered output Handler objects .
* @ param level One of the message level identifiers , e . g . SEVERE
* @ param msg The string message ( or a key in the message catalog ) */
public void log ( Level level , String msg ) { } } | if ( isLoggable ( level ) ) { MyfacesLogRecord lr = new MyfacesLogRecord ( level , msg ) ; doLog ( lr ) ; } |
public class AdminToolLog4j2Util { /** * returns all loggers
* @ return */
public Collection < Logger > getLoggers ( ) { } } | LoggerContext ctx = ( LoggerContext ) LogManager . getContext ( false ) ; List < Logger > loggers = new ArrayList < > ( ctx . getLoggers ( ) ) ; Collections . sort ( loggers , LOGGER_COMP ) ; return loggers ; |
public class GetDiscoveredResourceCountsResult { /** * The list of < code > ResourceCount < / code > objects . Each object is listed in descending order by the number of
* resources .
* @ return The list of < code > ResourceCount < / code > objects . Each object is listed in descending order by the number
* of resources . */
public java . util . List < ResourceCount > getResourceCounts ( ) { } } | if ( resourceCounts == null ) { resourceCounts = new com . amazonaws . internal . SdkInternalList < ResourceCount > ( ) ; } return resourceCounts ; |
public class Graphs { /** * Returns a synchronized ( thread - safe ) { @ link DirectedGraph } backed by the specified Graph .
* It is imperative that the user manually synchronize on the returned graph when iterating over iterable collections :
* < pre >
* Graph syncGraph = synchronize ( graph ) ;
* synchronized ( syncGraph ) {
* for ( Vertex v : g . getVertices ( ) ) / / Must be in synchronized block
* foo ( v )
* < / pre >
* Failure to follow this advice may result in non - deterministic behavior .
* The returned { @ link Graph } will be serializable if the specified { @ link Graph } is serializable .
* @ param < V > the Graph vertices type
* @ param < E > the Graph edges type
* @ param graph the input { @ link Graph }
* @ return the syncronyzed graph */
public static < V , E > DirectedGraph < V , E > synchronize ( final DirectedGraph < V , E > graph ) { } } | return new SynchronizedDirectedGraph < V , E > ( graph ) ; |
public class CmsSubscriptionManager { /** * Sets the security manager during initialization . < p >
* @ param securityManager the security manager */
public void setSecurityManager ( CmsSecurityManager securityManager ) { } } | if ( m_frozen ) { throw new CmsRuntimeException ( Messages . get ( ) . container ( Messages . ERR_CONFIG_SUBSCRIPTIONMANAGER_FROZEN_0 ) ) ; } m_securityManager = securityManager ; |
public class GetItemRequest { /** * One or more substitution tokens for attribute names in an expression . The following are some use cases for using
* < code > ExpressionAttributeNames < / code > :
* < ul >
* < li >
* To access an attribute whose name conflicts with a DynamoDB reserved word .
* < / li >
* < li >
* To create a placeholder for repeating occurrences of an attribute name in an expression .
* < / li >
* < li >
* To prevent special characters in an attribute name from being misinterpreted in an expression .
* < / li >
* < / ul >
* Use the < b > # < / b > character in an expression to dereference an attribute name . For example , consider the following
* attribute name :
* < ul >
* < li >
* < code > Percentile < / code >
* < / li >
* < / ul >
* The name of this attribute conflicts with a reserved word , so it cannot be used directly in an expression . ( For
* the complete list of reserved words , see < a
* href = " https : / / docs . aws . amazon . com / amazondynamodb / latest / developerguide / ReservedWords . html " > Reserved Words < / a > in
* the < i > Amazon DynamoDB Developer Guide < / i > ) . To work around this , you could specify the following for
* < code > ExpressionAttributeNames < / code > :
* < ul >
* < li >
* < code > { " # P " : " Percentile " } < / code >
* < / li >
* < / ul >
* You could then use this substitution in an expression , as in this example :
* < ul >
* < li >
* < code > # P = : val < / code >
* < / li >
* < / ul >
* < note >
* Tokens that begin with the < b > : < / b > character are < i > expression attribute values < / i > , which are placeholders for
* the actual value at runtime .
* < / note >
* For more information on expression attribute names , see < a href =
* " https : / / docs . aws . amazon . com / amazondynamodb / latest / developerguide / Expressions . AccessingItemAttributes . html "
* > Accessing Item Attributes < / a > in the < i > Amazon DynamoDB Developer Guide < / i > .
* @ param expressionAttributeNames
* One or more substitution tokens for attribute names in an expression . The following are some use cases for
* using < code > ExpressionAttributeNames < / code > : < / p >
* < ul >
* < li >
* To access an attribute whose name conflicts with a DynamoDB reserved word .
* < / li >
* < li >
* To create a placeholder for repeating occurrences of an attribute name in an expression .
* < / li >
* < li >
* To prevent special characters in an attribute name from being misinterpreted in an expression .
* < / li >
* < / ul >
* Use the < b > # < / b > character in an expression to dereference an attribute name . For example , consider the
* following attribute name :
* < ul >
* < li >
* < code > Percentile < / code >
* < / li >
* < / ul >
* The name of this attribute conflicts with a reserved word , so it cannot be used directly in an expression .
* ( For the complete list of reserved words , see < a
* href = " https : / / docs . aws . amazon . com / amazondynamodb / latest / developerguide / ReservedWords . html " > Reserved
* Words < / a > in the < i > Amazon DynamoDB Developer Guide < / i > ) . To work around this , you could specify the
* following for < code > ExpressionAttributeNames < / code > :
* < ul >
* < li >
* < code > { " # P " : " Percentile " } < / code >
* < / li >
* < / ul >
* You could then use this substitution in an expression , as in this example :
* < ul >
* < li >
* < code > # P = : val < / code >
* < / li >
* < / ul >
* < note >
* Tokens that begin with the < b > : < / b > character are < i > expression attribute values < / i > , which are
* placeholders for the actual value at runtime .
* < / note >
* For more information on expression attribute names , see < a href =
* " https : / / docs . aws . amazon . com / amazondynamodb / latest / developerguide / Expressions . AccessingItemAttributes . html "
* > Accessing Item Attributes < / a > in the < i > Amazon DynamoDB Developer Guide < / i > .
* @ return Returns a reference to this object so that method calls can be chained together . */
public GetItemRequest withExpressionAttributeNames ( java . util . Map < String , String > expressionAttributeNames ) { } } | setExpressionAttributeNames ( expressionAttributeNames ) ; return this ; |
public class Configuration { /** * Gets the double value < code > key < / code > .
* @ param key key to get value for
* @ throws java . lang . IllegalArgumentException if the key is not found
* @ return value */
public double getDouble ( String key ) { } } | if ( containsKey ( key ) ) { return Double . parseDouble ( get ( key ) ) ; } else { throw new IllegalArgumentException ( "Missing key " + key + "." ) ; } |
public class UndoUtils { /** * Returns an UndoManager that can undo / redo { @ link RichTextChange } s . New changes
* emitted from the stream will not be merged with the previous change
* after { @ link # DEFAULT _ PREVENT _ MERGE _ DELAY } */
public static < PS , SEG , S > UndoManager < List < RichTextChange < PS , SEG , S > > > richTextUndoManager ( GenericStyledArea < PS , SEG , S > area , UndoManagerFactory factory ) { } } | return richTextUndoManager ( area , factory , DEFAULT_PREVENT_MERGE_DELAY ) ; |
public class ConverterManager { /** * Adds a converter to the set of converters . If a matching converter is
* already in the set , the given converter replaces it . If the converter is
* exactly the same as one already in the set , no changes are made .
* The order in which converters are added is not relevant . The best
* converter is selected by examining the object hierarchy .
* @ param converter the converter to add , null ignored
* @ return replaced converter , or null */
public DurationConverter addDurationConverter ( DurationConverter converter ) throws SecurityException { } } | checkAlterDurationConverters ( ) ; if ( converter == null ) { return null ; } DurationConverter [ ] removed = new DurationConverter [ 1 ] ; iDurationConverters = iDurationConverters . add ( converter , removed ) ; return removed [ 0 ] ; |
public class TargetStreamManager { /** * Handle a ControlFlushed message . Flush any existing streams and
* throw away any cached messages .
* @ param cMsg
* @ throws SIResourceException */
public void handleFlushedMessage ( ControlFlushed cMsg ) throws SIResourceException { } } | if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) SibTr . entry ( this , tc , "handleFlushedMessage" , new Object [ ] { cMsg } ) ; SIBUuid12 streamID = cMsg . getGuaranteedStreamUUID ( ) ; forceFlush ( streamID ) ; if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) SibTr . exit ( tc , "handleFlushedMessage" ) ; |
public class V1InstanceCreator { /** * Create a new schedule entity with a name , iteration length , and iteration gap
* @ param name Name of the new schedule
* @ param iterationLength The duration an iteration will last in this schedule
* @ param iterationGap The duration between iterations in this schedule .
* @ param attributes additional attributes for the Schedule .
* @ return A newly minted Schedule that exists in the VersionOne system . */
public Schedule schedule ( String name , Duration iterationLength , Duration iterationGap , Map < String , Object > attributes ) { } } | Schedule schedule = new Schedule ( instance ) ; schedule . setName ( name ) ; schedule . setIterationLength ( iterationLength ) ; schedule . setIterationGap ( iterationGap ) ; addAttributes ( schedule , attributes ) ; schedule . save ( ) ; return schedule ; |
public class SqlTileWriter { /** * Delete cache tiles
* @ since 6.0.2
* @ param pTileSourceName the tile source name ( possibly null )
* @ param pZoom the zoom level
* @ param pInclude a collection of bounding boxes to include ( possibly null / empty )
* @ param pExclude a collection of bounding boxes to exclude ( possibly null / empty )
* @ return the number of corresponding tiles deleted from the cache , or - 1 if a problem occurred */
public long delete ( final String pTileSourceName , final int pZoom , final Collection < Rect > pInclude , final Collection < Rect > pExclude ) { } } | try { final SQLiteDatabase db = getDb ( ) ; if ( db == null || ! db . isOpen ( ) ) { return - 1 ; } return db . delete ( TABLE , getWhereClause ( pZoom , pInclude , pExclude ) + ( pTileSourceName != null ? " and " + COLUMN_PROVIDER + "=?" : "" ) , pTileSourceName != null ? new String [ ] { pTileSourceName } : null ) ; } catch ( Exception ex ) { catchException ( ex ) ; return 0 ; } |
public class MessageReceiverFilterList { /** * Remove all the filters that have this as a listener .
* @ param listener Filters with this listener will be removed ( pass null to free them all ) . */
public void freeFiltersWithSource ( Object objSource ) { } } | Object [ ] rgFilter = m_mapFilters . values ( ) . toArray ( ) ; for ( int i = 0 ; i < rgFilter . length ; i ++ ) { BaseMessageFilter filter = ( BaseMessageFilter ) rgFilter [ i ] ; if ( filter . getMessageSource ( ) == objSource ) filter . free ( ) ; } |
public class WorkQueueManager { /** * This method is called when work must be added to the connect selector .
* @ param connectInfo */
protected void queueConnectForSelector ( ConnectInfo connectInfo ) { } } | if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) { Tr . entry ( tc , "queueConnectForSelector" ) ; } try { moveIntoPosition ( connectCount , connect , connectInfo , CS_CONNECTOR ) ; } catch ( IOException x ) { FFDCFilter . processException ( x , getClass ( ) . getName ( ) , "140" , this ) ; if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) ) { Tr . debug ( tc , "Caught IOException...throwing RuntimeException" ) ; } throw new RuntimeException ( x ) ; } if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) { Tr . exit ( tc , "queueConnectForSelector" ) ; } |
public class JpaManifestStore { /** * / * ( non - Javadoc )
* @ see org . duracloud . mill . manifest . ManifestStore # purgeDeletedItemsBefore ( java . util . Date ) */
@ Override @ Transactional ( value = MillJpaRepoConfig . TRANSACTION_MANAGER_BEAN , propagation = Propagation . REQUIRES_NEW ) public int purgeDeletedItemsBefore ( Date expiration ) { } } | return this . manifestItemRepo . deleteFirst50000ByDeletedTrueAndModifiedBefore ( expiration ) ; |
public class SSSRFinder { /** * Finds a Smallest Set of Smallest Rings .
* The returned set is not uniquely defined .
* @ return a RingSet containing the SSSR */
public IRingSet findSSSR ( ) { } } | if ( atomContainer == null ) { return null ; } IRingSet ringSet = toRingSet ( atomContainer , cycleBasis ( ) . cycles ( ) ) ; // atomContainer . setProperty ( CDKConstants . SMALLEST _ RINGS , ringSet ) ;
return ringSet ; |
public class CPDefinitionOptionValueRelPersistenceImpl { /** * Returns the cp definition option value rel where CPDefinitionOptionRelId = & # 63 ; and key = & # 63 ; or returns < code > null < / code > if it could not be found . Uses the finder cache .
* @ param CPDefinitionOptionRelId the cp definition option rel ID
* @ param key the key
* @ return the matching cp definition option value rel , or < code > null < / code > if a matching cp definition option value rel could not be found */
@ Override public CPDefinitionOptionValueRel fetchByC_K ( long CPDefinitionOptionRelId , String key ) { } } | return fetchByC_K ( CPDefinitionOptionRelId , key , true ) ; |
public class I18n { /** * Retrieve a I18n instance by resource name .
* @ param resource resource name . See { @ link
* ResourceBundle # getBundle ( String ) ResourceBundle . getBundle ( ) } */
public static synchronized I18n getI18n ( String resource ) { } } | I18n instance = ( I18n ) mapping . get ( resource ) ; if ( instance == null ) { instance = new I18n ( ResourceBundle . getBundle ( resource , Locale . getDefault ( ) , getClassLoader ( ) ) ) ; mapping . put ( resource , instance ) ; } return instance ; |
public class NumberVectorLabelParser { /** * Get a prototype object for the given dimensionality .
* @ param mindim Minimum dimensionality
* @ param maxdim Maximum dimensionality
* @ return Prototype object */
SimpleTypeInformation < V > getTypeInformation ( int mindim , int maxdim ) { } } | if ( mindim > maxdim ) { throw new AbortException ( "No vectors were read from the input file - cannot determine vector data type." ) ; } if ( mindim == maxdim ) { String [ ] colnames = null ; if ( columnnames != null && mindim <= columnnames . size ( ) ) { colnames = new String [ mindim ] ; int j = 0 ; for ( int i = 0 ; i < mindim ; i ++ ) { if ( isLabelColumn ( i ) ) { continue ; } colnames [ j ] = columnnames . get ( i ) ; j ++ ; } if ( j != mindim ) { colnames = null ; // Did not work
} } return new VectorFieldTypeInformation < > ( factory , mindim , colnames ) ; } // Variable dimensionality - return non - vector field type
return new VectorTypeInformation < > ( factory , factory . getDefaultSerializer ( ) , mindim , maxdim ) ; |
public class PrepareRequestInterceptor { /** * { @ inheritDoc } */
@ Override public void execute ( IntuitMessage intuitMessage ) throws FMSException { } } | LOG . debug ( "Enter PrepareRequestInterceptor..." ) ; RequestElements requestElements = intuitMessage . getRequestElements ( ) ; Map < String , String > requestParameters = requestElements . getRequestParameters ( ) ; String action = ( requestElements . getAction ( ) == null ) ? getEntityName ( requestElements . getEntity ( ) ) : requestElements . getAction ( ) ; if ( intuitMessage . isPlatformService ( ) ) { requestParameters . put ( RequestElements . REQ_PARAM_RESOURCE_URL , prepareIPSUri ( action , requestElements . getContext ( ) ) ) ; } else if ( intuitMessage . isEntitlementService ( ) ) { prepareEntitlementsRequest ( intuitMessage , requestElements , requestParameters ) ; } else { prepareDataServiceRequest ( intuitMessage , requestElements , requestParameters , action ) ; } LOG . debug ( "Exit PrepareRequestInterceptor." ) ; |
public class BasePrepareStatement { /** * Sets the value of the designated parameter with the given object . This method is like the
* method
* < code > setObject < / code > above , except that it assumes a scale of zero .
* @ param parameterIndex the first parameter is 1 , the second is 2 , . . .
* @ param obj the object containing the input parameter value
* @ param targetSqlType the SQL type ( as defined in java . sql . Types ) to be sent to the database
* @ throws SQLException if parameterIndex does not correspond to a parameter
* marker in the SQL statement ; if a database access error
* occurs or this method is called on a closed
* < code > PreparedStatement < / code > s
* @ see Types */
public void setObject ( final int parameterIndex , final Object obj , final int targetSqlType ) throws SQLException { } } | setInternalObject ( parameterIndex , obj , targetSqlType , Long . MAX_VALUE ) ; |
public class Package { /** * Remove a { @ link Tag } from this { @ link Package } */
public void removeTag ( Tag tag ) { } } | Iterable < Tag > tags = getTags ( ) ; removeAll ( tags , singletonList ( tag ) ) ; set ( PackageMetadata . TAGS , tag ) ; |
public class SoLoader { /** * Gets the full path of a library .
* @ param libName the library file name , including the prefix and extension .
* @ return the full path of the library , or null if it is not found in none of the SoSources .
* @ throws IOException */
public static @ Nullable String getLibraryPath ( String libName ) throws IOException { } } | sSoSourcesLock . readLock ( ) . lock ( ) ; String libPath = null ; try { if ( sSoSources != null ) { for ( int i = 0 ; libPath == null && i < sSoSources . length ; ++ i ) { SoSource currentSource = sSoSources [ i ] ; libPath = currentSource . getLibraryPath ( libName ) ; } } } finally { sSoSourcesLock . readLock ( ) . unlock ( ) ; } return libPath ; |
public class AmqpMessageHandlerService { /** * Method to handle the different topics to an event .
* @ param message
* the incoming event message .
* @ param topic
* the topic of the event . */
private void handleIncomingEvent ( final Message message ) { } } | switch ( EventTopic . valueOf ( getStringHeaderKey ( message , MessageHeaderKey . TOPIC , "EventTopic is null" ) ) ) { case UPDATE_ACTION_STATUS : updateActionStatus ( message ) ; break ; case UPDATE_ATTRIBUTES : updateAttributes ( message ) ; break ; default : logAndThrowMessageError ( message , "Got event without appropriate topic." ) ; break ; } |
public class FacebookDialog { /** * Determines whether the version of the Facebook application installed on the user ' s device is recent
* enough to support specific features of the native Message dialog , which in turn may be used to determine
* which UI , etc . , to present to the user .
* @ param context the calling Context
* @ param features zero or more features to check for ; { @ link com . facebook . widget . FacebookDialog . MessageDialogFeature # MESSAGE _ DIALOG } is implicitly
* checked if not explicitly specified
* @ return true if all of the specified features are supported by the currently installed version of the
* Facebook application ; false if any of the features are not supported */
public static boolean canPresentMessageDialog ( Context context , MessageDialogFeature ... features ) { } } | return handleCanPresent ( context , EnumSet . of ( MessageDialogFeature . MESSAGE_DIALOG , features ) ) ; |
public class Tuple { /** * Sets a value at a specific position in the tuple .
* If this tuple has fewer elements than the index being set , then the tuple will grow to
* { @ code idx } elements .
* @ param idx the index to set
* @ param val the value to set
* @ return a handle to this object to enable builder operations */
public Tuple set ( int idx , Object val ) { } } | while ( idx >= fields . size ( ) ) { fields . add ( null ) ; } fields . set ( idx , val ) ; return this ; |
public class GitController { /** * Launching the synchronisation */
@ RequestMapping ( value = "project-sync/{projectId}" , method = RequestMethod . POST ) public Ack projectGitSync ( @ PathVariable ID projectId , @ RequestBody GitSynchronisationRequest request ) { } } | Project project = structureService . getProject ( projectId ) ; return gitService . projectSync ( project , request ) ; |
public class OverrideHelper { /** * / * @ Nullable */
public LightweightTypeReference getReturnTypeOfOverriddenOperation ( JvmOperation operation , LightweightTypeReference context ) { } } | if ( operation . getVisibility ( ) == JvmVisibility . PRIVATE || ! InferredTypeIndicator . isInferred ( operation . getReturnType ( ) ) ) { return null ; } BottomResolvedOperation resolvedOperation = new BottomResolvedOperation ( operation , context , overrideTester ) ; List < IResolvedOperation > overriddenMethods = resolvedOperation . getOverriddenAndImplementedMethods ( ) ; if ( overriddenMethods . isEmpty ( ) ) return null ; LightweightTypeReference result = overriddenMethods . get ( 0 ) . getResolvedReturnType ( ) ; return result ; |
public class InputStreamReader { /** * Reads characters into a portion of an array .
* @ param cbuf Destination buffer
* @ param offset Offset at which to start storing characters
* @ param length Maximum number of characters to read
* @ return The number of characters read , or - 1 if the end of the
* stream has been reached
* @ exception IOException If an I / O error occurs */
public int read ( char cbuf [ ] , int offset , int length ) throws IOException { } } | return sd . read ( cbuf , offset , length ) ; |
public class EntitySpec { /** * Returns the distinct tables specified in this entity spec , not including
* references to other entity specs .
* @ return an array of { @ link TableSpec } s . Guaranteed not < code > null < / code > . */
public ColumnSpec [ ] getColumnSpecs ( ) { } } | Set < ColumnSpec > results = new HashSet < > ( ) ; addTo ( results , this . baseSpec ) ; addTo ( results , this . codeSpec ) ; addTo ( results , this . constraintSpecs ) ; addTo ( results , this . finishTimeSpec ) ; addTo ( results , this . startTimeOrTimestampSpec ) ; addTo ( results , this . uniqueIdSpecs ) ; addTo ( results , this . valueSpec ) ; addTo ( results , this . createDateSpec ) ; addTo ( results , this . updateDateSpec ) ; addTo ( results , this . deleteDateSpec ) ; for ( PropertySpec propertySpec : this . propertySpecs ) { addTo ( results , propertySpec . getCodeSpec ( ) ) ; } return results . toArray ( new ColumnSpec [ results . size ( ) ] ) ; |
public class LUDecomposition { /** * Return pivot permutation vector as a one - dimensional double array
* @ return ( double ) piv */
@ Nonnull public double [ ] getDoublePivot ( ) { } } | final double [ ] vals = new double [ m_nRows ] ; for ( int i = 0 ; i < m_nRows ; i ++ ) vals [ i ] = m_aPivot [ i ] ; return vals ; |
public class ExecutorServices { /** * Shuts down all registered scheduler and background workers as soon as possible , but at the latest in specified
* { @ link # SHUTDOWN _ DURATION } seconds . */
public static void shutdownAll ( ) { } } | for ( final ExecutorService executor : new ArrayList < ExecutorService > ( BACKGROUND_EXECUTORS ) ) { shutdown ( executor ) ; BACKGROUND_EXECUTORS . remove ( executor ) ; } for ( final ScheduledExecutorService scheduler : new ArrayList < ScheduledExecutorService > ( SCHEDULERS ) ) { shutdown ( scheduler ) ; SCHEDULERS . remove ( scheduler ) ; } |
public class SoyTreeUtils { /** * Retrieves all nodes in a tree that are an instance of a particular class .
* @ param < T > The type of node to retrieve .
* @ param rootSoyNode The parse tree to search .
* @ param classObject The class whose instances to search for , including subclasses .
* @ return The nodes in the order they appear . */
public static < T extends Node > ImmutableList < T > getAllNodesOfType ( Node rootSoyNode , final Class < T > classObject ) { } } | return getAllMatchingNodesOfType ( rootSoyNode , classObject , arg -> true ) ; |
public class ArrayUtils { /** * Returns the first element in { @ code array } that satisfies the given
* predicate .
* @ param array
* @ param predicate
* @ param < T >
* @ return */
public static < T > T find ( T [ ] array , Predicate < T > predicate ) { } } | if ( array == null ) { return null ; } for ( T t : array ) { if ( predicate . apply ( t ) ) { return t ; } } return null ; |
public class JBBPUtils { /** * Convert string representation of binary data into byte array /
* @ param values a string represents binary data
* @ param bitOrder the bit order to be used for operation
* @ return a byte array generated from the decoded string , empty array for
* null string */
public static byte [ ] str2bin ( final String values , final JBBPBitOrder bitOrder ) { } } | if ( values == null ) { return new byte [ 0 ] ; } int buff = 0 ; int cnt = 0 ; final ByteArrayOutputStream buffer = new ByteArrayOutputStream ( ( values . length ( ) + 7 ) >> 3 ) ; final boolean msb0 = bitOrder == JBBPBitOrder . MSB0 ; for ( final char v : values . toCharArray ( ) ) { switch ( v ) { case '_' : case ' ' : continue ; case '0' : case 'X' : case 'x' : case 'Z' : case 'z' : { if ( msb0 ) { buff >>= 1 ; } else { buff <<= 1 ; } } break ; case '1' : { if ( msb0 ) { buff = ( buff >> 1 ) | 0x80 ; } else { buff = ( buff << 1 ) | 1 ; } } break ; default : throw new IllegalArgumentException ( "Detected unsupported char '" + v + ']' ) ; } cnt ++ ; if ( cnt == 8 ) { buffer . write ( buff ) ; cnt = 0 ; buff = 0 ; } } if ( cnt > 0 ) { buffer . write ( msb0 ? buff >>> ( 8 - cnt ) : buff ) ; } return buffer . toByteArray ( ) ; |
/**
 * Parse the provided servlet request as multipart, if the Content-Type starts
 * with <code>multipart/form-data</code>.
 *
 * Note: this handles only POST parameters. Form fields and file uploads with
 * the same name are grouped; single-valued entries are passed to the consumer
 * as scalars, multi-valued ones as arrays. File items are applied after form
 * fields, so a file may overwrite a field of the same name.
 *
 * @param aHttpRequest
 *        Source HTTP request from which multipart/form-data (aka file
 *        uploads) should be extracted.
 * @param aConsumer
 *        A consumer that takes either {@link IFileItem} or
 *        {@link IFileItem}[] or {@link String} or {@link String}[].
 * @return {@link EChange#CHANGED} if something was added
 */
@Nonnull
public static EChange handleMultipartFormData(@Nonnull final HttpServletRequest aHttpRequest,
                                              @Nonnull final BiConsumer<String, Object> aConsumer) {
    if (aHttpRequest instanceof MockHttpServletRequest) {
        // First check, because some of the contained methods throw
        // UnsupportedOperationExceptions
        return EChange.UNCHANGED;
    }
    if (!RequestHelper.isMultipartFormDataContent(aHttpRequest)) {
        // It's not a multipart request
        return EChange.UNCHANGED;
    }
    // It is a multipart request!
    // Note: this handles only POST parameters!
    boolean bAddedFileUploadItems = false;
    try {
        // Setup the ServletFileUpload....
        final ServletFileUpload aUpload = new ServletFileUpload(s_aFIFP.getFileItemFactory());
        aUpload.setSizeMax(MAX_REQUEST_SIZE);
        aUpload.setHeaderEncoding(CWeb.CHARSET_REQUEST_OBJ.name());
        final IProgressListener aProgressListener = ProgressListenerProvider.getProgressListener();
        if (aProgressListener != null)
            aUpload.setProgressListener(aProgressListener);
        try {
            // Force the request charset so field decoding is consistent.
            aHttpRequest.setCharacterEncoding(CWeb.CHARSET_REQUEST_OBJ.name());
        } catch (final UnsupportedEncodingException ex) {
            // Best effort only - keep parsing with whatever encoding is active.
            if (LOGGER.isErrorEnabled())
                LOGGER.error("Failed to set request character encoding to '" + CWeb.CHARSET_REQUEST_OBJ.name() + "'", ex);
        }
        // Group all items with the same name together
        final IMultiMapListBased<String, String> aFormFields = new MultiHashMapArrayListBased<>();
        final IMultiMapListBased<String, IFileItem> aFormFiles = new MultiHashMapArrayListBased<>();
        final ICommonsList<IFileItem> aFileItems = aUpload.parseRequest(aHttpRequest);
        for (final IFileItem aFileItem : aFileItems) {
            if (aFileItem.isFormField()) {
                // We need to explicitly use the charset, as by default only the
                // charset from the content type is used!
                aFormFields.putSingle(aFileItem.getFieldName(), aFileItem.getString(CWeb.CHARSET_REQUEST_OBJ));
            } else
                aFormFiles.putSingle(aFileItem.getFieldName(), aFileItem);
        }
        // set all form fields
        for (final Map.Entry<String, ICommonsList<String>> aEntry : aFormFields.entrySet()) {
            // Convert list of String to value (String or String[])
            final ICommonsList<String> aValues = aEntry.getValue();
            final Object aValue = aValues.size() == 1 ? aValues.getFirst() : ArrayHelper.newArray(aValues, String.class);
            aConsumer.accept(aEntry.getKey(), aValue);
        }
        // set all form files (potentially overwriting form fields with the same
        // name)
        for (final Map.Entry<String, ICommonsList<IFileItem>> aEntry : aFormFiles.entrySet()) {
            // Convert list of String to value (IFileItem or IFileItem[])
            final ICommonsList<IFileItem> aValues = aEntry.getValue();
            final Object aValue = aValues.size() == 1 ? aValues.getFirst() : ArrayHelper.newArray(aValues, IFileItem.class);
            aConsumer.accept(aEntry.getKey(), aValue);
        }
        // Parsing complex file upload succeeded -> do not use standard scan for
        // parameters
        bAddedFileUploadItems = true;
    } catch (final FileUploadException ex) {
        // Known EOF causes (client aborts) are deliberately not logged as errors.
        if (!StreamHelper.isKnownEOFException(ex.getCause()))
            LOGGER.error("Error parsing multipart request content", ex);
    } catch (final RuntimeException ex) {
        LOGGER.error("Error parsing multipart request content", ex);
    }
    return EChange.valueOf(bAddedFileUploadItems);
}
public class Host2NodesMap { /** * Find data node by its name .
* @ deprecated use { @ link # getDataNodeByIpPort ( String ) }
* @ return DatanodeDescriptor if found or null otherwise */
@ Deprecated public DatanodeDescriptor getDatanodeByName ( String name ) { } } | if ( name == null ) { return null ; } int colon = name . indexOf ( ":" ) ; String host ; if ( colon < 0 ) { host = name ; } else { host = name . substring ( 0 , colon ) ; } hostmapLock . readLock ( ) . lock ( ) ; try { DatanodeDescriptor [ ] nodes = map . get ( host ) ; // no entry
if ( nodes == null ) { return null ; } for ( DatanodeDescriptor containedNode : nodes ) { if ( name . equals ( containedNode . getName ( ) ) ) { return containedNode ; } } return null ; } finally { hostmapLock . readLock ( ) . unlock ( ) ; } |
public class AvatarDataNode { /** * Returns the IP : port address of the avatar node */
private static List < InetSocketAddress > getAvatarNodeAddresses ( String suffix , Configuration conf , Collection < String > serviceIds ) throws IOException { } } | List < InetSocketAddress > namenodeAddresses = DFSUtil . getRPCAddresses ( suffix , conf , serviceIds , FSConstants . DFS_NAMENODE_RPC_ADDRESS_KEY ) ; List < InetSocketAddress > avatarnodeAddresses = new ArrayList < InetSocketAddress > ( namenodeAddresses . size ( ) ) ; for ( InetSocketAddress namenodeAddress : namenodeAddresses ) { avatarnodeAddresses . add ( new InetSocketAddress ( namenodeAddress . getAddress ( ) , conf . getInt ( "dfs.avatarnode.port" , namenodeAddress . getPort ( ) + 1 ) ) ) ; } return avatarnodeAddresses ; |
public class EventData { /** * 返回所有待变更的字段 */
public List < EventColumn > getUpdatedColumns ( ) { } } | List < EventColumn > columns = new ArrayList < EventColumn > ( ) ; for ( EventColumn column : this . columns ) { if ( column . isUpdate ( ) ) { columns . add ( column ) ; } } return columns ; |
public class Repository { /** * Call this after you ' ve loaded all libraries
* @ return reload files */
public List < Node > link ( ) { } } | List < Module > dependencies ; Module module ; Module resolved ; StringBuilder problems ; List < Node > result ; problems = new StringBuilder ( ) ; for ( Map . Entry < Module , List < String > > entry : notLinked . entrySet ( ) ) { module = entry . getKey ( ) ; dependencies = module . dependencies ( ) ; for ( String name : entry . getValue ( ) ) { resolved = lookup ( name ) ; if ( resolved == null ) { problems . append ( "module '" + module . getName ( ) + "': cannot resolve dependency '" + name + "'\n" ) ; } else { dependencies . add ( resolved ) ; } } } if ( problems . length ( ) > 0 ) { throw new IllegalArgumentException ( problems . toString ( ) ) ; } result = reloadFiles ; notLinked = null ; reloadFiles = null ; return result ; |
/**
 * Creates a new client-side {@link SslContext}.
 *
 * @param certChainFile an X.509 certificate chain file in PEM format.
 *        {@code null} to use the system default
 * @param trustManagerFactory the {@link TrustManagerFactory} that provides the {@link TrustManager}s
 *        that verifies the certificates sent from servers.
 *        {@code null} to use the default.
 * @return a new client-side {@link SslContext}
 * @throws SSLException if the context could not be created
 * @deprecated Replaced by {@link SslContextBuilder}
 */
@Deprecated
public static SslContext newClientContext(File certChainFile, TrustManagerFactory trustManagerFactory) throws SSLException {
    // Delegate to the fuller overload; the leading null selects the default provider.
    return newClientContext(null, certChainFile, trustManagerFactory);
}
public class DefaultExtWebDriver { /** * TODO : determine if there is a better option than returning null */
@ Override public String getBrowserVersion ( ) { } } | if ( wd != null ) { Capabilities capabilities = ( ( HasCapabilities ) wd ) . getCapabilities ( ) ; if ( capabilities != null ) { return capabilities . getVersion ( ) ; } return null ; } return null ; |
/**
 * <pre>
 * The name of the tensor in which to specify the filename when saving or
 * restoring a model checkpoint.
 * </pre>
 *
 * <code>optional string filename_tensor_name = 1;</code>
 *
 * @return the tensor name, decoded from UTF-8 and cached on first access
 */
public java.lang.String getFilenameTensorName() {
    java.lang.Object ref = filenameTensorName_;
    if (ref instanceof java.lang.String) {
        // Already decoded and cached - return directly.
        return (java.lang.String) ref;
    } else {
        // First access: the field still holds the raw ByteString. Decode as
        // UTF-8 and cache the String back into the field (protobuf's
        // generated lazy-decode pattern).
        com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        filenameTensorName_ = s;
        return s;
    }
}
public class JobBuilder { /** * Create a JobBuilder with which to define a < code > JobDetail < / code > , and set the class name of
* the < code > Job < / code > to be executed .
* @ return a new JobBuilder */
public static JobBuilder newJobBuilder ( Class < ? extends Job > jobClass ) { } } | JobBuilder b = new JobBuilder ( ) ; b . ofType ( jobClass ) ; return b ; |
public class XMLChar { /** * Returns true if the specified character is a valid name character as defined by production [ 4]
* in the XML 1.0 specification . < b > NameChar : : = Letter | Digit | ' . ' | ' - ' | ' _ ' | ' : ' |
* CombiningChar | Extender < / b >
* @ param ch
* The character to check . */
public static boolean isName ( int ch ) { } } | return isLetter ( ch ) || isDigit ( ch ) || '.' == ch || '-' == ch || '_' == ch || ':' == ch || isCombiningChar ( ch ) || isExtender ( ch ) ; |
public class ComparisonFailureErrorFactory { /** * { @ inheritDoc }
* Create < pre > org . junit . ComparisonFailure < / pre > if possible . */
@ Override public AssertionError newAssertionError ( Description d , Representation representation ) { } } | AssertionError assertionError = getComparisonFailureInstance ( ) ; if ( assertionError != null ) { return assertionError ; } String message = String . format ( EXPECTED_BUT_WAS_MESSAGE , getActual ( ) , getExpected ( ) ) ; return Failures . instance ( ) . failure ( message ) ; |
public class PersistentMessageStoreImpl { /** * as we can not function once we have lost the ObjectManager . */
private void objectManagerStopped ( ) { } } | if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) SibTr . entry ( this , tc , "objectManagerStopped" ) ; // Stop any new work coming in as we do not have a working
// ObjectManager to service any requests .
_available = false ; // Only treat this shutdown as an error if we haven ' t
// asked for it . In the case of a normal ME stop we
// will still get the callback but don ' t have to worry
// about it .
if ( ! _shutdownRequested ) { // Report a local error so that we can begin failover to a
// new ME instance . This will allow the new instance to
// enter it ' s startup retry loop and try to re - connect to
// a working ObjectManager .
_ms . reportLocalError ( ) ; SibTr . error ( tc , "FILE_STORE_STOP_UNEXPECTED_SIMS1590" ) ; } else { SibTr . info ( tc , "FILE_STORE_STOP_EXPECTED_SIMS1589" ) ; } if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) SibTr . exit ( this , tc , "objectManagerStopped" ) ; |
public class RenderUtils { /** * Transforms the supplied object to an escaped HTML string .
* @ param object
* the object
* @ return the escaped HTML string */
@ Nullable public static String escapeHtml ( @ Nullable final Object object ) { } } | return object == null ? null : HtmlEscapers . htmlEscaper ( ) . escape ( object . toString ( ) ) ; |
public class EmailValidationUtil { /** * Validates an e - mail with given validation flags .
* @ param email A complete email address .
* @ param emailAddressValidationCriteria A set of flags that restrict or relax RFC 2822 compliance .
* @ return Whether the e - mail address is compliant with RFC 2822 , configured using the passed in { @ link EmailAddressValidationCriteria } .
* @ see EmailAddressValidationCriteria # EmailAddressValidationCriteria ( boolean , boolean ) */
public static boolean isValid ( final String email , final EmailAddressValidationCriteria emailAddressValidationCriteria ) { } } | return buildValidEmailPattern ( emailAddressValidationCriteria ) . matcher ( email ) . matches ( ) ; |
/**
 * Implements the same semantics as {@link CountDownLatch#await()}, except that
 * the wait will periodically time out within this method to log a warning
 * message that the thread appears stuck. This cycle is repeated until the
 * latch has counted down to zero. If the thread has waited for an extended
 * period, wait logging is quiesced to avoid overwhelming the logs. If a
 * thread that was reported as stuck finally proceeds, a warning level message
 * is logged to indicate that the thread is resuming.
 *
 * @param latch the CountDownLatch object to wait on
 * @param callerClass the caller class (for log attribution)
 * @param callerMethod the caller method (for log attribution)
 * @param args extra arguments to be appended to the log message
 * @throws InterruptedException if the wait is interrupted
 */
public static void await(CountDownLatch latch, String callerClass, String callerMethod, Object... args) throws InterruptedException {
    final String sourceMethod = "await"; //$NON-NLS-1$
    final boolean isTraceLogging = log.isLoggable(Level.FINER);
    if (isTraceLogging) {
        log.entering(sourceClass, sourceMethod, new Object[]{Thread.currentThread(), latch, callerClass, callerMethod, args});
    }
    long start = System.currentTimeMillis();
    // quiesced: logWaiting asked us to stop repeating the "stuck" warning.
    // logged: we warned at least once, so a "resuming" message is owed later.
    boolean quiesced = false, logged = false;
    while (!latch.await(SignalUtil.SIGNAL_LOG_INTERVAL_SECONDS, TimeUnit.SECONDS)) {
        if (!quiesced) {
            // logWaiting returns true once logging should be quiesced.
            quiesced = logWaiting(callerClass, callerMethod, latch, start, args);
            logged = true;
        }
    }
    if (logged) {
        // Announce that the previously-reported-stuck thread is resuming.
        logResuming(callerClass, callerMethod, latch, start);
    }
    if (isTraceLogging) {
        log.exiting(sourceClass, sourceMethod, Arrays.asList(Thread.currentThread(), latch, callerClass, callerMethod));
    }
}
public class ClassUtil { /** * returns the calling class .
* offset 0 returns class who is calling this method
* offset 1 returns class who called your method
* and so on */
public static Class getClassingClass ( int offset ) { } } | Class [ ] context = ClassContext . INSTANCE . getClassContext ( ) ; offset += 2 ; return context . length > offset ? context [ offset ] : null ; |
public class FileEventStore { /** * { @ inheritDoc } */
@ Override public void setAttempts ( String projectId , String eventCollection , String attemptsString ) throws IOException { } } | // Prepare the collection cache directory .
File collectionCacheDir = prepareCollectionDir ( projectId , eventCollection ) ; // Create the cache file .
File cacheFile = new File ( collectionCacheDir , ATTEMPTS_JSON_FILE_NAME ) ; // Write the event to the cache file .
OutputStream out = new FileOutputStream ( cacheFile ) ; Writer writer = null ; try { writer = new OutputStreamWriter ( out , ENCODING ) ; writer . write ( attemptsString ) ; } finally { KeenUtils . closeQuietly ( writer ) ; } |
/**
 * Gets the metadata for all tables in this stash. This is a heavier operation
 * than just {@link #listTables()} since it also returns full file details for
 * the entire Stash instead of just table names.
 *
 * The returned iterator pages lazily through the S3 listing (1000 keys at a
 * time) and groups consecutive keys by their table directory; one
 * StashTableMetadata is produced per table directory.
 */
public Iterator<StashTableMetadata> listTableMetadata() {
    final String root = getRootPath();
    final String prefix = String.format("%s/", root);
    final int prefixLength = prefix.length();
    return new AbstractIterator<StashTableMetadata>() {
        // Buffered page of the S3 listing; starts empty so the first
        // computeNext() triggers a listObjects call.
        PeekingIterator<S3ObjectSummary> _listResponse =
                Iterators.peekingIterator(Iterators.<S3ObjectSummary>emptyIterator());
        // Continuation marker and truncation flag for S3 pagination.
        String _marker = null;
        boolean _truncated = true;

        @Override
        protected StashTableMetadata computeNext() {
            String tableDir = null;
            List<StashFileMetadata> files = Lists.newArrayListWithCapacity(16);
            boolean allFilesRead = false;
            while (!allFilesRead) {
                if (_listResponse.hasNext()) {
                    // Peek at the next record but don't consume it until we verify it's part of the same table
                    S3ObjectSummary s3File = _listResponse.peek();
                    String key = s3File.getKey();
                    // Don't include the _SUCCESS file or any other stray files we may find
                    String[] parentDirAndFile = key.substring(prefixLength).split("/");
                    if (parentDirAndFile.length != 2) {
                        // Consume and skip this row
                        _listResponse.next();
                    } else {
                        String parentDir = parentDirAndFile[0];
                        if (tableDir == null) {
                            // First file seen this round fixes the current table.
                            tableDir = parentDir;
                        }
                        if (!parentDir.equals(tableDir)) {
                            // Key belongs to the next table; leave it un-consumed
                            // for the following computeNext() call.
                            allFilesRead = true;
                        } else {
                            // Record is part of this table; consume it now
                            _listResponse.next();
                            files.add(new StashFileMetadata(_bucket, key, s3File.getSize()));
                        }
                    }
                } else if (_truncated) {
                    // Current page exhausted but more keys remain: fetch the next page.
                    ObjectListing response = _s3.listObjects(new ListObjectsRequest()
                            .withBucketName(_bucket)
                            .withPrefix(prefix)
                            .withMarker(_marker)
                            .withMaxKeys(1000));
                    _listResponse = Iterators.peekingIterator(response.getObjectSummaries().iterator());
                    _marker = response.getNextMarker();
                    _truncated = response.isTruncated();
                } else {
                    allFilesRead = true;
                }
            }
            if (tableDir == null) {
                // No files read this iteration means all files have been read
                return endOfData();
            }
            String tablePrefix = prefix + tableDir + "/";
            String tableName = StashUtil.decodeStashTable(tableDir);
            return new StashTableMetadata(_bucket, tablePrefix, tableName, files);
        }
    };
}
public class SF { /** * dieses < SEG > anzuwenden */
private String [ ] extractSegId ( StringBuffer sb ) { } } | String [ ] ret = new String [ ] { "" , "" } ; if ( sb . length ( ) > 1 ) { int startpos = 0 ; char ch = sb . charAt ( 0 ) ; if ( ch == '+' || ch == ':' || ch == '\'' ) startpos ++ ; // erste DEG extrahieren
int endpos = sb . indexOf ( "+" , startpos ) ; if ( endpos == - 1 ) { endpos = sb . length ( ) ; } // code und version aus der ersten DEG extrahieren
String [ ] des = sb . substring ( startpos , endpos ) . split ( ":" ) ; ret [ 0 ] = des [ 0 ] ; // segcode
ret [ 1 ] = des [ 2 ] ; // segversion
} return ret ; |
/**
 * Force closes socket and stream readers/writers.
 *
 * If the connection lock can be acquired, the connection is idle and a clean
 * close packet is sent; otherwise a query is presumed to be running and the
 * connection is killed forcibly.
 */
public void abort() {
    this.explicitClosed = true;
    boolean lockStatus = false;
    if (lock != null) {
        // tryLock: do not wait for a running query to finish.
        lockStatus = lock.tryLock();
    }
    this.connected = false;
    abortActiveStream();
    if (!lockStatus) {
        // lock not available: query is running
        // force end by executing an KILL connection
        forceAbort();
        try {
            // SO_LINGER with timeout 0 makes the subsequent close abortive
            // (connection reset) rather than a graceful shutdown.
            socket.setSoLinger(true, 0);
        } catch (IOException ioException) {
            // eat
        }
    } else {
        // Connection idle: tell the server we are closing cleanly.
        SendClosePacket.send(writer);
    }
    closeSocket(reader, writer, socket);
    cleanMemory();
    if (lockStatus) {
        lock.unlock();
    }
}
/**
 * Returns true if there are more literals or tokens/delimiters.
 *
 * Lazily advances the matcher: a successful call leaves the next
 * literal/token pair buffered in _literal/_token for the consumer.
 */
public boolean hasNext() {
    if (_matcher == null) {
        // Matcher was discarded after the trailing literal: fully consumed.
        return false;
    }
    if (_literal != null || _token != null) {
        // A previously-found pair is still buffered and unconsumed.
        return true;
    }
    if (_matcher.find()) {
        // Buffer the literal preceding the match plus the matched token.
        _literal = _template.subSequence(_endPrevios, _matcher.start()).toString();
        _token = _matcher.group();
        _endPrevios = _matcher.end();
    } else if (_endPrevios < _template.length()) {
        // We're at the end: emit the trailing literal after the last token.
        _literal = _template.subSequence(_endPrevios, _template.length()).toString();
        _endPrevios = _template.length();
        // Remove the matcher so it doesn't reset itself
        _matcher = null;
    }
    return _literal != null || _token != null;
}
/**
 * Move the physical binary data to this field.
 *
 * The code here is kinda complicated. This is the unique key field. If this
 * is a new record and there haven't been any changes yet, I do a seek on this
 * index. In read-write mode a seek may also be used to detect duplicate keys;
 * in read-only mode a failed seek clears the record instead.
 *
 * @param data the raw data to set the basefield to.
 * @param bDisplayOption If true, display the change.
 * @param iMoveMode The type of move being done (init/read/screen).
 * @return The error code (or NORMAL_RETURN if okay).
 */
public int doSetData(Object data, boolean bDisplayOption, int iMoveMode) {
    if (!m_bReadOnly) {
        Object bookmark = null;
        BaseBuffer buffer = null; // In case a temp buffer is needed
        boolean bSuccess = false;
        Object oldBuff = this.getOwner().getData(); // Get a copy of the old key field
        Record record = this.getOwner().getRecord();
        if (record.getEditMode() != Constants.EDIT_ADD) {
            // Current record (but no changes) - save the bookmark only for possible re-read.
            try {
                bookmark = record.getHandle(DBConstants.DATA_SOURCE_HANDLE);
            } catch (DBException e) {
                bookmark = null;
            }
        }
        int result = super.doSetData(data, bDisplayOption, iMoveMode); // Do the rest of the behaviors
        if (result != DBConstants.NORMAL_RETURN)
            return result; // If error
        if ((!this.getOwner().isJustModified()) || (data == null))
            return result; // If no change or set to null
        // Remember the current key order, then switch to this field's key.
        String iOldKeySeq = record.getKeyArea(-1).getKeyName();
        record.setKeyArea(keyName);
        if ((!record.isModified(true)) && ((record.getEditMode() == Constants.EDIT_ADD) || (record.getEditMode() == Constants.EDIT_NONE))) // Modified or valid record
        {
            // This is a new record and this is the first mod
            try {
                buffer = new VectorBuffer(null);
                record.getKeyArea(keyName).setupKeyBuffer(buffer, DBConstants.FILE_KEY_AREA); // Save the keys
                bSuccess = this.seek(record); // Read this record (display if found)
                record.setKeyArea(iOldKeySeq); // Set the key order back
                if (!bSuccess) {
                    // Not found, restore the data and the new key
                    record.addNew(); // This may wipe out the keys
                    if (buffer != null)
                        buffer.resetPosition(); // Just to be careful
                    record.getKeyArea(keyName).reverseKeyBuffer(buffer, DBConstants.FILE_KEY_AREA); // Restore the keys
                    result = DBConstants.NORMAL_RETURN; // Everything went okay
                } else {
                    // Record found - good
                    if (record.getRecordOwner().isBatch()) {
                        // Special case - Can't display this record
                        return DBConstants.DUPLICATE_KEY;
                    }
                }
            } catch (DBException e) {
                return e.getErrorCode(); // Never
            }
        } else if (record.getKeyArea(keyName).getUniqueKeyCode() == DBConstants.UNIQUE) {
            // Data already entered, see if this entry makes a duplicate key!
            buffer = new VectorBuffer(null);
            try {
                buffer.fieldsToBuffer(record, BaseBuffer.ALL_FIELDS); // Save the entire record
                bSuccess = this.seek(record); // See if this key already exists
                if (bookmark != null) {
                    if (record.setHandle(bookmark, DBConstants.DATA_SOURCE_HANDLE) != null) // Set the pointer back to the old key
                        record.edit();
                    else
                        record.addNew(); // Never
                } else
                    record.addNew(); // This is a new record, and they entered non-key data already
                record.setKeyArea(iOldKeySeq); // Set the key order back
                buffer.bufferToFields(record, DBConstants.DONT_DISPLAY, DBConstants.READ_MOVE); // Restore the data
                for (int iFieldSeq = DBConstants.MAIN_FIELD; iFieldSeq < record.getFieldCount() + DBConstants.MAIN_FIELD; iFieldSeq++) {
                    // Redisplay all the fields
                    record.getField(iFieldSeq).displayField();
                }
            } catch (DBException e) {
                return e.getErrorCode(); // Never
            }
            if (bSuccess) {
                this.getOwner().setData(oldBuff, bDisplayOption, iMoveMode); // Restore a copy of the old key field
                result = DBConstants.DUPLICATE_KEY; // Can't enter this key, you'll get a dup key!
            } else
                result = DBConstants.NORMAL_RETURN; // Good, this is a valid key
        }
        return result;
    } else {
        // Read only
        Record record = this.getOwner().getRecord();
        int result = super.doSetData(data, bDisplayOption, iMoveMode); // Do the rest of the behaviors
        if (result != DBConstants.NORMAL_RETURN)
            return result; // If error or no change
        if (!this.getOwner().isJustModified())
            return result; // If error or no change
        String strOldKeySeq = Constants.BLANK;
        strOldKeySeq = record.getKeyArea(-1).getKeyName();
        record.setKeyArea(keyName);
        if (!record.isModified(true)) // Modified or valid (Need valid for secondary lookups)
        {
            try {
                if ((data == null) || (data.equals(Constants.BLANK))) {
                    record.addNew(); // Clear the fields!
                } else {
                    // This is a new record and this is the first mod
                    boolean bSuccess = this.seek(record); // Read this record (display if found)
                    if (!bSuccess)
                        record.initRecord(bDisplayOption); // Clear the fields!
                }
            } catch (DBException e) {
                return e.getErrorCode(); // Never
            } finally {
                record.setKeyArea(strOldKeySeq); // Set the key order back
            }
        }
        return result;
    }
}
/**
 * Perform a URI fragment identifier <strong>unescape</strong> operation on a
 * <tt>Reader</tt> input using <tt>UTF-8</tt> as encoding, writing results to
 * a <tt>Writer</tt>.
 *
 * This method will unescape every percent-encoded (<tt>%HH</tt>) sequence
 * present in input, even for those characters that do not need to be
 * percent-encoded in this context (unreserved characters can be
 * percent-encoded even if/when this is not required, though it is not
 * generally considered a good practice).
 *
 * This method will use <tt>UTF-8</tt> in order to determine the characters
 * specified in the percent-encoded byte sequences.
 *
 * This method is <strong>thread-safe</strong>.
 *
 * @param reader the <tt>Reader</tt> reading the text to be unescaped.
 * @param writer the <tt>java.io.Writer</tt> to which the unescaped result
 *        will be written. Nothing will be written at all to this writer if
 *        input is <tt>null</tt>.
 * @throws IOException if an input/output exception occurs
 * @since 1.1.2
 */
public static void unescapeUriFragmentId(final Reader reader, final Writer writer) throws IOException {
    // Delegate to the charset-aware overload using the default (UTF-8) encoding.
    unescapeUriFragmentId(reader, writer, DEFAULT_ENCODING);
}
public class HTODDynacache { /** * readDependencyByRange ( )
* This method is used by CacheMonitor to retrive the dependency ids from the disk .
* If index = 0 , it starts the beginning . If index = 1 , it means " next " . If Index = - 1 , it means " previous " .
* The length of the max number of templates to be read . If length = - 1 , it reads all templates until the end . */
public Result readDependencyByRange ( int index , int length ) { } } | Result result = getFromResultPool ( ) ; if ( ! this . disableDependencyId ) { Result other = readByRange ( DEP_ID_DATA , index , length , ! CHECK_EXPIRED , ! FILTER ) ; result . copy ( other ) ; returnToResultPool ( other ) ; } return result ; |
public class FileExecutor { /** * 新建文件夹 , 如果文件夹存在则不创建
* @ param director 文件夹
* @ return 文件夹是否创建成功 ( 如果文件夹存在同样返回true ) */
public static boolean createFolder ( File director ) { } } | return director . exists ( ) || ( createFolder ( director . getParent ( ) ) && director . mkdir ( ) ) ; |
public class ModelWrapper { /** * Appends the currentContext to the given model id . */
protected String appendContextId ( Object modelId ) { } } | return String . format ( "%s/%s" , ContextHolder . get ( ) . getCurrentContextId ( ) , modelId ) ; |
public class CmsXmlPage { /** * Returns the link table of an element . < p >
* @ param name name of the element
* @ param locale locale of the element
* @ return the link table */
public CmsLinkTable getLinkTable ( String name , Locale locale ) { } } | CmsXmlHtmlValue value = ( CmsXmlHtmlValue ) getValue ( name , locale ) ; if ( value != null ) { return value . getLinkTable ( ) ; } return new CmsLinkTable ( ) ; |
public class CouchDBUtils { /** * Gets the design document .
* @ param httpClient
* the http client
* @ param httpHost
* the http host
* @ param gson
* the gson
* @ param tableName
* the table name
* @ param schemaName
* the schema name
* @ return the design document */
private static CouchDBDesignDocument getDesignDocument ( HttpClient httpClient , HttpHost httpHost , Gson gson , String tableName , String schemaName ) { } } | HttpResponse response = null ; try { String id = CouchDBConstants . DESIGN + tableName ; URI uri = new URI ( CouchDBConstants . PROTOCOL , null , httpHost . getHostName ( ) , httpHost . getPort ( ) , CouchDBConstants . URL_SEPARATOR + schemaName . toLowerCase ( ) + CouchDBConstants . URL_SEPARATOR + id , null , null ) ; HttpGet get = new HttpGet ( uri ) ; get . addHeader ( "Accept" , "application/json" ) ; response = httpClient . execute ( httpHost , get , CouchDBUtils . getContext ( httpHost ) ) ; InputStream content = response . getEntity ( ) . getContent ( ) ; Reader reader = new InputStreamReader ( content ) ; JsonObject jsonObject = gson . fromJson ( reader , JsonObject . class ) ; return gson . fromJson ( jsonObject , CouchDBDesignDocument . class ) ; } catch ( Exception e ) { log . error ( "Error while fetching design document object, Caused by: ." , e ) ; throw new KunderaException ( e ) ; } finally { CouchDBUtils . closeContent ( response ) ; } |
public class ExecutorHealthChecker { /** * Increments executor failure count . If it reaches max failure count , sends alert emails to AZ
* admin .
* @ param entry executor to list of flows map entry
* @ param executor the executor
* @ param e Exception thrown when the executor is not alive */
private void handleExecutorNotAliveCase ( final Entry < Optional < Executor > , List < ExecutableFlow > > entry , final Executor executor , final ExecutorManagerException e ) { } } | logger . error ( "Failed to get update from executor " + executor . getId ( ) , e ) ; this . executorFailureCount . put ( executor . getId ( ) , this . executorFailureCount . getOrDefault ( executor . getId ( ) , 0 ) + 1 ) ; if ( this . executorFailureCount . get ( executor . getId ( ) ) % this . executorMaxFailureCount == 0 && ! this . alertEmails . isEmpty ( ) ) { entry . getValue ( ) . stream ( ) . forEach ( flow -> flow . getExecutionOptions ( ) . setFailureEmails ( this . alertEmails ) ) ; logger . info ( String . format ( "Executor failure count is %d. Sending alert emails to %s." , this . executorFailureCount . get ( executor . getId ( ) ) , this . alertEmails ) ) ; this . alerterHolder . get ( "email" ) . alertOnFailedUpdate ( executor , entry . getValue ( ) , e ) ; } |
public class Preconditions { /** * Checks the truth of the given expression and throws a customized
* { @ link IllegalArgumentException } if it is false . Intended for doing parameter validation in
* methods and constructors , e . g . :
* < blockquote > < pre >
* public void foo ( int count ) {
* Preconditions . checkArgument ( count > 0 , " count must be positive : % s . " , count ) ;
* < / pre > < / blockquote >
* @ param expression the precondition to check involving one ore more parameters to the calling
* method or constructor
* @ param messageFormat a { @ link Formatter format } string for the detail message to be used in
* the event that an exception is thrown .
* @ param messageArgs the arguments referenced by the format specifiers in the
* { @ code messageFormat }
* @ throws IllegalArgumentException if { @ code expression } is false */
public static void checkArgument ( boolean expression , String messageFormat , Object ... messageArgs ) { } } | if ( ! expression ) { throw new IllegalArgumentException ( format ( messageFormat , messageArgs ) ) ; } |
/**
 * Convenient method for the panning slide effect.
 *
 * @param aktMemo the channel state to update
 */
protected void doPanningSlideEffekt(ChannelMemory aktMemo) {
    // A panning slide disables surround mode for this channel.
    aktMemo.doSurround = false;
    // NOTE(review): the result is not clamped here - presumably the caller
    // or the mixer clamps panning to its valid range; confirm.
    aktMemo.panning += aktMemo.panningSlideValue;
}
/**
 * Process tasks: builds the MPXJ task model from a PMXML project.
 *
 * Three passes over the data:
 * 1. create one Task per WBS entry,
 * 2. re-link those tasks into the WBS parent/child hierarchy,
 * 3. create one Task per Activity, attached under its WBS parent,
 * then sort, rebuild structure and roll up dates.
 *
 * @param project xml container
 */
private void processTasks(ProjectType project) {
    List<WBSType> wbs = project.getWBS();
    List<ActivityType> tasks = project.getActivity();
    // IDs already consumed by WBS tasks; used to detect activity ID clashes below
    Set<Integer> uniqueIDs = new HashSet<Integer>();
    Set<Task> wbsTasks = new HashSet<Task>();

    //
    // Read WBS entries and create tasks
    //
    Collections.sort(wbs, WBS_ROW_COMPARATOR);
    for (WBSType row : wbs) {
        Task task = m_projectFile.addTask();
        Integer uniqueID = row.getObjectId();
        uniqueIDs.add(uniqueID);
        wbsTasks.add(task);
        task.setUniqueID(uniqueID);
        task.setGUID(DatatypeConverter.parseUUID(row.getGUID()));
        task.setName(row.getName());
        task.setBaselineCost(row.getSummaryBaselineTotalCost());
        task.setRemainingCost(row.getSummaryRemainingTotalCost());
        task.setRemainingDuration(getDuration(row.getSummaryRemainingDuration()));
        task.setSummary(true);
        task.setStart(row.getAnticipatedStartDate());
        task.setFinish(row.getAnticipatedFinishDate());
        task.setWBS(row.getCode());
    }

    //
    // Create hierarchical structure: re-parent each WBS task under its
    // parent WBS task and build the dotted WBS code as we go.
    //
    m_projectFile.getChildTasks().clear();
    for (WBSType row : wbs) {
        Task task = m_projectFile.getTaskByUniqueID(row.getObjectId());
        Task parentTask = m_projectFile.getTaskByUniqueID(row.getParentObjectId());
        if (parentTask == null) {
            m_projectFile.getChildTasks().add(task);
        } else {
            m_projectFile.getChildTasks().remove(task);
            parentTask.getChildTasks().add(task);
            task.setWBS(parentTask.getWBS() + "." + task.getWBS());
            task.setText(1, task.getWBS());
        }
    }

    //
    // Read Task entries and create tasks
    //
    int nextID = 1;
    m_clashMap.clear();
    for (ActivityType row : tasks) {
        Integer uniqueID = row.getObjectId();
        if (uniqueIDs.contains(uniqueID)) {
            // Activity ID collides with a WBS ID: allocate the next free ID
            // and remember the mapping so relations can be remapped later.
            while (uniqueIDs.contains(Integer.valueOf(nextID))) {
                ++nextID;
            }
            Integer newUniqueID = Integer.valueOf(nextID);
            m_clashMap.put(uniqueID, newUniqueID);
            uniqueID = newUniqueID;
        }
        uniqueIDs.add(uniqueID);

        Task task;
        Integer parentTaskID = row.getWBSObjectId();
        Task parentTask = m_projectFile.getTaskByUniqueID(parentTaskID);
        if (parentTask == null) {
            task = m_projectFile.addTask();
        } else {
            task = parentTask.addTask();
        }
        task.setUniqueID(uniqueID);
        task.setGUID(DatatypeConverter.parseUUID(row.getGUID()));
        task.setName(row.getName());
        task.setPercentageComplete(reversePercentage(row.getPercentComplete()));
        task.setRemainingDuration(getDuration(row.getRemainingDuration()));
        task.setActualWork(getDuration(zeroIsNull(row.getActualDuration())));
        task.setRemainingWork(getDuration(row.getRemainingTotalUnits()));
        task.setBaselineDuration(getDuration(row.getPlannedDuration()));
        task.setActualDuration(getDuration(row.getActualDuration()));
        task.setDuration(getDuration(row.getAtCompletionDuration()));

        // ActualCost and RemainingCost will be set when we resolve the resource assignments
        task.setActualCost(NumberHelper.DOUBLE_ZERO);
        task.setRemainingCost(NumberHelper.DOUBLE_ZERO);
        task.setBaselineCost(NumberHelper.DOUBLE_ZERO);
        task.setConstraintDate(row.getPrimaryConstraintDate());
        task.setConstraintType(CONSTRAINT_TYPE_MAP.get(row.getPrimaryConstraintType()));
        task.setActualStart(row.getActualStartDate());
        task.setActualFinish(row.getActualFinishDate());
        task.setLateStart(row.getRemainingLateStartDate());
        task.setLateFinish(row.getRemainingLateFinishDate());
        task.setEarlyStart(row.getRemainingEarlyStartDate());
        task.setEarlyFinish(row.getRemainingEarlyFinishDate());
        task.setBaselineStart(row.getPlannedStartDate());
        task.setBaselineFinish(row.getPlannedFinishDate());
        task.setPriority(PRIORITY_MAP.get(row.getLevelingPriority()));
        task.setCreateDate(row.getCreateDate());
        // Text/Number custom fields carry P6-specific attributes
        task.setText(1, row.getId());
        task.setText(2, row.getType());
        task.setText(3, row.getStatus());
        task.setNumber(1, row.getPrimaryResourceObjectId());
        task.setMilestone(BooleanHelper.getBoolean(MILESTONE_MAP.get(row.getType())));
        // Critical when there is no float: late start is not after early start
        task.setCritical(task.getEarlyStart() != null && task.getLateStart() != null && !(task.getLateStart().compareTo(task.getEarlyStart()) > 0));
        if (parentTask != null) {
            task.setWBS(parentTask.getWBS());
        }
        Integer calId = row.getCalendarObjectId();
        ProjectCalendar cal = m_calMap.get(calId);
        task.setCalendar(cal);

        task.setStart(row.getStartDate());
        task.setFinish(row.getFinishDate());
        // Fall back through actual/baseline values when the primary field is null
        populateField(task, TaskField.START, TaskField.START, TaskField.ACTUAL_START, TaskField.BASELINE_START);
        populateField(task, TaskField.FINISH, TaskField.FINISH, TaskField.ACTUAL_FINISH);
        populateField(task, TaskField.WORK, TaskField.ACTUAL_WORK, TaskField.BASELINE_WORK);

        // We've tried the finish and actual finish fields... but we still have null.
        // P6 itself doesn't export PMXML like this.
        // The sample I have that requires this code appears to have been been generated by Synchro.
        if (task.getFinish() == null) {
            // Find the remaining duration, set it to null if it is zero
            Duration duration = task.getRemainingDuration();
            if (duration != null && duration.getDuration() == 0) {
                duration = null;
            }

            // If the task hasn't started, or we don't have a usable duration
            // let's just use the baseline finish.
            if (task.getActualStart() == null || duration == null) {
                task.setFinish(task.getBaselineFinish());
            } else {
                // The task has started, let's calculate the finish date using the remaining duration
                // and the "restart" date, which we've put in the baseline start date.
                ProjectCalendar calendar = task.getEffectiveCalendar();
                Date finish = calendar.getDate(task.getBaselineStart(), duration, false);

                // Deal with an oddity where the finish date shows up as the
                // start of work date for the next working day. If we can identify this,
                // wind the date back to the end of the previous working day.
                Date nextWorkStart = calendar.getNextWorkStart(finish);
                if (DateHelper.compare(finish, nextWorkStart) == 0) {
                    finish = calendar.getPreviousWorkFinish(finish);
                }
                task.setFinish(finish);
            }
        }

        readUDFTypes(task, row.getUDF());
        readActivityCodes(task, row.getCode());
        m_eventManager.fireTaskReadEvent(task);
    }

    new ActivitySorter(TaskField.TEXT1, wbsTasks).sort(m_projectFile);
    updateStructure();
    updateDates();
}
public class JacksonHelper { /** * Is this a { @ literal Resources < Resource < ? > > } ?
* @ param type
* @ return */
public static boolean isResourcesOfResource ( JavaType type ) { } } | return CollectionModel . class . isAssignableFrom ( type . getRawClass ( ) ) && EntityModel . class . isAssignableFrom ( type . containedType ( 0 ) . getRawClass ( ) ) ; |
public class CreateDeploymentGroupRequest { /** * The on - premises instance tags on which to filter . The deployment group includes on - premises instances with any of
* the specified tags . Cannot be used in the same call as OnPremisesTagSet .
* < b > NOTE : < / b > This method appends the values to the existing list ( if any ) . Use
* { @ link # setOnPremisesInstanceTagFilters ( java . util . Collection ) } or
* { @ link # withOnPremisesInstanceTagFilters ( java . util . Collection ) } if you want to override the existing values .
* @ param onPremisesInstanceTagFilters
* The on - premises instance tags on which to filter . The deployment group includes on - premises instances with
* any of the specified tags . Cannot be used in the same call as OnPremisesTagSet .
* @ return Returns a reference to this object so that method calls can be chained together . */
public CreateDeploymentGroupRequest withOnPremisesInstanceTagFilters ( TagFilter ... onPremisesInstanceTagFilters ) { } } | if ( this . onPremisesInstanceTagFilters == null ) { setOnPremisesInstanceTagFilters ( new com . amazonaws . internal . SdkInternalList < TagFilter > ( onPremisesInstanceTagFilters . length ) ) ; } for ( TagFilter ele : onPremisesInstanceTagFilters ) { this . onPremisesInstanceTagFilters . add ( ele ) ; } return this ; |
public class SQLite { /** * Get an { @ code IN } operator for the column and values . The values will be escaped if
* necessary .
* @ return column IN ( values [ 0 ] , . . . , values [ n ] )
* @ since 2.6.0 */
public static StringBuilder in ( String column , String [ ] values ) { } } | return in ( column , values , new StringBuilder ( 64 ) ) ; |
public class AbstractView { /** * Translates screen x - coordinate to cartesian coordinate .
* @ param x
* @ return */
public double fromScreenX ( double x ) { } } | Point2D src = srcPnt . get ( ) ; Point2D dst = dstPnt . get ( ) ; src . setLocation ( x , 0 ) ; inverse . transform ( src , dst ) ; return dst . getX ( ) ; |
public class RealmIdentityManager { /** * This verify method is used to verify both BASIC authentication and DIGEST authentication requests . */
@ Override public Account verify ( String id , Credential credential ) { } } | if ( id == null || id . length ( ) == 0 ) { HttpServerLogger . ROOT_LOGGER . debug ( "Missing or empty username received, aborting account verification." ) ; return null ; } if ( credential instanceof PasswordCredential ) { return verify ( id , ( PasswordCredential ) credential ) ; } else if ( credential instanceof DigestCredential ) { return verify ( id , ( DigestCredential ) credential ) ; } throw HttpServerLogger . ROOT_LOGGER . invalidCredentialType ( credential . getClass ( ) . getName ( ) ) ; |
public class LongIntSortedVector { /** * TODO : This could be done with a single binary search instead of two . */
public void add ( long idx , int val ) { } } | int curVal = getWithDefault ( idx , ZERO ) ; put ( idx , curVal + val ) ; |
/**
 * Get a list of the user's (identified by token) popular tags.
 * This method does not require authentication.
 *
 * Issues the {@code METHOD_GET_LIST_USER_RAW} call and walks the returned
 * XML payload, collecting each &lt;tag&gt; element (its "clean" attribute)
 * together with all of its &lt;raw&gt; child text values.
 *
 * @param tagVal
 *            a tag to search for (optional)
 * @return The collection of Tag objects
 * @throws FlickrException if the API reports an error
 */
public Collection<TagRaw> getListUserRaw(String tagVal) throws FlickrException {
    Map<String, Object> parameters = new HashMap<String, Object>();
    parameters.put("method", METHOD_GET_LIST_USER_RAW);
    // "tag" is optional — omit it entirely when not provided.
    if (tagVal != null) {
        parameters.put("tag", tagVal);
    }
    Response response = transportAPI.get(transportAPI.getPath(), parameters, apiKey, sharedSecret);
    if (response.isError()) {
        throw new FlickrException(response.getErrorCode(), response.getErrorMessage());
    }
    Element whoElement = response.getPayload();
    List<TagRaw> tags = new ArrayList<TagRaw>();
    // Payload shape: <who><tags><tag clean="..."><raw>...</raw>...</tag>...</tags></who>
    Element tagsElement = (Element) whoElement.getElementsByTagName("tags").item(0);
    NodeList tagElements = tagsElement.getElementsByTagName("tag");
    for (int i = 0; i < tagElements.getLength(); i++) {
        Element tagElement = (Element) tagElements.item(i);
        TagRaw tag = new TagRaw();
        tag.setClean(tagElement.getAttribute("clean"));
        // A single clean tag may have several raw spellings.
        NodeList rawElements = tagElement.getElementsByTagName("raw");
        for (int j = 0; j < rawElements.getLength(); j++) {
            Element rawElement = (Element) rawElements.item(j);
            tag.addRaw(((Text) rawElement.getFirstChild()).getData());
        }
        tags.add(tag);
    }
    return tags;
}
public class ExpressRouteGatewaysInner { /** * Creates or updates a ExpressRoute gateway in a specified resource group .
* @ param resourceGroupName The name of the resource group .
* @ param expressRouteGatewayName The name of the ExpressRoute gateway .
* @ param putExpressRouteGatewayParameters Parameters required in an ExpressRoute gateway PUT operation .
* @ throws IllegalArgumentException thrown if parameters fail the validation
* @ throws CloudException thrown if the request is rejected by server
* @ throws RuntimeException all other wrapped checked exceptions if the request fails to be sent
* @ return the ExpressRouteGatewayInner object if successful . */
public ExpressRouteGatewayInner beginCreateOrUpdate ( String resourceGroupName , String expressRouteGatewayName , ExpressRouteGatewayInner putExpressRouteGatewayParameters ) { } } | return beginCreateOrUpdateWithServiceResponseAsync ( resourceGroupName , expressRouteGatewayName , putExpressRouteGatewayParameters ) . toBlocking ( ) . single ( ) . body ( ) ; |
public class HUXCrossover { /** * Execute ( ) method */
public List < BinarySolution > execute ( List < BinarySolution > parents ) { } } | if ( parents . size ( ) != 2 ) { throw new JMetalException ( "HUXCrossover.execute: operator needs two parents" ) ; } return doCrossover ( crossoverProbability , parents . get ( 0 ) , parents . get ( 1 ) ) ; |
public class AbstractFetcher { /** * Utility method that takes the topic partitions and creates the topic partition state
* holders , depending on the timestamp / watermark mode . */
private List < KafkaTopicPartitionState < KPH > > createPartitionStateHolders ( Map < KafkaTopicPartition , Long > partitionsToInitialOffsets , int timestampWatermarkMode , SerializedValue < AssignerWithPeriodicWatermarks < T > > watermarksPeriodic , SerializedValue < AssignerWithPunctuatedWatermarks < T > > watermarksPunctuated , ClassLoader userCodeClassLoader ) throws IOException , ClassNotFoundException { } } | // CopyOnWrite as adding discovered partitions could happen in parallel
// while different threads iterate the partitions list
List < KafkaTopicPartitionState < KPH > > partitionStates = new CopyOnWriteArrayList < > ( ) ; switch ( timestampWatermarkMode ) { case NO_TIMESTAMPS_WATERMARKS : { for ( Map . Entry < KafkaTopicPartition , Long > partitionEntry : partitionsToInitialOffsets . entrySet ( ) ) { // create the kafka version specific partition handle
KPH kafkaHandle = createKafkaPartitionHandle ( partitionEntry . getKey ( ) ) ; KafkaTopicPartitionState < KPH > partitionState = new KafkaTopicPartitionState < > ( partitionEntry . getKey ( ) , kafkaHandle ) ; partitionState . setOffset ( partitionEntry . getValue ( ) ) ; partitionStates . add ( partitionState ) ; } return partitionStates ; } case PERIODIC_WATERMARKS : { for ( Map . Entry < KafkaTopicPartition , Long > partitionEntry : partitionsToInitialOffsets . entrySet ( ) ) { KPH kafkaHandle = createKafkaPartitionHandle ( partitionEntry . getKey ( ) ) ; AssignerWithPeriodicWatermarks < T > assignerInstance = watermarksPeriodic . deserializeValue ( userCodeClassLoader ) ; KafkaTopicPartitionStateWithPeriodicWatermarks < T , KPH > partitionState = new KafkaTopicPartitionStateWithPeriodicWatermarks < > ( partitionEntry . getKey ( ) , kafkaHandle , assignerInstance ) ; partitionState . setOffset ( partitionEntry . getValue ( ) ) ; partitionStates . add ( partitionState ) ; } return partitionStates ; } case PUNCTUATED_WATERMARKS : { for ( Map . Entry < KafkaTopicPartition , Long > partitionEntry : partitionsToInitialOffsets . entrySet ( ) ) { KPH kafkaHandle = createKafkaPartitionHandle ( partitionEntry . getKey ( ) ) ; AssignerWithPunctuatedWatermarks < T > assignerInstance = watermarksPunctuated . deserializeValue ( userCodeClassLoader ) ; KafkaTopicPartitionStateWithPunctuatedWatermarks < T , KPH > partitionState = new KafkaTopicPartitionStateWithPunctuatedWatermarks < > ( partitionEntry . getKey ( ) , kafkaHandle , assignerInstance ) ; partitionState . setOffset ( partitionEntry . getValue ( ) ) ; partitionStates . add ( partitionState ) ; } return partitionStates ; } default : // cannot happen , add this as a guard for the future
throw new RuntimeException ( ) ; } |
/**
 * Writes a portion of a character array through the encoded appender.
 *
 * (non-Javadoc)
 * @see java.io.Writer#write(char[], int, int)
 */
@Override
public void write(char[] cbuf, int off, int len) throws IOException {
    // Delegate to the appender with the configured encoder; the null second
    // argument presumably means "no pre-encoded prefix/context" — TODO confirm
    // against the EncodedAppender.append contract.
    encodedAppender.append(encoder, null, cbuf, off, len);
}
public class ListVolumesResult { /** * An array of < a > VolumeInfo < / a > objects , where each object describes an iSCSI volume . If no volumes are defined for
* the gateway , then < code > VolumeInfos < / code > is an empty array " [ ] " .
* @ param volumeInfos
* An array of < a > VolumeInfo < / a > objects , where each object describes an iSCSI volume . If no volumes are
* defined for the gateway , then < code > VolumeInfos < / code > is an empty array " [ ] " . */
public void setVolumeInfos ( java . util . Collection < VolumeInfo > volumeInfos ) { } } | if ( volumeInfos == null ) { this . volumeInfos = null ; return ; } this . volumeInfos = new com . amazonaws . internal . SdkInternalList < VolumeInfo > ( volumeInfos ) ; |
public class ByteBuffer { /** * You can fill the cache in advance if you want to .
* @ param decimals */
public static void fillCache ( int decimals ) { } } | int step = 1 ; switch ( decimals ) { case 0 : step = 100 ; break ; case 1 : step = 10 ; break ; } for ( int i = 1 ; i < byteCacheSize ; i += step ) { if ( byteCache [ i ] != null ) continue ; byteCache [ i ] = convertToBytes ( i ) ; } |
public class WhiteboxImpl { /** * Filter power mock constructor .
* @ param declaredConstructors the declared constructors
* @ return the constructor [ ] */
static Constructor < ? > [ ] filterPowerMockConstructor ( Constructor < ? > [ ] declaredConstructors ) { } } | Set < Constructor < ? > > constructors = new HashSet < Constructor < ? > > ( ) ; for ( Constructor < ? > constructor : declaredConstructors ) { final Class < ? > [ ] parameterTypes = constructor . getParameterTypes ( ) ; if ( parameterTypes . length >= 1 && parameterTypes [ parameterTypes . length - 1 ] . getName ( ) . equals ( "org.powermock.core.IndicateReloadClass" ) ) { continue ; } else { constructors . add ( constructor ) ; } } return constructors . toArray ( new Constructor < ? > [ constructors . size ( ) ] ) ; |
public class XLifecycleExtension { /** * Extracts the lifecycle model identifier from a given log .
* @ param log
* Event log .
* @ return Lifecycle model identifier string . */
public String extractModel ( XLog log ) { } } | XAttribute attribute = log . getAttributes ( ) . get ( KEY_MODEL ) ; if ( attribute == null ) { return null ; } else { return ( ( XAttributeLiteral ) attribute ) . getValue ( ) ; } |
public class JSONConverter { /** * Decode a JSON document to retrieve a ServerNotificationRegistration instance .
* @ param in The stream to read JSON from
* @ return The decoded ServerNotificationRegistration instance
* @ throws ConversionException If JSON uses unexpected structure / format
* @ throws IOException If an I / O error occurs or if JSON is ill - formed .
* @ throws ClassNotFoundException If needed class can ' t be found .
* @ see # writeServerNotificationRegistration ( OutputStream , ServerNotificationRegistration ) */
public ServerNotificationRegistration readServerNotificationRegistration ( InputStream in ) throws ConversionException , IOException , ClassNotFoundException { } } | JSONObject json = parseObject ( in ) ; ServerNotificationRegistration ret = new ServerNotificationRegistration ( ) ; String name = readStringInternal ( json . get ( N_OPERATION ) ) ; ret . operation = name != null ? Operation . valueOf ( name ) : null ; ret . objectName = readObjectName ( json . get ( N_OBJECTNAME ) ) ; ret . listener = readObjectName ( json . get ( N_LISTENER ) ) ; ret . filter = readNotificationFilterInternal ( json . get ( N_FILTER ) , true ) ; ret . handback = readPOJOInternal ( json . get ( N_HANDBACK ) ) ; ret . filterID = readIntInternal ( json . get ( N_FILTERID ) ) ; ret . handbackID = readIntInternal ( json . get ( N_HANDBACKID ) ) ; return ret ; |
public class URLHelper { /** * Get the passed URL as an URI . If the URL is null or not an URI
* < code > null < / code > is returned .
* @ param aURL
* Source URL . May be < code > null < / code > .
* @ return < code > null < / code > if the passed URL is empty or invalid . */
@ Nullable public static URI getAsURI ( @ Nullable final URL aURL ) { } } | if ( aURL != null ) try { return aURL . toURI ( ) ; } catch ( final URISyntaxException ex ) { // fall - through
if ( GlobalDebug . isDebugMode ( ) ) if ( LOGGER . isWarnEnabled ( ) ) LOGGER . warn ( "Debug warn: failed to convert '" + aURL + "' to a URI!" ) ; } return null ; |
public class PutInstructionFileRequest { /** * Returns the material description for the new instruction file . */
@ Override public Map < String , String > getMaterialsDescription ( ) { } } | return matDesc == null ? encryptionMaterials . getMaterialsDescription ( ) : matDesc ; |
/**
 * {@inheritDoc}
 *
 * Evaluates the compiled OGNL expression against the target object,
 * optionally with a variable map as the OGNL context.
 */
public T evaluate(Object target, Map<String, Object> variables) {
    try {
        if (variables == null) {
            // No context variables: evaluate directly against the root object.
            return (T) Ognl.getValue(getCompiledExpression(), target);
        } else {
            // NOTE(review): addDefaultContext's return value is discarded here;
            // presumably it populates `variables` with OGNL's default context
            // entries in place — TODO confirm against the OGNL API docs.
            Ognl.addDefaultContext(target, variables);
            return (T) Ognl.getValue(getCompiledExpression(), variables, target);
        }
    } catch (OgnlException e) {
        // Wrap the checked OGNL failure so callers need not declare it.
        throw new RuntimeException(e);
    }
}
public class ModelMojoWriter { /** * Create file that contains model details in JSON format .
* This information is pulled from the models schema . */
private void writeModelDetails ( ) throws IOException { } } | ModelSchemaV3 modelSchema = ( ModelSchemaV3 ) SchemaServer . schema ( 3 , model ) . fillFromImpl ( model ) ; startWritingTextFile ( "experimental/modelDetails.json" ) ; writeln ( modelSchema . toJsonString ( ) ) ; finishWritingTextFile ( ) ; |
public class MtasSolrResultMerge { /** * Merge responses sorted set .
* @ param originalList
* the original list
* @ param shardList
* the shard list */
private void mergeResponsesSortedSet ( SortedSet < Object > originalList , SortedSet < Object > shardList ) { } } | for ( Object item : shardList ) { originalList . add ( item ) ; } |
/**
 * This method is used to compare two XML {@code Xml} objects with each other. The differences are written to
 * the {@code Report} object. To control what is different and what is not different you can provide an arbitrator.
 * The default arbitrator {@code DefaultArbitrator} is very strict and will not accept any differences.
 *
 * NOTE(review): the original doc claimed BFS traversal, but the helper invoked
 * below is named {@code dfsComparison} — presumably depth-first; confirm which
 * traversal is actually implemented before relying on ordering guarantees.
 *
 * @param a the {@code Xml} object to compare with
 * @param b the {@code Xml} object to compare with
 * @param arbitrator {@code Arbitrator} used for comparison, null will use the default arbitrator
 * @param report {@code Report} collector for differences in the documents
 * @return {@code Report} object
 */
public Report compare(Xml a, Xml b, Arbitrator arbitrator, Report report) {
    // validations
    if (a == null) {
        throw new DiffException("The Document a parameter can not be a null value");
    }
    if (b == null) {
        throw new DiffException("The Document b parameter can not be a null value");
    }

    // clone document — comparison must not mutate the caller's trees
    try {
        a = (Xml) a.clone();
        b = (Xml) b.clone();
    } catch (CloneNotSupportedException e) {
        throw new DiffException("Can't clone the document", e);
    }

    // initializations: default names and collaborators when not supplied
    String nameA = (a.getName() == null) ? "A" : a.getName();
    String nameB = (b.getName() == null) ? "B" : b.getName();
    if (arbitrator == null) {
        arbitrator = new DefaultArbitrator();
    }
    if (report == null) {
        report = new DefaultReport(nameA, nameB);
    }

    // compare documents: handle one-sided / empty roots before traversing
    if (a.getRoot() != null || b.getRoot() != null) {
        if (a.getRoot() == null) {
            report.add(new Difference("", "", String.format("There's only data in: %s", b.getName())));
        } else if (b.getRoot() == null) {
            report.add(new Difference("", "", String.format("There's only data in: %s", a.getName())));
        } else {
            // Seed the traversal with both roots.
            List<Node> listA = new ArrayList<Node>();
            listA.add(a.getRoot());
            List<Node> listB = new ArrayList<Node>();
            listB.add(b.getRoot());
            dfsComparison(report, arbitrator, listA, listB, nameA, nameB);
        }
    }
    return report;
}
public class UtilLoggingLevel { /** * Get level with specified symbolic name .
* @ param sArg symbolic name .
* @ param defaultLevel level to return if no match .
* @ return matching level or defaultLevel if no match . */
public static Level toLevel ( final String sArg , final Level defaultLevel ) { } } | if ( sArg == null ) { return defaultLevel ; } String s = sArg . toUpperCase ( ) ; if ( s . equals ( "SEVERE" ) ) { return SEVERE ; } // if ( s . equals ( " FINE " ) ) return Level . FINE ;
if ( s . equals ( "WARNING" ) ) { return WARNING ; } if ( s . equals ( "INFO" ) ) { return INFO ; } if ( s . equals ( "CONFI" ) ) { return CONFIG ; } if ( s . equals ( "FINE" ) ) { return FINE ; } if ( s . equals ( "FINER" ) ) { return FINER ; } if ( s . equals ( "FINEST" ) ) { return FINEST ; } return defaultLevel ; |
public class DataSetBuilder { /** * Set random filler using { @ link Date } values .
* The specified column will be filled with values that
* are uniformly sampled from the interval < code > [ min , max ] < / code > .
* @ param column Column name .
* @ param min Minimum value .
* @ param max Maximum value .
* @ return The builder instance ( for chained calls ) .
* @ see # random ( String , Time , Time )
* @ see # random ( String , Timestamp , Timestamp ) */
public DataSetBuilder random ( String column , Date min , Date max ) { } } | ensureValidRange ( min , max ) ; long a = min . getTime ( ) / MILLIS_PER_DAY ; long b = max . getTime ( ) / MILLIS_PER_DAY ; return set ( column , ( ) -> new Date ( nextRandomLong ( a , b ) * MILLIS_PER_DAY ) ) ; |
public class PrcAccDocRetrieve { /** * < p > Process entity request . < / p >
* @ param pAddParam additional param , e . g . return this line ' s
* document in " nextEntity " for farther process
* @ param pRequestData Request Data
* @ param pEntity Entity to process
* @ return Entity processed for farther process or null
* @ throws Exception - an exception */
@ Override public final T process ( final Map < String , Object > pAddParam , final T pEntity , final IRequestData pRequestData ) throws Exception { } } | T entity = this . prcAccEntityRetrieve . process ( pAddParam , pEntity , pRequestData ) ; String actionAdd = pRequestData . getParameter ( "actionAdd" ) ; if ( "full" . equals ( actionAdd ) ) { pRequestData . setAttribute ( "accEntries" , this . srvAccEntry . retrieveAccEntriesFor ( pAddParam , entity ) ) ; pRequestData . setAttribute ( "classAccountingEntry" , AccountingEntry . class ) ; } return entity ; |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.