signature stringlengths 43 39.1k | implementation stringlengths 0 450k |
|---|---|
public class KeyTransformationHandler { /** * Registers a { @ link org . infinispan . query . Transformer } for the supplied key class .
* @ param keyClass the key class for which the supplied transformerClass should be used
* @ param transformerClass the transformer class to use for the supplied key class */
public void registerTransformer ( Class < ? > keyClass , Class < ? extends Transformer > transformerClass ) { } } | transformerTypes . put ( keyClass , transformerClass ) ; |
public class ClassHelper {
    /**
     * Gets the name of the package the passed class resides in.
     *
     * @param sClassName fully-qualified class name to inspect; may be null
     * @return {@code null} if the input is {@code null}, the empty string for a
     *         class in the default package, otherwise everything before the
     *         last {@code '.'} of the passed name
     */
    public static String getClassPackageName(final String sClassName) {
        if (sClassName == null) {
            return null;
        }
        final int lastDot = sClassName.lastIndexOf('.');
        if (lastDot < 0) {
            // No separator: class lives in the default package.
            return "";
        }
        return sClassName.substring(0, lastDot);
    }
}
public class Ifc4PackageImpl { /** * < ! - - begin - user - doc - - >
* < ! - - end - user - doc - - >
* @ generated */
@ Override public EClass getIfcLinearStiffnessMeasure ( ) { } } | if ( ifcLinearStiffnessMeasureEClass == null ) { ifcLinearStiffnessMeasureEClass = ( EClass ) EPackage . Registry . INSTANCE . getEPackage ( Ifc4Package . eNS_URI ) . getEClassifiers ( ) . get ( 826 ) ; } return ifcLinearStiffnessMeasureEClass ; |
public class MarvinColorModelConverter { /** * Converts an image in BINARY mode to RGB mode
* @ param img image
* @ return new MarvinImage instance in RGB mode */
public static MarvinImage binaryToRgb ( MarvinImage img ) { } } | MarvinImage resultImage = new MarvinImage ( img . getWidth ( ) , img . getHeight ( ) , MarvinImage . COLOR_MODEL_RGB ) ; for ( int y = 0 ; y < img . getHeight ( ) ; y ++ ) { for ( int x = 0 ; x < img . getWidth ( ) ; x ++ ) { if ( img . getBinaryColor ( x , y ) ) { resultImage . setIntColor ( x , y , 0 , 0 , 0 ) ; } else { resultImage . setIntColor ( x , y , 255 , 255 , 255 ) ; } } } return resultImage ; |
public class SourceDetailMarshaller { /** * Marshall the given parameter object . */
public void marshall ( SourceDetail sourceDetail , ProtocolMarshaller protocolMarshaller ) { } } | if ( sourceDetail == null ) { throw new SdkClientException ( "Invalid argument passed to marshall(...)" ) ; } try { protocolMarshaller . marshall ( sourceDetail . getEventSource ( ) , EVENTSOURCE_BINDING ) ; protocolMarshaller . marshall ( sourceDetail . getMessageType ( ) , MESSAGETYPE_BINDING ) ; protocolMarshaller . marshall ( sourceDetail . getMaximumExecutionFrequency ( ) , MAXIMUMEXECUTIONFREQUENCY_BINDING ) ; } catch ( Exception e ) { throw new SdkClientException ( "Unable to marshall request to JSON: " + e . getMessage ( ) , e ) ; } |
public class Interval { /** * Return the complement section of the interval .
* @ param op
* @ return The complemented interval ( s ) , or an empty list if the complement is empty . */
public List < Interval < T > > complement ( final Interval < T > op ) throws MIDDException { } } | final boolean disJoined = ( this . lowerBound . compareTo ( op . upperBound ) >= 0 ) || ( this . upperBound . compareTo ( op . lowerBound ) <= 0 ) ; if ( disJoined ) { Interval < T > newInterval = new Interval < > ( this . lowerBound , this . upperBound ) ; final boolean isLowerClosed ; if ( this . lowerBound . compareTo ( op . upperBound ) == 0 ) { isLowerClosed = this . lowerBoundClosed && ! op . upperBoundClosed ; } else { isLowerClosed = this . lowerBoundClosed ; } newInterval . closeLeft ( isLowerClosed ) ; final boolean isUpperClosed ; if ( this . upperBound . compareTo ( op . lowerBound ) == 0 ) { isUpperClosed = this . upperBoundClosed && ! op . upperBoundClosed ; } else { isUpperClosed = this . upperBoundClosed ; } newInterval . closeRight ( isUpperClosed ) ; // return empty if new interval is invalid
if ( ! newInterval . validate ( ) ) { return ImmutableList . of ( ) ; } return ImmutableList . of ( newInterval ) ; } else { final Interval < T > interval1 = new Interval < > ( this . lowerBound , op . lowerBound ) ; final Interval < T > interval2 = new Interval < > ( op . upperBound , this . upperBound ) ; interval1 . closeLeft ( ! interval1 . isLowerInfinite ( ) && this . lowerBoundClosed ) ; interval1 . closeRight ( ! interval1 . isUpperInfinite ( ) && ! op . lowerBoundClosed ) ; interval2 . closeLeft ( ! interval2 . isLowerInfinite ( ) && ! op . upperBoundClosed ) ; interval2 . closeRight ( ! interval2 . isUpperInfinite ( ) && this . upperBoundClosed ) ; final List < Interval < T > > result = new ArrayList < > ( ) ; if ( interval1 . validate ( ) ) { result . add ( interval1 ) ; } if ( interval2 . validate ( ) ) { result . add ( interval2 ) ; } return ImmutableList . copyOf ( result ) ; } |
public class ConnectionDialog { /** * This method initializes jContentPane
* @ return javax . swing . JPanel */
private JPanel getJContentPane ( ) { } } | if ( jContentPane == null ) { jContentPane = new JPanel ( ) ; jContentPane . setLayout ( new BorderLayout ( ) ) ; jContentPane . add ( getMainPanel ( ) , BorderLayout . CENTER ) ; jContentPane . add ( getButtonPanel ( ) , BorderLayout . SOUTH ) ; } return jContentPane ; |
public class RESTClientFactory { /** * Creates a REST client used to perform Voldemort operations against the
* Coordinator
* @ param storeName Name of the store to perform the operations on
* @ param resolver Custom resolver as specified by the application
* @ return */
@ Override public < K , V > StoreClient < K , V > getStoreClient ( final String storeName , final InconsistencyResolver < Versioned < V > > resolver ) { } } | // wrap it in LazyStoreClient here so any direct calls to this method
// returns a lazy client
return new LazyStoreClient < K , V > ( new Callable < StoreClient < K , V > > ( ) { @ Override public StoreClient < K , V > call ( ) throws Exception { Store < K , V , Object > clientStore = getRawStore ( storeName , resolver ) ; return new RESTClient < K , V > ( storeName , clientStore ) ; } } , true ) ; |
public class UTF8Reader { /** * Reads into a character buffer using the correct encoding .
* @ param cbuf character buffer receiving the data .
* @ param off starting offset into the buffer .
* @ param len number of characters to read .
* @ return the number of characters read or - 1 on end of file . */
@ Override public int read ( char [ ] cbuf , int off , int len ) throws IOException { } } | int i = 0 ; InputStream is = _is ; if ( is == null ) return - 1 ; for ( i = 0 ; i < len ; i ++ ) { if ( i > 0 && is . available ( ) < 1 ) return i ; int ch = read ( ) ; if ( ch < 0 ) return i == 0 ? - 1 : i ; cbuf [ off + i ] = ( char ) ch ; } return i ; |
public class UnshareDirectoryRequestMarshaller { /** * Marshall the given parameter object . */
public void marshall ( UnshareDirectoryRequest unshareDirectoryRequest , ProtocolMarshaller protocolMarshaller ) { } } | if ( unshareDirectoryRequest == null ) { throw new SdkClientException ( "Invalid argument passed to marshall(...)" ) ; } try { protocolMarshaller . marshall ( unshareDirectoryRequest . getDirectoryId ( ) , DIRECTORYID_BINDING ) ; protocolMarshaller . marshall ( unshareDirectoryRequest . getUnshareTarget ( ) , UNSHARETARGET_BINDING ) ; } catch ( Exception e ) { throw new SdkClientException ( "Unable to marshall request to JSON: " + e . getMessage ( ) , e ) ; } |
public class ForecastServiceLocator { /** * For the given interface , get the stub implementation .
* If this service has no port for the given interface ,
* then ServiceException is thrown . */
public java . rmi . Remote getPort ( Class serviceEndpointInterface ) throws javax . xml . rpc . ServiceException { } } | try { if ( com . google . api . ads . admanager . axis . v201811 . ForecastServiceInterface . class . isAssignableFrom ( serviceEndpointInterface ) ) { com . google . api . ads . admanager . axis . v201811 . ForecastServiceSoapBindingStub _stub = new com . google . api . ads . admanager . axis . v201811 . ForecastServiceSoapBindingStub ( new java . net . URL ( ForecastServiceInterfacePort_address ) , this ) ; _stub . setPortName ( getForecastServiceInterfacePortWSDDServiceName ( ) ) ; return _stub ; } } catch ( java . lang . Throwable t ) { throw new javax . xml . rpc . ServiceException ( t ) ; } throw new javax . xml . rpc . ServiceException ( "There is no stub implementation for the interface: " + ( serviceEndpointInterface == null ? "null" : serviceEndpointInterface . getName ( ) ) ) ; |
public class AdBrokerBenchmark { /** * Prints some summary statistics about performance .
* @ throws Exception if anything unexpected happens . */
public synchronized void printResults ( ) throws Exception { } } | ClientStats stats = m_fullStatsContext . fetch ( ) . getStats ( ) ; System . out . print ( HORIZONTAL_RULE ) ; System . out . println ( " Client Workload Statistics" ) ; System . out . println ( HORIZONTAL_RULE ) ; System . out . printf ( "Average throughput: %,9d txns/sec\n" , stats . getTxnThroughput ( ) ) ; if ( m_config . latencyreport ) { System . out . printf ( "Average latency: %,9.2f ms\n" , stats . getAverageLatency ( ) ) ; System . out . printf ( "10th percentile latency: %,9.2f ms\n" , stats . kPercentileLatencyAsDouble ( .1 ) ) ; System . out . printf ( "25th percentile latency: %,9.2f ms\n" , stats . kPercentileLatencyAsDouble ( .25 ) ) ; System . out . printf ( "50th percentile latency: %,9.2f ms\n" , stats . kPercentileLatencyAsDouble ( .5 ) ) ; System . out . printf ( "75th percentile latency: %,9.2f ms\n" , stats . kPercentileLatencyAsDouble ( .75 ) ) ; System . out . printf ( "90th percentile latency: %,9.2f ms\n" , stats . kPercentileLatencyAsDouble ( .9 ) ) ; System . out . printf ( "95th percentile latency: %,9.2f ms\n" , stats . kPercentileLatencyAsDouble ( .95 ) ) ; System . out . printf ( "99th percentile latency: %,9.2f ms\n" , stats . kPercentileLatencyAsDouble ( .99 ) ) ; System . out . printf ( "99.5th percentile latency: %,9.2f ms\n" , stats . kPercentileLatencyAsDouble ( .995 ) ) ; System . out . printf ( "99.9th percentile latency: %,9.2f ms\n" , stats . kPercentileLatencyAsDouble ( .999 ) ) ; System . out . print ( "\n" + HORIZONTAL_RULE ) ; System . out . println ( " System Server Statistics" ) ; System . out . println ( HORIZONTAL_RULE ) ; System . out . printf ( "Reported Internal Avg Latency: %,9.2f ms\n" , stats . getAverageInternalLatency ( ) ) ; System . out . print ( "\n" + HORIZONTAL_RULE ) ; System . out . println ( " Latency Histogram" ) ; System . out . println ( HORIZONTAL_RULE ) ; System . out . println ( stats . latencyHistoReport ( ) ) ; } // 4 . 
Write stats to file if requested
m_client . writeSummaryCSV ( stats , m_config . statsfile ) ; |
public class PGDImpl { /** * < ! - - begin - user - doc - - >
* < ! - - end - user - doc - - >
* @ generated */
public void setYpgBase ( Integer newYpgBase ) { } } | Integer oldYpgBase = ypgBase ; ypgBase = newYpgBase ; if ( eNotificationRequired ( ) ) eNotify ( new ENotificationImpl ( this , Notification . SET , AfplibPackage . PGD__YPG_BASE , oldYpgBase , ypgBase ) ) ; |
public class SnapshotManagerImpl { /** * ( non - Javadoc )
* @ see org . duracloud . snapshot . service . SnapshotManager # addContentItem (
* org . duracloud . snapshot . db . model . Snapshot , java . lang . String , java . util . Map ) */
@ Override @ Transactional public void addContentItem ( Snapshot snapshot , String contentId , Map < String , String > props ) throws SnapshotException { } } | String contentIdHash = createChecksumGenerator ( ) . generateChecksum ( contentId ) ; try { if ( this . snapshotContentItemRepo . findBySnapshotAndContentIdHash ( snapshot , contentIdHash ) != null ) { return ; } SnapshotContentItem item = new SnapshotContentItem ( ) ; item . setContentId ( contentId ) ; item . setSnapshot ( snapshot ) ; item . setContentIdHash ( contentIdHash ) ; String propString = PropertiesSerializer . serialize ( props ) ; item . setMetadata ( propString ) ; this . snapshotContentItemRepo . save ( item ) ; } catch ( Exception ex ) { throw new SnapshotException ( "failed to add content item: " + ex . getMessage ( ) , ex ) ; } |
public class ReadWriteManager { /** * Writes the content of a KAFDocument object to standard output . */
static void print ( KAFDocument kaf ) { } } | try { Writer out = new BufferedWriter ( new OutputStreamWriter ( System . out , "UTF8" ) ) ; out . write ( kafToStr ( kaf ) ) ; out . flush ( ) ; } catch ( Exception e ) { System . out . println ( e ) ; } |
public class ClasspathBuilder { /** * Get resources for specific scope .
* @ param project
* @ param scope
* @ return */
private List < Resource > getResources ( final MavenProject project , final String scope ) { } } | if ( SCOPE_COMPILE . equals ( scope ) || SCOPE_RUNTIME . equals ( scope ) ) { return project . getResources ( ) ; } else if ( SCOPE_TEST . equals ( scope ) ) { List < Resource > resources = new ArrayList < Resource > ( ) ; resources . addAll ( project . getTestResources ( ) ) ; resources . addAll ( project . getResources ( ) ) ; return resources ; } else { throw new RuntimeException ( "Not allowed scope " + scope ) ; } |
public class ConstantsSummaryBuilder { /** * Build the summary for each documented package .
* @ param node the XML element that specifies which components to document
* @ param contentTree the tree to which the summaries will be added
* @ throws DocletException if there is a problem while building the documentation */
public void buildConstantSummaries ( XMLNode node , Content contentTree ) throws DocletException { } } | printedPackageHeaders . clear ( ) ; Content summariesTree = writer . getConstantSummaries ( ) ; for ( PackageElement aPackage : configuration . packages ) { if ( hasConstantField ( aPackage ) ) { currentPackage = aPackage ; // Build the documentation for the current package .
buildChildren ( node , summariesTree ) ; first = false ; } } writer . addConstantSummaries ( contentTree , summariesTree ) ; |
public class EpollEventLoop { /** * Deregister the given epoll from this { @ link EventLoop } . */
void remove ( AbstractEpollChannel ch ) throws IOException { } } | assert inEventLoop ( ) ; if ( ch . isOpen ( ) ) { int fd = ch . socket . intValue ( ) ; if ( channels . remove ( fd ) != null ) { // Remove the epoll . This is only needed if it ' s still open as otherwise it will be automatically
// removed once the file - descriptor is closed .
Native . epollCtlDel ( epollFd . intValue ( ) , ch . fd ( ) . intValue ( ) ) ; } } |
public class LuceneBooleanQueryExternalizer { /** * BooleanQuery has a static ( but reconfigurable ) limit for the number of clauses .
* If any node was able to bypass this limit , we ' ll need to assume that this limit
* was somehow relaxed and some point in time , so we need to apply the same configuration
* to this node .
* @ param numberOfClauses The number of clauses being deserialized */
private static void assureNumberOfClausesLimit ( int numberOfClauses ) { } } | if ( numberOfClauses > BooleanQuery . getMaxClauseCount ( ) ) { log . overridingBooleanQueryMaxClauseCount ( numberOfClauses ) ; BooleanQuery . setMaxClauseCount ( numberOfClauses ) ; } |
public class SerializerFactory { /** * Reads the object as a map . */
public Deserializer getObjectDeserializer ( String type ) throws HessianProtocolException { } } | Deserializer deserializer = getDeserializer ( type ) ; if ( deserializer != null ) return deserializer ; else if ( _hashMapDeserializer != null ) return _hashMapDeserializer ; else { _hashMapDeserializer = new MapDeserializer ( HashMap . class ) ; return _hashMapDeserializer ; } |
public class DateParser { /** * Process between syntax . Just leave the beginning date .
* @ param input the source string
* @ return the stripped result */
private String handleBetween ( final String input ) { } } | final String outString = input . replace ( "BETWEEN " , "" ) . replace ( "BET " , "" ) . replace ( "FROM " , "" ) ; return truncateAt ( truncateAt ( outString , " AND " ) , " TO " ) . trim ( ) ; |
public class JdbcKAMLoaderImpl { /** * Saves an entry to the object table .
* @ param tid { @ code int } , the object type id
* @ param v { @ link String } , the non - null object value
* @ return { @ code int } , the object primary key
* @ throws SQLException - Thrown if a sql error occurred saving an entry to
* the object table */
protected int saveObject ( int tid , String v ) throws SQLException { } } | final String objectsIdColumn = ( dbConnection . isPostgresql ( ) ? OBJECTS_ID_COLUMN_POSTGRESQL : OBJECTS_ID_COLUMN ) ; PreparedStatement ps = getPreparedStatement ( OBJECTS_SQL , new String [ ] { objectsIdColumn } ) ; ResultSet rs = null ; if ( v == null ) { throw new InvalidArgument ( "object value cannot be null" ) ; } try { v = new String ( v . getBytes ( ) , "UTF-8" ) ; } catch ( UnsupportedEncodingException e ) { throw new RuntimeException ( "utf-8 unsupported" , e ) ; } try { // Insert into objects _ text if we are over MAX _ VARCHAR _ LENGTH
Integer objectsTextId = null ; if ( v . length ( ) > MAX_VARCHAR_LENGTH ) { final String objectsTextColumn = ( dbConnection . isPostgresql ( ) ? OBJECTS_TEXT_COLUMN_POSTGRESQL : OBJECTS_TEXT_COLUMN ) ; PreparedStatement otps = getPreparedStatement ( OBJECTS_TEXT_SQL , new String [ ] { objectsTextColumn } ) ; ResultSet otrs = null ; StringReader sr = null ; try { sr = new StringReader ( v ) ; otps . setClob ( 1 , sr , v . length ( ) ) ; otps . execute ( ) ; otrs = otps . getGeneratedKeys ( ) ; if ( otrs . next ( ) ) { objectsTextId = otrs . getInt ( 1 ) ; } } finally { close ( otrs ) ; if ( sr != null ) { sr . close ( ) ; } } } // FIXME Hardcoding objects _ type to 1?
ps . setInt ( 1 , 1 ) ; if ( objectsTextId == null ) { // insert value into objects table
ps . setString ( 2 , v ) ; ps . setNull ( 3 , Types . INTEGER ) ; } else { ps . setNull ( 2 , Types . VARCHAR ) ; ps . setInt ( 3 , objectsTextId ) ; } ps . execute ( ) ; rs = ps . getGeneratedKeys ( ) ; int oid ; if ( rs . next ( ) ) { oid = rs . getInt ( 1 ) ; } else { throw new IllegalStateException ( "object insert failed." ) ; } return oid ; } finally { close ( rs ) ; } |
public class JwaServerWebSocket { /** * { @ link Session } is available . */
@ Override public < T > T unwrap ( Class < T > clazz ) { } } | return Session . class . isAssignableFrom ( clazz ) ? clazz . cast ( session ) : null ; |
public class ZooKeeperUtils { /** * Creates a { @ link ZooKeeperLeaderRetrievalService } instance .
* @ param client The { @ link CuratorFramework } ZooKeeper client to use
* @ param configuration { @ link Configuration } object containing the configuration values
* @ param pathSuffix The path suffix which we want to append
* @ return { @ link ZooKeeperLeaderRetrievalService } instance .
* @ throws Exception */
public static ZooKeeperLeaderRetrievalService createLeaderRetrievalService ( final CuratorFramework client , final Configuration configuration , final String pathSuffix ) { } } | String leaderPath = configuration . getString ( HighAvailabilityOptions . HA_ZOOKEEPER_LEADER_PATH ) + pathSuffix ; return new ZooKeeperLeaderRetrievalService ( client , leaderPath ) ; |
public class TransactionScope { /** * Called by TransactionImpl . */
void detach ( ) { } } | mLock . lock ( ) ; try { if ( mDetached || mTxnMgr . removeLocalScope ( this ) ) { mDetached = true ; } else { throw new IllegalStateException ( "Transaction is attached to a different thread" ) ; } } finally { mLock . unlock ( ) ; } |
public class Strs { /** * Search target string to find the first index of any string in the given string array , starting at the specified index
* @ param target
* @ param fromIndex
* @ param indexWith
* @ return */
public static int indexAny ( String target , Integer fromIndex , String ... indexWith ) { } } | return indexAny ( target , fromIndex , Arrays . asList ( indexWith ) ) ; |
public class Matrix4f { /** * / * ( non - Javadoc )
* @ see org . joml . Matrix4fc # rotateY ( float , org . joml . Matrix4f ) */
public Matrix4f rotateY ( float ang , Matrix4f dest ) { } } | if ( ( properties & PROPERTY_IDENTITY ) != 0 ) return dest . rotationY ( ang ) ; float cos , sin ; sin = ( float ) Math . sin ( ang ) ; cos = ( float ) Math . cosFromSin ( sin , ang ) ; float rm00 = cos ; float rm02 = - sin ; float rm20 = sin ; float rm22 = cos ; // add temporaries for dependent values
float nm00 = m00 * rm00 + m20 * rm02 ; float nm01 = m01 * rm00 + m21 * rm02 ; float nm02 = m02 * rm00 + m22 * rm02 ; float nm03 = m03 * rm00 + m23 * rm02 ; // set non - dependent values directly
dest . _m20 ( m00 * rm20 + m20 * rm22 ) ; dest . _m21 ( m01 * rm20 + m21 * rm22 ) ; dest . _m22 ( m02 * rm20 + m22 * rm22 ) ; dest . _m23 ( m03 * rm20 + m23 * rm22 ) ; // set other values
dest . _m00 ( nm00 ) ; dest . _m01 ( nm01 ) ; dest . _m02 ( nm02 ) ; dest . _m03 ( nm03 ) ; dest . _m10 ( m10 ) ; dest . _m11 ( m11 ) ; dest . _m12 ( m12 ) ; dest . _m13 ( m13 ) ; dest . _m30 ( m30 ) ; dest . _m31 ( m31 ) ; dest . _m32 ( m32 ) ; dest . _m33 ( m33 ) ; dest . _properties ( properties & ~ ( PROPERTY_PERSPECTIVE | PROPERTY_IDENTITY | PROPERTY_TRANSLATION ) ) ; return dest ; |
public class BSDSyslogSenderImpl { /** * Serialize , format , and prepare the message payload body
* for sending by this transport . This includes adding
* the syslog message header .
* @ param msg Message to prepare
* @ return Buffer to send */
protected byte [ ] getTransportPayload ( AuditEventMessage msg ) { } } | if ( msg == null ) { return null ; } byte [ ] msgBytes = msg . getSerializedMessage ( false ) ; if ( EventUtils . isEmptyOrNull ( msgBytes ) ) { return null ; } // Format message with transport - specific headers
StringBuilder sb = new StringBuilder ( ) ; sb . append ( "<" ) ; sb . append ( TRANSPORT_DEFAULT_PRIORITY ) ; sb . append ( ">" ) ; sb . append ( TimestampUtils . getBSDSyslogDate ( msg . getDateTime ( ) ) ) ; sb . append ( " " ) ; sb . append ( getSystemHostName ( ) ) ; sb . append ( " " ) ; sb . append ( "<?xml version=\"1.0\" encoding=\"ASCII\"?>" ) ; sb . append ( new String ( msgBytes ) ) ; return sb . toString ( ) . trim ( ) . getBytes ( ) ; |
public class InvokerTask { /** * Executes the task on a thread from the common Liberty thread pool . */
@ Override public void run ( ) { } } | final boolean trace = TraceComponent . isAnyTracingEnabled ( ) ; if ( trace && tc . isEntryEnabled ( ) ) Tr . entry ( this , tc , "run[" + taskId + ']' , persistentExecutor ) ; Config config = persistentExecutor . configRef . get ( ) ; if ( persistentExecutor . deactivated || ! config . enableTaskExecution ) { if ( trace && tc . isEntryEnabled ( ) ) Tr . exit ( this , tc , "run[" + taskId + ']' , persistentExecutor . deactivated ? "deactivated" : ( "enableTaskExecution? " + config . enableTaskExecution ) ) ; return ; } // Work around for when the scheduled executor fires too early
long execTime = new Date ( ) . getTime ( ) ; if ( execTime < expectedExecTime ) { long delay = expectedExecTime - execTime ; persistentExecutor . scheduledExecutor . schedule ( this , delay , TimeUnit . MILLISECONDS ) ; if ( trace && tc . isEntryEnabled ( ) ) Tr . exit ( this , tc , "run[" + taskId + ']' , "attempted to run " + delay + " ms too early. Rescheduled." ) ; return ; } // If a Configuration update is in progress , then we will defer the execution of this Task until after .
if ( persistentExecutor . deferExecutionForConfigUpdate ( this ) == true ) { if ( trace && tc . isEntryEnabled ( ) ) Tr . exit ( this , tc , "run[" + taskId + ']' , "attempted to run during a configuration update." ) ; return ; } String taskName = null ; String taskIdForPropTable = null ; TaskLocker ejbSingletonLockCollaborator = null ; String ownerForDeferredTask = null ; ClassLoader loader = null ; Throwable failure = null ; Short prevFailureCount = null , nextFailureCount = null ; Long nextExecTime = null ; TaskStore taskStore = persistentExecutor . taskStore ; ApplicationTracker appTracker = persistentExecutor . appTrackerRef . getServiceWithException ( ) ; TransactionManager tranMgr = persistentExecutor . tranMgrRef . getServiceWithException ( ) ; taskIdsOfRunningTasks . set ( taskId ) ; try { int timeout = txTimeout == 0 && ( binaryFlags & TaskRecord . Flags . SUSPEND_TRAN_OF_EXECUTOR_THREAD . bit ) != 0 ? DEFAULT_TIMEOUT_FOR_SUSPENDED_TRAN : txTimeout ; tranMgr . setTransactionTimeout ( timeout ) ; TaskRecord ejbSingletonRecord = null ; if ( ( binaryFlags & TaskRecord . Flags . EJB_SINGLETON . bit ) != 0 && ( binaryFlags & TaskRecord . Flags . SUSPEND_TRAN_OF_EXECUTOR_THREAD . bit ) == 0 ) { tranMgr . begin ( ) ; ejbSingletonRecord = taskStore . getTrigger ( taskId ) ; tranMgr . commit ( ) ; } tranMgr . begin ( ) ; TaskRecord taskRecord ; // Execution property TRANSACTION = SUSPEND indicates the task should not run in the persistent executor transaction .
// Lock an entry in a different table to prevent concurrent execution , and run with that transaction suspended .
if ( ( binaryFlags & TaskRecord . Flags . SUSPEND_TRAN_OF_EXECUTOR_THREAD . bit ) != 0 ) { if ( ! taskStore . createProperty ( taskIdForPropTable = "{" + taskId + "}" , " " ) ) throw new IllegalStateException ( taskIdForPropTable ) ; // Internal error if this path is ever reached
Transaction suspendedTran = tranMgr . suspend ( ) ; try { // We still need the task information , but get it in a new transaction that we can commit right away .
Throwable failed = null ; tranMgr . begin ( ) ; try { taskRecord = taskStore . find ( taskId , persistentExecutor . getPartitionId ( ) , new Date ( ) . getTime ( ) , false ) ; } catch ( Throwable x ) { throw failed = x ; } finally { if ( failed == null ) tranMgr . commit ( ) ; else tranMgr . rollback ( ) ; } } finally { tranMgr . resume ( suspendedTran ) ; // will be suspended again by application of transaction context
} } else { if ( ejbSingletonRecord != null ) { String owner = ejbSingletonRecord . getIdentifierOfOwner ( ) ; if ( ! appTracker . isStarted ( owner ) ) { ownerForDeferredTask = owner ; if ( trace && tc . isEntryEnabled ( ) ) Tr . exit ( this , tc , "run[" + taskId + ']' , "unavailable - deferred" ) ; return ; // Ignore , we are deferring the task because the application or module is unavailable
} byte [ ] bytes = ejbSingletonRecord . getTrigger ( ) ; ejbSingletonLockCollaborator = ( TaskLocker ) persistentExecutor . deserialize ( bytes , priv . getSystemClassLoader ( ) ) ; if ( trace && tc . isDebugEnabled ( ) ) Tr . debug ( this , tc , "notify EJB container to lock singleton" ) ; ejbSingletonLockCollaborator . lock ( ) ; } taskRecord = taskStore . find ( taskId , persistentExecutor . getPartitionId ( ) , new Date ( ) . getTime ( ) , true ) ; } if ( taskRecord == null || ( taskRecord . getState ( ) & TaskState . ENDED . bit ) != 0 ) { if ( trace && tc . isEntryEnabled ( ) ) Tr . exit ( this , tc , "run[" + taskId + ']' , "not appropriate to run task at this time" ) ; return ; // Ignore , because the task was canceled or someone else already ran it
} taskName = taskRecord . getName ( ) ; prevFailureCount = taskRecord . getConsecutiveFailureCount ( ) ; String classLoaderIdentifier = taskRecord . getIdentifierOfClassLoader ( ) ; if ( trace && tc . isDebugEnabled ( ) ) Tr . debug ( this , tc , "classloader identifier" , classLoaderIdentifier ) ; loader = ejbSingletonRecord == null ? persistentExecutor . classloaderIdSvc . getClassLoader ( classLoaderIdentifier ) : priv . getSystemClassLoader ( ) ; if ( trace && tc . isDebugEnabled ( ) ) Tr . debug ( this , tc , "classloader" , loader ) ; String owner = taskRecord . getIdentifierOfOwner ( ) ; if ( loader == null || ! appTracker . isStarted ( owner ) ) { ownerForDeferredTask = owner ; if ( trace && tc . isEntryEnabled ( ) ) Tr . exit ( this , tc , "run[" + taskId + ']' , "unavailable - deferred" ) ; return ; // Ignore , we are deferring the task because the application or module is unavailable
} TaskInfo info = ( TaskInfo ) persistentExecutor . deserialize ( taskRecord . getTaskInformation ( ) , null ) ; byte [ ] triggerBytes = taskRecord . getTrigger ( ) ; Trigger trigger = triggerBytes == null ? null : ejbSingletonRecord != null && Arrays . equals ( triggerBytes , ejbSingletonRecord . getTrigger ( ) ) ? ejbSingletonLockCollaborator : ( Trigger ) persistentExecutor . deserialize ( triggerBytes , loader ) ; if ( trigger == null ) { String triggerClassName = info . getClassNameForNonSerializableTrigger ( ) ; if ( triggerClassName != null ) trigger = ( Trigger ) loader . loadClass ( triggerClassName ) . newInstance ( ) ; } byte [ ] taskBytes = taskRecord . getTask ( ) ; Object task = taskBytes == null ? null : persistentExecutor . deserialize ( taskBytes , loader ) ; if ( task == null ) { String taskClassName = info . getClassNameForNonSerializableTask ( ) ; if ( taskClassName == null ) task = trigger ; // optimization to share single instance for task and trigger
else task = loader . loadClass ( taskClassName ) . newInstance ( ) ; } byte [ ] resultBytes = taskRecord . getResult ( ) ; boolean skipped = false ; Throwable skippedX = null ; LastExecution lastExecution = null ; long startTime = 0 , stopTime = 0 ; Object result = null ; Map < String , String > execProps = persistentExecutor . getExecutionProperties ( task ) ; ThreadContextDescriptor threadContext = info . deserializeThreadContext ( execProps ) ; ArrayList < ThreadContext > contextAppliedToThread = threadContext == null ? null : threadContext . taskStarting ( ) ; try { if ( trigger != null ) { Long prevScheduledStart = taskRecord . getPreviousScheduledStartTime ( ) ; if ( prevScheduledStart != null ) lastExecution = new LastExecutionImpl ( persistentExecutor , taskId , taskName , resultBytes , taskRecord . getPreviousStopTime ( ) , taskRecord . getPreviousStartTime ( ) , prevScheduledStart , loader ) ; try { skipped = trigger . skipRun ( lastExecution , new Date ( taskRecord . getNextExecutionTime ( ) ) ) ; } catch ( RuntimeException x ) { skipped = true ; skippedX = x ; } } if ( skipped ) { if ( trace && tc . isDebugEnabled ( ) ) Tr . debug ( this , tc , "skipping task" , skippedX ) ; Date nextExecDate = trigger . getNextRunTime ( lastExecution , new Date ( taskRecord . getOriginalSubmitTime ( ) ) ) ; nextExecTime = nextExecDate == null ? null : nextExecDate . getTime ( ) ; } else { // Fixed result for one - shot runnable
if ( ! info . isSubmittedAsCallable ( ) && info . getInterval ( ) == - 1 && trigger == null && resultBytes != null ) result = persistentExecutor . deserialize ( resultBytes , loader ) ; if ( trace && tc . isDebugEnabled ( ) ) Tr . debug ( this , tc , "task about to start " + task ) ; startTime = new Date ( ) . getTime ( ) ; try { if ( info . isSubmittedAsCallable ( ) ) result = ( ( Callable < ? > ) task ) . call ( ) ; else ( ( Runnable ) task ) . run ( ) ; if ( trace && tc . isDebugEnabled ( ) ) Tr . debug ( this , tc , "task result " + result ) ; nextFailureCount = ( short ) 0 ; } catch ( Throwable x ) { if ( trace && tc . isDebugEnabled ( ) ) Tr . debug ( this , tc , "task failed" , x ) ; failure = x ; nextFailureCount = ( short ) ( ( prevFailureCount < Short . MAX_VALUE ) ? ( prevFailureCount + 1 ) : Short . MAX_VALUE ) ; // If we will retry , immediately roll back , then can update persistent store with failure count
config = persistentExecutor . configRef . get ( ) ; if ( config . retryLimit == - 1 || nextFailureCount <= config . retryLimit ) throw failure ; } finally { stopTime = new Date ( ) . getTime ( ) ; } // Compute next execution time if the task did not fail .
if ( failure == null ) { long interval = info . getInterval ( ) ; if ( interval == - 1 ) { if ( trigger == null ) { nextExecTime = null ; // one - shot task
} else { lastExecution = new LastExecutionImpl ( persistentExecutor , taskId , taskName , result , stopTime , startTime , taskRecord . getNextExecutionTime ( ) ) ; Date nextExecDate = trigger . getNextRunTime ( lastExecution , new Date ( taskRecord . getOriginalSubmitTime ( ) ) ) ; nextExecTime = nextExecDate == null ? null : nextExecDate . getTime ( ) ; } } else if ( info . isFixedRate ( ) ) { long originalScheduledStartTime = taskRecord . getOriginalSubmitTime ( ) + info . getInitialDelay ( ) ; long elapsed = stopTime - originalScheduledStartTime ; nextExecTime = ( elapsed / interval + 1 ) * interval + originalScheduledStartTime ; } else nextExecTime = stopTime + interval ; // fixed - delay
} } } finally { if ( contextAppliedToThread != null ) threadContext . taskStopping ( contextAppliedToThread ) ; } short autoPurgeBit = failure == null ? TaskRecord . Flags . AUTO_PURGE_ON_SUCCESS . bit : TaskRecord . Flags . AUTO_PURGE_ALWAYS . bit ; if ( ( nextExecTime == null || ! skipped && nextFailureCount > 0 ) && ( binaryFlags & autoPurgeBit ) != 0 ) { // Autopurge the completed task if it hasn ' t already been ended ( removal / cancellation by self or other )
taskStore . remove ( taskId , null , false ) ; } else { // Update state
TaskRecord updates = new TaskRecord ( false ) ; if ( nextExecTime != null ) updates . setNextExecutionTime ( nextExecTime ) ; short state = nextExecTime == null ? ( short ) ( TaskState . ENDED . bit | TaskState . SUCCESSFUL . bit ) : TaskState . SCHEDULED . bit ; if ( skipped ) { state |= TaskState . SKIPPED . bit ; if ( skippedX != null ) { state |= TaskState . SKIPRUN_FAILED . bit ; byte [ ] previousResultBytes = persistentExecutor . serialize ( lastExecution == null ? null : lastExecution . getResult ( ) ) ; updates . setResult ( persistentExecutor . serialize ( new TaskSkipped ( previousResultBytes , skippedX , loader , persistentExecutor ) ) ) ; } } else { updates . setConsecutiveFailureCount ( nextFailureCount ) ; updates . setPreviousScheduledStartTime ( taskRecord . getNextExecutionTime ( ) ) ; updates . setPreviousStartTime ( startTime ) ; updates . setPreviousStopTime ( stopTime ) ; if ( failure == null ) { // Only update result blob if it changed
byte [ ] updatedResultBytes = result == null ? null : serializeResult ( result , loader ) ; if ( updatedResultBytes == null || ! Arrays . equals ( resultBytes , updatedResultBytes ) ) updates . setResult ( updatedResultBytes ) ; } else { updates . setResult ( persistentExecutor . serialize ( new TaskFailure ( failure , loader , persistentExecutor , TaskFailure . FAILURE_LIMIT_REACHED , Short . toString ( nextFailureCount ) ) ) ) ; state = ( short ) ( TaskState . ENDED . bit | TaskState . FAILURE_LIMIT_REACHED . bit ) ; } } updates . setState ( state ) ; // Only update blobs if they have changed
if ( taskBytes != null ) { byte [ ] updatedTaskBytes = persistentExecutor . serialize ( task ) ; if ( ! Arrays . equals ( taskBytes , updatedTaskBytes ) ) updates . setTask ( updatedTaskBytes ) ; } if ( triggerBytes != null ) { byte [ ] updatedTriggerBytes = persistentExecutor . serialize ( trigger ) ; if ( ! Arrays . equals ( triggerBytes , updatedTriggerBytes ) ) updates . setTrigger ( updatedTriggerBytes ) ; } TaskRecord expected = new TaskRecord ( false ) ; expected . setId ( taskId ) ; expected . setVersion ( taskRecord . getVersion ( ) ) ; boolean updatesPersisted = taskStore . persist ( updates , expected ) ; if ( ! updatesPersisted ) { // Optimistic update unsuccessful . Need to take into account changes made by the task to itself .
TaskRecord taskRecordRefresh = taskStore . findById ( taskId , null , false ) ; if ( taskRecordRefresh != null ) { short refreshedState = taskRecordRefresh . getState ( ) ; if ( ( refreshedState & TaskState . CANCELED . bit ) != 0 && ( binaryFlags & TaskRecord . Flags . SUSPEND_TRAN_OF_EXECUTOR_THREAD . bit ) == 0 ) { // task canceled itself , combine the canceled state with other updates
updates . setState ( state = refreshedState ) ; expected . setVersion ( taskRecordRefresh . getVersion ( ) ) ; taskStore . persist ( updates , expected ) ; } else if ( ( refreshedState & TaskState . SUSPENDED . bit ) != 0 ) { // task suspended itself , merge the suspended state with new state and combine with other updates
state |= TaskState . SUSPENDED . bit ; state &= ~ TaskState . SCHEDULED . bit ; expected . setVersion ( taskRecordRefresh . getVersion ( ) ) ; taskStore . persist ( updates , expected ) ; } } // else the task removed itself
} } } catch ( Throwable x ) { if ( trace && tc . isDebugEnabled ( ) ) Tr . debug ( this , tc , "marking transaction to roll back in response to error" , x ) ; try { tranMgr . setRollbackOnly ( ) ; } catch ( Throwable t ) { } if ( failure == null ) failure = x ; } finally { if ( ejbSingletonLockCollaborator != null ) ejbSingletonLockCollaborator . unlock ( ) ; taskIdsOfRunningTasks . remove ( ) ; try { tranMgr . setTransactionTimeout ( 0 ) ; // clear the value so we don ' t impact subsequent transactions on this thread
if ( tranMgr . getStatus ( ) == Status . STATUS_MARKED_ROLLBACK ) { if ( trace && tc . isEventEnabled ( ) ) Tr . event ( this , tc , "rolling back task execution attempt" ) ; if ( nextFailureCount == null || nextFailureCount == 0 ) nextFailureCount = ( short ) ( prevFailureCount == null ? 1 : prevFailureCount < Short . MAX_VALUE ? ( prevFailureCount + 1 ) : Short . MAX_VALUE ) ; tranMgr . rollback ( ) ; if ( config == null ) config = persistentExecutor . configRef . get ( ) ; processRetryableTaskFailure ( failure , loader , nextFailureCount , config , taskName ) ; } else { if ( taskIdForPropTable != null ) taskStore . removeProperty ( taskIdForPropTable ) ; tranMgr . commit ( ) ; // Immediately reschedule tasks that should run in the near future if the transaction commits
config = persistentExecutor . configRef . get ( ) ; if ( config . enableTaskExecution && nextExecTime != null && ( config . pollInterval < 0 || nextExecTime <= new Date ( ) . getTime ( ) + config . pollInterval ) ) { expectedExecTime = nextExecTime ; long delay = nextExecTime - new Date ( ) . getTime ( ) ; ScheduledExecutorService executor = persistentExecutor . scheduledExecutor ; if ( executor == null ) { if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) ) Tr . debug ( this , tc , "deactivated - reschedule skipped" ) ; } else executor . schedule ( this , delay , TimeUnit . MILLISECONDS ) ; } else if ( ownerForDeferredTask != null ) appTracker . deferTask ( this , ownerForDeferredTask , persistentExecutor ) ; else { persistentExecutor . inMemoryTaskIds . remove ( taskId ) ; if ( failure != null ) { taskName = taskName == null || taskName . length ( ) == 0 || taskName . length ( ) == 1 && taskName . charAt ( 0 ) == ' ' ? String . valueOf ( taskId ) // empty task name
: taskId + " (" + taskName + ")" ; Tr . warning ( tc , "CWWKC1511.retry.limit.reached.failed" , persistentExecutor . name , taskName , nextFailureCount , failure ) ; } } } } catch ( Throwable x ) { if ( failure != null ) failure = x ; // Retry the task if an error occurred
processRetryableTaskFailure ( failure , loader , nextFailureCount , config , taskName ) ; } } if ( trace && tc . isEntryEnabled ( ) ) Tr . exit ( this , tc , "run[" + taskId + ']' , failure ) ; |
public class X509Credential { /** * Returns the identity certificate of this credential . The identity certificate is the first certificate
* in the chain that is not an impersonation proxy certificate .
* @ return < code > X509Certificate < / code > the identity cert . Null , if unable to get the identity certificate
* ( an error occurred ) */
public X509Certificate getIdentityCertificate ( ) { } } | try { return BouncyCastleUtil . getIdentityCertificate ( this . certChain ) ; } catch ( CertificateException e ) { logger . debug ( "Error getting certificate identity." , e ) ; return null ; } |
public class RemoteQPConsumerKey { /** * message received from the DME corresponding to a get request issued by this consumer .
* Note that this method will never be called on messages received due to gets issued by the
* RemoteQPConsumerKeyGroup
* @ param key */
protected final void messageReceived ( AIStreamKey key ) { } } | if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) SibTr . entry ( this , tc , "messageReceived" , key ) ; long timeout = refillTime ; boolean reissueGet = false ; synchronized ( this ) { countOfUnlockedMessages ++ ; if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) ) SibTr . debug ( tc , "readAhead change: countOfUnlockedMessages++ " + countOfUnlockedMessages ) ; if ( key . getOriginalTimeout ( ) == SIMPConstants . INFINITE_TIMEOUT ) { countOfOutstandingInfiniteTimeoutGets -- ; if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) ) SibTr . debug ( tc , "readAhead change: countOfOutstandingInfiniteTimeoutGets-- " + countOfOutstandingInfiniteTimeoutGets ) ; } // If the msg that came in was for a refill request then we can allow normal
// operations to resume or redrive a new request for any attempted requests while
// we were busy
if ( isRefilling && key . getTick ( ) == refillTick ) { // redrive a request that encompasses any requests that came in while we
// were busy trying to refill .
if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) ) SibTr . debug ( tc , "we are refilling, refillTime: " + Long . valueOf ( refillTime ) ) ; isRefilling = false ; refillTick = AnycastInputHandler . INVALID_TICK ; if ( refillTime != LocalConsumerPoint . NO_WAIT ) { if ( timeout != LocalConsumerPoint . INFINITE_WAIT ) timeout = refillTime - System . currentTimeMillis ( ) ; if ( timeout == LocalConsumerPoint . INFINITE_WAIT || timeout > 0 ) reissueGet = true ; } } } // end of synch
if ( reissueGet ) // outside of synch block
{ initiateRefill ( ) ; waiting ( timeout , true ) ; } if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) SibTr . exit ( this , tc , "messageReceived" ) ; |
public class StringUtils { /** * Fast line splitting with a separator , typically a tab or space character .
* @ param line
* the line to be splitted
* @ param delimiter
* the delimiter
* @ param splitted
* the array containing the splitted tokens for each line */
public static void splitLine ( final String line , final char delimiter , final String [ ] splitted ) { } } | int idxComma , idxToken = 0 , fromIndex = 0 ; while ( ( idxComma = line . indexOf ( delimiter , fromIndex ) ) != - 1 ) { splitted [ idxToken ++ ] = line . substring ( fromIndex , idxComma ) ; fromIndex = idxComma + 1 ; } splitted [ idxToken ] = line . substring ( fromIndex ) ; |
public class GranteeManager { /** * Revokes the rights represented by the rights argument on
* the database object identified by the dbobject argument
* from the User object identified by the name
* argument . < p >
* @ see # grant */
public void revoke ( OrderedHashSet granteeList , SchemaObject dbObject , Right rights , Grantee grantor , boolean grantOption , boolean cascade ) { } } | if ( ! grantor . isFullyAccessibleByRole ( dbObject ) ) { throw Error . error ( ErrorCode . X_42501 , dbObject . getName ( ) . name ) ; } if ( grantor . isAdmin ( ) ) { grantor = dbObject . getOwner ( ) ; } for ( int i = 0 ; i < granteeList . size ( ) ; i ++ ) { String name = ( String ) granteeList . get ( i ) ; Grantee g = get ( name ) ; if ( g == null ) { throw Error . error ( ErrorCode . X_28501 , name ) ; } if ( isImmutable ( name ) ) { throw Error . error ( ErrorCode . X_28502 , name ) ; } } for ( int i = 0 ; i < granteeList . size ( ) ; i ++ ) { String name = ( String ) granteeList . get ( i ) ; Grantee g = get ( name ) ; g . revoke ( dbObject , rights , grantor , grantOption ) ; g . updateAllRights ( ) ; if ( g . isRole ) { updateAllRights ( g ) ; } } |
public class CmsModuleImportExportRepository { /** * Computes a module hash , which should change when a module changes and stay the same when the module doesn ' t change . < p >
* We only use the modification time of the module resources and their descendants and the modification time of the metadata
* for computing it .
* @ param module the module for which to compute the module signature
* @ param project the project in which to compute the module hash
* @ return the module signature
* @ throws CmsException if something goes wrong */
private String computeModuleHash ( CmsModule module , CmsProject project ) throws CmsException { } } | LOG . info ( "Getting module hash for " + module . getName ( ) ) ; // This method may be called very frequently during a short time , but it is unlikely
// that a module changes multiple times in a few seconds , so we use a timed cache here
String cachedValue = m_newModuleHashCache . get ( module ) ; if ( cachedValue != null ) { LOG . info ( "Using cached value for module hash of " + module . getName ( ) ) ; return cachedValue ; } CmsObject cms = OpenCms . initCmsObject ( m_adminCms ) ; if ( ! CmsStringUtil . isEmptyOrWhitespaceOnly ( module . getSite ( ) ) ) { cms . getRequestContext ( ) . setSiteRoot ( module . getSite ( ) ) ; } cms . getRequestContext ( ) . setCurrentProject ( project ) ; // We compute a hash code from the paths of all resources belonging to the module and their respective modification dates .
List < String > entries = Lists . newArrayList ( ) ; for ( String path : module . getResources ( ) ) { try { Set < CmsResource > resources = Sets . newHashSet ( ) ; CmsResource moduleRes = cms . readResource ( path , CmsResourceFilter . IGNORE_EXPIRATION ) ; resources . add ( moduleRes ) ; if ( moduleRes . isFolder ( ) ) { resources . addAll ( cms . readResources ( path , CmsResourceFilter . IGNORE_EXPIRATION , true ) ) ; } for ( CmsResource res : resources ) { entries . add ( res . getRootPath ( ) + ":" + res . getDateLastModified ( ) ) ; } } catch ( CmsVfsResourceNotFoundException e ) { entries . add ( path + ":null" ) ; } } Collections . sort ( entries ) ; String inputString = CmsStringUtil . listAsString ( entries , "\n" ) + "\nMETA:" + module . getObjectCreateTime ( ) ; LOG . debug ( "Computing module hash from base string:\n" + inputString ) ; return "" + inputString . hashCode ( ) ; |
public class CountersManager { /** * Allocate a new counter with a given label and type .
* @ param label to describe the counter .
* @ param typeId for the type of counter .
* @ return the id allocated for the counter . */
public int allocate ( final String label , final int typeId ) { } } | final int counterId = nextCounterId ( ) ; checkCountersCapacity ( counterId ) ; final int recordOffset = metaDataOffset ( counterId ) ; checkMetaDataCapacity ( recordOffset ) ; try { metaDataBuffer . putInt ( recordOffset + TYPE_ID_OFFSET , typeId ) ; metaDataBuffer . putLong ( recordOffset + FREE_FOR_REUSE_DEADLINE_OFFSET , NOT_FREE_TO_REUSE ) ; putLabel ( recordOffset , label ) ; metaDataBuffer . putIntOrdered ( recordOffset , RECORD_ALLOCATED ) ; } catch ( final Exception ex ) { freeList . pushInt ( counterId ) ; LangUtil . rethrowUnchecked ( ex ) ; } return counterId ; |
public class SunriseSunsetCalculator { /** * Computes the sunset for an arbitrary declination .
* @ param latitude
* @ param longitude
* Coordinates for the location to compute the sunrise / sunset for .
* @ param timeZone
* timezone to compute the sunrise / sunset times in .
* @ param date
* < code > Calendar < / code > object containing the date to compute the official sunset for .
* @ param degrees
* Angle under the horizon for which to compute sunrise . For example , " civil sunset "
* corresponds to 6 degrees .
* @ return the requested sunset time as a < code > Calendar < / code > object . */
public static Calendar getSunset ( double latitude , double longitude , TimeZone timeZone , Calendar date , double degrees ) { } } | SolarEventCalculator solarEventCalculator = new SolarEventCalculator ( new Location ( latitude , longitude ) , timeZone ) ; return solarEventCalculator . computeSunsetCalendar ( new Zenith ( 90 - degrees ) , date ) ; |
public class DomainsInner { /** * Creates an ownership identifier for a domain or updates identifier details for an existing identifer .
* Creates an ownership identifier for a domain or updates identifier details for an existing identifer .
* @ param resourceGroupName Name of the resource group to which the resource belongs .
* @ param domainName Name of domain .
* @ param name Name of identifier .
* @ param domainOwnershipIdentifier A JSON representation of the domain ownership properties .
* @ param serviceCallback the async ServiceCallback to handle successful and failed responses .
* @ throws IllegalArgumentException thrown if parameters fail the validation
* @ return the { @ link ServiceFuture } object */
public ServiceFuture < DomainOwnershipIdentifierInner > updateOwnershipIdentifierAsync ( String resourceGroupName , String domainName , String name , DomainOwnershipIdentifierInner domainOwnershipIdentifier , final ServiceCallback < DomainOwnershipIdentifierInner > serviceCallback ) { } } | return ServiceFuture . fromResponse ( updateOwnershipIdentifierWithServiceResponseAsync ( resourceGroupName , domainName , name , domainOwnershipIdentifier ) , serviceCallback ) ; |
public class A_CmsUploadDialog { /** * Execute to set the content wrapper height . < p > */
protected void setContentWrapperHeight ( ) { } } | // set the max height of the content panel
int fixedContent = 0 ; if ( m_dialogInfo . isVisible ( ) ) { fixedContent += m_dialogInfo . getOffsetHeight ( ) ; } if ( m_selectionSummary . isVisible ( ) ) { fixedContent += m_selectionSummary . getOffsetHeight ( ) ; } m_scrollPanel . getElement ( ) . getStyle ( ) . setPropertyPx ( "maxHeight" , getAvailableHeight ( fixedContent ) ) ; doResize ( ) ; |
public class Forward { /** * Make sure required action outputs are present , and are of the right type ( only make the latter check when not
* in production mode */
private void checkActionOutputs ( PageFlowActionForward fc ) { } } | PageFlowActionForward . ActionOutput [ ] actionOutputs = fc . getActionOutputs ( ) ; boolean doExpensiveChecks = _actionOutputs != null && ! AdapterManager . getServletContainerAdapter ( _servletContext ) . isInProductionMode ( ) ; for ( int i = 0 ; i < actionOutputs . length ; ++ i ) { PageFlowActionForward . ActionOutput actionOutput = actionOutputs [ i ] ; if ( ! actionOutput . getNullable ( ) ) { String actionOutputName = actionOutput . getName ( ) ; if ( _actionOutputs == null || ! _actionOutputs . containsKey ( actionOutputName ) ) { PageFlowException ex = new MissingActionOutputException ( _mappingPath , _flowController , actionOutput . getName ( ) , getName ( ) ) ; InternalUtils . throwPageFlowException ( ex ) ; } else if ( _actionOutputs . get ( actionOutputName ) == null ) { PageFlowException ex = new NullActionOutputException ( _mappingPath , _flowController , actionOutput . getName ( ) , getName ( ) ) ; InternalUtils . throwPageFlowException ( ex ) ; } } // If we ' re * not * in production mode , do some ( expensive ) checks to ensure that the types for the
// action outputs match their declared types .
if ( doExpensiveChecks ) { Object actualActionOutput = _actionOutputs . get ( actionOutput . getName ( ) ) ; if ( actualActionOutput != null ) { String expectedTypeName = actionOutput . getType ( ) ; int expectedArrayDims = 0 ; while ( expectedTypeName . endsWith ( "[]" ) ) { ++ expectedArrayDims ; expectedTypeName = expectedTypeName . substring ( 0 , expectedTypeName . length ( ) - 2 ) ; } Class expectedType = ( Class ) PRIMITIVE_TYPES . get ( expectedTypeName ) ; if ( expectedType == null ) { try { ReloadableClassHandler rch = Handlers . get ( _servletContext ) . getReloadableClassHandler ( ) ; expectedType = rch . loadClass ( expectedTypeName ) ; } catch ( ClassNotFoundException e ) { _log . error ( "Could not load expected action output type " + expectedTypeName + " for action output '" + actionOutput . getName ( ) + "' on forward '" + fc . getName ( ) + "'; skipping type check." ) ; continue ; } } Class actualType = actualActionOutput . getClass ( ) ; int actualArrayDims = 0 ; InternalStringBuilder arraySuffix = new InternalStringBuilder ( ) ; while ( actualType . isArray ( ) && actualArrayDims <= expectedArrayDims ) { ++ actualArrayDims ; arraySuffix . append ( "[]" ) ; actualType = actualType . getComponentType ( ) ; } if ( actualArrayDims != expectedArrayDims || ! expectedType . isAssignableFrom ( actualType ) ) { PageFlowException ex = new MismatchedActionOutputException ( _mappingPath , _flowController , actionOutput . getName ( ) , getName ( ) , expectedTypeName , actualType . getName ( ) + arraySuffix ) ; InternalUtils . throwPageFlowException ( ex ) ; } } } } |
public class TSDB { /** * Blocks while pre - fetching meta data from the data and uid tables
* so that performance improves , particularly with a large number of
* regions and region servers .
* @ since 2.2 */
public void preFetchHBaseMeta ( ) { } } | LOG . info ( "Pre-fetching meta data for all tables" ) ; final long start = System . currentTimeMillis ( ) ; final ArrayList < Deferred < Object > > deferreds = new ArrayList < Deferred < Object > > ( ) ; deferreds . add ( client . prefetchMeta ( table ) ) ; deferreds . add ( client . prefetchMeta ( uidtable ) ) ; // TODO ( cl ) - meta , tree , etc
try { Deferred . group ( deferreds ) . join ( ) ; LOG . info ( "Fetched meta data for tables in " + ( System . currentTimeMillis ( ) - start ) + "ms" ) ; } catch ( InterruptedException e ) { LOG . error ( "Interrupted" , e ) ; Thread . currentThread ( ) . interrupt ( ) ; return ; } catch ( Exception e ) { LOG . error ( "Failed to prefetch meta for our tables" , e ) ; } |
public class CmsXmlMessages { /** * Returns the localized resource String from the configuration file , if not found or set from the resource bundle . < p >
* @ see org . opencms . i18n . CmsMessages # key ( java . lang . String ) */
@ Override public String key ( String keyName ) { } } | if ( hasConfigValue ( keyName ) ) { return getConfigValue ( keyName ) ; } return m_messages . key ( keyName ) ; |
public class A_CmsDiffViewDialog { /** * Returns the html code for the buttons ' show only differences ' and ' show everything ' . < p >
* @ return the html code for the buttons ' show only differences ' and ' show everything ' */
String getDiffOnlyButtonsHtml ( ) { } } | StringBuffer result = new StringBuffer ( ) ; if ( ! getOriginalSource ( ) . equals ( getCopySource ( ) ) ) { String onClick1 = "javascript:document.forms['diff-form'].mode.value = '" ; String onClick2 = "javascript:document.forms['diff-form'].mode.value = '" ; onClick1 += CmsDiffViewMode . ALL ; onClick2 += CmsDiffViewMode . DIFF_ONLY ; onClick1 += "'; document.forms['diff-form'].submit();" ; onClick2 += "'; document.forms['diff-form'].submit();" ; result . append ( getTwoButtonsHtml ( CmsDiffViewMode . DIFF_ONLY . getName ( ) . key ( getLocale ( ) ) , CmsDiffViewMode . ALL . getName ( ) . key ( getLocale ( ) ) , onClick1 , onClick2 , getMode ( ) == CmsDiffViewMode . DIFF_ONLY ) ) ; } else { // display all text , if there are no differences
setMode ( CmsDiffViewMode . ALL ) ; } return result . toString ( ) ; |
public class RequestMapper { /** * Register the RequestHandler to accept requests on the given port , for the
* specified host and path .
* @ param port the integer port on which the RequestHandler gets requests .
* @ param host the String Host which the RequestHandler matches , or null , if
* the RequestHandler should match ALL hosts .
* @ param path the String path which the RequestHandler matches , or null , if
* the RequestHandler should match ALL paths .
* @ param requestHandler the RequestHandler to register . */
public void addRequestHandler ( int port , String host , String path , RequestHandler requestHandler ) { } } | Integer portInt = Integer . valueOf ( port ) ; PortMapper portMapper = portMap . get ( portInt ) ; if ( portMapper == null ) { portMapper = new PortMapper ( portInt ) ; portMap . put ( portInt , portMapper ) ; } portMapper . addRequestHandler ( host , path , requestHandler ) ; LOGGER . info ( "Registered " + port + "/" + ( host == null ? "*" : host ) + "/" + ( path == null ? "*" : path ) + " --> " + requestHandler ) ; |
public class NonBlockingIdentityHashMap { /** * Atomically do a < code > put ( key , newValue ) < / code > if - and - only - if the key is
* mapped a value which is < code > equals < / code > to < code > oldValue < / code > .
* @ throws NullPointerException if the specified key or value is null */
public boolean replace ( TypeK key , TypeV oldValue , TypeV newValue ) { } } | return putIfMatch ( key , newValue , oldValue ) == oldValue ; |
public class ESFilterBuilder { /** * Populate like query .
* @ param likeExpression
* the like expression
* @ param metadata
* the metadata
* @ return the filter builder */
private QueryBuilder populateLikeQuery ( LikeExpression likeExpression , EntityMetadata metadata ) { } } | Expression patternValue = likeExpression . getPatternValue ( ) ; String field = likeExpression . getStringExpression ( ) . toString ( ) ; String likePattern = ( patternValue instanceof InputParameter ) ? kunderaQuery . getParametersMap ( ) . get ( ( patternValue ) . toParsedText ( ) ) . toString ( ) : patternValue . toParsedText ( ) . toString ( ) ; String jpaField = getField ( field ) ; log . debug ( "Pattern value for field " + field + " is: " + patternValue ) ; QueryBuilder filterBuilder = getQueryBuilder ( kunderaQuery . new FilterClause ( jpaField , Expression . LIKE , likePattern , field ) , metadata ) ; return filterBuilder ; |
public class GetStringComparator { /** * Get a chunk of the string that is consistently not digits . Length of
* string is passed in for improved efficiency ( calculate once ) .
* @ param string the string being chunked
* @ param slength the length of the string
* @ param marker the starting point for processing
* @ return the chunk */
private String getTextChunk ( final String string , final int slength , final int marker ) { } } | final StringBuilder chunk = new StringBuilder ( ) ; for ( int index = marker ; index < slength ; index ++ ) { final char c = string . charAt ( index ) ; if ( isDigit ( c ) ) { break ; } chunk . append ( c ) ; } return chunk . toString ( ) ; |
public class PaginatedList { /** * Returns whether the collection contains the given element . Results are
* loaded and checked incrementally until a match is found or the end of the
* result set is reached .
* Not supported in ITERATION _ ONLY mode . */
@ Override public boolean contains ( Object arg0 ) { } } | checkUnsupportedOperationForIterationOnlyMode ( "contains(Object arg0)" ) ; if ( allResults . contains ( arg0 ) ) return true ; while ( nextResultsAvailable ( ) ) { boolean found = nextResults . contains ( arg0 ) ; moveNextResults ( false ) ; if ( found ) return true ; } return false ; |
public class Properties { /** * Append the value to the list to which the specified property key is mapped . If
* this properties contains no mapping for the property key , the value append to
* a new list witch is associate the the specified property key .
* @ param < T >
* the type of elements in the list
* @ param property
* the property key whose associated list is to be added
* @ param value
* the value to be appended to list */
public < T > void addListItem ( PropertyListKey < T > property , T value ) { } } | List < T > list = get ( property ) ; list . add ( value ) ; if ( ! contains ( property ) ) { set ( property , list ) ; } |
public class CorporationApi { /** * Get corporation member roles history Return how roles have changed for a
* coporation & # 39 ; s members , up to a month - - - This route is cached for up
* to 3600 seconds - - - Requires one of the following EVE corporation
* role ( s ) : Director SSO Scope :
* esi - corporations . read _ corporation _ membership . v1
* @ param corporationId
* An EVE corporation ID ( required )
* @ param datasource
* The server name you would like data from ( optional , default to
* tranquility )
* @ param ifNoneMatch
* ETag from a previous request . A 304 will be returned if this
* matches the current ETag ( optional )
* @ param page
* Which page of results to return ( optional , default to 1)
* @ param token
* Access token to use if unable to set a header ( optional )
* @ return List & lt ; CorporationRolesHistoryResponse & gt ;
* @ throws ApiException
* If fail to call the API , e . g . server error or cannot
* deserialize the response body */
public List < CorporationRolesHistoryResponse > getCorporationsCorporationIdRolesHistory ( Integer corporationId , String datasource , String ifNoneMatch , Integer page , String token ) throws ApiException { } } | ApiResponse < List < CorporationRolesHistoryResponse > > resp = getCorporationsCorporationIdRolesHistoryWithHttpInfo ( corporationId , datasource , ifNoneMatch , page , token ) ; return resp . getData ( ) ; |
public class Sentry { /** * Returns the last statically stored { @ link SentryClient } instance . If no instance
* is already stored , the { @ link # init ( ) } method will be called one time in an attempt to
* create a { @ link SentryClient } .
* @ return statically stored { @ link SentryClient } instance , or null . */
public static SentryClient getStoredClient ( ) { } } | if ( storedClient != null ) { return storedClient ; } synchronized ( Sentry . class ) { if ( storedClient == null && ! autoInitAttempted . get ( ) ) { // attempt initialization by using configuration found in the environment
autoInitAttempted . set ( true ) ; init ( ) ; } } return storedClient ; |
public class CmsListColumnDefinition { /** * returns the html for a cell . < p >
* @ param item the item to render the cell for
* @ param isPrintable if the list is to be printed
* @ return html code */
public String htmlCell ( CmsListItem item , boolean isPrintable ) { } } | StringBuffer html = new StringBuffer ( 512 ) ; Iterator < I_CmsListDirectAction > itActions = m_directActions . iterator ( ) ; while ( itActions . hasNext ( ) ) { I_CmsListDirectAction action = itActions . next ( ) ; action . setItem ( item ) ; boolean enabled = action . isEnabled ( ) ; if ( isPrintable ) { action . setEnabled ( false ) ; } html . append ( action . buttonHtml ( ) ) ; if ( isPrintable ) { action . setEnabled ( enabled ) ; } } if ( ! m_defaultActions . isEmpty ( ) ) { Iterator < CmsListDefaultAction > itDefaultActions = m_defaultActions . iterator ( ) ; while ( itDefaultActions . hasNext ( ) ) { CmsListDefaultAction defAction = itDefaultActions . next ( ) ; defAction . setItem ( item ) ; boolean enabled = defAction . isEnabled ( ) ; if ( isPrintable ) { defAction . setEnabled ( false ) ; } html . append ( defAction . buttonHtml ( ) ) ; if ( isPrintable ) { defAction . setEnabled ( enabled ) ; } } } else { if ( m_formatter == null ) { // unformatted output
if ( item . get ( m_id ) != null ) { // null values are not showed by default
html . append ( item . get ( m_id ) . toString ( ) ) ; } } else { // formatted output
html . append ( m_formatter . format ( item . get ( m_id ) , getWp ( ) . getLocale ( ) ) ) ; } } html . append ( "\n" ) ; return html . toString ( ) ; |
public class AccessSet { /** * This is the getter method for instance variable { @ link # dataModelTypes } .
* @ return the value of the instance variable { @ link # dataModelTypes } .
* @ see # dataModelTypes
* @ throws CacheReloadException on error */
public Set < Type > getDataModelTypes ( ) throws CacheReloadException { } } | final Set < Type > ret = new HashSet < > ( ) ; for ( final Long id : this . dataModelTypes ) { ret . add ( Type . get ( id ) ) ; } return Collections . unmodifiableSet ( ret ) ; |
public class EditableTemplate { /** * Checks if the given page is part of the editable template definition itself .
* @ param page Page
* @ return true if page is part of template definition . */
private static boolean isPageInTemplateDefinition ( Page page ) { } } | Resource resource = page . adaptTo ( Resource . class ) ; if ( resource != null ) { Resource parent = resource . getParent ( ) ; if ( parent != null ) { return StringUtils . equals ( NT_TEMPLATE , parent . getValueMap ( ) . get ( JCR_PRIMARYTYPE , String . class ) ) ; } } return false ; |
public class FastNonThreadsafeRandom { /** * Returns a pseudorandom , uniformly distributed { @ code int } value between 0
* ( inclusive ) and the specified value ( exclusive ) , drawn from this random
* number generator ' s sequence . The general contract of { @ code nextInt } is
* that one { @ code int } value in the specified range is pseudorandomly
* generated and returned . All { @ code n } possible { @ code int } values are
* produced with ( approximately ) equal probability .
* In contrast to the Java version , we use an approach that tries to avoid
* divisions for performance . In this method , we also employ rejection
* sampling ( for marginal improvement ) as discussed in :
* D . Lemire < br >
* Fast random shuffling < br >
* http : / / lemire . me / blog / 2016/06/30 / fast - random - shuffling /
* In our experiments , the difference was negligible , as the rejections are
* quite rare events at least for our use case . */
@ Reference ( authors = "D. Lemire" , title = "Fast random shuffling" , booktitle = "Daniel Lemire's blog" , url = "http://lemire.me/blog/2016/06/30/fast-random-shuffling/" , bibkey = "blog/Lemire16" ) public int nextIntRefined ( int n ) { } } | if ( n <= 0 ) { throw new IllegalArgumentException ( BADBOUND ) ; } seed = ( seed * multiplier + addend ) & mask ; long ret = ( seed >>> 16 ) * n ; // Rejection sampling
int leftover = ( int ) ( ret & 0x7FFFFFFFL ) ; if ( leftover < n ) { // With Java 8 , we could use Integer . remainderUnsigned
final long threshold = ( - n & 0xFFFFFFFFL ) % n ; while ( leftover < threshold ) { seed = ( seed * multiplier + addend ) & mask ; leftover = ( int ) ( ( ret = ( seed >>> 16 ) * n ) & 0x7FFFFFFFL ) ; } } return ( int ) ( ret >>> 32 ) ; |
public class SoyProtoValue { /** * Gets a value for the field for the underlying proto object . Not intended for general use .
* @ param name The proto field name .
* @ return The value of the given field for the underlying proto object , or NullData if either the
* field does not exist or the value is not set in the underlying proto ( according to the jspb
* semantics ) */
public SoyValue getProtoField ( String name ) { } } | FieldWithInterpreter field = clazz ( ) . fields . get ( name ) ; if ( field == null ) { throw new IllegalArgumentException ( "Proto " + proto . getClass ( ) . getName ( ) + " does not have a field of name " + name ) ; } if ( field . shouldCheckFieldPresenceToEmulateJspbNullability ( ) && ! proto . hasField ( field . getDescriptor ( ) ) ) { return NullData . INSTANCE ; } return field . interpretField ( proto ) ; |
public class ProducerSequenceFactory { /** * Returns a sequence that can be used for a request for a decoded image .
* @ param imageRequest the request that will be submitted
* @ return the sequence that should be used to process the request */
public Producer < CloseableReference < CloseableImage > > getDecodedImageProducerSequence ( ImageRequest imageRequest ) { } } | if ( FrescoSystrace . isTracing ( ) ) { FrescoSystrace . beginSection ( "ProducerSequenceFactory#getDecodedImageProducerSequence" ) ; } Producer < CloseableReference < CloseableImage > > pipelineSequence = getBasicDecodedImageSequence ( imageRequest ) ; if ( imageRequest . getPostprocessor ( ) != null ) { pipelineSequence = getPostprocessorSequence ( pipelineSequence ) ; } if ( mUseBitmapPrepareToDraw ) { pipelineSequence = getBitmapPrepareSequence ( pipelineSequence ) ; } if ( FrescoSystrace . isTracing ( ) ) { FrescoSystrace . endSection ( ) ; } return pipelineSequence ; |
public class EntropyInjector { @ Nullable private static EntropyInjectingFileSystem getEntropyFs ( FileSystem fs ) { } } | if ( fs instanceof EntropyInjectingFileSystem ) { return ( EntropyInjectingFileSystem ) fs ; } else if ( fs instanceof SafetyNetWrapperFileSystem ) { FileSystem delegate = ( ( SafetyNetWrapperFileSystem ) fs ) . getWrappedDelegate ( ) ; if ( delegate instanceof EntropyInjectingFileSystem ) { return ( EntropyInjectingFileSystem ) delegate ; } else { return null ; } } else { return null ; } |
public class QrCodeEncoder { /** * Returns the length of the message length variable in bits . Dependent on version */
private static int getLengthBits ( int version , int bitsA , int bitsB , int bitsC ) { } } | int lengthBits ; if ( version < 10 ) lengthBits = bitsA ; else if ( version < 27 ) lengthBits = bitsB ; else lengthBits = bitsC ; return lengthBits ; |
public class JodaBeanJsonWriter { /** * write map */
private void writeMap ( SerIterator itemIterator ) throws IOException { } } | // if key type is known and convertible use short key format , else use full bean format
if ( settings . getConverter ( ) . isConvertible ( itemIterator . keyType ( ) ) ) { writeMapSimple ( itemIterator ) ; } else { writeMapComplex ( itemIterator ) ; } |
public class ChunkCollision { /** * Gets the ray trace result . < br >
* Called via ASM from { @ link World # rayTraceBlocks ( Vec3d , Vec3d , boolean , boolean , boolean ) } before each return .
* @ param world the world
* @ param result the mop
* @ return the ray trace result */
public RayTraceResult getRayTraceResult ( World world , Pair < Point , Point > infos , RayTraceResult result , boolean stopOnLiquid , boolean ignoreBlockWithoutBoundingBox , boolean returnLastUncollidableBlock ) { } } | if ( infos == null ) return result ; RayTraceResult tmp = new RaytraceChunk ( world , infos . getLeft ( ) , infos . getRight ( ) ) . trace ( ) ; result = Raytrace . getClosestHit ( Type . BLOCK , infos . getLeft ( ) , result , tmp ) ; return returnLastUncollidableBlock || result == null || result . typeOfHit == Type . BLOCK ? result : null ; |
public class ViewHolder { /** * I added a generic return type to reduce the casting noise in client code */
@ SuppressWarnings ( "unchecked" ) public static < T extends View > T get ( View view , int id ) { } } | SparseArray < View > viewHolder = ( SparseArray < View > ) view . getTag ( ) ; if ( viewHolder == null ) { viewHolder = new SparseArray < View > ( ) ; view . setTag ( viewHolder ) ; } View childView = viewHolder . get ( id ) ; if ( childView == null ) { childView = view . findViewById ( id ) ; viewHolder . put ( id , childView ) ; } return ( T ) childView ; |
public class MailSenderInfo { /** * 获得邮件会话属性 */
public Properties getProperties ( ) { } } | Properties p = new Properties ( ) ; p . put ( "mail.smtp.host" , this . mailServerHost ) ; p . put ( "mail.smtp.port" , this . mailServerPort ) ; p . put ( "mail.smtp.auth" , validate ? "true" : "false" ) ; return p ; |
public class JpaClusterLockDao { /** * Retrieves a ClusterMutex in a new TX */
protected ClusterMutex getClusterMutexInternal ( final String mutexName ) { } } | final TransactionOperations transactionOperations = this . getTransactionOperations ( ) ; return transactionOperations . execute ( new TransactionCallback < ClusterMutex > ( ) { @ Override public ClusterMutex doInTransaction ( TransactionStatus status ) { final CacheKey key = CacheKey . build ( CLUSTER_MUTEX_SOURCE , mutexName ) ; ClusterMutex clusterMutex = entityManagerCache . get ( BasePortalJpaDao . PERSISTENCE_UNIT_NAME , key ) ; if ( clusterMutex != null ) { return clusterMutex ; } final NaturalIdQuery < ClusterMutex > query = createNaturalIdQuery ( ClusterMutex . class ) ; query . using ( ClusterMutex_ . name , mutexName ) ; clusterMutex = query . load ( ) ; entityManagerCache . put ( BasePortalJpaDao . PERSISTENCE_UNIT_NAME , key , clusterMutex ) ; return clusterMutex ; } } ) ; |
public class AmazonDynamoDBAsyncClient { /** * Retrieves a set of Attributes for an item that matches the primary
* key .
* The < code > GetItem < / code > operation provides an eventually - consistent
* read by default . If eventually - consistent reads are not acceptable for
* your application , use < code > ConsistentRead < / code > . Although this
* operation might take longer than a standard read , it always returns
* the last updated value .
* @ param getItemRequest Container for the necessary parameters to
* execute the GetItem operation on AmazonDynamoDB .
* @ return A Java Future object containing the response from the GetItem
* service method , as returned by AmazonDynamoDB .
* @ throws AmazonClientException
* If any internal errors are encountered inside the client while
* attempting to make the request or handle the response . For example
* if a network connection is not available .
* @ throws AmazonServiceException
* If an error response is returned by AmazonDynamoDB indicating
* either a problem with the data in the request , or a server side issue . */
public Future < GetItemResult > getItemAsync ( final GetItemRequest getItemRequest ) throws AmazonServiceException , AmazonClientException { } } | return executorService . submit ( new Callable < GetItemResult > ( ) { public GetItemResult call ( ) throws Exception { return getItem ( getItemRequest ) ; } } ) ; |
public class StatementDML { /** * Highest level multiple row delete method . Corresponds to an SQL
* DELETE . */
int delete ( Session session , Table table , RowSetNavigator oldRows ) { } } | if ( table . fkMainConstraints . length == 0 ) { deleteRows ( session , table , oldRows ) ; oldRows . beforeFirst ( ) ; if ( table . hasTrigger ( Trigger . DELETE_AFTER ) ) { table . fireAfterTriggers ( session , Trigger . DELETE_AFTER , oldRows ) ; } return oldRows . getSize ( ) ; } HashSet path = session . sessionContext . getConstraintPath ( ) ; HashMappedList tableUpdateList = session . sessionContext . getTableUpdateList ( ) ; if ( session . database . isReferentialIntegrity ( ) ) { oldRows . beforeFirst ( ) ; while ( oldRows . hasNext ( ) ) { oldRows . next ( ) ; Row row = oldRows . getCurrentRow ( ) ; path . clear ( ) ; checkCascadeDelete ( session , table , tableUpdateList , row , false , path ) ; } } if ( session . database . isReferentialIntegrity ( ) ) { oldRows . beforeFirst ( ) ; while ( oldRows . hasNext ( ) ) { oldRows . next ( ) ; Row row = oldRows . getCurrentRow ( ) ; path . clear ( ) ; checkCascadeDelete ( session , table , tableUpdateList , row , true , path ) ; } } oldRows . beforeFirst ( ) ; while ( oldRows . hasNext ( ) ) { oldRows . next ( ) ; Row row = oldRows . getCurrentRow ( ) ; if ( ! row . isDeleted ( session ) ) { table . deleteNoRefCheck ( session , row ) ; } } for ( int i = 0 ; i < tableUpdateList . size ( ) ; i ++ ) { Table targetTable = ( Table ) tableUpdateList . getKey ( i ) ; HashMappedList updateList = ( HashMappedList ) tableUpdateList . get ( i ) ; if ( updateList . size ( ) > 0 ) { targetTable . updateRowSet ( session , updateList , null , true ) ; updateList . clear ( ) ; } } oldRows . beforeFirst ( ) ; if ( table . hasTrigger ( Trigger . DELETE_AFTER ) ) { table . fireAfterTriggers ( session , Trigger . DELETE_AFTER , oldRows ) ; } path . clear ( ) ; return oldRows . getSize ( ) ; |
public class AbstractPendingLinkingCandidate { /** * Returns false if the argument expression is a lambda and the expected type
* of the argument is not a function type or { @ link Object } .
* Returns true in all other cases .
* This serves as a shortcut to rule out decision path ' s where a method is overloaded
* and one of the overloads accepts a function type but the other doesn ' t . In those cases
* it is not necessary to compute the type of the lamdba expression twice .
* An example for this pattern is { @ link IterableExtensions # filter ( Iterable , Class ) } vs
* { @ link IterableExtensions # filter ( Iterable , org . eclipse . xtext . xbase . lib . Functions . Function1 ) } . */
protected boolean isPossibleFunctionType ( int idx ) { } } | if ( idx < arguments . getArgumentCount ( ) ) { XExpression argument = arguments . getArgument ( idx ) ; if ( argument instanceof XClosure ) { XClosure closure = ( XClosure ) argument ; LightweightTypeReference declaredType = arguments . getDeclaredTypeForLambda ( idx ) ; if ( declaredType != null && ! declaredType . isType ( Object . class ) ) { CommonTypeComputationServices services = getState ( ) . getReferenceOwner ( ) . getServices ( ) ; JvmOperation operation = services . getFunctionTypes ( ) . findImplementingOperation ( declaredType ) ; if ( operation == null ) { return false ; } if ( closure . isExplicitSyntax ( ) && closure . getDeclaredFormalParameters ( ) . size ( ) != operation . getParameters ( ) . size ( ) ) { return false ; } } } } return true ; |
public class DirectConnectGatewayAssociation { /** * The Amazon VPC prefixes to advertise to the Direct Connect gateway .
* @ param allowedPrefixesToDirectConnectGateway
* The Amazon VPC prefixes to advertise to the Direct Connect gateway . */
public void setAllowedPrefixesToDirectConnectGateway ( java . util . Collection < RouteFilterPrefix > allowedPrefixesToDirectConnectGateway ) { } } | if ( allowedPrefixesToDirectConnectGateway == null ) { this . allowedPrefixesToDirectConnectGateway = null ; return ; } this . allowedPrefixesToDirectConnectGateway = new com . amazonaws . internal . SdkInternalList < RouteFilterPrefix > ( allowedPrefixesToDirectConnectGateway ) ; |
public class WorkflowClient { /** * Pause a workflow by workflow id
* @ param workflowId the workflow id of the workflow to be paused */
public void pauseWorkflow ( String workflowId ) { } } | Preconditions . checkArgument ( StringUtils . isNotBlank ( workflowId ) , "workflow id cannot be blank" ) ; stub . pauseWorkflow ( WorkflowServicePb . PauseWorkflowRequest . newBuilder ( ) . setWorkflowId ( workflowId ) . build ( ) ) ; |
public class CRC13239 { /** * < p > Method for calculating a CRC13239 checksum over a byte buffer . < / p >
* @ param buf byte buffer to be checksummed .
* @ param len how much of the buffer should be checksummed
* @ return CRC13239 checksum */
static public short getCRC ( byte [ ] buf , int len ) { } } | short i ; short crc = 0x7fff ; boolean isNeg = true ; for ( int j = 0 ; j < len ; j ++ ) { crc ^= buf [ j ] & 0xff ; for ( i = 0 ; i < 8 ; i ++ ) { if ( ( crc & 1 ) == 0 ) { crc >>= 1 ; if ( isNeg ) { isNeg = false ; crc |= 0x4000 ; } } else { crc >>= 1 ; if ( isNeg ) { crc ^= 0x4408 ; } else { crc ^= 0x0408 ; isNeg = true ; } } } } return isNeg ? ( short ) ( crc | ( short ) 0x8000 ) : crc ; |
public class CaseServicesClientImpl { /** * internal methods */
protected List < CaseFileDataItem > internalGetCaseInstanceDataItems ( String caseId , List < String > names , List < String > types , Integer page , Integer pageSize ) { } } | CaseFileDataItemList list = null ; if ( config . isRest ( ) ) { Map < String , Object > valuesMap = new HashMap < String , Object > ( ) ; valuesMap . put ( CASE_ID , caseId ) ; String queryString = getPagingQueryString ( "" , page , pageSize ) ; if ( names != null && ! names . isEmpty ( ) ) { queryString = getAdditionalParams ( queryString , "name" , names ) ; } else if ( types != null && ! types . isEmpty ( ) ) { queryString = getAdditionalParams ( queryString , "type" , types ) ; } list = makeHttpGetRequestAndCreateCustomResponse ( build ( loadBalancer . getUrl ( ) , CASE_QUERY_URI + "/" + CASE_FILE_GET_URI , valuesMap ) + queryString , CaseFileDataItemList . class ) ; } else { CommandScript script = new CommandScript ( Collections . singletonList ( ( KieServerCommand ) new DescriptorCommand ( "CaseQueryService" , "getCaseInstanceDataItems" , new Object [ ] { caseId , safeList ( names ) , safeList ( types ) , page , pageSize } ) ) ) ; ServiceResponse < CaseFileDataItemList > response = ( ServiceResponse < CaseFileDataItemList > ) executeJmsCommand ( script , DescriptorCommand . class . getName ( ) , KieServerConstants . CAPABILITY_CASE ) . getResponses ( ) . get ( 0 ) ; throwExceptionOnFailure ( response ) ; if ( shouldReturnWithNullResponse ( response ) ) { return null ; } list = response . getResult ( ) ; } if ( list != null ) { return list . getItems ( ) ; } return Collections . emptyList ( ) ; |
public class HdfsStatsService { /** * Scans the hbase table and populates the hdfs stats
* @ param cluster
* @ param scan
* @ param maxCount
* @ return
* @ throws IOException */
private List < HdfsStats > createFromScanResults ( String cluster , String path , Scan scan , int maxCount , boolean checkPath , long starttime , long endtime ) throws IOException { } } | Map < HdfsStatsKey , HdfsStats > hdfsStats = new HashMap < HdfsStatsKey , HdfsStats > ( ) ; ResultScanner scanner = null ; Stopwatch timer = new Stopwatch ( ) . start ( ) ; int rowCount = 0 ; long colCount = 0 ; long resultSize = 0 ; Table hdfsUsageTable = null ; try { hdfsUsageTable = hbaseConnection . getTable ( TableName . valueOf ( HdfsConstants . HDFS_USAGE_TABLE ) ) ; scanner = hdfsUsageTable . getScanner ( scan ) ; for ( Result result : scanner ) { if ( result != null && ! result . isEmpty ( ) ) { colCount += result . size ( ) ; // TODO dogpiledays resultSize + = result . getWritableSize ( ) ;
rowCount = populateHdfsStats ( result , hdfsStats , checkPath , path , starttime , endtime , rowCount ) ; // return if we ' ve already hit the limit
if ( rowCount >= maxCount ) { break ; } } } timer . stop ( ) ; LOG . info ( "In createFromScanResults For cluster " + cluster + " Fetched from hbase " + rowCount + " rows, " + colCount + " columns, " + resultSize + " bytes ( " + resultSize / ( 1024 * 1024 ) + ") MB, in total time of " + timer ) ; } finally { try { if ( scanner != null ) { scanner . close ( ) ; } } finally { if ( hdfsUsageTable != null ) { hdfsUsageTable . close ( ) ; } } } List < HdfsStats > values = new ArrayList < HdfsStats > ( hdfsStats . values ( ) ) ; // sort so that timestamps are arranged in descending order
Collections . sort ( values ) ; return values ; |
public class BiconjugateGradient { /** * Solves A * x = b by iterative biconjugate gradient method .
* @ param b the right hand side of linear equations .
* @ param x on input , x should be set to an initial guess of the solution
* ( or all zeros ) . On output , x is reset to the improved solution .
* @ return the estimated error . */
public static double solve ( Matrix A , double [ ] b , double [ ] x ) { } } | return solve ( A , diagonalPreconditioner ( A ) , b , x ) ; |
public class ParametricStatement { /** * Executes an UPDATE or DELETE statement .
* @ return the number of rows affected */
public int executeUpdate ( Connection conn , DataObject object ) throws SQLException { } } | PreparedStatement statement = conn . prepareStatement ( _sql ) ; try { load ( statement , object ) ; return statement . executeUpdate ( ) ; } finally { statement . close ( ) ; } |
public class OperationImpl { /** * Add the extra headers to the write { @ link ByteBuffer } .
* @ param bb the buffer where to append .
* @ param extraHeaders the headers to append . */
private void addExtraHeaders ( final ByteBuffer bb , final Object ... extraHeaders ) { } } | for ( Object o : extraHeaders ) { if ( o instanceof Integer ) { bb . putInt ( ( Integer ) o ) ; } else if ( o instanceof byte [ ] ) { bb . put ( ( byte [ ] ) o ) ; } else if ( o instanceof Long ) { bb . putLong ( ( Long ) o ) ; } else if ( o instanceof Short ) { bb . putShort ( ( Short ) o ) ; } else { assert false : "Unhandled extra header type: " + o . getClass ( ) ; } |
public class Utils { /** * Gets a keystore manager for a given hostname
* Creates one / key if it does not already exist
* @ param hostname
* @ return
* @ throws Exception */
public static KeyStoreManager getKeyStoreManager ( String hostname ) throws Exception { } } | File root = getKeyStoreRoot ( hostname ) ; // create entry
KeyStoreManager keyStoreManager = new KeyStoreManager ( root ) ; // under the hood this will generate the cert if it doesn ' t exist
keyStoreManager . getCertificateByHostname ( hostname ) ; // use this since getCertificateByHostname always returns null , but hostname = = alias for our purpose
X509Certificate cert = keyStoreManager . getCertificateByAlias ( hostname ) ; try { cert . checkValidity ( ) ; } catch ( CertificateExpiredException cee ) { // if the cert is expired we should destroy it and recursively call this function
keyStoreManager = null ; FileUtils . deleteDirectory ( root ) ; return getKeyStoreManager ( hostname ) ; } return keyStoreManager ; |
public class IndexForHeaderIndexFile { /** * Checks , if this index is consistent . All inserted keys have to be inserted incrementally .
* @ return boolean */
public boolean isConsistent ( ) { } } | for ( int i = 1 ; i < filledUpTo ; i ++ ) { int comp1 = KeyUtils . compareKey ( maxKeyPerChunk [ i ] , maxKeyPerChunk [ i - 1 ] ) ; if ( comp1 <= 0 ) { return false ; } } return true ; |
public class WebSocketDecoder { /** * Try parsing the message as a websocket handshake request . If it is such a request , then send the corresponding handshake response ( as in Section 4.2.2 RFC 6455 ) . */
@ SuppressWarnings ( "unchecked" ) private boolean doHandShake ( IoSession session , IoBuffer in ) { } } | if ( log . isDebugEnabled ( ) ) { log . debug ( "Handshake: {}" , in ) ; } // incoming data
byte [ ] data = null ; // check for existing HS data
if ( session . containsAttribute ( Constants . WS_HANDSHAKE ) ) { byte [ ] tmp = ( byte [ ] ) session . getAttribute ( Constants . WS_HANDSHAKE ) ; // size to hold existing and incoming
data = new byte [ tmp . length + in . remaining ( ) ] ; System . arraycopy ( tmp , 0 , data , 0 , tmp . length ) ; // get incoming bytes
in . get ( data , tmp . length , in . remaining ( ) ) ; } else { // size for incoming bytes
data = new byte [ in . remaining ( ) ] ; // get incoming bytes
in . get ( data ) ; } // ensure the incoming data is complete ( ends with crlfcrlf )
byte [ ] tail = Arrays . copyOfRange ( data , data . length - 4 , data . length ) ; if ( ! Arrays . equals ( tail , Constants . END_OF_REQ ) ) { // accumulate the HS data
session . setAttribute ( Constants . WS_HANDSHAKE , data ) ; return false ; } // create the connection obj
WebSocketConnection conn = new WebSocketConnection ( session ) ; // mark as secure if using ssl
if ( session . getFilterChain ( ) . contains ( "sslFilter" ) ) { conn . setSecure ( true ) ; } try { Map < String , Object > headers = parseClientRequest ( conn , new String ( data ) ) ; if ( log . isTraceEnabled ( ) ) { log . trace ( "Header map: {}" , headers ) ; } if ( ! headers . isEmpty ( ) && headers . containsKey ( Constants . WS_HEADER_KEY ) ) { // add the headers to the connection , they may be of use to implementers
conn . setHeaders ( headers ) ; // add query string parameters
if ( headers . containsKey ( Constants . URI_QS_PARAMETERS ) ) { conn . setQuerystringParameters ( ( Map < String , Object > ) headers . remove ( Constants . URI_QS_PARAMETERS ) ) ; } // check the version
if ( ! "13" . equals ( headers . get ( Constants . WS_HEADER_VERSION ) ) ) { log . info ( "Version 13 was not found in the request, communications may fail" ) ; } // get the path
String path = conn . getPath ( ) ; // get the scope manager
WebSocketScopeManager manager = ( WebSocketScopeManager ) session . getAttribute ( Constants . MANAGER ) ; if ( manager == null ) { WebSocketPlugin plugin = ( WebSocketPlugin ) PluginRegistry . getPlugin ( "WebSocketPlugin" ) ; manager = plugin . getManager ( path ) ; } // store manager in the current session
session . setAttribute ( Constants . MANAGER , manager ) ; // TODO add handling for extensions
// TODO expand handling for protocols requested by the client , instead of just echoing back
if ( headers . containsKey ( Constants . WS_HEADER_PROTOCOL ) ) { boolean protocolSupported = false ; String protocol = ( String ) headers . get ( Constants . WS_HEADER_PROTOCOL ) ; log . debug ( "Protocol '{}' found in the request" , protocol ) ; // add protocol to the connection
conn . setProtocol ( protocol ) ; // TODO check listeners for " protocol " support
Set < IWebSocketDataListener > listeners = manager . getScope ( path ) . getListeners ( ) ; for ( IWebSocketDataListener listener : listeners ) { if ( listener . getProtocol ( ) . equals ( protocol ) ) { // log . debug ( " Scope has listener support for the { } protocol " , protocol ) ;
protocolSupported = true ; break ; } } log . debug ( "Scope listener does{} support the '{}' protocol" , ( protocolSupported ? "" : "n't" ) , protocol ) ; } // add connection to the manager
manager . addConnection ( conn ) ; // prepare response and write it to the directly to the session
HandshakeResponse wsResponse = buildHandshakeResponse ( conn , ( String ) headers . get ( Constants . WS_HEADER_KEY ) ) ; // pass the handshake response to the ws connection so it can be sent outside the io thread and allow the decode to complete
conn . sendHandshakeResponse ( wsResponse ) ; // remove the chunk attr
session . removeAttribute ( Constants . WS_HANDSHAKE ) ; return true ; } // set connection as native / direct
conn . setType ( ConnectionType . DIRECT ) ; } catch ( Exception e ) { // input is not a websocket handshake request
log . warn ( "Handshake failed" , e ) ; } return false ; |
public class MessageDrivenBeanTypeImpl { /** * If not already created , a new < code > around - invoke < / code > element will be created and returned .
* Otherwise , the first existing < code > around - invoke < / code > element will be returned .
* @ return the instance defined for the element < code > around - invoke < / code > */
public AroundInvokeType < MessageDrivenBeanType < T > > getOrCreateAroundInvoke ( ) { } } | List < Node > nodeList = childNode . get ( "around-invoke" ) ; if ( nodeList != null && nodeList . size ( ) > 0 ) { return new AroundInvokeTypeImpl < MessageDrivenBeanType < T > > ( this , "around-invoke" , childNode , nodeList . get ( 0 ) ) ; } return createAroundInvoke ( ) ; |
public class DynamoDBMapperFieldModel { /** * Creates a condition which filters on the specified value .
* @ param value The value .
* @ return The condition .
* @ see com . amazonaws . services . dynamodbv2 . model . ComparisonOperator # NE
* @ see com . amazonaws . services . dynamodbv2 . model . Condition */
public final Condition ne ( final V value ) { } } | return new Condition ( ) . withComparisonOperator ( NE ) . withAttributeValueList ( convert ( value ) ) ; |
public class autoscaleaction { /** * Use this API to fetch all the autoscaleaction resources that are configured on netscaler . */
public static autoscaleaction [ ] get ( nitro_service service ) throws Exception { } } | autoscaleaction obj = new autoscaleaction ( ) ; autoscaleaction [ ] response = ( autoscaleaction [ ] ) obj . get_resources ( service ) ; return response ; |
public class PropertiesConfigHelper { /** * Returns as a set , the comma separated values of a property
* @ param bundleName
* the bundle name
* @ param key
* the key of the property
* @ return a set of the comma separated values of a property */
public Map < String , List < String > > getCustomBundlePropertyAsMap ( String bundleName , String key ) { } } | Map < String , List < String > > propertiesMap = new HashMap < > ( ) ; StringTokenizer tk = new StringTokenizer ( getCustomBundleProperty ( bundleName , key , "" ) , ";" ) ; while ( tk . hasMoreTokens ( ) ) { String [ ] mapEntry = tk . nextToken ( ) . trim ( ) . split ( ":" ) ; String mapKey = mapEntry [ 0 ] ; String values = mapEntry [ 1 ] ; StringTokenizer valueTk = new StringTokenizer ( values , "," ) ; List < String > valueList = new ArrayList < > ( ) ; while ( valueTk . hasMoreTokens ( ) ) { valueList . add ( valueTk . nextToken ( ) . trim ( ) ) ; } propertiesMap . put ( mapKey , valueList ) ; } return propertiesMap ; |
public class AmazonQuickSightClient { /** * Returns an Amazon QuickSight group ' s description and Amazon Resource Name ( ARN ) .
* The permissions resource is
* < code > arn : aws : quicksight : us - east - 1 : < i > & lt ; relevant - aws - account - id & gt ; < / i > : group / default / < i > & lt ; group - name & gt ; < / i > < / code >
* The response is the group object .
* < b > CLI Sample : < / b >
* < code > aws quicksight describe - group - \ - aws - account - id = 11112222333 - \ - namespace = default - \ - group - name = Sales < / code >
* @ param describeGroupRequest
* @ return Result of the DescribeGroup operation returned by the service .
* @ throws AccessDeniedException
* You don ' t have access to this . The provided credentials couldn ' t be validated . You might not be
* authorized to carry out the request . Ensure that your account is authorized to use the Amazon QuickSight
* service , that your policies have the correct permissions , and that you are using the correct access keys .
* @ throws InvalidParameterValueException
* One or more parameters don ' t have a valid value .
* @ throws ResourceNotFoundException
* One or more resources can ' t be found .
* @ throws ThrottlingException
* Access is throttled .
* @ throws PreconditionNotMetException
* One or more preconditions aren ' t met .
* @ throws InternalFailureException
* An internal failure occurred .
* @ throws ResourceUnavailableException
* This resource is currently unavailable .
* @ sample AmazonQuickSight . DescribeGroup
* @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / quicksight - 2018-04-01 / DescribeGroup " target = " _ top " > AWS API
* Documentation < / a > */
@ Override public DescribeGroupResult describeGroup ( DescribeGroupRequest request ) { } } | request = beforeClientExecution ( request ) ; return executeDescribeGroup ( request ) ; |
public class BasicChronology { /** * Get the milliseconds for the start of a month .
* @ param year The year to use .
* @ param month The month to use
* @ return millis from 1970-01-01T00:00:00Z */
long getYearMonthMillis ( int year , int month ) { } } | long millis = getYearMillis ( year ) ; millis += getTotalMillisByYearMonth ( year , month ) ; return millis ; |
public class GridBagLayoutFormBuilder { /** * Appends a label and field to the end of the current line .
* The label will be to the left of the field , and be right - justified .
* < br / >
* The field will " grow " horizontally as space allows .
* @ param propertyName the name of the property to create the controls for
* @ return " this " to make it easier to string together append calls */
public GridBagLayoutFormBuilder appendLabeledField ( String propertyName , final JComponent field , LabelOrientation labelOrientation ) { } } | return appendLabeledField ( propertyName , field , labelOrientation , 1 ) ; |
public class ServiceContainerHelper { /** * Generics friendly version of { @ link ServiceRegistry # getService ( ServiceName ) }
* @ param registry service registry
* @ param name service name
* @ return the service controller with the specified name , or null if the service does not exist */
public static < T > ServiceController < T > findService ( ServiceRegistry registry , ServiceName name ) { } } | return ( ServiceController < T > ) registry . getService ( name ) ; |
public class AfplibFactoryImpl { /** * < ! - - begin - user - doc - - >
* < ! - - end - user - doc - - >
* @ generated */
public ResourceObjectIncludeObjType createResourceObjectIncludeObjTypeFromString ( EDataType eDataType , String initialValue ) { } } | ResourceObjectIncludeObjType result = ResourceObjectIncludeObjType . get ( initialValue ) ; if ( result == null ) throw new IllegalArgumentException ( "The value '" + initialValue + "' is not a valid enumerator of '" + eDataType . getName ( ) + "'" ) ; return result ; |
public class Kinds { /** * A KindName representing a given symbol kind */
public static KindName kindName ( int kind ) { } } | switch ( kind ) { case PCK : return KindName . PACKAGE ; case TYP : return KindName . CLASS ; case VAR : return KindName . VAR ; case VAL : return KindName . VAL ; case MTH : return KindName . METHOD ; default : throw new AssertionError ( "Unexpected kind: " + kind ) ; } |
public class ExampleUtils { /** * Returns a BigQuery client builder using the specified { @ link BigQueryOptions } . */
private static Bigquery . Builder newBigQueryClient ( BigQueryOptions options ) { } } | return new Bigquery . Builder ( Transport . getTransport ( ) , Transport . getJsonFactory ( ) , chainHttpRequestInitializer ( options . getGcpCredential ( ) , // Do not log 404 . It clutters the output and is possibly even required by the caller .
new RetryHttpRequestInitializer ( ImmutableList . of ( 404 ) ) ) ) . setApplicationName ( options . getAppName ( ) ) . setGoogleClientRequestInitializer ( options . getGoogleApiTrace ( ) ) ; |
public class MastersSlavesProtocol {
    /**
     * Loop over candidate hosts until the failed connection(s) are re-established.
     * Tries each host in turn, classifying each successful connection as master or
     * secondary, until both roles are satisfied or the retry budget is exhausted.
     *
     * @param listener current failover listener (tracks which role(s) still need reconnecting)
     * @param globalInfo server global variables information
     * @param addresses list of HostAddress to loop over
     * @param searchFilter search parameter
     * @throws SQLException if no suitable connection is found
     */
    public static void loop(MastersSlavesListener listener, final GlobalStateInfo globalInfo, final List<HostAddress> addresses, SearchFilter searchFilter) throws SQLException {
        MastersSlavesProtocol protocol;
        ArrayDeque<HostAddress> loopAddresses = new ArrayDeque<>(addresses);
        if (loopAddresses.isEmpty()) {
            // No candidates supplied: repopulate the queue from the listener's host list.
            resetHostList(listener, loopAddresses);
        }
        int maxConnectionTry = listener.getRetriesAllDown();
        SQLException lastQueryException = null;
        boolean firstLoop = true;
        // Keep trying while hosts remain, or (outside a failover loop) while retries are left.
        while (!loopAddresses.isEmpty() || (!searchFilter.isFailoverLoop() && maxConnectionTry > 0)) {
            protocol = getNewProtocol(listener.getProxy(), globalInfo, listener.getUrlParser());
            // Bail out if the connection was closed or no role needs reconnecting anymore.
            if (listener.isExplicitClosed() || (!listener.isSecondaryHostFailReconnect() && !listener.isMasterHostFailReconnect())) {
                return;
            }
            maxConnectionTry--;
            try {
                HostAddress host = loopAddresses.pollFirst();
                if (host == null) {
                    // Queue drained mid-iteration: refill from the URL parser's host list.
                    loopAddresses.addAll(listener.getUrlParser().getHostAddresses());
                    host = loopAddresses.pollFirst();
                }
                protocol.setHostAddress(host);
                protocol.connect();
                if (listener.isExplicitClosed()) {
                    protocol.close();
                    return;
                }
                // Connection succeeded: host is healthy again.
                listener.removeFromBlacklist(protocol.getHostAddress());
                if (listener.isMasterHostFailReconnect() && protocol.isMasterConnection()) {
                    if (foundMaster(listener, protocol, searchFilter)) {
                        return;
                    }
                } else if (listener.isSecondaryHostFailReconnect() && !protocol.isMasterConnection()) {
                    if (foundSecondary(listener, protocol, searchFilter)) {
                        return;
                    }
                } else {
                    // Connected to a host whose role is not currently needed.
                    protocol.close();
                }
            } catch (SQLException e) {
                // Remember the failure for the final error report and blacklist the host.
                lastQueryException = e;
                listener.addToBlacklist(protocol.getHostAddress());
            }
            if (!listener.isMasterHostFailReconnect() && !listener.isSecondaryHostFailReconnect()) {
                return;
            }
            // in case master not found but slave is, and allowing master down
            if (loopAddresses.isEmpty() && (listener.isMasterHostFailReconnect() && listener.urlParser.getOptions().allowMasterDownConnection && !listener.isSecondaryHostFailReconnect())) {
                return;
            }
            // on connection and all slaves have been tested, use master if on
            if (loopAddresses.isEmpty() && searchFilter.isInitialConnection() && !listener.isMasterHostFailReconnect()) {
                return;
            }
            // if server has try to connect to all host, and there is remaining master or slave that fail
            // add all servers back to continue looping until maxConnectionTry is reached
            if (loopAddresses.isEmpty() && !searchFilter.isFailoverLoop() && maxConnectionTry > 0) {
                resetHostList(listener, loopAddresses);
                if (firstLoop) {
                    firstLoop = false;
                } else {
                    try {
                        // wait 250ms before looping through all connection another time
                        Thread.sleep(250);
                    } catch (InterruptedException interrupted) {
                        // interrupted, continue
                        // NOTE(review): interrupt status is swallowed here; consider
                        // Thread.currentThread().interrupt() — confirm intended behavior.
                    }
                }
            }
        }
        // Retry budget exhausted: report which role could not be reconnected.
        if (listener.isMasterHostFailReconnect() || listener.isSecondaryHostFailReconnect()) {
            String error = "No active connection found for replica";
            if (listener.isMasterHostFailReconnect()) {
                error = "No active connection found for master";
            }
            if (lastQueryException != null) {
                // Preserve the original SQL state/error code and cause.
                throw new SQLException(error + " : " + lastQueryException.getMessage(), lastQueryException.getSQLState(), lastQueryException.getErrorCode(), lastQueryException);
            }
            throw new SQLException(error);
        }
    }
}
public class StructureDiagramGenerator { /** * Generate a new bracket across the provided bond .
* @ param bond bond
* @ param bonds bond map to Sgroups
* @ param counter count how many brackets this group has already
* @ param vert vertical align bonds
* @ return the new bracket */
private SgroupBracket newCrossingBracket ( IBond bond , Multimap < IBond , Sgroup > bonds , Map < IBond , Integer > counter , boolean vert ) { } } | final IAtom beg = bond . getBegin ( ) ; final IAtom end = bond . getEnd ( ) ; final Point2d begXy = beg . getPoint2d ( ) ; final Point2d endXy = end . getPoint2d ( ) ; final Vector2d lenOffset = new Vector2d ( endXy . x - begXy . x , endXy . y - begXy . y ) ; final Vector2d bndCrossVec = new Vector2d ( - lenOffset . y , lenOffset . x ) ; lenOffset . normalize ( ) ; bndCrossVec . normalize ( ) ; bndCrossVec . scale ( ( ( 0.9 * bondLength ) ) / 2 ) ; final List < Sgroup > sgroups = new ArrayList < > ( bonds . get ( bond ) ) ; // bond in sgroup , place it in the middle of the bond
if ( sgroups . size ( ) == 1 ) { lenOffset . scale ( 0.5 * bondLength ) ; } // two sgroups , place one near start and one near end
else if ( sgroups . size ( ) == 2 ) { boolean flip = ! sgroups . get ( counter . get ( bond ) ) . getAtoms ( ) . contains ( beg ) ; if ( counter . get ( bond ) == 0 ) { lenOffset . scale ( flip ? 0.75 : 0.25 * bondLength ) ; // 75 or 25 % along
counter . put ( bond , 1 ) ; } else { lenOffset . scale ( flip ? 0.25 : 0.75 * bondLength ) ; // 25 or 75 % along
} } else { double step = bondLength / ( 1 + sgroups . size ( ) ) ; int idx = counter . get ( bond ) + 1 ; counter . put ( bond , idx ) ; lenOffset . scale ( ( idx * step ) * bondLength ) ; } // vertical bracket
if ( vert ) { return new SgroupBracket ( begXy . x + lenOffset . x , begXy . y + lenOffset . y + bndCrossVec . length ( ) , begXy . x + lenOffset . x , begXy . y + lenOffset . y - bndCrossVec . length ( ) ) ; } else { return new SgroupBracket ( begXy . x + lenOffset . x + bndCrossVec . x , begXy . y + lenOffset . y + bndCrossVec . y , begXy . x + lenOffset . x - bndCrossVec . x , begXy . y + lenOffset . y - bndCrossVec . y ) ; } |
public class DBCluster { /** * Provides the list of instances that make up the DB cluster .
* @ return Provides the list of instances that make up the DB cluster . */
public java . util . List < DBClusterMember > getDBClusterMembers ( ) { } } | if ( dBClusterMembers == null ) { dBClusterMembers = new com . amazonaws . internal . SdkInternalList < DBClusterMember > ( ) ; } return dBClusterMembers ; |
public class Reflection {
    /**
     * Loads a class by name.
     *
     * <p>This method tries first with the current thread's context class loader (the intent is
     * that if the driver is in a low-level loader of an application server — e.g. bootstrap or
     * system — it can still find classes in the application's class loader). If it is null, it
     * defaults to the class loader that loaded the class calling this method.
     *
     * @param classLoader explicit loader to use, or null to fall back to the context loader
     * @param className fully qualified name of the class to load
     * @return the loaded class, or null if the class does not exist
     */
    public static Class<?> loadClass(ClassLoader classLoader, String className) {
        final ClassLoader effective =
                (classLoader != null) ? classLoader : Thread.currentThread().getContextClassLoader();
        try {
            if (effective == null) {
                // No usable loader: fall back to the calling class' loader.
                return Class.forName(className);
            }
            return Class.forName(className, true, effective);
        } catch (ClassNotFoundException e) {
            // Absence is an expected outcome here, not an error.
            return null;
        }
    }
}
public class SubscribeForm { /** * Gets the { @ link PresenceState } for which an entity wants to receive
* notifications .
* @ return the list of states */
public List < PresenceState > getShowValues ( ) { } } | ArrayList < PresenceState > result = new ArrayList < > ( 5 ) ; for ( String state : getFieldValues ( SubscribeOptionFields . show_values ) ) { result . add ( PresenceState . valueOf ( state ) ) ; } return result ; |
public class CallbackMethod { /** * メソッド名の昇順 */
@ Override public int compareTo ( final CallbackMethod o ) { } } | final String name1 = method . getDeclaringClass ( ) . getName ( ) + "#" + method . getName ( ) ; final String name2 = o . method . getDeclaringClass ( ) . getName ( ) + "#" + o . method . getName ( ) ; return name1 . compareTo ( name2 ) ; |
public class ByteArithmeticExtensions { /** * The unary { @ code minus } operator . This is the equivalent to
* the Java ' s { @ code - } function . This function is not null - safe .
* @ param number a number .
* @ return { @ code - number } */
@ Pure @ Inline ( value = "(-($1.byteValue()))" , constantExpression = true ) public static int operator_minus ( Byte number ) { } } | return - number . byteValue ( ) ; |
public class FLACEncoder { /** * Get number of samples that are available to encode . This includes samples
* which are in a partial block ( and so would only be written if " end " was
* set true in encodeSamples ( int count , boolean end ) ;
* @ return number of samples availble to encode . */
public int samplesAvailableToEncode ( ) { } } | int available = 0 ; // sum all in blockQueue
int channels = streamConfig . getChannelCount ( ) ; for ( BlockEncodeRequest ber : preparedRequests ) { int [ ] block = ber . samples ; available += block . length / channels ; } if ( unfilledRequest != null ) available += unfilledRequest . count ; return available ; |
public class Promise { /** * Converts an Object to a ( completed or uncompleted ) CompletableFuture .
* @ param object
* input Object
* @ return object converted to CompletableFuture */
protected static final CompletableFuture < Tree > toCompletableFuture ( Object object ) { } } | if ( object == null ) { return CompletableFuture . completedFuture ( null ) ; } if ( object instanceof CompletableFuture ) { return ( ( CompletableFuture < ? > ) object ) . thenCompose ( Promise :: toCompletableFuture ) ; } if ( object instanceof Promise ) { return ( ( Promise ) object ) . future ; } if ( object instanceof Throwable ) { CompletableFuture < Tree > future = new CompletableFuture < > ( ) ; future . completeExceptionally ( ( Throwable ) object ) ; return future ; } if ( object instanceof CompletionStage ) { return ( ( ( CompletionStage < ? > ) object ) . toCompletableFuture ( ) ) . thenCompose ( Promise :: toCompletableFuture ) ; } return CompletableFuture . completedFuture ( toTree ( object ) ) ; |
public class CalendarDays { /** * / * [ deutsch ]
* < p > Berechnet die Tagesdifferenz zwischen den angegebenen Datumsobjekten . < / p >
* @ param start first calendar date ( inclusive )
* @ param end second calendar date ( exclusive )
* @ return count of calendar days between start and end
* @ since 3.8/4.5 */
public static CalendarDays between ( CalendarDate start , CalendarDate end ) { } } | long t1 = start . getDaysSinceEpochUTC ( ) ; long t2 = end . getDaysSinceEpochUTC ( ) ; return CalendarDays . of ( Math . subtractExact ( t2 , t1 ) ) ; |
public class AtomContainer { /** * { @ inheritDoc } */
@ Override public Order getMaximumBondOrder ( IAtom atom ) { } } | IBond . Order max = null ; for ( IBond bond : bonds ( ) ) { if ( ! bond . contains ( atom ) ) continue ; if ( max == null || bond . getOrder ( ) . numeric ( ) > max . numeric ( ) ) { max = bond . getOrder ( ) ; } } if ( max == null ) { if ( ! contains ( atom ) ) throw new NoSuchAtomException ( "Atom does not belong to this container!" ) ; if ( atom . getImplicitHydrogenCount ( ) != null && atom . getImplicitHydrogenCount ( ) > 0 ) max = Order . SINGLE ; else max = Order . UNSET ; } return max ; |
public class TaskSession { /** * Create the remote receive queue .
* @ param strQueueName The queue name to service .
* @ param strQueueType The queue type .
* @ return The RemoteSendQueue . */
public RemoteReceiveQueue createRemoteReceiveQueue ( String strQueueName , String strQueueType ) throws RemoteException { } } | MessageManager messageManager = this . getEnvironment ( ) . getMessageManager ( this . getApplication ( ) , true ) ; BaseMessageReceiver receiver = ( BaseMessageReceiver ) messageManager . getMessageQueue ( strQueueName , strQueueType ) . getMessageReceiver ( ) ; ReceiveQueueSession remoteQueue = new ReceiveQueueSession ( this , receiver ) ; return remoteQueue ; |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.