signature stringlengths 43 39.1k | implementation stringlengths 0 450k |
|---|---|
public class ServerTransform { /** * Merges the transform and its parameters with other parameters
* of the request .
* Ordinarily , and application does not need to call this method .
* @ param currentParamsthe other parameters
* @ returnthe union of the other parameters and the transform parameters */
public Map < String , List < String > > merge ( Map < String , List < String > > currentParams ) { } } | Map < String , List < String > > params = ( currentParams != null ) ? currentParams : new RequestParameters ( ) ; params . put ( "transform" , Arrays . asList ( getName ( ) ) ) ; for ( Map . Entry < String , List < String > > entry : entrySet ( ) ) { params . put ( "trans:" + entry . getKey ( ) , entry . getValue ( ) ) ; } return params ; |
public class Matrices {
    /**
     * Creates a sum matrix accumulator that calculates the sum of all elements
     * in the matrix, seeded with the given neutral value.
     *
     * @param neutral the neutral (starting) value of the sum
     * @return a sum accumulator
     */
    public static MatrixAccumulator asSumAccumulator(final double neutral) {
        return new MatrixAccumulator() {
            // BigDecimal avoids accumulating double rounding error across updates.
            private BigDecimal result = BigDecimal.valueOf(neutral);

            @Override
            public void update(int i, int j, double value) {
                result = result.add(BigDecimal.valueOf(value));
            }

            @Override
            public double accumulate() {
                // Round to the library-wide scale, then reset the running sum so
                // the accumulator can be reused for another matrix.
                double value = result.setScale(Matrices.ROUND_FACTOR, RoundingMode.CEILING).doubleValue();
                result = BigDecimal.valueOf(neutral);
                return value;
            }
        };
    }
}
public class Iterables {
    /**
     * Gets the element at the given index from an array via reflection.
     *
     * @param array the array object
     * @param index the index
     * @return the element at {@code index}, or null when the array does not
     *         have an element at that index
     */
    public static Object getElementFromArrary(Object array, int index) {
        try {
            return Array.get(array, index);
        } catch (ArrayIndexOutOfBoundsException outOfBounds) {
            // Out-of-range indices (including negative ones) yield null.
            return null;
        }
    }
}
public class Logger {
    /**
     * Logs a formatted message and stack trace if DEBUG logging is enabled,
     * or a formatted message and exception description if WARN logging is enabled.
     *
     * @param cause   an exception to print the stack trace of if DEBUG logging is enabled
     * @param message a {@link java.util.Formatter}-style format string
     * @param args    arguments referenced by the format specifiers in the format string
     */
    public final void warnDebugf(final Throwable cause, final String message, final Object... args) {
        // Delegates to the shared helper at WARN level; the helper decides
        // between full stack trace (DEBUG) and short description (WARN).
        logDebugf(Level.WARN, cause, message, args);
    }
}
public class AVObject {
    /**
     * Appends the given value to the array field named {@code key} as a pending
     * "Add" operation (changeable operations).
     *
     * @param key   the field name; validated before the operation is queued
     * @param value the value to add to the field
     */
    public void add(String key, Object value) {
        validFieldName(key);
        // Build an Add operation and queue it; it is applied when the object is saved.
        ObjectFieldOperation op =
            OperationBuilder.gBuilder.create(OperationBuilder.OperationType.Add, key, value);
        addNewOperation(op);
    }
}
public class ClassWriterImpl { /** * Get links to the given classes .
* @ param context the id of the context where the link will be printed
* @ param list the list of classes
* @ return a content tree for the class list */
private Content getClassLinks ( LinkInfoImpl . Kind context , Collection < ? > list ) { } } | Content dd = new HtmlTree ( HtmlTag . DD ) ; boolean isFirst = true ; for ( Object type : list ) { if ( ! isFirst ) { Content separator = new StringContent ( ", " ) ; dd . addContent ( separator ) ; } else { isFirst = false ; } // TODO : should we simply split this method up to avoid instanceof ?
if ( type instanceof TypeElement ) { Content link = getLink ( new LinkInfoImpl ( configuration , context , ( TypeElement ) ( type ) ) ) ; dd . addContent ( HtmlTree . CODE ( link ) ) ; } else { Content link = getLink ( new LinkInfoImpl ( configuration , context , ( ( TypeMirror ) type ) ) ) ; dd . addContent ( HtmlTree . CODE ( link ) ) ; } } return dd ; |
public class GroovyRunnerRegistry {
    /**
     * Returns the number of registered runners.
     *
     * @return number of registered runners
     */
    @Override
    public int size() {
        Map<String, GroovyRunner> map = getMap();
        // Read under the shared read lock so the size is consistent with
        // concurrent registrations/removals.
        readLock.lock();
        try {
            return map.size();
        } finally {
            readLock.unlock();
        }
    }
}
public class Stylesheet { /** * Set the location information for this element .
* @ param locator SourceLocator object with location information */
public void setLocaterInfo ( SourceLocator locator ) { } } | if ( null != locator ) { m_publicId = locator . getPublicId ( ) ; m_systemId = locator . getSystemId ( ) ; if ( null != m_systemId ) { try { m_href = SystemIDResolver . getAbsoluteURI ( m_systemId , null ) ; } catch ( TransformerException se ) { // Ignore this for right now
} } super . setLocaterInfo ( locator ) ; } |
public class FixedLengthInputFormat { /** * { @ inheritDoc }
* @ throws IOException */
@ Override public FileBaseStatistics getStatistics ( BaseStatistics cachedStats ) throws IOException { } } | final FileBaseStatistics stats = super . getStatistics ( cachedStats ) ; return stats == null ? null : new FileBaseStatistics ( stats . getLastModificationTime ( ) , stats . getTotalInputSize ( ) , this . recordLength ) ; |
public class PortalSessionScope {
    /*
     * (non-Javadoc)
     * @see org.springframework.beans.factory.config.Scope#registerDestructionCallback(java.lang.String, java.lang.Runnable)
     */
    @Override
    public void registerDestructionCallback(String name, Runnable callback) {
        // Force-create the portal session so there is always somewhere to bind.
        final HttpSession session = this.getPortalSesion(true);
        // The listener wrapper runs the callback when the attribute is unbound
        // (i.e. on session destruction).
        final DestructionCallbackBindingListener callbackListener =
            new DestructionCallbackBindingListener(callback);
        session.setAttribute(DESTRUCTION_CALLBACK_NAME_PREFIX + name, callbackListener);
    }
}
public class FileSystemLayout { /** * Derived form ' user . dir '
* @ return a FileSystemLayout instance */
public static FileSystemLayout create ( ) { } } | String userDir = System . getProperty ( USER_DIR ) ; if ( null == userDir ) { throw SwarmMessages . MESSAGES . systemPropertyNotFound ( USER_DIR ) ; } return create ( userDir ) ; |
public class Hex { /** * Gets hex string corresponding to the given byte array from " < tt > from < / tt > " position to " < tt > to ' s < / tt > "
* @ param bytes bytes .
* @ param from from position .
* @ param to to position .
* @ return hex string . */
public static String get ( byte [ ] bytes , int from , int to ) { } } | final StringBuilder bld = new StringBuilder ( ) ; for ( int i = from ; i < to ; i ++ ) { bld . append ( Hex . get ( bytes [ i ] ) ) ; } return bld . toString ( ) ; |
public class StaticPageUtil {
    /**
     * Given the specified components, render them to a stand-alone HTML page
     * (which is returned as a String).
     *
     * @param components Components to render
     * @return Stand-alone HTML page, as a String
     */
    public static String renderHTML(Component... components) {
        try {
            return renderHTMLContent(components);
        } catch (Exception e) {
            // Rendering failures are not recoverable by callers; surface them unchecked.
            throw new RuntimeException(e);
        }
    }
}
public class CmsWebdavServlet {
    /**
     * Process a DELETE WebDAV request for the specified resource.<p>
     *
     * Precondition checks (read-only mode, existence, locks on the item and on
     * its children) each short-circuit with the appropriate WebDAV status
     * before any deletion is attempted.
     *
     * @param req the servlet request we are processing
     * @param resp the servlet response we are creating
     * @throws IOException if an input/output error occurs
     */
    @Override
    protected void doDelete(HttpServletRequest req, HttpServletResponse resp) throws IOException {

        // Get the path to delete
        String path = getRelativePath(req);

        // Check if webdav is set to read only
        if (m_readOnly) {
            resp.setStatus(CmsWebdavStatus.SC_FORBIDDEN);
            if (LOG.isDebugEnabled()) {
                LOG.debug(Messages.get().getBundle().key(Messages.LOG_WEBDAV_READ_ONLY_0));
            }
            return;
        }

        // Check if path exists
        boolean exists = m_session.exists(path);
        if (!exists) {
            resp.setStatus(CmsWebdavStatus.SC_NOT_FOUND);
            if (LOG.isDebugEnabled()) {
                LOG.debug(Messages.get().getBundle().key(Messages.LOG_ITEM_NOT_FOUND_1, path));
            }
            return;
        }

        // Check if resource is locked
        if (isLocked(req)) {
            resp.setStatus(CmsWebdavStatus.SC_LOCKED);
            if (LOG.isDebugEnabled()) {
                LOG.debug(Messages.get().getBundle().key(Messages.LOG_ITEM_LOCKED_1, path));
            }
            return;
        }

        // Check if resources found in the tree of the path are locked
        Hashtable<String, Integer> errorList = new Hashtable<String, Integer>();
        checkChildLocks(req, path, errorList);
        if (!errorList.isEmpty()) {
            // Report every locked child back to the client (multi-status).
            sendReport(req, resp, errorList);
            if (LOG.isDebugEnabled()) {
                Iterator<String> iter = errorList.keySet().iterator();
                while (iter.hasNext()) {
                    String errorPath = iter.next();
                    LOG.debug(Messages.get().getBundle().key(Messages.LOG_CHILD_LOCKED_1, errorPath));
                }
            }
            return;
        }

        // Delete the resource
        try {
            if (LOG.isDebugEnabled()) {
                LOG.debug(Messages.get().getBundle().key(Messages.LOG_DELETE_ITEM_0));
            }
            m_session.delete(path);
        } catch (CmsVfsResourceNotFoundException rnfex) {
            // should never happen (existence was checked above)
            resp.setStatus(CmsWebdavStatus.SC_NOT_FOUND);
            return;
        } catch (CmsSecurityException sex) {
            resp.setStatus(CmsWebdavStatus.SC_FORBIDDEN);
            if (LOG.isDebugEnabled()) {
                LOG.debug(Messages.get().getBundle().key(Messages.LOG_NO_PERMISSION_0));
            }
            return;
        } catch (CmsException ex) {
            resp.setStatus(CmsWebdavStatus.SC_INTERNAL_SERVER_ERROR);
            if (LOG.isErrorEnabled()) {
                LOG.error(Messages.get().getBundle().key(Messages.LOG_REPOSITORY_ERROR_2, "DELETE", path), ex);
            }
            return;
        }
        if (LOG.isDebugEnabled()) {
            LOG.debug(Messages.get().getBundle().key(Messages.LOG_DELETE_SUCCESS_0));
        }
        resp.setStatus(CmsWebdavStatus.SC_NO_CONTENT);
    }
}
public class WsByteBufferImpl {
    /**
     * Serializes this buffer: a direct/heap marker, the byte order, the full
     * backing contents, the saved position/limit, and the read-only flag.
     * Position and limit are restored on the live buffer before returning.
     *
     * @see java.io.Externalizable#writeExternal(java.io.ObjectOutput)
     */
    @Override
    public void writeExternal(ObjectOutput s) throws IOException {
        if (!removedFromLeakDetection) {
            if (TraceComponent.isAnyTracingEnabled() && tc.isEventEnabled()) {
                Tr.event(tc, "Buffer being serialized but removeFromLeakDetection has not been called: " + this);
            }
        }
        if (oByteBuffer.isDirect()) {
            // hard code as local strings to help performance.
            // Strings are used here and in the readObject routines
            s.writeObject("D");
        } else {
            s.writeObject("ND");
        }
        if (oByteBuffer.order() == java.nio.ByteOrder.BIG_ENDIAN) {
            s.writeObject("B");
        } else {
            s.writeObject("L");
        }
        int startPosition = oByteBuffer.position();
        int startLimit = oByteBuffer.limit();
        // set position to 0 and limit to capacity, so we can serialize
        // the entire buffer
        this.oByteBuffer.position(0);
        this.oByteBuffer.limit(this.oByteBuffer.capacity());
        if (oByteBuffer.hasArray() && oByteBuffer.arrayOffset() == 0) {
            // Backing array maps 1:1 to the buffer; write it directly.
            s.writeObject(oByteBuffer.array());
        } else {
            // Direct/offset buffers: copy the contents out first.
            byte[] bytes = new byte[oByteBuffer.limit()];
            this.oByteBuffer.get(bytes);
            s.writeObject(bytes);
        }
        // Restore the caller-visible position/limit.
        this.oByteBuffer.position(startPosition);
        this.oByteBuffer.limit(startLimit);
        s.writeObject(Integer.toString(startPosition));
        s.writeObject(Integer.toString(startLimit));
        if (this.readOnly) {
            s.writeObject("R");
        } else {
            s.writeObject("RW");
        }
    }
}
public class RequestContextAssembly {
    /**
     * Enable {@link RequestContext} during operators.
     *
     * Installs an assembly hook for every RxJava source type (Observable,
     * ConnectableObservable, Completable, Single, Maybe, Flowable,
     * ConnectableFlowable, ParallelFlowable), composed with any hook already
     * installed (saved in the old* fields so it can be restored on disable).
     * Callable/ScalarCallable sources get dedicated wrappers so fusion
     * optimizations keep working. Idempotent: a second call is a no-op.
     */
    @SuppressWarnings({ "rawtypes", "unchecked" })
    public static synchronized void enable() {
        if (enabled) {
            return;
        }
        oldOnObservableAssembly = RxJavaPlugins.getOnObservableAssembly();
        RxJavaPlugins.setOnObservableAssembly(compose(
                oldOnObservableAssembly,
                new ConditionalOnCurrentRequestContextFunction<Observable>() {
                    @Override
                    Observable applyActual(Observable o, RequestContext ctx) {
                        if (!(o instanceof Callable)) {
                            return new RequestContextObservable(o, ctx);
                        }
                        if (o instanceof ScalarCallable) {
                            return new RequestContextScalarCallableObservable(o, ctx);
                        }
                        return new RequestContextCallableObservable(o, ctx);
                    }
                }));
        oldOnConnectableObservableAssembly = RxJavaPlugins.getOnConnectableObservableAssembly();
        RxJavaPlugins.setOnConnectableObservableAssembly(compose(
                oldOnConnectableObservableAssembly,
                new ConditionalOnCurrentRequestContextFunction<ConnectableObservable>() {
                    @Override
                    ConnectableObservable applyActual(ConnectableObservable co, RequestContext ctx) {
                        return new RequestContextConnectableObservable(co, ctx);
                    }
                }));
        oldOnCompletableAssembly = RxJavaPlugins.getOnCompletableAssembly();
        RxJavaPlugins.setOnCompletableAssembly(compose(
                oldOnCompletableAssembly,
                new ConditionalOnCurrentRequestContextFunction<Completable>() {
                    @Override
                    Completable applyActual(Completable c, RequestContext ctx) {
                        if (!(c instanceof Callable)) {
                            return new RequestContextCompletable(c, ctx);
                        }
                        if (c instanceof ScalarCallable) {
                            return new RequestContextScalarCallableCompletable(c, ctx);
                        }
                        return new RequestContextCallableCompletable(c, ctx);
                    }
                }));
        oldOnSingleAssembly = RxJavaPlugins.getOnSingleAssembly();
        RxJavaPlugins.setOnSingleAssembly(compose(
                oldOnSingleAssembly,
                new ConditionalOnCurrentRequestContextFunction<Single>() {
                    @Override
                    Single applyActual(Single s, RequestContext ctx) {
                        if (!(s instanceof Callable)) {
                            return new RequestContextSingle(s, ctx);
                        }
                        if (s instanceof ScalarCallable) {
                            return new RequestContextScalarCallableSingle(s, ctx);
                        }
                        return new RequestContextCallableSingle(s, ctx);
                    }
                }));
        oldOnMaybeAssembly = RxJavaPlugins.getOnMaybeAssembly();
        RxJavaPlugins.setOnMaybeAssembly(compose(
                oldOnMaybeAssembly,
                new ConditionalOnCurrentRequestContextFunction<Maybe>() {
                    @Override
                    Maybe applyActual(Maybe m, RequestContext ctx) {
                        if (!(m instanceof Callable)) {
                            return new RequestContextMaybe(m, ctx);
                        }
                        if (m instanceof ScalarCallable) {
                            return new RequestContextScalarCallableMaybe(m, ctx);
                        }
                        return new RequestContextCallableMaybe(m, ctx);
                    }
                }));
        oldOnFlowableAssembly = RxJavaPlugins.getOnFlowableAssembly();
        RxJavaPlugins.setOnFlowableAssembly(compose(
                oldOnFlowableAssembly,
                new ConditionalOnCurrentRequestContextFunction<Flowable>() {
                    @Override
                    Flowable applyActual(Flowable f, RequestContext ctx) {
                        if (!(f instanceof Callable)) {
                            return new RequestContextFlowable(f, ctx);
                        }
                        if (f instanceof ScalarCallable) {
                            return new RequestContextScalarCallableFlowable(f, ctx);
                        }
                        return new RequestContextCallableFlowable(f, ctx);
                    }
                }));
        oldOnConnectableFlowableAssembly = RxJavaPlugins.getOnConnectableFlowableAssembly();
        RxJavaPlugins.setOnConnectableFlowableAssembly(compose(
                oldOnConnectableFlowableAssembly,
                new ConditionalOnCurrentRequestContextFunction<ConnectableFlowable>() {
                    @Override
                    ConnectableFlowable applyActual(ConnectableFlowable cf, RequestContext ctx) {
                        return new RequestContextConnectableFlowable(cf, ctx);
                    }
                }));
        oldOnParallelAssembly = RxJavaPlugins.getOnParallelAssembly();
        RxJavaPlugins.setOnParallelAssembly(compose(
                oldOnParallelAssembly,
                new ConditionalOnCurrentRequestContextFunction<ParallelFlowable>() {
                    @Override
                    ParallelFlowable applyActual(ParallelFlowable pf, RequestContext ctx) {
                        return new RequestContextParallelFlowable(pf, ctx);
                    }
                }));
        enabled = true;
    }
}
public class Compiler {
    /**
     * Marks the end of a pass: stops the current tracer, runs post-pass hooks,
     * clears the per-pass state, and optionally runs a validity check.
     *
     * @param passName the name of the pass that just finished
     */
    void endPass(String passName) {
        checkState(currentTracer != null, "Tracer should not be null at the end of a pass.");
        // NOTE(review): the tracer is stopped with currentPassName while the
        // hooks receive the passName argument — presumably these match; confirm.
        stopTracer(currentTracer, currentPassName);
        afterPass(passName);
        currentPassName = null;
        currentTracer = null;
        maybeRunValidityCheck();
    }
}
public class EnvironmentsInner {
    /**
     * Get environment.
     *
     * @param resourceGroupName The name of the resource group.
     * @param labAccountName The name of the lab Account.
     * @param labName The name of the lab.
     * @param environmentSettingName The name of the environment Setting.
     * @param environmentName The name of the environment.
     * @param expand Specify the $expand query. Example: 'properties($expand=networkInterface)'
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @return the observable to the EnvironmentInner object
     */
    public Observable<EnvironmentInner> getAsync(String resourceGroupName, String labAccountName, String labName, String environmentSettingName, String environmentName, String expand) {
        // Unwrap the ServiceResponse envelope and surface only the body.
        return getWithServiceResponseAsync(resourceGroupName, labAccountName, labName, environmentSettingName, environmentName, expand)
            .map(new Func1<ServiceResponse<EnvironmentInner>, EnvironmentInner>() {
                @Override
                public EnvironmentInner call(ServiceResponse<EnvironmentInner> response) {
                    return response.body();
                }
            });
    }
}
public class ImmutableLightMetaProperty {
    /**
     * Creates an instance from a derived {@code Method}.
     *
     * @param <P> the property type
     * @param metaBean the meta bean, not null
     * @param method the method, not null
     * @param propertyName the property name, not null
     * @param constructorIndex the index of the property
     * @return the property, not null
     */
    @SuppressWarnings("unchecked")
    static <P> ImmutableLightMetaProperty<P> of(MetaBean metaBean, final Method method, final String propertyName, int constructorIndex) {
        PropertyGetter getter = new PropertyGetter() {
            @Override
            public Object get(Bean bean) {
                try {
                    return method.invoke(bean);
                } catch (IllegalArgumentException | IllegalAccessException ex) {
                    throw new UnsupportedOperationException("Property cannot be read: " + propertyName, ex);
                } catch (InvocationTargetException ex) {
                    // Unwrap runtime exceptions thrown by the getter itself.
                    if (ex.getCause() instanceof RuntimeException) {
                        throw (RuntimeException) ex.getCause();
                    }
                    throw new RuntimeException(ex);
                }
            }
        };
        return new ImmutableLightMetaProperty<P>(
            metaBean,
            propertyName,
            (Class<P>) method.getReturnType(),
            method.getGenericReturnType(),
            Arrays.asList(method.getAnnotations()),
            getter,
            constructorIndex,
            PropertyStyle.DERIVED);
    }
}
public class KeyVaultClientCustomImpl {
    /**
     * Retrieves a list of individual key versions with the same key name. The full
     * key identifier, attributes, and tags are provided in the response.
     * Authorization: Requires the keys/list permission.
     *
     * @param vaultBaseUrl the vault name, e.g. https://myvault.vault.azure.net
     * @param keyName the name of the key
     * @param maxresults maximum number of results to return in a page; if not
     *        specified the service will return up to 25 results
     * @return the PagedList&lt;KeyItem&gt; if successful
     */
    public PagedList<KeyItem> listKeyVersions(final String vaultBaseUrl, final String keyName, final Integer maxresults) {
        // Thin rename over the generated client operation.
        return getKeyVersions(vaultBaseUrl, keyName, maxresults);
    }
}
public class JDBC4ClientConnectionPool { /** * Releases a connection . This method ( or connection . close ( ) must be called by the user thread
* once the connection is no longer needed to release it back into the pool where other threads
* can pick it up . Failure to do so will cause a memory leak as more and more new connections
* will be created , never to be released and reused . The pool itself will run the logic to
* decide whether the actual underlying connection should be kept alive ( if other threads are
* using it ) , or closed for good ( if the calling thread was the last user of that connection ) .
* @ param connection
* the connection to release back into the pool . */
public static void dispose ( JDBC4ClientConnection connection ) { } } | synchronized ( ClientConnections ) { connection . dispose ( ) ; if ( connection . users == 0 ) ClientConnections . remove ( connection . key ) ; } |
public class JCECrypto { /** * Get an RSA public key utilizing the provided provider and PEM formatted string
* @ param provider Provider to generate the key
* @ param pem PEM formatted key string
* @ return RSA public key */
public static RSAPublicKey getRSAPublicKeyFromPEM ( Provider provider , String pem ) { } } | try { KeyFactory keyFactory = KeyFactory . getInstance ( "RSA" , provider ) ; return ( RSAPublicKey ) keyFactory . generatePublic ( new X509EncodedKeySpec ( getKeyBytesFromPEM ( pem ) ) ) ; } catch ( NoSuchAlgorithmException e ) { throw new IllegalArgumentException ( "Algorithm SHA256withRSA is not available" , e ) ; } catch ( InvalidKeySpecException e ) { throw new IllegalArgumentException ( "Invalid PEM provided" , e ) ; } |
public class JvmTypesBuilder {
    /**
     * Attaches the given compile strategy to the given {@link JvmExecutable} such that
     * the compiler knows how to implement the {@link JvmExecutable} when it is
     * translated to Java source code. Any previously attached body is removed first.
     *
     * @param executable the operation or constructor to add the method body to.
     *        If <code>null</code> this method does nothing.
     * @param strategy the compilation strategy. If <code>null</code> this method does nothing.
     */
    public void setBody(/* @Nullable */ JvmExecutable executable, /* @Nullable */ Procedures.Procedure1<ITreeAppendable> strategy) {
        removeExistingBody(executable);
        setCompilationStrategy(executable, strategy);
    }
}
public class ServerReadyEventHandler {
    /*
     * (non-Javadoc)
     * @see com.tvd12.ezyfox.sfs2x.serverhandler.ServerBaseEventHandler#notifyHandler(com.tvd12.ezyfox.core.structure.ServerHandlerClass)
     */
    @Override
    protected void notifyHandler(ServerHandlerClass handler) {
        // Instantiate the handler, inject its dependencies, then invoke its
        // handle method with the server context.
        Object instance = handler.newInstance();
        assignDataToHandler(handler, instance);
        ReflectMethodUtil.invokeHandleMethod(handler.getHandleMethod(), instance, context);
    }
}
public class WebFluxTags { /** * Creates an { @ code outcome } tag based on the response status of the given
* { @ code exchange } .
* @ param exchange the exchange
* @ return the outcome tag derived from the response status
* @ since 2.1.0 */
public static Tag outcome ( ServerWebExchange exchange ) { } } | HttpStatus status = exchange . getResponse ( ) . getStatusCode ( ) ; if ( status != null ) { if ( status . is1xxInformational ( ) ) { return OUTCOME_INFORMATIONAL ; } if ( status . is2xxSuccessful ( ) ) { return OUTCOME_SUCCESS ; } if ( status . is3xxRedirection ( ) ) { return OUTCOME_REDIRECTION ; } if ( status . is4xxClientError ( ) ) { return OUTCOME_CLIENT_ERROR ; } return OUTCOME_SERVER_ERROR ; } return OUTCOME_UNKNOWN ; |
public class MimeType { /** * Loads a file from a input stream containing all known mime types . The InputStream is a resource mapped from the
* project resource directory .
* @ param in InputStream */
private static void loadFile ( @ NotNull InputStream in ) { } } | try ( BufferedReader br = new BufferedReader ( new InputStreamReader ( in ) ) ) { String l ; while ( ( l = br . readLine ( ) ) != null ) { if ( l . length ( ) > 0 && l . charAt ( 0 ) != '#' ) { String [ ] tokens = l . split ( "\\s+" ) ; for ( int i = 1 ; i < tokens . length ; i ++ ) { mimes . put ( tokens [ i ] , tokens [ 0 ] ) ; } } } } catch ( IOException e ) { throw new RuntimeException ( e ) ; } |
public class AuditLogWritingProcessor {
    /*
     * (non-Javadoc)
     * @see org.duracloud.mill.workman.TaskProcessorBase#executeImpl()
     *
     * Writes one audit task to the audit log store. Transactional errors
     * (typically duplicate entries) are logged and swallowed deliberately;
     * all other failures are rethrown as TaskExecutionFailedException.
     */
    @Override
    protected void executeImpl() throws TaskExecutionFailedException {
        try {
            String account = task.getAccount();
            String storeId = task.getStoreId();
            String spaceId = task.getSpaceId();
            String contentId = task.getContentId();
            String action = task.getAction();
            Map<String, String> props = task.getContentProperties();
            String acls = task.getSpaceACLs();
            Date timestamp = new Date(Long.valueOf(task.getDateTime()));
            auditLogStore.write(account, storeId, spaceId, contentId,
                task.getContentChecksum(), task.getContentMimetype(),
                task.getContentSize(), task.getUserId(), action,
                props != null ? AuditLogStoreUtil.serialize(props) : null,
                acls, task.getSourceSpaceId(), task.getSourceContentId(), timestamp);
            log.debug("audit task successfully processed: {}", task);
        } catch (TransactionSystemException e) {
            // Best-effort: duplicates are expected and must not fail the task.
            log.error("failed to write item ( account={} storeId={} spaceId={} contentId={} timestamp={} ) "
                + "to the database due to a transactional error. Likely cause: duplicate entry. Details: {}. "
                + "Ignoring...",
                task.getAccount(), task.getStoreId(), task.getSpaceId(), task.getContentId(),
                new Date(Long.valueOf(task.getDateTime())), e.getMessage());
        } catch (Exception e) {
            String message = "Failed to execute " + task + ": " + e.getMessage();
            log.debug(message, e);
            throw new TaskExecutionFailedException(message, e);
        }
    }
}
public class FactoryDetectPoint {
    /**
     * Detects Harris corners.
     *
     * @param configDetector Configuration for feature detector. If null, defaults are used.
     * @param configCorner Configuration for corner intensity computation. If null,
     *        radius will match the detector radius.
     * @param derivType Type of derivative image.
     * @see boofcv.alg.feature.detect.intensity.HarrisCornerIntensity
     */
    public static <T extends ImageGray<T>, D extends ImageGray<D>> GeneralFeatureDetector<T, D> createHarris(@Nullable ConfigGeneralDetector configDetector, @Nullable ConfigHarrisCorner configCorner, Class<D> derivType) {
        if (configDetector == null)
            configDetector = new ConfigGeneralDetector();
        if (configCorner == null) {
            // Default corner config inherits the detector's radius.
            configCorner = new ConfigHarrisCorner();
            configCorner.radius = configDetector.radius;
        }
        GradientCornerIntensity<D> cornerIntensity =
            FactoryIntensityPointAlg.harris(configCorner.radius, (float) configCorner.kappa, configCorner.weighted, derivType);
        return createGeneral(cornerIntensity, configDetector);
    }
}
public class Database {
    /**
     * Returns the property of the given attribute of the given device.
     * Delegates to the database DAO.
     *
     * @param devname the device name
     * @param attname the attribute name
     * @return the device attribute property
     * @throws DevFailed if the database access fails
     */
    public DbAttribute get_device_attribute_property(String devname, String attname) throws DevFailed {
        return databaseDAO.get_device_attribute_property(this, devname, attname);
    }
}
public class PojoDescriptorGenerator {
    /**
     * This method determines the property descriptors of the
     * {@link net.sf.mmm.util.pojo.api.Pojo} identified by {@code inputType}.
     *
     * @param inputType is the {@link JClassType} reflecting the input-type that
     *        triggered the generation via {@link com.google.gwt.core.client.GWT#create(Class)}.
     * @return the {@link PojoDescriptor} for the given {@code inputType}.
     */
    private static PojoDescriptor<?> getPojoDescriptor(JClassType inputType) {
        PojoDescriptorBuilder descriptorBuilder =
            PojoDescriptorBuilderFactoryImpl.getInstance().createPublicMethodDescriptorBuilder();
        Class<?> inputClass;
        try {
            inputClass = Class.forName(inputType.getQualifiedSourceName());
        } catch (ClassNotFoundException e) {
            // NOTE(review): the ClassNotFoundException cause is dropped here;
            // if TypeNotFoundException accepts a cause, chaining it would help debugging.
            throw new TypeNotFoundException(inputType.getQualifiedSourceName());
        }
        PojoDescriptor<?> descriptor = descriptorBuilder.getDescriptor(inputClass);
        return descriptor;
    }
}
public class BaseDestinationHandler {
    /**
     * Return the itemstream representing a transmit queue to a remote ME.
     *
     * @param meUuid the UUID of the remote messaging engine
     * @return the transmit-queue item stream for that ME
     */
    PtoPXmitMsgsItemStream getXmitQueuePoint(SIBUuid8 meUuid) {
        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
            SibTr.entry(tc, "getXmitQueuePoint", meUuid);
        PtoPXmitMsgsItemStream stream = getLocalisationManager().getXmitQueuePoint(meUuid);
        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
            SibTr.exit(tc, "getXmitQueuePoint", stream);
        return stream;
    }
}
public class Params {
    /**
     * Test if a numeric parameter is less than or equal to the given threshold value.
     *
     * @param parameter invocation numeric parameter
     * @param value threshold value
     * @param name the name of the invocation parameter
     * @throws IllegalArgumentException if {@code parameter} is not less than or
     *         equal to the threshold value
     */
    public static void LTE(long parameter, long value, String name) throws IllegalArgumentException {
        boolean withinThreshold = parameter <= value;
        if (!withinThreshold) {
            throw new IllegalArgumentException(
                String.format("%s is not less than or equal %d.", name, value));
        }
    }
}
public class Languages {
    /**
     * Returns the language for the request. This process considers Request and
     * Response cookies, the response locals, the Request ACCEPT_LANGUAGE header,
     * and finally the application default language — in that priority order.
     *
     * @param routeContext the current route context
     * @return the language for the request
     */
    public String getLanguageOrDefault(RouteContext routeContext) {
        final String cookieName = generateLanguageCookie(defaultLanguage).getName();
        // Step 1: Look for a Response cookie.
        // The Response always has priority over the Request because it may have
        // been set earlier in the HandlerChain.
        Cookie cookie = routeContext.getResponse().getCookie(cookieName);
        if (cookie != null && !StringUtils.isNullOrEmpty(cookie.getValue())) {
            return getLanguageOrDefault(cookie.getValue());
        }
        // Step 2: Look for a Request cookie.
        cookie = routeContext.getRequest().getCookie(cookieName);
        if (cookie != null && !StringUtils.isNullOrEmpty(cookie.getValue())) {
            return getLanguageOrDefault(cookie.getValue());
        }
        // Step 3: Look for a lang parameter in the response locals.
        if (routeContext.getResponse().getLocals().containsKey(PippoConstants.REQUEST_PARAMETER_LANG)) {
            String language = routeContext.getLocal(PippoConstants.REQUEST_PARAMETER_LANG);
            language = getLanguageOrDefault(language);
            return language;
        }
        // Step 4: Look for a language in the Accept-Language header
        // (falls back to the application default inside the overload).
        String acceptLanguage = routeContext.getHeader(HttpConstants.Header.ACCEPT_LANGUAGE);
        return getLanguageOrDefault(acceptLanguage);
    }
}
public class InstanceGroupManagerClient {
    /**
     * Creates a managed instance group using the information that you specify in the
     * request. After the group is created, instances in the group are created using
     * the specified instance template. This operation is marked as DONE when the
     * group is created even if the instances in the group have not yet been created.
     * You must separately verify the status of the individual instances with the
     * listmanagedinstances method.
     *
     * <p>A managed instance group can have up to 1000 VM instances per group.
     * Please contact Cloud Support if you need an increase in this limit.
     *
     * <p>Sample code:
     * <pre><code>
     * try (InstanceGroupManagerClient instanceGroupManagerClient = InstanceGroupManagerClient.create()) {
     *   ProjectZoneName zone = ProjectZoneName.of("[PROJECT]", "[ZONE]");
     *   InstanceGroupManager instanceGroupManagerResource = InstanceGroupManager.newBuilder().build();
     *   Operation response = instanceGroupManagerClient.insertInstanceGroupManager(zone.toString(), instanceGroupManagerResource);
     * }
     * </code></pre>
     *
     * @param zone The name of the zone where you want to create the managed instance group.
     * @param instanceGroupManagerResource An Instance Group Manager resource.
     * @throws com.google.api.gax.rpc.ApiException if the remote call fails
     */
    @BetaApi
    public final Operation insertInstanceGroupManager(String zone, InstanceGroupManager instanceGroupManagerResource) {
        // Wrap the plain parameters in the request object expected by the
        // request-based overload.
        InsertInstanceGroupManagerHttpRequest request =
            InsertInstanceGroupManagerHttpRequest.newBuilder()
                .setZone(zone)
                .setInstanceGroupManagerResource(instanceGroupManagerResource)
                .build();
        return insertInstanceGroupManager(request);
    }
}
public class Introspector { /** * Gets the < code > BeanInfo < / code > object which contains the information of
* the properties , events and methods of the specified bean class . It will
* not introspect the " stopclass " and its super class .
* The < code > Introspector < / code > will cache the < code > BeanInfo < / code >
* object . Subsequent calls to this method will be answered with the cached
* data .
* @ param beanClass
* the specified beanClass .
* @ param stopClass
* the sopt class which should be super class of the bean class .
* May be null .
* @ return the < code > BeanInfo < / code > of the bean class .
* @ throws IntrospectionException */
public static BeanInfo getBeanInfo ( Class < ? > beanClass , Class < ? > stopClass ) throws IntrospectionException { } } | if ( stopClass == null ) { // try to use cache
return getBeanInfo ( beanClass ) ; } return getBeanInfoImplAndInit ( beanClass , stopClass , USE_ALL_BEANINFO ) ; |
public class JsApiMessageImpl {
  /**
   * Copies {@code original} into a new array of exactly {@code length} bytes, truncating or
   * zero-padding as needed. Equivalent to Java 1.6 {@code Arrays.copyOf(byte[], int)}, re-implemented
   * because the Thin Client must run on v1.5. No argument checking is performed; callers are
   * expected to pass valid values.
   *
   * @param original the original byte array
   * @param length the required length for the copy
   * @return a copy of the original array, truncated or padded (with 0s) to {@code length}
   */
  private byte[] copyOf(byte[] original, int length) {
    byte[] copy = new byte[length];
    // Copy as many bytes as both arrays can accommodate; the rest stays zero-initialized.
    System.arraycopy(original, 0, copy, 0, Math.min(length, original.length));
    return copy;
  }
}
public class YarnDriverRuntimeRestartManager {
  /**
   * Determines how many times the Driver has been submitted, based on the container ID
   * environment variable provided by YARN. If that lookup fails, falls back to asking YARN
   * whether previous containers exist; in that fallback, returns a default positive value on
   * restart and 0 otherwise.
   *
   * @return a positive value if the application master is a restarted instance, 0 otherwise
   */
  @Override
  public int getResubmissionAttempts() {
    final String containerIdString = YarnUtilities.getContainerIdString();
    final ApplicationAttemptId appAttemptID = YarnUtilities.getAppAttemptId(containerIdString);
    if (containerIdString == null || appAttemptID == null) {
      // Could not derive the attempt from the environment; fall back to the RM's record
      // of previous containers to decide whether this is a restart.
      LOG.log(Level.WARNING, "Was not able to fetch application attempt, container ID is ["
          + containerIdString + "] and application attempt is [" + appAttemptID
          + "]. Determining restart based on previous containers.");
      if (this.isRestartByPreviousContainers()) {
        LOG.log(Level.WARNING, "Driver is a restarted instance based on the number of previous containers. "
            + "As returned by the Resource Manager. Returning default resubmission attempts "
            + DEFAULT_RESTART_RESUBMISSION_ATTEMPTS + ".");
        return DEFAULT_RESTART_RESUBMISSION_ATTEMPTS;
      }
      return 0;
    }
    int appAttempt = appAttemptID.getAttemptId();
    LOG.log(Level.FINE, "Application attempt: " + appAttempt);
    assert appAttempt > 0;
    // YARN attempt IDs start at 1 for the first submission, so subtract one
    // to report the number of RE-submissions.
    return appAttempt - 1;
  }
}
public class Client { /** * Delete the proxy history for the active profile
* @ throws Exception exception */
public void clearHistory ( ) throws Exception { } } | String uri ; try { uri = HISTORY + uriEncode ( _profileName ) ; doDelete ( uri , null ) ; } catch ( Exception e ) { throw new Exception ( "Could not delete proxy history" ) ; } |
public class DefaultPropertyPlaceholderResolver {
  /**
   * Resolves a single placeholder expression, first against the environment's properties and
   * then — when the expression looks like an environment-variable name — against the process
   * environment, converting the result to the requested type.
   *
   * @param context the context of the expression (the full placeholder text, used in error messages)
   * @param expression the expression to resolve
   * @param type the target class
   * @param <T> the type the expression should be converted to
   * @return the resolved and converted expression, or {@code null} if it could not be resolved
   */
  @Nullable
  protected <T> T resolveExpression(String context, String expression, Class<T> type) {
    if (environment.containsProperty(expression)) {
      // Property exists but may fail type conversion — surface that as a configuration error.
      return environment.getProperty(expression, type)
          .orElseThrow(() -> new ConfigurationException(
              "Could not resolve expression: [" + expression + "] in placeholder ${" + context + "}"));
    }
    if (NameUtils.isEnvironmentName(expression)) {
      String envVar = System.getenv(expression);
      if (StringUtils.isNotEmpty(envVar)) {
        return conversionService.convert(envVar, type)
            .orElseThrow(() -> new ConfigurationException(
                "Could not resolve expression: [" + expression + "] in placeholder ${" + context + "}"));
      }
    }
    // Not found anywhere: callers treat null as "unresolved".
    return null;
  }
}
public class Settings { /** * Reset Gosu Lab application - level settings to defaults . */
public static Map < String , ISettings > makeDefaultSettings ( ) { } } | Map < String , ISettings > settings = new TreeMap < > ( ) ; AppearanceSettings appearanceSettings = new AppearanceSettings ( ) ; appearanceSettings . resetToDefaultSettings ( null ) ; settings . put ( appearanceSettings . getPath ( ) , appearanceSettings ) ; return settings ; |
public class DoubleUtils {
  /**
   * Reports whether {@code value} lies within {@code tolerance} of the nearest integer
   * (as determined by {@link Math#round(double)}).
   *
   * @param value the value to test
   * @param tolerance the maximum allowed distance from an integer
   * @return {@code true} when the distance to the nearest integer is at most {@code tolerance}
   */
  public static boolean isCloseToIntegral(final double value, final double tolerance) {
    final double nearestIntegral = Math.round(value);
    return Math.abs(value - nearestIntegral) <= tolerance;
  }
}
public class TopicsInner { /** * List topic event types .
* List event types for a topic .
* @ param resourceGroupName The name of the resource group within the user ' s subscription .
* @ param providerNamespace Namespace of the provider of the topic
* @ param resourceTypeName Name of the topic type
* @ param resourceName Name of the topic
* @ throws IllegalArgumentException thrown if parameters fail the validation
* @ throws CloudException thrown if the request is rejected by server
* @ throws RuntimeException all other wrapped checked exceptions if the request fails to be sent
* @ return the List & lt ; EventTypeInner & gt ; object if successful . */
public List < EventTypeInner > listEventTypes ( String resourceGroupName , String providerNamespace , String resourceTypeName , String resourceName ) { } } | return listEventTypesWithServiceResponseAsync ( resourceGroupName , providerNamespace , resourceTypeName , resourceName ) . toBlocking ( ) . single ( ) . body ( ) ; |
public class CollectionJsonLinkDiscoverer { /** * ( non - Javadoc )
* @ see org . springframework . hateoas . core . JsonPathLinkDiscoverer # findLinksWithRel ( org . springframework . hateoas . LinkRelation , java . io . InputStream ) */
@ Override public Links findLinksWithRel ( LinkRelation relation , InputStream representation ) { } } | return relation . isSameAs ( IanaLinkRelations . SELF ) ? addSelfLink ( super . findLinksWithRel ( relation , representation ) , representation ) : super . findLinksWithRel ( relation , representation ) ; |
public class XPathContext { /** * Stringifies the XPath of the current node ' s parent . */
public String getParentXPath ( ) { } } | Iterator < Level > levelIterator = path . descendingIterator ( ) ; if ( levelIterator . hasNext ( ) ) { levelIterator . next ( ) ; } return getXPath ( levelIterator ) ; |
public class ChemSequence { /** * Grows the chemModel array by a given size .
* @ see growArraySize */
protected void growChemModelArray ( ) { } } | ChemModel [ ] newchemModels = new ChemModel [ chemModels . length + growArraySize ] ; System . arraycopy ( chemModels , 0 , newchemModels , 0 , chemModels . length ) ; chemModels = newchemModels ; |
public class TemplateMatcher { /** * / * - - - - - [ Character Streams ] - - - - - */
public void replace ( Reader reader , Writer writer , VariableResolver resolver ) throws IOException { } } | BufferedReader breader = reader instanceof BufferedReader ? ( BufferedReader ) reader : new BufferedReader ( reader ) ; BufferedWriter bwriter = writer instanceof BufferedWriter ? ( BufferedWriter ) writer : new BufferedWriter ( writer ) ; try { boolean firstLine = true ; for ( String line ; ( line = breader . readLine ( ) ) != null ; ) { if ( firstLine ) firstLine = false ; else bwriter . newLine ( ) ; bwriter . write ( replace ( line , resolver ) ) ; } } finally { try { breader . close ( ) ; } finally { bwriter . close ( ) ; } } |
public class AmfWriter { /** * Encodes an object in amf3 as an object of
* the specified type
* @ param obj The object to encode
* @ param marker The type marker for this object
* @ throws IOException */
public void encodeAmf3 ( Object obj , Amf3Type marker ) throws IOException { } } | out . write ( marker . ordinal ( ) ) ; if ( marker == Amf3Type . NULL ) { return ; // Null marker is enough
} serializeAmf3 ( obj , marker ) ; |
public class ReplaceableService { /** * Creates a service collection and starts it .
* @ see AbstractLifecycle # onStart */
@ Override protected void onStart ( ) { } } | m_serviceCollection = new ServiceCollection < T > ( m_context , m_serviceClass , new CollectionListener ( ) ) ; m_serviceCollection . start ( ) ; |
public class Day { /** * Compare this day to the specified day . If object is
* not of type Day a ClassCastException is thrown .
* @ param object Day object to compare to .
* @ return @ see Comparable # compareTo ( Object )
* @ throws IllegalArgumentException If day is null . */
public int compareTo ( Day object ) { } } | if ( object == null ) throw new IllegalArgumentException ( "day cannot be null" ) ; Day day = ( Day ) object ; return localDate . compareTo ( day . localDate ) ; |
public class SynchronizedPDUSender {
  /**
   * Sends a data_sm_resp PDU, serializing all writes on the target stream so concurrent
   * senders cannot interleave PDU bytes.
   *
   * @see org.jsmpp.PDUSender#sendDataSmResp(java.io.OutputStream, int, java.lang.String, org.jsmpp.bean.OptionalParameter[])
   */
  public byte[] sendDataSmResp(OutputStream os, int sequenceNumber, String messageId,
      OptionalParameter... optionalParameters) throws PDUStringException, IOException {
    // Lock on the stream itself: every send method in this class synchronizes on the
    // same monitor, guaranteeing whole-PDU atomicity on the wire.
    synchronized (os) {
      return pduSender.sendDataSmResp(os, sequenceNumber, messageId, optionalParameters);
    }
  }
}
public class JGroupsSubsystemXMLWriter {
  /**
   * Marshals the JGroups subsystem model to XML: an optional {@code channels} section
   * (channels with nested forks and protocols) followed by an optional {@code stacks}
   * section (stacks with transport, protocols, relay and sasl children).
   *
   * {@inheritDoc}
   * @see org.jboss.staxmapper.XMLElementWriter#writeContent(org.jboss.staxmapper.XMLExtendedStreamWriter, java.lang.Object)
   */
  @Override
  public void writeContent(XMLExtendedStreamWriter writer, SubsystemMarshallingContext context)
      throws XMLStreamException {
    context.startSubsystemElement(JGroupsSchema.CURRENT.getNamespaceUri(), false);
    ModelNode model = context.getModelNode();
    if (model.isDefined()) {
      // <channels> section: one <channel> per model entry, each possibly containing <fork>s.
      if (model.hasDefined(ChannelResourceDefinition.WILDCARD_PATH.getKey())) {
        writer.writeStartElement(Element.CHANNELS.getLocalName());
        JGroupsSubsystemResourceDefinition.DEFAULT_CHANNEL.marshallAsAttribute(model, writer);
        for (Property property : model.get(ChannelResourceDefinition.WILDCARD_PATH.getKey()).asPropertyList()) {
          writer.writeStartElement(Element.CHANNEL.getLocalName());
          writer.writeAttribute(Attribute.NAME.getLocalName(), property.getName());
          ModelNode channel = property.getValue();
          writeAttribute(writer, channel, ChannelResourceDefinition.STACK);
          writeAttribute(writer, channel, ChannelResourceDefinition.MODULE);
          if (channel.hasDefined(ForkResourceDefinition.WILDCARD_PATH.getKey())) {
            for (Property forkProperty : channel.get(ForkResourceDefinition.WILDCARD_PATH.getKey()).asPropertyList()) {
              writer.writeStartElement(Element.FORK.getLocalName());
              writer.writeAttribute(Attribute.NAME.getLocalName(), forkProperty.getName());
              ModelNode fork = forkProperty.getValue();
              if (fork.hasDefined(ProtocolResourceDefinition.WILDCARD_PATH.getKey())) {
                for (Property protocol : fork.get(ProtocolResourceDefinition.WILDCARD_PATH.getKey()).asPropertyList()) {
                  writeProtocol(writer, protocol);
                }
              }
              writer.writeEndElement();
            }
          }
          writer.writeEndElement();
        }
        writer.writeEndElement();
      }
      // <stacks> section: one <stack> per model entry with transport/protocol/relay/sasl children.
      if (model.hasDefined(StackResourceDefinition.WILDCARD_PATH.getKey())) {
        writer.writeStartElement(Element.STACKS.getLocalName());
        writeAttribute(writer, model, JGroupsSubsystemResourceDefinition.DEFAULT_STACK);
        for (Property property : model.get(StackResourceDefinition.WILDCARD_PATH.getKey()).asPropertyList()) {
          writer.writeStartElement(Element.STACK.getLocalName());
          writer.writeAttribute(Attribute.NAME.getLocalName(), property.getName());
          ModelNode stack = property.getValue();
          if (stack.hasDefined(TransportResourceDefinition.WILDCARD_PATH.getKey())) {
            writeTransport(writer, stack.get(TransportResourceDefinition.WILDCARD_PATH.getKey()).asProperty());
          }
          if (stack.hasDefined(ProtocolResourceDefinition.WILDCARD_PATH.getKey())) {
            for (Property protocol : stack.get(ProtocolResourceDefinition.WILDCARD_PATH.getKey()).asPropertyList()) {
              writeProtocol(writer, protocol);
            }
          }
          if (stack.get(RelayResourceDefinition.PATH.getKeyValuePair()).isDefined()) {
            writeRelay(writer, stack.get(RelayResourceDefinition.PATH.getKeyValuePair()));
          }
          if (stack.get(SaslResourceDefinition.PATH.getKeyValuePair()).isDefined()) {
            writeSasl(writer, stack.get(SaslResourceDefinition.PATH.getKeyValuePair()));
          }
          writer.writeEndElement();
        }
        writer.writeEndElement();
      }
    }
    writer.writeEndElement();
  }
}
public class SARLValidator {
  /**
   * Checks that the supertypes of the given capacity are subtypes of Capacity.
   *
   * @param capacity the type to test.
   */
  @Check(CheckType.FAST)
  public void checkSuperTypes(SarlCapacity capacity) {
    // Delegates to the general supertype checker with the capacity's extends clause.
    // NOTE(review): the final 'false' flag's meaning is not visible here — confirm
    // against the checkSuperTypes overload before relying on it.
    checkSuperTypes(capacity, SARL_CAPACITY__EXTENDS, capacity.getExtends(), Capacity.class, false);
  }
}
public class ns_image { /** * Use this API to fetch filtered set of ns _ image resources .
* set the filter parameter values in filtervalue object . */
public static ns_image [ ] get_filtered ( nitro_service service , filtervalue [ ] filter ) throws Exception { } } | ns_image obj = new ns_image ( ) ; options option = new options ( ) ; option . set_filter ( filter ) ; ns_image [ ] response = ( ns_image [ ] ) obj . getfiltered ( service , option ) ; return response ; |
public class GradientActivity { /** * Override the default behavior and colorize gradient instead of converting input image . */
@ Override protected void renderBitmapImage ( BitmapMode mode , ImageBase image ) { } } | switch ( mode ) { case UNSAFE : { // this application is configured to use double buffer and could ignore all other modes
VisualizeImageData . colorizeGradient ( derivX , derivY , - 1 , bitmap , bitmapTmp ) ; } break ; case DOUBLE_BUFFER : { VisualizeImageData . colorizeGradient ( derivX , derivY , - 1 , bitmapWork , bitmapTmp ) ; if ( bitmapLock . tryLock ( ) ) { try { Bitmap tmp = bitmapWork ; bitmapWork = bitmap ; bitmap = tmp ; } finally { bitmapLock . unlock ( ) ; } } } break ; } |
public class Config { /** * Returns a read - only { @ link com . hazelcast . core . ILock } configuration for
* the given name .
* The name is matched by pattern to the configuration and by stripping the
* partition ID qualifier from the given { @ code name } .
* If there is no config found by the name , it will return the configuration
* with the name { @ code default } .
* @ param name name of the lock config
* @ return the lock configuration
* @ throws ConfigurationException if ambiguous configurations are found
* @ see StringPartitioningStrategy # getBaseName ( java . lang . String )
* @ see # setConfigPatternMatcher ( ConfigPatternMatcher )
* @ see # getConfigPatternMatcher ( )
* @ see EvictionConfig # setSize ( int ) */
public LockConfig findLockConfig ( String name ) { } } | name = getBaseName ( name ) ; final LockConfig config = lookupByPattern ( configPatternMatcher , lockConfigs , name ) ; if ( config != null ) { return config . getAsReadOnly ( ) ; } return getLockConfig ( "default" ) . getAsReadOnly ( ) ; |
public class AWSIotClient { /** * Lists the role aliases registered in your account .
* @ param listRoleAliasesRequest
* @ return Result of the ListRoleAliases operation returned by the service .
* @ throws InvalidRequestException
* The request is not valid .
* @ throws ThrottlingException
* The rate exceeds the limit .
* @ throws UnauthorizedException
* You are not authorized to perform this operation .
* @ throws ServiceUnavailableException
* The service is temporarily unavailable .
* @ throws InternalFailureException
* An unexpected error has occurred .
* @ sample AWSIot . ListRoleAliases */
@ Override public ListRoleAliasesResult listRoleAliases ( ListRoleAliasesRequest request ) { } } | request = beforeClientExecution ( request ) ; return executeListRoleAliases ( request ) ; |
public class TaskRunner {
  /**
   * Runs a list of tasks, feeding the input file to the first task and each preceding
   * result to the next; the final result is written to the output. Progress is logged
   * and reported to registered listeners after each task. On failure the (partially
   * written) output file is deleted before the exception is rethrown.
   *
   * @param input the input file
   * @param output the output file
   * @param tasks the list of tasks
   * @return a list of runner results
   * @throws IOException if there is an I/O error
   * @throws TaskSystemException if there is a problem with the task system
   */
  public List<RunnerResult> runTasks(AnnotatedFile input, File output, List<InternalTask> tasks)
      throws IOException, TaskSystemException {
    Progress progress = new Progress();
    logger.info(name + " started on " + progress.getStart());
    int i = 0;
    NumberFormat nf = NumberFormat.getPercentInstance();
    // FIXME: implement temp file handling as per issue #47
    // Use the configured temp writer if any, else a default one — but only when
    // temp files are enabled at all.
    TempFileWriter tempWriter = writeTempFiles
        ? tempFileWriter != null ? tempFileWriter : new DefaultTempFileWriter.Builder().build()
        : null;
    List<RunnerResult> ret = new ArrayList<>();
    try (TaskRunnerCore itr = new TaskRunnerCore(input, output, tempWriter)) {
      for (InternalTask task : tasks) {
        ret.addAll(itr.runTask(task));
        i++;
        ProgressEvent event = progress.updateProgress(i / (double) tasks.size());
        logger.info(nf.format(event.getProgress()) + " done. ETC " + event.getETC());
        progressListeners.forEach(v -> v.accept(event));
      }
    } catch (IOException | TaskSystemException | RuntimeException e) {
      // This is called after the resource (fj) is closed.
      // Since the temp file handler is closed, the current (partial) state has already been
      // written to output — we do not want to keep it, so remove the file.
      if (!output.delete()) {
        output.deleteOnExit();
      }
      throw e;
    }
    if (!keepTempFilesOnSuccess && tempWriter != null) {
      // Process was successful; delete temp files.
      tempWriter.deleteTempFiles();
    }
    logger.info(name + " finished in " + Math.round(progress.timeSinceStart() / 100d) / 10d + " s");
    return ret;
  }
}
public class SpecializedOps_ZDRM { /** * Computes the quality of a triangular matrix , where the quality of a matrix
* is defined in { @ link LinearSolverDense # quality ( ) } . In
* this situation the quality is the magnitude of the product of
* each diagonal element divided by the magnitude of the largest diagonal element .
* If all diagonal elements are zero then zero is returned .
* @ return the quality of the system . */
public static double qualityTriangular ( ZMatrixRMaj T ) { } } | int N = Math . min ( T . numRows , T . numCols ) ; double max = elementDiagMaxMagnitude2 ( T ) ; if ( max == 0.0 ) return 0.0 ; max = Math . sqrt ( max ) ; int rowStride = T . getRowStride ( ) ; double qualityR = 1.0 ; double qualityI = 0.0 ; for ( int i = 0 ; i < N ; i ++ ) { int index = i * rowStride + i * 2 ; double real = T . data [ index ] / max ; double imaginary = T . data [ index ] / max ; double r = qualityR * real - qualityI * imaginary ; double img = qualityR * imaginary + real * qualityI ; qualityR = r ; qualityI = img ; } return Math . sqrt ( qualityR * qualityR + qualityI * qualityI ) ; |
public class NativeRSACoreEngine {
  /**
   * Initialises the RSA engine with the given key material, unwrapping any
   * randomness wrapper and caching either CRT factors (private keys) or the
   * public exponent/modulus.
   *
   * @param forEncryption true if we are encrypting, false otherwise
   * @param param the necessary RSA key parameters
   */
  public void init(boolean forEncryption, CipherParameters param) {
    // Unwrap ParametersWithRandom if present; the randomness itself is unused here.
    if (param instanceof ParametersWithRandom) {
      ParametersWithRandom rParam = (ParametersWithRandom) param;
      key = (RSAKeyParameters) rParam.getParameters();
    } else {
      key = (RSAKeyParameters) param;
    }
    this.forEncryption = forEncryption;
    if (key instanceof RSAPrivateCrtKeyParameters) {
      isPrivate = true;
      // we have the extra factors, use the Chinese Remainder Theorem - the author
      // wishes to express his thanks to Dirk Bonekaemper at rtsffm.com for
      // advice regarding the expression of this.
      RSAPrivateCrtKeyParameters crtKey = (RSAPrivateCrtKeyParameters) key;
      p = new GmpInteger(crtKey.getP());
      q = new GmpInteger(crtKey.getQ());
      dP = new GmpInteger(crtKey.getDP());
      dQ = new GmpInteger(crtKey.getDQ());
      qInv = crtKey.getQInv();
      // Clear public-path state so stale values cannot leak between inits.
      exponent = modulus = null;
    } else {
      isPrivate = false;
      exponent = new GmpInteger(key.getExponent());
      modulus = new GmpInteger(key.getModulus());
      // Small public exponents (< 64 bits) can take a faster native path.
      isSmallExponent = exponent.bitLength() < 64;
      // Clear CRT state for the same reason as above.
      p = q = dP = dQ = null;
      qInv = null;
    }
  }
}
public class C3P0PooledDataSourceProvider { /** * get the pooled datasource from c3p0 pool
* @ param aDbType a supported database type .
* @ param aDbUrl a connection url
* @ param aUsername a username for the database
* @ param aPassword a password for the database connection
* @ return a DataSource a pooled data source
* @ throws SQLException */
public DataSource openPooledDataSource ( DBType aDbType , String aDbUrl , String aUsername , String aPassword ) throws SQLException { } } | final DataSource unPooledDS = DataSources . unpooledDataSource ( aDbUrl , aUsername , aPassword ) ; // override the default properties with ours
final Map < String , String > props = this . getPoolingProperties ( aDbType ) ; return DataSources . pooledDataSource ( unPooledDS , props ) ; |
public class PersistedHttpMessagesList { /** * { @ inheritDoc }
* < strong > Note : < / strong > The returned message will be { @ code null } if an error occurred while loading the message from the
* database ( for example , no longer exists ) . */
@ Override public HttpMessage get ( int index ) { } } | try { return historyReferences . get ( index ) . getHttpMessage ( ) ; } catch ( HttpMalformedHeaderException | DatabaseException e ) { if ( LOGGER . isDebugEnabled ( ) ) { LOGGER . debug ( "Failed to get the message from DB: " + e . getMessage ( ) , e ) ; } } return null ; |
public class CPRuleAssetCategoryRelLocalServiceWrapper {
  /**
   * Returns the cp rule asset category rel with the primary key.
   *
   * @param CPRuleAssetCategoryRelId the primary key of the cp rule asset category rel
   * @return the cp rule asset category rel
   * @throws PortalException if a cp rule asset category rel with the primary key could not be found
   */
  @Override
  public com.liferay.commerce.product.model.CPRuleAssetCategoryRel getCPRuleAssetCategoryRel(
      long CPRuleAssetCategoryRelId)
      throws com.liferay.portal.kernel.exception.PortalException {
    // Pure delegation to the wrapped local service (generated wrapper pattern).
    return _cpRuleAssetCategoryRelLocalService.getCPRuleAssetCategoryRel(CPRuleAssetCategoryRelId);
  }
}
public class ParserRegistry { /** * Returns all warning parsers registered by extension points
* { @ link WarningsParser } and { @ link AbstractWarningsParser } .
* @ return the extension list */
@ SuppressWarnings ( "javadoc" ) private static List < AbstractWarningsParser > all ( ) { } } | Hudson instance = Hudson . getInstance ( ) ; if ( instance == null ) { return Lists . newArrayList ( ) ; } List < AbstractWarningsParser > parsers = Lists . newArrayList ( instance . getExtensionList ( AbstractWarningsParser . class ) ) ; addParsersWithDeprecatedApi ( instance , parsers ) ; return parsers ; |
public class CUDA_MEMCPY2D {
  /**
   * Creates and returns a string representation of this object, joining all
   * source/destination fields with the given separator.
   *
   * @param f separator placed between fields
   * @return a String representation of this object
   */
  private String createString(String f) {
    // NOTE: some labels intentionally carry a space before '=' ("srcHost =", etc.);
    // they are reproduced exactly as in the original output format.
    return "srcXInBytes=" + srcXInBytes + f + "srcY=" + srcY + f
        + "srcMemoryType=" + CUmemorytype.stringFor(srcMemoryType) + f
        + "srcHost =" + srcHost + f + "srcDevice =" + srcDevice + f
        + "srcArray =" + srcArray + f + "srcPitch=" + srcPitch + f
        + "dstXInBytes=" + dstXInBytes + f + "dstY=" + dstY + f
        + "dstMemoryType=" + CUmemorytype.stringFor(dstMemoryType) + f
        + "dstHost =" + dstHost + f + "dstDevice =" + dstDevice + f
        + "dstArray =" + dstArray + f + "dstPitch=" + dstPitch + f
        + "WidthInBytes=" + WidthInBytes + f + "Height=" + Height;
  }
}
public class Cluster { /** * A list of cluster security group that are associated with the cluster . Each security group is represented by an
* element that contains < code > ClusterSecurityGroup . Name < / code > and < code > ClusterSecurityGroup . Status < / code >
* subelements .
* Cluster security groups are used when the cluster is not created in an Amazon Virtual Private Cloud ( VPC ) .
* Clusters that are created in a VPC use VPC security groups , which are listed by the < b > VpcSecurityGroups < / b >
* parameter .
* < b > NOTE : < / b > This method appends the values to the existing list ( if any ) . Use
* { @ link # setClusterSecurityGroups ( java . util . Collection ) } or
* { @ link # withClusterSecurityGroups ( java . util . Collection ) } if you want to override the existing values .
* @ param clusterSecurityGroups
* A list of cluster security group that are associated with the cluster . Each security group is represented
* by an element that contains < code > ClusterSecurityGroup . Name < / code > and
* < code > ClusterSecurityGroup . Status < / code > subelements . < / p >
* Cluster security groups are used when the cluster is not created in an Amazon Virtual Private Cloud ( VPC ) .
* Clusters that are created in a VPC use VPC security groups , which are listed by the
* < b > VpcSecurityGroups < / b > parameter .
* @ return Returns a reference to this object so that method calls can be chained together . */
public Cluster withClusterSecurityGroups ( ClusterSecurityGroupMembership ... clusterSecurityGroups ) { } } | if ( this . clusterSecurityGroups == null ) { setClusterSecurityGroups ( new com . amazonaws . internal . SdkInternalList < ClusterSecurityGroupMembership > ( clusterSecurityGroups . length ) ) ; } for ( ClusterSecurityGroupMembership ele : clusterSecurityGroups ) { this . clusterSecurityGroups . add ( ele ) ; } return this ; |
public class ParallelIndexSupervisorTask { /** * { @ link ParallelIndexSubTask } s call this API to report the segments they ' ve generated and pushed . */
@ POST @ Path ( "/report" ) @ Consumes ( SmileMediaTypes . APPLICATION_JACKSON_SMILE ) public Response report ( PushedSegmentsReport report , @ Context final HttpServletRequest req ) { } } | ChatHandlers . authorizationCheck ( req , Action . WRITE , getDataSource ( ) , authorizerMapper ) ; if ( runner == null ) { return Response . status ( Response . Status . SERVICE_UNAVAILABLE ) . entity ( "task is not running yet" ) . build ( ) ; } else { runner . collectReport ( report ) ; return Response . ok ( ) . build ( ) ; } |
public class ChildFirst {
  /**
   * Loads a given set of class descriptions and their binary representations using a
   * child-first class loader.
   *
   * @param classLoader The parent class loader.
   * @param types The unloaded types to be loaded.
   * @param protectionDomain The protection domain to apply where {@code null} references an implicit protection domain.
   * @param persistenceHandler The persistence handler of the created class loader.
   * @param packageDefinitionStrategy The package definer to be queried for package definitions.
   * @param forbidExisting {@code true} if the class loading should throw an exception if a class was already loaded by a parent class loader.
   * @param sealed {@code true} if the class loader should be sealed.
   * @return A map of the given type descriptions pointing to their loaded representations.
   */
  @SuppressFBWarnings(value = "DP_CREATE_CLASSLOADER_INSIDE_DO_PRIVILEGED",
      justification = "Privilege is explicit user responsibility")
  public static Map<TypeDescription, Class<?>> load(ClassLoader classLoader,
      Map<TypeDescription, byte[]> types,
      ProtectionDomain protectionDomain,
      PersistenceHandler persistenceHandler,
      PackageDefinitionStrategy packageDefinitionStrategy,
      boolean forbidExisting,
      boolean sealed) {
    // Re-key the class files by binary name for the class loader's lookup.
    Map<String, byte[]> typesByName = new HashMap<String, byte[]>();
    for (Map.Entry<TypeDescription, byte[]> entry : types.entrySet()) {
      typesByName.put(entry.getKey().getName(), entry.getValue());
    }
    classLoader = new ChildFirst(classLoader, sealed, typesByName, protectionDomain,
        persistenceHandler, packageDefinitionStrategy, NoOpClassFileTransformer.INSTANCE);
    // LinkedHashMap preserves the iteration order of the requested types.
    Map<TypeDescription, Class<?>> result = new LinkedHashMap<TypeDescription, Class<?>>();
    for (TypeDescription typeDescription : types.keySet()) {
      try {
        // 'false' — resolve only, do not initialize the class yet.
        Class<?> type = Class.forName(typeDescription.getName(), false, classLoader);
        if (forbidExisting && type.getClassLoader() != classLoader) {
          // A parent already defined this class: child-first loading was bypassed.
          throw new IllegalStateException("Class already loaded: " + type);
        }
        result.put(typeDescription, type);
      } catch (ClassNotFoundException exception) {
        throw new IllegalStateException("Cannot load class " + typeDescription, exception);
      }
    }
    return result;
  }
}
public class Locale { /** * Replies the text that corresponds to the specified resource .
* < p > This function assumes the classname of the caller as the
* resource provider .
* @ param key is the name of the resource into the specified file
* @ param params is the the list of parameters which will
* replaces the < code > # 1 < / code > , < code > # 2 < / code > . . . into the string .
* @ return the text that corresponds to the specified resource */
public static String getString ( String key , Object ... params ) { } } | final Class < ? > resource = detectResourceClass ( null ) ; return getString ( ClassLoaderFinder . findClassLoader ( ) , resource , key , params ) ; |
public class Genotype { /** * Create a new { @ code Genotype } which consists of { @ code n } chromosomes ,
* which are created by the given { @ code factory } . This method can be used
* for easily creating a < i > gene matrix < / i > . The following example will
* create a 10x5 { @ code DoubleGene } < i > matrix < / i > .
* < pre > { @ code
* final Genotype < DoubleGene > gt = Genotype
* . of ( DoubleChromosome . of ( 0.0 , 1.0 , 10 ) , 5 ) ;
* } < / pre >
* @ since 3.0
* @ param < G > the gene type
* @ param factory the factory which creates the chromosomes this genotype
* consists of
* @ param n the number of chromosomes this genotype consists of
* @ return new { @ code Genotype } containing { @ code n } chromosomes
* @ throws IllegalArgumentException if { @ code n < 1 } .
* @ throws NullPointerException if the { @ code factory } is { @ code null } . */
public static < G extends Gene < ? , G > > Genotype < G > of ( final Factory < ? extends Chromosome < G > > factory , final int n ) { } } | final ISeq < Chromosome < G > > ch = ISeq . of ( factory :: newInstance , n ) ; return new Genotype < > ( ch ) ; |
public class ManagedDatabasesInner { /** * Creates a new database or updates an existing database .
* @ param resourceGroupName The name of the resource group that contains the resource . You can obtain this value from the Azure Resource Manager API or the portal .
* @ param managedInstanceName The name of the managed instance .
* @ param databaseName The name of the database .
* @ param parameters The requested database resource state .
* @ throws IllegalArgumentException thrown if parameters fail the validation
* @ return the observable for the request */
public Observable < ManagedDatabaseInner > createOrUpdateAsync ( String resourceGroupName , String managedInstanceName , String databaseName , ManagedDatabaseInner parameters ) { } } | return createOrUpdateWithServiceResponseAsync ( resourceGroupName , managedInstanceName , databaseName , parameters ) . map ( new Func1 < ServiceResponse < ManagedDatabaseInner > , ManagedDatabaseInner > ( ) { @ Override public ManagedDatabaseInner call ( ServiceResponse < ManagedDatabaseInner > response ) { return response . body ( ) ; } } ) ; |
public class Stylesheet { /** * Add a stylesheet to the " import " list .
* @ see < a href = " http : / / www . w3 . org / TR / xslt # import " > import in XSLT Specification < / a >
* @ param v Stylesheet to add to the import list */
public void setImport ( StylesheetComposed v ) { } } | if ( null == m_imports ) m_imports = new Vector ( ) ; // I ' m going to insert the elements in backwards order ,
// so I can walk them 0 to n .
m_imports . addElement ( v ) ; |
public class NaetherImpl { /** * / * ( non - Javadoc )
* @ see com . tobedevoured . naether . api . Naether # addRemoteRepositoryByUrl ( java . lang . String ) */
public void addRemoteRepositoryByUrl ( String url ) throws NaetherException { } } | try { addRemoteRepository ( RepoBuilder . remoteRepositoryFromUrl ( url ) ) ; } catch ( MalformedURLException e ) { log . error ( "Malformed url: {}" , url , e ) ; throw new NaetherException ( e ) ; } |
public class GraphicUtils {
    /**
     * Draws a circle with the specified diameter using the given point coordinates
     * as center, and fills it with the current color of the graphics context.
     *
     * @param g Graphics context
     * @param centerX X coordinate of circle center
     * @param centerY Y coordinate of circle center
     * @param diam Circle diameter
     */
    public static void fillCircle(Graphics g, int centerX, int centerY, int diam) {
        // diam / 2 is integer division (truncating), as in the original; the
        // previous (int) casts were redundant since all operands are already int.
        int radius = diam / 2;
        g.fillOval(centerX - radius, centerY - radius, diam, diam);
    }
}
public class CollectionUtil { /** * / * - - - - - [ To Primitive Array ] - - - - - */
public static boolean [ ] toBooleanArray ( Collection < Boolean > c ) { } } | boolean arr [ ] = new boolean [ c . size ( ) ] ; int i = 0 ; for ( Boolean item : c ) arr [ i ++ ] = item ; return arr ; |
public class DDLCompiler { /** * Checks whether or not the start of the given identifier is java ( and
* thus DDL ) compliant . An identifier may start with : _ [ a - zA - Z ] $
* @ param identifier the identifier to check
* @ param statement the statement where the identifier is
* @ return the given identifier unmodified
* @ throws VoltCompilerException when it is not compliant */
private String checkIdentifierStart ( final String identifier , final String statement ) throws VoltCompilerException { } } | assert identifier != null && ! identifier . trim ( ) . isEmpty ( ) ; assert statement != null && ! statement . trim ( ) . isEmpty ( ) ; int loc = 0 ; do { if ( ! Character . isJavaIdentifierStart ( identifier . charAt ( loc ) ) ) { String msg = "Unknown indentifier in DDL: \"" + statement . substring ( 0 , statement . length ( ) - 1 ) + "\" contains invalid identifier \"" + identifier + "\"" ; throw m_compiler . new VoltCompilerException ( msg ) ; } loc = identifier . indexOf ( '.' , loc ) + 1 ; } while ( loc > 0 && loc < identifier . length ( ) ) ; return identifier ; |
public class Reader { /** * A comma - separated list of paths pointing to a folder , jar or zip which contains EMF resources .
* Example use ( MWE2 ) :
* < code >
* path = " . / foo / bar . jar , . / src / main / model "
* < / code >
* Example use ( MWE1 ) :
* < code >
* & lt ; path value = " . / foo / bar . jar , . / src / main / model " / & gt ;
* < / code > */
public void addPath ( String path ) { } } | for ( String p : path . split ( "," ) ) { this . pathes . add ( p . trim ( ) ) ; } |
public class SimonUtils { /** * Returns multi - line string containing Simon tree starting with the specified Simon .
* Root Simon can be used to obtain tree with all Simons . Returns { @ code null } for
* input value of null or for NullSimon or any Simon with name equal to null ( anonymous
* Simons ) - this is also the case when the Manager is disabled and tree for its root
* Simon is requested .
* @ param simon root Simon of the output tree
* @ return string containing the tree or null if the Simon is null Simon */
public static String simonTreeString ( Simon simon ) { } } | if ( simon == null || simon . getName ( ) == null ) { return null ; } StringBuilder sb = new StringBuilder ( ) ; printSimonTree ( 0 , simon , sb ) ; return sb . toString ( ) ; |
public class Closeables { /** * Closes the given { @ link Reader } , logging any { @ code IOException } that ' s thrown rather than
* propagating it .
* < p > While it ' s not safe in the general case to ignore exceptions that are thrown when closing
* an I / O resource , it should generally be safe in the case of a resource that ' s being used only
* for reading , such as a { @ code Reader } . Unlike with writable resources , there ' s no chance that
* a failure that occurs when closing the reader indicates a meaningful problem such as a failure
* to flush all bytes to the underlying resource .
* @ param reader the reader to be closed , or { @ code null } in which case this method does nothing
* @ since 17.0 */
public static void closeQuietly ( @ Nullable Reader reader ) { } } | try { close ( reader , true ) ; } catch ( IOException impossible ) { throw new AssertionError ( impossible ) ; } |
public class IteratorStream { @ Override public < K , V , C extends Collection < V > , M extends Multimap < K , V , C > > M toMultimap ( Function < ? super T , ? extends K > keyMapper , Function < ? super T , ? extends V > valueMapper , Supplier < ? extends M > mapFactory ) { } } | assertNotClosed ( ) ; try { final M result = mapFactory . get ( ) ; T next = null ; while ( elements . hasNext ( ) ) { next = elements . next ( ) ; result . put ( keyMapper . apply ( next ) , valueMapper . apply ( next ) ) ; } return result ; } finally { close ( ) ; } |
public class RandomLong { /** * Generates a random sequence of longs starting from 0 like : [ 0,1,2,3 . . . ? ? ]
* @ param min minimum value of the long that will be generated . If ' max ' is
* omitted , then ' max ' is set to ' min ' and ' min ' is set to 0.
* @ param max ( optional ) maximum value of the long that will be generated .
* Defaults to ' min ' if omitted .
* @ return generated array of longs . */
public static List < Long > sequence ( long min , long max ) { } } | long count = nextLong ( min , max ) ; List < Long > result = new ArrayList < Long > ( ) ; for ( long i = 0 ; i < count ; i ++ ) result . add ( i ) ; return result ; |
public class ObjectManagerState {
    /**
     * Create a checkpoint of this ObjectManagerState and all active transactions
     * in the Log, sufficient to restart from.
     *
     * <p>Sequence: complete orphan transactions, optionally write a checkpoint
     * start record and mark active transactions, pause briefly to let in-flight
     * work drain, checkpoint marked transactions, flush the object stores, then
     * (for persistent checkpoints) write the end record, truncate the log and
     * back out the largest transaction if the log is still too full.
     *
     * @param persistentTransactions true if we checkpoint both persistent (logged)
     *     and non persistent objects, otherwise we only checkpoint the non
     *     persistent unlogged objects.
     * @throws ObjectManagerException
     */
    private void performCheckpoint(boolean persistentTransactions) throws ObjectManagerException {
        final String methodName = "performCheckpoint";
        if (Tracing.isAnyTracingEnabled() && trace.isEntryEnabled())
            trace.entry(this, cclass, methodName, new Boolean(persistentTransactions));

        if (gatherStatistics) {
            long now = System.currentTimeMillis();
            waitingBetweenCheckpointsMilliseconds += now - lastCheckpointMilliseconds;
            lastCheckpointMilliseconds = now;
        } // if (gatherStatistics).

        // Recover and complete Orphan transactions.
        completeOrphanTransactions();

        long logSequenceNumber = 0;
        if (persistentTransactions) {
            // Mark the point in the log that we can truncate to once checkpoint is complete,
            // then write start of checkpoint to the log. This makes sure that the CheckpointStart
            // is recovered first, which ensures that all of the ObjectStores are known at startup.
            CheckpointStartLogRecord checkpointStartLogRecord = new CheckpointStartLogRecord(this);
            // We don't release the reserved space for the Checkpoint log records as
            // we will need to re-reserve it after the checkpoint has completed. Instead we just
            // suppress the check on the log space because we reserved the space for the
            // checkpoint start and end records earlier.
            // Updates to QueueManagerState must be recorded in the log before we
            // flush the ObjectStores, otherwise we risk having named objects in the stores
            // which are not known in the ObjectManager.
            logSequenceNumber = logOutput.markAndWriteNext(checkpointStartLogRecord,
                                                           0,
                                                           false, // Do not check the log space, we reserved it earlier.
                                                           true);

            if (gatherStatistics) {
                long now = System.currentTimeMillis();
                flushingCheckpointStartMilliseconds += now - lastCheckpointMilliseconds;
                lastCheckpointMilliseconds = now;
            } // if (gatherStatistics).

            checkpointStarting = CHECKPOINT_STARTING_PERSISTENT;

            // Mark all active transactions as requiring a checkpoint.
            for (java.util.Iterator registeredTransactionIterator = registeredInternalTransactions.values().iterator(); registeredTransactionIterator.hasNext();) {
                InternalTransaction registeredTransaction = (InternalTransaction) (registeredTransactionIterator.next());
                // The registered transaction might be null if it was removed from the map after
                // we built the iterator. See ConcurrentHashMap and java.util.Map.Entry.getValue().
                if (registeredTransaction != null)
                    registeredTransaction.setRequiresCheckpoint();
            } // for registeredInternalTransactions.
        } else {
            checkpointStarting = CHECKPOINT_STARTING_NONPERSISTENT;
        } // if (persistentTransactions).

        // From now on, new transactions write to the following checkpoint in the file store.
        checkpointStarting = CHECKPOINT_STARTING_NO;

        // Pause so that:
        // 1) In flight persistent transactions might complete, avoiding the need to write a
        //    checkpoint record for them.
        // 2) Objects in the checkpoint set which will be written to the Object Store might get
        //    deleted, avoiding the need to write them.
        Object lock = new Object();
        synchronized (lock) {
            try {
                if (Tracing.isAnyTracingEnabled() && trace.isDebugEnabled())
                    trace.debug(this, cclass, methodName, new Object[] { "wait:867", new Integer(checkpointDelay) });
                lock.wait(checkpointDelay); // Let some transactions complete.
            } catch (InterruptedException exception) {
                // No FFDC Code Needed.
                ObjectManager.ffdc.processException(this, cclass, methodName, exception, "1:877:1.62");
                if (Tracing.isAnyTracingEnabled() && trace.isEntryEnabled())
                    trace.exit(this, cclass, methodName, exception);
                throw new UnexpectedExceptionException(this, exception);
            } // catch (InterruptedException exception).
        } // synchronized (lock).

        if (gatherStatistics) {
            long now = System.currentTimeMillis();
            pausedDuringCheckpointMilliseconds += now - lastCheckpointMilliseconds;
            lastCheckpointMilliseconds = now;
        } // if (gatherStatistics).

        // Now that the log is forced tell the active transactions they can safely update the
        // ObjectStores on the assumption that their logRecords prior to logSequenceNumber have
        // been forced to disk. The Transactions take their checkpoint on the assumption that the
        // ObjectStores contain their ManagedObjects. The Object Stores have not been forced yet
        // but the log has, so this is a safe assumption.
        long transactionsRequiringCheckpoint = 0;
        for (java.util.Iterator registeredTransactionIterator = registeredInternalTransactions.values().iterator(); registeredTransactionIterator.hasNext();) {
            InternalTransaction registeredTransaction = (InternalTransaction) (registeredTransactionIterator.next());
            // The registered transaction might be null if it was removed from the map after
            // we built the iterator. See ConcurrentHashMap and java.util.Map.Entry.getValue().
            // Checkpoint only transactions that have not cleared their requirement for a checkpoint.
            if (registeredTransaction != null && registeredTransaction.requiresPersistentCheckpoint) {
                registeredTransaction.checkpoint(logSequenceNumber);
                transactionsRequiringCheckpoint++;
            } // if ... registeredTransaction.requiresCheckpoint.
        } // for registeredInternalTransactions.

        if (gatherStatistics) {
            totalTransactionsRequiringCheckpoint = totalTransactionsRequiringCheckpoint + transactionsRequiringCheckpoint;
            maximumTransactionsInAnyCheckpoint = Math.max(transactionsRequiringCheckpoint, maximumTransactionsInAnyCheckpoint);
        } // if (gatherStatistics).

        // Flush ObjectStores to disk.
        // Loop over a copy of the store collection, in case a new one is registered while we do this.
        ObjectStore[] stores;
        synchronized (objectStores) {
            stores = (ObjectStore[]) objectStores.values().toArray(new ObjectStore[objectStores.size()]);
        }
        for (int i = 0; i < stores.length; i++) {
            // Let the store decide whether to flush according to its storage strategy.
            if (persistentTransactions)
                stores[i].flush();
            else if (!stores[i].getPersistence())
                stores[i].flush();
        } // for.

        if (gatherStatistics) {
            long now = System.currentTimeMillis();
            flushingObjectStoresForCheckpointMilliseconds += now - lastCheckpointMilliseconds;
            lastCheckpointMilliseconds = now;
            totalCheckpointsTaken++;
        } // if (gatherStatistics).

        if (persistentTransactions) {
            // Write end of checkpoint to the log.
            CheckpointEndLogRecord checkpointEndLogRecord = new CheckpointEndLogRecord();
            logOutput.writeNext(checkpointEndLogRecord,
                                0,
                                false, // Do not check the log space, we reserved it earlier.
                                false);
            // Force the log and truncate to the mark point set when we wrote checkpointStartLogRecord.
            logOutput.truncate();

            if (gatherStatistics) {
                long now = System.currentTimeMillis();
                flushingEndCheckpointMilliseconds += now - lastCheckpointMilliseconds;
                lastCheckpointMilliseconds = now;
            } // if (gatherStatistics).

            // If the log is still too full start backing out transactions.
            synchronized (registeredInternalTransactions) {
                if (logOutput.isOcupancyHigh()) {
                    // Reduce other work in the ObjectManager so that checkpointing can catch up.
                    currentMaximumActiveTransactions = Math.max(registeredInternalTransactions.size() / 2, 2);

                    // Find the biggest transaction in terms of its reserved space in the log.
                    InternalTransaction biggestTransaction = null;
                    long biggestTransactionSize = 0;
                    for (java.util.Iterator transactionIterator = registeredInternalTransactions.values().iterator(); transactionIterator.hasNext();) {
                        InternalTransaction internalTransaction = (InternalTransaction) transactionIterator.next();
                        long transactionSize = internalTransaction.getLogSpaceReserved();
                        if (transactionSize > biggestTransactionSize && internalTransaction.state == Transaction.stateActivePersistent) {
                            biggestTransaction = internalTransaction;
                            biggestTransactionSize = transactionSize;
                        } // if (transactionSize ...
                    } // for ... registeredInternalTransactions.

                    if (biggestTransaction != null) {
                        Transaction transaction = biggestTransaction.getExternalTransaction();
                        // We cannot synchronize on the internal transaction because we might deadlock an
                        // application thread. Application threads may synchronize on their ManagedObjects
                        // and then invoke synchronized transaction methods, so we must not hold a
                        // transaction lock when calling backout. For example LinkedLists synchronize on
                        // the list when the preBackout callback is made then they invoke synchronized
                        // transaction methods. If we hold the transaction lock first we would deadlock
                        // another thread making transaction calls while synchronized on the list.
                        // We just catch the state exception and discard it.
                        try {
                            if (biggestTransaction.state == Transaction.stateActivePersistent) {
                                // It is possible that the biggest transaction has now moved on to some
                                // other work, but we need to backout something so that will have to do.
                                if (Tracing.isAnyTracingEnabled() && trace.isDebugEnabled())
                                    trace.debug(this, cclass, methodName, new Object[] { "Log too full, backing out biggestTransaction:995", biggestTransaction });
                                trace.warning(this, cclass, methodName, "ObjectManagerState_LogTooFull", biggestTransaction);
                                transaction.setTerminationReason(Transaction.terminatedLogTooFull);
                                transaction.backout(false);
                            } // if (biggestTransaction.state ...
                        } catch (InvalidStateException exception) {
                            // No FFDC Code Needed, condition expected when we race with the application thread.
                            if (Tracing.isAnyTracingEnabled() && trace.isEventEnabled())
                                trace.event(this, cclass, methodName, exception);
                        } // catch (StateErrorException exception).
                        requestCheckpoint(true); // Trigger another checkpoint to see if things have got better.
                    }
                } else { // log file not particularly full.
                    // We had previously reduced the number of active transactions, this will allow
                    // the number to recover. As new transactions come along they will run and the
                    // backlog will be cleared as transactions finish.
                    currentMaximumActiveTransactions = maximumActiveTransactions;
                } // if (((FileLogOutput) logOutput).getOcupancy() ...

                // If we have more transactions than we need release some freeTransactions.
                if (totalTransactions > currentMaximumActiveTransactions)
                    totalTransactions = totalTransactions - freeTransactions.clear(totalTransactions - currentMaximumActiveTransactions);
            } // synchronized (registeredInternalTransactions).
        } // if (persistentTransactions).

        // Finally reset transaction counters since last checkpoint.
        // NOTE(review): only the persistent counter reset is guarded by the if;
        // the non-persistent counter is reset unconditionally.
        if (persistentTransactions)
            persistentTransactionsSinceLastCheckpoint = 0;
        nonPersistentTransactionsSinceLastCheckpoint = 0;

        if (Tracing.isAnyTracingEnabled() && trace.isEntryEnabled())
            trace.exit(this, cclass, methodName);
    }
}
public class EncryptUtil { /** * md5加密 < / br >
* @ praram str 需要进行md5加密的字符
* @ return 已进行md5的加密的字符 */
public static String md5 ( String str ) { } } | String md5 = encode ( str , "MD5" ) . toLowerCase ( ) ; return md5 ; |
public class MapModel { /** * Add layer selection handler .
* @ param handler
* the handler to be registered
* @ return handler registration
* @ since 1.6.0 */
@ Api public HandlerRegistration addLayerSelectionHandler ( final LayerSelectionHandler handler ) { } } | return handlerManager . addHandler ( LayerSelectionHandler . TYPE , handler ) ; |
public class AmortizedPQueue { /** * / * These 2 methods not guaranteed to be fast . */
public PCollection < E > minus ( Object e ) { } } | return Empty . < E > vector ( ) . plusAll ( this ) . minus ( e ) ; |
public class BuyerPriceCategory { /** * < p > Setter for buyer . < / p >
* @ param pBuyer reference */
public final void setBuyer ( final OnlineBuyer pBuyer ) { } } | this . buyer = pBuyer ; if ( this . itsId == null ) { this . itsId = new BuyerPriceCategoryId ( ) ; } this . itsId . setBuyer ( this . buyer ) ; |
public class GenericsUtils {
    /**
     * For a given classnode, fills in the supplied map with the parameterized
     * types it defines.
     *
     * @param node the class node to check
     * @param map the generics type information collector (placeholder name -> resolved type)
     */
    public static void extractPlaceholders(ClassNode node, Map<GenericsTypeName, GenericsType> map) {
        if (node == null) return;
        // Arrays carry their generics on the component type.
        if (node.isArray()) {
            extractPlaceholders(node.getComponentType(), map);
            return;
        }
        // Nothing to extract unless this node actually parameterizes a redirect.
        if (!node.isUsingGenerics() || !node.isRedirectNode()) return;
        GenericsType[] parameterized = node.getGenericsTypes();
        if (parameterized == null || parameterized.length == 0) return;
        GenericsType[] redirectGenericsTypes = node.redirect().getGenericsTypes();
        // Fall back to the parameterized types when the redirect carries none, or
        // when a placeholder node disagrees on arity (see GROOVY-8609).
        if (redirectGenericsTypes == null ||
                (node.isGenericsPlaceHolder() && redirectGenericsTypes.length != parameterized.length) /* GROOVY-8609 */) {
            redirectGenericsTypes = parameterized;
        }
        if (redirectGenericsTypes.length != parameterized.length) {
            throw new GroovyBugError("Expected earlier checking to detect generics parameter arity mismatch" +
                    "\nExpected: " + node.getName() + toGenericTypesString(redirectGenericsTypes) +
                    "\nSupplied: " + node.getName() + toGenericTypesString(parameterized));
        }
        // Pair each placeholder in the redirect with the corresponding supplied type,
        // recording only first occurrences.
        List<GenericsType> valueList = new LinkedList<>();
        for (int i = 0; i < redirectGenericsTypes.length; i++) {
            GenericsType redirectType = redirectGenericsTypes[i];
            if (redirectType.isPlaceholder()) {
                GenericsTypeName name = new GenericsTypeName(redirectType.getName());
                if (!map.containsKey(name)) {
                    GenericsType value = parameterized[i];
                    map.put(name, value);
                    valueList.add(value);
                }
            }
        }
        // Recurse into the newly collected values: wildcard bounds, or the value's
        // own type when it is itself parameterized (but not a bare placeholder).
        for (GenericsType value : valueList) {
            if (value.isWildcard()) {
                ClassNode lowerBound = value.getLowerBound();
                if (lowerBound != null) {
                    extractPlaceholders(lowerBound, map);
                }
                ClassNode[] upperBounds = value.getUpperBounds();
                if (upperBounds != null) {
                    for (ClassNode upperBound : upperBounds) {
                        extractPlaceholders(upperBound, map);
                    }
                }
            } else if (!value.isPlaceholder()) {
                extractPlaceholders(value.getType(), map);
            }
        }
    }
}
public class Delay { /** * Creates a new { @ link ExponentialDelay } on with custom boundaries and factor ( eg . with upper 9000 , lower 0 , powerOf 10 : 1,
* 10 , 100 , 1000 , 9000 , 9000 , 9000 , . . . ) .
* @ param lower the lower boundary , must be non - negative
* @ param upper the upper boundary , must be greater than the lower boundary
* @ param powersOf the base for exponential growth ( eg . powers of 2 , powers of 10 , etc . . . ) , must be non - negative and greater
* than 1
* @ param targetTimeUnit the unit of the delay .
* @ return a created { @ link ExponentialDelay } .
* @ since 5.0 */
public static Delay exponential ( Duration lower , Duration upper , int powersOf , TimeUnit targetTimeUnit ) { } } | LettuceAssert . notNull ( lower , "Lower boundary must not be null" ) ; LettuceAssert . isTrue ( lower . toNanos ( ) >= 0 , "Lower boundary must be greater or equal to 0" ) ; LettuceAssert . notNull ( upper , "Upper boundary must not be null" ) ; LettuceAssert . isTrue ( upper . toNanos ( ) > lower . toNanos ( ) , "Upper boundary must be greater than the lower boundary" ) ; LettuceAssert . isTrue ( powersOf > 1 , "PowersOf must be greater than 1" ) ; LettuceAssert . notNull ( targetTimeUnit , "Target TimeUnit must not be null" ) ; return new ExponentialDelay ( lower , upper , powersOf , targetTimeUnit ) ; |
public class ReferenceCollectingCallback {
    /**
     * Updates block stack and decides whether to descend into {@code n}.
     * Returns false to skip subtrees that have already been (or are being)
     * traversed out-of-band, so reference lists stay in source order.
     */
    @Override
    public boolean shouldTraverse(NodeTraversal nodeTraversal, Node n, Node parent) {
        // We automatically traverse a hoisted function body when that function
        // is first referenced, so that the reference lists are in the right order.
        // TODO(nicksantos): Maybe generalize this to a continuation mechanism
        // like in RemoveUnusedCode.
        if (NodeUtil.isHoistedFunctionDeclaration(n)) {
            Node nameNode = n.getFirstChild();
            Var functionVar = nodeTraversal.getScope().getVar(nameNode.getString());
            checkNotNull(functionVar);
            // Already fully traversed via an earlier reference: skip it now.
            if (finishedFunctionTraverse.contains(functionVar)) {
                return false;
            }
            startedFunctionTraverse.add(functionVar);
        }
        // If node is a new basic block, put on basic block stack.
        if (isBlockBoundary(n, parent)) {
            blockStack.add(new BasicBlock(peek(blockStack), n));
        }
        // Add the second x before the first one in "let [x] = x;". VariableReferenceCheck
        // relies on reference order to give a warning. We traverse the RHS first,
        // then the LHS, and return false so the default order is not also applied.
        if ((n.isDefaultValue() || n.isDestructuringLhs()) && n.hasTwoChildren()) {
            Scope scope = nodeTraversal.getScope();
            nodeTraversal.traverseInnerNode(n.getSecondChild(), n, scope);
            nodeTraversal.traverseInnerNode(n.getFirstChild(), n, scope);
            return false;
        }
        return true;
    }
}
public class FileHandlerUtil { /** * Generate file name .
* @ param date the date
* @ return the string */
public static String generateAuditArchiveFileName ( Date date ) { } } | StringBuffer name = new StringBuffer ( ) ; name . append ( "Audit_Archive-" ) . append ( AuditUtil . dateToString ( date , "yyyy-MM-dd" ) ) . append ( CoreConstants . AUDIT_ARCHIVE_EXTENTION ) ; return name . toString ( ) ; |
public class Caster { /** * cast a Object to a boolean value ( primitive value type )
* @ param str String to cast
* @ return casted boolean value
* @ throws PageException */
public static boolean toBooleanValue ( String str ) throws PageException { } } | Boolean b = toBoolean ( str , null ) ; if ( b != null ) return b . booleanValue ( ) ; throw new CasterException ( "Can't cast String [" + CasterException . crop ( str ) + "] to a boolean" ) ; |
public class CmsUploadDialogImpl { /** * Adds the given file input field to this dialog . < p >
* @ param fileInput the file input field to add */
@ Override protected void addFileInput ( CmsFileInput fileInput ) { } } | // add the files selected by the user to the list of files to upload
if ( fileInput != null ) { m_inputsToUpload . put ( fileInput . getFiles ( ) [ 0 ] . getFileName ( ) , fileInput ) ; } super . addFileInput ( fileInput ) ; |
public class FileUtil { /** * 从文件中读取每一行数据
* @ param url 文件的URL
* @ param charset 字符集
* @ return 文件中的每行内容的集合List
* @ throws IORuntimeException IO异常 */
public static List < String > readLines ( URL url , String charset ) throws IORuntimeException { } } | return readLines ( url , charset , new ArrayList < String > ( ) ) ; |
public class TraceStep { /** * Returns the last step in this execution .
* @ return this step if it has no chidlren or the last step from the children . */
public TraceStep getLastStep ( ) { } } | TraceStep result = this ; while ( true ) { if ( result . children == null || result . children . size ( ) == 0 ) return result ; result = result . children . get ( result . children . size ( ) - 1 ) ; } |
public class DefaultRedirectResolver { /** * Compares two strings but treats empty string or null equal
* @ param str1
* @ param str2
* @ return true if strings are equal , false otherwise */
private boolean isEqual ( String str1 , String str2 ) { } } | if ( StringUtils . isEmpty ( str1 ) && StringUtils . isEmpty ( str2 ) ) { return true ; } else if ( ! StringUtils . isEmpty ( str1 ) ) { return str1 . equals ( str2 ) ; } else { return false ; } |
public class ApiOvhEmailexchange { /** * Get this object properties
* REST : GET / email / exchange / { organizationName } / service / { exchangeService } / domain / { domainName } / disclaimer
* @ param organizationName [ required ] The internal name of your exchange organization
* @ param exchangeService [ required ] The internal name of your exchange service
* @ param domainName [ required ] Domain name */
public OvhDisclaimer organizationName_service_exchangeService_domain_domainName_disclaimer_GET ( String organizationName , String exchangeService , String domainName ) throws IOException { } } | String qPath = "/email/exchange/{organizationName}/service/{exchangeService}/domain/{domainName}/disclaimer" ; StringBuilder sb = path ( qPath , organizationName , exchangeService , domainName ) ; String resp = exec ( qPath , "GET" , sb . toString ( ) , null ) ; return convertTo ( resp , OvhDisclaimer . class ) ; |
public class HeaderedArchiveRecord { /** * Read header if present . Technique borrowed from HttpClient HttpParse
* class . Using http parser code for now . Later move to more generic header
* parsing code if there proves a need .
* @ return ByteArrayInputStream with the http header in it or null if no
* http header .
* @ throws IOException */
private InputStream readContentHeaders ( ) throws IOException { } } | // If judged a record that doesn ' t have an http header , return
// immediately .
if ( ! hasContentHeaders ( ) ) { return null ; } byte [ ] statusBytes = LaxHttpParser . readRawLine ( getIn ( ) ) ; int eolCharCount = getEolCharsCount ( statusBytes ) ; if ( eolCharCount <= 0 ) { throw new IOException ( "Failed to read raw lie where one " + " was expected: " + new String ( statusBytes ) ) ; } String statusLine = EncodingUtil . getString ( statusBytes , 0 , statusBytes . length - eolCharCount , ARCConstants . DEFAULT_ENCODING ) ; if ( statusLine == null ) { throw new NullPointerException ( "Expected status line is null" ) ; } // TODO : Tighten up this test .
boolean isHttpResponse = StatusLine . startsWithHTTP ( statusLine ) ; boolean isHttpRequest = false ; if ( ! isHttpResponse ) { isHttpRequest = statusLine . toUpperCase ( ) . startsWith ( "GET" ) || ! statusLine . toUpperCase ( ) . startsWith ( "POST" ) ; } if ( ! isHttpResponse && ! isHttpRequest ) { throw new UnexpectedStartLineIOException ( "Failed parse of " + "status line: " + statusLine ) ; } this . statusCode = isHttpResponse ? ( new StatusLine ( statusLine ) ) . getStatusCode ( ) : - 1 ; // Save off all bytes read . Keep them as bytes rather than
// convert to strings so we don ' t have to worry about encodings
// though this should never be a problem doing http headers since
// its all supposed to be ascii .
ByteArrayOutputStream baos = new ByteArrayOutputStream ( statusBytes . length + 4 * 1024 ) ; baos . write ( statusBytes ) ; // Now read rest of the header lines looking for the separation
// between header and body .
for ( byte [ ] lineBytes = null ; true ; ) { lineBytes = LaxHttpParser . readRawLine ( getIn ( ) ) ; eolCharCount = getEolCharsCount ( lineBytes ) ; if ( eolCharCount <= 0 ) { throw new IOException ( "Failed reading headers: " + ( ( lineBytes != null ) ? new String ( lineBytes ) : null ) ) ; } // Save the bytes read .
baos . write ( lineBytes ) ; if ( ( lineBytes . length - eolCharCount ) <= 0 ) { // We ' ve finished reading the http header .
break ; } } byte [ ] headerBytes = baos . toByteArray ( ) ; // Save off where content body , post content headers , starts .
this . contentHeadersLength = headerBytes . length ; ByteArrayInputStream bais = new ByteArrayInputStream ( headerBytes ) ; if ( ! bais . markSupported ( ) ) { throw new IOException ( "ByteArrayInputStream does not support mark" ) ; } bais . mark ( headerBytes . length ) ; // Read the status line . Don ' t let it into the parseHeaders function .
// It doesn ' t know what to do with it .
bais . read ( statusBytes , 0 , statusBytes . length ) ; this . contentHeaders = LaxHttpParser . parseHeaders ( bais , ARCConstants . DEFAULT_ENCODING ) ; bais . reset ( ) ; return bais ; |
public class Iterate { /** * Flip the keys and values of the multimap . */
public static < K , V > HashBagMultimap < V , K > flip ( ListMultimap < K , V > listMultimap ) { } } | final HashBagMultimap < V , K > result = new HashBagMultimap < V , K > ( ) ; listMultimap . forEachKeyMultiValues ( new Procedure2 < K , Iterable < V > > ( ) { public void value ( final K key , Iterable < V > values ) { Iterate . forEach ( values , new Procedure < V > ( ) { public void value ( V value ) { result . put ( value , key ) ; } } ) ; } } ) ; return result ; |
public class StringUtils {
    /**
     * Trims all occurrences of the supplied trailing character from the given
     * {@code String}.
     * <p>
     * Improvement over the previous version: instead of copying the whole
     * string into a {@code StringBuilder} and removing characters one at a
     * time with {@code deleteCharAt}, this computes the cut index and takes a
     * single {@code substring} — one pass, at most one allocation, and no
     * allocation at all when nothing needs trimming. The null/empty guard is
     * inlined (equivalent to {@code hasLength(str)}) so the method is
     * self-contained.
     *
     * @param str the {@code String} to check (may be {@code null})
     * @param trailingCharacter the trailing character to be trimmed
     * @return the trimmed {@code String}; {@code null} or empty input is
     *         returned unchanged
     */
    public static String trimTrailingCharacter(final String str, final char trailingCharacter) {
        if (str == null || str.isEmpty()) {
            return str;
        }
        int end = str.length();
        while (end > 0 && str.charAt(end - 1) == trailingCharacter) {
            end--;
        }
        // Avoid allocating when no characters were trimmed.
        return end == str.length() ? str : str.substring(0, end);
    }
}
public class DeferredReleaser { /** * Schedules deferred release .
* The object will be released after the current Looper ' s loop ,
* unless { @ code cancelDeferredRelease } is called before then .
* @ param releasable Object to release . */
public void scheduleDeferredRelease ( Releasable releasable ) { } } | ensureOnUiThread ( ) ; if ( ! mPendingReleasables . add ( releasable ) ) { return ; } // Posting to the UI queue is an O ( n ) operation , so we only do it once .
// The one runnable does all the releases .
if ( mPendingReleasables . size ( ) == 1 ) { mUiHandler . post ( releaseRunnable ) ; } |
public class systemglobal_authenticationlocalpolicy_binding { /** * Use this API to fetch filtered set of systemglobal _ authenticationlocalpolicy _ binding resources .
* filter string should be in JSON format . eg : " port : 80 , servicetype : HTTP " . */
public static systemglobal_authenticationlocalpolicy_binding [ ] get_filtered ( nitro_service service , String filter ) throws Exception { } } | systemglobal_authenticationlocalpolicy_binding obj = new systemglobal_authenticationlocalpolicy_binding ( ) ; options option = new options ( ) ; option . set_filter ( filter ) ; systemglobal_authenticationlocalpolicy_binding [ ] response = ( systemglobal_authenticationlocalpolicy_binding [ ] ) obj . getfiltered ( service , option ) ; return response ; |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.