signature
stringlengths
43
39.1k
implementation
stringlengths
0
450k
public class TopologyBuilder {
    /**
     * Define a new bolt in this topology. This defines a basic bolt, which is a
     * simpler to use but more restricted kind of bolt. Basic bolts are intended
     * for non-aggregation processing and automate the anchoring/acking process to
     * achieve proper reliability in the topology.
     *
     * @param id the id of this component. This id is referenced by other components that want to consume this bolt's outputs.
     * @param bolt the basic bolt
     * @return use the returned object to declare the inputs to this component
     * @throws IllegalArgumentException if {@code parallelism_hint} is not positive
     */
    public BoltDeclarer setBolt(String id, IBasicBolt bolt) throws IllegalArgumentException {
        // Delegate to the full overload, passing null as the parallelism hint
        // so the overload applies its default.
        return setBolt(id, bolt, null);
    }
}
public class StructureTools { /** * List of groups from the structure not included in ca ( e . g . ligands ) . * Unaligned groups are searched from all chains referenced in ca , as well * as any chains in the first model of the structure from ca [ 0 ] , if any . * @ param ca an array of atoms * @ return */ public static List < Group > getUnalignedGroups ( Atom [ ] ca ) { } }
Set < Chain > chains = new HashSet < Chain > ( ) ; Set < Group > caGroups = new HashSet < Group > ( ) ; // Create list of all chains in this structure Structure s = null ; if ( ca . length > 0 ) { Group g = ca [ 0 ] . getGroup ( ) ; if ( g != null ) { Chain c = g . getChain ( ) ; if ( c != null ) { s = c . getStructure ( ) ; } } } if ( s != null ) { // Add all chains from the structure for ( Chain c : s . getChains ( 0 ) ) { chains . add ( c ) ; } } // Add groups and chains from ca for ( Atom a : ca ) { Group g = a . getGroup ( ) ; if ( g != null ) { caGroups . add ( g ) ; Chain c = g . getChain ( ) ; if ( c != null ) { chains . add ( c ) ; } } } // Iterate through all chains , finding groups not in ca List < Group > unadded = new ArrayList < Group > ( ) ; for ( Chain c : chains ) { for ( Group g : c . getAtomGroups ( ) ) { if ( ! caGroups . contains ( g ) ) { unadded . add ( g ) ; } } } return unadded ;
public class ShardingEncryptorStrategy { /** * Is has sharding query assisted encryptor or not . * @ param logicTableName logic table name * @ return has sharding query assisted encryptor or not */ public boolean isHasShardingQueryAssistedEncryptor ( final String logicTableName ) { } }
return shardingEncryptor instanceof ShardingQueryAssistedEncryptor && ! Collections2 . filter ( assistedQueryColumns , new Predicate < ColumnNode > ( ) { @ Override public boolean apply ( final ColumnNode input ) { return input . getTableName ( ) . equals ( logicTableName ) ; } } ) . isEmpty ( ) ;
public class JCufft { /** * Convenience method for { @ link JCufft # cufftExecZ2D ( cufftHandle , Pointer , Pointer ) } . * Accepts arrays for input and output data and automatically performs the host - device * and device - host copies . * @ see jcuda . jcufft . JCufft # cufftExecZ2D ( cufftHandle , Pointer , Pointer ) */ public static int cufftExecZ2D ( cufftHandle plan , double cIdata [ ] , double rOdata [ ] ) { } }
int cudaResult = 0 ; boolean inPlace = ( cIdata == rOdata ) ; // Allocate space for the input data on the device Pointer hostCIdata = Pointer . to ( cIdata ) ; Pointer deviceCIdata = new Pointer ( ) ; cudaResult = JCuda . cudaMalloc ( deviceCIdata , cIdata . length * Sizeof . DOUBLE ) ; if ( cudaResult != cudaError . cudaSuccess ) { if ( exceptionsEnabled ) { throw new CudaException ( "JCuda error: " + cudaError . stringFor ( cudaResult ) ) ; } return cufftResult . JCUFFT_INTERNAL_ERROR ; } // Allocate the output device data Pointer hostROdata = null ; Pointer deviceROdata = null ; if ( inPlace ) { hostROdata = hostCIdata ; deviceROdata = deviceCIdata ; } else { hostROdata = Pointer . to ( rOdata ) ; deviceROdata = new Pointer ( ) ; cudaResult = JCuda . cudaMalloc ( deviceROdata , rOdata . length * Sizeof . DOUBLE ) ; if ( cudaResult != cudaError . cudaSuccess ) { JCuda . cudaFree ( deviceCIdata ) ; if ( exceptionsEnabled ) { throw new CudaException ( "JCuda error: " + cudaError . stringFor ( cudaResult ) ) ; } return cufftResult . JCUFFT_INTERNAL_ERROR ; } } // Copy the host input data to the device cudaResult = JCuda . cudaMemcpy ( deviceCIdata , hostCIdata , cIdata . length * Sizeof . DOUBLE , cudaMemcpyKind . cudaMemcpyHostToDevice ) ; if ( cudaResult != cudaError . cudaSuccess ) { JCuda . cudaFree ( deviceCIdata ) ; if ( ! inPlace ) { JCuda . cudaFree ( deviceROdata ) ; } if ( exceptionsEnabled ) { throw new CudaException ( "JCuda error: " + cudaError . stringFor ( cudaResult ) ) ; } return cufftResult . JCUFFT_INTERNAL_ERROR ; } // Execute the transform int result = cufftResult . CUFFT_SUCCESS ; try { result = JCufft . cufftExecZ2D ( plan , deviceCIdata , deviceROdata ) ; } catch ( CudaException e ) { JCuda . cudaFree ( deviceCIdata ) ; if ( ! inPlace ) { JCuda . cudaFree ( deviceROdata ) ; } result = cufftResult . JCUFFT_INTERNAL_ERROR ; } if ( result != cufftResult . CUFFT_SUCCESS ) { if ( exceptionsEnabled ) { throw new CudaException ( cufftResult . 
stringFor ( cudaResult ) ) ; } return result ; } // Copy the device output data to the host cudaResult = JCuda . cudaMemcpy ( hostROdata , deviceROdata , rOdata . length * Sizeof . DOUBLE , cudaMemcpyKind . cudaMemcpyDeviceToHost ) ; if ( cudaResult != cudaError . cudaSuccess ) { JCuda . cudaFree ( deviceCIdata ) ; if ( ! inPlace ) { JCuda . cudaFree ( deviceROdata ) ; } if ( exceptionsEnabled ) { throw new CudaException ( "JCuda error: " + cudaError . stringFor ( cudaResult ) ) ; } return cufftResult . JCUFFT_INTERNAL_ERROR ; } // Free the device data cudaResult = JCuda . cudaFree ( deviceCIdata ) ; if ( cudaResult != cudaError . cudaSuccess ) { if ( exceptionsEnabled ) { throw new CudaException ( "JCuda error: " + cudaError . stringFor ( cudaResult ) ) ; } return cufftResult . JCUFFT_INTERNAL_ERROR ; } if ( ! inPlace ) { cudaResult = JCuda . cudaFree ( deviceROdata ) ; if ( cudaResult != cudaError . cudaSuccess ) { if ( exceptionsEnabled ) { throw new CudaException ( "JCuda error: " + cudaError . stringFor ( cudaResult ) ) ; } return cufftResult . JCUFFT_INTERNAL_ERROR ; } } return result ;
public class DocumentReferenceTranslator { /** * Default implementation ignores providers . Override to try registry lookup . */ public Document toDomDocument ( Object obj , boolean tryProviders ) throws TranslationException { } }
if ( this instanceof XmlDocumentTranslator ) return ( ( XmlDocumentTranslator ) this ) . toDomDocument ( obj ) ; else throw new UnsupportedOperationException ( "Translator: " + this . getClass ( ) . getName ( ) + " does not implement" + XmlDocumentTranslator . class . getName ( ) ) ;
public class StAXUtils { /** * Go to the end of the current element . This include skipping any children element . * @ param xmlReader the XML stream reader * @ return the type of the new current event * @ throws XMLStreamException if there is an error processing the underlying XML source * @ since 5.3M1 */ public static int skipElement ( XMLStreamReader xmlReader ) throws XMLStreamException { } }
if ( ! xmlReader . isStartElement ( ) ) { throw new XMLStreamException ( "Current node is not start element" ) ; } if ( ! xmlReader . isEndElement ( ) ) { for ( xmlReader . next ( ) ; ! xmlReader . isEndElement ( ) ; xmlReader . next ( ) ) { if ( xmlReader . isStartElement ( ) ) { skipElement ( xmlReader ) ; } } } return xmlReader . getEventType ( ) ;
public class Type { /** * Check if a type is convertable including support for varargs . If varargs * is true , then this will add 40 to the total cost such that non - varargs * will always take precedence . * @ see # convertableFrom ( Type ) */ public int convertableFrom ( Type other , boolean vararg ) { } }
int cost = convertableFrom ( other ) ; if ( cost < 0 ) { return cost ; } return ( vararg ? cost + 40 : cost ) ;
public class FctBnCnvBnFromRs {
    /**
     * <p>Get bean in lazy mode (if bean is null then initialize it).</p>
     * Uses double-checked locking on {@code convertersMap}: the first unsynchronized
     * lookup serves the hot path; the second lookup inside the lock guards against
     * another thread having created the converter in the meantime.
     *
     * @param pAddParam additional param
     * @param pBeanName - bean name
     * @return requested bean
     * @throws Exception - an exception
     */
    @Override
    public final IConverterByName<IRecordSet<RS>, ?> lazyGet(final Map<String, Object> pAddParam, final String pBeanName) throws Exception {
        IConverterByName<IRecordSet<RS>, ?> convrt = this.convertersMap.get(pBeanName);
        if (convrt == null) {
            // locking:
            synchronized (this.convertersMap) {
                // make sure again whether it's null after locking:
                convrt = this.convertersMap.get(pBeanName);
                if (convrt == null) {
                    // Dispatch on the converter's simple class name; each branch
                    // both creates the converter and registers it in the map.
                    if (pBeanName.equals(CnvBnRsToFloat.class.getSimpleName())) {
                        convrt = createPutCnvBnRsToFloat();
                    } else if (pBeanName.equals(CnvBnRsToEnum.class.getSimpleName())) {
                        convrt = createPutCnvBnRsToEnum();
                    } else if (pBeanName.equals(CnvBnRsToEntity.class.getSimpleName())) {
                        convrt = createPutCnvBnRsToEntity();
                    } else if (pBeanName.equals(CnvBnRsToDouble.class.getSimpleName())) {
                        convrt = createPutCnvBnRsToDouble();
                    } else if (pBeanName.equals(CnvBnRsToInteger.class.getSimpleName())) {
                        convrt = createPutCnvBnRsToInteger();
                    } else if (pBeanName.equals(CnvBnRsToLong.class.getSimpleName())) {
                        convrt = createPutCnvBnRsToLong();
                    } else if (pBeanName.equals(CnvBnRsToBoolean.class.getSimpleName())) {
                        convrt = createPutCnvBnRsToBoolean();
                    } else if (pBeanName.equals(CnvBnRsToDate.class.getSimpleName())) {
                        convrt = createPutCnvBnRsToDate();
                    } else if (pBeanName.equals(CnvBnRsToString.class.getSimpleName())) {
                        convrt = createPutCnvBnRsToString();
                    } else if (pBeanName.equals(CnvBnRsToBigDecimal.class.getSimpleName())) {
                        convrt = createPutCnvBnRsToBigDecimal();
                    }
                }
            }
        }
        // Unknown bean name: fail loudly — this is a configuration error.
        if (convrt == null) {
            throw new ExceptionWithCode(ExceptionWithCode.CONFIGURATION_MISTAKE, "There is no converter with name " + pBeanName);
        }
        return convrt;
    }
}
public class JettyStarter {
    /**
     * Set the common resource base (directory) from which all web application
     * resources will be loaded (servlet context root).
     *
     * @param aResourceBase The resource. May neither be <code>null</code> nor empty.
     * @return this for chaining
     */
    @Nonnull
    public final JettyStarter setResourceBase(@Nonnull final Resource aResourceBase) {
        // Only nullness is validated here; emptiness is not checked in this
        // method — presumably enforced elsewhere (TODO confirm).
        ValueEnforcer.notNull(aResourceBase, "ResourceBase");
        m_aResourceBase = aResourceBase;
        return this;
    }
}
public class Repository { /** * http : / / en . wikipedia . org / wiki / Byte _ Order _ Mark */ private static String stripBom ( String str ) { } }
if ( ! str . isEmpty ( ) && str . charAt ( 0 ) == 65279 ) { return str . substring ( 1 ) ; } else { return str ; }
public class ScanStream {
    /**
     * Sequentially iterate over entries in a hash identified by {@code key}. This method uses
     * {@code HSCAN} to perform an iterative scan.
     *
     * @param commands the commands interface, must not be {@literal null}.
     * @param key the hash to scan.
     * @param scanArgs the scan arguments, must not be {@literal null}.
     * @param <K> Key type.
     * @param <V> Value type.
     * @return a new {@link Flux}.
     */
    public static <K, V> Flux<KeyValue<K, V>> hscan(RedisHashReactiveCommands<K, V> commands, K key, ScanArgs scanArgs) {
        // Validate eagerly; the Optional-based overload performs the actual scan.
        LettuceAssert.notNull(scanArgs, "ScanArgs must not be null");
        return hscan(commands, key, Optional.of(scanArgs));
    }
}
public class StringBlock { /** * Returns raw string ( without any styling information ) at specified index . * Returns null if index is invalid or object was not initialized . */ public String getRaw ( int index ) { } }
if ( m_strings == null || index < 0 || index > m_strings . length - 1 ) { return null ; } return m_strings [ index ] ;
public class EnvironmentLogger {
    /**
     * Returns the finest assigned level for any classloader environment.
     *
     * The base level is derived from the parent logger chain (INFO at the
     * root), then combined with any per-classloader overrides in _localLevel.
     */
    private Level getFinestLevel() {
        Level level;
        if (_parent == null)
            level = Level.INFO;
        else if (_parent.isLocalLevel())
            level = selectFinestLevel(_systemLevel, _parent.getFinestLevel());
        else if (_systemLevel != null)
            level = _systemLevel;
        else
            level = _parent.getFinestLevel();
        // No per-classloader overrides: the inherited level is the answer.
        if (_localLevel == null)
            return level;
        // Walk backwards so stale WeakReference entries can be removed in place.
        for (int i = _loaders.size() - 1; i >= 0; i--) {
            WeakReference<ClassLoader> ref = _loaders.get(i);
            ClassLoader loader = ref.get();
            if (loader == null)
                _loaders.remove(i);
            // NOTE(review): when the loader has been collected this still calls
            // _localLevel.get(null) — presumably a harmless null lookup; confirm
            // whether a `continue` after the removal was intended.
            level = selectFinestLevel(level, _localLevel.get(loader));
        }
        return level;
    }
}
public class MultiPathImpl {
    /**
     * Begins a new path starting at the given point.
     * Reviewed vs. Native Jan 11, 2011
     *
     * @param point the start point of the new path; must not be empty
     * @throws IllegalArgumentException if {@code point} is empty
     */
    public void startPath(Point point) {
        if (point.isEmpty())
            throw new IllegalArgumentException();
        // throw new
        // IllegalArgumentException();
        // Merge the point's attribute description into this multipath before
        // copying the coordinates into the pending move-to point.
        mergeVertexDescription(point.getDescription());
        _initPathStartPoint();
        point.copyTo(m_moveToPoint);
        // TODO check MultiPathImpl.cpp comment
        // "// the description will be merged later"
        // assignVertexDescription(m_moveToPoint.getDescription());
        m_bPathStarted = true;
    }
}
public class UnicodeSetStringSpan { /** * Spans a string . * @ param s The string to be spanned * @ param start The start index that the span begins * @ param spanCondition The span condition * @ return the limit ( exclusive end ) of the span */ public int span ( CharSequence s , int start , SpanCondition spanCondition ) { } }
if ( spanCondition == SpanCondition . NOT_CONTAINED ) { return spanNot ( s , start , null ) ; } int spanLimit = spanSet . span ( s , start , SpanCondition . CONTAINED ) ; if ( spanLimit == s . length ( ) ) { return spanLimit ; } return spanWithStrings ( s , start , spanLimit , spanCondition ) ;
public class EclipseProductLocationProvider { /** * Returns a list of paths of Eclipse installations . * The search process works by scanning for each ' source dir ' for either an eclipse installation or a folder containing the text returned * by getDirName ( ) . If such a folder is found , this process is applied recursively . On windows , this process is run on each drive letter * which represents a physical hard disk . If the native windows API call to determine these drive letters fails , only ' C : ' is checked . */ private List < String > getSourceDirsOnWindowsWithDriveLetters ( ) { } }
List < String > driveLetters = asList ( "C" ) ; try { driveLetters = OsUtils . getDrivesOnWindows ( ) ; } catch ( Throwable ignore ) { ignore . printStackTrace ( ) ; } List < String > sourceDirs = new ArrayList < String > ( ) ; for ( String letter : driveLetters ) { for ( String possibleSource : descriptor . getSourceDirsOnWindows ( ) ) { if ( ! isDriveSpecificOnWindows ( possibleSource ) ) { sourceDirs . add ( letter + ":" + possibleSource ) ; } } } for ( String possibleSource : descriptor . getSourceDirsOnWindows ( ) ) { if ( isDriveSpecificOnWindows ( possibleSource ) ) sourceDirs . add ( possibleSource ) ; } return sourceDirs ;
public class KeyVaultClientBaseImpl {
    /**
     * List secrets in a specified key vault.
     * The Get Secrets operation is applicable to the entire vault. However, only the base secret
     * identifier and its attributes are provided in the response. Individual secret versions are
     * not listed in the response. This operation requires the secrets/list permission.
     *
     * @param vaultBaseUrl The vault name, for example https://myvault.vault.azure.net.
     * @param maxresults Maximum number of results to return in a page. If not specified, the service will return up to 25 results.
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @return the observable to the PagedList&lt;SecretItem&gt; object
     */
    public Observable<ServiceResponse<Page<SecretItem>>> getSecretsWithServiceResponseAsync(final String vaultBaseUrl, final Integer maxresults) {
        // Fetch the first page, then keep concatenating follow-up pages for as
        // long as the service returns a nextPageLink (recursive pagination).
        return getSecretsSinglePageAsync(vaultBaseUrl, maxresults)
            .concatMap(new Func1<ServiceResponse<Page<SecretItem>>, Observable<ServiceResponse<Page<SecretItem>>>>() {
                @Override
                public Observable<ServiceResponse<Page<SecretItem>>> call(ServiceResponse<Page<SecretItem>> page) {
                    String nextPageLink = page.body().nextPageLink();
                    if (nextPageLink == null) {
                        // Last page: terminate the stream with just this page.
                        return Observable.just(page);
                    }
                    return Observable.just(page).concatWith(getSecretsNextWithServiceResponseAsync(nextPageLink));
                }
            });
    }
}
public class ThrowsTaglet {
    /**
     * {@inheritDoc}
     *
     * Resolves the exception type referenced by a {@code @throws} tag and
     * collects matching tags (and subclass matches) from the enclosing method
     * into the search output.
     */
    public void inherit(DocFinder.Input input, DocFinder.Output output) {
        ClassDoc exception;
        if (input.tagId == null) {
            // First visit: derive the tag id from the tag itself, preferring the
            // resolved exception's qualified name over the raw source-level name.
            ThrowsTag throwsTag = (ThrowsTag) input.tag;
            exception = throwsTag.exception();
            input.tagId = exception == null ? throwsTag.exceptionName() : throwsTag.exception().qualifiedName();
        } else {
            // Subsequent visits: resolve the id relative to the containing class.
            exception = input.element.containingClass().findClass(input.tagId);
        }
        ThrowsTag[] tags = ((MethodDoc) input.element).throwsTags();
        for (int i = 0; i < tags.length; i++) {
            if (input.tagId.equals(tags[i].exceptionName())
                    || (tags[i].exception() != null && (input.tagId.equals(tags[i].exception().qualifiedName())))) {
                // Exact match on name or qualified name: record the holder,
                // the tag and its documentation text.
                output.holder = input.element;
                output.holderTag = tags[i];
                output.inlineTags = input.isFirstSentence ? tags[i].firstSentenceTags() : tags[i].inlineTags();
                output.tagList.add(tags[i]);
            } else if (exception != null && tags[i].exception() != null && tags[i].exception().subclassOf(exception)) {
                // Subclass of the requested exception: include the tag but do
                // not overwrite the holder/holderTag.
                output.tagList.add(tags[i]);
            }
        }
    }
}
public class TransactionOLTP { /** * Executes a method which has the potential to throw a TemporaryLockingException or a PermanentLockingException . * If the exception is thrown it is wrapped in a GraknServerException so that the transaction can be retried . * @ param method The locking method to execute */ private < X > X executeLockingMethod ( Supplier < X > method ) { } }
try { return method . get ( ) ; } catch ( JanusGraphException e ) { if ( e . isCausedBy ( TemporaryLockingException . class ) || e . isCausedBy ( PermanentLockingException . class ) ) { throw TemporaryWriteException . temporaryLock ( e ) ; } else { throw GraknServerException . unknown ( e ) ; } }
public class Page {
    /**
     * Write the entire page by calling:<br>
     * writeHtmlHead(out)<br>
     * writeBodyTag(out)<br>
     * writeElements(out)<br>
     * writeHtmlEnd(out)
     *
     * @param out the writer the page is rendered to
     * @throws IOException if writing to {@code out} fails
     */
    public void write(Writer out) throws IOException {
        // The four phases must run in exactly this order to produce the
        // head / body-open / content / close sequence of the page.
        writeHtmlHead(out);
        writeBodyTag(out);
        writeElements(out);
        writeHtmlEnd(out);
    }
}
public class FastTrackReader { /** * Extract the outline level from a task ' s WBS attribute . * @ param task Task instance * @ return outline level */ private Integer getOutlineLevel ( Task task ) { } }
String value = task . getWBS ( ) ; Integer result = Integer . valueOf ( 1 ) ; if ( value != null && value . length ( ) > 0 ) { String [ ] path = WBS_SPLIT_REGEX . split ( value ) ; result = Integer . valueOf ( path . length ) ; } return result ;
public class OutboundConnectionTracker { /** * Closes a conversation for the specified connection . It is unnecessary to * require a reference to the conversation being closed as by the time this method * is invoked , the conversation has been marked " closed " and all that remains is the * book keeping . This method notifies the appropriate connection group that a * conversation has closed , and if necessary , removes the group from the map of those * tracked . * @ param connectionHostingConversation The connection which is hosting the conversation * we are closing . */ public void closeConversation ( OutboundConnection connectionHostingConversation ) { } }
if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) SibTr . entry ( this , tc , "closeConversation" , connectionHostingConversation ) ; // To avoid deadlock - keep a list of pending closes and only allow one thread // to process this list . boolean closeOnThisThread = false ; synchronized ( closeList ) { closeOnThisThread = closeList . isEmpty ( ) ; closeList . addLast ( connectionHostingConversation ) ; if ( closeOnThisThread ) connectionHostingConversation = closeList . getFirst ( ) ; } while ( closeOnThisThread ) { ConnectionData connectionData = connectionHostingConversation . getConnectionData ( ) ; if ( connectionData != null ) { EndPointDescriptor endPointDescriptior = connectionData . getEndPointDescriptor ( ) ; ConnectionDataGroup group ; // Take the endpoint to group map ' s monitor to prevent anyone else from getting // ahold of and using this group whilst we perform an update . synchronized ( endPointToGroupMap ) { // Find the group . group = endPointToGroupMap . get ( endPointDescriptior ) ; if ( ( group == null ) || ( group != connectionData . getConnectionDataGroup ( ) ) ) { if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) ) { if ( group == null ) SibTr . debug ( this , tc , "group == nul" ) ; else SibTr . debug ( this , tc , "group != connectionData.getConnectionDataGroup()" ) ; } // Record an FFDC , but then ignore this close request ( and continue with the next one ) // NOTE : If we don ' t continue NO close will ever be processed Exception e = new SIErrorException ( nls . getFormattedMessage ( "OUTCONNTRACKER_INTERNAL_SICJ0064" , null , "OUTCONNTRACKER_INTERNAL_SICJ0064" ) ) ; // D226223 FFDCFilter . processException ( e , "com.ibm.ws.sib.jfapchannel.impl.octracker.OutboundConnectionTracker" , JFapChannelConstants . OUTBOUNDCONNTRACKER_CLOSECONV_01 , connectionData ) ; } else { // Notify the group that a conversation using the specified connection // has closed . group . 
close ( connectionHostingConversation ) ; // If the connection group has become empty , remove it from out map . if ( group . isEmpty ( ) ) { if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) ) SibTr . debug ( this , tc , "group: " + group + " has become empty" ) ; endPointToGroupMap . remove ( group . getEndPointDescriptor ( ) ) ; } } } } synchronized ( closeList ) { closeList . removeFirst ( ) ; closeOnThisThread = ! closeList . isEmpty ( ) ; if ( closeOnThisThread ) { connectionHostingConversation = closeList . getFirst ( ) ; if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) ) SibTr . debug ( this , tc , "close list has an entry: " + connectionHostingConversation ) ; } } } if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) SibTr . exit ( this , tc , "closeConversation" ) ;
public class IFixUtils {
    /**
     * Reads all *.lpmf (Liberty Profile Metadata) files under the supplied
     * installation directory and returns the parsed metadata objects.
     * Parse failures on individual files are reported and skipped.
     *
     * @param wlpInstallationDirectory The installation directory of the current install
     * @param console The console for printing messages to
     * @return The Set of LibertyProfileMetadataFile objects from all the *.lpmf files
     *         in the supplied installation dir.
     */
    public static Set<LibertyProfileMetadataFile> getInstalledLibertyProfileMetadataFiles(File wlpInstallationDirectory, CommandConsole console) {
        Set<LibertyProfileMetadataFile> lpmfInfos = new HashSet<LibertyProfileMetadataFile>();
        // First create a parser for reading the Liberty Profile Metadata XML.
        // NOTE(review): docBuilder appears to be a shared field rather than a
        // local — DocumentBuilder is not thread-safe; confirm callers are serialized.
        try {
            docBuilder = DocumentBuilderFactory.newInstance().newDocumentBuilder();
        } catch (Exception e) {
            // If we can't create a parser then we won't be able to read any files
            // so print a message and return the empty set.
            console.printlnErrorMessage(getMessage("ifixutils.unable.to.create.parser", e.getMessage()));
            return lpmfInfos;
        }
        // Find the lpmf XML files
        File[] lpmfFiles = findLPMFXmlFiles(wlpInstallationDirectory);
        // Read in each file and parse the liberty profile metadata into in-memory objects.
        for (File file : lpmfFiles) {
            try {
                Document doc = docBuilder.parse(file);
                LibertyProfileMetadataFile lpmfInfo = LibertyProfileMetadataFile.fromDocument(doc);
                lpmfInfos.add(lpmfInfo);
            } catch (Exception e) {
                // There was an error reading this one file but we can continue to
                // read the next files, so print a message and carry on.
                console.printlnErrorMessage(getMessage("ifixutils.unable.to.read.file", file.getAbsolutePath(), e.getMessage()));
            }
        }
        return lpmfInfos;
    }
}
public class ReflectionUtils { /** * Make the given constructor accessible , explicitly setting it accessible * if necessary . The { @ code setAccessible ( true ) } method is only called * when actually necessary , to avoid unnecessary conflicts with a JVM * SecurityManager ( if active ) . * @ param ctor the constructor to make accessible * @ see java . lang . reflect . Constructor # setAccessible */ public static void makeAccessible ( Constructor < ? > ctor ) { } }
if ( ( ! Modifier . isPublic ( ctor . getModifiers ( ) ) || ! Modifier . isPublic ( ctor . getDeclaringClass ( ) . getModifiers ( ) ) ) && ! ctor . isAccessible ( ) ) { ctor . setAccessible ( true ) ; }
public class FrameworkUtils { public static ExtensionMeta mkExtensionMeta ( String name , Object ... params ) { } }
List < ? > paramList = Arrays . asList ( params ) ; String paramStr = paramList . isEmpty ( ) ? "" : paramList . stream ( ) . map ( t -> t == null ? "" : t . toString ( ) ) . reduce ( ( l , r ) -> l + ", " + r ) . get ( ) ; String desc = name + "(" + paramStr + ")" ; return new ExtensionMeta ( name , desc , paramList ) ;
public class AbstractProcessorChain { /** * < p > Directs processing along the chain by executing the root { @ link ProcessorChainLink } and delegating * all successors to { @ link # onTraverse ( Object , ProcessorChainLink , Object . . . ) } and the terminal link to * { @ link # onTerminate ( Object , Object . . . ) } . < / p > * @ param args * the arguments to the root { @ link ProcessorChainLink } which serves as the input to the first * { @ link Processor } which produces the initial < i > RESULT < / i > ; these are passed along the chain * for each and every link * < br > < br > * @ return the result of the complete { @ link AbstractProcessorChain } execution after being processed by * { @ link # onTerminate ( Object , Object . . . ) } * < br > < br > * @ throws ChainExecutionException * if the { @ link AbstractProcessorChain } halted due to an unrecoverable failure in one of its * { @ link ProcessorChainLink } s ; this signals a < b > chain - wide < / b > failure ; failures of individual * links may be handled in { @ link # onInitiate ( ProcessorChainLink , Object . . . ) } and * { @ link # onTraverse ( Object , ProcessorChainLink , Object . . . ) } * < br > < br > * @ since 1.3.0 */ @ Override public LINK_RESULT run ( Object ... args ) { } }
try { ProcessorChainLink < LINK_RESULT , LINK_FAILURE > current = root ; LINK_RESULT result = onInitiate ( current , args ) ; while ( ! current . isTerminalLink ( ) ) { current = current . getSuccessor ( ) ; result = onTraverse ( result , current , args ) ; } onTerminate ( result , args ) ; return result ; } catch ( Exception e ) { throw new ChainExecutionException ( e ) ; }
public class FileUtils { /** * This method is able to determine whether a file is GZipped and return an { @ link InputStream } in any case . * @ param path the path to the file to open * @ param options options specifying how the file is opened * @ return a new input stream * @ throws IOException if an I / O error occurs */ public static InputStream newInputStream ( Path path , OpenOption ... options ) throws IOException { } }
FileUtils . checkFile ( path ) ; InputStream inputStream = Files . newInputStream ( path , options ) ; if ( path . toFile ( ) . getName ( ) . endsWith ( ".gz" ) ) { inputStream = new GZIPInputStream ( inputStream ) ; } return inputStream ;
public class RepositoryResourceImpl { /** * Checks if the two resources are equivalent by checking if the assets * are equivalent . * @ param obj * @ return */ public boolean equivalent ( Object obj ) { } }
if ( this == obj ) return true ; if ( obj == null ) return false ; if ( getClass ( ) != obj . getClass ( ) ) return false ; RepositoryResourceImpl other = ( RepositoryResourceImpl ) obj ; if ( _asset == null ) { if ( other . _asset != null ) return false ; } else if ( ! _asset . equivalent ( other . _asset ) ) return false ; return true ;
public class JvmTypeConstraintImplCustom { /** * Constraint bounds are definitely invalid if they are < code > not null < / code > and point to a primitive type . * { @ link JvmSpecializedTypeReference } will not be resolved by this check thus they may lead to finally * invalid constraint bounds . * @ param constraintBound the reference that shall be come the new constraint . * @ return < code > false < / code > if the given constraint is definitely invalid . */ protected boolean isLikelyAValidConstraintBound ( JvmTypeReference constraintBound ) { } }
if ( constraintBound == null ) return true ; if ( constraintBound instanceof JvmSpecializedTypeReference ) { JvmTypeReference equivalent = ( JvmTypeReference ) constraintBound . eGet ( TypesPackage . Literals . JVM_SPECIALIZED_TYPE_REFERENCE__EQUIVALENT , false ) ; if ( equivalent != null ) { return isLikelyAValidConstraintBound ( equivalent ) ; } return true ; } boolean invalid = ( constraintBound . getType ( ) instanceof JvmPrimitiveType || ( constraintBound . getType ( ) instanceof JvmVoid && ! constraintBound . getType ( ) . eIsProxy ( ) ) ) ; return ! invalid ;
public class OAuth20Utils { /** * Is authorized grant type for service ? * @ param grantType the grant type * @ param registeredService the registered service * @ return true / false */ public static boolean isAuthorizedGrantTypeForService ( final String grantType , final OAuthRegisteredService registeredService ) { } }
if ( registeredService . getSupportedGrantTypes ( ) != null && ! registeredService . getSupportedGrantTypes ( ) . isEmpty ( ) ) { LOGGER . debug ( "Checking grant type [{}] against supported grant types [{}]" , grantType , registeredService . getSupportedGrantTypes ( ) ) ; return registeredService . getSupportedGrantTypes ( ) . stream ( ) . anyMatch ( s -> s . equalsIgnoreCase ( grantType ) ) ; } LOGGER . warn ( "Registered service [{}] does not define any authorized/supported grant types. " + "It is STRONGLY recommended that you authorize and assign grant types to the service definition. " + "While just a warning for now, this behavior will be enforced by CAS in future versions." , registeredService . getName ( ) ) ; return true ;
public class SaneOption {
    /**
     * Set the value of the current option to the supplied value. Option value must be of integer type.
     * TODO: consider caching the returned value for "fast read" later
     *
     * @param newValue for the option
     * @return the value actually set
     * @throws IOException if the RPC to the SANE backend fails
     * @throws SaneException if the backend rejects the control operation
     */
    public int setIntegerValue(int newValue) throws IOException, SaneException {
        // This overload only handles scalar options; arrays take a different path.
        Preconditions.checkState(getValueCount() == 1, "option is an array");
        // check that this option is writeable
        Preconditions.checkState(isWriteable());
        // Send RPC corresponding to:
        // SANE_Status sane_control_option (SANE_Handle h, SANE_Int n,
        //                                  SANE_Action a, void *v,
        //                                  SANE_Int *i);
        ControlOptionResult result = writeOption(ImmutableList.of(newValue));
        // The backend must echo an INT of exactly one word; anything else is a protocol error.
        Preconditions.checkState(result.getType() == OptionValueType.INT);
        Preconditions.checkState(result.getValueSize() == SaneWord.SIZE_IN_BYTES);
        return SaneWord.fromBytes(result.getValue()).integerValue();
    }
}
public class LTCCallbacks {
    /**
     * Register a <code>UOWCallback</code> for LTC notifications.
     * Duplicate registrations of the same callback are ignored.
     *
     * @param callback The UOWCallback object to register with the LocalTransaction service
     */
    public void registerCallback(UOWCallback callback) {
        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
            Tr.entry(tc, "registerCallback", callback);
        // Only add the callback when not already registered.
        if (!_callbacks.contains(callback)) {
            _callbacks.add(callback);
            if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled())
                Tr.debug(tc, "Number of registered Callbacks: " + _callbacks.size());
        }
        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
            Tr.exit(tc, "registerCallback");
    }
}
public class HttpExtKit { /** * Send Hex String request */ public static String postHexString ( String url , Map < String , String > queryParas , byte [ ] data , Map < String , String > headers ) { } }
return HttpKit . post ( url , queryParas , HexKit . byteToHexString ( data ) , headers ) ;
public class CPDefinitionLinkPersistenceImpl { /** * Returns the first cp definition link in the ordered set where CPDefinitionId = & # 63 ; and type = & # 63 ; . * @ param CPDefinitionId the cp definition ID * @ param type the type * @ param orderByComparator the comparator to order the set by ( optionally < code > null < / code > ) * @ return the first matching cp definition link , or < code > null < / code > if a matching cp definition link could not be found */ @ Override public CPDefinitionLink fetchByCPD_T_First ( long CPDefinitionId , String type , OrderByComparator < CPDefinitionLink > orderByComparator ) { } }
List < CPDefinitionLink > list = findByCPD_T ( CPDefinitionId , type , 0 , 1 , orderByComparator ) ; if ( ! list . isEmpty ( ) ) { return list . get ( 0 ) ; } return null ;
public class TangoEventsAdapter { public void addTangoPeriodicListener ( ITangoPeriodicListener listener , String attrName , String [ ] filters , boolean stateless ) throws DevFailed { } }
TangoPeriodic tangoPeriodic ; String key = deviceName + "/" + attrName ; if ( ( tangoPeriodic = tango_periodic_source . get ( key ) ) == null ) { tangoPeriodic = new TangoPeriodic ( deviceProxy , attrName , filters ) ; tango_periodic_source . put ( key , tangoPeriodic ) ; } synchronized ( moni ) { tangoPeriodic . addTangoPeriodicListener ( listener , stateless ) ; }
public class TarInputStream { /** * Skip bytes in the input buffer . This skips bytes in the current entry ' s data , not the entire archive , and will * stop at the end of the current entry ' s data if the number to skip extends beyond that point . * @ param numToSkip * The number of bytes to skip . * @ return The actual number of bytes skipped . */ public long skip ( long numToSkip ) throws IOException { } }
// REVIEW // This is horribly inefficient , but it ensures that we // properly skip over bytes via the TarBuffer . . . byte [ ] skipBuf = new byte [ 8 * 1024 ] ; long num = numToSkip ; for ( ; num > 0 ; ) { int numRead = this . read ( skipBuf , 0 , ( num > skipBuf . length ? skipBuf . length : ( int ) num ) ) ; if ( numRead == - 1 ) { break ; } num -= numRead ; } return ( numToSkip - num ) ;
public class ConfigClassAnalyzer { /** * Strips all decorations from the decorated object . * @ param decorated the decorated object * @ return the stripped object */ public static Object stripDeep ( Object decorated ) { } }
Object stripped = stripShallow ( decorated ) ; if ( stripped == decorated ) { return stripped ; } else { return stripDeep ( stripped ) ; }
public class AWSStorageGatewayClient { /** * Returns an array of Challenge - Handshake Authentication Protocol ( CHAP ) credentials information for a specified * iSCSI target , one for each target - initiator pair . * @ param describeChapCredentialsRequest * A JSON object containing the Amazon Resource Name ( ARN ) of the iSCSI volume target . * @ return Result of the DescribeChapCredentials operation returned by the service . * @ throws InvalidGatewayRequestException * An exception occurred because an invalid gateway request was issued to the service . For more information , * see the error and message fields . * @ throws InternalServerErrorException * An internal server error has occurred during the request . For more information , see the error and message * fields . * @ sample AWSStorageGateway . DescribeChapCredentials * @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / storagegateway - 2013-06-30 / DescribeChapCredentials " * target = " _ top " > AWS API Documentation < / a > */ @ Override public DescribeChapCredentialsResult describeChapCredentials ( DescribeChapCredentialsRequest request ) { } }
request = beforeClientExecution ( request ) ; return executeDescribeChapCredentials ( request ) ;
public class AccountACL { /** * Set if a player can withdraw money in the account * @ param name The Player name * @ param withdraw Can withdraw or not */ public void setWithdraw ( String name , boolean withdraw ) { } }
String newName = name . toLowerCase ( ) ; if ( aclList . containsKey ( name ) ) { AccountACLValue value = aclList . get ( newName ) ; set ( newName , value . canDeposit ( ) , withdraw , value . canAcl ( ) , value . canBalance ( ) , value . isOwner ( ) ) ; } else { set ( newName , false , withdraw , false , false , false ) ; }
public class DirectoryLookupService { /** * Remove the NotificationHandler from the Service . * @ param serviceName * the service name . * @ param handler * the NotificationHandler for the service . */ public void removeNotificationHandler ( String serviceName , NotificationHandler handler ) { } }
if ( handler == null || serviceName == null || serviceName . isEmpty ( ) ) { throw new IllegalArgumentException ( ) ; } synchronized ( notificationHandlers ) { if ( notificationHandlers . containsKey ( serviceName ) ) { List < NotificationHandler > list = notificationHandlers . get ( serviceName ) ; if ( list . contains ( handler ) ) { list . remove ( handler ) ; } if ( list . size ( ) == 0 ) { notificationHandlers . remove ( serviceName ) ; } } }
public class DataSet { /** * Writes a DataSet as text file ( s ) to the specified location . * < p > For each element of the DataSet the result of { @ link Object # toString ( ) } is written . < br / > * < br / > * < span class = " strong " > Output files and directories < / span > < br / > * What output how writeAsText ( ) method produces is depending on other circumstance * < ul > * < li > * A directory is created and multiple files are written underneath . ( Default behavior ) < br / > * This sink creates a directory called " path1 " , and files " 1 " , " 2 " . . . are writen underneath depending on < a href = " https : / / flink . apache . org / faq . html # what - is - the - parallelism - how - do - i - set - it " > parallelism < / a > * < pre > { @ code . * └ ─ ─ path1/ * └ ─ ─ . . . } < / pre > * Code Example * < pre > { @ code dataset . writeAsText ( " file : / / / path1 " ) ; } < / pre > * < / li > * < li > * A single file called " path1 " is created when parallelism is set to 1 * < pre > { @ code . * └ ─ ─ path1 } < / pre > * Code Example * < pre > { @ code / / Parallelism is set to only this particular operation * dataset . writeAsText ( " file : / / / path1 " ) . setParallelism ( 1 ) ; * / / This will creates the same effect but note all operators ' parallelism are set to one * env . setParallelism ( 1 ) ; * dataset . writeAsText ( " file : / / / path1 " ) ; } < / pre > * < / li > * < li > * A directory is always created when < a href = " https : / / ci . apache . org / projects / flink / flink - docs - master / setup / config . html # file - systems " > fs . output . always - create - directory < / a > * is set to true in flink - conf . yaml file , even when parallelism is set to 1. * < pre > { @ code . * └ ─ ─ path1/ * └ ─ ─ 1 } < / pre > * Code Example * < pre > { @ code / / fs . output . always - create - directory = true * dataset . writeAsText ( " file : / / / path1 " ) . 
setParallelism ( 1 ) ; } < / pre > * < / li > * < / ul > * @ param filePath The path pointing to the location the text file or files under the directory is written to . * @ return The DataSink that writes the DataSet . * @ see TextOutputFormat */ public DataSink < T > writeAsText ( String filePath ) { } }
return output ( new TextOutputFormat < T > ( new Path ( filePath ) ) ) ;
public class Widgets { /** * Creates a label that triggers an action using the supplied text and handler . The label will * be styled as specified with an additional style that configures the mouse pointer and adds * underline to the text . */ public static Label newActionLabel ( String text , String style , ClickHandler onClick ) { } }
return makeActionLabel ( newLabel ( text , style ) , onClick ) ;
public class ScriptUtils { /** * Recursively sets all files and directories executable , starting from a file or base directory . * @ param f a file to set executable or a base directory */ public static void setScriptsExecutable ( File fileOrDir ) { } }
List < File > files = new ArrayList < > ( ) ; if ( fileOrDir . isDirectory ( ) ) files . addAll ( Utils . listAllFiles ( fileOrDir , true ) ) ; else files . add ( fileOrDir ) ; for ( File f : files ) f . setExecutable ( true ) ;
public class FlowIdProtocolHeaderCodec { /** * Write flow id . * @ param message the message * @ param flowId the flow id */ public static void writeFlowId ( Message message , String flowId ) { } }
Map < String , List < String > > headers = getOrCreateProtocolHeader ( message ) ; headers . put ( FLOWID_HTTP_HEADER_NAME , Collections . singletonList ( flowId ) ) ; if ( LOG . isLoggable ( Level . FINE ) ) { LOG . fine ( "HTTP header '" + FLOWID_HTTP_HEADER_NAME + "' set to: " + flowId ) ; }
public class DefaultCouchbaseDataHandler { /** * Iterate and populate json object . * @ param entity * the entity * @ param iterator * the iterator * @ return the json object */ private JsonObject iterateAndPopulateJsonObject ( Object entity , Iterator < Attribute > iterator , String tableName ) { } }
JsonObject obj = JsonObject . create ( ) ; while ( iterator . hasNext ( ) ) { Attribute attribute = iterator . next ( ) ; Field field = ( Field ) attribute . getJavaMember ( ) ; Object value = PropertyAccessorHelper . getObject ( entity , field ) ; obj . put ( ( ( AbstractAttribute ) attribute ) . getJPAColumnName ( ) , value ) ; } obj . put ( CouchbaseConstants . KUNDERA_ENTITY , tableName ) ; return obj ;
public class SearchForm { /** * advance */ @ Override public int getStartPosition ( ) { } }
final FessConfig fessConfig = ComponentUtil . getFessConfig ( ) ; if ( start == null ) { start = fessConfig . getPagingSearchPageStartAsInteger ( ) ; } return start ;
public class UserProfileHandlerImpl { /** * Returns the profile child node of the given user node , creating it when it does not exist yet . * @ param userNode the JCR node of the user * @ return the existing or newly created profile node * @ throws RepositoryException on any other repository failure */ private Node getProfileNode ( Node userNode ) throws RepositoryException { } }
try {
    // Fast path: the profile node already exists under the user node.
    return userNode . getNode ( JCROrganizationServiceImpl . JOS_PROFILE ) ;
} catch ( PathNotFoundException e ) {
    // Get-or-create: absence is signalled by PathNotFoundException, so create it here.
    return userNode . addNode ( JCROrganizationServiceImpl . JOS_PROFILE ) ;
}
public class AbstractBackendHelper { /** * Call this in { @ link org . microg . nlp . api . LocationBackendService # onClose ( ) } . */ public synchronized void onClose ( ) { } }
if ( state == State . DISABLED || state == State . DISABLING ) throw new IllegalStateException ( "Do not call onClose if not opened before" ) ; if ( state == State . WAITING ) { state = State . DISABLED ; } else { state = State . DISABLING ; }
public class Media { /** * Downloads existing media file from server . * @ param mediaName name of media * @ param file file for putting content . Will be overridden . * @ throws IOException unexpected error . */ public void download ( final String mediaName , final File file ) throws IOException { } }
final String uri = client . getUserResourceInstanceUri ( BandwidthConstants . MEDIA_URI_PATH , mediaName ) ; client . download ( uri , file ) ;
public class ErrorPageFilter { /** * Called to render the error page , if possible . * @ param sc the status code of the error . * @ param message the message for the error . * @ param request the request that caused the error . * @ param response the response that the error will be written to . * @ throws IOException if there is a problem rendering the error page . */ @ SuppressWarnings ( "deprecation" ) protected void sendError ( int sc , String message , HttpServletRequest request , HttpServletResponse response ) throws IOException { } }
// Only attempt a custom page for 4xx/5xx statuses on an uncommitted response;
// anything else falls through to the container's default sendError below.
if ( 400 <= sc && sc < 600 && ! response . isCommitted ( ) ) {
    // Candidate page names, most specific first: e.g. "/404.html", "/40x.html", "/4xx.html".
    String [ ] fileNames = new String [ ] { "/" + Integer . toString ( sc ) + ".html" , "/" + Integer . toString ( sc ) . subSequence ( 0 , 2 ) + "x.html" , "/" + Integer . toString ( sc ) . subSequence ( 0 , 1 ) + "xx.html" } ;
    InputStream errorPageIn = null ;
    try {
        String path = request . getRequestURI ( ) ;
        // Walk up the request path one segment at a time until a page is found
        // or the root has been searched (path becomes null).
        while ( path != null && errorPageIn == null ) {
            if ( path . endsWith ( "/" ) ) {
                path = path . substring ( 0 , path . length ( ) - 1 ) ;
            }
            for ( String fileName : fileNames ) {
                if ( ( errorPageIn = contentService . getResourceAsStream ( path + fileName ) ) != null ) {
                    log . trace ( "Found error page for path {} at {}" , path , path + fileName ) ;
                    break ;
                }
            }
            if ( errorPageIn == null ) {
                // Not found at this level: strip the last path segment and retry.
                if ( path . length ( ) > 0 ) {
                    path = path . substring ( 0 , path . lastIndexOf ( "/" ) ) ;
                } else {
                    path = null ;
                }
            }
        }
        // get the default page: fall back to classpath resources bundled with this filter.
        if ( errorPageIn == null ) {
            for ( String fileName : fileNames ) {
                if ( ( errorPageIn = ErrorPageFilter . class . getResourceAsStream ( fileName ) ) != null ) {
                    log . trace ( "Found error page at {}" , fileName ) ;
                    break ;
                } else if ( ( errorPageIn = ErrorPageFilter . class . getResourceAsStream ( "./" + fileName ) ) != null ) {
                    log . trace ( "Found error page at {}" , "./" + fileName ) ;
                    break ;
                }
            }
        }
        if ( errorPageIn == null ) {
            // No custom page anywhere: delegate entirely to the container.
            log . trace ( "No error page found." ) ;
            if ( message == null ) response . sendError ( sc ) ; else response . sendError ( sc , message ) ;
            return ;
        }
        // set the status code.
        // NOTE(review): setStatus(int, String) is deprecated, hence @SuppressWarnings("deprecation").
        if ( message != null ) response . setStatus ( sc , message ) ; else response . setStatus ( sc ) ;
        // Set the content type and stream the page body straight to the response.
        response . setContentType ( MediaType . TEXT_HTML ) ;
        log . trace ( "Sending error page content to response:{}" , response . getClass ( ) . getName ( ) ) ;
        IOUtils . copy ( errorPageIn , response . getOutputStream ( ) ) ;
        log . trace ( "Done sending error page. {}" , sc ) ;
    } finally {
        // Best-effort cleanup of both the page stream and the response stream.
        IOUtils . closeQuietly ( errorPageIn ) ;
        IOUtils . closeQuietly ( response . getOutputStream ( ) ) ;
    }
} else {
    if ( response . isCommitted ( ) ) log . trace ( "Response is committed!" ) ;
    // Default container handling for committed responses or non-error statuses.
    if ( message == null ) response . sendError ( sc ) ; else response . sendError ( sc , message ) ;
}
public class MethodUtils { /** * < p > Retrieves a method whether or not it ' s accessible . If no such method * can be found , return { @ code null } . < / p > * @ param cls The class that will be subjected to the method search * @ param methodName The method that we wish to call * @ param parameterTypes Argument class types * @ return The method * @ since 3.5 */ public static Method getMatchingMethod ( final Class < ? > cls , final String methodName , final Class < ? > ... parameterTypes ) { } }
Validate . notNull ( cls , "Null class not allowed." ) ;
Validate . notEmpty ( methodName , "Null or blank methodName not allowed." ) ;
// Address methods in superclasses: build one candidate pool from the class
// and all of its ancestors' declared methods.
Method [ ] methodArray = cls . getDeclaredMethods ( ) ;
final List < Class < ? > > superclassList = ClassUtils . getAllSuperclasses ( cls ) ;
for ( final Class < ? > klass : superclassList ) {
    methodArray = ArrayUtils . addAll ( methodArray , klass . getDeclaredMethods ( ) ) ;
}
Method inexactMatch = null ;
for ( final Method method : methodArray ) {
    if ( methodName . equals ( method . getName ( ) ) && Objects . deepEquals ( parameterTypes , method . getParameterTypes ( ) ) ) {
        // Exact parameter-type match wins immediately.
        return method ;
    } else if ( methodName . equals ( method . getName ( ) ) && ClassUtils . isAssignable ( parameterTypes , method . getParameterTypes ( ) , true ) ) {
        // Assignment-compatible (widening/autoboxing) candidate: keep the one whose
        // parameter types are "closest" to the requested ones per distance().
        if ( inexactMatch == null ) {
            inexactMatch = method ;
        } else if ( distance ( parameterTypes , method . getParameterTypes ( ) ) < distance ( parameterTypes , inexactMatch . getParameterTypes ( ) ) ) {
            inexactMatch = method ;
        }
    }
}
// Null when no method of that name is compatible with the given types.
return inexactMatch ;
public class BigtableDataGrpcClient { /** * { @ inheritDoc } */ @ Override public ListenableFuture < List < FlatRow > > readFlatRowsAsync ( ReadRowsRequest request ) { } }
if ( shouldOverrideAppProfile ( request . getAppProfileId ( ) ) ) { request = request . toBuilder ( ) . setAppProfileId ( clientDefaultAppProfileId ) . build ( ) ; } return Futures . transform ( createStreamingListener ( request , readRowsAsync , request . getTableName ( ) ) . getAsyncResult ( ) , FLAT_ROW_LIST_TRANSFORMER , MoreExecutors . directExecutor ( ) ) ;
public class Tile { /** * Defines if the switch in the SwitchTileSkin is active * @ param SELECTED */ public void setActive ( final boolean SELECTED ) { } }
if ( null == active ) { _active = SELECTED ; fireTileEvent ( REDRAW_EVENT ) ; } else { active . set ( SELECTED ) ; }
public class SnapshotUtil { /** * Generates a Filename to the snapshot file for the given table . * @ param table * @ param fileNonce * @ param hostId */ public static final String constructFilenameForTable ( Table table , String fileNonce , SnapshotFormat format , int hostId ) { } }
String extension = ".vpt" ; if ( format == SnapshotFormat . CSV ) { extension = ".csv" ; } StringBuilder filename_builder = new StringBuilder ( fileNonce ) ; filename_builder . append ( "-" ) ; filename_builder . append ( table . getTypeName ( ) ) ; if ( ! table . getIsreplicated ( ) ) { filename_builder . append ( "-host_" ) ; filename_builder . append ( hostId ) ; } filename_builder . append ( extension ) ; // Volt partitioned table return filename_builder . toString ( ) ;
public class NettyHelper { /** * 得到服务端Boss线程池 (returns the server-side boss event loop group for the given protocol type , creating and caching it on first use and incrementing its reference count on every call) * @ param config 服务端配置 (server transport configuration) * @ return 服务端Boss线程池 (the shared boss event loop group for this protocol type) */ public static EventLoopGroup getServerBossEventLoopGroup ( ServerTransportConfig config ) { } }
String type = config . getProtocolType ( ) ;
// Double-checked locking: cheap unsynchronized lookup first, then re-check
// under the class lock before creating the group, so exactly one boss group
// exists per protocol type.
EventLoopGroup bossGroup = serverBossGroups . get ( type ) ;
if ( bossGroup == null ) {
    synchronized ( NettyHelper . class ) {
        bossGroup = serverBossGroups . get ( config . getProtocolType ( ) ) ;
        if ( bossGroup == null ) {
            int bossThreads = config . getBossThreads ( ) ;
            // Non-positive config value means "auto": at least 4 threads, or half the CPU cores.
            bossThreads = bossThreads <= 0 ? Math . max ( 4 , SystemInfo . getCpuCores ( ) / 2 ) : bossThreads ;
            NamedThreadFactory threadName = new NamedThreadFactory ( "SEV-BOSS-" + config . getPort ( ) , config . isDaemon ( ) ) ;
            // Prefer epoll on supporting platforms when configured.
            bossGroup = config . isUseEpoll ( ) ? new EpollEventLoopGroup ( bossThreads , threadName ) : new NioEventLoopGroup ( bossThreads , threadName ) ;
            serverBossGroups . put ( type , bossGroup ) ;
            // Reference count starts at 0 and is bumped below for this caller too.
            refCounter . putIfAbsent ( bossGroup , new AtomicInteger ( 0 ) ) ;
        }
    }
}
// Every caller shares the group; count them so it is only shut down when the last one releases it.
refCounter . get ( bossGroup ) . incrementAndGet ( ) ;
return bossGroup ;
public class CamelRouteActionBuilder { /** * Remove these Camel routes . */ public void remove ( String ... routes ) { } }
RemoveCamelRouteAction camelRouteAction = new RemoveCamelRouteAction ( ) ; camelRouteAction . setRouteIds ( Arrays . asList ( routes ) ) ; camelRouteAction . setCamelContext ( getCamelContext ( ) ) ; action . setDelegate ( camelRouteAction ) ;
public class Matchers { /** * Matches an AST node that is enclosed by some node that matches the given matcher . * < p > TODO ( eaftan ) : This could be used instead of enclosingBlock and enclosingClass . */ public static < T extends Tree > Matcher < Tree > enclosingNode ( final Matcher < T > matcher ) { } }
return new Matcher < Tree > ( ) { @ SuppressWarnings ( "unchecked" ) // TODO ( cushon ) : this should take a Class < T > @ Override public boolean matches ( Tree t , VisitorState state ) { TreePath path = state . getPath ( ) . getParentPath ( ) ; while ( path != null ) { Tree node = path . getLeaf ( ) ; state = state . withPath ( path ) ; if ( matcher . matches ( ( T ) node , state ) ) { return true ; } path = path . getParentPath ( ) ; } return false ; } } ;
public class AbstractSchemaScannerPlugin { /** * Create the table descriptors . * @ param catalog The catalog . * @ param schema The schema . * @ param schemaDescriptor The schema descriptor . * @ param columnTypes The cached data types . * @ param allColumns The map to collect all columns . * @ param allForeignKeys The map to collect all foreign keys . * @ param store The store . */ private void createTables ( Catalog catalog , Schema schema , SchemaDescriptor schemaDescriptor , Map < String , ColumnTypeDescriptor > columnTypes , Map < Column , ColumnDescriptor > allColumns , Set < ForeignKey > allForeignKeys , Store store ) { } }
for ( Table table : catalog . getTables ( schema ) ) {
    TableDescriptor tableDescriptor = getTableDescriptor ( table , schemaDescriptor , store ) ;
    // Columns of this table, indexed by name for primary-key/index resolution below.
    Map < String , ColumnDescriptor > localColumns = new HashMap < > ( ) ;
    for ( Column column : table . getColumns ( ) ) {
        ColumnDescriptor columnDescriptor = createColumnDescriptor ( column , ColumnDescriptor . class , columnTypes , store ) ;
        // Copy per-column metadata onto the descriptor.
        columnDescriptor . setDefaultValue ( column . getDefaultValue ( ) ) ;
        columnDescriptor . setGenerated ( column . isGenerated ( ) ) ;
        columnDescriptor . setPartOfIndex ( column . isPartOfIndex ( ) ) ;
        columnDescriptor . setPartOfPrimaryKey ( column . isPartOfPrimaryKey ( ) ) ;
        columnDescriptor . setPartOfForeignKey ( column . isPartOfForeignKey ( ) ) ;
        columnDescriptor . setAutoIncremented ( column . isAutoIncremented ( ) ) ;
        tableDescriptor . getColumns ( ) . add ( columnDescriptor ) ;
        localColumns . put ( column . getName ( ) , columnDescriptor ) ;
        // Also collect globally so foreign keys can be resolved across tables later.
        allColumns . put ( column , columnDescriptor ) ;
    }
    // Primary key
    PrimaryKey primaryKey = table . getPrimaryKey ( ) ;
    if ( primaryKey != null ) {
        PrimaryKeyDescriptor primaryKeyDescriptor = storeIndex ( primaryKey , tableDescriptor , localColumns , PrimaryKeyDescriptor . class , PrimaryKeyOnColumnDescriptor . class , store ) ;
        tableDescriptor . setPrimaryKey ( primaryKeyDescriptor ) ;
    }
    // Indices
    for ( Index index : table . getIndices ( ) ) {
        IndexDescriptor indexDescriptor = storeIndex ( index , tableDescriptor , localColumns , IndexDescriptor . class , IndexOnColumnDescriptor . class , store ) ;
        tableDescriptor . getIndices ( ) . add ( indexDescriptor ) ;
    }
    // Trigger
    for ( Trigger trigger : table . getTriggers ( ) ) {
        TriggerDescriptor triggerDescriptor = store . create ( TriggerDescriptor . class ) ;
        triggerDescriptor . setName ( trigger . getName ( ) ) ;
        triggerDescriptor . setActionCondition ( trigger . getActionCondition ( ) ) ;
        triggerDescriptor . setActionOrder ( trigger . getActionOrder ( ) ) ;
        // Enum-valued trigger metadata is persisted by name.
        triggerDescriptor . setActionOrientation ( trigger . getActionOrientation ( ) . name ( ) ) ;
        triggerDescriptor . setActionStatement ( trigger . getActionStatement ( ) ) ;
        triggerDescriptor . setConditionTiming ( trigger . getConditionTiming ( ) . name ( ) ) ;
        triggerDescriptor . setEventManipulationTime ( trigger . getEventManipulationType ( ) . name ( ) ) ;
        tableDescriptor . getTriggers ( ) . add ( triggerDescriptor ) ;
    }
    // Foreign keys are only collected here; the caller resolves them once all
    // tables and columns exist.
    allForeignKeys . addAll ( table . getForeignKeys ( ) ) ;
}
public class Synthetic { /** * Returns a sequence of events where some items are more popular than others , according to a * zipfian distribution . Unlike { @ link # zipfian } , the generated sequence scatters the " popular " * items across the item space . Use if you don ' t want the head of the distribution ( the popular * items ) clustered together . * @ param items the number of items in the distribution * @ param constant the skew factor for the distribution * @ param events the number of events in the distribution */ public static LongStream scrambledZipfian ( int items , double constant , int events ) { } }
return generate ( new ScrambledZipfianGenerator ( 0 , items - 1 , constant ) , events ) ;
public class PartitionReplicaSyncRequest { /** * Send responses for first number of { @ code permits } namespaces and remove them from the list . */ private void sendOperationsForNamespaces ( int permits ) { } }
InternalPartitionServiceImpl partitionService = getService ( ) ; try { PartitionReplicationEvent event = new PartitionReplicationEvent ( getPartitionId ( ) , getReplicaIndex ( ) ) ; Iterator < ServiceNamespace > iterator = namespaces . iterator ( ) ; for ( int i = 0 ; i < permits ; i ++ ) { ServiceNamespace namespace = iterator . next ( ) ; Collection < Operation > operations ; if ( NonFragmentedServiceNamespace . INSTANCE . equals ( namespace ) ) { operations = createNonFragmentedReplicationOperations ( event ) ; } else { operations = createFragmentReplicationOperations ( event , namespace ) ; } sendOperations ( operations , namespace ) ; iterator . remove ( ) ; } } finally { partitionService . getReplicaManager ( ) . releaseReplicaSyncPermits ( permits ) ; }
public class PublishedCustomer { /** * region > removeOrder ( action , not published ) * Removes the given order from this customer ' s collection and deletes it from the repository . * @ param order the order to detach and delete * @ return this customer , for action chaining */ @ Action ( semantics = SemanticsOf . IDEMPOTENT ) public PublishedCustomer removeOrder ( final Order order ) { } }
// Detach from the parent collection first, then delete the entity itself.
getOrders ( ) . remove ( order ) ;
repositoryService . remove ( order ) ;
// Return this so the UI stays on the customer after the action.
return this ;
public class MoreCollectors { /** * Returns a { @ code Collector } which finds all the elements which are equal * to each other and smaller than any other element according to the natural * order . The found elements are reduced using the specified downstream * { @ code Collector } . * @ param < T > the type of the input elements * @ param < A > the intermediate accumulation type of the downstream collector * @ param < D > the result type of the downstream reduction * @ param downstream a { @ code Collector } implementing the downstream * reduction * @ return a { @ code Collector } which finds all the minimal elements . * @ see # minAll ( Comparator , Collector ) * @ see # minAll ( Comparator ) * @ see # minAll ( ) */ public static < T extends Comparable < ? super T > , A , D > Collector < T , ? , D > minAll ( Collector < T , A , D > downstream ) { } }
return maxAll ( Comparator . < T > reverseOrder ( ) , downstream ) ;
public class DevicesApi { /** * Delete Device * Deletes a device * @ param deviceId deviceId ( required ) * @ return ApiResponse & lt ; DeviceEnvelope & gt ; * @ throws ApiException If fail to call the API , e . g . server error or cannot deserialize the response body */ public ApiResponse < DeviceEnvelope > deleteDeviceWithHttpInfo ( String deviceId ) throws ApiException { } }
com . squareup . okhttp . Call call = deleteDeviceValidateBeforeCall ( deviceId , null , null ) ; Type localVarReturnType = new TypeToken < DeviceEnvelope > ( ) { } . getType ( ) ; return apiClient . execute ( call , localVarReturnType ) ;
public class RowMapperWide { /** * Returns the Lucene { @ link Sort } to get { @ link Document } s in the same order that is used in Cassandra . * @ return The Lucene { @ link Sort } to get { @ link Document } s in the same order that is used in Cassandra . */ public Sort sort ( ) { } }
SortField [ ] partitionKeySort = tokenMapper . sortFields ( ) ; SortField [ ] clusteringKeySort = clusteringKeyMapper . sortFields ( ) ; return new Sort ( ArrayUtils . addAll ( partitionKeySort , clusteringKeySort ) ) ;
public class SegmentWithState { /** * Change the segment state to { @ link SegmentState # PUSHED _ AND _ DROPPED } . The current state should be * { @ link SegmentState # APPENDING } . This method should be called after the segment of { @ link # segmentIdentifier } is * completely pushed and dropped . * @ param dataSegment pushed { @ link DataSegment } */ public void pushAndDrop ( DataSegment dataSegment ) { } }
// Validate the APPENDING -> PUSHED_AND_DROPPED transition before mutating anything.
checkStateTransition ( this . state , SegmentState . APPENDING , SegmentState . PUSHED_AND_DROPPED ) ;
this . state = SegmentState . PUSHED_AND_DROPPED ;
// Record the pushed segment so callers can retrieve its final metadata.
this . dataSegment = dataSegment ;
public class BPSImpl { /** * Sets the page segment name and fires an EMF SET notification with the old and new values . * < ! - - begin - user - doc - - > * < ! - - end - user - doc - - > * @ param newPsegName the new page segment name * @ generated */ public void setPsegName ( String newPsegName ) { } }
// EMF-generated setter: capture the old value, assign, then notify adapters if any are attached.
String oldPsegName = psegName ;
psegName = newPsegName ;
if ( eNotificationRequired ( ) ) eNotify ( new ENotificationImpl ( this , Notification . SET , AfplibPackage . BPS__PSEG_NAME , oldPsegName , psegName ) ) ;
public class Messenger { /** * Share Group History * @ param gid group ' s id * @ return Promise of void that completes when the group ' s history has been shared */ @ NotNull @ ObjectiveCName ( "shareHistoryWithGid:" ) public Promise < Void > shareHistory ( int gid ) { } }
// Thin facade: the groups module owns the actual share-history logic.
return modules . getGroupsModule ( ) . shareHistory ( gid ) ;
public class Links { /** * Creates a { @ link Links } instance from the given RFC5988 - compatible link format . * @ param source a comma separated list of { @ link Link } representations . * @ return the { @ link Links } represented by the given { @ link String } . */ public static Links parse ( @ Nullable String source ) { } }
if ( ! StringUtils . hasText ( source ) ) { return NONE ; } Matcher matcher = LINK_HEADER_PATTERN . matcher ( source ) ; List < Link > links = new ArrayList < > ( ) ; while ( matcher . find ( ) ) { Link link = Link . valueOf ( matcher . group ( ) ) ; if ( link != null ) { links . add ( link ) ; } } return new Links ( links ) ;
public class UnderFileSystemBlockStore { /** * Closes the block reader or writer and checks whether it is necessary to commit the block * to Local block store . * During UFS block read , this is triggered when the block is unlocked . * During UFS block write , this is triggered when the UFS block is committed . * @ param sessionId the session ID * @ param blockId the block ID */ public void closeReaderOrWriter ( long sessionId , long blockId ) throws IOException { } }
BlockInfo blockInfo ; try ( LockResource lr = new LockResource ( mLock ) ) { blockInfo = mBlocks . get ( new Key ( sessionId , blockId ) ) ; if ( blockInfo == null ) { LOG . warn ( "Key (block ID: {}, session ID {}) is not found when cleaning up the UFS block." , blockId , sessionId ) ; return ; } } blockInfo . closeReaderOrWriter ( ) ;
public class NonBlockingHashMap { /** * Returns a { @ link Set } view of the keys contained in this map . The set * is backed by the map , so changes to the map are reflected in the set , * and vice - versa . The set supports element removal , which removes the * corresponding mapping from this map , via the < tt > Iterator . remove < / tt > , * < tt > Set . remove < / tt > , < tt > removeAll < / tt > , < tt > retainAll < / tt > , and * < tt > clear < / tt > operations . It does not support the < tt > add < / tt > or * < tt > addAll < / tt > operations . * < p > The view ' s < tt > iterator < / tt > is a " weakly consistent " iterator that * will never throw { @ link ConcurrentModificationException } , and guarantees * to traverse elements as they existed upon construction of the iterator , * and may ( but is not guaranteed to ) reflect any modifications subsequent * to construction . */ @ Override public Set < TypeK > keySet ( ) { } }
// Live view over the map: every operation delegates to the enclosing NonBlockingHashMap.
return new AbstractSet < TypeK > ( ) {
    @ Override public void clear ( ) { NonBlockingHashMap . this . clear ( ) ; }
    @ Override public int size ( ) { return NonBlockingHashMap . this . size ( ) ; }
    @ Override public boolean contains ( Object k ) { return NonBlockingHashMap . this . containsKey ( k ) ; }
    @ Override public boolean remove ( Object k ) { return NonBlockingHashMap . this . remove ( k ) != null ; }
    @ Override public Iterator < TypeK > iterator ( ) { return new SnapshotK ( ) ; }
    // This is an efficient implementation of toArray instead of the standard
    // one . In particular it uses a smart iteration over the NBHM .
    @ Override public < T > T [ ] toArray ( T [ ] a ) {
        // Walk the raw key/value array directly instead of going through an iterator.
        Object [ ] kvs = raw_array ( ) ;
        // Estimate size of array ; be prepared to see more or fewer elements
        int sz = size ( ) ;
        T [ ] r = a . length >= sz ? a : ( T [ ] ) java . lang . reflect . Array . newInstance ( a . getClass ( ) . getComponentType ( ) , sz ) ;
        // Fast efficient element walk .
        int j = 0 ;
        for ( int i = 0 ; i < len ( kvs ) ; i ++ ) {
            Object K = key ( kvs , i ) ;
            Object V = Prime . unbox ( val ( kvs , i ) ) ;
            // Only live entries count: skip empty slots and tombstoned keys/values.
            if ( K != null && K != TOMBSTONE && V != null && V != TOMBSTONE ) {
                if ( j >= r . length ) {
                    // Concurrent growth made the estimate stale: double the destination array,
                    // guarding against int overflow.
                    int sz2 = ( int ) Math . min ( Integer . MAX_VALUE - 8 , ( ( long ) j ) << 1 ) ;
                    if ( sz2 <= r . length ) throw new OutOfMemoryError ( "Required array size too large" ) ;
                    r = Arrays . copyOf ( r , sz2 ) ;
                }
                r [ j ++ ] = ( T ) K ;
            }
        }
        if ( j <= a . length ) { // Fit in the original array ?
            if ( a != r ) System . arraycopy ( r , 0 , a , 0 , j ) ;
            if ( j < a . length ) r [ j ++ ] = null ; // One final null not in the spec but in the default impl
            return a ; // Return the original
        }
        return Arrays . copyOf ( r , j ) ;
    }
} ;
public class ServiceFactory { /** * Example USage : * < code > * StructureFinder [ ] finders = new StructureFinder [ sources . length ] ; * ServiceFactory . getInstance ( ) . createForNames ( sources , finders ) ; * < / code > * @ param names the object / service names * @ param objectOutput array to place results * @ param < T > the type class */ public < T > void createForNames ( String [ ] names , T [ ] objectOutput ) { } }
if ( objectOutput == null || objectOutput . length == 0 ) throw new IllegalArgumentException ( "Non empty objectOutput" ) ; if ( names == null || names . length == 0 ) { return ; } String name = null ; Object obj = null ; try { for ( int i = 0 ; i < names . length ; i ++ ) { name = names [ i ] ; obj = this . create ( name ) ; objectOutput [ i ] = this . create ( name ) ; } } catch ( ArrayStoreException e ) { if ( obj == null ) throw e ; throw new SystemException ( "Cannot assign bean \"" + name + "\" class:" + obj . getClass ( ) . getName ( ) + " to array of objectOutput arrray class:" + Arrays . asList ( objectOutput ) , e ) ; }
public class WebAppFilterManager { /** * Compares the request uri to the passed in filter uri to see if the filter * associated with the filter uri should filter the request uri * @ param requestURI * the request uri * @ param filterURI * the uri associated with the filter * @ return boolean indicating whether the uri ' s match */ private boolean uriMatch ( String requestURI , IFilterMapping fmInfo , DispatcherType dispatcherType ) { } }
boolean theyMatch = false ; // determine what type of filter uri we have switch ( fmInfo . getMappingType ( ) ) { case FMI_MAPPING_SINGLE_SLASH : // default servlet mapping . . . if requestURI is " / " , they match if ( requestURI . equals ( "/" ) ) // 144908 theyMatch = true ; break ; case FMI_MAPPING_PATH_MATCH : // it ' s a path mapping . . . match string is already stripped if ( requestURI . startsWith ( fmInfo . getUrlPattern ( ) + "/" ) || requestURI . equals ( fmInfo . getUrlPattern ( ) ) ) theyMatch = true ; break ; case FMI_MAPPING_EXTENSION_MATCH : // it ' s an extension mapping . . . get the extension String ext = fmInfo . getUrlPattern ( ) . substring ( 2 ) ; // compare to any extension on the request uri int index = requestURI . lastIndexOf ( '.' ) ; if ( index != - 1 ) if ( ext . equals ( requestURI . substring ( index + 1 ) ) ) theyMatch = true ; break ; case FMI_MAPPING_EXACT_MATCH : // it ' s an exact match if ( requestURI . equals ( fmInfo . getUrlPattern ( ) ) ) theyMatch = true ; break ; default : // should never happen . . . give a message ? break ; } // Check if dispatch mode matches boolean dispMatch = false ; if ( theyMatch ) { for ( int i = 0 ; i < fmInfo . getDispatchMode ( ) . length ; i ++ ) { if ( dispatcherType == fmInfo . getDispatchMode ( ) [ i ] ) { dispMatch = true ; break ; } } } return dispMatch && theyMatch ;
public class SystemInterpreter { /** * ExqlInterpreter */ public static void main0 ( String [ ] args ) throws Exception { } }
// 转换语句中的表达式 String sql = "insert ignore into table_name " + "(`id`,`uid`,`favable_id`,`addtime`,`ranking`) " + "values (:1,:2,now(),0)" ; ExqlPattern pattern = ExqlPatternImpl . compile ( sql ) ; ExqlContextImpl context = new ExqlContextImpl ( ) ; Map < String , Object > parametersAsMap = new HashMap < String , Object > ( ) ; parametersAsMap . put ( ":1" , "p1" ) ; parametersAsMap . put ( ":2" , "p2" ) ; pattern . execute ( context , parametersAsMap ) ; String result = context . flushOut ( ) ; System . out . println ( result ) ;
public class MultiUserChat { /** * Get a new MUC enter configuration builder . * @ param nickname the nickname used when entering the MUC room . * @ return a new MUC enter configuration builder . * @ since 4.2 */ public MucEnterConfiguration . Builder getEnterConfigurationBuilder ( Resourcepart nickname ) { } }
return new MucEnterConfiguration . Builder ( nickname , connection . getReplyTimeout ( ) ) ;
public class ConnectionDAODefaultImpl {
    /**
     * Initializes the given connection for talking to the TANGO database:
     * resolves the Tango host URL, marks the connection as a database
     * connection, connects, and records the resolved device name.
     *
     * @param connection the connection object to initialize (mutated in place)
     * @throws DevFailed when the database connection cannot be established
     */
    public void init(final Connection connection) throws DevFailed {
        // Check Tango Host Properties (host name, port number)
        connection.url = new TangoUrl();
        connection.setDevice_is_dbase(true);
        connection.transparent_reconnection = true; // Always true for Database
        // connection.url.trace();
        // Ensure the ORB is initialized before connecting
        ApiUtil.get_orb();
        connect_to_dbase(connection);
        // Cache the resolved device name and mark the connection as established
        connection.devname = connection.device.name();
        connection.setAlready_connected(true);
    }
}
public class AbstractSphere3F {
    /**
     * Replies if the specified oriented box intersects the specified solid sphere.
     *
     * @param sphereCenterx x coordinate of the sphere center.
     * @param sphereCentery y coordinate of the sphere center.
     * @param sphereCenterz z coordinate of the sphere center.
     * @param sphereRadius is the radius of the sphere.
     * @param boxCenterx x coordinate of the center point of the oriented box.
     * @param boxCentery y coordinate of the center point of the oriented box.
     * @param boxCenterz z coordinate of the center point of the oriented box.
     * @param boxAxis1x x coordinate of the first axis of the oriented box axis.
     * @param boxAxis1y y coordinate of the first axis of the oriented box axis.
     * @param boxAxis1z z coordinate of the first axis of the oriented box axis.
     * @param boxAxis2x x coordinate of the second axis of the oriented box axis.
     * @param boxAxis2y y coordinate of the second axis of the oriented box axis.
     * @param boxAxis2z z coordinate of the second axis of the oriented box axis.
     * @param boxAxis3x x coordinate of the third axis of the oriented box axis.
     * @param boxAxis3y y coordinate of the third axis of the oriented box axis.
     * @param boxAxis3z z coordinate of the third axis of the oriented box axis.
     * @param boxExtentAxis1 extent of the first axis of the oriented box.
     * @param boxExtentAxis2 extent of the second axis of the oriented box.
     * @param boxExtentAxis3 extent of the third axis of the oriented box.
     * @return <code>true</code> if intersecting, otherwise <code>false</code>
     */
    @Pure
    public static boolean intersectsSolidSphereOrientedBox(
            double sphereCenterx, double sphereCentery, double sphereCenterz, double sphereRadius,
            double boxCenterx, double boxCentery, double boxCenterz,
            double boxAxis1x, double boxAxis1y, double boxAxis1z,
            double boxAxis2x, double boxAxis2y, double boxAxis2z,
            double boxAxis3x, double boxAxis3y, double boxAxis3z,
            double boxExtentAxis1, double boxExtentAxis2, double boxExtentAxis3) {
        // Compute the points on the OBB closest to and farthest from the sphere center
        Point3f nearPoint = new Point3f();
        Point3f farPoint = new Point3f();
        AbstractOrientedBox3F.computeClosestFarestOBBPoints(
                sphereCenterx, sphereCentery, sphereCenterz,
                boxCenterx, boxCentery, boxCenterz,
                boxAxis1x, boxAxis1y, boxAxis1z,
                boxAxis2x, boxAxis2y, boxAxis2z,
                boxAxis3x, boxAxis3y, boxAxis3z,
                boxExtentAxis1, boxExtentAxis2, boxExtentAxis3,
                nearPoint, farPoint);
        // They intersect iff the squared distance from the sphere center to the
        // closest OBB point is less than the squared sphere radius
        double squaredDistance = FunctionalPoint3D.distanceSquaredPointPoint(
                sphereCenterx, sphereCentery, sphereCenterz,
                nearPoint.getX(), nearPoint.getY(), nearPoint.getZ());
        return squaredDistance < sphereRadius * sphereRadius;
    }
}
public class Agg { /** * Get a { @ link Collector } that calculates the < code > BIT _ OR ( ) < / code > for any * type of { @ link Number } . */ public static < T , U > Collector < T , ? , Integer > bitOrInt ( ToIntFunction < ? super T > function ) { } }
return Collector . of ( ( ) -> new int [ 1 ] , ( s , v ) -> { s [ 0 ] = s [ 0 ] | function . applyAsInt ( v ) ; } , ( s1 , s2 ) -> { s1 [ 0 ] = s1 [ 0 ] | s2 [ 0 ] ; return s1 ; } , s -> s [ 0 ] ) ;
public class CrsUtilities { /** * Reads a { @ link CoordinateReferenceSystem } from a prj file . * @ param filePath the path to the prj file or the connected datafile it sidecar file for . * @ param extension the extension of the data file . If < code > null < / code > it is assumed to be prj . * @ return the read { @ link CoordinateReferenceSystem } . * @ throws Exception */ @ SuppressWarnings ( "nls" ) public static CoordinateReferenceSystem readProjectionFile ( String filePath , String extension ) throws Exception { } }
CoordinateReferenceSystem crs = null ; String prjPath = null ; String filePathLower = filePath . trim ( ) . toLowerCase ( ) ; if ( filePathLower . endsWith ( ".prj" ) ) { // it is the prj file prjPath = filePath ; } else if ( extension != null && filePathLower . endsWith ( "." + extension ) ) { // datafile was supplied ( substitute extension ) int dotLoc = filePath . lastIndexOf ( "." ) ; prjPath = filePath . substring ( 0 , dotLoc ) ; prjPath = prjPath + ".prj" ; } else { prjPath = filePath + ".prj" ; } File prjFile = new File ( prjPath ) ; if ( ! prjFile . exists ( ) ) { throw new ModelsIOException ( "The prj file doesn't exist: " + prjPath , "CRSUTILITIES" ) ; } String wkt = FileUtilities . readFile ( prjFile ) ; crs = CRS . parseWKT ( wkt ) ; return crs ;
public class GetGroupsResult { /** * The collection of all active groups . * @ param groups * The collection of all active groups . */ public void setGroups ( java . util . Collection < GroupSummary > groups ) { } }
if ( groups == null ) { this . groups = null ; return ; } this . groups = new java . util . ArrayList < GroupSummary > ( groups ) ;
public class CmsContentEditor {
    /**
     * Opens the copy locale dialog.<p>
     */
    void openCopyLocaleDialog() {
        // Build the dialog from the editor's locale state; the flag tells the
        // dialog whether the current locale's content definition has
        // synchronized elements, and "this" receives the dialog's result.
        CmsCopyLocaleDialog dialog = new CmsCopyLocaleDialog(
                m_availableLocales,
                m_contentLocales,
                m_locale,
                m_definitions.get(m_locale).hasSynchronizedElements(),
                this);
        // Show the dialog centered
        dialog.center();
    }
}
public class SftpUtil {
    /**
     * Deletes a directory with all its files and sub-directories. The reason it
     * does a "chmod 700" before the delete is that some tests change the
     * permission, and thus we have to restore the right to delete it...
     * Failures are logged at debug level and swallowed (best-effort cleanup).
     *
     * @param muleContext the Mule context used to resolve the endpoint
     * @param sftpClient the connected SFTP client to operate with
     * @param endpointName name of the endpoint whose URI provides the base path
     * @param relativePath path relative to the endpoint URI to delete
     * @throws IOException
     */
    static protected void recursiveDelete(MuleContext muleContext, SftpClient sftpClient,
            String endpointName, String relativePath) throws IOException {
        EndpointURI endpointURI = getImmutableEndpoint(muleContext, endpointName).getEndpointURI();
        String path = endpointURI.getPath() + relativePath;
        try {
            // Ensure that we can delete the current directory and the below
            // directories (if write is not permitted then delete is either)
            sftpClient.chmod(path, 00700);
            sftpClient.changeWorkingDirectory(sftpClient.getAbsolutePath(path));
            // Delete all sub-directories first (depth-first)
            String[] directories = sftpClient.listDirectories();
            for (String directory : directories) {
                recursiveDelete(muleContext, sftpClient, endpointName, relativePath + "/" + directory);
            }
            // Needs to change the directory back after the recursiveDelete
            sftpClient.changeWorkingDirectory(sftpClient.getAbsolutePath(path));
            // Delete all files in this directory
            String[] files = sftpClient.listFiles();
            for (String file : files) {
                sftpClient.deleteFile(file);
            }
            // Finally delete the (now empty) directory; failure is non-fatal
            try {
                sftpClient.deleteDirectory(path);
            } catch (Exception e) {
                if (logger.isDebugEnabled()) logger.debug("Failed delete directory " + path, e);
            }
        } catch (Exception e) {
            // Best-effort: log and continue rather than failing the caller
            if (logger.isDebugEnabled()) logger.debug("Failed to recursivly delete directory " + path, e);
        }
    }
}
public class SourceCode { /** * returns if the current character ( internal pointer ) and the following are the same as the given * input */ public boolean isCurrent ( String str ) { } }
if ( pos + str . length ( ) > lcText . length ) return false ; for ( int i = str . length ( ) - 1 ; i >= 0 ; i -- ) { if ( str . charAt ( i ) != lcText [ pos + i ] ) return false ; } return true ;
public class EigenvalueSmall_F64 { /** * See page 385 of Fundamentals of Matrix Computations 2nd */ public void symm2x2_fast ( double a11 , double a12 , double a22 ) { } }
// double p = ( a11 - a22 ) * 0.5; // double r = Math . sqrt ( p * p + a12 * a12 ) ; // value0 . real = a22 + a12 * a12 / ( r - p ) ; // value1 . real = a22 - a12 * a12 / ( r + p ) ; // public void symm2x2 _ std ( double a11 , double a12 , double a22 ) double left = ( a11 + a22 ) * 0.5 ; double b = ( a11 - a22 ) * 0.5 ; double right = Math . sqrt ( b * b + a12 * a12 ) ; value0 . real = left + right ; value1 . real = left - right ;
public class RevocationVerificationManager {
    /**
     * This method first tries to verify the given certificate chain using OCSP since OCSP verification is
     * faster. If that fails it tries to do the verification using CRL.
     *
     * @param peerCertificates javax.security.cert.X509Certificate[] array of peer certificate chain from peer/client.
     * @throws CertificateVerificationException Occurs when certificate fails to be validated from both OCSP and CRL.
     * @return true If the process of certificate revocation becomes successful.
     */
    public boolean verifyRevocationStatus(javax.security.cert.X509Certificate[] peerCertificates)
            throws CertificateVerificationException {
        // Convert javax.security certificates to java.security certificates
        X509Certificate[] convertedCertificates = convert(peerCertificates);
        long start = System.currentTimeMillis();
        // If not set by the user, default cache size will be 50 and default cache delay will be 15 mins.
        OCSPCache ocspCache = OCSPCache.getCache();
        ocspCache.init(cacheSize, cacheDelayMins);
        CRLCache crlCache = CRLCache.getCache();
        crlCache.init(cacheSize, cacheDelayMins);
        // Order matters: OCSP is tried first because it is faster than CRL
        RevocationVerifier[] verifiers = { new OCSPVerifier(ocspCache), new CRLVerifier(crlCache) };
        for (RevocationVerifier verifier : verifiers) {
            try {
                CertificatePathValidator pathValidator = new CertificatePathValidator(convertedCertificates, verifier);
                pathValidator.validatePath();
                if (LOG.isInfoEnabled()) {
                    LOG.info("Path verification is successful. Took {} ms.", System.currentTimeMillis() - start);
                }
                return true;
            } catch (Exception e) {
                // A failure with one verifier is not fatal; fall through to the next
                if (LOG.isDebugEnabled()) {
                    LOG.debug("{} failed.", verifier.getClass().getSimpleName());
                    LOG.debug("Certificate verification with {} failed. ", verifier.getClass().getSimpleName(), e);
                }
            }
        }
        // Neither OCSP nor CRL could validate the chain
        throw new CertificateVerificationException("Path verification failed for both OCSP and CRL");
    }
}
public class SignatureFinder {
    /**
     * Stop finding signatures for all active players. Unregisters all
     * listeners, shuts down the queue handler thread, and reports the loss of
     * all known signatures on the Swing event dispatch thread (outside this
     * object's lock) before announcing the lifecycle change.
     */
    public synchronized void stop() {
        if (isRunning()) {
            // Detach from all upstream event sources
            MetadataFinder.getInstance().removeTrackMetadataListener(metadataListener);
            WaveformFinder.getInstance().removeWaveformListener(waveformListener);
            BeatGridFinder.getInstance().removeBeatGridListener(beatGridListener);
            running.set(false);
            pendingUpdates.clear();
            // Stop the worker thread that drains pendingUpdates
            queueHandler.interrupt();
            queueHandler = null;

            // Report the loss of our signatures, on the proper thread, outside our lock
            final Set<Integer> dyingSignatures = new HashSet<Integer>(signatures.keySet());
            signatures.clear();
            SwingUtilities.invokeLater(new Runnable() {
                @Override
                public void run() {
                    for (Integer player : dyingSignatures) {
                        deliverSignatureUpdate(player, null);
                    }
                }
            });
        }
        // Announce shutdown even if we were not running
        deliverLifecycleAnnouncement(logger, false);
    }
}
public class DateUtils { /** * Parse format { @ link # DATETIME _ FORMAT } . This method never throws exception . * @ param s any string * @ return the datetime , { @ code null } if parsing error or if parameter is { @ code null } */ @ CheckForNull public static Date parseDateTimeQuietly ( @ Nullable String s ) { } }
Date datetime = null ; if ( s != null ) { try { datetime = parseDateTime ( s ) ; } catch ( RuntimeException e ) { // ignore } } return datetime ;
public class CsvDozerBeanWriter { /** * { @ inheritDoc } */ public void write ( final Object source , final CellProcessor [ ] processors ) throws IOException { } }
if ( source == null ) { throw new NullPointerException ( "object to write should not be null" ) ; } else if ( processors == null ) { throw new NullPointerException ( "processors should not be null" ) ; } // update the current row / line numbers super . incrementRowAndLineNo ( ) ; // extract the bean values into the List using dozer beanData . getColumns ( ) . clear ( ) ; dozerBeanMapper . map ( source , beanData ) ; // execute the cell processors Util . executeCellProcessors ( processedColumns , beanData . getColumns ( ) , processors , getLineNumber ( ) , getRowNumber ( ) ) ; // write the list super . writeRow ( processedColumns ) ;
public class StockholmFileParser { /** * Handles a line that corresponds to a sequence . < br > * e . g . : COATB _ BPIKE / 30-81 AEPNAATNYATEAMDSLKTQAIDLISQTWPVVTTVVVAGLVIRLFKKFSSKA < br > * N . B . : This function can ' t tolerate sequences with intrinsic white space . * @ param line * the line to be parsed * @ throws Exception */ private void handleSequenceLine ( String line ) throws ParserException { } }
String [ ] lineContent = line . split ( "\\s+" ) ; if ( lineContent . length != 2 ) { throw new ParserException ( "Could not split sequence line into sequence name and sequence:\n" + line ) ; } stockholmStructure . appendToSequence ( lineContent [ 0 ] , lineContent [ 1 ] ) ;
public class IOUtils { /** * Read text . * @ param inputStream the input stream * @ return the string */ public static String readText ( InputStream inputStream ) { } }
BufferedReader bufferedReader = new BufferedReader ( new InputStreamReader ( inputStream ) ) ; String line ; StringBuilder stringBuilder = new StringBuilder ( ) ; try { while ( ( line = bufferedReader . readLine ( ) ) != null ) { stringBuilder . append ( line ) ; stringBuilder . append ( '\n' ) ; } } catch ( IOException e ) { Logger . error ( e . getMessage ( ) ) ; e . printStackTrace ( ) ; return null ; } finally { if ( bufferedReader != null ) { try { bufferedReader . close ( ) ; } catch ( IOException e ) { Logger . error ( e . getMessage ( ) ) ; e . printStackTrace ( ) ; } } } return stringBuilder . toString ( ) ;
public class OntologyTermRepository { /** * Retrieves an { @ link OntologyTerm } for one or more IRIs * @ param iris Array of { @ link OntologyTerm } IRIs * @ return combined { @ link OntologyTerm } for the iris . */ public OntologyTerm getOntologyTerm ( String [ ] iris ) { } }
List < OntologyTerm > ontologyTerms = Lists . newArrayList ( ) ; for ( String iri : iris ) { org . molgenis . ontology . core . meta . OntologyTerm ontologyTermEntity = dataService . query ( ONTOLOGY_TERM , org . molgenis . ontology . core . meta . OntologyTerm . class ) . eq ( ONTOLOGY_TERM_IRI , iri ) . findOne ( ) ; OntologyTerm ontologyTerm = toOntologyTerm ( ontologyTermEntity ) ; if ( ontologyTerm == null ) { return null ; } ontologyTerms . add ( ontologyTerm ) ; } return OntologyTerm . and ( ontologyTerms . toArray ( new OntologyTerm [ 0 ] ) ) ;
public class DefaultWardenService { /** * ~ Methods * * * * * */ private static String _constructWardenMetricExpression ( String relativeStart , PrincipalUser user , PolicyCounter counter ) { } }
assert ( user != null ) : "User cannot be null." ; assert ( counter != null ) : "Counter cannot be null." ; assert ( relativeStart != null ) : "Relative start cannot be null." ; String metricName = counter . getMetricName ( ) ; String userName = user . getUserName ( ) ; if ( "sum" . equals ( counter . getAggregator ( ) ) ) { return MessageFormat . format ( "INTEGRAL(ZEROIFMISSINGSUM({0}:argus.custom:{1}'{'user={2},host=*'}':{3}))" , relativeStart , metricName , userName , counter . getAggregator ( ) ) ; } else { return MessageFormat . format ( "{0}:argus.custom:{1}'{'user={2},host=*'}':{3}" , relativeStart , metricName , userName , counter . getAggregator ( ) ) ; }
public class AbstractJaxRsResourceProvider { /** * Update an existing resource based on the given condition * @ param resource the body contents for the put method * @ return the response * @ see < a href = " https : / / www . hl7 . org / fhir / http . html # update " > https : / / www . hl7 . org / fhir / http . html # update < / a > */ @ PUT public Response conditionalUpdate ( final String resource ) throws IOException { } }
return execute ( getResourceRequest ( RequestTypeEnum . PUT , RestOperationTypeEnum . UPDATE ) . resource ( resource ) ) ;
public class HttpResponseHeader { /** * Parses the response headers and build a lis of all the http cookies set . For the cookies whose domain * could not be determined , the { @ code defaultDomain } is set . * @ param defaultDomain the default domain * @ return the http cookies */ public List < HttpCookie > getHttpCookies ( String defaultDomain ) { } }
List < HttpCookie > cookies = new LinkedList < > ( ) ; Vector < String > cookiesS = getHeaders ( HttpHeader . SET_COOKIE ) ; if ( cookiesS != null ) { for ( String c : cookiesS ) { cookies . addAll ( parseCookieString ( c , defaultDomain ) ) ; } } cookiesS = getHeaders ( HttpHeader . SET_COOKIE2 ) ; if ( cookiesS != null ) { for ( String c : cookiesS ) { cookies . addAll ( parseCookieString ( c , defaultDomain ) ) ; } } return cookies ;
public class ElementIterator {
    /**
     * Copied from {@link org.apache.accumulo.core.iterators.user.RowEncodingIterator}.
     * Seeks the underlying source iterator, widening an exclusive start key that
     * points "just past" a previously returned row to the following row, then
     * loads the next aggregated entry.
     *
     * @param range the range to seek to (may be adjusted to row boundaries)
     * @param columnFamilies the column families to fetch
     * @param inclusive whether {@code columnFamilies} is an inclusion or exclusion set
     * @throws IOException if the source iterator fails to seek
     */
    @Override
    public void seek(Range range, Collection<ByteSequence> columnFamilies, boolean inclusive) throws IOException {
        // Reset any previously emitted entry
        topKey = null;
        topValue = null;

        Key sk = range.getStartKey();

        // A start key with empty CF/CQ/visibility, max timestamp, and exclusive start
        // looks like a key previously returned by this iterator: advance to the next row.
        if (sk != null && sk.getColumnFamilyData().length() == 0 && sk.getColumnQualifierData().length() == 0
                && sk.getColumnVisibilityData().length() == 0 && sk.getTimestamp() == Long.MAX_VALUE
                && !range.isStartKeyInclusive()) {
            // assuming that we are seeking using a key previously returned by this iterator
            // therefore go to the next row
            Key followingRowKey = sk.followingKey(PartialKey.ROW);
            // The next row starts beyond the range's end: nothing to return
            if (range.getEndKey() != null && followingRowKey.compareTo(range.getEndKey()) > 0) {
                return;
            }

            range = new Range(sk.followingKey(PartialKey.ROW), true, range.getEndKey(), range.isEndKeyInclusive());
        }

        sourceIterator.seek(range, columnFamilies, inclusive);
        loadNext();
    }
}
public class GsonOrdainer { /** * Generate sanitized js content with provided Gson serializer . * @ param gson A Gson serializer . * @ param obj The object to render as gson . * @ return SanitizedContent containing the object rendered as a json string . */ public static SanitizedContent serializeObject ( Gson gson , Object obj ) { } }
return ordainJson ( gson . toJson ( obj ) ) ;
public class CmsModuleUpdater {
    /**
     * Handles the file deletions.
     *
     * @param cms the CMS context to use
     * @param toDelete the resources to delete (list is sorted in place)
     * @throws CmsException if something goes wrong
     */
    protected void processDeletions(CmsObject cms, List<CmsResource> toDelete) throws CmsException {
        // Sort by root path descending so children are deleted before their parent folders
        Collections.sort(toDelete, (a, b) -> b.getRootPath().compareTo(a.getRootPath()));
        for (CmsResource deleteRes : toDelete) {
            // Report which resource is being deleted
            m_report.print(
                    org.opencms.importexport.Messages.get().container(
                            org.opencms.importexport.Messages.RPT_DELFOLDER_0),
                    I_CmsReport.FORMAT_NOTE);
            m_report.print(
                    org.opencms.report.Messages.get().container(
                            org.opencms.report.Messages.RPT_ARGUMENT_1,
                            deleteRes.getRootPath()));
            // The resource must be locked by us before it can be deleted
            CmsLock lock = cms.getLock(deleteRes);
            if (lock.isUnlocked()) {
                lock(cms, deleteRes);
            }
            cms.deleteResource(deleteRes, CmsResource.DELETE_PRESERVE_SIBLINGS);
            m_report.println(
                    org.opencms.report.Messages.get().container(org.opencms.report.Messages.RPT_OK_0),
                    I_CmsReport.FORMAT_OK);
        }
    }
}
public class Device { /** * Request the update progress as a percentage value from 0 to 100 * @ return The current progress . * @ throws CommandExecutionException When there has been a error during the communication or the response was invalid . */ public int updateProgress ( ) throws CommandExecutionException { } }
int resp = sendToArray ( "miIO.get_ota_progress" ) . optInt ( 0 , - 1 ) ; if ( ( resp < 0 ) || ( resp > 100 ) ) throw new CommandExecutionException ( CommandExecutionException . Error . INVALID_RESPONSE ) ; return resp ;
public class FacesImpl { /** * Detect human faces in an image and returns face locations , and optionally with faceIds , landmarks , and attributes . * @ param image An image stream . * @ param detectWithStreamOptionalParameter the object representing the optional parameters to be set before calling this API * @ param serviceCallback the async ServiceCallback to handle successful and failed responses . * @ throws IllegalArgumentException thrown if parameters fail the validation * @ return the { @ link ServiceFuture } object */ public ServiceFuture < List < DetectedFace > > detectWithStreamAsync ( byte [ ] image , DetectWithStreamOptionalParameter detectWithStreamOptionalParameter , final ServiceCallback < List < DetectedFace > > serviceCallback ) { } }
return ServiceFuture . fromResponse ( detectWithStreamWithServiceResponseAsync ( image , detectWithStreamOptionalParameter ) , serviceCallback ) ;
public class ComputerVisionImpl { /** * Optical Character Recognition ( OCR ) detects printed text in an image and extracts the recognized characters into a machine - usable character stream . Upon success , the OCR results will be returned . Upon failure , the error code together with an error message will be returned . The error code can be one of InvalidImageUrl , InvalidImageFormat , InvalidImageSize , NotSupportedImage , NotSupportedLanguage , or InternalServerError . * @ param detectOrientation Whether detect the text orientation in the image . With detectOrientation = true the OCR service tries to detect the image orientation and correct it before further processing ( e . g . if it ' s upside - down ) . * @ param url Publicly reachable URL of an image * @ param recognizePrintedTextOptionalParameter the object representing the optional parameters to be set before calling this API * @ param serviceCallback the async ServiceCallback to handle successful and failed responses . * @ throws IllegalArgumentException thrown if parameters fail the validation * @ return the { @ link ServiceFuture } object */ public ServiceFuture < OcrResult > recognizePrintedTextAsync ( boolean detectOrientation , String url , RecognizePrintedTextOptionalParameter recognizePrintedTextOptionalParameter , final ServiceCallback < OcrResult > serviceCallback ) { } }
return ServiceFuture . fromResponse ( recognizePrintedTextWithServiceResponseAsync ( detectOrientation , url , recognizePrintedTextOptionalParameter ) , serviceCallback ) ;
public class PropertyResolver { /** * Parse a String and replace vars a la ant ( $ { key } from System Properties * Support complex Strings like : * " $ { myhost } " " $ { myhost : w2 } " " $ { mybean : defbean } . $ { mybean2 : defbean2 } " * @ param s * @ return resolved String */ public String resolveProps ( @ Nullable String s ) { } }
if ( s == null ) { return null ; } int ipos = 0 ; int pos = s . indexOf ( "${" , ipos ) ; if ( pos == - 1 ) return s ; StringBuilder sb = new StringBuilder ( ) ; while ( ipos < s . length ( ) ) { pos = s . indexOf ( "${" , ipos ) ; if ( pos < 0 ) { sb . append ( s . substring ( ipos ) ) ; break ; } if ( pos != ipos ) sb . append ( s . substring ( ipos , pos ) ) ; int end = s . indexOf ( '}' , pos ) ; if ( end < 0 ) break ; int start = pos + 2 ; String key = s . substring ( start , end ) ; String val = resolveString ( key ) ; if ( val != null ) sb . append ( val ) ; else sb . append ( "${" ) . append ( key ) . append ( "}" ) ; ipos = end + 1 ; } return sb . toString ( ) ;