signature
stringlengths
43
39.1k
implementation
stringlengths
0
450k
public class CommerceUserSegmentCriterionUtil { /** * Returns a range of all the commerce user segment criterions where commerceUserSegmentEntryId = & # 63 ; . * Useful when paginating results . Returns a maximum of < code > end - start < / code > instances . < code > start < / code > and < code > end < / code > are not primary keys , they are indexes in the result set . Thus , < code > 0 < / code > refers to the first result in the set . Setting both < code > start < / code > and < code > end < / code > to { @ link QueryUtil # ALL _ POS } will return the full result set . If < code > orderByComparator < / code > is specified , then the query will include the given ORDER BY logic . If < code > orderByComparator < / code > is absent and pagination is required ( < code > start < / code > and < code > end < / code > are not { @ link QueryUtil # ALL _ POS } ) , then the query will include the default ORDER BY logic from { @ link CommerceUserSegmentCriterionModelImpl } . If both < code > orderByComparator < / code > and pagination are absent , for performance reasons , the query will not have an ORDER BY clause and the returned result set will be sorted on by the primary key in an ascending order . * @ param commerceUserSegmentEntryId the commerce user segment entry ID * @ param start the lower bound of the range of commerce user segment criterions * @ param end the upper bound of the range of commerce user segment criterions ( not inclusive ) * @ return the range of matching commerce user segment criterions */ public static List < CommerceUserSegmentCriterion > findByCommerceUserSegmentEntryId ( long commerceUserSegmentEntryId , int start , int end ) { } }
return getPersistence ( ) . findByCommerceUserSegmentEntryId ( commerceUserSegmentEntryId , start , end ) ;
public class ApiOvhPackxdsl { /** * Activate an Email Pro service * REST : POST / pack / xdsl / { packName } / emailPro / services * @ param email [ required ] The email address * @ param password [ required ] The password * @ param packName [ required ] The internal name of your pack */ public OvhTask packName_emailPro_services_POST ( String packName , String email , String password ) throws IOException { } }
String qPath = "/pack/xdsl/{packName}/emailPro/services" ; StringBuilder sb = path ( qPath , packName ) ; HashMap < String , Object > o = new HashMap < String , Object > ( ) ; addBody ( o , "email" , email ) ; addBody ( o , "password" , password ) ; String resp = exec ( qPath , "POST" , sb . toString ( ) , o ) ; return convertTo ( resp , OvhTask . class ) ;
public class GreenPepperServerServiceImpl { /** * { @ inheritDoc } */ public void removeRunner ( String name ) throws GreenPepperServerException { } }
try { sessionService . startSession ( ) ; sessionService . beginTransaction ( ) ; sutDao . removeRunner ( name ) ; sessionService . commitTransaction ( ) ; log . debug ( "Removed Runner: " + name ) ; } catch ( Exception ex ) { sessionService . rollbackTransaction ( ) ; throw handleException ( RUNNER_REMOVE_FAILED , ex ) ; } finally { sessionService . closeSession ( ) ; }
public class DeleteManager { /** * Get the delete set if any from the plf and process each delete command removing any that fail * from the delete set so that the delete set is self cleaning . */ static void applyAndUpdateDeleteSet ( Document plf , Document ilf , IntegrationResult result ) { } }
Element dSet = null ; try { dSet = getDeleteSet ( plf , null , false ) ; } catch ( Exception e ) { LOG . error ( "Exception occurred while getting user's DLM delete-set." , e ) ; } if ( dSet == null ) return ; NodeList deletes = dSet . getChildNodes ( ) ; for ( int i = deletes . getLength ( ) - 1 ; i >= 0 ; i -- ) { if ( applyDelete ( ( Element ) deletes . item ( i ) , ilf ) == false ) { dSet . removeChild ( deletes . item ( i ) ) ; result . setChangedPLF ( true ) ; } else { result . setChangedILF ( true ) ; } } if ( dSet . getChildNodes ( ) . getLength ( ) == 0 ) { plf . getDocumentElement ( ) . removeChild ( dSet ) ; result . setChangedPLF ( true ) ; }
public class BundleProcessor { /** * Create the bundle file * @ param servlet * the servlet * @ param response * the response * @ param request * the request * @ param path * the path * @ param destFile * the destination file * @ param mapping * the mapping * @ throws IOException * if an IO exception occurs * @ throws ServletException * if an exception occurs */ protected void createBundleFile ( HttpServlet servlet , MockServletResponse response , MockServletRequest request , String path , File destFile , String mapping ) throws IOException , ServletException { } }
request . setRequestPath ( mapping , path ) ; // Create the parent directory of the destination file if ( ! destFile . getParentFile ( ) . exists ( ) ) { boolean dirsCreated = destFile . getParentFile ( ) . mkdirs ( ) ; if ( ! dirsCreated ) { throw new IOException ( "The directory '" + destFile . getParentFile ( ) . getCanonicalPath ( ) + "' can't be created." ) ; } } // Set the response mock to write in the destination file try { response . setOutputStream ( new FileOutputStream ( destFile ) ) ; servlet . service ( request , response ) ; } finally { response . close ( ) ; } if ( destFile . length ( ) == 0 ) { logger . warn ( "No content retrieved for file '" + destFile . getAbsolutePath ( ) + "', which is associated to the path : " + path ) ; System . out . println ( "No content retrieved for file '" + destFile . getAbsolutePath ( ) + "', which is associated to the path : " + path ) ; }
public class SigninFormPanel { /** * Factory method for creating the new { @ link Label } for the button . This method is invoked in * the constructor from the derived classes and can be overridden so users can provide their own * version of a new { @ link Label } for the button . * @ param id * the id * @ param resourceKey * the resource key * @ param defaultValue * the default value * @ return the new { @ link Label } for the button . */ protected Label newButtonLabel ( final String id , final String resourceKey , final String defaultValue ) { } }
final IModel < String > labelModel = ResourceModelFactory . newResourceModel ( resourceKey , this , defaultValue ) ; return ComponentFactory . newLabel ( id , labelModel ) ;
public class ClientController { /** * Bulk delete clients from a profile . * @ param model * @ param profileIdentifier * @ param clientUUID * @ return returns the table of the remaining clients or an exception if deletion failed for some reason * @ throws Exception */ @ RequestMapping ( value = "/api/profile/{profileIdentifier}/clients/delete" , method = RequestMethod . POST ) public @ ResponseBody HashMap < String , Object > deleteClient ( Model model , @ RequestParam ( "profileIdentifier" ) String profileIdentifier , @ RequestParam ( "clientUUID" ) String [ ] clientUUID ) throws Exception { } }
logger . info ( "Attempting to remove clients from the profile: " , profileIdentifier ) ; logger . info ( "Attempting to remove the following clients: {}" , Arrays . toString ( clientUUID ) ) ; HashMap < String , Object > valueHash = new HashMap < String , Object > ( ) ; Integer profileId = ControllerUtils . convertProfileIdentifier ( profileIdentifier ) ; for ( int i = 0 ; i < clientUUID . length ; i ++ ) { if ( clientUUID [ i ] . compareTo ( Constants . PROFILE_CLIENT_DEFAULT_ID ) == 0 ) throw new Exception ( "Default client cannot be deleted" ) ; clientService . remove ( profileId , clientUUID [ i ] ) ; } valueHash . put ( "clients" , clientService . findAllClients ( profileId ) ) ; return valueHash ;
public class TableModel { /** * Returns all rows in the model as a list of lists containing the data as elements * @ return All rows in the model as a list of lists containing the data as elements */ public synchronized List < List < V > > getRows ( ) { } }
List < List < V > > copy = new ArrayList < List < V > > ( ) ; for ( List < V > row : rows ) { copy . add ( new ArrayList < V > ( row ) ) ; } return copy ;
public class StatementServiceImp { /** * / * ( non - Javadoc ) * @ see com . popbill . api . StatementService # sendEmail ( java . lang . String , java . number . Integer , java . lang . String , java . lang . String ) */ @ Override public Response sendEmail ( String CorpNum , int ItemCode , String MgtKey , String Receiver ) throws PopbillException { } }
if ( MgtKey == null || MgtKey . isEmpty ( ) ) throw new PopbillException ( - 99999999 , "관리번호가 입력되지 않았습니다." ) ; return sendEmail ( CorpNum , ItemCode , MgtKey , Receiver , null ) ;
public class ByteBufQueue { /** * Adds { @ code maxSize } bytes from this queue to ByteBuf { @ code dest } if * queue contains more than { @ code maxSize } bytes . Otherwise adds all * bytes from queue to dest . In both cases increases queue ' s position to * number of drained bytes . * @ param dest { @ code ByteBuf } for draining * @ param maxSize number of bytes for adding * @ return number of drained bytes */ public int drainTo ( @ NotNull ByteBuf dest , int maxSize ) { } }
int actualSize = drainTo ( dest . array ( ) , dest . tail ( ) , maxSize ) ; dest . moveTail ( actualSize ) ; return actualSize ;
public class CommerceShippingMethodUtil { /** * Returns the commerce shipping method with the primary key or throws a { @ link NoSuchShippingMethodException } if it could not be found . * @ param commerceShippingMethodId the primary key of the commerce shipping method * @ return the commerce shipping method * @ throws NoSuchShippingMethodException if a commerce shipping method with the primary key could not be found */ public static CommerceShippingMethod findByPrimaryKey ( long commerceShippingMethodId ) throws com . liferay . commerce . exception . NoSuchShippingMethodException { } }
return getPersistence ( ) . findByPrimaryKey ( commerceShippingMethodId ) ;
public class BlacklistsEntity { /** * Add a Token to the Blacklist . A token with scope blacklist : tokens is needed . * See https : / / auth0 . com / docs / api / management / v2 # ! / Blacklists / post _ tokens . * @ param token the token to blacklist . * @ return a Request to execute . */ public Request blacklistToken ( Token token ) { } }
Asserts . assertNotNull ( token , "token" ) ; String url = baseUrl . newBuilder ( ) . addPathSegments ( "api/v2/blacklists/tokens" ) . build ( ) . toString ( ) ; VoidRequest request = new VoidRequest ( client , url , "POST" ) ; request . addHeader ( "Authorization" , "Bearer " + apiToken ) ; request . setBody ( token ) ; return request ;
public class JwtSSOTokenImpl { /** * ( non - Javadoc ) * @ see * com . ibm . ws . security . sso . cookie . JwtSSOToken # getJwtSSOToken ( javax . security . * auth . Subject ) */ @ Override public String getJwtSSOToken ( Subject subject ) { } }
// TODO Auto - generated method stub String encodedjwtprincipal = null ; Set < JsonWebToken > jsonWebTokenPrincipalSet = getJwtPrincipals ( subject ) ; if ( jsonWebTokenPrincipalSet != null && ! jsonWebTokenPrincipalSet . isEmpty ( ) ) { if ( hasMultiplePrincipals ( jsonWebTokenPrincipalSet ) ) { // TODO error } else { encodedjwtprincipal = convertToEncoded ( jsonWebTokenPrincipalSet . iterator ( ) . next ( ) ) ; } } return encodedjwtprincipal ;
public class IniFile { /** * Returns the specified long property from the specified section . * @ param pstrSection the INI section name . * @ param pstrProp the property to be retrieved . * @ return the long property value . */ public Long getLongProperty ( String pstrSection , String pstrProp ) { } }
Long lngRet = null ; String strVal = null ; INIProperty objProp = null ; INISection objSec = null ; objSec = ( INISection ) this . mhmapSections . get ( pstrSection ) ; if ( objSec != null ) { objProp = objSec . getProperty ( pstrProp ) ; try { if ( objProp != null ) { strVal = objProp . getPropValue ( ) ; if ( strVal != null ) lngRet = new Long ( strVal ) ; } } catch ( NumberFormatException NFExIgnore ) { } finally { if ( objProp != null ) objProp = null ; } objSec = null ; } return lngRet ;
public class XCARespondingGatewayAuditor { /** * Get an instance of the XCA Responding Gateway Auditor from the * global context * @ return XCA Responding Gateway Auditor instance */ public static XCARespondingGatewayAuditor getAuditor ( ) { } }
AuditorModuleContext ctx = AuditorModuleContext . getContext ( ) ; return ( XCARespondingGatewayAuditor ) ctx . getAuditor ( XCARespondingGatewayAuditor . class ) ;
public class BeanUtils {

    /**
     * Returns the {@link BeanInfo} for the given bean class, or {@code null}
     * when introspection fails.
     *
     * @param beanClass the bean class to introspect
     * @return the BeanInfo, or null on IntrospectionException
     */
    private static BeanInfo getBeanInfoOptional(Class<?> beanClass) {
        BeanInfo info;
        try {
            info = Introspector.getBeanInfo(beanClass);
        } catch (IntrospectionException e) {
            // Treat introspection failure as "no info available".
            info = null;
        }
        return info;
    }
}
public class AesUtil {

    /**
     * AES-decrypts a Base64-encoded string.
     * (Javadoc fixed: the parameter is named {@code base64String}, not
     * {@code encryptStr} as the old doc claimed.)
     *
     * @param base64String the Base64-encoded ciphertext to decrypt
     * @param decryptKey the AES decryption key bytes
     * @return the decrypted plaintext bytes
     * @throws Exception if Base64 decoding or AES decryption fails
     */
    public static byte[] AesDecryptByBase64Str(String base64String, byte[] decryptKey) throws Exception {
        byte[] content = Base64Decode(base64String);
        return AesDecryptByBytes(content, decryptKey);
    }
}
public class CPFriendlyURLEntryLocalServiceBaseImpl { /** * Creates a new cp friendly url entry with the primary key . Does not add the cp friendly url entry to the database . * @ param CPFriendlyURLEntryId the primary key for the new cp friendly url entry * @ return the new cp friendly url entry */ @ Override @ Transactional ( enabled = false ) public CPFriendlyURLEntry createCPFriendlyURLEntry ( long CPFriendlyURLEntryId ) { } }
return cpFriendlyURLEntryPersistence . create ( CPFriendlyURLEntryId ) ;
public class TileFormatInfo100 { @ Override protected void parse ( Node node ) { } }
NamedNodeMap attributes = node . getAttributes ( ) ; width = getValueRecursiveAsInteger ( attributes . getNamedItem ( "width" ) ) ; height = getValueRecursiveAsInteger ( attributes . getNamedItem ( "height" ) ) ; mimeType = getValueRecursive ( attributes . getNamedItem ( "mime-type" ) ) ; extension = getValueRecursive ( attributes . getNamedItem ( "extension" ) ) ; setParsed ( true ) ;
public class ServiceAdmin { @ Path ( "node/{nodeName}/log" ) @ Produces ( "application/octet-stream" ) @ GET public InputStream getNodeLog ( @ PathParam ( "nodeName" ) String nodeName , @ QueryParam ( "latest" ) int latest , @ Context HttpServletResponse res ) { } }
SelfDestructFileStream fs = ( SelfDestructFileStream ) ( ( JdbcClient ) JqmClientFactory . getClient ( ) ) . getEngineLog ( nodeName , latest ) ; res . setHeader ( "Content-Disposition" , "attachment; filename=" + nodeName + ".log" ) ; return fs ;
public class LonePairElectronChecker { /** * Checks if an Atom is saturated their lone pair electrons * by comparing it with known AtomTypes . * @ return True , if it ' s right saturated */ public boolean isSaturated ( IAtom atom , IAtomContainer ac ) throws CDKException { } }
createAtomTypeFactory ( ac . getBuilder ( ) ) ; IAtomType atomType = factory . getAtomType ( atom . getAtomTypeName ( ) ) ; int lpCount = ( Integer ) atomType . getProperty ( CDKConstants . LONE_PAIR_COUNT ) ; int foundLPCount = ac . getConnectedLonePairsCount ( atom ) ; return foundLPCount >= lpCount ;
public class Example3 {

    /**
     * A "normal" learning scenario without the reuse-filter technique: every
     * membership query gets a fresh BoundedStringQueue instance.
     */
    public MealyMachine<?, String, ?, String> runExperiment1() {
        FullMembershipQueryOracle oracle = new FullMembershipQueryOracle();
        // Build an (almost) classic Mealy L* learner; "almost" because the
        // table cells hold words (Word<String>) instead of single outputs.
        MealyLearner<String, String> learner =
            new ExtensibleLStarMealyBuilder<String, String>()
                .withAlphabet(sigma)
                .withInitialSuffixes(initialSuffixes)
                .withOracle(oracle)
                .create();
        learner.startLearning();
        MealyMachine<?, String, ?, String> hypothesis = learner.getHypothesisModel();
        System.out.println("Resets: " + oracle.resets);
        System.out.println("Symbols: " + oracle.symbols);
        return hypothesis;
    }
}
public class PublicanPODocBookBuilder { /** * Add an entry to a POT file . * @ param tag The XML element name . * @ param source The original source string . * @ param potFile The POT file to add to . */ protected void addPOTEntry ( final String tag , final String source , final StringBuilder potFile ) { } }
addPOEntry ( tag , source , "" , false , potFile ) ;
public class BinaryContourHelper { /** * Reshapes data so that the un - padded image has the specified shape . */ public void reshape ( int width , int height ) { } }
if ( padded == null ) { binary . reshape ( width , height ) ; } else { binary . reshape ( width + 2 , height + 2 ) ; binary . subimage ( 1 , 1 , width + 1 , height + 1 , subimage ) ; }
public class RequirePluginVersions { /** * Gets the phase to lifecycle map . * @ return the phase to lifecycle map * @ throws LifecycleExecutionException the lifecycle execution exception */ public Map < String , Lifecycle > getPhaseToLifecycleMap ( ) throws LifecycleExecutionException { } }
if ( phaseToLifecycleMap == null ) { phaseToLifecycleMap = new HashMap < String , Lifecycle > ( ) ; for ( Lifecycle lifecycle : lifecycles ) { @ SuppressWarnings ( "unchecked" ) List < String > phases = lifecycle . getPhases ( ) ; for ( String phase : phases ) { if ( phaseToLifecycleMap . containsKey ( phase ) ) { Lifecycle prevLifecycle = ( Lifecycle ) phaseToLifecycleMap . get ( phase ) ; throw new LifecycleExecutionException ( "Phase '" + phase + "' is defined in more than one lifecycle: '" + lifecycle . getId ( ) + "' and '" + prevLifecycle . getId ( ) + "'" ) ; } else { phaseToLifecycleMap . put ( phase , lifecycle ) ; } } } } return phaseToLifecycleMap ;
public class HttpMethodBase { /** * Returns the response body of the HTTP method , if any , as a { @ link String } . * If response body is not available or cannot be read , returns < tt > null < / tt > * The string conversion on the data is done using the character encoding specified * in < tt > Content - Type < / tt > header . Buffers the response and this method can be * called several times yielding the same result each time . < / p > * Note : This will cause the entire response body to be buffered in memory . This method is * safe if the content length of the response is unknown , because the amount of memory used * is limited . < p > * If the response is large this method involves lots of array copying and many object * allocations , which makes it unsuitable for high - performance / low - footprint applications . * Those applications should use { @ link # getResponseBodyAsStream ( ) } . * @ param maxlen the maximum content length to accept ( number of bytes ) . Note that , * depending on the encoding , this is not equal to the number of characters . * @ return The response body or < code > null < / code > . * @ throws IOException If an I / O ( transport ) problem occurs while obtaining the * response body . */ public String getResponseBodyAsString ( int maxlen ) throws IOException { } }
if ( maxlen < 0 ) throw new IllegalArgumentException ( "maxlen must be positive" ) ; byte [ ] rawdata = null ; if ( responseAvailable ( ) ) { rawdata = getResponseBody ( maxlen ) ; } if ( rawdata != null ) { return EncodingUtil . getString ( rawdata , getResponseCharSet ( ) ) ; } else { return null ; }
public class GosuPanel { /** * This should only be called when either the file ' s contents change externally , * or when the file saves to disk . */ public void refresh ( Path file ) { } }
EditorHost editor = findTab ( file ) ; if ( editor != null ) { // The file is open in an editor , refresh it with the contents of the file try ( Reader reader = PathUtil . createReader ( file ) ) { editor . refresh ( StreamUtil . getContent ( reader ) ) ; setDirty ( editor , false ) ; } catch ( IOException e ) { throw new RuntimeException ( e ) ; } } FileTree root = FileTreeUtil . getRoot ( ) ; FileTree node = root . find ( file ) ; if ( node != null ) { // Refresh the type system to include the changes IType type = node . getType ( ) ; if ( type != null ) { reload ( type ) ; } }
public class Ifc4PackageImpl { /** * < ! - - begin - user - doc - - > * < ! - - end - user - doc - - > * @ generated */ @ Override public EClass getIfcSlabStandardCase ( ) { } }
if ( ifcSlabStandardCaseEClass == null ) { ifcSlabStandardCaseEClass = ( EClass ) EPackage . Registry . INSTANCE . getEPackage ( Ifc4Package . eNS_URI ) . getEClassifiers ( ) . get ( 604 ) ; } return ifcSlabStandardCaseEClass ;
public class DependencyList { /** * Returns the set of features that were discovered when evaluating has ! * plugin expressions or aliases when expanding the dependencies . This * information may be required by the cache manager in order to properly * idenify cached responses . * @ return The set of discovered feature names . * @ throws IOException */ public Set < String > getDependentFeatures ( ) throws IOException { } }
final boolean entryExitLogging = log . isLoggable ( Level . FINER ) ; final String methodName = "getDependentFeatures" ; // $ NON - NLS - 1 $ if ( entryExitLogging ) { log . entering ( DependencyList . class . getName ( ) , methodName ) ; } if ( ! initialized ) { initialize ( ) ; } if ( entryExitLogging ) { log . exiting ( DependencyList . class . getName ( ) , methodName , dependentFeatures ) ; } return dependentFeatures ;
public class Backend {

    /**
     * Initializes this backend with the given configuration. Must be called
     * before this Backend can be used.
     *
     * Sets up, in order: the ID authority (requires a key-consistent store),
     * the edge and index stores (optionally wrapped in expiration caches),
     * the transaction/management logs, and the system/user configuration
     * views. Any BackendException is rethrown as a TitanException.
     *
     * @param config the configuration handed to the ID authority
     */
    public void initialize(Configuration config) {
        try {
            // EdgeStore & VertexIndexStore
            KeyColumnValueStore idStore = storeManager.openDatabase(ID_STORE_NAME);
            idAuthority = null;
            if (storeFeatures.isKeyConsistent()) {
                idAuthority = new ConsistentKeyIDAuthority(idStore, storeManager, config);
            } else {
                throw new IllegalStateException("Store needs to support consistent key or transactional operations for ID manager to guarantee proper id allocations");
            }
            KeyColumnValueStore edgeStoreRaw = storeManagerLocking.openDatabase(EDGESTORE_NAME);
            KeyColumnValueStore indexStoreRaw = storeManagerLocking.openDatabase(INDEXSTORE_NAME);
            // Configure caches
            if (cacheEnabled) {
                long expirationTime = configuration.get(DB_CACHE_TIME);
                Preconditions.checkArgument(expirationTime >= 0, "Invalid cache expiration time: %s", expirationTime);
                // 0 means "cache forever".
                if (expirationTime == 0) expirationTime = ETERNAL_CACHE_EXPIRATION;
                long cacheSizeBytes;
                double cachesize = configuration.get(DB_CACHE_SIZE);
                Preconditions.checkArgument(cachesize > 0.0, "Invalid cache size specified: %s", cachesize);
                if (cachesize < 1.0) { // Its a percentage of the currently free heap
                    Runtime runtime = Runtime.getRuntime();
                    cacheSizeBytes = (long) ((runtime.maxMemory() - (runtime.totalMemory() - runtime.freeMemory())) * cachesize);
                } else {
                    // Absolute byte count; values <= 1000 are rejected as too small.
                    Preconditions.checkArgument(cachesize > 1000, "Cache size is too small: %s", cachesize);
                    cacheSizeBytes = (long) cachesize;
                }
                log.info("Configuring total store cache size: {}", cacheSizeBytes);
                long cleanWaitTime = configuration.get(DB_CACHE_CLEAN_WAIT);
                Preconditions.checkArgument(EDGESTORE_CACHE_PERCENT + INDEXSTORE_CACHE_PERCENT == 1.0, "Cache percentages don't add up!");
                long edgeStoreCacheSize = Math.round(cacheSizeBytes * EDGESTORE_CACHE_PERCENT);
                long indexStoreCacheSize = Math.round(cacheSizeBytes * INDEXSTORE_CACHE_PERCENT);
                edgeStore = new ExpirationKCVSCache(edgeStoreRaw, getMetricsCacheName(EDGESTORE_NAME), expirationTime, cleanWaitTime, edgeStoreCacheSize);
                indexStore = new ExpirationKCVSCache(indexStoreRaw, getMetricsCacheName(INDEXSTORE_NAME), expirationTime, cleanWaitTime, indexStoreCacheSize);
            } else {
                edgeStore = new NoKCVSCache(edgeStoreRaw);
                indexStore = new NoKCVSCache(indexStoreRaw);
            }
            // Just open them so that they are cached
            txLogManager.openLog(SYSTEM_TX_LOG_NAME);
            mgmtLogManager.openLog(SYSTEM_MGMT_LOG_NAME);
            txLogStore = new NoKCVSCache(storeManager.openDatabase(SYSTEM_TX_LOG_NAME));
            // Open global configuration
            KeyColumnValueStore systemConfigStore = storeManagerLocking.openDatabase(SYSTEM_PROPERTIES_STORE_NAME);
            systemConfig = getGlobalConfiguration(new BackendOperation.TransactionalProvider() {
                @Override
                public StoreTransaction openTx() throws BackendException {
                    // Key-consistent tx config so global config writes are safe.
                    return storeManagerLocking.beginTransaction(StandardBaseTransactionConfig.of(configuration.get(TIMESTAMP_PROVIDER), storeFeatures.getKeyConsistentTxConfig()));
                }

                @Override
                public void close() throws BackendException {
                    // Do nothing, storeManager is closed explicitly by Backend
                }
            }, systemConfigStore, configuration);
            userConfig = getConfiguration(new BackendOperation.TransactionalProvider() {
                @Override
                public StoreTransaction openTx() throws BackendException {
                    return storeManagerLocking.beginTransaction(StandardBaseTransactionConfig.of(configuration.get(TIMESTAMP_PROVIDER)));
                }

                @Override
                public void close() throws BackendException {
                    // Do nothing, storeManager is closed explicitly by Backend
                }
            }, systemConfigStore, USER_CONFIGURATION_IDENTIFIER, configuration);
        } catch (BackendException e) {
            throw new TitanException("Could not initialize backend", e);
        }
    }
}
public class Evaluator { /** * Synchronize the internal set of subscriptions with the map that is passed in . * @ param subs * Complete set of subscriptions for all groups . The keys for the map are the * group names . * @ return * This instance for chaining of update operations . */ public Evaluator sync ( Map < String , List < Subscription > > subs ) { } }
Set < String > removed = subscriptions . keySet ( ) ; removed . removeAll ( subs . keySet ( ) ) ; removed . forEach ( this :: removeGroupSubscriptions ) ; subs . forEach ( this :: addGroupSubscriptions ) ; return this ;
public class ArgParser {

    /**
     * Parses a {@code ;}-separated list of {@code name=value} pairs into a
     * map. Names are lower-cased; pairs without an {@code =} are skipped.
     *
     * @param args the raw argument string, may be null
     * @return map of lower-cased names to values (never null)
     */
    public static HashMap<String, String> parse(String args) {
        HashMap<String, String> map = new HashMap<>();
        if (args == null) {
            return map;
        }
        for (String pair : args.split(";")) {
            // FIX: split on the first '=' only, so values that themselves
            // contain '=' (tokens, base64, connection strings) are preserved
            // instead of being silently dropped.
            String[] nameValue = pair.split("=", 2);
            if (nameValue.length == 2) {
                map.put(nameValue[0].toLowerCase(), nameValue[1]);
            }
        }
        return map;
    }
}
public class GlobalUsersInner { /** * Register a user to a managed lab . * @ param userName The name of the user . * @ param registrationCode The registration code of the lab . * @ throws IllegalArgumentException thrown if parameters fail the validation * @ throws CloudException thrown if the request is rejected by server * @ throws RuntimeException all other wrapped checked exceptions if the request fails to be sent */ public void register ( String userName , String registrationCode ) { } }
registerWithServiceResponseAsync ( userName , registrationCode ) . toBlocking ( ) . single ( ) . body ( ) ;
public class StreamEngine {

    /**
     * Reads bytes from the underlying channel into {@code buf}.
     *
     * Return contract: a positive value is the byte count read; zero (only
     * possible on a blocking channel here) indicates the peer has closed the
     * connection; -1 signals an error, with the cause recorded in
     * {@code errno} (ENOTCONN for EOF/IO failure, EAGAIN when a non-blocking
     * read produced no data).
     *
     * @param buf destination buffer
     * @return bytes read, 0 on peer close, or -1 with errno set
     */
    private int read(ByteBuffer buf) {
        int nbytes;
        try {
            nbytes = fd.read(buf);
            if (nbytes == -1) {
                // Channel EOF: report as "not connected".
                errno.set(ZError.ENOTCONN);
            } else if (nbytes == 0) {
                if (!fd.isBlocking()) {
                    // If not a single byte can be read from the socket in
                    // non-blocking mode we'll get an error (this may happen
                    // during the speculative read). Several errors are OK.
                    // When speculative read is being done we may not be able
                    // to read a single byte from the socket. Also, SIGSTOP
                    // issued by a debugging tool can result in EINTR error.
                    errno.set(ZError.EAGAIN);
                    nbytes = -1;
                }
            }
        } catch (IOException e) {
            errno.set(ZError.ENOTCONN);
            nbytes = -1;
        }
        return nbytes;
    }
}
public class AxesWalker { /** * Get the next node in document order on the axes . * @ return the next node in document order on the axes , or null . */ protected int getNextNode ( ) { } }
if ( m_foundLast ) return DTM . NULL ; if ( m_isFresh ) { m_currentNode = m_traverser . first ( m_root ) ; m_isFresh = false ; } // I shouldn ' t have to do this the check for current node , I think . // numbering \ numbering24 . xsl fails if I don ' t do this . I think // it occurs as the walkers are backing up . - sb else if ( DTM . NULL != m_currentNode ) { m_currentNode = m_traverser . next ( m_root , m_currentNode ) ; } if ( DTM . NULL == m_currentNode ) this . m_foundLast = true ; return m_currentNode ;
public class DataMediaPairServiceImpl { /** * 根据对应的dataMediaPairId找到对应的dataMediaPair */ public DataMediaPair findById ( Long dataMediaPairId ) { } }
Assert . assertNotNull ( dataMediaPairId ) ; List < DataMediaPair > dataMediaPairs = listByIds ( dataMediaPairId ) ; if ( dataMediaPairs . size ( ) != 1 ) { String exceptionCause = "query dataMediaPairId:" + dataMediaPairId + " but return " + dataMediaPairs . size ( ) + " Pairs." ; logger . error ( "ERROR ## " + exceptionCause ) ; throw new ManagerException ( exceptionCause ) ; } return dataMediaPairs . get ( 0 ) ;
public class ReportInstanceStatusRequest { /** * The reason codes that describe the health state of your instance . * < ul > * < li > * < code > instance - stuck - in - state < / code > : My instance is stuck in a state . * < / li > * < li > * < code > unresponsive < / code > : My instance is unresponsive . * < / li > * < li > * < code > not - accepting - credentials < / code > : My instance is not accepting my credentials . * < / li > * < li > * < code > password - not - available < / code > : A password is not available for my instance . * < / li > * < li > * < code > performance - network < / code > : My instance is experiencing performance problems that I believe are network * related . * < / li > * < li > * < code > performance - instance - store < / code > : My instance is experiencing performance problems that I believe are * related to the instance stores . * < / li > * < li > * < code > performance - ebs - volume < / code > : My instance is experiencing performance problems that I believe are related * to an EBS volume . * < / li > * < li > * < code > performance - other < / code > : My instance is experiencing performance problems . * < / li > * < li > * < code > other < / code > : [ explain using the description parameter ] * < / li > * < / ul > * @ return The reason codes that describe the health state of your instance . < / p > * < ul > * < li > * < code > instance - stuck - in - state < / code > : My instance is stuck in a state . * < / li > * < li > * < code > unresponsive < / code > : My instance is unresponsive . * < / li > * < li > * < code > not - accepting - credentials < / code > : My instance is not accepting my credentials . * < / li > * < li > * < code > password - not - available < / code > : A password is not available for my instance . * < / li > * < li > * < code > performance - network < / code > : My instance is experiencing performance problems that I believe are * network related . 
* < / li > * < li > * < code > performance - instance - store < / code > : My instance is experiencing performance problems that I believe * are related to the instance stores . * < / li > * < li > * < code > performance - ebs - volume < / code > : My instance is experiencing performance problems that I believe are * related to an EBS volume . * < / li > * < li > * < code > performance - other < / code > : My instance is experiencing performance problems . * < / li > * < li > * < code > other < / code > : [ explain using the description parameter ] * < / li > * @ see ReportInstanceReasonCodes */ public java . util . List < String > getReasonCodes ( ) { } }
if ( reasonCodes == null ) { reasonCodes = new com . amazonaws . internal . SdkInternalList < String > ( ) ; } return reasonCodes ;
public class RNAUtils { /** * method to check if two given polymers are complement to each other * @ param polymerOne * PolymerNotation of the first polymer * @ param polymerTwo * PolymerNotation of the second polymer * @ return true , if they are opposite to each other , false otherwise * @ throws RNAUtilsException * if the polymers are not rna / dna or the antiparallel strand * can not be built from polymerOne * @ throws HELM2HandledException * if the polymers contain HELM2 features * @ throws ChemistryException * if the Chemistry Engine can not be initialized * @ throws NucleotideLoadingException if nucleotides can not be loaded */ public static boolean areAntiparallel ( PolymerNotation polymerOne , PolymerNotation polymerTwo ) throws RNAUtilsException , HELM2HandledException , ChemistryException , NucleotideLoadingException { } }
checkRNA ( polymerOne ) ; checkRNA ( polymerTwo ) ; PolymerNotation antiparallel = getAntiparallel ( polymerOne ) ; String sequenceOne = FastaFormat . generateFastaFromRNA ( MethodsMonomerUtils . getListOfHandledMonomers ( antiparallel . getListMonomers ( ) ) ) ; String sequenceTwo = FastaFormat . generateFastaFromRNA ( MethodsMonomerUtils . getListOfHandledMonomers ( polymerTwo . getListMonomers ( ) ) ) ; return sequenceOne . equals ( sequenceTwo ) ;
public class PartitionRegionImpl { /** * This method returns true only if the metadata for the service contains the given * region in the list of supported regions . */ private boolean isServiceSupportedInRegion ( String serviceName ) { } }
return partition . getServices ( ) . get ( serviceName ) != null && partition . getServices ( ) . get ( serviceName ) . getEndpoints ( ) . containsKey ( region ) ;
public class Binary { /** * Represent as Base 64 with customized dialect and padding * @ return Base 64 encoded string */ public String asBase64 ( BaseEncoding . Dialect dialect , BaseEncoding . Padding padding ) throws IOException { } }
String standardBase64 = DatatypeConverter . printBase64Binary ( asByteArray ( false ) ) ; if ( dialect == BaseEncoding . Dialect . STANDARD && padding == BaseEncoding . Padding . STANDARD ) { return standardBase64 ; } StringBuilder safeBase64 = new StringBuilder ( standardBase64 . length ( ) ) ; for ( int i = 0 ; i < standardBase64 . length ( ) ; i ++ ) { char c = standardBase64 . charAt ( i ) ; if ( dialect == BaseEncoding . Dialect . SAFE ) { if ( c == '+' ) c = '-' ; else if ( c == '/' ) c = '_' ; } if ( c == '=' ) { if ( padding == BaseEncoding . Padding . STANDARD ) { safeBase64 . append ( '=' ) ; } else if ( padding == BaseEncoding . Padding . SAFE ) { safeBase64 . append ( '.' ) ; } } else { safeBase64 . append ( c ) ; } } return safeBase64 . toString ( ) ;
public class ProcessorStripSpace { /** * Receive notification of the start of an strip - space element . * @ param handler The calling StylesheetHandler / TemplatesBuilder . * @ param uri The Namespace URI , or the empty string if the * element has no Namespace URI or if Namespace * processing is not being performed . * @ param localName The local name ( without prefix ) , or the * empty string if Namespace processing is not being * performed . * @ param rawName The raw XML 1.0 name ( with prefix ) , or the * empty string if raw names are not available . * @ param attributes The attributes attached to the element . If * there are no attributes , it shall be an empty * Attributes object . */ public void startElement ( StylesheetHandler handler , String uri , String localName , String rawName , Attributes attributes ) throws org . xml . sax . SAXException { } }
Stylesheet thisSheet = handler . getStylesheet ( ) ; WhitespaceInfoPaths paths = new WhitespaceInfoPaths ( thisSheet ) ; setPropertiesFromAttributes ( handler , rawName , attributes , paths ) ; Vector xpaths = paths . getElements ( ) ; for ( int i = 0 ; i < xpaths . size ( ) ; i ++ ) { WhiteSpaceInfo wsi = new WhiteSpaceInfo ( ( XPath ) xpaths . elementAt ( i ) , true , thisSheet ) ; wsi . setUid ( handler . nextUid ( ) ) ; thisSheet . setStripSpaces ( wsi ) ; } paths . clearElements ( ) ;
public class WebApp { /** * ( non - Javadoc ) * @ see com . ibm . ws . webcontainer . webapp . WebApp # getWebExtensionProcessor ( ) */ public com . ibm . ws . webcontainer . extension . WebExtensionProcessor getWebExtensionProcessor ( ) { } }
// TODO Auto - generated method stub return new com . ibm . ws . webcontainer . osgi . extension . WebExtensionProcessor ( this ) ;
public class ReflectionUtils { /** * Returns the method objects for methods which are annotated with the * given annotation of the given class . This method traverses through * the super classes of the given class and tries to find methods as * declared methods within these classes which are annotated with an * annotation of the given annotation type . * When the object class is reached the traversing stops . If no methods * can be found , an empty list is returned . * The order of the methods is random . * @ param clazz The class within to look for the methods * @ param annotation The annotation type a method must be annotated with to be * included in the list * @ return A list of method objects for methods annotated with the given * annotation type or an emtpy list */ public static List < Method > getMethods ( Class < ? > clazz , final Class < ? extends Annotation > annotation ) { } }
final List < Method > methods = new ArrayList < Method > ( ) ; traverseHierarchy ( clazz , new TraverseTask < Method > ( ) { @ Override public Method run ( Class < ? > clazz ) { Method [ ] methodArray = clazz . getDeclaredMethods ( ) ; for ( int i = 0 ; i < methodArray . length ; i ++ ) { Method m = methodArray [ i ] ; if ( m . getAnnotation ( annotation ) != null ) { methods . add ( m ) ; } } return null ; } } ) ; return methods ;
public class Participant { /** * Create a ParticipantFetcher to execute fetch . * @ param pathAccountSid The SID of the Account that created the resource to * fetch * @ param pathConferenceSid The SID of the conference with the participant to * fetch * @ param pathCallSid The Call SID of the resource to fetch * @ return ParticipantFetcher capable of executing the fetch */ public static ParticipantFetcher fetcher ( final String pathAccountSid , final String pathConferenceSid , final String pathCallSid ) { } }
return new ParticipantFetcher ( pathAccountSid , pathConferenceSid , pathCallSid ) ;
public class AwsSecurityFindingFilters { /** * The URL for more details from the source of the threat intel . * < b > NOTE : < / b > This method appends the values to the existing list ( if any ) . Use * { @ link # setThreatIntelIndicatorSourceUrl ( java . util . Collection ) } or * { @ link # withThreatIntelIndicatorSourceUrl ( java . util . Collection ) } if you want to override the existing values . * @ param threatIntelIndicatorSourceUrl * The URL for more details from the source of the threat intel . * @ return Returns a reference to this object so that method calls can be chained together . */ public AwsSecurityFindingFilters withThreatIntelIndicatorSourceUrl ( StringFilter ... threatIntelIndicatorSourceUrl ) { } }
if ( this . threatIntelIndicatorSourceUrl == null ) { setThreatIntelIndicatorSourceUrl ( new java . util . ArrayList < StringFilter > ( threatIntelIndicatorSourceUrl . length ) ) ; } for ( StringFilter ele : threatIntelIndicatorSourceUrl ) { this . threatIntelIndicatorSourceUrl . add ( ele ) ; } return this ;
public class ComboButtonBox { /** * documentation inherited from interface */ public void intervalRemoved ( ListDataEvent e ) { } }
// remove the buttons in the specified interval int start = e . getIndex0 ( ) , count = e . getIndex1 ( ) - start + 1 ; removeButtons ( start , count ) ; SwingUtil . refresh ( this ) ;
public class ClientSharedObject { /** * Notify listeners on update * @ param key * Updated attribute key * @ param value * Updated attribute value */ protected void notifyUpdate ( String key , Object value ) { } }
for ( ISharedObjectListener listener : listeners ) { listener . onSharedObjectUpdate ( this , key , value ) ; }
public class ISPNQuotaPersister {

    /**
     * Composes the globally unique workspace name for a JCR instance in the
     * form {@code repositoryName/workspaceName/} (note the trailing slash).
     *
     * @param repositoryName the repository name
     * @param workspaceName the workspace name
     * @return the composed unique name
     */
    private String composeWorkspaceUniqueName(String repositoryName, String workspaceName) {
        return repositoryName + "/" + workspaceName + "/";
    }
}
public class DotPatternMapHelpers {

    /**
     * Joins two field names into a single dotted name {@code left.right}.
     * If {@code left} is null or empty, {@code right} is returned unchanged.
     *
     * @param left one field name (may be null or empty)
     * @param right the other field name
     * @return {@code left + "." + right}, or {@code right} when left is absent
     */
    public static String flatten(String left, String right) {
        if (left == null || left.isEmpty()) {
            return right;
        }
        return left + "." + right;
    }
}
public class Trigger {

    /**
     * Evaluates a trigger's threshold condition against an observed value.
     * Comparison triggers compare against the primary threshold; BETWEEN and
     * NOT_BETWEEN use the primary and secondary thresholds in either order.
     *
     * @param trigger the trigger to evaluate; must not be null
     * @param actualValue the observed value; must not be null
     * @return true if the trigger condition is met and a notification should fire
     * @throws SystemException if the trigger type is not supported
     */
    public static boolean evaluateTrigger(Trigger trigger, Double actualValue) {
        requireArgument(trigger != null, "Trigger cannot be null.");
        requireArgument(actualValue != null, "Trigger cannot be evaulated against null.");
        Double lowThreshold, highThreshold;
        switch (trigger.type) {
            case GREATER_THAN:
                return actualValue.compareTo(trigger.getThreshold()) > 0;
            case GREATER_THAN_OR_EQ:
                return actualValue.compareTo(trigger.getThreshold()) >= 0;
            case LESS_THAN:
                return actualValue.compareTo(trigger.getThreshold()) < 0;
            case LESS_THAN_OR_EQ:
                return actualValue.compareTo(trigger.getThreshold()) <= 0;
            case EQUAL:
                return actualValue.compareTo(trigger.getThreshold()) == 0;
            case NOT_EQUAL:
                return actualValue.compareTo(trigger.getThreshold()) != 0;
            case BETWEEN:
                // Thresholds may be supplied in either order; normalize to [low, high].
                lowThreshold = Math.min(trigger.getThreshold(), trigger.getSecondaryThreshold());
                highThreshold = Math.max(trigger.getThreshold(), trigger.getSecondaryThreshold());
                return (actualValue.compareTo(lowThreshold) >= 0 && actualValue.compareTo(highThreshold) <= 0);
            case NOT_BETWEEN:
                lowThreshold = Math.min(trigger.getThreshold(), trigger.getSecondaryThreshold());
                highThreshold = Math.max(trigger.getThreshold(), trigger.getSecondaryThreshold());
                return (actualValue.compareTo(lowThreshold) < 0 || actualValue.compareTo(highThreshold) > 0);
            case NO_DATA:
                // NOTE(review): unreachable as written -- the requireArgument above
                // rejects a null actualValue before this case can return true.
                // Confirm whether NO_DATA should be evaluated elsewhere.
                return actualValue == null;
            default:
                throw new SystemException("Unsupported trigger type " + trigger.type);
        }
    }
}
public class MultiPartRequest { /** * Check if a named part is present * @ param name The part * @ return true if it was included */ public boolean contains ( String name ) { } }
Part part = ( Part ) _partMap . get ( name ) ; return ( part != null ) ;
public class FunctionsInner { /** * Updates an existing function under an existing streaming job . This can be used to partially update ( ie . update one or two properties ) a function without affecting the rest the job or function definition . * @ param resourceGroupName The name of the resource group that contains the resource . You can obtain this value from the Azure Resource Manager API or the portal . * @ param jobName The name of the streaming job . * @ param functionName The name of the function . * @ param function A function object . The properties specified here will overwrite the corresponding properties in the existing function ( ie . Those properties will be updated ) . Any properties that are set to null here will mean that the corresponding property in the existing function will remain the same and not change as a result of this PATCH operation . * @ param ifMatch The ETag of the function . Omit this value to always overwrite the current function . Specify the last - seen ETag value to prevent accidentally overwritting concurrent changes . * @ throws IllegalArgumentException thrown if parameters fail the validation * @ return the observable to the FunctionInner object */ public Observable < FunctionInner > updateAsync ( String resourceGroupName , String jobName , String functionName , FunctionInner function , String ifMatch ) { } }
return updateWithServiceResponseAsync ( resourceGroupName , jobName , functionName , function , ifMatch ) . map ( new Func1 < ServiceResponseWithHeaders < FunctionInner , FunctionsUpdateHeaders > , FunctionInner > ( ) { @ Override public FunctionInner call ( ServiceResponseWithHeaders < FunctionInner , FunctionsUpdateHeaders > response ) { return response . body ( ) ; } } ) ;
public class QSufSort {

    /**
     * Transforms the alphabet of V by packing several consecutive symbols into
     * one "chunk" symbol, preserving the suffix order of V, and optionally
     * compacting the alphabet so the output uses contiguous integers.
     * <p>
     * Input: V holds n positive integers in l...k-1 (plus a slot at V[n]);
     * I is scratch space of size n+1. q bounds the maximum transformed symbol:
     * q must be at least k-l; if q &lt;= n compaction is guaranteed; if
     * k-l &gt; n no aggregation happens; Integer.MAX_VALUE aggregates maximally.
     *
     * @return the new alphabet size j in 1...q (compacted iff j &lt;= n+1).
     *         Side effects: the field r is set to the number of old symbols
     *         grouped into one chunk, and V[n] is set to 0 (terminator).
     */
    private int transform(int n, int k, int l, int q) {
        int b, c, d, e, i, j, m, s;
        int pi, pj; // pointers into V (offsets relative to 'start')
        for (s = 0, i = k - l; i != 0; i >>= 1)
            ++s; /* s is number of bits in old symbol. */
        e = Integer.MAX_VALUE >> s; /* e is for overflow checking. */
        // Grow the chunk one old symbol at a time while it stays within both
        // the overflow bound e and the caller-imposed cap q; r counts symbols.
        for (b = d = r = 0; r < n && d <= e && (c = d << s | (k - l)) <= q; ++r) {
            b = b << s | (V[start + r] - l + 1); /* b is start of x in chunk alphabet. */
            d = c; /* d is max symbol in chunk alphabet. */
        }
        m = (1 << (r - 1) * s) - 1; /* m masks off top old symbol from chunk. */
        V[start + n] = l - 1; /* emulate zero terminator. */
        if (d <= n) { /* if bucketing possible, compact alphabet. */
            for (pi = 0; pi <= d; ++pi)
                I[pi] = 0; /* zero transformation table. */
            // First pass: mark every chunk symbol that actually occurs.
            for (pi = r, c = b; pi <= n; ++pi) {
                I[c] = 1; /* mark used chunk symbol. */
                c = (c & m) << s | (V[start + pi] - l + 1); /* shift in next old symbol in chunk. */
            }
            for (i = 1; i < r; ++i) { /* handle last r-1 positions. */
                I[c] = 1; /* mark used chunk symbol. */
                c = (c & m) << s; /* shift in next old symbol in chunk. */
            }
            // Assign consecutive codes 1..j-1 to the marked symbols.
            for (pi = 0, j = 1; pi <= d; ++pi)
                if (I[pi] != 0)
                    I[pi] = j++; /* j is new alphabet size. */
            // Second pass: rewrite V in the compacted chunk alphabet.
            for (pi = 0, pj = r, c = b; pj <= n; ++pi, ++pj) {
                V[start + pi] = I[c]; /* transform to new alphabet. */
                c = (c & m) << s | (V[start + pj] - l + 1); /* shift in next old symbol in chunk. */
            }
            while (pi < n) { /* handle last r-1 positions. */
                V[start + pi++] = I[c]; /* transform to new alphabet. */
                c = (c & m) << s; /* shift right-end zero in chunk. */
            }
        } else { /* bucketing not possible, don't compact. */
            for (pi = 0, pj = r, c = b; pj <= n; ++pi, ++pj) {
                V[start + pi] = c; /* transform to new alphabet. */
                c = (c & m) << s | (V[start + pj] - l + 1); /* shift in next old symbol in chunk. */
            }
            while (pi < n) { /* handle last r-1 positions. */
                V[start + pi++] = c; /* transform to new alphabet. */
                c = (c & m) << s; /* shift right-end zero in chunk. */
            }
            j = d + 1; /* new alphabet size. */
        }
        V[start + n] = 0; /* end-of-string symbol is zero. */
        return j; /* return new alphabet size. */
    }
}
public class BigDecimalMath {

    /**
     * Returns the number e with the precision specified in the mathContext,
     * caching the most precise value computed so far.
     * <p>See <a href="https://en.wikipedia.org/wiki/E_(mathematical_constant)">Wikipedia: E (mathematical constant)</a></p>
     *
     * @param mathContext the {@link MathContext} used for the result
     * @return the number e rounded to the requested precision
     * @throws UnsupportedOperationException if the {@link MathContext} has unlimited precision
     */
    public static BigDecimal e(MathContext mathContext) {
        checkMathContext(mathContext);
        BigDecimal result = null;
        synchronized (eCacheLock) {
            if (eCache != null && mathContext.getPrecision() <= eCache.precision()) {
                // Cache hit: the cached value is at least as precise as requested;
                // round it down to the requested precision outside the lock.
                result = eCache;
            } else {
                // Cache miss: compute at the requested precision, remember it,
                // and return directly (already at the exact requested precision).
                eCache = exp(ONE, mathContext);
                return eCache;
            }
        }
        return round(result, mathContext);
    }
}
public class AttackDetail { /** * The array of < a > AttackProperty < / a > objects . * < b > NOTE : < / b > This method appends the values to the existing list ( if any ) . Use * { @ link # setAttackProperties ( java . util . Collection ) } or { @ link # withAttackProperties ( java . util . Collection ) } if you * want to override the existing values . * @ param attackProperties * The array of < a > AttackProperty < / a > objects . * @ return Returns a reference to this object so that method calls can be chained together . */ public AttackDetail withAttackProperties ( AttackProperty ... attackProperties ) { } }
if ( this . attackProperties == null ) { setAttackProperties ( new java . util . ArrayList < AttackProperty > ( attackProperties . length ) ) ; } for ( AttackProperty ele : attackProperties ) { this . attackProperties . add ( ele ) ; } return this ;
public class RotationAxisAligner { /** * Returns a normalized vector that represents a minor rotation axis , except * for Cn , this represents an axis orthogonal to the principal axis . * @ return minor rotation axis */ private void refineReferenceVector ( ) { } }
referenceVector = new Vector3d ( Y_AXIS ) ; if ( rotationGroup . getPointGroup ( ) . startsWith ( "C" ) ) { referenceVector = getReferenceAxisCylicWithSubunitAlignment ( ) ; } else if ( rotationGroup . getPointGroup ( ) . startsWith ( "D" ) ) { referenceVector = getReferenceAxisDihedralWithSubunitAlignment ( ) ; } referenceVector = orthogonalize ( principalRotationVector , referenceVector ) ;
public class ConfigReader { /** * Read from the < code > input < / code > and return it ' s configuration settings as a { @ link Map } . * @ param input * the input * @ return return { @ link Map } with all the configurations read from the config file , or throws an exception if * there ' s a problem reading the input , e . g . : invalid XML . * @ throws IOException * Signals that an I / O exception has occurred . * @ throws SAXException * the SAX exception * @ throws ConfigReadException * the config read exception */ public Map < String , String > read ( InputStream input ) throws IOException , SAXException , ConfigReadException { } }
Digester digester = new Digester ( ) ; digester . addRuleSet ( new RuleSet ( ) ) ; Object result = digester . parse ( input ) ; if ( result == null || ! ( result instanceof Profiles ) ) { throw new ConfigReadException ( "No profiles found in config file" ) ; } Profiles profiles = ( Profiles ) result ; List < Map < String , String > > list = profiles . getProfiles ( ) ; if ( list . isEmpty ( ) ) { throw new ConfigReadException ( "No profile in config file of kind: " + Profiles . PROFILE_KIND ) ; } return list . get ( 0 ) ;
public class Bytes {

    /**
     * Copies the bytes between the buffer's position and limit into a new
     * array without disturbing the buffer's position (or any other state).
     *
     * @param buf the source buffer; left untouched
     * @return a new array holding the bytes from position to limit
     */
    public static byte[] getBytes(ByteBuffer buf) {
        // A duplicate shares content but has independent position/limit/mark,
        // so reading through it leaves the caller's buffer state intact.
        ByteBuffer view = buf.duplicate();
        byte[] copy = new byte[view.remaining()];
        view.get(copy);
        return copy;
    }
}
public class InnerRankUpdate_DDRB { /** * Performs the following operation on a block : < br > * < br > * c = c - a < sup > T < / sup > a < br > */ protected static void multTransABlockMinus ( double [ ] dataA , double [ ] dataC , int indexA , int indexB , int indexC , final int heightA , final int widthA , final int widthC ) { } }
// for ( int i = 0 ; i < widthA ; i + + ) { // for ( int k = 0 ; k < heightA ; k + + ) { // double valA = dataA [ k * widthA + i + indexA ] ; // for ( int j = 0 ; j < widthC ; j + + ) { // dataC [ i * widthC + j + indexC ] - = valA * dataA [ k * widthC + j + indexB ] ; int rowB = indexB ; int endLoopK = rowB + heightA * widthC ; int startA = indexA ; // for ( int k = 0 ; k < heightA ; k + + ) { for ( ; rowB != endLoopK ; rowB += widthC , startA += widthA ) { int a = startA ; int c = indexC ; int endA = a + widthA ; int endB = rowB + widthC ; while ( a != endA ) { double valA = dataA [ a ++ ] ; int b = rowB ; while ( b != endB ) { dataC [ c ++ ] -= valA * dataA [ b ++ ] ; } } }
public class FileCopier { /** * 新建一个文件复制器 * @ param srcPath 源文件路径 ( 相对ClassPath路径或绝对路径 ) * @ param destPath 目标文件路径 ( 相对ClassPath路径或绝对路径 ) * @ return { @ link FileCopier } */ public static FileCopier create ( String srcPath , String destPath ) { } }
return new FileCopier ( FileUtil . file ( srcPath ) , FileUtil . file ( destPath ) ) ;
public class CmsFileUtil { /** * Returns the normalized file path created from the given URL . < p > * The path part { @ link URL # getPath ( ) } is used , unescaped and * normalized using { @ link # normalizePath ( String , char ) } . < p > * @ param url the URL to extract the path information from * @ param separatorChar the file separator char to use , for example { @ link File # separatorChar } * @ return the normalized file path created from the given URL */ public static String normalizePath ( URL url , char separatorChar ) { } }
// get the path part from the URL String path = new File ( url . getPath ( ) ) . getAbsolutePath ( ) ; // trick to get the OS default encoding , taken from the official Java i18n FAQ String systemEncoding = ( new OutputStreamWriter ( new ByteArrayOutputStream ( ) ) ) . getEncoding ( ) ; // decode url in order to remove spaces and escaped chars from path return CmsFileUtil . normalizePath ( CmsEncoder . decode ( path , systemEncoding ) , separatorChar ) ;
public class RippleTradeServiceRaw {

    /**
     * Retrieves trade transaction details, from the local per-account cache if
     * present, otherwise by querying the Ripple REST service. Successfully
     * fetched transactions are stored back into the cache when caching is
     * enabled on the exchange.
     *
     * @param account the Ripple account the notification belongs to
     * @param notification the notification identifying the transaction; its
     *        type must be "order" or "payment"
     * @return the trade transaction, or null when the REST service fails to
     *         parse this individual transaction (known server-side issue)
     * @throws RippleException on any other service error
     * @throws IOException on transport failure
     */
    public IRippleTradeTransaction getTrade(final String account, final RippleNotification notification)
            throws RippleException, IOException {
        final RippleExchange ripple = (RippleExchange) exchange;
        if (ripple.isStoreTradeTransactionDetails()) {
            // Lazily create the per-account cache; return a cached hit immediately.
            Map<String, IRippleTradeTransaction> cache = rawTradeStore.get(account);
            if (cache == null) {
                cache = new ConcurrentHashMap<>();
                rawTradeStore.put(account, cache);
            }
            if (cache.containsKey(notification.getHash())) {
                return cache.get(notification.getHash());
            }
        }
        final IRippleTradeTransaction trade;
        try {
            // Dispatch on notification type; only "order" and "payment" are supported.
            if (notification.getType().equals("order")) {
                trade = ripplePublic.orderTransaction(account, notification.getHash());
            } else if (notification.getType().equals("payment")) {
                trade = ripplePublic.paymentTransaction(account, notification.getHash());
            } else {
                throw new IllegalArgumentException(String.format(
                        "unexpected notification %s type for transaction[%s] and account[%s]",
                        notification.getType(), notification.getHash(), notification.getAccount()));
            }
        } catch (final RippleException e) {
            if (e.getHttpStatusCode() == 500 && e.getErrorType().equals("transaction")) {
                // Do not let an individual transaction parsing bug in the Ripple REST service cause a total
                // trade history failure. See https://github.com/ripple/ripple-rest/issues/384 as an example of
                // this situation.
                logger.error("exception reading {} transaction[{}] for account[{}]",
                        notification.getType(), notification.getHash(), account, e);
                return null;
            } else {
                throw e;
            }
        }
        if (ripple.isStoreTradeTransactionDetails()) {
            rawTradeStore.get(account).put(notification.getHash(), trade);
        }
        return trade;
    }
}
public class RequestUtils { /** * Recreates the URL used to make the supplied request , replacing the * server part of the URL with the supplied server name . */ public static String rehostLocation ( HttpServletRequest req , String servername ) { } }
StringBuffer buf = req . getRequestURL ( ) ; String csname = req . getServerName ( ) ; int csidx = buf . indexOf ( csname ) ; if ( csidx != - 1 ) { buf . delete ( csidx , csidx + csname . length ( ) ) ; buf . insert ( csidx , servername ) ; } String query = req . getQueryString ( ) ; if ( ! StringUtil . isBlank ( query ) ) { buf . append ( "?" ) . append ( query ) ; } return buf . toString ( ) ;
public class WindowedStream { /** * Applies the given window function to each window . The window function is called for each * evaluation of the window for each key individually . The output of the window function is * interpreted as a regular non - windowed stream . * < p > Note that this function requires that all data in the windows is buffered until the window * is evaluated , as the function provides no means of incremental aggregation . * @ param function The window function . * @ param resultType Type information for the result type of the window function * @ return The data stream that is the result of applying the window function to the window . */ public < R > SingleOutputStreamOperator < R > apply ( WindowFunction < T , R , K , W > function , TypeInformation < R > resultType ) { } }
function = input . getExecutionEnvironment ( ) . clean ( function ) ; return apply ( new InternalIterableWindowFunction < > ( function ) , resultType , function ) ;
public class DataPipelineClient { /** * Requests that the status of the specified physical or logical pipeline objects be updated in the specified * pipeline . This update might not occur immediately , but is eventually consistent . The status that can be set * depends on the type of object ( for example , DataNode or Activity ) . You cannot perform this operation on * < code > FINISHED < / code > pipelines and attempting to do so returns < code > InvalidRequestException < / code > . * @ param setStatusRequest * Contains the parameters for SetStatus . * @ return Result of the SetStatus operation returned by the service . * @ throws PipelineNotFoundException * The specified pipeline was not found . Verify that you used the correct user and account identifiers . * @ throws PipelineDeletedException * The specified pipeline has been deleted . * @ throws InternalServiceErrorException * An internal service error occurred . * @ throws InvalidRequestException * The request was not valid . Verify that your request was properly formatted , that the signature was * generated with the correct credentials , and that you haven ' t exceeded any of the service limits for your * account . * @ sample DataPipeline . SetStatus * @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / datapipeline - 2012-10-29 / SetStatus " target = " _ top " > AWS API * Documentation < / a > */ @ Override public SetStatusResult setStatus ( SetStatusRequest request ) { } }
request = beforeClientExecution ( request ) ; return executeSetStatus ( request ) ;
public class StreamExecutionEnvironment { /** * Creates a data stream that contains the contents of file created while system watches the given path . The file * will be read with the system ' s default character set . * @ param filePath * The path of the file , as a URI ( e . g . , " file : / / / some / local / file " or " hdfs : / / host : port / file / path / " ) * @ param intervalMillis * The interval of file watching in milliseconds * @ param watchType * The watch type of file stream . When watchType is { @ link org . apache . flink . streaming . api . functions . source . FileMonitoringFunction . WatchType # ONLY _ NEW _ FILES } , the system processes * only * new files . { @ link org . apache . flink . streaming . api . functions . source . FileMonitoringFunction . WatchType # REPROCESS _ WITH _ APPENDED } means that the system re - processes all contents of * appended file . { @ link org . apache . flink . streaming . api . functions . source . FileMonitoringFunction . WatchType # PROCESS _ ONLY _ APPENDED } means that the system processes only appended * contents * of files . * @ return The DataStream containing the given directory . * @ deprecated Use { @ link # readFile ( FileInputFormat , String , FileProcessingMode , long ) } instead . */ @ Deprecated @ SuppressWarnings ( "deprecation" ) public DataStream < String > readFileStream ( String filePath , long intervalMillis , FileMonitoringFunction . WatchType watchType ) { } }
DataStream < Tuple3 < String , Long , Long > > source = addSource ( new FileMonitoringFunction ( filePath , intervalMillis , watchType ) , "Read File Stream source" ) ; return source . flatMap ( new FileReadFunction ( ) ) ;
public class ResourceGroovyMethods {

    /**
     * Creates a buffered reader for this file, using the specified charset as
     * the encoding.
     *
     * @param file a File
     * @param charset the charset for this File
     * @return a BufferedReader
     * @throws FileNotFoundException if the File was not found
     * @throws UnsupportedEncodingException if the encoding specified is not supported
     * @since 1.0
     */
    public static BufferedReader newReader(File file, String charset)
            throws FileNotFoundException, UnsupportedEncodingException {
        FileInputStream byteStream = new FileInputStream(file);
        InputStreamReader decodedStream = new InputStreamReader(byteStream, charset);
        return new BufferedReader(decodedStream);
    }
}
public class LabelService { /** * Returns the label with the given key . * This is needed because the API does not contain an operation to get a label using the key directly . * @ param key The key of the label to return * @ return The label */ public Optional < Label > show ( String key ) { } }
Optional < Label > ret = Optional . absent ( ) ; Collection < Label > labels = list ( ) ; for ( Label label : labels ) { if ( label . getKey ( ) . equals ( key ) ) ret = Optional . of ( label ) ; } return ret ;
public class XEventAttributeClassifier { /** * ( non - Javadoc ) * @ see * org . deckfour . xes . classification . XEventClassifier # sameEventClass ( org . deckfour * . xes . model . XEvent , org . deckfour . xes . model . XEvent ) */ public boolean sameEventClass ( XEvent eventA , XEvent eventB ) { } }
return getClassIdentity ( eventA ) . equals ( getClassIdentity ( eventB ) ) ;
public class DateConverter {

    /**
     * Converts the string to a binary date value and moves it to this field.
     *
     * @param strField the string to convert and store (may be null -> stores null)
     * @param bDisplayOption display the data on the screen if true
     * @param iMoveMode INIT, SCREEN, or READ move mode
     * @return the error code from setData, or the parse error via setLastError
     */
    public int setString(String strField, boolean bDisplayOption, int iMoveMode) {
        int fieldLength = 0;
        Object tempBinary = null;
        if (strField != null) {
            fieldLength = strField.length();
            int maxLength = this.getMaxLength();
            // Clamp overlong input to the field's maximum length (40-char cap too).
            if ((fieldLength > maxLength) || (fieldLength > 40))
                fieldLength = maxLength;
            Date dateOld = new Date((long) this.getValue()); // Save current time
            try {
                tempBinary = DateConverter.stringToBinary(strField, dateOld, m_sDateFormat);
            } catch (Exception ex) {
                // Parsing failed: report the error on the owning task, if reachable.
                String strError = ex.getMessage();
                if (strError == null)
                    strError = ex.getClass().getName();
                Task task = null;
                if (this.getField() != null)
                    if (((BaseField) this.getField()).getRecord() != null)
                        if (((BaseField) this.getField()).getRecord().getRecordOwner() != null)
                            task = ((BaseField) this.getField()).getRecord().getRecordOwner().getTask();
                // if (task == null)
                //     task = BaseApplet.getSharedInstance();
                // NOTE(review): if no record owner/task is reachable, task is still
                // null here and the next line throws NPE — confirm whether the
                // commented-out fallback above should be restored.
                return task.setLastError(strError);
            }
        }
        return this.setData(tempBinary, bDisplayOption, iMoveMode);
    }
}
public class FullDTDReader { /** * Internal methods , input access : */ private void loadMoreScoped ( WstxInputSource currScope , String entityName , Location loc ) throws XMLStreamException { } }
boolean check = ( mInput == currScope ) ; loadMore ( getErrorMsg ( ) ) ; // Did we get out of the scope ? if ( check && ( mInput != currScope ) ) { _reportWFCViolation ( "Unterminated entity value for entity '" + entityName + "' (definition started at " + loc + ")" ) ; }
public class OServerAdmin { /** * Checks if a database exists in the remote server . * @ return true if exists , otherwise false * @ throws IOException */ public synchronized boolean existsDatabase ( ) throws IOException { } }
storage . checkConnection ( ) ; try { final OChannelBinaryClient network = storage . beginRequest ( OChannelBinaryProtocol . REQUEST_DB_EXIST ) ; try { network . writeString ( storage . getName ( ) ) ; } finally { storage . endRequest ( network ) ; } try { storage . beginResponse ( network ) ; return network . readByte ( ) == 1 ; } finally { storage . endResponse ( network ) ; } } catch ( Exception e ) { OLogManager . instance ( ) . exception ( "Error on checking existence of the remote storage: " + storage . getName ( ) , e , OStorageException . class ) ; storage . close ( true ) ; } return false ;
public class FeatureIndex { /** * Writes the feature index into the specified * output stream in a format suitable for loading * into a < code > Map < / code > using the * { @ link # read ( Reader ) write } method . * @ param outa < code > Writer < / code > object to * provide the underlying stream . * @ throws IOExceptionif writing this feature index * to the specified output stream * throws an < code > IOException < / code > . */ public void write ( Writer out ) throws IOException { } }
// logger . debug ( " writing feature index " ) ; PrintWriter pw = new PrintWriter ( out ) ; Iterator it = map . entrySet ( ) . iterator ( ) ; // logger . debug ( " index size " + map . entrySet ( ) . size ( ) ) ; while ( it . hasNext ( ) ) { Map . Entry me = ( Map . Entry ) it . next ( ) ; // feature index pw . println ( me . getValue ( ) + "\t" + me . getKey ( ) ) ; } pw . flush ( ) ; pw . close ( ) ;
public class MPPConstraintField { /** * Retrieve an instance of the ConstraintField class based on the data read from an * MS Project file . * @ param value value from an MS Project file * @ return ConstraintField instance */ public static ConstraintField getInstance ( int value ) { } }
ConstraintField result = null ; if ( value >= 0 && value < FIELD_ARRAY . length ) { result = FIELD_ARRAY [ value ] ; } return ( result ) ;
public class Record { /** * Get grouping { @ link Boolean } value * @ param label target label * @ return { @ link Boolean } value of the label . If it is not null . */ public Boolean getGroupingBoolean ( String label ) { } }
PrimitiveObject o = getPrimitiveObject ( KEY , label , ObjectUtil . BOOLEAN , "Boolean" ) ; if ( o == null ) { return null ; } return ( Boolean ) o . getObject ( ) ;
public class BaseCheckBox { /** * Checks or unchecks the check box , firing { @ link ValueChangeEvent } if * appropriate . * Note that this < em > does not < / em > set the value property of the checkbox * input element wrapped by this widget . For access to that property , see * { @ link # setFormValue ( String ) } * @ param value true to check , false to uncheck ; null value implies false * @ param fireEvents If true , and value has changed , fire a * { @ link ValueChangeEvent } */ @ Override public void setValue ( Boolean value , boolean fireEvents ) { } }
if ( value == null ) { value = Boolean . FALSE ; } Boolean oldValue = getValue ( ) ; inputElem . setChecked ( value ) ; inputElem . setDefaultChecked ( value ) ; if ( value . equals ( oldValue ) ) { return ; } if ( fireEvents ) { ValueChangeEvent . fire ( this , value ) ; }
public class LeaderState { /** * Starts sending AppendEntries requests to all cluster members . */ private void startAppendTimer ( ) { } }
// Set a timer that will be used to periodically synchronize with other nodes // in the cluster . This timer acts as a heartbeat to ensure this node remains // the leader . LOGGER . trace ( "{} - Starting append timer" , context . getCluster ( ) . member ( ) . address ( ) ) ; appendTimer = context . getThreadContext ( ) . schedule ( Duration . ZERO , context . getHeartbeatInterval ( ) , this :: appendMembers ) ;
public class MtasSolrResultUtil { /** * Decode . * @ param l the l * @ return the array list */ @ SuppressWarnings ( { } }
"rawtypes" , "unchecked" } ) static ArrayList decode ( ArrayList l ) { for ( int i = 0 ; i < l . size ( ) ; i ++ ) { if ( l . get ( i ) instanceof NamedList ) { l . set ( i , decode ( ( NamedList ) l . get ( i ) ) ) ; } else if ( l . get ( i ) instanceof ArrayList ) { l . set ( i , decode ( ( ArrayList ) l . get ( i ) ) ) ; } } return l ;
public class OptionsApi {
    /**
     * Replace old options with new. (asynchronously)
     * The POST operation will replace CloudCluster/Options with new values.
     *
     * @param body Body Data (required)
     * @param callback The callback to be executed when the API call finishes
     * @return The request call
     * @throws ApiException If fail to process the API call, e.g. serializing the request body object
     */
    public com.squareup.okhttp.Call optionsPostAsync(OptionsPost body, final ApiCallback<OptionsPostResponseStatusSuccess> callback) throws ApiException {
        ProgressResponseBody.ProgressListener progressListener = null;
        ProgressRequestBody.ProgressRequestListener progressRequestListener = null;
        // Only wire up download/upload progress reporting when a callback was supplied.
        if (callback != null) {
            progressListener = new ProgressResponseBody.ProgressListener() {
                @Override
                public void update(long bytesRead, long contentLength, boolean done) {
                    callback.onDownloadProgress(bytesRead, contentLength, done);
                }
            };
            progressRequestListener = new ProgressRequestBody.ProgressRequestListener() {
                @Override
                public void onRequestProgress(long bytesWritten, long contentLength, boolean done) {
                    callback.onUploadProgress(bytesWritten, contentLength, done);
                }
            };
        }
        // Validate arguments, build the HTTP call, then hand it to the client
        // for asynchronous execution; the callback receives the typed response.
        com.squareup.okhttp.Call call = optionsPostValidateBeforeCall(body, progressListener, progressRequestListener);
        Type localVarReturnType = new TypeToken<OptionsPostResponseStatusSuccess>() {}.getType();
        apiClient.executeAsync(call, localVarReturnType, callback);
        return call;
    }
}
public class FanartTvApi { /** * Get images for artist * @ param id * @ return * @ throws FanartTvException */ public FTMusicArtist getMusicArtist ( String id ) throws FanartTvException { } }
URL url = ftapi . getImageUrl ( BaseType . ARTIST , id ) ; String page = requestWebPage ( url ) ; FTMusicArtist artist = null ; try { artist = mapper . readValue ( page , FTMusicArtist . class ) ; } catch ( IOException ex ) { throw new FanartTvException ( ApiExceptionType . MAPPING_FAILED , "Fauled to get Music Artist with ID " + id , url , ex ) ; } return artist ;
public class AttributeRepositoryDecorator {
    /**
     * Updates an attribute's representation in the backend for each concrete {@link EntityType} that
     * has the {@link Attribute}.
     *
     * @param attr current version of the attribute
     * @param updatedAttr new version of the attribute
     */
    private void updateAttributeInBackend(Attribute attr, Attribute updatedAttr) {
        MetaDataService meta = dataService.getMeta();
        // Propagate the change to the backend of every concrete child of the
        // attribute's entity type (abstract types have no backend representation).
        meta.getConcreteChildren(attr.getEntity()).forEach(entityType -> meta.getBackend(entityType).updateAttribute(entityType, attr, updatedAttr));
    }
}
public class LittleEndianDataOutputStream {
    /**
     * Writes {@code len} bytes from {@code b} starting at offset {@code off}.
     * Raw byte writes are byte-order agnostic, so this delegates directly to
     * the wrapped stream.
     *
     * @see java.io.OutputStream#write(byte[], int, int)
     */
    @Override
    public void write(byte[] b, int off, int len) throws IOException {
        inner.write(b, off, len);
    }
}
public class I18nInvItem { /** * < p > Setter for lang . < / p > * @ param pLang reference */ @ Override public final void setLang ( final Languages pLang ) { } }
this . lang = pLang ; if ( this . itsId == null ) { this . itsId = new IdI18nInvItem ( ) ; } this . itsId . setLang ( this . lang ) ;
public class MStress_Client {
    /**
     * Prints usage information to standard out and exits with status 1.
     */
    private static void usage() {
        String className = MStress_Client.class.getName();
        // Fixed: a space is required before "[-t" — the original concatenation
        // printed "dfs-port[-t ..." with the placeholder and options run together.
        System.out.printf("Usage: java %s -s dfs-server -p dfs-port" + " [-t [create|stat|read|readdir|delete|rename] -a planfile-path -c host -n process-name" + " -P prefix]\n", className);
        System.out.printf(" -t: this option requires -a, -c, and -n options.\n");
        System.out.printf(" -P: default prefix is PATH_.\n");
        System.out.printf("eg:\n");
        System.out.printf(" java %s -s <metaserver-host> -p <metaserver-port> -t create" + " -a <planfile> -c localhost -n Proc_00\n", className);
        System.exit(1);
    }
}
public class FileSearchExtensions {
    /**
     * Checks if the given file exists anywhere under the parent directory,
     * searching recursively.
     *
     * @param parent The parent directory to search.
     * @param search The file to search.
     * @return true if the file exists under the parent directory otherwise false.
     */
    public static boolean containsFileRecursive(final File parent, final File search) {
        final File toSearch = search.getAbsoluteFile();
        final File[] children = parent.getAbsoluteFile().listFiles();
        if (children == null) {
            // parent is not a directory, or an I/O error occurred.
            return false;
        }
        final List<File> fileList = Arrays.asList(children);
        // Direct hit in this directory? The original checked this inside the
        // loop on every iteration (accidental O(n^2)); checking once before
        // recursing is equivalent and linear.
        if (fileList.contains(toSearch)) {
            return true;
        }
        // Otherwise descend into subdirectories, short-circuiting on a match.
        for (final File currentFile : fileList) {
            if (currentFile.isDirectory() && containsFileRecursive(currentFile, toSearch)) {
                return true;
            }
        }
        return false;
    }
}
public class RequestCreator {
    /**
     * Loads the next page of the current RSS feed.
     * If no page was previously loaded, this will request the first page.
     */
    public RequestCreator nextPage() {
        Request request = data.build();
        String url = request.url;
        int page = request.page;
        // Search requests are tracked under their full query URL, so append the term.
        if (request.search != null)
            url += "?s=" + request.search;
        // The singleton remembers the last page loaded per URL; prefer that
        // over the builder's current page value when present.
        Map<String, Integer> pageTracker = singleton.getPageTracker();
        if (pageTracker.containsKey(url))
            page = pageTracker.get(url);
        // Advance to the page after the last one loaded.
        this.data.page(page + 1);
        return this;
    }
}
public class MapDataDao { /** * Note that if not logged in , the Changeset for each returned element will be null * @ param wayIds a collection of way ids to return * @ throws OsmNotFoundException if < b > any < / b > one of the given ways does not exist * @ return a list of ways . */ public List < Way > getWays ( Collection < Long > wayIds ) { } }
if ( wayIds . isEmpty ( ) ) return Collections . emptyList ( ) ; return getSomeElements ( WAY + "s?" + WAY + "s=" + toCommaList ( wayIds ) , Way . class ) ;
public class PluginUtils {
    /**
     * Get URLClassLoader over the plugin's library folder plus any extra
     * external library class paths.
     *
     * @param pluginDir the plugin's root directory
     * @param extLibClassPaths extra class-path entries resolved relative to
     *        {@code pluginDir}; each entry may be a file or a directory (all
     *        files in a directory are added); may be null
     * @param parentLoader the parent class loader for delegation
     * @return the class loader, or null when {@code pluginDir} has no library folder
     */
    public static URLClassLoader getURLClassLoader(final File pluginDir, List<String> extLibClassPaths, ClassLoader parentLoader) {
        final File libDir = new File(pluginDir, LIBRARY_FOLDER_NAME);
        if (libDir.exists() && libDir.isDirectory()) {
            final File[] files = libDir.listFiles();
            final ArrayList<URL> urls = getUrls(files);
            if (extLibClassPaths != null) {
                for (final String extLibClassPath : extLibClassPaths) {
                    try {
                        final File extLibFile = new File(pluginDir, extLibClassPath);
                        if (extLibFile.exists()) {
                            if (extLibFile.isDirectory()) {
                                // extLibFile is a directory; load all the files in the
                                // directory.
                                final File[] extLibFiles = extLibFile.listFiles();
                                urls.addAll(getUrls(extLibFiles));
                            } else {
                                final URL url = extLibFile.toURI().toURL();
                                urls.add(url);
                            }
                        } else {
                            // Missing entries are logged and skipped rather than
                            // failing the whole loader.
                            logger.error("External library path not found. path = " + extLibFile.getAbsolutePath());
                            continue;
                        }
                    } catch (final MalformedURLException e) {
                        logger.error("Invalid External library path. path = " + extLibClassPath + " dir = " + pluginDir, e);
                    }
                }
            }
            return new URLClassLoader(urls.toArray(new URL[urls.size()]), parentLoader);
        } else {
            logger.error("Library path not found. path = " + libDir);
            return null;
        }
    }
}
public class MultiLineCommentDocumentationProvider {
    /**
     * Returns the node holding the documentation comment for the given object:
     * the last hidden multi-line comment (whose terminal rule matches the
     * configured rule name and whose text matches the start-tag pattern)
     * preceding the object's first non-hidden token.
     *
     * @param object the model element to find documentation for (non-null)
     * @return a singleton list with the comment node, or an empty list
     */
    @Override
    public List<INode> getDocumentationNodes(/* @NonNull */ EObject object) {
        ICompositeNode node = NodeModelUtils.getNode(object);
        List<INode> result = Collections.emptyList();
        if (node != null) {
            // get the last multi line comment before a non hidden leaf node
            for (ILeafNode leafNode : node.getLeafNodes()) {
                if (!leafNode.isHidden())
                    break;
                if (leafNode.getGrammarElement() instanceof TerminalRule && ruleName.equalsIgnoreCase(((TerminalRule) leafNode.getGrammarElement()).getName())) {
                    String comment = leafNode.getText();
                    if (commentStartTagRegex.matcher(comment).matches()) {
                        // Later matches overwrite earlier ones, so the comment
                        // closest to the element wins.
                        result = Collections.<INode> singletonList(leafNode);
                    }
                }
            }
        }
        return result;
    }
}
public class KTypeVTypeHashMap {
    /**
     * This method is invoked when there is a new key/value pair to be inserted into
     * the buffers but there is not enough empty slots to do so.
     *
     * New buffers are allocated. If this succeeds, we know we can proceed
     * with rehashing so we assign the pending element to the previous buffer
     * (possibly violating the invariant of having at least one empty slot)
     * and rehash all keys, substituting new buffers at the end.
     *
     * @param slot the free slot in the old buffers where the pending pair goes
     * @param pendingKey the key awaiting insertion (must be non-empty)
     * @param pendingValue the value awaiting insertion
     */
    protected void allocateThenInsertThenRehash(int slot, KType pendingKey, VType pendingValue) {
        // Preconditions: we are exactly at the resize threshold, the target slot
        // is free, and the pending key is a real key.
        assert assigned == resizeAt && Intrinsics.<KType> isEmpty(Intrinsics.<KType> cast(keys[slot])) && !Intrinsics.<KType> isEmpty(pendingKey);
        // Try to allocate new buffers first. If we OOM, we leave in a consistent state.
        final KType[] prevKeys = Intrinsics.<KType[]> cast(this.keys);
        final VType[] prevValues = Intrinsics.<VType[]> cast(this.values);
        allocateBuffers(nextBufferSize(mask + 1, size(), loadFactor));
        assert this.keys.length > prevKeys.length;
        // We have succeeded at allocating new data so insert the pending key/value at
        // the free slot in the old arrays before rehashing.
        prevKeys[slot] = pendingKey;
        prevValues[slot] = pendingValue;
        // Rehash old keys, including the pending key.
        rehash(prevKeys, prevValues);
    }
}
public class LinearSearch {
    /**
     * Search for the minimum element in the array.
     *
     * @param longArray array that we are searching in.
     * @return the minimum element in the array.
     * @throws IllegalArgumentException if the array is empty.
     */
    public static long searchMin(long[] longArray) {
        if (longArray.length == 0) {
            throw new IllegalArgumentException("The array you provided does not have any elements");
        }
        // Scan every element, keeping the smallest seen so far.
        long minimum = longArray[0];
        for (final long candidate : longArray) {
            if (candidate < minimum) {
                minimum = candidate;
            }
        }
        return minimum;
    }
}
public class LinkedList {
    /**
     * Find the next link after the start link, even if it is part of a transaction.
     * A dirty scan.
     * Caller must be synchronized on LinkedList.
     *
     * @param start the link where the search for the next link is to start,
     *        null implies the head of the list.
     * @return Link in the list following the start position.
     *         Returns null if there is none, or the start is deleted.
     * @throws ObjectManagerException
     */
    protected Link nextLink(Link start) throws ObjectManagerException {
        if (Tracing.isAnyTracingEnabled() && trace.isEntryEnabled())
            trace.entry(this, cclass, "nextLink", new Object[] { start });
        Token nextToken = null;
        if (start == null) {
            // Start at the head of the list?
            nextToken = head;
        } else {
            // Follow the start link's successor (even if start is in a
            // transaction or deleted — this is a dirty read).
            nextToken = start.next;
        } // if (start == null).
        // Resolve the token to its managed object; null token -> null link.
        Link nextLink = null;
        if (nextToken != null)
            nextLink = (Link) (nextToken.getManagedObject());
        if (Tracing.isAnyTracingEnabled() && trace.isEntryEnabled())
            trace.exit(this, cclass, "nextLink", new Object[] { nextLink });
        return nextLink;
    }
}
public class BoxApiFolder { /** * Gets a request that copies a folder * @ param id id of folder to copy * @ param parentId id of parent folder to copy folder into * @ return request to copy a folder */ public BoxRequestsFolder . CopyFolder getCopyRequest ( String id , String parentId ) { } }
BoxRequestsFolder . CopyFolder request = new BoxRequestsFolder . CopyFolder ( id , parentId , getFolderCopyUrl ( id ) , mSession ) ; return request ;
public class GetIdentityMailFromDomainAttributesResult {
    /**
     * A map of identities to custom MAIL FROM attributes.
     *
     * @param mailFromDomainAttributes A map of identities to custom MAIL FROM attributes.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public GetIdentityMailFromDomainAttributesResult withMailFromDomainAttributes(java.util.Map<String, IdentityMailFromDomainAttributes> mailFromDomainAttributes) {
        // Fluent variant of the setter: delegate, then return this for chaining.
        setMailFromDomainAttributes(mailFromDomainAttributes);
        return this;
    }
}
public class CRFCliqueTree {
    /**
     * Gives the probability of a tag at a single position conditioned on a
     * sequence of previous labels.
     *
     * @param position Index in sequence
     * @param label Label of item at index
     * @param prevLabels the labels preceding {@code position}
     * @return conditional log probability
     */
    public double condLogProbGivenPrevious(int position, int label, int[] prevLabels) {
        if (prevLabels.length + 1 == windowSize) {
            // History exactly fills the clique window: query the table directly.
            return factorTables[position].conditionalLogProbGivenPrevious(prevLabels, label);
        } else if (prevLabels.length + 1 < windowSize) {
            // Shorter history than the window: marginalize out the front
            // positions until the table's window matches the history length.
            FactorTable ft = factorTables[position].sumOutFront();
            while (ft.windowSize() > prevLabels.length + 1) {
                ft = ft.sumOutFront();
            }
            return ft.conditionalLogProbGivenPrevious(prevLabels, label);
        } else {
            // Longer history than needed: keep only the most recent
            // (windowSize - 1) labels.
            int[] p = new int[windowSize - 1];
            System.arraycopy(prevLabels, prevLabels.length - p.length, p, 0, p.length);
            return factorTables[position].conditionalLogProbGivenPrevious(p, label);
        }
    }
}
public class ServerService {
    /**
     * Reboot a single server or group of servers.
     *
     * @param serverRefs server references list
     * @return OperationFuture wrapper for Server list
     */
    public OperationFuture<List<Server>> reboot(Server... serverRefs) {
        // Issue the reboot via the API client and wrap the queued "Reboot"
        // operation in a future tracked against the affected servers.
        return powerOperationResponse(Arrays.asList(serverRefs), "Reboot", client.reboot(ids(serverRefs)));
    }
}
public class EVCacheNodeLocator {
    /**
     * Returns the primary node for the given key using ketama consistent
     * hashing over the node ring.
     *
     * @see net.spy.memcached.NodeLocator#getPrimary
     */
    public MemcachedNode getPrimary(String k) {
        // Optionally hash only the key prefix before the delimiter, so related
        // keys map to the same node.
        if (partialStringHash.get()) {
            final int index = k.indexOf(hashDelimiter.get());
            if (index > 0) {
                k = k.substring(0, index);
            }
        }
        final long _hash = hashingAlgorithm.hash(k);
        Long hash = Long.valueOf(_hash);
        // Find the first node at or after the hash on the ring; wrap around to
        // the first node if the hash falls past the last entry.
        hash = ketamaNodes.ceilingKey(hash);
        if (hash == null) {
            hash = ketamaNodes.firstKey();
        }
        return ketamaNodes.get(hash);
    }
}
public class YearMonthDay {
    /**
     * Returns a copy of this date with the specified field set to a new value.
     * For example, if the field type is <code>dayOfMonth</code> then the day
     * would be changed in the returned instance.
     *
     * These three lines are equivalent:
     * <pre>
     * YearMonthDay updated = ymd.withField(DateTimeFieldType.dayOfMonth(), 6);
     * YearMonthDay updated = ymd.dayOfMonth().setCopy(6);
     * YearMonthDay updated = ymd.property(DateTimeFieldType.dayOfMonth()).setCopy(6);
     * </pre>
     *
     * @param fieldType the field type to set, not null
     * @param value the value to set
     * @return a copy of this instance with the field set
     * @throws IllegalArgumentException if the value is null or invalid
     */
    public YearMonthDay withField(DateTimeFieldType fieldType, int value) {
        int index = indexOfSupported(fieldType);
        // Avoid allocating a new instance when the value is unchanged.
        if (value == getValue(index)) {
            return this;
        }
        // Validate and set via the field, which may normalize the value array.
        int[] newValues = getValues();
        newValues = getField(index).set(this, index, newValues, value);
        return new YearMonthDay(this, newValues);
    }
}
public class VMath { /** * Component - wise matrix operation : m1 = m1 - m2 * s2, * overwriting the existing matrix m1. * @ param m1 Input matrix * @ param m2 another matrix * @ param s2 Scalar * @ return m1 = m1 - s2 * m2 , overwriting m1 */ public static double [ ] [ ] minusTimesEquals ( final double [ ] [ ] m1 , final double [ ] [ ] m2 , final double s2 ) { } }
assert getRowDimensionality ( m1 ) == getRowDimensionality ( m2 ) && getColumnDimensionality ( m1 ) == getColumnDimensionality ( m2 ) : ERR_MATRIX_DIMENSIONS ; for ( int i = 0 ; i < m1 . length ; i ++ ) { final double [ ] row1 = m1 [ i ] , row2 = m2 [ i ] ; for ( int j = 0 ; j < row1 . length ; j ++ ) { row1 [ j ] -= s2 * row2 [ j ] ; } } return m1 ;
public class SegmentedBucketLocker {
    /**
     * Unlocks segments corresponding to bucket indexes in specific order to prevent deadlocks.
     *
     * @param i1 first bucket index
     * @param i2 second bucket index
     */
    void unlockBucketsRead(long i1, long i2) {
        int bucket1LockIdx = getBucketLock(i1);
        int bucket2LockIdx = getBucketLock(i2);
        // Both buckets may map to the same segment lock; unlock it only once.
        if (bucket1LockIdx == bucket2LockIdx) {
            lockAry[bucket1LockIdx].tryUnlockRead();
            return;
        }
        // always unlock segments in same order to avoid deadlocks
        lockAry[bucket1LockIdx].tryUnlockRead();
        lockAry[bucket2LockIdx].tryUnlockRead();
    }
}