signature
stringlengths
43
39.1k
implementation
stringlengths
0
450k
public class TinyPlugzConfigurator { /** * Sets up a { @ link TinyPlugz } instance which uses the Classloader which * loaded the { @ link TinyPlugzConfigurator } class as parent Classloader . * This Classloader will be used for several purposes . First , it serves as * parent Classloader for the plugin Classloader which is to be created to * access classes and configurations from plugins . Second , the Classloader * will be used to look up the TinyPlugz service provider either using the * { @ link ServiceLoader } or by looking up an explicit implementation class . * @ return Fluent builder object for further configuration . */ public static DefineProperties setupUsingApplicationClassLoader ( ) { } }
final ClassLoader cl = Require . nonNull ( TinyPlugzConfigurator . class . getClassLoader ( ) ) ; return new Impl ( cl ) ;
public class FastStreamingKMeans {

    /**
     * Clusters the rows of the provided matrix into the specified number of
     * clusters in a single pass, using the parameters to guide how clusters
     * are formed. Note that due to the streaming nature of the algorithm,
     * fewer than {@code numClusters} may be returned.
     *
     * @param matrix the matrix whose rows are to be clustered
     * @param numClusters the number of clusters to be returned. Note that
     *        under some circumstances, the algorithm may return fewer
     *        clusters than this amount.
     * @param kappa the maximum number of facilities (clusters) to keep in
     *        memory at any given point. At most this should be
     *        {@code numClusters * Math.log(matrix.rows())}
     * @param beta the initial cost for creating a new facility. The default
     *        value of {@value #DEFAULT_BETA_VALUE} is recommended for this
     *        parameter, unless specific customization is required.
     * @param simFunc the similarity function used to compare rows of the
     *        matrix. In the original paper, this is the inverse of the square
     *        of the Euclidean distance.
     * @return an assignment from each row of the matrix to a cluster
     *         identifier.
     */
    public Assignments cluster(Matrix matrix, int numClusters, int kappa,
                               double beta, SimilarityFunction simFunc) {
        int rows = matrix.rows();
        int cols = matrix.columns();
        // f is the facility cost
        double f = 1d / (numClusters * (1 + Math.log(rows)));
        // This list contains at most kappa facilities
        // (NOTE(review): the inner loop's "<= kappa" bound lets it briefly
        // grow to kappa + 1 before consolidation kicks in — confirm intended)
        List<CandidateCluster> facilities = new ArrayList<CandidateCluster>(kappa);
        for (int r = 0; r < rows; /* no auto-increment */) {
            // Stream rows into facilities until we exceed the kappa budget or
            // run out of data (lines 3-9 of the algorithm)
            for (; facilities.size() <= kappa && r < rows; ++r) {
                DoubleVector x = matrix.getRowVector(r);
                CandidateCluster closest = null;
                // Delta is ultimately assigned the lowest inverse-similarity
                // (distance) to any of the current facilities' center of mass
                double delta = Double.MAX_VALUE;
                for (CandidateCluster y : facilities) {
                    double similarity = simFunc.sim(x, y.centerOfMass());
                    double invSim = invertSim(similarity);
                    if (invSim < delta) {
                        delta = invSim;
                        closest = y;
                    }
                }
                // Base case: if this is the first data point and there are no
                // other facilities, or if we surpass the probability of a new
                // event occurring (line 6)
                if (closest == null || Math.random() < delta / f) {
                    CandidateCluster fac = new CandidateCluster();
                    fac.add(r, x);
                    facilities.add(fac);
                }
                // Otherwise, add this data point to an existing facility
                else {
                    closest.add(r, x);
                }
            }
            // If we still have data points left to process (line 10)
            if (r < rows) {
                // Check whether we have more than kappa clusters (line 11).
                // Kappa provides the upper bound on the clusters (facilities)
                // that are kept at any given time. If there are more, then we
                // need to consolidate facilities
                while (facilities.size() > kappa) {
                    // Raise the facility cost each consolidation round so the
                    // retention test below becomes progressively stricter
                    f *= beta;
                    int curNumFacilities = facilities.size();
                    List<CandidateCluster> consolidated =
                        new ArrayList<CandidateCluster>(kappa);
                    consolidated.add(facilities.get(0));
                    for (int j = 1; j < curNumFacilities; ++j) {
                        CandidateCluster x = facilities.get(j);
                        int pointsAssigned = x.size();
                        // Compute the similarity of this facility to all other
                        // consolidated facilities. Delta represents the lowest
                        // inverse-similarity (distance) to another facility.
                        // See line 17 of the algorithm.
                        double delta = Double.MAX_VALUE;
                        CandidateCluster closest = null;
                        for (CandidateCluster y : consolidated) {
                            double similarity =
                                simFunc.sim(x.centerOfMass(), y.centerOfMass());
                            double invSim = invertSim(similarity);
                            if (invSim < delta) {
                                delta = invSim;
                                closest = y;
                            }
                        }
                        // Use (pointsAssigned * delta / f) as a threshold for
                        // whether this facility could constitute a new event.
                        // If a random check is less than it, then we nominate
                        // this facility to continue.
                        if (Math.random() < (pointsAssigned * delta) / f) {
                            consolidated.add(x);
                        }
                        // Otherwise, we consolidate the points in this
                        // community to the closest facility
                        else {
                            assert closest != null : "no closest facility";
                            closest.merge(x);
                        }
                    }
                    verbose(LOGGER, "Consolidated %d facilities down to %d",
                            facilities.size(), consolidated.size());
                    facilities = consolidated;
                }
            }
            // Once we have processed all of the items in the stream (line 23
            // of the algorithm), reduce the kappa clusters into k clusters.
            else {
                // Edge case for when we already have fewer facilities than we
                // need
                if (facilities.size() <= numClusters) {
                    verbose(LOGGER, "Had fewer facilities, %d, than the "
                            + "requested number of clusters %d",
                            facilities.size(), numClusters);
                    // There's no point in reducing the number of facilities
                    // further since we're under the desired amount, nor can we
                    // go back and increase the number of facilities since all
                    // the data has been seen at this point. Therefore, just
                    // loop through the candidates and report their assignments.
                    Assignment[] assignments = new Assignment[rows];
                    int numFacilities = facilities.size();
                    for (int j = 0; j < numFacilities; ++j) {
                        CandidateCluster fac = facilities.get(j);
                        veryVerbose(LOGGER,
                                "Facility %d had a center of mass at %s",
                                j, fac.centerOfMass());
                        int clusterId = j;
                        IntIterator iter = fac.indices().iterator();
                        while (iter.hasNext()) {
                            int row = iter.nextInt();
                            assignments[row] = new HardAssignment(clusterId);
                        }
                    }
                    return new Assignments(numClusters, assignments, matrix);
                }
                else {
                    verbose(LOGGER, "Had more than %d facilities, "
                            + "consolidating to %d",
                            facilities.size(), numClusters);
                    List<DoubleVector> facilityCentroids =
                        new ArrayList<DoubleVector>(facilities.size());
                    // NOTE(review): weights is filled in but never read after
                    // this loop — appears to be dead state; confirm
                    int[] weights = new int[facilities.size()];
                    int i = 0;
                    for (CandidateCluster fac : facilities) {
                        facilityCentroids.add(fac.centerOfMass());
                        weights[i++] = fac.size();
                    }
                    // Wrap the facilities centroids in a matrix for convenience
                    Matrix m = Matrices.asMatrix(facilityCentroids);
                    // Select the initial seed points for reducing the kappa
                    // clusters to k using the generalized ORSS selection
                    // process, which supports data comparisons other than
                    // Euclidean distance
                    GeneralizedOrssSeed orss = new GeneralizedOrssSeed(simFunc);
                    DoubleVector[] centroids = orss.chooseSeeds(numClusters, m);
                    assert nonNullCentroids(centroids)
                        : "ORSS seed returned too few centroids";
                    // This records the assignments of the kappa facilities to
                    // the k centers. Initially, everything is assigned to the
                    // same center and iterations repeat until convergence.
                    int[] facilityAssignments = new int[facilities.size()];
                    // Using those facilities as starting points, run k-means on
                    // the facility centroids until no facilities change their
                    // membership.
                    int numChanged = 0;
                    int kmeansIters = 0;
                    do {
                        numChanged = 0;
                        // Recompute the new centroids each time
                        DoubleVector[] updatedCentroids =
                            new DoubleVector[numClusters];
                        for (i = 0; i < updatedCentroids.length; ++i)
                            updatedCentroids[i] = new DenseVector(cols);
                        int[] updatedCentroidSizes = new int[numClusters];
                        // NOTE(review): similaritySum is accumulated but never
                        // used — appears to be dead state; confirm
                        double similaritySum = 0;
                        // For each CandidateCluster find the most similar centroid
                        i = 0;
                        for (CandidateCluster fac : facilities) {
                            int mostSim = -1;
                            double highestSim = -1;
                            for (int j = 0; j < centroids.length; ++j) {
                                double sim = simFunc.sim(centroids[j],
                                                         fac.centerOfMass());
                                if (sim > highestSim) {
                                    highestSim = sim;
                                    mostSim = j;
                                }
                            }
                            // For the most similar centroid, update its center
                            // of mass for the next round with the weighted
                            // vector
                            VectorMath.add(updatedCentroids[mostSim], fac.sum());
                            updatedCentroidSizes[mostSim] += fac.size();
                            int curAssignment = facilityAssignments[i];
                            facilityAssignments[i] = mostSim;
                            similaritySum += highestSim;
                            if (curAssignment != mostSim) {
                                veryVerbose(LOGGER, "Facility %d changed its "
                                        + "centroid from %d to %d",
                                        i, curAssignment, mostSim);
                                numChanged++;
                            }
                            i++;
                        }
                        // Once all the facilities have been assigned to one of
                        // the k-centroids, recompute the centroids by
                        // normalizing the sum of the weighted vectors according
                        // to the number of points
                        // (NOTE(review): a centroid that attracted no facility
                        // has size 0 and divides by zero here — confirm this
                        // cannot occur after ORSS seeding)
                        for (int j = 0; j < updatedCentroids.length; ++j) {
                            DoubleVector v = updatedCentroids[j];
                            int size = updatedCentroidSizes[j];
                            for (int k = 0; k < cols; ++k)
                                v.set(k, v.get(k) / size);
                            // Update this centroid for the next round
                            centroids[j] = v;
                        }
                        veryVerbose(LOGGER, "%d centroids swapped their facility",
                                numChanged);
                    } while (numChanged > 0
                             && ++kmeansIters < MAX_BATCH_KMEANS_ITERS);
                    // Use the final assignments to create assignments for each
                    // of the input data points
                    Assignment[] assignments = new Assignment[rows];
                    for (int j = 0; j < facilityAssignments.length; ++j) {
                        CandidateCluster fac = facilities.get(j);
                        veryVerbose(LOGGER,
                                "Facility %d had a center of mass at %s",
                                j, fac.centerOfMass());
                        int clusterId = facilityAssignments[j];
                        IntIterator iter = fac.indices().iterator();
                        while (iter.hasNext()) {
                            int row = iter.nextInt();
                            assignments[row] = new HardAssignment(clusterId);
                        }
                    }
                    return new Assignments(numClusters, assignments, matrix);
                }
            }
        }
        // Both terminal branches above return, so reaching this line means the
        // loop exited without ever processing the final-assignment branch
        throw new AssertionError("Processed all data points without making assignment");
    }
}
public class ThrottledApiHandler { /** * Retrieve a specific item * This method does not count towards the rate limit and is not affected by the throttle * @ param id The id of the item * @ param data Additional information to retrieve * @ return The item * @ see < a href = https : / / developer . riotgames . com / api / methods # ! / 649/2176 > Official API documentation < / a > */ public Future < Item > getItem ( int id , ItemData data ) { } }
return new DummyFuture < > ( handler . getItem ( id , data ) ) ;
public class CreateTableUtil {

    /**
     * Checks whether the given table already exists by probing it with a
     * cheap {@code SELECT}: a grammar error from the database is taken to
     * mean the table is missing.
     *
     * @param jdbc the JDBC helper used to run the probe query
     * @param tableName the table to check; NOTE(review): concatenated
     *        directly into SQL — must never come from untrusted input
     * @return {@code true} if the probe query runs without a grammar error,
     *         {@code false} if it raises {@link BadSqlGrammarException}
     */
    protected static boolean checkTableExist(Jdbc jdbc, String tableName) {
        // NOTE(review): "limit 1" is MySQL/PostgreSQL dialect — confirm the
        // target database supports it.
        String sql = "select 1 from " + tableName + " limit 1;";
        try {
            // NOTE(review): behavior of queryForInt on an existing-but-empty
            // table depends on the custom Jdbc class — if it throws something
            // other than BadSqlGrammarException, that exception propagates.
            jdbc.queryForInt(sql);
            return true;
        } catch (BadSqlGrammarException e) {
            // The table (or a referenced column) does not exist.
            return false;
        }
    }
}
public class GeoJsonToAssembler { /** * Converts a multipoint to its corresponding Transfer Object * @ param input the multipoint * @ return a transfer object for the multipoint */ public MultiPointTo toTransferObject ( MultiPoint input ) { } }
MultiPointTo result = new MultiPointTo ( ) ; result . setCrs ( GeoJsonTo . createCrsTo ( "EPSG:" + input . getSRID ( ) ) ) ; result . setCoordinates ( getPoints ( input ) ) ; return result ;
public class WSkipLinksExample { /** * Creates a panel for the example . * @ param title the panel title . * @ param accessKey the panel access key * @ return a panel for use in the example . */ private WPanel buildPanel ( final String title , final char accessKey ) { } }
WPanel panel = buildPanel ( title ) ; panel . setAccessKey ( accessKey ) ; return panel ;
public class RESTRegistryService { /** * Returns the corresponding registry entry which wraps a node of type " exo : registryEntry " * @ param entryPath The relative path to the registry entry * @ response * { code } * " entryStream " : the output stream corresponding registry entry which wraps a node of type " exo : registryEntry * { code } * Example : * { @ code * < Audit jcr : primaryType = " exo : registryEntry " > * < adminIdentity jcr : primaryType = " nt : unstructured " value = " * : / Platform / Administrators " / > * < / Audit > * @ LevelAPI Experimental */ @ GET @ Path ( "/{entryPath:.+}" ) @ Produces ( MediaType . APPLICATION_XML ) public Response getEntry ( @ PathParam ( "entryPath" ) String entryPath ) { } }
SessionProvider sessionProvider = sessionProviderService . getSessionProvider ( null ) ; try { RegistryEntry entry ; entry = regService . getEntry ( sessionProvider , normalizePath ( entryPath ) ) ; return Response . ok ( new DOMSource ( entry . getDocument ( ) ) ) . build ( ) ; } catch ( PathNotFoundException e ) { return Response . status ( Response . Status . NOT_FOUND ) . build ( ) ; } catch ( RepositoryException e ) { LOG . error ( "Get registry entry failed" , e ) ; throw new WebApplicationException ( e ) ; }
public class AS2ClientBuilder {

    /**
     * Set the key store file and password for the AS2 client. The key store
     * must be an existing file of type PKCS12 containing at least the key
     * alias of the sender (see {@link #setSenderAS2ID(String)}). The key
     * store file must be writable as dynamically certificates of partners
     * are added.
     *
     * @param aKeyStoreFile
     *        The existing key store file. Must exist and may not be
     *        <code>null</code>.
     * @param sKeyStorePassword
     *        The password to the key store. May not be <code>null</code> but
     *        empty.
     * @return this for chaining
     * @deprecated Use the generic key-store setter with an explicit
     *             {@code EKeyStoreType} instead; this is a thin PKCS12-only
     *             convenience wrapper.
     */
    @Nonnull
    @Deprecated
    public AS2ClientBuilder setPKCS12KeyStore(@Nullable final File aKeyStoreFile,
                                              @Nullable final String sKeyStorePassword) {
        // Pure delegation: fix the key-store type to PKCS12.
        return setKeyStore(EKeyStoreType.PKCS12, aKeyStoreFile, sKeyStorePassword);
    }
}
public class Calendar { /** * Compute the Gregorian calendar year , month , and day of month from * the given Julian day . These values are not stored in fields , but in * member variables gregorianXxx . Also compute the DAY _ OF _ WEEK and * DOW _ LOCAL fields . */ private final void computeGregorianAndDOWFields ( int julianDay ) { } }
computeGregorianFields ( julianDay ) ; // Compute day of week : JD 0 = Monday int dow = fields [ DAY_OF_WEEK ] = julianDayToDayOfWeek ( julianDay ) ; // Calculate 1 - based localized day of week int dowLocal = dow - getFirstDayOfWeek ( ) + 1 ; if ( dowLocal < 1 ) { dowLocal += 7 ; } fields [ DOW_LOCAL ] = dowLocal ;
public class MustacheResolver { /** * Scans given template for mustache keys ( syntax { { value } } ) . * @ return empty list if no mustache key is found . */ public static List < String > getMustacheKeys ( final String template ) { } }
final Set < String > keys = new HashSet < > ( ) ; if ( StringUtils . isNotEmpty ( template ) ) { final Matcher matcher = MUSTACHE_PATTERN . matcher ( template ) ; while ( matcher . find ( ) ) { keys . add ( matcher . group ( 1 ) ) ; } } return new ArrayList < > ( keys ) ;
public class DescribeTapesResult {

    /**
     * An array of virtual tape descriptions.
     *
     * @return An array of virtual tape descriptions; never {@code null} —
     *         the backing list is created lazily on first access.
     */
    public java.util.List<Tape> getTapes() {
        if (tapes == null) {
            // Lazily initialize so callers never observe null.
            tapes = new com.amazonaws.internal.SdkInternalList<Tape>();
        }
        return tapes;
    }
}
public class PhasedBackoffWaitStrategy {

    /**
     * Construct a {@link PhasedBackoffWaitStrategy} with fallback to
     * {@link BlockingWaitStrategy}.
     *
     * @param spinTimeout The maximum time to busy spin for.
     * @param yieldTimeout The maximum time to yield for.
     * @param units Time units used for the timeout values.
     * @return The constructed wait strategy.
     */
    public static PhasedBackoffWaitStrategy withLock(long spinTimeout,
                                                     long yieldTimeout,
                                                     TimeUnit units) {
        // Named factory: the lock-based BlockingWaitStrategy is the final
        // fallback phase after spinning and yielding time out.
        return new PhasedBackoffWaitStrategy(spinTimeout, yieldTimeout, units,
                new BlockingWaitStrategy());
    }
}
public class WebDriverHelper { /** * Authenticates the user using the given username and password . This will * work only if the credentials are requested through an alert window . * @ param username * the user name * @ param password * the password */ public void authenticateOnNextAlert ( String username , String password ) { } }
Credentials credentials = new UserAndPassword ( username , password ) ; Alert alert = driver . switchTo ( ) . alert ( ) ; alert . authenticateUsing ( credentials ) ;
public class TrimmedEstimator { /** * Local copy , see ArrayLikeUtil . toPrimitiveDoubleArray . * @ param data Data * @ param adapter Adapter * @ return Copy of the data , as { @ code double [ ] } */ public static < A > double [ ] toPrimitiveDoubleArray ( A data , NumberArrayAdapter < ? , A > adapter ) { } }
if ( adapter == DoubleArrayAdapter . STATIC ) { return ( ( double [ ] ) data ) . clone ( ) ; } final int len = adapter . size ( data ) ; double [ ] x = new double [ len ] ; for ( int i = 0 ; i < len ; i ++ ) { x [ i ] = adapter . getDouble ( data , i ) ; } return x ;
public class RequestParameterBuilder { /** * Encodes given value with a proper encoding . This is a convenient method for primitive , plain data types . Value will not be converted to JSON . Note : Value * can be null . * @ param value value to be encoded * @ return String encoded value * @ throws UnsupportedEncodingException DOCUMENT _ ME */ public String encode ( Object value ) throws UnsupportedEncodingException { } }
if ( value == null ) { return null ; } return URLEncoder . encode ( value . toString ( ) , encoding ) ;
public class ClassGraphException {

    /**
     * Static factory method to stop IDEs from auto-completing
     * ClassGraphException after "new ClassGraph".
     *
     * @param message
     *            the message
     * @param cause
     *            the cause
     * @return the ClassGraphException
     * @throws ClassGraphException
     *             the class graph exception
     */
    public static ClassGraphException newClassGraphException(final String message,
            final Throwable cause) throws ClassGraphException {
        // Simple factory: construct with message and preserved cause.
        return new ClassGraphException(message, cause);
    }
}
public class InstanceFactory { /** * 这是一个阻塞方法 , 直到context初始化完成 */ public synchronized static void waitUtilInitialized ( ) { } }
if ( initialized . get ( ) ) return ; while ( true ) { if ( initialized . get ( ) ) break ; try { Thread . sleep ( 1000 ) ; } catch ( Exception e ) { } long waiting = System . currentTimeMillis ( ) - timeStarting ; if ( waiting > 60 * 1000 ) throw new RuntimeException ( "Spring Initialize failture" ) ; System . out . println ( "Spring Initializing >>>>>" + waiting + " s" ) ; }
public class ActiveMqQueue { /** * Get the { @ link Session } dedicated for consuming messages . * @ return * @ throws JMSException */ protected Session getConsumerSession ( ) throws JMSException { } }
if ( consumerSession == null ) { synchronized ( this ) { if ( consumerSession == null ) { consumerSession = createSession ( Session . AUTO_ACKNOWLEDGE ) ; } } } return consumerSession ;
public class RESTService {

    /**
     * Register a set of application REST commands as belonging to the given
     * storage service owner. The commands are defined via a set of
     * {@link RESTCallback} objects, which must use the {@link Description}
     * annotation to provide metadata about the commands.
     *
     * @param cmdClasses Iterable collection of {@link RESTCallback} classes
     *                   that define application-specific REST commands to be
     *                   registered.
     * @param service {@link StorageService} that owns the commands.
     */
    public void registerCommands(Iterable<Class<? extends RESTCallback>> cmdClasses,
                                 StorageService service) {
        // Pure delegation to the command registry (note the swapped argument
        // order: registry takes the owning service first).
        m_cmdRegistry.registerCallbacks(service, cmdClasses);
    }
}
public class RIMBeanServerRegistrationUtility { /** * Checks whether an ObjectName is already registered . * @ throws javax . cache . CacheException - all exceptions are wrapped in * CacheException */ static < K , V > boolean isRegistered ( AbstractJCache < K , V > cache , ObjectNameType objectNameType ) { } }
Set < ObjectName > registeredObjectNames ; MBeanServer mBeanServer = cache . getMBeanServer ( ) ; if ( mBeanServer != null ) { ObjectName objectName = calculateObjectName ( cache , objectNameType ) ; registeredObjectNames = SecurityActions . queryNames ( objectName , null , mBeanServer ) ; return ! registeredObjectNames . isEmpty ( ) ; } else { return false ; }
public class NumberColumn { /** * { @ inheritDoc } */ @ Override protected void dumpData ( PrintWriter pw ) { } }
pw . println ( " [Data" ) ; for ( Object item : m_data ) { pw . println ( " " + item ) ; } pw . println ( " ]" ) ;
public class LabelingJobStoppingConditionsMarshaller { /** * Marshall the given parameter object . */ public void marshall ( LabelingJobStoppingConditions labelingJobStoppingConditions , ProtocolMarshaller protocolMarshaller ) { } }
if ( labelingJobStoppingConditions == null ) { throw new SdkClientException ( "Invalid argument passed to marshall(...)" ) ; } try { protocolMarshaller . marshall ( labelingJobStoppingConditions . getMaxHumanLabeledObjectCount ( ) , MAXHUMANLABELEDOBJECTCOUNT_BINDING ) ; protocolMarshaller . marshall ( labelingJobStoppingConditions . getMaxPercentageOfInputDatasetLabeled ( ) , MAXPERCENTAGEOFINPUTDATASETLABELED_BINDING ) ; } catch ( Exception e ) { throw new SdkClientException ( "Unable to marshall request to JSON: " + e . getMessage ( ) , e ) ; }
public class DiSH {

    /**
     * Builds the cluster hierarchy.
     *
     * @param database the database containing the data objects
     * @param clustering Clustering we process
     * @param clusters the sorted list of clusters
     * @param dimensionality the dimensionality of the data
     */
    private void buildHierarchy(Relation<V> database,
            Clustering<SubspaceModel> clustering,
            List<Cluster<SubspaceModel>> clusters, int dimensionality) {
        StringBuilder msg = LOG.isDebugging() ? new StringBuilder() : null;
        final int db_dim = RelationUtil.dimensionality(database);
        Hierarchy<Cluster<SubspaceModel>> hier = clustering.getClusterHierarchy();
        // Compare every cluster c_i against all later clusters c_j; clusters
        // are sorted, so potential parents of c_i only appear after it.
        for (int i = 0; i < clusters.size() - 1; i++) {
            Cluster<SubspaceModel> c_i = clusters.get(i);
            final Subspace s_i = c_i.getModel().getSubspace();
            // Correlation dimensionality: data dim minus subspace dim.
            int subspaceDim_i = dimensionality - s_i.dimensionality();
            NumberVector ci_centroid = ProjectedCentroid.make(s_i.getDimensions(), database, c_i.getIDs());
            long[] pv1 = s_i.getDimensions();
            for (int j = i + 1; j < clusters.size(); j++) {
                Cluster<SubspaceModel> c_j = clusters.get(j);
                final Subspace s_j = c_j.getModel().getSubspace();
                int subspaceDim_j = dimensionality - s_j.dimensionality();
                // Only a strictly higher-dimensional cluster can be a parent.
                if (subspaceDim_i < subspaceDim_j) {
                    if (msg != null) {
                        msg.append("\n l_i=").append(subspaceDim_i).append(" pv_i=[").append(BitsUtil.toStringLow(s_i.getDimensions(), db_dim)).append(']').append("\n l_j=").append(subspaceDim_j).append(" pv_j=[").append(BitsUtil.toStringLow(s_j.getDimensions(), db_dim)).append(']');
                    }
                    // noise level reached
                    if (s_j.dimensionality() == 0) {
                        // no parents exists -> parent is noise
                        if (hier.numParents(c_i) == 0) {
                            clustering.addChildCluster(c_j, c_i);
                            if (msg != null) {
                                msg.append("\n [").append(BitsUtil.toStringLow(s_j.getDimensions(), db_dim)).append("] is parent of [").append(BitsUtil.toStringLow(s_i.getDimensions(), db_dim)).append(']');
                            }
                        }
                    }
                    else {
                        NumberVector cj_centroid = ProjectedCentroid.make(c_j.getModel().getDimensions(), database, c_j.getIDs());
                        long[] pv2 = s_j.getDimensions();
                        // Common preference vector of the two clusters.
                        long[] commonPreferenceVector = BitsUtil.andCMin(pv1, pv2);
                        int subspaceDim = subspaceDimensionality(ci_centroid, cj_centroid, pv1, pv2, commonPreferenceVector);
                        double d = weightedDistance(ci_centroid, cj_centroid, commonPreferenceVector);
                        if (msg != null) {
                            msg.append("\n dist = ").append(subspaceDim);
                        }
                        // c_j is a parent candidate only if the combined
                        // subspace dimensionality matches c_j's own.
                        if (subspaceDim == subspaceDim_j) {
                            if (msg != null) {
                                msg.append("\n d = ").append(d);
                            }
                            if (d <= 2 * epsilon) {
                                // no parent exists or c_j is not a parent of the already
                                // existing parents
                                if (hier.numParents(c_i) == 0 || !isParent(database, c_j, hier.iterParents(c_i), db_dim)) {
                                    clustering.addChildCluster(c_j, c_i);
                                    if (msg != null) {
                                        msg.append("\n [").append(BitsUtil.toStringLow(s_j.getDimensions(), db_dim)).append("] is parent of [").append(BitsUtil.toStringLow(s_i.getDimensions(), db_dim)).append(']');
                                    }
                                }
                            }
                            else {
                                // Matching subspace dimensionality implies the
                                // weighted distance must be within 2*epsilon.
                                throw new RuntimeException("Should never happen: d = " + d);
                            }
                        }
                    }
                }
            }
        }
        if (msg != null) {
            LOG.debug(msg.toString());
        }
    }
}
public class DataVecSparkUtil { /** * This is a convenience method to combine data from separate files together ( intended to write to a sequence file , using * { @ link org . apache . spark . api . java . JavaPairRDD # saveAsNewAPIHadoopFile ( String , Class , Class , Class ) } ) < br > * A typical use case is to combine input and label data from different files , for later parsing by a RecordReader * or SequenceRecordReader . * A typical use case is as follows : < br > * Given two paths ( directories ) , combine the files in these two directories into pairs . < br > * Then , for each pair of files , convert the file contents into a { @ link BytesPairWritable } , which also contains * the original file paths of the files . < br > * The assumptions are as follows : < br > * - For every file in the first directory , there is an equivalent file in the second directory ( i . e . , same key ) < br > * - The pairing of files can be done based on the paths of the files ; paths are mapped to a key using a { @ link PathToKeyConverter } ; * keys are then matched to give pairs of files < br > * < br > < br > * < b > Example usage < / b > : to combine all files in directory { @ code dir1 } with equivalent files in { @ code dir2 } , by file name : * < pre > * < code > JavaSparkContext sc = . . . ; * String path1 = " / dir1 " ; * String path2 = " / dir2 " ; * PathToKeyConverter pathConverter = new PathToKeyConverterFilename ( ) ; * JavaPairRDD & lt ; Text , BytesPairWritable & gt ; toWrite = DataVecSparkUtil . combineFilesForSequenceFile ( sc , path1 , path2 , pathConverter , pathConverter ) ; * String outputPath = " / my / output / path " ; * toWrite . saveAsNewAPIHadoopFile ( outputPath , Text . class , BytesPairWritable . class , SequenceFileOutputFormat . 
class ) ; * < / code > * < / pre > * Result : the file contexts aggregated ( pairwise ) , written to a hadoop sequence file at / my / output / path * @ param sc Spark context * @ param path1 First directory ( passed to JavaSparkContext . binaryFiles ( path1 ) ) * @ param path2 Second directory ( passed to JavaSparkContext . binaryFiles ( path1 ) ) * @ param converter1 Converter , to convert file paths in first directory to a key ( to allow files to be matched / paired by key ) * @ param converter2 As above , for second directory * @ return */ public static JavaPairRDD < Text , BytesPairWritable > combineFilesForSequenceFile ( JavaSparkContext sc , String path1 , String path2 , PathToKeyConverter converter1 , PathToKeyConverter converter2 ) { } }
JavaPairRDD < String , PortableDataStream > first = sc . binaryFiles ( path1 ) ; JavaPairRDD < String , PortableDataStream > second = sc . binaryFiles ( path2 ) ; // Now : process keys ( paths ) so that they can be merged JavaPairRDD < String , Tuple3 < String , Integer , PortableDataStream > > first2 = first . mapToPair ( new PathToKeyFunction ( 0 , converter1 ) ) ; JavaPairRDD < String , Tuple3 < String , Integer , PortableDataStream > > second2 = second . mapToPair ( new PathToKeyFunction ( 1 , converter2 ) ) ; JavaPairRDD < String , Tuple3 < String , Integer , PortableDataStream > > merged = first2 . union ( second2 ) ; // Combine into pairs , and prepare for writing JavaPairRDD < Text , BytesPairWritable > toWrite = merged . groupByKey ( ) . mapToPair ( new MapToBytesPairWritableFunction ( ) ) ; return toWrite ;
public class ConversionManager { /** * Convert a String to a Type using registered converters for the Type * @ param < T > * @ param rawString * @ param type * @ return */ public Object convert ( String rawString , Type type ) { } }
Object value = convert ( rawString , type , null ) ; return value ;
public class ScriptActionsInner {

    /**
     * Gets the script execution detail for the given script execution ID.
     *
     * @param resourceGroupName The name of the resource group.
     * @param clusterName The name of the cluster.
     * @param scriptExecutionId The script execution Id
     * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @return the {@link ServiceFuture} object
     */
    public ServiceFuture<RuntimeScriptActionDetailInner> getExecutionDetailAsync(
            String resourceGroupName, String clusterName, String scriptExecutionId,
            final ServiceCallback<RuntimeScriptActionDetailInner> serviceCallback) {
        // Bridge the observable-based overload into a ServiceFuture, wiring
        // the caller's callback into the response handling.
        return ServiceFuture.fromResponse(
                getExecutionDetailWithServiceResponseAsync(resourceGroupName, clusterName, scriptExecutionId),
                serviceCallback);
    }
}
public class DefaultErrorWebExceptionHandler { /** * Determine if the stacktrace attribute should be included . * @ param request the source request * @ param produces the media type produced ( or { @ code MediaType . ALL } ) * @ return if the stacktrace attribute should be included */ protected boolean isIncludeStackTrace ( ServerRequest request , MediaType produces ) { } }
ErrorProperties . IncludeStacktrace include = this . errorProperties . getIncludeStacktrace ( ) ; if ( include == ErrorProperties . IncludeStacktrace . ALWAYS ) { return true ; } if ( include == ErrorProperties . IncludeStacktrace . ON_TRACE_PARAM ) { return isTraceEnabled ( request ) ; } return false ;
public class AmazonNeptuneClient {

    /**
     * Creates a new DB cluster parameter group.
     * <p>
     * Parameters in a DB cluster parameter group apply to all of the instances in a DB cluster.
     * A DB cluster parameter group is initially created with the default parameters for the database
     * engine used by instances in the DB cluster. To provide custom values for any of the parameters,
     * you must modify the group after creating it using <a>ModifyDBClusterParameterGroup</a>. Once
     * you've created a DB cluster parameter group, you need to associate it with your DB cluster
     * using <a>ModifyDBCluster</a>. When you associate a new DB cluster parameter group with a
     * running DB cluster, you need to reboot the DB instances in the DB cluster without failover for
     * the new DB cluster parameter group and associated settings to take effect.
     * <important>
     * After you create a DB cluster parameter group, you should wait at least 5 minutes before
     * creating your first DB cluster that uses that DB cluster parameter group as the default
     * parameter group. This allows Amazon Neptune to fully complete the create action before the
     * DB cluster parameter group is used as the default for a new DB cluster. This is especially
     * important for parameters that are critical when creating the default database for a DB
     * cluster, such as the character set for the default database defined by the
     * <code>character_set_database</code> parameter.
     * </important>
     *
     * @param createDBClusterParameterGroupRequest the request
     * @return Result of the CreateDBClusterParameterGroup operation returned by the service.
     * @throws DBParameterGroupQuotaExceededException
     *         Request would result in user exceeding the allowed number of DB parameter groups.
     * @throws DBParameterGroupAlreadyExistsException
     *         A DB parameter group with the same name exists.
     * @sample AmazonNeptune.CreateDBClusterParameterGroup
     * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/neptune-2014-10-31/CreateDBClusterParameterGroup"
     *      target="_top">AWS API Documentation</a>
     */
    @Override
    public DBClusterParameterGroup createDBClusterParameterGroup(CreateDBClusterParameterGroupRequest request) {
        // Standard SDK pre-execution hook (request handlers / validation) before dispatch.
        request = beforeClientExecution(request);
        return executeCreateDBClusterParameterGroup(request);
    }
}
public class AWSSdkClient {

    /**
     * Create a launch configuration that can be later used to create {@link AmazonAutoScaling} groups.
     *
     * @param launchConfigName   Desired launch config name
     * @param imageId            AMI image id to use
     * @param instanceType       EC2 instance type to use
     * @param keyName            Key name
     * @param securityGroups     Security groups to apply (split on the configured delimiter)
     * @param kernelId           Optional kernel id
     * @param ramdiskId          Optional ram disk id
     * @param blockDeviceMapping Optional EBS device mapping
     * @param iamInstanceProfile Optional IAM instance profile
     * @param instanceMonitoring Optional instance monitoring
     * @param userData           User data (e.g. shell script to execute at instance boot under this launch config)
     */
    public void createLaunchConfig(String launchConfigName, String imageId, String instanceType, String keyName,
            String securityGroups, Optional<String> kernelId, Optional<String> ramdiskId,
            Optional<BlockDeviceMapping> blockDeviceMapping, Optional<String> iamInstanceProfile,
            Optional<InstanceMonitoring> instanceMonitoring, String userData) {
        final AmazonAutoScaling autoScaling = getAmazonAutoScalingClient();
        // Mandatory settings first; SPLITTER turns the securityGroups string into a list.
        CreateLaunchConfigurationRequest createLaunchConfigurationRequest = new CreateLaunchConfigurationRequest()
                .withLaunchConfigurationName(launchConfigName)
                .withImageId(imageId)
                .withInstanceType(instanceType)
                .withSecurityGroups(SPLITTER.splitToList(securityGroups))
                .withKeyName(keyName)
                .withUserData(userData);
        // Each optional setting is only applied when present.
        if (kernelId.isPresent()) {
            createLaunchConfigurationRequest = createLaunchConfigurationRequest.withKernelId(kernelId.get());
        }
        if (ramdiskId.isPresent()) {
            createLaunchConfigurationRequest = createLaunchConfigurationRequest.withRamdiskId(ramdiskId.get());
        }
        if (blockDeviceMapping.isPresent()) {
            createLaunchConfigurationRequest = createLaunchConfigurationRequest.withBlockDeviceMappings(blockDeviceMapping.get());
        }
        if (iamInstanceProfile.isPresent()) {
            createLaunchConfigurationRequest = createLaunchConfigurationRequest.withIamInstanceProfile(iamInstanceProfile.get());
        }
        if (instanceMonitoring.isPresent()) {
            createLaunchConfigurationRequest = createLaunchConfigurationRequest.withInstanceMonitoring(instanceMonitoring.get());
        }
        autoScaling.createLaunchConfiguration(createLaunchConfigurationRequest);
        LOGGER.info("Created Launch Configuration: " + launchConfigName);
    }
}
public class UntagResourceRequestMarshaller {

    /**
     * Marshall the given parameter object.
     *
     * @param untagResourceRequest the request to marshall; must not be {@code null}
     * @param protocolMarshaller   the marshaller that serializes each bound field
     */
    public void marshall(UntagResourceRequest untagResourceRequest, ProtocolMarshaller protocolMarshaller) {
        if (untagResourceRequest == null) {
            throw new SdkClientException("Invalid argument passed to marshall(...)");
        }
        try {
            // Each field is written against its pre-declared marshalling binding.
            protocolMarshaller.marshall(untagResourceRequest.getResourceArn(), RESOURCEARN_BINDING);
            protocolMarshaller.marshall(untagResourceRequest.getTagKeys(), TAGKEYS_BINDING);
        } catch (Exception e) {
            // Wrap any marshalling failure in the SDK's client exception, preserving the cause.
            throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
        }
    }
}
public class FixedPrioritiesPriorityQueue {

    /**
     * Demo / smoke-test entry point: adds a few prioritized items, prints the queue
     * after each insertion, then drains it in priority order.
     */
    public static void main(String[] args) {
        FixedPrioritiesPriorityQueue<String> pq = new FixedPrioritiesPriorityQueue<String>();
        System.out.println(pq);
        pq.add("one", 1);
        System.out.println(pq);
        pq.add("three", 3);
        System.out.println(pq);
        // Re-adding "one" with a higher priority (1.1).
        pq.add("one", 1.1);
        System.out.println(pq);
        pq.add("two", 2);
        System.out.println(pq);
        // toString(2): presumably limits output to the top 2 entries — TODO confirm.
        System.out.println(pq.toString(2));
        while (pq.hasNext()) {
            System.out.println(pq.next());
        }
    }
}
public class CreateIssueParams { /** * Sets the issue estimate hours . * @ param estimatedHours the issue estimate hours * @ return CreateIssueParams instance */ public CreateIssueParams estimatedHours ( BigDecimal estimatedHours ) { } }
if ( estimatedHours == null ) { parameters . add ( new NameValuePair ( "estimatedHours" , "" ) ) ; } else { parameters . add ( new NameValuePair ( "estimatedHours" , estimatedHours . setScale ( 2 , BigDecimal . ROUND_HALF_UP ) . toPlainString ( ) ) ) ; } return this ;
public class JdbcEndpointAdapterController {

    /**
     * Executes the given update.
     *
     * @param updateSql The update statement to be executed
     * @return the number of updated rows reported in the response header, or 0 when absent
     * @throws JdbcServerException In case that the execution was not successful
     */
    @Override
    public int executeUpdate(String updateSql) throws JdbcServerException {
        log.info("Received execute update request: " + updateSql);
        Message response = handleMessageAndCheckResponse(JdbcMessage.execute(updateSql));
        // The updated-row count travels back as a message header; default to 0 if missing.
        return Optional.ofNullable(response.getHeader(JdbcMessageHeaders.JDBC_ROWS_UPDATED))
                .map(Object::toString)
                .map(Integer::valueOf)
                .orElse(0);
    }
}
public class JavaMailSender {

    /**
     * Initialize the session and create the mime message.
     *
     * @return a fresh {@link MimeMessage} bound to a session built from the
     *         configured properties and authenticator
     */
    private MimeMessage createMimeMessage() {
        // prepare the message
        Session session = Session.getInstance(properties, authenticator);
        return new MimeMessage(session);
    }
}
public class CmsModelPageHelper {

    /**
     * Returns the local model group pages.<p>
     *
     * Reads the model-group folder configured for the current ADE configuration and
     * converts each model resource found there into a {@link CmsModelPageEntry}.
     * Read errors are logged and result in a (possibly partial) list.
     *
     * @return the model group pages, never {@code null}
     */
    public List<CmsModelPageEntry> getModelGroups() {
        List<CmsModelPageEntry> result = new ArrayList<CmsModelPageEntry>();
        CmsResourceTypeConfig config = m_adeConfig.getResourceType(CmsResourceTypeXmlContainerPage.MODEL_GROUP_TYPE_NAME);
        // Only proceed when the model-group type is configured and enabled.
        if ((config != null) && !config.isDisabled()) {
            String modelGroupFolderPath = config.getFolderPath(m_cms, m_adeConfig.getBasePath());
            if (m_cms.existsResource(modelGroupFolderPath)) {
                try {
                    Locale wpLocale = OpenCms.getWorkplaceManager().getWorkplaceLocale(m_cms);
                    // Non-recursive read of visible, non-deleted model-group resources.
                    List<CmsResource> modelResources = m_cms.readResources(
                        modelGroupFolderPath,
                        CmsResourceFilter.ONLY_VISIBLE_NO_DELETED.addRequireType(
                            OpenCms.getResourceManager().getResourceType(
                                CmsResourceTypeXmlContainerPage.MODEL_GROUP_TYPE_NAME)),
                        false);
                    for (CmsResource model : modelResources) {
                        CmsModelPageEntry entry = createModelPageEntry(model, false, false, wpLocale);
                        // Entries that could not be created are silently skipped.
                        if (entry != null) {
                            result.add(entry);
                        }
                    }
                } catch (CmsException e) {
                    LOG.error(e.getLocalizedMessage(), e);
                }
            }
        }
        return result;
    }
}
public class LocalExtensionStorage { /** * Get file path in the local extension repository . * @ param id the extension id * @ param fileExtension the file extension * @ return the encoded file path */ private String getFilePath ( ExtensionId id , String fileExtension ) { } }
String encodedId = PathUtils . encode ( id . getId ( ) ) ; String encodedVersion = PathUtils . encode ( id . getVersion ( ) . toString ( ) ) ; String encodedType = PathUtils . encode ( fileExtension ) ; return encodedId + File . separator + encodedVersion + File . separator + encodedId + '-' + encodedVersion + '.' + encodedType ;
public class BinaryEllipseDetectorPixel {

    /**
     * In a binary image the contour on the right and bottom is off by one pixel. This is because
     * the block region extends the entire pixel, not just the lower extent which is where it is
     * indexed from. Compensates by shifting the center and growing both semi-axes by half a pixel.
     */
    protected void adjustElipseForBinaryBias(EllipseRotated_F64 ellipse) {
        // Move the center to the middle of the pixel.
        ellipse.center.x += 0.5;
        ellipse.center.y += 0.5;
        // Grow semi-major and semi-minor axes to cover the full pixel extent.
        ellipse.a += 0.5;
        ellipse.b += 0.5;
    }
}
public class AbstractMatcher { /** * Obtain all the matching resources with the URIs of { @ code origin } within the range of MatchTypes provided , both inclusive . * @ param origins URIs to match * @ param minType the minimum MatchType we want to obtain * @ param maxType the maximum MatchType we want to obtain * @ return a { @ link com . google . common . collect . Table } with the result of the matching indexed by origin URI and then destination URI . */ @ Override public Table < URI , URI , MatchResult > listMatchesWithinRange ( Set < URI > origins , MatchType minType , MatchType maxType ) { } }
ImmutableTable . Builder < URI , URI , MatchResult > builder = ImmutableTable . builder ( ) ; Map < URI , MatchResult > matches ; for ( URI origin : origins ) { matches = this . listMatchesWithinRange ( origin , minType , maxType ) ; for ( Map . Entry < URI , MatchResult > match : matches . entrySet ( ) ) { builder . put ( origin , match . getKey ( ) , match . getValue ( ) ) ; } } return builder . build ( ) ;
public class RunResults { /** * returns null if not found */ public RootMethodRunResult getRunResultByRootMethod ( TestMethod rootMethod ) { } }
if ( rootMethod == null ) { throw new NullPointerException ( ) ; } return getRunResultByRootMethodKey ( rootMethod . getKey ( ) ) ;
public class FileHelper {

    /**
     * Delete a file ignoring failures.
     * Directories are removed recursively; a {@code null} argument is a no-op.
     *
     * @param file file to delete, may be {@code null}
     */
    public static final void deleteQuietly(File file) {
        if (file == null) {
            return;
        }
        if (file.isDirectory()) {
            // Delete children first; listFiles() returns null on I/O error.
            File[] children = file.listFiles();
            if (children != null) {
                for (File child : children) {
                    deleteQuietly(child);
                }
            }
        }
        // Deletion failure (e.g. permissions) is deliberately ignored.
        file.delete();
    }
}
public class FileDescriptor {

    /**
     * Open a new {@link FileDescriptor} for the given path.
     *
     * @param path the file path to open; must not be {@code null}
     * @return a descriptor wrapping the opened native fd
     * @throws IOException if the native open call fails (negative return code)
     */
    public static FileDescriptor from(String path) throws IOException {
        checkNotNull(path, "path");
        int res = open(path);
        // Native convention: negative result encodes the errno of the failure.
        if (res < 0) {
            throw newIOException("open", res);
        }
        return new FileDescriptor(res);
    }
}
public class Dialog {

    /**
     * Set the background drawable of all action buttons.
     * Applies the same drawable to the positive, negative and neutral actions.
     *
     * @param drawable The background drawable.
     * @return The Dialog for chaining methods.
     */
    public Dialog actionBackground(Drawable drawable) {
        positiveActionBackground(drawable);
        negativeActionBackground(drawable);
        neutralActionBackground(drawable);
        return this;
    }
}
public class UrlPatternAnalyzer {

    /**
     * Adjusts the URL pattern with the execute method's name prefix, resolving
     * keyword marks (e.g. "@word") in the pattern against the method-name keywords.
     *
     * @param executeMethod    the action execute method being analyzed
     * @param sourceUrlPattern the declared URL pattern, e.g. "{}"
     * @param methodName       the execute method's simple name, e.g. "index", "sea"
     * @param specified        whether the URL pattern was explicitly specified
     * @return the chosen URL pattern box, never {@code null}
     */
    protected UrlPatternChosenBox adjustUrlPatternMethodPrefix(Method executeMethod, String sourceUrlPattern, String methodName, boolean specified) {
        final String keywordMark = getMethodKeywordMark();
        if (methodName.equals("index")) { // e.g. index(pageNumber), urlPattern = "{}"
            // Keyword marks are not allowed on the index method.
            if (sourceUrlPattern.contains(keywordMark)) {
                throwUrlPatternMethodKeywordMarkButIndexMethodException(executeMethod, sourceUrlPattern, keywordMark);
            }
            // index gets no method-name prefix.
            return new UrlPatternChosenBox(sourceUrlPattern, sourceUrlPattern, specified);
        } else { // e.g. sea(pageNumber), urlPattern = "{}"
            if (sourceUrlPattern.contains(keywordMark)) { // e.g. @word/{}/@word
                final List<String> keywordList = splitMethodKeywordList(methodName);
                // Each keyword mark in the pattern must have a matching method-name keyword.
                if (keywordList.size() != Srl.count(sourceUrlPattern, keywordMark)) { // e.g. sea() but @word/{}/@word
                    throwUrlPatternMethodKeywordMarkUnmatchedCountException(executeMethod, sourceUrlPattern, keywordMark);
                }
                // Substitute keywords into the marks, left to right.
                final String resolved = keywordList.stream().reduce(sourceUrlPattern, (first, second) -> {
                    return Srl.substringFirstFront(first, keywordMark) + second + Srl.substringFirstRear(first, keywordMark);
                }); // e.g. sea/land
                return new UrlPatternChosenBox(resolved, sourceUrlPattern, specified);
            } else {
                // No marks: simply prefix the pattern with the method name.
                return new UrlPatternChosenBox(methodName + "/" + sourceUrlPattern, sourceUrlPattern, specified).withMethodPrefix();
            }
        }
    }
}
public class NFA {

    /**
     * Constructs a dfa using the first nfa state as starting state.
     *
     * @param scope scope providing DFA state allocation/counting
     * @return the constructed DFA, sized by the number of states in the scope
     */
    public DFA<T> constructDFA(Scope<DFAState<T>> scope) {
        // Delegate subset construction to the first state; scope.count() gives the state total.
        return new DFA<>(first.constructDFA(scope), scope.count());
    }
}
public class CacheToMapAdapter {

    /**
     * Factory method used to construct a new instance of {@link CacheToMapAdapter} initialized
     * with the given {@link Cache} used to back the {@link Map}.
     *
     * @param <KEY>   {@link Class type} of keys used by the {@link Map}.
     * @param <VALUE> {@link Class type} of the values stored by the {@link Map}.
     * @param cache   {@link Cache} to adapt as a {@link Map}, backing the {@link Map} instance.
     * @return a new {@link CacheToMapAdapter} initialized with the given {@link Cache}.
     * @throws IllegalArgumentException if {@link Cache} is {@literal null}
     *         (null validation is performed by the constructor).
     * @see org.cp.elements.data.caching.Cache
     * @see #CacheToMapAdapter(Cache)
     */
    public static <KEY extends Comparable<KEY>, VALUE> CacheToMapAdapter<KEY, VALUE> of(Cache<KEY, VALUE> cache) {
        return new CacheToMapAdapter<>(cache);
    }
}
public class MetadataApiService {

    /**
     * POST /metadata/{projectName}/tokens
     * <p>Adds a {@link Token} to the specified {@code projectName}.
     *
     * @param projectName the project to add the token to
     * @param request     the token identifier and requested role
     * @param author      the author performing the change
     * @return the revision produced by the metadata update
     */
    @Post("/metadata/{projectName}/tokens")
    public CompletableFuture<Revision> addToken(@Param("projectName") String projectName, IdentifierWithRole request, Author author) {
        final ProjectRole role = toProjectRole(request.role());
        // Resolve the token by its application id, then attach it to the project.
        return mds.findTokenByAppId(request.id())
                  .thenCompose(token -> mds.addToken(author, projectName, token.appId(), role));
    }
}
public class CSSMinifierMojo { /** * Cleans the output file if any . * @ param file the file * @ return { @ literal false } if the pipeline processing must be interrupted for this event . Most watchers should * return { @ literal true } to let other watchers be notified . * @ throws org . wisdom . maven . WatchingException if the watcher failed to process the given file . */ @ Override public boolean fileDeleted ( File file ) throws WatchingException { } }
if ( isNotMinified ( file ) ) { File minified = getMinifiedFile ( file ) ; FileUtils . deleteQuietly ( minified ) ; File map = new File ( minified . getParentFile ( ) , minified . getName ( ) + ".map" ) ; FileUtils . deleteQuietly ( map ) ; } return true ;
public class LambdaToMethod {

    /**
     * Translate qualified `this' references within a lambda to the mapped identifier.
     *
     * @param tree the field-access tree being visited
     */
    @Override
    public void visitSelect(JCFieldAccess tree) {
        // Outside a lambda context, or not a field access the analyzer cares about:
        // fall back to the default traversal.
        if (context == null || !analyzer.lambdaFieldAccessFilter(tree)) {
            super.visitSelect(tree);
        } else {
            // Preserve and restore the tree-maker position around the translation.
            int prevPos = make.pos;
            try {
                make.at(tree);
                LambdaTranslationContext lambdaContext = (LambdaTranslationContext) context;
                JCTree ltree = lambdaContext.translate(tree);
                if (ltree != null) {
                    result = ltree;
                } else {
                    // No mapping for this access: default traversal.
                    super.visitSelect(tree);
                }
            } finally {
                make.at(prevPos);
            }
        }
    }
}
public class SibRaManagedConnection {

    /**
     * Returns a connection handle for this managed connection. The resource adapter does not
     * support re-authentication so <code>matchManagedConnection</code> should already have
     * checked that the <code>Subject</code> and request information are suitable for this
     * managed connection. The connection handle is given a clone of the core SPI connection
     * associated with this managed connection, as is the request information.
     *
     * @param containerSubject the container subject
     * @param requestInfo      the connection request information
     * @return the connection handle
     * @throws ResourceException if the clone of the core SPI connection fails or the request
     *         info is not a {@code SibRaConnectionRequestInfo}
     */
    public Object getConnection(final Subject containerSubject, final ConnectionRequestInfo requestInfo) throws ResourceException {
        if (TraceComponent.isAnyTracingEnabled() && TRACE.isEntryEnabled()) {
            SibTr.entry(this, TRACE, "getConnection", new Object[] { SibRaUtils.subjectToString(containerSubject), requestInfo });
        }
        SibRaConnection connection = null;
        if (requestInfo instanceof SibRaConnectionRequestInfo) {
            final SibRaConnectionRequestInfo sibRaRequestInfo = (SibRaConnectionRequestInfo) requestInfo;
            SICoreConnection coreConnection = null;
            try {
                // Reset any previous failure before attempting the clone.
                _connectionException = null;
                coreConnection = _coreConnection.cloneConnection();
            } catch (final SIConnectionUnavailableException exception) {
                // No FFDC Code Needed
                // We will catch SIConnectionUnavailableException and SIConnectionDroppedException here
                connectionErrorOccurred(exception, false);
                _connectionException = exception;
                _validConnection = false; // PK60857
            } catch (SIException exception) {
                FFDCFilter.processException(exception, "com.ibm.ws.sib.ra.impl.SibRaManagedConnection.getConnection", FFDC_PROBE_1, this);
                if (TraceComponent.isAnyTracingEnabled() && TRACE.isEventEnabled()) {
                    SibTr.exception(this, TRACE, exception);
                }
                connectionErrorOccurred(exception, false);
                _connectionException = exception;
                _validConnection = false;
            } catch (SIErrorException exception) {
                FFDCFilter.processException(exception, "com.ibm.ws.sib.ra.impl.SibRaManagedConnection.getConnection", FFDC_PROBE_7, this);
                if (TraceComponent.isAnyTracingEnabled() && TRACE.isEventEnabled()) {
                    SibTr.exception(this, TRACE, exception);
                }
                connectionErrorOccurred(exception, false);
                _connectionException = exception;
                _validConnection = false;
            }
            if (coreConnection != null) {
                sibRaRequestInfo.setCoreConnection(coreConnection);
                connection = new SibRaConnection(this, sibRaRequestInfo, coreConnection);
                // Track the handle so it can be cleaned up with this managed connection.
                _connections.add(connection);
            } else {
                // NOTE(review): clone failed, yet a handle is still created with a null core
                // connection and NOT added to _connections — presumably a deliberate
                // "invalid handle" pattern relying on _connectionException; confirm.
                connection = new SibRaConnection(this, sibRaRequestInfo, coreConnection);
            }
        } else {
            // Unknown request-info type: fail with an NLS-formatted internal exception.
            ResourceAdapterInternalException exception = new ResourceAdapterInternalException(
                NLS.getFormattedMessage("UNRECOGNISED_REQUEST_INFO_CWSIV0401",
                    new Object[] { requestInfo, SibRaConnectionRequestInfo.class }, null));
            if (TRACE.isEventEnabled()) {
                SibTr.exception(this, TRACE, exception);
            }
            throw exception;
        }
        if (TraceComponent.isAnyTracingEnabled() && TRACE.isEntryEnabled()) {
            SibTr.exit(this, TRACE, "getConnection", connection);
        }
        return connection;
    }
}
public class VirtualListViewControlDirContextProcessor {

    /**
     * Builds the virtual-list-view request control reflectively, choosing the
     * constructor signature based on whether the offset is percentage-based.
     *
     * @see org.springframework.ldap.control.AbstractRequestControlDirContextProcessor#createRequestControl()
     */
    public Control createRequestControl() {
        Control control;
        if (offsetPercentage) {
            // Percentage form: (targetOffset, pageSize, critical).
            control = super.createRequestControl(
                new Class[] { int.class, int.class, boolean.class },
                new Object[] { Integer.valueOf(targetOffset), Integer.valueOf(pageSize), Boolean.valueOf(CRITICAL_CONTROL) });
        } else {
            // Offset form: (targetOffset, listSize, beforeCount=0, afterCount=pageSize-1, critical).
            control = super.createRequestControl(
                new Class[] { int.class, int.class, int.class, int.class, boolean.class },
                new Object[] { Integer.valueOf(targetOffset), Integer.valueOf(listSize), Integer.valueOf(0), Integer.valueOf(pageSize - 1), Boolean.valueOf(CRITICAL_CONTROL) });
        }
        // Propagate the server-supplied context cookie between paged requests, if any.
        if (cookie != null) {
            invokeMethod("setContextID", requestControlClass, control,
                new Class[] { byte[].class }, new Object[] { cookie.getCookie() });
        }
        return control;
    }
}
public class Evaluator { /** * Attribute . * @ param _ select the select * @ return the attribute * @ throws EFapsException the e faps exception */ protected Attribute attribute ( final Select _select ) throws EFapsException { } }
Attribute ret = null ; final AbstractElement < ? > element = _select . getElements ( ) . get ( _select . getElements ( ) . size ( ) - 1 ) ; if ( element instanceof AttributeElement ) { ret = ( ( AttributeElement ) element ) . getAttribute ( ) ; } return ret ;
public class Chunk {

    /**
     * Fetch the missing-status the slow way.
     *
     * @param i absolute row index
     * @return whether the value at the given absolute index is missing (NA)
     * @throws ArrayIndexOutOfBoundsException if {@code i} falls outside this chunk's row range
     */
    public final boolean isNA(long i) {
        // Convert the absolute index to a chunk-local one. A non-positive _start is
        // treated as 0 — presumably a sentinel for "not yet set"; confirm against Chunk's
        // _start contract.
        long x = i - (_start > 0 ? _start : 0);
        if (0 <= x && x < _len)
            return isNA0((int) x);
        throw new ArrayIndexOutOfBoundsException(getClass().getSimpleName() + " " + _start + " <= " + i + " < " + (_start + _len));
    }
}
public class ResourceAdapterModuleMBeanImpl { /** * ( non - Javadoc ) * @ see com . ibm . websphere . jca . mbean . ResourceAdapterModuleMBean # getresourceAdapters ( ) */ @ Override public String [ ] getresourceAdapters ( ) { } }
final String methodName = "getresourceAdapters" ; final boolean trace = TraceComponent . isAnyTracingEnabled ( ) ; if ( trace && tc . isEntryEnabled ( ) ) Tr . entry ( tc , methodName , this ) ; final Collection < ResourceAdapterMBeanImpl > c = raMBeanChildrenList . values ( ) ; final int size = c . size ( ) ; final String [ ] result = new String [ size ] ; int index = 0 ; for ( ResourceAdapterMBeanImpl mbeanItem : c ) result [ index ++ ] = mbeanItem . getobjectName ( ) ; if ( trace && tc . isEntryEnabled ( ) ) Tr . exit ( tc , methodName , this ) ; return result ;
public class Sequence {

    /**
     * Overridden so we can create appropriate sized buffer before making string.
     * Returns the 1-based, inclusive subsequence [beginPosition, endPosition], or
     * {@code null} for any invalid bounds or on a read failure.
     *
     * @see uk.ac.ebi.embl.api.entry.sequence.AbstractSequence#getSequence(java.lang.Long, java.lang.Long)
     */
    @Deprecated
    @Override
    public String getSequence(Long beginPosition, Long endPosition) {
        // Reject null, inverted, or out-of-range bounds (positions are 1-based).
        if (beginPosition == null || endPosition == null || (beginPosition > endPosition) || beginPosition < 1 || endPosition > getLength()) {
            return null;
        }
        // note begin:1 end:4 has length 4
        int length = (int) (endPosition.longValue() - beginPosition.longValue()) + 1;
        int offset = beginPosition.intValue() - 1;
        String subSequence = null;
        try {
            subSequence = ByteBufferUtils.string(getSequenceBuffer(), offset, length);
        } catch (Exception e) {
            // NOTE(review): failure is only printed and null is returned — presumably
            // best-effort by design; consider proper logging.
            e.printStackTrace();
            return subSequence;
        }
        return subSequence;
        /* dead code — superseded by ByteBufferUtils.string above:
        byte[] subsequence = new byte[length];
        synchronized (sequence) {
            sequence.position(offset);
            sequence.get(subsequence, 0, length);
            String string = new String(subsequence);
            return string;
        */
    }
}
public class ObjectFactory {

    /**
     * Create an instance of {@link JAXBElement}{@code <}{@link Object}{@code >}
     * for the _GenericApplicationPropertyOfClosureSurface element (JAXB-generated factory).
     */
    @XmlElementDecl(namespace = "http://www.citygml.org/ade/sub/0.9.0", name = "_GenericApplicationPropertyOfClosureSurface")
    public JAXBElement<Object> create_GenericApplicationPropertyOfClosureSurface(Object value) {
        // No scope class (null) — this is a global element declaration.
        return new JAXBElement<Object>(__GenericApplicationPropertyOfClosureSurface_QNAME, Object.class, null, value);
    }
}
public class DbxClientV2 {

    /**
     * Returns a new {@link DbxClientV2} that performs requests against Dropbox API user endpoints
     * relative to a namespace without including the namespace as part of the path variable for
     * every request
     * (<a href="https://www.dropbox.com/developers/reference/namespace-guide#pathrootmodes">namespace guide</a>).
     *
     * <p>This method performs no validation of the namespace ID.</p>
     *
     * @param pathRoot the path root for this client, never {@code null}.
     * @return Dropbox client that issues requests with Dropbox-API-Path-Root header.
     * @throws IllegalArgumentException If {@code pathRoot} is {@code null}
     */
    public DbxClientV2 withPathRoot(PathRoot pathRoot) {
        if (pathRoot == null) {
            throw new IllegalArgumentException("'pathRoot' should not be null");
        }
        // Wrap the underlying raw client configured with the new path root.
        return new DbxClientV2(_client.withPathRoot(pathRoot));
    }
}
public class ClassApi { /** * Creates new instance of class , rethrowing all exceptions in runtime . * @ param clazz type * @ param < T > class generic * @ return instance object */ public static < T > T newInstance ( final Class < T > clazz ) { } }
try { return clazz . newInstance ( ) ; } catch ( InstantiationException | IllegalAccessException e ) { throw new ClassApiException ( e ) ; }
public class FixedSizeBitSet {

    /**
     * Sets the bit specified by the index to {@code false}.
     *
     * @param index the index of the bit to be cleared.
     * @throws IndexOutOfBoundsException if the specified index is negative
     *         or not less than the bit set's size.
     */
    public void clear(int index) {
        if (index < 0 || index >= size)
            throw new IndexOutOfBoundsException("index: " + index);
        int wordIndex = wordIndex(index);
        // Java's long shift uses only the low 6 bits of the count, so
        // (1L << index) == (1L << (index % 64)) — the bit within the word.
        words[wordIndex] &= ~(1L << index);
    }
}
public class AfplibPackageImpl {

    /**
     * Returns the EClass for MCF1RG, lazily resolved from the registered package
     * (EMF-generated accessor).
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @generated
     */
    public EClass getMCF1RG() {
        // Lazy lookup by fixed classifier index (427) in the registered package.
        if (mcf1RGEClass == null) {
            mcf1RGEClass = (EClass) EPackage.Registry.INSTANCE.getEPackage(AfplibPackage.eNS_URI).getEClassifiers().get(427);
        }
        return mcf1RGEClass;
    }
}
public class AlignedBox3d { /** * Set the y bounds of the box . * @ param min the min value for the y axis . * @ param max the max value for the y axis . */ @ Override public void setY ( double min , double max ) { } }
if ( min <= max ) { this . minyProperty . set ( min ) ; this . maxyProperty . set ( max ) ; } else { this . minyProperty . set ( max ) ; this . maxyProperty . set ( min ) ; }
public class SchemaConfiguration { /** * Return schema manager for pu . * @ param persistenceUnit * @ return */ private SchemaManager getSchemaManagerForPu ( final String persistenceUnit ) { } }
SchemaManager schemaManager = null ; Map < String , Object > externalProperties = KunderaCoreUtils . getExternalProperties ( persistenceUnit , externalPropertyMap , persistenceUnits ) ; if ( getSchemaProperty ( persistenceUnit , externalProperties ) != null && ! getSchemaProperty ( persistenceUnit , externalProperties ) . isEmpty ( ) ) { ClientFactory clientFactory = ClientResolver . getClientFactory ( persistenceUnit ) ; schemaManager = clientFactory != null ? clientFactory . getSchemaManager ( externalProperties ) : null ; } return schemaManager ;
public class OperationsApi {

    /**
     * Post users.
     * postUsers
     *
     * @return ApiResponse&lt;PostUsers&gt;
     * @throws ApiException If fail to call the API, e.g. server error or cannot deserialize the response body
     */
    public ApiResponse<PostUsers> postUsersWithHttpInfo() throws ApiException {
        // Validate and build the call, then execute synchronously, deserializing to PostUsers.
        com.squareup.okhttp.Call call = postUsersValidateBeforeCall(null, null);
        Type localVarReturnType = new TypeToken<PostUsers>() { }.getType();
        return apiClient.execute(call, localVarReturnType);
    }
}
public class TxUtils { /** * Returns the write pointer for the first " short " transaction that in the in - progress set , or * { @ link Transaction # NO _ TX _ IN _ PROGRESS } if none . */ public static long getFirstShortInProgress ( Map < Long , TransactionManager . InProgressTx > inProgress ) { } }
long firstShort = Transaction . NO_TX_IN_PROGRESS ; for ( Map . Entry < Long , TransactionManager . InProgressTx > entry : inProgress . entrySet ( ) ) { if ( ! entry . getValue ( ) . isLongRunning ( ) ) { firstShort = entry . getKey ( ) ; break ; } } return firstShort ;
public class Configs { /** * Get self config decimal . Config key include prefix . * Example : < br > * If key . getKeyString ( ) is " test " , < br > * getSelfConfigDecimal ( " 1 . " , key ) ; * will return " 1 . test " config value in { @ linkplain IConfigKeyWithPath # getConfigPath ( ) path } set in key . * @ param keyPrefix config key prefix * @ param key config key with configAbsoluteClassPath in config file * @ return config BigDecimal value . Return null if not add config file or not config in config file . * @ see # addSelfConfigs ( String , OneProperties ) */ public static BigDecimal getHavePathSelfConfigDecimal ( String keyPrefix , IConfigKeyWithPath key ) { } }
String configAbsoluteClassPath = key . getConfigPath ( ) ; return getSelfConfigDecimal ( configAbsoluteClassPath , keyPrefix , key ) ;
public class FPSCounter {

    /**
     * Computes execution time.
     * Logs the elapsed milliseconds since the previous {@code timeCheck} call
     * (tracked via {@code nextCheckTime}), tagged with the given label.
     *
     * @param extra label appended to the trace line
     */
    public static void timeCheck(String extra) {
        // Only active once timing has been started (startCheckTime > 0).
        if (startCheckTime > 0) {
            long now = System.currentTimeMillis();
            // diff is measured against nextCheckTime (the previous check), not
            // startCheckTime — presumably intentional delta-between-checks; confirm.
            long diff = now - nextCheckTime;
            nextCheckTime = now;
            Log.d(Log.SUBSYSTEM.TRACING, "FPSCounter", "[%d, %d] timeCheck: %s", now, diff, extra);
        }
    }
}
public class PriceGraduation {

    /**
     * Create a simple price graduation that contains one item with the minimum quantity of 1.
     *
     * @param aPrice The price to use. May not be <code>null</code>.
     * @return Never <code>null</code>.
     */
    @Nonnull
    public static IMutablePriceGraduation createSimple(@Nonnull final IMutablePrice aPrice) {
        // Graduation currency is taken from the price; the single item uses
        // quantity 1 and the price's net amount.
        final PriceGraduation ret = new PriceGraduation(aPrice.getCurrency());
        ret.addItem(new PriceGraduationItem(1, aPrice.getNetAmount().getValue()));
        return ret;
    }
}
public class MongoDBQueryUtils { /** * Auxiliary method to check if the operator of each of the values in the queryParamList matches the operator passed . * @ param type QueryParam type . * @ param queryParamList List of values . * @ param operator Operator to be checked . * @ return boolean indicating whether the list of values have always the same operator or not . */ private static boolean queryParamsOperatorAlwaysMatchesOperator ( QueryParam . Type type , List < String > queryParamList , ComparisonOperator operator ) { } }
for ( String queryItem : queryParamList ) { Matcher matcher = getPattern ( type ) . matcher ( queryItem ) ; String op = "" ; if ( matcher . find ( ) ) { op = matcher . group ( 1 ) ; } if ( operator != getComparisonOperator ( op , type ) ) { return false ; } } return true ;
public class Chronology { /** * Obtains a local date in this chronology from the era , year - of - era , * month - of - year and day - of - month fields . * @ param era the era of the correct type for the chronology , not null * @ param yearOfEra the chronology year - of - era * @ param month the chronology month - of - year * @ param dayOfMonth the chronology day - of - month * @ return the local date in this chronology , not null * @ throws DateTimeException if unable to create the date * @ throws ClassCastException if the { @ code era } is not of the correct type for the chronology */ public ChronoLocalDate date ( Era era , int yearOfEra , int month , int dayOfMonth ) { } }
return date ( prolepticYear ( era , yearOfEra ) , month , dayOfMonth ) ;
public class Apptentive { /** * This method takes a unique event string , stores a record of that event having been visited , * determines if there is an interaction that is able to run for this event , and then runs it . If * more than one interaction can run , then the most appropriate interaction takes precedence . Only * one interaction at most will run per invocation of this method . This task is performed * asynchronously . If you would like to know whether this method will launch an Apptentive * Interaction , use { @ link Apptentive # engage ( Context , String , BooleanCallback , Map ) } . * @ param context The context from which to launch the Interaction . This should be an Activity , * except in rare cases where you don ' t have access to one , in which case * Apptentive Interactions will launch in a new task . * @ param event A unique String representing the line this method is called on . For instance , * you may want to have the ability to target interactions to run after the user * uploads a file in your app . You may then call * < strong > < code > engage ( context , " finished _ upload " ) ; < / code > < / strong > * @ param customData A Map of String keys to Object values . Objects may be Strings , Numbers , or * Booleans . This data is sent to the server for tracking information in the * context of the engaged Event . * Note : this method returns no value ; use the { @ link # engage ( Context , String , BooleanCallback , Map ) } overload to learn whether an interaction was shown . */ public static synchronized void engage ( Context context , String event , Map < String , Object > customData ) { } }
// Delegate to the callback-accepting overload with no callback and no extended data.
engage ( context , event , null , customData , ( ExtendedData [ ] ) null ) ;
public class Pipeline { /** * Annotates a document with the given annotation types . * @ param textDocument the document to be annotated * @ param annotationTypes the annotation types to be annotated */ public static void process ( @ NonNull Document textDocument , AnnotatableType ... annotationTypes ) { } }
// Skips null/already-completed types, resolves an annotator for the document's
// language, recursively processes prerequisite types, annotates, then marks every
// type the annotator satisfies as completed (stamped with class name and version).
// NOTE(review): log appears to be a java.util.logging.Logger (isLoggable/Level.FINEST),
// but Logger has no finest(String, Object) overload — confirm this compiles against
// the actual logger type, or switch to log.log(Level.FINEST, msg, arg).
if ( annotationTypes == null || annotationTypes . length == 0 ) { return ; } for ( AnnotatableType annotationType : annotationTypes ) { if ( annotationType == null ) { continue ; } if ( textDocument . getAnnotationSet ( ) . isCompleted ( annotationType ) ) { continue ; } if ( log . isLoggable ( Level . FINEST ) ) { log . finest ( "Annotating for {0}" , annotationType ) ; } Annotator annotator = AnnotatorCache . getInstance ( ) . get ( annotationType , textDocument . getLanguage ( ) ) ; if ( annotator == null ) { throw new IllegalStateException ( "Could not get annotator for " + annotationType ) ; } if ( ! annotator . satisfies ( ) . contains ( annotationType ) ) { throw new IllegalStateException ( annotator . getClass ( ) . getName ( ) + " does not satisfy " + annotationType ) ; } // Get the requirements out of the way
for ( AnnotatableType prereq : annotator . requires ( ) ) { process ( textDocument , prereq ) ; } annotator . annotate ( textDocument ) ; for ( AnnotatableType type : annotator . satisfies ( ) ) { textDocument . getAnnotationSet ( ) . setIsCompleted ( type , true , annotator . getClass ( ) . getName ( ) + "::" + annotator . getVersion ( ) ) ; } }
public class IOUtils { /** * Create a folder , If the folder exists is not created . * @ param folderPath folder path . * @ return True : success , or false : failure . */ public static boolean createFolder ( String folderPath ) { } }
if ( ! StringUtils . isEmpty ( folderPath ) ) { File folder = new File ( folderPath ) ; return createFolder ( folder ) ; } return false ;
public class BatchArtifactsImpl { /** * If not already created , a new < code > ref < / code > element will be created and returned . * Otherwise , the first existing < code > ref < / code > element will be returned . * @ return the instance defined for the element < code > ref < / code > */ public BatchArtifactRef < BatchArtifacts < T > > getOrCreateRef ( ) { } }
List < Node > nodeList = childNode . get ( "ref" ) ; if ( nodeList != null && nodeList . size ( ) > 0 ) { return new BatchArtifactRefImpl < BatchArtifacts < T > > ( this , "ref" , childNode , nodeList . get ( 0 ) ) ; } return createRef ( ) ;
public class MSwingUtilities { /** * Retourne l ' instance courante de la classe componentClass contenant l ' élément component . < br / > * Cette méthode peut - être très utile pour récupérer une référence à un parent éloigné ( ancêtre ) , en l ' absence de référence directe du type attribut . * < br / > * Ex : un composant panel désire une référence sur sa JFrame parente , alors l ' instruction suivante suffit : getAncestorOfClass ( JFrame . class , panel ) * @ return Component * @ param < T > * le type du composant recherché * @ param componentClass * Class * @ param component * Component */ @ SuppressWarnings ( "unchecked" ) public static < T > T getAncestorOfClass ( final Class < T > componentClass , final Component component ) { } }
return ( T ) SwingUtilities . getAncestorOfClass ( componentClass , component ) ;
public class JvmTypesBuilder { /** * Attaches the given compile strategy to the given { @ link JvmExecutable } such that the compiler knows how to * implement the { @ link JvmExecutable } when it is translated to Java source code . * @ param executable the operation or constructor to add the method body to . If < code > null < / code > this method does nothing . * @ param strategy the compilation strategy . If < code > null < / code > this method does nothing . */ public void setBody ( /* @ Nullable */ JvmExecutable executable , /* @ Nullable */ StringConcatenationClient strategy ) { } }
// Drop any previously attached body, then install the new compilation strategy.
// NOTE(review): the javadoc promises a no-op for null arguments, yet both calls
// below run unconditionally — presumably removeExistingBody/setCompilationStrategy
// tolerate null themselves; confirm before relying on the documented contract.
removeExistingBody ( executable ) ; setCompilationStrategy ( executable , strategy ) ;
public class Searcher { /** * Loads more results with the same query . * Note that this method won ' t do anything if { @ link Searcher # hasMoreHits } returns false . * @ return this { @ link Searcher } for chaining . */ @ NonNull public Searcher loadMore ( ) { } }
// Bumps the page and fires an async search. Monotonic request ids let a response
// that arrives after a newer query's response (id <= lastResponseId) be discarded,
// so racing pagination requests never roll listeners back to stale hits. An empty
// hit payload marks the end of results (endReached).
if ( ! hasMoreHits ( ) ) { return this ; } query . setPage ( ++ lastRequestPage ) ; final int currentRequestId = ++ lastRequestId ; EventBus . getDefault ( ) . post ( new SearchEvent ( this , query , currentRequestId ) ) ; pendingRequests . put ( currentRequestId , triggerSearch ( new CompletionHandler ( ) { @ Override public void requestCompleted ( @ NonNull JSONObject content , @ Nullable AlgoliaException error ) { pendingRequests . remove ( currentRequestId ) ; if ( error != null ) { postError ( error , currentRequestId ) ; } else { if ( currentRequestId <= lastResponseId ) { return ; // Hits are for an older query , let ' s ignore them
} if ( hasHits ( content ) ) { updateListeners ( content , true ) ; updateFacetStats ( content ) ; lastResponsePage = lastRequestPage ; checkIfLastPage ( content ) ; } else { endReached = true ; } EventBus . getDefault ( ) . post ( new ResultEvent ( Searcher . this , content , query , currentRequestId ) ) ; } } } ) ) ; return this ;
public class HashCodeBuilder { /** * Uses reflection to build a valid hash code from the fields of { @ code object } . * This constructor uses two hard coded choices for the constants needed to build a hash code . * It uses < code > AccessibleObject . setAccessible < / code > to gain access to private fields . This means that it will * throw a security exception if run under a security manager , if the permissions are not set up correctly . It is * also not as efficient as testing explicitly . * If the TestTransients parameter is set to < code > true < / code > , transient members will be tested , otherwise they * are ignored , as they are likely derived fields , and not part of the value of the < code > Object < / code > . * Static fields will not be tested . Superclass fields will be included . If no fields are found to include * in the hash code , the result of this method will be constant . * @ param object * the Object to create a < code > hashCode < / code > for * @ param testTransients * whether to include transient fields * @ return int hash code * @ throws IllegalArgumentException * if the object is < code > null < / code > */ public static int reflectionHashCode ( final Object object , final boolean testTransients ) { } }
return reflectionHashCode ( DEFAULT_INITIAL_VALUE , DEFAULT_MULTIPLIER_VALUE , object , testTransients , null ) ;
public class CPDefinitionLinkPersistenceImpl { /** * Removes the cp definition link with the primary key from the database . Also notifies the appropriate model listeners . * @ param primaryKey the primary key of the cp definition link * @ return the cp definition link that was removed * @ throws NoSuchCPDefinitionLinkException if a cp definition link with the primary key could not be found */ @ Override public CPDefinitionLink remove ( Serializable primaryKey ) throws NoSuchCPDefinitionLinkException { } }
// Looks the entity up in a fresh session; a miss is logged at debug level and raised
// as NoSuchCPDefinitionLinkException, which is rethrown untouched so callers can tell
// "not found" apart from other persistence failures (wrapped via processException).
// The session is always closed in the finally block.
Session session = null ; try { session = openSession ( ) ; CPDefinitionLink cpDefinitionLink = ( CPDefinitionLink ) session . get ( CPDefinitionLinkImpl . class , primaryKey ) ; if ( cpDefinitionLink == null ) { if ( _log . isDebugEnabled ( ) ) { _log . debug ( _NO_SUCH_ENTITY_WITH_PRIMARY_KEY + primaryKey ) ; } throw new NoSuchCPDefinitionLinkException ( _NO_SUCH_ENTITY_WITH_PRIMARY_KEY + primaryKey ) ; } return remove ( cpDefinitionLink ) ; } catch ( NoSuchCPDefinitionLinkException nsee ) { throw nsee ; } catch ( Exception e ) { throw processException ( e ) ; } finally { closeSession ( session ) ; }
public class CompilerWrapper { /** * < p > addField . < / p > * @ param fieldName a { @ link java . lang . String } object . * @ param fildType a { @ link java . lang . Class } object . * @ param isPublic a boolean . * @ param isStatic a boolean . * @ param isFinal a boolean . * @ throws javassist . CannotCompileException if any . * @ throws javassist . NotFoundException if any . */ public void addField ( String fieldName , Class < ? > fildType , boolean isPublic , boolean isStatic , boolean isFinal ) throws CannotCompileException , NotFoundException { } }
addField ( fieldName , fildType , isPublic , isStatic , isFinal , null ) ;
public class HawkbitCommonUtil { /** * Create lazy query container for DS type . * @ param queryFactory * @ return LazyQueryContainer */ public static LazyQueryContainer createDSLazyQueryContainer ( final BeanQueryFactory < ? extends AbstractBeanQuery < ? > > queryFactory ) { } }
queryFactory . setQueryConfiguration ( Collections . emptyMap ( ) ) ; return new LazyQueryContainer ( new LazyQueryDefinition ( true , 20 , "tagIdName" ) , queryFactory ) ;
public class SibRaCommonEndpointActivation { /** * A session error has occured on the connection , drop the connection and , if necessary , * try to create a new connection */ @ Override void sessionError ( SibRaMessagingEngineConnection connection , ConsumerSession session , Throwable throwable ) { } }
// Entry/exit tracing brackets the real work: emit the CONSUMER_FAILED warning for the
// session's destination, then drop the failed connection.
// NOTE(review): dropConnection(connection, true, true, false) — the meanings of the
// three boolean flags are not visible here; confirm against dropConnection's signature.
final String methodName = "sessionError" ; if ( TraceComponent . isAnyTracingEnabled ( ) && TRACE . isEntryEnabled ( ) ) { SibTr . entry ( this , TRACE , methodName , new Object [ ] { connection , session } ) ; } final SIDestinationAddress destination = session . getDestinationAddress ( ) ; SibTr . warning ( TRACE , "CONSUMER_FAILED_CWSIV0770" , new Object [ ] { destination . getDestinationName ( ) , _endpointConfiguration . getBusName ( ) , this , throwable } ) ; dropConnection ( connection , true , true , false ) ; if ( TraceComponent . isAnyTracingEnabled ( ) && TRACE . isEntryEnabled ( ) ) { SibTr . exit ( this , TRACE , methodName ) ; }
public class CharacterManager { /** * Returns the estimated memory usage in bytes for all images * currently cached by the cached action frames . */ protected long getEstimatedCacheMemoryUsage ( ) { } }
long size = 0 ; Iterator < CompositedMultiFrameImage > iter = _frameCache . values ( ) . iterator ( ) ; while ( iter . hasNext ( ) ) { size += iter . next ( ) . getEstimatedMemoryUsage ( ) ; } return size ;
public class Reflect { /** * Creates an instance of Reflect associated with a class * @ param clazz The class for reflection as string * @ return The Reflect object * @ throws Exception the exception */ public static Reflect onClass ( String clazz ) throws Exception { } }
return new Reflect ( null , ReflectionUtils . getClassForName ( clazz ) ) ;
public class Bugsnag { /** * Set the endpoints to send data to . By default we ' ll send error reports to * https : / / notify . bugsnag . com , and sessions to https : / / sessions . bugsnag . com , but you can * override this if you are using Bugsnag Enterprise to point to your own Bugsnag endpoint . * Please note that it is recommended that you set both endpoints . If the notify endpoint is * missing , an exception will be thrown . If the session endpoint is missing , a warning will be * logged and sessions will not be sent automatically . * Note that if you are setting a custom { @ link Delivery } , this method should be called after * the custom implementation has been set . * @ param notify the notify endpoint * @ param sessions the sessions endpoint * @ throws IllegalArgumentException if the notify endpoint is empty or null */ public void setEndpoints ( String notify , String sessions ) throws IllegalArgumentException { } }
config . setEndpoints ( notify , sessions ) ;
public class CmsLuceneIndexWriter { /** * @ see org . opencms . search . I _ CmsIndexWriter # optimize ( ) * As optimize is deprecated with Lucene 3.5 , this implementation * actually calls { @ link IndexWriter # forceMerge ( int ) } . < p > */ public void optimize ( ) throws IOException { } }
if ( ( m_index != null ) && LOG . isInfoEnabled ( ) ) { LOG . info ( Messages . get ( ) . getBundle ( ) . key ( Messages . LOG_INDEX_WRITER_MSG_OPTIMIZE_2 , m_index . getName ( ) , m_index . getPath ( ) ) ) ; } int oldPriority = Thread . currentThread ( ) . getPriority ( ) ; // we don ' t want the priority too low as the process should complete as fast as possible Thread . currentThread ( ) . setPriority ( Thread . NORM_PRIORITY / 2 ) ; m_indexWriter . forceMerge ( 5 ) ; Thread . currentThread ( ) . setPriority ( oldPriority ) ;
public class SessionManager { /** * DS method to deactivate this component . * @ param context DS component context ( not used by this implementation ) */ public void deactivate ( ComponentContext context ) { } }
// Double-checked cancellation of the pending timer task under timerLock, so a
// concurrent reschedule cannot race the shutdown; cancel(true) interrupts the task
// if it is currently running.
if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEventEnabled ( ) ) { Tr . event ( this , tc , "Deactivating" ) ; } if ( null != this . future ) { synchronized ( this . timerLock ) { if ( null != this . future ) { this . future . cancel ( true ) ; this . future = null ; } } // end - sync
} // TODO purge the groupings / sessions maps , or dump to disk once
// persistence is added
public class ManagedIndex { /** * Required by IndexEntryAccessor interface . * @ param indexEntry index entry to populate * @ param master master storable whose data is copied into the entry */ public void copyFromMaster ( Storable indexEntry , S master ) throws FetchException { } }
// Pure delegation to the configured index-entry accessor.
mAccessor . copyFromMaster ( indexEntry , master ) ;
public class Interaction { /** * Fetches values using the popular dot notation * i . e . tumblr . author . id would return the id ( 12345 ) value in a structure similar to * < pre > * { " tumblr " : { * " author " : { * " id " : 12345 * < / pre > * @ param str a JSON dot notation string * @ return null if a value doesn ' t exist for that key or the value */ public JsonNode get ( String str ) { } }
String [ ] parts = str . split ( "\\." ) ; JsonNode retval = data . get ( parts [ 0 ] ) ; for ( int i = 1 ; i <= parts . length - 1 ; i ++ ) { if ( retval == null ) { return null ; } else { retval = retval . get ( parts [ i ] ) ; } } return retval ;
public class NetworkInterface { /** * The IP addresses associated with the network interface . * < b > NOTE : < / b > This method appends the values to the existing list ( if any ) . Use * { @ link # setIpv6Addresses ( java . util . Collection ) } or { @ link # withIpv6Addresses ( java . util . Collection ) } if you want * to override the existing values . * @ param ipv6Addresses * The IP addresses associated with the network interface . * @ return Returns a reference to this object so that method calls can be chained together . */ public NetworkInterface withIpv6Addresses ( String ... ipv6Addresses ) { } }
if ( this . ipv6Addresses == null ) { setIpv6Addresses ( new java . util . ArrayList < String > ( ipv6Addresses . length ) ) ; } for ( String ele : ipv6Addresses ) { this . ipv6Addresses . add ( ele ) ; } return this ;
public class NumberMap { /** * Creates a NumberMap for Shorts . * @ param < K > key type of the map * @ return NumberMap < K , Short > whose add / sub operate with short arithmetic */ public static < K > NumberMap < K , Short > newShortMap ( ) { } }
// Anonymous specialisation: Java promotes short operands to int, so each result is
// cast back to short (silent wrap-around on overflow, as with normal short math).
// add() on an absent key stores the addend as-is; sub() treats an absent key as 0.
return new NumberMap < K , Short > ( ) { @ Override public void add ( K key , Short addend ) { put ( key , containsKey ( key ) ? ( short ) ( get ( key ) + addend ) : addend ) ; } @ Override public void sub ( K key , Short subtrahend ) { put ( key , ( short ) ( ( containsKey ( key ) ? get ( key ) : 0 ) - subtrahend ) ) ; } } ;
public class JSONObject { /** * Internal method to write out a proper JSON attribute string . * @ param writer The writer to use while serializing * @ param attrs The attributes in a properties object to write out * @ param depth How far to indent the JSON text . * @ param compact Whether or not to use pretty indention output , or compact output , format * @ throws IOException Trhown if an error occurs on write . */ private void writeAttributes ( Writer writer , Properties attrs , int depth , boolean compact ) throws IOException { } }
// Serialises each property as a JSON attribute (names escaped for JSON), writing a
// "," separator — plus newline when pretty-printing — between entries but not after
// the last one. Underlying write failures are wrapped in an IOException that keeps
// the original exception as its cause. Entry/exit is traced at FINER level.
if ( logger . isLoggable ( Level . FINER ) ) logger . entering ( className , "writeAttributes(Writer, Properties, int, boolean)" ) ; if ( attrs != null ) { Enumeration props = attrs . propertyNames ( ) ; if ( props != null && props . hasMoreElements ( ) ) { while ( props . hasMoreElements ( ) ) { String prop = ( String ) props . nextElement ( ) ; writeAttribute ( writer , escapeAttributeNameSpecialCharacters ( prop ) , ( String ) attrs . get ( prop ) , depth + 1 , compact ) ; if ( props . hasMoreElements ( ) ) { try { if ( ! compact ) { writer . write ( ",\n" ) ; } else { writer . write ( "," ) ; } } catch ( Exception ex ) { IOException iox = new IOException ( "Error occurred on serialization of JSON text." ) ; iox . initCause ( ex ) ; throw iox ; } } } } } if ( logger . isLoggable ( Level . FINER ) ) logger . exiting ( className , "writeAttributes(Writer, Properties, int, boolean)" ) ;
public class ICalComponent { /** * Gets the first sub - component of a given class . * @ param clazz the component class * @ param < T > the component class * @ return the sub - component or null if not found */ public < T extends ICalComponent > T getComponent ( Class < T > clazz ) { } }
return clazz . cast ( components . first ( clazz ) ) ;
public class CqlDataReaderDAO { /** * Asynchronously executes the provided statement . Although the iterator is returned immediately the actual results * may still be loading in the background . The statement must query the delta table as returned from * { @ link com . bazaarvoice . emodb . sor . db . astyanax . DeltaPlacement # getDeltaTableDDL ( ) } */ private Iterator < Iterable < Row > > deltaQueryAsync ( DeltaPlacement placement , Statement statement , boolean singleRow , String errorContext , Object ... errorContextArgs ) { } }
return doDeltaQuery ( placement , statement , singleRow , true , errorContext , errorContextArgs ) ;
public class AbstractRsJs { /** * Get resource * @ return * @ throws IOException */ @ GET @ Produces ( Constants . JSTYPE ) public Response getJs ( ) throws IOException { } }
return Response . ok ( ( Object ) getSequenceInputStream ( getStreams ( ) ) ) . build ( ) ;
public class Encoder { /** * Recovers a corrupt block in a parity file to an output stream . * The encoder generates codec . parityLength parity blocks for a source file stripe . * Since there is only one output provided , some blocks are written out to * files before being written out to the output . * @ param blockSize The block size for the source / parity files . * @ param out The destination for the reovered block . * @ throws InterruptedException */ private void encodeFileToStream ( StripeReader sReader , long blockSize , FSDataOutputStream out , CRC32 [ ] crcOuts , Progressable reporter ) throws IOException , InterruptedException { } }
// Streams one stripe at a time: parity block 0 is written straight to 'out'; the
// remaining parityLength-1 parity outputs are buffered in temp files and copied to
// 'out' after each stripe (per-block CRCs recorded in crcOuts when provided).
// On a read failure *before* anything was written for the stripe, dir-RAID inputs
// are reconstructed via the stripe store and the stripe is retried (up to 3
// attempts); once partial data has reached 'out' the error is fatal. The finally
// block always closes the temp output streams and deletes the temp files.
OutputStream [ ] tmpOuts = new OutputStream [ codec . parityLength ] ; // One parity block can be written directly to out , rest to local files . tmpOuts [ 0 ] = out ; File [ ] tmpFiles = new File [ codec . parityLength - 1 ] ; for ( int i = 0 ; i < codec . parityLength - 1 ; i ++ ) { tmpFiles [ i ] = File . createTempFile ( "parity" , "_" + i ) ; LOG . info ( "Created tmp file " + tmpFiles [ i ] ) ; tmpFiles [ i ] . deleteOnExit ( ) ; } int finishedParityBlockIdx = 0 ; List < Integer > errorLocations = new ArrayList < Integer > ( ) ; try { // Loop over stripe boolean redo ; while ( sReader . hasNext ( ) ) { reporter . progress ( ) ; StripeInputInfo stripeInputInfo = null ; InputStream [ ] blocks = null ; // Create input streams for blocks in the stripe . long currentStripeIdx = sReader . getCurrentStripeIdx ( ) ; stripeInputInfo = sReader . getNextStripeInputs ( ) ; // The offset of first temporary output stream long encodeStartOffset = out . getPos ( ) ; int retry = 3 ; do { redo = false ; retry -- ; try { blocks = stripeInputInfo . getInputs ( ) ; CRC32 [ ] curCRCOuts = new CRC32 [ codec . parityLength ] ; if ( crcOuts != null ) { for ( int i = 0 ; i < codec . parityLength ; i ++ ) { crcOuts [ finishedParityBlockIdx + i ] = curCRCOuts [ i ] = new CRC32 ( ) ; } } // Create output streams to the temp files . for ( int i = 0 ; i < codec . parityLength - 1 ; i ++ ) { tmpOuts [ i + 1 ] = new FileOutputStream ( tmpFiles [ i ] ) ; } // Call the implementation of encoding . encodeStripe ( blocks , blockSize , tmpOuts , curCRCOuts , reporter , true , errorLocations ) ; } catch ( IOException e ) { if ( out . getPos ( ) > encodeStartOffset ) { // Partial data is already written , throw the exception InjectionHandler . processEventIO ( InjectionEvent . RAID_ENCODING_PARTIAL_STRIPE_ENCODED ) ; throw e ; } // try to fix the missing block in the stripe using stripe store . if ( ( e instanceof BlockMissingException || e instanceof ChecksumException ) && codec . 
isDirRaid ) { if ( retry <= 0 ) { throw e ; } redo = true ; CorruptBlockReconstructor constructor = new CorruptBlockReconstructor ( conf ) ; Set < Path > srcPaths = new HashSet < Path > ( ) ; for ( int idx : errorLocations ) { Path srcPath = stripeInputInfo . getSrcPaths ( ) [ idx ] ; if ( srcPath != null ) { srcPaths . add ( srcPath ) ; } } for ( Path srcPath : srcPaths ) { Decoder decoder = new Decoder ( conf , codec ) ; decoder . connectToStore ( srcPath ) ; LOG . info ( "In Encoding: try to reconstruct the file: " + srcPath ) ; // will throw exception if it fails to reconstruct the lost // blocks . constructor . processFile ( srcPath , null , decoder , true , null ) ; LOG . info ( "In Encoding: finished to reconstruct the file: " + srcPath ) ; } } else { throw e ; } } finally { if ( blocks != null ) { RaidUtils . closeStreams ( blocks ) ; } } if ( redo ) { // rebuild the inputs . stripeInputInfo = sReader . getStripeInputs ( currentStripeIdx ) ; } } while ( redo ) ; // Close output streams to the temp files and write the temp files // to the output provided . for ( int i = 0 ; i < codec . parityLength - 1 ; i ++ ) { tmpOuts [ i + 1 ] . close ( ) ; tmpOuts [ i + 1 ] = null ; InputStream in = new FileInputStream ( tmpFiles [ i ] ) ; RaidUtils . copyBytes ( in , out , writeBufs [ i ] , blockSize ) ; reporter . progress ( ) ; } finishedParityBlockIdx += codec . parityLength ; } } finally { for ( int i = 0 ; i < codec . parityLength - 1 ; i ++ ) { if ( tmpOuts [ i + 1 ] != null ) { tmpOuts [ i + 1 ] . close ( ) ; } tmpFiles [ i ] . delete ( ) ; LOG . info ( "Deleted tmp file " + tmpFiles [ i ] ) ; } }
public class SearchableString {
    /**
     * Returns all indices where the literal argument can be found in this String.
     * Results are cached per literal for better performance.
     *
     * @param literal the string that should be found
     * @return all indices where the literal occurs in this String
     */
    int[] getIndices(final Literal literal) {
        final int slot = literal.getIndex();
        // Serve from the per-literal cache when a previous call already searched.
        int[] indices = myIndices[slot];
        if (indices == null) {
            indices = findIndices(literal);
            myIndices[slot] = indices;
        }
        return indices;
    }
}
public class RestUtils { /** * Batch create response as JSON . * @ param app the current App object * @ param is entity input stream * @ return a status code 200 or 400 */ public static Response getBatchCreateResponse ( final App app , InputStream is ) { } }
// Wraps the batch create in a metrics timing context. Each incoming map becomes a
// ParaObject stamped with the app id; App-typed objects are skipped (apps cannot be
// created in batch) and unknown user types trigger a warning. After persisting all
// objects, the app's datatype registry is refreshed asynchronously, and app.update()
// runs only when new datatypes were actually introduced. A malformed entity body is
// returned as-is (the non-OK entity response); a null app yields 400.
try ( final Metrics . Context context = Metrics . time ( app == null ? null : app . getAppid ( ) , RestUtils . class , "batch" , "create" ) ) { if ( app != null ) { final LinkedList < ParaObject > newObjects = new LinkedList < > ( ) ; Response entityRes = getEntity ( is , List . class ) ; if ( entityRes . getStatusInfo ( ) == Response . Status . OK ) { List < Map < String , Object > > items = ( List < Map < String , Object > > ) entityRes . getEntity ( ) ; for ( Map < String , Object > object : items ) { // can ' t create multiple apps in batch
String type = ( String ) object . get ( Config . _TYPE ) ; if ( isNotAnApp ( type ) ) { warnIfUserTypeDetected ( type ) ; ParaObject pobj = ParaObjectUtils . setAnnotatedFields ( object ) ; if ( pobj != null && isValidObject ( app , pobj ) ) { pobj . setAppid ( app . getAppIdentifier ( ) ) ; setCreatorid ( app , pobj ) ; newObjects . add ( pobj ) ; } } } Para . getDAO ( ) . createAll ( app . getAppIdentifier ( ) , newObjects ) ; Para . asyncExecute ( new Runnable ( ) { public void run ( ) { int typesCount = app . getDatatypes ( ) . size ( ) ; app . addDatatypes ( newObjects . toArray ( new ParaObject [ 0 ] ) ) ; if ( typesCount < app . getDatatypes ( ) . size ( ) ) { app . update ( ) ; } } } ) ; } else { return entityRes ; } return Response . ok ( newObjects ) . build ( ) ; } else { return getStatusResponse ( Response . Status . BAD_REQUEST ) ; } }
public class AtlasClient { /** * Search using dsl / full text * @ param searchQuery * @ param limit number of rows to be returned in the result , used for pagination . maxlimit > limit > 0 . - 1 maps to atlas . search . defaultlimit property value * @ param offset offset to the results returned , used for pagination . offset > = 0 . - 1 maps to offset 0 * @ return Query results * @ throws AtlasServiceException */ public JSONArray search ( final String searchQuery , final int limit , final int offset ) throws AtlasServiceException { } }
// Runs the SEARCH API with retries; the ResourceCreator rebuilds the web resource
// (query/limit/offset parameters) fresh on every attempt. Failures parsing the
// RESULTS array out of the JSON response surface as AtlasServiceException.
JSONObject result = callAPIWithRetries ( API . SEARCH , null , new ResourceCreator ( ) { @ Override public WebResource createResource ( ) { WebResource resource = getResource ( API . SEARCH ) ; resource = resource . queryParam ( QUERY , searchQuery ) ; resource = resource . queryParam ( LIMIT , String . valueOf ( limit ) ) ; resource = resource . queryParam ( OFFSET , String . valueOf ( offset ) ) ; return resource ; } } ) ; try { return result . getJSONArray ( RESULTS ) ; } catch ( JSONException e ) { throw new AtlasServiceException ( e ) ; }
public class NotExpressionImpl { /** * < ! - - begin - user - doc - - > * < ! - - end - user - doc - - > * @ generated */ @ Override public NotificationChain eInverseRemove ( InternalEObject otherEnd , int featureID , NotificationChain msgs ) { } }
// Generated EMF plumbing: detach the VALUE containment reference when that feature
// is removed; everything else defers to the superclass. Do not hand-edit (@generated).
switch ( featureID ) { case SimpleAntlrPackage . NOT_EXPRESSION__VALUE : return basicSetValue ( null , msgs ) ; } return super . eInverseRemove ( otherEnd , featureID , msgs ) ;
public class Response { /** * Return the Response nameID ( user identifier ) * @ return String user nameID * @ throws Exception */ public String getNameId ( ) throws Exception { } }
NodeList nodes = xmlDoc . getElementsByTagNameNS ( "urn:oasis:names:tc:SAML:2.0:assertion" , "NameID" ) ; if ( nodes . getLength ( ) == 0 ) { throw new Exception ( "No name id found in document" ) ; } return nodes . item ( 0 ) . getTextContent ( ) ;
public class StageState { /** * The state of the stage . * @ param actionStates * The state of the stage . */ public void setActionStates ( java . util . Collection < ActionState > actionStates ) { } }
if ( actionStates == null ) { this . actionStates = null ; return ; } this . actionStates = new java . util . ArrayList < ActionState > ( actionStates ) ;