signature
stringlengths
43
39.1k
implementation
stringlengths
0
450k
public class task_device_log { /** * < pre > * Converts API response of bulk operation into object and returns the object array in case of get request . * < / pre > */ protected base_resource [ ] get_nitro_bulk_response ( nitro_service service , String response ) throws Exception { } }
task_device_log_responses result = ( task_device_log_responses ) service . get_payload_formatter ( ) . string_to_resource ( task_device_log_responses . class , response ) ; if ( result . errorcode != 0 ) { if ( result . errorcode == SESSION_NOT_EXISTS ) service . clear_session ( ) ; throw new nitro_exception ( result . message , result . errorcode , ( base_response [ ] ) result . task_device_log_response_array ) ; } task_device_log [ ] result_task_device_log = new task_device_log [ result . task_device_log_response_array . length ] ; for ( int i = 0 ; i < result . task_device_log_response_array . length ; i ++ ) { result_task_device_log [ i ] = result . task_device_log_response_array [ i ] . task_device_log [ 0 ] ; } return result_task_device_log ;
public class ToolInstaller { /** * Checks whether this installer can be applied to a given node . * ( By default , just checks the label . ) */ public boolean appliesTo ( Node node ) { } }
Label l = Jenkins . getInstance ( ) . getLabel ( label ) ; return l == null || l . contains ( node ) ;
public class TagFilterMarshaller { /** * Marshall the given parameter object . */ public void marshall ( TagFilter tagFilter , ProtocolMarshaller protocolMarshaller ) { } }
if ( tagFilter == null ) { throw new SdkClientException ( "Invalid argument passed to marshall(...)" ) ; } try { protocolMarshaller . marshall ( tagFilter . getName ( ) , NAME_BINDING ) ; protocolMarshaller . marshall ( tagFilter . getValues ( ) , VALUES_BINDING ) ; } catch ( Exception e ) { throw new SdkClientException ( "Unable to marshall request to JSON: " + e . getMessage ( ) , e ) ; }
public class RegistriesInner { /** * Regenerates the administrator login credentials for the specified container registry . * @ param resourceGroupName The name of the resource group to which the container registry belongs . * @ param registryName The name of the container registry . * @ throws IllegalArgumentException thrown if parameters fail the validation * @ return the observable to the RegistryCredentialsInner object */ public Observable < ServiceResponse < RegistryCredentialsInner > > regenerateCredentialsWithServiceResponseAsync ( String resourceGroupName , String registryName ) { } }
if ( this . client . subscriptionId ( ) == null ) { throw new IllegalArgumentException ( "Parameter this.client.subscriptionId() is required and cannot be null." ) ; } if ( resourceGroupName == null ) { throw new IllegalArgumentException ( "Parameter resourceGroupName is required and cannot be null." ) ; } if ( registryName == null ) { throw new IllegalArgumentException ( "Parameter registryName is required and cannot be null." ) ; } if ( this . client . apiVersion ( ) == null ) { throw new IllegalArgumentException ( "Parameter this.client.apiVersion() is required and cannot be null." ) ; } return service . regenerateCredentials ( this . client . subscriptionId ( ) , resourceGroupName , registryName , this . client . apiVersion ( ) , this . client . acceptLanguage ( ) , this . client . userAgent ( ) ) . flatMap ( new Func1 < Response < ResponseBody > , Observable < ServiceResponse < RegistryCredentialsInner > > > ( ) { @ Override public Observable < ServiceResponse < RegistryCredentialsInner > > call ( Response < ResponseBody > response ) { try { ServiceResponse < RegistryCredentialsInner > clientResponse = regenerateCredentialsDelegate ( response ) ; return Observable . just ( clientResponse ) ; } catch ( Throwable t ) { return Observable . error ( t ) ; } } } ) ;
public class SocialRenderer { /** * { @ inheritDoc } */ @ Override public void encodeEnd ( final FacesContext context , final UIComponent component ) throws IOException { } }
final Social social = ( Social ) component ; encodeMarkup ( context , social ) ; encodeScript ( context , social ) ;
public class AbstractSegment3F { /** * Compute and replies the distance between two segments . * @ param ax1 * is the X coord of the first point of the first segment * @ param ay1 * is the Y coord of the first point of the first segment * @ param az1 * is the Z coord of the first point of the first segment * @ param ax2 * is the X coord of the second point of the first segment * @ param ay2 * is the Y coord of the second point of the first segment * @ param az2 * is the Z coord of the second point of the first segment * @ param bx1 * is the X coord of the first point of the second segment * @ param by1 * is the Y coord of the first point of the second segment * @ param bz1 * is the Z coord of the first point of the second segment * @ param bx2 * is the X coord of the second point of the second segment * @ param by2 * is the Y coord of the second point of the second segment * @ param bz2 * is the Z coord of the second point of the second segment * @ return the distance */ @ Pure public static double distanceSegmentSegment ( double ax1 , double ay1 , double az1 , double ax2 , double ay2 , double az2 , double bx1 , double by1 , double bz1 , double bx2 , double by2 , double bz2 ) { } }
return Math . sqrt ( distanceSquaredSegmentSegment ( ax1 , ay1 , az1 , ax2 , ay2 , az2 , bx1 , by1 , bz1 , bx2 , by2 , bz2 ) ) ;
public class WebLocatorAbstractBuilder { /** * For customize template please see here : See http : / / docs . oracle . com / javase / 7 / docs / api / java / util / Formatter . html # dpos * @ param key name template * @ param value template * @ param < T > the element which calls this method * @ return this element */ @ SuppressWarnings ( "unchecked" ) public < T extends WebLocatorAbstractBuilder > T setTemplate ( final String key , final String value ) { } }
pathBuilder . setTemplate ( key , value ) ; return ( T ) this ;
public class TokenManagerImpl { /** * { @ inheritDoc } */ public Token recreateTokenFromBytes ( String tokenType , byte [ ] tokenBytes ) throws InvalidTokenException , TokenExpiredException { } }
try { TokenService tokenService = getTokenServiceForType ( tokenType ) ; return tokenService . recreateTokenFromBytes ( tokenBytes ) ; } catch ( IllegalArgumentException e ) { Tr . info ( tc , "TOKEN_SERVICE_INVALID_TOKEN_INFO" ) ; String translatedMessage = TraceNLS . getStringFromBundle ( this . getClass ( ) , TraceConstants . MESSAGE_BUNDLE , "TOKEN_SERVICE_INVALID_TOKEN_INFO" , "CWWKS4001I: The security token cannot be validated." ) ; throw new InvalidTokenException ( translatedMessage , e ) ; }
public class Relation { /** * Returns all the NumericColumns in the relation */ public List < NumericColumn < ? > > numericColumns ( String ... columnNames ) { } }
List < NumericColumn < ? > > cols = new ArrayList < > ( ) ; for ( String name : columnNames ) { cols . add ( numberColumn ( name ) ) ; } return cols ;
public class JMTimeUtil {
    /**
     * Re-renders an ISO zoned timestamp in a new format and time zone.
     *
     * @param isoTimestamp the ISO timestamp (parseable by {@link ZonedDateTime#parse(CharSequence)})
     * @param zoneID       the target zone id
     * @param newFormat    the formatter for the output
     * @return the timestamp at the same instant in {@code zoneID}, rendered with {@code newFormat}
     */
    public static String changeTimestampToNewFormat(String isoTimestamp, ZoneId zoneID, DateTimeFormatter newFormat) {
        ZonedDateTime parsed = ZonedDateTime.parse(isoTimestamp);
        ZonedDateTime rezoned = parsed.withZoneSameInstant(zoneID);
        return newFormat.format(rezoned);
    }
}
public class MessageLog { /** * Add this field in the Record ' s field sequence . */ public BaseField setupField ( int iFieldSeq ) { } }
BaseField field = null ; // if ( iFieldSeq = = 0) // field = new CounterField ( this , ID , Constants . DEFAULT _ FIELD _ LENGTH , null , null ) ; // field . setHidden ( true ) ; // if ( iFieldSeq = = 1) // field = new RecordChangedField ( this , LAST _ CHANGED , Constants . DEFAULT _ FIELD _ LENGTH , null , null ) ; // field . setHidden ( true ) ; // if ( iFieldSeq = = 2) // field = new BooleanField ( this , DELETED , Constants . DEFAULT _ FIELD _ LENGTH , null , new Boolean ( false ) ) ; // field . setHidden ( true ) ; if ( iFieldSeq == 3 ) field = new MessageInfoTypeField ( this , MESSAGE_INFO_TYPE_ID , Constants . DEFAULT_FIELD_LENGTH , null , null ) ; if ( iFieldSeq == 4 ) field = new MessageTypeField ( this , MESSAGE_TYPE_ID , Constants . DEFAULT_FIELD_LENGTH , null , null ) ; if ( iFieldSeq == 5 ) field = new MessageStatusField ( this , MESSAGE_STATUS_ID , Constants . DEFAULT_FIELD_LENGTH , null , null ) ; if ( iFieldSeq == 6 ) field = new MessageTransportField ( this , MESSAGE_TRANSPORT_ID , Constants . DEFAULT_FIELD_LENGTH , null , null ) ; if ( iFieldSeq == 7 ) field = new MessageProcessInfoField ( this , MESSAGE_PROCESS_INFO_ID , Constants . DEFAULT_FIELD_LENGTH , null , null ) ; if ( iFieldSeq == 8 ) field = new ContactTypeField ( this , CONTACT_TYPE_ID , Constants . DEFAULT_FIELD_LENGTH , null , null ) ; if ( iFieldSeq == 9 ) field = new ContactField ( this , CONTACT_ID , Constants . DEFAULT_FIELD_LENGTH , null , null ) ; if ( iFieldSeq == 10 ) field = new StringField ( this , DESCRIPTION , 60 , null , null ) ; if ( iFieldSeq == 11 ) field = new MessageLog_MessageTime ( this , MESSAGE_TIME , Constants . DEFAULT_FIELD_LENGTH , null , null ) ; if ( iFieldSeq == 12 ) field = new IntegerField ( this , TIMEOUT_SECONDS , Constants . DEFAULT_FIELD_LENGTH , null , null ) ; if ( iFieldSeq == 13 ) field = new DateTimeField ( this , TIMEOUT_TIME , Constants . 
DEFAULT_FIELD_LENGTH , null , null ) ; if ( iFieldSeq == 14 ) field = new UserField ( this , USER_ID , Constants . DEFAULT_FIELD_LENGTH , null , null ) ; if ( iFieldSeq == 15 ) field = new StringField ( this , REFERENCE_TYPE , 60 , null , null ) ; if ( iFieldSeq == 16 ) field = new ReferenceField ( this , REFERENCE_ID , Constants . DEFAULT_FIELD_LENGTH , null , null ) ; if ( iFieldSeq == 17 ) field = new MessageLogField ( this , RESPONSE_MESSAGE_LOG_ID , Constants . DEFAULT_FIELD_LENGTH , null , null ) ; if ( iFieldSeq == 18 ) field = new PropertiesField ( this , MESSAGE_HEADER_PROPERTIES , Constants . DEFAULT_FIELD_LENGTH , null , null ) ; if ( iFieldSeq == 19 ) field = new PropertiesField ( this , MESSAGE_INFO_PROPERTIES , Constants . DEFAULT_FIELD_LENGTH , null , null ) ; if ( iFieldSeq == 20 ) field = new PropertiesField ( this , MESSAGE_TRANSPORT_PROPERTIES , Constants . DEFAULT_FIELD_LENGTH , null , null ) ; if ( iFieldSeq == 21 ) field = new StringField ( this , MESSAGE_CLASS_NAME , 128 , null , null ) ; if ( iFieldSeq == 22 ) field = new StringField ( this , MESSAGE_HEADER_CLASS_NAME , 128 , null , null ) ; if ( iFieldSeq == 23 ) field = new StringField ( this , MESSAGE_DATA_CLASS_NAME , 128 , null , null ) ; if ( iFieldSeq == 24 ) field = new StringField ( this , EXTERNAL_MESSAGE_CLASS_NAME , 128 , null , null ) ; if ( iFieldSeq == 25 ) field = new StringField ( this , MESSAGE_QUEUE_NAME , 60 , null , null ) ; if ( iFieldSeq == 26 ) field = new StringField ( this , MESSAGE_QUEUE_TYPE , 60 , null , null ) ; if ( iFieldSeq == 27 ) field = new StringField ( this , MESSAGE_DATA_TYPE , 30 , null , null ) ; if ( iFieldSeq == 28 ) field = new XmlField ( this , XML_MESSAGE_DATA , Constants . DEFAULT_FIELD_LENGTH , null , null ) ; if ( iFieldSeq == 29 ) field = new MemoField ( this , MESSAGE_DATA , Constants . 
DEFAULT_FIELD_LENGTH , null , null ) ; if ( iFieldSeq == 30 ) field = new StringField ( this , ERROR_TEXT , 127 , null , null ) ; if ( field == null ) field = super . setupField ( iFieldSeq ) ; return field ;
public class BlockBrowser { /** * Builds the logical tree the chosen provider and parametres . */ private void buildLogicalTree ( ) { } }
if ( logicalCombo . getSelectedIndex ( ) != - 1 ) { LogicalTreeProvider provider = logicalCombo . getItemAt ( logicalCombo . getSelectedIndex ( ) ) ; proc . buildLogicalTree ( provider , null ) ; // the parametres should have been set through the GUI setLogicalTree ( proc . getLogicalAreaTree ( ) ) ; }
public class ZipImporterImpl { /** * { @ inheritDoc } * @ see org . jboss . shrinkwrap . api . importer . StreamImporter # importFrom ( java . io . InputStream ) */ @ Override public ZipImporter importFrom ( final InputStream stream ) throws ArchiveImportException { } }
return importFrom ( stream , Filters . includeAll ( ) ) ;
public class RDFUnitDemoSession { /** * Init session variables . * @ param clientHost a { @ link java . lang . String } object . */ public static void init ( String clientHost ) { } }
VaadinSession . getCurrent ( ) . setAttribute ( "client" , clientHost ) ; String baseDir = _getBaseDir ( ) ; VaadinSession . getCurrent ( ) . setAttribute ( String . class , baseDir ) ; TestGeneratorExecutor testGeneratorExecutor = new TestGeneratorExecutor ( ) ; VaadinSession . getCurrent ( ) . setAttribute ( TestGeneratorExecutor . class , testGeneratorExecutor ) ; TestSuite testSuite = new TestSuite ( new ArrayList < > ( ) ) ; VaadinSession . getCurrent ( ) . setAttribute ( TestSuite . class , testSuite ) ; CommonAccessUtils . initializeSchemaServices ( ) ;
public class CacheValue { /** * Returns a CacheValue instance that holds the value . * It holds it directly if the value is null or if the current " strength " is { @ code STRONG } . * Otherwise , it holds it via a { @ link Reference } . */ @ SuppressWarnings ( "unchecked" ) public static < V > CacheValue < V > getInstance ( V value ) { } }
if ( value == null ) { return NULL_VALUE ; } return strength == Strength . STRONG ? new StrongValue < V > ( value ) : new SoftValue < V > ( value ) ;
public class TagsInterface { /** * Search for tag - clusters . * This method does not require authentication . * @ since 1.2 * @ param searchTag * @ return a list of clusters */ public ClusterList getClusters ( String searchTag ) throws FlickrException { } }
Map < String , Object > parameters = new HashMap < String , Object > ( ) ; parameters . put ( "method" , METHOD_GET_CLUSTERS ) ; parameters . put ( "tag" , searchTag ) ; Response response = transportAPI . get ( transportAPI . getPath ( ) , parameters , apiKey , sharedSecret ) ; if ( response . isError ( ) ) { throw new FlickrException ( response . getErrorCode ( ) , response . getErrorMessage ( ) ) ; } ClusterList clusters = new ClusterList ( ) ; Element clustersElement = response . getPayload ( ) ; NodeList clusterElements = clustersElement . getElementsByTagName ( "cluster" ) ; for ( int i = 0 ; i < clusterElements . getLength ( ) ; i ++ ) { Cluster cluster = new Cluster ( ) ; NodeList tagElements = ( ( Element ) clusterElements . item ( i ) ) . getElementsByTagName ( "tag" ) ; for ( int j = 0 ; j < tagElements . getLength ( ) ; j ++ ) { Tag tag = new Tag ( ) ; tag . setValue ( ( ( Text ) tagElements . item ( j ) . getFirstChild ( ) ) . getData ( ) ) ; cluster . addTag ( tag ) ; } clusters . addCluster ( cluster ) ; } return clusters ;
public class Ecore2FXMojo { /** * { @ inheritDoc } */ @ Override public void execute ( ) throws MojoExecutionException { } }
final Ecore2FXGenerator g = new Ecore2FXGenerator ( ) ; g . generate ( this . outputDirectory , this . ecoreFile ) ;
public class DistributionPointFetcher { /** * Fetch CRLs from certStores . * @ throws CertStoreException if there is an error retrieving the CRLs from * one of the CertStores and no other CRLs are retrieved from * the other CertStores . If more than one CertStore throws an * exception then the one from the last CertStore is thrown . */ private static Collection < X509CRL > getCRLs ( X500Name name , X500Principal certIssuer , List < CertStore > certStores ) throws CertStoreException { } }
if ( debug != null ) { debug . println ( "Trying to fetch CRL from DP " + name ) ; } X509CRLSelector xcs = new X509CRLSelector ( ) ; xcs . addIssuer ( name . asX500Principal ( ) ) ; xcs . addIssuer ( certIssuer ) ; Collection < X509CRL > crls = new ArrayList < > ( ) ; CertStoreException savedCSE = null ; for ( CertStore store : certStores ) { try { for ( CRL crl : store . getCRLs ( xcs ) ) { crls . add ( ( X509CRL ) crl ) ; } } catch ( CertStoreException cse ) { if ( debug != null ) { debug . println ( "Exception while retrieving " + "CRLs: " + cse ) ; cse . printStackTrace ( ) ; } savedCSE = new PKIX . CertStoreTypeException ( store . getType ( ) , cse ) ; } } // only throw CertStoreException if no CRLs are retrieved if ( crls . isEmpty ( ) && savedCSE != null ) { throw savedCSE ; } else { return crls ; }
public class AmazonWorkMailClient { /** * Mark a user , group , or resource as no longer used in Amazon WorkMail . This action disassociates the mailbox and * schedules it for clean - up . WorkMail keeps mailboxes for 30 days before they are permanently removed . The * functionality in the console is < i > Disable < / i > . * @ param deregisterFromWorkMailRequest * @ return Result of the DeregisterFromWorkMail operation returned by the service . * @ throws EntityNotFoundException * The identifier supplied for the user , group , or resource does not exist in your organization . * @ throws EntityStateException * You are performing an operation on a user , group , or resource that isn ' t in the expected state , such as * trying to delete an active user . * @ throws InvalidParameterException * One or more of the input parameters don ' t match the service ' s restrictions . * @ throws OrganizationNotFoundException * An operation received a valid organization identifier that either doesn ' t belong or exist in the system . * @ throws OrganizationStateException * The organization must have a valid state ( Active or Synchronizing ) to perform certain operations on the * organization or its members . * @ sample AmazonWorkMail . DeregisterFromWorkMail * @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / workmail - 2017-10-01 / DeregisterFromWorkMail " * target = " _ top " > AWS API Documentation < / a > */ @ Override public DeregisterFromWorkMailResult deregisterFromWorkMail ( DeregisterFromWorkMailRequest request ) { } }
request = beforeClientExecution ( request ) ; return executeDeregisterFromWorkMail ( request ) ;
public class GridUtils { /** * CSON : ParameterNumber */ private static double [ ] transformToLabelProjection ( final MathTransform toLabelProjection , final Coordinate borderIntersection ) { } }
try { double [ ] labelProj = new double [ 2 ] ; toLabelProjection . transform ( new double [ ] { borderIntersection . x , borderIntersection . y } , 0 , labelProj , 0 , 1 ) ; return labelProj ; } catch ( TransformException e ) { throw new RuntimeException ( e ) ; }
public class Beagle {
    /**
     * {@inheritDoc}
     *
     * Processes a document token-by-token, maintaining a sliding context
     * window: {@code nextWords} holds up to {@code nextSize} upcoming tokens,
     * {@code prevWords} holds the single previous token. For each focus word
     * the context is folded into a per-document semantic vector, and at the
     * end the per-document vectors are merged into the shared
     * {@code termHolographs} map.
     */
    public void processDocument(BufferedReader document) throws IOException {
        Queue<String> prevWords = new ArrayDeque<String>();
        Queue<String> nextWords = new ArrayDeque<String>();
        Iterator<String> it = IteratorFactory.tokenizeOrdered(document);
        // Local accumulation buffer; merged into the global map at the end so
        // the synchronized section is kept short.
        Map<String, DoubleVector> documentVectors = new HashMap<String, DoubleVector>();
        // Fill up the words after the context so that when the real processing
        // starts, the context is fully prepared.
        for (int i = 0; i < nextSize && it.hasNext(); ++i)
            nextWords.offer(it.next().intern());
        // Seed the "previous" context with the empty token marker.
        prevWords.offer(IteratorFactory.EMPTY_TOKEN);
        String focusWord = null;
        while (!nextWords.isEmpty()) {
            focusWord = nextWords.remove();
            // Keep the look-ahead window topped up while tokens remain.
            if (it.hasNext())
                nextWords.offer(it.next().intern());
            // Empty tokens act as placeholders and get no semantics.
            if (!focusWord.equals(IteratorFactory.EMPTY_TOKEN)) {
                // Incorporate the context into the semantic vector for the
                // focus word. If the focus word has no semantic vector yet,
                // create a new one, as determined by the index builder.
                DoubleVector meaning = termHolographs.get(focusWord);
                if (meaning == null) {
                    meaning = new DenseVector(indexVectorSize);
                    documentVectors.put(focusWord, meaning);
                }
                updateMeaning(meaning, prevWords, nextWords);
            }
            // Slide the one-element "previous" window forward.
            prevWords.offer(focusWord);
            if (prevWords.size() > 1)
                prevWords.remove();
        }
        // Add the local cached semantics to the global term semantics.
        // NOTE(review): synchronization is on the interned key string, which
        // relies on focus words being interned above — confirm all writers
        // follow the same convention.
        for (Map.Entry<String, DoubleVector> entry : documentVectors.entrySet()) {
            synchronized (entry.getKey()) {
                // Get the global semantic representation of each word. If it
                // does not currently exist, then just put the local copies
                // representation, otherwise add the local copy to the global
                // version.
                DoubleVector existingVector = termHolographs.get(entry.getKey());
                if (existingVector == null)
                    termHolographs.put(entry.getKey(), entry.getValue());
                else
                    VectorMath.add(existingVector, entry.getValue());
            }
        }
    }
}
public class MetricRegistryInstance {
    /**
     * Handles collecting the future arguments, correctly firing the timeout.
     * This function also updates the failed_collections_ member variable.
     *
     * Collection proceeds in three phases against a shared ready-queue that
     * the futures' completion handlers feed:
     * 1. wait until the primary deadline (MAX_COLLECTOR_WAIT_MSEC after t0);
     * 2. fire the timeout event, then wait until the grace deadline
     *    (COLLECTOR_POST_TIMEOUT_MSEC after t0);
     * 3. drain whatever is already present without blocking.
     *
     * @param futures The futures of MetricGroups to dereference.
     * @param t0_nsec The starting time of the scrape.
     * @param timeout The future that informs collectors of the timeout event.
     * @return The dereferenced data from all futures that completed
     *         successfully and on time.
     */
    private Collection<MetricGroup> derefFutures(Collection<CompletableFuture<? extends Collection<? extends MetricGroup>>> futures, long t0_nsec, CompletableFuture<GroupGenerator.TimeoutObject> timeout) {
        long tDeadline1 = t0_nsec + TimeUnit.MILLISECONDS.toNanos(MAX_COLLECTOR_WAIT_MSEC);
        long tDeadline2 = t0_nsec + TimeUnit.MILLISECONDS.toNanos(COLLECTOR_POST_TIMEOUT_MSEC);
        final List<MetricGroup> result = new ArrayList<>();
        final BlockingQueue<Any2<Collection<? extends MetricGroup>, Throwable>> readyQueue = new LinkedBlockingQueue<>();
        int failCount = 0;
        int pendingCount = futures.size();
        // Each future pushes either its value (left) or its failure (right)
        // onto the ready queue as it completes.
        futures.forEach(fut -> {
            fut.handle((value, exc) -> {
                if (exc != null) {
                    // Cancellations are expected after the timeout fires; only
                    // log real failures.
                    if (!(exc instanceof CancellationException))
                        LOG.log(Level.INFO, "collector failed", exc);
                    readyQueue.add(Any2.right(exc));
                }
                if (value != null)
                    readyQueue.add(Any2.left(value));
                return null;
            });
        });
        // Collect everything that completes on time.
        while (pendingCount > 0) {
            final long tNow = System.nanoTime();
            // Deadline comparison via subtraction is robust to nanoTime wrap.
            if (tDeadline1 - tNow <= 0)
                break; // GUARD
            final Any2<Collection<? extends MetricGroup>, Throwable> readyItem;
            try {
                readyItem = readyQueue.poll(tDeadline1 - tNow, TimeUnit.NANOSECONDS);
            } catch (InterruptedException ex) {
                LOG.log(Level.INFO, "interrupted while waiting for scrape data", ex);
                continue;
            }
            if (readyItem != null) {
                readyItem.getLeft().ifPresent(result::addAll);
                if (readyItem.getRight().isPresent())
                    ++failCount;
                --pendingCount;
            }
        }
        // Fire timeout event.
        timeout.complete(new GroupGenerator.TimeoutObject());
        // Collect everything we can get within the grace period.
        while (pendingCount > 0) {
            final long tNow = System.nanoTime();
            if (tDeadline2 - tNow <= 0)
                break; // GUARD
            final Any2<Collection<? extends MetricGroup>, Throwable> readyItem;
            try {
                readyItem = readyQueue.poll(tDeadline2 - tNow, TimeUnit.NANOSECONDS);
            } catch (InterruptedException ex) {
                LOG.log(Level.INFO, "interrupted while waiting for scrape data", ex);
                continue;
            }
            if (readyItem != null) {
                readyItem.getLeft().ifPresent(result::addAll);
                if (readyItem.getRight().isPresent())
                    ++failCount;
                --pendingCount;
            }
        }
        // Collect everything that is present.
        while (pendingCount > 0) {
            final Any2<Collection<? extends MetricGroup>, Throwable> readyItem = readyQueue.poll();
            if (readyItem == null)
                break; // GUARD
            readyItem.getLeft().ifPresent(result::addAll);
            if (readyItem.getRight().isPresent())
                ++failCount;
            --pendingCount;
        }
        // Expose failure count. Still-pending futures count as failures.
        failed_collections_ = pendingCount + failCount;
        return result;
    }
}
public class DINameSpace { /** * Retrieves the < code > Class < / code > object specified by the * < code > name < / code > argument , using , if possible , the * classLoader attribute of the database . < p > * @ param name the fully qualified name of the < code > Class < / code > * object to retrieve . * @ throws ClassNotFoundException if the specified class object * cannot be found in the context of this name space * @ return the < code > Class < / code > object specified by the * < code > name < / code > argument */ Class classForName ( String name ) throws ClassNotFoundException { } }
if ( name == null ) { return Class . forName ( name ) ; } return Class . forName ( name ) ;
public class EastAsianMonth { /** * / * [ deutsch ] * < p > Liefert den traditionellen japanischen Monatsnamen . < / p > * < p > Hinweis : Diese Methode ignoriert den Umstand , ob dieser Monat ein Schaltmonat ist . < / p > * @ param locale language setting * @ return descriptive text ( never { @ code null } ) */ public String getOldJapaneseName ( Locale locale ) { } }
Map < String , String > textForms = CalendarText . getInstance ( "japanese" , locale ) . getTextForms ( ) ; return textForms . get ( "t" + this . getNumber ( ) ) ;
public class ZipUtil {
    /**
     * Copies an existing ZIP file and replaces the given entries in it.
     *
     * Entries whose paths match a replacement are written from the
     * replacement source; all other entries are copied through unchanged.
     * Duplicate entry names in the source ZIP are skipped (first one wins).
     *
     * @param zip     an existing ZIP file (only read)
     * @param entries new ZIP entries to be replaced with
     * @param destZip new ZIP file created
     * @return <code>true</code> if at least one entry was replaced
     */
    public static boolean replaceEntries(File zip, ZipEntrySource[] entries, File destZip) {
        if (log.isDebugEnabled()) {
            log.debug("Copying '" + zip + "' to '" + destZip + "' and replacing entries " + Arrays.asList(entries) + ".");
        }
        // Replacements are removed from this map as they are consumed, so the
        // final size tells how many were actually applied.
        final Map<String, ZipEntrySource> entryByPath = entriesByPath(entries);
        final int entryCount = entryByPath.size();
        try {
            final ZipOutputStream out = new ZipOutputStream(new BufferedOutputStream(new FileOutputStream(destZip)));
            try {
                // Tracks names already written, to skip duplicates in the source.
                final Set<String> names = new HashSet<String>();
                iterate(zip, new ZipEntryCallback() {
                    public void process(InputStream in, ZipEntry zipEntry) throws IOException {
                        if (names.add(zipEntry.getName())) {
                            // Use the replacement if one was registered for this path.
                            ZipEntrySource entry = (ZipEntrySource) entryByPath.remove(zipEntry.getName());
                            if (entry != null) {
                                addEntry(entry, out);
                            } else {
                                ZipEntryUtil.copyEntry(zipEntry, in, out);
                            }
                        } else if (log.isDebugEnabled()) {
                            log.debug("Duplicate entry: {}", zipEntry.getName());
                        }
                    }
                });
            } finally {
                IOUtils.closeQuietly(out);
            }
        } catch (IOException e) {
            ZipExceptionUtil.rethrow(e);
        }
        // True when at least one replacement was consumed from the map.
        return entryByPath.size() < entryCount;
    }
}
public class Approval {
    /**
     * Verifies the value that was passed in against its approved file.
     *
     * If no approved file exists yet, a first-time approval is requested.
     * Otherwise the raw form of the value is compared with the approved
     * content; on mismatch, the new value is written to the approval path
     * and the reporter is asked to show the difference.
     *
     * @param value    the value object to be approved
     * @param filePath the path where the value will be kept for further approval
     */
    public void verify(@Nullable T value, Path filePath) {
        Pre.notNull(filePath, "filePath");
        File file = mapFilePath(value, filePath);
        // Make sure the parent directory for the approved file exists.
        File parentPathDirectory = file.getParentFile();
        if (parentPathDirectory != null && !parentPathDirectory.exists()) {
            try {
                fileSystemReadWriter.createDirectories(parentPathDirectory);
            } catch (IOException e) {
                throw new IllegalStateException(e.getMessage(), e);
            }
        }
        Path approvalPath = getApprovalPath(file.toPath());
        byte[] rawValue = converter.getRawForm(value);
        if (!file.exists()) {
            // No previously approved value: start the first-time approval flow.
            LOG.info(file + " didn't exist. You will be asked for approval");
            handleFirstTimeApproval(file, approvalPath, rawValue);
            return;
        }
        // Change the modification time. This will allow users to delete orphaned files
        // https://github.com/nikolavp/approval/issues/18
        boolean wasAbleSetModified = file.setLastModified(System.currentTimeMillis());
        if (!wasAbleSetModified) {
            LOG.warning("We weren't able to change the modification date for " + file.getAbsolutePath());
        }
        try {
            byte[] fileContent = fileSystemReadWriter.readFully(file.toPath());
            if (!Arrays.equals(fileContent, rawValue)) {
                try {
                    LOG.info("Approval in " + file + " is not the same as the last value. You will be asked for approval of the new value.");
                    // Stage the new value next to the approved file so the
                    // reporter can show both sides.
                    fileSystemReadWriter.write(approvalPath, rawValue);
                } catch (IOException e) {
                    throw new AssertionError("Couldn't write the new approval file " + file, e);
                }
                reporter.notTheSame(fileContent, file, rawValue, approvalPath.toFile());
            }
        } catch (IOException e) {
            throw new AssertionError("Couldn't read the previous content in file " + file, e);
        }
        // value approved
    }
}
public class JAXBMarshallerHelper { /** * Set the standard property for formatting the output or not . * @ param aMarshaller * The marshaller to set the property . May not be < code > null < / code > . * @ param bFormattedOutput * the value to be set */ public static void setFormattedOutput ( @ Nonnull final Marshaller aMarshaller , final boolean bFormattedOutput ) { } }
_setProperty ( aMarshaller , Marshaller . JAXB_FORMATTED_OUTPUT , Boolean . valueOf ( bFormattedOutput ) ) ;
public class XmlUtil { /** * 将XML文档写出 * @ param node { @ link Node } XML文档节点或文档本身 * @ param writer 写出的Writer , Writer决定了输出XML的编码 * @ param charset 编码 * @ param indent 格式化输出中缩进量 , 小于1表示不格式化输出 * @ since 3.0.9 */ public static void write ( Node node , Writer writer , String charset , int indent ) { } }
transform ( new DOMSource ( node ) , new StreamResult ( writer ) , charset , indent ) ;
public class ServiceLoaderHelper { /** * Uses the { @ link ServiceLoader } to load all SPI implementations of the * passed class * @ param < T > * The implementation type to be loaded * @ param aSPIClass * The SPI interface class . May not be < code > null < / code > . * @ return A list of all currently available plugins */ @ Nonnull @ ReturnsMutableCopy public static < T > ICommonsList < T > getAllSPIImplementations ( @ Nonnull final Class < T > aSPIClass ) { } }
return getAllSPIImplementations ( aSPIClass , ClassLoaderHelper . getDefaultClassLoader ( ) , null ) ;
public class Tailer { /** * Creates and starts a Tailer for the given file , starting at the beginning * of the file with the default delay of 1.0s * @ param file * the file to follow . * @ param listener * the TailerListener to use . * @ return The new tailer */ public static Tailer create ( final File file , final TailerListener listener ) { } }
return Tailer . create ( file , listener , Tailer . DEFAULT_DELAY_MILLIS , false ) ;
public class JapaneseDate { /** * Obtains a { @ code JapaneseDate } representing a date in the Japanese calendar * system from the era , year - of - era and day - of - year fields . * This returns a { @ code JapaneseDate } with the specified fields . * The day must be valid for the year , otherwise an exception will be thrown . * The Japanese day - of - year is reset when the era changes . * @ param era the Japanese era , not null * @ param yearOfEra the Japanese year - of - era * @ param dayOfYear the Japanese day - of - year , from 1 to 31 * @ return the date in Japanese calendar system , not null * @ throws DateTimeException if the value of any field is out of range , * or if the day - of - year is invalid for the year */ static JapaneseDate ofYearDay ( JapaneseEra era , int yearOfEra , int dayOfYear ) { } }
Jdk8Methods . requireNonNull ( era , "era" ) ; if ( yearOfEra < 1 ) { throw new DateTimeException ( "Invalid YearOfEra: " + yearOfEra ) ; } LocalDate eraStartDate = era . startDate ( ) ; LocalDate eraEndDate = era . endDate ( ) ; if ( yearOfEra == 1 ) { dayOfYear += eraStartDate . getDayOfYear ( ) - 1 ; if ( dayOfYear > eraStartDate . lengthOfYear ( ) ) { throw new DateTimeException ( "DayOfYear exceeds maximum allowed in the first year of era " + era ) ; } } int yearOffset = eraStartDate . getYear ( ) - 1 ; LocalDate isoDate = LocalDate . ofYearDay ( yearOfEra + yearOffset , dayOfYear ) ; if ( isoDate . isBefore ( eraStartDate ) || isoDate . isAfter ( eraEndDate ) ) { throw new DateTimeException ( "Requested date is outside bounds of era " + era ) ; } return new JapaneseDate ( era , yearOfEra , isoDate ) ;
public class DataPublisher { /** * Starts the { @ code DataPublisher } . * The method { @ link DataAccumulator # publish } will be called approximately * every { @ code delayMillis } milliseconds . * If the publisher has already been started , does nothing . * @ see # stop */ public synchronized void start ( ) { } }
if ( future == null ) { Runnable task = new Runnable ( ) { public void run ( ) { try { accumulator . publish ( ) ; } catch ( Exception e ) { handleException ( e ) ; } } } ; future = getExecutor ( ) . scheduleWithFixedDelay ( task , delayMillis , delayMillis , TimeUnit . MILLISECONDS ) ; }
public class ExtractHandler { /** * Append an open tag with the given specification to the result buffer . * @ param qName Tag ' s qualified name . * @ param atts Tag ' s attributes . */ private void openTag ( String qName , Attributes atts ) { } }
this . result . append ( '<' ) . append ( qName ) ; for ( int i = 0 ; i < atts . getLength ( ) ; i ++ ) { this . result . append ( ' ' ) . append ( atts . getQName ( i ) ) . append ( "=\"" ) . append ( atts . getValue ( i ) ) . append ( '\"' ) ; } this . result . append ( '>' ) ;
public class Task {
    /**
     * Cancel this task.
     * <p>
     * The first cancellation wins: repeated calls after {@code cancelling} has
     * been set are ignored. The task is removed from whichever place it
     * currently lives (scheduler, manager queue, or not yet started); the first
     * removal that succeeds marks the task done and notifies the result.
     *
     * @param reason the reason for cancellation; a default reason is
     *               substituted when {@code null}
     */
    public final void cancel(CancelException reason) {
        // Already being cancelled - nothing more to do.
        if (cancelling != null) return;
        if (reason == null) reason = new CancelException("No reason given");
        cancelling = reason;
        // Case 1: task is held by the scheduler (waiting for its time slot).
        if (TaskScheduler.cancel(this)) {
            status = Task.STATUS_DONE;
            result.cancelled(reason);
            return;
        }
        // Case 2: task is queued in the manager.
        if (manager.remove(this)) {
            status = Task.STATUS_DONE;
            result.cancelled(reason);
            return;
        }
        // Case 3: task was never started at all.
        // NOTE(review): this check-then-act sequence is not synchronized -
        // presumably callers serialize cancellation externally; verify.
        if (status == Task.STATUS_NOT_STARTED) {
            status = Task.STATUS_DONE;
            result.cancelled(reason);
            return;
        }
    }
}
public class BroxWarpingSpacial {
    /**
     * Resize all working images for the current pyramid layer being processed.
     * Every intermediate buffer is reshaped to the layer's width and height so
     * the per-layer computations can reuse previously allocated storage.
     *
     * @param width  layer image width in pixels
     * @param height layer image height in pixels
     */
    protected void resizeForLayer(int width, int height) {
        // First and second image derivatives.
        deriv1X.reshape(width, height);
        deriv1Y.reshape(width, height);
        deriv2X.reshape(width, height);
        deriv2Y.reshape(width, height);
        deriv2XX.reshape(width, height);
        deriv2YY.reshape(width, height);
        deriv2XY.reshape(width, height);
        // Warped second image and its derivatives.
        warpImage2.reshape(width, height);
        warpDeriv2X.reshape(width, height);
        warpDeriv2Y.reshape(width, height);
        warpDeriv2XX.reshape(width, height);
        warpDeriv2YY.reshape(width, height);
        warpDeriv2XY.reshape(width, height);
        // Derivatives of the flow components (u, v).
        derivFlowUX.reshape(width, height);
        derivFlowUY.reshape(width, height);
        derivFlowVX.reshape(width, height);
        derivFlowVY.reshape(width, height);
        // Psi terms used by the variational solver.
        psiData.reshape(width, height);
        psiGradient.reshape(width, height);
        psiSmooth.reshape(width, height);
        // Divergence buffers and flow increments.
        divU.reshape(width, height);
        divV.reshape(width, height);
        divD.reshape(width, height);
        du.reshape(width, height);
        dv.reshape(width, height);
    }
}
public class UnionQueryAnalyzer {
    /**
     * Splits the filter into sub-results and possibly merges them.
     * <p>
     * The filter is first converted to disjunctive normal form and split into
     * one sub-result per disjunct. If any sub-result cannot be served by an
     * index (a full scan), the non-exempt sub-results are merged into it.
     *
     * @param filter   filter to split, not null
     * @param ordering requested result ordering
     * @param hints    optional query hints
     * @return list of sub-results forming the union
     */
    private List<IndexedQueryAnalyzer<S>.Result> splitIntoSubResults(Filter<S> filter,
                                                                     OrderingList<S> ordering,
                                                                     QueryHints hints)
        throws SupportException, RepositoryException
    {
        // Required for split to work.
        Filter<S> dnfFilter = filter.disjunctiveNormalForm();

        Splitter splitter = new Splitter(ordering, hints);
        // The visitor reports failures via its return value rather than throwing.
        RepositoryException e = dnfFilter.accept(splitter, null);
        if (e != null) {
            throw e;
        }

        List<IndexedQueryAnalyzer<S>.Result> subResults = splitter.mSubResults;

        // Check if any sub-result handles nothing. If so, a full scan is the
        // best option for the entire query and all sub-results merge into a
        // single sub-result. Any sub-results which filter anything and contain
        // a join property in the filter are exempt from the merge. This is
        // because fewer joins are read than if a full scan is performed for
        // the entire query. The resulting union has both a full scan and an
        // index scan.
        IndexedQueryAnalyzer<S>.Result full = null;
        for (IndexedQueryAnalyzer<S>.Result result : subResults) {
            if (!result.handlesAnything()) {
                full = result;
                break;
            }
            if (!result.getCompositeScore().getFilteringScore().hasAnyMatches()) {
                if (full == null) {
                    // This index is used only for its ordering, and it will be
                    // tentatively selected as the "full scan". If a result is
                    // found doesn't use an index for anything, then it becomes
                    // the "full scan" index.
                    full = result;
                }
            }
        }

        if (full == null) {
            // Okay, no full scan needed.
            return subResults;
        }

        List<IndexedQueryAnalyzer<S>.Result> mergedResults =
            new ArrayList<IndexedQueryAnalyzer<S>.Result>();

        for (IndexedQueryAnalyzer<S>.Result result : subResults) {
            if (result == full) {
                // Add after everything has been merged into it.
                continue;
            }
            boolean exempt = result.getCompositeScore().getFilteringScore().hasAnyMatches();
            if (exempt) {
                // Must also have a join in the filter to be exempt.
                List<PropertyFilter<S>> subFilters = PropertyFilterList.get(result.getFilter());
                joinCheck: {
                    for (PropertyFilter<S> subFilter : subFilters) {
                        if (subFilter.getChainedProperty().getChainCount() > 0) {
                            // A chain implies a join was followed, so result is exempt.
                            break joinCheck;
                        }
                    }
                    // No joins found, result is not exempt from merging into full scan.
                    exempt = false;
                }
            }
            if (exempt) {
                mergedResults.add(result);
            } else {
                full = full.mergeRemainderFilter(result.getFilter());
            }
        }

        if (mergedResults.size() == 0) {
            // Nothing was exempt. Rather than return a result with a dnf
            // filter, return full scan with a simpler reduced filter.
            full = full.withRemainderFilter(filter.reduce());
        }

        mergedResults.add(full);

        return mergedResults;
    }
}
public class SimpleTree { /** * Sets the children of this < code > Tree < / code > . If given * < code > null < / code > , this method sets the Tree ' s children to a * unique zero - length Tree [ ] array . * @ param children An array of child trees */ @ Override public void setChildren ( Tree [ ] children ) { } }
if ( children == null ) { System . err . println ( "Warning -- you tried to set the children of a SimpleTree to null.\nYou should be really using a zero-length array instead." ) ; daughterTrees = EMPTY_TREE_ARRAY ; } else { daughterTrees = children ; }
public class ConnectionReadCompletedCallback {
    /**
     * Part of the read completed callback interface. Notified when an error
     * occurs during a read operation.
     * <p>
     * Behaviour depends on connection state:
     * <ul>
     * <li>physically closing: the error is ignored (close/read race);</li>
     * <li>socket timeout while awaiting a heartbeat ack: the connection is
     *     invalidated;</li>
     * <li>socket timeout with heartbeating switched off: the read is simply
     *     re-issued with no timeout;</li>
     * <li>socket timeout otherwise: a heartbeat request is sent and the read
     *     re-issued with the heartbeat timeout;</li>
     * <li>any other error: the connection is invalidated and the read buffers
     *     released.</li>
     * </ul>
     *
     * @param vc  the network connection the read was performed against
     * @param rrc the read request context the error relates to
     * @param t   the exception reported by the channel framework, may be null
     */
    public void error(NetworkConnection vc, IOReadRequestContext rrc, IOException t) // F176003, F184828, D194678
    {
        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
            SibTr.entry(this, tc, "error", new Object[] { vc, rrc, t }); // F184828
        if (TraceComponent.isAnyTracingEnabled() && tc.isEventEnabled() && (t != null))
            SibTr.exception(this, tc, t);

        if (thisConnection.isLoggingIOEvents())
            thisConnection.getConnectionEventRecorder().logDebug("error method invoked on read context " + System.identityHashCode(rrc) + " with exception " + t);

        try {
            // Serialize with other callback processing on this object.
            synchronized (this) {
                // begin F174772
                if (receivePhysicalCloseRequest) {
                    // Ignore any errors when we are physically closing; they could be
                    // because of a race condition between write operations and the
                    // close of the socket.
                    if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled())
                        SibTr.debug(this, tc, "ignoring as in process of close");
                }
                // end F174772
                // begin F175658
                else if (t instanceof SocketTimeoutException) {
                    if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled())
                        SibTr.debug(this, tc, "error is as a result of a timeout");

                    // We should only enter this arm of the if statement
                    // if we have timed out on a previous request. This
                    // must be heartbeat related.
                    if (awaitingHeartbeatResponse.isSet()) {
                        // The peer failed to acknowledge our heartbeat in time.
                        if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled())
                            SibTr.debug(this, tc, "timed out waiting for heartbeat response"); // F177053
                        thisConnection.getConnectionEventRecorder().logDebug("timed out waiting for heartbeat response");

                        // At this point issue a message to the console.
                        String remoteHostAddress = "<Unknown>";
                        String remoteHostPort = "<Unknown>";
                        String chainName = thisConnection.chainName;
                        if (tcpCtx != null) {
                            InetAddress addr = tcpCtx.getRemoteAddress();
                            if (addr != null) {
                                remoteHostAddress = addr.getHostAddress();
                            }
                            remoteHostPort = "" + tcpCtx.getRemotePort();
                        }

                        // Pick the message matching direction (inbound/outbound)
                        // and peer type (messaging engine / client).
                        if (thisConnection.isInbound()) {
                            if (conversation.getConversationType() == Conversation.ME) {
                                SibTr.error(tc, SibTr.Suppressor.ALL_FOR_A_WHILE_SIMILAR_INSERTS, "ME_NOT_RESPONDING_SICJ0041", new Object[] { remoteHostAddress, chainName, "" + currentHeartbeatTimeout });
                            } else if (conversation.getConversationType() == Conversation.CLIENT) {
                                SibTr.error(tc, SibTr.Suppressor.ALL_FOR_A_WHILE_SIMILAR_INSERTS, "CLIENT_NOT_RESPONDING_SICJ0042", new Object[] { remoteHostAddress, chainName, "" + currentHeartbeatTimeout });
                            }
                        } else // we are Outbound
                        {
                            if (conversation.getConversationType() == Conversation.ME) {
                                SibTr.error(tc, SibTr.Suppressor.ALL_FOR_A_WHILE_SIMILAR_INSERTS, "ME_NOT_RESPONDING_OUTBOUND_SICJ0070", new Object[] { remoteHostAddress, remoteHostPort, chainName, "" + currentHeartbeatTimeout });
                            } else if (conversation.getConversationType() == Conversation.CLIENT) {
                                SibTr.error(tc, SibTr.Suppressor.ALL_FOR_A_WHILE_SIMILAR_INSERTS, "CLIENT_NOT_RESPONDING_OUTBOUND_SICJ0071", new Object[] { remoteHostAddress, remoteHostPort, chainName, "" + currentHeartbeatTimeout });
                            }
                        }

                        // Close the underlying connection. This takes out all
                        // the conversations it shares, wakes up exchanges and
                        // delivers exceptions to the conversation receive listeners.
                        // begin F176003
                        JFapHeartbeatTimeoutException exception = new JFapHeartbeatTimeoutException("Connection dropped after heartbeat request went unacknowledged");
                        exception.initCause(t);
                        FFDCFilter.processException(exception, "com.ibm.ws.sib.jfapchannel.impl.ConnectionReadCompletedCallback", JFapChannelConstants.CONNREADCOMPCALLBACK_ERROR_03, thisConnection.getDiagnostics(true));
                        thisConnection.invalidate(false, exception, "heartbeat request was not acknowledged");
                        // end F176003
                    }
                    // begin F177053
                    else if (thisConnection.getHeartbeatInterval() == 0) {
                        // It looks like someone has turned off heartbeating, yet
                        // whilst it was switched on we asked to be notified of a
                        // timeout. We should probably just ignore this.
                        if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled())
                            SibTr.debug(this, tc, "timed out but heartbeating now switched off");

                        // begin D183461
                        // Re-issue the read (without a timeout) unless the
                        // connection is closing; complete() must be invoked
                        // outside the closing lock.
                        NetworkConnection rvc = null;
                        synchronized (connectionClosingLock) {
                            if (!connectionClosing) {
                                if (thisConnection.isLoggingIOEvents())
                                    thisConnection.getConnectionEventRecorder().logDebug("invoking readCtx.read() on context " + System.identityHashCode(readCtx) + " with no timeout");
                                rvc = readCtx.read(1, this, false, IOReadRequestContext.NO_TIMEOUT); // F184828
                            }
                        }
                        // end D183461
                        if (rvc != null)
                            complete(rvc, readCtx);
                    }
                    // end F177053
                    else {
                        if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled())
                            SibTr.debug(this, tc, "sending heartbeat request"); // F177053, D224570

                        // We timed out and we weren't awaiting a heartbeat
                        // response. This must mean that we timed out on the
                        // heartbeat interval. Issue a heartbeat request to
                        // our peer.

                        // Remember state about the fact that we are about to
                        // issue a heartbeat request.
                        awaitingHeartbeatResponse.set();
                        currentHeartbeatTimeout = thisConnection.getHeartbeatTimeout();

                        thisConnection.getConnectionEventRecorder().logDebug("sending heartbeat request and waiting up to " + currentHeartbeatTimeout + " seconds for a response");

                        // Send a heartbeat request to our peer.
                        // begin D221868
                        SIException sendException = null;
                        try {
                            thisConnection.sendHeartbeat();
                        } catch (SIConnectionLostException e) {
                            // No FFDC code needed
                            if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled())
                                SibTr.debug(this, tc, "Exception received when heartbeating peer (" + e.toString() + "). Dropping connection");
                            sendException = e;
                        } catch (SIConnectionDroppedException e) {
                            // No FFDC code needed
                            if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled())
                                SibTr.debug(this, tc, "Exception received when heartbeating peer (" + e.toString() + "). Dropping connection");
                            sendException = e;
                        }

                        if (sendException == null) {
                            // Resume our outstanding read request - but this time,
                            // set the timeout to our heartbeat timeout time. This is
                            // because we are waiting for the response.
                            int timeout = currentHeartbeatTimeout * 1000;
                            if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled())
                                SibTr.debug(this, tc, "setting heartbeat timeout to: " + timeout + " milliseconds"); // F177053

                            // begin F177053, D183461
                            NetworkConnection rvc = null;
                            synchronized (connectionClosingLock) {
                                if (!connectionClosing) {
                                    if (thisConnection.isLoggingIOEvents())
                                        thisConnection.getConnectionEventRecorder().logDebug("invoking readCtx.read() on context " + System.identityHashCode(readCtx) + " with a timeout of " + timeout);
                                    rvc = readCtx.read(1, this, false, timeout); // F184828
                                }
                            }
                            if (rvc != null)
                                complete(rvc, readCtx);
                            // end F177053, D183461
                        } else {
                            // We failed to send a heartbeat request to our peer.
                            thisConnection.invalidate(false, sendException, "exception caught while attempting to send heartbeat"); // D224570
                        }
                        // end D221868
                    }
                } // end F175658
                else // F174772
                { // F174772
                    // begin F176003
                    // We used to set the first parameter to true here meaning that we
                    // need to notify the peer that we are having a bit of trouble.
                    // However, I believe that to be incorrect as I think that if this
                    // method is called then the socket is already dead - hence trying
                    // to notify the peer would mean that we would hang.
                    thisConnection.invalidate(false, t, "IOException received for connection - " + t.getMessage()); // D179618, D224570
                    // end F176003

                    // Note that this also deals with the buffer returned by getBuffer.
                    final IOReadRequestContext req = readCtx;
                    final WsByteBuffer[] buffers = req.getBuffers();
                    if (buffers != null) {
                        for (final WsByteBuffer buffer : buffers) {
                            // Try and release the buffer.
                            // Absorb any exceptions if it gets released by another thread
                            // (for example by Connection.nonThreadSafePhysicalClose).
                            try {
                                buffer.release();
                            } catch (RuntimeException e) {
                                // No FFDC code needed
                                if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled())
                                    SibTr.debug(this, tc, "Caught exception on releasing buffer.", e);
                            }
                        }
                        req.setBuffers(null);
                    } else {
                        if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled())
                            SibTr.debug(this, tc, "Request has no buffers: " + req);
                    }
                }
            }
        } catch (Error error) {
            FFDCFilter.processException(error, "com.ibm.ws.sib.jfapchannel.impl.ConnectionReadCompletedCallback", JFapChannelConstants.CONNREADCOMPCALLBACK_ERROR_05, thisConnection.getDiagnostics(true));
            if (TraceComponent.isAnyTracingEnabled() && tc.isEventEnabled())
                SibTr.exception(this, tc, error);

            // It might appear slightly odd for this code to catch Error (especially since the JDK docs say
            // that Error means that something has gone so badly wrong that you should abandon all hope).
            // This code makes one final stab at putting out some diagnostics about what happened (if we
            // propagate the Error up to the TCP Channel, it is sometimes lost) and closing down the
            // connection. I figured that we might as well try to do something - as we can hardly make
            // things worse... (famous last words)
            thisConnection.invalidate(false, error, "Error caught in ConnectionReadCompletedCallback.error()");

            // Re-throw the error to ensure that it causes the maximum devastation.
            // The JVM is probably very ill if an Error is thrown so attempt no recovery.
            throw error;
        } catch (RuntimeException runtimeException) {
            FFDCFilter.processException(runtimeException, "com.ibm.ws.sib.jfapchannel.impl.ConnectionReadCompletedCallback", JFapChannelConstants.CONNREADCOMPCALLBACK_ERROR_06, thisConnection.getDiagnostics(true));
            if (TraceComponent.isAnyTracingEnabled() && tc.isEventEnabled())
                SibTr.exception(this, tc, runtimeException);

            // We can reasonably try to recover from a runtime exception by invalidating the associated
            // connection. This should drive the underlying TCP/IP socket to be closed.
            thisConnection.invalidate(false, runtimeException, "RuntimeException caught in ConnectionReadCompletedCallback.error()");
            // Don't throw the RuntimeException on as we risk blowing away part of the TCP channel.
        }

        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
            SibTr.exit(this, tc, "error");
    }
}
public class FileDownloadList {
    /**
     * This method generally used for enqueuing the task which will be assembled
     * by a queue.
     * <p>
     * Marking, assembly and insertion happen under the list lock so the task
     * can only be added once and the list size logged is consistent.
     *
     * @param task the task to enqueue
     * @see BaseDownloadTask.InQueueTask#enqueue()
     */
    void addQueueTask(final DownloadTaskAdapter task) {
        // A task already marked has been enqueued before - refuse duplicates.
        if (task.isMarkedAdded2List()) {
            Util.w(TAG, "queue task: " + task + " has been marked");
            return;
        }
        synchronized (list) {
            task.markAdded2List();
            task.assembleDownloadTask();
            list.add(task);
            Util.d(TAG, "add list in all " + task + " " + list.size());
        }
    }
}
public class ChunkModule { /** * Test whether there are changes that require topic rewriting . */ private boolean hasChanges ( final Map < URI , URI > changeTable ) { } }
if ( changeTable . isEmpty ( ) ) { return false ; } for ( Map . Entry < URI , URI > e : changeTable . entrySet ( ) ) { if ( ! e . getKey ( ) . equals ( e . getValue ( ) ) ) { return true ; } } return false ;
public class SAXReader {
    /**
     * Read an XML document via a SAX handler. The streams are closed after
     * reading.
     *
     * @param aIS
     *        The input source to read from. Automatically closed upon success or
     *        error. May not be <code>null</code>.
     *        {@link com.helger.xml.sax.InputSourceFactory} may be used to create
     *        {@link InputSource} objects from different input types.
     * @param aSettings
     *        Reader settings. At least a content handler should be set. May be
     *        <code>null</code>.
     * @return {@link ESuccess#SUCCESS} if reading succeeded,
     *         {@link ESuccess#FAILURE} otherwise
     */
    @Nonnull
    public static ESuccess readXMLSAX(@WillClose @Nonnull final InputSource aIS,
                                      @Nonnull final ISAXReaderSettings aSettings) {
        ValueEnforcer.notNull(aIS, "InputStream");
        ValueEnforcer.notNull(aSettings, "Settings");

        try {
            // Either build a fresh parser (if the settings can't be applied to a
            // pooled one) or borrow a reusable instance from the pool.
            boolean bFromPool = false;
            org.xml.sax.XMLReader aParser;
            if (aSettings.requiresNewXMLParser()) {
                aParser = SAXReaderFactory.createXMLReader();
            } else {
                // use parser from pool
                aParser = s_aSAXPool.borrowObject();
                bFromPool = true;
            }

            try {
                final StopWatch aSW = StopWatch.createdStarted();
                // Apply settings
                aSettings.applyToSAXReader(aParser);
                // Start parsing
                aParser.parse(aIS);
                // Statistics
                s_aSaxSuccessCounterHdl.increment();
                s_aSaxTimerHdl.addTime(aSW.stopAndGetMillis());
                return ESuccess.SUCCESS;
            } finally {
                if (bFromPool) {
                    // Return parser to pool
                    s_aSAXPool.returnObject(aParser);
                }
            }
        } catch (final SAXParseException ex) {
            // Give a configured error handler first crack at the fatal error;
            // fall back to the generic exception callbacks.
            boolean bHandled = false;
            if (aSettings.getErrorHandler() != null)
                try {
                    aSettings.getErrorHandler().fatalError(ex);
                    bHandled = true;
                } catch (final SAXException ex2) {
                    // fall-through
                }
            if (!bHandled)
                aSettings.exceptionCallbacks().forEach(x -> x.onException(ex));
        } catch (final Exception ex) {
            aSettings.exceptionCallbacks().forEach(x -> x.onException(ex));
        } finally {
            // Close both byte stream and character stream, as we don't know which
            // one was used.
            StreamHelper.close(aIS.getByteStream());
            StreamHelper.close(aIS.getCharacterStream());
        }
        s_aSaxErrorCounterHdl.increment();
        return ESuccess.FAILURE;
    }
}
public class WebsocketClientTransport { /** * Creates a new instance * @ param client the { @ link HttpClient } to use * @ param path the path to request * @ return a new instance * @ throws NullPointerException if { @ code client } or { @ code path } is { @ code null } */ public static WebsocketClientTransport create ( HttpClient client , String path ) { } }
Objects . requireNonNull ( client , "client must not be null" ) ; Objects . requireNonNull ( path , "path must not be null" ) ; path = path . startsWith ( DEFAULT_PATH ) ? path : ( DEFAULT_PATH + path ) ; return new WebsocketClientTransport ( client , path ) ;
public class PreambleUtil { /** * TODO convert these to set / clear / any bits */ static void setEmpty ( final WritableMemory wmem ) { } }
int flags = wmem . getByte ( FLAGS_BYTE ) & 0XFF ; flags |= EMPTY_FLAG_MASK ; wmem . putByte ( FLAGS_BYTE , ( byte ) flags ) ;
public class OutlierAlgoPanel { /** * / * We need to fetch the right item from editComponents list , index needs to match GUI order */ public void setStreamValueAsCLIString ( String s ) { } }
streamOption . setValueViaCLIString ( s ) ; editComponents . get ( 0 ) . setEditState ( streamOption . getValueAsCLIString ( ) ) ;
public class ApplicationUserRepository { /** * region > newDelegateUser ( action ) */ @ Programmatic public ApplicationUser newDelegateUser ( final String username , final ApplicationRole initialRole , final Boolean enabled ) { } }
final ApplicationUser user = getApplicationUserFactory ( ) . newApplicationUser ( ) ; user . setUsername ( username ) ; user . setStatus ( ApplicationUserStatus . parse ( enabled ) ) ; user . setAccountType ( AccountType . DELEGATED ) ; if ( initialRole != null ) { user . addRole ( initialRole ) ; } container . persistIfNotAlready ( user ) ; return user ;
public class Iconics {
    /**
     * Creates a new SpannableStringBuilder and will iterate over the textSpanned
     * once and copy over all characters, it will also directly replace icon font
     * placeholders with the correct mapping. Afterwards it will apply the styles.
     *
     * @param ctx context used to resolve the icon fonts
     * @param textSpanned the text containing icon placeholders
     * @return the styled text
     */
    @NonNull
    public static Spanned style(@NonNull Context ctx, @NonNull Spanned textSpanned) {
        // Convenience overload: delegate with no explicit fonts or styles
        // (nulls presumably mean "use defaults" - see the full overload).
        return style(ctx, null, textSpanned, null, null);
    }
}
public class CmsFileBuffer { /** * Transfers data from this buffer to a byte array . < p > * @ param dest the target byte array * @ param length the number of bytes to transfer * @ param bufferOffset the start index for the target buffer * @ param fileOffset the start index for this instance * @ return the number of bytes read , or - 1 if we are at the end of the file */ public int read ( byte [ ] dest , int length , int bufferOffset , int fileOffset ) { } }
if ( fileOffset >= m_buffer . size ( ) ) { return - 1 ; } int readEnd = fileOffset + length ; if ( readEnd > m_buffer . size ( ) ) { length = length - ( readEnd - m_buffer . size ( ) ) ; } m_buffer . readBytes ( dest , fileOffset , bufferOffset , length ) ; return length ;
public class ViewRep { /** * Check if the view could apply to the provided table unchanged . */ public boolean compatibleWithTable ( VoltTable table ) { } }
String candidateName = getTableName ( table ) ; // table can ' t have the same name as the view if ( candidateName . equals ( viewName ) ) { return false ; } // view is for a different table if ( candidateName . equals ( srcTableName ) == false ) { return false ; } try { // ignore ret value here - just looking to not throw int groupColIndex = table . getColumnIndex ( groupColName ) ; VoltType groupColType = table . getColumnType ( groupColIndex ) ; if ( groupColType == VoltType . DECIMAL ) { // no longer a good type to group return false ; } // check the sum col is still value int sumColIndex = table . getColumnIndex ( sumColName ) ; VoltType sumColType = table . getColumnType ( sumColIndex ) ; if ( ( sumColType == VoltType . TINYINT ) || ( sumColType == VoltType . SMALLINT ) || ( sumColType == VoltType . INTEGER ) ) { return true ; } else { // no longer a good type to sum return false ; } } catch ( IllegalArgumentException e ) { // column index is bad return false ; }
public class MAPServiceMobilityImpl {
    /**
     * Location management services: decodes an incoming updateLocation request
     * component and dispatches it to every registered service listener.
     *
     * @param parameter     the raw ASN.1 component parameter (mandatory)
     * @param mapDialogImpl dialog on which the component arrived
     * @param invokeId      TCAP invoke id of the component
     * @throws MAPParsingComponentException if the parameter is missing or not a
     *         constructed universal SEQUENCE
     */
    private void updateLocationRequest(Parameter parameter, MAPDialogMobilityImpl mapDialogImpl, Long invokeId)
            throws MAPParsingComponentException {
        // The application-context version drives version-specific decoding.
        long version = mapDialogImpl.getApplicationContext().getApplicationContextVersion().getVersion();
        if (parameter == null)
            throw new MAPParsingComponentException(
                    "Error while decoding updateLocationRequest: Parameter is mandatory but not found",
                    MAPParsingComponentExceptionReason.MistypedParameter);
        // The payload must be a constructed, universal SEQUENCE.
        if (parameter.getTag() != Tag.SEQUENCE || parameter.getTagClass() != Tag.CLASS_UNIVERSAL
                || parameter.isPrimitive())
            throw new MAPParsingComponentException(
                    "Error while decoding updateLocationRequest: Bad tag or tagClass or parameter is primitive, received tag="
                            + parameter.getTag(),
                    MAPParsingComponentExceptionReason.MistypedParameter);

        byte[] buf = parameter.getData();
        AsnInputStream ais = new AsnInputStream(buf);
        UpdateLocationRequestImpl ind = new UpdateLocationRequestImpl(version);
        ind.decodeData(ais, buf.length);

        ind.setInvokeId(invokeId);
        ind.setMAPDialog(mapDialogImpl);

        // Deliver to all listeners; one listener's failure must not stop the
        // others, so each dispatch is individually guarded.
        for (MAPServiceListener serLis : this.serviceListeners) {
            try {
                serLis.onMAPMessage(ind);
                ((MAPServiceMobilityListener) serLis).onUpdateLocationRequest(ind);
            } catch (Exception e) {
                loger.error("Error processing updateLocationRequest: " + e.getMessage(), e);
            }
        }
    }
}
public class ForkJoinTask { /** * Possibly executes tasks until the pool hosting the current task * { @ linkplain ForkJoinPool # isQuiescent is quiescent } . This method may be of use in designs in * which many tasks are forked , but none are explicitly joined , instead executing them until all * are processed . */ public static void helpQuiesce ( ) { } }
Thread t ; if ( ( t = Thread . currentThread ( ) ) instanceof ForkJoinWorkerThread ) { ForkJoinWorkerThread wt = ( ForkJoinWorkerThread ) t ; wt . pool . helpQuiescePool ( wt . workQueue ) ; } else ForkJoinPool . quiesceCommonPool ( ) ;
public class TypesafeConfigUtils {
    /**
     * Get a configuration as list of durations (parses special strings like
     * "10s"). Returns an empty {@link Optional} if missing, wrong type or bad
     * value.
     *
     * @param config   the configuration to read from
     * @param path     the config path of the duration list
     * @param timeUnit the unit the durations are converted to
     * @return the duration list wrapped in an Optional, empty when absent
     */
    public static Optional<List<Long>> getDurationListOptional(Config config, String path, TimeUnit timeUnit) {
        // Delegate to the nullable variant and wrap its null result.
        return Optional.ofNullable(getDurationList(config, path, timeUnit));
    }
}
public class FacebookAuthorizer { public static boolean registerToken ( String token , String email , URL site ) { } }
List < String > key = new ArrayList < String > ( ) ; key . add ( email ) ; key . add ( site . toExternalForm ( ) . toLowerCase ( ) ) ; sRegisteredTokens . put ( key , token ) ; return true ;
public class DiagnosticsInner {
    /**
     * List Site Detector Responses (for a deployment slot).
     * <p>
     * Fetches a single page of detector responses; the REST response body is
     * converted into a {@code Page<DetectorResponseInner>}.
     *
     * @param resourceGroupName Name of the resource group to which the resource belongs.
     * @param siteName Site Name
     * @param slot Slot Name
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @return the PagedList&lt;DetectorResponseInner&gt; object wrapped in {@link ServiceResponse} if successful.
     */
    public Observable<ServiceResponse<Page<DetectorResponseInner>>> listSiteDetectorResponsesSlotSinglePageAsync(final String resourceGroupName, final String siteName, final String slot) {
        // Validate required call parameters and client configuration up front.
        if (resourceGroupName == null) {
            throw new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null.");
        }
        if (siteName == null) {
            throw new IllegalArgumentException("Parameter siteName is required and cannot be null.");
        }
        if (slot == null) {
            throw new IllegalArgumentException("Parameter slot is required and cannot be null.");
        }
        if (this.client.subscriptionId() == null) {
            throw new IllegalArgumentException("Parameter this.client.subscriptionId() is required and cannot be null.");
        }
        if (this.client.apiVersion() == null) {
            throw new IllegalArgumentException("Parameter this.client.apiVersion() is required and cannot be null.");
        }
        // Issue the REST call and map the raw response into a typed page.
        return service.listSiteDetectorResponsesSlot(resourceGroupName, siteName, slot, this.client.subscriptionId(), this.client.apiVersion(), this.client.acceptLanguage(), this.client.userAgent())
            .flatMap(new Func1<Response<ResponseBody>, Observable<ServiceResponse<Page<DetectorResponseInner>>>>() {
                @Override
                public Observable<ServiceResponse<Page<DetectorResponseInner>>> call(Response<ResponseBody> response) {
                    try {
                        // Deserialize; deserialization/service errors surface
                        // through the observable's error channel.
                        ServiceResponse<PageImpl<DetectorResponseInner>> result = listSiteDetectorResponsesSlotDelegate(response);
                        return Observable.just(new ServiceResponse<Page<DetectorResponseInner>>(result.body(), result.response()));
                    } catch (Throwable t) {
                        return Observable.error(t);
                    }
                }
            });
    }
}
public class CmsAliasEditorLockTable { /** * Tries to update or create an entry for the given user / site root combination . < p > * If this method succeeds , it will return null , but if another user has created an entry for the site root , * it will return that user . < p > * @ param cms the current CMS context * @ param siteRoot the site root * @ return null of the user who has already created an entry */ public CmsUser update ( CmsObject cms , String siteRoot ) { } }
CmsUser originalUser = m_map . getIfPresent ( siteRoot ) ; if ( ( originalUser == null ) || originalUser . equals ( cms . getRequestContext ( ) . getCurrentUser ( ) ) ) { m_map . put ( siteRoot , cms . getRequestContext ( ) . getCurrentUser ( ) ) ; return null ; } return originalUser ;
public class CFMappingDef {
    /**
     * Setup mapping with defaults for the given class. Does not parse all
     * annotations.
     * <p>
     * Walks up the superclass chain past anonymous classes until a class
     * carrying {@code @Entity} is found; that class becomes the "effective"
     * class for mapping purposes.
     *
     * @param realClass the concrete class to map
     * @throws HomMissingEntityAnnotationException if no {@code @Entity}
     *         annotation is found on the effective class
     */
    @SuppressWarnings("unchecked")
    public void setDefaults(Class<T> realClass) {
        this.realClass = realClass;
        this.keyDef = new KeyDefinition();

        // find the "effective" class - skipping up the hierarchy over inner classes
        effectiveClass = realClass;
        boolean entityFound;
        // The loop condition both tests for @Entity and records the result.
        while (!(entityFound = (null != effectiveClass.getAnnotation(Entity.class)))) {
            // TODO: BTB this might should be isSynthetic
            if (!effectiveClass.isAnonymousClass()) {
                // Named class without @Entity: stop searching.
                break;
            } else {
                // Anonymous class: keep walking up to its superclass.
                effectiveClass = (Class<T>) effectiveClass.getSuperclass();
            }
        }

        // if class is missing @Entity, then proceed no further
        if (!entityFound) {
            throw new HomMissingEntityAnnotationException("class, " + realClass.getName() + ", not annotated with @" + Entity.class.getSimpleName());
        }
    }
}
public class RecognizeCelebritiesResult { /** * Details about each unrecognized face in the image . * @ param unrecognizedFaces * Details about each unrecognized face in the image . */ public void setUnrecognizedFaces ( java . util . Collection < ComparedFace > unrecognizedFaces ) { } }
if ( unrecognizedFaces == null ) { this . unrecognizedFaces = null ; return ; } this . unrecognizedFaces = new java . util . ArrayList < ComparedFace > ( unrecognizedFaces ) ;
public class LastSplitsCallback { /** * Create log template for given stopwatch . * This method can be overridden to tune logging strategy . * By default , when { @ link # isLogEnabled ( ) } is set , last splits are logged at each buffer revolution . * @ param stopwatch Stopwatch * @ return Log template */ @ SuppressWarnings ( "UnusedParameters" ) protected LogTemplate < Split > createLogTemplate ( Stopwatch stopwatch ) { } }
LogTemplate < Split > logTemplate ; if ( logEnabled ) { logTemplate = everyNSplits ( enabledStopwatchLogTemplate , capacity ) ; } else { logTemplate = disabled ( ) ; } return logTemplate ;
public class CmsGalleryService { /** * Reads the folder filters for the current site . < p > * @ return the folder filters */ private Set < String > readFolderFilters ( ) { } }
JSONObject storedFilters = readUserFolderFilters ( ) ; Set < String > result = null ; if ( storedFilters . has ( getCmsObject ( ) . getRequestContext ( ) . getSiteRoot ( ) ) ) { try { org . opencms . json . JSONArray folders = storedFilters . getJSONArray ( getCmsObject ( ) . getRequestContext ( ) . getSiteRoot ( ) ) ; result = new HashSet < String > ( ) ; for ( int i = 0 ; i < folders . length ( ) ; i ++ ) { result . add ( folders . getString ( i ) ) ; } } catch ( JSONException e ) { LOG . error ( e . getLocalizedMessage ( ) , e ) ; } } return result ;
public class WaehrungenSingletonSpi { /** * Access all currencies matching the given query . * @ param query The currency query , not null . * @ return a set of all currencies found , never null . */ @ Override public Set < CurrencyUnit > getCurrencies ( CurrencyQuery query ) { } }
Set < CurrencyUnit > result = new HashSet < > ( ) ; for ( Locale locale : query . getCountries ( ) ) { try { result . add ( Waehrung . of ( Currency . getInstance ( locale ) ) ) ; } catch ( IllegalArgumentException ex ) { LOG . log ( Level . WARNING , "Cannot get currency for locale '" + locale + "':" , ex ) ; } } for ( String currencyCode : query . getCurrencyCodes ( ) ) { try { result . add ( Waehrung . of ( currencyCode ) ) ; } catch ( IllegalArgumentException ex ) { LOG . log ( Level . WARNING , "Cannot get currency '" + currencyCode + "':" , ex ) ; } } for ( CurrencyProviderSpi spi : Bootstrap . getServices ( CurrencyProviderSpi . class ) ) { result . addAll ( spi . getCurrencies ( query ) ) ; } return result ;
public class InternalTransaction { /** * Print a dump of the state . * @ param printWriter to be written to . */ public void print ( java . io . PrintWriter printWriter ) { } }
printWriter . println ( "State Dump for:" + getClass ( ) . getName ( ) + " state=" + state + "(int) " + stateNames [ state ] + "(String)" + " transactionLock=" + transactionLock + "(TransactionLock)" ) ; if ( logicalUnitOfWork == null ) printWriter . println ( "logicalUnitOfWork=null" ) ; else { printWriter . print ( "logialUnitOfWork.identifier=" + logicalUnitOfWork . identifier + "(long)" ) ; if ( logicalUnitOfWork . XID != null ) { printWriter . print ( " XID=" ) ; for ( int i = 0 ; i < logicalUnitOfWork . XID . length ; i ++ ) { printWriter . print ( Integer . toHexString ( logicalUnitOfWork . XID [ i ] ) ) ; } } printWriter . println ( ) ; } printWriter . println ( "logSpaceReserved=" + logSpaceReserved + "(long)" + " requiresPersistentCheckpoint=" + requiresPersistentCheckpoint + "(boolean)" + " transactionReference=" + transactionReference + "(TransactionReference)" ) ; printWriter . println ( "Included Objects..." ) ; for ( java . util . Iterator managedObjectIterator = includedManagedObjects . values ( ) . iterator ( ) ; managedObjectIterator . hasNext ( ) ; ) { ManagedObject managedObject = ( ManagedObject ) managedObjectIterator . next ( ) ; Token token = managedObject . getToken ( ) ; printWriter . print ( managedObject . toString ( ) ) ; Long logSequenceNumber = ( Long ) logSequenceNumbers . get ( token ) ; Long managedObjectSequenceNumber = ( Long ) managedObjectSequenceNumbers . get ( token ) ; printWriter . print ( " LSN=" + logSequenceNumber + " MSN=" + managedObjectSequenceNumber ) ; ObjectManagerByteArrayOutputStream serializedBytes = ( ObjectManagerByteArrayOutputStream ) loggedSerializedBytes . get ( token ) ; if ( serializedBytes == null ) printWriter . print ( " serializedBytes=null" ) ; else printWriter . print ( serializedBytes ) ; if ( callbackTokens . contains ( token ) ) printWriter . println ( " Callback" ) ; else printWriter . println ( ) ; } // for . . . includedMansagedObjects .
public class MapElementConstants { /** * Replies the default radius for map elements . * @ return the default radius for map elements . */ @ Pure public static double getPreferredRadius ( ) { } }
final Preferences prefs = Preferences . userNodeForPackage ( MapElementConstants . class ) ; if ( prefs != null ) { return prefs . getDouble ( "RADIUS" , DEFAULT_RADIUS ) ; // $ NON - NLS - 1 $ } return DEFAULT_RADIUS ;
public class CommerceNotificationQueueEntryLocalServiceBaseImpl {

    /**
     * Returns a range of all the commerce notification queue entries.
     *
     * <p>Useful when paginating results. Returns a maximum of <code>end - start</code>
     * instances. <code>start</code> and <code>end</code> are not primary keys but
     * indexes in the result set, so <code>0</code> refers to the first result.
     * Setting both to {@link com.liferay.portal.kernel.dao.orm.QueryUtil#ALL_POS}
     * returns the full result set.</p>
     *
     * @param start the lower bound of the range of commerce notification queue entries
     * @param end the upper bound of the range of commerce notification queue entries (not inclusive)
     * @return the range of commerce notification queue entries
     */
    @Override
    public List<CommerceNotificationQueueEntry> getCommerceNotificationQueueEntries(int start, int end) {
        // straight delegation to the persistence layer
        return commerceNotificationQueueEntryPersistence.findAll(start, end);
    }
}
public class WaitingDataQueueSynchronizationPoint {

    /**
     * Signal that no more data will be queued, so any waiting thread can be unblocked.
     *
     * <p>Listeners are only fired here if no data is still pending; otherwise the
     * consumer draining {@code waitingData} is presumably responsible for firing them
     * later — TODO confirm against the dequeue path.</p>
     */
    public void endOfData() {
        ArrayList<Runnable> list = null;
        synchronized (this) {
            end = true;
            if (waitingData.isEmpty()) {
                // take ownership of the listener list under the lock so each
                // listener runs at most once
                list = listeners;
                listeners = null;
            }
        }
        // run listeners outside the lock to keep the critical section short
        if (list != null) {
            for (Runnable listener : list)
                listener.run();
        }
        // notify after listeners
        synchronized (this) {
            this.notifyAll();
        }
    }
}
public class SourceTableFeatureDetails { /** * Represents the LSI properties for the table when the backup was created . It includes the IndexName , KeySchema and * Projection for the LSIs on the table at the time of backup . * @ param localSecondaryIndexes * Represents the LSI properties for the table when the backup was created . It includes the IndexName , * KeySchema and Projection for the LSIs on the table at the time of backup . */ public void setLocalSecondaryIndexes ( java . util . Collection < LocalSecondaryIndexInfo > localSecondaryIndexes ) { } }
if ( localSecondaryIndexes == null ) { this . localSecondaryIndexes = null ; return ; } this . localSecondaryIndexes = new java . util . ArrayList < LocalSecondaryIndexInfo > ( localSecondaryIndexes ) ;
public class VarRef { /** * Produces a RSL representation of this variable reference . * @ param buf buffer to add the RSL representation to . * @ param explicitConcat if true explicit concatination will * be used in RSL strings . */ public void toRSL ( StringBuffer buf , boolean explicitConcat ) { } }
buf . append ( "$(" ) ; buf . append ( value ) ; if ( defValue != null ) { buf . append ( " " ) ; defValue . toRSL ( buf , explicitConcat ) ; } buf . append ( ")" ) ; if ( concatValue == null ) return ; if ( explicitConcat ) buf . append ( " # " ) ; concatValue . toRSL ( buf , explicitConcat ) ;
public class SipServletMessageImpl { /** * ( non - Javadoc ) * @ see javax . servlet . sip . SipServletMessage # getAttribute ( java . lang . String ) */ public Object getAttribute ( String name ) { } }
if ( logger . isDebugEnabled ( ) ) { logger . debug ( "getAttribute - name=" + sessionKey ) ; } if ( name == null ) throw new NullPointerException ( "Attribute name can not be null." ) ; return this . getAttributeMap ( ) . get ( name ) ;
public class DomUtilImpl {

    /**
     * This method gets the singleton instance of this {@link DomUtilImpl}.<br>
     * <b>ATTENTION:</b><br>
     * Please prefer dependency-injection instead of using this method.
     *
     * <p>NOTE(review): this is double-checked locking; it is only safe if the
     * {@code instance} field is declared {@code volatile} — confirm the field
     * declaration, which is outside this block.</p>
     *
     * @return the singleton instance.
     */
    public static DomUtil getInstance() {
        if (instance == null) {
            synchronized (DomUtilImpl.class) {
                if (instance == null) {
                    // build and initialize fully before publishing via the field
                    DomUtilImpl util = new DomUtilImpl();
                    util.initialize();
                    instance = util;
                }
            }
        }
        return instance;
    }
}
public class DateSpinner { /** * Sets the maximum allowed date . * Spinner items and dates in the date picker after the given date will get disabled . * @ param maxDate The maximum date , or null to clear the previous max date . */ public void setMaxDate ( @ Nullable Calendar maxDate ) { } }
this . maxDate = maxDate ; // update the date picker ( even if it is not used right now ) if ( maxDate == null ) datePickerDialog . setMaxDate ( null ) ; else if ( minDate != null && compareCalendarDates ( minDate , maxDate ) > 0 ) throw new IllegalArgumentException ( "Maximum date must be after minimum date!" ) ; else datePickerDialog . setMaxDate ( new CalendarDay ( maxDate ) ) ; updateEnabledItems ( ) ;
public class CmsNewDialog { /** * Gets the initial value for the ' default location ' option . < p > * @ param folderResource the current folder * @ return the initial value for the option */ private Boolean getInitialValueForUseDefaultLocationOption ( CmsResource folderResource ) { } }
String rootPath = folderResource . getRootPath ( ) ; return Boolean . valueOf ( OpenCms . getSiteManager ( ) . getSiteForRootPath ( rootPath ) != null ) ;
public class IOManager { /** * Creates a new { @ link FileIOChannel . ID } in one of the temp directories . Multiple * invocations of this method spread the channels evenly across the different directories . * @ return A channel to a temporary directory . */ public FileIOChannel . ID createChannel ( ) { } }
final int num = getNextPathNum ( ) ; return new FileIOChannel . ID ( this . paths [ num ] , num , this . random ) ;
public class CassandraVersioner { /** * Update current database version to the migration version . This is executed after migration success . * @ param migration Migration that updated the database version * @ return Success of version update */ public boolean updateVersion ( final Migration migration ) { } }
final Statement insert = QueryBuilder . insertInto ( SCHEMA_VERSION_CF ) . value ( TYPE , migration . getType ( ) . name ( ) ) . value ( VERSION , migration . getVersion ( ) ) . value ( TIMESTAMP , System . currentTimeMillis ( ) ) . value ( DESCRIPTION , migration . getDescription ( ) ) . setConsistencyLevel ( ConsistencyLevel . ALL ) ; try { session . execute ( insert ) ; return true ; } catch ( final Exception e ) { LOGGER . error ( "Failed to execute update version statement" , e ) ; return false ; }
public class WorkbookCreationHelper {

    /**
     * Write the current workbook to an output stream.
     *
     * @param aOS The output stream to write to. May not be <code>null</code>.
     *        Is automatically closed independent of the success state.
     * @return {@link ESuccess}
     */
    @Nonnull
    public ESuccess writeTo(@Nonnull @WillClose final OutputStream aOS) {
        try {
            ValueEnforcer.notNull(aOS, "OutputStream");
            if (m_nCreatedCellStyles > 0 && LOGGER.isDebugEnabled())
                LOGGER.debug("Writing Excel workbook with " + m_nCreatedCellStyles + " different cell styles");
            m_aWB.write(aOS);
            return ESuccess.SUCCESS;
        } catch (final IOException ex) {
            // EOF-style exceptions (client disconnected) are expected and not logged
            if (!StreamHelper.isKnownEOFException(ex))
                LOGGER.error("Failed to write Excel workbook to output stream " + aOS, ex);
            return ESuccess.FAILURE;
        } finally {
            // honor the @WillClose contract: the stream is always closed
            StreamHelper.close(aOS);
        }
    }
}
public class SeleniumHelper { /** * Sets how long to wait before deciding an element does not exists . * @ param implicitWait time in milliseconds to wait . */ public void setImplicitlyWait ( int implicitWait ) { } }
try { driver ( ) . manage ( ) . timeouts ( ) . implicitlyWait ( implicitWait , TimeUnit . MILLISECONDS ) ; } catch ( Exception e ) { // https : / / code . google . com / p / selenium / issues / detail ? id = 6015 System . err . println ( "Unable to set implicit timeout (known issue for Safari): " + e . getMessage ( ) ) ; }
public class AppListenerUtil { /** * translate string definition of script protect to int definition * @ param strScriptProtect * @ return */ public static int translateScriptProtect ( String strScriptProtect ) { } }
strScriptProtect = strScriptProtect . toLowerCase ( ) . trim ( ) ; if ( "none" . equals ( strScriptProtect ) ) return ApplicationContext . SCRIPT_PROTECT_NONE ; if ( "no" . equals ( strScriptProtect ) ) return ApplicationContext . SCRIPT_PROTECT_NONE ; if ( "false" . equals ( strScriptProtect ) ) return ApplicationContext . SCRIPT_PROTECT_NONE ; if ( "all" . equals ( strScriptProtect ) ) return ApplicationContext . SCRIPT_PROTECT_ALL ; if ( "true" . equals ( strScriptProtect ) ) return ApplicationContext . SCRIPT_PROTECT_ALL ; if ( "yes" . equals ( strScriptProtect ) ) return ApplicationContext . SCRIPT_PROTECT_ALL ; String [ ] arr = ListUtil . listToStringArray ( strScriptProtect , ',' ) ; String item ; int scriptProtect = 0 ; for ( int i = 0 ; i < arr . length ; i ++ ) { item = arr [ i ] . trim ( ) ; if ( "cgi" . equals ( item ) && ( scriptProtect & ApplicationContext . SCRIPT_PROTECT_CGI ) == 0 ) scriptProtect += ApplicationContext . SCRIPT_PROTECT_CGI ; else if ( "cookie" . equals ( item ) && ( scriptProtect & ApplicationContext . SCRIPT_PROTECT_COOKIE ) == 0 ) scriptProtect += ApplicationContext . SCRIPT_PROTECT_COOKIE ; else if ( "form" . equals ( item ) && ( scriptProtect & ApplicationContext . SCRIPT_PROTECT_FORM ) == 0 ) scriptProtect += ApplicationContext . SCRIPT_PROTECT_FORM ; else if ( "url" . equals ( item ) && ( scriptProtect & ApplicationContext . SCRIPT_PROTECT_URL ) == 0 ) scriptProtect += ApplicationContext . SCRIPT_PROTECT_URL ; } return scriptProtect ;
public class ChannelFrameworkImpl {

    /**
     * Extract a list of runtime chain data objects (including exclusively child channel
     * data lists) that are using the input parent channel.
     *
     * <p>NOTE(review): each child is assumed to have a container registered in
     * {@code channelRunningMap}; a missing entry would NPE below — confirm that
     * children are always registered before this is called.</p>
     *
     * @param parent The parent channel data object
     * @return an arraylist of the runtime chains that include the channel
     */
    public synchronized List<ChainData> getRunningChains(ChannelDataImpl parent) {
        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) {
            Tr.entry(tc, "getRunningChains");
        }
        ChannelContainer channelContainer = null;
        List<ChainData> chainDataList = new ArrayList<ChainData>();
        // Iterate the children - they are in the runtime.
        Iterator<ChildChannelDataImpl> children = parent.children();
        while (children.hasNext()) {
            // Get the runtime channel container for this child.
            channelContainer = this.channelRunningMap.get(children.next().getName());
            // collect the chain data of every chain the container currently runs
            for (Chain chain : channelContainer.getChainMap().values()) {
                chainDataList.add(chain.getChainData());
            }
        }
        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) {
            Tr.exit(tc, "getRunningChains: " + chainDataList.size());
        }
        return chainDataList;
    }
}
public class ObjectUtil {

    /**
     * Looks up a public method of the given class by name and parameter types.
     *
     * @param c the class to search
     * @param methodName the method name
     * @param paras the parameter types of the method
     * @return the matching method (made accessible), or null if no such public method
     *         exists or the security manager denies access
     */
    public static Method getGetMethod(Class c, String methodName, Class... paras) {
        try {
            Method m = c.getMethod(methodName, paras);
            m.setAccessible(true);
            return m;
        } catch (SecurityException | NoSuchMethodException e) {
            // callers treat null as "method absent"; merged the two identical
            // catch blocks into a single multi-catch
            return null;
        }
    }
}
public class SingularValueDecomposition { /** * Return the diagonal matrix of singular values * @ return S */ public double [ ] [ ] getS ( ) { } }
double [ ] [ ] S = new double [ n ] [ n ] ; for ( int i = 0 ; i < n ; i ++ ) { S [ i ] [ i ] = this . s [ i ] ; } return S ;
public class DigitalOceanClient { @ Override public Images getAvailableImages ( Integer pageNo , Integer perPage ) throws DigitalOceanException , RequestUnsuccessfulException { } }
validatePageNo ( pageNo ) ; return ( Images ) perform ( new ApiRequest ( ApiAction . AVAILABLE_IMAGES , pageNo , perPage ) ) . getData ( ) ;
public class AbstractJobVertex { /** * Performs task specific checks if the * respective task has been configured properly . * @ param invokable * an instance of the task this vertex represents * @ throws IllegalConfigurationException * thrown if the respective tasks is not configured properly */ public void checkConfiguration ( final AbstractInvokable invokable ) throws IllegalConfigurationException { } }
if ( invokable == null ) { throw new IllegalArgumentException ( "Argument invokable is null" ) ; } // see if the task itself has a valid configuration // because this is user code running on the master , we embed it in a catch - all block try { invokable . checkConfiguration ( ) ; } catch ( IllegalConfigurationException icex ) { throw icex ; // simply forward } catch ( Throwable t ) { throw new IllegalConfigurationException ( "Checking the invokable's configuration caused an error: " + StringUtils . stringifyException ( t ) ) ; }
public class Control {

    /**
     * This method retrieves the current long value of this control.
     * Some controls may be write-only and getting their value does not make sense.
     * Invoking this method on this kind of controls will trigger a ControlException.
     *
     * @return the current value of this control
     * @throws ControlException if the value cannot be retrieved
     * @throws UnsupportedMethod if this control is not of type
     *         {@link V4L4JConstants#CTRL_TYPE_LONG}.
     * @throws StateException if this control has been released and must not be used anymore.
     */
    public long getLongValue() throws ControlException {
        long v;
        if (type != V4L4JConstants.CTRL_TYPE_LONG)
            throw new UnsupportedMethod("This control is not a long control");
        // state.get()/state.put() bracket the native call so the control cannot be
        // released while it is in use (get throws StateException when released)
        state.get();
        try {
            v = doGetLongValue(v4l4jObject, id);
        } finally {
            state.put();
        }
        return v;
    }
}
public class Task { /** * Retrieve a baseline value . * @ param baselineNumber baseline index ( 1-10) * @ return baseline value */ public Duration getBaselineEstimatedDuration ( int baselineNumber ) { } }
Object result = getCachedValue ( selectField ( TaskFieldLists . BASELINE_ESTIMATED_DURATIONS , baselineNumber ) ) ; if ( ! ( result instanceof Duration ) ) { result = null ; } return ( Duration ) result ;
public class FileInfo { /** * Handy factory method for constructing a FileInfo from a GoogleCloudStorageItemInfo while * potentially returning a singleton instead of really constructing an object for cases like ROOT . */ public static FileInfo fromItemInfo ( PathCodec pathCodec , GoogleCloudStorageItemInfo itemInfo ) { } }
if ( itemInfo . isRoot ( ) ) { return ROOT_INFO ; } URI path = pathCodec . getPath ( itemInfo . getBucketName ( ) , itemInfo . getObjectName ( ) , true ) ; return new FileInfo ( path , itemInfo ) ;
public class WildcardQuery {

    /**
     * {@inheritDoc}
     *
     * <p>Delegates to the underlying multi-term query when one has been built;
     * otherwise no terms are contributed.</p>
     */
    @Override
    public void extractTerms(Set<Term> terms) {
        if (multiTermQuery != null) {
            multiTermQuery.extractTerms(terms);
        }
    }
}
public class RocksDBStateBackend {

    /**
     * Creates a copy of this state backend that uses the values defined in the
     * configuration for fields that were not yet specified in this state backend.
     *
     * @param config The configuration.
     * @param classLoader The class loader.
     * @return The re-configured variant of the state backend
     */
    @Override
    public RocksDBStateBackend configure(Configuration config, ClassLoader classLoader) {
        // the copy constructor merges this backend's explicit settings with the config
        return new RocksDBStateBackend(this, config, classLoader);
    }
}
public class AmazonIdentityManagementClient {

    /**
     * Gets a list of all of the context keys referenced in all the IAM policies that are
     * attached to the specified IAM entity. The entity can be an IAM user, group, or role.
     * If you specify a user, then the request also includes all of the policies attached
     * to groups that the user is a member of.
     *
     * <p>You can optionally include a list of one or more additional policies, specified
     * as strings. If you want to include <i>only</i> a list of policies by string, use
     * <a>GetContextKeysForCustomPolicy</a> instead.</p>
     *
     * <p><b>Note:</b> This API discloses information about the permissions granted to
     * other users. If you do not want users to see other users' permissions, then
     * consider allowing them to use <a>GetContextKeysForCustomPolicy</a> instead.</p>
     *
     * <p>Context keys are variables maintained by AWS and its services that provide
     * details about the context of an API query request.</p>
     *
     * @param request the GetContextKeysForPrincipalPolicy request
     * @return Result of the GetContextKeysForPrincipalPolicy operation returned by the service.
     * @throws NoSuchEntityException if the request referenced a resource entity that does not exist.
     * @throws InvalidInputException if an invalid or out-of-range value was supplied for an input parameter.
     * @sample AmazonIdentityManagement.GetContextKeysForPrincipalPolicy
     * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetContextKeysForPrincipalPolicy"
     *      target="_top">AWS API Documentation</a>
     */
    @Override
    public GetContextKeysForPrincipalPolicyResult getContextKeysForPrincipalPolicy(GetContextKeysForPrincipalPolicyRequest request) {
        // apply client-side request handlers before dispatching to the service
        request = beforeClientExecution(request);
        return executeGetContextKeysForPrincipalPolicy(request);
    }
}
public class Query { /** * Set the raw response received from the HTTP call . Contents are passed on to sub - classes for mapping to DTOs . * @ param httpResponseCode The HTTP response code received * @ param httpResponseStatusString The text associated with the HTTP response code * @ param freesoundResponse The response received from the HTTP call * @ return Results of the query */ public Response < R > processResponse ( final int httpResponseCode , final String httpResponseStatusString , final S freesoundResponse ) { } }
final Response < R > response = new Response < > ( httpResponseCode , httpResponseStatusString ) ; if ( response . isErrorResponse ( ) ) { response . setErrorDetails ( extractErrorMessage ( freesoundResponse ) ) ; } else { response . setResults ( resultsMapper . map ( freesoundResponse ) ) ; } return response ;
public class OptimizerNode { /** * Adds a new outgoing connection to this node . * @ param connection * The connection to add . */ public void addOutgoingConnection ( DagConnection connection ) { } }
if ( this . outgoingConnections == null ) { this . outgoingConnections = new ArrayList < DagConnection > ( ) ; } else { if ( this . outgoingConnections . size ( ) == 64 ) { throw new CompilerException ( "Cannot currently handle nodes with more than 64 outputs." ) ; } } this . outgoingConnections . add ( connection ) ;
public class Tile {

    /**
     * Sets the sections to the given list of TimeSection objects. The sections will be
     * used to colorize areas with a special meaning. Sections in the Medusa library
     * usually are less eye-catching than Areas.
     *
     * @param SECTIONS the new time sections (replaces all existing ones)
     */
    public void setTimeSections(final List<TimeSection> SECTIONS) {
        getTimeSections().setAll(SECTIONS);
        // keep the sections sorted so downstream drawing can rely on their order
        getTimeSections().sort(new TimeSectionComparator());
        fireTileEvent(SECTION_EVENT);
    }
}
public class SimonConsolePlugin {

    /**
     * Add a resource to this plugin.
     *
     * @param path Resource path
     * @param type Resource type
     */
    public final void addResource(String path, HtmlResourceType type) {
        resources.add(new HtmlResource(path, type));
    }
}
public class SamplesBaseActivity { /** * Sets the scale bar from preferences . */ protected void setMapScaleBar ( ) { } }
String value = this . sharedPreferences . getString ( SETTING_SCALEBAR , SETTING_SCALEBAR_BOTH ) ; if ( SETTING_SCALEBAR_NONE . equals ( value ) ) { AndroidUtil . setMapScaleBar ( this . mapView , null , null ) ; } else { if ( SETTING_SCALEBAR_BOTH . equals ( value ) ) { AndroidUtil . setMapScaleBar ( this . mapView , MetricUnitAdapter . INSTANCE , ImperialUnitAdapter . INSTANCE ) ; } else if ( SETTING_SCALEBAR_METRIC . equals ( value ) ) { AndroidUtil . setMapScaleBar ( this . mapView , MetricUnitAdapter . INSTANCE , null ) ; } else if ( SETTING_SCALEBAR_IMPERIAL . equals ( value ) ) { AndroidUtil . setMapScaleBar ( this . mapView , ImperialUnitAdapter . INSTANCE , null ) ; } else if ( SETTING_SCALEBAR_NAUTICAL . equals ( value ) ) { AndroidUtil . setMapScaleBar ( this . mapView , NauticalUnitAdapter . INSTANCE , null ) ; } }
public class ConcurrentMultiCache { /** * Provides the values for all of the unique identifiers managed by the cache . * @ param item the item whose key values are being sought * @ return a mapping of key names to item values */ public HashMap < String , Object > getKeys ( T item ) { } }
HashMap < String , Object > keys = new HashMap < String , Object > ( ) ; for ( String key : caches . keySet ( ) ) { keys . put ( key , getValue ( key , item ) ) ; } return keys ;
public class GinFactoryModuleBuilder {

    /**
     * See the factory configuration examples at {@link GinFactoryModuleBuilder}.
     *
     * @param source the key the factory method returns
     * @param target the concrete type to instantiate for that key
     * @return this builder, for chaining
     */
    public <T> GinFactoryModuleBuilder implement(Key<T> source, TypeLiteral<? extends T> target) {
        bindings.addBinding(source, target);
        return this;
    }
}
public class StreamingMergeSortedGrouper {

    /**
     * Wait for {@link #nextReadIndex} to be moved if necessary and move
     * {@link #curWriteIndex}.
     *
     * <p>Spin-waits (with {@code Thread.yield()} after a short threshold) instead of
     * parking, bounded by the query timeout. A {@link TimeoutException} wrapped in a
     * {@link RuntimeException} is thrown when the deadline passes.</p>
     */
    private void increaseWriteIndex() {
        final long startAtNs = System.nanoTime();
        final long queryTimeoutAtNs = getQueryTimeoutAtNs(startAtNs);
        final long spinTimeoutAtNs = startAtNs + SPIN_FOR_TIMEOUT_THRESHOLD_NS;
        long timeoutNs = queryTimeoutAtNs - startAtNs;
        long spinTimeoutNs = SPIN_FOR_TIMEOUT_THRESHOLD_NS;
        // In the below, we check that the array is full and wait for at least one slot to
        // become available. nextReadIndex is a volatile variable and the changes on it are
        // continuously checked until they are seen in the while loop. See:
        // * http://docs.oracle.com/javase/specs/jls/se7/html/jls-8.html#jls-8.3.1.4
        // * http://docs.oracle.com/javase/specs/jls/se7/html/jls-17.html#jls-17.4.5
        // * https://stackoverflow.com/questions/11761552/detailed-semantics-of-volatile-regarding-timeliness-of-visibility
        if (curWriteIndex == maxNumSlots - 1) {
            // We additionally check that nextReadIndex is -1 here because the writing
            // thread should wait for the reading thread to start reading only when the
            // writing thread tries to overwrite the first slot for the first time.
            // The below condition is checked in a while loop instead of using a lock to
            // avoid frequent thread park.
            while ((nextReadIndex == -1 || nextReadIndex == 0) && !Thread.currentThread().isInterrupted()) {
                if (timeoutNs <= 0L) {
                    throw new RuntimeException(new TimeoutException());
                }
                // Thread.yield() should not be called from the very beginning
                if (spinTimeoutNs <= 0L) {
                    Thread.yield();
                }
                long now = System.nanoTime();
                timeoutNs = queryTimeoutAtNs - now;
                spinTimeoutNs = spinTimeoutAtNs - now;
            }
            // Changes on nextReadIndex happens-before changing curWriteIndex.
            curWriteIndex = 0;
        } else {
            final int nextWriteIndex = curWriteIndex + 1;
            // The below condition is checked in a while loop instead of using a lock to
            // avoid frequent thread park.
            while ((nextWriteIndex == nextReadIndex) && !Thread.currentThread().isInterrupted()) {
                if (timeoutNs <= 0L) {
                    throw new RuntimeException(new TimeoutException());
                }
                // Thread.yield() should not be called from the very beginning
                if (spinTimeoutNs <= 0L) {
                    Thread.yield();
                }
                long now = System.nanoTime();
                timeoutNs = queryTimeoutAtNs - now;
                spinTimeoutNs = spinTimeoutAtNs - now;
            }
            // Changes on nextReadIndex happens-before changing curWriteIndex.
            curWriteIndex = nextWriteIndex;
        }
    }
}
public class Distance { /** * Gets the Cosine distance between two points . * @ param p A point in space . * @ param q A point in space . * @ return The Cosine distance between x and y . */ public static double Cosine ( double [ ] p , double [ ] q ) { } }
double sumProduct = 0 ; double sumP = 0 , sumQ = 0 ; for ( int i = 0 ; i < p . length ; i ++ ) { sumProduct += p [ i ] * q [ i ] ; sumP += Math . pow ( Math . abs ( p [ i ] ) , 2 ) ; sumQ += Math . pow ( Math . abs ( q [ i ] ) , 2 ) ; } sumP = Math . sqrt ( sumP ) ; sumQ = Math . sqrt ( sumQ ) ; double result = 1 - ( sumProduct / ( sumP * sumQ ) ) ; return result ;
public class CalibratedCurves {

    /**
     * Add a calibration product to the set of calibration instruments.
     *
     * <p>Appends the spec's product and symbol, then replaces the referenced
     * calibration curve with a clone that carries one additional calibration point
     * (initial guess 1.0, or 0.1 for forward curves), and re-registers the new curve
     * with the model and the set of objects to calibrate.</p>
     *
     * @param calibrationSpec The spec of the calibration product.
     * @return the calibration spec's type string
     * @throws CloneNotSupportedException Thrown if a curve could not be cloned / created.
     */
    private String add(CalibrationSpec calibrationSpec) throws CloneNotSupportedException {
        calibrationSpecs.add(calibrationSpec);
        /*
         * Add one point to the calibration curve and one new objective function
         */
        // Create calibration product (will also create the curve if necessary)
        calibrationProducts.add(getCalibrationProductForSpec(calibrationSpec));
        calibrationProductsSymbols.add(calibrationSpec.symbol);
        // Create parameter to calibrate
        // Fetch old curve
        Curve calibrationCurveOld = model.getCurve(calibrationSpec.calibrationCurveName);
        if (calibrationCurveOld == null) {
            throw new IllegalArgumentException("Calibration curve " + calibrationSpec.calibrationCurveName + " does not exist. This should not happen. Possible reason: The given calibration product does not depend on the given calibration curve.");
        }
        // Remove old curve
        objectsToCalibrate.remove(calibrationCurveOld);
        // Create and add new curve
        Curve calibrationCurve = null;
        if (DiscountCurveInterface.class.isInstance(calibrationCurveOld)) {
            @SuppressWarnings("unused")
            double paymentTime = calibrationSpec.swapTenorDefinitionReceiver.getPayment(calibrationSpec.swapTenorDefinitionReceiver.getNumberOfPeriods() - 1);
            // Build new curve with one additional point
            calibrationCurve = calibrationCurveOld.getCloneBuilder().addPoint(calibrationSpec.calibrationTime, model.getRandomVariableForConstant(1.0), true).build();
        } else if (ForwardCurveInterface.class.isInstance(calibrationCurveOld)) {
            // Build new curve with one additional point
            // (forward curves get the smaller initial guess 0.1)
            calibrationCurve = calibrationCurveOld.getCloneBuilder().addPoint(calibrationSpec.calibrationTime, model.getRandomVariableForConstant(0.1), true).build();
        } else {
            // Build new curve with one additional point
            calibrationCurve = calibrationCurveOld.getCloneBuilder().addPoint(calibrationSpec.calibrationTime, model.getRandomVariableForConstant(1.0), true).build();
        }
        model = model.addCurves(calibrationCurve);
        objectsToCalibrate.add(calibrationCurve);
        return calibrationSpec.type;
    }
}
public class CmsUsersCsvDownloadDialog { /** * Returns the export options data . < p > * @ return the export options data */ protected Map < String , List < String > > getData ( ) { } }
return ( Map < String , List < String > > ) ( ( Map < String , Object > ) getSettings ( ) . getDialogObject ( ) ) . get ( CmsUserDataExportDialog . class . getName ( ) ) ;
public class CastedExpressionTypeComputationState { /** * Replies if the linking to the cast operator functions is enabled . * @ param cast the cast operator . * @ return { @ code true } if the linking is enabled . */ public boolean isCastOperatorLinkingEnabled ( SarlCastedExpression cast ) { } }
final LightweightTypeReference sourceType = getStackedResolvedTypes ( ) . getReturnType ( cast . getTarget ( ) ) ; final LightweightTypeReference destinationType = getReferenceOwner ( ) . toLightweightTypeReference ( cast . getType ( ) ) ; if ( sourceType . isPrimitiveVoid ( ) || destinationType . isPrimitiveVoid ( ) ) { return false ; } if ( sourceType . isPrimitive ( ) && destinationType . isPrimitive ( ) ) { return false ; } return ! sourceType . isSubtypeOf ( destinationType . getType ( ) ) ;
public class FastMath {

    /**
     * Compute the hyperbolic cosine of a number.
     *
     * @param x number on which evaluation is done
     * @return hyperbolic cosine of x
     */
    public static double cosh(double x) {
        // NaN propagates: (x != x) is true only for NaN.
        if (x != x) {
            return x;
        }

        // cosh[z] = (exp(z) + exp(-z)) / 2
        // for numbers with magnitude 20 or so,
        // exp(-z) can be ignored in comparison with exp(z)

        if (x > 20) {
            if (x >= LOG_MAX_VALUE) {
                // Avoid overflow (MATH-905): exp(x) would overflow, so compute
                // 0.5 * exp(x) as (0.5 * exp(x/2)) * exp(x/2) instead.
                final double t = exp(0.5 * x);
                return (0.5 * t) * t;
            } else {
                return 0.5 * exp(x);
            }
        } else if (x < -20) {
            if (x <= -LOG_MAX_VALUE) {
                // Avoid overflow (MATH-905). Same trick as above, mirrored for -x.
                final double t = exp(-0.5 * x);
                return (0.5 * t) * t;
            } else {
                return 0.5 * exp(-x);
            }
        }

        // Moderate |x|: compute y = exp(|x|) in extended precision. hiPrec holds
        // a high/low ("double-double") pair so that y is accurate beyond 53 bits.
        final double hiPrec[] = new double[2];
        if (x < 0.0) {
            x = -x;
        }
        exp(x, 0.0, hiPrec);

        // ya + yb ~= exp(x), with yb the low-order correction term.
        double ya = hiPrec[0] + hiPrec[1];
        double yb = -(ya - hiPrec[0] - hiPrec[1]);

        // Split ya into high (yaa) and low (yab) halves (Dekker splitting via
        // the HEX_40000000 constant) so products below are exact.
        double temp = ya * HEX_40000000;
        double yaa = ya + temp - temp;
        double yab = ya - yaa;

        // recip = 1/y, also split into high (recipa) and low (recipb) parts.
        double recip = 1.0 / ya;
        temp = recip * HEX_40000000;
        double recipa = recip + temp - temp;
        double recipb = recip - recipa;

        // Correct for rounding in division
        recipb += (1.0 - yaa * recipa - yaa * recipb - yab * recipa - yab * recipb) * recip;
        // Account for yb
        recipb += -yb * recip * recip;

        // y = y + 1/y, accumulated in double-double form (ya high, yb low).
        temp = ya + recipa;
        yb += -(temp - ya - recipa);
        ya = temp;
        temp = ya + recipb;
        yb += -(temp - ya - recipb);
        ya = temp;

        // cosh(x) = (y + 1/y) / 2; collapse the double-double and halve.
        double result = ya + yb;
        result *= 0.5;
        return result;
    }
}
public class Zendesk { /** * This API will be removed in a future release . The API endpoint does not exist . * Instead , the { @ link # createGroup ( Group ) createGroup } method should be called for each Group * @ see < a href = " https : / / github . com / cloudbees / zendesk - java - client / issues / 111 " > Zendesk Java Client Issue # 111 < / a > */ @ Deprecated public List < Group > createGroups ( Group ... groups ) { } }
return createGroups ( Arrays . asList ( groups ) ) ;
public class ReconfigurableClient { /** * Changes the internal messaging client . * @ param factoryName the factory name ( see { @ link MessagingConstants } ) */ public void switchMessagingType ( String factoryName ) { } }
// Create a new client this . logger . fine ( "The messaging is requested to switch its type to " + factoryName + "." ) ; JmxWrapperForMessagingClient newMessagingClient = null ; try { IMessagingClient rawClient = createMessagingClient ( factoryName ) ; if ( rawClient != null ) { newMessagingClient = new JmxWrapperForMessagingClient ( rawClient ) ; newMessagingClient . setMessageQueue ( this . messageProcessor . getMessageQueue ( ) ) ; openConnection ( newMessagingClient ) ; } } catch ( Exception e ) { this . logger . warning ( "An error occurred while creating a new messaging client. " + e . getMessage ( ) ) ; Utils . logException ( this . logger , e ) ; // #594 : print a message to be visible in a console StringBuilder sb = new StringBuilder ( ) ; sb . append ( "\n\n**** WARNING ****\n" ) ; sb . append ( "Connection failed at " ) ; sb . append ( new SimpleDateFormat ( "HH:mm:ss, 'on' EEEE dd (MMMM)" ) . format ( new Date ( ) ) ) ; sb . append ( ".\n" ) ; sb . append ( "The messaging configuration may be invalid.\n" ) ; sb . append ( "Or the messaging server may not be started yet.\n\n" ) ; sb . append ( "Consider using the 'roboconf:force-reconnect' command if you forgot to start the messaging server.\n" ) ; sb . append ( "**** WARNING ****\n" ) ; this . console . println ( sb . toString ( ) ) ; } // Replace the current client IMessagingClient oldClient ; synchronized ( this ) { // Simple changes this . messagingType = factoryName ; oldClient = this . messagingClient ; // The messaging client can NEVER be null if ( newMessagingClient != null ) this . messagingClient = newMessagingClient ; else resetInternalClient ( ) ; } terminateClient ( oldClient , "The previous client could not be terminated correctly." , this . logger ) ;