signature stringlengths 43 39.1k | implementation stringlengths 0 450k |
|---|---|
public class TopLevel { /** * Static helper method to get a native error constructor with the given
* < code > type < / code > from the given < code > scope < / code > . If the scope is not
* an instance of this class or does have a cache of native errors ,
* the constructor is looked up via normal property lookup .
* @ param cx the current Context
* @ param scope the top - level scope
* @ param type the native error type
* @ return the native error constructor */
static Function getNativeErrorCtor ( Context cx , Scriptable scope , NativeErrors type ) { } } | // must be called with top level scope
assert scope . getParentScope ( ) == null ; if ( scope instanceof TopLevel ) { Function result = ( ( TopLevel ) scope ) . getNativeErrorCtor ( type ) ; if ( result != null ) { return result ; } } // fall back to normal constructor lookup
return ScriptRuntime . getExistingCtor ( cx , scope , type . name ( ) ) ; |
public class MapStorage { /** * Caller must hold upgrade or write lock . */
private boolean doTryInsertNoLock ( S storable ) { } } | // Create a fresh copy to ensure that custom fields are not saved .
S copy = ( S ) storable . prepare ( ) ; storable . copyAllProperties ( copy ) ; copy . markAllPropertiesClean ( ) ; Key < S > key = new Key < S > ( copy , mFullComparator ) ; S existing = mMap . get ( key ) ; if ( existing != null ) { return false ; } mMap . put ( key , copy ) ; storable . markAllPropertiesClean ( ) ; return true ; |
/**
 * Specifies the AWS account IDs to include in the policy. If <code>IncludeMap</code> is null, all accounts in the
 * organization in AWS Organizations are included in the policy. If <code>IncludeMap</code> is not null, only values
 * listed in <code>IncludeMap</code> are included in the policy.
 * The key to the map is <code>ACCOUNT</code>. For example, a valid <code>IncludeMap</code> would be
 * <code>{"ACCOUNT": ["accountID1", "accountID2"]}</code>.
 *
 * @return the <code>IncludeMap</code>: a map from <code>ACCOUNT</code> to the list of account IDs to include,
 *         or null, meaning all accounts in the organization are included
 */
public java.util.Map<String, java.util.List<String>> getIncludeMap() {
    return includeMap;
}
/**
 * Builds the cumulated-producers view: collects producers for the selected
 * decorator (applying interval/unit and category/subsystem filters), gathers
 * the accumulator and threshold ids tied to those producers, populates the
 * cumulated statistics, and stores everything as request attributes for the
 * view layer.
 *
 * {@inheritDoc}
 */
@Override
public ActionCommand execute(ActionMapping mapping, FormBean bean, HttpServletRequest req, HttpServletResponse res) throws Exception {
    Map<String, GraphDataBean> graphData = new HashMap<>();
    List<String> accumulatorIds = new ArrayList<>();
    List<String> thresholdIds = new ArrayList<>();
    // Common parameters required by the producer API.
    String intervalName = getCurrentInterval(req);
    UnitBean currentUnit = getCurrentUnit(req);
    // Action parameters, i.e. filters.
    String decoratorName = req.getParameter(PARAM_DECORATOR);
    String categoryName = req.getParameter(PARAM_CATEGORY);
    String subsystemName = req.getParameter(PARAM_SUBSYSTEM);
    // Producers filtered by category and/or subsystem.
    List<ProducerAO> producersList = getFilteredProducers(intervalName, currentUnit.getUnit(), categoryName, subsystemName);
    // Specified decorator together with its producers; graphData is filled as a side effect.
    ProducerDecoratorBean decorator = ProducerUtility.filterProducersByDecoratorName(decoratorName, producersList, graphData);
    // Collect threshold and accumulator ids for every producer of the decorator.
    for (ProducerAO producer : decorator.getProducers()) {
        accumulatorIds.addAll(getAccumulatorAPI().getAccumulatorIdsTiedToASpecificProducer(producer.getProducerId()));
        thresholdIds.addAll(getThresholdAPI().getThresholdIdsTiedToASpecificProducer(producer.getProducerId()));
    }
    // Populate common cumulated stats.
    if (hasCumulatedStat(decorator)) {
        populateCumulatedStats(decorator);
    }
    // Remove cumulated stats from the per-producer lines.
    if (hasAnyStat(decorator)) {
        removeCumulatedStats(decorator);
    }
    req.setAttribute("decorator", decorator);
    req.setAttribute("graphDatas", graphData.values());
    setAccumulatorAttributes(accumulatorIds, req);
    setThresholdAttributes(thresholdIds, req);
    return mapping.findCommand(getForward(req));
}
public class AmazonCloudDirectoryClient { /** * Retrieves all available parent paths for any object type such as node , leaf node , policy node , and index node
* objects . For more information about objects , see < a
* href = " https : / / docs . aws . amazon . com / clouddirectory / latest / developerguide / key _ concepts _ directorystructure . html "
* > Directory Structure < / a > .
* Use this API to evaluate all parents for an object . The call returns all objects from the root of the directory
* up to the requested object . The API returns the number of paths based on user - defined < code > MaxResults < / code > , in
* case there are multiple paths to the parent . The order of the paths and nodes returned is consistent among
* multiple API calls unless the objects are deleted or moved . Paths not leading to the directory root are ignored
* from the target object .
* @ param listObjectParentPathsRequest
* @ return Result of the ListObjectParentPaths operation returned by the service .
* @ throws InternalServiceException
* Indicates a problem that must be resolved by Amazon Web Services . This might be a transient error in
* which case you can retry your request until it succeeds . Otherwise , go to the < a
* href = " http : / / status . aws . amazon . com / " > AWS Service Health Dashboard < / a > site to see if there are any
* operational issues with the service .
* @ throws InvalidArnException
* Indicates that the provided ARN value is not valid .
* @ throws RetryableConflictException
* Occurs when a conflict with a previous successful write is detected . For example , if a write operation
* occurs on an object and then an attempt is made to read the object using “ SERIALIZABLE ” consistency , this
* exception may result . This generally occurs when the previous write did not have time to propagate to the
* host serving the current request . A retry ( with appropriate backoff logic ) is the recommended response to
* this exception .
* @ throws ValidationException
* Indicates that your request is malformed in some manner . See the exception message .
* @ throws LimitExceededException
* Indicates that limits are exceeded . See < a
* href = " https : / / docs . aws . amazon . com / clouddirectory / latest / developerguide / limits . html " > Limits < / a > for more
* information .
* @ throws AccessDeniedException
* Access denied . Check your permissions .
* @ throws DirectoryNotEnabledException
* Operations are only permitted on enabled directories .
* @ throws InvalidNextTokenException
* Indicates that the < code > NextToken < / code > value is not valid .
* @ throws ResourceNotFoundException
* The specified resource could not be found .
* @ sample AmazonCloudDirectory . ListObjectParentPaths
* @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / clouddirectory - 2017-01-11 / ListObjectParentPaths "
* target = " _ top " > AWS API Documentation < / a > */
@ Override public ListObjectParentPathsResult listObjectParentPaths ( ListObjectParentPathsRequest request ) { } } | request = beforeClientExecution ( request ) ; return executeListObjectParentPaths ( request ) ; |
/**
 * Reads into an array of bytes, blocking until some input is available.
 * Data is served first from the internal buffer ({@code buf[pos..count)});
 * any remainder is read directly from the underlying stream into the
 * caller's array to avoid an extra copy. {@code total} bytes read so far are
 * tracked against {@code limit} (presumably the declared content length —
 * TODO confirm against the enclosing class).
 *
 * @param read_buffer the buffer into which the data is read
 * @param offset the start offset of the data
 * @param length the maximum number of bytes to read
 * @return the actual number of bytes read, or -1 if the end of the
 *         stream (or the content-length limit) is reached
 * @exception IOException if an I/O error has occurred
 */
public int read(byte[] read_buffer, int offset, int length) throws IOException {
    if (TraceComponent.isAnyTracingEnabled() && logger.isLoggable(Level.FINE)) { // 306998.15
        logger.logp(Level.FINE, CLASS_NAME, "read", "read length -->" + length);
    }
    // Content-length limit already consumed: behave like EOF.
    if (total >= limit) {
        if (TraceComponent.isAnyTracingEnabled() && logger.isLoggable(Level.FINE)) { // 306998.15
            logger.logp(Level.FINE, CLASS_NAME, "read", "Over the limit: -1");
        }
        return -1;
    }
    // Copy as much as possible from the internal read buffer.
    int buf_len = count - pos;
    if (buf_len > 0) {
        if (buf_len >= length) {
            // Buffer fully satisfies the request: copy part of the read buffer.
            System.arraycopy(buf, pos, read_buffer, offset, length);
            pos += length;
            // begin 280584.2 java.io.IOException: SRVE0080E: Invalid content length WAS.webcontainer
            this.total += length;
            // end 280584.2
            if (TraceComponent.isAnyTracingEnabled() && logger.isLoggable(Level.FINE)) { // 306998.15
                logger.logp(Level.FINE, CLASS_NAME, "read", "read returning -->" + length);
            }
            return length;
        }
        // Drain the whole read buffer, then fall through to the stream.
        System.arraycopy(buf, pos, read_buffer, offset, buf_len);
        count = pos = 0; // reset buffer
        offset += buf_len;
        length -= buf_len;
    }
    // Read the remainder directly from the input stream into the caller's
    // buffer, avoiding an extra copy.
    int bytes_read = buf_len;
    int rtn = 0;
    if (length > 0) {
        rtn = in.read(read_buffer, offset, length);
    }
    if (rtn > 0) {
        bytes_read += rtn;
    }
    // begin 280584.2 java.io.IOException: SRVE0080E: Invalid content length WAS.webcontainer
    this.total += bytes_read;
    // end 280584.2
    // NOTE(review): a zero-byte result is mapped to -1 (EOF). If the
    // underlying stream can legitimately return 0 without being at EOF,
    // this conflates the two cases — confirm against the stream contract.
    if (bytes_read == 0) {
        bytes_read = -1;
    }
    if (TraceComponent.isAnyTracingEnabled() && logger.isLoggable(Level.FINE)) { // 306998.15
        logger.logp(Level.FINE, CLASS_NAME, "read", "read returning -->" + bytes_read + ", total=" + total + ",limit=" + limit);
    }
    return bytes_read;
}
public class SeaGlassTabbedPaneUI { /** * Scroll the tab buttons backwards . */
protected void scrollBackward ( ) { } } | int selectedIndex = tabPane . getSelectedIndex ( ) ; if ( -- selectedIndex < 0 ) { tabPane . setSelectedIndex ( tabPane . getTabCount ( ) == 0 ? - 1 : 0 ) ; } else { tabPane . setSelectedIndex ( selectedIndex ) ; } tabPane . repaint ( ) ; |
public class MSPDIReader { /** * The way calendars are stored in an MSPDI file means that there
* can be forward references between the base calendar unique ID for a
* derived calendar , and the base calendar itself . To get around this ,
* we initially populate the base calendar name attribute with the
* base calendar unique ID , and now in this method we can convert those
* ID values into the correct names .
* @ param baseCalendars list of calendars and base calendar IDs
* @ param map map of calendar ID values and calendar objects */
private static void updateBaseCalendarNames ( List < Pair < ProjectCalendar , BigInteger > > baseCalendars , HashMap < BigInteger , ProjectCalendar > map ) { } } | for ( Pair < ProjectCalendar , BigInteger > pair : baseCalendars ) { ProjectCalendar cal = pair . getFirst ( ) ; BigInteger baseCalendarID = pair . getSecond ( ) ; ProjectCalendar baseCal = map . get ( baseCalendarID ) ; if ( baseCal != null ) { cal . setParent ( baseCal ) ; } } |
public class PerturbedAtomHashGenerator { /** * Combines the values in an n x m matrix into a single array of size n .
* This process scans the rows and xors all unique values in the row
* together . If a duplicate value is found it is rotated using a
* pseudorandom number generator .
* @ param perturbed n x m , matrix
* @ return the combined values of each row */
long [ ] combine ( long [ ] [ ] perturbed ) { } } | int n = perturbed . length ; int m = perturbed [ 0 ] . length ; long [ ] combined = new long [ n ] ; long [ ] rotated = new long [ m ] ; for ( int i = 0 ; i < n ; i ++ ) { Arrays . sort ( perturbed [ i ] ) ; for ( int j = 0 ; j < m ; j ++ ) { // if non - unique , then get the next random number
if ( j > 0 && perturbed [ i ] [ j ] == perturbed [ i ] [ j - 1 ] ) { combined [ i ] ^= rotated [ j ] = rotate ( rotated [ j - 1 ] ) ; } else { combined [ i ] ^= rotated [ j ] = perturbed [ i ] [ j ] ; } } } return combined ; |
public class DistributedObjectCacheAdapter { /** * Returns < tt > true < / tt > if this map contains no key - value mappings .
* @ param includeDiskCache true to check the memory and disk maps ; false to check
* the memory map .
* @ return < tt > true < / tt > if this map contains no key - value mappings . */
@ Override public boolean isEmpty ( boolean includeDiskCache ) { } } | boolean isCacheEmpty = false ; /* * Check if the memory cache has entries */
isCacheEmpty = cache . getNumberCacheEntries ( ) == 0 ; /* * If the memory cache is not empty then we don ' t need to check the disk cache .
* If the cache is an instanceof CacheProviderWrapper then we know it ' s not dynacache so
* we need to check if it supports disk caching . If it doesn ' t then we don ' t need to check
* if the disk cache is empty . */
if ( includeDiskCache && isCacheEmpty ) { if ( cache instanceof CacheProviderWrapper ) { if ( ( ( CacheProviderWrapper ) cache ) . featureSupport . isDiskCacheSupported ( ) ) { isCacheEmpty = cache . getIdsSizeDisk ( ) == 0 ; } } else { // Not an instanceof CacheProviderWrapper so we know it ' s dynacache and we know
// getIdsSizeDisk is implemented .
isCacheEmpty = cache . getIdsSizeDisk ( ) == 0 ; } } return isCacheEmpty ; |
/**
 * Waits until the client is connected to the cluster or the timeout expires.
 * Does not wait if the client is already shutting down or shut down.
 *
 * @param timeout the maximum time to wait
 * @param unit the time unit of the {@code timeout} argument
 * @return true if the client is connected to the cluster. On returning false,
 *         distinguish timeout from shutdown via {@code isShutdown} / {@code getCurrentState}.
 * @throws InterruptedException if the current thread is interrupted while waiting
 */
public boolean awaitConnected(long timeout, TimeUnit unit) throws InterruptedException {
    lock.lock();
    try {
        // Fast paths under the lock: already connected, or shutdown in progress.
        if (currentState.equals(CLIENT_CONNECTED)) {
            return true;
        }
        if (currentState.equals(SHUTTING_DOWN) || currentState.equals(SHUTDOWN)) {
            return false;
        }
        // Standard Condition idiom: awaitNanos returns the remaining time,
        // so spurious wakeups re-check the state and keep waiting.
        long duration = unit.toNanos(timeout);
        while (duration > 0) {
            duration = connectedCondition.awaitNanos(duration);
            if (currentState.equals(CLIENT_CONNECTED)) {
                return true;
            }
        }
        // Timed out without reaching the connected state.
        return false;
    } finally {
        lock.unlock();
    }
}
public class UIComponent { /** * Sets the < code > focused < / code > state of this { @ link UIComponent } .
* @ param focused the state */
public void setFocused ( boolean focused ) { } } | if ( ! isEnabled ( ) ) return ; boolean flag = this . focused != focused ; flag |= MalisisGui . setFocusedComponent ( this , focused ) ; if ( ! flag ) return ; this . focused = focused ; fireEvent ( new FocusStateChange < > ( this , focused ) ) ; |
public class RemoteBrowserIterator { /** * / * ( non - Javadoc )
* @ see java . util . Iterator # next ( ) */
public Object next ( ) { } } | if ( tc . isEntryEnabled ( ) ) SibTr . entry ( tc , "next" ) ; AOBrowserSession aoBrowserSession ; RemoteBrowserReceiver remoteBrowserReceiver ; if ( browserIterator . hasNext ( ) ) { aoBrowserSession = ( AOBrowserSession ) browserIterator . next ( ) ; remoteBrowserReceiver = new RemoteBrowserReceiver ( aoBrowserSession ) ; } else { remoteBrowserReceiver = null ; } if ( tc . isEntryEnabled ( ) ) SibTr . exit ( tc , "next" , remoteBrowserReceiver ) ; return remoteBrowserReceiver ; |
public class ConvexHull { /** * Orientation predicates */
private static ECoordinate determinant_ ( Point2D p , Point2D q , Point2D r ) { } } | ECoordinate det_ec = new ECoordinate ( ) ; det_ec . set ( q . x ) ; det_ec . sub ( p . x ) ; ECoordinate rp_y_ec = new ECoordinate ( ) ; rp_y_ec . set ( r . y ) ; rp_y_ec . sub ( p . y ) ; ECoordinate qp_y_ec = new ECoordinate ( ) ; qp_y_ec . set ( q . y ) ; qp_y_ec . sub ( p . y ) ; ECoordinate rp_x_ec = new ECoordinate ( ) ; rp_x_ec . set ( r . x ) ; rp_x_ec . sub ( p . x ) ; det_ec . mul ( rp_y_ec ) ; qp_y_ec . mul ( rp_x_ec ) ; det_ec . sub ( qp_y_ec ) ; return det_ec ; |
public class TableCompactor { /** * Copies the { @ link Candidate } s in the given { @ link CompactionArgs } set to a contiguous block at the end of the Segment .
* @ param segment A { @ link DirectSegmentAccess } representing the Segment to operate on .
* @ param args A { @ link CompactionArgs } containing the { @ link Candidate } s to copy .
* @ param timer Timer for the operation .
* @ return A CompletableFuture that , when completed , indicate the candidates have been copied . */
private CompletableFuture < Void > copyCandidates ( DirectSegmentAccess segment , CompactionArgs args , TimeoutTimer timer ) { } } | // Collect all the candidates for copying and calculate the total serialization length .
val toWrite = new ArrayList < TableEntry > ( ) ; int totalLength = 0 ; for ( val list : args . candidates . values ( ) ) { for ( val c : list . getAll ( ) ) { toWrite . add ( c . entry ) ; totalLength += this . connector . getSerializer ( ) . getUpdateLength ( c . entry ) ; } } // Generate the necessary AttributeUpdates that will need to be applied regardless of whether we copy anything or not .
val attributes = generateAttributeUpdates ( args ) ; CompletableFuture < ? > result ; if ( totalLength == 0 ) { // Nothing to do ; update the necessary segment attributes .
assert toWrite . size ( ) == 0 ; result = segment . updateAttributes ( attributes , timer . getRemaining ( ) ) ; } else { // Perform a Segment Append with re - serialized entries ( Explicit versions ) , and atomically update the necessary
// segment attributes .
toWrite . sort ( Comparator . comparingLong ( c -> c . getKey ( ) . getVersion ( ) ) ) ; byte [ ] appendData = new byte [ totalLength ] ; this . connector . getSerializer ( ) . serializeUpdateWithExplicitVersion ( toWrite , appendData ) ; result = segment . append ( appendData , attributes , timer . getRemaining ( ) ) ; log . debug ( "TableCompactor[{}]: Compacting {}, CopyCount={}, CopyLength={}." , segment . getSegmentId ( ) , args , toWrite , totalLength ) ; } return Futures . toVoid ( result ) ; |
/**
 * Instantiates the shared constants instance via deferred binding, creating
 * it lazily on first use.
 *
 * @return the new instance, which must be cast to the requested class
 */
public static BicMapSharedConstants create() {
    // NOPMD — double-checked locking.
    // NOTE(review): for this idiom to be safe under the Java Memory Model the
    // bicMapConstants field must be declared volatile; the field declaration
    // is outside this view — confirm it is volatile.
    if (bicMapConstants == null) {
        synchronized (BicMapConstantsImpl.class) {
            if (bicMapConstants == null) {
                bicMapConstants = new BicMapConstantsImpl(readMapFromProperties("BicMapConstants", "bics"));
            }
        }
    }
    return bicMapConstants;
}
public class Row { /** * autofill generated values */
public void autoFill ( RowCursor cursor ) { } } | for ( Column col : columns ( ) ) { col . autoFill ( cursor . buffer ( ) , 0 ) ; } |
public class SVGParser { /** * < tref > element */
private void tref ( Attributes attributes ) throws SVGParseException { } } | debug ( "<tref>" ) ; if ( currentElement == null ) throw new SVGParseException ( "Invalid document. Root element must be <svg>" ) ; if ( ! ( currentElement instanceof SVG . TextContainer ) ) throw new SVGParseException ( "Invalid document. <tref> elements are only valid inside <text> or <tspan> elements." ) ; SVG . TRef obj = new SVG . TRef ( ) ; obj . document = svgDocument ; obj . parent = currentElement ; parseAttributesCore ( obj , attributes ) ; parseAttributesStyle ( obj , attributes ) ; parseAttributesConditional ( obj , attributes ) ; parseAttributesTRef ( obj , attributes ) ; currentElement . addChild ( obj ) ; if ( obj . parent instanceof TextRoot ) obj . setTextRoot ( ( TextRoot ) obj . parent ) ; else obj . setTextRoot ( ( ( TextChild ) obj . parent ) . getTextRoot ( ) ) ; |
public class JKSTrustStore { /** * This method is used to find file from disk or classpath .
* @ param trustStorePath
* file to load
* @ return instance of { @ link InputStream } containing content of the file
* @ throws FileNotFoundException
* if file does not exist */
@ SuppressWarnings ( "resource" ) private InputStream loadFile ( String trustStorePath ) throws FileNotFoundException { } } | InputStream input ; try { input = new FileInputStream ( trustStorePath ) ; } catch ( FileNotFoundException e ) { LOGGER . warn ( "File {} not found. Fallback to classpath." , trustStorePath ) ; input = Thread . currentThread ( ) . getContextClassLoader ( ) . getResourceAsStream ( trustStorePath ) ; } if ( input == null ) { throw new FileNotFoundException ( "File " + trustStorePath + " does not exist" ) ; } return input ; |
/**
 * Updates the status-bar icon according to the given cursor status: shows the
 * wait icon for {@link Cursor#WAIT_CURSOR} (when a shared applet instance is
 * available), clears the icon otherwise.
 * (Note: the previous javadoc documented a nonexistent {@code strMessage}
 * parameter; the method actually takes a cursor status code.)
 *
 * @param iStatus the cursor status code, e.g. {@link Cursor#WAIT_CURSOR}
 */
public void setStatus(int iStatus) {
    if (m_iconArea != null) {
        if ((iStatus == Cursor.WAIT_CURSOR) && (BaseApplet.getSharedInstance() != null))
            m_iconArea.setIcon(BaseApplet.getSharedInstance().loadImageIcon(ThinMenuConstants.WAIT));
        else
            m_iconArea.setIcon(null);
    }
}
public class LaContainerDefaultProvider { protected static void assertCircularInclude ( final LaContainer container , final String path ) { } } | assertCircularInclude ( container , path , new LinkedList < String > ( ) ) ; |
/**
 * Encodes the given data to a base64 string.
 *
 * Processes input in 3-byte groups, emitting four 6-bit symbols per group;
 * '=' padding (CHAR_TABLE index 64) is appended for a trailing partial group.
 * The masks (MASK_6/MASK_4/MASK_2) also neutralize sign extension from the
 * byte-to-int widening of {@code data[i]}.
 *
 * @param data a valid byte[], must not be null
 * @return a valid <code>String</code> instance representing the base64 encoding of the given data
 */
@SuppressWarnings("AssignmentToForLoopParameter")
public static String encode(final byte[] data) {
    final int len = data.length;
    // 4 output chars per 3 input bytes (rounded up).
    final StringBuilder result = new StringBuilder((len / 3 + 1) * 4);
    int bte;
    int index;
    for (int i = 0; i < len; i++) {
        bte = data[i];
        // Symbol 1: first 6 bits of byte 0.
        index = bte >> 2 & MASK_6;
        result.append(CHAR_TABLE.charAt(index));
        // Symbol 2: last 2 bits of byte 0 plus first 4 bits of byte 1.
        index = bte << 4 & MASK_6;
        if (++i < len) {
            bte = data[i];
            index |= bte >> 4 & MASK_4;
        }
        result.append(CHAR_TABLE.charAt(index));
        // Symbol 3: last 4 bits of byte 1 plus first 2 bits of byte 2;
        // otherwise pad.
        if (i < len) {
            index = bte << 2 & MASK_6;
            if (++i < len) {
                bte = data[i];
                index |= bte >> 6 & MASK_2;
            }
            result.append(CHAR_TABLE.charAt(index));
        } else {
            i++;
            result.append(CHAR_TABLE.charAt(64));
        }
        // Symbol 4: last 6 bits of byte 2; otherwise pad.
        if (i < len) {
            index = bte & MASK_6;
            result.append(CHAR_TABLE.charAt(index));
        } else {
            result.append(CHAR_TABLE.charAt(64));
        }
    }
    return result.toString();
}
/**
 * Sets the needed database parameters, delegating to the overload that works
 * on the raw parameter map.<p>
 * (Note: "Paramaters" is a typo in the method name, kept for backward
 * compatibility with existing callers.)
 *
 * @param request the http request
 * @param provider the db provider
 * @return true if already submitted
 */
public boolean setDbParamaters(HttpServletRequest request, String provider) {
    return setDbParamaters(request.getParameterMap(), provider, request.getContextPath(), request.getSession());
}
public class XCostExtension { /** * Retrieves the cost type for an attribute , if set by this extension ' s type
* attribute .
* @ param attribute
* Attribute element to retrieve cost type for .
* @ return The requested cost type . */
public String extractType ( XAttribute attribute ) { } } | XAttribute attr = attribute . getAttributes ( ) . get ( KEY_TYPE ) ; if ( attr == null ) { return null ; } else { return ( ( XAttributeLiteral ) attr ) . getValue ( ) ; } |
public class MaterialDialogDecorator { /** * Inflates the view , which is used to show the dialog ' s title . The view may either be the
* default one or a custom view , if one has been set before .
* @ return The view , which has been inflated , as an instance of the class { @ link View } or null ,
* if no view has been inflated */
private View inflateTitleView ( ) { } } | if ( getRootView ( ) != null ) { inflateTitleContainer ( ) ; if ( customTitleView != null ) { titleContainer . addView ( customTitleView ) ; } else if ( customTitleViewId != - 1 ) { LayoutInflater layoutInflater = LayoutInflater . from ( getContext ( ) ) ; View view = layoutInflater . inflate ( customTitleViewId , titleContainer , false ) ; titleContainer . addView ( view ) ; } else { LayoutInflater layoutInflater = LayoutInflater . from ( getContext ( ) ) ; View view = layoutInflater . inflate ( R . layout . material_dialog_title , titleContainer , false ) ; titleContainer . addView ( view ) ; } View titleView = titleContainer . findViewById ( android . R . id . title ) ; titleTextView = titleView instanceof TextView ? ( TextView ) titleView : null ; View iconView = titleContainer . findViewById ( android . R . id . icon ) ; iconImageView = iconView instanceof ImageView ? ( ImageView ) iconView : null ; return titleContainer ; } return null ; |
/**
 * Create a new reader builder.
 *
 * @param aClass The UBL class to be read. May not be <code>null</code>.
 * @return The new reader builder. Never <code>null</code>.
 * @param <T> The UBLPE document implementation type
 */
@Nonnull
public static <T> UBLPEReaderBuilder<T> create(@Nonnull final Class<T> aClass) {
    return new UBLPEReaderBuilder<>(aClass);
}
public class TypeAdapter { /** * Converts the JSON document in { @ code in } to a Java object . Unlike Gson ' s
* similar { @ link Gson # fromJson ( java . io . Reader , Class ) fromJson } method , this
* read is strict . Create a { @ link JsonReader # setLenient ( boolean ) lenient }
* { @ code JsonReader } and call { @ link # read ( JsonReader ) } for lenient reading .
* @ return the converted Java object . May be null .
* @ since 2.2 */
public final T fromJson ( Reader in ) throws IOException { } } | JsonReader reader = new JsonReader ( in ) ; return read ( reader ) ; |
public class AbstractCommand { /** * 映射方法
* @ param pObject
* @ throws APPErrorException */
protected Object invokeByReturn ( Object pObject ) throws APPErrorException { } } | try { Method method = null ; if ( null == getModelClass ( ) ) { method = tgtools . util . ReflectionUtil . findMethod ( mService . getClass ( ) , getMethodName ( ) ) ; return method . invoke ( mService ) ; } else { method = tgtools . util . ReflectionUtil . findMethod ( mService . getClass ( ) , getMethodName ( ) , getModelClass ( ) ) ; return method . invoke ( mService , pObject ) ; } } catch ( Exception ex ) { throw new APPErrorException ( "执行方法错误;原因:" + ex . getCause ( ) . getMessage ( ) , ex ) ; } |
public class TextComponentUtil { /** * Gets the line at a given position in the editor */
public static int getLineAtPosition ( JTextComponent editor , int position ) { } } | if ( position <= 0 ) { return 1 ; } String s = editor . getText ( ) ; if ( position > s . length ( ) ) { position = s . length ( ) ; } try { return GosuStringUtil . countMatches ( editor . getText ( 0 , position ) , "\n" ) + 1 ; } catch ( BadLocationException e ) { throw new RuntimeException ( e ) ; } |
public class PrintWriterImpl { /** * Writes a character . */
final public void write ( char [ ] buf , int offset , int length ) { } } | Writer out = this . out ; if ( out == null ) return ; try { out . write ( buf , offset , length ) ; } catch ( IOException e ) { log . log ( Level . FINE , e . toString ( ) , e ) ; } |
public class ServletRESTRequestWithParams { /** * ( non - Javadoc )
* @ see com . ibm . wsspi . rest . handler . RESTRequest # getURL ( ) */
@ Override public String getURL ( ) { } } | ServletRESTRequestImpl ret = castRequest ( ) ; if ( ret != null ) return ret . getURL ( ) ; return null ; |
public class VitoDrawableFactoryImpl { /** * We handle the given bitmap and return a Drawable ready for being displayed : If rounding is set ,
* the image will be rounded , if a border if set , the border will be applied and finally , the
* image will be rotated if required .
* < p > Bitmap - > border - > rounded corners - > RoundedBitmapDrawable ( since bitmap is square ) - >
* fully circular - > CircularBorderBitmapDrawable ( bitmap is circular ) - > square image - >
* RoundedBitmapDrawable ( for border support ) - > no border - > rounded corners - >
* RoundedBitmapDrawable ( since bitmap is square ) - > fully circular - > BitmapDrawable ( since
* bitmap is circular ) - > square image - > BitmapDrawable
* < p > If needed , the resulting drawable is rotated using OrientedDrawable .
* @ param closeableStaticBitmap the image to handle
* @ param imageOptions display options for the given image
* @ return the drawable to display */
protected Drawable handleCloseableStaticBitmap ( CloseableStaticBitmap closeableStaticBitmap , ImageOptions imageOptions ) { } } | RoundingOptions roundingOptions = imageOptions . getRoundingOptions ( ) ; BorderOptions borderOptions = imageOptions . getBorderOptions ( ) ; if ( borderOptions != null && borderOptions . width > 0 ) { return rotatedDrawable ( closeableStaticBitmap , roundedDrawableWithBorder ( closeableStaticBitmap , borderOptions , roundingOptions ) ) ; } else { return rotatedDrawable ( closeableStaticBitmap , roundedDrawableWithoutBorder ( closeableStaticBitmap , roundingOptions ) ) ; } |
public class AbstractContainerTask { /** * Executes a List of Tasks as children of this Task */
protected void performSubtasks ( TaskRequest req , TaskResponse res , List < Task > tasks ) { } } | // Assertions . . .
if ( req == null ) { String msg = "Argument 'req' cannot be null." ; throw new IllegalArgumentException ( msg ) ; } if ( res == null ) { String msg = "Argument 'res' cannot be null." ; throw new IllegalArgumentException ( msg ) ; } if ( tasks == null ) { String msg = "Child tasks have not been initialized. Subclasses " + "of AbstractContainerTask must call super.init() " + "within their own init() method." ; throw new IllegalStateException ( msg ) ; } // Invoke each of our children . . .
for ( Task k : tasks ) { k . perform ( req , res ) ; } |
/**
 * Executes a statement that does not return a result.
 *
 * @param sql The SQL statement to execute.
 * @param bindArgs The arguments to bind, or null if none.
 * @param connectionFlags The connection flags to use if a connection must be
 *            acquired by this operation. Refer to {@link SQLiteConnectionPool}.
 * @param cancellationSignal A signal to cancel the operation in progress, or null if none.
 * @throws SQLiteException if an error occurs, such as a syntax error
 *             or invalid number of bind arguments.
 * @throws OperationCanceledException if the operation was canceled.
 */
public void execute(String sql, Object[] bindArgs, int connectionFlags, CancellationSignal cancellationSignal) {
    if (sql == null) {
        throw new IllegalArgumentException("sql must not be null.");
    }
    // Special statements (e.g. transaction control) are handled entirely
    // by executeSpecial and need no connection here.
    if (executeSpecial(sql, bindArgs, connectionFlags, cancellationSignal)) {
        return;
    }
    acquireConnection(sql, connectionFlags, cancellationSignal); // might throw
    try {
        mConnection.execute(sql, bindArgs, cancellationSignal); // might throw
    } finally {
        // Always return the connection to the pool, even on failure.
        releaseConnection(); // might throw
    }
}
public class ThumbnailBuilder { /** * Create a thumbnail for the video .
* @ param videoPath video path .
* @ return thumbnail path . */
@ WorkerThread @ Nullable public String createThumbnailForVideo ( String videoPath ) { } } | if ( TextUtils . isEmpty ( videoPath ) ) return null ; File thumbnailFile = randomPath ( videoPath ) ; if ( thumbnailFile . exists ( ) ) return thumbnailFile . getAbsolutePath ( ) ; try { MediaMetadataRetriever retriever = new MediaMetadataRetriever ( ) ; if ( URLUtil . isNetworkUrl ( videoPath ) ) { retriever . setDataSource ( videoPath , new HashMap < String , String > ( ) ) ; } else { retriever . setDataSource ( videoPath ) ; } Bitmap bitmap = retriever . getFrameAtTime ( ) ; thumbnailFile . createNewFile ( ) ; bitmap . compress ( Bitmap . CompressFormat . JPEG , THUMBNAIL_QUALITY , new FileOutputStream ( thumbnailFile ) ) ; return thumbnailFile . getAbsolutePath ( ) ; } catch ( Exception ignored ) { return null ; } |
public class UTF8Reader {
    /**
     * Tries to ensure the byte buffer contains at least one complete UTF-8
     * character, compacting leftover bytes and reading more input as needed.
     *
     * @param available Number of "unused" bytes in the input buffer
     * @return True, if enough bytes were read to allow decoding of at least
     *   one full character; false if EOF was encountered instead.
     * @throws IOException on a read failure, or (via the report* helpers) when
     *   the stream ends in the middle of a multi-byte character, begins with an
     *   invalid UTF-8 lead byte, or a read returns zero bytes
     */
    private boolean loadMore(int available) throws IOException {
        // Account for the bytes consumed before the leftover tail.
        mByteCount += (mByteBufferEnd - available);
        // Bytes that need to be moved to the beginning of buffer?
        if (available > 0) {
            /* 11-Nov-2008, TSa: can only move if we own the buffer; otherwise
             * we are stuck with the data. */
            if (mBytePtr > 0 && canModifyBuffer()) {
                for (int i = 0; i < available; ++i) {
                    mByteBuffer[i] = mByteBuffer[mBytePtr + i];
                }
                mBytePtr = 0;
                mByteBufferEnd = available;
            }
        } else {
            /* Ok; here we can actually reasonably expect an EOF,
             * so let's do a separate read right away: */
            int count = readBytes();
            if (count < 1) {
                if (count < 0) { // -1: EOF
                    freeBuffers(); // to help GC?
                    return false;
                }
                // 0 count is no good; let's err out
                reportStrangeStream();
            }
        }
        /* We now have at least one byte... and that allows us to
         * calculate exactly how many bytes we need! */
        @SuppressWarnings("cast")
        int c = (int) mByteBuffer[mBytePtr];
        if (c >= 0) { // single byte (ascii) char... cool, can return
            return true;
        }
        // Ok, a multi-byte char, let's check how many bytes we'll need:
        int needed;
        if ((c & 0xE0) == 0xC0) { // 2 bytes (0x0080 - 0x07FF)
            needed = 2;
        } else if ((c & 0xF0) == 0xE0) { // 3 bytes (0x0800 - 0xFFFF)
            needed = 3;
        } else if ((c & 0xF8) == 0xF0) { // 4 bytes; double-char BS, with surrogates and all...
            needed = 4;
        } else {
            // Continuation byte (10xxxxxx) or 0xF8+ cannot start a character.
            reportInvalidInitial(c & 0xFF, 0);
            // never gets here... but compiler whines without this:
            needed = 1;
        }
        /* And then we'll just need to load up to that many bytes;
         * if an EOF is hit, that'll be an error. But we need not do
         * actual decoding here, just load enough bytes. */
        while ((mBytePtr + needed) > mByteBufferEnd) {
            int count = readBytesAt(mByteBufferEnd);
            if (count < 1) {
                if (count < 0) { // -1, EOF... no good!
                    freeBuffers();
                    reportUnexpectedEOF(mByteBufferEnd, needed);
                }
                // 0 count is no good; let's err out
                reportStrangeStream();
            }
        }
        return true;
    }
}
public class ULocale { /** * < strong > [ icu ] < / strong > Converts the specified keyword value ( legacy type , or BCP 47
* Unicode locale extension type ) to the well - formed BCP 47 Unicode locale
* extension type for the specified keyword ( category ) . For example , BCP 47
* Unicode locale extension type " phonebk " is returned for the input
* keyword value " phonebook " , with the keyword " collation " ( or " co " ) .
* When the specified keyword is not recognized , but the specified value
* satisfies the syntax of the BCP 47 Unicode locale extension type ,
* or when the specified keyword allows ' variable ' type and the specified
* value satisfies the syntax , the lower - case version of the input value
* will be returned . For example ,
* < code > toUnicodeLocaleType ( " Foo " , " Bar " ) < / code > returns " bar " ,
* < code > toUnicodeLocaleType ( " variableTop " , " 00A4 " ) < / code > returns " 00a4 " .
* @ param keyword the locale keyword ( either legacy key such as
* " collation " or BCP 47 Unicode locale extension
* key such as " co " ) .
* @ param value the locale keyword value ( either legacy type
* such as " phonebook " or BCP 47 Unicode locale extension
* type such as " phonebk " ) .
* @ return the well - formed BCP47 Unicode locale extension type ,
* or null if the locale keyword value cannot be mapped to
* a well - formed BCP 47 Unicode locale extension type .
* @ see # toLegacyType ( String , String ) */
public static String toUnicodeLocaleType ( String keyword , String value ) { } } | String bcpType = KeyTypeData . toBcpType ( keyword , value , null , null ) ; if ( bcpType == null && UnicodeLocaleExtension . isType ( value ) ) { // unknown keyword , but syntax is fine . .
bcpType = AsciiUtil . toLowerString ( value ) ; } return bcpType ; |
public class OrmElf { /** * Insert an annotated object into the database .
* @ param connection a SQL connection
* @ param target the annotated object to insert
* @ param < T > the class template
* @ return the same object that was passed in , but with possibly updated @ Id field due to auto - generated keys
* @ throws SQLException if a { @ link SQLException } occurs */
public static < T > T insertObject ( Connection connection , T target ) throws SQLException { } } | return OrmWriter . insertObject ( connection , target ) ; |
public class Cipher { /** * Encrypts or decrypts data in a single - part operation , or finishes a
* multiple - part operation . The data is encrypted or decrypted ,
* depending on how this cipher was initialized .
* < p > All < code > input . remaining ( ) < / code > bytes starting at
* < code > input . position ( ) < / code > are processed .
* If an AEAD mode such as GCM / CCM is being used , the authentication
* tag is appended in the case of encryption , or verified in the
* case of decryption .
* The result is stored in the output buffer .
* Upon return , the input buffer ' s position will be equal
* to its limit ; its limit will not have changed . The output buffer ' s
* position will have advanced by n , where n is the value returned
* by this method ; the output buffer ' s limit will not have changed .
* < p > If < code > output . remaining ( ) < / code > bytes are insufficient to
* hold the result , a < code > ShortBufferException < / code > is thrown .
* In this case , repeat this call with a larger output buffer . Use
* { @ link # getOutputSize ( int ) getOutputSize } to determine how big
* the output buffer should be .
* < p > Upon finishing , this method resets this cipher object to the state
* it was in when previously initialized via a call to < code > init < / code > .
* That is , the object is reset and available to encrypt or decrypt
* ( depending on the operation mode that was specified in the call to
* < code > init < / code > ) more data .
* < p > Note : if any exception is thrown , this cipher object may need to
* be reset before it can be used again .
* < p > Note : this method should be copy - safe , which means the
* < code > input < / code > and < code > output < / code > buffers can reference
* the same byte array and no unprocessed input data is overwritten
* when the result is copied into the output buffer .
* @ param input the input ByteBuffer
* @ param output the output ByteBuffer
* @ return the number of bytes stored in < code > output < / code >
* @ exception IllegalStateException if this cipher is in a wrong state
* ( e . g . , has not been initialized )
* @ exception IllegalArgumentException if input and output are the
* same object
* @ exception ReadOnlyBufferException if the output buffer is read - only
* @ exception IllegalBlockSizeException if this cipher is a block cipher ,
* no padding has been requested ( only in encryption mode ) , and the total
* input length of the data processed by this cipher is not a multiple of
* block size ; or if this encryption algorithm is unable to
* process the input data provided .
* @ exception ShortBufferException if there is insufficient space in the
* output buffer
* @ exception BadPaddingException if this cipher is in decryption mode ,
* and ( un ) padding has been requested , but the decrypted data is not
* bounded by the appropriate padding bytes
* @ exception AEADBadTagException if this cipher is decrypting in an
* AEAD mode ( such as GCM / CCM ) , and the received authentication tag
* does not match the calculated value
* @ since 1.5 */
public final int doFinal ( ByteBuffer input , ByteBuffer output ) throws ShortBufferException , IllegalBlockSizeException , BadPaddingException { } } | checkCipherState ( ) ; if ( ( input == null ) || ( output == null ) ) { throw new IllegalArgumentException ( "Buffers must not be null" ) ; } if ( input == output ) { throw new IllegalArgumentException ( "Input and output buffers must " + "not be the same object, consider using buffer.duplicate()" ) ; } if ( output . isReadOnly ( ) ) { throw new ReadOnlyBufferException ( ) ; } updateProviderIfNeeded ( ) ; return spi . engineDoFinal ( input , output ) ; |
public class NavigationTelemetry { /** * Creates a new { @ link FeedbackEvent } and adds it to the queue
* of events to be sent .
* @ param feedbackType defined in FeedbackEvent
* @ param description optional String describing event
* @ param feedbackSource from either reroute or UI
* @ return String feedbackId to identify the event created if needed */
String recordFeedbackEvent ( @ FeedbackEvent . FeedbackType String feedbackType , String description , @ FeedbackEvent . FeedbackSource String feedbackSource ) { } } | FeedbackEvent feedbackEvent = queueFeedbackEvent ( feedbackType , description , feedbackSource ) ; return feedbackEvent . getEventId ( ) ; |
public class SpatialPointLeafEntry { /** * Calls the super method and writes the values of this entry to the specified
* stream .
* @ param out the stream to write the object to
* @ throws java . io . IOException Includes any I / O exceptions that may occur */
@ Override public void writeExternal ( ObjectOutput out ) throws IOException { } } | out . writeInt ( DBIDUtil . asInteger ( id ) ) ; out . writeInt ( values . length ) ; for ( double v : values ) { out . writeDouble ( v ) ; } |
public class PartnersInner {
    /**
     * Gets an integration account partner.
     *
     * @param resourceGroupName The resource group name.
     * @param integrationAccountName The integration account name.
     * @param partnerName The integration account partner name.
     * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @return the {@link ServiceFuture} object
     */
    public ServiceFuture<IntegrationAccountPartnerInner> getAsync(String resourceGroupName, String integrationAccountName, String partnerName, final ServiceCallback<IntegrationAccountPartnerInner> serviceCallback) {
        // Generated (AutoRest) client method: adapts the service-response
        // observable into a ServiceFuture that notifies the given callback.
        return ServiceFuture.fromResponse(getWithServiceResponseAsync(resourceGroupName, integrationAccountName, partnerName), serviceCallback);
    }
}
public class DbPersistenceManager {
    /**
     * {@inheritDoc}
     * Basically wraps a JDBC transaction around super.store() and retries on
     * failure: indefinitely while blockOnConnectionLoss is set (until the
     * retry sleep is interrupted), otherwise at most one retry.
     * FIXME: the retry logic is almost a duplicate of {@code ConnectionHelper.RetryManager}.
     */
    public synchronized void store(final ChangeLog changeLog) throws ItemStateException {
        int failures = 0;
        ItemStateException lastException = null;
        boolean sleepInterrupted = false;
        // Loop condition: keep retrying forever when blocking on connection
        // loss, otherwise allow the initial attempt plus one retry.
        while (!sleepInterrupted && (blockOnConnectionLoss || failures <= 1)) {
            try {
                conHelper.startBatch();
                super.store(changeLog);
                conHelper.endBatch(true);
                return;
            } catch (SQLException e) {
                // Either startBatch or stopBatch threw it: either way the
                // transaction was not persisted and no action needs to be taken.
                lastException = new ItemStateException(e.getMessage(), e);
            } catch (ItemStateException e) {
                // store call threw it: we need to cancel the transaction
                lastException = e;
                try {
                    conHelper.endBatch(false);
                } catch (SQLException e2) {
                    DbUtility.logException("rollback failed", e2);
                }
                // if we got here due to a constraint violation and we
                // are running in test mode, we really want to stop
                assert !isIntegrityConstraintViolation(e.getCause());
            }
            failures++;
            log.error("Failed to persist ChangeLog (stacktrace on DEBUG log level), blockOnConnectionLoss = " + blockOnConnectionLoss + ": " + lastException);
            log.debug("Failed to persist ChangeLog", lastException);
            if (blockOnConnectionLoss || failures <= 1) { // if we're going to try again
                try {
                    Thread.sleep(100);
                } catch (InterruptedException e1) {
                    // Preserve the interrupt status and stop retrying.
                    Thread.currentThread().interrupt();
                    sleepInterrupted = true;
                    log.error("Interrupted: canceling retry of ChangeLog storage");
                }
            }
        }
        // All attempts failed: surface the most recent failure.
        throw lastException;
    }
}
public class BigComplex { /** * Calculates the addition of the given complex value to this complex number using the specified { @ link MathContext } .
* < p > This methods < strong > does not < / strong > modify this instance . < / p >
* @ param value the { @ link BigComplex } value to add
* @ param mathContext the { @ link MathContext } used to calculate the result
* @ return the calculated { @ link BigComplex } result */
public BigComplex add ( BigComplex value , MathContext mathContext ) { } } | return valueOf ( re . add ( value . re , mathContext ) , im . add ( value . im , mathContext ) ) ; |
public class UserService { /** * Retrieves user login history records matching the given criteria .
* Retrieves up to < code > limit < / code > user history records matching the
* given terms and sorted by the given predicates . Only history records
* associated with data that the given user can read are returned .
* @ param user
* The user retrieving the login history .
* @ param requiredContents
* The search terms that must be contained somewhere within each of the
* returned records .
* @ param sortPredicates
* A list of predicates to sort the returned records by , in order of
* priority .
* @ param limit
* The maximum number of records that should be returned .
* @ return
* The login history of the given user , including any active sessions .
* @ throws GuacamoleException
* If permission to read the user login history is denied . */
public List < ActivityRecord > retrieveHistory ( ModeledAuthenticatedUser user , Collection < ActivityRecordSearchTerm > requiredContents , List < ActivityRecordSortPredicate > sortPredicates , int limit ) throws GuacamoleException { } } | List < ActivityRecordModel > searchResults ; // Bypass permission checks if the user is a system admin
if ( user . getUser ( ) . isAdministrator ( ) ) searchResults = userRecordMapper . search ( requiredContents , sortPredicates , limit ) ; // Otherwise only return explicitly readable history records
else searchResults = userRecordMapper . searchReadable ( user . getUser ( ) . getModel ( ) , requiredContents , sortPredicates , limit , user . getEffectiveUserGroups ( ) ) ; return getObjectInstances ( searchResults ) ; |
public class MtasToken { /** * Adds the offset .
* @ param start the start
* @ param end the end */
final public void addOffset ( Integer start , Integer end ) { } } | if ( tokenOffset == null ) { setOffset ( start , end ) ; } else if ( ( start == null ) || ( end == null ) ) { // do nothing
} else if ( start > end ) { throw new IllegalArgumentException ( "Start offset after end offset" ) ; } else { tokenOffset . add ( start , end ) ; } |
public class DefaultGroovyMethods { /** * Finds the items matching the IDENTITY Closure ( i . e . & # 160 ; matching Groovy truth ) .
* Example :
* < pre class = " groovyTestCase " >
* def items = [ 1 , 2 , 0 , false , true , ' ' , ' foo ' , [ ] , [ 4 , 5 ] , null ]
* assert items . findAll ( ) = = [ 1 , 2 , true , ' foo ' , [ 4 , 5 ] ]
* < / pre >
* @ param self a Collection
* @ return a Collection of the values found
* @ since 1.8.1
* @ see Closure # IDENTITY */
public static < T > Collection < T > findAll ( Collection < T > self ) { } } | return findAll ( self , Closure . IDENTITY ) ; |
public class RulesExchangeHandler { /** * private boolean isContinue ( Exchange exchange , Message message ) {
* return isBoolean ( exchange , message , RulesConstants . CONTINUE _ PROPERTY ) ; */
private boolean isDispose ( Exchange exchange , Message message ) { } } | return isBoolean ( exchange , message , RulesConstants . DISPOSE_PROPERTY ) ; |
public class FactionWarfareApi {
    /**
     * List of the top pilots in faction warfare. Top 100 leaderboard of pilots
     * for kills and victory points, separated by total, last week and yesterday.
     * --- This route expires daily at 11:05
     *
     * @param datasource The server name you would like data from (optional,
     *            default to tranquility)
     * @param ifNoneMatch ETag from a previous request. A 304 will be returned
     *            if this matches the current ETag (optional)
     * @return ApiResponse&lt;FactionWarfareLeaderboardCharactersResponse&gt;
     * @throws ApiException If fail to call the API, e.g. server error or cannot
     *             deserialize the response body
     */
    public ApiResponse<FactionWarfareLeaderboardCharactersResponse> getFwLeaderboardsCharactersWithHttpInfo(String datasource, String ifNoneMatch) throws ApiException {
        // Generated (swagger-codegen) method: build/validate the call, then
        // execute it synchronously (null = no progress listeners).
        com.squareup.okhttp.Call call = getFwLeaderboardsCharactersValidateBeforeCall(datasource, ifNoneMatch, null);
        // Deserialization target for the response body.
        Type localVarReturnType = new TypeToken<FactionWarfareLeaderboardCharactersResponse>() { }.getType();
        return apiClient.execute(call, localVarReturnType);
    }
}
public class DeviceMarshaller {
    /**
     * Marshall the given parameter object.
     *
     * @param device the {@code Device} to marshall; must not be null
     * @param protocolMarshaller the marshaller each member is emitted through
     * @throws SdkClientException if {@code device} is null or marshalling fails
     */
    public void marshall(Device device, ProtocolMarshaller protocolMarshaller) {
        if (device == null) {
            throw new SdkClientException("Invalid argument passed to marshall(...)");
        }
        try {
            // Generated (AWS SDK) marshaller: emit each member with its binding.
            protocolMarshaller.marshall(device.getHostPath(), HOSTPATH_BINDING);
            protocolMarshaller.marshall(device.getContainerPath(), CONTAINERPATH_BINDING);
            protocolMarshaller.marshall(device.getPermissions(), PERMISSIONS_BINDING);
        } catch (Exception e) {
            // Wrap any failure in the SDK's client exception, preserving the cause.
            throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
        }
    }
}
public class SftpClient { /** * Download the remote files to the local computer .
* @ param remote
* the regular expression path to the remote file
* @ param progress
* @ return SftpFile [ ]
* @ throws FileNotFoundException
* @ throws SftpStatusException
* @ throws SshException
* @ throws TransferCancelledException */
public SftpFile [ ] getFiles ( String remote , FileTransferProgress progress ) throws FileNotFoundException , SftpStatusException , SshException , TransferCancelledException { } } | return getFiles ( remote , progress , false ) ; |
public class CFMLTransformer {
    /**
     * Reads the body of a tag: comments, nested tags and literals including
     * expressions, recursing until the end tag or end of input is reached.
     * (Javadoc translated from German.)<br/>
     * EBNF:<br/>
     * <code>[comment] ("&lt;/" | "&lt;" tag body | literal body);</code>
     *
     * @param data transformer state, including the source-code cursor
     * @param body CFXD body element the parsed content is attached to
     * @throws TemplateException
     */
    public void body(Data data, Body body) throws TemplateException {
        boolean parseLiteral = true;
        // Comment
        comment(data.srcCode, false);
        // Tag
        // is Tag Beginning
        if (data.srcCode.isCurrent('<')) {
            // return if end tag and inside tag
            if (data.srcCode.isNext('/')) {
                // lucee.print.ln("early return");
                return;
            }
            // tag() returns whether the tag consumed its literal content itself.
            parseLiteral = !tag(data, body);
        }
        // no Tag
        if (parseLiteral) {
            literal(data, body);
        }
        // not at the end: keep parsing the remainder of this body recursively
        if (!done && data.srcCode.isValidIndex()) body(data, body);
    }
}
public class FileUtils { /** * Creates a file with the given { @ link File } path .
* @ param path the given { @ link File } indicating the absolute location and name of the file .
* @ return true if the path represented by the { @ link File } object is not null , is not an existing directory ,
* or the path can be created as a file if it does not already exist . Returns true if the file already exists .
* @ see java . io . File # createNewFile ( ) */
@ NullSafe public static boolean createFile ( File path ) { } } | try { return ( path != null && ! path . isDirectory ( ) && ( path . isFile ( ) || path . createNewFile ( ) ) ) ; } catch ( IOException ignore ) { return false ; } |
public class ServiceActionDetail {
    /**
     * A map that defines the self-service action.
     *
     * @param definition A map that defines the self-service action.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public ServiceActionDetail withDefinition(java.util.Map<String, String> definition) {
        // Generated fluent setter: delegates to setDefinition, returns this for chaining.
        setDefinition(definition);
        return this;
    }
}
public class CalendarDataUtility {
    /**
     * Transform a {@link Calendar} style constant into an ICU context value.
     *
     * @param style one of the Calendar {@code *_FORMAT} or {@code *_STANDALONE}
     *              style constants
     * @return {@code DateFormatSymbols.FORMAT} for the *_FORMAT styles,
     *         {@code DateFormatSymbols.STANDALONE} for the *_STANDALONE styles
     * @throws IllegalArgumentException if {@code style} is not one of the
     *         recognized constants
     */
    private static int toContext(int style) {
        switch (style) {
            case Calendar.SHORT_FORMAT:
            case Calendar.NARROW_FORMAT:
            case Calendar.LONG_FORMAT:
                return DateFormatSymbols.FORMAT;
            case Calendar.SHORT_STANDALONE:
            case Calendar.NARROW_STANDALONE:
            case Calendar.LONG_STANDALONE:
                return DateFormatSymbols.STANDALONE;
            default:
                throw new IllegalArgumentException("Invalid style: " + style);
        }
    }
}
public class Resources { /** * Gets a resource string formatted with MessageFormat .
* @ param key the key
* @ param arguments the arguments
* @ return Formatted stirng . */
public String getFormatted ( String key , Object ... arguments ) { } } | return MessageFormat . format ( resource . getString ( key ) , arguments ) ; |
public class ApkParsers { /** * Get apk manifest xml file as text
* @ throws IOException */
public static String getManifestXml ( String apkFilePath , Locale locale ) throws IOException { } } | try ( ApkFile apkFile = new ApkFile ( apkFilePath ) ) { apkFile . setPreferredLocale ( locale ) ; return apkFile . getManifestXml ( ) ; } |
public class SmartsMatchers { /** * Do not use - temporary method until the SMARTS packages are cleaned up .
* Prepares a target molecule for matching with SMARTS .
* @ param container the container to initialise
* @ param ringQuery whether the smarts will check ring size queries */
public static void prepare ( IAtomContainer container , boolean ringQuery ) { } } | if ( ringQuery ) { SMARTSAtomInvariants . configureDaylightWithRingInfo ( container ) ; } else { SMARTSAtomInvariants . configureDaylightWithoutRingInfo ( container ) ; } |
public class Proxy { /** * TODO consider storing cache of metricName - > index mappings for O ( 1 ) performance . */
public boolean match ( String destination , String metricName ) { } } | int [ ] matches = match ( destination ) ; if ( matches == null || matches . length == 0 ) { return false ; } for ( int match : matches ) { String candidateMetricName = proxyRules . get ( match ) . getMetricSystemName ( ) ; if ( metricName . equals ( candidateMetricName ) ) { return true ; } } return false ; |
public class SequencedFragment { /** * Convert quality scores in - place .
* @ throws FormatException if quality scores are out of the range
* allowed by the current encoding .
* @ throws IllegalArgumentException if current and target quality encodings are the same . */
public static void convertQuality ( Text quality , BaseQualityEncoding current , BaseQualityEncoding target ) { } } | if ( current == target ) throw new IllegalArgumentException ( "current and target quality encodinds are the same (" + current + ")" ) ; byte [ ] bytes = quality . getBytes ( ) ; final int len = quality . getLength ( ) ; final int illuminaSangerDistance = FormatConstants . ILLUMINA_OFFSET - FormatConstants . SANGER_OFFSET ; if ( current == BaseQualityEncoding . Illumina && target == BaseQualityEncoding . Sanger ) { for ( int i = 0 ; i < len ; ++ i ) { if ( bytes [ i ] < FormatConstants . ILLUMINA_OFFSET || bytes [ i ] > ( FormatConstants . ILLUMINA_OFFSET + FormatConstants . ILLUMINA_MAX ) ) { throw new FormatException ( "base quality score out of range for Illumina Phred+64 format (found " + ( bytes [ i ] - FormatConstants . ILLUMINA_OFFSET ) + " but acceptable range is [0," + FormatConstants . ILLUMINA_MAX + "]).\n" + "Maybe qualities are encoded in Sanger format?\n" ) ; } bytes [ i ] -= illuminaSangerDistance ; } } else if ( current == BaseQualityEncoding . Sanger && target == BaseQualityEncoding . Illumina ) { for ( int i = 0 ; i < len ; ++ i ) { if ( bytes [ i ] < FormatConstants . SANGER_OFFSET || bytes [ i ] > ( FormatConstants . SANGER_OFFSET + FormatConstants . SANGER_MAX ) ) { throw new FormatException ( "base quality score out of range for Sanger Phred+64 format (found " + ( bytes [ i ] - FormatConstants . SANGER_OFFSET ) + " but acceptable range is [0," + FormatConstants . SANGER_MAX + "]).\n" + "Maybe qualities are encoded in Illumina format?\n" ) ; } bytes [ i ] += illuminaSangerDistance ; } } else throw new IllegalArgumentException ( "unsupported BaseQualityEncoding transformation from " + current + " to " + target ) ; |
public class PopupMenuFactoryAddUserFromSession { /** * Gets the Users extension
* @ return the extension Users */
private ExtensionUserManagement getExtensionUserManagement ( ) { } } | if ( extensionUsers == null ) { extensionUsers = Control . getSingleton ( ) . getExtensionLoader ( ) . getExtension ( ExtensionUserManagement . class ) ; } return extensionUsers ; |
public class BPSImpl {
    /**
     * <!-- begin-user-doc -->
     * Returns the value of the structural feature with the given id,
     * delegating to the superclass for features not declared on BPS.
     * <!-- end-user-doc -->
     * @generated
     */
    @Override
    public Object eGet(int featureID, boolean resolve, boolean coreType) {
        switch (featureID) {
            case AfplibPackage.BPS__PSEG_NAME:
                return getPsegName();
            case AfplibPackage.BPS__TRIPLETS:
                return getTriplets();
        }
        return super.eGet(featureID, resolve, coreType);
    }
}
public class MultiLayerNetwork { /** * Returns a 1 x m vector where the vector is composed of a flattened vector of all of the parameters ( weights and
* biases etc ) for all parameters in the network . Note that this method is generally reserved for developer and
* internal use - see { @ link # getParam ( String ) } and { @ link # paramTable ( ) } for a more useful / interpretable
* representation of the parameters . < br >
* Note that with backwardsOnly = false the parameter vector is not a copy , and changes to the returned INDArray
* will impact the network parameters .
* @ param backwardOnly Return a copy of the parameters excluding any parameters used only for unsupervised layers '
* unsupervised training ( such as decoder parameters in an autoencoder layer
* @ return the params for this neural net */
public INDArray params ( boolean backwardOnly ) { } } | if ( backwardOnly ) return params ( ) ; List < INDArray > params = new ArrayList < > ( ) ; for ( Layer layer : getLayers ( ) ) { INDArray layerParams = layer . params ( ) ; if ( layerParams != null ) params . add ( layerParams ) ; // may be null : subsampling etc layers
} return Nd4j . toFlattened ( 'f' , params ) ; |
public class FrustumRayBuilder {
    /**
     * Update the stored frustum corner rays and origin of <code>this</code>
     * {@link FrustumRayBuilder} with the given {@link Matrix4fc matrix}.
     * Reference: <a href="http://gamedevs.org/uploads/fast-extraction-viewing-frustum-planes-from-world-view-projection-matrix.pdf">
     * Fast Extraction of Viewing Frustum Planes from the World-View-Projection Matrix</a>
     * Reference: <a href="http://geomalgorithms.com/a05-_intersect-1.html">http://geomalgorithms.com</a>
     *
     * @param m
     *          the {@link Matrix4fc matrix} to update the frustum corner rays and origin with
     * @return this
     */
    public FrustumRayBuilder set(Matrix4fc m) {
        // Extract the side-plane normals (and plane constants d1..d3) from the
        // matrix rows/columns, per the Gribb/Hartmann plane-extraction paper:
        // nx* = left plane, px* = right plane, ny* = bottom plane, py* = top plane.
        float nxX = m.m03() + m.m00(), nxY = m.m13() + m.m10(), nxZ = m.m23() + m.m20(), d1 = m.m33() + m.m30();
        float pxX = m.m03() - m.m00(), pxY = m.m13() - m.m10(), pxZ = m.m23() - m.m20(), d2 = m.m33() - m.m30();
        float nyX = m.m03() + m.m01(), nyY = m.m13() + m.m11(), nyZ = m.m23() + m.m21();
        float pyX = m.m03() - m.m01(), pyY = m.m13() - m.m11(), pyZ = m.m23() - m.m21(), d3 = m.m33() - m.m31();
        // Each corner ray direction is the cross product of its two adjacent
        // side-plane normals.
        // bottom left
        nxnyX = nyY * nxZ - nyZ * nxY; nxnyY = nyZ * nxX - nyX * nxZ; nxnyZ = nyX * nxY - nyY * nxX;
        // bottom right
        pxnyX = pxY * nyZ - pxZ * nyY; pxnyY = pxZ * nyX - pxX * nyZ; pxnyZ = pxX * nyY - pxY * nyX;
        // top left
        nxpyX = nxY * pyZ - nxZ * pyY; nxpyY = nxZ * pyX - nxX * pyZ; nxpyZ = nxX * pyY - nxY * pyX;
        // top right
        pxpyX = pyY * pxZ - pyZ * pxY; pxpyY = pyZ * pxX - pyX * pxZ; pxpyZ = pyX * pxY - pyY * pxX;
        // compute origin: intersection of the left, right and top planes
        // (three-plane intersection formula, cf. geomalgorithms.com).
        float pxnxX, pxnxY, pxnxZ;
        pxnxX = pxY * nxZ - pxZ * nxY; pxnxY = pxZ * nxX - pxX * nxZ; pxnxZ = pxX * nxY - pxY * nxX;
        float invDot = 1.0f / (nxX * pxpyX + nxY * pxpyY + nxZ * pxpyZ);
        cx = (-pxpyX * d1 - nxpyX * d2 - pxnxX * d3) * invDot;
        cy = (-pxpyY * d1 - nxpyY * d2 - pxnxY * d3) * invDot;
        cz = (-pxpyZ * d1 - nxpyZ * d2 - pxnxZ * d3) * invDot;
        return this;
    }
}
public class CmsSourceSearchApp { /** * Generates the state string for the given search settings . < p >
* @ param settings the search settings
* @ return the state string */
public static String generateState ( CmsSearchReplaceSettings settings ) { } } | String state = "" ; state = A_CmsWorkplaceApp . addParamToState ( state , SITE_ROOT , settings . getSiteRoot ( ) ) ; state = A_CmsWorkplaceApp . addParamToState ( state , SEARCH_TYPE , settings . getType ( ) . name ( ) ) ; state = A_CmsWorkplaceApp . addParamToState ( state , SEARCH_PATTERN , settings . getSearchpattern ( ) ) ; if ( ! settings . getPaths ( ) . isEmpty ( ) ) { state = A_CmsWorkplaceApp . addParamToState ( state , FOLDER , settings . getPaths ( ) . get ( 0 ) ) ; } state = A_CmsWorkplaceApp . addParamToState ( state , RESOURCE_TYPE , settings . getTypes ( ) ) ; state = A_CmsWorkplaceApp . addParamToState ( state , LOCALE , settings . getLocale ( ) ) ; state = A_CmsWorkplaceApp . addParamToState ( state , QUERY , settings . getQuery ( ) ) ; state = A_CmsWorkplaceApp . addParamToState ( state , INDEX , settings . getSource ( ) ) ; state = A_CmsWorkplaceApp . addParamToState ( state , XPATH , settings . getXpath ( ) ) ; state = A_CmsWorkplaceApp . addParamToState ( state , IGNORE_SUBSITES , String . valueOf ( settings . ignoreSubSites ( ) ) ) ; state = A_CmsWorkplaceApp . addParamToState ( state , PROPERTY , settings . getProperty ( ) . getName ( ) ) ; return state ; |
public class InputParallelismUpdateMarshaller {
    /**
     * Marshall the given parameter object.
     *
     * @param inputParallelismUpdate the object to marshall; must not be null
     * @param protocolMarshaller the marshaller each member is emitted through
     * @throws SdkClientException if the argument is null or marshalling fails
     */
    public void marshall(InputParallelismUpdate inputParallelismUpdate, ProtocolMarshaller protocolMarshaller) {
        if (inputParallelismUpdate == null) {
            throw new SdkClientException("Invalid argument passed to marshall(...)");
        }
        try {
            // Generated (AWS SDK) marshaller: emit the single member binding.
            protocolMarshaller.marshall(inputParallelismUpdate.getCountUpdate(), COUNTUPDATE_BINDING);
        } catch (Exception e) {
            // Wrap any failure in the SDK's client exception, preserving the cause.
            throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
        }
    }
}
public class IntegerExtensions { /** * The bitwise exclusive < code > or < / code > operation . This is the equivalent to the java < code > ^ < / code > operator .
* @ param a
* an integer .
* @ param b
* an integer .
* @ return < code > a ^ b < / code > */
@ Pure @ Inline ( value = "($1 ^ $2)" , constantExpression = true ) public static int bitwiseXor ( int a , int b ) { } } | return a ^ b ; |
public class Iterators { /** * Returns the elements in the base iterator until it hits any element that doesn ' t satisfy the filter .
* Then the rest of the elements in the base iterator gets ignored .
* @ since 1.485 */
public static < T > Iterator < T > limit ( final Iterator < ? extends T > base , final CountingPredicate < ? super T > filter ) { } } | return new Iterator < T > ( ) { private T next ; private boolean end ; private int index = 0 ; public boolean hasNext ( ) { fetch ( ) ; return next != null ; } public T next ( ) { fetch ( ) ; T r = next ; next = null ; return r ; } private void fetch ( ) { if ( next == null && ! end ) { if ( base . hasNext ( ) ) { next = base . next ( ) ; if ( ! filter . apply ( index ++ , next ) ) { next = null ; end = true ; } } else { end = true ; } } } public void remove ( ) { throw new UnsupportedOperationException ( ) ; } } ; |
public class AmazonConnectClient { /** * Retrieves the contact attributes associated with a contact .
* @ param getContactAttributesRequest
* @ return Result of the GetContactAttributes operation returned by the service .
* @ throws InvalidRequestException
* The request is not valid .
* @ throws ResourceNotFoundException
* The specified resource was not found .
* @ throws InternalServiceException
* Request processing failed due to an error or failure with the service .
* @ sample AmazonConnect . GetContactAttributes
* @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / connect - 2017-08-08 / GetContactAttributes " target = " _ top " > AWS
* API Documentation < / a > */
@ Override public GetContactAttributesResult getContactAttributes ( GetContactAttributesRequest request ) { } } | request = beforeClientExecution ( request ) ; return executeGetContactAttributes ( request ) ; |
public class Webhook { /** * all */
public static WebhookCollection all ( ) throws EasyPostException { } } | Map < String , Object > params = new HashMap < String , Object > ( ) ; return all ( params , null ) ; |
public class MetadataHandler {

    /**
     * Return a list of all REST API Routes and a Markdown Table of Contents.
     *
     * <p>Populates {@code docs.routes} with one {@code RouteV3} per registered route and
     * {@code docs.markdown} with a Markdown table summarizing them.
     *
     * @param version the API version used when fetching per-route metadata
     * @param docs the schema object to fill in and return
     * @return the {@code docs} argument, populated
     */
    @SuppressWarnings("unused") // called through reflection by RequestServer
    public MetadataV3 listRoutes(int version, MetadataV3 docs) {
        MarkdownBuilder builder = new MarkdownBuilder();
        builder.comment("Preview with http://jbt.github.io/markdown-editor");
        builder.heading1("REST API Routes Table of Contents");
        builder.hline();
        builder.tableHeader("HTTP method", "URI pattern", "Input schema", "Output schema", "Summary");
        docs.routes = new RouteV3[RequestServer.numRoutes()];
        int i = 0;
        for (Route route : RequestServer.routes()) {
            RouteV3 schema = new RouteV3(route);
            docs.routes[i] = schema;
            // ModelBuilder input/output schema hackery: fetch the route's own metadata to
            // recover the resolved input/output schema names and copy them onto this entry.
            MetadataV3 look = new MetadataV3();
            look.routes = new RouteV3[1];
            look.routes[0] = schema;
            look.path = route._url;
            look.http_method = route._http_method;
            fetchRoute(version, look);
            schema.input_schema = look.routes[0].input_schema;
            schema.output_schema = look.routes[0].output_schema;
            builder.tableRow(route._http_method, route._url, Handler.getHandlerMethodInputSchema(route._handler_method).getSimpleName(), Handler.getHandlerMethodOutputSchema(route._handler_method).getSimpleName(), route._summary);
            i++;
        }
        docs.markdown = builder.toString();
        return docs;
    }
}
public class LinearClassifier { /** * Returns of the score of the Datum as internalized features for the
* specified label . Ignores the true label of the Datum .
* Doesn ' t consider a value for each feature . */
private double scoreOf ( int [ ] feats , L label ) { } } | int iLabel = labelIndex . indexOf ( label ) ; double score = 0.0 ; for ( int feat : feats ) { score += weight ( feat , iLabel ) ; } return score + thresholds [ iLabel ] ; |
public class IndexVectorUtil { /** * Loads a mapping from word to { @ link TernaryVector } from the file */
@ SuppressWarnings ( "unchecked" ) public static Map < String , TernaryVector > load ( File indexVectorFile ) { } } | try { FileInputStream fis = new FileInputStream ( indexVectorFile ) ; ObjectInputStream inStream = new ObjectInputStream ( fis ) ; Map < String , TernaryVector > vectorMap = ( Map < String , TernaryVector > ) inStream . readObject ( ) ; inStream . close ( ) ; return vectorMap ; } catch ( IOException ioe ) { throw new IOError ( ioe ) ; } catch ( ClassNotFoundException cnfe ) { throw new Error ( cnfe ) ; } |
public class Asset {

    /**
     * Create a AssetUpdater to execute update.
     *
     * @param pathServiceSid The service_sid
     * @param pathSid The sid
     * @param friendlyName The friendly_name
     * @return AssetUpdater capable of executing the update
     */
    public static AssetUpdater updater(final String pathServiceSid, final String pathSid, final String friendlyName) {
        // Static factory: construction only — no request is issued until the updater is executed.
        return new AssetUpdater(pathServiceSid, pathSid, friendlyName);
    }
}
public class InternalSimpleAntlrParser {

    /**
     * InternalSimpleAntlr.g:317:1: entryRuleRule returns [EObject current=null] : iv_ruleRule= ruleRule EOF ;
     *
     * <p>NOTE: generated ANTLR parser code — regenerate from the grammar file
     * rather than hand-editing this method.
     */
    public final EObject entryRuleRule() throws RecognitionException {
        EObject current = null;
        EObject iv_ruleRule = null;
        try {
            // InternalSimpleAntlr.g:318:2: ( iv_ruleRule= ruleRule EOF )
            // InternalSimpleAntlr.g:319:2: iv_ruleRule= ruleRule EOF
            {
                if (state.backtracking == 0) {
                    newCompositeNode(grammarAccess.getRuleRule());
                }
                pushFollow(FOLLOW_1);
                iv_ruleRule = ruleRule();
                state._fsp--;
                if (state.failed) return current;
                if (state.backtracking == 0) {
                    current = iv_ruleRule;
                }
                match(input, EOF, FOLLOW_2);
                if (state.failed) return current;
            }
        } catch (RecognitionException re) {
            // Standard ANTLR error recovery: resynchronize and keep going.
            recover(input, re);
            appendSkippedTokens();
        } finally {
        }
        return current;
    }
}
public class SelectBooleanCheckboxRenderer {

    /**
     * This methods receives and processes input made by the user. More
     * specifically, it ckecks whether the user has interacted with the current
     * b:selectBooleanCheckbox. The default implementation simply stores the
     * input value in the list of submitted values. If the validation checks are
     * passed, the values in the <code>submittedValues</code> list are store in
     * the backend bean.
     *
     * @param context
     *            the FacesContext.
     * @param component
     *            the current b:selectBooleanCheckbox.
     */
    @Override
    public void decode(FacesContext context, UIComponent component) {
        SelectBooleanCheckbox selectBooleanCheckbox = (SelectBooleanCheckbox) component;
        // Disabled/readonly components never submit a value; nothing to decode.
        if (selectBooleanCheckbox.isDisabled() || selectBooleanCheckbox.isReadonly()) {
            return;
        }
        decodeBehaviors(context, selectBooleanCheckbox); // moved to
        // AJAXRenderer
        String clientId = selectBooleanCheckbox.getClientId(context);
        String submittedValue = context.getExternalContext().getRequestParameterMap().get(clientId);
        if (submittedValue != null) {
            // A checked checkbox posts "on"; anything else counts as false.
            selectBooleanCheckbox.setSubmittedValue("on".equals(submittedValue));
        } else if (context.getExternalContext().getRequestParameterMap().containsKey(clientId + "_helper")) {
            // The "_helper" parameter presumably comes from a hidden companion input so an
            // unchecked box is still detectable — TODO confirm against the encoder.
            selectBooleanCheckbox.setSubmittedValue(false);
        }
        if (Boolean.FALSE.equals(selectBooleanCheckbox.getSubmittedValue()) && selectBooleanCheckbox.isRequired()) {
            // Required checkbox left unchecked: raise a validation error.
            String userDefinedMessage = selectBooleanCheckbox.getRequiredMessage();
            if (null != userDefinedMessage) {
                FacesMessages.error(clientId, userDefinedMessage, userDefinedMessage);
            } else {
                // Fall back to label, then caption, then the client id for the message text.
                String label = selectBooleanCheckbox.getLabel();
                if (label == null || label.isEmpty()) {
                    label = selectBooleanCheckbox.getCaption();
                    if (label == null || label.isEmpty()) {
                        label = clientId;
                    }
                }
                FacesMessages.createErrorMessageFromResourceBundle(clientId, "javax.faces.component.UIInput.REQUIRED", label);
            }
            selectBooleanCheckbox.setValid(false);
        } else {
            new AJAXRenderer().decode(context, component, "input_" + clientId);
        }
    }
}
public class ConBox {

    /**
     * Makes sure that the PhysicalEntity is controlling more reactions than it participates
     * (excluding complex assembly).
     *
     * @return non-generative constraint
     */
    public static Constraint moreControllerThanParticipant() {
        return new ConstraintAdapter(1) {
            // Conversions this entity participates in.
            PathAccessor partConv = new PathAccessor("PhysicalEntity/participantOf:Conversion");
            // Complex assemblies are excluded from the participation count below.
            PathAccessor partCompAss = new PathAccessor("PhysicalEntity/participantOf:ComplexAssembly");
            // Conversions this entity controls (transitively through controlled*).
            PathAccessor effects = new PathAccessor("PhysicalEntity/controllerOf/controlled*:Conversion");

            @Override
            public boolean satisfies(Match match, int... ind) {
                PhysicalEntity pe = (PhysicalEntity) match.get(ind[0]);
                int partCnvCnt = partConv.getValueFromBean(pe).size();
                int partCACnt = partCompAss.getValueFromBean(pe).size();
                int effCnt = effects.getValueFromBean(pe).size();
                // Participation excluding complex assemblies must not exceed controlled reactions.
                return (partCnvCnt - partCACnt) <= effCnt;
            }
        };
    }
}
public class Computer { /** * Returns the { @ link Node } that this computer represents .
* @ return
* null if the configuration has changed and the node is removed , yet the corresponding { @ link Computer }
* is not yet gone . */
@ CheckForNull public Node getNode ( ) { } } | Jenkins j = Jenkins . getInstanceOrNull ( ) ; // TODO confirm safe to assume non - null and use getInstance ( )
if ( j == null ) { return null ; } if ( nodeName == null ) { return j ; } return j . getNode ( nodeName ) ; |
public class ESRIFileUtil { /** * Translate a M - coordinate from Java standard
* to ESRI standard .
* @ param measure is the Java z - coordinate
* @ return the ESRI m - coordinate */
@ Pure public static double toESRI_m ( double measure ) { } } | if ( Double . isInfinite ( measure ) || Double . isNaN ( measure ) ) { return ESRI_NAN ; } return measure ; |
public class IdentityT {

    /**
     * {@inheritDoc}
     * <p>
     * Delegates to the default {@link MonadT} implementation and coerces the
     * result back to the concrete {@code IdentityT} type.
     */
    @Override
    public <B> IdentityT<M, B> discardL(Applicative<B, MonadT<M, Identity<?>, ?>> appB) {
        return MonadT.super.discardL(appB).coerce();
    }
}
public class AmazonKinesisFirehoseToRedshiftSample { /** * Method to create delivery stream with Redshift destination configuration .
* @ throws Exception */
private static void createDeliveryStream ( ) throws Exception { } } | boolean deliveryStreamExists = false ; LOG . info ( "Checking if " + deliveryStreamName + " already exits" ) ; List < String > deliveryStreamNames = listDeliveryStreams ( ) ; if ( deliveryStreamNames != null && deliveryStreamNames . contains ( deliveryStreamName ) ) { deliveryStreamExists = true ; LOG . info ( "DeliveryStream " + deliveryStreamName + " already exists. Not creating the new delivery stream" ) ; } else { LOG . info ( "DeliveryStream " + deliveryStreamName + " does not exist" ) ; } if ( ! deliveryStreamExists ) { // Create DeliveryStream
CreateDeliveryStreamRequest createDeliveryStreamRequest = new CreateDeliveryStreamRequest ( ) ; createDeliveryStreamRequest . setDeliveryStreamName ( deliveryStreamName ) ; S3DestinationConfiguration redshiftS3Configuration = new S3DestinationConfiguration ( ) ; redshiftS3Configuration . setBucketARN ( s3BucketARN ) ; redshiftS3Configuration . setPrefix ( s3ObjectPrefix ) ; BufferingHints bufferingHints = null ; if ( s3DestinationSizeInMBs != null || s3DestinationIntervalInSeconds != null ) { bufferingHints = new BufferingHints ( ) ; bufferingHints . setSizeInMBs ( s3DestinationSizeInMBs ) ; bufferingHints . setIntervalInSeconds ( s3DestinationIntervalInSeconds ) ; } redshiftS3Configuration . setBufferingHints ( bufferingHints ) ; // Create and set IAM role so that firehose service has access to the S3Buckets to put data .
// Please check the trustPolicyDocument . json and permissionsPolicyDocument . json files
// for the trust and permissions policies set for the role .
String iamRoleArn = createIamRole ( s3ObjectPrefix ) ; redshiftS3Configuration . setRoleARN ( iamRoleArn ) ; CopyCommand copyCommand = new CopyCommand ( ) ; copyCommand . withCopyOptions ( copyOptions ) . withDataTableName ( dataTableName ) ; RedshiftDestinationConfiguration redshiftDestinationConfiguration = new RedshiftDestinationConfiguration ( ) ; redshiftDestinationConfiguration . withClusterJDBCURL ( clusterJDBCUrl ) . withRoleARN ( iamRoleArn ) . withUsername ( username ) . withPassword ( password ) . withCopyCommand ( copyCommand ) . withS3Configuration ( redshiftS3Configuration ) ; createDeliveryStreamRequest . setRedshiftDestinationConfiguration ( redshiftDestinationConfiguration ) ; firehoseClient . createDeliveryStream ( createDeliveryStreamRequest ) ; // The Delivery Stream is now being created .
LOG . info ( "Creating DeliveryStream : " + deliveryStreamName ) ; waitForDeliveryStreamToBecomeAvailable ( deliveryStreamName ) ; } |
public class IoUtil { /** * 将Reader中的内容复制到Writer中
* @ param reader Reader
* @ param writer Writer
* @ param bufferSize 缓存大小
* @ param streamProgress 进度处理器
* @ return 传输的byte数
* @ throws IORuntimeException IO异常 */
public static long copy ( Reader reader , Writer writer , int bufferSize , StreamProgress streamProgress ) throws IORuntimeException { } } | char [ ] buffer = new char [ bufferSize ] ; long size = 0 ; int readSize ; if ( null != streamProgress ) { streamProgress . start ( ) ; } try { while ( ( readSize = reader . read ( buffer , 0 , bufferSize ) ) != EOF ) { writer . write ( buffer , 0 , readSize ) ; size += readSize ; writer . flush ( ) ; if ( null != streamProgress ) { streamProgress . progress ( size ) ; } } } catch ( Exception e ) { throw new IORuntimeException ( e ) ; } if ( null != streamProgress ) { streamProgress . finish ( ) ; } return size ; |
public class CmsSite {

    /**
     * Sets the parameters.<p>
     *
     * @param parameters the parameters to set; defensively copied, so later
     *      changes by the caller do not affect this site
     */
    public void setParameters(SortedMap<String, String> parameters) {
        // Copy into a fresh TreeMap so this site owns its own sorted map.
        m_parameters = new TreeMap<String, String>(parameters);
    }
}
public class JsonWriter {

    /**
     * Writes an array with the given key name.
     *
     * @param key the key name for the array
     * @param array the array to be written; must not be null
     * @return This structured writer
     * @throws IOException Something went wrong writing
     */
    protected JsonWriter property(String key, Object[] array) throws IOException {
        // Delegate to the List overload; Arrays.asList is a lightweight view, no copy.
        return property(key, Arrays.asList(array));
    }
}
public class BaseRequest { /** * Send this resource request asynchronously , with the given byte array as the request body .
* If the Content - Type header was not previously set , this method will set it to " application / octet - stream " .
* @ param data The byte array to put in the request body
* @ param listener The listener whose onSuccess or onFailure methods will be called when this request finishes */
protected void send ( byte [ ] data , ResponseListener listener ) { } } | String contentTypeHeader = headers . get ( CONTENT_TYPE ) ; final String contentType = contentTypeHeader != null ? contentTypeHeader : BINARY_CONTENT_TYPE ; RequestBody body = RequestBody . create ( MediaType . parse ( contentType ) , data ) ; sendRequest ( null , listener , body ) ; |
public class PeerNode {

    /**
     * documentation inherited from interface ClientObserver
     *
     * <p>Tears down this peer connection: reports all of the peer's clients as
     * logged off, releases its locks, detaches the listener and clears state so
     * a repeated call becomes a no-op.
     */
    public void clientDidLogoff(Client client) {
        // Nothing to clean up if we never obtained this peer's node object.
        if (nodeobj == null) {
            return;
        }
        String nodeName = getNodeName();
        // Report every client on the departed node as logged off.
        for (ClientInfo clinfo : nodeobj.clients) {
            _peermgr.clientLoggedOff(nodeName, clinfo);
        }
        // Release all distributed locks held by the departed node.
        for (NodeObject.Lock lock : nodeobj.locks) {
            _peermgr.peerRemovedLock(nodeName, lock);
        }
        nodeobj.removeListener(_listener);
        _peermgr.disconnectedFromPeer(this);
        // Drop references last so the cleanup above can still use them.
        _listener = null;
        nodeobj = null;
    }
}
public class CuratorZookeeperClient {

    /**
     * This method blocks until the connection to ZK succeeds. Use with caution. The block
     * will timeout after the connection timeout (as passed to the constructor) has elapsed
     *
     * @return true if the connection succeeded, false if not
     * @throws InterruptedException interrupted while waiting
     * @throws IllegalStateException if the client has not been started
     */
    public boolean blockUntilConnectedOrTimedOut() throws InterruptedException {
        Preconditions.checkState(started.get(), "Client is not started");
        log.debug("blockUntilConnectedOrTimedOut() start");
        // Trace the wait so slow connection establishment shows up in metrics.
        OperationTrace trace = startAdvancedTracer("blockUntilConnectedOrTimedOut");
        internalBlockUntilConnectedOrTimedOut();
        trace.commit();
        // Re-check the state after waiting; the wait may have ended by timeout.
        boolean localIsConnected = state.isConnected();
        log.debug("blockUntilConnectedOrTimedOut() end. isConnected: " + localIsConnected);
        return localIsConnected;
    }
}
public class LinkGenerator { /** * Returns the first item .
* @ return first item
* @ throws IOException { @ link IOException } */
private byte [ ] getFirstItem ( ) throws IOException { } } | ByteArrayOutputStream outStream = new ByteArrayOutputStream ( ) ; int [ ] firstItem = { 0x1F , 0x50 , 0xE0 , 0x4F , 0xD0 , 0x20 , 0xEA , 0x3A , 0x69 , 0x10 , 0xA2 , 0xD8 , 0x08 , 0x00 , 0x2B , 0x30 , 0x30 , 0x9D , } ; writeInts ( firstItem , outStream ) ; return outStream . toByteArray ( ) ; |
public class SysUtils { /** * Close a closable ignoring any exceptions .
* This method is used during cleanup , or in a finally block .
* @ param closeable source or destination of data can be closed */
public static void closeIgnoringExceptions ( AutoCloseable closeable ) { } } | if ( closeable != null ) { try { closeable . close ( ) ; // Suppress it since we ignore any exceptions
// SUPPRESS CHECKSTYLE IllegalCatch
} catch ( Exception e ) { // Still log the Exception for issue tracing
LOG . log ( Level . WARNING , String . format ( "Failed to close %s" , closeable ) , e ) ; } } |
public class ExecutorFactoryConfigurationBuilder {

    /**
     * Add key/value property pair to this executor factory configuration.
     *
     * @param key property key
     * @param value property value
     * @return this ExecutorFactoryConfig
     */
    public ExecutorFactoryConfigurationBuilder addProperty(String key, String value) {
        // Mutates the PROPERTIES attribute's map in place; builder is returned for chaining.
        attributes.attribute(PROPERTIES).get().put(key, value);
        return this;
    }
}
public class LiveReloadServer {

    /**
     * Trigger livereload of all connected clients.
     */
    public void triggerReload() {
        // Lock ordering: the server monitor first, then the connection list.
        synchronized (this.monitor) {
            synchronized (this.connections) {
                for (Connection connection : this.connections) {
                    try {
                        connection.triggerReload();
                    } catch (Exception ex) {
                        // One broken client must not prevent reloading the others.
                        logger.debug("Unable to send reload message", ex);
                    }
                }
            }
        }
    }
}
public class EntityPathTracker { /** * Returns the current entity path with the given literal name appended .
* @ param literalName the literal name to append to the current entity path .
* Must not be null .
* @ return the current entity path with the literal name appended . The { @ link
* # getEntitySeparator ( ) } is used to separate both unless no entity was
* received yet in which case only the literal name is returned . */
public String getCurrentPathWith ( final String literalName ) { } } | if ( entityStack . size ( ) == 0 ) { return literalName ; } return getCurrentPath ( ) + entitySeparator + literalName ; |
public class FunctionRegistry {

    /**
     * Registers a function by a name other than its default name.
     *
     * @param name the registry name; must not be null
     * @param function the function to register under {@code name}
     */
    static public void putFunction(String name, Function function) {
        // Fail fast on a null name; a null key would corrupt lookups later.
        FunctionRegistry.functions.put(Objects.requireNonNull(name), function);
    }
}
public class CveDB { /** * Analyzes the description to determine if the vulnerability / software is
* for a specific known ecosystem . The ecosystem can be used later for
* filtering CPE matches .
* @ param description the description to analyze
* @ return the ecosystem if one could be identified ; otherwise
* < code > null < / code > */
private String determineBaseEcosystem ( String description ) { } } | if ( description == null ) { return null ; } int idx = StringUtils . indexOfIgnoreCase ( description , ".php" ) ; if ( ( idx > 0 && ( idx + 4 == description . length ( ) || ! Character . isLetterOrDigit ( description . charAt ( idx + 4 ) ) ) ) || StringUtils . containsIgnoreCase ( description , "wordpress" ) || StringUtils . containsIgnoreCase ( description , "drupal" ) || StringUtils . containsIgnoreCase ( description , "joomla" ) || StringUtils . containsIgnoreCase ( description , "moodle" ) || StringUtils . containsIgnoreCase ( description , "typo3" ) ) { return ComposerLockAnalyzer . DEPENDENCY_ECOSYSTEM ; } if ( StringUtils . containsIgnoreCase ( description , " npm " ) || StringUtils . containsIgnoreCase ( description , " node.js" ) ) { return AbstractNpmAnalyzer . NPM_DEPENDENCY_ECOSYSTEM ; } idx = StringUtils . indexOfIgnoreCase ( description , ".pm" ) ; if ( idx > 0 && ( idx + 3 == description . length ( ) || ! Character . isLetterOrDigit ( description . charAt ( idx + 3 ) ) ) ) { return "perl" ; } else { idx = StringUtils . indexOfIgnoreCase ( description , ".pl" ) ; if ( idx > 0 && ( idx + 3 == description . length ( ) || ! Character . isLetterOrDigit ( description . charAt ( idx + 3 ) ) ) ) { return "perl" ; } } idx = StringUtils . indexOfIgnoreCase ( description , ".java" ) ; if ( idx > 0 && ( idx + 5 == description . length ( ) || ! Character . isLetterOrDigit ( description . charAt ( idx + 5 ) ) ) ) { return JarAnalyzer . DEPENDENCY_ECOSYSTEM ; } else { idx = StringUtils . indexOfIgnoreCase ( description , ".jsp" ) ; if ( idx > 0 && ( idx + 4 == description . length ( ) || ! Character . isLetterOrDigit ( description . charAt ( idx + 4 ) ) ) ) { return JarAnalyzer . DEPENDENCY_ECOSYSTEM ; } } if ( StringUtils . containsIgnoreCase ( description , " grails " ) ) { return JarAnalyzer . DEPENDENCY_ECOSYSTEM ; } idx = StringUtils . 
indexOfIgnoreCase ( description , ".rb" ) ; if ( idx > 0 && ( idx + 3 == description . length ( ) || ! Character . isLetterOrDigit ( description . charAt ( idx + 3 ) ) ) ) { return RubyBundleAuditAnalyzer . DEPENDENCY_ECOSYSTEM ; } if ( StringUtils . containsIgnoreCase ( description , "ruby gem" ) ) { return RubyBundleAuditAnalyzer . DEPENDENCY_ECOSYSTEM ; } idx = StringUtils . indexOfIgnoreCase ( description , ".py" ) ; if ( ( idx > 0 && ( idx + 3 == description . length ( ) || ! Character . isLetterOrDigit ( description . charAt ( idx + 3 ) ) ) ) || StringUtils . containsIgnoreCase ( description , "django" ) ) { return PythonPackageAnalyzer . DEPENDENCY_ECOSYSTEM ; } if ( StringUtils . containsIgnoreCase ( description , "buffer overflow" ) && ! StringUtils . containsIgnoreCase ( description , "android" ) ) { return CMakeAnalyzer . DEPENDENCY_ECOSYSTEM ; } idx = StringUtils . indexOfIgnoreCase ( description , ".c" ) ; if ( idx > 0 && ( idx + 2 == description . length ( ) || ! Character . isLetterOrDigit ( description . charAt ( idx + 2 ) ) ) ) { return CMakeAnalyzer . DEPENDENCY_ECOSYSTEM ; } else { idx = StringUtils . indexOfIgnoreCase ( description , ".cpp" ) ; if ( idx > 0 && ( idx + 4 == description . length ( ) || ! Character . isLetterOrDigit ( description . charAt ( idx + 4 ) ) ) ) { return CMakeAnalyzer . DEPENDENCY_ECOSYSTEM ; } else { idx = StringUtils . indexOfIgnoreCase ( description , ".h" ) ; if ( idx > 0 && ( idx + 2 == description . length ( ) || ! Character . isLetterOrDigit ( description . charAt ( idx + 2 ) ) ) ) { return CMakeAnalyzer . DEPENDENCY_ECOSYSTEM ; } } } return null ; |
public class TinylogLoggingProvider { /** * Creates a writing thread for a matrix of writers .
* @ param matrix
* All writers
* @ return Initialized and running writhing thread */
private static WritingThread createWritingThread ( final Collection < Writer > [ ] [ ] matrix ) { } } | Collection < Writer > writers = getAllWriters ( matrix ) ; WritingThread thread = new WritingThread ( writers ) ; thread . start ( ) ; return thread ; |
public class EntityUrl { /** * Get Resource Url for GetEntity
* @ param entityListFullName The full name of the EntityList including namespace in name @ nameSpace format
* @ param id Unique identifier of the customer segment to retrieve .
* @ param responseFields Filtering syntax appended to an API call to increase or decrease the amount of data returned inside a JSON object . This parameter should only be used to retrieve data . Attempting to update data using this parameter may cause data loss .
* @ return String Resource Url */
public static MozuUrl getEntityUrl ( String entityListFullName , String id , String responseFields ) { } } | UrlFormatter formatter = new UrlFormatter ( "/api/platform/entitylists/{entityListFullName}/entities/{id}?responseFields={responseFields}" ) ; formatter . formatUrl ( "entityListFullName" , entityListFullName ) ; formatter . formatUrl ( "id" , id ) ; formatter . formatUrl ( "responseFields" , responseFields ) ; return new MozuUrl ( formatter . getResourceUrl ( ) , MozuUrl . UrlLocation . TENANT_POD ) ; |
public class Record {

    /**
     * Updates the binary representation of the data, such that it reflects the state of the currently
     * stored fields. If the binary representation is already up to date, nothing happens. Otherwise,
     * this function triggers the modified fields to serialize themselves into the records buffer and
     * afterwards updates the offset table.
     */
    public void updateBinaryRepresenation() {
        // check whether the binary state is in sync
        final int firstModified = this.firstModifiedPos;
        if (firstModified == Integer.MAX_VALUE) {
            return;
        }
        final InternalDeSerializer serializer = this.serializer;
        final int[] offsets = this.offsets;
        final int numFields = this.numFields;
        // Reuse the spare buffer when available; otherwise size by the old binary length
        // or a rough per-field estimate.
        serializer.memory = this.switchBuffer != null ? this.switchBuffer :
                (this.binaryLen > 0 ? new byte[this.binaryLen] : new byte[numFields * DEFAULT_FIELD_LEN_ESTIMATE + 1]);
        serializer.position = 0;
        if (numFields > 0) {
            int offset = 0;
            // search backwards to find the latest preceding non-null field
            if (firstModified > 0) {
                for (int i = firstModified - 1; i >= 0; i--) {
                    if (this.offsets[i] != NULL_INDICATOR_OFFSET) {
                        offset = this.offsets[i] + this.lengths[i];
                        break;
                    }
                }
            }
            // we assume that changed and unchanged fields are interleaved and serialize into another array
            try {
                if (offset > 0) {
                    // copy the first unchanged portion as one
                    serializer.write(this.binaryData, 0, offset);
                }
                // copy field by field
                for (int i = firstModified; i < numFields; i++) {
                    final int co = offsets[i];
                    // skip null fields
                    if (co == NULL_INDICATOR_OFFSET) {
                        continue;
                    }
                    offsets[i] = offset;
                    if (co == MODIFIED_INDICATOR_OFFSET) {
                        final Value writeField = this.writeFields[i];
                        if (writeField == RESERVE_SPACE) {
                            // RESERVE_SPACE is a placeholder indicating lengths[i] bytes should be reserved
                            final int length = this.lengths[i];
                            if (serializer.position >= serializer.memory.length - length - 1) {
                                serializer.resize(length);
                            }
                            serializer.position += length;
                        } else {
                            // serialize modified fields
                            this.writeFields[i].write(serializer);
                        }
                    } else {
                        // bin-copy unmodified fields
                        serializer.write(this.binaryData, co, this.lengths[i]);
                    }
                    this.lengths[i] = serializer.position - offset;
                    offset = serializer.position;
                }
            } catch (Exception e) {
                throw new RuntimeException("Error in data type serialization: " + e.getMessage(), e);
            }
        }
        serializeHeader(serializer, offsets, numFields);
        // set the fields: keep the old buffer as the spare, install the new one,
        // and mark the record as fully in sync.
        this.switchBuffer = this.binaryData;
        this.binaryData = serializer.memory;
        this.binaryLen = serializer.position;
        this.firstModifiedPos = Integer.MAX_VALUE;
    }
}
public class SizeBoundedQueue { /** * Removes and returns the first element or null if the queue is empty */
public T remove ( ) { } } | lock . lock ( ) ; try { if ( queue . isEmpty ( ) ) return null ; El < T > el = queue . poll ( ) ; count -= el . size ; not_full . signalAll ( ) ; return el . el ; } finally { lock . unlock ( ) ; } |
public class ConsumerDispatcher {

    /**
     * Attach a new ConsumerPoint to this ConsumerDispatcher. A ConsumerKey
     * object is created for this ConsumerPoint which contains various pieces of
     * state information including the consumer point's ready state, which is
     * initially set to not ready. Also included is a getCursor to be used by
     * the consumer point to access this ConsumerDispatcher's item or reference
     * stream.
     *
     * @param consumerPoint The consumer point being attached
     * @param criteria The selection criteria (selector/discriminator) the consumer has specified
     * @param connectionUuid The connections UUID
     * @param readAhead If the consumer can read ahead
     * @param forwardScanning If the consumer is forward scanning
     * @param consumerSet The consumer set this consumer belongs to
     * @return The ConsumerKey object which was created for this consumer point.
     * @throws SINotPossibleInCurrentConfigurationException if the destination is being deleted
     * @throws SIDestinationLockedException if exclusivity/ordering rules forbid attaching
     */
    @Override
    public ConsumerKey attachConsumerPoint(ConsumerPoint consumerPoint, SelectionCriteria criteria, SIBUuid12 connectionUuid, boolean readAhead, boolean forwardScanning, JSConsumerSet consumerSet) throws SINotPossibleInCurrentConfigurationException, SIDestinationLockedException, SISelectorSyntaxException, SIDiscriminatorSyntaxException, SIResourceException {
        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
            SibTr.entry(tc, "attachConsumerPoint", new Object[] { consumerPoint, criteria, connectionUuid, Boolean.valueOf(readAhead), consumerSet });
        DispatchableConsumerPoint dispatchableConsumerPoint = (DispatchableConsumerPoint) consumerPoint;
        ConsumerKey consumerKey = null;
        // All attach-time validation and registration happens under the consumerPoints lock.
        synchronized (consumerPoints) {
            // Check if the destination has been deleted.
            if ((dispatchableConsumerPoint.getNamedDestination(this).isToBeDeleted()) || (dispatchableConsumerPoint.getNamedDestination(this).isDeleted())) {
                if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
                    SibTr.exit(tc, "attachConsumerPoint", "destination deleted");
                throw new SINotPossibleInCurrentConfigurationException(
                        nls.getFormattedMessage("DESTINATION_DELETED_ERROR_CWSIP0111",
                                new Object[] { _baseDestHandler.getName(), _messageProcessor.getMessagingEngineName() }, null));
            }
            // Two types of exclusive check here:
            // 1) ReceiveExclusive when another consumer is already on Queue
            // 2) Durable when we're pubsub
            // 3) Ordered messaging enabled
            if (!_isPubSub) {
                if (consumerPoints.size() > 0) {
                    if (dispatchableConsumerPoint.getNamedDestination(this).isReceiveExclusive()) {
                        SIDestinationLockedException e = new SIDestinationLockedException(
                                nls.getFormattedMessage("DESTINATION_RECEIVE_EXCLUSIVE_CWSIP0114",
                                        new Object[] { _baseDestHandler.getName(), _messageProcessor.getMessagingEngineName() }, null));
                        SibTr.exception(tc, e);
                        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
                            SibTr.exit(tc, "attachConsumerPoint", "Destination receive exclusive");
                        throw e;
                    }
                    // If this is an ordered consumer and we already have other consumer, reject it
                    if (!dispatchableConsumerPoint.ignoreInitialIndoubts()) {
                        SIDestinationLockedException e = new SIDestinationLockedException(
                                nls.getFormattedMessage("TEMPORARY_CWSIP9999",
                                        new Object[] { _baseDestHandler.getName(), _messageProcessor.getMessagingEngineName() }, null));
                        SibTr.exception(tc, e);
                        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
                            SibTr.exit(tc, "attachConsumerPoint", "Destination locked due to indoubt messages");
                        throw e;
                    } else
                    // If this ia a normal consumer but we have an ordered consumer attached, reject the consumer
                    if (!consumerPoints.get(0).getConsumerPoint().ignoreInitialIndoubts()) {
                        SIDestinationLockedException e = new SIDestinationLockedException(
                                nls.getFormattedMessage("TEMPORARY_CWSIP9999", // Should be CWSIP0999??
                                        new Object[] { _baseDestHandler.getName(), _messageProcessor.getMessagingEngineName() }, null));
                        SibTr.exception(tc, e);
                        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
                            SibTr.exit(tc, "attachConsumerPoint", "Destination locked for ordering");
                        throw e;
                    }
                }
            } else if (isDurable() && !dispatcherState.isCloned() && consumerPoints.size() > 0) {
                if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
                    SibTr.exit(tc, "attachConsumerPoint", "SIDestinationLockedException");
                // NOTE: this is a bit of an abuse of the locked exception since
                // we'd rather throw SIDurableSubscriptionLockedException.
                // We actually only see this case for remote durable when an
                // existing AOH handles an attempt to attach to an in-use
                // durable sub. In this case, the AOH sends a cardinality error
                // which gets wrapped appropriately at the remote requester.
                throw new SIDestinationLockedException(
                        nls.getFormattedMessage("SUBSCRIPTION_IN_USE_ERROR_CWSIP0152",
                                new Object[] { dispatcherState.getSubscriberID(), _messageProcessor.getMessagingEngineName() }, null));
            }
            if (_baseDestHandler.isOrdered()) {
                if (consumerPoints.size() > 0) {
                    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
                        SibTr.exit(tc, "attachConsumerPoint", new Object[] { "SIDestinationLockedException", consumerPoints.size() });
                    // NOTE: this is a bit of an abuse of the locked exception since
                    // we'd rather throw an ordered message specific exception.
                    // We only allow one consumer to attach when ordered messaging is
                    // specified.
                    throw new SIDestinationLockedException(
                            nls.getFormattedMessage("ORDERED_DESTINATION_IN_USE_CWSIP0116",
                                    new Object[] { _baseDestHandler.getName(), _messageProcessor.getMessagingEngineName() }, null));
                } else if (!isNewTransactionAllowed(null)) {
                    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
                        SibTr.exit(tc, "attachConsumerPoint", new Object[] { "SIDestinationLockedException", "!isTransactionAllowed(null)" });
                    throw new SIDestinationLockedException(
                            nls.getFormattedMessage("ORDERED_MESSAGING_ERROR_CWSIP0194",
                                    new Object[] { _baseDestHandler.getName(), _messageProcessor.getMessagingEngineName() }, null));
                }
            }
            // create a new ConsumerKey object and add it in to the list of
            // atatched consumer points
            consumerKey = createConsumerKey(dispatchableConsumerPoint, criteria, connectionUuid, readAhead, forwardScanning, consumerSet);
            // Add the CP to the list irrespective of whether p2p or pubsub, we use the array
            // to keep track of whether any CPs are attached.
            consumerPoints.add((DispatchableKey) consumerKey);
        }
        // if we're a pt-pt destination
        if (!_isPubSub) {
            // Store the CP in the MatchSpace
            _baseDestHandler.addConsumerPointMatchTarget((DispatchableKey) consumerKey, getUuid(), // uuid of the ConsumerDispatcher
                    criteria);
            // 594730: Now that we've added the conusmer's selector to the matchspace
            // we know that any search performed by internalPut will pick this consumer
            // up, so we update the specificConsumerVersion now to ensure that we re-run the
            // search. Otherwise there's a chance that a previous search (when this consumer
            // wasn't quite registered) will appear to still be valid, resulting in the
            // consumer not being found and the message not being delivered to it.
            if (((DispatchableKey) consumerKey).isSpecific()) {
                synchronized (_baseDestHandler.getReadyConsumerPointLock()) {
                    specificConsumerVersion++;
                }
            }
        }
        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
            SibTr.exit(tc, "attachConsumerPoint", consumerKey);
        return consumerKey;
    }
}
public class WebSocketNode { /** * 广播消息 , 给所有人发消息
* @ param wsrange 过滤条件
* @ param convert Convert
* @ param message0 消息内容
* @ param last 是否最后一条
* @ return 为0表示成功 , 其他值表示部分发送异常 */
@ Local public CompletableFuture < Integer > broadcastMessage ( final WebSocketRange wsrange , final Convert convert , final Object message0 , final boolean last ) { } } | if ( message0 instanceof CompletableFuture ) return ( ( CompletableFuture ) message0 ) . thenApply ( msg -> broadcastMessage ( wsrange , convert , msg , last ) ) ; final Object message = ( convert == null || message0 instanceof WebSocketPacket ) ? message0 : ( ( convert instanceof TextConvert ) ? new WebSocketPacket ( ( ( TextConvert ) convert ) . convertTo ( message0 ) , last ) : new WebSocketPacket ( ( ( BinaryConvert ) convert ) . convertTo ( message0 ) , last ) ) ; if ( this . localEngine != null && this . sncpNodeAddresses == null ) { // 本地模式且没有分布式
return this . localEngine . broadcastLocalMessage ( wsrange , message , last ) ; } final Object remoteMessage = formatRemoteMessage ( message ) ; CompletableFuture < Integer > localFuture = this . localEngine == null ? null : this . localEngine . broadcastLocalMessage ( wsrange , message , last ) ; tryAcquireSemaphore ( ) ; CompletableFuture < Collection < InetSocketAddress > > addrsFuture = sncpNodeAddresses . getCollectionAsync ( SOURCE_SNCP_ADDRS_KEY , InetSocketAddress . class ) ; if ( semaphore != null ) addrsFuture . whenComplete ( ( r , e ) -> releaseSemaphore ( ) ) ; CompletableFuture < Integer > remoteFuture = addrsFuture . thenCompose ( ( Collection < InetSocketAddress > addrs ) -> { if ( logger . isLoggable ( Level . FINEST ) ) logger . finest ( "websocket broadcast message (" + remoteMessage + ") on " + addrs ) ; if ( addrs == null || addrs . isEmpty ( ) ) return CompletableFuture . completedFuture ( 0 ) ; CompletableFuture < Integer > future = null ; for ( InetSocketAddress addr : addrs ) { if ( addr == null || addr . equals ( localSncpAddress ) ) continue ; future = future == null ? remoteNode . broadcastMessage ( addr , wsrange , remoteMessage , last ) : future . thenCombine ( remoteNode . broadcastMessage ( addr , wsrange , remoteMessage , last ) , ( a , b ) -> a | b ) ; } return future == null ? CompletableFuture . completedFuture ( 0 ) : future ; } ) ; return localFuture == null ? remoteFuture : localFuture . thenCombine ( remoteFuture , ( a , b ) -> a | b ) ; |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.