signature
stringlengths 43
39.1k
| implementation
stringlengths 0
450k
|
|---|---|
public class PathMerger { /** * This method iterates over all instructions and uses the available context to improve the instructions .
* If the requests contains a heading , this method can transform the first continue to a u - turn if the heading
* points into the opposite direction of the route .
* At a waypoint it can transform the continue to a u - turn if the route involves turning . */
private InstructionList updateInstructionsWithContext ( InstructionList instructions ) { } }
|
Instruction instruction ; Instruction nextInstruction ; for ( int i = 0 ; i < instructions . size ( ) - 1 ; i ++ ) { instruction = instructions . get ( i ) ; if ( i == 0 && ! Double . isNaN ( favoredHeading ) && instruction . extraInfo . containsKey ( "heading" ) ) { double heading = ( double ) instruction . extraInfo . get ( "heading" ) ; double diff = Math . abs ( heading - favoredHeading ) % 360 ; if ( diff > 170 && diff < 190 ) { // The requested heading points into the opposite direction of the calculated heading
// therefore we change the continue instruction to a u - turn
instruction . setSign ( Instruction . U_TURN_UNKNOWN ) ; } } if ( instruction . getSign ( ) == Instruction . REACHED_VIA ) { nextInstruction = instructions . get ( i + 1 ) ; if ( nextInstruction . getSign ( ) != Instruction . CONTINUE_ON_STREET || ! instruction . extraInfo . containsKey ( "last_heading" ) || ! nextInstruction . extraInfo . containsKey ( "heading" ) ) { // TODO throw exception ?
continue ; } double lastHeading = ( double ) instruction . extraInfo . get ( "last_heading" ) ; double heading = ( double ) nextInstruction . extraInfo . get ( "heading" ) ; // Since it ' s supposed to go back the same edge , we can be very strict with the diff
double diff = Math . abs ( lastHeading - heading ) % 360 ; if ( diff > 179 && diff < 181 ) { nextInstruction . setSign ( Instruction . U_TURN_UNKNOWN ) ; } } } return instructions ;
|
public class CliFrontend { /** * Creates a Packaged program from the given command line options .
* @ return A PackagedProgram ( upon success ) */
PackagedProgram buildProgram ( ProgramOptions options ) throws FileNotFoundException , ProgramInvocationException { } }
|
String [ ] programArgs = options . getProgramArgs ( ) ; String jarFilePath = options . getJarFilePath ( ) ; List < URL > classpaths = options . getClasspaths ( ) ; if ( jarFilePath == null ) { throw new IllegalArgumentException ( "The program JAR file was not specified." ) ; } File jarFile = new File ( jarFilePath ) ; // Check if JAR file exists
if ( ! jarFile . exists ( ) ) { throw new FileNotFoundException ( "JAR file does not exist: " + jarFile ) ; } else if ( ! jarFile . isFile ( ) ) { throw new FileNotFoundException ( "JAR file is not a file: " + jarFile ) ; } // Get assembler class
String entryPointClass = options . getEntryPointClassName ( ) ; PackagedProgram program = entryPointClass == null ? new PackagedProgram ( jarFile , classpaths , programArgs ) : new PackagedProgram ( jarFile , classpaths , entryPointClass , programArgs ) ; program . setSavepointRestoreSettings ( options . getSavepointRestoreSettings ( ) ) ; return program ;
|
public class SmilesValencyChecker { /** * Saturates a set of Bonds in an AtomContainer . */
public boolean saturate ( IBond [ ] bonds , IAtomContainer atomContainer ) throws CDKException { } }
|
logger . debug ( "Saturating bond set of size: " , bonds . length ) ; boolean bondsAreFullySaturated = false ; if ( bonds . length > 0 ) { IBond bond = bonds [ 0 ] ; // determine bonds left
int leftBondCount = bonds . length - 1 ; IBond [ ] leftBonds = new IBond [ leftBondCount ] ; System . arraycopy ( bonds , 1 , leftBonds , 0 , leftBondCount ) ; // examine this bond
logger . debug ( "Examining this bond: " , bond ) ; if ( isSaturated ( bond , atomContainer ) ) { logger . debug ( "OK, bond is saturated, now try to saturate remaining bonds (if needed)" ) ; bondsAreFullySaturated = saturate ( leftBonds , atomContainer ) ; } else if ( isUnsaturated ( bond , atomContainer ) ) { logger . debug ( "Ok, this bond is unsaturated, and can be saturated" ) ; // two options now :
// 1 . saturate this one directly
// 2 . saturate this one by saturating the rest
logger . debug ( "Option 1: Saturating this bond directly, then trying to saturate rest" ) ; // considering organic bonds , the max order is 3 , so increase twice
boolean bondOrderIncreased = saturateByIncreasingBondOrder ( bond , atomContainer ) ; bondsAreFullySaturated = bondOrderIncreased && saturate ( bonds , atomContainer ) ; if ( bondsAreFullySaturated ) { logger . debug ( "Option 1: worked" ) ; } else { logger . debug ( "Option 1: failed. Trying option 2." ) ; logger . debug ( "Option 2: Saturing this bond by saturating the rest" ) ; // revert the increase ( if succeeded ) , then saturate the rest
if ( bondOrderIncreased ) unsaturateByDecreasingBondOrder ( bond ) ; bondsAreFullySaturated = saturate ( leftBonds , atomContainer ) && isSaturated ( bond , atomContainer ) ; if ( ! bondsAreFullySaturated ) logger . debug ( "Option 2: failed" ) ; } } else { logger . debug ( "Ok, this bond is unsaturated, but cannot be saturated" ) ; // try recursing and see if that fixes things
bondsAreFullySaturated = saturate ( leftBonds , atomContainer ) && isSaturated ( bond , atomContainer ) ; } } else { bondsAreFullySaturated = true ; // empty is saturated by default
} return bondsAreFullySaturated ;
|
public class FXBinder { /** * Start point of the fluent API to create a binding .
* @ param writableDoubleValue the javafx property
* @ return binder that can be used by the fluent API to create binding . */
public static < T > JavaFXBinder < T > bind ( WritableValue < T > writableDoubleValue ) { } }
|
requireNonNull ( writableDoubleValue , "writableDoubleValue" ) ; return new DefaultJavaFXBinder ( writableDoubleValue ) ;
|
public class RemoveAttributesFromFindingsRequest { /** * The array of attribute keys that you want to remove from specified findings .
* < b > NOTE : < / b > This method appends the values to the existing list ( if any ) . Use
* { @ link # setAttributeKeys ( java . util . Collection ) } or { @ link # withAttributeKeys ( java . util . Collection ) } if you want
* to override the existing values .
* @ param attributeKeys
* The array of attribute keys that you want to remove from specified findings .
* @ return Returns a reference to this object so that method calls can be chained together . */
public RemoveAttributesFromFindingsRequest withAttributeKeys ( String ... attributeKeys ) { } }
|
if ( this . attributeKeys == null ) { setAttributeKeys ( new java . util . ArrayList < String > ( attributeKeys . length ) ) ; } for ( String ele : attributeKeys ) { this . attributeKeys . add ( ele ) ; } return this ;
|
public class XTryCatchFinallyExpressionImpl { /** * < ! - - begin - user - doc - - >
* < ! - - end - user - doc - - >
* @ generated */
@ Override public void eUnset ( int featureID ) { } }
|
switch ( featureID ) { case XbasePackage . XTRY_CATCH_FINALLY_EXPRESSION__EXPRESSION : setExpression ( ( XExpression ) null ) ; return ; case XbasePackage . XTRY_CATCH_FINALLY_EXPRESSION__FINALLY_EXPRESSION : setFinallyExpression ( ( XExpression ) null ) ; return ; case XbasePackage . XTRY_CATCH_FINALLY_EXPRESSION__CATCH_CLAUSES : getCatchClauses ( ) . clear ( ) ; return ; case XbasePackage . XTRY_CATCH_FINALLY_EXPRESSION__RESOURCES : getResources ( ) . clear ( ) ; return ; } super . eUnset ( featureID ) ;
|
public class MapsInner { /** * Gets a list of integration account maps .
* @ param nextPageLink The NextLink from the previous successful call to List operation .
* @ throws IllegalArgumentException thrown if parameters fail the validation
* @ return the observable to the PagedList & lt ; IntegrationAccountMapInner & gt ; object */
public Observable < Page < IntegrationAccountMapInner > > listByIntegrationAccountsNextAsync ( final String nextPageLink ) { } }
|
return listByIntegrationAccountsNextWithServiceResponseAsync ( nextPageLink ) . map ( new Func1 < ServiceResponse < Page < IntegrationAccountMapInner > > , Page < IntegrationAccountMapInner > > ( ) { @ Override public Page < IntegrationAccountMapInner > call ( ServiceResponse < Page < IntegrationAccountMapInner > > response ) { return response . body ( ) ; } } ) ;
|
public class OrganizationResource { /** * Remove an existing Corporate GroupId from an organization .
* @ return Response */
@ DELETE @ Path ( "/{name}" + ServerAPI . GET_CORPORATE_GROUPIDS ) public Response removeCorporateGroupIdPrefix ( @ Auth final DbCredential credential , @ PathParam ( "name" ) final String organizationId , final String corporateGroupId ) { } }
|
LOG . info ( "Got an remove a corporate groupId prefix request for organization " + organizationId + "." ) ; if ( ! credential . getRoles ( ) . contains ( DbCredential . AvailableRoles . DATA_UPDATER ) ) { throw new WebApplicationException ( Response . status ( Response . Status . UNAUTHORIZED ) . build ( ) ) ; } if ( corporateGroupId == null || corporateGroupId . isEmpty ( ) ) { LOG . error ( "No corporate GroupId to remove!" ) ; return Response . serverError ( ) . status ( HttpStatus . BAD_REQUEST_400 ) . build ( ) ; } getOrganizationHandler ( ) . removeCorporateGroupId ( organizationId , corporateGroupId ) ; return Response . ok ( "done" ) . build ( ) ;
|
public class VectorVectorMult_ZDRM { /** * Computes the inner product of the two vectors . In geometry this is known as the dot product . < br >
* < br >
* & sum ; < sub > k = 1 : n < / sub > x < sub > k < / sub > * y < sub > k < / sub > < br >
* where x and y are vectors with n elements .
* These functions are often used inside of highly optimized code and therefor sanity checks are
* kept to a minimum . It is not recommended that any of these functions be used directly .
* @ param x A vector with n elements . Not modified .
* @ param y A vector with n elements . Not modified .
* @ return The inner product of the two vectors . */
public static Complex_F64 innerProd ( ZMatrixRMaj x , ZMatrixRMaj y , Complex_F64 output ) { } }
|
if ( output == null ) output = new Complex_F64 ( ) ; else { output . real = output . imaginary = 0 ; } int m = x . getDataLength ( ) ; for ( int i = 0 ; i < m ; i += 2 ) { double realX = x . data [ i ] ; double imagX = x . data [ i + 1 ] ; double realY = y . data [ i ] ; double imagY = y . data [ i + 1 ] ; output . real += realX * realY - imagX * imagY ; output . imaginary += realX * imagY + imagX * realY ; } return output ;
|
public class Parser { void store ( final RECORD record , final String key , final String name , final Value value ) { } }
|
boolean calledASetter = false ; if ( value == null ) { LOG . error ( "Got a null value to store for key={} name={}." , key , name ) ; return ; // Nothing to do
} final Set < Pair < Method , SetterPolicy > > methodPairs = targets . get ( key ) ; if ( methodPairs == null ) { LOG . error ( "NO methods for key={} name={}." , key , name ) ; return ; } EnumSet < Casts > castsTo = castsOfTargets . get ( key ) ; if ( castsTo == null ) { castsTo = castsOfTargets . get ( name ) ; if ( castsTo == null ) { LOG . error ( "NO casts for \"{}\"" , name ) ; return ; } } for ( Pair < Method , SetterPolicy > methodPair : methodPairs ) { Method method = methodPair . getLeft ( ) ; if ( method != null ) { SetterPolicy setterPolicy = methodPair . getRight ( ) ; try { Class < ? > [ ] parameters = method . getParameterTypes ( ) ; Class < ? > valueClass = parameters [ parameters . length - 1 ] ; // Always the last one
if ( valueClass == String . class ) { if ( castsTo . contains ( Casts . STRING ) ) { String stringValue = value . getString ( ) ; if ( stringValue == null ) { if ( setterPolicy == NOT_NULL || setterPolicy == NOT_EMPTY ) { calledASetter = true ; continue ; } } else { if ( stringValue . isEmpty ( ) && setterPolicy == NOT_EMPTY ) { calledASetter = true ; continue ; } } if ( parameters . length == 2 ) { method . invoke ( record , name , stringValue ) ; } else { method . invoke ( record , stringValue ) ; } calledASetter = true ; } continue ; } if ( valueClass == Long . class ) { if ( castsTo . contains ( Casts . LONG ) ) { Long longValue = value . getLong ( ) ; if ( longValue == null && ( setterPolicy == NOT_NULL || setterPolicy == NOT_EMPTY ) ) { calledASetter = true ; continue ; } if ( parameters . length == 2 ) { method . invoke ( record , name , longValue ) ; } else { method . invoke ( record , longValue ) ; } calledASetter = true ; } continue ; } if ( valueClass == Double . class ) { if ( castsTo . contains ( Casts . DOUBLE ) ) { Double doubleValue = value . getDouble ( ) ; if ( doubleValue == null && ( setterPolicy == NOT_NULL || setterPolicy == NOT_EMPTY ) ) { calledASetter = true ; continue ; } if ( parameters . length == 2 ) { method . invoke ( record , name , doubleValue ) ; } else { method . invoke ( record , doubleValue ) ; } calledASetter = true ; } continue ; } throw new FatalErrorDuringCallOfSetterMethod ( "Tried to call setter with unsupported class :" + " key = \"" + key + "\" " + " name = \"" + name + "\" " + " value = \"" + value + "\"" + " castsTo = \"" + castsTo + "\"" ) ; } catch ( final Exception e ) { throw new FatalErrorDuringCallOfSetterMethod ( e . getMessage ( ) + " caused by \"" + e . getCause ( ) + "\" when calling \"" + method . toGenericString ( ) + "\" for " + " key = \"" + key + "\" " + " name = \"" + name + "\" " + " value = \"" + value + "\"" + " castsTo = \"" + castsTo + "\"" , e ) ; } } } if ( ! 
calledASetter ) { throw new FatalErrorDuringCallOfSetterMethod ( "No setter called for " + " key = \"" + key + "\" " + " name = \"" + name + "\" " + " value = \"" + value + "\"" ) ; }
|
public class HttpRequestUtil { /** * Simple validation , using java . net . InetAddress . getByName ( ) .
* @ param ip the IP address string to check
* @ return true for a valid IP address */
private static boolean isIPAdressValid ( final String ip ) { } }
|
// InetAddress . getByName ( ) validates ' null ' as a valid IP ( localhost ) .
// we do not want that
if ( hasValue ( ip ) ) { return IP_ADDR_PATTERN . matcher ( ip ) . matches ( ) ; } return false ;
|
public class Config { /** * Return a configuration value as list
* @ param key
* @ param c
* @ param < T >
* @ return the list */
public < T > List < T > getList ( AppConfigKey key , Class < T > c ) { } }
|
Object o = data . get ( key ) ; if ( null == o ) { List < T > l = key . implList ( key . key ( ) , raw , c ) ; data . put ( key , l ) ; return l ; } else { return ( List ) o ; }
|
public class ManagedDatabasesInner { /** * Creates a new database or updates an existing database .
* @ param resourceGroupName The name of the resource group that contains the resource . You can obtain this value from the Azure Resource Manager API or the portal .
* @ param managedInstanceName The name of the managed instance .
* @ param databaseName The name of the database .
* @ param parameters The requested database resource state .
* @ throws IllegalArgumentException thrown if parameters fail the validation
* @ return the observable to the ManagedDatabaseInner object */
public Observable < ManagedDatabaseInner > beginCreateOrUpdateAsync ( String resourceGroupName , String managedInstanceName , String databaseName , ManagedDatabaseInner parameters ) { } }
|
return beginCreateOrUpdateWithServiceResponseAsync ( resourceGroupName , managedInstanceName , databaseName , parameters ) . map ( new Func1 < ServiceResponse < ManagedDatabaseInner > , ManagedDatabaseInner > ( ) { @ Override public ManagedDatabaseInner call ( ServiceResponse < ManagedDatabaseInner > response ) { return response . body ( ) ; } } ) ;
|
public class AbstractTheme { /** * Returns a list of redundant theme entries in this theme . A redundant entry means that it doesn ' t need to be
* specified because there is a parent node in the hierarchy which has the same property so if the redundant entry
* wasn ' t there , the parent node would be picked up and the end result would be the same .
* @ return List of redundant theme entries */
public List < String > findRedundantDeclarations ( ) { } }
|
List < String > result = new ArrayList < String > ( ) ; for ( ThemeTreeNode node : rootNode . childMap . values ( ) ) { findRedundantDeclarations ( result , node ) ; } Collections . sort ( result ) ; return result ;
|
public class NumeratorSubstitution { /** * Dispatches to the inherited version of this function , but makes
* sure that lenientParse is off . */
public Number doParse ( String text , ParsePosition parsePosition , double baseValue , double upperBound , boolean lenientParse ) { } }
|
// we don ' t have to do anything special to do the parsing here ,
// but we have to turn lenient parsing off - - if we leave it on ,
// it SERIOUSLY messes up the algorithm
// if withZeros is true , we need to count the zeros
// and use that to adjust the parse result
int zeroCount = 0 ; if ( withZeros ) { String workText = text ; ParsePosition workPos = new ParsePosition ( 1 ) ; // int digit ;
while ( workText . length ( ) > 0 && workPos . getIndex ( ) != 0 ) { workPos . setIndex ( 0 ) ; /* digit = */
ruleSet . parse ( workText , workPos , 1 ) . intValue ( ) ; // parse zero or nothing at all
if ( workPos . getIndex ( ) == 0 ) { // we failed , either there were no more zeros , or the number was formatted with digits
// either way , we ' re done
break ; } ++ zeroCount ; parsePosition . setIndex ( parsePosition . getIndex ( ) + workPos . getIndex ( ) ) ; workText = workText . substring ( workPos . getIndex ( ) ) ; while ( workText . length ( ) > 0 && workText . charAt ( 0 ) == ' ' ) { workText = workText . substring ( 1 ) ; parsePosition . setIndex ( parsePosition . getIndex ( ) + 1 ) ; } } text = text . substring ( parsePosition . getIndex ( ) ) ; // arrgh !
parsePosition . setIndex ( 0 ) ; } // we ' ve parsed off the zeros , now let ' s parse the rest from our current position
Number result = super . doParse ( text , parsePosition , withZeros ? 1 : baseValue , upperBound , false ) ; if ( withZeros ) { // any base value will do in this case . is there a way to
// force this to not bother trying all the base values ?
// compute the ' effective ' base and prescale the value down
long n = result . longValue ( ) ; long d = 1 ; while ( d <= n ) { d *= 10 ; } // now add the zeros
while ( zeroCount > 0 ) { d *= 10 ; -- zeroCount ; } // d is now our true denominator
result = new Double ( n / ( double ) d ) ; } return result ;
|
public class QueryFactory { /** * Method declaration
* @ param classToSearchFrom
* @ param criteria
* @ return QueryByCriteria */
public static QueryByCriteria newQuery ( Class classToSearchFrom , Criteria criteria ) { } }
|
return newQuery ( classToSearchFrom , criteria , false ) ;
|
public class JobConf { /** * Compute the number of slots required to run a single map task - attempt
* of this job .
* @ param slotSizePerMap cluster - wide value of the amount of memory required
* to run a map - task
* @ return the number of slots required to run a single map task - attempt
* 1 if memory parameters are disabled . */
int computeNumSlotsPerMap ( long slotSizePerMap ) { } }
|
if ( ( slotSizePerMap == DISABLED_MEMORY_LIMIT ) || ( getMemoryForMapTask ( ) == DISABLED_MEMORY_LIMIT ) ) { return 1 ; } return ( int ) ( Math . ceil ( ( float ) getMemoryForMapTask ( ) / ( float ) slotSizePerMap ) ) ;
|
public class Attribute { /** * Prepares for given < code > _ values < / code > depending on this attribute the
* < code > _ update < / code > into the database .
* @ param _ update SQL update statement for related { @ link # sqlTable }
* @ param _ values values to update
* @ throws SQLException if values could not be inserted */
public void prepareDBUpdate ( final SQLUpdate _update , final Object ... _values ) throws SQLException { } }
|
Object [ ] tmp = _values ; try { final List < Return > returns = executeEvents ( EventType . UPDATE_VALUE , ParameterValues . CLASS , this , ParameterValues . OTHERS , _values ) ; for ( final Return aRet : returns ) { if ( aRet . contains ( ReturnValues . VALUES ) ) { tmp = ( Object [ ] ) aRet . get ( ReturnValues . VALUES ) ; } } } catch ( final EFapsException e ) { throw new SQLException ( e ) ; } this . attributeType . getDbAttrType ( ) . prepareUpdate ( _update , this , tmp ) ;
|
public class GetPlaybackConfigurationRequestMarshaller { /** * Marshall the given parameter object . */
public void marshall ( GetPlaybackConfigurationRequest getPlaybackConfigurationRequest , ProtocolMarshaller protocolMarshaller ) { } }
|
if ( getPlaybackConfigurationRequest == null ) { throw new SdkClientException ( "Invalid argument passed to marshall(...)" ) ; } try { protocolMarshaller . marshall ( getPlaybackConfigurationRequest . getName ( ) , NAME_BINDING ) ; } catch ( Exception e ) { throw new SdkClientException ( "Unable to marshall request to JSON: " + e . getMessage ( ) , e ) ; }
|
public class JobScheduler { /** * Run a job .
* This method runs the job immediately without going through the Quartz scheduler .
* This is particularly useful for testing .
* This method does what { @ link # runJob ( Properties , JobListener ) } does , and additionally it allows
* the caller to pass in a { @ link JobLauncher } instance used to launch the job to run .
* @ param jobProps Job configuration properties
* @ param jobListener { @ link JobListener } used for callback , can be < em > null < / em > if no callback is needed .
* @ param jobLauncher a { @ link JobLauncher } object used to launch the job to run
* @ return If current job is a stop - early job based on { @ link Source # isEarlyStopped ( ) }
* @ throws JobException when there is anything wrong with running the job */
public boolean runJob ( Properties jobProps , JobListener jobListener , JobLauncher jobLauncher ) throws JobException { } }
|
Preconditions . checkArgument ( jobProps . containsKey ( ConfigurationKeys . JOB_NAME_KEY ) , "A job must have a job name specified by job.name" ) ; String jobName = jobProps . getProperty ( ConfigurationKeys . JOB_NAME_KEY ) ; // Check if the job has been disabled
boolean disabled = Boolean . valueOf ( jobProps . getProperty ( ConfigurationKeys . JOB_DISABLED_KEY , "false" ) ) ; if ( disabled ) { LOG . info ( "Skipping disabled job " + jobName ) ; return false ; } // Launch the job
try ( Closer closer = Closer . create ( ) ) { closer . register ( jobLauncher ) . launchJob ( jobListener ) ; boolean runOnce = Boolean . valueOf ( jobProps . getProperty ( ConfigurationKeys . JOB_RUN_ONCE_KEY , "false" ) ) ; boolean isEarlyStopped = jobLauncher . isEarlyStopped ( ) ; if ( ! isEarlyStopped && runOnce && this . scheduledJobs . containsKey ( jobName ) ) { this . scheduler . getScheduler ( ) . deleteJob ( this . scheduledJobs . remove ( jobName ) ) ; } return isEarlyStopped ; } catch ( Throwable t ) { throw new JobException ( "Failed to launch and run job " + jobName , t ) ; }
|
public class SnackbarWrapper { /** * Set the icon at the start of the Snackbar . If there is no icon it will be added , or if there is then it will be
* replaced .
* @ param icon The icon drawable resource to display .
* @ return This instance . */
@ NonNull @ SuppressWarnings ( "WeakerAccess" ) public SnackbarWrapper setIcon ( @ DrawableRes int icon ) { } }
|
return setIcon ( ContextCompat . getDrawable ( context , icon ) ) ;
|
public class DataSynchronizer { /** * Update all documents in the collection according to the specified arguments .
* @ param filter a document describing the query filter , which may not be null .
* @ param update a document describing the update , which may not be null . The update to
* apply must include only update operators .
* @ param updateOptions the options to apply to the update operation
* @ return the result of the update many operation */
UpdateResult updateMany ( final MongoNamespace namespace , final Bson filter , final Bson update , final UpdateOptions updateOptions ) { } }
|
this . waitUntilInitialized ( ) ; ongoingOperationsGroup . enter ( ) ; try { final List < ChangeEvent < BsonDocument > > eventsToEmit = new ArrayList < > ( ) ; final UpdateResult result ; final NamespaceSynchronizationConfig nsConfig = this . syncConfig . getNamespaceConfig ( namespace ) ; final Lock lock = nsConfig . getLock ( ) . writeLock ( ) ; lock . lock ( ) ; try { // fetch all of the documents that this filter will match
final Map < BsonValue , BsonDocument > idToBeforeDocumentMap = new HashMap < > ( ) ; final BsonArray ids = new BsonArray ( ) ; final MongoCollection < BsonDocument > localCollection = getLocalCollection ( namespace ) ; final MongoCollection < BsonDocument > undoCollection = getUndoCollection ( namespace ) ; localCollection . find ( filter ) . forEach ( ( Block < BsonDocument > ) bsonDocument -> { final BsonValue documentId = BsonUtils . getDocumentId ( bsonDocument ) ; ids . add ( documentId ) ; idToBeforeDocumentMap . put ( documentId , bsonDocument ) ; undoCollection . insertOne ( bsonDocument ) ; } ) ; // use the matched ids from prior to create a new filter .
// this will prevent any race conditions if documents were
// inserted between the prior find
Bson updatedFilter = updateOptions . isUpsert ( ) ? filter : new BsonDocument ( "_id" , new BsonDocument ( "$in" , ids ) ) ; // do the bulk write
result = localCollection . updateMany ( updatedFilter , update , updateOptions ) ; // if this was an upsert , create the post - update filter using
// the upserted id .
if ( result . getUpsertedId ( ) != null ) { updatedFilter = getDocumentIdFilter ( result . getUpsertedId ( ) ) ; } // iterate over the after - update docs using the updated filter
localCollection . find ( updatedFilter ) . forEach ( ( Block < BsonDocument > ) unsanitizedAfterDocument -> { // get the id of the after - update document , and fetch the before - update
// document from the map we created from our pre - update ` find `
final BsonValue documentId = BsonUtils . getDocumentId ( unsanitizedAfterDocument ) ; final BsonDocument beforeDocument = idToBeforeDocumentMap . get ( documentId ) ; // if there was no before - update document and this was not an upsert ,
// a document that meets the filter criteria must have been
// inserted or upserted asynchronously between this find and the update .
if ( beforeDocument == null && ! updateOptions . isUpsert ( ) ) { return ; } // Ensure that the update didn ' t add any forbidden fields to the document , and remove
// them if it did .
final BsonDocument afterDocument = sanitizeCachedDocument ( localCollection , unsanitizedAfterDocument , documentId ) ; // because we are looking up a bulk write , we may have queried documents
// that match the updated state , but were not actually modified .
// if the document before the update is the same as the updated doc ,
// assume it was not modified and take no further action
if ( afterDocument . equals ( beforeDocument ) ) { undoCollection . deleteOne ( getDocumentIdFilter ( documentId ) ) ; return ; } final CoreDocumentSynchronizationConfig config ; final ChangeEvent < BsonDocument > event ; // if there was no earlier document and this was an upsert ,
// treat the upsert as an insert , as far as sync is concerned
// else treat it as a standard update
if ( beforeDocument == null && updateOptions . isUpsert ( ) ) { config = syncConfig . addAndGetSynchronizedDocument ( namespace , documentId ) ; event = ChangeEvents . changeEventForLocalInsert ( namespace , afterDocument , true ) ; } else { config = syncConfig . getSynchronizedDocument ( namespace , documentId ) ; event = ChangeEvents . changeEventForLocalUpdate ( namespace , documentId , UpdateDescription . diff ( beforeDocument , afterDocument ) , afterDocument , true ) ; } config . setSomePendingWritesAndSave ( logicalT , event ) ; undoCollection . deleteOne ( getDocumentIdFilter ( documentId ) ) ; eventsToEmit . add ( event ) ; } ) ; } finally { lock . unlock ( ) ; } if ( result . getUpsertedId ( ) != null ) { checkAndInsertNamespaceListener ( namespace ) ; } for ( final ChangeEvent < BsonDocument > event : eventsToEmit ) { eventDispatcher . emitEvent ( nsConfig , event ) ; } return result ; } finally { ongoingOperationsGroup . exit ( ) ; }
|
public class MenusSession { /** * Override this to do an action sent from the client .
* @ param strCommand The command to execute
* @ param properties The properties for the command
* @ returns Object Return a Boolean . TRUE for success , Boolean . FALSE for failure . */
public Object doRemoteCommand ( String strCommand , Map < String , Object > properties ) throws RemoteException , DBException { } }
|
Map < String , Object > propMenu = properties ; // I NOW for a fact this is a Properties object .
if ( propMenu == null ) propMenu = new Hashtable < String , Object > ( ) ; if ( strCommand != null ) if ( strCommand . length ( ) > 0 ) if ( strCommand . indexOf ( '=' ) == - 1 ) strCommand = DBParams . MENU + '=' + strCommand ; // If no param specified , it is a menu =
Utility . parseArgs ( propMenu , strCommand ) ; String strMenu = ( String ) propMenu . get ( DBParams . MENU ) ; if ( ( strMenu == null ) || ( strMenu . length ( ) == 0 ) ) strMenu = this . getProperty ( DBParams . MENU ) ; if ( ( strMenu == null ) || ( strMenu . length ( ) == 0 ) ) strMenu = this . getProperty ( DBParams . HOME ) ; if ( ( strMenu == null ) || ( strMenu . length ( ) == 0 ) ) strMenu = HtmlConstants . MAIN_MENU_KEY ; if ( strMenu != null ) { this . setupSubMenus ( strMenu ) ; return Boolean . TRUE ; } return super . doRemoteCommand ( strCommand , properties ) ;
|
public class CmsJspStandardContextBean { /** * Gets a map providing access to the locale variants of the current page . < p >
* Note that all available locales for the site / subsite are used as keys , not just the ones for which a locale
* variant actually exists .
* Usage in JSPs : $ { cms . localeResource [ ' de ' ] ]
* @ return the map from locale strings to locale variant resources */
public Map < String , CmsJspResourceWrapper > getLocaleResource ( ) { } }
|
Map < String , CmsJspResourceWrapper > result = getPageResource ( ) . getLocaleResource ( ) ; List < Locale > locales = CmsLocaleGroupService . getPossibleLocales ( m_cms , getPageResource ( ) ) ; for ( Locale locale : locales ) { if ( ! result . containsKey ( locale . toString ( ) ) ) { result . put ( locale . toString ( ) , null ) ; } } return result ;
|
public class Call { /** * Performs a get http call and writes the call and response information to
* the output file
* @ param endpoint - the endpoint of the service under test
* @ param params - the parameters to be passed to the endpoint for the service
* call
* @ return Response : the response provided from the http call */
public Response get ( String endpoint , Request params ) { } }
|
return call ( Method . GET , endpoint , params , null ) ;
|
public class CommandBusOnClient { /** * Executes the request to the remote url */
private < T > T execute ( String url , Class < T > returnType , Object requestObject ) throws Exception { } }
|
URLConnection connection = new URL ( url ) . openConnection ( ) ; if ( ! ( connection instanceof HttpURLConnection ) ) { throw new IllegalStateException ( "Not an http connection! " + connection ) ; } HttpURLConnection httpConnection = ( HttpURLConnection ) connection ; httpConnection . setUseCaches ( false ) ; httpConnection . setDefaultUseCaches ( false ) ; httpConnection . setDoInput ( true ) ; /* * With followRedirects enabled , simple URL redirects work as expected . But with port redirects ( http - > https )
* followRedirects doesn ' t work and a HTTP 302 code is returned instead ( ARQ - 1365 ) .
* In order to handle all redirects in one place , followRedirects is set to false and all HTTP 302 response codes are
* treated accordingly within the execute method . */
httpConnection . setInstanceFollowRedirects ( false ) ; try { if ( requestObject != null ) { httpConnection . setRequestMethod ( "POST" ) ; httpConnection . setDoOutput ( true ) ; httpConnection . setRequestProperty ( "Content-Type" , "application/octet-stream" ) ; } if ( requestObject != null ) { ObjectOutputStream ous = new ObjectOutputStream ( httpConnection . getOutputStream ( ) ) ; try { ous . writeObject ( requestObject ) ; } catch ( Exception e ) { throw new RuntimeException ( "Error sending request Object, " + requestObject , e ) ; } finally { ous . flush ( ) ; ous . close ( ) ; } } try { httpConnection . getResponseCode ( ) ; } catch ( ConnectException e ) { return null ; // Could not connect
} if ( httpConnection . getResponseCode ( ) == HttpURLConnection . HTTP_OK ) { ObjectInputStream ois = new ObjectInputStream ( httpConnection . getInputStream ( ) ) ; Object o ; try { o = ois . readObject ( ) ; } finally { ois . close ( ) ; } if ( ! returnType . isInstance ( o ) ) { throw new IllegalStateException ( "Error reading results, expected a " + returnType . getName ( ) + " but got " + o ) ; } return returnType . cast ( o ) ; } else if ( httpConnection . getResponseCode ( ) == HttpURLConnection . HTTP_NO_CONTENT ) { return null ; } else if ( httpConnection . getResponseCode ( ) == HttpURLConnection . HTTP_MOVED_TEMP ) { String redirectUrl = httpConnection . getHeaderField ( "Location" ) ; return execute ( redirectUrl , returnType , requestObject ) ; } else if ( httpConnection . getResponseCode ( ) != HttpURLConnection . HTTP_NOT_FOUND ) { throw new IllegalStateException ( "Error launching test at " + url + ". " + "Got " + httpConnection . getResponseCode ( ) + " (" + httpConnection . getResponseMessage ( ) + ")" ) ; } } finally { httpConnection . disconnect ( ) ; } return null ;
|
public class OjbTagsHandler { /** * Processes a procedure tag .
* @ param template The template
* @ param attributes The attributes of the tag
* @ exception XDocletException If an error occurs
* @ doc . tag type = " content "
* @ doc . param name = " arguments " optional = " true " description = " The arguments of the procedure as a comma - separated
* list of names of procedure attribute tags "
* @ doc . param name = " attributes " optional = " true " description = " Attributes of the procedure as name - value pairs ' name = value ' ,
* separated by commas "
* @ doc . param name = " documentation " optional = " true " description = " Documentation on the procedure "
* @ doc . param name = " include - all - fields " optional = " true " description = " For insert / update : whether all fields of the current
* class shall be included ( arguments is ignored then ) " values = " true , false "
* @ doc . param name = " include - pk - only " optional = " true " description = " For delete : whether all primary key fields
* shall be included ( arguments is ignored then ) " values = " true , false "
* @ doc . param name = " name " optional = " false " description = " The name of the procedure "
* @ doc . param name = " return - field - ref " optional = " true " description = " Identifies the field that receives the return value "
* @ doc . param name = " type " optional = " false " description = " The type of the procedure " values = " delete , insert , update " */
public String processProcedure ( Properties attributes ) throws XDocletException { } }
|
String type = attributes . getProperty ( ATTRIBUTE_TYPE ) ; ProcedureDef procDef = _curClassDef . getProcedure ( type ) ; String attrName ; if ( procDef == null ) { procDef = new ProcedureDef ( type ) ; _curClassDef . addProcedure ( procDef ) ; } for ( Enumeration attrNames = attributes . propertyNames ( ) ; attrNames . hasMoreElements ( ) ; ) { attrName = ( String ) attrNames . nextElement ( ) ; procDef . setProperty ( attrName , attributes . getProperty ( attrName ) ) ; } return "" ;
|
public class ContainersInner { /** * Get the logs for a specified container instance .
* Get the logs for a specified container instance in a specified resource group and container group .
* @ param resourceGroupName The name of the resource group .
* @ param containerGroupName The name of the container group .
* @ param containerName The name of the container instance .
* @ throws IllegalArgumentException thrown if parameters fail the validation
* @ return the observable to the LogsInner object */
public Observable < LogsInner > listLogsAsync ( String resourceGroupName , String containerGroupName , String containerName ) { } }
|
return listLogsWithServiceResponseAsync ( resourceGroupName , containerGroupName , containerName ) . map ( new Func1 < ServiceResponse < LogsInner > , LogsInner > ( ) { @ Override public LogsInner call ( ServiceResponse < LogsInner > response ) { return response . body ( ) ; } } ) ;
|
public class CodingUtil { /** * AES加密 */
private static SecretKey getKey ( String strKey ) { } }
|
try { KeyGenerator _generator = KeyGenerator . getInstance ( AES ) ; SecureRandom secureRandom = SecureRandom . getInstance ( "SHA1PRNG" ) ; secureRandom . setSeed ( strKey . getBytes ( ) ) ; _generator . init ( 128 , secureRandom ) ; return _generator . generateKey ( ) ; } catch ( Exception e ) { throw new RuntimeException ( " 初始化密钥出现异常 " ) ; }
|
public class SnapshotUtil { /** * Returns a detailed report and a boolean indicating whether the snapshot can be successfully loaded .
 * Verifies that all digest files agree on the table list , that the hash configuration is present ,
 * that the total partition count is uniform across all table files , and that every table listed in
 * the digests has a complete , uncorrupted partition set ; any failed check marks the snapshot as
 * not restorable . The implementation supports disabling the hashinator check , e . g . for old snapshots in tests .
 * @ param snapshotTxnId transaction id of the snapshot , used for the report header and timestamp
 * @ param snapshot collected digest and table - file information for the snapshot
 * @ return Pair of ( true if a restore is guaranteed to succeed with this file set , human - readable report text ) */
public static Pair < Boolean , String > generateSnapshotReport ( Long snapshotTxnId , Snapshot snapshot ) { } }
|
CharArrayWriter caw = new CharArrayWriter ( ) ; PrintWriter pw = new PrintWriter ( caw ) ; boolean snapshotConsistent = true ; String indentString = "" ; pw . println ( indentString + "TxnId: " + snapshotTxnId ) ; pw . println ( indentString + "Date: " + new Date ( org . voltcore . TransactionIdManager . getTimestampFromTransactionId ( snapshotTxnId ) ) ) ; pw . println ( indentString + "Digests:" ) ; indentString = "\t" ; TreeSet < String > digestTablesSeen = new TreeSet < String > ( ) ; if ( snapshot . m_digests . isEmpty ( ) ) { pw . println ( indentString + "No snapshot related digests files found." ) ; snapshotConsistent = false ; } else { boolean inconsistent = false ; /* * Iterate over the digests and ensure that they all contain the same list of tables */
Map < Integer , List < Integer > > inconsistentDigests = new HashMap < Integer , List < Integer > > ( ) ; for ( int ii = 0 ; ii < snapshot . m_digests . size ( ) ; ii ++ ) { inconsistentDigests . put ( ii , new ArrayList < Integer > ( ) ) ; Set < String > tables = snapshot . m_digestTables . get ( ii ) ; for ( int zz = 0 ; zz < snapshot . m_digests . size ( ) ; zz ++ ) { if ( zz == ii ) { continue ; } if ( ! tables . equals ( snapshot . m_digestTables . get ( zz ) ) ) { snapshotConsistent = false ; inconsistent = true ; inconsistentDigests . get ( ii ) . add ( zz ) ; } } } /* * Summarize what was inconsistent / consistent */
if ( ! inconsistent ) { for ( int ii = 0 ; ii < snapshot . m_digests . size ( ) ; ii ++ ) { pw . println ( indentString + snapshot . m_digests . get ( ii ) . getPath ( ) ) ; } } else { pw . println ( indentString + "Not all digests are consistent" ) ; indentString = indentString + "\t" ; for ( Map . Entry < Integer , List < Integer > > entry : inconsistentDigests . entrySet ( ) ) { File left = snapshot . m_digests . get ( entry . getKey ( ) ) ; pw . println ( indentString + left . getPath ( ) + " is inconsistent with:" ) ; indentString = indentString + "\t" ; for ( Integer id : entry . getValue ( ) ) { File right = snapshot . m_digests . get ( id ) ; pw . println ( indentString + right . getPath ( ) ) ; } indentString = indentString . substring ( 1 ) ; } } /* * Print the list of tables found in the digests */
indentString = indentString . substring ( 1 ) ; pw . print ( indentString + "Tables: " ) ; int ii = 0 ; for ( int jj = 0 ; jj < snapshot . m_digestTables . size ( ) ; jj ++ ) { for ( String table : snapshot . m_digestTables . get ( jj ) ) { digestTablesSeen . add ( table ) ; } } for ( String table : digestTablesSeen ) { if ( ii != 0 ) { pw . print ( ", " ) ; } ii ++ ; pw . print ( table ) ; } pw . print ( "\n" ) ; } /* * Check the hash data ( if expected ) . */
pw . print ( indentString + "Hash configuration: " ) ; if ( snapshot . m_hashConfig != null ) { pw . println ( indentString + "present" ) ; } else { pw . println ( indentString + "not present" ) ; snapshotConsistent = false ; } /* * Check that the total partition count is the same in every table file */
Integer totalPartitionCount = null ; indentString = indentString + "\t" ; for ( Map . Entry < String , TableFiles > entry : snapshot . m_tableFiles . entrySet ( ) ) { if ( entry . getValue ( ) . m_isReplicated ) { continue ; } for ( Integer partitionCount : entry . getValue ( ) . m_totalPartitionCounts ) { if ( totalPartitionCount == null ) { totalPartitionCount = partitionCount ; } else if ( ! totalPartitionCount . equals ( partitionCount ) ) { snapshotConsistent = false ; pw . println ( indentString + "Partition count is not consistent throughout snapshot files for " + entry . getKey ( ) + ". Saw " + partitionCount + " and " + totalPartitionCount ) ; } } } /* * Now check that each individual table has enough information to be restored .
 * It is possible for a valid partition set to be available and still have a restore
 * fail because the restore plan loads a save file with a corrupt partition . */
TreeSet < String > consistentTablesSeen = new TreeSet < String > ( ) ; for ( Map . Entry < String , TableFiles > entry : snapshot . m_tableFiles . entrySet ( ) ) { TableFiles tableFiles = entry . getValue ( ) ; /* * Calculate the set of visible partitions not corrupted partitions */
TreeSet < Integer > partitionsAvailable = new TreeSet < Integer > ( ) ; int kk = 0 ; for ( Set < Integer > validPartitionIds : tableFiles . m_validPartitionIds ) { if ( tableFiles . m_completed . get ( kk ++ ) ) { partitionsAvailable . addAll ( validPartitionIds ) ; } } /* * Ensure the correct range of partition ids is present */
boolean partitionsPresent = false ; if ( ( partitionsAvailable . size ( ) == ( tableFiles . m_isReplicated ? 1 : totalPartitionCount ) ) && ( partitionsAvailable . first ( ) == 0 ) && ( partitionsAvailable . last ( ) == ( tableFiles . m_isReplicated ? 1 : totalPartitionCount ) - 1 ) ) { partitionsPresent = true ; } /* * Report if any of the files have corrupt partitions */
boolean hasCorruptPartitions = false ; for ( Set < Integer > corruptIds : tableFiles . m_corruptParititionIds ) { if ( ! corruptIds . isEmpty ( ) ) { hasCorruptPartitions = true ; snapshotConsistent = false ; } } pw . println ( indentString + "Table name: " + entry . getKey ( ) ) ; indentString = indentString + "\t" ; pw . println ( indentString + "Replicated: " + entry . getValue ( ) . m_isReplicated ) ; pw . println ( indentString + "Valid partition set available: " + partitionsPresent ) ; pw . println ( indentString + "Corrupt partitions present: " + hasCorruptPartitions ) ; /* * Print information about individual files such as the partitions present and whether
 * they are corrupted */
pw . println ( indentString + "Files: " ) ; indentString = indentString + "\t" ; for ( int ii = 0 ; ii < tableFiles . m_files . size ( ) ; ii ++ ) { String corruptPartitionIdString = "" ; int zz = 0 ; for ( Integer partitionId : tableFiles . m_corruptParititionIds . get ( ii ) ) { if ( zz != 0 ) { corruptPartitionIdString = corruptPartitionIdString + ", " ; } zz ++ ; corruptPartitionIdString = corruptPartitionIdString + partitionId ; } String validPartitionIdString = "" ; zz = 0 ; for ( Integer partitionId : tableFiles . m_validPartitionIds . get ( ii ) ) { if ( zz != 0 ) { validPartitionIdString = validPartitionIdString + ", " ; } zz ++ ; validPartitionIdString = validPartitionIdString + partitionId ; } if ( corruptPartitionIdString . isEmpty ( ) ) { consistentTablesSeen . add ( entry . getKey ( ) ) ; pw . println ( indentString + tableFiles . m_files . get ( ii ) . getPath ( ) + " Completed: " + tableFiles . m_completed . get ( ii ) + " Partitions: " + validPartitionIdString ) ; } else { pw . println ( indentString + tableFiles . m_files . get ( ii ) . getPath ( ) + " Completed: " + tableFiles . m_completed . get ( ii ) + " Valid Partitions: " + validPartitionIdString + " Corrupt Partitions: " + corruptPartitionIdString ) ; } } indentString = indentString . substring ( 2 ) ; } indentString = indentString . substring ( 1 ) ; StringBuilder missingTables = new StringBuilder ( 8192 ) ; if ( ! consistentTablesSeen . containsAll ( digestTablesSeen ) ) { snapshotConsistent = false ; missingTables . append ( "Missing tables: " ) ; Set < String > missingTablesSet = new TreeSet < String > ( digestTablesSeen ) ; missingTablesSet . removeAll ( consistentTablesSeen ) ; int hh = 0 ; for ( String tableName : missingTablesSet ) { if ( hh > 0 ) { missingTables . append ( ", " ) ; } missingTables . append ( tableName ) ; hh ++ ; } missingTables . append ( '\n' ) ; } /* * Tack on a summary at the beginning to indicate whether a restore is guaranteed to succeed
 * with this file set . */
if ( snapshotConsistent ) { return Pair . of ( true , "Snapshot valid\n" + caw . toString ( ) ) ; } else { StringBuilder sb = new StringBuilder ( 8192 ) ; sb . append ( "Snapshot corrupted\n" ) . append ( missingTables ) . append ( caw . toCharArray ( ) ) ; return Pair . of ( false , sb . toString ( ) ) ; }
|
public class JMSConnectionFactoryResourceBuilder { /** * Utility method that creates a unique identifier for an application defined data source .
* For example ,
* application [ MyApp ] / module [ MyModule ] / connectionFactory [ java : module / env / jdbc / cf1]
* @ param application application name if data source is in java : app , java : module , or java : comp . Otherwise null .
* @ param module module name if data source is in java : module or java : comp . Otherwise null .
* @ param component component name if data source is in java : comp and isn ' t in web container . Otherwise null .
* @ param jndiName configured JNDI name for the data source . For example , java : module / env / jca / cf1
* @ return the unique identifier */
private static final String getConnectionFactoryID ( String application , String module , String component , String jndiName ) { } }
|
StringBuilder sb = new StringBuilder ( jndiName . length ( ) + 80 ) ; if ( application != null ) { sb . append ( AppDefinedResource . APPLICATION ) . append ( '[' ) . append ( application ) . append ( ']' ) . append ( '/' ) ; if ( module != null ) { sb . append ( AppDefinedResource . MODULE ) . append ( '[' ) . append ( module ) . append ( ']' ) . append ( '/' ) ; if ( component != null ) sb . append ( AppDefinedResource . COMPONENT ) . append ( '[' ) . append ( component ) . append ( ']' ) . append ( '/' ) ; } } return sb . append ( ConnectionFactoryService . CONNECTION_FACTORY ) . append ( '[' ) . append ( jndiName ) . append ( ']' ) . toString ( ) ;
|
public class Util { /** * Returns true if an HTTP request for { @ code a } and { @ code b } can reuse a connection . */
public static boolean sameConnection ( HttpUrl a , HttpUrl b ) { } }
|
return a . host ( ) . equals ( b . host ( ) ) && a . port ( ) == b . port ( ) && a . scheme ( ) . equals ( b . scheme ( ) ) ;
|
public class RandomAccessFastaIndex { /** * Index fasta file or loads previously created index
* @ param file file to index
* @ param indexStep index step
* @ param save whether to save index to { input _ file _ name } . mifdx file
* @ param reporter reporter
* @ return index */
public static RandomAccessFastaIndex index ( Path file , int indexStep , boolean save , LongProcessReporter reporter ) { } }
|
Path indexFile = file . resolveSibling ( file . getFileName ( ) + INDEX_SUFFIX ) ; if ( Files . exists ( indexFile ) ) try ( FileInputStream fis = new FileInputStream ( indexFile . toFile ( ) ) ) { RandomAccessFastaIndex index = RandomAccessFastaIndex . read ( new BufferedInputStream ( fis ) ) ; if ( index . getIndexStep ( ) != indexStep ) throw new IllegalArgumentException ( "Mismatched index step in " + indexFile + ". Remove the file to recreate the index." ) ; return index ; } catch ( IOException e ) { throw new RuntimeException ( e ) ; } try ( LongProcess lp = reporter . start ( "Indexing " + file . getFileName ( ) ) ; FileChannel fc = FileChannel . open ( file , StandardOpenOption . READ ) ) { // Requesting file size to correctly report status
long size = Files . size ( file ) ; // Allocating buffer
ByteBuffer buffer = ByteBuffer . allocate ( 65536 ) ; // Extracting backing byte array
byte [ ] bufferArray = buffer . array ( ) ; // Creating builder
StreamIndexBuilder builder = new StreamIndexBuilder ( indexStep ) ; // Indexing file
int read ; long done = 0 ; while ( ( read = fc . read ( ( ByteBuffer ) buffer . clear ( ) ) ) > 0 ) { builder . processBuffer ( bufferArray , 0 , read ) ; lp . reportStatus ( 1.0 * ( done += read ) / size ) ; } // Build index
RandomAccessFastaIndex index = builder . build ( ) ; if ( save ) try ( FileOutputStream fos = new FileOutputStream ( indexFile . toFile ( ) ) ) { index . write ( new BufferedOutputStream ( fos ) ) ; } return index ; } catch ( IOException ioe ) { throw new RuntimeException ( ioe ) ; }
|
public class PrefHelper { /** * < p > Sets the value of the { @ link String } key value supplied in preferences . < / p >
* @ param key A { @ link String } value containing the key to reference .
* @ param value A { @ link Long } value to set the preference record to . */
public void setLong ( String key , long value ) { } }
|
prefHelper_ . prefsEditor_ . putLong ( key , value ) ; prefHelper_ . prefsEditor_ . apply ( ) ;
|
public class CachingAvatarZooKeeperClient { /** * Makes sure the given file has r / w permissions for everyone . We need this
* since the cache files might be accessed by different users on the
* same machine . */
private void setRWPermissions ( File f ) { } }
|
if ( ! f . setReadable ( true , false ) || ! f . setWritable ( true , false ) ) { LOG . info ( "Could not set permissions for file : " + f + " probably because user : " + System . getProperty ( "user.name" ) + " is not the owner" ) ; }
|
public class BundlePackager { /** * We should have generated a { @ code target / osgi / osgi . properties } file with the metadata we inherit from Maven .
* @ param baseDir the project directory
* @ return the computed set of properties */
public static Properties readMavenProperties ( File baseDir ) throws IOException { } }
|
return Instructions . load ( new File ( baseDir , org . wisdom . maven . Constants . OSGI_PROPERTIES ) ) ;
|
public class CassandraClientBase { /** * Sets the batch size .
* @ param batch _ Size
* the new batch size */
void setBatchSize ( String batch_Size ) { } }
|
if ( ! StringUtils . isBlank ( batch_Size ) ) { batchSize = Integer . valueOf ( batch_Size ) ; if ( batchSize == 0 ) { throw new IllegalArgumentException ( "kundera.batch.size property must be numeric and > 0." ) ; } }
|
public class ManagedBuffer { /** * Convenience method to fill the buffer from the channel .
* Unlocks the buffer if an { @ link IOException } occurs .
* This method may only be invoked for { @ link ManagedBuffer } s
* backed by a { @ link ByteBuffer } .
* @ param channel the channel
* @ return the bytes read
* @ throws IOException Signals that an I / O exception has occurred . */
public int fillFromChannel ( ReadableByteChannel channel ) throws IOException { } }
|
if ( ! ( backing instanceof ByteBuffer ) ) { throw new IllegalArgumentException ( "Backing buffer is not a ByteBuffer." ) ; } try { return channel . read ( ( ByteBuffer ) backing ) ; } catch ( IOException e ) { unlockBuffer ( ) ; throw e ; }
|
public class TransformSupport { /** * Tag logic */
@ Override public int doStartTag ( ) throws JspException { } }
|
// set up transformer in the start tag so that nested < param > tags can set parameters directly
if ( xslt == null ) { throw new JspTagException ( Resources . getMessage ( "TRANSFORM_XSLT_IS_NULL" ) ) ; } Source source ; try { if ( xslt instanceof Source ) { source = ( Source ) xslt ; } else if ( xslt instanceof String ) { String s = ( String ) xslt ; s = s . trim ( ) ; if ( s . length ( ) == 0 ) { throw new JspTagException ( Resources . getMessage ( "TRANSFORM_XSLT_IS_EMPTY" ) ) ; } source = XmlUtil . newSAXSource ( new StringReader ( s ) , xsltSystemId , entityResolver ) ; } else if ( xslt instanceof Reader ) { source = XmlUtil . newSAXSource ( ( Reader ) xslt , xsltSystemId , entityResolver ) ; } else { throw new JspTagException ( Resources . getMessage ( "TRANSFORM_XSLT_UNSUPPORTED_TYPE" , xslt . getClass ( ) ) ) ; } } catch ( SAXException e ) { throw new JspException ( e ) ; } catch ( ParserConfigurationException e ) { throw new JspException ( e ) ; } try { t = XmlUtil . newTransformer ( source , uriResolver ) ; t . setURIResolver ( uriResolver ) ; } catch ( TransformerConfigurationException e ) { throw new JspTagException ( e ) ; } return EVAL_BODY_BUFFERED ;
|
public class LockedMessageEnumerationImpl { /** * Returns the next available locked message in the enumeration .
* A value of null is returned if there is no next message .
* @ see com . ibm . wsspi . sib . core . LockedMessageEnumeration # nextLocked ( ) */
public SIBusMessage nextLocked ( ) throws SISessionUnavailableException , SISessionDroppedException , SIConnectionUnavailableException , SIConnectionDroppedException , SIResourceException , SIConnectionLostException , SIErrorException { } }
|
if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) SibTr . entry ( this , tc , "nextLocked" ) ; JsMessage retMsg = null ; synchronized ( lmeOperationMonitor ) { checkValid ( ) ; // At this point we look at each item in the array up to end of the array for the next
// non - null item . This is because some points in the array may be null if they have been
// deleted or unlocked .
while ( nextIndex != messages . length ) { retMsg = messages [ nextIndex ] ; nextIndex ++ ; if ( retMsg != null ) break ; } } if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) SibTr . exit ( this , tc , "nextLocked" , retMsg ) ; return retMsg ;
|
public class QYWeixinSupport { /** * 绑定服务器的方法
* @ param request 请求
* @ param response 响应 */
public void bindServer ( HttpServletRequest request , HttpServletResponse response ) { } }
|
PrintWriter pw = null ; try { pw = response . getWriter ( ) ; } catch ( IOException e ) { e . printStackTrace ( ) ; } if ( StrUtil . isBlank ( getToken ( ) ) || StrUtil . isBlank ( getAESKey ( ) ) || StrUtil . isBlank ( getCropId ( ) ) ) { pw . write ( "" ) ; pw . flush ( ) ; pw . close ( ) ; } try { WXBizMsgCrypt pc = new WXBizMsgCrypt ( getToken ( ) , getAESKey ( ) , getCropId ( ) ) ; String echoStr = pc . verifyUrl ( request . getParameter ( "msg_signature" ) , request . getParameter ( "timestamp" ) , request . getParameter ( "nonce" ) , request . getParameter ( "echostr" ) ) ; pw . write ( echoStr ) ; pw . flush ( ) ; pw . close ( ) ; } catch ( AesException e ) { e . printStackTrace ( ) ; pw . write ( "" ) ; pw . flush ( ) ; pw . close ( ) ; }
|
public class MaskPasswordsBuildWrapper { /** * Contributes the passwords defined by the user as variables that can be reused
* from build steps ( and other places ) . */
@ Override public void makeBuildVariables ( AbstractBuild build , Map < String , String > variables ) { } }
|
// global var / password pairs
MaskPasswordsConfig config = MaskPasswordsConfig . getInstance ( ) ; List < VarPasswordPair > globalVarPasswordPairs = config . getGlobalVarPasswordPairs ( ) ; // we can ' t use variables . putAll ( ) since passwords are ciphered when in varPasswordPairs
for ( VarPasswordPair globalVarPasswordPair : globalVarPasswordPairs ) { variables . put ( globalVarPasswordPair . getVar ( ) , globalVarPasswordPair . getPassword ( ) ) ; } // job ' s var / password pairs
if ( varPasswordPairs != null ) { // cf . comment above
for ( VarPasswordPair varPasswordPair : varPasswordPairs ) { if ( StringUtils . isNotBlank ( varPasswordPair . getVar ( ) ) ) { variables . put ( varPasswordPair . getVar ( ) , varPasswordPair . getPassword ( ) ) ; } } }
|
public class DatabaseUtils { /** * Ensure values { @ code inputs } are unique ( which avoids useless arguments ) and sorted before creating the partition . */
public static < INPUT extends Comparable < INPUT > > Iterable < List < INPUT > > toUniqueAndSortedPartitions ( Collection < INPUT > inputs ) { } }
|
return toUniqueAndSortedPartitions ( inputs , i -> i ) ;
|
public class WebApp { /** * LIBERTY Added for delayed start . Performs one - time initialization of the web application :
 * uses check - lock - recheck on { @ code lock } so concurrent callers initialize at most once , and
 * skips initialization when the app was already initialized or destroyed , or the server is stopping .
 * @ throws ServletException if initialization of the web module fails
 * @ throws Throwable propagated from the underlying initialize ( config , moduleConfig , extensionFactories ) */
public void initialize ( ) throws ServletException , Throwable { } }
|
/* Trace entry ; double - checked " initialized / destroyed " guard around the real initialization ; trace exit . */
if ( com . ibm . ejs . ras . TraceComponent . isAnyTracingEnabled ( ) && logger . isLoggable ( Level . FINE ) ) logger . entering ( CLASS_NAME , "Initialize : app = " + config . getApplicationName ( ) + ", initialized = " + initialized + ", destroyed = " + destroyed ) ; if ( ! initialized && ! destroyed ) { synchronized ( lock ) { if ( ! initialized && ! destroyed && ! com . ibm . ws . webcontainer . osgi . WebContainer . isServerStopping ( ) ) { initialize ( this . config , this . moduleConfig , this . extensionFactories ) ; started ( ) ; initialized = true ; config . setSessionCookieConfigInitilialized ( ) ; } } } if ( com . ibm . ejs . ras . TraceComponent . isAnyTracingEnabled ( ) && logger . isLoggable ( Level . FINE ) ) logger . exiting ( CLASS_NAME , "Initialize : initialized = " + initialized + ", destroyed = " + destroyed ) ;
|
public class RetryBuilder { /** * Set both the { @ link Delay } and the { @ link Scheduler } on which the delay is waited .
* If the delay is null , { @ link Retry # DEFAULT _ DELAY } is used . */
public RetryBuilder delay ( Delay delay , Scheduler scheduler ) { } }
|
this . delay = ( delay == null ) ? Retry . DEFAULT_DELAY : delay ; this . scheduler = scheduler ; return this ;
|
public class NetworkInterfaceTapConfigurationsInner { /** * Get the specified tap configuration on a network interface .
* @ param resourceGroupName The name of the resource group .
* @ param networkInterfaceName The name of the network interface .
* @ param tapConfigurationName The name of the tap configuration .
* @ throws IllegalArgumentException thrown if parameters fail the validation
* @ return the observable to the NetworkInterfaceTapConfigurationInner object */
public Observable < NetworkInterfaceTapConfigurationInner > getAsync ( String resourceGroupName , String networkInterfaceName , String tapConfigurationName ) { } }
|
return getWithServiceResponseAsync ( resourceGroupName , networkInterfaceName , tapConfigurationName ) . map ( new Func1 < ServiceResponse < NetworkInterfaceTapConfigurationInner > , NetworkInterfaceTapConfigurationInner > ( ) { @ Override public NetworkInterfaceTapConfigurationInner call ( ServiceResponse < NetworkInterfaceTapConfigurationInner > response ) { return response . body ( ) ; } } ) ;
|
public class IOUtils { /** * Returns all the text in the given file with the given encoding . If the file
* cannot be read ( non - existent , etc . ) , then and only then the method returns
* < code > null < / code > . */
public static String slurpFileNoExceptions ( String filename , String encoding ) { } }
|
try { return slurpFile ( filename , encoding ) ; } catch ( Exception e ) { throw new RuntimeIOException ( "slurpFile IO problem" , e ) ; }
|
public class BlockdevOptions { @ Nonnull public static BlockdevOptions blkdebug ( @ Nonnull BlockdevOptionsBlkdebug blkdebug ) { } }
|
BlockdevOptions self = new BlockdevOptions ( ) ; self . driver = BlockdevDriver . blkdebug ; self . blkdebug = blkdebug ; return self ;
|
public class Ifc2x3tc1FactoryImpl { /** * < ! - - begin - user - doc - - >
* < ! - - end - user - doc - - >
* @ generated */
public IfcVibrationIsolatorTypeEnum createIfcVibrationIsolatorTypeEnumFromString ( EDataType eDataType , String initialValue ) { } }
|
IfcVibrationIsolatorTypeEnum result = IfcVibrationIsolatorTypeEnum . get ( initialValue ) ; if ( result == null ) throw new IllegalArgumentException ( "The value '" + initialValue + "' is not a valid enumerator of '" + eDataType . getName ( ) + "'" ) ; return result ;
|
public class JandexUtils { /** * Indexes all classes in the classpath ( * . jar or * . class ) .
* @ param classLoader
* Class loader to use .
* @ param indexer
* Indexer to use .
* @ param knownFiles
* List of files already analyzed . New files will be added within this method . */
public static void indexClasspath ( final URLClassLoader classLoader , final Indexer indexer , final List < File > knownFiles ) { } }
|
// Variant that works with Maven " exec : java "
final List < File > classPathFiles = Utils4J . localFilesFromUrlClassLoader ( classLoader ) ; for ( final File file : classPathFiles ) { if ( Utils4J . nonJreJarFile ( file ) ) { indexJar ( indexer , knownFiles , file ) ; } else if ( file . isDirectory ( ) && ! file . getName ( ) . startsWith ( "." ) ) { indexDir ( indexer , knownFiles , file ) ; } } // Variant that works for Maven surefire tests
for ( final File file : Utils4J . classpathFiles ( Utils4J :: nonJreJarFile ) ) { indexJar ( indexer , knownFiles , file ) ; } for ( final File file : Utils4J . classpathFiles ( Utils4J :: classFile ) ) { indexClassFile ( indexer , knownFiles , file ) ; }
|
public class CodeBuilderUtil { /** * Generate code to throw an exception with a message concatenated at runtime .
* @ param b { @ link CodeBuilder } to which to add code
* @ param type type of the object to throw
* @ param messages messages to concat at runtime */
public static void throwConcatException ( CodeBuilder b , Class type , String ... messages ) { } }
|
if ( messages == null || messages . length == 0 ) { throwException ( b , type , null ) ; return ; } if ( messages . length == 1 ) { throwException ( b , type , messages [ 0 ] ) ; return ; } TypeDesc desc = TypeDesc . forClass ( type ) ; b . newObject ( desc ) ; b . dup ( ) ; TypeDesc [ ] params = new TypeDesc [ ] { TypeDesc . STRING } ; for ( int i = 0 ; i < messages . length ; i ++ ) { b . loadConstant ( String . valueOf ( messages [ i ] ) ) ; if ( i > 0 ) { b . invokeVirtual ( TypeDesc . STRING , "concat" , TypeDesc . STRING , params ) ; } } b . invokeConstructor ( desc , params ) ; b . throwObject ( ) ;
|
public class Importer { /** * - - - - - public static methods - - - - - */
public static Page parsePageFromSource ( final SecurityContext securityContext , final String source , final String name ) throws FrameworkException { } }
|
return parsePageFromSource ( securityContext , source , name , false ) ;
|
public class ThriftConnectionPool { /** * 获取方法调用堆栈信息的方法
* @ param message
* 提示语
* @ return 堆栈信息 */
protected String captureStackTrace ( String message ) { } }
|
StringBuilder stringBuilder = new StringBuilder ( String . format ( message , Thread . currentThread ( ) . getName ( ) ) ) ; StackTraceElement [ ] trace = Thread . currentThread ( ) . getStackTrace ( ) ; for ( int i = 0 ; i < trace . length ; i ++ ) { stringBuilder . append ( ' ' ) . append ( trace [ i ] ) . append ( "\r\n" ) ; } stringBuilder . append ( "" ) ; return stringBuilder . toString ( ) ;
|
public class N { /** * Maps elements in the range [ fromIndex , toIndex ) of { @ code c } ( in iteration order ) to a CharList using { @ code func } .
 * Mostly it ' s designed for one - step operation to complete the operation in one step .
 * < code > java . util . stream . Stream < / code > is preferred for multiple phases operation .
 * @ param c source collection ; may be null / empty only when fromIndex == toIndex == 0
 * @ param fromIndex inclusive start index into the collection ' s iteration order
 * @ param toIndex exclusive end index ; must satisfy 0 <= fromIndex <= toIndex <= size ( c )
 * @ param func mapper producing one char per element
 * @ return a new CharList holding the toIndex - fromIndex mapped values
 * @ throws E if { @ code func } throws */
public static < T , E extends Exception > CharList mapToChar ( final Collection < ? extends T > c , final int fromIndex , final int toIndex , final Try . ToCharFunction < ? super T , E > func ) throws E { } }
|
/* RandomAccess lists are indexed directly ; other collections are walked with a cursor that skips elements before fromIndex and stops once toIndex is reached . */
checkFromToIndex ( fromIndex , toIndex , size ( c ) ) ; N . checkArgNotNull ( func ) ; if ( N . isNullOrEmpty ( c ) && fromIndex == 0 && toIndex == 0 ) { return new CharList ( ) ; } final CharList result = new CharList ( toIndex - fromIndex ) ; if ( c instanceof List && c instanceof RandomAccess ) { final List < T > list = ( List < T > ) c ; for ( int i = fromIndex ; i < toIndex ; i ++ ) { result . add ( func . applyAsChar ( list . get ( i ) ) ) ; } } else { int idx = 0 ; for ( T e : c ) { if ( idx ++ < fromIndex ) { continue ; } result . add ( func . applyAsChar ( e ) ) ; if ( idx >= toIndex ) { break ; } } } return result ;
|
public class CmsLetsEncryptConfiguration { /** * Gets the configured port , or - 1 if the port is not set or has an invalid value . < p >
* The port is used to signal to the LetsEncrypt docker container that the certificate configuration has changed .
* @ return the configured port */
public int getPort ( ) { } }
|
try { String portStr = m_config . get ( ATTR_PORT ) ; return Integer . valueOf ( portStr ) . intValue ( ) ; } catch ( Exception e ) { LOG . error ( "Error getting letsencrypt port: " + e . getLocalizedMessage ( ) , e ) ; return - 1 ; }
|
public class SimpleTransformer { /** * Transform a java pojo object to sfsobject
* @ param value pojo java object
* @ return a SFSDataWrapper object */
protected SFSDataWrapper transformObject ( Object value ) { } }
|
ResponseParamsClass struct = null ; if ( context != null ) struct = context . getResponseParamsClass ( value . getClass ( ) ) ; if ( struct == null ) struct = new ResponseParamsClass ( value . getClass ( ) ) ; ISFSObject sfsObject = new ResponseParamSerializer ( ) . object2params ( struct , value ) ; return new SFSDataWrapper ( SFSDataType . SFS_OBJECT , sfsObject ) ;
|
public class InJvmContainerExecutor { /** * Creates CLI parser which can be used to extract Container ' s class name and
* its launch arguments .
* @ param containerWorkDir
* @ return */
private ExecJavaCliParser createExecCommandParser ( String containerWorkDir ) { } }
|
String execLine = this . filterAndExecuteLaunchScriptAndReturnExecLine ( containerWorkDir ) ; String [ ] values = execLine . split ( "\"" ) ; String javaCli = values [ 1 ] ; String [ ] javaCliValues = javaCli . split ( " " ) ; StringBuffer buffer = new StringBuffer ( ) ; for ( int i = 0 ; i < javaCliValues . length ; i ++ ) { if ( i > 0 ) { buffer . append ( javaCliValues [ i ] ) ; if ( javaCliValues . length - i > 1 ) { buffer . append ( " " ) ; } } } String extractedJavaCli = buffer . toString ( ) ; ExecJavaCliParser execJavaCliParser = new ExecJavaCliParser ( extractedJavaCli ) ; return execJavaCliParser ;
|
public class JScreen { /** * Get the GridBagConstraints .
* @ return The gridbag constraints object . */
public GridBagConstraints getGBConstraints ( ) { } }
|
if ( m_gbconstraints == null ) { m_gbconstraints = new GridBagConstraints ( ) ; m_gbconstraints . insets = new Insets ( 2 , 2 , 2 , 2 ) ; m_gbconstraints . ipadx = 2 ; m_gbconstraints . ipady = 2 ; } return m_gbconstraints ;
|
public class ModifyReservedInstancesRequest { /** * The IDs of the Reserved Instances to modify .
* < b > NOTE : < / b > This method appends the values to the existing list ( if any ) . Use
* { @ link # setReservedInstancesIds ( java . util . Collection ) } or { @ link # withReservedInstancesIds ( java . util . Collection ) }
* if you want to override the existing values .
* @ param reservedInstancesIds
* The IDs of the Reserved Instances to modify .
* @ return Returns a reference to this object so that method calls can be chained together . */
public ModifyReservedInstancesRequest withReservedInstancesIds ( String ... reservedInstancesIds ) { } }
|
if ( this . reservedInstancesIds == null ) { setReservedInstancesIds ( new com . amazonaws . internal . SdkInternalList < String > ( reservedInstancesIds . length ) ) ; } for ( String ele : reservedInstancesIds ) { this . reservedInstancesIds . add ( ele ) ; } return this ;
|
public class ProjectApi { /** * Get a list of project users matching the specified search string . This list
* includes all project members and all users assigned to project parent groups .
* < pre > < code > GET / projects / : id / users < / code > < / pre >
* @ param projectIdOrPath projectIdOrPath the project in the form of an Integer ( ID ) , String ( path ) , or Project instance , required
* @ param search the string to match specific users
* @ return the users matching the search string and belonging to the specified project and its parent groups
* @ throws GitLabApiException if any exception occurs */
public List < ProjectUser > getProjectUsers ( Object projectIdOrPath , String search ) throws GitLabApiException { } }
|
return ( getProjectUsers ( projectIdOrPath , search , getDefaultPerPage ( ) ) . all ( ) ) ;
|
public class Functions { /** * Offset a numeric string by another numeric string .
* Any numeric string recognized by { @ code BigDecimal } is supported .
* @ param initial A valid number string
* @ param offset A valid number string
* @ return a number string with the precision matching the highest precision
* argument .
* @ see BigDecimal */
public static String numericOffset ( String initial , String offset ) { } }
|
BigDecimal number ; BigDecimal dOffset ; try { number = new BigDecimal ( initial ) ; dOffset = new BigDecimal ( offset ) ; } catch ( Exception ex ) { return null ; } return number . add ( dOffset ) . toString ( ) ;
|
public class Applications { /** * Adds the instances to the internal vip address map .
* @ param app
* - the applications for which the instances need to be added . */
private void addInstancesToVIPMaps ( Application app , Map < String , VipIndexSupport > virtualHostNameAppMap , Map < String , VipIndexSupport > secureVirtualHostNameAppMap ) { } }
|
// Check and add the instances to the their respective virtual host name
// mappings
for ( InstanceInfo info : app . getInstances ( ) ) { String vipAddresses = info . getVIPAddress ( ) ; if ( vipAddresses != null ) { addInstanceToMap ( info , vipAddresses , virtualHostNameAppMap ) ; } String secureVipAddresses = info . getSecureVipAddress ( ) ; if ( secureVipAddresses != null ) { addInstanceToMap ( info , secureVipAddresses , secureVirtualHostNameAppMap ) ; } }
|
public class ModelsImpl {

    /**
     * Gets information about the hierarchical entity models.
     *
     * @param appId The application ID.
     * @param versionId The version ID.
     * @param listHierarchicalEntitiesOptionalParameter the object representing the optional parameters to be set before calling this API
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @throws ErrorResponseException thrown if the request is rejected by server
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent
     * @return the List&lt;HierarchicalEntityExtractor&gt; object if successful.
     */
    public List<HierarchicalEntityExtractor> listHierarchicalEntities(UUID appId, String versionId, ListHierarchicalEntitiesOptionalParameter listHierarchicalEntitiesOptionalParameter) {
        // Blocks on the async service call and unwraps the single response body.
        return listHierarchicalEntitiesWithServiceResponseAsync(appId, versionId, listHierarchicalEntitiesOptionalParameter).toBlocking().single().body();
    }
}
|
public class ResourceHandle { /** * The ' adaptTo ' like wrapping helper .
* @ return the wrapped resource ( may be resource itself if it is a ResourceHandle ) , not null */
public static ResourceHandle use ( Resource resource ) { } }
|
return resource instanceof ResourceHandle ? ( ( ResourceHandle ) resource ) : new ResourceHandle ( resource ) ;
|
public class FileBeanStore {

    /**
     * Get a GZIP output stream suitable for writing persistent state
     * associated with the given key.
     *
     * @param beanId the stateful session bean identity whose state is being passivated
     * @return an open GZIPOutputStream over the bean's backing file
     * @throws CSIException if the backing file cannot be opened for writing
     */
    public GZIPOutputStream getGZIPOutputStream(BeanId beanId) throws CSIException {
        final boolean isTraceOn = TraceComponent.isAnyTracingEnabled();
        if (isTraceOn && tc.isEntryEnabled())
            Tr.entry(tc, "getOutputStream", beanId);

        final String fileName = getPortableFilename(beanId);
        final long beanTimeoutTime = getBeanTimeoutTime(beanId);
        GZIPOutputStream result = null;
        try {
            // File creation runs in a privileged block so callers without file
            // permissions can still passivate bean state.
            result = (GZIPOutputStream) AccessController.doPrivileged(new PrivilegedExceptionAction<GZIPOutputStream>() {
                public GZIPOutputStream run() throws IOException {
                    File statefulBeanFile = getStatefulBeanFile(fileName, false);
                    // z/OS uses a platform-specific stream that carries the bean timeout.
                    FileOutputStream fos = EJSPlatformHelper.isZOS() ? new WSFileOutputStream(statefulBeanFile, beanTimeoutTime) : new FileOutputStream(statefulBeanFile);
                    return new GZIPOutputStream(fos); // d651126
                }
            });
        } catch (PrivilegedActionException ex2) {
            // Unwrap the real cause, record FFDC, warn, and surface as CSIException.
            Exception ex = ex2.getException();
            FFDCFilter.processException(ex, CLASS_NAME + ".getOutputStream", "127", this);
            Tr.warning(tc, "IOEXCEPTION_WRITING_FILE_FOR_STATEFUL_SESSION_BEAN_CNTR0025W", new Object[] { fileName, this, ex }); // p111002.3
            throw new CSIException("Unable to open output stream", ex);
        }

        if (isTraceOn && tc.isEntryEnabled())
            Tr.exit(tc, "getOutputStream");
        return result;
    }
}
|
public class MapViewerTemplate {

    /**
     * Template method to create the map views.
     *
     * Initializes the map view's model from the stored preferences and applies the
     * subclass-provided zoom configuration via the hook methods hasZoomControls(),
     * isZoomControlsAutoHide(), getZoomLevelMin() and getZoomLevelMax().
     */
    protected void createMapViews() {
        mapView = getMapView();
        // Restore persisted model state before wiring up interaction.
        mapView.getModel().init(this.preferencesFacade);
        mapView.setClickable(true);
        mapView.getMapScaleBar().setVisible(true);
        // Zoom behavior is delegated to subclass hooks.
        mapView.setBuiltInZoomControls(hasZoomControls());
        mapView.getMapZoomControls().setAutoHide(isZoomControlsAutoHide());
        mapView.getMapZoomControls().setZoomLevelMin(getZoomLevelMin());
        mapView.getMapZoomControls().setZoomLevelMax(getZoomLevelMax());
    }
}
|
public class AWSMediaLiveClient { /** * Produces list of inputs that have been created
* @ param listInputsRequest
* Placeholder documentation for ListInputsRequest
* @ return Result of the ListInputs operation returned by the service .
* @ throws BadRequestException
* This request was invalid
* @ throws InternalServerErrorException
* Internal Service Error
* @ throws ForbiddenException
* Access was denied
* @ throws BadGatewayException
* Bad Gateway Error
* @ throws GatewayTimeoutException
* Gateway Timeout Error
* @ throws TooManyRequestsException
* Limit exceeded
* @ sample AWSMediaLive . ListInputs
* @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / medialive - 2017-10-14 / ListInputs " target = " _ top " > AWS API
* Documentation < / a > */
@ Override public ListInputsResult listInputs ( ListInputsRequest request ) { } }
|
request = beforeClientExecution ( request ) ; return executeListInputs ( request ) ;
|
public class DensePositiveMapper { @ Override public void map ( int [ ] input , final int start , final int length ) { } }
|
for ( int i = start , l = length ; l > 0 ; l -- , i ++ ) { input [ i ] = forward [ input [ i ] + offset ] ; }
|
public class ModularParser {

    /**
     * Searches the range given by the Span s for paired occurrences of
     * "quotation" and puts the resulting spans in the list quotedSpans.
     * The quotation tags themselves are deleted from the source.
     *
     * @param sm the source in which will be searched
     * @param s the range in which will be searched
     * @param quotedSpans the list where the spans will be placed, should be managed
     *        by the SpanManager sm
     * @param quotation the start and end tag as String
     */
    private void parseQuotedSpans(SpanManager sm, Span s, List<Span> quotedSpans, String quotation) {
        final int qlen = quotation.length();
        // get the start position
        int start = sm.indexOf(quotation, s.getStart(), s.getEnd());
        while (start != -1) {
            // get the end position (the matching closing tag)
            int end = sm.indexOf(quotation, start + qlen, s.getEnd());
            if (end == -1) {
                // Unmatched opening tag: stop without deleting anything.
                break;
            }
            // build a new span from start and end position.
            Span qs = new Span(start, end);
            quotedSpans.add(qs);
            // calculate the original src positions (must happen before any deletion).
            if (calculateSrcSpans) {
                qs.setSrcSpan(new SrcSpan(sm.getSrcPos(start), sm.getSrcPos(end + qlen - 1) + 1));
            }
            // delete the tags; end tag first so the start offset stays valid.
            sm.delete(end, end + qlen);
            sm.delete(start, start + qlen);
            // get the next start position after the just-closed span
            start = sm.indexOf(quotation, qs.getEnd(), s.getEnd());
        }
    }
}
|
public class ModuleApiKeys { /** * Delete a given api key from the configured space .
* @ param key the key to be deleted .
* @ return 204 upon success .
* @ throws IllegalArgumentException if key is null .
* @ throws IllegalArgumentException if key ' s spaceId is null . */
public int delete ( CMAApiKey key ) { } }
|
assertNotNull ( key , "key" ) ; final String space = getSpaceIdOrThrow ( key , "key" ) ; final String id = getResourceIdOrThrow ( key , "key" ) ; return service . delete ( space , id ) . blockingFirst ( ) . code ( ) ;
|
public class AnnotatedTypeBuilder { /** * Add an annotation to the specified field . If the field is not already
* present , it will be added .
* @ param field the field to add the annotation to
* @ param annotation the annotation to add
* @ throws IllegalArgumentException if the annotation is null */
public AnnotatedTypeBuilder < X > addToField ( Field field , Annotation annotation ) { } }
|
if ( fields . get ( field ) == null ) { fields . put ( field , new AnnotationBuilder ( ) ) ; } fields . get ( field ) . add ( annotation ) ; return this ;
|
public class JCalendarPopup {

    /**
     * User pressed the "next year" button, change the calendar.
     *
     * @param evt the action event (ignored)
     */
    private void nextYearActionPerformed(ActionEvent evt) {
        calendar.setTime(targetPanelDate);
        calendar.add(Calendar.YEAR, 1);
        // Time-of-day is pinned to exactly 12:00:00.000 - presumably to keep the
        // date stable across DST transitions; confirm against the other navigation handlers.
        calendar.set(Calendar.HOUR_OF_DAY, 12);
        calendar.set(Calendar.MINUTE, 0);
        calendar.set(Calendar.SECOND, 0);
        calendar.set(Calendar.MILLISECOND, 0);
        targetPanelDate = calendar.getTime();
        this.layoutCalendar(targetPanelDate);
    }
}
|
public class WTree {

    /**
     * Handle the current expanded state.
     *
     * @param request the request containing row expansion data
     */
    private void handleExpandedState(final Request request) {
        String[] paramValue = request.getParameterValues(getId() + OPEN_REQUEST_KEY);
        if (paramValue == null) {
            paramValue = new String[0];
        }
        String[] expandedRowIds = removeEmptyStrings(paramValue);
        Set<String> newExpansionIds = new HashSet<>();
        if (expandedRowIds != null) {
            // Every submitted row id carries the tree's item-id prefix; strip it
            // before recording the expansion.
            int offset = getItemIdPrefix().length();
            for (String expandedRowId : expandedRowIds) {
                if (expandedRowId.length() <= offset) {
                    LOG.warn("Expanded row id [" + expandedRowId + "] does not have a valid prefix and will be ignored.");
                    continue;
                }
                // Remove prefix to get item id
                String itemId = expandedRowId.substring(offset);
                // Assume the item id is valid
                newExpansionIds.add(itemId);
            }
        }
        setExpandedRows(newExpansionIds);
    }
}
|
public class PBKDF {

    /**
     * Implementation of PBKDF2 (RFC 2898).
     *
     * @param mac pre-initialized {@link Mac} instance to use
     * @param S salt
     * @param c iteration count
     * @param DK byte array that the derived key will be placed in
     * @param dkLen intended length, in octets, of the derived key
     * @throws GeneralSecurityException if the requested key length is too long
     *         or the MAC operation fails
     */
    public static void pbkdf2(Mac mac, byte[] S, int c, byte[] DK, int dkLen) throws GeneralSecurityException {
        int hLen = mac.getMacLength();
        // RFC 2898 step 1: dkLen must not exceed (2^32 - 1) * hLen.
        if (dkLen > (Math.pow(2, 32) - 1) * hLen) {
            throw new GeneralSecurityException("Requested key length too long");
        }
        byte[] U = new byte[hLen];              // U_j: output of the j-th PRF round
        byte[] T = new byte[hLen];              // T_i: XOR accumulator for block i
        byte[] block1 = new byte[S.length + 4]; // S || INT(i)
        // l = number of hLen-sized blocks; r = octets used from the final block.
        int l = (int) Math.ceil((double) dkLen / hLen);
        int r = dkLen - (l - 1) * hLen;
        arraycopy(S, 0, block1, 0, S.length);
        for (int i = 1; i <= l; i++) {
            // Append the 4-byte big-endian block index after the salt.
            block1[S.length + 0] = (byte) (i >> 24 & 0xff);
            block1[S.length + 1] = (byte) (i >> 16 & 0xff);
            block1[S.length + 2] = (byte) (i >> 8 & 0xff);
            block1[S.length + 3] = (byte) (i >> 0 & 0xff);
            // U_1 = PRF(P, S || INT(i)); T_i starts as U_1.
            mac.update(block1);
            mac.doFinal(U, 0);
            arraycopy(U, 0, T, 0, hLen);
            // U_j = PRF(P, U_{j-1}); T_i ^= U_j for the remaining c-1 iterations.
            for (int j = 1; j < c; j++) {
                mac.update(U);
                mac.doFinal(U, 0);
                for (int k = 0; k < hLen; k++) {
                    T[k] ^= U[k];
                }
            }
            // Copy T_i into DK; only the last block may contribute fewer (r) octets.
            arraycopy(T, 0, DK, (i - 1) * hLen, (i == l ? r : hLen));
        }
    }
}
|
public class JDBC4PreparedStatement {

    /**
     * Sets the designated parameter to the given REF (&lt;structured-type&gt;) value.
     *
     * REF parameters are not supported by this driver; the index is validated first
     * so an out-of-range argument surfaces the standard range error instead.
     */
    @Override
    public void setRef(int parameterIndex, Ref x) throws SQLException {
        checkParameterBounds(parameterIndex);
        // Unconditionally unsupported for in-range indices.
        throw SQLError.noSupport();
    }
}
|
public class DefaultPassConfig {

    /**
     * Creates several passes aimed at removing code.
     *
     * Each pass is included only when its corresponding compiler option is enabled;
     * the resulting list is asserted to contain only loopable passes.
     *
     * @return the ordered list of enabled code-removing pass factories
     */
    private List<PassFactory> getCodeRemovingPasses() {
        List<PassFactory> passes = new ArrayList<>();
        if (options.collapseObjectLiterals) {
            passes.add(collapseObjectLiterals);
        }
        // Full variable inlining supersedes constant-only inlining (else-if).
        if (options.inlineVariables || options.inlineLocalVariables) {
            passes.add(inlineVariables);
        } else if (options.inlineConstantVars) {
            passes.add(inlineConstants);
        }
        if (options.foldConstants) {
            passes.add(peepholeOptimizations);
        }
        if (options.removeDeadCode) {
            passes.add(removeUnreachableCode);
        }
        if (shouldRunRemoveUnusedCode()) {
            passes.add(removeUnusedCode);
        }
        // These passes are presumably re-run to a fixed point (hence the loopable
        // requirement asserted here) - confirm against the pass runner.
        assertAllLoopablePasses(passes);
        return passes;
    }
}
|
public class dnssuffix { /** * Use this API to fetch all the dnssuffix resources that are configured on netscaler . */
public static dnssuffix [ ] get ( nitro_service service ) throws Exception { } }
|
dnssuffix obj = new dnssuffix ( ) ; dnssuffix [ ] response = ( dnssuffix [ ] ) obj . get_resources ( service ) ; return response ;
|
public class ObjectFactory {

    /**
     * Create an instance of {@link JAXBElement}{@code <}{@link Status}{@code >}.
     *
     * @param value the Status payload to wrap
     * @return the wrapped JAXB element
     */
    @XmlElementDecl(namespace = "http://schema.intuit.com/finance/v3", name = "Status", substitutionHeadNamespace = "http://schema.intuit.com/finance/v3", substitutionHeadName = "IntuitObject")
    public JAXBElement<Status> createStatus(Status value) {
        // null scope: the element is declared at global (top) level.
        return new JAXBElement<Status>(_Status_QNAME, Status.class, null, value);
    }
}
|
public class PointDrawController { public void onMouseDown ( MouseDownEvent event ) { } }
|
Coordinate newCoords = getWorldPosition ( event ) ; geometry = factory . createPoint ( newCoords ) ;
|
public class SetBase { /** * Sub class could override this method to implement iterating in parallel .
* < p > The iterating support partial function visitor by ignoring the
* { @ link org . osgl . exception . NotAppliedException } thrown out by visitor ' s apply
* method call < / p >
* @ param visitor the visitor
* @ throws $ . Break if visitor needs to terminate the iteration */
public SetBase < T > forEach ( $ . Visitor < ? super T > visitor ) throws $ . Break { } }
|
for ( T t : this ) { try { visitor . apply ( t ) ; } catch ( NotAppliedException e ) { // ignore
} } return this ;
|
public class StandardWebSocketSession {

    /**
     * {@inheritDoc}
     *
     * Delegates to the underlying JSR-356 session's asynchronous remote endpoint.
     */
    @Override
    protected Future<Void> sendObjectMessage(Object message) {
        return getNativeSession().getAsyncRemote().sendObject(message);
    }
}
|
public class RDS { /** * Use this to revoke access of security groups in EC2 - Classic
* @ param groupName
* @ param sourceCidr
* @ throws CloudException
* @ throws InternalException */
private void revokeClassicDbSecurityGroup ( String groupName , String sourceCidr ) throws CloudException , InternalException { } }
|
Map < String , String > parameters = getProvider ( ) . getStandardRdsParameters ( getProvider ( ) . getContext ( ) , REVOKE_DB_SECURITY_GROUP_INGRESS ) ; parameters . put ( "CIDRIP" , sourceCidr ) ; parameters . put ( "DBSecurityGroupName" , groupName ) ; EC2Method method = new EC2Method ( SERVICE_ID , getProvider ( ) , parameters ) ; method . invoke ( ) ;
|
public class SoapHeaderScanner { /** * Search for the given character in the buffer , starting at the
* given index . If not found , return - 1 . If found , return the
* index of the character .
* @ param c
* @ param index */
private int findFrom ( char c , int index ) { } }
|
int currentIdx = index ; while ( currentIdx < buffer . length ( ) ) { if ( buffer . get ( currentIdx ) == c ) { return currentIdx ; } currentIdx ++ ; } return - 1 ;
|
public class PlaceIndexController { /** * Connects HTML template file with data for the surnames index page . The
* page displays the surnames that begin with the provided letter .
* @ param dbName name of database for the lookup .
* @ param model Spring connection between the data model wrapper .
* @ return a string identifying which HTML template to use . */
@ RequestMapping ( "/places" ) public final String places ( @ RequestParam ( value = "db" , required = false , defaultValue = "schoeller" ) final String dbName , final Model model ) { } }
|
logger . debug ( "Entering surnames" ) ; final Root root = fetchRoot ( dbName ) ; final RenderingContext context = createRenderingContext ( ) ; final IndexByPlaceRenderer gedRenderer = new IndexByPlaceRenderer ( root , client , context ) ; model . addAttribute ( "filename" , gedbrowserHome + "/" + dbName + ".ged" ) ; model . addAttribute ( "model" , gedRenderer ) ; model . addAttribute ( "appInfo" , appInfo ) ; return "places" ;
|
public class AbstractTTTLearner {

    /**
     * Determines a global splitter, i.e., a splitter for any block. This method may
     * (but is not required to) employ heuristics to obtain a splitter with a
     * relatively short suffix length.
     *
     * @return a splitter for any of the blocks, or {@code null} if no block yields one
     */
    private GlobalSplitter<I, D> findSplitterGlobal() {
        // TODO: Make global option
        boolean optimizeGlobal = true;
        AbstractBaseDTNode<I, D> bestBlockRoot = null;
        Splitter<I, D> bestSplitter = null;
        for (AbstractBaseDTNode<I, D> blockRoot : blockList) {
            Splitter<I, D> splitter = findSplitter(blockRoot);
            if (splitter != null) {
                // Keep the splitter with the shortest discriminator seen so far.
                if (bestSplitter == null || splitter.getDiscriminatorLength() < bestSplitter.getDiscriminatorLength()) {
                    bestSplitter = splitter;
                    bestBlockRoot = blockRoot;
                }
                // When not optimizing globally, the first usable splitter wins.
                if (!optimizeGlobal) {
                    break;
                }
            }
        }
        if (bestSplitter == null) {
            return null;
        }
        return new GlobalSplitter<>(bestBlockRoot, bestSplitter);
    }
}
|
public class Predicate { /** * A list of the conditions that determine when the trigger will fire .
* < b > NOTE : < / b > This method appends the values to the existing list ( if any ) . Use
* { @ link # setConditions ( java . util . Collection ) } or { @ link # withConditions ( java . util . Collection ) } if you want to
* override the existing values .
* @ param conditions
* A list of the conditions that determine when the trigger will fire .
* @ return Returns a reference to this object so that method calls can be chained together . */
public Predicate withConditions ( Condition ... conditions ) { } }
|
if ( this . conditions == null ) { setConditions ( new java . util . ArrayList < Condition > ( conditions . length ) ) ; } for ( Condition ele : conditions ) { this . conditions . add ( ele ) ; } return this ;
|
public class KolmogorovSmirnovIndependentSamples { /** * Checks the Critical Value to determine if the Hypothesis should be rejected
* @ param score
* @ param is _ twoTailed
* @ param n1
* @ param n2
* @ param aLevel
* @ return */
private static boolean checkCriticalValue ( double score , boolean is_twoTailed , int n1 , int n2 , double aLevel ) { } }
|
boolean rejected = false ; double criticalValue = calculateCriticalValue ( is_twoTailed , n1 , n2 , aLevel ) ; if ( score > criticalValue ) { rejected = true ; } return rejected ;
|
public class SimpleBeanBoundTableModel { /** * { @ inheritDoc } */
@ Override public Class < ? extends WComponent > getRendererClass ( final List < Integer > row ) { } }
|
int idx = getLevelIndex ( row ) ; LevelDetails level = levels . get ( idx ) ; return level . getRenderer ( ) ;
|
public class AbstractConfiguration { /** * Gets the value of the configuration property identified by name as a value of the specified Class type .
* The required parameter can be used to indicate the property is not required and that a ConfigurationException
* should not be thrown if the property is undeclared or undefined .
* @ param propertyName a String value indicating the name of the configuration property .
* @ param required used to indicate whether the configuration property is required to be declared and defined .
* @ param type the expected Class type of the configuration property value .
* @ return the value of the configuration property identified by name .
* @ throws ConfigurationException if and only if the property is required and the property is either undeclared
* or undefined . */
public < T > T getPropertyValueAs ( final String propertyName , final boolean required , final Class < T > type ) { } }
|
try { return convert ( getPropertyValue ( propertyName , required ) , type ) ; } catch ( ConversionException e ) { if ( required ) { throw new ConfigurationException ( String . format ( "Failed to get the value of configuration setting property (%1$s) as type (%2$s)!" , propertyName , ClassUtils . getName ( type ) ) , e ) ; } return null ; }
|
public class CmsAjaxLinkGallery {

    /**
     * Writes the current link into the pointer resource.<p>
     *
     * @see org.opencms.workplace.galleries.CmsAjaxLinkGallery#changeItemLinkUrl(String)
     *
     * @param itemUrl the pointer resource to change the link of
     */
    @Override
    protected void changeItemLinkUrl(String itemUrl) {
        try {
            JspWriter out = getJsp().getJspContext().getOut();
            if (getCms().existsResource(itemUrl)) {
                try {
                    // NOTE(review): the resource is read twice; consider hoisting a single
                    // readResource call if this becomes a hot path.
                    writePointerLink(getCms().readResource(itemUrl));
                    out.print(buildJsonItemObject(getCms().readResource(itemUrl)));
                } catch (CmsException e) {
                    // can not happen in theory, because we used existsResource() before...
                }
            } else {
                // Signal "no result" to the AJAX caller.
                out.print(RETURNVALUE_NONE);
            }
        } catch (IOException e) {
            if (LOG.isErrorEnabled()) {
                LOG.error(e.getLocalizedMessage(), e);
            }
        }
    }
}
|
public class CreateCustomMetadataRequestMarshaller {

    /**
     * Marshall the given parameter object.
     *
     * @param createCustomMetadataRequest the request to marshall; must not be null
     * @param protocolMarshaller the marshaller that receives each bound field
     * @throws SdkClientException if the request is null or any field fails to marshall
     */
    public void marshall(CreateCustomMetadataRequest createCustomMetadataRequest, ProtocolMarshaller protocolMarshaller) {
        if (createCustomMetadataRequest == null) {
            throw new SdkClientException("Invalid argument passed to marshall(...)");
        }
        try {
            protocolMarshaller.marshall(createCustomMetadataRequest.getAuthenticationToken(), AUTHENTICATIONTOKEN_BINDING);
            protocolMarshaller.marshall(createCustomMetadataRequest.getResourceId(), RESOURCEID_BINDING);
            protocolMarshaller.marshall(createCustomMetadataRequest.getVersionId(), VERSIONID_BINDING);
            protocolMarshaller.marshall(createCustomMetadataRequest.getCustomMetadata(), CUSTOMMETADATA_BINDING);
        } catch (Exception e) {
            // Wrap any marshalling failure in the SDK's client-side exception type.
            throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
        }
    }
}
|
public class Ifc4PackageImpl {

    /**
     * <!-- begin-user-doc -->
     * Returns the EClass for IfcDuctSilencer, resolved lazily from the registered
     * Ifc4 package (classifier index 204 is fixed by the generated model).
     * <!-- end-user-doc -->
     * @generated
     */
    @Override
    public EClass getIfcDuctSilencer() {
        if (ifcDuctSilencerEClass == null) {
            ifcDuctSilencerEClass = (EClass) EPackage.Registry.INSTANCE.getEPackage(Ifc4Package.eNS_URI).getEClassifiers().get(204);
        }
        return ifcDuctSilencerEClass;
    }
}
|
public class OrderItemUrl { /** * Get Resource Url for GetOrderItemViaLineId
* @ param draft If true , retrieve the draft version of the order , which might include uncommitted changes to the order or its components .
* @ param lineId The specific line id that ' s associated with the order item .
* @ param orderId Unique identifier of the order .
* @ param responseFields Filtering syntax appended to an API call to increase or decrease the amount of data returned inside a JSON object . This parameter should only be used to retrieve data . Attempting to update data using this parameter may cause data loss .
* @ return String Resource Url */
public static MozuUrl getOrderItemViaLineIdUrl ( Boolean draft , Integer lineId , String orderId , String responseFields ) { } }
|
UrlFormatter formatter = new UrlFormatter ( "/api/commerce/orders/{orderId}/items/{lineId}?draft={draft}&responseFields={responseFields}" ) ; formatter . formatUrl ( "draft" , draft ) ; formatter . formatUrl ( "lineId" , lineId ) ; formatter . formatUrl ( "orderId" , orderId ) ; formatter . formatUrl ( "responseFields" , responseFields ) ; return new MozuUrl ( formatter . getResourceUrl ( ) , MozuUrl . UrlLocation . TENANT_POD ) ;
|
public class DiscountCurveInterpolation { /** * Create a discount curve from given times and given discount factors using given interpolation and extrapolation methods .
* @ param name The name of this discount curve .
* @ param referenceDate The reference date for this curve , i . e . , the date which defined t = 0.
* @ param times Array of times as doubles .
* @ param givenDiscountFactors Array of corresponding discount factors .
* @ param isParameter Array of booleans specifying whether this point is served " as as parameter " , e . g . , whether it is calibrates ( e . g . using CalibratedCurves ) .
* @ param interpolationMethod The interpolation method used for the curve .
* @ param extrapolationMethod The extrapolation method used for the curve .
* @ param interpolationEntity The entity interpolated / extrapolated .
* @ return A new discount factor object . */
public static DiscountCurveInterpolation createDiscountCurveFromDiscountFactors ( String name , LocalDate referenceDate , double [ ] times , double [ ] givenDiscountFactors , boolean [ ] isParameter , InterpolationMethod interpolationMethod , ExtrapolationMethod extrapolationMethod , InterpolationEntity interpolationEntity ) { } }
|
DiscountCurveInterpolation discountFactors = new DiscountCurveInterpolation ( name , referenceDate , interpolationMethod , extrapolationMethod , interpolationEntity ) ; for ( int timeIndex = 0 ; timeIndex < times . length ; timeIndex ++ ) { discountFactors . addDiscountFactor ( times [ timeIndex ] , givenDiscountFactors [ timeIndex ] , isParameter != null && isParameter [ timeIndex ] ) ; } return discountFactors ;
|
public class AtomClientFactory {

    /**
     * Create ClientCollection bound to URI.
     *
     * @param uri the Atom collection URI
     * @param authStrategy the authentication strategy used for requests to the collection
     * @return a new ClientCollection bound to the given URI
     * @throws ProponoException if the collection cannot be created
     */
    public static ClientCollection getCollection(final String uri, final AuthStrategy authStrategy) throws ProponoException {
        return new ClientCollection(uri, authStrategy);
    }
}
|
public class Aggregate { /** * < div color = ' red ' style = " font - size : 24px ; color : red " > < b > < i > < u > JCYPHER < / u > < / i > < / b > < / div >
* < div color = ' red ' style = " font - size : 18px ; color : red " > < i > calculate the average of numeric values < / i > < / div >
* < div color = ' red ' style = " font - size : 18px ; color : red " > < i > e . g . . . . aggregate ( ) . < b > avg ( n . property ( " amount " ) ) < / b > < / i > < / div >
* < br / > */
public RElement < RElement < ? > > avg ( JcProperty property ) { } }
|
ReturnExpression rx = getReturnExpression ( ) ; ReturnAggregate ra = ( ReturnAggregate ) rx . getReturnValue ( ) ; ra . setType ( AggregateFunctionType . AVG ) ; ra . setArgument ( property ) ; RElement < RElement < ? > > ret = new RElement < RElement < ? > > ( rx ) ; return ret ;
|
public class ScanSpec {

    /**
     * Add a classpath element filter. The provided ClasspathElementFilter should return
     * true if the path string passed to it is a path you want to scan.
     *
     * @param classpathElementFilter the classpath element filter to apply to all discovered
     *        classpath elements, to decide which should be scanned
     */
    public void filterClasspathElements(final ClasspathElementFilter classpathElementFilter) {
        if (this.classpathElementFilters == null) {
            // Lazily created; most callers register only one or two filters.
            this.classpathElementFilters = new ArrayList<>(2);
        }
        this.classpathElementFilters.add(classpathElementFilter);
    }
}
|
public class DescribeTableRestoreStatusResult { /** * A list of status details for one or more table restore requests .
* < b > NOTE : < / b > This method appends the values to the existing list ( if any ) . Use
* { @ link # setTableRestoreStatusDetails ( java . util . Collection ) } or
* { @ link # withTableRestoreStatusDetails ( java . util . Collection ) } if you want to override the existing values .
* @ param tableRestoreStatusDetails
* A list of status details for one or more table restore requests .
* @ return Returns a reference to this object so that method calls can be chained together . */
public DescribeTableRestoreStatusResult withTableRestoreStatusDetails ( TableRestoreStatus ... tableRestoreStatusDetails ) { } }
|
if ( this . tableRestoreStatusDetails == null ) { setTableRestoreStatusDetails ( new com . amazonaws . internal . SdkInternalList < TableRestoreStatus > ( tableRestoreStatusDetails . length ) ) ; } for ( TableRestoreStatus ele : tableRestoreStatusDetails ) { this . tableRestoreStatusDetails . add ( ele ) ; } return this ;
|
public class SemanticAPI { /** * 语义理解
* @ param accessToken access _ token
* @ param semproxySearch semproxySearch
* @ return SemproxySearchResult
* @ since 2.8.22 */
public static SemproxySearchResult semproxySearch ( String accessToken , SemproxySearch semproxySearch ) { } }
|
return semproxySearch ( accessToken , JsonUtil . toJSONString ( semproxySearch ) ) ;
|
public class MsgNode { /** * Returns the list of expressions for gender values and sets that field to null .
* < p > Note that this node ' s command text will still contain the substring genders = " . . . " . We think
* this is okay since the command text is only used for reporting errors ( in fact , it might be
* good as a reminder of how the msg was originally written ) . */
@ Nullable public List < ExprRootNode > getAndRemoveGenderExprs ( ) { } }
|
List < ExprRootNode > genderExprs = this . genderExprs ; this . genderExprs = null ; return genderExprs ;
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.