signature
stringlengths 43
39.1k
| implementation
stringlengths 0
450k
|
|---|---|
public class ICUHumanize { /** * Formats the given date with the specified style .
* @ param style
* DateFormat style
* @ param value
* Date to be formatted
* @ return String representation of the date */
public static String formatDate ( final int style , final Date value ) { } }
|
return context . get ( ) . formatDate ( style , value ) ;
|
public class QuartzScheduler { /** * Only cron schedule register is supported . Since register might be called when
* concurrently uploading projects , so synchronized is added to ensure thread safety .
* @ param cronExpression the cron schedule for this job
* @ param jobDescription Regarding QuartzJobDescription # groupName , in order to guarantee no
* duplicate quartz schedules , we design the naming convention depending on use cases : < ul >
* < li > User flow schedule : we use { @ link JobKey # JobKey } to represent the identity of a
* flow ' s schedule . The format follows " $ projectID . $ flowName " to guarantee no duplicates .
* < li > Quartz schedule for AZ internal use : the groupName should start with letters , rather
* than
* number , which is the first case . < / ul >
* @ return true if job has been scheduled , false if the same job exists already . */
public synchronized boolean scheduleJobIfAbsent ( final String cronExpression , final QuartzJobDescription jobDescription ) throws SchedulerException { } }
|
requireNonNull ( jobDescription , "jobDescription is null" ) ; if ( ifJobExist ( jobDescription . getJobName ( ) , jobDescription . getGroupName ( ) ) ) { logger . warn ( String . format ( "can not register existing job with job name: " + "%s and group name: %s" , jobDescription . getJobName ( ) , jobDescription . getGroupName ( ) ) ) ; return false ; } if ( ! CronExpression . isValidExpression ( cronExpression ) ) { throw new SchedulerException ( "The cron expression string <" + cronExpression + "> is not valid." ) ; } // TODO kunkun - tang : we will modify this when we start supporting multi schedules per flow .
final JobDetail job = JobBuilder . newJob ( jobDescription . getJobClass ( ) ) . withIdentity ( jobDescription . getJobName ( ) , jobDescription . getGroupName ( ) ) . build ( ) ; // Add external dependencies to Job Data Map .
job . getJobDataMap ( ) . putAll ( jobDescription . getContextMap ( ) ) ; // TODO kunkun - tang : Need management code to deal with different misfire policy
final Trigger trigger = TriggerBuilder . newTrigger ( ) . withSchedule ( CronScheduleBuilder . cronSchedule ( cronExpression ) . withMisfireHandlingInstructionFireAndProceed ( ) // . withMisfireHandlingInstructionDoNothing ( )
// . withMisfireHandlingInstructionIgnoreMisfires ( )
) . build ( ) ; this . scheduler . scheduleJob ( job , trigger ) ; logger . info ( "Quartz Schedule with jobDetail " + job . getDescription ( ) + " is registered." ) ; return true ;
|
public class EnvironmentStream { /** * Logs a message to the original stderr in cases where java . util . logging
* is dangerous , e . g . in the logging code itself . */
public static void logStderr ( String msg , Throwable e ) { } }
|
try { long now = CurrentTime . currentTime ( ) ; // msg = QDate . formatLocal ( now , " [ % Y - % m - % d % H : % M : % S ] " ) + msg ;
_origSystemErr . println ( msg ) ; // e . printStackTrace ( _ origSystemErr . getPrintWriter ( ) ) ;
_origSystemErr . flush ( ) ; } catch ( Throwable e1 ) { }
|
public class AsaCalculator { /** * Gets the van der Waals radius of the given atom following the values defined by
* Chothia ( 1976 ) J . Mol . Biol . 105,1-14
* NOTE : the vdw values defined by the paper assume no Hydrogens and thus " inflates "
* slightly the heavy atoms to account for Hydrogens . Thus this method cannot be used
* in a structure that contains Hydrogens !
* If atom is neither part of a nucleotide nor of a standard aminoacid ,
* the default vdw radius for the element is returned . If atom is of
* unknown type ( element ) the vdw radius of { @ link Element ( ) . N } is returned
* @ param atom
* @ return */
public static double getRadius ( Atom atom ) { } }
|
if ( atom . getElement ( ) == null ) { logger . warn ( "Unrecognised atom " + atom . getName ( ) + " with serial " + atom . getPDBserial ( ) + ", assigning the default vdw radius (Nitrogen vdw radius)." ) ; return Element . N . getVDWRadius ( ) ; } Group res = atom . getGroup ( ) ; if ( res == null ) { logger . warn ( "Unknown parent residue for atom " + atom . getName ( ) + " with serial " + atom . getPDBserial ( ) + ", assigning its default vdw radius" ) ; return atom . getElement ( ) . getVDWRadius ( ) ; } GroupType type = res . getType ( ) ; if ( type == GroupType . AMINOACID ) return getRadiusForAmino ( ( ( AminoAcid ) res ) , atom ) ; if ( type == GroupType . NUCLEOTIDE ) return getRadiusForNucl ( ( NucleotideImpl ) res , atom ) ; return atom . getElement ( ) . getVDWRadius ( ) ;
|
public class TrivialClassCreationBenchmark { /** * Performs a benchmark for a trivial class creation using javassist proxies .
* @ return The created instance , in order to avoid JIT removal . */
@ Benchmark public Class < ? > benchmarkJavassist ( ) { } }
|
ProxyFactory proxyFactory = new ProxyFactory ( ) { protected ClassLoader getClassLoader ( ) { return newClassLoader ( ) ; } } ; proxyFactory . setUseCache ( false ) ; proxyFactory . setUseWriteReplace ( false ) ; proxyFactory . setSuperclass ( baseClass ) ; proxyFactory . setFilter ( new MethodFilter ( ) { public boolean isHandled ( Method method ) { return false ; } } ) ; return proxyFactory . createClass ( ) ;
|
public class BulkIterationNode { /** * Sets the nextPartialSolution for this BulkIterationNode .
* @ param nextPartialSolution The nextPartialSolution to set . */
public void setNextPartialSolution ( OptimizerNode nextPartialSolution , OptimizerNode terminationCriterion ) { } }
|
// check if the root of the step function has the same parallelism as the iteration
// or if the step function has any operator at all
if ( nextPartialSolution . getParallelism ( ) != getParallelism ( ) || nextPartialSolution == partialSolution || nextPartialSolution instanceof BinaryUnionNode ) { // add a no - op to the root to express the re - partitioning
NoOpNode noop = new NoOpNode ( ) ; noop . setParallelism ( getParallelism ( ) ) ; DagConnection noOpConn = new DagConnection ( nextPartialSolution , noop , ExecutionMode . PIPELINED ) ; noop . setIncomingConnection ( noOpConn ) ; nextPartialSolution . addOutgoingConnection ( noOpConn ) ; nextPartialSolution = noop ; } this . nextPartialSolution = nextPartialSolution ; this . terminationCriterion = terminationCriterion ; if ( terminationCriterion == null ) { this . singleRoot = nextPartialSolution ; this . rootConnection = new DagConnection ( nextPartialSolution , ExecutionMode . PIPELINED ) ; } else { // we have a termination criterion
SingleRootJoiner singleRootJoiner = new SingleRootJoiner ( ) ; this . rootConnection = new DagConnection ( nextPartialSolution , singleRootJoiner , ExecutionMode . PIPELINED ) ; this . terminationCriterionRootConnection = new DagConnection ( terminationCriterion , singleRootJoiner , ExecutionMode . PIPELINED ) ; singleRootJoiner . setInputs ( this . rootConnection , this . terminationCriterionRootConnection ) ; this . singleRoot = singleRootJoiner ; // add connection to terminationCriterion for interesting properties visitor
terminationCriterion . addOutgoingConnection ( terminationCriterionRootConnection ) ; } nextPartialSolution . addOutgoingConnection ( rootConnection ) ;
|
public class CobolStringType { /** * { @ inheritDoc } */
public boolean isValid ( Class < T > javaClass , CobolContext cobolContext , byte [ ] hostData , int start ) { } }
|
int hostBytesLen = getBytesLen ( charNum ) ; // Is buffer large enough to contain this type ?
// TODO last field in a record might be truncated if all low - values or
// spaces
if ( hostData . length < start + hostBytesLen ) { return false ; } if ( javaClass . equals ( String . class ) ) { return isValidString ( cobolContext , hostData , start ) ; } else if ( javaClass . equals ( ByteBuffer . class ) ) { return true ; } else { throw new IllegalArgumentException ( "Unsupported java type " + javaClass ) ; }
|
public class ComputeKNNOutlierScores { /** * Iterate over the k range .
* @ param prefix Prefix string
* @ param mink Minimum value of k for this method
* @ param maxk Maximum value of k for this method
* @ param runner Runner to run
* @ param out Output function */
private void runForEachK ( String prefix , int mink , int maxk , IntFunction < OutlierResult > runner , BiConsumer < String , OutlierResult > out ) { } }
|
if ( isDisabled ( prefix ) ) { LOG . verbose ( "Skipping (disabled): " + prefix ) ; return ; // Disabled
} LOG . verbose ( "Running " + prefix ) ; final int digits = ( int ) FastMath . ceil ( FastMath . log10 ( krange . getMax ( ) + 1 ) ) ; final String format = "%s-%0" + digits + "d" ; krange . forEach ( k -> { if ( k >= mink && k <= maxk ) { Duration time = LOG . newDuration ( this . getClass ( ) . getCanonicalName ( ) + "." + prefix + ".k" + k + ".runtime" ) . begin ( ) ; OutlierResult result = runner . apply ( k ) ; LOG . statistics ( time . end ( ) ) ; if ( result != null ) { out . accept ( String . format ( Locale . ROOT , format , prefix , k ) , result ) ; result . getHierarchy ( ) . removeSubtree ( result ) ; } } } ) ;
|
public class ClassLoaderUtil { /** * Load all resources with a given name , potentially aggregating all results
* from the searched classloaders . If no results are found , the resource
* name is prepended by ' / ' and tried again .
* This method will try to load the resources using the following methods
* ( in order ) :
* < ul >
* < li > From Thread . currentThread ( ) . getContextClassLoader ( )
* < li > From ClassLoaderUtil . class . getClassLoader ( )
* < li > callingClass . getClassLoader ( )
* < / ul >
* @ param resourceName
* The name of the resources to load
* @ param callingClass
* The Class object of the calling object
* @ param aggregate
* < code > true < / code > to aggregate resources from all classloaders
* @ return Iterator of matching resources
* @ throws IOException
* If I / O errors occur */
public static Iterator < URL > getResources ( String resourceName , Class < ? > callingClass , boolean aggregate ) throws IOException { } }
|
AggregateIterator < URL > iterator = new AggregateIterator < URL > ( ) ; iterator . addEnumeration ( Thread . currentThread ( ) . getContextClassLoader ( ) . getResources ( resourceName ) ) ; if ( ! iterator . hasNext ( ) || aggregate ) { iterator . addEnumeration ( ClassLoaderUtil . class . getClassLoader ( ) . getResources ( resourceName ) ) ; } if ( ! iterator . hasNext ( ) || aggregate ) { ClassLoader cl = callingClass . getClassLoader ( ) ; if ( cl != null ) { iterator . addEnumeration ( cl . getResources ( resourceName ) ) ; } } if ( ! iterator . hasNext ( ) && ( resourceName != null ) && ( ( resourceName . length ( ) == 0 ) || ( resourceName . charAt ( 0 ) != '/' ) ) ) { return getResources ( '/' + resourceName , callingClass , aggregate ) ; } return iterator ;
|
public class WebDriverTool { /** * Delegates to { @ link # findElement ( By ) } and then calls
* { @ link WebElement # getCssValue ( String ) getAttribute ( String ) } on the returned element .
* @ param by
* the { @ link By } used to locate the element
* @ param propertyName
* the name of the CSS property
* @ return The current , computed value of the property . */
public String getCssValue ( final By by , final String propertyName ) { } }
|
WebElement element = findElement ( by ) ; return element . getCssValue ( propertyName ) ;
|
public class EsMarshalling { /** * Unmarshals the given map source into a bean .
* @ param source the source
* @ return the plan version summary */
public static PlanVersionSummaryBean unmarshallPlanVersionSummary ( Map < String , Object > source ) { } }
|
if ( source == null ) { return null ; } PlanVersionSummaryBean bean = new PlanVersionSummaryBean ( ) ; bean . setDescription ( asString ( source . get ( "planDescription" ) ) ) ; bean . setId ( asString ( source . get ( "planId" ) ) ) ; bean . setName ( asString ( source . get ( "planName" ) ) ) ; bean . setOrganizationId ( asString ( source . get ( "organizationId" ) ) ) ; bean . setOrganizationName ( asString ( source . get ( "organizationName" ) ) ) ; bean . setStatus ( asEnum ( source . get ( "status" ) , PlanStatus . class ) ) ; bean . setVersion ( asString ( source . get ( "version" ) ) ) ; postMarshall ( bean ) ; return bean ;
|
public class RegistrationManagerImpl { /** * { @ inheritDoc } */
@ Override public void updateService ( ProvidedServiceInstance serviceInstance ) throws ServiceException { } }
|
if ( ! isStarted ) { ServiceDirectoryError error = new ServiceDirectoryError ( ErrorCode . SERVICE_DIRECTORY_MANAGER_FACTORY_CLOSED ) ; throw new ServiceException ( error ) ; } if ( serviceInstance == null ) { throw new IllegalArgumentException ( "The ServiceInstance can not be null." ) ; } ErrorCode code = ServiceInstanceUtils . validateProvidedServiceInstance ( serviceInstance ) ; if ( ! code . equals ( ErrorCode . OK ) ) { ServiceDirectoryError error = new ServiceDirectoryError ( code ) ; throw new ServiceException ( error ) ; } getRegistrationService ( ) . updateService ( serviceInstance ) ;
|
public class AponReader { /** * Converts to a Parameters object from a file .
* @ param file the file to parse
* @ param encoding the character encoding
* @ return the Parameters object
* @ throws AponParseException if reading APON format document fails */
public static Parameters parse ( File file , String encoding ) throws AponParseException { } }
|
if ( file == null ) { throw new IllegalArgumentException ( "file must not be null" ) ; } Parameters parameters = new VariableParameters ( ) ; return parse ( file , encoding , parameters ) ;
|
public class gslbvserver { /** * Use this API to update gslbvserver resources . */
public static base_responses update ( nitro_service client , gslbvserver resources [ ] ) throws Exception { } }
|
base_responses result = null ; if ( resources != null && resources . length > 0 ) { gslbvserver updateresources [ ] = new gslbvserver [ resources . length ] ; for ( int i = 0 ; i < resources . length ; i ++ ) { updateresources [ i ] = new gslbvserver ( ) ; updateresources [ i ] . name = resources [ i ] . name ; updateresources [ i ] . iptype = resources [ i ] . iptype ; updateresources [ i ] . dnsrecordtype = resources [ i ] . dnsrecordtype ; updateresources [ i ] . backupvserver = resources [ i ] . backupvserver ; updateresources [ i ] . backupsessiontimeout = resources [ i ] . backupsessiontimeout ; updateresources [ i ] . lbmethod = resources [ i ] . lbmethod ; updateresources [ i ] . backuplbmethod = resources [ i ] . backuplbmethod ; updateresources [ i ] . netmask = resources [ i ] . netmask ; updateresources [ i ] . v6netmasklen = resources [ i ] . v6netmasklen ; updateresources [ i ] . tolerance = resources [ i ] . tolerance ; updateresources [ i ] . persistencetype = resources [ i ] . persistencetype ; updateresources [ i ] . persistenceid = resources [ i ] . persistenceid ; updateresources [ i ] . persistmask = resources [ i ] . persistmask ; updateresources [ i ] . v6persistmasklen = resources [ i ] . v6persistmasklen ; updateresources [ i ] . timeout = resources [ i ] . timeout ; updateresources [ i ] . edr = resources [ i ] . edr ; updateresources [ i ] . mir = resources [ i ] . mir ; updateresources [ i ] . disableprimaryondown = resources [ i ] . disableprimaryondown ; updateresources [ i ] . dynamicweight = resources [ i ] . dynamicweight ; updateresources [ i ] . considereffectivestate = resources [ i ] . considereffectivestate ; updateresources [ i ] . somethod = resources [ i ] . somethod ; updateresources [ i ] . sopersistence = resources [ i ] . sopersistence ; updateresources [ i ] . sopersistencetimeout = resources [ i ] . sopersistencetimeout ; updateresources [ i ] . sothreshold = resources [ i ] . sothreshold ; updateresources [ i ] . 
sobackupaction = resources [ i ] . sobackupaction ; updateresources [ i ] . servicename = resources [ i ] . servicename ; updateresources [ i ] . weight = resources [ i ] . weight ; updateresources [ i ] . domainname = resources [ i ] . domainname ; updateresources [ i ] . ttl = resources [ i ] . ttl ; updateresources [ i ] . backupip = resources [ i ] . backupip ; updateresources [ i ] . cookie_domain = resources [ i ] . cookie_domain ; updateresources [ i ] . cookietimeout = resources [ i ] . cookietimeout ; updateresources [ i ] . sitedomainttl = resources [ i ] . sitedomainttl ; updateresources [ i ] . comment = resources [ i ] . comment ; updateresources [ i ] . appflowlog = resources [ i ] . appflowlog ; } result = update_bulk_request ( client , updateresources ) ; } return result ;
|
public class AipEasyDL { /** * easyDL通用请求方法
* @ param url 服务的url
* @ param image 图片二进制数据
* @ param options 可选参数
* @ return Json返回 */
public JSONObject sendImageRequest ( String url , byte [ ] image , HashMap < String , Object > options ) { } }
|
AipRequest request = new AipRequest ( ) ; preOperation ( request ) ; String content = Base64Util . encode ( image ) ; request . addBody ( "image" , content ) ; if ( options != null ) { request . addBody ( options ) ; } request . setUri ( url ) ; request . addHeader ( Headers . CONTENT_ENCODING , HttpCharacterEncoding . ENCODE_UTF8 ) ; request . addHeader ( Headers . CONTENT_TYPE , HttpContentType . JSON_DATA ) ; request . setBodyFormat ( EBodyFormat . RAW_JSON ) ; postOperation ( request ) ; return requestServer ( request ) ;
|
public class MP3FileID3Controller { /** * Returns the track of this mp3 if set and the empty string if not .
* @ return the track of this mp3
* @ exception ID3v2FormatException if the data of this field is incorrect */
public String getTrack ( int type ) { } }
|
if ( allow ( type & ID3V1 ) ) { return Integer . toString ( id3v1 . getTrack ( ) ) ; } if ( allow ( type & ID3V2 ) ) { return id3v2 . getFrameDataString ( ID3v2Frames . TRACK_NUMBER ) ; } return null ;
|
public class XmlParserBase { /** * Parse content that is known to be a resource */
@ Override public Resource parse ( InputStream input ) throws IOException , FHIRFormatError { } }
|
try { XmlPullParser xpp = loadXml ( input ) ; return parse ( xpp ) ; } catch ( XmlPullParserException e ) { throw new FHIRFormatError ( e . getMessage ( ) , e ) ; }
|
public class HttpSupport { /** * Sends long to live cookie to browse with response . This cookie will be asked to live for 20 years .
* @ param name name of cookie
* @ param value value of cookie . */
public void sendPermanentCookie ( String name , String value ) { } }
|
Cookie cookie = new Cookie ( name , value ) ; cookie . setMaxAge ( 60 * 60 * 24 * 365 * 20 ) ; RequestContext . getHttpResponse ( ) . addCookie ( Cookie . toServletCookie ( cookie ) ) ;
|
public class VatIdValidator { /** * check the VAT identification number , country version for Sweden .
* @ param pvatId vat id to check
* @ return true if checksum is ok */
private boolean checkSeVatId ( final String pvatId ) { } }
|
final int checkSum = pvatId . charAt ( 11 ) - '0' ; final int sum = squareSum ( ( pvatId . charAt ( 2 ) - '0' ) * 2 ) + pvatId . charAt ( 3 ) - '0' + squareSum ( ( pvatId . charAt ( 4 ) - '0' ) * 2 ) + pvatId . charAt ( 5 ) - '0' + squareSum ( ( pvatId . charAt ( 6 ) - '0' ) * 2 ) + pvatId . charAt ( 7 ) - '0' + squareSum ( ( pvatId . charAt ( 8 ) - '0' ) * 2 ) + pvatId . charAt ( 9 ) - '0' + squareSum ( ( pvatId . charAt ( 10 ) - '0' ) * 2 ) ; int calculatedCheckSum = 10 - sum % 10 ; if ( calculatedCheckSum == 10 ) { calculatedCheckSum = 0 ; } return checkSum == calculatedCheckSum ;
|
public class AbstractDestinationHandler { /** * / * ( non - Javadoc )
* @ see com . ibm . ws . sib . processor . impl . interfaces . DestinationHandler # getDestinationManager ( ) */
public DestinationManager getDestinationManager ( ) { } }
|
if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) { SibTr . entry ( tc , "getDestinationManager" ) ; SibTr . exit ( tc , "getDestinationManager" , destinationManager ) ; } return destinationManager ;
|
public class MultiIndex { /** * Create index .
* @ param iterator
* the NodeDataIndexing iterator
* @ param rootNode
* the root node of the index
* @ param count
* the number of nodes already indexed .
* @ throws IOException
* if an error occurs while writing to the index .
* @ throws RepositoryException
* if any other error occurs
* @ throws InterruptedException
* if the task has been interrupted */
private void createIndex ( final NodeDataIndexingIterator iterator , NodeData rootNode , final AtomicLong count , final AtomicLong processed ) throws RepositoryException , InterruptedException , IOException { } }
|
for ( NodeDataIndexing node : iterator . next ( ) ) { processed . incrementAndGet ( ) ; if ( stopped . get ( ) || Thread . interrupted ( ) ) { throw new InterruptedException ( ) ; } if ( indexingTree . isExcluded ( node ) ) { continue ; } if ( ! node . getQPath ( ) . isDescendantOf ( rootNode . getQPath ( ) ) && ! node . getQPath ( ) . equals ( rootNode . getQPath ( ) ) ) { continue ; } executeAndLog ( new AddNode ( getTransactionId ( ) , node , true ) ) ; if ( count . incrementAndGet ( ) % 1000 == 0 ) { if ( nodesCount == null ) { LOG . info ( "indexing... {} ({})" , node . getQPath ( ) . getAsString ( ) , count . get ( ) ) ; } else { DecimalFormat format = new DecimalFormat ( "###.#" ) ; LOG . info ( "indexing... {} ({}%)" , node . getQPath ( ) . getAsString ( ) , format . format ( Math . min ( 100d * processed . get ( ) / nodesCount . get ( ) , 100 ) ) ) ; } } synchronized ( this ) { checkVolatileCommit ( ) ; } }
|
public class GapStatistic { /** * { @ inheritDoc } */
public Assignments cluster ( Matrix matrix , Properties props ) { } }
|
return cluster ( matrix , Integer . MAX_VALUE , props ) ;
|
public class UserInfo { /** * Is this a valid user property ? . */
public boolean validUserProperty ( String strProperty ) { } }
|
if ( DBParams . FRAMES . equalsIgnoreCase ( strProperty ) ) return true ; if ( DBParams . MENUBARS . equalsIgnoreCase ( strProperty ) ) return true ; if ( DBParams . NAVMENUS . equalsIgnoreCase ( strProperty ) ) return true ; if ( DBParams . JAVA . equalsIgnoreCase ( strProperty ) ) return true ; if ( DBParams . BANNERS . equalsIgnoreCase ( strProperty ) ) return true ; if ( DBParams . LOGOS . equalsIgnoreCase ( strProperty ) ) return true ; if ( DBParams . TRAILERS . equalsIgnoreCase ( strProperty ) ) return true ; if ( DBParams . LANGUAGE . equalsIgnoreCase ( strProperty ) ) return true ; return false ;
|
public class DynamicPooledExecutor { /** * Executes a set of commands and waits until all commands have been
* executed . The results of the commands are returned in the same order as
* the commands .
* @ param commands the commands to execute .
* @ return the results . */
public Result [ ] executeAndWait ( Command [ ] commands ) { } }
|
Result [ ] results = new Result [ commands . length ] ; if ( numProcessors == 1 ) { // optimize for one processor
for ( int i = 0 ; i < commands . length ; i ++ ) { Object obj = null ; InvocationTargetException ex = null ; try { obj = commands [ i ] . call ( ) ; } catch ( Exception e ) { ex = new InvocationTargetException ( e ) ; } results [ i ] = new Result ( obj , ex ) ; } } else { FutureResult [ ] futures = new FutureResult [ commands . length ] ; for ( int i = 0 ; i < commands . length ; i ++ ) { final Command c = commands [ i ] ; futures [ i ] = new FutureResult ( ) ; Runnable r = futures [ i ] . setter ( new Callable ( ) { public Object call ( ) throws Exception { return c . call ( ) ; } } ) ; try { executor . execute ( r ) ; } catch ( InterruptedException e ) { // run with current thread instead
r . run ( ) ; } } // wait for all results
boolean interrupted = false ; for ( int i = 0 ; i < futures . length ; i ++ ) { Object obj = null ; InvocationTargetException ex = null ; for ( ; ; ) { try { obj = futures [ i ] . get ( ) ; } catch ( InterruptedException e ) { interrupted = true ; // reset interrupted status and try again
Thread . interrupted ( ) ; continue ; } catch ( InvocationTargetException e ) { ex = e ; } results [ i ] = new Result ( obj , ex ) ; break ; } } if ( interrupted ) { // restore interrupt status again
Thread . currentThread ( ) . interrupt ( ) ; } } return results ;
|
public class KltTracker { /** * Returns true if the features is entirely enclosed inside of the image . */
public boolean isFullyInside ( float x , float y ) { } }
|
if ( x < allowedLeft || x > allowedRight ) return false ; if ( y < allowedTop || y > allowedBottom ) return false ; return true ;
|
public class RetireJSDataSource { /** * Downloads the current RetireJS data source .
* @ return returns false as no updates are made to the database that would
* require compaction
* @ throws UpdateException thrown if the update failed */
@ Override public boolean update ( Engine engine ) throws UpdateException { } }
|
this . settings = engine . getSettings ( ) ; String url = null ; try { final boolean autoupdate = settings . getBoolean ( Settings . KEYS . AUTO_UPDATE , true ) ; final boolean enabled = settings . getBoolean ( Settings . KEYS . ANALYZER_RETIREJS_ENABLED , true ) ; final File repoFile = new File ( settings . getDataDirectory ( ) , "jsrepository.json" ) ; final boolean proceed = enabled && autoupdate && shouldUpdagte ( repoFile ) ; if ( proceed ) { LOGGER . debug ( "Begin RetireJS Update" ) ; url = settings . getString ( Settings . KEYS . ANALYZER_RETIREJS_REPO_JS_URL , DEFAULT_JS_URL ) ; initializeRetireJsRepo ( settings , new URL ( url ) ) ; } } catch ( InvalidSettingException ex ) { throw new UpdateException ( "Unable to determine if autoupdate is enabled" , ex ) ; } catch ( MalformedURLException ex ) { throw new UpdateException ( String . format ( "Inavlid URL for RetireJS repository (%s)" , url ) , ex ) ; } catch ( IOException ex ) { throw new UpdateException ( "Unable to get the data directory" , ex ) ; } return false ;
|
public class UpdateBranchRequestMarshaller { /** * Marshall the given parameter object . */
public void marshall ( UpdateBranchRequest updateBranchRequest , ProtocolMarshaller protocolMarshaller ) { } }
|
if ( updateBranchRequest == null ) { throw new SdkClientException ( "Invalid argument passed to marshall(...)" ) ; } try { protocolMarshaller . marshall ( updateBranchRequest . getAppId ( ) , APPID_BINDING ) ; protocolMarshaller . marshall ( updateBranchRequest . getBranchName ( ) , BRANCHNAME_BINDING ) ; protocolMarshaller . marshall ( updateBranchRequest . getDescription ( ) , DESCRIPTION_BINDING ) ; protocolMarshaller . marshall ( updateBranchRequest . getFramework ( ) , FRAMEWORK_BINDING ) ; protocolMarshaller . marshall ( updateBranchRequest . getStage ( ) , STAGE_BINDING ) ; protocolMarshaller . marshall ( updateBranchRequest . getEnableNotification ( ) , ENABLENOTIFICATION_BINDING ) ; protocolMarshaller . marshall ( updateBranchRequest . getEnableAutoBuild ( ) , ENABLEAUTOBUILD_BINDING ) ; protocolMarshaller . marshall ( updateBranchRequest . getEnvironmentVariables ( ) , ENVIRONMENTVARIABLES_BINDING ) ; protocolMarshaller . marshall ( updateBranchRequest . getBasicAuthCredentials ( ) , BASICAUTHCREDENTIALS_BINDING ) ; protocolMarshaller . marshall ( updateBranchRequest . getEnableBasicAuth ( ) , ENABLEBASICAUTH_BINDING ) ; protocolMarshaller . marshall ( updateBranchRequest . getBuildSpec ( ) , BUILDSPEC_BINDING ) ; protocolMarshaller . marshall ( updateBranchRequest . getTtl ( ) , TTL_BINDING ) ; } catch ( Exception e ) { throw new SdkClientException ( "Unable to marshall request to JSON: " + e . getMessage ( ) , e ) ; }
|
public class GZIPChannel { /** * Changes uncompressed position . Only forward direction is allowed with
* small skips . This method is for alignment purposes mostly .
* @ param newPosition
* @ return
* @ throws IOException */
@ Override public GZIPChannel position ( long newPosition ) throws IOException { } }
|
int skip = ( int ) ( newPosition - position ( ) ) ; if ( skip < 0 ) { throw new UnsupportedOperationException ( "backwards position not supported" ) ; } if ( skip > skipBuffer . capacity ( ) ) { throw new UnsupportedOperationException ( skip + " skip not supported maxSkipSize=" + maxSkipSize ) ; } if ( skip > 0 ) { if ( skipBuffer == null ) { throw new UnsupportedOperationException ( "skip not supported maxSkipSize=" + maxSkipSize ) ; } skipBuffer . clear ( ) ; skipBuffer . limit ( skip ) ; if ( options . contains ( READ ) ) { read ( skipBuffer ) ; } else { write ( skipBuffer ) ; } } return this ;
|
public class SecurityUtils { /** * Sets the authentication attribute in the current request .
* @ param authentication the authentication object to set as request attribute */
public static void setCurrentAuthentication ( Authentication authentication ) { } }
|
RequestContext context = RequestContext . getCurrent ( ) ; if ( context != null ) { setAuthentication ( context . getRequest ( ) , authentication ) ; }
|
public class StringFixture {

    /**
     * Determines the length of a string, treating null as length 0.
     *
     * @param value value to determine the length of, may be null
     * @return length of value, or 0 when value is null
     */
    public int lengthOf(String value) {
        return value == null ? 0 : value.length();
    }
}
|
public class ConstructorWriterImpl { /** * { @ inheritDoc } */
@ Override public Content getConstructorDetails ( Content constructorDetailsTree ) { } }
|
if ( configuration . allowTag ( HtmlTag . SECTION ) ) { HtmlTree htmlTree = HtmlTree . SECTION ( getMemberTree ( constructorDetailsTree ) ) ; return htmlTree ; } return getMemberTree ( constructorDetailsTree ) ;
|
public class RLSSuspendTokenManager { /** * Registers that a suspend call has been made on the RecoveryLogService and generates
* a unique RLSSuspendToken which must be passed in to registerResume to cancel this
* suspend operation
* @ param timeout the value in seconds in which a corresponding resume call is expected
* @ return A unique token */
RLSSuspendToken registerSuspend ( int timeout ) { } }
|
if ( tc . isEntryEnabled ( ) ) Tr . entry ( tc , "registerSuspend" , new Integer ( timeout ) ) ; // Generate the suspend token
// RLSSuspendToken token = new RLSSuspendTokenImpl ( ) ;
RLSSuspendToken token = Configuration . getRecoveryLogComponent ( ) . createRLSSuspendToken ( null ) ; // Alarm reference
Alarm alarm = null ; // For a timeout value greater than zero , we create an alarm
// A zero timeout value indicates that this suspend operation will
// never timeout , hence no alarm is required
if ( timeout > 0 ) { // Create an alarm
// alarm = AlarmManager . createNonDeferrable ( ( ( long ) timeout ) * 1000L , this , token ) ;
alarm = Configuration . getAlarmManager ( ) . scheduleAlarm ( timeout * 1000L , this , token ) ; if ( tc . isEventEnabled ( ) ) Tr . event ( tc , "Alarm has been created for this suspend call" , alarm ) ; } synchronized ( _tokenMap ) { // Register the token and the alarm with the token map
// bearing in mind that this alarm could be null
_tokenMap . put ( token , alarm ) ; } if ( tc . isEntryEnabled ( ) ) Tr . exit ( tc , "registerSuspend" , token ) ; // Return the generated token
return token ;
|
public class CompositeResourceBundle { /** * ( non - Javadoc )
* @ see net . jawr . web . resource . bundle . JoinableResourceBundleImpl #
* createBundlePathMappingBuilder ( java . lang . String ,
* net . jawr . web . resource . handler . reader . ResourceReaderHandler ,
* net . jawr . web . resource . bundle . generator . GeneratorRegistry ) */
@ Override protected BundlePathMappingBuilder createBundlePathMappingBuilder ( String fileExtension , ResourceReaderHandler resourceReaderHandler , GeneratorRegistry generatorRegistry ) { } }
|
return new CompositeBundlePathMappingBuilder ( this , fileExtension , generatorRegistry , resourceReaderHandler ) ;
|
public class CommerceAccountOrganizationRelPersistenceImpl {
    /**
     * Returns the commerce account organization rels before and after the current
     * commerce account organization rel in the ordered set where commerceAccountId = &#63;.
     *
     * @param commerceAccountOrganizationRelPK the primary key of the current commerce account organization rel
     * @param commerceAccountId the commerce account ID
     * @param orderByComparator the comparator to order the set by (optionally <code>null</code>)
     * @return a three-element array holding the previous, current, and next commerce account organization rel
     * @throws NoSuchAccountOrganizationRelException if a commerce account organization rel with the primary key could not be found
     */
    @Override
    public CommerceAccountOrganizationRel[] findByCommerceAccountId_PrevAndNext(
            CommerceAccountOrganizationRelPK commerceAccountOrganizationRelPK, long commerceAccountId,
            OrderByComparator<CommerceAccountOrganizationRel> orderByComparator)
            throws NoSuchAccountOrganizationRelException {
        // Throws NoSuchAccountOrganizationRelException if the PK does not exist.
        CommerceAccountOrganizationRel commerceAccountOrganizationRel = findByPrimaryKey(commerceAccountOrganizationRelPK);
        Session session = null;
        try {
            session = openSession();
            CommerceAccountOrganizationRel[] array = new CommerceAccountOrganizationRelImpl[3];
            // [0] = previous element, [1] = current element, [2] = next element.
            array[0] = getByCommerceAccountId_PrevAndNext(session, commerceAccountOrganizationRel, commerceAccountId, orderByComparator, true);
            array[1] = commerceAccountOrganizationRel;
            array[2] = getByCommerceAccountId_PrevAndNext(session, commerceAccountOrganizationRel, commerceAccountId, orderByComparator, false);
            return array;
        } catch (Exception e) {
            // Wrap any ORM failure in the persistence layer's standard exception.
            throw processException(e);
        } finally {
            // Always release the ORM session, even on failure.
            closeSession(session);
        }
    }
}
public class AbstractBean { /** * Creates a new instance of the PropertyChangeEvent initialized with this Bean as the source as well as the
* name of the property that is changing along with the property ' s old and new values . A PropertyChangeEvent
* will be created only if event dispatching to registered listeners is enabled and there are either
* PropertyChangeListeners or VetoableChangeListeners registered on this Bean .
* @ param propertyName a String value specifying the name of the property on this Bean that is being changed .
* @ param oldValue an Object containing the old value of the specified property .
* @ param newValue an Object containing the new value for the specified property .
* @ return a PropertyChangeEvent for this Bean specifying the name of the property changing along with the
* property ' s old and new value .
* @ see java . beans . PropertyChangeEvent */
protected PropertyChangeEvent newPropertyChangeEvent ( String propertyName , Object oldValue , Object newValue ) { } }
|
if ( isEventDispatchEnabled ( ) ) { if ( vetoableChangeSupport . hasListeners ( propertyName ) || propertyChangeSupport . hasListeners ( propertyName ) ) { return new PropertyChangeEvent ( this , propertyName , oldValue , newValue ) ; } } return this . propertyChangeEvent ;
|
public class FeaturableModel { /** * Get all with that require an injected service .
* @ param object The object which requires injected services .
* @ return The field requiring injected services . */
private static List < Field > getServiceFields ( Object object ) { } }
|
final List < Field > toInject = new ArrayList < > ( ) ; Class < ? > clazz = object . getClass ( ) ; while ( clazz != null ) { final Field [ ] fields = clazz . getDeclaredFields ( ) ; final int length = fields . length ; for ( int i = 0 ; i < length ; i ++ ) { final Field field = fields [ i ] ; if ( field . isAnnotationPresent ( FeatureGet . class ) ) { toInject . add ( field ) ; } } clazz = clazz . getSuperclass ( ) ; } return toInject ;
|
public class AfplibPackageImpl {
    /**
     * Returns the {@link EClass} metamodel object for the BandImageRG type,
     * lazily resolving it from the globally registered AFP library package.
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @generated
     */
    public EClass getBandImageRG() {
        // Lazily resolve the classifier from the global EMF package registry on first use.
        if (bandImageRGEClass == null) {
            // Index 426 is fixed by the generated package model — do not edit by hand.
            bandImageRGEClass = (EClass) EPackage.Registry.INSTANCE.getEPackage(AfplibPackage.eNS_URI).getEClassifiers().get(426);
        }
        return bandImageRGEClass;
    }
}
public class StaticIntGenerator { /** * Compute the minimum and maximum . */
private void updateMinMax ( ) { } }
|
if ( values . length == 0 ) { return ; // Keep invalid .
} min = max = values [ 0 ] ; for ( int i = 1 ; i < values . length ; i ++ ) { int v = values [ i ] ; min = min < v ? min : v ; max = max > v ? max : v ; }
|
public class Purge {
    /**
     * Takes the properties supplied and updates the dependency-check settings.
     * Additionally, this sets the system properties required to change the proxy
     * server, port, and connection timeout.
     *
     * @throws BuildException thrown if the properties file cannot be read and
     *         {@code failOnError} is set; otherwise the failure is only logged
     */
    protected void populateSettings() throws BuildException {
        settings = new Settings();
        // Load the task's bundled default properties from the classpath.
        try (InputStream taskProperties = this.getClass().getClassLoader().getResourceAsStream(PROPERTIES_FILE)) {
            settings.mergeProperties(taskProperties);
        } catch (IOException ex) {
            final String msg = "Unable to load the dependency-check ant task.properties file.";
            if (this.failOnError) {
                throw new BuildException(msg, ex);
            }
            // Best-effort mode: warn and continue with whatever defaults exist.
            log(msg, ex, Project.MSG_WARN);
        }
        if (dataDirectory != null) {
            // Explicit data directory supplied by the build script.
            settings.setString(Settings.KEYS.DATA_DIRECTORY, dataDirectory);
        } else {
            // Default: resolve the data directory relative to the jar's location
            // using the (relative) path configured in the properties.
            final File jarPath = new File(Purge.class.getProtectionDomain().getCodeSource().getLocation().getPath());
            final File base = jarPath.getParentFile();
            final String sub = settings.getString(Settings.KEYS.DATA_DIRECTORY);
            final File dataDir = new File(base, sub);
            settings.setString(Settings.KEYS.DATA_DIRECTORY, dataDir.getAbsolutePath());
        }
    }
}
public class AmazonRoute53Client { /** * Returns the current status of a change batch request . The status is one of the following values :
* < ul >
* < li >
* < code > PENDING < / code > indicates that the changes in this request have not propagated to all Amazon Route 53 DNS
* servers . This is the initial status of all change batch requests .
* < / li >
* < li >
* < code > INSYNC < / code > indicates that the changes have propagated to all Route 53 DNS servers .
* < / li >
* < / ul >
* @ param getChangeRequest
* The input for a GetChange request .
* @ return Result of the GetChange operation returned by the service .
* @ throws NoSuchChangeException
* A change with the specified change ID does not exist .
* @ throws InvalidInputException
* The input is not valid .
* @ sample AmazonRoute53 . GetChange
* @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / route53-2013-04-01 / GetChange " target = " _ top " > AWS API
* Documentation < / a > */
@ Override public GetChangeResult getChange ( GetChangeRequest request ) { } }
|
request = beforeClientExecution ( request ) ; return executeGetChange ( request ) ;
|
public class SynchronizationPoint {
    /**
     * Send the given top level stream element and wait for a response.
     *
     * @param request the plain stream element to send; may be null to only wait
     * @return <code>null</code> if the synchronization point was successful, or the failure Exception
     * @throws NoResponseException if no response was received
     * @throws NotConnectedException if the connection is not connected
     * @throws InterruptedException if the connection is interrupted
     */
    public Exception sendAndWaitForResponse(TopLevelStreamElement request) throws NoResponseException, NotConnectedException, InterruptedException {
        // Precondition: a sync point may only be used once, from its initial state.
        assert (state == State.Initial);
        connectionLock.lock();
        try {
            if (request != null) {
                // Dispatch through the type-appropriate send method.
                if (request instanceof Stanza) {
                    connection.sendStanza((Stanza) request);
                } else if (request instanceof Nonza) {
                    connection.sendNonza((Nonza) request);
                } else {
                    throw new IllegalStateException("Unsupported element type");
                }
                // Mark that a request is in flight before waiting.
                state = State.RequestSent;
            }
            // Blocks until the condition is signalled or the timeout elapses.
            waitForConditionOrTimeout();
        } finally {
            connectionLock.unlock();
        }
        // Translates the final state into null (success) or the failure cause.
        return checkForResponse();
    }
}
public class DefaultWhenVertx { /** * Deploy a verticle programmatically
* @ param name The verticle identifier
* @ return A promise for the deployment id */
@ Override public Promise < String > deployVerticle ( String name ) { } }
|
return adapter . toPromise ( handler -> vertx . deployVerticle ( name , handler ) ) ;
|
public class Widget { /** * Execute a { @ link Runnable } on the GL thread . If this method is called
* from the GL thread , the { @ code Runnable } is executed immediately ;
* otherwise , the { @ code Runnable } will be executed in the next frame .
* This differs from { @ link GVRContext # runOnGlThread ( Runnable ) } : that method
* always queues the { @ code Runnable } for execution in the next frame .
* @ param r { @ link Runnable } to execute on the GL thread . */
protected final void runOnGlThread ( final Runnable r ) { } }
|
getGVRContext ( ) . runOnGlThread ( new Runnable ( ) { public void run ( ) { FPSCounter . timeCheck ( "runOnGlThread <START>: " + r ) ; r . run ( ) ; FPSCounter . timeCheck ( "runOnGlThread <END>: " + r ) ; } } ) ;
|
public class PresetSettingsMarshaller {
    /**
     * Marshall the given parameter object into the protocol representation.
     *
     * @param presetSettings the settings to marshal; must not be null
     * @param protocolMarshaller the marshaller that receives each bound field
     * @throws SdkClientException if the argument is null or marshalling fails
     */
    public void marshall(PresetSettings presetSettings, ProtocolMarshaller protocolMarshaller) {
        if (presetSettings == null) {
            throw new SdkClientException("Invalid argument passed to marshall(...)");
        }
        try {
            // Each getter is bound to its wire field by the *_BINDING descriptors.
            protocolMarshaller.marshall(presetSettings.getAudioDescriptions(), AUDIODESCRIPTIONS_BINDING);
            protocolMarshaller.marshall(presetSettings.getCaptionDescriptions(), CAPTIONDESCRIPTIONS_BINDING);
            protocolMarshaller.marshall(presetSettings.getContainerSettings(), CONTAINERSETTINGS_BINDING);
            protocolMarshaller.marshall(presetSettings.getVideoDescription(), VIDEODESCRIPTION_BINDING);
        } catch (Exception e) {
            // Preserve the cause so callers can diagnose the underlying failure.
            throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
        }
    }
}
public class AwsJobExecutionsRolloutConfigMarshaller {
    /**
     * Marshall the given parameter object into the protocol representation.
     *
     * @param awsJobExecutionsRolloutConfig the config to marshal; must not be null
     * @param protocolMarshaller the marshaller that receives each bound field
     * @throws SdkClientException if the argument is null or marshalling fails
     */
    public void marshall(AwsJobExecutionsRolloutConfig awsJobExecutionsRolloutConfig, ProtocolMarshaller protocolMarshaller) {
        if (awsJobExecutionsRolloutConfig == null) {
            throw new SdkClientException("Invalid argument passed to marshall(...)");
        }
        try {
            // Single field bound to its wire representation by the descriptor.
            protocolMarshaller.marshall(awsJobExecutionsRolloutConfig.getMaximumPerMinute(), MAXIMUMPERMINUTE_BINDING);
        } catch (Exception e) {
            // Preserve the cause so callers can diagnose the underlying failure.
            throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
        }
    }
}
public class JSType { /** * Returns the template type argument in this type ' s map corresponding to the supertype ' s template
* parameter , or the UNKNOWN _ TYPE if the supertype template key is not present .
* < p > Note : this only supports arguments that have a singleton list of template keys , and will
* throw an exception for arguments with zero or multiple or template keys . */
public JSType getInstantiatedTypeArgument ( JSType supertype ) { } }
|
TemplateType templateType = Iterables . getOnlyElement ( supertype . getTemplateTypeMap ( ) . getTemplateKeys ( ) ) ; return getTemplateTypeMap ( ) . getResolvedTemplateType ( templateType ) ;
|
public class Jpa20BeanManager {
    /**
     * Collects, within a single transaction, the full set of beans that must be
     * validated together with the given beans (the beans plus whatever related
     * beans {@code JpaBean.getBeanToValidate} resolves for their ids), indexed by
     * bean id.
     *
     * <p>NOTE(review): the previous javadoc here described predecessor/successor
     * fetching for a single schema instance and did not match this method; the
     * exact neighbourhood fetched is determined by {@code JpaBean.getBeanToValidate}
     * — confirm against that implementation.
     *
     * @param beans the beans targeted for validation
     * @return the beans to validate, indexed by id; empty if no transaction could be begun
     * @throws AbortRuntimeException propagated unchanged after rolling back
     */
    @Override
    public Map<BeanId, Bean> getBeanToValidate(Collection<Bean> beans) throws AbortRuntimeException {
        try {
            // If a transaction cannot be started there is nothing to validate.
            if (!begin()) {
                return new HashMap<>();
            }
            Set<BeanId> ids = new HashSet<>();
            for (Bean bean : beans) {
                ids.add(bean.getId());
            }
            Set<Bean> beansToValidate = JpaBean.getBeanToValidate(ids);
            commit();
            return uniqueIndex(beansToValidate);
        } catch (AbortRuntimeException e) {
            // Domain-level abort: roll back and rethrow unchanged.
            rollback();
            throw e;
        } catch (Throwable e) {
            // Any other failure: roll back and surface as an unchecked exception.
            rollback();
            throw new RuntimeException(e);
        }
    }
}
public class TransactionalProtocolOperationHandler {
    /**
     * Send an operation response.
     *
     * @param context the request context
     * @param responseType the response type byte written before the payload
     * @param response the operation response
     * @throws java.io.IOException for any error while writing the response
     */
    static void sendResponse(final ManagementRequestContext<ExecuteRequestContext> context, final byte responseType, final ModelNode response) throws IOException {
        // WFLY-3090 Protect the communication channel from getting closed due to
        // administrative cancellation of the management op by using a separate
        // thread to send the response.
        final CountDownLatch latch = new CountDownLatch(1);
        // Holder used to smuggle an IOException out of the async task.
        final IOExceptionHolder exceptionHolder = new IOExceptionHolder();
        boolean accepted = context.executeAsync(new AsyncTask<TransactionalProtocolOperationHandler.ExecuteRequestContext>() {
            @Override
            public void execute(final ManagementRequestContext<ExecuteRequestContext> context) throws Exception {
                FlushableDataOutput output = null;
                try {
                    MGMT_OP_LOGGER.tracef("Transmitting response for %d", context.getOperationId());
                    final ManagementResponseHeader header = ManagementResponseHeader.create(context.getRequestHeader());
                    output = context.writeMessage(header);
                    // response type
                    output.writeByte(responseType);
                    // operation result
                    response.writeExternal(output);
                    // response end marker
                    output.writeByte(ManagementProtocol.RESPONSE_END);
                    output.close();
                } catch (IOException toCache) {
                    // Cache for rethrow on the caller's thread.
                    exceptionHolder.exception = toCache;
                } finally {
                    StreamUtils.safeClose(output);
                    // Release the caller regardless of outcome.
                    latch.countDown();
                }
            }
        }, false);
        if (accepted) {
            // Wait for the async write to finish before returning to the caller.
            try {
                latch.await();
            } catch (InterruptedException e) {
                // Preserve the interrupt status for upstream handlers.
                Thread.currentThread().interrupt();
            }
            if (exceptionHolder.exception != null) {
                throw exceptionHolder.exception;
            }
        }
    }
}
public class ResponseOf { /** * Apply to servlet response .
* @ param sresp Servlet response
* @ throws IOException If fails */
public void applyTo ( final HttpServletResponse sresp ) throws IOException { } }
|
final Iterator < String > head = this . rsp . head ( ) . iterator ( ) ; final Matcher matcher = ResponseOf . HTTP_MATCHER . matcher ( head . next ( ) ) ; if ( matcher . matches ( ) ) { sresp . setStatus ( Integer . parseInt ( matcher . group ( 1 ) ) ) ; while ( head . hasNext ( ) ) { ResponseOf . applyHeader ( sresp , head . next ( ) ) ; } try ( InputStream body = this . rsp . body ( ) ; OutputStream out = sresp . getOutputStream ( ) ) { final byte [ ] buff = new byte [ ResponseOf . BUFSIZE ] ; // @ checkstyle LineLengthCheck ( 1 line )
for ( int read = body . read ( buff ) ; read >= 0 ; read = body . read ( buff ) ) { out . write ( buff ) ; } } } else { throw new IOException ( "Invalid response: response code not found" ) ; }
|
public class Normalize {
    /**
     * Normalize the values of the {@code Matrix} by using the Pearson correlation.
     * This will give values between -1 and 1. If {@code saveNegatives} is
     * {@code true}, negative correlations are saved; otherwise they are reduced
     * to zero.
     *
     * @param m the {@code Matrix} to normalize, modified in place
     * @param saveNegatives if true, save all values, even if they are negative
     */
    public static void byCorrelation(Matrix m, boolean saveNegatives) {
        double totalSum = 0;
        // Accumulate the grand total and the total value in each row and column.
        double[] rowSums = new double[m.rows()];
        double[] colSums = new double[m.columns()];
        for (int i = 0; i < m.rows(); ++i) {
            for (int j = 0; j < m.columns(); ++j) {
                totalSum += m.get(i, j);
                colSums[j] += m.get(i, j);
                rowSums[i] += m.get(i, j);
            }
        }
        // Use the row and column totals to compute the per-cell correlation.
        for (int i = 0; i < m.rows(); ++i) {
            for (int j = 0; j < m.columns(); ++j) {
                // NOTE(review): if a row/column sum is 0 or equals totalSum, the
                // denominator is 0 and newVal becomes NaN/Infinity — presumably
                // inputs are strictly positive counts; confirm with callers.
                double newVal = (totalSum * m.get(i, j) - rowSums[i] * colSums[j]) / Math.sqrt(rowSums[i] * (totalSum - rowSums[i]) * colSums[j] * (totalSum - colSums[j]));
                // Store the computed value, clamping negatives to zero if requested.
                if (saveNegatives) m.set(i, j, newVal);
                else m.set(i, j, newVal > 0 ? newVal : 0);
            }
        }
    }
}
public class RtfFont { /** * Sets the RtfDocument this RtfFont belongs to
* @ param doc The RtfDocument to use */
public void setRtfDocument ( RtfDocument doc ) { } }
|
this . document = doc ; if ( document != null ) { this . fontNumber = document . getDocumentHeader ( ) . getFontNumber ( this ) ; } if ( this . color != null ) { this . color . setRtfDocument ( this . document ) ; }
|
public class AnnotationGroupConverter { /** * { @ inheritDoc } */
@ Override public XBELAnnotationGroup convert ( AnnotationGroup source ) { } }
|
if ( source == null ) return null ; List < Annotation > annotations = source . getAnnotations ( ) ; Citation citation = source . getCitation ( ) ; Evidence evidence = source . getEvidence ( ) ; XBELAnnotationGroup xag = new XBELAnnotationGroup ( ) ; List < Object > list = xag . getAnnotationOrEvidenceOrCitation ( ) ; if ( hasItems ( annotations ) ) { // Defer to AnnotationConverter
AnnotationConverter aConverter = new AnnotationConverter ( ) ; for ( final Annotation a : annotations ) { XBELAnnotation xa = aConverter . convert ( a ) ; list . add ( xa ) ; } } if ( citation != null ) { // Defer to CitationConverter
CitationConverter cConverter = new CitationConverter ( ) ; XBELCitation xc = cConverter . convert ( citation ) ; list . add ( xc ) ; } if ( evidence != null ) { list . add ( evidence . getValue ( ) ) ; } return xag ;
|
public class ApiUrl { /** * Create the ID based URL portion
* @ param params The parameters for the method
* @ return Builder object */
private StringBuilder idProcessing ( final TmdbParameters params ) { } }
|
StringBuilder urlString = new StringBuilder ( ) ; // Append the ID
if ( params . has ( Param . ID ) ) { urlString . append ( "/" ) . append ( params . get ( Param . ID ) ) ; } if ( params . has ( Param . SEASON_NUMBER ) ) { urlString . append ( "/season/" ) . append ( params . get ( Param . SEASON_NUMBER ) ) ; } if ( params . has ( Param . EPISODE_NUMBER ) ) { urlString . append ( "/episode/" ) . append ( params . get ( Param . EPISODE_NUMBER ) ) ; } if ( submethod != MethodSub . NONE ) { urlString . append ( "/" ) . append ( submethod . getValue ( ) ) ; } // Append the key information
urlString . append ( DELIMITER_FIRST ) . append ( Param . API_KEY . getValue ( ) ) . append ( apiKey ) ; return urlString ;
|
public class Slf4jLoggerBackend { /** * Adapts the JUL level to SLF4J level per the below mapping :
* < table >
* < tr >
* < th > JUL < / th >
* < th > SLF4J < / th >
* < / tr >
* < tr >
* < td > FINEST < / td > < td > TRACE < / td >
* < / tr > < tr >
* < td > FINER < / td > < td > TRACE < / td >
* < / tr >
* < tr >
* < td > FINE < / td > < td > DEBUG < / td >
* < / tr >
* < tr >
* < td > CONFIG < / td > < td > DEBUG < / td >
* < / tr >
* < tr >
* < td > INFO < / td >
* < td > INFO < / td >
* < / tr >
* < tr >
* < td > WARNING < / td >
* < td > WARN < / td >
* < / tr >
* < tr >
* < td > SEVERE < / td >
* < td > ERROR < / td >
* < / tr >
* < / table >
* < p > Custom JUL levels are mapped to the next - lowest standard JUL level ; for example , a custom
* level at 750 ( between INFO : 800 and CONFIG : 700 ) would map to the same as CONFIG ( DEBUG ) .
* < p > It isn ' t expected that the JUL levels ' ALL ' or ' OFF ' are passed into this method ; doing so
* will throw an IllegalArgumentException , as those levels are for configuration , not logging
* @ param level the JUL level to map ; any standard or custom JUL level , except for ALL or OFF
* @ return the MappedLevel object representing the SLF4J adapters appropriate for the requested
* log level ; never null . */
private static Slf4jLogLevel mapToSlf4jLogLevel ( Level level ) { } }
|
// Performance consideration : mapToSlf4jLogLevel is a very hot method , called even when
// logging is disabled . Allocations ( and other latency - introducing constructs ) should be avoided
int requestedLevel = level . intValue ( ) ; // Flogger shouldn ' t allow ALL or OFF to be used for logging
// if Flogger does add this check to the core library it can be removed here ( and should be ,
// as this method is on the critical performance path for determining whether log statements
// are disabled , hence called for all log statements )
if ( requestedLevel == Level . ALL . intValue ( ) || requestedLevel == Level . OFF . intValue ( ) ) { throw new IllegalArgumentException ( "Unsupported log level: " + level ) ; } if ( requestedLevel < Level . FINE . intValue ( ) ) { return Slf4jLogLevel . TRACE ; } if ( requestedLevel < Level . INFO . intValue ( ) ) { return Slf4jLogLevel . DEBUG ; } if ( requestedLevel < Level . WARNING . intValue ( ) ) { return Slf4jLogLevel . INFO ; } if ( requestedLevel < Level . SEVERE . intValue ( ) ) { return Slf4jLogLevel . WARN ; } return Slf4jLogLevel . ERROR ;
|
public class JmsJMSContextImpl {
    /**
     * Acknowledges all messages consumed by this context's session. Only has an
     * effect for CLIENT_ACKNOWLEDGE sessions, where it commits the session's
     * transaction.
     *
     * @see javax.jms.JMSContext#acknowledge()
     * @throws IllegalStateRuntimeException if the context has no session or it is closed
     * @throws JMSRuntimeException if the underlying commit fails
     */
    @Override
    public void acknowledge() throws IllegalStateRuntimeException, JMSRuntimeException {
        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) SibTr.entry(this, tc, "acknowledge");
        try {
            // No session means there is nothing that could have been consumed.
            if (jmsSession == null) {
                IllegalStateException ise = (javax.jms.IllegalStateException) JmsErrorUtils.newThrowable(javax.jms.IllegalStateException.class, "INVALID_FOR_UNCONSUMED_MSG_CWSIA0110", new Object[] { "acknowledge" }, tc);
                throw (IllegalStateRuntimeException) JmsErrorUtils.getJMS2Exception(ise, IllegalStateRuntimeException.class);
            }
            synchronized (jmsSession.getSessionSyncLock()) {
                // Lock on the jmsSession's sessionSyncLock.
                if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) SibTr.debug(this, tc, "got lock");
                // Throw an exception if the session is closed.
                jmsSession.checkNotClosed();
                // Throw an exception if acknowledge conflicts with async usage.
                jmsSession.checkSynchronousUsage("acknowledge");
                // Perform the appropriate action for the session's ack mode. The
                // action for a dups-ok session in JMS 1.1 was somewhat unspecified,
                // so commit was chosen; in JMS 2.0 commit applies only to the
                // client-ack mode.
                int sessAck = jmsSession.getAcknowledgeMode();
                if ((sessAck == Session.CLIENT_ACKNOWLEDGE)) {
                    jmsSession.commitTransaction();
                }
            }
        } catch (IllegalStateException ise) {
            throw (IllegalStateRuntimeException) JmsErrorUtils.getJMS2Exception(ise, IllegalStateRuntimeException.class);
        } catch (JMSException jmse) {
            throw (JMSRuntimeException) JmsErrorUtils.getJMS2Exception(jmse, JMSRuntimeException.class);
        } finally {
            if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) SibTr.exit(this, tc, "acknowledge");
        }
    }
}
public class ProcessingConfiguration { /** * The data processors .
* < b > NOTE : < / b > This method appends the values to the existing list ( if any ) . Use
* { @ link # setProcessors ( java . util . Collection ) } or { @ link # withProcessors ( java . util . Collection ) } if you want to
* override the existing values .
* @ param processors
* The data processors .
* @ return Returns a reference to this object so that method calls can be chained together . */
public ProcessingConfiguration withProcessors ( Processor ... processors ) { } }
|
if ( this . processors == null ) { setProcessors ( new java . util . ArrayList < Processor > ( processors . length ) ) ; } for ( Processor ele : processors ) { this . processors . add ( ele ) ; } return this ;
|
public class Widget { /** * Set the ( optional ) name of the { @ link Widget } . { @ code Widget } names are
* not needed : they are only for the application ' s convenience . */
public void setName ( String name ) { } }
|
mName = name ; if ( mSceneObject != null ) { mSceneObject . setName ( name ) ; }
|
public class OracleNoSQLClient { /** * Execute .
* @ param batches
* the batches */
private void execute ( Map < Key , List < TableOperation > > batches ) { } }
|
if ( batches != null && ! batches . isEmpty ( ) ) { try { for ( List < TableOperation > batch : batches . values ( ) ) { tableAPI . execute ( batch , null ) ; } } catch ( DurabilityException e ) { log . error ( "Error while executing operations in OracleNOSQL, Caused by:" + e + "." ) ; throw new PersistenceException ( "Error while Persisting data using batch" , e ) ; } catch ( TableOpExecutionException e ) { log . error ( "Error while executing operations in OracleNOSQL, Caused by:" + e + "." ) ; throw new PersistenceException ( "Error while Persisting data using batch" , e ) ; } catch ( FaultException e ) { log . error ( "Error while executing operations in OracleNOSQL, Caused by:" + e + "." ) ; throw new PersistenceException ( "Error while Persisting data using batch" , e ) ; } finally { batches . clear ( ) ; } }
|
public class ConnectionMonitorsInner { /** * Create or update a connection monitor .
* @ param resourceGroupName The name of the resource group containing Network Watcher .
* @ param networkWatcherName The name of the Network Watcher resource .
* @ param connectionMonitorName The name of the connection monitor .
* @ param parameters Parameters that define the operation to create a connection monitor .
* @ throws IllegalArgumentException thrown if parameters fail the validation
* @ return the observable to the ConnectionMonitorResultInner object */
public Observable < ConnectionMonitorResultInner > beginCreateOrUpdateAsync ( String resourceGroupName , String networkWatcherName , String connectionMonitorName , ConnectionMonitorInner parameters ) { } }
|
return beginCreateOrUpdateWithServiceResponseAsync ( resourceGroupName , networkWatcherName , connectionMonitorName , parameters ) . map ( new Func1 < ServiceResponse < ConnectionMonitorResultInner > , ConnectionMonitorResultInner > ( ) { @ Override public ConnectionMonitorResultInner call ( ServiceResponse < ConnectionMonitorResultInner > response ) { return response . body ( ) ; } } ) ;
|
public class ApiImplementor { /** * Tells whether or not the given { @ code method } should be ignored , thus not included in the ZAP API .
* Checks if the given { @ code method } has been annotated with { @ code ZapApiIgnore } or if it ' s not public , if any of the
* conditions is { @ code true } the { @ code method } is ignored .
* @ param method the method that will be checked
* @ return { @ code true } if the method should be ignored , { @ code false } otherwise .
* @ see ZapApiIgnore */
private static boolean isIgnored ( Method method ) { } }
|
return method . getAnnotation ( ZapApiIgnore . class ) != null || ! Modifier . isPublic ( method . getModifiers ( ) ) ;
|
public class ProvFactory { /** * A factory method to create an instance of a start { @ link WasStartedBy }
* @ param id
* @ return an instance of { @ link WasStartedBy } */
public WasStartedBy newWasStartedBy ( QualifiedName id ) { } }
|
WasStartedBy res = of . createWasStartedBy ( ) ; res . setId ( id ) ; return res ;
|
public class GlobalSecondaryIndexMarshaller {
    /**
     * Marshall the given parameter object into the protocol representation.
     *
     * @param globalSecondaryIndex the index definition to marshal; must not be null
     * @param protocolMarshaller the marshaller that receives each bound field
     * @throws SdkClientException if the argument is null or marshalling fails
     */
    public void marshall(GlobalSecondaryIndex globalSecondaryIndex, ProtocolMarshaller protocolMarshaller) {
        if (globalSecondaryIndex == null) {
            throw new SdkClientException("Invalid argument passed to marshall(...)");
        }
        try {
            // Each getter is bound to its wire field by the *_BINDING descriptors.
            protocolMarshaller.marshall(globalSecondaryIndex.getIndexName(), INDEXNAME_BINDING);
            protocolMarshaller.marshall(globalSecondaryIndex.getKeySchema(), KEYSCHEMA_BINDING);
            protocolMarshaller.marshall(globalSecondaryIndex.getProjection(), PROJECTION_BINDING);
            protocolMarshaller.marshall(globalSecondaryIndex.getProvisionedThroughput(), PROVISIONEDTHROUGHPUT_BINDING);
        } catch (Exception e) {
            // Preserve the cause so callers can diagnose the underlying failure.
            throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
        }
    }
}
public class SoftHashSet {
    /**
     * Rehashes the contents of this set into a new table with a larger capacity.
     * This method is called automatically when the number of keys in this set
     * exceeds its capacity and load factor. Entries whose soft reference has been
     * cleared by the GC are dropped (and the count decremented) instead of being
     * copied.
     */
    private void rehash() {
        int oldCapacity = mTable.length;
        Entry oldSet[] = mTable;
        // Grow to roughly double (odd size helps the modulo distribution).
        int newCapacity = oldCapacity * 2 + 1;
        Entry newSet[] = new Entry[newCapacity];
        // Structural modification: invalidate any concurrent iterators.
        mModCount++;
        mThreshold = (int) (newCapacity * mLoadFactor);
        mTable = newSet;
        for (int i = oldCapacity; i-- > 0;) {
            for (Entry old = oldSet[i]; old != null;) {
                Entry e = old;
                old = old.mNext;
                // Only copy entry if its referent hasn't been cleared.
                if (e.get() == null) {
                    mCount--;
                } else {
                    // Re-bucket using the cached hash; mask keeps the value non-negative.
                    int index = (e.mHash & 0x7FFFFFFF) % newCapacity;
                    e.mNext = newSet[index];
                    newSet[index] = e;
                }
            }
        }
    }
}
public class HBaseQueueClientFactory { /** * Helper method to select the queue or stream admin , and to ensure it ' s table exists .
* @ param queueName name of the queue to be opened .
* @ return the queue admin for that queue .
* @ throws java . io . IOException */
private HBaseQueueAdmin ensureTableExists ( QueueName queueName ) throws IOException { } }
|
HBaseQueueAdmin admin = queueAdmin ; try { if ( ! admin . exists ( queueName ) ) { admin . create ( queueName ) ; } } catch ( Exception e ) { throw new IOException ( "Failed to open table " + admin . getActualTableName ( queueName ) , e ) ; } return admin ;
|
public class DevAppServerArgs {
    /**
     * Returns {@code [--name=value1, --name=value2, ...]} or {@code []} if values == null.
     *
     * @param name the flag name (without the leading dashes)
     * @param values the flag values; may be null
     * @return the rendered flag strings
     */
    public static List<String> get(String name, @Nullable List<String> values) {
        // Delegates to the shared helper that renders "--name=value" pairs.
        return Args.stringsWithEq(name, values);
    }
}
public class WebHookManager {
    /**
     * Fire the event to the registered listeners.
     *
     * @param event the Event instance to fire to the registered event listeners
     * @throws GitLabApiException if the event is not supported
     */
    public void fireEvent(Event event) throws GitLabApiException {
        // Dispatch on the webhook payload's object_kind discriminator.
        switch (event.getObjectKind()) {
            case BuildEvent.OBJECT_KIND:
                fireBuildEvent((BuildEvent) event);
                break;
            case IssueEvent.OBJECT_KIND:
                fireIssueEvent((IssueEvent) event);
                break;
            case MergeRequestEvent.OBJECT_KIND:
                fireMergeRequestEvent((MergeRequestEvent) event);
                break;
            case NoteEvent.OBJECT_KIND:
                fireNoteEvent((NoteEvent) event);
                break;
            case PipelineEvent.OBJECT_KIND:
                firePipelineEvent((PipelineEvent) event);
                break;
            case PushEvent.OBJECT_KIND:
                firePushEvent((PushEvent) event);
                break;
            case TagPushEvent.OBJECT_KIND:
                fireTagPushEvent((TagPushEvent) event);
                break;
            case WikiPageEvent.OBJECT_KIND:
                fireWikiPageEvent((WikiPageEvent) event);
                break;
            default:
                // Unknown kinds are logged and rejected rather than silently dropped.
                String message = "Unsupported event object_kind, object_kind=" + event.getObjectKind();
                LOGGER.warning(message);
                throw new GitLabApiException(message);
        }
    }
}
public class KratiDataStore { /** * java - Xmx4G krati . examples . KratiDataStore homeDir initialCapacity */
public static void main ( String [ ] args ) { } }
|
try { // Parse arguments : homeDir keyCount
File homeDir = new File ( args [ 0 ] ) ; int initialCapacity = Integer . parseInt ( args [ 1 ] ) ; // Create an instance of Krati DataStore
KratiDataStore store = new KratiDataStore ( homeDir , initialCapacity ) ; // Populate data store
store . populate ( ) ; // Perform some random reads from data store .
store . doRandomReads ( 10 ) ; // Close data store
store . close ( ) ; } catch ( Exception e ) { e . printStackTrace ( ) ; }
|
public class ProxyBranchImpl { /** * ( non - Javadoc )
 * @ see javax . servlet . sip . ProxyBranch # cancel ( java . lang . String [ ] , int [ ] , java . lang . String [ ] ) */
public void cancel ( String [ ] protocol , int [ ] reasonCode , String [ ] reasonText ) { } }
|
// Cancels this proxy branch: stops timers, sends a CANCEL for started INVITE/PRACK/UPDATE branches
// that already received a response, and otherwise only stops the retransmission timer. The three
// parallel arrays (protocol/reasonCode/reasonText) become Reason headers on the CANCEL.
// Throws IllegalStateException if an ACK was already received or if cancellation fails.
if ( proxy . getAckReceived ( ) ) throw new IllegalStateException ( "There has been an ACK received on this branch. Can not cancel." ) ; try { cancelTimer ( ) ; if ( this . isStarted ( ) && ! canceled && ! timedOut && ( outgoingRequest . getMethod ( ) . equalsIgnoreCase ( Request . INVITE ) || // https : / / code . google . com / p / sipservlets / issues / detail ? id = 253
outgoingRequest . getMethod ( ) . equalsIgnoreCase ( Request . PRACK ) || // https : / / code . google . com / p / sipservlets / issues / detail ? id = 33
outgoingRequest . getMethod ( ) . equalsIgnoreCase ( Request . UPDATE ) ) ) { if ( lastResponse != null ) { /* According to SIP RFC we should send cancel only if we receive any response first */
// A response was already seen on this branch, so a CANCEL may legally be sent.
if ( logger . isDebugEnabled ( ) ) { logger . debug ( "Trying to cancel ProxyBranch for outgoing request " + outgoingRequest ) ; } if ( lastResponse . getStatus ( ) > Response . OK && ! recursedBranches . isEmpty ( ) ) { // Javadoc says it should throw an java . lang . IllegalStateException if the transaction has already been completed and it has no child branches
if ( logger . isDebugEnabled ( ) ) { logger . debug ( "lastResponse status for this branch is " + lastResponse . getStatus ( ) + " and it has " + recursedBranches . size ( ) + " to cancel" ) ; } return ; } SipServletRequest cancelRequest = null ; if ( outgoingRequest . getMethod ( ) . equalsIgnoreCase ( Request . PRACK ) || outgoingRequest . getMethod ( ) . equalsIgnoreCase ( Request . UPDATE ) ) { // https : / / code . google . com / p / sipservlets / issues / detail ? id = 253 and https : / / code . google . com / p / sipservlets / issues / detail ? id = 33
// in case of PRACK or UPDATE we need to take the original INVITE
cancelRequest = originalRequest . getLinkedRequest ( ) . createCancel ( ) ; } else { cancelRequest = outgoingRequest . createCancel ( ) ; } // https : / / code . google . com / p / sipservlets / issues / detail ? id = 272 Adding reason headers if needed
// Reason headers are only added when all three arrays are non-null and of equal length.
if ( protocol != null && reasonCode != null && reasonText != null && protocol . length == reasonCode . length && reasonCode . length == reasonText . length ) { for ( int i = 0 ; i < protocol . length ; i ++ ) { String reasonHeaderValue = protocol [ i ] + ";cause=" + reasonCode [ i ] ; if ( reasonText [ i ] != null && reasonText [ i ] . trim ( ) . length ( ) > 0 ) { reasonHeaderValue = reasonHeaderValue . concat ( ";text=\"" + reasonText [ i ] + "\"" ) ; } ( ( SipServletRequestImpl ) cancelRequest ) . setHeaderInternal ( "Reason" , reasonHeaderValue , false ) ; } } cancelRequest . send ( ) ; } else { // We dont send cancel , but we must stop the invite retrans
// No response yet: a CANCEL cannot be sent, so only the retransmissions are stopped.
SIPClientTransaction tx = ( SIPClientTransaction ) outgoingRequest . getTransaction ( ) ; if ( tx != null ) { StaticServiceHolder . disableRetransmissionTimer . invoke ( tx ) ; // disableTimeoutTimer . invoke ( tx ) ;
} else { logger . warn ( "Transaction is null. Can not stop retransmission, they are already dead in the branch." ) ; } /* try {
/ / tx . terminate ( ) ;
/ / Do not terminate the tx here , because com . bea . sipservlet . tck . agents . spec . ProxyTest . testProxyCancel test is failing . If the tx
/ / is terminated 100 Trying is dropped at JSIP .
} catch ( Exception e2 ) {
logger . error ( " Can not terminate transaction " , e2 ) ; */
} canceled = true ; } if ( ! this . isStarted ( ) && ( outgoingRequest . getMethod ( ) . equalsIgnoreCase ( Request . INVITE ) || // https : / / code . google . com / p / sipservlets / issues / detail ? id = 253
outgoingRequest . getMethod ( ) . equalsIgnoreCase ( Request . PRACK ) ) ) { canceled = true ; } } catch ( Exception e ) { throw new IllegalStateException ( "Failed canceling proxy branch" , e ) ; } finally { onBranchTerminated ( ) ; }
|
public class AbstractMappingStrategy { /** * Creates a new instance of an AGI script .
* @ param className Class name of the AGI script . The class must implement
* { @ link AgiScript } .
* @ return the created instance of the AGI script class . If the instance
* can ' t be created an error is logged and < code > null < / code > is
* returned . */
@ SuppressWarnings ( "unchecked" ) protected AgiScript createAgiScriptInstance ( String className ) { } }
|
Class < ? > tmpClass ; Class < AgiScript > agiScriptClass ; Constructor < AgiScript > constructor ; AgiScript agiScript ; agiScript = null ; try { tmpClass = getClassLoader ( ) . loadClass ( className ) ; } catch ( ClassNotFoundException e1 ) { logger . debug ( "Unable to create AgiScript instance of type " + className + ": Class not found, make sure the class exists and is available on the CLASSPATH" ) ; return null ; } if ( ! AgiScript . class . isAssignableFrom ( tmpClass ) ) { logger . warn ( "Unable to create AgiScript instance of type " + className + ": Class does not implement the AgiScript interface" ) ; return null ; } agiScriptClass = ( Class < AgiScript > ) tmpClass ; try { constructor = agiScriptClass . getConstructor ( ) ; agiScript = constructor . newInstance ( ) ; } catch ( Exception e ) { logger . warn ( "Unable to create AgiScript instance of type " + className , e ) ; } return agiScript ;
|
public class AccessBallCommand { /** * { @ inheritDoc } */
@ Override public void perform ( final Wave wave ) { } }
|
final JRebirthEvent event = wave . get ( EditorWaves . EVENT ) ; final BallModel targetBallModel = getModel ( BallModel . class , event ) ; targetBallModel . access ( ) ; // getModel ( EditorModel . class ) . unregisterBall ( targetBallModel ) ;
|
public class SOD { /** * Performs the SOD algorithm on the given database .
 * @ param relation Data relation to process
 * @ return Outlier result */
public OutlierResult run ( Relation < V > relation ) { } }
|
// Subspace Outlier Degree: for every object, build its (shared-nearest-neighbor based) reference
// set, pick the subspace dimensions in which that neighborhood has low variance, and score the
// object by its deviation from the neighborhood centroid within that subspace. Objects with an
// empty neighborhood keep score 0 and a null weight vector in their (optional) model.
SimilarityQuery < V > snnInstance = similarityFunction . instantiate ( relation ) ; FiniteProgress progress = LOG . isVerbose ( ) ? new FiniteProgress ( "Assigning Subspace Outlier Degree" , relation . size ( ) , LOG ) : null ; WritableDoubleDataStore sod_scores = DataStoreUtil . makeDoubleStorage ( relation . getDBIDs ( ) , DataStoreFactory . HINT_STATIC ) ; WritableDataStore < SODModel > sod_models = models ? DataStoreUtil . makeStorage ( relation . getDBIDs ( ) , DataStoreFactory . HINT_STATIC , SODModel . class ) : null ; DoubleMinMax minmax = new DoubleMinMax ( ) ; for ( DBIDIter iter = relation . iterDBIDs ( ) ; iter . valid ( ) ; iter . advance ( ) ) { DBIDs neighborhood = getNearestNeighbors ( relation , snnInstance , iter ) ; double [ ] center ; long [ ] weightVector = null ; double sod = 0. ; if ( neighborhood . size ( ) > 0 ) { center = Centroid . make ( relation , neighborhood ) . getArrayRef ( ) ; // Note : per - dimension variances ; no covariances .
// A dimension is kept (bit set) when its variance is below alpha times the mean variance.
double [ ] variances = computePerDimensionVariances ( relation , center , neighborhood ) ; double expectationOfVariance = Mean . of ( variances ) ; weightVector = BitsUtil . zero ( variances . length ) ; for ( int d = 0 ; d < variances . length ; d ++ ) { if ( variances [ d ] < alpha * expectationOfVariance ) { BitsUtil . setI ( weightVector , d ) ; } } sod = subspaceOutlierDegree ( relation . get ( iter ) , center , weightVector ) ; } else { center = relation . get ( iter ) . toArray ( ) ; } if ( sod_models != null ) { sod_models . put ( iter , new SODModel ( center , weightVector ) ) ; } sod_scores . putDouble ( iter , sod ) ; minmax . put ( sod ) ; LOG . incrementProcessed ( progress ) ; } LOG . ensureCompleted ( progress ) ; // combine results .
OutlierScoreMeta meta = new BasicOutlierScoreMeta ( minmax . getMin ( ) , minmax . getMax ( ) ) ; OutlierResult sodResult = new OutlierResult ( meta , new MaterializedDoubleRelation ( "Subspace Outlier Degree" , "sod-outlier" , sod_scores , relation . getDBIDs ( ) ) ) ; if ( sod_models != null ) { sodResult . addChildResult ( new MaterializedRelation < > ( "Subspace Outlier Model" , "sod-outlier" , new SimpleTypeInformation < > ( SODModel . class ) , sod_models , relation . getDBIDs ( ) ) ) ; } return sodResult ;
|
public class AbstractServerDetector { /** * Check for the existence of a certain MBean . All known MBeanServers are queried
* @ param pMBeanServerExecutor mbean servers to query for
* @ param pMbeanPattern MBean name pattern for MBeans to check for
* @ return set of { @ link ObjectName } s if the pattern matches , or an empty set if not mbean has been found */
protected Set < ObjectName > searchMBeans ( MBeanServerExecutor pMBeanServerExecutor , String pMbeanPattern ) { } }
|
try { ObjectName oName = new ObjectName ( pMbeanPattern ) ; return pMBeanServerExecutor . queryNames ( oName ) ; } catch ( MalformedObjectNameException e ) { return new HashSet < ObjectName > ( ) ; } catch ( IOException e ) { return new HashSet < ObjectName > ( ) ; }
|
public class Async { /** * Pauses a queue . A paused queue stops delivering commands to listeners . It still can accumulate commands .
* @ param queueName queue name . */
public void pause ( String queueName ) { } }
|
try { getQueueControl ( queueName ) . pause ( ) ; } catch ( Exception e ) { throw new AsyncException ( e ) ; }
|
public class ClipBuffer { /** * Frees up the internal audio buffers associated with this clip . */
public void dispose ( ) { } }
|
if ( _buffer != null ) { // if there are sources bound to this buffer , we must wait
// for them to be unbound
if ( _bound > 0 ) { _state = UNLOADING ; return ; } // free up our buffer
_buffer . delete ( ) ; _buffer = null ; _state = UNLOADED ; }
|
public class CmsSolrIndexWriter { /** * Adds Solr documents to the index for the { @ link I _ CmsSearchDocument } .
* Documents for serial dates are added for each occurrence once with the date of the respective occurrence .
* @ param document the document for the indexed resource
* @ throws SolrServerException thrown if adding the document to the index fails
* @ throws IOException thrown if adding the document to the index fails */
private void addDocumentInstances ( I_CmsSearchDocument document ) throws SolrServerException , IOException { } }
|
List < String > serialDates = document . getMultivaluedFieldAsStringList ( CmsSearchField . FIELD_SERIESDATES ) ; SolrInputDocument inputDoc = ( SolrInputDocument ) document . getDocument ( ) ; String id = inputDoc . getFieldValue ( CmsSearchField . FIELD_ID ) . toString ( ) ; if ( null != serialDates ) { // NOTE : We can assume the following to arrays have the same length as serialDates .
List < String > serialDatesEnd = document . getMultivaluedFieldAsStringList ( CmsSearchField . FIELD_SERIESDATES_END ) ; List < String > serialDatesCurrentTill = document . getMultivaluedFieldAsStringList ( CmsSearchField . FIELD_SERIESDATES_CURRENT_TILL ) ; for ( int i = 0 ; i < serialDates . size ( ) ; i ++ ) { String date = serialDates . get ( i ) ; String endDate = serialDatesEnd . get ( i ) ; String currentTillDate = serialDatesCurrentTill . get ( i ) ; inputDoc . setField ( CmsSearchField . FIELD_INSTANCEDATE + CmsSearchField . FIELD_POSTFIX_DATE , date ) ; inputDoc . setField ( CmsSearchField . FIELD_INSTANCEDATE_END + CmsSearchField . FIELD_POSTFIX_DATE , endDate ) ; inputDoc . setField ( CmsSearchField . FIELD_INSTANCEDATE_CURRENT_TILL + CmsSearchField . FIELD_POSTFIX_DATE , currentTillDate ) ; for ( String locale : document . getMultivaluedFieldAsStringList ( CmsSearchField . FIELD_CONTENT_LOCALES ) ) { inputDoc . setField ( CmsSearchField . FIELD_INSTANCEDATE + "_" + locale + CmsSearchField . FIELD_POSTFIX_DATE , date ) ; inputDoc . setField ( CmsSearchField . FIELD_INSTANCEDATE_END + "_" + locale + CmsSearchField . FIELD_POSTFIX_DATE , endDate ) ; inputDoc . setField ( CmsSearchField . FIELD_INSTANCEDATE_CURRENT_TILL + "_" + locale + CmsSearchField . FIELD_POSTFIX_DATE , currentTillDate ) ; } String newId = id + String . format ( "-%04d" , Integer . valueOf ( i + 1 ) ) ; inputDoc . setField ( CmsSearchField . FIELD_SOLR_ID , newId ) ; // remove fields that should not be part of the index , but were used to transport extra - information on date series
inputDoc . removeField ( CmsSearchField . FIELD_SERIESDATES_END ) ; inputDoc . removeField ( CmsSearchField . FIELD_SERIESDATES_CURRENT_TILL ) ; m_server . add ( inputDoc , m_commitMs ) ; } } else { inputDoc . setField ( CmsSearchField . FIELD_SOLR_ID , id ) ; m_server . add ( inputDoc , m_commitMs ) ; }
|
public class ClassWriter { /** * Returns the equivalent of the given class file , with the ASM specific instructions replaced
 * with standard ones . This is done with a ClassReader - & gt ; ClassWriter round trip .
 * @ param classFile a class file containing ASM specific instructions , generated by this
 * ClassWriter .
 * @ param hasFrames whether there is at least one stack map frames in ' classFile ' .
 * @ return an equivalent of ' classFile ' , with the ASM specific instructions replaced with standard
 * ones . */
private byte [ ] replaceAsmInstructions ( final byte [ ] classFile , final boolean hasFrames ) { } }
|
// NOTE(review): as written, neither 'classFile' nor the 'attributes' prototypes are ever used, so
// the ClassReader->ClassWriter round trip promised by the javadoc never happens and toByteArray()
// runs on a writer whose field/method/attribute lists were just cleared. Upstream ASM performs
// "new ClassReader(classFile).accept(this, attributes, ...)" between the resets and toByteArray();
// this body looks truncated -- verify against the original org.objectweb.asm.ClassWriter source.
Attribute [ ] attributes = getAttributePrototypes ( ) ; firstField = null ; lastField = null ; firstMethod = null ; lastMethod = null ; firstAttribute = null ; compute = hasFrames ? MethodWriter . COMPUTE_INSERTED_FRAMES : MethodWriter . COMPUTE_NOTHING ; return toByteArray ( ) ;
|
public class ApacheHTTPSender { /** * { @ inheritDoc } */
@ SuppressWarnings ( "deprecation" ) public void destroy ( ) { } }
|
lock . lock ( ) ; try { if ( httpClient != null ) { httpClient . getConnectionManager ( ) . shutdown ( ) ; } } finally { cfg = null ; httpClient = null ; lock . unlock ( ) ; }
|
public class ValidateAccessToken { /** * query for access token info and set it into the request context
 * and return the scope of current token .
 * @ return the scope of the request or empty
 * if no token string is provided . */
private static Single < Optional < String > > fetchAccessTokenAndReturnScope ( UnitRequest request ) { } }
|
// Resolution order: (1) reject requests without a client ip, (2) white-listed ips get the full
// "api_all" scope without any token, (3) missing token header -> empty scope, (4) otherwise the
// token is looked up asynchronously, cached on the request context as a side effect, and its
// scope is returned.
LOG . info ( "fetchAccessTokenAndReturnScope" ) ; String ip = request . getContext ( ) . getIp ( ) ; if ( StringUtil . isEmpty ( ip ) ) { throw new IllegalArgumentException ( "Client's ip is empty, please check!" ) ; } if ( isWhiteIp ( ip ) ) { LOG . info ( new JSONObject ( ) . fluentPut ( "type" , LogTypeGateway . whiteIp ) . fluentPut ( "description" , "request is from white ip " + ip ) . fluentPut ( "ip" , ip ) ) ; return Single . just ( Optional . of ( Scope . api_all ) ) ; } String accessToken = request . getContext ( ) . getHeader ( ) == null ? null : request . getContext ( ) . getHeader ( ) . getOrDefault ( Constant . XIAN_REQUEST_TOKEN_HEADER , null ) ; if ( StringUtil . isEmpty ( accessToken ) ) { return Single . just ( Optional . empty ( ) ) ; } else { return forToken ( accessToken ) . map ( optionalAccessToken -> { if ( optionalAccessToken . isPresent ( ) ) { request . getContext ( ) . setAccessToken ( optionalAccessToken . get ( ) ) ; return Optional . of ( optionalAccessToken . get ( ) . getScope ( ) ) ; } else { return Optional . empty ( ) ; } } ) ; }
|
public class Database { public void deleteClassPipe ( String className , String pipeName ) throws DevFailed { } }
|
// Pure delegation: removes the named pipe of the given device class via the database DAO.
databaseDAO . deleteClassPipe ( this , className , pipeName ) ;
|
public class ScreenIn { /** * Set up the key areas . */
public void setupKeys ( ) { } }
|
KeyAreaInfo keyArea = null ; keyArea = new KeyAreaInfo ( this , Constants . UNIQUE , ID_KEY ) ; keyArea . addKeyField ( ID , Constants . ASCENDING ) ; keyArea = new KeyAreaInfo ( this , Constants . NOT_UNIQUE , SCREEN_IN_PROG_NAME_KEY ) ; keyArea . addKeyField ( SCREEN_IN_PROG_NAME , Constants . ASCENDING ) ; keyArea . addKeyField ( SCREEN_ITEM_NUMBER , Constants . ASCENDING ) ;
|
public class ComponentDocumentationBuilder {
    /**
     * Converts an arbitrary {@link Image} into a {@link BufferedImage}.
     * If the argument already is a {@link BufferedImage} it is returned unchanged;
     * otherwise the pixels are painted into a new ARGB buffered image.
     *
     * @param image the image to convert
     * @return a buffered image holding the content of {@code image}
     */
    public static BufferedImage toBufferedImage(final Image image) {
        if (image instanceof BufferedImage) {
            return (BufferedImage) image;
        }
        // Create a buffered image with transparency and paint the source into it.
        final int width = image.getWidth(null);
        final int height = image.getHeight(null);
        final BufferedImage converted = new BufferedImage(width, height, BufferedImage.TYPE_INT_ARGB);
        final Graphics2D bufferedGraphics = converted.createGraphics();
        bufferedGraphics.drawImage(image, 0, 0, null);
        bufferedGraphics.dispose();
        return converted;
    }
}
|
public class CPRuleUserSegmentRelLocalServiceUtil { /** * Creates a new cp rule user segment rel with the primary key . Does not add the cp rule user segment rel to the database .
 * @ param CPRuleUserSegmentRelId the primary key for the new cp rule user segment rel
 * @ return the new cp rule user segment rel */
public static com . liferay . commerce . product . model . CPRuleUserSegmentRel createCPRuleUserSegmentRel ( long CPRuleUserSegmentRelId ) { } }
|
// Service-util facade: delegates to the registered local service instance.
return getService ( ) . createCPRuleUserSegmentRel ( CPRuleUserSegmentRelId ) ;
|
public class DateTimeFormatter { /** * Returns a copy of this formatter with a new set of resolver fields .
* This returns a formatter with similar state to this formatter but with
* the resolver fields set . By default , a formatter has no resolver fields .
* Changing the resolver fields only has an effect during parsing .
* Parsing a text string occurs in two phases .
* Phase 1 is a basic text parse according to the fields added to the builder .
* Phase 2 resolves the parsed field - value pairs into date and / or time objects .
* The resolver fields are used to filter the field - value pairs between phase 1 and 2.
* This can be used to select between two or more ways that a date or time might
* be resolved . For example , if the formatter consists of year , month , day - of - month
* and day - of - year , then there are two ways to resolve a date .
* Calling this method with the arguments { @ link ChronoField # YEAR YEAR } and
* { @ link ChronoField # DAY _ OF _ YEAR DAY _ OF _ YEAR } will ensure that the date is
* resolved using the year and day - of - year , effectively meaning that the month
* and day - of - month are ignored during the resolving phase .
* In a similar manner , this method can be used to ignore secondary fields that
* would otherwise be cross - checked . For example , if the formatter consists of year ,
* month , day - of - month and day - of - week , then there is only one way to resolve a
* date , but the parsed value for day - of - week will be cross - checked against the
* resolved date . Calling this method with the arguments { @ link ChronoField # YEAR YEAR } ,
* { @ link ChronoField # MONTH _ OF _ YEAR MONTH _ OF _ YEAR } and
* { @ link ChronoField # DAY _ OF _ MONTH DAY _ OF _ MONTH } will ensure that the date is
* resolved correctly , but without any cross - check for the day - of - week .
* In implementation terms , this method behaves as follows . The result of the
* parsing phase can be considered to be a map of field to value . The behavior
* of this method is to cause that map to be filtered between phase 1 and 2,
* removing all fields other than those specified as arguments to this method .
* This instance is immutable and unaffected by this method call .
* @ param resolverFields the new set of resolver fields , null if no fields
* @ return a formatter based on this formatter with the requested resolver style , not null */
public DateTimeFormatter withResolverFields ( TemporalField ... resolverFields ) { } }
|
Set < TemporalField > fields = null ; if ( resolverFields != null ) { fields = Collections . unmodifiableSet ( new HashSet < > ( Arrays . asList ( resolverFields ) ) ) ; } if ( Objects . equals ( this . resolverFields , fields ) ) { return this ; } return new DateTimeFormatter ( printerParser , locale , decimalStyle , resolverStyle , fields , chrono , zone ) ;
|
public class RangeSelectorHelper { /** * restore the index of the last long pressed index
* IMPORTANT ! Call this method only after all items where added to the adapters again . Otherwise it may select wrong items !
* @ param savedInstanceState If the activity is being re - initialized after
* previously being shut down then this Bundle contains the data it most
* recently supplied in Note : Otherwise it is null .
* @ param prefix a prefix added to the savedInstance key so we can store multiple states
* @ return this */
public RangeSelectorHelper withSavedInstanceState ( Bundle savedInstanceState , String prefix ) { } }
|
if ( savedInstanceState != null && savedInstanceState . containsKey ( BUNDLE_LAST_LONG_PRESS + prefix ) ) mLastLongPressIndex = savedInstanceState . getInt ( BUNDLE_LAST_LONG_PRESS + prefix ) ; return this ;
|
public class CharSequenceScanner { /** * This method gets the tail of this scanner without changing the state .
 * @ return the tail of this scanner . */
protected String getTail ( ) { } }
|
// Returns the not-yet-consumed rest of the buffer; empty string when fully consumed.
// NOTE(review): the copied length is "limit - offset + 1". If 'limit' is an exclusive bound (as
// the guard "offset < limit" suggests), this reads one char too many and can throw
// StringIndexOutOfBoundsException when limit == buffer.length -- confirm whether 'limit' is
// inclusive in this class before relying on this method.
String tail = "" ; if ( this . offset < this . limit ) { tail = new String ( this . buffer , this . offset , this . limit - this . offset + 1 ) ; } return tail ;
|
public class LabelBuilder { /** * Handle composite named links .
* @ param tagString Full tag name and value */
private void tryHandleNamedLink ( final String tagString ) { } }
|
final String namedLinkPatternString = PLAIN_LINK + "\\.(\\w+-?)+=(\\w+(-|_)?)+" ; final Pattern namedLinkPattern = Pattern . compile ( namedLinkPatternString , Pattern . CASE_INSENSITIVE ) ; if ( namedLinkPattern . matcher ( tagString ) . matches ( ) ) { final String type = tagString . split ( COMPOSITE_TAG_DELIMITER ) [ 0 ] . split ( "[.]" ) [ 1 ] ; final String name = tagString . split ( COMPOSITE_TAG_DELIMITER ) [ 1 ] ; getScenarioLinks ( ) . add ( ResultsUtils . createLink ( null , name , null , type ) ) ; } else { LOGGER . warn ( "Composite named tag {} does not match regex {}. Skipping" , tagString , namedLinkPatternString ) ; }
|
public class LogManager { /** * Returns a LoggerContext .
 * @ param loader The ClassLoader for the context . If null the context will attempt to determine the appropriate
 * ClassLoader .
 * @ param currentContext if false the LoggerContext appropriate for the caller of this method is returned . For
 * example , in a web application if the caller is a class in WEB - INF / lib then one LoggerContext may be
 * returned and if the caller is a class in the container ' s classpath then a different LoggerContext may
 * be returned . If true then only a single LoggerContext will be returned .
 * @ return a LoggerContext . */
public static LoggerContext getContext ( final ClassLoader loader , final boolean currentContext ) { } }
|
// Falls back to a fresh SimpleLoggerContextFactory when the configured factory is in an illegal
// state. Note: only ex.getMessage() is logged; the exception's stack trace is dropped.
try { return factory . getContext ( FQCN , loader , null , currentContext ) ; } catch ( final IllegalStateException ex ) { LOGGER . warn ( ex . getMessage ( ) + " Using SimpleLogger" ) ; return new SimpleLoggerContextFactory ( ) . getContext ( FQCN , loader , null , currentContext ) ; }
|
public class ExceptionSet { /** * Add all exceptions in the given set .
 * @ param other
 * the set */
public void addAll ( ExceptionSet other ) { } }
|
// Bitwise union of both exception bit sets; the cached size is recomputed afterwards and the
// memoized common supertype is invalidated because the set contents changed.
exceptionSet . or ( other . exceptionSet ) ; explicitSet . or ( other . explicitSet ) ; size = countBits ( exceptionSet ) ; commonSupertype = null ;
|
public class SubscriptionMessageImpl { /** * Set the value of the SubscriptionMessageType field in the message .
 * Javadoc description supplied by SubscriptionMessage interface . */
public final void setSubscriptionMessageType ( SubscriptionMessageType value ) { } }
|
// Trace-guarded debug output, then store the type's int encoding as the message subtype.
if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) ) SibTr . debug ( this , tc , "setSubscriptionMessageType to " + value ) ; /* Get the int value of the SubscriptionMessageType and set that into the subtype */
setSubtype ( value . toInt ( ) ) ;
|
public class CompanyBS { /** * Lists the users of the institution of the accessed domain .
 * @ param page Page number .
 * @ param pageSize Page size .
 * @ return PaginatedList Paginated list of users . */
public PaginatedList < User > listFromCurrentCompany ( Integer page , Integer pageSize ) { } }
|
// Normalizes paging (page >= 1, default page size), then queries either all users (when no
// domain is set) or the users linked to the current domain's company. In the company case each
// user's access level is raised to the maximum of its global and its company-specific level.
if ( page == null || page < 1 ) { page = 1 ; } if ( pageSize == null ) { pageSize = PAGESIZE ; } PaginatedList < User > results = new PaginatedList < User > ( ) ; if ( this . domain == null ) { Criteria criteria = this . dao . newCriteria ( User . class ) . setFirstResult ( ( page - 1 ) * pageSize ) . setMaxResults ( pageSize ) . addOrder ( Order . asc ( "name" ) ) ; Criteria counting = this . dao . newCriteria ( User . class ) . setProjection ( Projections . countDistinct ( "id" ) ) ; results . setList ( this . dao . findByCriteria ( criteria , User . class ) ) ; results . setTotal ( ( Long ) counting . uniqueResult ( ) ) ; } else { Criteria criteria = this . dao . newCriteria ( CompanyUser . class ) . setFirstResult ( ( page - 1 ) * pageSize ) . setMaxResults ( pageSize ) . add ( Restrictions . eq ( "company" , this . domain . getCompany ( ) ) ) . createAlias ( "user" , "user" , JoinType . INNER_JOIN ) . addOrder ( Order . asc ( "user.name" ) ) ; Criteria counting = this . dao . newCriteria ( CompanyUser . class ) . add ( Restrictions . eq ( "company" , this . domain . getCompany ( ) ) ) . createAlias ( "user" , "user" , JoinType . INNER_JOIN ) . setProjection ( Projections . countDistinct ( "user.id" ) ) ; List < CompanyUser > companyUsers = this . dao . findByCriteria ( criteria , CompanyUser . class ) ; ArrayList < User > users = new ArrayList < User > ( companyUsers . size ( ) ) ; for ( CompanyUser companyUser : companyUsers ) { User user = companyUser . getUser ( ) ; user . setAccessLevel ( Math . max ( user . getAccessLevel ( ) , companyUser . getAccessLevel ( ) ) ) ; users . add ( user ) ; } results . setList ( users ) ; results . setTotal ( ( Long ) counting . uniqueResult ( ) ) ; } return results ;
|
public class ScalarizationUtils { /** * Objective values are multiplied by weights and summed . Weights should
* always be positive .
* @ param solutionsList A list of solutions .
* @ param weights Positive constants by which objectives are summed . */
public static < S extends Solution < ? > > void weightedSum ( List < S > solutionsList , double [ ] weights ) { } }
|
for ( S solution : solutionsList ) { double sum = weights [ 0 ] * solution . getObjective ( 0 ) ; for ( int i = 1 ; i < solution . getNumberOfObjectives ( ) ; i ++ ) { sum += weights [ i ] * solution . getObjective ( i ) ; } setScalarizationValue ( solution , sum ) ; }
|
public class CommerceCountryLocalServiceBaseImpl { /** * Deletes the commerce country with the primary key from the database . Also notifies the appropriate model listeners .
 * @ param commerceCountryId the primary key of the commerce country
 * @ return the commerce country that was removed
 * @ throws PortalException if a commerce country with the primary key could not be found */
@ Indexable ( type = IndexableType . DELETE ) @ Override public CommerceCountry deleteCommerceCountry ( long commerceCountryId ) throws PortalException { } }
|
// Pure delegation to the persistence layer; @Indexable(DELETE) also removes the entity from the search index.
return commerceCountryPersistence . remove ( commerceCountryId ) ;
|
public class clusterinstance { /** * Use this API to fetch clusterinstance resource of given name . */
public static clusterinstance get ( nitro_service service , Long clid ) throws Exception { } }
|
clusterinstance obj = new clusterinstance ( ) ; obj . set_clid ( clid ) ; clusterinstance response = ( clusterinstance ) obj . get_resource ( service ) ; return response ;
|
public class JarafeMEDecoder { /** * @ param features - A list of string - double pairs representing the valued features for a classification instance
* @ return label - A string representing the predicted label according to the decoder */
public String classifyValuedInstance ( List < StringDoublePair > features ) { } }
|
List < scala . Tuple2 < String , Double > > nfs = new ArrayList < scala . Tuple2 < String , Double > > ( ) ; for ( StringDoublePair el : features ) { nfs . add ( new scala . Tuple2 < String , Double > ( el . getString ( ) , el . getDouble ( ) ) ) ; } return maxEnt . decodeValuedInstance ( nfs ) ;
|
public class Beans { /** * A slightly optimized way to get the bean identifier - there is not need to call ContextualStore . putIfAbsent ( ) for passivation capable beans because it ' s
* already called during bootstrap . See also { @ link BeanManagerImpl # addBean ( Bean ) } .
* @ param contextual
* @ param contextualStore
* @ param serviceRegistry
* @ return the identifier for the given contextual */
private static BeanIdentifier getIdentifier ( Contextual < ? > contextual , ContextualStore contextualStore , ServiceRegistry serviceRegistry ) { } }
|
if ( contextual instanceof RIBean < ? > ) { return ( ( RIBean < ? > ) contextual ) . getIdentifier ( ) ; } if ( contextualStore == null ) { contextualStore = serviceRegistry . get ( ContextualStore . class ) ; } return contextualStore . putIfAbsent ( contextual ) ;
|
public class JDBCDatabaseMetaData { /** * Retrieves " SELECT * FROM INFORMATION _ SCHEMA . & lt ; table & gt ; WHERE 1 = 1 " in string
* buffer form . < p >
* This is a convenience method provided because , for most
* < code > DatabaseMetaData < / code > queries , this is the most suitable
* thing upon which to start building . < p >
* @ return an StringBuffer whose content is :
* " SELECT * FROM & lt ; table & gt ; WHERE 1 = 1"
* @ param t the name of the table */
private StringBuffer toQueryPrefix ( String t ) { } }
|
StringBuffer sb = new StringBuffer ( 255 ) ; return sb . append ( selstar ) . append ( t ) . append ( whereTrue ) ;
|
public class PHS398FellowshipSupplementalV1_1Generator { /** * This method is used to get AttachmentGroupMin0Max100DataType xmlObject
* and set data to it based on narrative type code */
private AttachmentGroupMin0Max100DataType getAppendix ( ) { } }
|
AttachmentGroupMin0Max100DataType attachmentGroupType = AttachmentGroupMin0Max100DataType . Factory . newInstance ( ) ; List < AttachedFileDataType > attachedFileDataTypeList = new ArrayList < > ( ) ; AttachedFileDataType attachedFileDataType = null ; for ( NarrativeContract narrative : pdDoc . getDevelopmentProposal ( ) . getNarratives ( ) ) { if ( narrative . getNarrativeType ( ) . getCode ( ) != null && Integer . parseInt ( narrative . getNarrativeType ( ) . getCode ( ) ) == APPENDIX ) { attachedFileDataType = getAttachedFileType ( narrative ) ; if ( attachedFileDataType != null ) { attachedFileDataTypeList . add ( attachedFileDataType ) ; } } } attachmentGroupType . setAttachedFileArray ( attachedFileDataTypeList . toArray ( new AttachedFileDataType [ attachedFileDataTypeList . size ( ) ] ) ) ; return attachmentGroupType ;
|
public class InternalXtypeParser { /** * InternalXtype . g : 242:1 : ruleJvmUpperBoundAnded : ( ( rule _ _ JvmUpperBoundAnded _ _ Group _ _ 0 ) ) ; */
public final void ruleJvmUpperBoundAnded ( ) throws RecognitionException { } }
|
// ANTLR-generated rule method (from InternalXtype.g) -- machine-generated, do not hand-edit.
// Delegates to rule__JvmUpperBoundAnded__Group__0 between before()/after() grammar-access
// callbacks (suppressed while backtracking) and recovers in place from recognition errors.
int stackSize = keepStackSize ( ) ; try { // InternalXtype . g : 246:2 : ( ( ( rule _ _ JvmUpperBoundAnded _ _ Group _ _ 0 ) ) )
// InternalXtype . g : 247:2 : ( ( rule _ _ JvmUpperBoundAnded _ _ Group _ _ 0 ) )
{ // InternalXtype . g : 247:2 : ( ( rule _ _ JvmUpperBoundAnded _ _ Group _ _ 0 ) )
// InternalXtype . g : 248:3 : ( rule _ _ JvmUpperBoundAnded _ _ Group _ _ 0 )
{ if ( state . backtracking == 0 ) { before ( grammarAccess . getJvmUpperBoundAndedAccess ( ) . getGroup ( ) ) ; } // InternalXtype . g : 249:3 : ( rule _ _ JvmUpperBoundAnded _ _ Group _ _ 0 )
// InternalXtype . g : 249:4 : rule _ _ JvmUpperBoundAnded _ _ Group _ _ 0
{ pushFollow ( FOLLOW_2 ) ; rule__JvmUpperBoundAnded__Group__0 ( ) ; state . _fsp -- ; if ( state . failed ) return ; } if ( state . backtracking == 0 ) { after ( grammarAccess . getJvmUpperBoundAndedAccess ( ) . getGroup ( ) ) ; } } } } catch ( RecognitionException re ) { reportError ( re ) ; recover ( input , re ) ; } finally { restoreStackSize ( stackSize ) ; } return ;
|
// NOTE(review): AWS-SDK-generated marshaller — the order of the marshall(...)
// calls below defines the wire order of the JSON members; do not reorder.
public class ComputeEnvironmentDetailMarshaller { /** * Marshall the given parameter object . */
public void marshall ( ComputeEnvironmentDetail computeEnvironmentDetail , ProtocolMarshaller protocolMarshaller ) { } }
|
// Null input is a caller bug, reported as SdkClientException; every field of
// the detail object is then written through its pre-built *_BINDING marshalling
// descriptor, and any failure is wrapped (with cause) in SdkClientException.
if ( computeEnvironmentDetail == null ) { throw new SdkClientException ( "Invalid argument passed to marshall(...)" ) ; } try { protocolMarshaller . marshall ( computeEnvironmentDetail . getComputeEnvironmentName ( ) , COMPUTEENVIRONMENTNAME_BINDING ) ; protocolMarshaller . marshall ( computeEnvironmentDetail . getComputeEnvironmentArn ( ) , COMPUTEENVIRONMENTARN_BINDING ) ; protocolMarshaller . marshall ( computeEnvironmentDetail . getEcsClusterArn ( ) , ECSCLUSTERARN_BINDING ) ; protocolMarshaller . marshall ( computeEnvironmentDetail . getType ( ) , TYPE_BINDING ) ; protocolMarshaller . marshall ( computeEnvironmentDetail . getState ( ) , STATE_BINDING ) ; protocolMarshaller . marshall ( computeEnvironmentDetail . getStatus ( ) , STATUS_BINDING ) ; protocolMarshaller . marshall ( computeEnvironmentDetail . getStatusReason ( ) , STATUSREASON_BINDING ) ; protocolMarshaller . marshall ( computeEnvironmentDetail . getComputeResources ( ) , COMPUTERESOURCES_BINDING ) ; protocolMarshaller . marshall ( computeEnvironmentDetail . getServiceRole ( ) , SERVICEROLE_BINDING ) ; } catch ( Exception e ) { throw new SdkClientException ( "Unable to marshall request to JSON: " + e . getMessage ( ) , e ) ; }
|
public class TSIG {

    /**
     * Verifies the data (computes the secure hash and compares it to the input).
     *
     * @param mac the HMAC generator, already updated with the data being verified
     * @param signature the signature to compare against
     * @param truncation_ok if true, the signature may be truncated; only the
     *        number of bytes in the provided signature are compared
     * @return true if the signature matches, false otherwise
     */
    private static boolean verify(Mac mac, byte[] signature, boolean truncation_ok) {
        byte[] expected = mac.doFinal();
        if (truncation_ok && signature.length < expected.length) {
            // Caller accepts a truncated MAC: compare only the prefix the
            // signer actually sent.
            expected = Arrays.copyOf(expected, signature.length);
        }
        // MessageDigest.isEqual is a time-constant comparison; Arrays.equals
        // would leak the index of the first mismatching byte via timing,
        // enabling byte-at-a-time MAC forgery attacks.
        return MessageDigest.isEqual(signature, expected);
    }
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.