signature
stringlengths
43
39.1k
implementation
stringlengths
0
450k
public class RoaringArray { /** * involves a binary search */ protected Container getContainer ( short x ) { } }
int i = this . binarySearch ( 0 , size , x ) ; if ( i < 0 ) { return null ; } return this . values [ i ] ;
public class snmptrap { /** * Use this API to delete snmptrap resources . */ public static base_responses delete ( nitro_service client , snmptrap resources [ ] ) throws Exception { } }
base_responses result = null ; if ( resources != null && resources . length > 0 ) { snmptrap deleteresources [ ] = new snmptrap [ resources . length ] ; for ( int i = 0 ; i < resources . length ; i ++ ) { deleteresources [ i ] = new snmptrap ( ) ; deleteresources [ i ] . trapclass = resources [ i ] . trapclass ; deleteresources [ i ] . trapdestination = resources [ i ] . trapdestination ; } result = delete_bulk_request ( client , deleteresources ) ; } return result ;
public class SingletonServiceFactory { /** * Get a cached singleton object from service map by interface class and generic type class . * The serviceMap is constructed from service . yml which defines interface and generic type * to implementation mapping . * @ param interfaceClass Interface class * @ param < T > class type * @ param typeClass Generic type class * @ return The implementation object */ public static < T > T getBean ( Class < T > interfaceClass , Class typeClass ) { } }
Object object = serviceMap . get ( interfaceClass . getName ( ) + "<" + typeClass . getName ( ) + ">" ) ; if ( object == null ) return null ; if ( object instanceof Object [ ] ) { return ( T ) Array . get ( object , 0 ) ; } else { return ( T ) object ; }
public class XmlUtil {
    /**
     * Get the single named element: returns the first direct child of {@code el}
     * whose node name equals {@code name}.
     *
     * @param el   Node whose children are scanned
     * @param name String tag name of the required node
     * @return the first matching child node, or {@code null} when there is none
     * @throws SAXException declared for caller compatibility
     */
    public static Node getOneTaggedNode(final Node el, final String name) throws SAXException {
        if (!el.hasChildNodes()) {
            return null;
        }
        final NodeList childList = el.getChildNodes();
        final int count = childList.getLength();
        for (int idx = 0; idx < count; idx++) {
            final Node candidate = childList.item(idx);
            if (name.equals(candidate.getNodeName())) {
                return candidate;
            }
        }
        return null;
    }
}
public class DeletePermissionPolicyRequestMarshaller { /** * Marshall the given parameter object . */ public void marshall ( DeletePermissionPolicyRequest deletePermissionPolicyRequest , ProtocolMarshaller protocolMarshaller ) { } }
if ( deletePermissionPolicyRequest == null ) { throw new SdkClientException ( "Invalid argument passed to marshall(...)" ) ; } try { protocolMarshaller . marshall ( deletePermissionPolicyRequest . getResourceArn ( ) , RESOURCEARN_BINDING ) ; } catch ( Exception e ) { throw new SdkClientException ( "Unable to marshall request to JSON: " + e . getMessage ( ) , e ) ; }
public class DscNodeConfigurationsInner { /** * Retrieve the Dsc node configurations by node configuration . * @ param resourceGroupName Name of an Azure Resource group . * @ param automationAccountName The name of the automation account . * @ param nodeConfigurationName The Dsc node configuration name . * @ throws IllegalArgumentException thrown if parameters fail the validation * @ throws ErrorResponseException thrown if the request is rejected by server * @ throws RuntimeException all other wrapped checked exceptions if the request fails to be sent * @ return the DscNodeConfigurationInner object if successful . */ public DscNodeConfigurationInner get ( String resourceGroupName , String automationAccountName , String nodeConfigurationName ) { } }
return getWithServiceResponseAsync ( resourceGroupName , automationAccountName , nodeConfigurationName ) . toBlocking ( ) . single ( ) . body ( ) ;
public class Utils {
    /**
     * Returns directory containing server log directories.
     * Resolution order: environment variable LOG_DIR (read under a privileged
     * action), then the Java system property of the same name. The result is
     * cached in the {@code logDir} static value so the lookup runs only once.
     *
     * @return instance of the log directory or 'null' if LOG_DIR is not defined
     */
    public static File getLogDir() {
        String logDirLoc = null;
        // 1st check in environment variable is set. This is the normal case
        // when the server is started from the command line.
        if (logDir.get() == null) {
            File resultDir = null;
            try {
                // Environment access may be restricted by a security manager,
                // so read it inside a privileged action.
                logDirLoc = AccessController.doPrivileged(new java.security.PrivilegedExceptionAction<String>() {
                    @Override
                    public String run() throws Exception {
                        return System.getenv(BootstrapConstants.ENV_LOG_DIR);
                    }
                });
            } catch (Exception ex) {
                // Intentionally ignored: a failed lookup falls through to the
                // system-property check below.
            }
            // outputDirLoc = System.getenv(BootstrapConstants.ENV_WLP_OUTPUT_DIR);
            if (logDirLoc != null) {
                resultDir = new File(logDirLoc);
            } else {
                // PI20344: Check if the Java property is set, which is the normal case when
                // the server is embedded; i.e. they didn't launch it from the command line.
                logDirLoc = System.getProperty(BootstrapConstants.ENV_LOG_DIR);
                if (logDirLoc != null) {
                    resultDir = new File(logDirLoc);
                }
            }
            // Cache the result (possibly a null-wrapping initializer) so
            // subsequent calls skip the environment/property lookups.
            logDir = StaticValue.mutateStaticValue(logDir, new FileInitializer(resultDir));
        }
        return logDir.get();
    }
}
public class UpdateInPlaceStrategy {
    /**
     * Goes through each attachment and adds/updates the attachment as needed.
     * Asks the attachment to compare itself against the matching resource and
     * dispatches on the resulting update decision.
     *
     * @param resource         the resource to add/update the attachment on
     * @param attachment       the attachment being uploaded
     * @param matchingResource the already-stored resource to compare against
     * @throws RepositoryResourceException
     * @throws RepositoryBackendException
     * @throws RepositoryResourceUpdateException
     * @throws RepositoryBadDataException
     * @throws RepositoryResourceCreationException
     */
    public void uploadAttachment(RepositoryResourceImpl resource, AttachmentResourceImpl attachment, RepositoryResourceImpl matchingResource) throws RepositoryResourceCreationException, RepositoryBadDataException, RepositoryResourceUpdateException, RepositoryBackendException, RepositoryResourceException {
        switch (attachment.updateRequired(matchingResource)) {
            case ADD:
                // Attachment is new on this resource.
                resource.addAttachment(attachment);
                break;
            case UPDATE:
                // Attachment exists but differs; replace it in place.
                resource.updateAttachment(attachment);
                break;
            case NOTHING:
                // Nothing to do but have to include this to stop findbugs crying
                break;
        }
    }
}
public class Page {
    /**
     * Called by the segment writing service to write the page to the stream.
     *
     * NOTE(review): this implementation is a stub — it always returns null and
     * ignores every argument. Confirm whether checkpoint writing is implemented
     * in a subclass or is intentionally a no-op for this page type.
     *
     * @param table
     * @param sOut
     * @param oldSequence
     * @param saveLength
     * @param tail
     * @param saveSequence
     * @return always {@code null} in this implementation
     * @throws IOException declared for overriders; never thrown here
     */
    @InService(SegmentServiceImpl.class)
    public Page writeCheckpoint(TableKelp table, OutSegment sOut, long oldSequence, int saveLength, int tail, int saveSequence) throws IOException {
        return null;
    }
}
public class MMDCfgPanel { /** * GEN - LAST : event _ checkBoxDropShadowActionPerformed */ private void colorChooserPaperColorActionPerformed ( java . awt . event . ActionEvent evt ) { } }
// GEN - FIRST : event _ colorChooserPaperColorActionPerformed if ( this . colorChooserPaperColor . isLastOkPressed ( ) && changeNotificationAllowed ) { this . controller . changed ( ) ; }
public class Atom10Parser { /** * List ( Elements ) - > List ( Persons ) */ private List < SyndPerson > parsePersons ( final String baseURI , final List < Element > ePersons , final Locale locale ) { } }
final List < SyndPerson > persons = new ArrayList < SyndPerson > ( ) ; for ( final Element ePerson : ePersons ) { persons . add ( parsePerson ( baseURI , ePerson , locale ) ) ; } return Lists . emptyToNull ( persons ) ;
public class EmoFileSystem { /** * All remaining FileSystem operations are not supported and will throw exceptions . */ @ Override public FSDataOutputStream create ( Path f , FsPermission permission , boolean overwrite , int bufferSize , short replication , long blockSize , Progressable progress ) throws IOException { } }
throw new IOException ( "Create not supported for EmoFileSystem: " + f ) ;
public class ImageSizeUtils { /** * Computes sample size for downscaling image size ( < b > srcSize < / b > ) to view size ( < b > targetSize < / b > ) . This sample * size is used during * { @ linkplain BitmapFactory # decodeStream ( java . io . InputStream , android . graphics . Rect , android . graphics . BitmapFactory . Options ) * decoding image } to bitmap . < br / > * < br / > * < b > Examples : < / b > < br / > * < pre > * srcSize ( 100x100 ) , targetSize ( 10x10 ) , powerOf2Scale = true - > sampleSize = 8 * srcSize ( 100x100 ) , targetSize ( 10x10 ) , powerOf2Scale = false - > sampleSize = 10 * srcSize ( 100x100 ) , targetSize ( 20x40 ) , viewScaleType = FIT _ INSIDE - > sampleSize = 5 * srcSize ( 100x100 ) , targetSize ( 20x40 ) , viewScaleType = CROP - > sampleSize = 2 * < / pre > * < br / > * The sample size is the number of pixels in either dimension that correspond to a single pixel in the decoded * bitmap . For example , inSampleSize = = 4 returns an image that is 1/4 the width / height of the original , and 1/16 * the number of pixels . Any value < = 1 is treated the same as 1. * @ param srcSize Original ( image ) size * @ param targetSize Target ( view ) size * @ param viewScaleType { @ linkplain ViewScaleType Scale type } for placing image in view * @ param powerOf2Scale < i > true < / i > - if sample size be a power of 2 ( 1 , 2 , 4 , 8 , . . . ) * @ return Computed sample size */ public static int computeImageSampleSize ( ImageSize srcSize , ImageSize targetSize , ViewScaleType viewScaleType , boolean powerOf2Scale ) { } }
final int srcWidth = srcSize . getWidth ( ) ; final int srcHeight = srcSize . getHeight ( ) ; final int targetWidth = targetSize . getWidth ( ) ; final int targetHeight = targetSize . getHeight ( ) ; int scale = 1 ; switch ( viewScaleType ) { case FIT_INSIDE : if ( powerOf2Scale ) { final int halfWidth = srcWidth / 2 ; final int halfHeight = srcHeight / 2 ; while ( ( halfWidth / scale ) > targetWidth || ( halfHeight / scale ) > targetHeight ) { scale *= 2 ; } } else { scale = Math . max ( srcWidth / targetWidth , srcHeight / targetHeight ) ; // max } break ; case CROP : if ( powerOf2Scale ) { final int halfWidth = srcWidth / 2 ; final int halfHeight = srcHeight / 2 ; while ( ( halfWidth / scale ) > targetWidth && ( halfHeight / scale ) > targetHeight ) { scale *= 2 ; } } else { scale = Math . min ( srcWidth / targetWidth , srcHeight / targetHeight ) ; // min } break ; } if ( scale < 1 ) { scale = 1 ; } scale = considerMaxTextureSize ( srcWidth , srcHeight , scale , powerOf2Scale ) ; return scale ;
public class ShanksAgentBayesianReasoningCapability {
    /**
     * Return the complete node.
     *
     * NOTE(review): the original javadoc says this returns "the ProbabilisticNode
     * object", but the declared return type is {@code int} — confirm against the
     * {@code getNode(network, nodeName)} overload whether this is a node handle/index
     * or a documentation error.
     *
     * @param agent    the agent whose Bayesian network is queried
     * @param nodeName name of the node to look up
     * @return the result of the network-based {@code getNode} overload
     * @throws ShanksException propagated from the overload
     */
    public static int getNode(BayesianReasonerShanksAgent agent, String nodeName) throws ShanksException {
        // Delegates to the overload that takes the network directly.
        return ShanksAgentBayesianReasoningCapability.getNode(agent.getBayesianNetwork(), nodeName);
    }
}
public class TransactionToDispatchableMap {
    /**
     * Adds a dispatchable for use with a specific local transaction.
     * Typically this is done by the TCP channel thread when it determines it is
     * about to pass the transmission relating to the start of a local transaction
     * to the receive listener dispatcher.
     *
     * @param clientTransactionId Identifies the local transaction that
     *            the dispatchable will be used to dispatch work for.
     * @return the dispatchable to use when ordering work for the local transaction.
     * @throws SIErrorException if a dispatchable is already mapped for this id
     *             (an internal consistency error).
     * @see com.ibm.ws.sib.comms.server.clientsupport.ServerTransportReceiveListener#getThreadContext(Conversation, WsByteBuffer, int)
     */
    public Dispatchable addDispatchableForLocalTransaction(int clientTransactionId) {
        // Guarded trace entry so the string concatenation only runs when enabled.
        if (tc.isEntryEnabled()) SibTr.entry(this, tc, "addDispatchableForLocalTransaction", "" + clientTransactionId);
        // A dispatchable must not already exist for this transaction id. If it
        // does, record an FFDC with full context, trace it, and fail loudly.
        if (idToFirstLevelEntryMap.containsKey(clientTransactionId)) {
            final SIErrorException exception = new SIErrorException(CommsConstants.TRANTODISPATCHMAP_ADDDISPATCHLOCALTX_01);
            FFDCFilter.processException(exception, CLASS_NAME + ".addDispatchableForLocalTransaction", CommsConstants.TRANTODISPATCHMAP_ADDDISPATCHLOCALTX_01, new Object[] { "" + clientTransactionId, idToFirstLevelEntryMap, this });
            if (tc.isEventEnabled()) SibTr.exception(this, tc, exception);
            throw exception;
        }
        // Create the first-level entry, capture its dispatchable, then publish
        // the entry in the map under the transaction id.
        LocalFirstLevelMapEntry entry = new LocalFirstLevelMapEntry();
        Dispatchable result = entry.getDispatchable();
        idToFirstLevelEntryMap.put(clientTransactionId, entry);
        if (tc.isEntryEnabled()) SibTr.exit(this, tc, "addDispatchableForLocalTransaction", result);
        return result;
    }
}
public class ChorusContext { /** * This get method will return the value if its type matches the type parameter * @ param key to lookup * @ param type the expected type of the value * @ return null if the key does not exist or the type is incorrect */ @ SuppressWarnings ( { } }
"unchecked" , "unused" } ) public < T > T get ( String key , Class < T > type ) { try { return ( T ) state . get ( key ) ; } catch ( ClassCastException cce ) { return null ; }
public class appqoeaction { /** * Use this API to fetch appqoeaction resource of given name . */ public static appqoeaction get ( nitro_service service , String name ) throws Exception { } }
appqoeaction obj = new appqoeaction ( ) ; obj . set_name ( name ) ; appqoeaction response = ( appqoeaction ) obj . get_resource ( service ) ; return response ;
public class GeoBackupPoliciesInner { /** * Gets a geo backup policy . * @ param resourceGroupName The name of the resource group that contains the resource . You can obtain this value from the Azure Resource Manager API or the portal . * @ param serverName The name of the server . * @ param databaseName The name of the database . * @ throws IllegalArgumentException thrown if parameters fail the validation * @ return the observable to the GeoBackupPolicyInner object */ public Observable < GeoBackupPolicyInner > getAsync ( String resourceGroupName , String serverName , String databaseName ) { } }
return getWithServiceResponseAsync ( resourceGroupName , serverName , databaseName ) . map ( new Func1 < ServiceResponse < GeoBackupPolicyInner > , GeoBackupPolicyInner > ( ) { @ Override public GeoBackupPolicyInner call ( ServiceResponse < GeoBackupPolicyInner > response ) { return response . body ( ) ; } } ) ;
public class DataSet { /** * Create data set that results from joining * several data sets . * @ param dataSets Data sets to join . * @ return A new data set containing the rows * from all given data sets . * @ throws InvalidOperationException if < code > dataSets < / code > * is null or empty or if the data source is not the * same for all data sets . */ @ SafeVarargs public static DataSet join ( DataSet ... dataSets ) { } }
if ( dataSets == null || dataSets . length == 0 ) { throw new InvalidOperationException ( "No source data sets given for joining." ) ; } DataSet r = copyOf ( dataSets [ 0 ] ) ; for ( int i = 1 ; i < dataSets . length ; i ++ ) { DataSet d = dataSets [ i ] ; if ( d . getSource ( ) != r . getSource ( ) ) { throw new InvalidOperationException ( "Data source mismatch." ) ; } r . getRows ( ) . addAll ( d . getRows ( ) ) ; } return r ;
public class ApiOvhOverTheBox { /** * Link a device to this service * REST : POST / overTheBox / { serviceName } / linkDevice * @ param deviceId [ required ] The id of the device * @ param serviceName [ required ] The internal name of your overTheBox offer */ public void serviceName_linkDevice_POST ( String serviceName , String deviceId ) throws IOException { } }
String qPath = "/overTheBox/{serviceName}/linkDevice" ; StringBuilder sb = path ( qPath , serviceName ) ; HashMap < String , Object > o = new HashMap < String , Object > ( ) ; addBody ( o , "deviceId" , deviceId ) ; exec ( qPath , "POST" , sb . toString ( ) , o ) ;
public class PathUtils { /** * Test whether the active file system is case < em > sensitive < / em > . A true result means * that the active file system is case sensitive . A false result means * that the active file system is not case sensitive . * The test accesses the bundle context of this class , and creates and removes files in the * persistent data storage area of that bundle context . If the bundle or bundle context are * not available , or if the persistent data storage area cannot be accessed , then we test the * file system directly ( using the canonical name ) by writing a file to the file system and * comparing the File to a File that differs only by case . * The result is used when testing for file existence . See { @ link # checkCase ( File , String ) } . * @ return True if the active file system is case sensitive , otherwise false . */ static boolean isOsCaseSensitive ( ) { } }
File caseSensitiveFile = null ; Bundle bundle = FrameworkUtil . getBundle ( PathUtils . class ) ; if ( bundle != null ) { BundleContext ctx = bundle . getBundleContext ( ) ; if ( ctx != null ) { caseSensitiveFile = ctx . getDataFile ( "caseSensitive" ) ; if ( caseSensitiveFile != null && ( caseSensitiveFile . delete ( ) || ! caseSensitiveFile . exists ( ) ) ) { try { if ( caseSensitiveFile . createNewFile ( ) ) { // We created " caseSensitive " , so check if " CASeSENSITIVE " exists . // new File ( " A " ) . equals ( new File ( " a " ) ) returns true on Windows , but // OS / 400 returns false even though the files are the same , so use // getCanonicalFile ( ) first , which allows the comparison to succeed . // Note that OS / 400 only considers two files equal if they have both // been canonicalized . . . return ! getCanonicalFile ( caseSensitiveFile ) . equals ( getCanonicalFile ( new File ( caseSensitiveFile . getParentFile ( ) , "CASEsENSITIVE" ) ) ) ; } } catch ( PrivilegedActionException pae ) { // auto FFDC } catch ( IOException ioe ) { // auto FFDC } finally { caseSensitiveFile . delete ( ) ; } } } } try { // Need to double check , since the above code is intended to be run in an // OSGi environment , not a Java SE / JUnit env caseSensitiveFile = File . createTempFile ( "caseSENSITIVEprefix" , "TxT" ) ; boolean iAmCaseSensitive = ! getCanonicalFile ( caseSensitiveFile ) . equals ( new File ( caseSensitiveFile . getAbsolutePath ( ) . toUpperCase ( ) ) ) ; if ( iAmCaseSensitive ) { return true ; } } catch ( Exception e ) { // We can ' t tell if this OS is case sensitive or not . // Assume we might not be case sensitive . return false ; } finally { // caseSensitiveFile . delete ( ) ; } // Something went wrong . Assume we might be not be case sensitive . return false ;
public class AMRLearnerProcessor {
    /**
     * Process input instances: locates the rule with the given id in the local
     * rule set and trains it on the instance — removing the rule when its
     * change detector fires, or updating statistics and attempting expansion
     * otherwise.
     *
     * @param ruleID   id of the rule to train
     * @param instance the instance to train on
     */
    private void trainRuleOnInstance(int ruleID, Instance instance) {
        // System.out.println("Processor: " + this.processorId + ": Rule: " + ruleID + " -> Counter = " + counter);
        Iterator<ActiveRule> ruleIterator = this.ruleSet.iterator();
        while (ruleIterator.hasNext()) {
            ActiveRule rule = ruleIterator.next();
            if (rule.getRuleNumberID() == ruleID) {
                // Check (again) for coverage
                if (rule.isCovering(instance) == true) {
                    double error = rule.computeError(instance); // Use adaptive mode error
                    // Feed the error to the rule's change detector.
                    boolean changeDetected = ((RuleActiveRegressionNode) rule.getLearningNode()).updateChangeDetection(error);
                    if (changeDetected == true) {
                        // Concept drift detected: drop the rule locally and
                        // tell the aggregator to remove it too.
                        ruleIterator.remove();
                        this.sendRemoveRuleEvent(ruleID);
                    } else {
                        rule.updateStatistics(instance);
                        // Only attempt an expansion every gracePeriod instances.
                        if (rule.getInstancesSeen() % this.gracePeriod == 0.0) {
                            if (rule.tryToExpand(this.splitConfidence, this.tieThreshold)) {
                                rule.split();
                                // expanded: update Aggregator with new/updated predicate
                                this.sendPredicate(rule.getRuleNumberID(), rule.getLastUpdatedRuleSplitNode(), (RuleActiveRegressionNode) rule.getLearningNode());
                            }
                        }
                    }
                }
                // Rule ids are unique — stop scanning once the target rule was handled.
                return;
            }
        }
    }
}
public class FnDouble { /** * It performs the operation target < sup > power < / sup > and returns its value . The result * rounding mode is specified by the given { @ link RoundingMode } * @ param power the power to raise the target to * @ param roundingMode the { @ link RoundingMode } * @ return the result of target < sup > power < / sup > */ public final static Function < Double , Double > pow ( int power , RoundingMode roundingMode ) { } }
return new Pow ( power , roundingMode ) ;
public class TaskTracker { /** * Obtain the max number of task slots based on the configuration and CPU */ protected int getMaxSlots ( JobConf conf , int numCpuOnTT , TaskType type ) { } }
int maxSlots ; String cpuToSlots ; if ( type == TaskType . MAP ) { maxSlots = conf . getInt ( "mapred.tasktracker.map.tasks.maximum" , 2 ) ; cpuToSlots = conf . get ( "mapred.tasktracker.cpus.to.maptasks" ) ; } else { maxSlots = conf . getInt ( "mapred.tasktracker.reduce.tasks.maximum" , 2 ) ; cpuToSlots = conf . get ( "mapred.tasktracker.cpus.to.reducetasks" ) ; } if ( cpuToSlots != null ) { try { // Format of the configuration is // numCpu1 : maxSlot1 , numCpu2 : maxSlot2 , numCpu3 : maxSlot3 for ( String str : cpuToSlots . split ( "," ) ) { String [ ] pair = str . split ( ":" ) ; int numCpu = Integer . parseInt ( pair [ 0 ] . trim ( ) ) ; int max = Integer . parseInt ( pair [ 1 ] . trim ( ) ) ; if ( numCpu == numCpuOnTT ) { maxSlots = max ; break ; } } } catch ( Exception e ) { LOG . warn ( "Error parsing number of CPU to map slots configuration" , e ) ; } } return maxSlots ;
public class ANXAdapters {
    /**
     * Adapts a ANXAccountInfo to an XChange AccountInfo.
     *
     * @param anxAccountInfo the ANX-specific account info
     * @return the adapted generic AccountInfo
     */
    public static AccountInfo adaptAccountInfo(ANXAccountInfo anxAccountInfo) {
        // Adapt to XChange DTOs
        final String login = anxAccountInfo.getLogin();
        final var tradingFee = percentToFactor(anxAccountInfo.getTradeFee());
        return new AccountInfo(login, tradingFee, ANXAdapters.adaptWallet(anxAccountInfo.getWallets()));
    }
}
public class StaticTypeCheckingVisitor { /** * Test if a node is an inner class node , and if it is , then checks if the enclosing method is skipped . * @ param node * @ return true if the inner class node should be skipped */ protected boolean isSkippedInnerClass ( AnnotatedNode node ) { } }
if ( ! ( node instanceof InnerClassNode ) ) return false ; MethodNode enclosingMethod = ( ( InnerClassNode ) node ) . getEnclosingMethod ( ) ; return enclosingMethod != null && isSkipMode ( enclosingMethod ) ;
public class AbstractViewQuery { /** * Clear a view . Applies to ImageView , WebView , and TextView . * @ return self */ public T clear ( ) { } }
if ( view != null ) { if ( view instanceof ImageView ) { ImageView iv = ( ( ImageView ) view ) ; iv . setImageBitmap ( null ) ; } else if ( view instanceof WebView ) { WebView wv = ( ( WebView ) view ) ; wv . stopLoading ( ) ; wv . clearView ( ) ; } else if ( view instanceof TextView ) { TextView tv = ( ( TextView ) view ) ; tv . setText ( "" ) ; } } return self ( ) ;
public class DynamicPipelineServiceImpl { /** * Computes the commit stage of the pipeline . * @ param pipeline * @ param commits */ protected void processCommits ( Pipeline pipeline , List < Commit > commits ) { } }
// TODO when processing commits should we only add the commits that are within the time boundaries ? Set < String > seenRevisionNumbers = new HashSet < > ( ) ; if ( logger . isDebugEnabled ( ) ) { StringBuilder sb = new StringBuilder ( ) ; sb . append ( "\n===== Commit List =====\n" ) ; for ( Commit commit : commits ) { sb . append ( " - " + commit . getId ( ) + " (" + commit . getScmRevisionNumber ( ) + ") - " + commit . getScmCommitLog ( ) + "\n" ) ; } logger . debug ( sb . toString ( ) ) ; } for ( Commit commit : commits ) { boolean commitNotSeen = seenRevisionNumbers . add ( commit . getScmRevisionNumber ( ) ) ; if ( commitNotSeen ) { pipeline . addCommit ( PipelineStage . COMMIT . getName ( ) , new PipelineCommit ( commit , commit . getScmCommitTimestamp ( ) ) ) ; } }
public class Database { public DbPipe getClassPipeProperties ( String className , String pipeName ) throws DevFailed { } }
return databaseDAO . getClassPipeProperties ( this , className , pipeName ) ;
public class Strman { /** * Ensures that the value ends with suffix . If it doesn ' t , it ' s appended . * @ param value The input String * @ param suffix The substr to be ensured to be right * @ param caseSensitive Use case ( in - ) sensitive matching for determining if value already ends with suffix * @ return The string which is guarenteed to start with substr */ public static String ensureRight ( final String value , final String suffix , boolean caseSensitive ) { } }
validate ( value , NULL_STRING_PREDICATE , NULL_STRING_MSG_SUPPLIER ) ; return endsWith ( value , suffix , caseSensitive ) ? value : append ( value , suffix ) ;
public class SelfCalibrationGuessAndCheckFocus {
    /**
     * Computes the best rectifying homography given the set of camera matrices.
     * Must call {@code setCamera} first.
     *
     * @param cameraMatrices camera matrices for view 2 and beyond. view 1 is implicit and assumed to be P = [I|0]
     * @return true if successful or false if it fails
     * @throws IllegalArgumentException if the list is empty
     */
    public boolean process(List<DMatrixRMaj> cameraMatrices) {
        if (cameraMatrices.size() == 0) throw new IllegalArgumentException("Must contain at least 1 matrix");
        // Apply normalization as suggested in the paper, then force the first camera matrix to be [I|0] again
        CommonOps_DDRM.setIdentity(tmpP);
        CommonOps_DDRM.mult(Vinv, tmpP, P1);
        MultiViewOps.projectiveToIdentityH(P1, H);
        // P = inv(V)*P/||P(2,0:2)|| — normalize every camera matrix so the last
        // row's first three entries have unit norm.
        this.normalizedP.reset();
        for (int i = 0; i < cameraMatrices.size(); i++) {
            DMatrixRMaj A = cameraMatrices.get(i);
            DMatrixRMaj Pi = normalizedP.grow();
            CommonOps_DDRM.mult(Vinv, A, tmpP);
            CommonOps_DDRM.mult(tmpP, H, Pi);
            double a0 = Pi.get(2, 0);
            double a1 = Pi.get(2, 1);
            double a2 = Pi.get(2, 2);
            double scale = Math.sqrt(a0 * a0 + a1 * a1 + a2 * a2);
            CommonOps_DDRM.scale(1.0 / scale, Pi);
        }
        // Find the best combinations of focal lengths
        double bestScore;
        if (sameFocus) {
            bestScore = findBestFocusOne(normalizedP.get(0));
        } else {
            bestScore = findBestFocusTwo(normalizedP.get(0));
        }
        // undo normalization: fold V back into the top-left block of the best homography
        CommonOps_DDRM.extract(bestH, 0, 0, tmp);
        CommonOps_DDRM.mult(V, tmp, K1);
        CommonOps_DDRM.insert(K1, bestH, 0, 0);
        // if it's not at a local minimum it almost definitely failed
        return bestScore != Double.MAX_VALUE && localMinimum;
    }
}
public class SyncGroupsInner { /** * Gets a sync group . * @ param resourceGroupName The name of the resource group that contains the resource . You can obtain this value from the Azure Resource Manager API or the portal . * @ param serverName The name of the server . * @ param databaseName The name of the database on which the sync group is hosted . * @ param syncGroupName The name of the sync group . * @ throws IllegalArgumentException thrown if parameters fail the validation * @ return the observable to the SyncGroupInner object */ public Observable < SyncGroupInner > getAsync ( String resourceGroupName , String serverName , String databaseName , String syncGroupName ) { } }
return getWithServiceResponseAsync ( resourceGroupName , serverName , databaseName , syncGroupName ) . map ( new Func1 < ServiceResponse < SyncGroupInner > , SyncGroupInner > ( ) { @ Override public SyncGroupInner call ( ServiceResponse < SyncGroupInner > response ) { return response . body ( ) ; } } ) ;
public class MultipleInputsInterface { /** * Use this method for configuring a Job instance according to the multiple * input specs that has been specified . Returns the instance files created . */ public Set < String > configureJob ( Job job ) throws FileNotFoundException , IOException { } }
Set < String > instanceFiles = new HashSet < String > ( ) ; for ( Map . Entry < Path , List < Input > > entry : multiInputs . entrySet ( ) ) { for ( int inputId = 0 ; inputId < entry . getValue ( ) . size ( ) ; inputId ++ ) { Input input = entry . getValue ( ) . get ( inputId ) ; instanceFiles . addAll ( PangoolMultipleInputs . addInputPath ( job , input . path , input . inputFormat , input . inputProcessor , input . specificContext , inputId ) ) ; } } return instanceFiles ;
public class RunMode {
    /**
     * Checks if given run mode is active.
     *
     * @param runModes         Run modes for current instance
     * @param expectedRunModes Run mode(s) to check for
     * @return true if any of the given run modes is active
     * @deprecated Instead of directly using the run modes, it is better to make the component in question require a
     *             configuration (see OSGi Declarative Services Spec: configuration policy). In this case, a component
     *             gets only active if a configuration is available. Such a configuration can be put into the repository
     *             for the specific run mode.
     */
    @Deprecated
    public static boolean is(Set<String> runModes, String... expectedRunModes) {
        // Null on either side means "no match" rather than an error.
        if (runModes == null || expectedRunModes == null) {
            return false;
        }
        for (String candidate : expectedRunModes) {
            if (runModes.contains(candidate)) {
                return true;
            }
        }
        return false;
    }
}
public class ObjectFactory { /** * Create an instance of { @ link JAXBElement } { @ code < } { @ link MunderType } { @ code > } } */ @ XmlElementDecl ( namespace = "http://www.w3.org/1998/Math/MathML" , name = "munder" ) public JAXBElement < MunderType > createMunder ( MunderType value ) { } }
return new JAXBElement < MunderType > ( _Munder_QNAME , MunderType . class , null , value ) ;
public class FileTree { /** * Returns the entry for the file in its parent directory . This will be the given entry unless the * name for the entry is " . " or " . . " , in which the directory linking to the file is not the file ' s * parent directory . In that case , we know the file must be a directory ( " . " and " . . " can only * link to directories ) , so we can just get the entry in the directory ' s parent directory that * links to it . So , for example , if we have a directory " foo " that contains a directory " bar " and * we find an entry [ bar - > " . " - > bar ] , we instead return the entry for bar in its parent , [ foo * - > " bar " - > bar ] . */ @ Nullable private DirectoryEntry getRealEntry ( DirectoryEntry entry ) { } }
Name name = entry . name ( ) ; if ( name . equals ( Name . SELF ) || name . equals ( Name . PARENT ) ) { Directory dir = toDirectory ( entry . file ( ) ) ; assert dir != null ; return dir . entryInParent ( ) ; } else { return entry ; }
public class HelloReefAzBatch { /** * Start the Hello REEF job with the Azure Batch runtime . * @ param args command line parameters . * @ throws InjectionException configuration error . * @ throws IOException */ public static void main ( final String [ ] args ) throws InjectionException , IOException { } }
final Configuration partialConfiguration = getEnvironmentConfiguration ( ) ; final Injector injector = Tang . Factory . getTang ( ) . newInjector ( partialConfiguration ) ; final AzureBatchRuntimeConfigurationProvider runtimeConfigurationProvider = injector . getInstance ( AzureBatchRuntimeConfigurationProvider . class ) ; final Configuration driverConfiguration = getDriverConfiguration ( ) ; try ( final REEF reef = Tang . Factory . getTang ( ) . newInjector ( runtimeConfigurationProvider . getAzureBatchRuntimeConfiguration ( ) ) . getInstance ( REEF . class ) ) { reef . submit ( driverConfiguration ) ; } LOG . log ( Level . INFO , "Job Submitted" ) ;
public class StorageSnippets {
    /**
     * Lists the ACL entries of the given blob generation.
     *
     * @param bucketName     name of the bucket containing the blob
     * @param blobName       name of the blob
     * @param blobGeneration generation of the blob to inspect
     * @return the blob's ACL entries
     */
    public List<Acl> listBlobAcls(String bucketName, String blobName, long blobGeneration) {
        // [START listBlobAcls]
        BlobId blobId = BlobId.of(bucketName, blobName, blobGeneration);
        List<Acl> acls = storage.listAcls(blobId);
        for (Acl acl : acls) {
            // do something with ACL entry
        }
        // [END listBlobAcls]
        return acls;
    }
}
public class PathImpl {
    /**
     * Returns the scheme portion of a uri. Since schemes are case-insensitive,
     * normalize them to lower case.
     *
     * Bug fix: the digit range check previously read {@code ch >= '0' && ch <= '0'},
     * which accepted only the character '0' and therefore rejected any scheme
     * containing the digits 1-9 (e.g. "s3"). RFC 3986 allows
     * ALPHA *( ALPHA / DIGIT / "+" / "-" / "." ), so the upper bound is now '9'.
     *
     * @param uri the uri to scan; may be null or empty
     * @return the lower-cased scheme, or {@code null} when the uri has no
     *         well-formed scheme prefix
     */
    protected String scanScheme(String uri) {
        if (uri == null) {
            return null;
        }
        int length = uri.length();
        if (length == 0) {
            return null;
        }
        int ch = uri.charAt(0);
        // A scheme must start with a letter.
        if (ch >= 'a' && ch <= 'z' || ch >= 'A' && ch <= 'Z') {
            for (int i = 1; i < length; i++) {
                ch = uri.charAt(i);
                if (ch == ':') {
                    return uri.substring(0, i).toLowerCase(Locale.ENGLISH);
                }
                // Remaining scheme chars: ALPHA / DIGIT / "+" / "-" / "."
                if (!(ch >= 'a' && ch <= 'z'
                      || ch >= 'A' && ch <= 'Z'
                      || ch >= '0' && ch <= '9'
                      || ch == '+' || ch == '-' || ch == '.')) {
                    break;
                }
            }
        }
        return null;
    }
}
public class LanguageProfileReader {
  /**
   * Reads a {@link LanguageProfile} from a File in UTF-8.
   *
   * @param profileFile the profile file to read
   * @return the parsed and converted language profile
   * @throws IOException if the file cannot be read or parsed
   */
  public LanguageProfile read(File profileFile) throws IOException {
    // Parse the legacy on-disk format via internalReader, then convert the
    // result into the current LanguageProfile model.
    return OldLangProfileConverter.convert(internalReader.read(profileFile));
  }
}
public class SynchronousEventProcessor {
  /**
   * Queues the event and, unless a dispatch loop is already active on this
   * processor, runs the dispatch loop synchronously until the queue drains.
   *
   * (non-Javadoc)
   * @see EventProcessor#add(EventBase, org.jgrapes.core.Channel[])
   */
  @Override
  public <T extends Event<?>> T add(T event, Channel... channels) {
    // Record provenance: the event that caused this one and its processor.
    ((EventBase<?>) event).generatedBy(newEventsParent.get());
    ((EventBase<?>) event).processedBy(this);
    synchronized (queue) {
      queue.add(event, channels);
    }
    if (isRunning) {
      // Re-entrant add while run() below is draining the queue:
      // the already-active loop will pick the new event up.
      return event;
    }
    isRunning = true;
    // Register as an active generator for the duration of the dispatch.
    GeneratorRegistry.instance().add(this);
    run();
    isRunning = false;
    return event;
  }
}
public class Proxies { /** * Unwraps the proxy type if javassist or CGLib is used * @ param type the class type * @ return the unproxied class name */ public static String unwrapProxyClassName ( Class < ? > type ) { } }
String typeName = null ; if ( type != null ) { if ( type . getName ( ) . contains ( "$$EnhancerByCGLIB$$" ) ) { typeName = CGLIB_CLASSNAME_REGEXP . matcher ( type . getName ( ) ) . replaceAll ( "$1" ) ; } else if ( type . getName ( ) . contains ( "_jvst" ) ) { typeName = JAVASSIST_CLASSNAME_REGEXP . matcher ( type . getName ( ) ) . replaceAll ( "$1" ) ; } else { typeName = type . getName ( ) ; } } return typeName ;
public class FacetInspector { /** * Inspect the given { @ link Class } for all { @ link FacetConstraintType # OPTIONAL } dependency { @ link Facet } types . This * method inspects the entire constraint tree . */ public static < FACETEDTYPE extends Faceted < FACETTYPE > , FACETTYPE extends Facet < FACETEDTYPE > > Set < Class < FACETTYPE > > getAllOptionalFacets ( final Class < FACETTYPE > inspectedType ) { } }
Set < Class < FACETTYPE > > seen = new LinkedHashSet < Class < FACETTYPE > > ( ) ; return getAllRelatedFacets ( seen , inspectedType , FacetConstraintType . OPTIONAL ) ;
public class SSLReadServiceContext {
  /**
   * Release the potential buffers that were created.
   * Idempotent: a second call returns immediately once closeCalled is set.
   */
  public void close() {
    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) {
      Tr.entry(tc, "close, vc=" + getVCHash());
    }
    synchronized (closeSync) {
      if (closeCalled) {
        // Already closed by another caller; nothing left to release.
        return;
      }
      closeCalled = true;
      // Release the network-side (encrypted) buffer, if any.
      if (null != this.netBuffer) {
        if (TraceComponent.isAnyTracingEnabled() && tc.isEventEnabled()) {
          Tr.event(tc, "Releasing netBuffer during close " + SSLUtils.getBufferTraceInfo(netBuffer));
        }
        this.netBuffer.release();
        this.netBuffer = null;
      }
      cleanupDecBuffers();
      // Release decrypted data the application never consumed.
      if (unconsumedDecData != null) {
        if (TraceComponent.isAnyTracingEnabled() && tc.isEventEnabled()) {
          Tr.event(tc, "Releasing unconsumed decrypted buffers, " + SSLUtils.getBufferTraceInfo(unconsumedDecData));
        }
        WsByteBufferUtils.releaseBufferArray(unconsumedDecData);
        unconsumedDecData = null;
      }
    }
    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) {
      Tr.exit(tc, "close");
    }
  }
}
public class KdTreeSearch1Standard {
  /**
   * Recursive step for finding the closest point.
   * Mutates the search state fields: 'closest' (best node so far) and
   * 'bestDistanceSq' (its squared distance); 'target' is the query point.
   */
  private void stepClosest(KdTree.Node node) {
    if (node == null)
      return;

    if (node.isLeaf()) {
      // a leaf can be empty.
      if (node.point != null) {
        double distSq = distance.distance((P) node.point, target);
        if (distSq <= bestDistanceSq) {
          // strict '<' (or no candidate yet) so ties keep the earlier match
          if (closest == null || distSq < bestDistanceSq) {
            closest = node;
            bestDistanceSq = distSq;
          }
        }
      }
      return;
    } else {
      // Interior nodes also carry a point; consider it as a candidate too.
      double distSq = distance.distance((P) node.point, target);
      if (distSq <= bestDistanceSq) {
        if (closest == null || distSq < bestDistanceSq) {
          closest = node;
          bestDistanceSq = distSq;
        }
      }
    }

    // select the most promising branch to investigate first
    KdTree.Node nearer, further;
    double splitValue = distance.valueAt((P) node.point, node.split);
    double targetAtSplit = distance.valueAt(target, node.split);
    if (targetAtSplit <= splitValue) {
      nearer = node.left;
      further = node.right;
    } else {
      nearer = node.right;
      further = node.left;
    }
    stepClosest(nearer);

    // See if it is possible for 'further' to contain a better node:
    // descend only when the splitting plane is within the best distance.
    double dx = splitValue - targetAtSplit;
    double dx2 = dx * dx;
    if (dx2 <= bestDistanceSq) {
      if (closest == null || dx2 < bestDistanceSq)
        stepClosest(further);
    }
  }
}
public class FibonacciHeap { /** * Decreases the key of the specified element to the new priority . If the * new priority is greater than the old priority , this function throws an * IllegalArgumentException . The new priority must be a finite double , * so you cannot set the priority to be NaN , or + / - infinity . Doing * so also throws an IllegalArgumentException . * It is assumed that the entry belongs in this heap . For efficiency * reasons , this is not checked at runtime . * @ param entry The element whose priority should be decreased . * @ param newPriority The new priority to associate with this entry . * @ throws IllegalArgumentException If the new priority exceeds the old * priority , or if the argument is not a finite double . */ public void decreaseKey ( Entry < T > entry , double newPriority ) { } }
checkPriority ( newPriority ) ; if ( newPriority > entry . mPriority ) throw new IllegalArgumentException ( "New priority exceeds old." ) ; /* Forward this to a helper function . */ decreaseKeyUnchecked ( entry , newPriority ) ;
public class GalleryImagesInner { /** * Get gallery image . * @ param resourceGroupName The name of the resource group . * @ param labAccountName The name of the lab Account . * @ param galleryImageName The name of the gallery Image . * @ throws IllegalArgumentException thrown if parameters fail the validation * @ return the observable to the GalleryImageInner object */ public Observable < GalleryImageInner > getAsync ( String resourceGroupName , String labAccountName , String galleryImageName ) { } }
return getWithServiceResponseAsync ( resourceGroupName , labAccountName , galleryImageName ) . map ( new Func1 < ServiceResponse < GalleryImageInner > , GalleryImageInner > ( ) { @ Override public GalleryImageInner call ( ServiceResponse < GalleryImageInner > response ) { return response . body ( ) ; } } ) ;
public class RemoteSessionServer { /** * Shutdown the server . */ public void shutdown ( ) { } }
Environment env = ( ( BaseApplication ) m_app ) . getEnvironment ( ) ; if ( m_app != null ) m_app . free ( ) ; m_app = null ; if ( ClassServiceUtility . getClassService ( ) . getClassFinder ( null ) != null ) ClassServiceUtility . getClassService ( ) . getClassFinder ( null ) . shutdownService ( null , this ) ; if ( env != null ) env . freeIfDone ( ) ; // x System . exit ( 0 ) ;
public class MessageTemplate { /** * Creates a default { @ link AsyncMessage } for a specified destination * @ param destination the target destination for the message * @ return the created message */ public AsyncMessage createMessageForDestination ( String destination ) { } }
AsyncMessage message = this . defaultMessageCreator . createMessage ( ) ; message . setDestination ( destination ) ; return message ;
public class Vacuum { /** * Get the current soundpack installation status . * @ return The current soundpack installation status . * @ throws CommandExecutionException When there has been a error during the communication or the response was invalid . */ public VacuumSounpackInstallState soundpackInstallStatus ( ) throws CommandExecutionException { } }
JSONArray ret = sendToArray ( "get_sound_progress" ) ; if ( ret == null ) return null ; return new VacuumSounpackInstallState ( ret . optJSONObject ( 0 ) ) ;
public class HttpUtil {
  /**
   * Sends an HTTP GET request.
   *
   * @param urlString the target URL
   * @param customCharset charset to fall back to when one cannot be detected from the response
   * @return the response body; when only the status code is checked, "" on success and null otherwise
   */
  public static String get(String urlString, Charset customCharset) {
    // Delegate to the fluent HttpRequest API and return the decoded body.
    return HttpRequest.get(urlString).charset(customCharset).execute().body();
  }
}
public class SheetBindingErrors { /** * 指定したパスで現在のパスを初期化します 。 * < p > nullまたは空文字を与えると 、 トップに移動します 。 * @ param nestedPath ネストするパス */ public void setNestedPath ( final String nestedPath ) { } }
final String canonicalPath = normalizePath ( nestedPath ) ; this . nestedPathStack . clear ( ) ; if ( canonicalPath . isEmpty ( ) ) { this . currentPath = buildPath ( ) ; } else { pushNestedPath ( canonicalPath ) ; }
public class Pipelines { /** * Creates a ' watching ' pipeline . * @ param session the maven session * @ param baseDir the project ' s base directory * @ param mojo the ' run ' mojo * @ param pomFileMonitoring flag enabling or disabling the pom file monitoring * @ return the created pipeline */ public static Pipeline watchers ( MavenSession session , File baseDir , Mojo mojo , boolean pomFileMonitoring ) { } }
return new Pipeline ( mojo , baseDir , Watchers . all ( session ) , pomFileMonitoring ) ;
public class StatsOptions { /** * Adds a facet on field to the statistics to be requested . * @ param fieldName * @ return */ public StatsOptions addFacet ( String fieldName ) { } }
Assert . hasText ( fieldName , "Fieldname for facet statistics must not be blank." ) ; return addFacet ( new SimpleField ( fieldName ) ) ;
public class GroupMarshaller {
  /**
   * Marshall the given parameter object.
   * Field order mirrors the generated protocol bindings and must be kept.
   */
  public void marshall(Group group, ProtocolMarshaller protocolMarshaller) {
    if (group == null) {
      throw new SdkClientException("Invalid argument passed to marshall(...)");
    }
    try {
      protocolMarshaller.marshall(group.getArn(), ARN_BINDING);
      protocolMarshaller.marshall(group.getGroupName(), GROUPNAME_BINDING);
      protocolMarshaller.marshall(group.getDescription(), DESCRIPTION_BINDING);
      protocolMarshaller.marshall(group.getPrincipalId(), PRINCIPALID_BINDING);
    } catch (Exception e) {
      // Wrap any marshalling failure in the SDK client exception, keeping the cause.
      throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
    }
  }
}
public class Environment { /** * Parses an environment file from an URL . */ public static Environment parse ( URL url ) throws IOException { } }
try { return new ConfigUtil . LowerCaseYamlMapper ( ) . readValue ( url , Environment . class ) ; } catch ( JsonMappingException e ) { throw new SqlClientException ( "Could not parse environment file. Cause: " + e . getMessage ( ) ) ; }
public class DynamoDBTableMapper { /** * Deletes the table and ignores the { @ code ResourceNotFoundException } if * it does not already exist . * @ return True if the table was deleted , or false if the table did not exist . * @ see com . amazonaws . services . dynamodbv2 . AmazonDynamoDB # deleteTable * @ see com . amazonaws . services . dynamodbv2 . model . DeleteTableRequest */ public boolean deleteTableIfExists ( ) { } }
try { deleteTable ( ) ; } catch ( final ResourceNotFoundException e ) { if ( LOG . isTraceEnabled ( ) ) { LOG . trace ( "Table does not exist, no need to delete" , e ) ; } return false ; } return true ;
public class SVGUtil {
  /**
   * Builds an SVG {@code <line>} element.
   *
   * @param x1 start x coordinate
   * @param y1 start y coordinate
   * @param x2 end x coordinate
   * @param y2 end y coordinate
   * @param style inline CSS style for the line
   * @return the SVG markup, terminated by a newline
   */
  public static String line(String x1, String y1, String x2, String y2, String style) {
    return new StringBuilder("<line x1=\"").append(x1)
        .append("\" y1=\"").append(y1)
        .append("\" x2=\"").append(x2)
        .append("\" y2=\"").append(y2)
        .append("\" style=\"").append(style)
        .append("\" />\n")
        .toString();
  }
}
public class ModelsImpl {
  /**
   * Gets information about the prebuilt entity models.
   *
   * @param appId The application ID.
   * @param versionId The version ID.
   * @param listPrebuiltsOptionalParameter the object representing the optional parameters to be set before calling this API
   * @throws IllegalArgumentException thrown if parameters fail the validation
   * @return the observable to the List&lt;PrebuiltEntityExtractor&gt; object
   */
  public Observable<ServiceResponse<List<PrebuiltEntityExtractor>>> listPrebuiltsWithServiceResponseAsync(UUID appId, String versionId, ListPrebuiltsOptionalParameter listPrebuiltsOptionalParameter) {
    // Validate required client state and parameters before dispatching.
    if (this.client.endpoint() == null) {
      throw new IllegalArgumentException("Parameter this.client.endpoint() is required and cannot be null.");
    }
    if (appId == null) {
      throw new IllegalArgumentException("Parameter appId is required and cannot be null.");
    }
    if (versionId == null) {
      throw new IllegalArgumentException("Parameter versionId is required and cannot be null.");
    }
    // Unpack optional paging parameters (null when the caller supplied none).
    final Integer skip = listPrebuiltsOptionalParameter != null ? listPrebuiltsOptionalParameter.skip() : null;
    final Integer take = listPrebuiltsOptionalParameter != null ? listPrebuiltsOptionalParameter.take() : null;
    return listPrebuiltsWithServiceResponseAsync(appId, versionId, skip, take);
  }
}
public class HibernateClient { /** * Instantiate entity . * @ param entityClass * the entity class * @ param entity * the entity * @ return the object */ private Object instantiateEntity ( Class entityClass , Object entity ) { } }
try { if ( entity == null ) { return entityClass . newInstance ( ) ; } return entity ; } catch ( InstantiationException e ) { log . error ( "Error while instantiating " + entityClass + ", Caused by: " , e ) ; } catch ( IllegalAccessException e ) { log . error ( "Error while instantiating " + entityClass + ", Caused by: " , e ) ; } return null ;
public class RandomUtil { /** * 获得指定范围内的随机数 * @ param scale 保留小数位数 * @ param roundingMode 保留小数的模式 { @ link RoundingMode } * @ return 随机数 * @ since 4.0.8 */ public static double randomDouble ( int scale , RoundingMode roundingMode ) { } }
return NumberUtil . round ( randomDouble ( ) , scale , roundingMode ) . doubleValue ( ) ;
public class Filter {
  /**
   * Main method of this class.
   *
   * @param root node to start filtering from
   * @param result action receiving matches
   * @throws IOException as thrown by the specified FileTask
   */
  public void invoke(Node root, Action result) throws IOException {
    // Pass mutable copies of the include/exclude lists so the recursion can
    // modify them without mutating this filter's configuration.
    doInvoke(0, root, root.isLink(), new ArrayList<>(includes), new ArrayList<>(excludes), result);
  }
}
public class SolrResultPage { /** * ( non - Javadoc ) * @ see org . springframework . data . solr . core . query . result . SpellcheckQueryResult # getAlternatives ( java . lang . String ) */ @ Override public Collection < Alternative > getAlternatives ( String term ) { } }
return suggestions . containsKey ( term ) ? Collections . < Alternative > unmodifiableList ( this . suggestions . get ( term ) ) : Collections . < Alternative > emptyList ( ) ;
public class AmazonCloudSearchClient { /** * Gets the scaling parameters configured for a domain . A domain ' s scaling parameters specify the desired search * instance type and replication count . For more information , see < a * href = " http : / / docs . aws . amazon . com / cloudsearch / latest / developerguide / configuring - scaling - options . html " * target = " _ blank " > Configuring Scaling Options < / a > in the < i > Amazon CloudSearch Developer Guide < / i > . * @ param describeScalingParametersRequest * Container for the parameters to the < code > < a > DescribeScalingParameters < / a > < / code > operation . Specifies the * name of the domain you want to describe . * @ return Result of the DescribeScalingParameters operation returned by the service . * @ throws BaseException * An error occurred while processing the request . * @ throws InternalException * An internal error occurred while processing the request . If this problem persists , report an issue from * the < a href = " http : / / status . aws . amazon . com / " target = " _ blank " > Service Health Dashboard < / a > . * @ throws ResourceNotFoundException * The request was rejected because it attempted to reference a resource that does not exist . * @ sample AmazonCloudSearch . DescribeScalingParameters */ @ Override public DescribeScalingParametersResult describeScalingParameters ( DescribeScalingParametersRequest request ) { } }
request = beforeClientExecution ( request ) ; return executeDescribeScalingParameters ( request ) ;
public class BufferedElementReader {
  /**
   * Creates a duplicate of this stream.
   * <p>Closing the duplicate will not close this stream.</p>
   * <p>Both streams have an independent current position.</p>
   *
   * @return Stream that can be closed without affecting this stream.
   */
  @Override
  public RandomElementReader duplicate() {
    if (closed) {
      throw new IllegalStateException("Reader is closed.");
    }
    // The duplicate keeps its own 'closed' flag and read cursor while
    // delegating all data access to the enclosing buffered reader.
    return new RandomElementReader() {
      private boolean closed = false;
      // Independent sequential-read cursor for this duplicate.
      private int nextElement = 0;

      @Override
      public Element readElement(int index) throws IOException {
        if (closed) {
          throw new IllegalStateException("Reader closed");
        }
        return BufferedElementReader.this.readElement(index);
      }

      @Override
      public ElementType getElementType() {
        return BufferedElementReader.this.getElementType();
      }

      @Override
      public int getCount() {
        return BufferedElementReader.this.getCount();
      }

      @Override
      public Element readElement() throws IOException {
        if (nextElement >= getCount()) {
          // End of stream for this duplicate's cursor.
          return null;
        }
        if (closed) {
          throw new IllegalStateException("Reader closed");
        }
        return BufferedElementReader.this.readElement(nextElement++);
      }

      @Override
      public void close() throws IOException {
        // Only marks this duplicate closed; the outer reader stays open.
        closed = true;
      }

      @Override
      public RandomElementReader duplicate() {
        return BufferedElementReader.this.duplicate();
      }

      @Override
      public boolean isClosed() {
        return closed;
      }
    };
  }
}
public class CreateBGPPeerRequestMarshaller {
  /**
   * Marshall the given parameter object.
   * Field order mirrors the generated protocol bindings and must be kept.
   */
  public void marshall(CreateBGPPeerRequest createBGPPeerRequest, ProtocolMarshaller protocolMarshaller) {
    if (createBGPPeerRequest == null) {
      throw new SdkClientException("Invalid argument passed to marshall(...)");
    }
    try {
      protocolMarshaller.marshall(createBGPPeerRequest.getVirtualInterfaceId(), VIRTUALINTERFACEID_BINDING);
      protocolMarshaller.marshall(createBGPPeerRequest.getNewBGPPeer(), NEWBGPPEER_BINDING);
    } catch (Exception e) {
      // Wrap any marshalling failure in the SDK client exception, keeping the cause.
      throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
    }
  }
}
public class MultiPageWidget {
  /**
   * Select or deselect an item at position {@code dataIndex}.
   *
   * @param dataIndex item position in the adapter
   * @param select operation to perform: select or deselect.
   * @return {@code true} if the requested operation is successful,
   *         {@code false} otherwise.
   * @throws IndexOutOfBoundsException if dataIndex is outside the adapter range
   */
  public boolean selectItem(int dataIndex, boolean select) {
    Log.d(Log.SUBSYSTEM.LAYOUT, TAG, "selectItem [%d] select [%b]", dataIndex, select);

    if (dataIndex < 0 || dataIndex >= mItemAdapter.getCount()) {
      throw new IndexOutOfBoundsException("Selection index [" + dataIndex + "] is out of bounds!");
    }
    boolean done = updateSelectedItemsList(dataIndex, select);
    if (done) {
      List<Widget> views = new ArrayList<>();
      if (mItemsPerPage >= 0) {
        // Fixed page size: only the page containing the item needs updating.
        int pageIndex = dataIndex / mItemsPerPage;
        Widget view = getListView(pageIndex);
        if (view != null) {
          views.add(view);
        }
      } else {
        // Unknown page size: fall back to scanning every page view.
        views = getAllViews();
      }
      for (Widget view : views) {
        // First view that actually contains the item triggers a relayout.
        if (selectItem(((ListWidget) view), dataIndex, select)) {
          requestLayout();
          break;
        }
      }
    }
    return done;
  }
}
public class GosuStringUtil { /** * < p > Checks that the String does not contain certain characters . < / p > * < p > A < code > null < / code > String will return < code > true < / code > . * A < code > null < / code > invalid character array will return < code > true < / code > . * An empty String ( " " ) always returns true . < / p > * < pre > * GosuStringUtil . containsNone ( null , * ) = true * GosuStringUtil . containsNone ( * , null ) = true * GosuStringUtil . containsNone ( " " , * ) = true * GosuStringUtil . containsNone ( " ab " , " " ) = true * GosuStringUtil . containsNone ( " abab " , " xyz " ) = true * GosuStringUtil . containsNone ( " ab1 " , " xyz " ) = true * GosuStringUtil . containsNone ( " abz " , " xyz " ) = false * < / pre > * @ param str the String to check , may be null * @ param invalidChars a String of invalid chars , may be null * @ return true if it contains none of the invalid chars , or is null * @ since 2.0 */ public static boolean containsNone ( String str , String invalidChars ) { } }
if ( str == null || invalidChars == null ) { return true ; } return containsNone ( str , invalidChars . toCharArray ( ) ) ;
public class PgResultSet {
  /**
   * {@inheritDoc}
   * <p>In normal use, the bytes represent the raw values returned by the backend. However, if the
   * column is an OID, then it is assumed to refer to a Large Object, and that object is returned as
   * a byte array.</p>
   * <p><b>Be warned</b> If the large object is huge, then you may run out of memory.</p>
   */
  @Override
  public byte[] getBytes(int columnIndex) throws SQLException {
    connection.getLogger().log(Level.FINEST, " getBytes columnIndex: {0}", columnIndex);
    checkResultSet(columnIndex);
    if (wasNullFlag) {
      // SQL NULL maps to a null array, per JDBC.
      return null;
    }

    if (isBinary(columnIndex)) {
      // If the data is already binary then just return it
      return thisRow[columnIndex - 1];
    }

    if (fields[columnIndex - 1].getOID() == Oid.BYTEA) {
      // Text-format bytea: decode the textual escape/hex representation.
      return trimBytes(columnIndex, PGbytea.toBytes(thisRow[columnIndex - 1]));
    } else {
      // Any other text-format column: return the raw bytes, trimmed.
      return trimBytes(columnIndex, thisRow[columnIndex - 1]);
    }
  }
}
public class StringFeatureAlphabet { /** * 查询字符串索引编号 * @ param str 字符串 * @ param indent 间隔 * @ return 字符串索引编号 , - 1表示词典中不存在字符串 */ public int lookupIndex ( String str , int indent ) { } }
if ( indent < 1 ) throw new IllegalArgumentException ( "Invalid Argument in FeatureAlphabet: " + indent ) ; int ret = data . get ( str ) ; if ( ret == - 1 && ! frozen ) { // 字典中没有 , 并且允许插入 synchronized ( this ) { data . put ( str , last ) ; ret = last ; last += indent ; } } // if ( ret = = 0) // System . out . println ( str ) ; return ret ;
public class Bean { /** * Returns an array of all the listeners which have been associated with the named property . * @ param propertyName the name of the property to lookup listeners * @ return all of the { @ code PropertyChangeListeners } associated with the named property or an * empty array if no listeners have been added * @ see # addPropertyChangeListener ( String , PropertyChangeListener ) * @ see # removePropertyChangeListener ( String , PropertyChangeListener ) * @ see # getPropertyChangeListeners ( ) */ @ Override public final synchronized PropertyChangeListener [ ] getPropertyChangeListeners ( String propertyName ) { } }
if ( changeSupport == null ) { return new PropertyChangeListener [ 0 ] ; } return changeSupport . getPropertyChangeListeners ( propertyName ) ;
public class ConstantPool { /** * Get or create a constant from the constant pool representing an array * class . * @ param dim Number of array dimensions . */ public ConstantClassInfo addConstantClass ( String className , int dim ) { } }
return ( ConstantClassInfo ) addConstant ( new ConstantClassInfo ( this , className , dim ) ) ;
public class GStreamerDevice {
  /**
   * Initialize webcam device.
   * Idempotent: the compareAndSet guard makes only the first call build the
   * GStreamer pipeline; later calls return immediately.
   */
  private synchronized void init() {
    if (!initialized.compareAndSet(false, true)) {
      // Already initialized by an earlier call.
      return;
    }

    LOG.debug("GStreamer webcam device initialization");

    pipe = new Pipeline(getName());

    // Platform-specific capture source configuration.
    source = ElementFactory.make(GStreamerDriver.getSourceBySystem(), "source");
    if (Platform.isWindows()) {
      source.set("device-index", deviceIndex);
    } else if (Platform.isLinux()) {
      source.set("device", videoFile.getAbsolutePath());
    } else if (Platform.isMacOSX()) {
      throw new IllegalStateException("not yet implemented");
    }

    // Sink receives decoded RGB frames via this device's callback.
    sink = new RGBDataSink(getName(), this);
    sink.setPassDirectBuffer(true);
    sink.getSinkElement().setMaximumLateness(LATENESS, TimeUnit.MILLISECONDS);
    sink.getSinkElement().setQOSEnabled(true);

    filter = ElementFactory.make("capsfilter", "capsfilter");
    jpegdec = ElementFactory.make("jpegdec", "jpegdec");

    // Probe supported resolutions from the source pad, then stop the pipeline.
    pipelineReady();
    resolutions = parseResolutions(source.getPads().get(0));
    pipelineStop();
  }
}
public class DeploymentMarshaller {
  /**
   * Marshall the given parameter object.
   * Field order mirrors the generated protocol bindings and must be kept.
   */
  public void marshall(Deployment deployment, ProtocolMarshaller protocolMarshaller) {
    if (deployment == null) {
      throw new SdkClientException("Invalid argument passed to marshall(...)");
    }
    try {
      protocolMarshaller.marshall(deployment.getDeploymentId(), DEPLOYMENTID_BINDING);
      protocolMarshaller.marshall(deployment.getStackId(), STACKID_BINDING);
      protocolMarshaller.marshall(deployment.getAppId(), APPID_BINDING);
      protocolMarshaller.marshall(deployment.getCreatedAt(), CREATEDAT_BINDING);
      protocolMarshaller.marshall(deployment.getCompletedAt(), COMPLETEDAT_BINDING);
      protocolMarshaller.marshall(deployment.getDuration(), DURATION_BINDING);
      protocolMarshaller.marshall(deployment.getIamUserArn(), IAMUSERARN_BINDING);
      protocolMarshaller.marshall(deployment.getComment(), COMMENT_BINDING);
      protocolMarshaller.marshall(deployment.getCommand(), COMMAND_BINDING);
      protocolMarshaller.marshall(deployment.getStatus(), STATUS_BINDING);
      protocolMarshaller.marshall(deployment.getCustomJson(), CUSTOMJSON_BINDING);
      protocolMarshaller.marshall(deployment.getInstanceIds(), INSTANCEIDS_BINDING);
    } catch (Exception e) {
      // Wrap any marshalling failure in the SDK client exception, keeping the cause.
      throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
    }
  }
}
public class ApnsClientBuilder { /** * < p > Sets the trusted certificate chain for the client under construction . If not set ( or { @ code null } ) , the * client will use the JVM ' s default trust manager . < / p > * < p > Callers will generally not need to set a trusted server certificate chain in normal operation , but may wish * to do so for < a href = " https : / / www . owasp . org / index . php / Certificate _ and _ Public _ Key _ Pinning " > certificate pinning < / a > * or connecting to a mock server for integration testing or benchmarking . < / p > * @ param certificates one or more trusted certificates * @ return a reference to this builder * @ since 0.8 */ public ApnsClientBuilder setTrustedServerCertificateChain ( final X509Certificate ... certificates ) { } }
this . trustedServerCertificatePemFile = null ; this . trustedServerCertificateInputStream = null ; this . trustedServerCertificates = certificates ; return this ;
public class MediaPackageOutputSettingsMarshaller {
  /**
   * Marshall the given parameter object.
   * MediaPackageOutputSettings has no members here, so the try block is
   * intentionally empty (generated code); only the null check applies.
   */
  public void marshall(MediaPackageOutputSettings mediaPackageOutputSettings, ProtocolMarshaller protocolMarshaller) {
    if (mediaPackageOutputSettings == null) {
      throw new SdkClientException("Invalid argument passed to marshall(...)");
    }
    try {
    } catch (Exception e) {
      throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
    }
  }
}
public class ModelMethod { /** * / * ( non - Javadoc ) * @ see com . abubusoft . kripton . processor . core . ModelWithAnnotation # getAnnotation ( java . lang . Class ) */ public ModelAnnotation getAnnotation ( Class < ? extends Annotation > value ) { } }
for ( ModelAnnotation item : annotations ) { if ( item . getName ( ) . equals ( value . getCanonicalName ( ) ) ) { return item ; } } return null ;
public class CheerleaderPlayer { /** * Pause the playback . */ public void pause ( ) { } }
checkState ( ) ; if ( mState == STATE_PLAYING ) { PlaybackService . pause ( getContext ( ) , mClientKey ) ; mState = STATE_PAUSED ; }
public class RESTClient {
  /**
   * Performs POST request.
   *
   * @param path Request path.
   * @throws IOException If error during HTTP connection or entity parsing occurs.
   * @throws RESTException If HTTP response code is non OK.
   */
  public Response post(String path) throws IOException, RESTException {
    // Delegate to the full overload with no payload and no extra headers.
    return post(path, null, null);
  }
}
public class FFMQJNDIContext {
  /**
   * (non-Javadoc)
   * @see javax.naming.Context#listBindings(javax.naming.Name)
   */
  @Override
  public NamingEnumeration<Binding> listBindings(Name name) throws NamingException {
    if (name.isEmpty())
      // Empty name: list this context's own bindings.
      return new ListOfBindings(bindings.keys());

    // Otherwise the name must resolve to a sub-context we can list.
    Object target = lookup(name);
    if (target instanceof Context) {
      try {
        return ((Context) target).listBindings("");
      } finally {
        // NOTE(review): the sub-context is closed before the caller consumes
        // the returned enumeration; this assumes the enumeration remains
        // valid after close — confirm.
        ((Context) target).close();
      }
    }
    throw new NotContextException(name + " cannot be listed");
  }
}
public class AmazonRDSClient { /** * Stops an Amazon RDS DB instance . When you stop a DB instance , Amazon RDS retains the DB instance ' s metadata , * including its endpoint , DB parameter group , and option group membership . Amazon RDS also retains the transaction * logs so you can do a point - in - time restore if necessary . * For more information , see < a * href = " https : / / docs . aws . amazon . com / AmazonRDS / latest / UserGuide / USER _ StopInstance . html " > Stopping an Amazon RDS DB * Instance Temporarily < / a > in the < i > Amazon RDS User Guide . < / i > * < note > * This command doesn ' t apply to Aurora MySQL and Aurora PostgreSQL . For Aurora clusters , use < a > StopDBCluster < / a > * instead . * < / note > * @ param stopDBInstanceRequest * @ return Result of the StopDBInstance operation returned by the service . * @ throws DBInstanceNotFoundException * < i > DBInstanceIdentifier < / i > doesn ' t refer to an existing DB instance . * @ throws InvalidDBInstanceStateException * The DB instance isn ' t in a valid state . * @ throws DBSnapshotAlreadyExistsException * < i > DBSnapshotIdentifier < / i > is already used by an existing snapshot . * @ throws SnapshotQuotaExceededException * The request would result in the user exceeding the allowed number of DB snapshots . * @ throws InvalidDBClusterStateException * The requested operation can ' t be performed while the cluster is in this state . * @ sample AmazonRDS . StopDBInstance * @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / rds - 2014-10-31 / StopDBInstance " target = " _ top " > AWS API * Documentation < / a > */ @ Override public DBInstance stopDBInstance ( StopDBInstanceRequest request ) { } }
request = beforeClientExecution ( request ) ; return executeStopDBInstance ( request ) ;
public class PolymerPassStaticUtils {
  /** Switches all "this.$.foo" to "this.$['foo']". */
  static void switchDollarSignPropsToBrackets(Node def, final AbstractCompiler compiler) {
    checkState(def.isObjectLit() || def.isClassMembers());
    for (Node keyNode : def.children()) {
      Node value = keyNode.getFirstChild();
      if (value != null && value.isFunction()) {
        // Walk each member function body looking for `$` property accesses.
        NodeUtil.visitPostOrder(value.getLastChild(), new NodeUtil.Visitor() {
          @Override
          public void visit(Node n) {
            // Match the `$` in the middle of a this.$.foo chain: a string
            // node "$" whose parent and grandparent are both GETPROPs.
            if (n.isString() && n.getString().equals("$") && n.getParent().isGetProp()
                && n.getGrandparent().isGetProp()) {
              Node dollarChildProp = n.getGrandparent();
              // Rewrite the property access into an element access: $['foo'].
              dollarChildProp.setToken(Token.GETELEM);
              compiler.reportChangeToEnclosingScope(dollarChildProp);
            }
          }
        });
      }
    }
  }
}
public class HomographyTotalLeastSquares {
  /**
   * P * P_plus * X * -1
   * Computes output = P * (P_plus * (-X slice)) as an N x 1 column.
   */
  static void computePPpX(DMatrixRMaj P, DMatrixRMaj P_plus, DMatrixRMaj X, int offsetX, DMatrixRMaj output) {
    final int N = P.numRows;
    output.reshape(N, 1);

    // A = P_plus * X <-- 2x1
    double a00 = 0, a10 = 0;
    for (int i = 0, indexX = offsetX; i < N; i++, indexX += 2) {
      // X entries are negated; reads every second element starting at offsetX.
      double x = -X.data[indexX];
      a00 += x * P_plus.data[i];
      a10 += x * P_plus.data[i + N];
    }

    // P * A — indexP advances twice per row: P is stored row-major with 2 columns.
    for (int i = 0, indexP = 0; i < N; i++) {
      output.data[i] = a00 * P.data[indexP++] + a10 * P.data[indexP++];
    }
  }
}
public class Probability { /** * This function returns the material nonimplication of the specified probabilities . The value * of the material nonimplication of two probabilities is P and not ( Q ) . * @ param probability1 The first probability . * @ param probability2 The second probability . * @ return The material nonimplication of the two probabilities . */ static public Probability sans ( Probability probability1 , Probability probability2 ) { } }
double p1 = probability1 . value ; double p2 = probability2 . value ; return new Probability ( p1 * ( 1.0d - p2 ) ) ;
public class DataXceiver {
  /**
   * Computes and replies with the block checksum (MD5 of the per-chunk CRC32
   * values) for the block named in the request header.
   *
   * Two storage layouts are handled: a separate checksum ("meta") file, whose
   * whole checksum stream is digested directly, and inline checksums, where
   * the CRC bytes are interleaved with data and must be picked out chunk by
   * chunk. On success the reply carries bytesPerCRC, crcPerBlock and the MD5;
   * on detected inline corruption an error status is written and the block is
   * reported to the name node.
   *
   * @param in stream positioned at the request header
   * @param versionAndOpcode protocol version/opcode already read from the wire
   * @throws IOException if the block is unknown, the data looks corrupted, or
   *         any stream operation fails
   */
  void getBlockChecksum ( DataInputStream in , VersionAndOpcode versionAndOpcode ) throws IOException {
    // Read the request header: namespace + block id/generation stamp.
    BlockChecksumHeader blockChecksumHeader = new BlockChecksumHeader ( versionAndOpcode ) ;
    blockChecksumHeader . readFields ( in ) ;
    final int namespaceId = blockChecksumHeader . getNamespaceId ( ) ;
    final Block block = new Block ( blockChecksumHeader . getBlockId ( ) , 0 ,
        blockChecksumHeader . getGenStamp ( ) ) ;
    DataOutputStream out = null ;
    InputStream rawStreamIn = null ;
    DataInputStream streamIn = null ;
    ReplicaToRead ri = datanode . data . getReplicaToRead ( namespaceId , block ) ;
    if ( ri == null ) {
      throw new IOException ( "Unknown block" ) ;
    }
    updateCurrentThreadName ( "getting checksum for block " + block ) ;
    try {
      int bytesPerCRC ;
      int checksumSize ;
      long crcPerBlock ;
      MD5Hash md5 ;
      if ( ! ri . isInlineChecksum ( ) ) {
        // Separate checksum file: digest the entire checksum stream after the
        // metadata header.
        rawStreamIn = BlockWithChecksumFileReader . getMetaDataInputStream ( datanode . data , namespaceId , block ) ;
        streamIn = new DataInputStream ( new BufferedInputStream ( rawStreamIn , BUFFER_SIZE ) ) ;
        final BlockMetadataHeader header = BlockMetadataHeader . readHeader ( streamIn ) ;
        final DataChecksum checksum = header . getChecksum ( ) ;
        bytesPerCRC = checksum . getBytesPerChecksum ( ) ;
        checksumSize = checksum . getChecksumSize ( ) ;
        // Number of CRC entries = (meta file length minus header) / entry size.
        crcPerBlock = ( ( ( BlockWithChecksumFileReader . MetaDataInputStream ) rawStreamIn ) . getLength ( )
            - BlockMetadataHeader . getHeaderSize ( ) ) / checksumSize ;
        // compute block checksum
        md5 = MD5Hash . digest ( streamIn ) ;
      } else {
        // Inline checksums: CRC bytes are interleaved with the data, so skip
        // over each data chunk and digest only the trailing checksum bytes.
        bytesPerCRC = ri . getBytesPerChecksum ( ) ;
        checksumSize = DataChecksum . getChecksumSizeByType ( ri . getChecksumType ( ) ) ;
        ReplicaToRead replica = datanode . data . getReplicaToRead ( namespaceId , block ) ;
        rawStreamIn = replica . getBlockInputStream ( datanode , 0 ) ;
        streamIn = new DataInputStream ( new BufferedInputStream ( rawStreamIn , BUFFER_SIZE ) ) ;
        long lengthLeft = ( ( FileInputStream ) rawStreamIn ) . getChannel ( ) . size ( )
            - BlockInlineChecksumReader . getHeaderSize ( ) ;
        if ( lengthLeft == 0 ) {
          // Empty block: no CRC entries; digest of the empty byte array.
          crcPerBlock = 0 ;
          md5 = MD5Hash . digest ( new byte [ 0 ] ) ;
        } else {
          // Ceiling division: the final chunk may be shorter than bytesPerCRC.
          crcPerBlock = ( lengthLeft - 1 ) / ( checksumSize + bytesPerCRC ) + 1 ;
          MessageDigest digester = MD5Hash . getDigester ( ) ;
          byte [ ] buffer = new byte [ checksumSize ] ;
          while ( lengthLeft > 0 ) {
            if ( lengthLeft >= bytesPerCRC + checksumSize ) {
              // Full chunk: skip the data bytes, read its checksum entry.
              streamIn . skip ( bytesPerCRC ) ;
              IOUtils . readFully ( streamIn , buffer , 0 , buffer . length ) ;
              lengthLeft -= bytesPerCRC + checksumSize ;
            } else if ( lengthLeft > checksumSize ) {
              // Final partial chunk: skip its (short) data, read its checksum.
              streamIn . skip ( lengthLeft - checksumSize ) ;
              IOUtils . readFully ( streamIn , buffer , 0 , buffer . length ) ;
              lengthLeft = 0 ;
            } else {
              // Fewer bytes remain than one checksum entry: the file layout is
              // inconsistent. Tell the client and the name node, then abort.
              out = new DataOutputStream ( NetUtils . getOutputStream ( s , datanode . socketWriteTimeout ) ) ;
              out . writeShort ( DataTransferProtocol . OP_STATUS_ERROR ) ;
              out . flush ( ) ;
              // report to name node the corruption.
              DataBlockScanner . reportBadBlocks ( block , namespaceId , datanode ) ;
              throw new IOException ( "File for namespace " + namespaceId + " block " + block
                  + " seems to be corrupted" ) ;
            }
            digester . update ( buffer ) ;
          }
          md5 = new MD5Hash ( digester . digest ( ) , checksumSize * crcPerBlock ) ;
        }
      }
      if ( LOG . isDebugEnabled ( ) ) {
        LOG . debug ( "block=" + block + ", bytesPerCRC=" + bytesPerCRC + ", crcPerBlock=" + crcPerBlock + ", md5=" + md5 ) ;
      }
      // write reply
      out = new DataOutputStream ( NetUtils . getOutputStream ( s , datanode . socketWriteTimeout ) ) ;
      out . writeShort ( DataTransferProtocol . OP_STATUS_SUCCESS ) ;
      out . writeInt ( bytesPerCRC ) ;
      out . writeLong ( crcPerBlock ) ;
      md5 . write ( out ) ;
      out . flush ( ) ;
    } finally {
      // Best-effort close of every stream that may have been opened.
      IOUtils . closeStream ( out ) ;
      if ( streamIn != null ) {
        IOUtils . closeStream ( streamIn ) ;
      }
      if ( rawStreamIn != null ) {
        IOUtils . closeStream ( rawStreamIn ) ;
      }
    }
  }
}
public class Marker { /** * Defines a text for this marker . This text can be * used as a description and will be used in tooltips . * @ param TEXT */ public void setText ( final String TEXT ) { } }
if ( null == text ) { _text = TEXT ; } else { text . set ( TEXT ) ; } fireMarkerEvent ( TEXT_CHANGED_EVENT ) ;
public class Permute { /** * Returns a list containing the current permutation . * @ param storage Optional storage . If null a new list will be declared . * @ return Current permutation */ public List < T > getPermutation ( List < T > storage ) { } }
if ( storage == null ) storage = new ArrayList < T > ( ) ; else storage . clear ( ) ; for ( int i = 0 ; i < list . size ( ) ; i ++ ) { storage . add ( get ( i ) ) ; } return storage ;
public class WebSocketClient { /** * Creates a new connection builder that can be used to create a web socket connection . * @ param worker The XnioWorker to use for the connection * @ param bufferPool The buffer pool * @ param uri The connection URI * @ return The connection builder */ public static ConnectionBuilder connectionBuilder ( XnioWorker worker , ByteBufferPool bufferPool , URI uri ) { } }
return new ConnectionBuilder ( worker , bufferPool , uri ) ;
public class BasicAuthenticationHttpContext { /** * { @ inheritDoc } */ public boolean handleSecurity ( HttpServletRequest request , HttpServletResponse response ) throws IOException { } }
if ( ! authenticator . authenticate ( request ) ) { response . setHeader ( "WWW-Authenticate" , HttpServletRequest . BASIC_AUTH + " realm=\"" + realm + "\"" ) ; response . sendError ( HttpServletResponse . SC_UNAUTHORIZED ) ; return false ; } else { return true ; }
public class PDTHelper { /** * Check if the two birthdays are equal . Equal birthdays are identified by * equal months and equal days . * @ param aDate1 * First date . May be < code > null < / code > . * @ param aDate2 * Second date . May be < code > null < / code > . * @ return < code > true < / code > if month and day are equal */ public static boolean birthdayEquals ( @ Nullable final LocalDate aDate1 , @ Nullable final LocalDate aDate2 ) { } }
return birthdayCompare ( aDate1 , aDate2 ) == 0 ;
public class Strings2 { /** * Returns a markup id that is JQuery - safe and could be used as a selector . * @ param component the component which markup id should be return * @ return the component ' s markup id that is escaped so that it could be used as JQuery selector */ public static CharSequence getMarkupId ( final Component component ) { } }
Args . notNull ( component , "component" ) ; String markupId = component . getMarkupId ( true ) ; return escapeMarkupId ( markupId ) ;
public class NetworkInterface { /** * Any tags assigned to the network interface . * @ return Any tags assigned to the network interface . */ public java . util . List < Tag > getTagSet ( ) { } }
if ( tagSet == null ) { tagSet = new com . amazonaws . internal . SdkInternalList < Tag > ( ) ; } return tagSet ;
public class Optimizer { /** * If a char sequence is preceded by a repeated any match , then replace with an * IndexOfMatcher . The index of operation seems to be optimized by the JDK and is * much faster . Example : { @ code " . * foo " = > indexOf ( " foo " ) } . */ static Matcher convertRepeatedAnyCharSeqToIndexOf ( Matcher matcher ) { } }
if ( matcher instanceof ZeroOrMoreMatcher ) { ZeroOrMoreMatcher zm1 = matcher . as ( ) ; Matcher prefix = PatternUtils . getPrefix ( zm1 . next ( ) ) ; if ( zm1 . repeated ( ) instanceof AnyMatcher && prefix instanceof CharSeqMatcher ) { String pattern = prefix . < CharSeqMatcher > as ( ) . pattern ( ) ; Matcher suffix = PatternUtils . getSuffix ( zm1 . next ( ) ) ; return new IndexOfMatcher ( pattern , suffix ) ; } } return matcher ;
public class AbstractDisplayer { /** * REFRESH TIMER */ @ Override public void setRefreshOn ( boolean enabled ) { } }
boolean changed = enabled != refreshEnabled ; refreshEnabled = enabled ; if ( changed ) { updateRefreshTimer ( ) ; }
public class BaseClient {
  /**
   * Initialise ComapiImpl client instance.
   *
   * Transitions the global state from NOT_INITIALISED to INITIALISING, loads
   * the session, optionally refreshes the FCM push token, and finally emits
   * the client instance. If the SDK is already initialised the instance is
   * emitted immediately; if initialisation is in progress an error is emitted.
   *
   * @param application Application context.
   * @param instance    Client instance.
   * @param adapter     Observables to callbacks adapter.
   * @return Observable returning client instance.
   */
  < E extends BaseClient > Observable < E > initialise ( @ NonNull final Application application , @ NonNull final E instance , @ NonNull final CallbackAdapter adapter ) {
    // Atomically claim the initialisation; only one caller may proceed.
    if ( state . compareAndSet ( GlobalState . NOT_INITIALISED , GlobalState . INITIALISING ) ) {
      return init ( application , adapter )
          // After core init, load any previously stored session.
          . concatMap ( new Func1 < Boolean , Observable < SessionData > > ( ) {
            @ Override public Observable < SessionData > call ( Boolean state ) {
              return loadSession ( state ) ;
            }
          } )
          . doOnNext ( session -> log . d ( session != null
              ? "Comapi initialised with session profile id : " + session . getProfileId ( )
              : "Comapi initialisation with no session." ) )
          . doOnError ( e -> {
            // log may still be null if init failed very early.
            if ( log != null ) {
              log . f ( "Error initialising ComapiImpl SDK. " + e . getMessage ( ) ,
                  new ComapiException ( "Error initialising ComapiImpl SDK." , e ) ) ;
            }
          } )
          . flatMap ( session -> {
            // With an active session and FCM enabled, refresh the push token;
            // token failures are swallowed and the session is still emitted.
            if ( state . get ( ) == GlobalState . SESSION_ACTIVE && config . isFcmEnabled ( ) ) {
              return instance . service . updatePushToken ( )
                  . doOnNext ( sessionComapiResultPair -> log . d ( "Push token updated" ) )
                  . doOnError ( throwable -> log . f ( "Error updating push token" , throwable ) )
                  . map ( ( Func1 < Pair < SessionData , ComapiResult < Void > > , Object > ) resultPair -> resultPair . first )
                  . onErrorReturn ( new Func1 < Throwable , SessionData > ( ) {
                    @ Override public SessionData call ( Throwable throwable ) {
                      return session ;
                    }
                  } ) ;
            }
            return Observable . fromCallable ( ( ) -> session ) ;
          } )
          // Callers receive the client instance, not the session.
          . map ( result -> instance ) ;
    } else if ( state . get ( ) >= GlobalState . INITIALISED ) {
      // Already initialised: just hand back the instance.
      return Observable . fromCallable ( ( ) -> instance ) ;
    } else {
      // A concurrent initialise is still running.
      return Observable . error ( new ComapiException ( "Initialise in progress. Shouldn't be called twice. Ignoring." ) ) ;
    }
  }
}
public class TemplatesApi { /** * Gets a page image from a template for display . * Retrieves a page image for display from the specified template . * @ param accountId The external account number ( int ) or account ID Guid . ( required ) * @ param templateId The ID of the template being accessed . ( required ) * @ param documentId The ID of the document being accessed . ( required ) * @ param pageNumber The page number being accessed . ( required ) * @ return byte [ ] */ public byte [ ] getDocumentPageImage ( String accountId , String templateId , String documentId , String pageNumber ) throws ApiException { } }
return getDocumentPageImage ( accountId , templateId , documentId , pageNumber , null ) ;
public class ListGroupsResult { /** * A list of GroupIdentifier objects . Each identifier is an object that contains both the GroupName and the * GroupArn . * < b > NOTE : < / b > This method appends the values to the existing list ( if any ) . Use * { @ link # setGroupIdentifiers ( java . util . Collection ) } or { @ link # withGroupIdentifiers ( java . util . Collection ) } if you * want to override the existing values . * @ param groupIdentifiers * A list of GroupIdentifier objects . Each identifier is an object that contains both the GroupName and the * GroupArn . * @ return Returns a reference to this object so that method calls can be chained together . */ public ListGroupsResult withGroupIdentifiers ( GroupIdentifier ... groupIdentifiers ) { } }
if ( this . groupIdentifiers == null ) { setGroupIdentifiers ( new java . util . ArrayList < GroupIdentifier > ( groupIdentifiers . length ) ) ; } for ( GroupIdentifier ele : groupIdentifiers ) { this . groupIdentifiers . add ( ele ) ; } return this ;
public class CPAttachmentFileEntryLocalServiceBaseImpl { /** * Deletes the cp attachment file entry from the database . Also notifies the appropriate model listeners . * @ param cpAttachmentFileEntry the cp attachment file entry * @ return the cp attachment file entry that was removed * @ throws PortalException */ @ Indexable ( type = IndexableType . DELETE ) @ Override public CPAttachmentFileEntry deleteCPAttachmentFileEntry ( CPAttachmentFileEntry cpAttachmentFileEntry ) throws PortalException { } }
return cpAttachmentFileEntryPersistence . remove ( cpAttachmentFileEntry ) ;
public class lbmonitor { /** * Use this API to unset the properties of lbmonitor resource . * Properties that need to be unset are specified in args array . */ public static base_response unset ( nitro_service client , lbmonitor resource , String [ ] args ) throws Exception { } }
lbmonitor unsetresource = new lbmonitor ( ) ; unsetresource . monitorname = resource . monitorname ; unsetresource . type = resource . type ; unsetresource . ipaddress = resource . ipaddress ; return unsetresource . unset_resource ( client , args ) ;
public class ForceConfig { /** * Create the force data from setup . * @ param configurer The configurer reference ( must not be < code > null < / code > ) . * @ return The force data . * @ throws LionEngineException If unable to read node . */ public static Force imports ( Configurer configurer ) { } }
Check . notNull ( configurer ) ; return imports ( configurer . getRoot ( ) ) ;