signature
stringlengths
43
39.1k
implementation
stringlengths
0
450k
public class OsUtils { /** * Executes the given command , redirecting stdout to the given file * and stderr to the given stream * @ param command array of commands to run * @ param file file to write stdout * @ param stderr output stream for stderr * @ return error code from system * @ throws java . io . IOException if problems * @ throws java . lang . InterruptedException if interrupted */ public static int executeToFile ( final String [ ] command , final File file , final OutputStream stderr ) throws IOException , InterruptedException { } }
return executeToStreams ( command , new FileOutputStream ( file ) , stderr ) ;
public class RedirectRule { /** * Sets the redirect path . * @ param path the redirect path */ public void setPath ( String path ) { } }
this . path = path ; List < Token > tokens = Tokenizer . tokenize ( path , true ) ; int tokenCount = 0 ; for ( Token t : tokens ) { if ( t . getType ( ) != TokenType . TEXT ) { tokenCount ++ ; } } if ( tokenCount > 0 ) { this . pathTokens = tokens . toArray ( new Token [ 0 ] ) ; } else { this . pathTokens = null ; }
public class Closure { /** * Support for Closure currying at a given index . * Parameters are supplied from index position " n " . * Typical usage : * < pre > * def caseInsensitive = { a , b { @ code - > } a . toLowerCase ( ) { @ code < = > } b . toLowerCase ( ) } as Comparator * def caseSensitive = { a , b { @ code - > } a { @ code < = > } b } as Comparator * def animals1 = [ ' ant ' , ' dog ' , ' BEE ' ] * def animals2 = animals1 + [ ' Cat ' ] * / / curry middle param of this utility method : * / / Collections # binarySearch ( List list , Object key , Comparator c ) * { @ code def catSearcher = Collections . & binarySearch . ncurry ( 1 , " cat " ) } * [ [ animals1 , animals2 ] , [ caseInsensitive , caseSensitive ] ] . combinations ( ) . each { a , c { @ code - > } * def idx = catSearcher ( a . sort ( c ) , c ) * print a . sort ( c ) . toString ( ) . padRight ( 22) * { @ code if ( idx < 0 ) println " Not found but would belong in position $ { - idx - 1 } " } * else println " Found at index $ idx " * / / { @ code = > } * / / [ ant , BEE , dog ] Not found but would belong in position 2 * / / [ ant , BEE , Cat , dog ] Found at index 2 * / / [ BEE , ant , dog ] Not found but would belong in position 2 * / / [ BEE , Cat , ant , dog ] Not found but would belong in position 3 * < / pre > * The position of the curried parameters will be calculated eagerly * and implies all arguments prior to the specified n index are supplied . * Default parameter values prior to the n index will not be available . * @ param n the index from which to bind parameters ( may be - ve in which case it will be normalized ) * @ param arguments the arguments to bind * @ return the new closure with its arguments bound * @ see # curry ( Object . . . ) */ public Closure < V > ncurry ( int n , final Object ... arguments ) { } }
return new CurriedClosure < V > ( n , this , arguments ) ;
public class AnalysisScreen { /** * Just before the view prints out the screen . * This is a good time to adjust the variables or screen fields before printing . * ( Called from the view in the printScreen method ) . */ public void prePrintReport ( ) { } }
// First step - Get the source and analysis files . Record recBasis = this . getBasisRecord ( ) ; // Record to analyze Record recSummary = this . getSummaryRecord ( ) ; // Record to summarize the data into if ( recSummary == null ) recSummary = this . makeDefaultAnalysisRecord ( recBasis ) ; if ( recSummary . getCounterField ( ) == recSummary . getKeyArea ( ) . getField ( 0 ) ) if ( recSummary . getKeyAreaCount ( ) > 1 ) recSummary . setKeyArea ( DBConstants . MAIN_KEY_FIELD + 1 ) ; // Step 2 - Get the field mappings ( keys and summary fields ) . BaseField [ ] [ ] mxKeyFields = this . getKeyMap ( recSummary , recBasis ) ; BaseField [ ] [ ] mxDataFields = this . getDataMap ( recSummary , recBasis ) ; // Step 3 - Read through the source file and update the summary file . try { while ( recBasis . hasNext ( ) ) { recBasis . next ( ) ; this . addSummary ( recSummary , mxKeyFields , mxDataFields ) ; } } catch ( DBException ex ) { ex . printStackTrace ( ) ; }
public class A_CmsImport { /** * Writes already imported access control entries for a given resource . < p > * @ param resource the resource assigned to the access control entries * @ param aceList the access control entries to create */ protected void importAccessControlEntries ( CmsResource resource , List < CmsAccessControlEntry > aceList ) { } }
if ( aceList . size ( ) == 0 ) { // no ACE in the list return ; } try { m_cms . importAccessControlEntries ( resource , aceList ) ; } catch ( CmsException exc ) { m_report . println ( Messages . get ( ) . container ( Messages . RPT_IMPORT_ACL_DATA_FAILED_0 ) , I_CmsReport . FORMAT_WARNING ) ; }
public class FormMappingOption { public OptionalThing < Function < Map < String , Object > , Map < String , Object > > > getRequestParameterMapFilter ( ) { } }
return requestParameterMapFilter ;
public class WSManagedConnectionFactoryImpl {

    /**
     * Get the log writer for this ManagedConnectionFactory instance.
     * The log writer is a character output stream to which all logging and tracing
     * messages for this ManagedConnectionFactory instance will be printed.
     * ApplicationServer manages the association of output stream with the
     * ManagedConnectionFactory. When a ManagedConnectionFactory object is created the
     * log writer is initially null, in other words, logging is disabled.
     *
     * @return a PrintWriter
     * @exception ResourceException - Possible causes for this exception are:
     *            1) getLogWriter on the dataSource failed
     */
    public final PrintWriter getLogWriter() throws ResourceException {
        // No underlying data source/driver yet: fall back to the locally
        // stored writer.
        if (dataSourceOrDriver == null) {
            return logWriter;
        }
        try {
            if (!Driver.class.equals(type)) {
                // Delegate to the wrapped DataSource's own log writer.
                return ((CommonDataSource) dataSourceOrDriver).getLogWriter();
            }
            // Return null for Driver since that is the default value which can't be modified
            return null;
        } catch (SQLException se) {
            // "1656" is the FFDC probe id identifying this catch site.
            FFDCFilter.processException(se, getClass().getName(), "1656", this);
            throw AdapterUtil.translateSQLException(se, this, false, getClass());
        }
    }
}
public class DateFormat { /** * Returns the current value for the specified BooleanAttribute for this instance * if attribute is missing false is returned . * @ see BooleanAttribute */ public boolean getBooleanAttribute ( BooleanAttribute key ) { } }
if ( key == DateFormat . BooleanAttribute . PARSE_PARTIAL_MATCH ) { key = DateFormat . BooleanAttribute . PARSE_PARTIAL_LITERAL_MATCH ; } return booleanAttributes . contains ( key ) ;
public class DateUtil { /** * Creates a label of the form " 9:15am " . TODO : support 24 hour time for people who go for that * sort of thing . If date is null the empty string is returned . */ public static String formatTime ( Date date ) { } }
return ( date == null ) ? "" : _tfmt . format ( date ) . toLowerCase ( ) ;
public class JobsInner {

    /**
     * List all directories and files inside the given directory of the output
     * directory (Only if the output directory is on Azure File Share or Azure
     * Storage container).
     *
     * @param nextPageLink The NextLink from the previous successful call to List operation.
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @throws CloudException thrown if the request is rejected by server
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent
     * @return the PagedList&lt;FileInner&gt; object if successful.
     */
    public PagedList<FileInner> listOutputFilesNext(final String nextPageLink) {
        // Block on the async single-page call to fetch the first page synchronously.
        ServiceResponse<Page<FileInner>> response = listOutputFilesNextSinglePageAsync(nextPageLink).toBlocking().single();
        // The returned PagedList fetches subsequent pages lazily, blocking on
        // the async call each time a new page is requested.
        return new PagedList<FileInner>(response.body()) {
            @Override
            public Page<FileInner> nextPage(String nextPageLink) {
                return listOutputFilesNextSinglePageAsync(nextPageLink).toBlocking().single().body();
            }
        };
    }
}
public class AdGroupEstimateRequest { /** * Gets the keywordEstimateRequests value for this AdGroupEstimateRequest . * @ return keywordEstimateRequests * The keywords to estimate . * < span class = " constraint ContentsDistinct " > This * field must contain distinct elements . < / span > * < span class = " constraint ContentsNotNull " > This * field must not contain { @ code null } elements . < / span > * < span class = " constraint Required " > This field is * required and should not be { @ code null } . < / span > */ public com . google . api . ads . adwords . axis . v201809 . o . KeywordEstimateRequest [ ] getKeywordEstimateRequests ( ) { } }
return keywordEstimateRequests ;
public class CmsToolBar {

    /**
     * Updates the app indicator site and project info.<p>
     */
    public void updateAppIndicator() {
        // Toggle the "online" style on the indicator to match the current project.
        if (CmsAppWorkplaceUi.isOnlineProject()) {
            m_appIndicator.addStyleName(OpenCmsTheme.TOOLABER_APP_INDICATOR_ONLINE);
        } else {
            m_appIndicator.removeStyleName(OpenCmsTheme.TOOLABER_APP_INDICATOR_ONLINE);
        }
        CmsObject cms = A_CmsUI.getCmsObject();
        String siteRoot = cms.getRequestContext().getSiteRoot();
        CmsSite site = OpenCms.getSiteManager().getSiteForSiteRoot(siteRoot);
        // Resolve a display name for the site: prefer the configured site
        // title, otherwise fall back to the title of the site's root folder.
        String siteName = null;
        if (site != null) {
            siteName = site.getTitle();
        } else {
            try {
                CmsResource folder = cms.readResource("/", CmsResourceFilter.ONLY_VISIBLE_NO_DELETED);
                siteName = OpenCms.getSiteManager().getSiteTitle(cms, folder);
            } catch (CmsException e) {
                // Best-effort: a missing title just falls through to the site root.
                LOG.warn("Error reading site title.", e);
            }
        }
        // Last resort is the raw site root path; otherwise localize the title.
        if (CmsStringUtil.isEmptyOrWhitespaceOnly(siteName)) {
            siteName = siteRoot;
        } else {
            siteName = CmsWorkplace.substituteSiteTitleStatic(siteName, UI.getCurrent().getLocale());
        }
        // Tooltip shows "<project> / <site>" as HTML.
        m_appIndicator.setDescription(
            CmsVaadinUtils.getMessageText(
                Messages.GUI_TOOLBAR_PROJECT_SITE_INFO_2,
                A_CmsUI.getCmsObject().getRequestContext().getCurrentProject().getName(),
                siteName),
            ContentMode.HTML);
    }
}
public class WalkingIterator { /** * Detaches the iterator from the set which it iterated over , releasing * any computational resources and placing the iterator in the INVALID * state . After < code > detach < / code > has been invoked , calls to * < code > nextNode < / code > or < code > previousNode < / code > will raise the * exception INVALID _ STATE _ ERR . */ public void detach ( ) { } }
if ( m_allowDetach ) { AxesWalker walker = m_firstWalker ; while ( null != walker ) { walker . detach ( ) ; walker = walker . getNextWalker ( ) ; } m_lastUsedWalker = null ; // Always call the superclass detach last ! super . detach ( ) ; }
public class HuobiAdapters {

    /**
     * Huobi currently doesn't have trade history API. We simulate it by using
     * the orders history.
     *
     * @param order the limit order to convert into a trade
     * @return the equivalent user trade
     */
    private static UserTrade adaptTrade(LimitOrder order) {
        // Fee = filled amount * limit price * fee rate, truncated to 8 decimals.
        // NOTE(review): `fee` is defined elsewhere in this class — presumably a
        // flat fee rate; confirm it matches the account's actual fee tier.
        BigDecimal feeAmount = order.getCumulativeAmount().multiply(order.getLimitPrice()).multiply(fee).setScale(8, RoundingMode.DOWN);
        return new UserTrade(
            order.getType(),
            order.getCumulativeAmount(),
            order.getCurrencyPair(),
            order.getLimitPrice(),
            order.getTimestamp(),
            null, // Trade id
            order.getId(), // Original order id
            feeAmount,
            // Fee is denominated in the counter currency (amount * price * rate).
            order.getCurrencyPair().counter);
    }
}
public class AmazonGameLiftClient { /** * Updates the current run - time configuration for the specified fleet , which tells Amazon GameLift how to launch * server processes on instances in the fleet . You can update a fleet ' s run - time configuration at any time after the * fleet is created ; it does not need to be in an < code > ACTIVE < / code > status . * To update run - time configuration , specify the fleet ID and provide a < code > RuntimeConfiguration < / code > object * with an updated set of server process configurations . * Each instance in a Amazon GameLift fleet checks regularly for an updated run - time configuration and changes how * it launches server processes to comply with the latest version . Existing server processes are not affected by the * update ; run - time configuration changes are applied gradually as existing processes shut down and new processes * are launched during Amazon GameLift ' s normal process recycling activity . * < b > Learn more < / b > * < a href = " https : / / docs . aws . amazon . com / gamelift / latest / developerguide / fleets - intro . html " > Working with Fleets < / a > . 
* < b > Related operations < / b > * < ul > * < li > * < a > CreateFleet < / a > * < / li > * < li > * < a > ListFleets < / a > * < / li > * < li > * < a > DeleteFleet < / a > * < / li > * < li > * Describe fleets : * < ul > * < li > * < a > DescribeFleetAttributes < / a > * < / li > * < li > * < a > DescribeFleetCapacity < / a > * < / li > * < li > * < a > DescribeFleetPortSettings < / a > * < / li > * < li > * < a > DescribeFleetUtilization < / a > * < / li > * < li > * < a > DescribeRuntimeConfiguration < / a > * < / li > * < li > * < a > DescribeEC2InstanceLimits < / a > * < / li > * < li > * < a > DescribeFleetEvents < / a > * < / li > * < / ul > * < / li > * < li > * Update fleets : * < ul > * < li > * < a > UpdateFleetAttributes < / a > * < / li > * < li > * < a > UpdateFleetCapacity < / a > * < / li > * < li > * < a > UpdateFleetPortSettings < / a > * < / li > * < li > * < a > UpdateRuntimeConfiguration < / a > * < / li > * < / ul > * < / li > * < li > * Manage fleet actions : * < ul > * < li > * < a > StartFleetActions < / a > * < / li > * < li > * < a > StopFleetActions < / a > * < / li > * < / ul > * < / li > * < / ul > * @ param updateRuntimeConfigurationRequest * Represents the input for a request action . * @ return Result of the UpdateRuntimeConfiguration operation returned by the service . * @ throws UnauthorizedException * The client failed authentication . Clients should not retry such requests . * @ throws NotFoundException * A service resource associated with the request could not be found . Clients should not retry such * requests . * @ throws InternalServiceException * The service encountered an unrecoverable internal failure while processing the request . Clients can retry * such requests immediately or after a waiting period . * @ throws InvalidRequestException * One or more parameter values in the request are invalid . Correct the invalid parameter values before * retrying . 
* @ throws InvalidFleetStatusException * The requested operation would cause a conflict with the current state of a resource associated with the * request and / or the fleet . Resolve the conflict before retrying . * @ sample AmazonGameLift . UpdateRuntimeConfiguration * @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / gamelift - 2015-10-01 / UpdateRuntimeConfiguration " * target = " _ top " > AWS API Documentation < / a > */ @ Override public UpdateRuntimeConfigurationResult updateRuntimeConfiguration ( UpdateRuntimeConfigurationRequest request ) { } }
request = beforeClientExecution ( request ) ; return executeUpdateRuntimeConfiguration ( request ) ;
public class ActivityServiceLocator {

    /**
     * For the given interface, get the stub implementation.
     * If this service has no port for the given interface,
     * then ServiceException is thrown.
     *
     * @param serviceEndpointInterface the service endpoint interface to obtain a stub for
     * @return a stub bound to this locator's endpoint address
     * @throws javax.xml.rpc.ServiceException if no stub exists for the
     *         interface or the stub could not be created
     */
    public java.rmi.Remote getPort(Class serviceEndpointInterface) throws javax.xml.rpc.ServiceException {
        try {
            if (com.google.api.ads.admanager.axis.v201805.ActivityServiceInterface.class.isAssignableFrom(serviceEndpointInterface)) {
                com.google.api.ads.admanager.axis.v201805.ActivityServiceSoapBindingStub _stub = new com.google.api.ads.admanager.axis.v201805.ActivityServiceSoapBindingStub(new java.net.URL(ActivityServiceInterfacePort_address), this);
                _stub.setPortName(getActivityServiceInterfacePortWSDDServiceName());
                return _stub;
            }
        } catch (java.lang.Throwable t) {
            // Any failure constructing the stub (malformed URL, Axis error) is
            // wrapped in the JAX-RPC checked exception type.
            throw new javax.xml.rpc.ServiceException(t);
        }
        // The requested interface is not served by this locator.
        throw new javax.xml.rpc.ServiceException("There is no stub implementation for the interface: " + (serviceEndpointInterface == null ? "null" : serviceEndpointInterface.getName()));
    }
}
public class ListAccountAliasesResult { /** * A list of aliases associated with the account . AWS supports only one alias per account . * < b > NOTE : < / b > This method appends the values to the existing list ( if any ) . Use * { @ link # setAccountAliases ( java . util . Collection ) } or { @ link # withAccountAliases ( java . util . Collection ) } if you want * to override the existing values . * @ param accountAliases * A list of aliases associated with the account . AWS supports only one alias per account . * @ return Returns a reference to this object so that method calls can be chained together . */ public ListAccountAliasesResult withAccountAliases ( String ... accountAliases ) { } }
if ( this . accountAliases == null ) { setAccountAliases ( new com . amazonaws . internal . SdkInternalList < String > ( accountAliases . length ) ) ; } for ( String ele : accountAliases ) { this . accountAliases . add ( ele ) ; } return this ;
public class DescribeRecordResult { /** * Information about the product created as the result of a request . For example , the output for a * CloudFormation - backed product that creates an S3 bucket would include the S3 bucket URL . * @ param recordOutputs * Information about the product created as the result of a request . For example , the output for a * CloudFormation - backed product that creates an S3 bucket would include the S3 bucket URL . */ public void setRecordOutputs ( java . util . Collection < RecordOutput > recordOutputs ) { } }
if ( recordOutputs == null ) { this . recordOutputs = null ; return ; } this . recordOutputs = new java . util . ArrayList < RecordOutput > ( recordOutputs ) ;
public class AbstractQueryBuilderFactory { /** * add parser after * @ param parser * @ param afterParser */ public void addRuleParserAfter ( IRuleParser parser , Class < ? extends IRuleParser > afterParser ) { } }
int index = getIndexOfClass ( ruleParsers , afterParser ) ; if ( index == - 1 ) { throw new ParserAddException ( "parser " + afterParser . getSimpleName ( ) + " has not been added" ) ; } ruleParsers . add ( index + 1 , parser ) ;
public class Log { /** * Simple method for logging a single information exception . */ public void info ( Throwable t ) { } }
if ( isEnabled ( ) && isInfoEnabled ( ) ) { dispatchLogException ( new LogEvent ( this , LogEvent . INFO_TYPE , t ) ) ; }
public class AbstractJacksonContext { /** * / * ( non - Javadoc ) * @ see com . abubusoft . kripton . AbstractContext # createSerializer ( java . io . OutputStream , com . fasterxml . jackson . core . JsonEncoding ) */ @ Override public JacksonWrapperSerializer createSerializer ( OutputStream out , JsonEncoding encoding ) { } }
try { JsonGenerator generator = innerFactory . createGenerator ( out , encoding ) ; // generator . setPrettyPrinter ( new MinimalPrettyPrinter ( ) ) ; return new JacksonWrapperSerializer ( generator , getSupportedFormat ( ) ) ; } catch ( IOException e ) { e . printStackTrace ( ) ; throw new KriptonRuntimeException ( e ) ; }
public class POSTagger {

    /**
     * Sets the dictionary; the boolean controls whether the word-segmentation
     * dictionary is set at the same time.
     *
     * @param dict the dictionary
     * @param isSetSegDict whether to also install the dictionary into the segmenter
     */
    public void setDictionary(Dictionary dict, boolean isSetSegDict) {
        // Clear any previously installed dictionary first (order matters: the
        // pipes and inferencer below are rebuilt from scratch).
        removeDictionary(isSetSegDict);
        if (cws != null && isSetSegDict)
            cws.setDictionary(dict);
        // Rebuild the dictionary-labeling pipe and chain it in front of the
        // existing feature pipe, keeping a reference to the old pipe so it can
        // be restored later (presumably by removeDictionary — confirm).
        dictPipe = null;
        dictPipe = new DictPOSLabel(dict, labels);
        oldfeaturePipe = featurePipe;
        featurePipe = new SeriesPipes(new Pipe[] { dictPipe, featurePipe });
        // Replace the decoder with a constrained Viterbi that honors the
        // dictionary labels.
        LinearViterbi dv = new ConstraintViterbi((LinearViterbi) getClassifier().getInferencer(), labels.size());
        getClassifier().setInferencer(dv);
    }
}
public class Util {

    /**
     * Cancels then closes a {@link PreparedStatement} and logs exceptions
     * without throwing. Does nothing if ps is null.
     *
     * @param ps the statement to close; may be null
     */
    static void closeQuietly(PreparedStatement ps) {
        try {
            // First determine whether the statement is already closed;
            // isClosed() can itself throw, in which case we assume closed
            // and skip the cancel/close below.
            boolean isClosed;
            try {
                if (ps != null)
                    isClosed = ps.isClosed();
                else
                    isClosed = true;
            } catch (SQLException e) {
                log.debug(e.getMessage());
                isClosed = true;
            }
            if (ps != null && !isClosed) {
                // Cancel first (best-effort) so a long-running execution is
                // interrupted before the statement is closed; a cancel
                // failure does not prevent the close.
                try {
                    ps.cancel();
                    log.debug("cancelled {}", ps);
                } catch (SQLException e) {
                    log.debug(e.getMessage());
                }
                ps.close();
                log.debug("closed {}", ps);
            }
        } catch (SQLException e) {
            // "Quietly": close failures are logged at debug and swallowed.
            log.debug(e.getMessage(), e);
        } catch (RuntimeException e) {
            log.debug(e.getMessage(), e);
        }
    }
}
public class PresenceNotifySender { /** * This method creates a NOTIFY message using the given parameters and sends it to the subscriber . * The request will be resent if challenged . Use this method only if you have previously called * processSubscribe ( ) . Use this method if you don ' t care about checking the response to the sent * NOTIFY , otherwise use sendStatefulNotify ( ) . * @ param subscriptionState - String to use as the subscription state . * @ param termReason - used only when subscriptionState = TERMINATED . * @ param body - NOTIFY body to put in the message * @ param timeLeft - expiry in seconds to put in the NOTIFY message ( used only when * subscriptionState = ACTIVE or PENDING ) . * @ param viaProxy If true , send the message to the proxy . In this case a Route header will be * added . Else send the message as is . * @ return true if successful , false otherwise ( call getErrorMessage ( ) for details ) . */ public boolean sendNotify ( String subscriptionState , String termReason , String body , int timeLeft , boolean viaProxy ) { } }
return sendNotify ( subscriptionState , termReason , body , timeLeft , null , null , null , null , viaProxy ) ;
public class CamelEndpointDeployerService {

    /**
     * Exposes an HTTP endpoint that will be served by the given
     * {@link HttpHandler} under the given {@link URI}'s path.
     *
     * @param uri determines the path and protocol under which the HTTP endpoint should be exposed
     * @param routingHandler an {@link HttpHandler} to use for handling HTTP
     *        requests sent to the given {@link URI}'s path
     */
    public void deploy(URI uri, final HttpHandler routingHandler) {
        final Set<Deployment> availableDeployments = hostSupplier.getValue().getDeployments();
        // An already-deployed endpoint is recognized by its
        // CamelEndpointDeployerHandler wrapper holding the very same (==)
        // routing handler instance; deploy only when none is found.
        if (!availableDeployments.stream()
                .anyMatch(deployment -> deployment.getHandler() instanceof CamelEndpointDeployerHandler
                        && ((CamelEndpointDeployerHandler) deployment.getHandler()).getRoutingHandler() == routingHandler)) {
            /* deploy only if the routing handler is not there already */
            doDeploy(
                uri,
                // plug the endpointHttpHandler into the servlet
                servletInstance -> servletInstance.setEndpointHttpHandler(new DelegatingEndpointHttpHandler(routingHandler)),
                // add the handler to the chain
                deploymentInfo -> deploymentInfo.addInnerHandlerChainWrapper(exchangeStoringHandlerWrapper),
                deployment -> {
                    // wrap the initial handler with our custom class so that we can recognize it at other places
                    final HttpHandler servletHandler = new CamelEndpointDeployerHandler(deployment.getHandler(), routingHandler);
                    deployment.setInitialHandler(servletHandler);
                });
        }
    }
}
public class TableCellBox { /** * Set the total width of the cell . The content width is computed automatically . * @ param width the width to be set */ public void setWidth ( int width ) { } }
content . width = width - border . left - padding . left - padding . right - border . right ; bounds . width = width ; wset = true ; updateChildSizes ( ) ;
public class ExceptionUtils { /** * Converts MjdbcException into MjdbcSQLException . * Useful in cases when internal logic thrown MjdbcException during processing of Query output . * This allows for user to catch only SQLException instead of SQLException and MjdbcException * @ param cause original MjdbcException which would be converted * @ throws MjdbcSQLException */ public static void rethrow ( MjdbcException cause ) throws MjdbcSQLException { } }
MjdbcSQLException ex = new MjdbcSQLException ( cause . getMessage ( ) ) ; ex . setStackTrace ( cause . getStackTrace ( ) ) ; throw ex ;
public class StringUtil {

    /** Matches an underscore followed by a lower-case letter, e.g. the "_b" in "foo_bar". */
    private static final Pattern UNDERSCORE_LETTER = Pattern.compile("_([a-z])");

    /**
     * Converts an UPPER_SNAKE_CASE (or lower_snake_case) string to camelCase,
     * e.g. {@code FOO_BAR_BAZ -> fooBarBaz}.
     *
     * @param targetStr the string to camelize; may be null
     * @return the camelized string, or null if the input was null
     */
    public static String camelize(String targetStr) {
        if (targetStr == null) {
            return null;
        }
        // Lower-case everything first (FOO_BAR -> foo_bar), then raise the
        // letter following each underscore. The pattern is compiled once as a
        // class constant instead of on every call.
        Matcher m = UNDERSCORE_LETTER.matcher(targetStr.toLowerCase());
        StringBuffer sb = new StringBuffer(targetStr.length());
        while (m.find()) {
            m.appendReplacement(sb, m.group(1).toUpperCase());
        }
        m.appendTail(sb);
        return sb.toString();
    }
}
public class BindDataSourceSubProcessor {

    /**
     * Analyze second round.
     *
     * @param annotations the annotations
     * @param roundEnv the round env
     * @return true, if successful
     */
    public boolean analyzeSecondRound(Set<? extends TypeElement> annotations, RoundEnvironment roundEnv) {
        parseBindType(roundEnv);
        // Put all @BindTable elements in beanElements
        for (Element item : roundEnv.getElementsAnnotatedWith(BindSqlType.class)) {
            // Only classes may carry @BindSqlType; anything else is a usage error.
            if (item.getKind() != ElementKind.CLASS) {
                String msg = String.format("%s %s, only class can be annotated with @%s annotation", item.getKind(), item, BindSqlType.class.getSimpleName());
                throw (new InvalidKindForAnnotationException(msg));
            }
            globalBeanElements.put(item.toString(), (TypeElement) item);
        }
        // generate dao
        for (Element item : roundEnv.getElementsAnnotatedWith(BindGeneratedDao.class)) {
            // Register the generated DAO under the class name it replaces, and
            // remember that this DAO was generated (not hand-written).
            String keyToReplace = AnnotationUtility.extractAsClassName(item, BindGeneratedDao.class, AnnotationAttributeType.DAO);
            globalDaoElements.put(keyToReplace, (TypeElement) item);
            globalDaoGenerated.add(keyToReplace);
        }
        // Returning false leaves the annotations unclaimed so other processors
        // may also handle them — TODO confirm this matches the intended contract.
        return false;
    }
}
public class DictionaryMatcher {

    /**
     * Gets the leet substitutions for the password by comparing it,
     * position by position, with its un-leeted form.
     *
     * @param password the password to get leet substitutions for
     * @param unleet_password the un-leeted form of the same password
     * @return a {@code List} of {@code Character[]} pairs (leet char, plain char)
     *         at each position where the two strings differ
     */
    private static List<Character[]> getLeetSub(final String password, final String unleet_password) {
        final List<Character[]> substitutions = new ArrayList<>();
        for (int pos = 0; pos < unleet_password.length(); pos++) {
            final char leetChar = password.charAt(pos);
            final char plainChar = unleet_password.charAt(pos);
            if (leetChar != plainChar) {
                substitutions.add(new Character[] { leetChar, plainChar });
            }
        }
        return substitutions;
    }
}
public class ExamplePointFeatureTracker {

    /**
     * Draws the tracked features onto the current frame and refreshes the GUI:
     * active tracks get a semi-unique per-track color, newly spawned tracks
     * are drawn green.
     */
    private void updateGUI(SimpleImageSequence<T> sequence) {
        BufferedImage orig = sequence.getGuiImage();
        Graphics2D g2 = orig.createGraphics();
        // draw tracks with semi-unique colors so you can track individual points with your eyes
        for (PointTrack p : tracker.getActiveTracks(null)) {
            // Derive stable RGB components from the track's featureId so the
            // same track keeps the same color across frames.
            int red = (int) (2.5 * (p.featureId % 100));
            int green = (int) ((255.0 / 150.0) * (p.featureId % 150));
            int blue = (int) (p.featureId % 255);
            VisualizeFeatures.drawPoint(g2, (int) p.x, (int) p.y, new Color(red, green, blue));
        }
        // draw tracks which have just been spawned green
        for (PointTrack p : tracker.getNewTracks(null)) {
            VisualizeFeatures.drawPoint(g2, (int) p.x, (int) p.y, Color.green);
        }
        // tell the GUI to update
        gui.setImage(orig);
        gui.repaint();
    }
}
public class ConsumerLogMessages { /** * Logs an error . * @ param logger * reference to the logger * @ param e * reference to the error */ public static void logError ( final Logger logger , final Error e ) { } }
logger . logError ( Level . ERROR , "Unexpected Error" , e ) ;
public class TableSession {

    /**
     * Move the current position and read the record (optionally read several records).
     *
     * @param iRelPosition relative Position to read the next record.
     * @return If I read 1 record, this is the record's data.
     * @return If at EOF/BOF, or error, returns the status code as an Integer.
     * @exception DBException File exception.
     * @exception RemoteException RMI exception.
     */
    public Object doMoveOne(int iRelPosition) throws DBException, RemoteException {
        try {
            // Serialize on the owning task: record buffers are shared state.
            synchronized (this.getTask()) {
                FieldList record = this.getMainRecord().move(iRelPosition);
                int iRecordStatus = DBConstants.RECORD_NORMAL;
                if (record == null) {
                    // No record: a forward move means EOF, a backward move means BOF.
                    if (iRelPosition >= 0)
                        iRecordStatus = DBConstants.RECORD_AT_EOF;
                    else
                        iRecordStatus = DBConstants.RECORD_AT_BOF;
                }
                // NOTE(review): the status is initialized to RECORD_NORMAL but
                // compared against NORMAL_RETURN — presumably those constants
                // share a value; confirm, otherwise the success branch is dead.
                if (iRecordStatus == DBConstants.NORMAL_RETURN) {
                    Record recordBase = this.getMainRecord();
                    int iFieldTypes = this.getFieldTypes(recordBase);
                    BaseBuffer buffer = new VectorBuffer(null, iFieldTypes);
                    if (!(recordBase instanceof QueryRecord)) {
                        // When the current table's record differs from the main
                        // record (multi-table case), tag the buffer with the
                        // target table name so the receiver can tell them apart.
                        Record recordTarget = recordBase.getTable().getCurrentTable().getRecord();
                        if (recordTarget != recordBase)
                            if (!recordTarget.getTableNames(false).equalsIgnoreCase(recordBase.getTableNames(false))) {
                                buffer.addHeader(DBParams.RECORD); // Since header count is not passed this specifies multitable
                                buffer.addHeader(recordTarget.getTableNames(false));
                            }
                        recordBase = recordTarget;
                    }
                    buffer.fieldsToBuffer(recordBase, iFieldTypes);
                    return buffer.getPhysicalData();
                } else {
                    return new Integer(iRecordStatus);
                }
            }
        } catch (DBException ex) {
            throw ex;
        } catch (Exception ex) {
            // Wrap any unexpected failure as a DBException for the remote caller.
            ex.printStackTrace();
            throw new DBException(ex.getMessage());
        }
    }
}
public class BaseCommandTask { /** * { @ inheritDoc } */ @ Override public void execute ( ExecutionContext context ) { } }
try { if ( ! validateArguments ( context ) ) { return ; } if ( ! populateCommonOptions ( context ) ) { return ; } doExecute ( context ) ; } finally { cleanUp ( context ) ; }
public class UBL20ValidatorBuilder { /** * Create a new validation builder . * @ param aClass * The UBL class to be validated . May not be < code > null < / code > . * @ return The new validation builder . Never < code > null < / code > . * @ param < T > * The UBL 2.0 document implementation type */ @ Nonnull public static < T > UBL20ValidatorBuilder < T > create ( @ Nonnull final Class < T > aClass ) { } }
return new UBL20ValidatorBuilder < > ( aClass ) ;
public class HeronClient { /** * Handle the timeout for a particular REQID */ protected void handleTimeout ( REQID rid ) { } }
if ( contextMap . containsKey ( rid ) ) { Object ctx = contextMap . get ( rid ) ; contextMap . remove ( rid ) ; responseMessageMap . remove ( rid ) ; onResponse ( StatusCode . TIMEOUT_ERROR , ctx , null ) ; } else { // Since we dont do cancel timer , this is because we already have // the response . So just disregard this timeout // TODO : - implement cancel timer to avoid this overhead }
public class AppiumDriverLocalService { /** * Stops this service is it is currently running . This method will attempt to block until the * server has been fully shutdown . * @ see # start ( ) */ @ Override public void stop ( ) { } }
lock . lock ( ) ; try { if ( process != null ) { destroyProcess ( ) ; } process = null ; } finally { lock . unlock ( ) ; }
public class RowTypeInfo { /** * Creates a { @ link RowTypeInfo } with projected fields . * @ param rowType The original RowTypeInfo whose fields are projected * @ param fieldMapping The field mapping of the projection * @ return A RowTypeInfo with projected fields . */ public static RowTypeInfo projectFields ( RowTypeInfo rowType , int [ ] fieldMapping ) { } }
TypeInformation [ ] fieldTypes = new TypeInformation [ fieldMapping . length ] ; String [ ] fieldNames = new String [ fieldMapping . length ] ; for ( int i = 0 ; i < fieldMapping . length ; i ++ ) { fieldTypes [ i ] = rowType . getTypeAt ( fieldMapping [ i ] ) ; fieldNames [ i ] = rowType . getFieldNames ( ) [ fieldMapping [ i ] ] ; } return new RowTypeInfo ( fieldTypes , fieldNames ) ;
public class ClassGraph {
    /**
     * Prints all of a class's relations: generalization (superclass and
     * {@code @extends} tags), realization (interfaces) and the association
     * relations declared via options.
     *
     * @param c the class whose relations are emitted
     */
    public void printRelations(ClassDoc c) {
        Options opt = optionProvider.getOptionsFor(c);
        if (hidden(c) || c.name().equals("")) // avoid phantom classes, they may pop up when the source uses annotations
            return;
        // Print generalization (through the Java superclass).
        // java.lang.Object is deliberately excluded to avoid a trivial edge on every class.
        Type s = c.superclassType();
        ClassDoc sc = s != null && !s.qualifiedTypeName().equals(Object.class.getName()) ? s.asClassDoc() : null;
        if (sc != null && !c.isEnum() && !hidden(sc))
            relation(opt, RelationType.EXTENDS, c, sc, null, null, null);
        // Print generalizations (through @extends tags)
        for (Tag tag : c.tags("extends"))
            if (!hidden(tag.text()))
                relation(opt, RelationType.EXTENDS, c, c.findClass(tag.text()), null, null, null);
        // Print realizations (Java interfaces)
        for (Type iface : c.interfaceTypes()) {
            ClassDoc ic = iface.asClassDoc();
            if (!hidden(ic))
                relation(opt, RelationType.IMPLEMENTS, c, ic, null, null, null);
        }
        // Print other associations, one pass per relation kind.
        allRelation(opt, RelationType.COMPOSED, c);
        allRelation(opt, RelationType.NAVCOMPOSED, c);
        allRelation(opt, RelationType.HAS, c);
        allRelation(opt, RelationType.NAVHAS, c);
        allRelation(opt, RelationType.ASSOC, c);
        allRelation(opt, RelationType.NAVASSOC, c);
        allRelation(opt, RelationType.DEPEND, c);
    }
}
public class ObjectType {
    /**
     * Checks that the prototype is an implicit prototype of this object. Since each object has an
     * implicit prototype, an implicit prototype's implicit prototype is also this implicit
     * prototype's.
     *
     * @param prototype any prototype based object
     * @return {@code true} if {@code prototype} is {@code equal} to any object in this object's
     *     implicit prototype chain.
     */
    @SuppressWarnings("ReferenceEquality")
    final boolean isImplicitPrototype(ObjectType prototype) {
        // Walk the implicit prototype chain. Note that `current` is re-assigned
        // inside the loop body (unwrapping templatized/proxy types) before the
        // identity comparison, so the step expression advances from the unwrapped type.
        for (ObjectType current = this; current != null; current = current.getImplicitPrototype()) {
            if (current.isTemplatizedType()) {
                current = current.toMaybeTemplatizedType().getReferencedType();
            }
            current = deeplyUnwrap(current);
            // The prototype should match exactly.
            // NOTE: the use of "==" here rather than isEquivalentTo is deliberate. This method
            // is very hot in the type checker and relying on identity improves performance of both
            // type checking/type inferrence and property disambiguation.
            if (current != null && current == prototype) {
                return true;
            }
        }
        return false;
    }
}
public class A_CmsStaticExportHandler {
    /**
     * Add the link sources of moved resources to the list of published resources.<p>
     *
     * Runs a fixpoint loop: every newly added link source may itself be the
     * destination of a move, so the scan repeats until no resource is added.
     *
     * @param cms the cms context
     * @param publishedResources the published resources
     * @return the list of published resources included the link sources of moved resources
     */
    protected List<CmsPublishedResource> addMovedLinkSources(
        CmsObject cms,
        List<CmsPublishedResource> publishedResources) {

        long timer = System.currentTimeMillis();
        if (LOG.isDebugEnabled()) {
            LOG.debug(Messages.get().getBundle().key(Messages.LOG_SCRUB_EXPORT_START_MOVED_SOURCES_0));
        }
        // Work on a copy so the caller's list is never mutated.
        publishedResources = new ArrayList<CmsPublishedResource>(publishedResources);
        Set<String> pubResources = new HashSet<String>(publishedResources.size());
        // this is needed since the CmsPublishedResource#equals(Object) method just compares ids and not paths
        // and with moved files you have 2 entries with the same id and different paths...
        for (CmsPublishedResource pubRes : publishedResources) {
            pubResources.add(pubRes.getRootPath());
        }
        boolean modified = true;
        // until no more resources are added
        while (modified) {
            modified = false;
            // Iterate over a snapshot: the real list is appended to inside the loop.
            Iterator<CmsPublishedResource> itPrePubRes = new ArrayList<CmsPublishedResource>(
                publishedResources).iterator();
            while (itPrePubRes.hasNext()) {
                CmsPublishedResource res = itPrePubRes.next();
                if (res.getMovedState() != CmsPublishedResource.STATE_MOVED_DESTINATION) {
                    // handle only resources that are destination of move operations
                    continue;
                }
                List<CmsRelation> relations = null;
                try {
                    // get all link sources to this resource
                    relations = cms.getRelationsForResource(
                        cms.getRequestContext().removeSiteRoot(res.getRootPath()),
                        CmsRelationFilter.SOURCES);
                } catch (CmsException e) {
                    // should never happen
                    if (LOG.isErrorEnabled()) {
                        LOG.error(e.getLocalizedMessage(), e);
                    }
                }
                if ((relations == null) || relations.isEmpty()) {
                    // continue with next resource if no link sources found
                    continue;
                }
                Iterator<CmsRelation> itRelations = relations.iterator();
                while (itRelations.hasNext()) {
                    CmsRelation relation = itRelations.next();
                    CmsPublishedResource source = null;
                    try {
                        // get the link source
                        source = new CmsPublishedResource(relation.getSource(cms, CmsResourceFilter.ALL));
                    } catch (CmsException e) {
                        // should never happen
                        if (LOG.isWarnEnabled()) {
                            LOG.warn(e.getLocalizedMessage());
                        }
                    }
                    if ((source == null) || pubResources.contains(source.getRootPath())) {
                        // continue if the link source could not been retrieved or if the list already contains it
                        continue;
                    }
                    // add it, and set the modified flag to give it another round
                    modified = true;
                    pubResources.add(source.getRootPath());
                    publishedResources.add(source);
                }
            }
        }
        if (LOG.isDebugEnabled()) {
            LOG.debug(Messages.get().getBundle().key(
                Messages.LOG_SCRUB_EXPORT_FINISH_MOVED_SOURCES_1,
                (System.currentTimeMillis() - timer) + ""));
        }
        return publishedResources;
    }
}
public class CommonDatabaseMetaData { /** * Retrieves a description of the given catalog ' s system or user function parameters and return type . * < P > Only descriptions matching the schema , function and parameter name criteria are returned . They are ordered by * < code > FUNCTION _ CAT < / code > , < code > FUNCTION _ SCHEM < / code > , < code > FUNCTION _ NAME < / code > and < code > SPECIFIC _ * NAME < / code > . Within this , the return value , if any , is first . Next are the parameter descriptions in call order . * The column descriptions follow in column number order . * < P > Each row in the < code > ResultSet < / code > is a parameter description , column description or return type * description with the following fields : < OL > < LI > < B > FUNCTION _ CAT < / B > String = > function catalog ( may be * < code > null < / code > ) < LI > < B > FUNCTION _ SCHEM < / B > String = > function schema ( may be < code > null < / code > ) * < LI > < B > FUNCTION _ NAME < / B > String = > function name . This is the name used to invoke the function * < LI > < B > COLUMN _ NAME < / B > String = > column / parameter name < LI > < B > COLUMN _ TYPE < / B > Short = > kind of column / parameter : * < UL > < LI > functionColumnUnknown - nobody knows < LI > functionColumnIn - IN parameter < LI > functionColumnInOut - * INOUT parameter < LI > functionColumnOut - OUT parameter < LI > functionColumnReturn - function return value < LI > * functionColumnResult - Indicates that the parameter or column is a column in the < code > ResultSet < / code > < / UL > * < LI > < B > DATA _ TYPE < / B > int = > SQL type from java . sql . Types < LI > < B > TYPE _ NAME < / B > String = > SQL type name , for a UDT * type the type name is fully qualified < LI > < B > PRECISION < / B > int = > precision < LI > < B > LENGTH < / B > int = > length in * bytes of data < LI > < B > SCALE < / B > short = > scale - null is returned for data types where SCALE is not applicable . 
* < LI > < B > RADIX < / B > short = > radix < LI > < B > NULLABLE < / B > short = > can it contain NULL . < UL > < LI > functionNoNulls - * does not allow NULL values < LI > functionNullable - allows NULL values < LI > functionNullableUnknown - nullability * unknown < / UL > < LI > < B > REMARKS < / B > String = > comment describing column / parameter < LI > < B > CHAR _ OCTET _ LENGTH < / B > int * = > the maximum length of binary and character based parameters or columns . For any other datatype the returned * value is a NULL < LI > < B > ORDINAL _ POSITION < / B > int = > the ordinal position , starting from 1 , for the input and * output parameters . A value of 0 is returned if this row describes the function ' s return value . For result set * columns , it is the ordinal position of the column in the result set starting from 1 . < LI > < B > IS _ NULLABLE < / B > * String = > ISO rules are used to determine the nullability for a parameter or column . < UL > < LI > YES - - - * if the parameter or column can include NULLs < LI > NO - - - if the parameter or column cannot include * NULLs < LI > empty string - - - if the nullability for the parameter or column is unknown < / UL > * < LI > < B > SPECIFIC _ NAME < / B > String = > the name which uniquely identifies this function within its schema . This is * a user specified , or DBMS generated , name that may be different then the < code > FUNCTION _ NAME < / code > for example * with overload functions < / OL > * < p > The PRECISION column represents the specified column size for the given parameter or column . For numeric data , * this is the maximum precision . For character data , this is the length in characters . For datetime datatypes , * this is the length in characters of the String representation ( assuming the maximum allowed precision of the * fractional seconds component ) . For binary data , this is the length in bytes . For the ROWID datatype , this is the * length in bytes . 
Null is returned for data types where the column size is not applicable . * @ param catalog a catalog name ; must match the catalog name as it is stored in the database ; " " * retrieves those without a catalog ; < code > null < / code > means that the catalog name * should not be used to narrow the search * @ param schemaPattern a schema name pattern ; must match the schema name as it is stored in the database ; " " * retrieves those without a schema ; < code > null < / code > means that the schema name should * not be used to narrow the search * @ param functionNamePattern a procedure name pattern ; must match the function name as it is stored in the * database * @ param columnNamePattern a parameter name pattern ; must match the parameter or column name as it is stored in * the database * @ return < code > ResultSet < / code > - each row describes a user function parameter , column or return type * @ throws java . sql . SQLException if a database access error occurs * @ see # getSearchStringEscape * @ since 1.6 */ public ResultSet getFunctionColumns ( final String catalog , final String schemaPattern , final String functionNamePattern , final String columnNamePattern ) throws SQLException { } }
throw SQLExceptionMapper . getSQLException ( "uh7" ) ;
public class BitZTradeServiceRaw {
    /**
     * Cancels an entrust sheet (open order) on Bit-Z.
     *
     * @param entrustSheetId id of the entrust sheet to cancel
     * @return the cancellation result reported by the exchange
     * @throws IOException on network/IO failure
     */
    public BitZTradeCancelResult cancelEntrustSheet(String entrustSheetId) throws IOException {
        // Delegates to the signed REST endpoint; timestamp/nonce/signer provide request auth.
        return bitz.cancelEntrustSheet(apiKey, getTimeStamp(), nonce, signer, entrustSheetId);
    }
}
public class Ifc4FactoryImpl { /** * < ! - - begin - user - doc - - > * < ! - - end - user - doc - - > * @ generated */ public String convertIfcStructuralSurfaceMemberTypeEnumToString ( EDataType eDataType , Object instanceValue ) { } }
return instanceValue == null ? null : instanceValue . toString ( ) ;
public class XDMClientChildSbb {
    /**
     * (non-Javadoc)
     *
     * Stores the SBB context, lazily creates the tracer, and looks up the XCAP
     * client resource-adaptor interfaces from JNDI.
     *
     * @see javax.slee.Sbb#setSbbContext(javax.slee.SbbContext)
     */
    public void setSbbContext(SbbContext sbbContext) {
        this.sbbContext = (SbbContextExt) sbbContext;
        // The tracer is created once and reused across context re-assignments.
        if (tracer == null) {
            tracer = sbbContext.getTracer(XDMClientChildSbb.class.getSimpleName());
        }
        try {
            Context context = (Context) new InitialContext().lookup("java:comp/env");
            xcapClientSbbInterface = (XCAPClientResourceAdaptorSbbInterface) context.lookup(
                "slee/resources/xcapclient/2.0/sbbrainterface");
            xcapClientACIF = (XCAPClientActivityContextInterfaceFactory) context.lookup(
                "slee/resources/xcapclient/2.0/acif");
        } catch (NamingException e) {
            // Lookup failure leaves the RA interfaces null; log and continue.
            tracer.severe("Can't set sbb context.", e);
        }
    }
}
public class DataSet { /** * Inserts the currently contained data objects into the database . * @ param platform The ( connected ) database platform for inserting data * @ param model The database model * @ param batchSize The batch size ; use 1 for not using batch mode */ public void insert ( Platform platform , Database model , int batchSize ) throws SQLException { } }
if ( batchSize <= 1 ) { for ( Iterator it = _beans . iterator ( ) ; it . hasNext ( ) ; ) { platform . insert ( model , ( DynaBean ) it . next ( ) ) ; } } else { for ( int startIdx = 0 ; startIdx < _beans . size ( ) ; startIdx += batchSize ) { platform . insert ( model , _beans . subList ( startIdx , startIdx + batchSize ) ) ; } }
public class InstanceGroup { /** * A list of configurations that were successfully applied for an instance group last time . * @ return A list of configurations that were successfully applied for an instance group last time . */ public java . util . List < Configuration > getLastSuccessfullyAppliedConfigurations ( ) { } }
if ( lastSuccessfullyAppliedConfigurations == null ) { lastSuccessfullyAppliedConfigurations = new com . amazonaws . internal . SdkInternalList < Configuration > ( ) ; } return lastSuccessfullyAppliedConfigurations ;
public class DeleteEvaluationRequestMarshaller {
    /**
     * Marshall the given parameter object.
     *
     * Writes the request's evaluation id through the protocol marshaller; any
     * marshalling failure is wrapped in an SdkClientException.
     */
    public void marshall(DeleteEvaluationRequest deleteEvaluationRequest, ProtocolMarshaller protocolMarshaller) {
        if (deleteEvaluationRequest == null) {
            throw new SdkClientException("Invalid argument passed to marshall(...)");
        }
        try {
            protocolMarshaller.marshall(deleteEvaluationRequest.getEvaluationId(), EVALUATIONID_BINDING);
        } catch (Exception e) {
            // Wrap everything (including runtime errors) in the SDK's client exception type.
            throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
        }
    }
}
public class RemoteServiceProxy { /** * Returns a { @ link com . google . gwt . user . client . rpc . SerializationStreamWriter * SerializationStreamWriter } that has had * { @ link ClientSerializationStreamWriter # prepareToWrite ( ) } called on it and * it has already had had the name of the remote service interface written as * well . * @ return { @ link com . google . gwt . user . client . rpc . SerializationStreamWriter * SerializationStreamWriter } that has had * { @ link ClientSerializationStreamWriter # prepareToWrite ( ) } called on * it and it has already had had the name of the remote service * interface written as well */ public SerializationStreamWriter createStreamWriter ( ) { } }
ClientSerializationStreamWriter clientSerializationStreamWriter = new ClientSerializationStreamWriter ( serializer , moduleBaseURL , serializationPolicyName ) ; clientSerializationStreamWriter . prepareToWrite ( ) ; return clientSerializationStreamWriter ;
public class FileSystem { /** * The src file is on the local disk . Add it to FS at * the given dst name . * delSrc indicates if the source should be removed */ @ Deprecated public void copyFromLocalFile ( boolean delSrc , boolean overwrite , Path src , Path dst ) throws IOException { } }
copyFromLocalFile ( delSrc , overwrite , false , src , dst ) ;
public class SelectableVirtualCircuit { /** * Start virtual circuit * @ param executorFactory * @ throws IOException */ @ Override public void start ( Supplier < ExecutorService > executorFactory ) throws IOException { } }
future = executorFactory . get ( ) . submit ( this ) ;
public class QueryBuilder { /** * Sets a named parameter . If name is null throws a * { @ link NullPointerException } . If value is instance of Observable then * throws an { @ link IllegalArgumentException } . * @ param name * the parameter name . Cannot be null . * @ param value * the parameter value */ void parameter ( String name , Object value ) { } }
Preconditions . checkNotNull ( name , "parameter name cannot be null" ) ; if ( value instanceof Observable ) throw new IllegalArgumentException ( "use parameters() method not the parameter() method for an Observable" ) ; this . parameters = parameters . concatWith ( Observable . just ( new Parameter ( name , value ) ) ) ;
public class JSONArray { /** * Put or replace a long value . If the index is greater than the length of * the JSONArray , then null elements will be added as necessary to pad * it out . * @ param index The subscript . * @ param value A long value . * @ return this * @ throws JSONException If the index is negative . */ public JSONArray put ( int index , long value ) throws JSONException { } }
put ( index , Long . valueOf ( value ) ) ; return this ;
public class ConditionDateFormatterFactory { /** * 日時の書式かどうか判定する 。 * @ param store * @ return */ public boolean isDatePattern ( final TokenStore store ) { } }
if ( store . containsInFactor ( "General" ) ) { return false ; } if ( store . containsAnyInFactorIgnoreCase ( DATE_DECISTION_CHARS ) ) { return true ; } // [ h ] [ m ] [ s ] の形式のチェック for ( Token token : store . getTokens ( ) ) { if ( ! ( token instanceof Token . Condition ) ) { continue ; } final Token . Condition condition = token . asCondition ( ) ; final String value = condition . getValue ( ) ; if ( PATTERN_ELAPSED_TIME . matcher ( value ) . matches ( ) ) { return true ; } } return false ;
public class ClusterHeartbeatManager { /** * Initializes the { @ link ClusterHeartbeatManager } . It will schedule the * heartbeat operation to the { @ link # getHeartbeatInterval ( HazelcastProperties ) } interval . */ void init ( ) { } }
ExecutionService executionService = nodeEngine . getExecutionService ( ) ; executionService . scheduleWithRepetition ( CLUSTER_EXECUTOR_NAME , this :: heartbeat , heartbeatIntervalMillis , heartbeatIntervalMillis , TimeUnit . MILLISECONDS ) ; if ( icmpParallelMode ) { startPeriodicPinger ( ) ; }
public class CPFriendlyURLEntryUtil {
    /**
     * Returns all the cp friendly url entries where groupId = &#63; and
     * classNameId = &#63; and classPK = &#63; and main = &#63;.
     *
     * @param groupId the group ID
     * @param classNameId the class name ID
     * @param classPK the class pk
     * @param main the main
     * @return the matching cp friendly url entries
     */
    public static List<CPFriendlyURLEntry> findByG_C_C_M(
        long groupId, long classNameId, long classPK, boolean main) {

        // Pure delegation to the persistence layer finder.
        return getPersistence().findByG_C_C_M(groupId, classNameId, classPK, main);
    }
}
public class NLMS { /** * { @ inheritDoc } */ @ Override public Prediction _predictRecord ( Record r ) { } }
Map < Object , Double > thitas = knowledgeBase . getModelParameters ( ) . getThitas ( ) ; double yPredicted = hypothesisFunction ( r . getX ( ) , thitas ) ; return new Prediction ( yPredicted , null ) ;
public class TypedValue { /** * Converts a DOM into a string representation , after " normalizing " it . * @ param n DOM Node to convert . */ public void setValueAsJava ( org . w3c . dom . Node n ) { } }
DOMProcessing . trimNode ( n ) ; try { this . value = DOMProcessing . writeToString ( n ) ; } catch ( TransformerConfigurationException e ) { this . value = n . toString ( ) ; // TODO : not the most compelling handling } catch ( TransformerException e ) { this . value = n . toString ( ) ; // TODO : not the most compelling handling }
public class KeyVaultClientBaseImpl { /** * Updates the specified certificate issuer . * The UpdateCertificateIssuer operation performs an update on the specified certificate issuer entity . This operation requires the certificates / setissuers permission . * @ param vaultBaseUrl The vault name , for example https : / / myvault . vault . azure . net . * @ param issuerName The name of the issuer . * @ param provider The issuer provider . * @ param credentials The credentials to be used for the issuer . * @ param organizationDetails Details of the organization as provided to the issuer . * @ param attributes Attributes of the issuer object . * @ param serviceCallback the async ServiceCallback to handle successful and failed responses . * @ throws IllegalArgumentException thrown if parameters fail the validation * @ return the { @ link ServiceFuture } object */ public ServiceFuture < IssuerBundle > updateCertificateIssuerAsync ( String vaultBaseUrl , String issuerName , String provider , IssuerCredentials credentials , OrganizationDetails organizationDetails , IssuerAttributes attributes , final ServiceCallback < IssuerBundle > serviceCallback ) { } }
return ServiceFuture . fromResponse ( updateCertificateIssuerWithServiceResponseAsync ( vaultBaseUrl , issuerName , provider , credentials , organizationDetails , attributes ) , serviceCallback ) ;
public class ProcessClosurePrimitives { /** * Reports an incorrect use of super - method calling . */ private void reportBadGoogBaseUse ( Node n , String extraMessage ) { } }
compiler . report ( JSError . make ( n , GOOG_BASE_CLASS_ERROR , extraMessage ) ) ;
public class ContentSpecParser {
    /**
     * Creates an empty Level using the LevelType to determine which Level subclass to instantiate.
     *
     * @param lineNumber The line number of the level.
     * @param levelType The Level Type.
     * @param input The string that represents the level, if one exists.
     * @return The empty Level subclass object, or a plain Level object if no type matches a subclass.
     */
    protected Level createEmptyLevelFromType(final int lineNumber, final LevelType levelType, final String input) {
        // Create the level based on the type; each subclass takes (parent, line, text)
        // except InitialContent, which has no parent argument.
        switch (levelType) {
            case APPENDIX:
                return new Appendix(null, lineNumber, input);
            case CHAPTER:
                return new Chapter(null, lineNumber, input);
            case SECTION:
                return new Section(null, lineNumber, input);
            case PART:
                return new Part(null, lineNumber, input);
            case PROCESS:
                return new Process(null, lineNumber, input);
            case INITIAL_CONTENT:
                return new InitialContent(lineNumber, input);
            default:
                // Unknown type: fall back to a generic Level carrying the type.
                return new Level(null, lineNumber, input, levelType);
        }
    }
}
public class DiskFileItem { /** * Returns the contents of the file as an array of bytes . If the contents of * the file were not yet cached in memory , they will be loaded from the disk * storage and cached . * @ return The contents of the file as an array of bytes . */ @ ReturnsMutableObject ( "Speed" ) @ SuppressFBWarnings ( "EI_EXPOSE_REP" ) @ Nullable public byte [ ] directGet ( ) { } }
if ( isInMemory ( ) ) { _ensureCachedContentIsPresent ( ) ; return m_aCachedContent ; } return SimpleFileIO . getAllFileBytes ( m_aDFOS . getFile ( ) ) ;
public class JobSchedulesImpl { /** * Updates the properties of the specified job schedule . * This replaces only the job schedule properties specified in the request . For example , if the schedule property is not specified with this request , then the Batch service will keep the existing schedule . Changes to a job schedule only impact jobs created by the schedule after the update has taken place ; currently running jobs are unaffected . * @ param jobScheduleId The ID of the job schedule to update . * @ param jobSchedulePatchParameter The parameters for the request . * @ param jobSchedulePatchOptions Additional parameters for the operation * @ param serviceCallback the async ServiceCallback to handle successful and failed responses . * @ throws IllegalArgumentException thrown if parameters fail the validation * @ return the { @ link ServiceFuture } object */ public ServiceFuture < Void > patchAsync ( String jobScheduleId , JobSchedulePatchParameter jobSchedulePatchParameter , JobSchedulePatchOptions jobSchedulePatchOptions , final ServiceCallback < Void > serviceCallback ) { } }
return ServiceFuture . fromHeaderResponse ( patchWithServiceResponseAsync ( jobScheduleId , jobSchedulePatchParameter , jobSchedulePatchOptions ) , serviceCallback ) ;
public class StandardDdlParser {
    /**
     * Method to parse fully qualified schema, table and column names that are defined with '.'
     * separator and optionally bracketed with square brackets.
     * Example: partsSchema.supplier   Example: [partsSchema].[supplier]
     *
     * @param tokens the {@link DdlTokenStream} representing the tokenized DDL content; may not be null
     * @return the parsed name
     */
    protected String parseName(DdlTokenStream tokens) {
        // Basically we want to construct a name that could have the form:
        //   [schemaName].[tableName].[columnName]
        // NOTE: "[]" brackets are optional. As in the original two-loop version,
        // the first segment decides the style: if it is bracketed, every following
        // segment is assumed to be bracketed as well.
        StringBuilder sb = new StringBuilder();
        final boolean bracketed = tokens.matches('[');
        while (true) {
            if (bracketed) {
                tokens.consume('['); // [ bracket
                sb.append(consumeIdentifier(tokens)); // name
                tokens.consume(']'); // ] bracket
            } else {
                sb.append(consumeIdentifier(tokens)); // name
            }
            if (tokens.matches('.')) {
                sb.append(tokens.consume());
            } else {
                break;
            }
        }
        return sb.toString();
    }
}
public class AbcGrammar { /** * dots are ignored - for legibility only * parts - play - order : : = 1 * ( ALPHA / ( " ( " parts - play - order " ) " ) * DIGIT ) / " . " */ Rule PartsPlayOrder ( ) { } }
return OneOrMore ( FirstOfS ( CharRange ( 'A' , 'Z' ) . label ( "ALPHA" ) . suppressSubnodes ( ) , DIGITS ( ) , String ( "(" ) , String ( ")" ) , suppr ( "." ) , suppr ( "\"" ) , // found P : " AABBCCAA " . . . suppr ( WSPS ( ) ) ) ) . label ( PartsPlayOrder ) ; // return OneOrMore ( AnyOf ( " 1234567890 ( ) . ABCDEFGHIJKLMNOPQRSTUVWXYZ " ) ) // . label ( " PartsPlayOrder " ) . suppressSubnodes ( ) ; // if ( cachedPartsPlayOrder = = null ) { // / / cachedPartsPlayOrder = String ( " building . . . " ) ; // cachedPartsPlayOrder = // FirstOfS ( // OneOrMoreS ( // SequenceS ( // FirstOfS ( CharRange ( ' A ' , ' Z ' ) . label ( " ALPHA " ) , // Sequence ( ' ( ' , cachedPartsPlayOrder , ' ) ' ) // ZeroOrMore ( DIGIT ( ) ) . label ( " DIGIT " ) . suppressSubnodes ( ) // suppr ( " . " ) // ) . label ( " PartsPlayOrder " ) ; // return cachedPartsPlayOrder ;
public class KpiSkin { /** * * * * * * Methods * * * * * */ @ Override protected void handleEvents ( final String EVENT_TYPE ) { } }
super . handleEvents ( EVENT_TYPE ) ; if ( "RECALC" . equals ( EVENT_TYPE ) ) { angleRange = Helper . clamp ( 90.0 , 180.0 , gauge . getAngleRange ( ) ) ; minValue = gauge . getMinValue ( ) ; range = gauge . getRange ( ) ; angleStep = angleRange / range ; redraw ( ) ; }
public class BucketWriteTrx {
    /**
     * {@inheritDoc}
     *
     * Marks the given data entry as deleted: the entry is replaced by a
     * {@link DeletedData} tombstone in both the complete and the modified view of
     * its data bucket, and the bucket is re-put into the transaction log.
     */
    @Override
    public void removeData(final IData pData) throws TTException {
        checkState(!mDelegate.isClosed(), "Transaction already closed");
        checkNotNull(pData);
        // Derive the containing bucket key from the data key; INDIRECT_BUCKET_COUNT[3]
        // is the shift that maps data keys to bucket keys.
        final long dataBucketKey = pData.getDataKey() >> IConstants.INDIRECT_BUCKET_COUNT[3];
        LogValue container = prepareDataBucket(pData.getDataKey());
        final IData delData = new DeletedData(pData.getDataKey());
        // Write the tombstone into both bucket views at the entry's offset.
        ((DataBucket) container.getComplete()).setData(dataBucketOffset(pData.getDataKey()), delData);
        ((DataBucket) container.getModified()).setData(dataBucketOffset(pData.getDataKey()), delData);
        mLog.put(new LogKey(false, IConstants.INDIRECT_BUCKET_COUNT.length, dataBucketKey), container);
    }
}
public class DecoratingDynamicTypeBuilder { /** * { @ inheritDoc } */ public DynamicType . Builder < T > annotateType ( Collection < ? extends AnnotationDescription > annotations ) { } }
return attribute ( new TypeAttributeAppender . Explicit ( new ArrayList < AnnotationDescription > ( annotations ) ) ) ;
public class ActivePlanRepository {
    /**
     * Get the site-local fragment id for a given plan identified by 20-byte sha-1 hash.
     * If the plan isn't known to this SPC, load it up. Otherwise addref it.
     *
     * @param planHash sha-1 hash identifying the plan
     * @param plan serialized plan to cache on a miss
     * @param stmtText originating SQL text, if any
     * @return the site-local fragment id for the plan
     */
    public static long loadOrAddRefPlanFragment(byte[] planHash, byte[] plan, String stmtText) {
        Sha1Wrapper key = new Sha1Wrapper(planHash);
        // All cache maps are guarded by the FragInfo class lock.
        synchronized (FragInfo.class) {
            FragInfo frag = m_plansByHash.get(key);
            if (frag == null) {
                frag = new FragInfo(key, plan, m_nextFragId++, stmtText);
                m_plansByHash.put(frag.hash, frag);
                m_plansById.put(frag.fragId, frag);
                // Keep the cache bounded; evict the least-recently-used fragment on overflow.
                if (m_plansById.size() > ExecutionEngine.EE_PLAN_CACHE_SIZE) {
                    evictLRUfragment();
                }
            }
            // Bit of a hack to work around an issue where a statement-less adhoc
            // fragment could be identical to a statement-needing regular procedure.
            // This doesn't really address the broader issue that fragment hashes
            // are not 1-1 with SQL statements.
            if (frag.stmtText == null) {
                frag.stmtText = stmtText;
            }
            // The fragment MAY be in the LRU map.
            // An incremented refCount is a lazy way to keep it safe from eviction
            // without having to update the map.
            // This optimizes for popular fragments in a small or stable cache that may be reused
            // many times before the eviction process needs to take any notice.
            frag.refCount++;
            return frag.fragId;
        }
    }
}
public class UsersInner { /** * Gets the properties of the specified user . * @ param deviceName The device name . * @ param name The user name . * @ param resourceGroupName The resource group name . * @ throws IllegalArgumentException thrown if parameters fail the validation * @ return the observable to the UserInner object */ public Observable < UserInner > getAsync ( String deviceName , String name , String resourceGroupName ) { } }
return getWithServiceResponseAsync ( deviceName , name , resourceGroupName ) . map ( new Func1 < ServiceResponse < UserInner > , UserInner > ( ) { @ Override public UserInner call ( ServiceResponse < UserInner > response ) { return response . body ( ) ; } } ) ;
public class WsCommandContextFactory {
    /**
     * Creates a new {@link WsCommandContext} with the given {@code session}.
     *
     * @param session the session the present request came from
     * @return a new {@link WsCommandContext}
     */
    public WsCommandContext newCommandContext(Session session) {
        // Bundle the request session with the shared connection factory and the
        // currently registered UI-client and feed sessions.
        return new WsCommandContext(
            connectionFactoryProvider.getConnectionFactory(),
            session,
            wsEndpoints.getUiClientSessions(),
            wsEndpoints.getFeedSessions());
    }
}
public class FilePolicyIndex { /** * ( non - Javadoc ) * @ see org . fcrepo . server . security . xacml . pdp . data . PolicyDataManager # getPolicy ( java . lang . String ) */ @ Override public AbstractPolicy getPolicy ( String name , PolicyFinder policyFinder ) throws PolicyIndexException { } }
readLock . lock ( ) ; try { logger . debug ( "Getting policy named: " + name ) ; if ( policies . containsKey ( name ) ) { return handleDocument ( m_policyReader . readPolicy ( policies . get ( name ) ) , policyFinder ) ; } else { throw new PolicyIndexException ( "Attempting to get non-existent policy " + name ) ; } } catch ( ParsingException pe ) { throw new PolicyIndexException ( pe . getMessage ( ) , pe ) ; } finally { readLock . unlock ( ) ; }
public class ConfigValidation { /** * Returns a new NestableFieldValidator for a given class . * @ param cls the Class the field should be a type of * @ param nullAllowed whether or not a value of null is valid * @ return a NestableFieldValidator for that class */ public static NestableFieldValidator fv ( final Class cls , final boolean nullAllowed ) { } }
return new NestableFieldValidator ( ) { @ Override public void validateField ( String pd , String name , Object field ) throws IllegalArgumentException { if ( nullAllowed && field == null ) { return ; } if ( ! cls . isInstance ( field ) ) { throw new IllegalArgumentException ( pd + name + " must be a " + cls . getName ( ) + ". (" + field + ")" ) ; } } } ;
public class ByteBuffer {
    /**
     * {@inheritDoc}
     *
     * Creates a view over the region [start, stop) of this buffer. The view
     * shares the underlying byte array; no data is copied.
     */
    @Override
    public Buffer slice(final int start, final int stop) {
        // Zero-length slice: hand back the shared empty buffer instead of allocating.
        if (start == stop) {
            return Buffers.EMPTY_BUFFER;
        }
        // Validate both ends of the requested range against the backing storage
        // (stop is exclusive, hence the -1 on the upper check).
        checkIndex(this.lowerBoundary + start);
        checkIndex(this.lowerBoundary + stop - 1);
        final int upperBoundary = this.lowerBoundary + stop;
        // The new view starts out "full": its writer index sits at its upper boundary.
        final int writerIndex = upperBoundary;
        // NOTE(review): the first constructor argument resets the slice's reader
        // index to 0 — confirm this matches the Buffer contract for slices.
        return new ByteBuffer(0, this.lowerBoundary + start, upperBoundary, writerIndex, this.buffer);
    }
}
public class ThriftConnectionPool { /** * 异步获取连接的方法 * @ return 连接代理对象 */ public ListenableFuture < ThriftConnection < T > > getAsyncConnection ( ) { } }
return this . asyncExecutor . submit ( new Callable < ThriftConnection < T > > ( ) { public ThriftConnection < T > call ( ) throws Exception { return getConnection ( ) ; } } ) ;
public class TenantService { /** * Search for the first { @ link TenantDefinition } that is selected by the given filter . * All current tenant definitions are searched . * @ param filter { @ link TenantFilter } that decides selection criteria . * @ return First { @ link TenantDefinition } selected by the filter or null if * the search exhausts all known tenants without a selection . */ public TenantDefinition searchForTenant ( TenantFilter filter ) { } }
checkServiceState ( ) ; for ( TenantDefinition tenantDef : getAllTenantDefs ( ) . values ( ) ) { if ( filter . selectTenant ( tenantDef ) ) { return tenantDef ; } } return null ;
public class ActionButton { /** * Calculates shadow width in actual pixels * @ return shadow width in actual pixels */ private int calculateShadowWidth ( ) { } }
float mShadowRadius = isShadowResponsiveEffectEnabled ( ) ? ( ( ShadowResponsiveDrawer ) shadowResponsiveDrawer ) . getMaxShadowRadius ( ) : getShadowRadius ( ) ; int shadowWidth = hasShadow ( ) ? ( int ) ( ( mShadowRadius + Math . abs ( getShadowXOffset ( ) ) ) * 2 ) : 0 ; LOGGER . trace ( "Calculated Action Button shadow width: {}" , shadowWidth ) ; return shadowWidth ;
public class OClusterLocalHole {
    /**
     * Append the hole to the end of segment.
     *
     * @param iPosition the data position being recorded as a hole
     * @return the byte offset within the hole file at which the entry was written
     * @throws IOException if the underlying file cannot be grown or written
     */
    public long pushPosition(final long iPosition) throws IOException {
        // Next free slot: one fixed-size record per existing hole.
        // NOTE(review): getHoles() * RECORD_SIZE is int arithmetic — confirm the
        // hole count can never grow large enough to overflow before widening.
        final int position = getHoles() * RECORD_SIZE;
        // Grow the file by one record, then write the hole position into the new slot.
        file.allocateSpace(RECORD_SIZE);
        file.writeLong(position, iPosition);
        if (OLogManager.instance().isDebugEnabled())
            OLogManager.instance().debug(this, "Pushed new hole %s/#%d -> #%d:%d", owner.getName(), position / RECORD_SIZE, owner.getId(), iPosition);
        return position;
    }
}
public class ReflectUtil { /** * Visits parameterized types to collect package names . * @ see { @ link # getTypePackageNames } . */ private static void getParameterizedTypePackageNames ( ParameterizedType type , Map < String , Class < ? > > packageNames ) { } }
for ( Type argumentType : type . getActualTypeArguments ( ) ) { getTypePackageNames ( argumentType , packageNames ) ; } getTypePackageNames ( type . getRawType ( ) , packageNames ) ; Type ownerType = type . getOwnerType ( ) ; if ( ownerType != null ) { getTypePackageNames ( ownerType , packageNames ) ; }
public class Utils {
    /**
     * Write the given long value as a 4 byte unsigned integer. Overflow is
     * ignored (only the low 32 bits of {@code value} are stored).
     *
     * @param buffer The buffer to write to
     * @param index The position in the buffer at which to begin writing
     * @param value The value to write
     */
    public static void putUnsignedInt(ByteBuffer buffer, int index, long value) {
        // Mask down to the low 32 bits before narrowing, discarding any overflow.
        final int narrowed = (int) (value & 0xffffffffL);
        buffer.putInt(index, narrowed);
    }
}
public class SingleDeletionNeighbourhood {
    /**
     * Generates a random deletion move for the given subset solution that removes a single ID
     * from the selection. Fixed IDs are never candidates and the minimum subset size is
     * respected. If no deletion move can be generated, <code>null</code> is returned.
     *
     * @param solution solution for which a random deletion move is generated
     * @param rnd source of randomness used to generate random move
     * @return random deletion move, <code>null</code> if no move can be generated
     */
    @Override
    public SubsetMove getRandomMove(SubsetSolution solution, Random rnd) {
        // Deleting below the minimum size is not allowed.
        if (minSizeReached(solution)) {
            return null;
        }
        // Candidate IDs for deletion (fixed IDs already excluded by the helper).
        final Set<Integer> candidates = getRemoveCandidates(solution);
        if (candidates.isEmpty()) {
            return null;
        }
        // Pick one candidate uniformly at random and wrap it in a move.
        return new DeletionMove(SetUtilities.getRandomElement(candidates, rnd));
    }
}
public class ItemStreamLink { /** * / * ( non - Javadoc ) * @ see com . ibm . ws . sib . msgstore . ItemCollection # removeFirstMatching ( com . ibm . ws . sib . msgstore . Filter , com . ibm . ws . sib . msgstore . Transaction ) */ public AbstractItem removeFirstMatchingItem ( Filter filter , Transaction transaction ) throws MessageStoreException { } }
if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) SibTr . entry ( this , tc , "removeFirstMatching" , new Object [ ] { filter , transaction } ) ; PrioritizedList items = _items ( ) ; AbstractItem item = items . removeFirstMatching ( filter , ( PersistentTransaction ) transaction ) ; if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) SibTr . exit ( this , tc , "removeFirstMatching" , item ) ; return item ;
public class DurableOutputHandler {
    /**
     * Attempt to create a new durable subscription and send back a
     * ControlDurableConfirm giving the result.
     *
     * @param DM destination manager used to resolve the target destination
     * @param msg The ControlCreateDurable request message.
     * @param MP the owning message processor, supplying factories and messaging I/O
     */
    protected static void handleCreateDurable(DestinationManager DM, ControlCreateDurable msg, MessageProcessor MP) {
        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
            SibTr.entry(tc, "handleCreateDurable", new Object[] { DM, msg, MP });
        // Pessimistic default; only overwritten when the create succeeds below.
        int status = STATUS_SUB_GENERAL_ERROR;
        SIBUuid12 handlerID = msg.getGuaranteedTargetDestinationDefinitionUUID(); // create requests require a target dest ID
        String subName = msg.getDurableSubName();
        String discriminator = msg.getDurableDiscriminator();
        String selectorString = msg.getDurableSelector();
        SelectorDomain selectorDomain = SelectorDomain.getSelectorDomain(msg.getDurableSelectorDomain());
        // Create a selectorProperties Map to convey any additional properties associated
        // with the selector. At present (26/03/08) there is only one additional property
        // which is itself a map (of name spaces). The name space map is used in the XPath10 selector domain
        // to map URLs to prefixes. The use of a selectorProperties map keeps the Core SPI generic but
        // when conveying information over JMF we need a simpler structure and so will need to
        // break out individual properties for transportation.
        Map<String, Object> selectorProperties = null;
        Map<String, String> selectorNamespaceMap = msg.getDurableSelectorNamespaceMap();
        if (selectorNamespaceMap != null) {
            selectorProperties = new HashMap<String, Object>();
            selectorProperties.put("namespacePrefixMappings", selectorNamespaceMap);
        }
        String user = msg.getSecurityUserid();
        // TODO this flag needs to be set from the message.
        boolean isSIBServerSubject = msg.isSecurityUseridSentBySystem();
        try {
            // Resolve the target BaseDestinationHandler
            BaseDestinationHandler handler = (BaseDestinationHandler) DM.getDestination(handlerID, false);
            // We'll create SelectionCriteria based on the properties we've been passed.
            SelectionCriteria criteria = null;
            // Use the appropriate SelectionCriteria factory dependent on whether we've been passed
            // selectorProperties
            if (selectorProperties == null) {
                criteria = MP.getSelectionCriteriaFactory().createSelectionCriteria(discriminator, selectorString, selectorDomain);
            } else {
                // Non-null selectorProperties, so we create MPSelectionCriteria
                criteria = MPSelectionCriteriaFactory.getInstance().createSelectionCriteria(discriminator, selectorString, selectorDomain, selectorProperties);
            }
            // Ask the BaseDestinationHandler to attempt the create
            // then we send back the result.
            status = handler.createDurableFromRemote(subName, criteria, user, msg.isCloned(), msg.isNoLocal(), isSIBServerSubject);
        } catch (Exception e) {
            // Log the exception; the pessimistic status is still reported to the caller.
            FFDCFilter.processException(e, "com.ibm.ws.sib.processor.impl.DurableOutputHandler.handleCreateDurable", "1:281:1.45.1.1", DurableOutputHandler.class);
            SibTr.exception(tc, e);
        }
        // Forward the status to the caller and we're done
        SIBUuid8 sender = msg.getGuaranteedSourceMessagingEngineUUID();
        ControlDurableConfirm reply = createDurableConfirm(MP, sender, msg.getRequestID(), status);
        MP.getMPIO().sendToMe(sender, SIMPConstants.CONTROL_MESSAGE_PRIORITY, reply);
        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
            SibTr.exit(tc, "handleCreateDurable");
    }
}
public class HttpClientBuilder { /** * Begins SSL configuration < em > for the given host < / em > . Actual * configuration must follow ( see { @ link # trustKeyStore ( URL , String ) } , * { @ link # keyStore ( URL , String ) } , { @ link # secureSchema ( String ) } , * { @ link # securePort ( int ) } ) . * Allows to enable both server validation ( by supplying keyStore ) and * client authentication ( by supplying trustKeyStore ) . * @ param hosthost for which to enable SSL * @ return this * @ see # trustKeyStore ( URL , String ) * @ see # keyStore ( URL , String ) * @ see # secureSchema ( String ) * @ see # securePort ( int ) */ public HttpClientBuilder ssl ( String host ) { } }
if ( sslHostConfig != null ) { sslHostConfigs . put ( secureHost , sslHostConfig ) ; } secureHost = host ; sslHostConfig = new SslHostConfig ( ) ; return this ;
public class SmsMfaConfigTypeMarshaller { /** * Marshall the given parameter object . */ public void marshall ( SmsMfaConfigType smsMfaConfigType , ProtocolMarshaller protocolMarshaller ) { } }
if ( smsMfaConfigType == null ) { throw new SdkClientException ( "Invalid argument passed to marshall(...)" ) ; } try { protocolMarshaller . marshall ( smsMfaConfigType . getSmsAuthenticationMessage ( ) , SMSAUTHENTICATIONMESSAGE_BINDING ) ; protocolMarshaller . marshall ( smsMfaConfigType . getSmsConfiguration ( ) , SMSCONFIGURATION_BINDING ) ; } catch ( Exception e ) { throw new SdkClientException ( "Unable to marshall request to JSON: " + e . getMessage ( ) , e ) ; }
public class H2DbConfig { /** * H2 Data source . * jdbc : h2 : mem : test * @ return data source . */ @ Bean public DataSource dataSource ( ) { } }
DriverManagerDataSource dataSource = new DriverManagerDataSource ( dbUrl , userName , password ) ; dataSource . setDriverClassName ( Driver . class . getName ( ) ) ; // no need shutdown , EmbeddedDatabaseFactoryBean will take care of this // EmbeddedDatabaseBuilder builder = new EmbeddedDatabaseBuilder ( ) ; // EmbeddedDatabase dataSource = builder . setType ( EmbeddedDatabaseType . H2 ) . setName ( " h2 _ example " ) . build ( ) ; return dataSource ;
public class MgcpCall {
    /**
     * Unregisters a connection from the call.
     *
     * @param endpointId The identifier of the endpoint that owns the connection.
     * @param connectionId The connection identifier.
     * @return Returns <code>true</code> if connection was removed successfully. Returns <code>false</code> otherwise.
     */
    public boolean removeConnection(String endpointId, int connectionId) {
        boolean removed = this.entries.remove(endpointId, connectionId);
        if (removed && log.isDebugEnabled()) {
            // NOTE(review): assumes entries.get(endpointId) never returns null after a
            // successful remove (true for a Guava Multimap, which returns an empty
            // view) — confirm the actual type of `entries`.
            int left = this.entries.get(endpointId).size();
            log.debug("Call " + getCallIdHex() + " unregistered connection " + Integer.toHexString(connectionId) + " from endpoint " + endpointId + ". Connection count: " + left);
        }
        return removed;
    }
}
public class ComponentCollision { /** * HandlerListener */ @ Override public void notifyHandlableAdded ( Featurable featurable ) { } }
if ( featurable . hasFeature ( Collidable . class ) ) { final Transformable transformable = featurable . getFeature ( Transformable . class ) ; transformable . addListener ( this ) ; }
public class UIViewRoot {
    /**
     * Broadcast all events in the specified collection, stopping at any time an
     * AbortProcessingException is thrown.
     *
     * @param context the current JSF context
     * @param events the events to broadcast
     * @param eventsAborted collects events whose processing was aborted via
     *            AbortProcessingException
     * @return <code>true</code> if the broadcast was completed without unexpected
     *         abortion/exception, <code>false</code> otherwise
     */
    private boolean _broadcastAll(FacesContext context, List<? extends FacesEvent> events, Collection<FacesEvent> eventsAborted) {
        assert events != null;
        // Indexed loop on purpose: listeners may queue further events while we
        // broadcast, so the list can grow during iteration.
        for (int i = 0; i < events.size(); i++) {
            FacesEvent event = events.get(i);
            UIComponent source = event.getComponent();
            UIComponent compositeParent = UIComponent.getCompositeComponentParent(source);
            if (compositeParent != null) {
                pushComponentToEL(context, compositeParent);
            }
            // Push the source as the current component
            pushComponentToEL(context, source);
            try {
                // Actual event broadcasting
                if (!source.isCachedFacesContext()) {
                    try {
                        source.setCachedFacesContext(context);
                        source.broadcast(event);
                    } finally {
                        // Always clear the cached context, even on exception.
                        source.setCachedFacesContext(null);
                    }
                } else {
                    source.broadcast(event);
                }
            } catch (Exception e) {
                // Walk the cause chain looking for an AbortProcessingException.
                Throwable cause = e;
                AbortProcessingException ape = null;
                do {
                    if (cause != null && cause instanceof AbortProcessingException) {
                        ape = (AbortProcessingException) cause;
                        break;
                    }
                    cause = cause.getCause();
                } while (cause != null);
                // for any other exception publish ExceptionQueuedEvent
                // publish the Exception to be handled by the ExceptionHandler
                // to publish or to not publish APE? That is the question: MYFACES-3199. We publish it,
                // because user can handle it in custom exception handler then.
                if (ape != null) {
                    e = ape;
                }
                ExceptionQueuedEventContext exceptionContext = new ExceptionQueuedEventContext(context, e, source, context.getCurrentPhaseId());
                context.getApplication().publishEvent(context, ExceptionQueuedEvent.class, exceptionContext);
                if (ape != null) {
                    // APE found, abortion for this event only
                    eventsAborted.add(event);
                } else {
                    // We can't continue broadcast processing if other exception is thrown:
                    return false;
                }
            } finally {
                // Restore the current component
                source.popComponentFromEL(context);
                if (compositeParent != null) {
                    compositeParent.popComponentFromEL(context);
                }
            }
        }
        return true;
    }
}
public class MethodInvocationProcessor {
    /**
     * AbstractProcessor implementation: invokes the configured method (with the
     * configured arguments) against the entry via reflection.
     *
     * @param entry the cache entry to process
     * @return the result of the reflective method invocation
     */
    @Override
    public Object process(InvocableMap.Entry entry) {
        ReflectionExtractor extractor = new ReflectionExtractor(name, args);
        if (mutator) {
            Object value = entry.getValue();
            Object result = extractor.extract(value);
            // Re-set the value so the entry is marked as modified and the change
            // is propagated/persisted.
            // NOTE(review): relies on extract() having invoked the method directly
            // on `value` (mutating it in place) — confirm against ReflectionExtractor.
            entry.setValue(value);
            return result;
        } else {
            // Read-only invocation: let the entry drive extraction.
            return entry.extract(extractor);
        }
    }
}
public class AWSWAFRegionalClient {
    /**
     * Permanently deletes a <a>ByteMatchSet</a>. You can't delete a <code>ByteMatchSet</code> if it's still used in any
     * <code>Rules</code> or if it still includes any <a>ByteMatchTuple</a> objects (any filters).
     * <p>
     * If you just want to remove a <code>ByteMatchSet</code> from a <code>Rule</code>, use <a>UpdateRule</a>.
     * <p>
     * To permanently delete a <code>ByteMatchSet</code>, perform the following steps:
     * <ol>
     * <li>Update the <code>ByteMatchSet</code> to remove filters, if any. For more information, see
     * <a>UpdateByteMatchSet</a>.</li>
     * <li>Use <a>GetChangeToken</a> to get the change token that you provide in the <code>ChangeToken</code> parameter
     * of a <code>DeleteByteMatchSet</code> request.</li>
     * <li>Submit a <code>DeleteByteMatchSet</code> request.</li>
     * </ol>
     *
     * @param request the DeleteByteMatchSet request
     * @return Result of the DeleteByteMatchSet operation returned by the service.
     * @throws WAFInternalErrorException
     *         The operation failed because of a system problem, even though the request was valid. Retry your request.
     * @throws WAFInvalidAccountException
     *         The operation failed because you tried to create, update, or delete an object by using an invalid
     *         account identifier.
     * @throws WAFNonexistentItemException
     *         The operation failed because the referenced object doesn't exist.
     * @throws WAFReferencedItemException
     *         The operation failed because you tried to delete an object that is still in use (e.g. a
     *         <code>ByteMatchSet</code> still referenced by a <code>Rule</code>, or a <code>Rule</code> still
     *         referenced by a <code>WebACL</code>).
     * @throws WAFStaleDataException
     *         The operation failed because you tried to create, update, or delete an object by using a change token
     *         that has already been used.
     * @throws WAFNonEmptyEntityException
     *         The operation failed because you tried to delete an object that isn't empty (e.g. a
     *         <code>WebACL</code> that still contains <code>Rule</code> objects, a <code>Rule</code> that still
     *         contains predicates, a <code>ByteMatchSet</code> that contains <code>ByteMatchTuple</code> objects, or
     *         an <code>IPSet</code> that references IP addresses).
     * @sample AWSWAFRegional.DeleteByteMatchSet
     * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/DeleteByteMatchSet"
     *      target="_top">AWS API Documentation</a>
     */
    @Override
    public DeleteByteMatchSetResult deleteByteMatchSet(DeleteByteMatchSetRequest request) {
        // Apply request handlers/validation before dispatching to the service.
        request = beforeClientExecution(request);
        return executeDeleteByteMatchSet(request);
    }
}
public class ContainerTracker { /** * Remove a container from this container ( if stored ) and return its descriptor * @ param containerId id to remove * @ return descriptor of the container removed or < code > null < / code > */ public synchronized ContainerShutdownDescriptor removeContainer ( String containerId ) { } }
ContainerShutdownDescriptor descriptor = shutdownDescriptorPerContainerMap . remove ( containerId ) ; if ( descriptor != null ) { removeContainerIdFromLookupMaps ( containerId ) ; removeDescriptorFromPomLabelMap ( descriptor ) ; } return descriptor ;
public class Pattern { /** * Starts a new pattern sequence . The provided pattern is the initial pattern * of the new sequence . * @ param group the pattern to begin with * @ param afterMatchSkipStrategy the { @ link AfterMatchSkipStrategy . SkipStrategy } to use after each match . * @ return The first pattern of a pattern sequence */ public static < T , F extends T > GroupPattern < T , F > begin ( final Pattern < T , F > group , final AfterMatchSkipStrategy afterMatchSkipStrategy ) { } }
return new GroupPattern < > ( null , group , ConsumingStrategy . STRICT , afterMatchSkipStrategy ) ;
public class UpdatePreferencesServlet {
    /**
     * Move a tab left or right.
     *
     * @param request the current request
     * @param response the current response
     * @param sourceId node ID of tab to move
     * @param method insertBefore or appendAfter. If appendAfter, tab is added as last tab (parent of destinationId).
     * @param destinationId insertBefore: node ID of tab to move sourceId before. appendAfter: node ID of another tab
     * @throws IOException
     */
    @RequestMapping(method = RequestMethod.POST, params = "action=moveTab")
    public ModelAndView moveTab(HttpServletRequest request, HttpServletResponse response, @RequestParam(value = "sourceID") String sourceId, @RequestParam String method, @RequestParam(value = "elementID") String destinationId) throws IOException {
        IUserInstance ui = userInstanceManager.getUserInstance(request);
        UserPreferencesManager upm = (UserPreferencesManager) ui.getPreferencesManager();
        IUserLayoutManager ulm = upm.getUserLayoutManager();
        // Locale drives which translation of the success/error message is returned.
        final Locale locale = RequestContextUtils.getLocale(request);
        // If we're moving this element before another one, we need
        // to know what the target is. If there's no target, just
        // assume we're moving it to the very end of the list.
        String siblingId = null;
        if ("insertBefore".equals(method)) siblingId = destinationId;
        try {
            // move the node as requested and save the layout
            if (!ulm.moveNode(sourceId, ulm.getParentId(destinationId), siblingId)) {
                // moveNode returning false means the layout rejected the move (e.g. locked tab).
                logger.warn("Failed to move tab in user layout. moveNode returned false");
                response.setStatus(HttpServletResponse.SC_FORBIDDEN);
                return new ModelAndView("jsonView", Collections.singletonMap("response", getMessage("error.move.tab", "There was an issue moving the tab, please refresh the page and try again.", locale)));
            }
            ulm.saveUserLayout();
        } catch (PortalException e) {
            return handlePersistError(request, response, e);
        }
        return new ModelAndView("jsonView", Collections.singletonMap("response", getMessage("success.move.tab", "Tab moved successfully", locale)));
    }
}
public class Gauge { /** * Defines if an inner shadow should be drawn on the gauge * background . * @ param ENABLED */ public void setInnerShadowEnabled ( final boolean ENABLED ) { } }
if ( null == innerShadowEnabled ) { _innerShadowEnabled = ENABLED ; fireUpdateEvent ( REDRAW_EVENT ) ; } else { innerShadowEnabled . set ( ENABLED ) ; }
public class Ifc4PackageImpl {
    /**
     * Returns the EClass for IfcRelConnectsWithEccentricity, resolving it lazily
     * from the registered Ifc4 package on first access.
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @generated
     */
    @Override
    public EClass getIfcRelConnectsWithEccentricity() {
        // Lazily resolve and cache the classifier (index 540 in the generated model).
        if (ifcRelConnectsWithEccentricityEClass == null) {
            ifcRelConnectsWithEccentricityEClass = (EClass) EPackage.Registry.INSTANCE.getEPackage(Ifc4Package.eNS_URI).getEClassifiers().get(540);
        }
        return ifcRelConnectsWithEccentricityEClass;
    }
}
public class UpgradeInputByteBufferUtil {
    /**
     * Sets the ReadListener provided by the application to this stream.
     * Once the ReadListener is set we will kick off the initial read.
     *
     * @param readListenerl the application-supplied listener; must not be null
     * @param srtUpgradeStream the upgraded input stream the read callback reports to
     * @throws NullPointerException if {@code readListenerl} is null
     * @throws IllegalStateException if a ReadListener has already been set
     */
    public void setupReadListener(ReadListener readListenerl, SRTUpgradeInputStream31 srtUpgradeStream) {
        if (readListenerl == null) {
            if (TraceComponent.isAnyTracingEnabled() && tc.isErrorEnabled())
                Tr.error(tc, "readlistener.is.null");
            throw new NullPointerException(Tr.formatMessage(tc, "readlistener.is.null"));
        }
        // Only one ReadListener may ever be registered on this stream.
        if (_rl != null) {
            if (TraceComponent.isAnyTracingEnabled() && tc.isErrorEnabled())
                Tr.error(tc, "readlistener.already.started");
            throw new IllegalStateException(Tr.formatMessage(tc, "readlistener.already.started"));
        }
        // Save off the current Thread data by creating the ThreadContextManager. Then pass it into the callback
        ThreadContextManager tcm = new ThreadContextManager();
        _tcpChannelCallback = new UpgradeReadCallback(readListenerl, this, tcm, srtUpgradeStream);
        _rl = readListenerl;
        // Not ready until the initial async read completes.
        _isReady = false;
        // Mark the connection as having an upgraded listener before kicking off the read.
        _upConn.getVirtualConnection().getStateMap().put(TransportConstants.UPGRADED_LISTENER, "true");
        if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) {
            Tr.debug(tc, "setupReadListener, Starting the initial read");
        }
        initialRead();
        if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) {
            Tr.debug(tc, "setupReadListener, ReadListener set : " + _rl);
        }
    }
}
public class DistributedFileSystem { /** * Returns the stat information about the file . * @ throws FileNotFoundException if the file does not exist . */ public FileStatus getFileStatus ( Path f ) throws IOException { } }
FileStatus fi = dfs . getFileInfo ( getPathName ( f ) ) ; if ( fi != null ) { fi . makeQualified ( this ) ; return fi ; } else { throw new FileNotFoundException ( "File does not exist: " + f ) ; }
public class SiteSwitcherHandlerInterceptor { /** * Creates a site switcher that redirects to a < code > m . < / code > domain for normal site requests that either * originate from a mobile device or indicate a mobile site preference . * Uses a { @ link CookieSitePreferenceRepository } that saves a cookie that is shared between the two domains . */ public static SiteSwitcherHandlerInterceptor mDot ( String serverName , Boolean tabletIsMobile ) { } }
return new SiteSwitcherHandlerInterceptor ( StandardSiteSwitcherHandlerFactory . mDot ( serverName , tabletIsMobile ) ) ;
public class ClientEvents { /** * Register a client listener that uses a query DSL based filter . The listener is expected to be annotated such that * { @ link org . infinispan . client . hotrod . annotation . ClientListener # useRawData } = true and { @ link * org . infinispan . client . hotrod . annotation . ClientListener # filterFactoryName } and { @ link * org . infinispan . client . hotrod . annotation . ClientListener # converterFactoryName } are equal to { @ link * Filters # QUERY _ DSL _ FILTER _ FACTORY _ NAME } * @ param remoteCache the remote cache to attach the listener * @ param listener the listener instance * @ param query the query to be used for filtering and conversion ( if projections are used ) */ public static void addClientQueryListener ( RemoteCache < ? , ? > remoteCache , Object listener , Query query ) { } }
ClientListener l = ReflectionUtil . getAnnotation ( listener . getClass ( ) , ClientListener . class ) ; if ( l == null ) { throw log . missingClientListenerAnnotation ( listener . getClass ( ) . getName ( ) ) ; } if ( ! l . useRawData ( ) ) { throw log . clientListenerMustUseRawData ( listener . getClass ( ) . getName ( ) ) ; } if ( ! l . filterFactoryName ( ) . equals ( Filters . QUERY_DSL_FILTER_FACTORY_NAME ) ) { throw log . clientListenerMustUseDesignatedFilterConverterFactory ( Filters . QUERY_DSL_FILTER_FACTORY_NAME ) ; } if ( ! l . converterFactoryName ( ) . equals ( Filters . QUERY_DSL_FILTER_FACTORY_NAME ) ) { throw log . clientListenerMustUseDesignatedFilterConverterFactory ( Filters . QUERY_DSL_FILTER_FACTORY_NAME ) ; } Object [ ] factoryParams = makeFactoryParams ( query ) ; remoteCache . addClientListener ( listener , factoryParams , null ) ;