signature
stringlengths
43
39.1k
implementation
stringlengths
0
450k
public class ModelsImpl { /** * Gets information about the composite entity models . * @ param appId The application ID . * @ param versionId The version ID . * @ param listCompositeEntitiesOptionalParameter the object representing the optional parameters to be set before calling this API * @ throws IllegalArgumentException thrown if parameters fail the validation * @ return the observable to the List & lt ; CompositeEntityExtractor & gt ; object */ public Observable < ServiceResponse < List < CompositeEntityExtractor > > > listCompositeEntitiesWithServiceResponseAsync ( UUID appId , String versionId , ListCompositeEntitiesOptionalParameter listCompositeEntitiesOptionalParameter ) { } }
if ( this . client . endpoint ( ) == null ) { throw new IllegalArgumentException ( "Parameter this.client.endpoint() is required and cannot be null." ) ; } if ( appId == null ) { throw new IllegalArgumentException ( "Parameter appId is required and cannot be null." ) ; } if ( versionId == null ) { throw new IllegalArgumentException ( "Parameter versionId is required and cannot be null." ) ; } final Integer skip = listCompositeEntitiesOptionalParameter != null ? listCompositeEntitiesOptionalParameter . skip ( ) : null ; final Integer take = listCompositeEntitiesOptionalParameter != null ? listCompositeEntitiesOptionalParameter . take ( ) : null ; return listCompositeEntitiesWithServiceResponseAsync ( appId , versionId , skip , take ) ;
public class RuntimeEnvironment { /** * Returns the registered output gate with index < code > pos < / code > . * @ param index the index of the output gate to return * @ return the output gate at index < code > pos < / code > or < code > null < / code > if no such index exists */ public OutputGate getOutputGate ( int index ) { } }
if ( index < this . outputGates . size ( ) ) { return this . outputGates . get ( index ) ; } return null ;
public class PoolableConnection { /** * Method createStatement . * @ param resultSetType * @ param resultSetConcurrency * @ return Statement * @ throws SQLException * @ see java . sql . Connection # createStatement ( int , int ) */ @ Override public Statement createStatement ( int resultSetType , int resultSetConcurrency ) throws SQLException { } }
// return new // NativeStatement ( internalConn . createStatement ( resultSetType , // resultSetConcurrency ) , this ) ; return internalConn . createStatement ( resultSetType , resultSetConcurrency ) ;
public class AmqpFilterAdapter { /** * - - - - - AmqpSecureMessage - - - - - */ @ Override public void messageReceived ( NextFilter nextFilter , S session , AmqpSecureMessage message ) throws Exception { } }
nextFilter . messageReceived ( session , message ) ;
public class CSSErrorStrategy {
    /**
     * Consumes tokens until the lexer state is function-balanced and
     * a token from {@code follow} is matched.
     *
     * @param recognizer the parser whose token stream is consumed
     * @param follow the set of token types at which recovery may stop
     * @param mode the lexer-state recovery mode used for the balance check
     * @param ls the reference lexer state to balance against
     */
    public void consumeUntil(Parser recognizer, IntervalSet follow, CSSLexerState.RecoveryMode mode, CSSLexerState ls) {
        CSSToken t;
        boolean finish;
        TokenStream input = recognizer.getInputStream();
        do {
            // Peek at the next token without consuming it yet.
            Token next = input.LT(1);
            if (next instanceof CSSToken) {
                t = (CSSToken) input.LT(1);
                if (t.getType() == Token.EOF) {
                    logger.trace("token eof ");
                    break;
                }
            } else
                break; /* not a CSSToken, probably EOF */
            // Stop once the lexer state is balanced AND the token is in the follow set;
            // otherwise skip (consume) the token and keep scanning.
            finish = (t.getLexerState().isBalanced(mode, ls, t) && follow.contains(t.getType()));
            if (!finish) {
                logger.trace("Skipped: {}", t);
                input.consume();
            }
        } while (!finish);
    }
}
public class BizwifiAPI { /** * Wi - Fi设备管理 - 查询设备 * 可通过指定分页或具体门店ID的方式 , 查询当前MP账号下指定门店连网成功的设备信息 。 一次最多能查询20个门店的设备信息 。 * @ param accessToken accessToken * @ param deviceList deviceList * @ return DeviceListResult */ public static DeviceListResult deviceList ( String accessToken , DeviceList deviceList ) { } }
return deviceList ( accessToken , JsonUtil . toJSONString ( deviceList ) ) ;
public class OverrideService {
    /**
     * Increase the priority of an overrideId by swapping its priority value with
     * the enabled override that immediately precedes it in priority order.
     *
     * @param overrideId ID of override
     * @param ordinal which occurrence of the overrideId to move (1-based; the same
     *        override can be enabled multiple times on a path)
     * @param pathId ID of path containing override
     * @param clientUUID UUID of client
     */
    public void increasePriority(int overrideId, int ordinal, int pathId, String clientUUID) {
        logger.info("Increase priority");
        // origPriority/origId: the row being promoted; newPriority/newId: the row
        // directly above it (last row seen before the match), whose priority it takes.
        int origPriority = -1;
        int newPriority = -1;
        int origId = 0;
        int newId = 0;
        PreparedStatement statement = null;
        ResultSet results = null;
        try (Connection sqlConnection = sqlService.getConnection()) {
            results = null;
            statement = sqlConnection.prepareStatement("SELECT * FROM " + Constants.DB_TABLE_ENABLED_OVERRIDE + " WHERE " + Constants.ENABLED_OVERRIDES_PATH_ID + " = ?" + " AND " + Constants.GENERIC_CLIENT_UUID + " = ?" + " ORDER BY " + Constants.ENABLED_OVERRIDES_PRIORITY);
            statement.setInt(1, pathId);
            statement.setString(2, clientUUID);
            results = statement.executeQuery();
            int ordinalCount = 0;
            // Walk rows in ascending priority; remember the row just before the
            // ordinal-th occurrence of overrideId so the two can swap priorities.
            while (results.next()) {
                if (results.getInt(Constants.ENABLED_OVERRIDES_OVERRIDE_ID) == overrideId) {
                    ordinalCount++;
                    if (ordinalCount == ordinal) {
                        origPriority = results.getInt(Constants.ENABLED_OVERRIDES_PRIORITY);
                        origId = results.getInt(Constants.GENERIC_ID);
                        break;
                    }
                }
                newPriority = results.getInt(Constants.ENABLED_OVERRIDES_PRIORITY);
                newId = results.getInt(Constants.GENERIC_ID);
            }
        } catch (Exception e) {
            // NOTE(review): stack trace printed but failure otherwise swallowed;
            // the update phase below will then silently no-op (sentinels stay -1).
            e.printStackTrace();
        } finally {
            try {
                if (results != null) {
                    results.close();
                }
            } catch (Exception e) {
            }
            try {
                if (statement != null) {
                    statement.close();
                }
            } catch (Exception e) {
            }
        }
        try (Connection sqlConnection = sqlService.getConnection()) {
            // update priorities: swap the priority values of the two rows found above.
            // Runs only when both a match and a predecessor were found.
            if (origPriority != -1 && newPriority != -1) {
                statement = sqlConnection.prepareStatement("UPDATE " + Constants.DB_TABLE_ENABLED_OVERRIDE + " SET " + Constants.ENABLED_OVERRIDES_PRIORITY + "=?" + " WHERE " + Constants.GENERIC_ID + "=?");
                statement.setInt(1, origPriority);
                statement.setInt(2, newId);
                statement.executeUpdate();
                statement.close();
                statement = sqlConnection.prepareStatement("UPDATE " + Constants.DB_TABLE_ENABLED_OVERRIDE + " SET " + Constants.ENABLED_OVERRIDES_PRIORITY + "=?" + " WHERE " + Constants.GENERIC_ID + "=?");
                statement.setInt(1, newPriority);
                statement.setInt(2, origId);
                statement.executeUpdate();
            }
        } catch (Exception e) {
            // NOTE(review): exception swallowed entirely here — a half-applied swap
            // (first UPDATE succeeded, second failed) is not rolled back. TODO confirm
            // whether callers tolerate this.
        } finally {
            try {
                if (statement != null) {
                    statement.close();
                }
            } catch (Exception e) {
            }
        }
    }
}
public class DownloadRunner { /** * Construct all required command line options */ private static Options constructCliOptions ( ) { } }
Options options = new Options ( ) ; Option packageUri = Option . builder ( "u" ) . desc ( "Uri indicating from where to download the file" ) . longOpt ( CliArgs . TOPOLOGY_PACKAGE_URI . text ) . hasArgs ( ) . argName ( CliArgs . TOPOLOGY_PACKAGE_URI . text ) . build ( ) ; Option destination = Option . builder ( "f" ) . desc ( "Destination to store the downloaded file" ) . longOpt ( CliArgs . EXTRACT_DESTINATION . text ) . hasArgs ( ) . argName ( CliArgs . EXTRACT_DESTINATION . text ) . build ( ) ; Option heronHome = Option . builder ( "d" ) . desc ( "Directory where heron is installed" ) . longOpt ( CliArgs . HERON_HOME . text ) . hasArgs ( ) . argName ( "heron home dir" ) . build ( ) ; Option configFile = Option . builder ( "p" ) . desc ( "Path of the config files" ) . longOpt ( CliArgs . CONFIG_PATH . text ) . hasArgs ( ) . argName ( "config path" ) . build ( ) ; // candidates : // local : download to client local machine // cluster : download into the container in the cloud Option mode = Option . builder ( "m" ) . desc ( "download mode, cluster or local" ) . longOpt ( CliArgs . MODE . text ) . hasArg ( ) . argName ( "download mode" ) . build ( ) ; options . addOption ( packageUri ) ; options . addOption ( destination ) ; options . addOption ( heronHome ) ; options . addOption ( configFile ) ; options . addOption ( mode ) ; return options ;
public class cacheobject { /** * Use this API to expire cacheobject . */ public static base_response expire ( nitro_service client , cacheobject resource ) throws Exception { } }
cacheobject expireresource = new cacheobject ( ) ; expireresource . locator = resource . locator ; expireresource . url = resource . url ; expireresource . host = resource . host ; expireresource . port = resource . port ; expireresource . groupname = resource . groupname ; expireresource . httpmethod = resource . httpmethod ; return expireresource . perform_operation ( client , "expire" ) ;
public class HttpServletHelper { /** * { @ inheritDoc } */ @ Override protected void redirectForLogout ( ) { } }
String url = ConfigurationProperties . getLogoutUrl ( ) ; if ( Util . empty ( url ) ) { LOG . warn ( "No logout URL specified" ) ; try { getResponse ( ) . getWriter ( ) . write ( "Logged out successfully" ) ; } catch ( IOException e ) { LOG . error ( "Failed to send logout message" , e ) ; } } else { try { getResponse ( ) . sendRedirect ( url ) ; } catch ( IOException e ) { LOG . error ( "Failed to redirect to logout url " + url , e ) ; } }
public class ActivityUtils { /** * Returns a { @ code List } of all the opened / active activities . * @ return a { @ code List } of all the opened / active activities */ public ArrayList < Activity > getAllOpenedActivities ( ) { } }
ArrayList < Activity > activities = new ArrayList < Activity > ( ) ; Iterator < WeakReference < Activity > > activityStackIterator = activityStack . iterator ( ) ; while ( activityStackIterator . hasNext ( ) ) { Activity activity = activityStackIterator . next ( ) . get ( ) ; if ( activity != null ) activities . add ( activity ) ; } return activities ;
public class Composite { /** * Contents of the composite . */ public String contents ( ) { } }
StringBuffer buf = new StringBuffer ( ) ; synchronized ( buf ) { for ( int i = 0 ; i < elements . size ( ) ; i ++ ) { Object element = elements . get ( i ) ; if ( element == null ) buf . append ( "null" ) ; else buf . append ( element . toString ( ) ) ; } } return buf . toString ( ) ;
public class SuggestionsOrdererFeatureExtractor {
    /**
     * Compute features for training or prediction of a ranking model for suggestions.
     *
     * @param suggestions candidate corrections for {@code word}
     * @param word the misspelled word being corrected
     * @param sentence the analyzed sentence containing the word
     * @param startPos start offset of the word in the sentence
     * @return correction candidates, features for the match in general, features specific to candidates
     */
    public Pair<List<SuggestedReplacement>, SortedMap<String, Float>> computeFeatures(List<String> suggestions, String word, AnalyzedSentence sentence, int startPos) {
        if (suggestions.isEmpty()) {
            return Pair.of(Collections.emptyList(), Collections.emptySortedMap());
        }
        // topN <= 0 means "consider all suggestions".
        if (topN <= 0) {
            topN = suggestions.size();
        }
        List<String> topSuggestions = suggestions.subList(0, Math.min(suggestions.size(), topN));
        // EditDistance<Integer> levenshteinDistance = new LevenshteinDistance(4);
        EditDistance levenstheinDistance = new EditDistance(word, EditDistance.DistanceAlgorithm.Damerau);
        SimilarityScore<Double> jaroWrinklerDistance = new JaroWinklerDistance();
        List<Feature> features = new ArrayList<>(topSuggestions.size());
        for (String candidate : topSuggestions) {
            // Unigram and trigram language-model probabilities for the candidate.
            double prob1 = languageModel.getPseudoProbability(Collections.singletonList(candidate)).getProb();
            double prob3 = LanguageModelUtils.get3gramProbabilityFor(language, languageModel, startPos, sentence, candidate);
            // double prob4 = LanguageModelUtils.get4gramProbabilityFor(language, languageModel, startPos, sentence, candidate);
            long wordCount = ((BaseLanguageModel) languageModel).getCount(candidate);
            // NOTE(review): the second argument 3 presumably caps the edit distance
            // against the word captured in the EditDistance constructor — TODO confirm
            // the semantics of EditDistance.compare(String, int).
            int levenstheinDist = levenstheinDistance.compare(candidate, 3);
            double jaroWrinklerDist = jaroWrinklerDistance.apply(word, candidate);
            DetailedDamerauLevenstheinDistance.Distance detailedDistance = DetailedDamerauLevenstheinDistance.compare(word, candidate);
            features.add(new Feature(prob1, prob3, wordCount, levenstheinDist, detailedDistance, jaroWrinklerDist, candidate));
        }
        // "noop" disables re-ranking, keeping the speller's original order.
        if (!"noop".equals(score)) {
            features.sort(Feature::compareTo);
        }
        // logger.trace("Features for '%s' in '%s': %n", word, sentence.getText());
        // features.stream().map(Feature::toString).forEach(logger::trace);
        List<String> words = features.stream().map(Feature::getWord).collect(Collectors.toList());
        // compute general features, not tied to candidates
        SortedMap<String, Float> matchData = new TreeMap<>();
        matchData.put("candidateCount", (float) words.size());
        // Wrap each ranked candidate with its per-candidate feature vector.
        List<SuggestedReplacement> suggestionsData = features.stream().map(f -> {
            SuggestedReplacement s = new SuggestedReplacement(f.getWord());
            s.setFeatures(f.getData());
            return s;
        }).collect(Collectors.toList());
        return Pair.of(suggestionsData, matchData);
    }
}
public class Metadata { /** * < p > Takes the { @ link Method } definition of a request and discovers the { @ link RequestMethod } which * has been specified using annotated metadata . < / p > * @ param definition * the { @ link Method } definition for the request whose HTTP method is to be discovered * < br > < br > * @ return the { @ link RequestMethod } for the given request definition ; else { @ code null } if no * { @ link RequestMethod } metadata can be found * < br > < br > * @ since 1.3.0 */ public static RequestMethod findMethod ( Method definition ) { } }
Request request = definition . getAnnotation ( Request . class ) ; if ( request != null ) { return request . method ( ) ; } Annotation [ ] annotations = definition . getAnnotations ( ) ; for ( Annotation annotation : annotations ) { if ( annotation . annotationType ( ) . isAnnotationPresent ( Request . class ) ) { return annotation . annotationType ( ) . getAnnotation ( Request . class ) . method ( ) ; } } return null ;
public class ServletHolderMBean { public String [ ] getPaths ( ) { } }
ServletHandler handler = ( ServletHandler ) _holder . getHttpHandler ( ) ; Map servletMap = handler . getServletMap ( ) ; ArrayList paths = new ArrayList ( servletMap . size ( ) ) ; Iterator iter = servletMap . entrySet ( ) . iterator ( ) ; while ( iter . hasNext ( ) ) { Map . Entry entry = ( Map . Entry ) iter . next ( ) ; if ( entry . getValue ( ) == _holder ) paths . add ( entry . getKey ( ) ) ; } return ( String [ ] ) paths . toArray ( new String [ paths . size ( ) ] ) ;
public class CsvParser { /** * Check whether the token can be parsed to a number . * @ param state the state of the parser * @ param token the token * @ return true if token matches a double */ private boolean isTokenNumberParsable ( State state , String token ) { } }
if ( token . isEmpty ( ) ) { return true ; } return state . mDouble . reset ( token ) . matches ( ) ;
public class FacesConfigTypeImpl { /** * If not already created , a new < code > behavior < / code > element will be created and returned . * Otherwise , the first existing < code > behavior < / code > element will be returned . * @ return the instance defined for the element < code > behavior < / code > */ public FacesConfigBehaviorType < FacesConfigType < T > > getOrCreateBehavior ( ) { } }
List < Node > nodeList = childNode . get ( "behavior" ) ; if ( nodeList != null && nodeList . size ( ) > 0 ) { return new FacesConfigBehaviorTypeImpl < FacesConfigType < T > > ( this , "behavior" , childNode , nodeList . get ( 0 ) ) ; } return createBehavior ( ) ;
public class MBeanRegistry { /** * Unregister MBean . * @ param bean */ public void unregister ( ZKMBeanInfo bean ) { } }
if ( bean == null ) return ; String path = mapBean2Path . get ( bean ) ; try { unregister ( path , bean ) ; } catch ( InstanceNotFoundException e ) { LOG . warn ( "InstanceNotFoundException during unregister usually means more than one Zookeeper server has been running in a single JVM" ) ; LOG . warn ( "InstanceNotFoundException during unregister can be safely ignored during automated tests." ) ; } catch ( JMException e ) { LOG . warn ( "Error during unregister" , e ) ; } mapBean2Path . remove ( bean ) ; mapName2Bean . remove ( bean . getName ( ) ) ;
public class NoDeleteModifyHandler { /** * Constructor . * @ param record My owner ( usually passed as null , and set on addListener in setOwner ( ) ) . */ public void init ( Record record , boolean bNoDelete , boolean bNoModify ) { } }
// For this to work right , the booking number field needs a listener to re - select this file whenever it changes m_bNoDelete = bNoDelete ; m_bNoModify = bNoModify ; super . init ( record ) ;
public class TableStatServiceImpl {
    /**
     * Looks up the tableStat for the given pipeline/DataMediaPair in the database:
     * if it exists, incrementally updates the table statistics; otherwise inserts
     * the record. When the stat carries a time window, it is also recorded into
     * the behavior history (directly, or merged into the in-memory buffer when
     * batching is enabled via statUnit).
     *
     * @param stat the statistics delta to apply; must not be null
     */
    public void updateTableStat(TableStat stat) {
        // NOTE(review): JUnit's Assert is used as a production null-check here
        // (throws AssertionError) — consider an explicit null check instead.
        Assert.assertNotNull(stat);
        // Try an update first; an affected-row count of 0 means the row does not
        // exist yet, so fall back to insert.
        int affect = tableStatDao.modifyTableStat(tableStatModelToDo(stat));
        if (affect == 0) {
            tableStatDao.insertTableStat(tableStatModelToDo(stat));
        }
        if (stat.getStartTime() != null && stat.getEndTime() != null) {
            if (statUnit <= 0) {
                // Batching disabled: write behavior history immediately.
                insertBehaviorHistory(stat);
            } else {
                synchronized (tableStats) {
                    // Buffer for the history table (flushed elsewhere per statUnit).
                    TableStat old = tableStats.get(stat.getDataMediaPairId());
                    if (old != null) {
                        // Merge the delta into the buffered entry: sum the counters
                        // and widen the time window to cover both stats.
                        old.setInsertCount(stat.getInsertCount() + old.getInsertCount());
                        old.setUpdateCount(stat.getUpdateCount() + old.getUpdateCount());
                        old.setDeleteCount(stat.getDeleteCount() + old.getDeleteCount());
                        old.setFileCount(stat.getFileCount() + old.getFileCount());
                        old.setFileSize(stat.getFileSize() + old.getFileSize());
                        if (stat.getEndTime().after(old.getEndTime())) {
                            old.setEndTime(stat.getEndTime());
                        }
                        if (stat.getStartTime().before(old.getStartTime())) {
                            old.setStartTime(stat.getStartTime());
                        }
                    } else {
                        tableStats.put(stat.getDataMediaPairId(), stat);
                    }
                }
            }
        }
    }
}
public class VmwareIaasHandler { /** * ( non - Javadoc ) * @ see net . roboconf . target . api . AbstractThreadedTargetHandler # machineConfigurator ( * net . roboconf . target . api . TargetHandlerParameters , java . lang . String ) */ @ Override public MachineConfigurator machineConfigurator ( TargetHandlerParameters parameters , String machineId ) { } }
String userData = "" ; try { userData = UserDataHelpers . writeUserDataAsString ( parameters . getMessagingProperties ( ) , parameters . getDomain ( ) , parameters . getApplicationName ( ) , parameters . getScopedInstancePath ( ) ) ; } catch ( IOException e ) { this . logger . severe ( "User data could not be generated." ) ; Utils . logException ( this . logger , e ) ; } String rootInstanceName = InstanceHelpers . findRootInstancePath ( parameters . getScopedInstancePath ( ) ) ; return new VmWareMachineConfigurator ( parameters . getTargetProperties ( ) , userData , rootInstanceName , parameters . getScopedInstance ( ) ) ;
public class ProcessedCommand {
    /**
     * Returns a description String based on the defined command and options.
     * Useful when printing "help" info etc.
     *
     * @param commandName the name shown in the usage line; falls back to
     *        {@code name()} when null or empty
     * @return the formatted multi-line help text
     */
    public String printHelp(String commandName) {
        // Find the widest formatted option so all descriptions line up.
        int maxLength = 0;
        int width = 80;
        List<ProcessedOption> opts = getOptions();
        for (ProcessedOption o : opts) {
            if (o.getFormattedLength() > maxLength)
                maxLength = o.getFormattedLength();
        }
        StringBuilder sb = new StringBuilder();
        // first line: "Usage: <name> [<options>] <args...>"
        sb.append("Usage: ");
        if (commandName == null || commandName.length() == 0)
            sb.append(name());
        else
            sb.append(commandName);
        if (opts.size() > 0)
            sb.append(" [<options>]");
        if (argument != null) {
            if (argument.isTypeAssignableByResourcesOrFile())
                sb.append(" <file>");
            else
                sb.append(" <").append(argument.getFieldName()).append(">");
        }
        if (arguments != null) {
            if (arguments.isTypeAssignableByResourcesOrFile())
                sb.append(" [<files>]");
            else
                sb.append(" [<").append(arguments.getFieldName()).append(">]");
        }
        sb.append(Config.getLineSeparator());
        // second line: command description
        sb.append(description()).append(Config.getLineSeparator());
        // options and arguments sections, each entry formatted to the common width
        if (opts.size() > 0)
            sb.append(Config.getLineSeparator()).append("Options:").append(Config.getLineSeparator());
        for (ProcessedOption o : opts)
            sb.append(o.getFormattedOption(2, maxLength + 4, width)).append(Config.getLineSeparator());
        if (arguments != null) {
            sb.append(Config.getLineSeparator()).append("Arguments:").append(Config.getLineSeparator());
            sb.append(arguments.getFormattedOption(2, maxLength + 4, width)).append(Config.getLineSeparator());
        }
        if (argument != null) {
            sb.append(Config.getLineSeparator()).append("Argument:").append(Config.getLineSeparator());
            sb.append(argument.getFormattedOption(2, maxLength + 4, width)).append(Config.getLineSeparator());
        }
        return sb.toString();
    }
}
public class GeometryConverterService { /** * Convert a GTS coordinate to a Geomajas coordinate . * @ param coordinate jTS coordinate * @ return Geomajas coordinate * @ throws JtsConversionException conversion failed */ public static Coordinate fromJts ( com . vividsolutions . jts . geom . Coordinate coordinate ) throws JtsConversionException { } }
if ( coordinate == null ) { throw new JtsConversionException ( "Cannot convert null argument" ) ; } return new Coordinate ( coordinate . x , coordinate . y ) ;
public class AdminInitiateAuthRequestMarshaller {
    /**
     * Marshall the given parameter object into the protocol representation.
     * Each field is written through its pre-built marshalling binding; the
     * order of the calls mirrors the wire layout produced by the generator.
     *
     * @param adminInitiateAuthRequest the request to marshall; must not be null
     * @param protocolMarshaller the target marshaller
     * @throws SdkClientException if the argument is null or marshalling fails
     */
    public void marshall(AdminInitiateAuthRequest adminInitiateAuthRequest, ProtocolMarshaller protocolMarshaller) {
        if (adminInitiateAuthRequest == null) {
            throw new SdkClientException("Invalid argument passed to marshall(...)");
        }
        try {
            protocolMarshaller.marshall(adminInitiateAuthRequest.getUserPoolId(), USERPOOLID_BINDING);
            protocolMarshaller.marshall(adminInitiateAuthRequest.getClientId(), CLIENTID_BINDING);
            protocolMarshaller.marshall(adminInitiateAuthRequest.getAuthFlow(), AUTHFLOW_BINDING);
            protocolMarshaller.marshall(adminInitiateAuthRequest.getAuthParameters(), AUTHPARAMETERS_BINDING);
            protocolMarshaller.marshall(adminInitiateAuthRequest.getClientMetadata(), CLIENTMETADATA_BINDING);
            protocolMarshaller.marshall(adminInitiateAuthRequest.getAnalyticsMetadata(), ANALYTICSMETADATA_BINDING);
            protocolMarshaller.marshall(adminInitiateAuthRequest.getContextData(), CONTEXTDATA_BINDING);
        } catch (Exception e) {
            // Wrap any marshalling failure, preserving the cause.
            throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
        }
    }
}
public class ExternalEntryPointHelper { /** * Based on the input for scanning annotations , look for @ ExternalEntryPoint and get the specific name black list elements . * @ param predefinedNameBlacklist black list * @ param method method to check * @ param scanEntryPointAnnotation annotation * @ return Set */ public static Set < String > getConsolidatedNameBlacklist ( List < String > predefinedNameBlacklist , Method method , boolean scanEntryPointAnnotation ) { } }
final Set < String > consolidatedBlacklist = new HashSet < String > ( predefinedNameBlacklist ) ; if ( scanEntryPointAnnotation ) { // first we look into the class level if ( method . getDeclaringClass ( ) . isAnnotationPresent ( ExternalEntryPoint . class ) ) { final ExternalEntryPoint externalEntryPoint = method . getDeclaringClass ( ) . getAnnotation ( ExternalEntryPoint . class ) ; if ( externalEntryPoint . nameBlacklist ( ) != null ) { consolidatedBlacklist . addAll ( Arrays . asList ( externalEntryPoint . nameBlacklist ( ) ) ) ; } } // then we look at the method level if ( method . isAnnotationPresent ( ExternalEntryPoint . class ) ) { final ExternalEntryPoint externalEntryPoint = method . getAnnotation ( ExternalEntryPoint . class ) ; if ( externalEntryPoint . nameBlacklist ( ) != null ) { consolidatedBlacklist . addAll ( Arrays . asList ( externalEntryPoint . nameBlacklist ( ) ) ) ; } } } return consolidatedBlacklist ;
public class DateTimeConverter { /** * < p > Return a < code > DateFormat < / code > instance to use for formatting * and parsing in this { @ link Converter } . < / p > * @ param locale The < code > Locale < / code > used to select formatting * and parsing conventions * @ throws ConverterException if no instance can be created */ private DateFormat getDateFormat ( Locale locale ) { } }
// PENDING ( craigmcc ) - Implement pooling if needed for performance ? if ( pattern == null && type == null ) { throw new IllegalArgumentException ( "Either pattern or type must" + " be specified." ) ; } DateFormat df ; if ( pattern != null ) { df = new SimpleDateFormat ( pattern , locale ) ; } else if ( type . equals ( "both" ) ) { df = DateFormat . getDateTimeInstance ( getStyle ( dateStyle ) , getStyle ( timeStyle ) , locale ) ; } else if ( type . equals ( "date" ) ) { df = DateFormat . getDateInstance ( getStyle ( dateStyle ) , locale ) ; } else if ( type . equals ( "time" ) ) { df = DateFormat . getTimeInstance ( getStyle ( timeStyle ) , locale ) ; } else { // PENDING ( craigmcc ) - i18n throw new IllegalArgumentException ( "Invalid type: " + type ) ; } df . setLenient ( false ) ; return ( df ) ;
public class ST_GraphAnalysis { /** * Calculate centrality indices on the nodes and edges of a graph * constructed from the input table . * @ param connection Connection * @ param inputTable Input table * @ param orientation Global orientation * @ return True if the calculation was successful * @ throws SQLException * @ throws NoSuchMethodException * @ throws InstantiationException * @ throws IllegalAccessException * @ throws InvocationTargetException */ public static boolean doGraphAnalysis ( Connection connection , String inputTable , String orientation ) throws SQLException , NoSuchMethodException , InstantiationException , IllegalAccessException , InvocationTargetException { } }
return doGraphAnalysis ( connection , inputTable , orientation , null ) ;
public class ComponentAPI { /** * 生成授权页 URL * @ param component _ appid 第三方平台ID * @ param pre _ auth _ code 预授权码 * @ param redirect _ uri 重定向URI * @ return URL */ public static String componentloginpage ( String component_appid , String pre_auth_code , String redirect_uri ) { } }
return componentloginpage ( component_appid , pre_auth_code , redirect_uri , null ) ;
public class NativeJDBCDriverHelper { /** * The DB2 Type 2 JDBC driver for z / OS attaches the JDBC connection to the * thread where it is running until another thread requires the connection . * An IRB is scheduled to move the connection to the new thread . In a * WebSphere for z / OS environment , the original thread may be in a WLM * SELECT WORK block , which prevents IRBs from running . The new thread * may block indefinitely . This method alerts the DB2 for z / OS JDBC * driver that the current thread is about to go into a WLM SELECT WORK * block and the connection should be removed from the thread . * This method is meant to be a happy medium between enabling and * disabling DB2DISABLETAF . This performs essentially the same function , * except that WebSphere attempts to tell DB2 when to disassociate the * affinity , instead of having DB2 disassociate after each JDBC call . */ public static void threadSwitch ( ) { } }
if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) Tr . entry ( tc , "threadSwitch" ) ; // Not in Liberty // / * We can ' t tell whether the user actually uses the DB2 Type 2 JCC * / // / * JDBC driver on z / OS . Only look for it once . * / // synchronized ( _ lock ) // if ( _ loadAttempted = = false ) // / * Only try to load the class if we ' re on z / OS . * / // if ( TxProperties . NATIVE _ CONTEXTS _ USED ) // / * If we can find the JDBC driver class , dump all the * / // / * methods for debugging purposes . * / // if ( TraceComponent . isAnyTracingEnabled ( ) & & tc . isDebugEnabled ( ) ) // try // final Class externalOps = ExtClassLoader . // getInstance ( ) . loadClass ( EXTERNAL _ OPS ) ; // final Method [ ] methods = externalOps . // getDeclaredMethods ( ) ; // Tr . debug ( tc , " ExternalOps methods " , methods ) ; // catch ( Throwable t ) // Tr . debug ( tc , " Debug check failed " , t ) ; // / * Try to reflect the method that allows us to notify * / // / * DB2 of the thread switch . * / // try // final Class externalOps = ExtClassLoader . // getInstance ( ) . loadClass ( EXTERNAL _ OPS ) ; // _ dissociateCurrentAttachmentFromTCB = // externalOps . getMethod ( // " dissociateCurrentAttachmentFromTCB " , // new Class [ 0 ] ) ; // catch ( Throwable t ) // if ( TraceComponent . isAnyTracingEnabled ( ) & & tc . isEventEnabled ( ) ) // Tr . event ( tc , " Could not load DB2 " + // " driver method " , t ) ; // / * Assuming we were able to load the thread switch method , * / // / * try to run the method . The JDBC driver will throw an * / // / * exception if the environment is invalid . * / // if ( _ dissociateCurrentAttachmentFromTCB ! = null ) // try // _ dissociateCurrentAttachmentFromTCB . // invoke ( null , ( Object [ ] ) null ) ; // catch ( Throwable t ) // _ dissociateCurrentAttachmentFromTCB = null ; // if ( TraceComponent . isAnyTracingEnabled ( ) & & tc . isEventEnabled ( ) ) // Tr . 
event ( tc , " Could not drive DB2 driver " + // " method " , t ) ; // / * Indicate that we ' ve already tried to load the class so * / // / * that subsequent requests don ' t do it . * / // _ loadAttempted = true ; // / * If we were able to load the thread switch method , drive it now . * / // / * We can be reasonably confident that it will work since we tried * / // / * to drive it when the method was loaded . * / // if ( _ dissociateCurrentAttachmentFromTCB ! = null ) // try // _ dissociateCurrentAttachmentFromTCB . invoke ( null , ( Object [ ] ) null ) ; // catch ( Throwable t ) // if ( TraceComponent . isAnyTracingEnabled ( ) & & tc . isEventEnabled ( ) ) // Tr . event ( tc , " Could not drive DB2 driver method " , t ) ; if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) Tr . exit ( tc , "threadSwitch" ) ;
public class AbstractFSMParser {
    /**
     * Parses the FSM file line-by-line.
     *
     * At first this method expects to parse data definitions, and calls
     * {@link #parseDataDefinition(StreamTokenizer)} for each data definition.
     * After "---" is encountered {@link #checkDataDefinitions(StreamTokenizer)}
     * is called, and this method expects to parse state vectors. The behavior is
     * similar for state vectors and transitions. For each line this method will
     * increment {@link #partLineNumber}, and reset it when a new part in the FSM
     * file begins.
     *
     * Note that {@link StreamTokenizer} allows one to push back tokens. This is
     * used whenever we have checked the type of token we are going to read.
     *
     * @param reader the source of the FSM file
     * @throws FSMParseException when the FSM source is invalid
     * @throws IOException when the FSM source could not be read
     */
    protected void parse(Reader reader) throws FSMParseException, IOException {
        // The parser is a three-part state machine: DataDefinition -> StateVectors -> Transitions.
        Part part = Part.DataDefinition;
        partLineNumber = 0;
        final StreamTokenizer streamTokenizer = getStreamTokenizer(reader);
        while (streamTokenizer.nextToken() != StreamTokenizer.TT_EOF) {
            // Push back the token we just peeked so each case re-reads it itself.
            streamTokenizer.pushBack();
            switch (part) {
            case DataDefinition: {
                if (streamTokenizer.nextToken() == StreamTokenizer.TT_WORD && "---".equals(streamTokenizer.sval)) {
                    // we entered the part with the state vectors
                    part = Part.StateVectors;
                    partLineNumber = 0;
                    checkDataDefinitions(streamTokenizer);
                } else {
                    streamTokenizer.pushBack();
                    parseDataDefinition(streamTokenizer);
                }
                break;
            }
            case StateVectors: {
                if (streamTokenizer.nextToken() == StreamTokenizer.TT_WORD && "---".equals(streamTokenizer.sval)) {
                    // we entered the part with the transitions.
                    part = Part.Transitions;
                    partLineNumber = 0;
                    checkStateVectors(streamTokenizer);
                } else {
                    streamTokenizer.pushBack();
                    parseStateVector(streamTokenizer);
                }
                break;
            }
            case Transitions: {
                parseTransition(streamTokenizer);
                break;
            }
            default:
                throw new AssertionError();
            }
            // consume all tokens until EOL is reached
            while (streamTokenizer.nextToken() != StreamTokenizer.TT_EOL) {
            }
            partLineNumber++;
        }
        // Final validation of the last part after EOF.
        checkTransitions(streamTokenizer);
        reader.close();
    }
}
public class ViterbiBuilder { /** * Build lattice from input text * @ param text source text for the lattice * @ return built lattice , not null */ public ViterbiLattice build ( String text ) { } }
int textLength = text . length ( ) ; ViterbiLattice lattice = new ViterbiLattice ( textLength + 2 ) ; lattice . addBos ( ) ; int unknownWordEndIndex = - 1 ; // index of the last character of unknown word for ( int startIndex = 0 ; startIndex < textLength ; startIndex ++ ) { // If no token ends where current token starts , skip this index if ( lattice . tokenEndsWhereCurrentTokenStarts ( startIndex ) ) { String suffix = text . substring ( startIndex ) ; boolean found = processIndex ( lattice , startIndex , suffix ) ; // In the case of normal mode , it doesn ' t process unknown word greedily . if ( searchMode || unknownWordEndIndex <= startIndex ) { int [ ] categories = characterDefinitions . lookupCategories ( suffix . charAt ( 0 ) ) ; for ( int i = 0 ; i < categories . length ; i ++ ) { int category = categories [ i ] ; unknownWordEndIndex = processUnknownWord ( category , i , lattice , unknownWordEndIndex , startIndex , suffix , found ) ; } } } } if ( useUserDictionary ) { processUserDictionary ( text , lattice ) ; } lattice . addEos ( ) ; return lattice ;
public class ZipEntry {
    /**
     * Converts Java time (milliseconds since the epoch, interpreted in the default time zone via
     * {@link Date}) to the packed MS-DOS date/time format used in ZIP entries:
     * bits 25-31 year-1980, 21-24 month (1-12), 16-20 day, 11-15 hours, 5-10 minutes,
     * 0-4 seconds/2.
     *
     * Fix: the year term is widened to long BEFORE shifting. Previously
     * {@code (year - 1980) << 25} was evaluated in int arithmetic, which overflows the int sign
     * bit for years >= 2044 and produced a negative (invalid) DOS time.
     *
     * @param time milliseconds since the epoch
     * @return packed DOS date/time; dates before 1980 clamp to the DOS epoch (1980-01-01)
     */
    private static long javaToDosTime(long time) {
        Date d = new Date(time);
        int year = d.getYear() + 1900;
        if (year < 1980) {
            // DOS time cannot represent dates before 1980; return the DOS epoch.
            return (1 << 21) | (1 << 16);
        }
        return ((long) (year - 1980)) << 25
                | (d.getMonth() + 1) << 21
                | d.getDate() << 16
                | d.getHours() << 11
                | d.getMinutes() << 5
                | d.getSeconds() >> 1;
    }
}
public class jazz_license { /** * Use this operation to get license information . */ public static jazz_license get ( nitro_service client ) throws Exception { } }
jazz_license resource = new jazz_license ( ) ; resource . validate ( "get" ) ; return ( ( jazz_license [ ] ) resource . get_resources ( client ) ) [ 0 ] ;
public class SampleSupportDialogFragment { /** * Retrieve a new instance of the sample fragment . * @ param radius blur radius . * @ param downScaleFactor down scale factor . * @ param dimming dimming effect . * @ param debug debug policy . * @ param mBlurredActionBar blur affect on actionBar policy . * @ param useRenderScript use of RenderScript * @ return well instantiated fragment . */ public static SampleSupportDialogFragment newInstance ( int radius , float downScaleFactor , boolean dimming , boolean debug , boolean mBlurredActionBar , boolean useRenderScript ) { } }
SampleSupportDialogFragment fragment = new SampleSupportDialogFragment ( ) ; Bundle args = new Bundle ( ) ; args . putInt ( BUNDLE_KEY_BLUR_RADIUS , radius ) ; args . putFloat ( BUNDLE_KEY_DOWN_SCALE_FACTOR , downScaleFactor ) ; args . putBoolean ( BUNDLE_KEY_DIMMING , dimming ) ; args . putBoolean ( BUNDLE_KEY_DEBUG , debug ) ; args . putBoolean ( BUNDLE_KEY_BLURRED_ACTION_BAR , mBlurredActionBar ) ; args . putBoolean ( BUNDLE_KEY_USE_RENDERSCRIPT , useRenderScript ) ; fragment . setArguments ( args ) ; return fragment ;
public class StreamBase { @ Override public S slice ( final long from , final long to ) { } }
checkArgNotNegative ( from , "from" ) ; checkArgNotNegative ( to , "to" ) ; checkArgument ( to >= from , "'to' can't be less than `from`" ) ; return from == 0 ? limit ( to ) : skip ( from ) . limit ( to - from ) ;
public class GroovyMBean {
    /**
     * Description of the operation.
     *
     * Renders a Java-like signature: return type, operation name, and a parenthesized,
     * comma-separated list of "type name" parameter pairs.
     *
     * @param operation the operation to describe
     * @return pretty-printed description
     */
    protected String describeOperation(MBeanOperationInfo operation) {
        final StringBuilder description = new StringBuilder();
        description.append(operation.getReturnType())
                   .append(' ')
                   .append(operation.getName())
                   .append('(');
        String separator = "";
        for (final MBeanParameterInfo parameter : operation.getSignature()) {
            description.append(separator)
                       .append(parameter.getType())
                       .append(' ')
                       .append(parameter.getName());
            separator = ", ";
        }
        return description.append(')').toString();
    }
}
public class SystemOutLoggingTool { /** * { @ inheritDoc } */ @ Override public void info ( Object object , Object ... objects ) { } }
if ( level <= INFO ) { StringBuilder result = new StringBuilder ( ) ; result . append ( object ) ; for ( Object obj : objects ) { result . append ( obj ) ; } infoString ( result . toString ( ) ) ; }
public class ProteinPocketFinder { /** * Method which finds the pocket , with a simple nearest neighbour clustering . The points * which should be clustered or form a pocket can be determined with : * minPSPocket , minPSCluster , linkageRadius , and pocketSize . */ private void findPockets ( ) { } }
int [ ] dim = gridGenerator . getDim ( ) ; // logger . debug ( " FIND POCKETS > dimx : " + dim [ 0 ] + " dimy : " + dim [ 1] // + " dimz : " + dim [ 2 ] + " linkageRadius > " + linkageRadius // + " latticeConstant > " + latticeConstant + " pocketSize : " // + pocketSize + " minPSPocket : " + minPSPocket + " minPSCluster : " // + minPSCluster ) ; // int pointsVisited = 0 ; / / Debugging // int significantPointsVisited = 0 ; / / Debugging for ( int x = 0 ; x < dim [ 0 ] ; x ++ ) { for ( int y = 0 ; y < dim [ 1 ] ; y ++ ) { for ( int z = 0 ; z < dim [ 2 ] ; z ++ ) { // logger . debug . print ( " x : " + x + " y : " + y + " z : " + z ) ; Point3d start = new Point3d ( x , y , z ) ; // pointsVisited + + ; if ( this . grid [ x ] [ y ] [ z ] >= minPSPocket & ! visited . containsKey ( x + "." + y + "." + z ) ) { List < Point3d > subPocket = new ArrayList < Point3d > ( ) ; // logger . debug . print ( " new Point : " + grid [ x ] [ y ] [ z ] ) ; // significantPointsVisited + + ; // logger . debug ( " visited : " + pointsVisited ) ; subPocket = this . clusterPSPPocket ( start , subPocket , dim ) ; if ( subPocket != null && subPocket . size ( ) >= pocketSize ) { pockets . add ( subPocket ) ; } // logger . debug ( " Points visited : " + pointsVisited + " // subPocketSize : " + subPocket . size ( ) + " // pocketsSize : " + pockets . size ( ) // + " hashtable : " + visited . size ( ) ) ; } } } } // try { // logger . debug ( " - > > > > # pockets : " + pockets . size ( ) // + " significantPointsVisited : " + significantPointsVisited // + " keys : " + visited . size ( ) + " PointsVisited : " // + pointsVisited ) ; // } catch ( Exception ex1 ) { // logger . debug // . println ( " Problem in System . out due to " + ex1 . toString ( ) ) ;
public class WavefrontMeterRegistry { /** * VisibleForTesting */ void addMetric ( Stream . Builder < WavefrontMetricLineData > metrics , Meter . Id id , @ Nullable String suffix , long wallTime , double value ) { } }
if ( ! Double . isFinite ( value ) ) { return ; } Meter . Id fullId = id ; if ( suffix != null ) { fullId = idWithSuffix ( id , suffix ) ; } String name = getConventionName ( fullId ) ; String source = config . source ( ) ; Map < String , String > tags = getTagsAsMap ( id ) ; try { String lineData = Utils . metricToLineData ( name , value , wallTime , source , tags , "unknown" ) ; metrics . add ( new WavefrontMetricLineData ( lineData , false ) ) ; } catch ( IllegalArgumentException e ) { logger . error ( "failed to convert metric to Wavefront format: " + fullId . getName ( ) , e ) ; }
public class QName { /** * < p > < code > QName < / code > derived from parsing the formatted * < code > String < / code > . < / p > * < p > If the < code > String < / code > is < code > null < / code > or does not conform to * { @ link # toString ( ) QName . toString ( ) } formatting , an * < code > IllegalArgumentException < / code > is thrown . < / p > * < p > < em > The < code > String < / code > < strong > MUST < / strong > be in the * form returned by { @ link # toString ( ) QName . toString ( ) } . < / em > < / p > * < p > The commonly accepted way of representing a < code > QName < / code > * as a < code > String < / code > was * < a href = " http : / / jclark . com / xml / xmlns . htm " > defined < / a > * by James Clark . Although this is not a < em > standard < / em > * specification , it is in common use , e . g . { @ link * javax . xml . transform . Transformer # setParameter ( String name , Object value ) } . * This implementation parses a < code > String < / code > formatted * as : " { " + Namespace URI + " } " + local part . If the Namespace * URI < code > . equals ( XMLConstants . NULL _ NS _ URI ) < / code > , only the * local part should be provided . < / p > * < p > The prefix value < strong > < em > CANNOT < / em > < / strong > be * represented in the < code > String < / code > and will be set to * { @ link javax . xml . XMLConstants # DEFAULT _ NS _ PREFIX * XMLConstants . DEFAULT _ NS _ PREFIX } . < / p > * < p > This method does not do full validation of the resulting * < code > QName < / code > . * < p > The Namespace URI is not validated as a * < a href = " http : / / www . ietf . org / rfc / rfc2396 . txt " > URI reference < / a > . * The local part is not validated as a * < a href = " http : / / www . w3 . org / TR / REC - xml - names / # NT - NCName " > NCName < / a > * as specified in * < a href = " http : / / www . w3 . org / TR / REC - xml - names / " > Namespaces in XML < / a > . 
< / p > * @ param qNameAsString < code > String < / code > representation * of the < code > QName < / code > * @ throws IllegalArgumentException When < code > qNameAsString < / code > is * < code > null < / code > or malformed * @ return < code > QName < / code > corresponding to the given < code > String < / code > * @ see # toString ( ) QName . toString ( ) */ public static QName valueOf ( String qNameAsString ) { } }
// null is not valid if ( qNameAsString == null ) { throw new IllegalArgumentException ( "cannot create QName from \"null\" or \"\" String" ) ; } // " " local part is valid to preserve compatible behavior with QName 1.0 if ( qNameAsString . length ( ) == 0 ) { return new QName ( "" , qNameAsString , "" ) ; } // local part only ? int colon = qNameAsString . lastIndexOf ( ":" ) ; if ( colon == - 1 ) { return new QName ( "" , qNameAsString , "" ) ; } // Namespace URI and local part specified return new QName ( qNameAsString . substring ( 0 , colon ) , qNameAsString . substring ( colon + 1 ) , "" ) ;
public class StreamReader { /** * Read a nested table . Instantiates the supplied reader class to * extract the data . * @ param reader table reader class * @ return table rows */ public List < MapRow > readTable ( TableReader reader ) throws IOException { } }
reader . read ( ) ; return reader . getRows ( ) ;
public class AnnotationClassReader {
    /**
     * Reads UTF8 string in {@link #b b}.
     *
     * Implemented as a small state machine over the raw bytes: {@code st == 0} expects the first
     * byte of a character, {@code st == 1} expects the final continuation byte of a 2- or 3-byte
     * character, and {@code st == 2} expects the middle continuation byte of a 3-byte character.
     * Only 1- to 3-byte sequences are decoded (NOTE(review): consistent with the class-file
     * "modified UTF-8" encoding, which has no 4-byte forms — confirm the input source).
     *
     * @param index start offset of the UTF8 string to be read.
     * @param utfLen length of the UTF8 string to be read.
     * @param buf buffer to be used to read the string. This buffer must be
     *        sufficiently large. It is not automatically resized.
     * @return the String corresponding to the specified UTF8 string.
     */
    private String readUTF(int index, final int utfLen, final char[] buf) {
        int endIndex = index + utfLen;
        byte[] b = this.b;
        int strLen = 0;   // number of chars decoded into buf so far
        int c;            // current raw byte (sign-extended until masked)
        int st = 0;       // decoder state, see javadoc
        char cc = 0;      // high bits accumulated for the multi-byte char in progress
        while (index < endIndex) {
            c = b[index++];
            switch (st) {
                case 0:
                    c = c & 0xFF; // undo sign extension of the byte
                    if (c < 0x80) {
                        // 0xxxxxxx: single-byte (ASCII) character, emit directly.
                        buf[strLen++] = (char) c;
                    } else if (c < 0xE0 && c > 0xBF) {
                        // 110xxxxx 10xxxxxx: lead byte of a 2-byte character.
                        cc = (char) (c & 0x1F);
                        st = 1;
                    } else {
                        // 1110xxxx 10xxxxxx 10xxxxxx: lead byte of a 3-byte character.
                        cc = (char) (c & 0x0F);
                        st = 2;
                    }
                    break;
                case 1:
                    // byte 2 of 2-byte char or byte 3 of 3-byte char: character complete.
                    buf[strLen++] = (char) ((cc << 6) | (c & 0x3F));
                    st = 0;
                    break;
                case 2:
                    // byte 2 of 3-byte char: accumulate six more bits, one byte remains.
                    cc = (char) ((cc << 6) | (c & 0x3F));
                    st = 1;
                    break;
                default:
                    // unreachable: st only ever holds 0, 1 or 2
            }
        }
        return new String(buf, 0, strLen);
    }
}
public class BlobContainersInner { /** * Creates a new container under the specified account as described by request body . The container resource includes metadata and properties for that container . It does not include a list of the blobs contained by the container . * @ param resourceGroupName The name of the resource group within the user ' s subscription . The name is case insensitive . * @ param accountName The name of the storage account within the specified resource group . Storage account names must be between 3 and 24 characters in length and use numbers and lower - case letters only . * @ param containerName The name of the blob container within the specified storage account . Blob container names must be between 3 and 63 characters in length and use numbers , lower - case letters and dash ( - ) only . Every dash ( - ) character must be immediately preceded and followed by a letter or number . * @ param publicAccess Specifies whether data in the container may be accessed publicly and the level of access . Possible values include : ' Container ' , ' Blob ' , ' None ' * @ param metadata A name - value pair to associate with the container as metadata . * @ param serviceCallback the async ServiceCallback to handle successful and failed responses . * @ throws IllegalArgumentException thrown if parameters fail the validation * @ return the { @ link ServiceFuture } object */ public ServiceFuture < BlobContainerInner > createAsync ( String resourceGroupName , String accountName , String containerName , PublicAccess publicAccess , Map < String , String > metadata , final ServiceCallback < BlobContainerInner > serviceCallback ) { } }
return ServiceFuture . fromResponse ( createWithServiceResponseAsync ( resourceGroupName , accountName , containerName , publicAccess , metadata ) , serviceCallback ) ;
public class Configuration { /** * Returns the value associated with the given key as a boolean . * @ param key * the key pointing to the associated value * @ param defaultValue * the default value which is returned in case there is no value associated with the given key * @ return the ( default ) value associated with the given key */ public boolean getBoolean ( String key , boolean defaultValue ) { } }
Object o = getRawValue ( key ) ; if ( o == null ) { return defaultValue ; } return convertToBoolean ( o ) ;
public class VoldemortConfig {
    /**
     * This function populates the various strongly-typed variables of this class by extracting
     * the values from {@link VoldemortConfig#allProps}.
     *
     * At this point, all defaults should have been resolved properly, so we can assume that all
     * properties are present. If that's not the case, the correct behavior is to bubble up an
     * UndefinedPropertyException.
     *
     * This code is isolated into its own function to prevent future code from trying to extract
     * configurations from anywhere else besides {@link VoldemortConfig#allProps}.
     *
     * @throws UndefinedPropertyException if any required property has not been set.
     */
    private void initializeStateFromProps() throws UndefinedPropertyException {
        // --- Core server identity and directory layout ---
        this.nodeId = this.allProps.getInt(NODE_ID, INVALID_NODE_ID);
        this.voldemortHome = this.allProps.getString(VOLDEMORT_HOME);
        this.dataDirectory = this.allProps.getString(DATA_DIRECTORY);
        this.metadataDirectory = this.allProps.getString(METADATA_DIRECTORY);

        // --- BDB-JE storage engine ---
        this.bdbCacheSize = this.allProps.getBytes(BDB_CACHE_SIZE);
        this.bdbWriteTransactions = this.allProps.getBoolean(BDB_WRITE_TRANSACTIONS);
        this.bdbFlushTransactions = this.allProps.getBoolean(BDB_FLUSH_TRANSACTIONS);
        this.bdbDataDirectory = this.allProps.getString(BDB_DATA_DIRECTORY);
        this.bdbMaxLogFileSize = this.allProps.getBytes(BDB_MAX_LOGFILE_SIZE);
        this.bdbBtreeFanout = this.allProps.getInt(BDB_BTREE_FANOUT);
        this.bdbMaxDelta = this.allProps.getInt(BDB_MAX_DELTA);
        this.bdbBinDelta = this.allProps.getInt(BDB_BIN_DELTA);
        this.bdbCheckpointBytes = this.allProps.getLong(BDB_CHECKPOINT_INTERVAL_BYTES);
        this.bdbCheckpointMs = this.allProps.getLong(BDB_CHECKPOINT_INTERVAL_MS);
        this.bdbOneEnvPerStore = this.allProps.getBoolean(BDB_ONE_ENV_PER_STORE);
        this.bdbCleanerMinFileUtilization = this.allProps.getInt(BDB_CLEANER_MIN_FILE_UTILIZATION);
        this.bdbCleanerMinUtilization = this.allProps.getInt(BDB_CLEANER_MIN_UTILIZATION);
        this.bdbCleanerThreads = this.allProps.getInt(BDB_CLEANER_THREADS);
        this.bdbCleanerBytesInterval = this.allProps.getLong(BDB_CLEANER_INTERVAL_BYTES);
        this.bdbCleanerLookAheadCacheSize = this.allProps.getInt(BDB_CLEANER_LOOKAHEAD_CACHE_SIZE);
        this.bdbLockTimeoutMs = this.allProps.getLong(BDB_LOCK_TIMEOUT_MS);
        this.bdbLockNLockTables = this.allProps.getInt(BDB_LOCK_N_LOCK_TABLES);
        this.bdbLogFaultReadSize = this.allProps.getInt(BDB_LOG_FAULT_READ_SIZE);
        this.bdbLogIteratorReadSize = this.allProps.getInt(BDB_LOG_ITERATOR_READ_SIZE);
        this.bdbFairLatches = this.allProps.getBoolean(BDB_FAIR_LATCHES);
        this.bdbCheckpointerHighPriority = this.allProps.getBoolean(BDB_CHECKPOINTER_HIGH_PRIORITY);
        this.bdbCleanerMaxBatchFiles = this.allProps.getInt(BDB_CLEANER_MAX_BATCH_FILES);
        this.bdbReadUncommitted = this.allProps.getBoolean(BDB_LOCK_READ_UNCOMMITTED);
        this.bdbStatsCacheTtlMs = this.allProps.getLong(BDB_STATS_CACHE_TTL_MS);
        this.bdbExposeSpaceUtilization = this.allProps.getBoolean(BDB_EXPOSE_SPACE_UTILIZATION);
        this.bdbMinimumSharedCache = this.allProps.getLong(BDB_MINIMUM_SHARED_CACHE);
        this.bdbCleanerLazyMigration = this.allProps.getBoolean(BDB_CLEANER_LAZY_MIGRATION);
        this.bdbCacheModeEvictLN = this.allProps.getBoolean(BDB_CACHE_EVICTLN);
        this.bdbMinimizeScanImpact = this.allProps.getBoolean(BDB_MINIMIZE_SCAN_IMPACT);
        this.bdbPrefixKeysWithPartitionId = this.allProps.getBoolean(BDB_PREFIX_KEYS_WITH_PARTITIONID);
        this.bdbLevelBasedEviction = this.allProps.getBoolean(BDB_EVICT_BY_LEVEL);
        this.bdbCheckpointerOffForBatchWrites = this.allProps.getBoolean(BDB_CHECKPOINTER_OFF_BATCH_WRITES);
        this.bdbCleanerFetchObsoleteSize = this.allProps.getBoolean(BDB_CLEANER_FETCH_OBSOLETE_SIZE);
        this.bdbCleanerAdjustUtilization = this.allProps.getBoolean(BDB_CLEANER_ADJUST_UTILIZATION);
        this.bdbRecoveryForceCheckpoint = this.allProps.getBoolean(BDB_RECOVERY_FORCE_CHECKPOINT);
        this.bdbRawPropertyString = this.allProps.getString(BDB_RAW_PROPERTY_STRING);

        // --- Read-only stores (build / fetch / swap) ---
        this.numReadOnlyVersions = this.allProps.getInt(READONLY_BACKUPS);
        this.readOnlySearchStrategy = this.allProps.getString(READONLY_SEARCH_STRATEGY);
        this.readOnlyStorageDir = this.allProps.getString(READONLY_DATA_DIRECTORY);
        this.readOnlyDeleteBackupTimeMs = this.allProps.getInt(READONLY_DELETE_BACKUP_MS);
        this.readOnlyFetcherMaxBytesPerSecond = this.allProps.getBytes(FETCHER_MAX_BYTES_PER_SEC);
        this.readOnlyFetcherReportingIntervalBytes = this.allProps.getBytes(FETCHER_REPORTING_INTERVAL_BYTES);
        this.readOnlyFetcherThrottlerInterval = this.allProps.getInt(FETCHER_THROTTLER_INTERVAL);
        this.readOnlyFetchRetryCount = this.allProps.getInt(FETCHER_RETRY_COUNT);
        this.readOnlyFetchRetryDelayMs = this.allProps.getLong(FETCHER_RETRY_DELAY_MS);
        this.readOnlyLoginIntervalMs = this.allProps.getLong(FETCHER_LOGIN_INTERVAL_MS);
        this.defaultStorageSpaceQuotaInKB = this.allProps.getLong(DEFAULT_STORAGE_SPACE_QUOTA_IN_KB);
        this.fetcherBufferSize = (int) this.allProps.getBytes(HDFS_FETCHER_BUFFER_SIZE);
        this.fetcherSocketTimeout = this.allProps.getInt(HDFS_FETCHER_SOCKET_TIMEOUT);
        this.readOnlyKeytabPath = this.allProps.getString(READONLY_KEYTAB_PATH);
        this.readOnlyKerberosUser = this.allProps.getString(READONLY_KERBEROS_USER);
        this.hadoopConfigPath = this.allProps.getString(READONLY_HADOOP_CONFIG_PATH);
        this.readOnlyKerberosKdc = this.allProps.getString(READONLY_KERBEROS_KDC);
        this.readOnlykerberosRealm = this.allProps.getString(READONLY_KERBEROS_REALM);
        this.fileFetcherClass = this.allProps.getString(FILE_FETCHER_CLASS);
        this.readOnlyStatsFileEnabled = this.allProps.getBoolean(READONLY_STATS_FILE_ENABLED);
        this.readOnlyMaxVersionsStatsFile = this.allProps.getInt(READONLY_STATS_FILE_MAX_VERSIONS);
        this.readOnlyMaxValueBufferAllocationSize = this.allProps.getInt(READONLY_MAX_VALUE_BUFFER_ALLOCATION_SIZE);
        this.readOnlyCompressionCodec = this.allProps.getString(READONLY_COMPRESSION_CODEC);
        this.readOnlyModifyProtocol = this.allProps.getString(READONLY_MODIFY_PROTOCOL);
        this.readOnlyModifyPort = this.allProps.getInt(READONLY_MODIFY_PORT);
        this.readOnlyOmitPort = this.allProps.getBoolean(READONLY_OMIT_PORT);
        this.bouncyCastleEnabled = this.allProps.getBoolean(USE_BOUNCYCASTLE_FOR_SSL);
        this.readOnlyBuildPrimaryReplicasOnly = this.allProps.getBoolean(READONLY_BUILD_PRIMARY_REPLICAS_ONLY);

        // --- High-availability push ---
        this.highAvailabilityPushClusterId = this.allProps.getString(PUSH_HA_CLUSTER_ID);
        this.highAvailabilityPushLockPath = this.allProps.getString(PUSH_HA_LOCK_PATH);
        this.highAvailabilityPushLockImplementation = this.allProps.getString(PUSH_HA_LOCK_IMPLEMENTATION);
        this.highAvailabilityPushMaxNodeFailures = this.allProps.getInt(PUSH_HA_MAX_NODE_FAILURES);
        this.highAvailabilityPushEnabled = this.allProps.getBoolean(PUSH_HA_ENABLED);
        this.highAvailabilityStateAutoCleanUp = this.allProps.getBoolean(PUSH_HA_STATE_AUTO_CLEANUP);

        // --- MySQL storage engine ---
        this.mysqlUsername = this.allProps.getString(MYSQL_USER);
        this.mysqlPassword = this.allProps.getString(MYSQL_PASSWORD);
        this.mysqlHost = this.allProps.getString(MYSQL_HOST);
        this.mysqlPort = this.allProps.getInt(MYSQL_PORT);
        this.mysqlDatabaseName = this.allProps.getString(MYSQL_DATABASE);

        // --- Artificial per-operation delays (testing only) ---
        this.testingSlowQueueingDelays = new OpTimeMap(0);
        this.testingSlowQueueingDelays.setOpTime(VoldemortOpCode.GET_OP_CODE, this.allProps.getInt(TESTING_SLOW_QUEUEING_GET_MS));
        this.testingSlowQueueingDelays.setOpTime(VoldemortOpCode.GET_ALL_OP_CODE, this.allProps.getInt(TESTING_SLOW_QUEUEING_GETALL_MS));
        this.testingSlowQueueingDelays.setOpTime(VoldemortOpCode.GET_VERSION_OP_CODE, this.allProps.getInt(TESTING_SLOW_QUEUEING_GETVERSIONS_MS));
        this.testingSlowQueueingDelays.setOpTime(VoldemortOpCode.PUT_OP_CODE, this.allProps.getInt(TESTING_SLOW_QUEUEING_PUT_MS));
        this.testingSlowQueueingDelays.setOpTime(VoldemortOpCode.DELETE_OP_CODE, this.allProps.getInt(TESTING_SLOW_QUEUEING_DELETE_MS));
        this.testingSlowConcurrentDelays = new OpTimeMap(0);
        this.testingSlowConcurrentDelays.setOpTime(VoldemortOpCode.GET_OP_CODE, this.allProps.getInt(TESTING_SLOW_CONCURRENT_GET_MS));
        this.testingSlowConcurrentDelays.setOpTime(VoldemortOpCode.GET_ALL_OP_CODE, this.allProps.getInt(TESTING_SLOW_CONCURRENT_GETALL_MS));
        this.testingSlowConcurrentDelays.setOpTime(VoldemortOpCode.GET_VERSION_OP_CODE, this.allProps.getInt(TESTING_SLOW_CONCURRENT_GETVERSIONS_MS));
        this.testingSlowConcurrentDelays.setOpTime(VoldemortOpCode.PUT_OP_CODE, this.allProps.getInt(TESTING_SLOW_CONCURRENT_PUT_MS));
        this.testingSlowConcurrentDelays.setOpTime(VoldemortOpCode.DELETE_OP_CODE, this.allProps.getInt(TESTING_SLOW_CONCURRENT_DELETE_MS));

        // --- Request thread pools and streaming ---
        this.maxThreads = this.allProps.getInt(MAX_THREADS);
        this.coreThreads = this.allProps.getInt(CORE_THREADS);
        // Admin client should have less threads but very high buffer size.
        this.adminMaxThreads = this.allProps.getInt(ADMIN_MAX_THREADS);
        this.adminCoreThreads = this.allProps.getInt(ADMIN_CORE_THREADS);
        this.adminStreamBufferSize = (int) this.allProps.getBytes(ADMIN_STREAMS_BUFFER_SIZE);
        this.adminConnectionTimeout = this.allProps.getInt(ADMIN_CLIENT_CONNECTION_TIMEOUT_SEC);
        this.adminSocketTimeout = this.allProps.getInt(ADMIN_CLIENT_SOCKET_TIMEOUT_SEC);
        this.streamMaxReadBytesPerSec = this.allProps.getBytes(STREAM_READ_BYTE_PER_SEC);
        this.streamMaxWriteBytesPerSec = this.allProps.getBytes(STREAM_WRITE_BYTE_PER_SEC);
        this.multiVersionStreamingPutsEnabled = this.allProps.getBoolean(USE_MULTI_VERSION_STREAMING_PUTS);

        // --- Socket / NIO transport ---
        this.socketTimeoutMs = this.allProps.getInt(SOCKET_TIMEOUT_MS);
        this.socketBufferSize = (int) this.allProps.getBytes(SOCKET_BUFFER_SIZE);
        this.socketKeepAlive = this.allProps.getBoolean(SOCKET_KEEPALIVE);
        this.useNioConnector = this.allProps.getBoolean(ENABLE_NIO_CONNECTOR);
        this.nioConnectorKeepAlive = this.allProps.getBoolean(NIO_CONNECTOR_KEEPALIVE);
        this.nioConnectorSelectors = this.allProps.getInt(NIO_CONNECTOR_SELECTORS);
        this.nioAdminConnectorSelectors = this.allProps.getInt(NIO_ADMIN_CONNECTOR_SELECTORS);
        this.nioAdminConnectorKeepAlive = this.allProps.getBoolean(NIO_ADMIN_CONNECTOR_KEEPALIVE);
        // a value <= 0 forces the default to be used
        this.nioAcceptorBacklog = this.allProps.getInt(NIO_ACCEPTOR_BACKLOG);
        this.nioSelectorMaxHeartBeatTimeMs = this.allProps.getLong(NIO_SELECTOR_MAX_HEART_BEAT_TIME_MS);

        // --- Client-side connection and routing timeouts ---
        this.clientSelectors = this.allProps.getInt(CLIENT_SELECTORS);
        this.clientMaxConnectionsPerNode = this.allProps.getInt(CLIENT_MAX_CONNECTIONS_PER_NODE);
        this.clientConnectionTimeoutMs = this.allProps.getInt(CLIENT_CONNECTION_TIMEOUT_MS);
        this.clientRoutingTimeoutMs = this.allProps.getInt(CLIENT_ROUTING_TIMEOUT_MS);
        this.clientTimeoutConfig = new TimeoutConfig(this.clientRoutingTimeoutMs, false);
        this.clientTimeoutConfig.setOperationTimeout(VoldemortOpCode.GET_OP_CODE, this.allProps.getInt(CLIENT_ROUTING_GET_TIMEOUT_MS));
        this.clientTimeoutConfig.setOperationTimeout(VoldemortOpCode.GET_ALL_OP_CODE, this.allProps.getInt(CLIENT_ROUTING_GETALL_TIMEOUT_MS));
        this.clientTimeoutConfig.setOperationTimeout(VoldemortOpCode.PUT_OP_CODE, this.allProps.getInt(CLIENT_ROUTING_PUT_TIMEOUT_MS));
        // NOTE(review): GET_VERSION uses getLong while the other operations use getInt — confirm intended.
        this.clientTimeoutConfig.setOperationTimeout(VoldemortOpCode.GET_VERSION_OP_CODE, this.allProps.getLong(CLIENT_ROUTING_GETVERSIONS_TIMEOUT_MS));
        this.clientTimeoutConfig.setOperationTimeout(VoldemortOpCode.DELETE_OP_CODE, this.allProps.getInt(CLIENT_ROUTING_DELETE_TIMEOUT_MS));
        this.clientTimeoutConfig.setPartialGetAllAllowed(this.allProps.getBoolean(CLIENT_ROUTING_ALLOW_PARTIAL_GETALL));
        this.clientMaxThreads = this.allProps.getInt(CLIENT_MAX_THREADS);
        this.clientThreadIdleMs = this.allProps.getInt(CLIENT_THREAD_IDLE_MS);
        this.clientMaxQueuedRequests = this.allProps.getInt(CLIENT_MAX_QUEUED_REQUESTS);

        // --- Feature toggles for the server services ---
        this.enableHttpServer = this.allProps.getBoolean(HTTP_ENABLE);
        this.enableSocketServer = this.allProps.getBoolean(SOCKET_ENABLE);
        this.enableAdminServer = this.allProps.getBoolean(ADMIN_ENABLE);
        this.enableJmx = this.allProps.getBoolean(JMX_ENABLE);
        this.enableSlop = this.allProps.getBoolean(SLOP_ENABLE);
        this.enableSlopPusherJob = this.allProps.getBoolean(SLOP_PUSHER_ENABLE);
        this.slopMaxWriteBytesPerSec = this.allProps.getBytes(SLOP_WRITE_BYTE_PER_SEC);
        this.enableVerboseLogging = this.allProps.getBoolean(ENABLE_VERBOSE_LOGGING);
        this.enableStatTracking = this.allProps.getBoolean(ENABLE_STAT_TRACKING);
        this.enableServerRouting = this.allProps.getBoolean(ENABLE_SERVER_ROUTING);
        this.enableMetadataChecking = this.allProps.getBoolean(ENABLE_METADATA_CHECKING);
        this.enableGossip = this.allProps.getBoolean(ENABLE_GOSSIP);
        this.enableRebalanceService = this.allProps.getBoolean(ENABLE_REBALANCING);
        this.enableRepair = this.allProps.getBoolean(ENABLE_REPAIR);
        this.enablePruneJob = this.allProps.getBoolean(ENABLE_PRUNEJOB);
        this.enableSlopPurgeJob = this.allProps.getBoolean(ENABLE_SLOP_PURGE_JOB);
        this.enableJmxClusterName = this.allProps.getBoolean(ENABLE_JMX_CLUSTERNAME);
        this.enableQuotaLimiting = this.allProps.getBoolean(ENABLE_QUOTA_LIMITING);
        this.gossipIntervalMs = this.allProps.getInt(GOSSIP_INTERVAL_MS);

        // --- Slop (hinted handoff) ---
        // NOTE(review): slopMaxWriteBytesPerSec is assigned twice — above from
        // SLOP_WRITE_BYTE_PER_SEC and here from SLOP_WRITE_BYTE_PER_SEC1; this second
        // assignment wins. Looks like a legacy-key carry-over — confirm intended.
        this.slopMaxWriteBytesPerSec = this.allProps.getBytes(SLOP_WRITE_BYTE_PER_SEC1);
        this.slopMaxReadBytesPerSec = this.allProps.getBytes(SLOP_READ_BYTE_PER_SEC);
        this.slopStoreType = this.allProps.getString(SLOP_STORE_ENGINE);
        this.slopFrequencyMs = this.allProps.getLong(SLOP_FREQUENCY_MS);
        this.slopBatchSize = this.allProps.getInt(SLOP_BATCH_SIZE);
        this.pusherType = this.allProps.getString(PUSHER_TYPE);
        this.slopZonesDownToTerminate = this.allProps.getInt(SLOP_ZONES_TERMINATE);
        this.autoPurgeDeadSlops = this.allProps.getBoolean(AUTO_PURGE_DEAD_SLOPS);

        // --- Scheduler, storage configs and retention cleanup ---
        this.schedulerThreads = this.allProps.getInt(SCHEDULER_THREADS);
        this.mayInterruptService = this.allProps.getBoolean(SERVICE_INTERRUPTIBLE);
        this.numScanPermits = this.allProps.getInt(NUM_SCAN_PERMITS);
        this.storageConfigurations = this.allProps.getList(STORAGE_CONFIGS);
        this.retentionCleanupFirstStartTimeInHour = this.allProps.getInt(RETENTION_CLEANUP_FIRST_START_HOUR);
        this.retentionCleanupFirstStartDayOfWeek = this.allProps.getInt(RETENTION_CLEANUP_FIRST_START_DAY);
        this.retentionCleanupScheduledPeriodInHour = this.allProps.getInt(RETENTION_CLEANUP_PERIOD_HOURS);
        this.retentionCleanupPinStartTime = this.allProps.getBoolean(RETENTION_CLEANUP_PIN_START_TIME);
        this.enforceRetentionPolicyOnRead = this.allProps.getBoolean(ENFORCE_RETENTION_POLICY_ON_READ);
        this.deleteExpiredValuesOnRead = this.allProps.getBoolean(DELETE_EXPIRED_VALUES_ON_READ);
        this.requestFormatType = RequestFormatType.fromCode(this.allProps.getString(REQUEST_FORMAT));

        // rebalancing parameters
        this.rebalancingTimeoutSec = this.allProps.getLong(REBALANCING_TIMEOUT_SECONDS);
        this.maxParallelStoresRebalancing = this.allProps.getInt(MAX_PARALLEL_STORES_REBALANCING);
        this.usePartitionScanForRebalance = this.allProps.getBoolean(USE_PARTITION_SCAN_FOR_REBALANCE);
        this.maxProxyPutThreads = this.allProps.getInt(MAX_PROXY_PUT_THREADS);

        // --- Failure detector ---
        this.failureDetectorImplementation = this.allProps.getString(FAILUREDETECTOR_IMPLEMENTATION);
        this.failureDetectorBannagePeriod = this.allProps.getLong(FAILUREDETECTOR_BANNAGE_PERIOD);
        this.failureDetectorThreshold = this.allProps.getInt(FAILUREDETECTOR_THRESHOLD);
        this.failureDetectorThresholdCountMinimum = this.allProps.getInt(FAILUREDETECTOR_THRESHOLD_COUNTMINIMUM);
        this.failureDetectorThresholdInterval = this.allProps.getLong(FAILUREDETECTOR_THRESHOLD_INTERVAL);
        this.failureDetectorAsyncRecoveryInterval = this.allProps.getLong(FAILUREDETECTOR_ASYNCRECOVERY_INTERVAL);
        this.failureDetectorCatastrophicErrorTypes = this.allProps.getList(FAILUREDETECTOR_CATASTROPHIC_ERROR_TYPES);
        this.failureDetectorRequestLengthThreshold = this.allProps.getLong(FAILUREDETECTOR_REQUEST_LENGTH_THRESHOLD);

        // network class loader disable by default.
        this.enableNetworkClassLoader = this.allProps.getBoolean(ENABLE_NETWORK_CLASSLOADER);

        // --- REST service --- TODO: REST-Server decide on the numbers
        this.enableRestService = this.allProps.getBoolean(REST_ENABLE);
        this.numRestServiceNettyServerBacklog = this.allProps.getInt(NUM_REST_SERVICE_NETTY_SERVER_BACKLOG);
        this.numRestServiceNettyBossThreads = this.allProps.getInt(NUM_REST_SERVICE_NETTY_BOSS_THREADS);
        this.numRestServiceNettyWorkerThreads = this.allProps.getInt(NUM_REST_SERVICE_NETTY_WORKER_THREADS);
        this.numRestServiceStorageThreads = this.allProps.getInt(NUM_REST_SERVICE_STORAGE_THREADS);
        this.restServiceStorageThreadPoolQueueSize = this.allProps.getInt(REST_SERVICE_STORAGE_THREAD_POOL_QUEUE_SIZE);
        this.maxHttpAggregatedContentLength = this.allProps.getInt(MAX_HTTP_AGGREGATED_CONTENT_LENGTH);

        // --- Maintenance-job throttling ---
        this.repairJobMaxKeysScannedPerSec = this.allProps.getInt(REPAIRJOB_MAX_KEYS_SCANNED_PER_SEC);
        this.pruneJobMaxKeysScannedPerSec = this.allProps.getInt(PRUNEJOB_MAX_KEYS_SCANNED_PER_SEC);
        this.slopPurgeJobMaxKeysScannedPerSec = this.allProps.getInt(SLOP_PURGEJOB_MAX_KEYS_SCANNED_PER_SEC);

        // RocksDB config
        this.rocksdbDataDirectory = this.allProps.getString(ROCKSDB_DATA_DIR);
        this.rocksdbPrefixKeysWithPartitionId = this.allProps.getBoolean(ROCKSDB_PREFIX_KEYS_WITH_PARTITIONID);
        this.rocksdbEnableReadLocks = this.allProps.getBoolean(ROCKSDB_ENABLE_READ_LOCKS);

        this.restrictedConfigs = this.allProps.getList(RESTRICTED_CONFIGS);

        // Node Id auto detection configs
        this.enableNodeIdDetection = this.allProps.getBoolean(ENABLE_NODE_ID_DETECTION, false);
        // validation is defaulted based on node id detection.
        this.validateNodeId = this.allProps.getBoolean(VALIDATE_NODE_ID);
        this.nodeIdImplementation = new HostMatcher();
    }
}
public class MessageUtils { /** * Create Marshaller from the JAXB context . * @ return Marshaller */ public static Marshaller createMarshaller ( ) throws JAXBException { } }
Marshaller marshaller = MessageUtilsHelper . getContext ( ) . createMarshaller ( ) ; marshaller . setProperty ( Marshaller . JAXB_FORMATTED_OUTPUT , Boolean . TRUE ) ; return marshaller ;
public class GenericJTable { /** * { @ inheritDoc } */ @ Override public int [ ] getSelectedRows ( ) { } }
// find the real selected rows . If the rows was sorted the index from // the // model does not fit to the table . final int [ ] selectedRows = super . getSelectedRows ( ) ; final int [ ] sr = new int [ selectedRows . length ] ; for ( int i = 0 ; i < selectedRows . length ; i ++ ) { sr [ i ] = this . convertRowIndexToModel ( selectedRows [ i ] ) ; } return sr ;
public class ConfigDocumentFactory { /** * Parses a string which should be valid HOCON or JSON . * @ param s string to parse * @ param options parse options * @ return the parsed configuration */ public static ConfigDocument parseString ( String s , ConfigParseOptions options ) { } }
return Parseable . newString ( s , options ) . parseConfigDocument ( ) ;
public class CommandForWheeledRobotNavigationImplementation {
    /**
     * Called by our overridden MovementInputFromOptions class.
     * Eases the current velocity towards the target over mInertiaTicks ticks,
     * applies it as forward movement, then re-applies Minecraft's sneak
     * slowdown and updates yaw/pitch.
     *
     * @return true if we've handled the movement; false if the
     *         MovementInputFromOptions class should delegate to the default handling.
     */
    protected boolean updateState() {
        if (!overrideKeyboardInput) {
            return false; // Let the class do the default thing.
        }
        // Update movement: linear ease towards the target velocity, then snap
        // exactly to it once the inertia window has elapsed.
        mTicksSinceLastVelocityChange++;
        if (mTicksSinceLastVelocityChange <= mInertiaTicks) {
            mVelocity += (mTargetVelocity - mVelocity) * ((float) mTicksSinceLastVelocityChange / (float) mInertiaTicks);
        } else {
            mVelocity = mTargetVelocity;
        }
        this.overrideMovement.moveForward = mVelocity;
        // This code comes from the Minecraft MovementInput superclass - needed so as not to give the bot an unfair
        // advantage when sneaking!
        if (this.overrideMovement.sneak) {
            this.overrideMovement.moveStrafe = (float) ((double) this.overrideMovement.moveStrafe * 0.3D);
            this.overrideMovement.moveForward = (float) ((double) this.overrideMovement.moveForward * 0.3D);
        }
        updateYawAndPitch();
        return true;
    }
}
public class SARLValidator { /** * Check if the modifiers for the SARL events . * @ param event the event . */ @ Check public void checkContainerType ( SarlEvent event ) { } }
final XtendTypeDeclaration declaringType = event . getDeclaringType ( ) ; if ( declaringType != null ) { final String name = canonicalName ( declaringType ) ; assert name != null ; error ( MessageFormat . format ( Messages . SARLValidator_32 , name ) , event , null , INVALID_NESTED_DEFINITION ) ; }
public class LinkingUberspector { /** * Tries to create an uberspector instance using reflection . * @ param classname The name of the uberspector class to instantiate . * @ return An instance of the specified Uberspector . If the class cannot be instantiated using the default * constructor , or does not implement { @ link Uberspect } , < code > null < / code > is returned . */ protected Uberspect instantiateUberspector ( String classname ) { } }
Object o = null ; try { o = ClassUtils . getNewInstance ( classname ) ; } catch ( ClassNotFoundException e ) { this . log . warn ( String . format ( "The specified uberspector [%s]" + " does not exist or is not accessible to the current classloader." , classname ) ) ; } catch ( IllegalAccessException e ) { this . log . warn ( String . format ( "The specified uberspector [%s] does not have a public default constructor." , classname ) ) ; } catch ( InstantiationException e ) { this . log . warn ( String . format ( "The specified uberspector [%s] cannot be instantiated." , classname ) ) ; } catch ( ExceptionInInitializerError e ) { this . log . warn ( String . format ( "Exception while instantiating the Uberspector [%s]: %s" , classname , e . getMessage ( ) ) ) ; } if ( ! ( o instanceof Uberspect ) ) { if ( o != null ) { this . log . warn ( "The specified class for Uberspect [" + classname + "] does not implement " + Uberspect . class . getName ( ) ) ; } return null ; } return ( Uberspect ) o ;
public class DfState { /** * Store the specified bond index and mapped query atom ( optional ) * on the stack . * @ param bidx bond index * @ param queryatom query atom - can be null */ private void store ( int bidx , IQueryAtom queryatom ) { } }
++ sptr ; stack [ sptr ] . bidx = bidx ; stack [ sptr ] . iter = null ; if ( queryatom != null ) stack [ sptr ] . atom = queryatom ; else stack [ sptr ] . atom = null ;
public class RemoteAsyncResultReaper { /** * Remove a server - side Future object from the reaper and release its * resources . */ public synchronized void remove ( RemoteAsyncResultImpl asyncResult ) { } }
ivAllRemoteAsyncResults . remove ( asyncResult ) ; releaseResources ( asyncResult ) ; // d690014.3 // If no server - side Future objects and an alarm is set then cancel the alarm . d623593 if ( ivAllRemoteAsyncResults . isEmpty ( ) && ivFuture != null ) { ivFuture . cancel ( false ) ; ivFuture = null ; } if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) ) Tr . debug ( tc , "remove " + asyncResult + "; size=" + ivAllRemoteAsyncResults . size ( ) ) ;
public class ModifyInstanceAttributeRequest {
    /**
     * Returns the block device mappings whose {@code DeleteOnTermination}
     * attribute is being modified for currently attached volumes. If no value
     * is specified for {@code DeleteOnTermination}, the default is {@code true}
     * and the volume is deleted when the instance is terminated. See
     * "Updating the Block Device Mapping when Launching an Instance" in the
     * Amazon EC2 User Guide.
     *
     * @return the (lazily created, never null) list of mapping specifications
     */
    public java.util.List<InstanceBlockDeviceMappingSpecification> getBlockDeviceMappings() {
        // Lazy initialization so callers never observe a null list.
        if (blockDeviceMappings == null) {
            blockDeviceMappings = new com.amazonaws.internal.SdkInternalList<InstanceBlockDeviceMappingSpecification>();
        }
        return blockDeviceMappings;
    }
}
public class MersenneTwister { /** * This generates a coin flip with a probability < tt > probability < / tt > * of returning true , else returning false . < tt > probability < / tt > must * be between 0.0 and 1.0 , inclusive . Not as precise a random real * event as nextBoolean ( double ) , but twice as fast . To explicitly * use this , remember you may need to cast to float first . * @ param probability * @ return ? */ public boolean nextBoolean ( final float probability ) { } }
if ( probability < 0.0f || probability > 1.0f ) throw new IllegalArgumentException ( "probability must be between 0.0 and 1.0 inclusive." ) ; if ( probability == 0.0f ) return false ; // fix half - open issues else if ( probability == 1.0f ) return true ; // fix half - open issues return nextFloat ( ) < probability ;
public class JmxUtils { /** * Register the given object under the package name of the object ' s class * with the given type name . * this method using the platform mbean server as returned by * ManagementFactory . getPlatformMBeanServer ( ) * @ param typeName The name of the type to register * @ param obj The object to register as an mbean */ public static ObjectName registerMbean ( String typeName , Object obj ) { } }
MBeanServer server = ManagementFactory . getPlatformMBeanServer ( ) ; ObjectName name = JmxUtils . createObjectName ( JmxUtils . getPackageName ( obj . getClass ( ) ) , typeName ) ; registerMbean ( server , JmxUtils . createModelMBean ( obj ) , name ) ; return name ;
public class FileReport { /** * Close the file report instance and it ' s underlying writer * @ throws DiffException */ public void close ( ) throws DiffException { } }
try { if ( writer != null ) { writer . flush ( ) ; writer . close ( ) ; } } catch ( IOException e ) { throw new DiffException ( "Failed to close report file" , e ) ; }
public class Enhancements { /** * Returns the { @ link Collection } of extracted { @ link SentimentAnnotation } s . This * allows to process low level sentiment values extracted from sub - sections of the * document ( e . g . on Sentence level ) . Use { @ link # getDocumentSentiment ( ) } to * get the overall sentiment of the document as a whole * @ return A collection of Sentiment annotations for sub - sections of the document * @ see # getDocumentSentiment ( ) */ @ SuppressWarnings ( "unchecked" ) public Collection < SentimentAnnotation > getSentimentAnnotations ( ) { } }
Collection < ? extends Enhancement > result = enhancements . get ( SentimentAnnotation . class ) ; return result == null ? Collections . < SentimentAnnotation > emptySet ( ) : Collections . unmodifiableCollection ( ( Collection < SentimentAnnotation > ) result ) ;
public class ProviderManager { /** * Removes an IQ provider with the specified element name and namespace . This * method is typically called to cleanup providers that are programmatically added * using the { @ link # addIQProvider ( String , String , Object ) addIQProvider } method . * @ param elementName the XML element name . * @ param namespace the XML namespace . * @ return the key of the removed IQ Provider */ public static String removeIQProvider ( String elementName , String namespace ) { } }
String key = getKey ( elementName , namespace ) ; iqProviders . remove ( key ) ; return key ;
public class SamlIdPObjectEncrypter { /** * Gets data encryption parameters . * @ param samlObject the saml object * @ param service the service * @ param adaptor the adaptor * @ return the data encryption parameters */ protected DataEncryptionParameters getDataEncryptionParameters ( final Object samlObject , final SamlRegisteredService service , final SamlRegisteredServiceServiceProviderMetadataFacade adaptor ) { } }
val dataEncParams = new DataEncryptionParameters ( ) ; dataEncParams . setAlgorithm ( EncryptionConstants . ALGO_ID_BLOCKCIPHER_AES128 ) ; return dataEncParams ;
public class EVD { /** * Convenience method for computing the complete eigenvalue decomposition of * the given matrix * @ param A * Matrix to factorize . Not modified * @ return Newly allocated decomposition * @ throws NotConvergedException */ public static EVD factorize ( Matrix A ) throws NotConvergedException { } }
return new EVD ( A . numRows ( ) ) . factor ( new DenseMatrix ( A ) ) ;
public class TextProcessorAdapter { /** * Process the text * @ param source the sourcetext * @ return the result of text processing */ public String process ( String source ) { } }
CharSequence result = process ( ( CharSequence ) source ) ; if ( result instanceof String ) { return ( String ) result ; } else { return result . toString ( ) ; }
public class CrumbIssuer { /** * Get a crumb value based on user specific information in the request . * @ param request */ public String getCrumb ( ServletRequest request ) { } }
String crumb = null ; if ( request != null ) { crumb = ( String ) request . getAttribute ( CRUMB_ATTRIBUTE ) ; } if ( crumb == null ) { crumb = issueCrumb ( request , getDescriptor ( ) . getCrumbSalt ( ) ) ; if ( request != null ) { if ( ( crumb != null ) && crumb . length ( ) > 0 ) { request . setAttribute ( CRUMB_ATTRIBUTE , crumb ) ; } else { request . removeAttribute ( CRUMB_ATTRIBUTE ) ; } } } return crumb ;
public class ThriftClient { /** * ( non - Javadoc ) * @ see * com . impetus . client . cassandra . CassandraClientBase # releaseConnection ( java * . lang . Object ) */ protected void releaseConnection ( Object conn ) { } }
clientFactory . releaseConnection ( ( ( Connection ) conn ) . getPool ( ) , ( ( Connection ) conn ) . getClient ( ) ) ;
public class GetDedicatedIpsResult {
    /**
     * Sets the list of dedicated IP addresses that are reserved for use by
     * your Amazon Pinpoint account.
     *
     * @param dedicatedIps the addresses to store; null clears the list
     */
    public void setDedicatedIps(java.util.Collection<DedicatedIp> dedicatedIps) {
        if (dedicatedIps == null) {
            this.dedicatedIps = null;
            return;
        }
        // Defensive copy so later mutation of the caller's collection is not
        // reflected in this result object.
        this.dedicatedIps = new java.util.ArrayList<DedicatedIp>(dedicatedIps);
    }
}
public class TypeReference {
    /**
     * Gets a type token for the given {@code Class} instance.
     *
     * @param type the class to wrap
     * @return a {@code TypeReference} representing {@code type}
     */
    public static <T> TypeReference<T> get(Class<T> type) {
        // Non-generic classes need no type-argument capture, so the simple
        // implementation suffices.
        return new SimpleTypeReference<T>(type);
    }
}
public class DeleteFolderContentsRequestMarshaller {
    /**
     * Marshalls the given request's fields into the protocol representation.
     *
     * @param deleteFolderContentsRequest the request to marshall; must not be null
     * @param protocolMarshaller the protocol marshaller to write to
     * @throws SdkClientException if the request is null or marshalling fails
     */
    public void marshall(DeleteFolderContentsRequest deleteFolderContentsRequest, ProtocolMarshaller protocolMarshaller) {
        if (deleteFolderContentsRequest == null) {
            throw new SdkClientException("Invalid argument passed to marshall(...)");
        }
        try {
            protocolMarshaller.marshall(deleteFolderContentsRequest.getAuthenticationToken(), AUTHENTICATIONTOKEN_BINDING);
            protocolMarshaller.marshall(deleteFolderContentsRequest.getFolderId(), FOLDERID_BINDING);
        } catch (Exception e) {
            // Wrap any failure, preserving the original cause.
            throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
        }
    }
}
public class NGramTokenizer { /** * Return tokenized version of a string . Tokens are all * character n - grams that are part of a token produced by the * inner tokenizer . */ public Token [ ] tokenize ( String input ) { } }
Token [ ] initialTokens = innerTokenizer . tokenize ( input ) ; List tokens = new ArrayList ( ) ; for ( int i = 0 ; i < initialTokens . length ; i ++ ) { Token tok = initialTokens [ i ] ; String str = "^" + tok . getValue ( ) + "$" ; if ( keepOldTokens ) tokens . add ( intern ( str ) ) ; for ( int lo = 0 ; lo < str . length ( ) ; lo ++ ) { for ( int len = minNGramSize ; len <= maxNGramSize ; len ++ ) { if ( lo + len < str . length ( ) ) { tokens . add ( innerTokenizer . intern ( str . substring ( lo , lo + len ) ) ) ; } } } } return ( Token [ ] ) tokens . toArray ( new BasicToken [ tokens . size ( ) ] ) ;
public class ConnectionValidator {
    /**
     * Starts the connection validator: creates the executor (unless supplied
     * externally), resets the shutdown flag and scheduling state, and submits
     * the validator runner.
     *
     * @exception Throwable Thrown if an error occurs
     */
    public void start() throws Throwable {
        // Only create our own executor when one was not supplied externally;
        // when isExternal is true the executorService field is presumably
        // pre-populated by the owner — TODO confirm, not visible here.
        if (!isExternal) {
            executorService = Executors.newSingleThreadExecutor(new ValidatorThreadFactory());
        }
        shutdown.set(false);
        // Sentinel values: no validation is scheduled until a connection
        // registers an actual interval.
        interval = Long.MAX_VALUE;
        next = Long.MAX_VALUE;
        executorService.execute(new ConnectionValidatorRunner());
    }
}
public class NoxView { /** * Initializes a NoxConfig instance given the NoxView configuration provided programmatically or * using XML styleable attributes . */ private void initializeNoxViewConfig ( Context context , AttributeSet attrs , int defStyleAttr , int defStyleRes ) { } }
noxConfig = new NoxConfig ( ) ; TypedArray attributes = context . getTheme ( ) . obtainStyledAttributes ( attrs , R . styleable . nox , defStyleAttr , defStyleRes ) ; initializeNoxItemSize ( attributes ) ; initializeNoxItemMargin ( attributes ) ; initializeNoxItemPlaceholder ( attributes ) ; initializeShapeConfig ( attributes ) ; initializeTransformationConfig ( attributes ) ; attributes . recycle ( ) ;
public class ListHITsResult {
    /**
     * Appends the given HIT elements to the list returned by the query.
     * <b>NOTE:</b> This method appends the values to the existing list (if any). Use
     * {@link #setHITs(java.util.Collection)} or {@link #withHITs(java.util.Collection)} if you want to override the
     * existing values.
     *
     * @param hITs the HIT elements to append
     * @return this result object, so calls can be chained
     */
    public ListHITsResult withHITs(HIT... hITs) {
        // Lazily create the backing list, pre-sized for the incoming elements.
        if (this.hITs == null) {
            setHITs(new java.util.ArrayList<HIT>(hITs.length));
        }
        for (HIT ele : hITs) {
            this.hITs.add(ele);
        }
        return this;
    }
}
public class ApiOvhHorizonView {
    /**
     * Disables two factor authentication on your pool.
     * REST: POST /horizonView/{serviceName}/accessPoint/{accessPointId}/disableTwoFA
     *
     * @param serviceName [required] Domain of the service
     * @param accessPointId [required] Pool id
     * @return the task tracking the operation
     * @throws IOException if the HTTP call or response conversion fails
     */
    public OvhTask serviceName_accessPoint_accessPointId_disableTwoFA_POST(String serviceName, Long accessPointId) throws IOException {
        String qPath = "/horizonView/{serviceName}/accessPoint/{accessPointId}/disableTwoFA";
        // Substitute the template parameters into the path, then POST with no body.
        StringBuilder sb = path(qPath, serviceName, accessPointId);
        String resp = exec(qPath, "POST", sb.toString(), null);
        return convertTo(resp, OvhTask.class);
    }
}
public class IfcOrganizationImpl {
    /**
     * Returns the relationships in which this organization is the related
     * party, via the EMF reflective accessor (proxies are resolved).
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @generated
     */
    @SuppressWarnings("unchecked")
    @Override
    public EList<IfcOrganizationRelationship> getIsRelatedBy() {
        return (EList<IfcOrganizationRelationship>) eGet(Ifc4Package.Literals.IFC_ORGANIZATION__IS_RELATED_BY, true);
    }
}
public class UpdateSketchBuilder {
    /**
     * Returns an UpdateSketch with the current configuration of this Builder
     * with the specified backing destination Memory store.
     * Note: this cannot be used with the Alpha Family of sketches.
     *
     * @param dstMem The destination Memory; null builds an on-heap sketch.
     * @return an UpdateSketch
     */
    public UpdateSketch build(final WritableMemory dstMem) {
        UpdateSketch sketch = null;
        switch (bFam) {
            case ALPHA: {
                // Alpha sketches are heap-only; a destination Memory is rejected.
                if (dstMem == null) {
                    sketch = HeapAlphaSketch.newHeapInstance(bLgNomLongs, bSeed, bP, bRF);
                } else {
                    throw new SketchesArgumentException("AlphaSketch cannot be made Direct to Memory.");
                }
                break;
            }
            case QUICKSELECT: {
                // QuickSelect supports both heap and direct (Memory-backed) forms.
                if (dstMem == null) {
                    sketch = new HeapQuickSelectSketch(bLgNomLongs, bSeed, bP, bRF, false);
                } else {
                    sketch = new DirectQuickSelectSketch(bLgNomLongs, bSeed, bP, bRF, bMemReqSvr, dstMem, false);
                }
                break;
            }
            default: {
                throw new SketchesArgumentException("Given Family cannot be built as a Theta Sketch: " + bFam.toString());
            }
        }
        return sketch;
    }
}
public class PortletContextLoader {
    /**
     * Customizes the application context created by this loader after config
     * locations have been supplied but before the context is refreshed:
     * instantiates any ApplicationContextInitializer classes declared via the
     * CONTEXT_INITIALIZER_CLASSES_PARAM init parameter, orders them
     * (Ordered / @Order aware), and invokes each with the new context.
     *
     * @param portletContext the current portlet context
     * @param applicationContext the newly created application context
     * @see #createPortletApplicationContext(PortletContext)
     * @see ApplicationContextInitializer#initialize(ConfigurableApplicationContext)
     */
    protected void customizeContext(PortletContext portletContext, ConfigurablePortletApplicationContext applicationContext) {
        List<Class<ApplicationContextInitializer<ConfigurableApplicationContext>>> initializerClasses =
            determineContextInitializerClasses(portletContext);
        if (initializerClasses.size() == 0) {
            // no ApplicationContextInitializers have been declared -> nothing to do
            return;
        }
        ArrayList<ApplicationContextInitializer<ConfigurableApplicationContext>> initializerInstances =
            new ArrayList<ApplicationContextInitializer<ConfigurableApplicationContext>>();
        for (Class<ApplicationContextInitializer<ConfigurableApplicationContext>> initializerClass : initializerClasses) {
            // Reject initializers whose generic context type is incompatible
            // with the concrete application context class in use.
            Class<?> contextClass = applicationContext.getClass();
            Class<?> initializerContextClass =
                GenericTypeResolver.resolveTypeArgument(initializerClass, ApplicationContextInitializer.class);
            Assert.isAssignable(initializerContextClass, contextClass, String.format(
                "Could not add context initializer [%s] as its generic parameter [%s] "
                    + "is not assignable from the type of application context used by this "
                    + "context loader [%s]",
                initializerClass.getName(), initializerContextClass, contextClass));
            initializerInstances.add(BeanUtils.instantiateClass(initializerClass));
        }
        // TODO remove cast when ContribXmlPortletApplicationContext is merged into super classes
        ((ConfigurablePortletEnvironment) applicationContext.getEnvironment())
            .initPropertySources(this.servletContext, portletContext, null);
        Collections.sort(initializerInstances, new AnnotationAwareOrderComparator());
        for (ApplicationContextInitializer<ConfigurableApplicationContext> initializer : initializerInstances) {
            initializer.initialize(applicationContext);
        }
    }
}
public class Task {
    /**
     * Creates a task that completes when any of the supplied tasks has
     * completed. The returned task always ends in the completed state with
     * its result set to the first task to complete — even if that task ended
     * cancelled or faulted.
     *
     * @param tasks the tasks to wait on for completion
     * @return a task whose result is the first of the supplied tasks to complete
     */
    public static <TResult> Task<Task<TResult>> whenAnyResult(Collection<? extends Task<TResult>> tasks) {
        if (tasks.size() == 0) {
            return Task.forResult(null);
        }
        final bolts.TaskCompletionSource<Task<TResult>> firstCompleted = new bolts.TaskCompletionSource<>();
        final AtomicBoolean isAnyTaskComplete = new AtomicBoolean(false);
        for (Task<TResult> task : tasks) {
            task.continueWith(new Continuation<TResult, Void>() {
                @Override
                public Void then(Task<TResult> task) {
                    // Only the first completion wins the race.
                    if (isAnyTaskComplete.compareAndSet(false, true)) {
                        firstCompleted.setResult(task);
                    } else {
                        // Deliberately read the error of losing tasks so their
                        // failures count as "observed" and are not reported as
                        // unhandled; the value itself is unused.
                        Throwable ensureObserved = task.getError();
                    }
                    return null;
                }
            });
        }
        return firstCompleted.getTask();
    }
}
public class RelationalOperations { /** * Returns true if multipoint _ a is disjoint from point _ b . */ private static boolean multiPointDisjointPoint_ ( MultiPoint multipoint_a , Point point_b , double tolerance , ProgressTracker progress_tracker ) { } }
Point2D pt_b = point_b . getXY ( ) ; return multiPointDisjointPointImpl_ ( multipoint_a , pt_b , tolerance , progress_tracker ) ;
public class WAjaxControl {
    /**
     * Overrides preparePaintComponent in order to register this control's AJAX
     * trigger and target components for the current request.
     *
     * @param request the request being responded to
     */
    @Override
    protected void preparePaintComponent(final Request request) {
        super.preparePaintComponent(request);
        List<AjaxTarget> targets = getTargets();
        if (targets != null && !targets.isEmpty()) {
            // Default to this component when no explicit trigger is set.
            WComponent triggerComponent = trigger == null ? this : trigger;
            // The trigger maybe in a different context
            UIContext triggerContext = WebUtilities.getContextForComponent(triggerComponent);
            UIContextHolder.pushContext(triggerContext);
            // TODO The IDs of the targets are based on the Triggers Context. Not good for targets in repeaters
            try {
                List<String> targetIds = new ArrayList<>();
                for (AjaxTarget target : getTargets()) {
                    targetIds.add(target.getId());
                }
                AjaxHelper.registerComponents(targetIds, triggerComponent.getId());
            } finally {
                // Always restore the context pushed above.
                UIContextHolder.popContext();
            }
        }
    }
}
public class AndPermission { /** * Some privileges permanently disabled , may need to set up in the execute . * @ param activity { @ link Activity } . * @ param deniedPermissions one or more permissions . * @ return true , other wise is false . */ public static boolean hasAlwaysDeniedPermission ( Activity activity , List < String > deniedPermissions ) { } }
return hasAlwaysDeniedPermission ( new ActivitySource ( activity ) , deniedPermissions ) ;
public class ListTemplatesResult {
    /**
     * Appends entries (name and creation time stamp for each template in your
     * Amazon SES account) to the metadata list.
     * <b>NOTE:</b> This method appends the values to the existing list (if any). Use
     * {@link #setTemplatesMetadata(java.util.Collection)} or {@link #withTemplatesMetadata(java.util.Collection)} if
     * you want to override the existing values.
     *
     * @param templatesMetadata the template metadata entries to append
     * @return this result object, so calls can be chained
     */
    public ListTemplatesResult withTemplatesMetadata(TemplateMetadata... templatesMetadata) {
        // Lazily create the backing list, pre-sized for the incoming elements.
        if (this.templatesMetadata == null) {
            setTemplatesMetadata(new com.amazonaws.internal.SdkInternalList<TemplateMetadata>(templatesMetadata.length));
        }
        for (TemplateMetadata ele : templatesMetadata) {
            this.templatesMetadata.add(ele);
        }
        return this;
    }
}
public class DerValue {
    /**
     * Returns the ASN.1 ENUMERATED value held in this DER value.
     *
     * @return the integer held in this DER value
     * @throws IOException if this value's tag is not ENUMERATED
     */
    public int getEnumerated() throws IOException {
        if (tag != tag_Enumerated) {
            throw new IOException("DerValue.getEnumerated, incorrect tag: " + tag);
        }
        // Decode all remaining bytes of the value as a signed integer.
        return buffer.getInteger(data.available());
    }
}
public class UpdateItemRequest {
    /**
     * Sets the legacy {@code Expected} condition map. This is a legacy
     * parameter; use {@code ConditionExpression} instead (see "Expected" in
     * the Amazon DynamoDB Developer Guide).
     *
     * @param expected the legacy Expected condition map
     * @return this request, so calls can be chained
     */
    public UpdateItemRequest withExpected(java.util.Map<String, ExpectedAttributeValue> expected) {
        setExpected(expected);
        return this;
    }
}
public class MarkLogicRepositoryConnection {
    /**
     * Returns statements matching the given subject/predicate/object from the
     * supplied contexts. In quad mode a SPARQL query filtered on the context
     * graphs is evaluated; otherwise a fully-bound pattern is answered via
     * hasStatement, and any other pattern falls back to a graph query over
     * everything with the available bindings applied.
     * TBD - should share code path with above getStatements.
     *
     * @param subj subject to match, or null for any
     * @param pred predicate to match, or null for any
     * @param obj object to match, or null for any
     * @param includeInferred whether inferred statements are included
     * @param contexts contexts to query; null means the default graph
     * @throws RepositoryException if query preparation or evaluation fails
     */
    @Override
    public RepositoryResult<Statement> getStatements(Resource subj, URI pred, Value obj, boolean includeInferred,
            Resource... contexts) throws RepositoryException {
        // A null contexts array means "the default graph only".
        if (contexts == null) {
            contexts = new Resource[] { null };
        }
        try {
            if (isQuadMode()) {
                // Build a SPARQL query restricted to the requested graphs; a
                // null context maps to the connection's default graph URI.
                StringBuilder sb = new StringBuilder();
                sb.append("SELECT * WHERE { GRAPH ?ctx { ?s ?p ?o } filter (?ctx = (");
                boolean first = true;
                for (Resource context : contexts) {
                    if (first) {
                        first = !first;
                    } else {
                        sb.append(",");
                    }
                    if (notNull(context)) {
                        sb.append("IRI(\"" + context.toString() + "\")");
                    } else {
                        sb.append("IRI(\"" + DEFAULT_GRAPH_URI + "\")");
                    }
                }
                sb.append(") ) }");
                TupleQuery tupleQuery = prepareTupleQuery(sb.toString());
                tupleQuery.setIncludeInferred(includeInferred);
                setBindings(tupleQuery, subj, pred, obj, (Resource) null);
                TupleQueryResult qRes = tupleQuery.evaluate();
                return new RepositoryResult<Statement>(
                    new ExceptionConvertingIteration<Statement, RepositoryException>(toStatementIteration(qRes, subj, pred, obj)) {
                        @Override
                        protected RepositoryException convert(Exception e) {
                            return new RepositoryException(e);
                        }
                    });
            } else if (subj != null && pred != null && obj != null) {
                // Fully bound pattern: answer with a singleton or empty result.
                if (hasStatement(subj, pred, obj, includeInferred, contexts)) {
                    Statement st = new StatementImpl(subj, pred, obj);
                    CloseableIteration<Statement, RepositoryException> cursor;
                    cursor = new SingletonIteration<Statement, RepositoryException>(st);
                    return new RepositoryResult<Statement>(cursor);
                } else {
                    return new RepositoryResult<Statement>(new EmptyIteration<Statement, RepositoryException>());
                }
            } else {
                // Partially bound pattern: graph query over everything with
                // whatever bindings are available.
                MarkLogicGraphQuery query = prepareGraphQuery(EVERYTHING);
                setBindings(query, subj, pred, obj, contexts);
                GraphQueryResult result = query.evaluate();
                return new RepositoryResult<Statement>(
                    new ExceptionConvertingIteration<Statement, RepositoryException>(result) {
                        @Override
                        protected RepositoryException convert(Exception e) {
                            return new RepositoryException(e);
                        }
                    });
            }
        } catch (MalformedQueryException e) {
            throw new RepositoryException(e);
        } catch (QueryEvaluationException e) {
            throw new RepositoryException(e);
        }
    }
}
public class MultiColumnRegexFilter { /** * Create a row filter for the given regex and column . If the given * string is not a valid regex , then a warning will be printed and * < code > null < / code > will be returned . * @ param regex The regex * @ param columnIndex The column index * @ return The row filter */ private RowFilter < TableModel , Integer > createRegexFilter ( String regex , int columnIndex ) { } }
try { String finalRegex = regex ; if ( ignoringCase ) { finalRegex = "(?i)" + regex ; } RowFilter < TableModel , Integer > rowFilter = RowFilter . regexFilter ( finalRegex , columnIndex ) ; return rowFilter ; } catch ( PatternSyntaxException e ) { logger . warning ( e . getMessage ( ) ) ; return null ; }
public class StandbySafeMode { /** * Processes a register from the datanode . First , we will * await a heartbeat , and later for a incremental block * report . * @ param node * the datanode that has reported */ protected void reportRegister ( DatanodeID node ) { } }
if ( node != null && shouldUpdateNodes ( ) ) { if ( ! liveDatanodes . contains ( node ) ) { // A new node has checked in , we want to send a ClearPrimary command to // it as well . outStandingHeartbeats . add ( node ) ; liveDatanodes . add ( node ) ; } }
public class MultiViewOps { /** * Computes a Fundamental matrix given an Essential matrix and the camera ' s intrinsic * parameters . * @ see # createFundamental ( DMatrixRMaj , DMatrixRMaj ) * @ param E Essential matrix * @ param intrinsic Intrinsic camera calibration * @ return Fundamental matrix */ public static DMatrixRMaj createFundamental ( DMatrixRMaj E , CameraPinhole intrinsic ) { } }
DMatrixRMaj K = PerspectiveOps . pinholeToMatrix ( intrinsic , ( DMatrixRMaj ) null ) ; return createFundamental ( E , K ) ;
public class SignatureUtilImpl { /** * 使用私钥签名 * @ param content 要签名的数据 * @ return 签名结果 ( 会对签名结果做BASE64 encode处理 ) */ @ Override public byte [ ] sign ( byte [ ] content ) { } }
try ( PooledObject < SignatureHolder > holder = CACHE . get ( id ) . get ( ) ) { Signature signature = holder . get ( ) . getSign ( ) ; signature . update ( content ) ; return BASE_64 . encrypt ( signature . sign ( ) ) ; } catch ( Exception e ) { throw new SecureException ( "加密失败" , e ) ; }
public class MbeanImplCodeGen { /** * Output class * @ param def definition * @ param out Writer * @ throws IOException ioException */ @ Override public void writeClassBody ( Definition def , Writer out ) throws IOException { } }
int indent = 1 ; out . write ( "public class " + getClassName ( def ) + " implements " + def . getMbeanInterfaceClass ( ) ) ; writeLeftCurlyBracket ( out , 0 ) ; writeVars ( def , out , indent ) ; writeEol ( out ) ; writeMBeanLifecycle ( def , out , indent ) ; writeEol ( out ) ; writeMethods ( def , out , indent ) ; writeEol ( out ) ; writeGetConnection ( def , out , indent ) ; writeRightCurlyBracket ( out , 0 ) ;
public class Parser { /** * run parse on the internal data and return the command object */ private Command parse ( ) throws ParseException { } }
command ( ) ; while ( ! is ( Token . WHERE , Token . RETURN , Token . EOL ) ) { param ( ) ; } if ( token == Token . WHERE ) { where ( ) ; } if ( token == Token . RETURN ) { returns ( ) ; } expect ( Token . EOL ) ; return cmd ;
public class ThriftEventManager { /** * { @ inheritDoc } */ @ Override public void addAttack ( Attack attack ) { } }
TTransport transport = getTransport ( ) ; final TProtocol protocol = new TBinaryProtocol ( transport ) ; final AppSensorApi . Client client = new AppSensorApi . Client ( protocol ) ; // All hooked up , start using the service try { org . owasp . appsensor . rpc . thrift . generated . Attack thriftAttack = mapper . map ( attack , org . owasp . appsensor . rpc . thrift . generated . Attack . class ) ; client . addAttack ( thriftAttack , clientApplicationName ) ; transport . close ( ) ; } catch ( Exception e ) { logger . error ( "Could not complete event add." , e ) ; }
public class LIBORMarketModel { /** * / * ( non - Javadoc ) * @ see net . finmath . montecarlo . interestrate . LIBORMarketModelInterface # getIntegratedLIBORCovariance ( ) */ @ Override public double [ ] [ ] [ ] getIntegratedLIBORCovariance ( ) { } }
// Lazily build the integrated covariance tensor; double-checked under the
// dedicated init lock so concurrent callers compute it at most once.
synchronized (integratedLIBORCovarianceLazyInitLock) {
    if (integratedLIBORCovariance == null) {
        TimeDiscretizationInterface liborPeriodDiscretization = getLiborPeriodDiscretization();
        TimeDiscretizationInterface simulationTimeDiscretization = getCovarianceModel().getTimeDiscretization();
        // Dimensions: [simulation time step][libor component 1][libor component 2].
        integratedLIBORCovariance = new double[simulationTimeDiscretization.getNumberOfTimeSteps()][liborPeriodDiscretization.getNumberOfTimeSteps()][liborPeriodDiscretization.getNumberOfTimeSteps()];
        for (int timeIndex = 0; timeIndex < simulationTimeDiscretization.getNumberOfTimeSteps(); timeIndex++) {
            double dt = simulationTimeDiscretization.getTime(timeIndex + 1) - simulationTimeDiscretization.getTime(timeIndex);
            RandomVariableInterface[][] factorLoadings = new RandomVariableInterface[liborPeriodDiscretization.getNumberOfTimeSteps()][];
            // Prefetch factor loadings
            for (int componentIndex = 0; componentIndex < liborPeriodDiscretization.getNumberOfTimeSteps(); componentIndex++) {
                factorLoadings[componentIndex] = getCovarianceModel().getFactorLoading(timeIndex, componentIndex, null);
            }
            for (int componentIndex1 = 0; componentIndex1 < liborPeriodDiscretization.getNumberOfTimeSteps(); componentIndex1++) {
                RandomVariableInterface[] factorLoadingOfComponent1 = factorLoadings[componentIndex1];
                // Sum the libor cross terms (use symmetry)
                for (int componentIndex2 = componentIndex1; componentIndex2 < liborPeriodDiscretization.getNumberOfTimeSteps(); componentIndex2++) {
                    double integratedLIBORCovarianceValue = 0.0;
                    // Only accumulate while the component's period has not yet started;
                    // otherwise the entry stays 0 for this time step.
                    if (getLiborPeriod(componentIndex1) > getTime(timeIndex)) {
                        RandomVariableInterface[] factorLoadingOfComponent2 = factorLoadings[componentIndex2];
                        for (int factorIndex = 0; factorIndex < getNumberOfFactors(); factorIndex++) {
                            // NOTE(review): get(0) reads a single path value — presumably
                            // the loadings are deterministic here; confirm with the model.
                            integratedLIBORCovarianceValue += factorLoadingOfComponent1[factorIndex].get(0) * factorLoadingOfComponent2[factorIndex].get(0) * dt;
                        }
                    }
                    integratedLIBORCovariance[timeIndex][componentIndex1][componentIndex2] = integratedLIBORCovarianceValue;
                }
            }
        }
        // Integrate over time (i.e. sum up).
        // In-place prefix sum over timeIndex; the symmetric lower triangle is
        // filled from the upper triangle in the same pass.
        for (int timeIndex = 1; timeIndex < simulationTimeDiscretization.getNumberOfTimeSteps(); timeIndex++) {
            double[][] prevIntegratedLIBORCovariance = integratedLIBORCovariance[timeIndex - 1];
            double[][] thisIntegratedLIBORCovariance = integratedLIBORCovariance[timeIndex];
            for (int componentIndex1 = 0; componentIndex1 < liborPeriodDiscretization.getNumberOfTimeSteps(); componentIndex1++) {
                for (int componentIndex2 = componentIndex1; componentIndex2 < liborPeriodDiscretization.getNumberOfTimeSteps(); componentIndex2++) {
                    thisIntegratedLIBORCovariance[componentIndex1][componentIndex2] = prevIntegratedLIBORCovariance[componentIndex1][componentIndex2] + thisIntegratedLIBORCovariance[componentIndex1][componentIndex2];
                    thisIntegratedLIBORCovariance[componentIndex2][componentIndex1] = thisIntegratedLIBORCovariance[componentIndex1][componentIndex2];
                }
            }
        }
    }
}
return integratedLIBORCovariance;
public class YamlOrchestrationMasterSlaveDataSourceFactory { /** * Create master - slave data source . * @ param yamlBytes YAML bytes for master - slave rule configuration with data sources * @ return master - slave data source * @ throws SQLException SQL exception * @ throws IOException IO exception */ public static DataSource createDataSource ( final byte [ ] yamlBytes ) throws SQLException , IOException { } }
YamlOrchestrationMasterSlaveRuleConfiguration config = unmarshal ( yamlBytes ) ; return createDataSource ( config . getDataSources ( ) , config . getMasterSlaveRule ( ) , config . getProps ( ) , config . getOrchestration ( ) ) ;
public class CamerasApi { /** * Returns all the brands of cameras that Flickr knows about . * < br > * This method does not require authentication . * @ return all camera brands Flickr knows about . * @ throws JinxException if there are any errors . * @ see < a href = " https : / / www . flickr . com / services / api / flickr . cameras . getBrands . html " > flickr . cameras . getBrands < / a > */ public CameraBrands getBrands ( ) throws JinxException { } }
Map < String , String > params = new TreeMap < > ( ) ; params . put ( "method" , "flickr.cameras.getBrands" ) ; return jinx . flickrGet ( params , CameraBrands . class , false ) ;
public class AmazonLexRuntimeClient { /** * Sends user input ( text - only ) to Amazon Lex . Client applications can use this API to send requests to Amazon Lex * at runtime . Amazon Lex then interprets the user input using the machine learning model it built for the bot . * In response , Amazon Lex returns the next < code > message < / code > to convey to the user an optional * < code > responseCard < / code > to display . Consider the following example messages : * < ul > * < li > * For a user input " I would like a pizza " , Amazon Lex might return a response with a message eliciting slot data * ( for example , PizzaSize ) : " What size pizza would you like ? " * < / li > * < li > * After the user provides all of the pizza order information , Amazon Lex might return a response with a message to * obtain user confirmation " Proceed with the pizza order ? " . * < / li > * < li > * After the user replies to a confirmation prompt with a " yes " , Amazon Lex might return a conclusion statement : * " Thank you , your cheese pizza has been ordered . " . * < / li > * < / ul > * Not all Amazon Lex messages require a user response . For example , a conclusion statement does not require a * response . Some messages require only a " yes " or " no " user response . In addition to the < code > message < / code > , * Amazon Lex provides additional context about the message in the response that you might use to enhance client * behavior , for example , to display the appropriate client user interface . These are the < code > slotToElicit < / code > , * < code > dialogState < / code > , < code > intentName < / code > , and < code > slots < / code > fields in the response . 
Consider the * following examples : * < ul > * < li > * If the message is to elicit slot data , Amazon Lex returns the following context information : * < ul > * < li > * < code > dialogState < / code > set to ElicitSlot * < / li > * < li > * < code > intentName < / code > set to the intent name in the current context * < / li > * < li > * < code > slotToElicit < / code > set to the slot name for which the < code > message < / code > is eliciting information * < / li > * < li > * < code > slots < / code > set to a map of slots , configured for the intent , with currently known values * < / li > * < / ul > * < / li > * < li > * If the message is a confirmation prompt , the < code > dialogState < / code > is set to ConfirmIntent and * < code > SlotToElicit < / code > is set to null . * < / li > * < li > * If the message is a clarification prompt ( configured for the intent ) that indicates that user intent is not * understood , the < code > dialogState < / code > is set to ElicitIntent and < code > slotToElicit < / code > is set to null . * < / li > * < / ul > * In addition , Amazon Lex also returns your application - specific < code > sessionAttributes < / code > . For more * information , see < a href = " http : / / docs . aws . amazon . com / lex / latest / dg / context - mgmt . html " > Managing Conversation * Context < / a > . * @ param postTextRequest * @ return Result of the PostText operation returned by the service . * @ throws NotFoundException * The resource ( such as the Amazon Lex bot or an alias ) that is referred to is not found . * @ throws BadRequestException * Request validation failed , there is no usable message in the context , or the bot build failed , is still * in progress , or contains unbuilt changes . * @ throws LimitExceededException * Exceeded a limit . * @ throws InternalFailureException * Internal service error . Retry the call . * @ throws ConflictException * Two clients are using the same AWS account , Amazon Lex bot , and user ID . 
* @ throws DependencyFailedException * One of the dependencies , such as AWS Lambda or Amazon Polly , threw an exception . For example , < / p > * < ul > * < li > * If Amazon Lex does not have sufficient permissions to call a Lambda function . * < / li > * < li > * If a Lambda function takes longer than 30 seconds to execute . * < / li > * < li > * If a fulfillment Lambda function returns a < code > Delegate < / code > dialog action without removing any slot * values . * < / li > * @ throws BadGatewayException * Either the Amazon Lex bot is still building , or one of the dependent services ( Amazon Polly , AWS Lambda ) * failed with an internal service error . * @ throws LoopDetectedException * This exception is not used . * @ sample AmazonLexRuntime . PostText * @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / runtime . lex - 2016-11-28 / PostText " target = " _ top " > AWS API * Documentation < / a > */ @ Override public PostTextResult postText ( PostTextRequest request ) { } }
request = beforeClientExecution ( request ) ; return executePostText ( request ) ;
public class DataBlockScannerSet { /** * Wait for upgrading done for the given namespace */ private void waitForUpgradeDone ( int namespaceId ) { } }
UpgradeManagerDatanode um = datanode . getUpgradeManager ( namespaceId ) ; while ( ! um . isUpgradeCompleted ( ) ) { try { datanode . updateAndReportThreadLiveness ( BackgroundThread . BLOCK_SCANNER ) ; Thread . sleep ( 5000 ) ; LOG . info ( "sleeping ............" ) ; } catch ( InterruptedException e ) { blockScannerThread . interrupt ( ) ; return ; } }
public class HeatMap { /** * handles the map movement rendering portions , prevents more than one render at a time , * waits for the user to stop moving the map before triggering the render */ @ Override public void run ( ) { } }
try { Thread . sleep ( 1000 ) ; } catch ( InterruptedException e ) { } // TODO replace me with a timer task while ( running ) { try { Thread . sleep ( 1000 ) ; } catch ( InterruptedException e ) { e . printStackTrace ( ) ; } if ( needsDataRefresh ) { if ( System . currentTimeMillis ( ) - lastMovement > 500 ) { generateMap ( ) ; needsDataRefresh = false ; } } }
public class ScreenUtils { /** * @ param context * @ param millis Time for screen to turn off . Setting this value to - 1 will prohibit screen from turning off */ public static void setScreenOffTimeout ( Context context , int millis ) { } }
Settings . System . putInt ( context . getContentResolver ( ) , Settings . System . SCREEN_OFF_TIMEOUT , millis ) ;
public class PasswordEditText { /** * Obtains the prefix of helper texts , which are shown depending on the password strength , from * a specific typed array . * @ param typedArray * The typed array , the prefix should be obtained from , as an instance of the class * { @ link TypedArray } . The typed array may not be null */ private void obtainPasswordVerificationPrefix ( @ NonNull final TypedArray typedArray ) { } }
String format = typedArray . getString ( R . styleable . PasswordEditText_passwordVerificationPrefix ) ; if ( format == null ) { format = getResources ( ) . getString ( R . string . password_verification_prefix ) ; } setPasswordVerificationPrefix ( format ) ;
public class SwitchBuilder { /** * Adds a { @ code default } clause to this switch statement . */ public SwitchBuilder setDefault ( Statement body ) { } }
Preconditions . checkState ( defaultCaseBody == null ) ; defaultCaseBody = body ; return this ;
public class CommandExecutor { /** * public for testing purpose */ public void execute ( CommandHandler handler , int timeout , TimeUnit unit ) throws CommandLineException , InterruptedException , ExecutionException , TimeoutException { } }
ExecutableBuilder builder = new ExecutableBuilder ( ) { CommandContext c = newTimeoutCommandContext ( ctx ) ; @ Override public Executable build ( ) { return ( ) -> { handler . handle ( c ) ; } ; } @ Override public CommandContext getCommandContext ( ) { return c ; } } ; execute ( builder , timeout , unit ) ;