signature
stringlengths
43
39.1k
implementation
stringlengths
0
450k
public class string { /** * fill the string like * < code > My name is $ { name } , I ' m $ { age } years old < / code > with fields in * the given object . * @ param obj from object * @ return formated string */ public String from ( Object obj ) { } }
StringBuilder sb = new StringBuilder ( str ) ; Method [ ] methods = obj . getClass ( ) . getMethods ( ) ; Field [ ] fields = obj . getClass ( ) . getDeclaredFields ( ) ; int lastIndex = - 1 ; out : while ( true ) { int start = sb . indexOf ( "${" , lastIndex ) + 2 ; if ( start == 1 ) { break ; } int end = sb . indexOf ( "}" , start ) ; if ( end == - 1 || start == end ) { break ; } String name = sb . substring ( start , end ) ; // check functional methods for ( Method m : methods ) { String mName = m . getName ( ) ; if ( m . getParameterTypes ( ) . length == 0 && m . getReturnType ( ) != Void . TYPE && mName . equals ( name ) ) { m . setAccessible ( true ) ; try { sb . replace ( start - 2 , end + 1 , m . invoke ( obj , new Object [ 0 ] ) . toString ( ) ) ; } catch ( Exception e ) { throw $ ( e ) ; } lastIndex = end ; continue out ; } } // check getter for ( Method m : methods ) { String mName = m . getName ( ) ; if ( m . getParameterTypes ( ) . length == 0 && m . getReturnType ( ) != Void . TYPE && mName . equals ( "get" + name . substring ( 0 , 1 ) . toUpperCase ( ) + name . substring ( 1 ) ) ) { m . setAccessible ( true ) ; try { sb . replace ( start - 2 , end + 1 , m . invoke ( obj , new Object [ 0 ] ) . toString ( ) ) ; } catch ( Exception e ) { throw $ ( e ) ; } lastIndex = end ; continue out ; } } // check field for ( Field f : fields ) { if ( f . getName ( ) . equals ( name ) ) { f . setAccessible ( true ) ; try { sb . replace ( start - 2 , end + 1 , f . get ( obj ) . toString ( ) ) ; } catch ( Exception e ) { throw $ ( e ) ; } lastIndex = end ; continue out ; } } lastIndex = end ; } str = sb . toString ( ) ; return str ;
public class Scope { /** * Pop - scope ( same as exit - scope ) but return all keys that are tracked ( and * would have been deleted ) . */ static public Key [ ] pop ( ) { } }
Stack < HashSet < Key > > keys = _scope . get ( ) . _keys ; return keys . size ( ) > 0 ? keys . pop ( ) . toArray ( new Key [ 0 ] ) : null ;
public class HttpClient { /** * Perform a put against the WSAPI * @ param url the request url * @ param body the body of the put * @ return the JSON encoded string response * @ throws IOException if a non - 200 response code is returned or if some other * problem occurs while executing the request */ public String doPut ( String url , String body ) throws IOException { } }
HttpPut httpPut = new HttpPut ( getWsapiUrl ( ) + url ) ; httpPut . setEntity ( new StringEntity ( body , "utf-8" ) ) ; return doRequest ( httpPut ) ;
public class PyExpressionGenerator { /** * Generate the given object . * @ param call the feature call . * @ param it the target for the generated content . * @ param context the context . * @ return the feature call . */ protected XExpression _generate ( XFeatureCall call , IAppendable it , IExtraLanguageGeneratorContext context ) { } }
appendReturnIfExpectedReturnedExpression ( it , context ) ; newFeatureCallGenerator ( context , it ) . generate ( call ) ; return call ;
public class ListAccountsRequestMarshaller { /** * Marshall the given parameter object . */ public void marshall ( ListAccountsRequest listAccountsRequest , ProtocolMarshaller protocolMarshaller ) { } }
if ( listAccountsRequest == null ) { throw new SdkClientException ( "Invalid argument passed to marshall(...)" ) ; } try { protocolMarshaller . marshall ( listAccountsRequest . getName ( ) , NAME_BINDING ) ; protocolMarshaller . marshall ( listAccountsRequest . getUserEmail ( ) , USEREMAIL_BINDING ) ; protocolMarshaller . marshall ( listAccountsRequest . getNextToken ( ) , NEXTTOKEN_BINDING ) ; protocolMarshaller . marshall ( listAccountsRequest . getMaxResults ( ) , MAXRESULTS_BINDING ) ; } catch ( Exception e ) { throw new SdkClientException ( "Unable to marshall request to JSON: " + e . getMessage ( ) , e ) ; }
public class CategoryUpdatePermissionsEvent { /** * Filtered list of affected { @ link net . dv8tion . jda . core . entities . Member Members } * @ return Immutable list of affected members */ public List < Member > getChangedMembers ( ) { } }
return changed . stream ( ) . filter ( it -> it instanceof Member ) . map ( Member . class :: cast ) . collect ( Collectors . toList ( ) ) ;
public class Block { /** * Calculates the block hash by serializing the block and hashing the * resulting bytes . */ private Sha256Hash calculateHash ( ) { } }
try { ByteArrayOutputStream bos = new UnsafeByteArrayOutputStream ( HEADER_SIZE ) ; writeHeader ( bos ) ; return Sha256Hash . wrapReversed ( Sha256Hash . hashTwice ( bos . toByteArray ( ) ) ) ; } catch ( IOException e ) { throw new RuntimeException ( e ) ; // Cannot happen . }
public class A_CmsUploadDialog { /** * Inserts a hidden form into . < p > * @ param form the form to insert */ protected void insertUploadForm ( FormPanel form ) { } }
form . getElement ( ) . getStyle ( ) . setDisplay ( Display . NONE ) ; m_contentWrapper . add ( form ) ;
public class BaseClassFinderService { /** * Find , resolve , and return this class definition . * @ param className * @ return The class definition or null if not found . */ public Class < ? > findClass ( String className , String versionRange ) { } }
// if ( ClassServiceBootstrap . repositoryAdmin = = null ) // return null ; Class < ? > c = this . getClassFromBundle ( null , className , versionRange ) ; if ( c == null ) { Object resource = this . deployThisResource ( ClassFinderActivator . getPackageName ( className , false ) , versionRange , false ) ; if ( resource != null ) { c = this . getClassFromBundle ( null , className , versionRange ) ; // It is possible that the newly started bundle registered itself if ( c == null ) c = this . getClassFromBundle ( resource , className , versionRange ) ; } } return c ;
public class GVRAnimator { /** * Starts all of the animations in this animator . * @ see GVRAnimator # reset ( ) * @ see GVRAnimationEngine # start ( GVRAnimation ) */ public void start ( GVROnFinish finishCallback ) { } }
if ( mAnimations . size ( ) == 0 ) { return ; } mIsRunning = true ; for ( int i = 0 ; i < mAnimations . size ( ) ; ++ i ) { GVRAnimation anim = mAnimations . get ( i ) ; anim . reset ( ) ; if ( i == 0 ) { anim . setOnFinish ( finishCallback ) ; } else { anim . setOnFinish ( null ) ; } anim . start ( getGVRContext ( ) . getAnimationEngine ( ) ) ; }
public class UrlMappingUtils { /** * Resolves a view for the given view and UrlMappingInfo instance * @ param request The request * @ param info The info * @ param viewName The view name * @ param viewResolver The view resolver * @ return The view or null * @ throws Exception */ public static View resolveView ( HttpServletRequest request , UrlMappingInfo info , String viewName , ViewResolver viewResolver ) throws Exception { } }
String controllerName = info . getControllerName ( ) ; return WebUtils . resolveView ( request , viewName , controllerName , viewResolver ) ;
public class AbstractProcessInstanceMarshaller { /** * Input methods */ public ProcessInstance readProcessInstance ( MarshallerReaderContext context ) throws IOException { } }
ObjectInputStream stream = context . stream ; InternalKnowledgeBase kBase = context . kBase ; InternalWorkingMemory wm = context . wm ; WorkflowProcessInstanceImpl processInstance = createProcessInstance ( ) ; processInstance . setId ( stream . readLong ( ) ) ; String processId = stream . readUTF ( ) ; processInstance . setProcessId ( processId ) ; Process process = kBase . getProcess ( processId ) ; if ( kBase != null ) { processInstance . setProcess ( process ) ; } processInstance . setState ( stream . readInt ( ) ) ; long nodeInstanceCounter = stream . readLong ( ) ; processInstance . setKnowledgeRuntime ( wm . getKnowledgeRuntime ( ) ) ; int nbSwimlanes = stream . readInt ( ) ; if ( nbSwimlanes > 0 ) { Context swimlaneContext = ( ( org . jbpm . process . core . Process ) process ) . getDefaultContext ( SwimlaneContext . SWIMLANE_SCOPE ) ; SwimlaneContextInstance swimlaneContextInstance = ( SwimlaneContextInstance ) processInstance . getContextInstance ( swimlaneContext ) ; for ( int i = 0 ; i < nbSwimlanes ; i ++ ) { String name = stream . readUTF ( ) ; String value = stream . readUTF ( ) ; swimlaneContextInstance . setActorId ( name , value ) ; } } while ( stream . readShort ( ) == PersisterEnums . NODE_INSTANCE ) { readNodeInstance ( context , processInstance , processInstance ) ; } int exclusiveGroupInstances = stream . readInt ( ) ; for ( int i = 0 ; i < exclusiveGroupInstances ; i ++ ) { ExclusiveGroupInstance exclusiveGroupInstance = new ExclusiveGroupInstance ( ) ; processInstance . addContextInstance ( ExclusiveGroup . EXCLUSIVE_GROUP , exclusiveGroupInstance ) ; int nodeInstances = stream . readInt ( ) ; for ( int j = 0 ; j < nodeInstances ; j ++ ) { long nodeInstanceId = stream . readLong ( ) ; NodeInstance nodeInstance = processInstance . 
getNodeInstance ( nodeInstanceId ) ; if ( nodeInstance == null ) { throw new IllegalArgumentException ( "Could not find node instance when deserializing exclusive group instance: " + nodeInstanceId ) ; } exclusiveGroupInstance . addNodeInstance ( nodeInstance ) ; } } // Process Variables // - Number of Variables = keys . size ( ) // For Each Variable // - Variable Key // - Marshalling Strategy Index // - Marshalled Object int nbVariables = stream . readInt ( ) ; if ( nbVariables > 0 ) { Context variableScope = ( ( org . jbpm . process . core . Process ) process ) . getDefaultContext ( VariableScope . VARIABLE_SCOPE ) ; VariableScopeInstance variableScopeInstance = ( VariableScopeInstance ) processInstance . getContextInstance ( variableScope ) ; for ( int i = 0 ; i < nbVariables ; i ++ ) { String name = stream . readUTF ( ) ; try { ObjectMarshallingStrategy strategy = null ; int index = stream . readInt ( ) ; // This is the old way of de / serializing strategy objects if ( index >= 0 ) { strategy = context . resolverStrategyFactory . getStrategy ( index ) ; } // This is the new way else if ( index == - 2 ) { String strategyClassName = context . stream . readUTF ( ) ; if ( ! StringUtils . isEmpty ( strategyClassName ) ) { strategy = context . resolverStrategyFactory . getStrategyObject ( strategyClassName ) ; if ( strategy == null ) { throw new IllegalStateException ( "No strategy of type " + strategyClassName + " available." ) ; } } } // If either way retrieves a strategy , use it Object value = null ; if ( strategy != null ) { value = strategy . read ( stream ) ; } variableScopeInstance . internalSetVariable ( name , value ) ; } catch ( ClassNotFoundException e ) { throw new IllegalArgumentException ( "Could not reload variable " + name ) ; } } } processInstance . internalSetNodeInstanceCounter ( nodeInstanceCounter ) ; if ( wm != null ) { processInstance . reconnect ( ) ; } return processInstance ;
public class DeployerModule { /** * Called to do all bindings for this module . * @ see < a href = " http : / / code . google . com / p / google - guice / " > Google Guice < / a > */ @ Override protected void configure ( ) { } }
try { InternalLoggerFactory . setDefaultFactory ( new Slf4JLoggerFactory ( ) ) ; File appRoot = new File ( System . getProperty ( CadmiumListener . BASE_PATH_ENV ) , "maven" ) ; FileUtils . forceMkdir ( appRoot ) ; String remoteMavenRepo = System . getProperty ( MAVEN_REPOSITORY ) ; ArtifactResolver resolver = new ArtifactResolver ( remoteMavenRepo , appRoot . getAbsolutePath ( ) ) ; bind ( ArtifactResolver . class ) . toInstance ( resolver ) ; bind ( JBossAdminApi . class ) ; Multibinder < ConfigurationListener > listenerBinder = Multibinder . newSetBinder ( binder ( ) , ConfigurationListener . class ) ; listenerBinder . addBinding ( ) . to ( JBossAdminApi . class ) ; bind ( IJBossUtil . class ) . to ( JBossDelegator . class ) ; } catch ( Exception e ) { logger . error ( "Failed to initialize maven artifact resolver." , e ) ; }
public class SQLiteAssetHelper { /** * Create and / or open a database that will be used for reading and writing . * The first time this is called , the database will be extracted and copied * from the application ' s assets folder . * < p > Once opened successfully , the database is cached , so you can * call this method every time you need to write to the database . * ( Make sure to call { @ link # close } when you no longer need the database . ) * Errors such as bad permissions or a full disk may cause this method * to fail , but future attempts may succeed if the problem is fixed . < / p > * < p class = " caution " > Database upgrade may take a long time , you * should not call this method from the application main thread , including * from { @ link android . content . ContentProvider # onCreate ContentProvider . onCreate ( ) } . * @ throws SQLiteException if the database cannot be opened for writing * @ return a read / write database object valid until { @ link # close } is called */ @ Override public synchronized SQLiteDatabase getWritableDatabase ( ) { } }
if ( mDatabase != null && mDatabase . isOpen ( ) && ! mDatabase . isReadOnly ( ) ) { return mDatabase ; // The database is already open for business } if ( mIsInitializing ) { throw new IllegalStateException ( "getWritableDatabase called recursively" ) ; } // If we have a read - only database open , someone could be using it // ( though they shouldn ' t ) , which would cause a lock to be held on // the file , and our attempts to open the database read - write would // fail waiting for the file lock . To prevent that , we acquire the // lock on the read - only database , which shuts out other users . boolean success = false ; SQLiteDatabase db = null ; // if ( mDatabase ! = null ) mDatabase . lock ( ) ; try { mIsInitializing = true ; // if ( mName = = null ) { // db = SQLiteDatabase . create ( null ) ; // } else { // db = mContext . openOrCreateDatabase ( mName , 0 , mFactory ) ; db = createOrOpenDatabase ( false ) ; int version = db . getVersion ( ) ; // do force upgrade if ( version != 0 && version < mForcedUpgradeVersion ) { db = createOrOpenDatabase ( true ) ; db . setVersion ( mNewVersion ) ; version = db . getVersion ( ) ; } if ( version != mNewVersion ) { db . beginTransaction ( ) ; try { if ( version == 0 ) { onCreate ( db ) ; } else { if ( version > mNewVersion ) { Log . w ( TAG , "Can't downgrade read-only database from version " + version + " to " + mNewVersion + ": " + db . getPath ( ) ) ; } onUpgrade ( db , version , mNewVersion ) ; } db . setVersion ( mNewVersion ) ; db . setTransactionSuccessful ( ) ; } finally { db . endTransaction ( ) ; } } onOpen ( db ) ; success = true ; return db ; } finally { mIsInitializing = false ; if ( success ) { if ( mDatabase != null ) { try { mDatabase . close ( ) ; } catch ( Exception e ) { } // mDatabase . unlock ( ) ; } mDatabase = db ; } else { // if ( mDatabase ! = null ) mDatabase . unlock ( ) ; if ( db != null ) db . close ( ) ; } }
public class GBSTree { /** * Find an InsertStack for use by the current thread . * < p > Allocation of an InsertStack is more expensive than serial * reuse . This is a very cheap form of pooling done by attaching an * InsertStack to each thread that calls insert . < / p > */ private InsertStack getInsertStack ( ) { } }
Object x = _insertStack . get ( ) ; InsertStack g = null ; if ( x != null ) { g = ( InsertStack ) x ; g . reset ( ) ; } else { g = new InsertStack ( this ) ; x = ( Object ) g ; _insertStack . set ( x ) ; } return g ;
public class ProposalLineItem { /** * Gets the frequencyCaps value for this ProposalLineItem . * @ return frequencyCaps * The set of frequency capping units for this { @ code ProposalLineItem } . * This * attribute is optional during creation and defaults * to the * { @ link Product # frequencyCaps product ' s frequency caps } * if * { @ link Product # allowFrequencyCapsCustomization } is * { @ code false } . */ public com . google . api . ads . admanager . axis . v201902 . FrequencyCap [ ] getFrequencyCaps ( ) { } }
return frequencyCaps ;
public class TransactionImpl { /** * { @ inheritDoc } */ public void registerSynchronization ( Synchronization sync ) throws RollbackException , IllegalStateException , SystemException { } }
if ( status == Status . STATUS_UNKNOWN ) throw new IllegalStateException ( "Status unknown" ) ; syncs . add ( sync ) ;
public class LocalDate { /** * Gets the property object for the specified type , which contains many * useful methods . * @ param fieldType the field type to get the chronology for * @ return the property object * @ throws IllegalArgumentException if the field is null or unsupported */ public Property property ( DateTimeFieldType fieldType ) { } }
if ( fieldType == null ) { throw new IllegalArgumentException ( "The DateTimeFieldType must not be null" ) ; } if ( isSupported ( fieldType ) == false ) { throw new IllegalArgumentException ( "Field '" + fieldType + "' is not supported" ) ; } return new Property ( this , fieldType . getField ( getChronology ( ) ) ) ;
public class RequestCollapserFactory { /** * Lookup ( or create and store ) the RequestVariable for a given HystrixCollapserKey . * @ param commandCollapser collapser to retrieve { @ link HystrixRequestVariableHolder } for * @ return HystrixRequestVariableHolder */ @ SuppressWarnings ( "unchecked" ) private HystrixRequestVariableHolder < RequestCollapser < ? , ? , ? > > getRequestVariableForCommand ( final HystrixCollapserBridge < BatchReturnType , ResponseType , RequestArgumentType > commandCollapser ) { } }
HystrixRequestVariableHolder < RequestCollapser < ? , ? , ? > > requestVariable = requestScopedCollapsers . get ( commandCollapser . getCollapserKey ( ) . name ( ) ) ; if ( requestVariable == null ) { // create new collapser using ' this ' first instance as the one that will get cached for future executions ( ' this ' is stateless so we can do that ) @ SuppressWarnings ( { "rawtypes" } ) HystrixRequestVariableHolder newCollapser = new RequestCollapserRequestVariable ( commandCollapser , properties , timer , concurrencyStrategy ) ; HystrixRequestVariableHolder < RequestCollapser < ? , ? , ? > > existing = requestScopedCollapsers . putIfAbsent ( commandCollapser . getCollapserKey ( ) . name ( ) , newCollapser ) ; if ( existing == null ) { // this thread won , so return the one we just created requestVariable = newCollapser ; } else { // another thread beat us ( this should only happen when we have concurrency on the FIRST request for the life of the app for this HystrixCollapser class ) requestVariable = existing ; /* * This * should * be okay to discard the created object without cleanup as the RequestVariable implementation * should properly do lazy - initialization and only call initialValue ( ) the first time get ( ) is called . * If it does not correctly follow this contract then there is a chance of a memory leak here . */ } } return requestVariable ;
public class DescriptionStrategy { /** * Provide a human readable description for Every instance . * @ param every - Every * @ return human readable description - String */ protected String describe ( final Every every , final boolean and ) { } }
String description ; if ( every . getPeriod ( ) . getValue ( ) > 1 ) { description = String . format ( "%s %s " , bundle . getString ( EVERY ) , nominalValue ( every . getPeriod ( ) ) ) + " %p " ; } else { description = bundle . getString ( EVERY ) + " %s " ; } if ( every . getExpression ( ) instanceof Between ) { final Between between = ( Between ) every . getExpression ( ) ; description += MessageFormat . format ( bundle . getString ( "between_x_and_y" ) , nominalValue ( between . getFrom ( ) ) , nominalValue ( between . getTo ( ) ) ) + WHITE_SPACE ; } return description ;
public class AdminCommandScheduled { /** * Parses command - line and directs to sub - commands . * @ param args Command - line input * @ throws Exception */ public static void executeCommand ( String [ ] args ) throws Exception { } }
String subCmd = ( args . length > 0 ) ? args [ 0 ] : "" ; args = AdminToolUtils . copyArrayCutFirst ( args ) ; if ( subCmd . equals ( "list" ) ) { SubCommandScheduledList . executeCommand ( args ) ; } else if ( subCmd . equals ( "stop" ) ) { SubCommandScheduledStop . executeCommand ( args ) ; } else if ( subCmd . equals ( "enable" ) ) { SubCommandScheduledEnable . executeCommand ( args ) ; } else { printHelp ( System . out ) ; }
public class ResourceReaderImpl { /** * / * ( non - Javadoc ) * @ see net . crowmagnumb . util . ResourceReader # getInteger ( java . lang . String ) */ @ Override public Integer getInteger ( final String key ) throws UtilException { } }
return formatInteger ( key , getRequiredPropValue ( key ) ) ;
public class AppEngineCorePluginConfiguration { /** * Configure core tasks for appengine app . yaml and appengine - web . xml based project plugins . */ public void configureCoreProperties ( Project project , AppEngineCoreExtensionProperties appEngineCoreExtensionProperties , String taskGroup , boolean requiresAppEngineJava ) { } }
project . getLogger ( ) . warn ( "WARNING: You are a using release candidate " + getClass ( ) . getPackage ( ) . getImplementationVersion ( ) + ". Behavior of this plugin has changed since 1.3.5. Please see release notes at: " + "https://github.com/GoogleCloudPlatform/app-gradle-plugin." ) ; project . getLogger ( ) . warn ( "Missing a feature? Can't get it to work?, please file a bug at: " + "https://github.com/GoogleCloudPlatform/app-gradle-plugin/issues." ) ; checkGradleVersion ( ) ; this . project = project ; this . taskGroup = taskGroup ; this . toolsExtension = appEngineCoreExtensionProperties . getTools ( ) ; this . deployExtension = appEngineCoreExtensionProperties . getDeploy ( ) ; this . requiresAppEngineJava = requiresAppEngineJava ; configureFactories ( ) ; createDownloadCloudSdkTask ( ) ; createCheckCloudSdkTask ( ) ; createLoginTask ( ) ; createDeployTask ( ) ; createDeployCronTask ( ) ; createDeployDispatchTask ( ) ; createDeployDosTask ( ) ; createDeployIndexTask ( ) ; createDeployQueueTask ( ) ; createDeployAllTask ( ) ; createShowConfigurationTask ( ) ;
public class VirtualNodeSpecMarshaller { /** * Marshall the given parameter object . */ public void marshall ( VirtualNodeSpec virtualNodeSpec , ProtocolMarshaller protocolMarshaller ) { } }
if ( virtualNodeSpec == null ) { throw new SdkClientException ( "Invalid argument passed to marshall(...)" ) ; } try { protocolMarshaller . marshall ( virtualNodeSpec . getBackends ( ) , BACKENDS_BINDING ) ; protocolMarshaller . marshall ( virtualNodeSpec . getListeners ( ) , LISTENERS_BINDING ) ; protocolMarshaller . marshall ( virtualNodeSpec . getLogging ( ) , LOGGING_BINDING ) ; protocolMarshaller . marshall ( virtualNodeSpec . getServiceDiscovery ( ) , SERVICEDISCOVERY_BINDING ) ; } catch ( Exception e ) { throw new SdkClientException ( "Unable to marshall request to JSON: " + e . getMessage ( ) , e ) ; }
public class tmtrafficpolicy_stats { /** * Use this API to fetch statistics of tmtrafficpolicy _ stats resource of given name . */ public static tmtrafficpolicy_stats get ( nitro_service service , String name ) throws Exception { } }
tmtrafficpolicy_stats obj = new tmtrafficpolicy_stats ( ) ; obj . set_name ( name ) ; tmtrafficpolicy_stats response = ( tmtrafficpolicy_stats ) obj . stat_resource ( service ) ; return response ;
public class CmsPropertyAdvanced { /** * Creates the HTML String for the edit properties form . < p > * The values of the form are set delayed , have a look at the * JavaDoc of buildSetFormValues ( ) for a detailed description . < p > * @ return the HTML output String for the edit properties form */ public String buildEditForm ( ) { } }
StringBuffer result = new StringBuffer ( 4096 ) ; // get currently active tab String activeTab = getActiveTabName ( ) ; // initialize " disabled " attribute for the input fields String disabled = "" ; if ( ! isEditable ( ) ) { disabled = " disabled=\"disabled\"" ; } // get all properties for the resource List < String [ ] > properties = getPropertyValues ( ) ; // check for presence of property definitions , should always be true if ( properties . size ( ) > 0 ) { // there are properties defined for this resource , build the form list result . append ( "<table border=\"0\" style=\"width:100%\">\n" ) ; result . append ( "<tr>\n" ) ; result . append ( "\t<td class=\"textbold\" nowrap>" ) ; result . append ( key ( Messages . GUI_PROPERTY_0 ) ) ; result . append ( "</td>\n" ) ; result . append ( "\t<td class=\"textbold\">" ) ; result . append ( key ( Messages . GUI_PROPERTY_VALUE_0 ) ) ; result . append ( "</td>\n" ) ; // build column for checkbox result . append ( "\t<td class=\"textbold\" style=\"white-space: nowrap;\">" ) ; result . append ( "&nbsp;" ) ; result . append ( "</td>\n" ) ; result . append ( "</tr>\n" ) ; result . append ( "<tr><td colspan=\"3\"><span style=\"height: 6px;\"></span></td></tr>\n" ) ; // show all possible properties for the resource Iterator < String [ ] > i = properties . iterator ( ) ; while ( i . hasNext ( ) ) { String [ ] curProp = i . next ( ) ; // create a single property row result . append ( buildPropertyRow ( curProp [ 0 ] , curProp [ 1 ] , curProp [ 2 ] , curProp [ 3 ] , disabled , activeTab ) ) ; } result . append ( "</table>" ) ; } else { // there are no properties defined for this resource , show nothing ( should never happen ) result . append ( key ( Messages . GUI_PROPERTY_ADVANCED_NO_PROPDEFS_0 ) ) ; } return result . toString ( ) ;
public class CharacterReader { /** * Check if the value of the provided range equals the string . */ static boolean rangeEquals ( final char [ ] charBuf , final int start , int count , final String cached ) { } }
if ( count == cached . length ( ) ) { int i = start ; int j = 0 ; while ( count -- != 0 ) { if ( charBuf [ i ++ ] != cached . charAt ( j ++ ) ) return false ; } return true ; } return false ;
public class CliFrontend { /** * Builds command line options for the cancel action . * @ return Command line options for the cancel action . */ static Options getCancelOptions ( Options options ) { } }
options . addOption ( ID_OPTION ) ; options = getJobManagerAddressOption ( options ) ; return options ;
public class BatchWriteRequest { /** * A list of operations that are part of the batch . * < b > NOTE : < / b > This method appends the values to the existing list ( if any ) . Use * { @ link # setOperations ( java . util . Collection ) } or { @ link # withOperations ( java . util . Collection ) } if you want to * override the existing values . * @ param operations * A list of operations that are part of the batch . * @ return Returns a reference to this object so that method calls can be chained together . */ public BatchWriteRequest withOperations ( BatchWriteOperation ... operations ) { } }
if ( this . operations == null ) { setOperations ( new java . util . ArrayList < BatchWriteOperation > ( operations . length ) ) ; } for ( BatchWriteOperation ele : operations ) { this . operations . add ( ele ) ; } return this ;
public class CommerceOrderPersistenceImpl { /** * Returns the last commerce order in the ordered set where groupId = & # 63 ; and commerceAccountId = & # 63 ; and orderStatus = & # 63 ; . * @ param groupId the group ID * @ param commerceAccountId the commerce account ID * @ param orderStatus the order status * @ param orderByComparator the comparator to order the set by ( optionally < code > null < / code > ) * @ return the last matching commerce order * @ throws NoSuchOrderException if a matching commerce order could not be found */ @ Override public CommerceOrder findByG_C_O_Last ( long groupId , long commerceAccountId , int orderStatus , OrderByComparator < CommerceOrder > orderByComparator ) throws NoSuchOrderException { } }
CommerceOrder commerceOrder = fetchByG_C_O_Last ( groupId , commerceAccountId , orderStatus , orderByComparator ) ; if ( commerceOrder != null ) { return commerceOrder ; } StringBundler msg = new StringBundler ( 8 ) ; msg . append ( _NO_SUCH_ENTITY_WITH_KEY ) ; msg . append ( "groupId=" ) ; msg . append ( groupId ) ; msg . append ( ", commerceAccountId=" ) ; msg . append ( commerceAccountId ) ; msg . append ( ", orderStatus=" ) ; msg . append ( orderStatus ) ; msg . append ( "}" ) ; throw new NoSuchOrderException ( msg . toString ( ) ) ;
public class CPMeasurementUnitUtil { /** * Returns the last cp measurement unit in the ordered set where uuid = & # 63 ; and companyId = & # 63 ; . * @ param uuid the uuid * @ param companyId the company ID * @ param orderByComparator the comparator to order the set by ( optionally < code > null < / code > ) * @ return the last matching cp measurement unit , or < code > null < / code > if a matching cp measurement unit could not be found */ public static CPMeasurementUnit fetchByUuid_C_Last ( String uuid , long companyId , OrderByComparator < CPMeasurementUnit > orderByComparator ) { } }
return getPersistence ( ) . fetchByUuid_C_Last ( uuid , companyId , orderByComparator ) ;
public class NfsCreateRequest { /** * ( non - Javadoc ) * @ see com . emc . ecs . nfsclient . nfs . NfsRequestBase # marshalling ( com . emc . ecs . * nfsclient . rpc . Xdr ) */ public void marshalling ( Xdr xdr ) { } }
super . marshalling ( xdr ) ; xdr . putString ( _name ) ; xdr . putInt ( _createMode . getValue ( ) ) ; if ( _createMode != NfsCreateMode . EXCLUSIVE ) { _attributes . marshalling ( xdr ) ; } else { xdr . putByteArray ( _verifier ) ; }
public class QuickStartSecurityRegistry { /** * { @ inheritDoc } */ @ Override public SearchResult getGroups ( String pattern , int limit ) throws RegistryException { } }
if ( pattern == null ) { throw new IllegalArgumentException ( "pattern is null" ) ; } if ( pattern . isEmpty ( ) ) { throw new IllegalArgumentException ( "pattern is an empty String" ) ; } return new SearchResult ( ) ;
public class HbaseSyncService { /** * 插入操作 * @ param config 配置项 * @ param dml DML数据 */ private void insert ( MappingConfig config , Dml dml ) { } }
List < Map < String , Object > > data = dml . getData ( ) ; if ( data == null || data . isEmpty ( ) ) { return ; } MappingConfig . HbaseMapping hbaseMapping = config . getHbaseMapping ( ) ; // if ( ! validHTable ( config ) ) { // logger . error ( " HBase table ' { } ' not exists " , // hbaseMapping . getHbaseTable ( ) ) ; // return ; int i = 1 ; boolean complete = false ; List < HRow > rows = new ArrayList < > ( ) ; for ( Map < String , Object > r : data ) { HRow hRow = new HRow ( ) ; // 拼接复合rowKey if ( hbaseMapping . getRowKey ( ) != null ) { String [ ] rowKeyColumns = hbaseMapping . getRowKey ( ) . trim ( ) . split ( "," ) ; String rowKeyVale = getRowKeys ( rowKeyColumns , r ) ; // params . put ( " rowKey " , Bytes . toBytes ( rowKeyVale ) ) ; hRow . setRowKey ( Bytes . toBytes ( rowKeyVale ) ) ; } convertData2Row ( hbaseMapping , hRow , r ) ; if ( hRow . getRowKey ( ) == null ) { throw new RuntimeException ( "empty rowKey" ) ; } rows . add ( hRow ) ; complete = false ; if ( i % config . getHbaseMapping ( ) . getCommitBatch ( ) == 0 && ! rows . isEmpty ( ) ) { hbaseTemplate . puts ( hbaseMapping . getHbaseTable ( ) , rows ) ; rows . clear ( ) ; complete = true ; } i ++ ; } if ( ! complete && ! rows . isEmpty ( ) ) { hbaseTemplate . puts ( hbaseMapping . getHbaseTable ( ) , rows ) ; }
public class ChannelSuppliers { /** * Collects data provided by the { @ code supplier } asynchronously and returns a * promise of accumulated result . This process will be getting values from the * { @ code supplier } , until a promise of { @ code null } is returned , which represents * end of stream . * If { @ code get } returns a promise of exception or there was an exception while * { @ code accumulator } accepted values , a promise of { @ code exception } will be * returned and the process will stop . * @ param supplier a { @ code ChannelSupplier } which provides data to be collected * @ param initialValue a value which will accumulate the results of accumulator * @ param accumulator a { @ link BiConsumer } which may perform some operations over provided * by supplier data and accumulates the result to the initialValue * @ param finisher a { @ link Function } which performs the final transformation of the * accumulated value * @ param < T > a data type provided by the { @ code supplier } * @ param < A > an intermediate accumulation data type * @ param < R > a data type of final result of { @ code finisher } * @ return a promise of accumulated result , transformed by the { @ code finisher } */ public static < T , A , R > Promise < R > collect ( ChannelSupplier < T > supplier , A initialValue , BiConsumer < A , T > accumulator , Function < A , R > finisher ) { } }
// Bridge the callback-driven collector implementation into a Promise.
return Promise . ofCallback ( cb -> toCollectorImpl ( supplier , initialValue , accumulator , finisher , cb ) ) ;
public class FailoverRegion { /** * Notice the region to failover , */ private void failover ( long globalModVersionOfFailover ) { } }
// If the restart strategy forbids another restart, escalate to a global failure.
if ( ! executionGraph . getRestartStrategy ( ) . canRestart ( ) ) {
    executionGraph . failGlobal ( new FlinkException ( "RestartStrategy validate fail" ) ) ;
} else {
    JobStatus curStatus = this . state ;
    // RUNNING -> cancel first; CANCELED -> reset/restart; any other state means a
    // transition is already in flight, so only log the notification.
    if ( curStatus . equals ( JobStatus . RUNNING ) ) {
        cancel ( globalModVersionOfFailover ) ;
    } else if ( curStatus . equals ( JobStatus . CANCELED ) ) {
        reset ( globalModVersionOfFailover ) ;
    } else {
        LOG . info ( "FailoverRegion {} is {} when notified to failover." , id , state ) ;
    }
}
public class SynchroData { /** * Read the file header data . * @ param is input stream * @ throws IOException if the stream ends before the full header is read */ private void readHeader ( InputStream is ) throws IOException { } }
// The file header is a fixed 20-byte record.
byte [ ] header = new byte [ 20 ] ;
// BUG FIX: InputStream.read(byte[]) may return fewer bytes than requested and
// its return value was ignored, so a short read could leave the tail of the
// header as zeros. Loop until the buffer is full or the stream ends.
int bytesRead = 0 ;
while ( bytesRead < header . length ) {
    int count = is . read ( header , bytesRead , header . length - bytesRead ) ;
    if ( count == - 1 ) {
        throw new IOException ( "Unexpected end of stream while reading file header" ) ;
    }
    bytesRead += count ;
}
// Track the current position within the file for subsequent reads.
m_offset += 20 ;
SynchroLogger . log ( "HEADER" , header ) ;
public class NetTimeConnector { /** * Returns the current difference between network time and local time in microseconds . * @ param micros current local time in microseconds * @ return microsecond offset ( { @ code 0 } if no connection has been established yet ) */ long getLastOffset ( long micros ) { } }
// Read the result reference once so the null check and the dereference
// operate on the same object.
final ConnectionResult snapshot = this . result ;
if ( snapshot == null ) {
    // No connection has been established yet.
    return 0 ;
}
return snapshot . getActualOffset ( micros ) ;
public class LdapAdapter { /** * Method to get the list of descendants . * @ param entity parent entity to attach descendants to * @ param ldapEntry LDAP entry whose subtree is searched * @ param descCtrl descendant control ( may be null , meaning no descendants requested ) * @ throws WIMException */ private void getDescendants ( Entity entity , LdapEntry ldapEntry , DescendantControl descCtrl ) throws WIMException { } }
// Nothing to do when no descendant control was supplied.
if ( descCtrl == null ) { return ; }
List < String > propNames = descCtrl . getProperties ( ) ;
int level = descCtrl . getLevel ( ) ;
List < String > descTypes = getEntityTypes ( descCtrl ) ;
String [ ] bases = getBases ( descCtrl , descTypes ) ;
boolean treeView = descCtrl . isSetTreeView ( ) ;
// level == 0 without tree view means "all descendants", so widen to a subtree
// search; otherwise search one level at a time.
int scope = SearchControls . ONELEVEL_SCOPE ;
if ( level == 0 && ! treeView ) { scope = SearchControls . SUBTREE_SCOPE ; }
Set < String > descToDo = new HashSet < String > ( ) ;
Map < String , Entity > descendants = new HashMap < String , Entity > ( ) ;
Set < LdapEntry > descEntries = iLdapConn . searchEntities ( ldapEntry . getDN ( ) , "objectClass=*" , null , scope , descTypes , propNames , false , false ) ;
// First pass: entries under the configured bases with a requested type get full
// properties; in tree view other entries are still materialized (without properties)
// so the tree structure stays connected.
for ( LdapEntry descEntry : descEntries ) { String descType = descEntry . getType ( ) ; String descDn = descEntry . getDN ( ) ; Entity descendant = null ; if ( LdapHelper . isUnderBases ( descDn , bases ) && descTypes . contains ( descType ) ) { descendant = createEntityFromLdapEntry ( entity , SchemaConstants . DO_CHILDREN , descEntry , propNames ) ; } else if ( treeView ) { descendant = createEntityFromLdapEntry ( entity , SchemaConstants . DO_CHILDREN , descEntry , null ) ; } if ( treeView ) { descToDo . add ( descDn ) ; descendants . put ( descDn , descendant ) ; } }
// Tree view: breadth-first expansion, issuing one LDAP search per pending DN per level.
if ( treeView ) { while ( descToDo . size ( ) > 0 ) { Set < String > nextDescs = new HashSet < String > ( ) ; for ( String dn : descToDo ) { Entity parent = descendants . get ( dn ) ; descEntries = iLdapConn . searchEntities ( dn , "objectClass=*" , null , scope , descTypes , propNames , false , false ) ; for ( LdapEntry descEntry : descEntries ) { String descType = descEntry . getType ( ) ; String descDn = descEntry . getDN ( ) ; Entity descendant = null ; if ( descTypes . contains ( descType ) ) { descendant = createEntityFromLdapEntry ( parent , SchemaConstants . DO_CHILDREN , descEntry , propNames ) ; } else if ( treeView ) { descendant = createEntityFromLdapEntry ( parent , SchemaConstants . DO_CHILDREN , descEntry , null ) ; } if ( ! descToDo . contains ( descDn ) ) { nextDescs . add ( descDn ) ; descendants . put ( descDn , descendant ) ; } } } descToDo = nextDescs ; } }
public class WebAppSecurityCollaboratorImpl { /** * This preInvoke is called during init & during destroy of a Servlet class object . * It will call the other preInvoke to ensure delegation occurs . { @ inheritDoc } */ @ Override public Object preInvoke ( String servletName ) throws SecurityViolationException , IOException { } }
// Delegate to the full preInvoke (null request/response, enforceSecurity=true)
// so that run-as delegation is performed when it is specified.
return preInvoke ( null , null , servletName , true ) ;
public class CommerceAccountOrganizationRelPersistenceImpl { /** * Returns the first commerce account organization rel in the ordered set where organizationId = & # 63 ; . * @ param organizationId the organization ID * @ param orderByComparator the comparator to order the set by ( optionally < code > null < / code > ) * @ return the first matching commerce account organization rel * @ throws NoSuchAccountOrganizationRelException if a matching commerce account organization rel could not be found */ @ Override public CommerceAccountOrganizationRel findByOrganizationId_First ( long organizationId , OrderByComparator < CommerceAccountOrganizationRel > orderByComparator ) throws NoSuchAccountOrganizationRelException { } }
// Delegate to the non-throwing fetch variant and translate a miss into the
// checked exception this finder is contracted to throw.
CommerceAccountOrganizationRel first = fetchByOrganizationId_First ( organizationId , orderByComparator ) ;
if ( first == null ) {
    StringBundler msg = new StringBundler ( 4 ) ;
    msg . append ( _NO_SUCH_ENTITY_WITH_KEY ) ;
    msg . append ( "organizationId=" ) ;
    msg . append ( organizationId ) ;
    msg . append ( "}" ) ;
    throw new NoSuchAccountOrganizationRelException ( msg . toString ( ) ) ;
}
return first ;
public class CmsDeleteMultiplePrincipalDialog { /** * Initializes the dialog . < p > * @ param cms CmsObject * @ param window window * @ param app the accounts app used to reload the view after deletion */ private void init ( CmsObject cms , final Window window , final CmsAccountsApp app ) { } }
// Load and localize the declarative design, then set up the warning icons.
CmsVaadinUtils . readAndLocalizeDesign ( this , CmsVaadinUtils . getWpMessagesForCurrentLocale ( ) , null ) ;
m_icon . setContentMode ( ContentMode . HTML ) ;
m_icon . setValue ( FontOpenCms . WARNING . getHtml ( ) ) ;
m_icon2 . setContentMode ( ContentMode . HTML ) ;
m_icon2 . setValue ( FontOpenCms . WARNING . getHtml ( ) ) ;
m_label_deleteDefault . setVisible ( false ) ;
m_cms = cms ;
// OK: perform the deletion, close the dialog and refresh the accounts view.
m_okButton . addClickListener ( new ClickListener ( ) { private static final long serialVersionUID = - 7845894751587879028L ; public void buttonClick ( ClickEvent event ) { deletePrincipal ( ) ; window . close ( ) ; app . reload ( ) ; } } ) ;
// Cancel: just close the dialog without touching any principals.
m_cancelButton . addClickListener ( new ClickListener ( ) { private static final long serialVersionUID = 6649262870116199591L ; public void buttonClick ( ClickEvent event ) { window . close ( ) ; } } ) ;
public class SSLConnectionContextImpl { /** * @ see com . ibm . wsspi . tcpchannel . SSLConnectionContext # renegotiate ( ) */ public void renegotiate ( ) { } }
// Kick off an SSL handshake renegotiation on the underlying engine. Failures
// are recorded via FFDC and debug trace; the exception is intentionally not
// rethrown (best-effort renegotiation).
try {
    this . sslConnLink . getSSLEngine ( ) . beginHandshake ( ) ;
} catch ( SSLException se ) {
    FFDCFilter . processException ( se , getClass ( ) . getName ( ) + ".renegotiate" , "1" ) ;
    if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) ) {
        Tr . debug ( tc , "Error while attempting handshake renegotiation; " + se ) ;
    }
}
public class Attributes { /** * adds without checking if this key exists */ private void add ( String key , String value ) { } }
checkCapacity ( size + 1 ) ; keys [ size ] = key ; vals [ size ] = value ; size ++ ;
public class RedisClusterStorage { /** * Store a { @ link Calendar } * @ param name the name of the calendar * @ param calendar the calendar object to be stored * @ param replaceExisting if true , any existing calendar with the same name will be overwritten * @ param updateTriggers if true , any existing triggers associated with the calendar will be updated * @ param jedis a thread - safe Redis connection * @ throws JobPersistenceException */ @ Override public void storeCalendar ( String name , Calendar calendar , boolean replaceExisting , boolean updateTriggers , JedisCluster jedis ) throws JobPersistenceException { } }
final String calendarHashKey = redisSchema . calendarHashKey ( name ) ;
// Refuse to clobber an existing calendar unless replacement was requested.
if ( ! replaceExisting && jedis . exists ( calendarHashKey ) ) { throw new ObjectAlreadyExistsException ( String . format ( "Calendar with key %s already exists." , calendarHashKey ) ) ; }
// Persist the calendar as a hash: concrete class name + JSON-serialized state.
Map < String , String > calendarMap = new HashMap < > ( ) ;
calendarMap . put ( CALENDAR_CLASS , calendar . getClass ( ) . getName ( ) ) ;
try { calendarMap . put ( CALENDAR_JSON , mapper . writeValueAsString ( calendar ) ) ; } catch ( JsonProcessingException e ) { throw new JobPersistenceException ( "Unable to serialize calendar." , e ) ; }
jedis . hmset ( calendarHashKey , calendarMap ) ;
jedis . sadd ( redisSchema . calendarsSet ( ) , calendarHashKey ) ;
// Optionally re-apply the new calendar to every trigger that references it.
// A trigger is re-inserted into the WAITING state set (with its updated next
// fire time) only if it was in WAITING before the update.
if ( updateTriggers ) { final String calendarTriggersSetKey = redisSchema . calendarTriggersSetKey ( name ) ; Set < String > triggerHashKeys = jedis . smembers ( calendarTriggersSetKey ) ; for ( String triggerHashKey : triggerHashKeys ) { OperableTrigger trigger = retrieveTrigger ( redisSchema . triggerKey ( triggerHashKey ) , jedis ) ; long removed = jedis . zrem ( redisSchema . triggerStateKey ( RedisTriggerState . WAITING ) , triggerHashKey ) ; trigger . updateWithNewCalendar ( calendar , misfireThreshold ) ; if ( removed == 1 ) { setTriggerState ( RedisTriggerState . WAITING , ( double ) trigger . getNextFireTime ( ) . getTime ( ) , triggerHashKey , jedis ) ; } } }
public class DRL5Expressions { /** * src / main / resources / org / drools / compiler / lang / DRL5Expressions . g : 230:1 : conditionalAndExpression returns [ BaseDescr result ] : left = inclusiveOrExpression ( DOUBLE _ AMPER ( args = fullAnnotation [ null ] ) ? right = inclusiveOrExpression ) * ; */ public final BaseDescr conditionalAndExpression ( ) throws RecognitionException { } }
// NOTE(review): ANTLR-generated parser rule for `&&` expressions — do not edit
// by hand; regenerate from DRL5Expressions.g instead.
BaseDescr result = null ;
BaseDescr left = null ;
AnnotationDescr args = null ;
BaseDescr right = null ;
try {
// src/main/resources/org/drools/compiler/lang/DRL5Expressions.g:231:3 : ( left = inclusiveOrExpression ( DOUBLE_AMPER ( args = fullAnnotation [ null ] ) ? right = inclusiveOrExpression ) * )
// src/main/resources/org/drools/compiler/lang/DRL5Expressions.g:231:5 : left = inclusiveOrExpression ( DOUBLE_AMPER ( args = fullAnnotation [ null ] ) ? right = inclusiveOrExpression ) *
{ pushFollow ( FOLLOW_inclusiveOrExpression_in_conditionalAndExpression1182 ) ; left = inclusiveOrExpression ( ) ; state . _fsp -- ; if ( state . failed ) return result ; if ( state . backtracking == 0 ) { if ( buildDescr ) { result = left ; } }
// src/main/resources/org/drools/compiler/lang/DRL5Expressions.g:232:3 : ( DOUBLE_AMPER ( args = fullAnnotation [ null ] ) ? right = inclusiveOrExpression ) *
loop27 : while ( true ) { int alt27 = 2 ; int LA27_0 = input . LA ( 1 ) ; if ( ( LA27_0 == DOUBLE_AMPER ) ) { alt27 = 1 ; } switch ( alt27 ) { case 1 :
// src/main/resources/org/drools/compiler/lang/DRL5Expressions.g:232:5 : DOUBLE_AMPER ( args = fullAnnotation [ null ] ) ? right = inclusiveOrExpression
{ match ( input , DOUBLE_AMPER , FOLLOW_DOUBLE_AMPER_in_conditionalAndExpression1190 ) ; if ( state . failed ) return result ; if ( state . backtracking == 0 ) { if ( isNotEOF ( ) ) helper . emit ( Location . LOCATION_LHS_INSIDE_CONDITION_OPERATOR ) ; }
// src/main/resources/org/drools/compiler/lang/DRL5Expressions.g:234:13 : ( args = fullAnnotation [ null ] ) ?
int alt26 = 2 ; int LA26_0 = input . LA ( 1 ) ; if ( ( LA26_0 == AT ) ) { alt26 = 1 ; } switch ( alt26 ) { case 1 :
// src/main/resources/org/drools/compiler/lang/DRL5Expressions.g:234:13 : args = fullAnnotation [ null ]
{ pushFollow ( FOLLOW_fullAnnotation_in_conditionalAndExpression1213 ) ; args = fullAnnotation ( null ) ; state . _fsp -- ; if ( state . failed ) return result ; } break ; } pushFollow ( FOLLOW_inclusiveOrExpression_in_conditionalAndExpression1219 ) ; right = inclusiveOrExpression ( ) ; state . _fsp -- ; if ( state . failed ) return result ; if ( state . backtracking == 0 ) { if ( buildDescr ) { ConstraintConnectiveDescr descr = ConstraintConnectiveDescr . newAnd ( ) ; descr . addOrMerge ( result ) ; descr . addOrMerge ( right ) ; if ( args != null ) { descr . addAnnotation ( args ) ; } result = descr ; } } } break ; default : break loop27 ; } } } } catch ( RecognitionException re ) { throw re ; } finally {
// do for sure before leaving
}
return result ;
public class TimedMap { /** * This method is NOT supported and always throws UnsupportedOperationException * @ see Map # keySet ( ) */ public Set < K > keySet ( ) { } }
if ( TraceComponent . isAnyTracingEnabled ( ) && _tc . isEntryEnabled ( ) ) {
    SibTr . entry ( this , _tc , "keySet" ) ;
}
// keySet is deliberately unsupported; trace the exception on exit before throwing.
final UnsupportedOperationException unsupported = new UnsupportedOperationException ( ) ;
if ( TraceComponent . isAnyTracingEnabled ( ) && _tc . isEntryEnabled ( ) ) {
    SibTr . exit ( this , _tc , "keySet" , unsupported ) ;
}
throw unsupported ;
public class FilteredNormalizer2 { /** * { @ inheritDoc } */ @ Override public Appendable normalize ( CharSequence src , Appendable dest ) { } }
// Normalizing in place is not permitted: source and destination must differ.
if ( src == dest ) {
    throw new IllegalArgumentException ( ) ;
}
return normalize ( src , dest , UnicodeSet . SpanCondition . SIMPLE ) ;
public class ApproximateHistogram { /** * @ param h histogram to be merged into the current histogram * @ param mergedPositions temporary buffer of size greater or equal to { @ link # size } * @ param mergedBins temporary buffer of size greater or equal to { @ link # size } * @ return returns this histogram with h folded into it */ public ApproximateHistogram foldFast ( ApproximateHistogram h , float [ ] mergedPositions , long [ ] mergedBins ) { } }
if ( size == 0 ) { return copy ( h ) ; } else { return foldRule ( h , mergedPositions , mergedBins ) ; }
public class SMB1SigningDigest { /** * { @ inheritDoc } * @ see jcifs . internal . SMBSigningDigest # sign ( byte [ ] , int , int , jcifs . internal . CommonServerMessageBlock , * jcifs . internal . CommonServerMessageBlock ) */ @ Override public void sign ( byte [ ] data , int offset , int length , CommonServerMessageBlock request , CommonServerMessageBlock response ) { } }
if ( log . isTraceEnabled ( ) ) { log . trace ( "Signing with seq " + this . signSequence ) ; }
// The request carries the current sequence number; the matching response is
// expected to carry the next one.
( ( ServerMessageBlock ) request ) . setSignSeq ( this . signSequence ) ;
if ( response != null ) { ( ( ServerMessageBlock ) response ) . setSignSeq ( this . signSequence + 1 ) ; }
try {
// MAC = digest(signing key + message with the 8-byte signature field zeroed
// except for the sequence number written into its first 4 bytes).
update ( this . macSigningKey , 0 , this . macSigningKey . length ) ;
int index = offset + SmbConstants . SIGNATURE_OFFSET ;
for ( int i = 0 ; i < 8 ; i ++ ) data [ index + i ] = 0 ;
SMBUtil . writeInt4 ( this . signSequence , data , index ) ;
update ( data , offset , length ) ;
System . arraycopy ( digest ( ) , 0 , data , index , 8 ) ;
// Bypass mode: overwrite the computed signature with the dummy marker once.
if ( this . bypass ) { this . bypass = false ; System . arraycopy ( "BSRSPYL " . getBytes ( ) , 0 , data , index , 8 ) ; }
} catch ( Exception ex ) { log . error ( "Signature failed" , ex ) ; } finally {
// NT_CANCEL consumes only one sequence number; normal request/response pairs consume two.
if ( request instanceof SmbComNtCancel ) { this . signSequence ++ ; } else { this . signSequence += 2 ; } }
public class appfwxmlcontenttype { /** * Use this API to delete appfwxmlcontenttype resources of given names . */ public static base_responses delete ( nitro_service client , String xmlcontenttypevalue [ ] ) throws Exception { } }
// Nothing to delete: preserve the original contract of returning null for a
// null or empty name array.
if ( xmlcontenttypevalue == null || xmlcontenttypevalue . length == 0 ) {
    return null ;
}
// Build one resource stub per name and issue a single bulk delete request.
appfwxmlcontenttype [ ] deleteresources = new appfwxmlcontenttype [ xmlcontenttypevalue . length ] ;
for ( int i = 0 ; i < xmlcontenttypevalue . length ; i ++ ) {
    appfwxmlcontenttype resource = new appfwxmlcontenttype ( ) ;
    resource . xmlcontenttypevalue = xmlcontenttypevalue [ i ] ;
    deleteresources [ i ] = resource ;
}
return delete_bulk_request ( client , deleteresources ) ;
public class RangeSeekBar { /** * Sets normalized max value to value so that 0 < = normalized min value < = value < = 1 . The View will get invalidated when calling this method . * @ param value The new normalized max value to set . */ private void setNormalizedMaxValue ( double value ) { } }
normalizedMaxValue = Math . max ( 0d , Math . min ( 1d , Math . max ( value , normalizedMinValue ) ) ) ; invalidate ( ) ;
public class SimpleDocumentDbRepository { /** * Deletes the given entities ( partition - less ) . * @ param entities the entities to delete ; must not be null */ @ Override public void deleteAll ( Iterable < ? extends T > entities ) { } }
Assert . notNull ( entities , "Iterable entities should not be null" ) ;
// Deletes each entity via the single-entity delete. Note the `true` flag makes
// this a parallel stream — deletions may run concurrently and in no fixed order.
StreamSupport . stream ( entities . spliterator ( ) , true ) . forEach ( this :: delete ) ;
public class MechanizeAgent { /** * ( non - Javadoc ) * @ see com . gistlabs . mechanize . Mechanize # get ( java . lang . String ) */ @ Override public < T extends Resource > T get ( final String uri ) { } }
// Build a request for the URI and execute it as a GET, returning the typed resource.
return doRequest ( uri ) . get ( ) ;
public class FSDirectory { /** * See { @ link ClientProtocol # setQuota ( String , long , long ) } for the contract . * Sets quota for for a directory . * @ returns INodeDirectory if any of the quotas have changed . null other wise . * @ throws FileNotFoundException if the path does not exist or is a file * @ throws QuotaExceededException if the directory tree size is * greater than the given quota */ INodeDirectory unprotectedSetQuota ( String src , long nsQuota , long dsQuota ) throws FileNotFoundException , QuotaExceededException { } }
// sanity check: only non-negative values, QUOTA_DONT_SET or QUOTA_RESET are legal
if ( ( nsQuota < 0 && nsQuota != FSConstants . QUOTA_DONT_SET && nsQuota < FSConstants . QUOTA_RESET ) || ( dsQuota < 0 && dsQuota != FSConstants . QUOTA_DONT_SET && dsQuota < FSConstants . QUOTA_RESET ) ) { throw new IllegalArgumentException ( "Illegal value for nsQuota or " + "dsQuota : " + nsQuota + " and " + dsQuota ) ; }
String srcs = normalizePath ( src ) ;
INode [ ] inodes = rootDir . getExistingPathINodes ( src ) ;
INode targetNode = inodes [ inodes . length - 1 ] ;
if ( targetNode == null ) { throw new FileNotFoundException ( "Directory does not exist: " + srcs ) ; } else if ( ! targetNode . isDirectory ( ) ) { throw new FileNotFoundException ( "Cannot set quota on a file: " + srcs ) ; } else {
// a directory inode
INodeDirectory dirNode = ( INodeDirectory ) targetNode ;
long oldNsQuota = dirNode . getNsQuota ( ) ;
long oldDsQuota = dirNode . getDsQuota ( ) ;
// QUOTA_DONT_SET keeps the corresponding existing quota untouched.
if ( nsQuota == FSConstants . QUOTA_DONT_SET ) { nsQuota = oldNsQuota ; }
if ( dsQuota == FSConstants . QUOTA_DONT_SET ) { dsQuota = oldDsQuota ; }
if ( dirNode instanceof INodeDirectoryWithQuota ) {
// a directory with quota ; so set the quota to the new value
( ( INodeDirectoryWithQuota ) dirNode ) . setQuota ( nsQuota , dsQuota ) ; } else {
// a non - quota directory ; so replace it with a directory with quota
INodeDirectoryWithQuota newNode = new INodeDirectoryWithQuota ( nsQuota , dsQuota , dirNode ) ;
// non - root directory node ; parent != null
INodeDirectory parent = ( INodeDirectory ) inodes [ inodes . length - 2 ] ;
dirNode = newNode ;
parent . replaceChild ( newNode ) ;
// replace oldNode by newNode with the same id
inodeMap . put ( newNode ) ;
}
// Only report a change when at least one of the effective quotas differs.
return ( oldNsQuota != nsQuota || oldDsQuota != dsQuota ) ? dirNode : null ;
}
public class TSIG { /** * Generates a TSIG record for a message and adds it to the message * @ param m The message * @ param old If this message is a response , the TSIG from the request */ public void apply ( Message m , TSIGRecord old ) { } }
// Delegate to the full form with a NOERROR response code.
apply ( m , Rcode . NOERROR , old ) ;
public class NamedResolverMap { /** * The string set of keys held by the resolver map . * @ return the string set of keys held by the resolver map . */ public Set < String > stringKeySet ( ) { } }
// Keep only the keys whose right-hand (string) projection is present.
return data . keySet ( ) . stream ( )
        . map ( Any2 :: getRight )
        . flatMap ( opt -> opt . isPresent ( ) ? Stream . of ( opt . get ( ) ) : Stream . < String > empty ( ) )
        . collect ( Collectors . toSet ( ) ) ;
public class XMLSerializer { /** * Translate the ServiceWrapper into the wireline string . See * { @ link ServiceWrapper } * @ param service the instance of the ServiceWrapper * @ return the wireline string ( empty if marshalling failed ) */ public String marshalService ( ServiceWrapper service ) { } }
Service xmlService = composeServiceFromServiceWrapper ( service ) ;
StringWriter sw = new StringWriter ( ) ;
try {
    JAXBContext jaxbContext = JAXBContext . newInstance ( Service . class ) ;
    Marshaller jaxbMarshaller = jaxbContext . createMarshaller ( ) ;
    // output pretty printed
    jaxbMarshaller . setProperty ( Marshaller . JAXB_FORMATTED_OUTPUT , true ) ;
    jaxbMarshaller . marshal ( xmlService , sw ) ;
} catch ( JAXBException e ) {
    // PropertyException is a subclass of JAXBException, so a single catch
    // replaces the two duplicate handlers the original declared.
    e . printStackTrace ( ) ;
}
return sw . toString ( ) ;
public class IOUtil { /** * Reads the entries of a Header and returns a map containing the * { @ link HeaderKey } as key and { @ link StandardField } as value . * The map is initialized with all possible HeaderKeys of the subtype and * empty fields . * The passed instances must not be null and the specName must not be empty . * @ param clazz * the concrete subclass of the HeaderKey * @ param specFormat * the format of the specification file * @ param specName * the name of the specification file ( not the path to it ) , must * not be empty . * @ param headerbytes * the bytes of the header * @ param headerOffset * the file offset to the start of the headerbytes * @ param < T > * the type for the header key that the returned map shall use * @ return header entries * @ throws IOException * if specification file can not be read */ public static < T extends Enum < T > & HeaderKey > Map < T , StandardField > readHeaderEntries ( Class < T > clazz , SpecificationFormat specFormat , String specName , byte [ ] headerbytes , long headerOffset ) throws IOException { } }
assert specName != null && specName . trim ( ) . length ( ) > 0 ; // get the specification List < String [ ] > specification = readArray ( specName ) ; // call readHeaderEntries for specification return readHeaderEntries ( clazz , specFormat , specification , headerbytes , headerOffset ) ;
public class Alerts { /** * Shows an alert dialog ; alertType defaults to { @ link AlertType # INFORMATION } , modality defaults to { @ link Modality # NONE } , window defaults to null and style defaults to { @ link StageStyle # DECORATED } . * @ param title the dialog title * @ param header the header text * @ param content the content text * @ return { @ link ButtonType } */ public static Optional < ButtonType > alert ( String title , String header , String content ) { } }
// Convenience overload: defaults the alert type to INFORMATION.
return alert ( title , header , content , AlertType . INFORMATION ) ;
public class CreateClientVpnEndpointRequest { /** * Information about the authentication method to be used to authenticate clients . * < b > NOTE : < / b > This method appends the values to the existing list ( if any ) . Use * { @ link # setAuthenticationOptions ( java . util . Collection ) } or * { @ link # withAuthenticationOptions ( java . util . Collection ) } if you want to override the existing values . * @ param authenticationOptions * Information about the authentication method to be used to authenticate clients . * @ return Returns a reference to this object so that method calls can be chained together . */ public CreateClientVpnEndpointRequest withAuthenticationOptions ( ClientVpnAuthenticationRequest ... authenticationOptions ) { } }
// Lazily create the backing list sized to the incoming varargs, then append
// each element; existing entries are preserved.
if ( this . authenticationOptions == null ) {
    setAuthenticationOptions ( new com . amazonaws . internal . SdkInternalList < ClientVpnAuthenticationRequest > ( authenticationOptions . length ) ) ;
}
for ( int i = 0 ; i < authenticationOptions . length ; i ++ ) {
    this . authenticationOptions . add ( authenticationOptions [ i ] ) ;
}
return this ;
public class HMM { /** * Scaled backward procedure without underflow . * @ param o an observation sequence . * @ param beta on output , beta ( i , j ) holds the scaled total probability of * starting up in state i at time j . * @ param scaling on input , it should hold scaling factors computed by * forward procedure . */ private void backward ( int [ ] o , double [ ] [ ] beta , double [ ] scaling ) { } }
int n = o . length - 1 ;
// Initialization: beta_T(i) = 1, stored divided by the forward pass's final scale factor.
for ( int i = 0 ; i < numStates ; i ++ ) { beta [ n ] [ i ] = 1.0 / scaling [ n ] ; }
// Induction, walking backwards in time; each row is divided by the same scaling
// factor the forward procedure used at that time step, preventing underflow.
// NOTE(review): both the field `numStates` and the method `numStates()` are used
// below — presumably they agree; confirm against the class definition.
for ( int t = n ; t -- > 0 ; ) { for ( int i = 0 ; i < numStates ; i ++ ) { double sum = 0. ; for ( int j = 0 ; j < numStates ( ) ; j ++ ) { sum += beta [ t + 1 ] [ j ] * a [ i ] [ j ] * b [ j ] [ o [ t + 1 ] ] ; } beta [ t ] [ i ] = sum / scaling [ t ] ; } }
public class HttpDateFormatImpl { /** * @ see com . ibm . websphere . http . HttpDateFormat # parseTime ( java . lang . String ) */ @ Override public Date parseTime ( String input ) throws ParseException { } }
if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) ) { Tr . debug ( tc , "parseTime parsing [" + input + "]" ) ; }
String data = input ;
int i = data . indexOf ( ';' , 0 ) ;
// PK20062 - check for excess data following the date value
if ( - 1 != i ) { if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) ) { Tr . debug ( tc , "Ignoring excess data following semi-colon in date" ) ; }
// strip off trailing whitespace before semi - colon
for ( ; i > 20 ; i -- ) { char c = data . charAt ( i - 1 ) ; if ( ' ' != c && '\t' != c ) { break ; } }
// Fewer than 21 characters before the semi-colon cannot be a valid date value.
if ( 20 >= i ) { if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) ) { Tr . debug ( tc , "Not enough data left to make a valid date" ) ; } throw new ParseException ( "Invalid date [" + input + "]" , 0 ) ; }
data = input . substring ( 0 , i ) ; }
// Try each supported date format in order: RFC 1123, RFC 1036, asctime, RFC 2109.
Date parsedDate = attemptParse ( getFormat ( ) . get1123Parse ( ) , data ) ;
if ( null == parsedDate ) { parsedDate = attemptParse ( getFormat ( ) . get1036Parse ( ) , data ) ; if ( null == parsedDate ) { parsedDate = attemptParse ( getFormat ( ) . getAsciiParse ( ) , data ) ; if ( null == parsedDate ) { parsedDate = attemptParse ( getFormat ( ) . get2109Parse ( ) , data ) ; if ( null == parsedDate ) { if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) ) { Tr . debug ( tc , "Time does not match supported formats" ) ; } throw new ParseException ( "Unparseable [" + data + "]" , 0 ) ; } } } }
return parsedDate ;
public class ERiC { /** * Returns true , if the specified parent cluster is a parent of one child of * the children clusters . * @ param npred Neighborhood predicate * @ param parent the parent to be tested * @ param iter the list of children to be tested * @ return true , if the specified parent cluster is a parent of one child of * the children clusters , false otherwise */ private boolean isParent ( ERiCNeighborPredicate < V > . Instance npred , Cluster < CorrelationModel > parent , It < Cluster < CorrelationModel > > iter ) { } }
// Debug message accumulator is only allocated when debug logging is active.
StringBuilder msg = LOG . isDebugging ( ) ? new StringBuilder ( ) : null ;
for ( ; iter . valid ( ) ; iter . advance ( ) ) { Cluster < CorrelationModel > child = iter . get ( ) ;
// Equal correlation dimensionality rules out a parent/child relationship entirely.
if ( parent . getModel ( ) . getPCAResult ( ) . getCorrelationDimension ( ) == child . getModel ( ) . getPCAResult ( ) . getCorrelationDimension ( ) ) { return false ; }
boolean dist = npred . weakNeighbors ( parent . getModel ( ) . getPrototype ( ) , child . getModel ( ) . getPrototype ( ) , parent . getModel ( ) . getPCAResult ( ) , child . getModel ( ) . getPCAResult ( ) ) ;
if ( msg != null ) { msg . append ( "\ndist(" ) . append ( child ) . append ( " - " ) . append ( parent ) . append ( ") = " ) . append ( dist ) ; }
// First child that is a weak neighbor makes `parent` a parent.
if ( dist ) { if ( msg != null ) { LOG . debugFine ( msg ) ; } return true ; } }
if ( msg != null ) { LOG . debugFine ( msg . toString ( ) ) ; }
return false ;
public class BpmnParse { /** * Parse async continuation of an activity and create async jobs for the activity . * < br / > < br / > * When the activity is marked as multi instance , then async jobs create instead for the multi instance body . * When the wrapped activity has async characteristics in ' multiInstanceLoopCharacteristics ' element , * then async jobs create additionally for the wrapped activity . */ protected void parseAsynchronousContinuationForActivity ( Element activityElement , ActivityImpl activity ) { } }
// can't use #getMultiInstanceScope here to determine whether the task is
// multi-instance, since the property hasn't been set yet (cf parseActivity)
ActivityImpl parentFlowScopeActivity = activity . getParentFlowScopeActivity ( ) ;
if ( parentFlowScopeActivity != null && parentFlowScopeActivity . getActivityBehavior ( ) instanceof MultiInstanceActivityBehavior && ! activity . isCompensationHandler ( ) ) {
// Multi-instance: the async continuation attaches to the MI body, and additionally
// to the inner activity when declared on its multiInstanceLoopCharacteristics element.
parseAsynchronousContinuation ( activityElement , parentFlowScopeActivity ) ;
Element miLoopCharacteristics = activityElement . element ( "multiInstanceLoopCharacteristics" ) ;
parseAsynchronousContinuation ( miLoopCharacteristics , activity ) ;
} else {
parseAsynchronousContinuation ( activityElement , activity ) ;
}
public class Choice3 { /** * Static factory method for wrapping a value of type < code > A < / code > in a { @ link Choice3 } . * @ param c the value * @ param < A > the first possible type * @ param < B > the second possible type * @ param < C > the third possible type * @ return the wrapped value as a { @ link Choice3 } & lt ; A , B , C & gt ; */ public static < A , B , C > Choice3 < A , B , C > c ( C c ) { } }
// Wrap the value in the private _C subtype representing the third choice.
return new _C < > ( c ) ;
public class Pql { /** * Gets a list of the maximum size for each column . * @ param resultSet the result set to process * @ return a list of the maximum size for each column ( empty if the result set has no rows ) */ private static List < Integer > getMaxColumnSizes ( List < String [ ] > resultSet ) { } }
List < Integer > maxColumnSizes = Lists . newArrayList ( ) ;
// ROBUSTNESS FIX: the original indexed resultSet.get(0) unconditionally and
// threw IndexOutOfBoundsException on an empty result set.
if ( resultSet . isEmpty ( ) ) {
    return maxColumnSizes ;
}
// Column count is taken from the first row; scan every row for the widest value.
for ( int i = 0 ; i < resultSet . get ( 0 ) . length ; i ++ ) {
    int maxColumnSize = - 1 ;
    for ( String [ ] row : resultSet ) {
        maxColumnSize = Math . max ( maxColumnSize , row [ i ] . length ( ) ) ;
    }
    maxColumnSizes . add ( maxColumnSize ) ;
}
return maxColumnSizes ;
public class MonitorableRegistry { /** * Informs this MonitorableRegistry of a new { @ link Monitorable } ; that * Monitorable will be added to the registry , assuming no Monitorable with * the same name has previously been registered . * @ throws UnsupportedOperationException * if the name of the provided monitorable has already been * registered */ public synchronized < T > void register ( Monitorable < T > monitorable ) { } }
if ( monitorables . containsKey ( monitorable . getName ( ) ) ) { throw new UnsupportedOperationException ( "There is already an instance of the Monitorable [" + monitorable . getName ( ) + "] registered." ) ; } monitorables . put ( monitorable . getName ( ) , monitorable ) ; notifyListenersOfNewMonitorable ( monitorable ) ;
public class FinderPatternFinder { /** * Get square of distance between a and b . */ private static double squaredDistance ( FinderPattern a , FinderPattern b ) { } }
double x = a . getX ( ) - b . getX ( ) ; double y = a . getY ( ) - b . getY ( ) ; return x * x + y * y ;
public class RobotControlProxy { /** * / * ( non - Javadoc ) * @ see com . github . thehilikus . jrobocom . RobotControl # changeBank ( int ) */ @ Override public void changeBank ( int newBank ) { } }
int penalty = Timing . getInstance ( ) . BANK_CHANGE ; log . trace ( "[changeBank] Waiting {} cycles to change to bank {}" , penalty , newBank ) ; turnsControl . waitTurns ( penalty , "Change bank" ) ; robot . changeBank ( newBank ) ;
public class ConstantPool { /** * Returns a constant from the pool by index , or throws an exception if not * found . If this constant pool has not yet been written or was not created * by the read method , indexes are not assigned . * @ throws ArrayIndexOutOfBoundsException if index is out of range . */ public ConstantInfo getConstant ( int index ) { } }
if ( mIndexedConstants == null ) { throw new ArrayIndexOutOfBoundsException ( "Constant pool indexes have not been assigned" ) ; } return ( ConstantInfo ) mIndexedConstants . get ( index ) ;
public class ParallelCombiner { /** * Find a minimum size of the buffer slice and corresponding leafCombineDegree and number of slices . Note that each * node in the combining tree is executed by different threads . This method assumes that combining the leaf nodes * requires threads as many as possible , while combining intermediate nodes is not . See the comment on * { @ link # MINIMUM _ LEAF _ COMBINE _ DEGREE } for more details . * @ param combineBuffer entire buffer used for combining tree * @ param requiredMinimumBufferCapacity minimum buffer capacity for { @ link StreamingMergeSortedGrouper } * @ param numAvailableThreads number of available threads * @ param numLeafNodes number of leaf nodes of combining tree * @ return a pair of leafCombineDegree and number of buffers if found . */ private Pair < Integer , Integer > findLeafCombineDegreeAndNumBuffers ( ByteBuffer combineBuffer , int requiredMinimumBufferCapacity , int numAvailableThreads , int numLeafNodes ) { } }
for ( int leafCombineDegree = MINIMUM_LEAF_COMBINE_DEGREE ; leafCombineDegree <= numLeafNodes ; leafCombineDegree ++ ) { final int requiredBufferNum = computeRequiredBufferNum ( numLeafNodes , leafCombineDegree ) ; if ( requiredBufferNum <= numAvailableThreads ) { final int expectedSliceSize = combineBuffer . capacity ( ) / requiredBufferNum ; if ( expectedSliceSize >= requiredMinimumBufferCapacity ) { return Pair . of ( leafCombineDegree , requiredBufferNum ) ; } } } throw new ISE ( "Cannot find a proper leaf combine degree for the combining tree. " + "Each node of the combining tree requires a buffer of [%d] bytes. " + "Try increasing druid.processing.buffer.sizeBytes (currently [%d] bytes) for larger buffer or " + "druid.query.groupBy.intermediateCombineDegree for a smaller tree" , requiredMinimumBufferCapacity , combineBuffer . capacity ( ) ) ;
public class HelpFormatter { /** * Appends the usage clause for an Option to a StringBuffer . * @ param buff the StringBuffer to append to * @ param option the Option to append * @ param required whether the Option is required or not */ private void appendOption ( StringBuffer buff , Option option , boolean required ) { } }
if ( ! required ) { buff . append ( "[" ) ; } if ( option . getOpt ( ) != null ) { buff . append ( "-" ) . append ( option . getOpt ( ) ) ; } else { buff . append ( "--" ) . append ( option . getLongOpt ( ) ) ; } // if the Option has a value and a non blank argname if ( option . hasArg ( ) && ( option . getArgName ( ) == null || option . getArgName ( ) . length ( ) != 0 ) ) { buff . append ( option . getOpt ( ) == null ? longOptSeparator : " " ) ; buff . append ( "<" ) . append ( option . getArgName ( ) != null ? option . getArgName ( ) : getArgName ( ) ) . append ( ">" ) ; } // if the Option is not a required option if ( ! required ) { buff . append ( "]" ) ; }
public class SourceWaterMarks { /** * Gets the low water mark of a source . * @ param source - the source */ public long getLWMScn ( String source ) { } }
WaterMarkEntry e = sourceWaterMarkMap . get ( source ) ; return ( e == null ) ? 0 : e . getLWMScn ( ) ;
public class ListVocabulariesRequestMarshaller { /** * Marshall the given parameter object . */ public void marshall ( ListVocabulariesRequest listVocabulariesRequest , ProtocolMarshaller protocolMarshaller ) { } }
if ( listVocabulariesRequest == null ) { throw new SdkClientException ( "Invalid argument passed to marshall(...)" ) ; } try { protocolMarshaller . marshall ( listVocabulariesRequest . getNextToken ( ) , NEXTTOKEN_BINDING ) ; protocolMarshaller . marshall ( listVocabulariesRequest . getMaxResults ( ) , MAXRESULTS_BINDING ) ; protocolMarshaller . marshall ( listVocabulariesRequest . getStateEquals ( ) , STATEEQUALS_BINDING ) ; protocolMarshaller . marshall ( listVocabulariesRequest . getNameContains ( ) , NAMECONTAINS_BINDING ) ; } catch ( Exception e ) { throw new SdkClientException ( "Unable to marshall request to JSON: " + e . getMessage ( ) , e ) ; }
public class UniversalDateAndTimeStampImpl { /** * < ! - - begin - user - doc - - > * < ! - - end - user - doc - - > * @ generated */ public void setYearAD ( Integer newYearAD ) { } }
// EMF-generated property setter: remember the previous value, store the new one, and
// (only when listeners are registered) emit a SET notification carrying old and new values.
Integer oldYearAD = yearAD ; yearAD = newYearAD ; if ( eNotificationRequired ( ) ) eNotify ( new ENotificationImpl ( this , Notification . SET , AfplibPackage . UNIVERSAL_DATE_AND_TIME_STAMP__YEAR_AD , oldYearAD , yearAD ) ) ;
public class PartitionTable {
    /**
     * Convert values to immutable sets.
     *
     * <p>Each value of the map is replaced in place with an unmodifiable view of itself.
     *
     * @param map the map whose values are wrapped; modified in place
     */
    private static <K, V> void makeSetsImmutable(Map<K, Set<V>> map) {
        // Rewrite each value through Entry.setValue, avoiding the per-key get+put
        // lookups of the original key-set iteration.
        for (Map.Entry<K, Set<V>> entry : map.entrySet()) {
            entry.setValue(Collections.unmodifiableSet(entry.getValue()));
        }
    }
}
public class ProjectListType { /** * Gets the value of the project property . * This accessor method returns a reference to the live list , * not a snapshot . Therefore any modification you make to the * returned list will be present inside the JAXB object . * This is why there is not a < CODE > set < / CODE > method for the project property . * For example , to add a new item , do as follows : * < pre > * getProject ( ) . add ( newItem ) ; * < / pre > * Objects of the following type ( s ) are allowed in the list * { @ link ProjectListType . Project } */ public List < ProjectListType . Project > getProject ( ) { } }
// JAXB-generated lazy initialization: create the backing list on first access and
// always return the live (mutable) list, never a copy.
if ( project == null ) { project = new ArrayList < ProjectListType . Project > ( ) ; } return this . project ;
public class CoreRepositorySetupService { /** * groups */ protected Authorizable makeGroupAvailable ( @ Nonnull final Session session , @ Nonnull final String id , @ Nonnull final String intermediatePath ) throws RepositoryException { } }
UserManager userManager = ( ( JackrabbitSession ) session ) . getUserManager ( ) ; Authorizable authorizable = userManager . getAuthorizable ( id ) ; if ( authorizable != null ) { if ( authorizable . isGroup ( ) ) { return authorizable ; } throw new RepositoryException ( "'" + id + "' exists but is not a group" ) ; } LOG . info ( "addGroup({},{})" , id , intermediatePath ) ; try { authorizable = userManager . createGroup ( new Principal ( ) { @ Override public String getName ( ) { return id ; } } , intermediatePath ) ; session . save ( ) ; } catch ( RepositoryException e ) { LOG . error ( "Error in makeGroupAvailable({},{}) : {}" , new Object [ ] { id , intermediatePath , e . toString ( ) } ) ; throw e ; } return authorizable ;
public class PumpStreamHandler { /** * Stop pumping the streams . */ public void stop ( ) { } }
if ( inputThread != null ) { if ( inputStreamPumper != null ) { inputStreamPumper . stopProcessing ( ) ; } // #33 Interrupt reading from a PipedInputStream to unblock the pumping thread inputThread . interrupt ( ) ; log . trace ( "Joining input thread {}..." , inputThread ) ; try { inputThread . join ( ) ; inputThread = null ; } catch ( InterruptedException e ) { // ignore } } if ( outputThread != null ) { log . trace ( "Joining output thread {}..." , outputThread ) ; try { outputThread . join ( ) ; outputThread = null ; } catch ( InterruptedException e ) { // ignore } } if ( errorThread != null ) { log . trace ( "Joining error thread {}..." , errorThread ) ; try { errorThread . join ( ) ; errorThread = null ; } catch ( InterruptedException e ) { // ignore } } flush ( ) ;
public class SeleniumSpec { /** * Browse to { @ code url } using the current browser . * @ param path path of running app * @ throws Exception exception */ @ Given ( "^I( securely)? browse to '(.+?)'$" ) public void seleniumBrowse ( String isSecured , String path ) throws Exception { } }
// Validates that a web host and port have been configured, selects http/https based on
// the optional "securely" capture group, navigates the driver to protocol+host+port+path,
// and remembers the current window handle as the parent window.
// NOTE(review): webURL concatenates host and port with no ":" separator — presumably
// getWebPort() already includes the leading colon; verify against where it is set.
assertThat ( path ) . isNotEmpty ( ) ; if ( commonspec . getWebHost ( ) == null ) { throw new Exception ( "Web host has not been set" ) ; } if ( commonspec . getWebPort ( ) == null ) { throw new Exception ( "Web port has not been set" ) ; } String protocol = "http://" ; if ( isSecured != null ) { protocol = "https://" ; } String webURL = protocol + commonspec . getWebHost ( ) + commonspec . getWebPort ( ) ; commonspec . getDriver ( ) . get ( webURL + path ) ; commonspec . setParentWindow ( commonspec . getDriver ( ) . getWindowHandle ( ) ) ;
public class RTFEmbeddedObject {
    /**
     * Reads a data block and adds it to the list of blocks.
     *
     * @param text RTF data
     * @param offset current offset
     * @param length next block length
     * @param blocks list of blocks
     * @return next offset
     */
    private static int readDataBlock(String text, int offset, int length, List<byte[]> blocks) {
        // Each byte is encoded as two hex characters, so the block holds length / 2 bytes.
        int byteCount = length / 2;
        byte[] data = new byte[byteCount];
        for (int index = 0; index < byteCount; index++, offset += 2) {
            data[index] = (byte) Integer.parseInt(text.substring(offset, offset + 2), 16);
        }
        blocks.add(data);
        return offset;
    }
}
public class Merger { /** * Merges argument iterators . Iterators should return values in natural order . * @ param < T > * @ param iterators * @ return */ public static < T > Iterator < T > merge ( Iterator < T > ... iterators ) { } }
// Delegates to the comparator-based overload with a null comparator, i.e. natural ordering.
return merge ( null , iterators ) ;
public class RelaxNGDefaultsComponent { /** * On start element * @ param name The element name * @ param atts The attributes */ private void onStartElement ( QName name , XMLAttributes atts ) { } }
if ( detecting ) { detecting = false ; loadDefaults ( ) ; } if ( defaults != null ) { checkAndAddDefaults ( name , atts ) ; }
public class Nodes {
    /**
     * Adds the given input map to the end of the node's list of input maps, so that an
     * event will be pattern-matched against all other input maps currently "installed"
     * in the node before being pattern-matched against the given input map.
     */
    public static void addFallbackInputMap(Node node, InputMap<?> im) {
        // getInputMap calls init, so the unsafe setter can be used here.
        setInputMapUnsafe(node, InputMap.sequence(getInputMap(node), im));
    }
}
public class CmsWorkplaceManager { /** * Returns the gallery default scope . < p > * @ return the gallery default scope */ public CmsGallerySearchScope getGalleryDefaultScope ( ) { } }
CmsGallerySearchScope result = CmsGallerySearchScope . siteShared ; if ( m_galleryDefaultScope != null ) { try { result = CmsGallerySearchScope . valueOf ( m_galleryDefaultScope ) ; } catch ( Throwable t ) { // ignore } } return result ;
public class SslPolicyClient { /** * Returns the specified SSL policy resource . Gets a list of available SSL policies by making a * list ( ) request . * < p > Sample code : * < pre > < code > * try ( SslPolicyClient sslPolicyClient = SslPolicyClient . create ( ) ) { * ProjectName project = ProjectName . of ( " [ PROJECT ] " ) ; * SslPolicy sslPolicyResource = SslPolicy . newBuilder ( ) . build ( ) ; * Operation response = sslPolicyClient . insertSslPolicy ( project , sslPolicyResource ) ; * < / code > < / pre > * @ param project Project ID for this request . * @ param sslPolicyResource A SSL policy specifies the server - side support for SSL features . This * can be attached to a TargetHttpsProxy or a TargetSslProxy . This affects connections between * clients and the HTTPS or SSL proxy load balancer . They do not affect the connection between * the load balancers and the backends . * @ throws com . google . api . gax . rpc . ApiException if the remote call fails */ @ BetaApi public final Operation insertSslPolicy ( ProjectName project , SslPolicy sslPolicyResource ) { } }
InsertSslPolicyHttpRequest request = InsertSslPolicyHttpRequest . newBuilder ( ) . setProject ( project == null ? null : project . toString ( ) ) . setSslPolicyResource ( sslPolicyResource ) . build ( ) ; return insertSslPolicy ( request ) ;
public class AbstractBeanJsonCreator { /** * Build the method that returns the class of the mapped type . * @ return the method built */ private MethodSpec buildClassGetterMethod ( ) { } }
return MethodSpec . methodBuilder ( isSerializer ( ) ? "getSerializedType" : "getDeserializedType" ) . addModifiers ( Modifier . PUBLIC ) . addAnnotation ( Override . class ) . returns ( Class . class ) . addStatement ( "return $T.class" , rawName ( beanInfo . getType ( ) ) ) . build ( ) ;
public class XMLSerializer { /** * Ensure namespaces capacity . */ protected void ensureNamespacesCapacity ( ) { } }
// int size ) { // int namespaceSize = namespacePrefix ! = null ? namespacePrefix . length // assert ( namespaceEnd > = namespacePrefix . length ) ; // if ( size > = namespaceSize ) { // int newSize = size > 7 ? 2 * size : 8 ; / / = lucky 7 + 1 / / 25 final int newSize = namespaceEnd > 7 ? 2 * namespaceEnd : 8 ; if ( TRACE_SIZING ) { System . err . println ( getClass ( ) . getName ( ) + " namespaceSize " + namespacePrefix . length + " ==> " + newSize ) ; } final String [ ] newNamespacePrefix = new String [ newSize ] ; final String [ ] newNamespaceUri = new String [ newSize ] ; if ( namespacePrefix != null ) { System . arraycopy ( namespacePrefix , 0 , newNamespacePrefix , 0 , namespaceEnd ) ; System . arraycopy ( namespaceUri , 0 , newNamespaceUri , 0 , namespaceEnd ) ; } namespacePrefix = newNamespacePrefix ; namespaceUri = newNamespaceUri ; // TODO use hashes for quick namespace - > prefix lookups // if ( ! allStringsInterned ) { // int [ ] newNamespacePrefixHash = new int [ newSize ] ; // if ( namespacePrefixHash ! = null ) { // System . arraycopy ( // namespacePrefixHash , 0 , newNamespacePrefixHash , 0 , namespaceEnd ) ; // namespacePrefixHash = newNamespacePrefixHash ; // prefixesSize = newSize ; // / / / / assert nsPrefixes . length > size & & nsPrefixes . length = = newSize
public class FileSystemConnector { /** * Utility method for determining the node identifier for the supplied file . Subclasses may override this method to change the * format of the identifiers , but in that case should also override the { @ link # fileFor ( String ) } , * { @ link # isContentNode ( String ) } , and { @ link # isRoot ( String ) } methods . * @ param file the file ; may not be null * @ return the node identifier ; never null * @ see # isRoot ( String ) * @ see # isContentNode ( String ) * @ see # fileFor ( String ) */ protected String idFor ( File file ) { } }
String path = file . getAbsolutePath ( ) ; if ( ! path . startsWith ( directoryAbsolutePath ) ) { if ( directory . getAbsolutePath ( ) . equals ( path ) ) { // This is the root return DELIMITER ; } String msg = JcrI18n . fileConnectorNodeIdentifierIsNotWithinScopeOfConnector . text ( getSourceName ( ) , directoryPath , path ) ; throw new DocumentStoreException ( path , msg ) ; } String id = path . substring ( directoryAbsolutePathLength ) ; id = id . replaceAll ( Pattern . quote ( FILE_SEPARATOR ) , DELIMITER ) ; assert id . startsWith ( DELIMITER ) ; return id ;
public class DataNode { /** * Sends a ' Blocks Being Written ' report to the given node . * @ param node the node to send the report to * @ throws IOException */ public void sendBlocksBeingWrittenReport ( DatanodeProtocol node , int namespaceId , DatanodeRegistration nsRegistration ) throws IOException { } }
Block [ ] blocks = data . getBlocksBeingWrittenReport ( namespaceId ) ; if ( blocks != null && blocks . length != 0 ) { long [ ] blocksAsLong = BlockListAsLongs . convertToArrayLongs ( blocks ) ; BlockReport bbwReport = new BlockReport ( blocksAsLong ) ; node . blocksBeingWrittenReport ( nsRegistration , bbwReport ) ; }
public class Row { /** * Returns the type - id of the element in a Row . * @ param column the column of which you ' d like to know the type * @ return the type - id of the element in the row */ int getElementID ( int column ) { } }
if ( cells [ column ] == null ) return NULL ; else if ( Cell . class . isInstance ( cells [ column ] ) ) return CELL ; else if ( Table . class . isInstance ( cells [ column ] ) ) return TABLE ; return - 1 ;
public class AfplibFactoryImpl { /** * < ! - - begin - user - doc - - > * < ! - - end - user - doc - - > * @ generated */ public String convertOVSBYPSIDENToString ( EDataType eDataType , Object instanceValue ) { } }
// EMF-generated data-type converter: null-safe toString of the instance value.
return instanceValue == null ? null : instanceValue . toString ( ) ;
public class Base64 { /** * Similar to { @ link # encodeBytes ( byte [ ] , int , int , int ) } but returns * a byte array instead of instantiating a String . This is more efficient * if you ' re working with I / O streams and have large data sets to encode . * @ param source The data to convert * @ param off Offset in array where conversion should begin * @ param len Length of data to convert * @ param options Specified options * @ return The Base64 - encoded data as a byte array * @ see Base64 # GZIP * @ see Base64 # DO _ BREAK _ LINES * @ throws java . io . IOException if there is an error * @ throws NullPointerException if source array is null * @ throws IllegalArgumentException if source array , offset , or length are invalid * @ since 2.3.1 */ public static byte [ ] encodeBytesToBytes ( byte [ ] source , int off , int len , int options ) throws java . io . IOException { } }
if ( source == null ) { throw new NullPointerException ( "Cannot serialize a null array." ) ; } // end if : null if ( off < 0 ) { throw new IllegalArgumentException ( "Cannot have negative offset: " + off ) ; } // end if : off < 0 if ( len < 0 ) { throw new IllegalArgumentException ( "Cannot have length offset: " + len ) ; } // end if : len < 0 if ( off + len > source . length ) { throw new IllegalArgumentException ( format ( "Cannot have offset of %d and length of %d with array of length %d" , off , len , source . length ) ) ; } // end if : off < 0 { boolean breakLines = ( options & DO_BREAK_LINES ) != 0 ; // int len43 = len * 4 / 3; // byte [ ] outBuff = new byte [ ( len43 ) / / Main 4:3 // + ( ( len % 3 ) > 0 ? 4 : 0 ) / / Account for padding // + ( breakLines ? ( len43 / MAX _ LINE _ LENGTH ) : 0 ) ] ; / / New lines // Try to determine more precisely how big the array needs to be . // If we get it right , we don ' t have to do an array copy , and // we save a bunch of memory . int encLen = ( len / 3 ) * 4 + ( len % 3 > 0 ? 4 : 0 ) ; // Bytes needed for actual encoding if ( breakLines ) { encLen += encLen / MAX_LINE_LENGTH ; // Plus extra newline characters } byte [ ] outBuff = new byte [ encLen ] ; int d = 0 ; int e = 0 ; int len2 = len - 2 ; int lineLength = 0 ; for ( ; d < len2 ; d += 3 , e += 4 ) { encode3to4 ( source , d + off , 3 , outBuff , e , options ) ; lineLength += 4 ; if ( breakLines && lineLength >= MAX_LINE_LENGTH ) { outBuff [ e + 4 ] = NEW_LINE ; e ++ ; lineLength = 0 ; } // end if : end of line } // en dfor : each piece of array if ( d < len ) { encode3to4 ( source , d + off , len - d , outBuff , e , options ) ; e += 4 ; } // end if : some padding needed // Only resize array if we didn ' t guess it right . if ( e <= outBuff . length - 1 ) { // If breaking lines and the last byte falls right at // the line length ( 76 bytes per line ) , there will be // one extra byte , and the array will need to be resized . 
// Not too bad of an estimate on array size , I ' d say . byte [ ] finalOut = new byte [ e ] ; System . arraycopy ( outBuff , 0 , finalOut , 0 , e ) ; // System . err . println ( " Having to resize array from " + outBuff . length + " to " + e ) ; return finalOut ; } else { // System . err . println ( " No need to resize array . " ) ; return outBuff ; } } // end else : don ' t compress
public class AjaxSlider { /** * Sets the call - back for the AJAX Change Event . * @ param ajaxChangeEvent */ public void setAjaxChangeEvent ( ISliderAjaxEvent ajaxChangeEvent ) { } }
this . ajaxEvents . put ( SliderAjaxEvent . ajaxChangeEvent , ajaxChangeEvent ) ; setChangeEvent ( new SliderAjaxJsScopeUiEvent ( this , SliderAjaxEvent . ajaxChangeEvent ) ) ;
public class AmazonRedshiftClient { /** * Returns information about Amazon Redshift security groups . If the name of a security group is specified , the * response will contain only information about only that security group . * For information about managing security groups , go to < a * href = " https : / / docs . aws . amazon . com / redshift / latest / mgmt / working - with - security - groups . html " > Amazon Redshift Cluster * Security Groups < / a > in the < i > Amazon Redshift Cluster Management Guide < / i > . * If you specify both tag keys and tag values in the same request , Amazon Redshift returns all security groups that * match any combination of the specified keys and values . For example , if you have < code > owner < / code > and * < code > environment < / code > for tag keys , and < code > admin < / code > and < code > test < / code > for tag values , all security * groups that have any combination of those values are returned . * If both tag keys and values are omitted from the request , security groups are returned regardless of whether they * have tag keys or values associated with them . * @ param describeClusterSecurityGroupsRequest * @ return Result of the DescribeClusterSecurityGroups operation returned by the service . * @ throws ClusterSecurityGroupNotFoundException * The cluster security group name does not refer to an existing cluster security group . * @ throws InvalidTagException * The tag is invalid . * @ sample AmazonRedshift . DescribeClusterSecurityGroups * @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / redshift - 2012-12-01 / DescribeClusterSecurityGroups " * target = " _ top " > AWS API Documentation < / a > */ @ Override public DescribeClusterSecurityGroupsResult describeClusterSecurityGroups ( DescribeClusterSecurityGroupsRequest request ) { } }
// Generated SDK shim: run the pre-execution handler chain, then delegate to the
// actual operation implementation.
request = beforeClientExecution ( request ) ; return executeDescribeClusterSecurityGroups ( request ) ;
public class DataService { /** * Method to construct and return the CDCQueryResult object from CDCResponse * @ param cdcResponse * the CDC Response object * @ return the CDCQueryResult object */ protected CDCQueryResult getCDCQueryResult ( CDCResponse cdcResponse ) { } }
CDCQueryResult cdcQueryResult = new CDCQueryResult ( ) ; List < QueryResponse > queryResponses = cdcResponse . getQueryResponse ( ) ; if ( queryResponses != null ) { Map < String , QueryResult > queryResults = new HashMap < String , QueryResult > ( ) ; Iterator < QueryResponse > queryResponseItr = queryResponses . iterator ( ) ; while ( queryResponseItr . hasNext ( ) ) { QueryResponse queryResponse = queryResponseItr . next ( ) ; QueryResult queryResult = getQueryResult ( queryResponse ) ; populateQueryResultsInCDC ( queryResults , queryResult ) ; populateFaultInCDC ( cdcQueryResult , queryResult ) ; } if ( queryResults != null && ! queryResults . isEmpty ( ) ) { cdcQueryResult . setQueryResults ( queryResults ) ; cdcQueryResult . setSize ( cdcResponse . getSize ( ) ) ; } } else if ( cdcResponse . getFault ( ) != null ) { cdcQueryResult . setFalut ( cdcResponse . getFault ( ) ) ; } return cdcQueryResult ;
public class AbstractGenericTreeNode { /** * Traverse . * @ param node * the node * @ param list * the list */ @ Override public void traverse ( final ITreeNode < T > node , final List < ITreeNode < T > > list ) { } }
list . add ( node ) ; for ( final ITreeNode < T > data : node . getChildren ( ) ) { traverse ( data , list ) ; }
public class ListBuffer { /** * Copy list and sets last . */ private void copy ( ) { } }
if ( elems . nonEmpty ( ) ) { List < A > orig = elems ; elems = last = List . of ( orig . head ) ; while ( ( orig = orig . tail ) . nonEmpty ( ) ) { last . tail = List . of ( orig . head ) ; last = last . tail ; } }
public class ExceptionTranslationFilter { /** * ~ Methods = = = = = */ public void afterPropertiesSet ( ) throws Exception { } }
Assert . notNull ( authenticationEntryPoint , "authenticationEntryPoint must be specified" ) ; Assert . notNull ( portResolver , "portResolver must be specified" ) ; Assert . notNull ( authenticationTrustResolver , "authenticationTrustResolver must be specified" ) ;