signature
stringlengths
43
39.1k
implementation
stringlengths
0
450k
public class PathImpl {

  /**
   * Looks up all the existing resources. (Generally only useful with MergePath.)
   *
   * @return the list of resources; contains this path
   */
  public ArrayList<PathImpl> getResources() {
    ArrayList<PathImpl> list = new ArrayList<PathImpl>();
    // if (exists())
    list.add(this);
    return list;
  }
}
public class CmsAliasView { /** * The event handler for the button for adding new rewrite aliases . < p > * @ param e the click event */ @ UiHandler ( "m_newRewriteButton" ) void onClickNewRewrite ( ClickEvent e ) { } }
String rewriteRegex = m_newRewriteRegex . getText ( ) ; String rewriteReplacement = m_newRewriteReplacement . getText ( ) ; String mode = m_newRewriteMode . getFormValueAsString ( ) ; m_controller . editNewRewrite ( rewriteRegex , rewriteReplacement , CmsAliasMode . valueOf ( mode ) ) ;
public class CommerceAccountUtil { /** * Removes the commerce account with the primary key from the database . Also notifies the appropriate model listeners . * @ param commerceAccountId the primary key of the commerce account * @ return the commerce account that was removed * @ throws NoSuchAccountException if a commerce account with the primary key could not be found */ public static CommerceAccount remove ( long commerceAccountId ) throws com . liferay . commerce . account . exception . NoSuchAccountException { } }
return getPersistence ( ) . remove ( commerceAccountId ) ;
public class UrlCopy { /** * This method is an implementation of the { @ link Runnable } interface * and can be used to perform the copy in a separate thread . * This method will perform the transfer and signal completion and * errors through the { @ link UrlCopyListener # transferCompleted ( ) } and * { @ link UrlCopyListener # transferError ( Exception ) } of any registered listeners * ( see { @ link # addUrlCopyListener ( UrlCopyListener ) } ) . */ public void run ( ) { } }
try { copy ( ) ; } catch ( Exception e ) { if ( listeners != null ) { Iterator iter = listeners . iterator ( ) ; while ( iter . hasNext ( ) ) { ( ( UrlCopyListener ) iter . next ( ) ) . transferError ( e ) ; } } } finally { if ( listeners != null ) { Iterator iter = listeners . iterator ( ) ; while ( iter . hasNext ( ) ) { ( ( UrlCopyListener ) iter . next ( ) ) . transferCompleted ( ) ; } } }
public class CmsContentService { /** * Returns the entity id to the given content value . < p > * @ param contentValue the content value * @ return the entity id */ public static String getEntityId ( I_CmsXmlContentValue contentValue ) { } }
String result = CmsContentDefinition . uuidToEntityId ( contentValue . getDocument ( ) . getFile ( ) . getStructureId ( ) , contentValue . getLocale ( ) . toString ( ) ) ; String valuePath = contentValue . getPath ( ) ; if ( valuePath . contains ( "/" ) ) { result += "/" + valuePath . substring ( 0 , valuePath . lastIndexOf ( "/" ) ) ; } if ( contentValue . isChoiceOption ( ) ) { result += "/" + CmsType . CHOICE_ATTRIBUTE_NAME + "_" + contentValue . getName ( ) + "[" + contentValue . getXmlIndex ( ) + "]" ; } return result ;
public class JsScriptExecutor { /** * Executes the given JavaScript , e . g . ' var product = 2 * 3 ; return product ; ' * @ param jsScript JavaScript * @ return value of which the type depends on the JavaScript type of the returned variable */ Object executeScript ( String jsScript ) { } }
String jsScriptWithFunction = "(function (){" + jsScript + "})();" ; try { return jsScriptEngine . eval ( jsScriptWithFunction ) ; } catch ( ScriptException e ) { throw new org . molgenis . script . core . ScriptException ( e ) ; }
public class PropertiesLoader { /** * Load { @ link Config } from ' file ' but do not resolve it . */ static Config loadOvercastConfigFromFile ( String file ) { } }
if ( file == null ) { return ConfigFactory . empty ( ) ; } File f = new File ( file ) ; if ( ! f . exists ( ) ) { logger . warn ( "File {} not found." , f . getAbsolutePath ( ) ) ; return ConfigFactory . empty ( ) ; } logger . info ( "Loading from file {}" , f . getAbsolutePath ( ) ) ; return ConfigFactory . parseFile ( f ) ;
public class AppServicePlansInner { /** * Get the maximum number of Hybrid Connections allowed in an App Service plan . * Get the maximum number of Hybrid Connections allowed in an App Service plan . * @ param resourceGroupName Name of the resource group to which the resource belongs . * @ param name Name of the App Service plan . * @ throws IllegalArgumentException thrown if parameters fail the validation * @ return the observable to the HybridConnectionLimitsInner object */ public Observable < HybridConnectionLimitsInner > getHybridConnectionPlanLimitAsync ( String resourceGroupName , String name ) { } }
return getHybridConnectionPlanLimitWithServiceResponseAsync ( resourceGroupName , name ) . map ( new Func1 < ServiceResponse < HybridConnectionLimitsInner > , HybridConnectionLimitsInner > ( ) { @ Override public HybridConnectionLimitsInner call ( ServiceResponse < HybridConnectionLimitsInner > response ) { return response . body ( ) ; } } ) ;
public class Membership { /** * Adds a new member to this membership . If the member already exist ( Address . equals ( Object ) * returns true then the member will not be added to the membership */ public Membership add ( Address new_member ) { } }
synchronized ( members ) { if ( new_member != null && ! members . contains ( new_member ) ) { members . add ( new_member ) ; } } return this ;
public class ArrayUtil {

  /**
   * Converts a wrapper-type array to a primitive-type array.
   *
   * @param values wrapper-type array, may be null
   * @return primitive-type array, or null when the input is null
   */
  public static short[] unWrap(Short... values) {
    if (null == values) {
      return null;
    }
    final int length = values.length;
    if (0 == length) {
      return new short[0];
    }
    final short[] array = new short[length];
    for (int i = 0; i < length; i++) {
      array[i] = values[i].shortValue();
    }
    return array;
  }
}
public class View { /** * Enables the automatic layout for this view , with some default settings . * @ param enable a boolean */ public void setAutomaticLayout ( boolean enable ) { } }
if ( enable ) { this . setAutomaticLayout ( AutomaticLayout . RankDirection . TopBottom , 300 , 600 , 200 , false ) ; } else { this . automaticLayout = null ; }
public class AbstractLocation { /** * Iterates through all known sub - locations for this location but does * not descend */ @ Override public Iterator < Location > iterator ( ) { } }
List < Location > list ; if ( isComplex ( ) ) { list = getSubLocations ( ) ; } else { list = new ArrayList < Location > ( ) ; list . add ( this ) ; } return list . iterator ( ) ;
public class RBBIRuleScanner { void findSetFor ( String s , RBBINode node , UnicodeSet setToAdopt ) { } }
RBBISetTableEl el ; // First check whether we ' ve already cached a set for this string . // If so , just use the cached set in the new node . // delete any set provided by the caller , since we own it . el = fSetTable . get ( s ) ; if ( el != null ) { node . fLeftChild = el . val ; Assert . assrt ( node . fLeftChild . fType == RBBINode . uset ) ; return ; } // Haven ' t seen this set before . // If the caller didn ' t provide us with a prebuilt set , // create a new UnicodeSet now . if ( setToAdopt == null ) { if ( s . equals ( kAny ) ) { setToAdopt = new UnicodeSet ( 0x000000 , 0x10ffff ) ; } else { int c ; c = UTF16 . charAt ( s , 0 ) ; setToAdopt = new UnicodeSet ( c , c ) ; } } // Make a new uset node to refer to this UnicodeSet // This new uset node becomes the child of the caller ' s setReference // node . RBBINode usetNode = new RBBINode ( RBBINode . uset ) ; usetNode . fInputSet = setToAdopt ; usetNode . fParent = node ; node . fLeftChild = usetNode ; usetNode . fText = s ; // Add the new uset node to the list of all uset nodes . fRB . fUSetNodes . add ( usetNode ) ; // Add the new set to the set hash table . el = new RBBISetTableEl ( ) ; el . key = s ; el . val = usetNode ; fSetTable . put ( el . key , el ) ; return ;
public class GISCoordinates { /** * This function convert WSG84 GPS coordinate to one of the NTF Lambda - Phi coordinate . * @ param lambda is the WSG94 coordinate in decimal degrees . * @ param phi is the WSG84 coordinate is decimal in degrees . * @ param n is the exponential of the Lambert projection . * @ param c is the constant of projection . * @ param Xs is the x coordinate of the origine of the Lambert projection . * @ param Ys is the y coordinate of the origine of the Lambert projection . * @ return the NTF Lambda - Phi */ @ SuppressWarnings ( { } }
"checkstyle:parametername" , "checkstyle:magicnumber" , "checkstyle:localfinalvariablename" , "checkstyle:localvariablename" } ) private static Point2d WSG84_NTFLamdaPhi ( double lambda , double phi ) { // 0 ) degree - > radian final double lambda_w = Math . toRadians ( lambda ) ; final double phi_w = Math . toRadians ( phi ) ; // 1 ) geographical coordinates WGS84 ( phi _ w , lambda _ w ) // - > cartesian coordinate WGS84 ( x _ w , y _ w , z _ w ) // Formula from IGN are used from the official downloadable document , and // the two constants , one for each demi - axis , are given by the WGS84 specification // of the ellipsoide . // Ref : // http : / / www . ign . fr / telechargement / MPro / geodesie / CIRCE / NTG _ 80 . pdf // http : / / de . wikipedia . org / wiki / WGS84 final double a_w = 6378137.0 ; final double b_w = 6356752.314 ; // then final double e2_w = ( a_w * a_w - b_w * b_w ) / ( a_w * a_w ) ; // then let the big normal of the WGS84 ellipsoide final double N = a_w / Math . sqrt ( 1. - e2_w * Math . pow ( Math . sin ( phi_w ) , 2. ) ) ; // let the WGS84 cartesian coordinates : final double X_w = N * Math . cos ( phi_w ) * Math . cos ( lambda_w ) ; final double Y_w = N * Math . cos ( phi_w ) * Math . sin ( lambda_w ) ; final double Z_w = N * ( 1 - e2_w ) * Math . sin ( phi_w ) ; // 2 ) cartesian coordinate WGS84 ( X _ w , Y _ w , Z _ w ) // - > cartesian coordinate NTF ( X _ n , Y _ n , Z _ n ) // Ref : http : / / support . esrifrance . fr / Documents / Generalites / Projections / Generalites / Generalites . htm # 2 // No convertion to be done . 
final double dX = 168.0 ; final double dY = 60.0 ; final double dZ = - 320.0 ; final double X_n = X_w + dX ; final double Y_n = Y_w + dY ; final double Z_n = Z_w + dZ ; // 3 ) cartesian coordinate NTF ( X _ n , Y _ n , Z _ n ) // - > geographical coordinate NTF ( phi _ n , lambda _ n ) // One formula is given by the IGN , and two constants about // the ellipsoide are from the NTF system specification of Clarke 1880. // Ref : // http : / / www . ign . fr / telechargement / MPro / geodesie / CIRCE / NTG _ 80 . pdf // http : / / support . esrifrance . fr / Documents / Generalites / Projections / Generalites / Generalites . htm # 2 final double a_n = 6378249.2 ; final double b_n = 6356515.0 ; // then final double e2_n = ( a_n * a_n - b_n * b_n ) / ( a_n * a_n ) ; // let the convergence epsilon final double epsilon = 1e-10 ; // Then try to converge double p0 = Math . atan ( Z_n / Math . sqrt ( X_n * X_n + Y_n * Y_n ) * ( 1 - ( a_n * e2_n ) / ( Math . sqrt ( X_n * X_n + Y_n * Y_n + Z_n * Z_n ) ) ) ) ; double p1 = Math . atan ( ( Z_n / Math . sqrt ( X_n * X_n + Y_n * Y_n ) ) / ( 1 - ( a_n * e2_n * Math . cos ( p0 ) ) / ( Math . sqrt ( ( X_n * X_n + Y_n * Y_n ) * ( 1 - e2_n * Math . pow ( Math . sin ( p0 ) , 2 ) ) ) ) ) ) ; while ( Math . abs ( p1 - p0 ) >= epsilon ) { p0 = p1 ; p1 = Math . atan ( ( Z_n / Math . sqrt ( X_n * X_n + Y_n * Y_n ) ) / ( 1 - ( a_n * e2_n * Math . cos ( p0 ) ) / ( Math . sqrt ( ( X_n * X_n + Y_n * Y_n ) * ( 1 - e2_n * Math . pow ( Math . sin ( p0 ) , 2 ) ) ) ) ) ) ; } final double phi_n = p1 ; final double lambda_n = Math . atan ( Y_n / X_n ) ; return new Point2d ( lambda_n , phi_n ) ;
public class JSONSerializer { /** * Creates a JSONObject , JSONArray or a JSONNull from object . < br > * Accepts JSON formatted strings , Maps , arrays , Collections , DynaBeans and * JavaBeans . * @ param object any java Object * @ param jsonConfig additional configuration * @ throws JSONException if the object can not be converted */ public static JSON toJSON ( Object object , JsonConfig jsonConfig ) { } }
JSON json = null ; if ( object == null ) { json = JSONNull . getInstance ( ) ; } else if ( object instanceof JSONString ) { json = toJSON ( ( JSONString ) object , jsonConfig ) ; } else if ( object instanceof String ) { json = toJSON ( ( String ) object , jsonConfig ) ; } else if ( JSONUtils . isArray ( object ) ) { json = JSONArray . fromObject ( object , jsonConfig ) ; } else { try { json = JSONObject . fromObject ( object , jsonConfig ) ; } catch ( JSONException e ) { if ( object instanceof JSONTokener ) { ( ( JSONTokener ) object ) . reset ( ) ; } json = JSONArray . fromObject ( object , jsonConfig ) ; } } return json ;
public class BureauRegistry { /** * Check the credentials to make sure this is one of our bureaus . * @ return null if all ' s well , otherwise a string describing the authentication failure */ public String checkToken ( BureauCredentials creds ) { } }
Bureau bureau = _bureaus . get ( creds . clientId ) ; if ( bureau == null ) { return "Bureau " + creds . clientId + " not found" ; } if ( bureau . clientObj != null ) { return "Bureau " + creds . clientId + " already logged in" ; } if ( ! creds . areValid ( bureau . token ) ) { return "Bureau " + creds . clientId + " does not match credentials token" ; } return null ;
public class XAResourceRecoveryImpl { /** * Open a managed connection * @ param s The subject * @ return The managed connection * @ exception ResourceException Thrown in case of an error */ @ SuppressWarnings ( "unchecked" ) private ManagedConnection open ( Subject s ) throws ResourceException { } }
log . debugf ( "Open managed connection (%s)" , s ) ; if ( recoverMC == null ) recoverMC = mcf . createManagedConnection ( s , null ) ; if ( plugin == null ) { try { ValidatingManagedConnectionFactory vmcf = ( ValidatingManagedConnectionFactory ) mcf ; Set connectionSet = new HashSet ( 1 ) ; connectionSet . add ( recoverMC ) ; Set invalid = vmcf . getInvalidConnections ( connectionSet ) ; if ( invalid != null && ! invalid . isEmpty ( ) ) { log . debugf ( "Invalid managed connection: %s" , recoverMC ) ; close ( recoverMC ) ; recoverMC = mcf . createManagedConnection ( s , null ) ; } } catch ( ResourceException re ) { log . debugf ( "Exception during invalid check" , re ) ; close ( recoverMC ) ; recoverMC = mcf . createManagedConnection ( s , null ) ; } } return recoverMC ;
public class JenkinsHash {

  /**
   * Gathers an int from the specified index into the byte array,
   * little-endian: data[index] is the least significant byte.
   *
   * @param data  source bytes
   * @param index offset of the first (least significant) byte
   * @return the assembled 32-bit value
   */
  private static final int gatherIntLE(final byte[] data, final int index) {
    int i = data[index] & 0xFF;
    i |= (data[index + 1] & 0xFF) << 8;
    i |= (data[index + 2] & 0xFF) << 16;
    // No mask needed for the top byte: the shift discards the sign extension.
    i |= (data[index + 3] << 24);
    return i;
  }
}
public class TableSchema { /** * Creates a table schema from a { @ link TypeInformation } instance . If the type information is * a { @ link CompositeType } , the field names and types for the composite type are used to * construct the { @ link TableSchema } instance . Otherwise , a table schema with a single field * is created . The field name is " f0 " and the field type the provided type . * @ param typeInfo The { @ link TypeInformation } from which the table schema is generated . * @ return The table schema that was generated from the given { @ link TypeInformation } . */ public static TableSchema fromTypeInfo ( TypeInformation < ? > typeInfo ) { } }
if ( typeInfo instanceof CompositeType < ? > ) { final CompositeType < ? > compositeType = ( CompositeType < ? > ) typeInfo ; // get field names and types from composite type final String [ ] fieldNames = compositeType . getFieldNames ( ) ; final TypeInformation < ? > [ ] fieldTypes = new TypeInformation [ fieldNames . length ] ; for ( int i = 0 ; i < fieldTypes . length ; i ++ ) { fieldTypes [ i ] = compositeType . getTypeAt ( i ) ; } return new TableSchema ( fieldNames , fieldTypes ) ; } else { // create table schema with a single field named " f0 " of the given type . return new TableSchema ( new String [ ] { ATOMIC_TYPE_FIELD_NAME } , new TypeInformation < ? > [ ] { typeInfo } ) ; }
public class CmsVfsIndexer { /** * Updates a resource with the given index writer and the new document provided . < p > * @ param indexWriter the index writer to update the resource with * @ param rootPath the root path of the resource to update * @ param doc the new document for the resource */ protected void updateResource ( I_CmsIndexWriter indexWriter , String rootPath , I_CmsSearchDocument doc ) { } }
try { indexWriter . updateDocument ( rootPath , doc ) ; } catch ( Exception e ) { if ( LOG . isWarnEnabled ( ) ) { LOG . warn ( Messages . get ( ) . getBundle ( ) . key ( Messages . LOG_IO_INDEX_DOCUMENT_UPDATE_2 , rootPath , m_index . getName ( ) ) , e ) ; } }
public class AppEventsLogger { /** * Build an AppEventsLogger instance to log events through . * @ param context Used to access the attributionId for non - authenticated users . * @ param session Explicitly specified Session to log events against . If null , the activeSession * will be used if it ' s open , otherwise the logging will happen against the default * app ID specified via the app ID specified in the package metadata . * @ return AppEventsLogger instance to invoke log * methods on . */ public static AppEventsLogger newLogger ( Context context , Session session ) { } }
return new AppEventsLogger ( context , null , session ) ;
public class CPDefinitionLinkUtil { /** * Returns all the cp definition links where uuid = & # 63 ; and companyId = & # 63 ; . * @ param uuid the uuid * @ param companyId the company ID * @ return the matching cp definition links */ public static List < CPDefinitionLink > findByUuid_C ( String uuid , long companyId ) { } }
return getPersistence ( ) . findByUuid_C ( uuid , companyId ) ;
public class CodepointHelper { /** * Verifies a sequence of codepoints using the specified profile * @ param aIter * Codepoint iterator * @ param eProfile * profile to use */ public static void verifyNot ( final ICodepointIterator aIter , @ Nonnull final ECodepointProfile eProfile ) { } }
final CodepointIteratorRestricted rci = aIter . restrict ( eProfile . getFilter ( ) , false , true ) ; while ( rci . hasNext ( ) ) rci . next ( ) ;
public class ServerContext { /** * Handles a connection from another server . */ public void connectServer ( Connection connection ) { } }
threadContext . checkThread ( ) ; // Handlers for all request types are registered since requests can be proxied between servers . // Note we do not use method references here because the " state " variable changes over time . // We have to use lambdas to ensure the request handler points to the current state . connection . handler ( RegisterRequest . class , ( Function < RegisterRequest , CompletableFuture < RegisterResponse > > ) request -> state . register ( request ) ) ; connection . handler ( ConnectRequest . class , ( Function < ConnectRequest , CompletableFuture < ConnectResponse > > ) request -> state . connect ( request , connection ) ) ; connection . handler ( KeepAliveRequest . class , ( Function < KeepAliveRequest , CompletableFuture < KeepAliveResponse > > ) request -> state . keepAlive ( request ) ) ; connection . handler ( UnregisterRequest . class , ( Function < UnregisterRequest , CompletableFuture < UnregisterResponse > > ) request -> state . unregister ( request ) ) ; connection . handler ( ResetRequest . class , ( Consumer < ResetRequest > ) request -> state . reset ( request ) ) ; connection . handler ( ConfigureRequest . class , ( Function < ConfigureRequest , CompletableFuture < ConfigureResponse > > ) request -> state . configure ( request ) ) ; connection . handler ( InstallRequest . class , ( Function < InstallRequest , CompletableFuture < InstallResponse > > ) request -> state . install ( request ) ) ; connection . handler ( JoinRequest . class , ( Function < JoinRequest , CompletableFuture < JoinResponse > > ) request -> state . join ( request ) ) ; connection . handler ( ReconfigureRequest . class , ( Function < ReconfigureRequest , CompletableFuture < ReconfigureResponse > > ) request -> state . reconfigure ( request ) ) ; connection . handler ( LeaveRequest . class , ( Function < LeaveRequest , CompletableFuture < LeaveResponse > > ) request -> state . leave ( request ) ) ; connection . handler ( AppendRequest . 
class , ( Function < AppendRequest , CompletableFuture < AppendResponse > > ) request -> state . append ( request ) ) ; connection . handler ( PollRequest . class , ( Function < PollRequest , CompletableFuture < PollResponse > > ) request -> state . poll ( request ) ) ; connection . handler ( VoteRequest . class , ( Function < VoteRequest , CompletableFuture < VoteResponse > > ) request -> state . vote ( request ) ) ; connection . handler ( CommandRequest . class , ( Function < CommandRequest , CompletableFuture < CommandResponse > > ) request -> state . command ( request ) ) ; connection . handler ( QueryRequest . class , ( Function < QueryRequest , CompletableFuture < QueryResponse > > ) request -> state . query ( request ) ) ; connection . onClose ( stateMachine . executor ( ) . context ( ) . sessions ( ) :: unregisterConnection ) ;
public class LocalisationManager { /** * Method removeXmitQueuePoint * @ param meUuid * @ return */ public void removeXmitQueuePoint ( SIBUuid8 meUuid ) { } }
if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) SibTr . entry ( tc , "removeXmitQueuePoint" , meUuid ) ; if ( _xmitQueuePoints != null ) { synchronized ( _xmitQueuePoints ) { _xmitQueuePoints . remove ( meUuid ) ; } } if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) SibTr . exit ( tc , "removeXmitQueuePoint" ) ;
public class HierarchicalTable { /** * Do the physical Open on this table ( requery the table ) . * @ exception DBException File exception . */ public void open ( ) throws DBException { } }
super . open ( ) ; this . getRecord ( ) . setEditMode ( DBConstants . EDIT_NONE ) ; // Being careful Iterator < BaseTable > iterator = this . getTables ( ) ; while ( iterator . hasNext ( ) ) { BaseTable table = iterator . next ( ) ; if ( ( table != null ) && ( table != this . getNextTable ( ) ) ) { this . syncRecordToBase ( table . getRecord ( ) , this . getRecord ( ) , true ) ; // Note : I am syncing the base to the alt here table . open ( ) ; } }
public class CmsContainerpageDNDController { /** * Initializes the nested container infos . < p > */ private void initNestedContainers ( ) { } }
for ( CmsContainer container : m_controller . getContainers ( ) . values ( ) ) { if ( container . isSubContainer ( ) ) { CmsContainerPageContainer containerWidget = m_controller . m_targetContainers . get ( container . getName ( ) ) ; // check if the sub container is a valid drop targets if ( m_dragInfos . keySet ( ) . contains ( containerWidget ) ) { CmsContainer parentContainer = m_controller . getContainers ( ) . get ( container . getParentContainerName ( ) ) ; // add the container to all it ' s ancestors as a dnd child while ( parentContainer != null ) { if ( m_dragInfos . keySet ( ) . contains ( m_controller . m_targetContainers . get ( parentContainer . getName ( ) ) ) ) { m_controller . m_targetContainers . get ( parentContainer . getName ( ) ) . addDndChild ( containerWidget ) ; } if ( parentContainer . isSubContainer ( ) ) { parentContainer = m_controller . getContainers ( ) . get ( parentContainer . getParentContainerName ( ) ) ; } else { parentContainer = null ; } } } } }
public class ContainerLifecycleEvents { /** * Fires a { @ link ProcessAnnotatedType } or { @ link ProcessSyntheticAnnotatedType } using the default event mechanism . */ private void fireProcessAnnotatedType ( ProcessAnnotatedTypeImpl < ? > event , BeanManagerImpl beanManager ) { } }
final Resolvable resolvable = ProcessAnnotatedTypeEventResolvable . of ( event , discovery ) ; try { beanManager . getGlobalLenientObserverNotifier ( ) . fireEvent ( event , resolvable ) ; } catch ( Exception e ) { throw new DefinitionException ( e ) ; }
public class SingleInputOperator { /** * Sets the input to the union of the given operators . * @ param input The operator ( s ) that form the input . * @ deprecated This method will be removed in future versions . Use the { @ link Union } operator instead . */ @ Deprecated public void setInput ( Operator < IN > ... input ) { } }
this . input = Operator . createUnionCascade ( null , input ) ;
public class ExecutionStage { /** * Checks which instance types and how many instances of these types are required to execute this stage * of the job graph . The required instance types and the number of instances are collected in the given map . Note * that this method does not clear the map before collecting the instances . * @ param instanceRequestMap * the map containing the instances types and the required number of instances of the respective type * @ param executionState * the execution state the considered vertices must be in */ public void collectRequiredInstanceTypes ( final InstanceRequestMap instanceRequestMap , final ExecutionState executionState ) { } }
final Set < AbstractInstance > collectedInstances = new HashSet < AbstractInstance > ( ) ; final ExecutionGroupVertexIterator groupIt = new ExecutionGroupVertexIterator ( this . getExecutionGraph ( ) , true , this . stageNum ) ; while ( groupIt . hasNext ( ) ) { final ExecutionGroupVertex groupVertex = groupIt . next ( ) ; final Iterator < ExecutionVertex > vertexIt = groupVertex . iterator ( ) ; while ( vertexIt . hasNext ( ) ) { // Get the instance type from the execution vertex if it final ExecutionVertex vertex = vertexIt . next ( ) ; if ( vertex . getExecutionState ( ) == executionState ) { final AbstractInstance instance = vertex . getAllocatedResource ( ) . getInstance ( ) ; if ( collectedInstances . contains ( instance ) ) { continue ; } else { collectedInstances . add ( instance ) ; } if ( instance instanceof DummyInstance ) { final InstanceType instanceType = instance . getType ( ) ; int num = instanceRequestMap . getMaximumNumberOfInstances ( instanceType ) ; ++ num ; instanceRequestMap . setMaximumNumberOfInstances ( instanceType , num ) ; if ( groupVertex . isInputVertex ( ) ) { num = instanceRequestMap . getMinimumNumberOfInstances ( instanceType ) ; ++ num ; instanceRequestMap . setMinimumNumberOfInstances ( instanceType , num ) ; } } else { LOG . debug ( "Execution Vertex " + vertex . getName ( ) + " (" + vertex . getID ( ) + ") is already assigned to non-dummy instance, skipping..." ) ; } } } } final Iterator < Map . Entry < InstanceType , Integer > > it = instanceRequestMap . getMaximumIterator ( ) ; while ( it . hasNext ( ) ) { final Map . Entry < InstanceType , Integer > entry = it . next ( ) ; if ( instanceRequestMap . getMinimumNumberOfInstances ( entry . getKey ( ) ) == 0 ) { instanceRequestMap . setMinimumNumberOfInstances ( entry . getKey ( ) , entry . getValue ( ) ) ; } }
public class TelemetrySender { /** * Align the retry times with sdk */ private ResponseEntity < String > executeRequest ( final TelemetryEventData eventData ) { } }
final HttpHeaders headers = new HttpHeaders ( ) ; headers . add ( HttpHeaders . CONTENT_TYPE , APPLICATION_JSON . toString ( ) ) ; try { final RestTemplate restTemplate = new RestTemplate ( ) ; final HttpEntity < String > body = new HttpEntity < > ( MAPPER . writeValueAsString ( eventData ) , headers ) ; return restTemplate . exchange ( TELEMETRY_TARGET_URL , HttpMethod . POST , body , String . class ) ; } catch ( JsonProcessingException | HttpClientErrorException ignore ) { log . warn ( "Failed to exchange telemetry request, {}." , ignore . getMessage ( ) ) ; } return null ;
public class AjaxAddableTabbedPanel { /** * On new tab . * @ param target * the target * @ param tab * the tab */ public void onNewTab ( final AjaxRequestTarget target , final T tab ) { } }
getTabs ( ) . add ( tab ) ; setSelectedTab ( getTabs ( ) . size ( ) - 1 ) ; target . add ( this ) ;
public class ElemSort { /** * Set the " select " attribute . * xsl : sort has a select attribute whose value is an expression . * For each node to be processed , the expression is evaluated * with that node as the current node and with the complete * list of nodes being processed in unsorted order as the current * node list . The resulting object is converted to a string as if * by a call to the string function ; this string is used as the * sort key for that node . The default value of the select attribute * is . , which will cause the string - value of the current node to * be used as the sort key . * @ param v Value to set for the " select " attribute */ public void setSelect ( XPath v ) { } }
if ( v . getPatternString ( ) . indexOf ( "{" ) < 0 ) m_selectExpression = v ; else error ( XSLTErrorResources . ER_NO_CURLYBRACE , null ) ;
public class AWSOrganizationsClient {

    /**
     * Disables an organizational control policy type in a root. After this call,
     * policies of the specified type can no longer be attached to that root or to
     * any organizational unit (OU) or account within it; the change can be undone
     * with {@code EnablePolicyType}. This operation can be called only from the
     * organization's master account.
     *
     * <p>If a policy type is disabled for a root, it may still show as enabled for
     * the organization when all features are enabled; use {@code ListRoots} to see
     * per-root policy-type status and {@code DescribeOrganization} for the
     * organization-wide view.
     *
     * @param request the {@code DisablePolicyType} request naming the root and policy type
     * @return Result of the DisablePolicyType operation returned by the service.
     * @throws AccessDeniedException if the caller lacks the required IAM permissions
     * @throws AWSOrganizationsNotInUseException if the calling account is not a member of an organization
     * @throws ConcurrentModificationException if the target is currently being modified by another request
     * @throws ConstraintViolationException if the operation would violate a minimum or maximum limit;
     *         the exception's reason code (e.g. {@code POLICY_NUMBER_LIMIT_EXCEEDED},
     *         {@code MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED}) identifies the violated limit
     * @throws InvalidInputException if one or more request parameters are invalid; the reason code
     *         (e.g. {@code INVALID_ENUM}, {@code INVALID_PATTERN_TARGET_ID}) identifies the problem
     * @throws PolicyTypeNotEnabledException if the specified policy type is not currently enabled in this root
     * @throws RootNotFoundException if no root with the given {@code RootId} exists
     * @throws ServiceException on an internal AWS Organizations service error; try again later
     * @throws TooManyRequestsException if too many requests were sent in too short a period
     * @sample AWSOrganizations.DisablePolicyType
     * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/DisablePolicyType"
     *      target="_top">AWS API Documentation</a>
     */
    @ Override
    public DisablePolicyTypeResult disablePolicyType ( DisablePolicyTypeRequest request ) {
        // Run the client's standard pre-execution hooks (request handler chain)
        // before dispatching to the generated execution method.
        request = beforeClientExecution ( request ) ;
        return executeDisablePolicyType ( request ) ;
    }
}
public class ConfigurationCheck { /** * Check that foreign keys have the same mapped type as the target . */ private void checkForeignKeyMapping ( List < String > errors , Config config ) { } }
for ( Entity entity : config . getProject ( ) . getEntities ( ) . getList ( ) ) { for ( Relation relation : entity . getRelations ( ) . getList ( ) ) { if ( relation . isInverse ( ) || relation . isIntermediate ( ) ) { continue ; } for ( AttributePair attributePair : relation . getAttributePairs ( ) ) { if ( attributePair . getFromAttribute ( ) . getMappedType ( ) != attributePair . getToAttribute ( ) . getMappedType ( ) ) { String errorMsg = "Inconsistent types: Column " + attributePair . getFromAttribute ( ) . getFullColumnName ( ) + "[" + attributePair . getFromAttribute ( ) . getJdbcType ( ) + "] references column " + attributePair . getToAttribute ( ) . getFullColumnName ( ) + "[" + attributePair . getToAttribute ( ) . getJdbcType ( ) + "]. " + "You should really fix your SQL schema." ; if ( attributePair . getFromAttribute ( ) . isInCpk ( ) ) { // we may get compile failure as the property is mapped ( inside the cpk ) log . warn ( errorMsg + ". To avoid this error you can force the mapped type using configuration." ) ; } else { // we should not get compile failure as the property is not mapped . log . warn ( errorMsg ) ; } } } } }
public class Sha1Hasher { /** * Create a new SHA1 hash URI for the specified { @ link ByteBuffer } . * @ param buf The { @ link ByteBuffer } to create the SHA - 1 for . * @ return The SHA1 hash as a URI . * @ throws IOException If there is an error creating the hash or if the * specified algorithm cannot be found . */ public static URI createSha1Urn ( final ByteBuffer buf ) throws IOException { } }
final MessageDigest md = new Sha1 ( ) ; md . update ( buf ) ; final byte [ ] sha1 = md . digest ( ) ; try { // preferred casing : lowercase " urn : sha1 : " , uppercase encoded value // note that all URNs are case - insensitive for the " urn : < type > : " part , // but some MAY be case - sensitive thereafter ( SHA1 / Base32 is case // insensitive ) return new URI ( "urn:sha1:" + Base32 . encode ( sha1 ) ) ; } catch ( final URISyntaxException e ) { // This should never happen . LOG . error ( "Could not encode SHA-1" , e ) ; throw new IOException ( "bad uri: " + Base32 . encode ( sha1 ) ) ; }
public class ObjectInputStream {

    /**
     * Reads a new class from the receiver. It is assumed the class has not been
     * read yet (not a cyclic reference). Return the class read.
     *
     * @param unshared read the object unshared
     * @return The {@code java.lang.Class} read from the stream.
     * @throws IOException If an IO exception happened when reading the class.
     * @throws ClassNotFoundException If a class for one of the objects could not be found
     */
    private Class < ? > readNewClass ( boolean unshared ) throws ClassNotFoundException , IOException {
        ObjectStreamClass classDesc = readClassDesc ( ) ;
        if ( classDesc == null ) {
            // A class record must carry a descriptor; its absence is a stream corruption.
            throw missingClassDescriptor ( ) ;
        }
        Class < ? > localClass = classDesc . forClass ( ) ;
        if ( localClass != null ) {
            // Register the class under a fresh handle so later back-references
            // in the stream can resolve to it (unless marked unshared).
            registerObjectRead ( localClass , nextHandle ( ) , unshared ) ;
        }
        return localClass ;
    }
}
public class LocalDate { /** * Returns a copy of this date minus the specified number of weeks . * This LocalDate instance is immutable and unaffected by this method call . * The following three lines are identical in effect : * < pre > * LocalDate subtracted = dt . minusWeeks ( 6 ) ; * LocalDate subtracted = dt . minus ( Period . weeks ( 6 ) ) ; * LocalDate subtracted = dt . withFieldAdded ( DurationFieldType . weeks ( ) , - 6 ) ; * < / pre > * @ param weeks the amount of weeks to subtract , may be negative * @ return the new LocalDate minus the increased weeks */ public LocalDate minusWeeks ( int weeks ) { } }
if ( weeks == 0 ) { return this ; } long instant = getChronology ( ) . weeks ( ) . subtract ( getLocalMillis ( ) , weeks ) ; return withLocalMillis ( instant ) ;
public class BouncyCastleUtil { /** * Retrieves the actual value of the X . 509 extension . * @ param certExtValue the DER - encoded OCTET string value of the extension . * @ return the decoded / actual value of the extension ( the octets ) . */ public static byte [ ] getExtensionValue ( byte [ ] certExtValue ) throws IOException { } }
ByteArrayInputStream inStream = new ByteArrayInputStream ( certExtValue ) ; ASN1InputStream derInputStream = new ASN1InputStream ( inStream ) ; ASN1Primitive object = derInputStream . readObject ( ) ; if ( object instanceof ASN1OctetString ) { return ( ( ASN1OctetString ) object ) . getOctets ( ) ; } else { throw new IOException ( i18n . getMessage ( "octectExp" ) ) ; }
public class Tokenizer { /** * Consumes the current token , expecting it to be as < tt > SYMBOL < / tt > with the given content * @ param symbol the expected trigger of the current token */ public void consumeExpectedSymbol ( String symbol ) { } }
if ( current ( ) . matches ( Token . TokenType . SYMBOL , symbol ) ) { consume ( ) ; } else { addError ( current ( ) , "Unexpected token: '%s'. Expected: '%s'" , current ( ) . getSource ( ) , symbol ) ; }
public class SAXParser { /** * Parse the content described by the giving Uniform Resource * Identifier ( URI ) as XML using the specified * { @ link org . xml . sax . helpers . DefaultHandler } . * @ param uri The location of the content to be parsed . * @ param dh The SAX DefaultHandler to use . * @ throws IllegalArgumentException If the uri is null . * @ throws IOException If any IO errors occur . * @ throws SAXException If any SAX errors occur during processing . * @ see org . xml . sax . DocumentHandler */ public void parse ( String uri , DefaultHandler dh ) throws SAXException , IOException { } }
if ( uri == null ) { throw new IllegalArgumentException ( "uri cannot be null" ) ; } InputSource input = new InputSource ( uri ) ; this . parse ( input , dh ) ;
public class AbstractSMTPClientSession { /** * Implementation which just send each { @ link SMTPRequest } which is hold in the { @ link SMTPPipeliningRequest # getRequests ( ) } method via * { @ link # send ( SMTPRequest ) } method . */ @ Override public SMTPClientFuture < FutureResult < Collection < SMTPResponse > > > send ( SMTPPipeliningRequest request ) { } }
SMTPClientFutureImpl < FutureResult < Collection < SMTPResponse > > > future = new SMTPClientFutureImpl < FutureResult < Collection < SMTPResponse > > > ( false ) ; future . setSMTPClientSession ( this ) ; AggregationListener listener = new AggregationListener ( future , request . getRequests ( ) . size ( ) ) ; for ( SMTPRequest req : request . getRequests ( ) ) { send ( req ) . addListener ( listener ) ; } return future ;
public class SimpleLog { /** * Do the actual org . apache . commons . logging . * This method assembles the message and then calls < code > write ( ) < / code > * to cause it to be written . * @ param type One of the LOG _ LEVEL _ XXX constants defining the log level * @ param message The message itself ( typically a String ) * @ param t The exception whose stack trace should be logged */ protected void log ( int type , Object message , Throwable t ) { } }
// Use a string buffer for better performance final StringBuffer buf = new StringBuffer ( ) ; // Append date - time if so configured if ( showDateTime ) { final Date now = new Date ( ) ; String dateText ; synchronized ( dateFormatter ) { dateText = dateFormatter . format ( now ) ; } buf . append ( dateText ) ; buf . append ( " " ) ; } // Append a readable representation of the log level switch ( type ) { case LOG_LEVEL_TRACE : buf . append ( "[TRACE] " ) ; break ; case LOG_LEVEL_DEBUG : buf . append ( "[DEBUG] " ) ; break ; case LOG_LEVEL_INFO : buf . append ( "[INFO] " ) ; break ; case LOG_LEVEL_WARN : buf . append ( "[WARN] " ) ; break ; case LOG_LEVEL_ERROR : buf . append ( "[ERROR] " ) ; break ; case LOG_LEVEL_FATAL : buf . append ( "[FATAL] " ) ; break ; } // Append the name of the log instance if so configured if ( showShortName ) { if ( shortLogName == null ) { // Cut all but the last component of the name for both styles final String slName = logName . substring ( logName . lastIndexOf ( "." ) + 1 ) ; shortLogName = slName . substring ( slName . lastIndexOf ( "/" ) + 1 ) ; } buf . append ( String . valueOf ( shortLogName ) ) . append ( " - " ) ; } else if ( showLogName ) { buf . append ( String . valueOf ( logName ) ) . append ( " - " ) ; } // Append the message buf . append ( String . valueOf ( message ) ) ; // Append stack trace if not null if ( t != null ) { buf . append ( " <" ) ; buf . append ( t . toString ( ) ) ; buf . append ( ">" ) ; final java . io . StringWriter sw = new java . io . StringWriter ( 1024 ) ; final java . io . PrintWriter pw = new java . io . PrintWriter ( sw ) ; t . printStackTrace ( pw ) ; pw . close ( ) ; buf . append ( sw . toString ( ) ) ; } // Print to the appropriate destination write ( buf ) ;
public class ProfileUtils { /** * Deletes prefs folders for Does nothing if prefs folders are default main user profile */ public boolean deleteProfile ( ) { } }
String [ ] profileDirs = { smallPrefsFolder , largePrefsFolder , cachePrefsFolder } ; // Assuming if any of those are main profile , skip the whole delete for ( String profileDir : profileDirs ) { if ( isMainProfile ( profileDir ) ) { logger . finer ( "Skipping profile deletion since '" + profileDir + "' is the main profile." ) ; return false ; } } for ( String profileDir : profileDirs ) { File currentDirHandle = new File ( profileDir ) ; if ( ! currentDirHandle . exists ( ) ) { logger . finer ( "Skipping profile deletion for '" + profileDir + "' since it doesn't exist." ) ; continue ; } boolean deleted = deleteFolder ( profileDir ) ; if ( ! deleted ) { final int retryIntervalMs = 500 ; final int retryMaxCount = 10 ; int retryCount = 0 ; boolean ok = false ; logger . warning ( "Profile could not be deleted, retrying..." ) ; do { try { Thread . sleep ( retryIntervalMs ) ; } catch ( InterruptedException e ) { // fall through } ok = deleteFolder ( profileDir ) ; retryCount ++ ; if ( retryCount > retryMaxCount ) { break ; } } while ( ! ok ) ; if ( ! ok ) { logger . severe ( "Could not delete profile in '" + profileDir + "'. Skipping further deletion." ) ; return false ; } else { logger . warning ( "Deleted profile, retry count = " + retryCount ) ; } } else { logger . finer ( "Deleted profile in '" + profileDir + "'" ) ; } } return true ;
public class JavaParsingAtomicQueueGenerator { /** * Generates something like < code > return field < / code > * @ param fieldName * @ return */ protected BlockStmt returnField ( String fieldName ) { } }
BlockStmt body = new BlockStmt ( ) ; body . addStatement ( new ReturnStmt ( fieldName ) ) ; return body ;
public class AttributeDeduplicatorDaemon { /** * Starts a daemon which performs deduplication on incoming attributes in real - time . * The thread listens to the RocksDbQueue queue for incoming attributes and applies * the AttributeDeduplicator # deduplicate ( SessionStore , KeyspaceIndexPair ) algorithm . */ public CompletableFuture < Void > startDeduplicationDaemon ( ) { } }
stopDaemon = false ; CompletableFuture < Void > daemon = CompletableFuture . supplyAsync ( ( ) -> { LOG . info ( "startDeduplicationDaemon() - attribute de-duplicator daemon started." ) ; while ( ! stopDaemon ) { try { List < Attribute > attributes = queue . read ( QUEUE_GET_BATCH_MAX ) ; LOG . trace ( "starting a new batch to process these new attributes: {}" , attributes ) ; // group the attributes into a set of unique ( keyspace - > value ) pair Set < KeyspaceIndexPair > uniqueKeyValuePairs = attributes . stream ( ) . map ( attr -> KeyspaceIndexPair . create ( attr . keyspace ( ) , attr . index ( ) ) ) . collect ( Collectors . toSet ( ) ) ; // perform deduplicate for each ( keyspace - > value ) for ( KeyspaceIndexPair keyspaceIndexPair : uniqueKeyValuePairs ) { deduplicate ( sessionFactory , keyspaceIndexPair ) ; } LOG . trace ( "new attributes processed." ) ; queue . ack ( attributes ) ; } catch ( InterruptedException | RuntimeException e ) { LOG . error ( "An exception has occurred in the attribute de-duplicator daemon. " , e ) ; } } LOG . info ( "startDeduplicationDaemon() - attribute de-duplicator daemon stopped" ) ; return null ; } , executorServiceForDaemon ) ; daemon . exceptionally ( e -> { LOG . error ( "An unhandled exception has occurred in the attribute de-duplicator daemon. " , e ) ; return null ; } ) ; return daemon ;
public class ProcessControlHelper {

    /**
     * Checks the status of a server that is in the process of starting: waits for
     * the server lock to be acquired, then queries the server command port, and
     * prints the matching console message for the outcome.
     *
     * @return {@link ReturnCode#OK} if the server started, a specific error or
     *         unknown-status code otherwise
     */
    public ReturnCode startStatus ( ) {
        System . out . println ( MessageFormat . format ( BootstrapConstants . messages . getString ( "info.serverStarting" ) , serverName ) ) ;
        // Use initialized bootstrap configuration to find the server lock file.
        ServerLock serverLock = ServerLock . createTestLock ( bootProps ) ;
        ReturnCode rc = ReturnCode . OK ;
        String pid = getPID ( ) ;
        if ( serverLock . lockFileExists ( ) ) {
            // Track the server process either via its PID or, when no PID is
            // available, via a file-share lock on the console log.
            ProcessStatus ps = pid == null ? new FileShareLockProcessStatusImpl ( consoleLogFile ) : new PSProcessStatusImpl ( pid ) ;
            rc = serverLock . waitForStart ( ps ) ;
            if ( rc == ReturnCode . OK ) {
                // Lock acquired: confirm startup through the server command port.
                ServerCommandClient scc = new ServerCommandClient ( bootProps ) ;
                rc = scc . startStatus ( serverLock ) ;
            }
        } else {
            // we have no server lock file, despite the fact that we're supposed to be looking
            // for a server in the process of starting...
            rc = ReturnCode . ERROR_SERVER_START ;
        }
        if ( rc == ReturnCode . OK ) {
            // Started cleanly; report with or without PID depending on availability.
            if ( pid == null ) {
                System . out . println ( MessageFormat . format ( BootstrapConstants . messages . getString ( "info.serverStarted" ) , serverName ) ) ;
            } else {
                System . out . println ( MessageFormat . format ( BootstrapConstants . messages . getString ( "info.serverStartedWithPID" ) , serverName , pid ) ) ;
            }
        } else if ( rc == ReturnCode . SERVER_COMMAND_PORT_DISABLED_STATUS ) {
            // Server is up but its command port is disabled, so its true state
            // cannot be confirmed; warn and downgrade to "unknown status".
            if ( pid == null ) {
                System . out . println ( MessageFormat . format ( BootstrapConstants . messages . getString ( "warning.serverStartedCommandPortDisabled" ) , serverName ) ) ;
            } else {
                System . out . println ( MessageFormat . format ( BootstrapConstants . messages . getString ( "warning.serverStartedWithPIDCommandPortDisabled" ) , serverName , pid ) ) ;
            }
            rc = ReturnCode . SERVER_UNKNOWN_STATUS ;
        } else {
            System . out . println ( MessageFormat . format ( BootstrapConstants . messages . getString ( "info.serverStartException" ) , serverName ) ) ;
        }
        return rc ;
    }
}
public class DBQuery { /** * An element in the given array field matches the given query * @ param field the array field * @ param query The query to attempt to match against the elements of the array field * @ return the query */ public static Query elemMatch ( String field , Query query ) { } }
return new Query ( ) . elemMatch ( field , query ) ;
public class sslservice { /** * Use this API to fetch all the sslservice resources that are configured on netscaler . * This uses sslservice _ args which is a way to provide additional arguments while fetching the resources . */ public static sslservice [ ] get ( nitro_service service , sslservice_args args ) throws Exception { } }
sslservice obj = new sslservice ( ) ; options option = new options ( ) ; option . set_args ( nitro_util . object_to_string_withoutquotes ( args ) ) ; sslservice [ ] response = ( sslservice [ ] ) obj . get_resources ( service , option ) ; return response ;
public class GetDeclaredMethodLookup {

    /**
     * Tells whether {@code m2} has a more specific return type than {@code m1}.
     *
     * <p>Relies on {@code Class.isAssignableFrom}, which is safe under the
     * assumption that the inheritance hierarchy cannot change across reloads.
     *
     * @return true if m2's return type is assignable from m1's
     */
    private boolean isMoreSpecificReturnTypeThan ( Invoker m1 , Invoker m2 ) {
        final Class < ? > returnType1 = m1 . getReturnType ( ) ;
        final Class < ? > returnType2 = m2 . getReturnType ( ) ;
        return returnType2 . isAssignableFrom ( returnType1 ) ;
    }
}
public class GoogleCloudStorageImpl { /** * Inverse function of { @ link # encodeMetadata ( Map ) } . */ @ VisibleForTesting static Map < String , byte [ ] > decodeMetadata ( Map < String , String > metadata ) { } }
return Maps . transformValues ( metadata , DECODE_METADATA_VALUES ) ;
public class Logger { /** * May return null */ private String getEffectiveResourceBundleName ( ) { } }
Logger target = this ; while ( target != null ) { String rbn = target . getResourceBundleName ( ) ; if ( rbn != null ) { return rbn ; } target = target . getParent ( ) ; } return null ;
public class GreatCircle { /** * Convert from " Lat , Long Lat , Long " String format * " Lat , Long , Lat , Long " Format * or all four entries " Lat Long Lat Long " * ( Convenience function ) * Since Distance is positive , a " - 1 " indicates an error in String formatting */ public static double calc ( String ... coords ) { } }
try { String [ ] array ; switch ( coords . length ) { case 1 : array = Split . split ( ',' , coords [ 0 ] ) ; if ( array . length != 4 ) return - 1 ; return calc ( Double . parseDouble ( array [ 0 ] ) , Double . parseDouble ( array [ 1 ] ) , Double . parseDouble ( array [ 2 ] ) , Double . parseDouble ( array [ 3 ] ) ) ; case 2 : array = Split . split ( ',' , coords [ 0 ] ) ; String [ ] array2 = Split . split ( ',' , coords [ 1 ] ) ; if ( array . length != 2 || array2 . length != 2 ) return - 1 ; return calc ( Double . parseDouble ( array [ 0 ] ) , Double . parseDouble ( array [ 1 ] ) , Double . parseDouble ( array2 [ 0 ] ) , Double . parseDouble ( array2 [ 1 ] ) ) ; case 4 : return calc ( Double . parseDouble ( coords [ 0 ] ) , Double . parseDouble ( coords [ 1 ] ) , Double . parseDouble ( coords [ 2 ] ) , Double . parseDouble ( coords [ 3 ] ) ) ; default : return - 1 ; } } catch ( NumberFormatException e ) { return - 1 ; }
public class RU {

    /**
     * Number-format helper: formats the given {@link Number} using the mask and
     * rounding, treating a null value as zero.
     */
    public static String formatNumber ( Number value , String mask , double round ) {
        final double val = ( value == null ) ? 0 : value . doubleValue ( ) ;
        return formatNumber ( val , mask , round ) ;
    }
}
public class TextStructureLayerStored { @ Override public TextSpan addSpan ( Token spanStart , Token spanEnd , String type ) { } }
return addSpan ( spanStart , spanEnd , type , null , null , null ) ;
public class FactoryImageBorder {

    /**
     * Creates an instance of the requested algorithm for handling border pixels on
     * {@link ImageGray}. If {@link BorderType#ZERO} is passed in then the value will
     * be set to 0; alternatively use {@link #singleValue(Class, double)} instead.
     *
     * @param imageType Type of image being processed.
     * @param borderType Which border algorithm should it use.
     * @return The requested {@link ImageBorder}.
     * @throws IllegalArgumentException for SKIP and NORMALIZED (not supported here)
     *         or an unrecognized border/image type
     */
    public static < T extends ImageGray < T > > ImageBorder < T > single ( Class < T > imageType , BorderType borderType ) {
        // First resolve the 1D index-wrapping strategy for the requested border type.
        Class < ? > borderClass ;
        switch ( borderType ) {
            case SKIP :
                throw new IllegalArgumentException ( "Skip border can't be implemented here and has to be done " + "externally. Call this might be a bug. Instead pass in EXTENDED and manually skip over the " + "pixel in a loop some place." ) ;
            case NORMALIZED :
                throw new IllegalArgumentException ( "Normalized can't be supported by this border interface" ) ;
            case REFLECT :
                borderClass = BorderIndex1D_Reflect . class ;
                break ;
            case EXTENDED :
                borderClass = BorderIndex1D_Extend . class ;
                break ;
            case WRAP :
                borderClass = BorderIndex1D_Wrap . class ;
                break ;
            case ZERO :
                // ZERO is handled by the constant-value border instead of an index wrapper.
                return FactoryImageBorder . singleValue ( imageType , 0 ) ;
            default :
                throw new IllegalArgumentException ( "Border type not supported: " + borderType ) ;
        }
        // Then pick the concrete border implementation matching the image's pixel type.
        if ( imageType == GrayF32 . class )
            return ( ImageBorder < T > ) new ImageBorder1D_F32 ( borderClass ) ;
        if ( imageType == GrayF64 . class )
            return ( ImageBorder < T > ) new ImageBorder1D_F64 ( borderClass ) ;
        else if ( GrayI . class . isAssignableFrom ( imageType ) )
            // All integer gray images share the S32 border; raw cast required by its constructor.
            return ( ImageBorder < T > ) new ImageBorder1D_S32 ( ( Class ) borderClass ) ;
        else if ( imageType == GrayS64 . class )
            return ( ImageBorder < T > ) new ImageBorder1D_S64 ( borderClass ) ;
        else
            throw new IllegalArgumentException ( "Unknown image type: " + imageType . getSimpleName ( ) ) ;
    }
}
public class DiscreteFactor { /** * Get the partition function = denominator = total sum probability of all * assignments . */ private double getPartitionFunction ( ) { } }
if ( partitionFunction != - 1.0 ) { return partitionFunction ; } partitionFunction = 0.0 ; Iterator < Outcome > outcomeIterator = outcomeIterator ( ) ; while ( outcomeIterator . hasNext ( ) ) { partitionFunction += outcomeIterator . next ( ) . getProbability ( ) ; } return partitionFunction ;
public class BookmarksApi { /** * List corporation bookmarks A list of your corporation & # 39 ; s bookmarks - - - * This route is cached for up to 3600 seconds SSO Scope : * esi - bookmarks . read _ corporation _ bookmarks . v1 * @ param corporationId * An EVE corporation ID ( required ) * @ param datasource * The server name you would like data from ( optional , default to * tranquility ) * @ param ifNoneMatch * ETag from a previous request . A 304 will be returned if this * matches the current ETag ( optional ) * @ param page * Which page of results to return ( optional , default to 1) * @ param token * Access token to use if unable to set a header ( optional ) * @ return List & lt ; CorporationBookmarksResponse & gt ; * @ throws ApiException * If fail to call the API , e . g . server error or cannot * deserialize the response body */ public List < CorporationBookmarksResponse > getCorporationsCorporationIdBookmarks ( Integer corporationId , String datasource , String ifNoneMatch , Integer page , String token ) throws ApiException { } }
ApiResponse < List < CorporationBookmarksResponse > > resp = getCorporationsCorporationIdBookmarksWithHttpInfo ( corporationId , datasource , ifNoneMatch , page , token ) ; return resp . getData ( ) ;
public class JobDescFactory { /** * Returns the cluster that a give job was run on by mapping the jobtracker hostname to an * identifier . * @ param jobConf * @ return */ public static String getCluster ( Configuration jobConf ) { } }
String jobtracker = jobConf . get ( RESOURCE_MANAGER_KEY ) ; if ( jobtracker == null ) { jobtracker = jobConf . get ( JOBTRACKER_KEY ) ; } String cluster = null ; if ( jobtracker != null ) { // strip any port number int portIdx = jobtracker . indexOf ( ':' ) ; if ( portIdx > - 1 ) { jobtracker = jobtracker . substring ( 0 , portIdx ) ; } // An ExceptionInInitializerError may be thrown to indicate that an exception occurred during // evaluation of Cluster class ' static initialization cluster = Cluster . getIdentifier ( jobtracker ) ; } return cluster ;
public class JBasePanel { /** * Process this action . * Override this for functionality . * @ param strAction The action command or message . * @ param iOptions options * @ return true if handled . */ public boolean doAction ( String strAction , int iOptions ) { } }
// Dispatches a menu/action command string (case-insensitive) to the matching
// applet operation. CLOSE frees the enclosing frame; PRINT/ABOUT/PREFERENCES
// delegate directly. LOGON, LOGOUT and CHANGE_PASSWORD each treat both
// CANCEL_OPTION and NORMAL_RETURN as handled; any other code pops up the
// applet's last error message in a confirm dialog and still reports handled.
// NOTE(review): LOGOUT is implemented as application.login(applet, null, null, null);
// presumably null credentials mean "log out" - confirm against the application API.
// Returns false for any unrecognized action so callers can try other handlers.
if ( Constants . CLOSE . equalsIgnoreCase ( strAction ) ) { Frame frame = ScreenUtil . getFrame ( this ) ; if ( frame != null ) ( ( JBaseFrame ) frame ) . free ( ) ; return true ; } else if ( ThinMenuConstants . PRINT . equalsIgnoreCase ( strAction ) ) { return ScreenPrinter . onPrint ( this ) ; } else if ( ThinMenuConstants . ABOUT . equalsIgnoreCase ( strAction ) ) { return this . getBaseApplet ( ) . onAbout ( ) ; } else if ( ThinMenuConstants . LOGON . equalsIgnoreCase ( strAction ) ) { int iErrorCode = this . getBaseApplet ( ) . onLogonDialog ( ) ; if ( iErrorCode == JOptionPane . CANCEL_OPTION ) return true ; // User clicked the cancel button if ( iErrorCode == Constants . NORMAL_RETURN ) return true ; // Success // Display the error message and return ! String strDisplay = this . getBaseApplet ( ) . getLastError ( iErrorCode ) ; JOptionPane . showConfirmDialog ( this , strDisplay , "Error" , JOptionPane . OK_CANCEL_OPTION ) ; return true ; // Handled } else if ( ThinMenuConstants . LOGOUT . equalsIgnoreCase ( strAction ) ) { int iErrorCode = this . getBaseApplet ( ) . getApplication ( ) . login ( this . getBaseApplet ( ) , null , null , null ) ; // Logout if ( iErrorCode == Constants . NORMAL_RETURN ) return true ; // Success , handled // Display the error message and return ! String strDisplay = this . getBaseApplet ( ) . getLastError ( iErrorCode ) ; JOptionPane . showConfirmDialog ( this , strDisplay , "Error" , JOptionPane . OK_CANCEL_OPTION ) ; return true ; // Handled } else if ( ThinMenuConstants . CHANGE_PASSWORD . equalsIgnoreCase ( strAction ) ) { int iErrorCode = this . getBaseApplet ( ) . onChangePassword ( ) ; if ( iErrorCode == JOptionPane . CANCEL_OPTION ) return true ; // User clicked the cancel button if ( iErrorCode == Constants . NORMAL_RETURN ) return true ; // Success // Display the error message and return ! String strDisplay = this . getBaseApplet ( ) . getLastError ( iErrorCode ) ; JOptionPane . 
showConfirmDialog ( this , strDisplay , "Error" , JOptionPane . OK_CANCEL_OPTION ) ; return true ; // Handled } else if ( ThinMenuConstants . PREFERENCES . equalsIgnoreCase ( strAction ) ) { return this . getBaseApplet ( ) . onSetFont ( ) ; } return false ;
public class GeoPackageValidate { /** * Validate the GeoPackage has the minimum required tables * @ param geoPackage * GeoPackage */ public static void validateMinimumTables ( GeoPackageCore geoPackage ) { } }
if ( ! hasMinimumTables ( geoPackage ) ) { throw new GeoPackageException ( "Invalid GeoPackage. Does not contain required tables: " + SpatialReferenceSystem . TABLE_NAME + " & " + Contents . TABLE_NAME + ", GeoPackage Name: " + geoPackage . getName ( ) ) ; }
public class YarnSubmissionHelper { /** * Sets environment variable map . * @ param map * @ return */ public YarnSubmissionHelper setJobSubmissionEnvMap ( final Map < String , String > map ) { } }
for ( final Map . Entry < String , String > entry : map . entrySet ( ) ) { environmentVariablesMap . put ( entry . getKey ( ) , entry . getValue ( ) ) ; } return this ;
public class Util { /** * Returns length of the internal value . * @ param value a value . * @ param propType * @ return the length of the internal value or < code > - 1 < / code > if the length * cannot be determined . */ public static long getLength ( ValueData value , int propType ) { } }
if ( propType == PropertyType . BINARY ) { return value . getLength ( ) ; } else if ( propType == PropertyType . NAME || propType == PropertyType . PATH ) { return - 1 ; } else { return value . toString ( ) . length ( ) ; }
public class MediaManagementApi { /** * Complete a bulk of interactions * Complete a bulk of interactions * @ param mgtCancel ( required ) * @ return ApiResponse & lt ; ApiSuccessResponse & gt ; * @ throws ApiException If fail to call the API , e . g . server error or cannot deserialize the response body */ public ApiResponse < ApiSuccessResponse > mgtCompleteWithHttpInfo ( MgtCancel1 mgtCancel ) throws ApiException { } }
com . squareup . okhttp . Call call = mgtCompleteValidateBeforeCall ( mgtCancel , null , null ) ; Type localVarReturnType = new TypeToken < ApiSuccessResponse > ( ) { } . getType ( ) ; return apiClient . execute ( call , localVarReturnType ) ;
public class JarPluginProviderLoader { /** * Return true if the jar attributes declare it should load local dependency classes first . * @ param file plugin file * @ return true if plugin libs load first is set */ static boolean getLoadLocalLibsFirstForFile ( final File file ) { } }
Attributes attributes = loadMainAttributes ( file ) ; if ( null == attributes ) { return false ; } boolean loadFirstDefault = true ; String loadFirst = attributes . getValue ( RUNDECK_PLUGIN_LIBS_LOAD_FIRST ) ; if ( null != loadFirst ) { return Boolean . valueOf ( loadFirst ) ; } return loadFirstDefault ;
public class CProductPersistenceImpl { /** * Removes all the c products where uuid = & # 63 ; and companyId = & # 63 ; from the database . * @ param uuid the uuid * @ param companyId the company ID */ @ Override public void removeByUuid_C ( String uuid , long companyId ) { } }
for ( CProduct cProduct : findByUuid_C ( uuid , companyId , QueryUtil . ALL_POS , QueryUtil . ALL_POS , null ) ) { remove ( cProduct ) ; }
public class ServerClientDetection { /** * Handle callback from service call . */ public void resultReceived ( IPendingServiceCall call ) { } }
// Bandwidth-check callback, driven by how many reply packets have arrived.
// Packet 1 establishes latency (elapsed ms clamped to [LATENCY_MIN, LATENCY_MAX])
// and sends the 1k test payload; packet 2 sends the 32k payload; any later packet
// computes downstream kbit/s from the bytes written since the test started
// (converted to kbits), subtracting the accumulated per-packet latency, and
// reports via callBWDone(). Everything is skipped when the call status is
// STATUS_NOT_CONNECTED. timePassed is a System.nanoTime() delta converted to ms.
// if we aren ' t connection , skip any further testing if ( Call . STATUS_NOT_CONNECTED != call . getStatus ( ) ) { // receive time using nanos long now = System . nanoTime ( ) ; // increment received int received = packetsReceived . incrementAndGet ( ) ; log . debug ( "Call time stamps - write: {} read: {}" , call . getWriteTime ( ) , call . getReadTime ( ) ) ; // time passed is in milliseconds timePassed = ( now - startTime ) / 1000000 ; log . debug ( "Received count: {} sent: {} timePassed: {} ms" , new Object [ ] { received , packetsSent . get ( ) , timePassed } ) ; switch ( received ) { case 1 : // first packet is used to test latency latency = Math . max ( Math . min ( timePassed , LATENCY_MAX ) , LATENCY_MIN ) ; log . debug ( "Receive latency: {}" , latency ) ; // We now have a latency figure so can start sending test data . // Second call . 1st packet sent log . debug ( "Sending first payload at {} ns" , now ) ; callBWCheck ( payload ) ; // 1k break ; case 2 : log . debug ( "Sending second payload at {} ns" , now ) ; // increment cumulative latency cumLatency ++ ; callBWCheck ( payload1 ) ; // 32k break ; default : log . debug ( "Doing calculations at {} ns" , now ) ; // increment cumulative latency cumLatency ++ ; // bytes to kbits deltaDown = ( ( conn . getWrittenBytes ( ) - startBytesWritten ) * 8 ) / 1000d ; log . debug ( "Delta kbits: {}" , deltaDown ) ; // total dl time - latency for each packet sent in secs deltaTime = ( timePassed - ( latency * cumLatency ) ) ; if ( deltaTime <= 0 ) { deltaTime = ( timePassed + latency ) ; } log . debug ( "Delta time: {} ms" , deltaTime ) ; // calculate kbit / s kbitDown = Math . round ( deltaDown / ( deltaTime / 1000d ) ) ; log . debug ( "onBWDone: kbitDown: {} deltaDown: {} deltaTime: {} latency: {} " , new Object [ ] { kbitDown , deltaDown , deltaTime , latency } ) ; callBWDone ( ) ; } } else { log . debug ( "Pending call skipped due to being no longer connected" ) ; }
public class InfinispanFactory { /** * Create cache manager with custom idle time . * @ param idleTime idle time in seconds * @ return cache manager */ public static EmbeddedCacheManager create ( long idleTime ) { } }
Configuration configuration = new ConfigurationBuilder ( ) . expiration ( ) . maxIdle ( idleTime , TimeUnit . SECONDS ) . build ( ) ; return new DefaultCacheManager ( configuration ) ;
public class BackupEnginesInner { /** * Backup management servers registered to Recovery Services Vault . Returns a pageable list of servers . * @ param nextPageLink The NextLink from the previous successful call to List operation . * @ throws IllegalArgumentException thrown if parameters fail the validation * @ return the observable to the PagedList & lt ; BackupEngineBaseResourceInner & gt ; object */ public Observable < Page < BackupEngineBaseResourceInner > > listNextAsync ( final String nextPageLink ) { } }
// Unwraps the ServiceResponse envelope from the paged service call, yielding
// just the page body. The anonymous Func1 keeps compatibility with the
// RxJava 1 Observable returned by listNextWithServiceResponseAsync.
return listNextWithServiceResponseAsync ( nextPageLink ) . map ( new Func1 < ServiceResponse < Page < BackupEngineBaseResourceInner > > , Page < BackupEngineBaseResourceInner > > ( ) { @ Override public Page < BackupEngineBaseResourceInner > call ( ServiceResponse < Page < BackupEngineBaseResourceInner > > response ) { return response . body ( ) ; } } ) ;
public class XMLStringBuffer { /** * append * @ param c */ public void append ( char c ) { } }
if ( this . length + 1 > this . ch . length ) { int newLength = this . ch . length * 2 ; if ( newLength < this . ch . length + DEFAULT_SIZE ) newLength = this . ch . length + DEFAULT_SIZE ; char [ ] newch = new char [ newLength ] ; System . arraycopy ( this . ch , 0 , newch , 0 , this . length ) ; this . ch = newch ; } this . ch [ this . length ] = c ; this . length ++ ;
public class PluginProxy { /** * Executes the proxied plugin passing the received arguments as parameters . * @ param argsAry The parameters to be passed to the plugin * @ return The return value of the plugin . * @ throws BadThresholdException if an error occurs parsing the threshold */ public ReturnValue execute ( final String [ ] argsAry ) throws BadThresholdException { } }
// Parses the raw argument array with a commons-cli2 Parser configured for this
// plugin's option group, injects the execution context into the proxied plugin,
// switches the thread context classloader to the plugin's own, and runs it.
// A null return from the plugin is normalized to an UNKNOWN ReturnValue; option
// parse failures are logged and reported as UNKNOWN rather than rethrown.
// NOTE(review): the thread's context classloader is not restored afterwards -
// confirm whether the caller is expected to reset it.
// CommandLineParser clp = new PosixParser ( ) ; try { HelpFormatter hf = new HelpFormatter ( ) ; // configure a parser Parser cliParser = new Parser ( ) ; cliParser . setGroup ( mainOptionsGroup ) ; cliParser . setHelpFormatter ( hf ) ; CommandLine cl = cliParser . parse ( argsAry ) ; // Inject the context . . . InjectionUtils . inject ( proxiedPlugin , getContext ( ) ) ; Thread . currentThread ( ) . setContextClassLoader ( proxiedPlugin . getClass ( ) . getClassLoader ( ) ) ; ReturnValue retValue = proxiedPlugin . execute ( new PluginCommandLine ( cl ) ) ; if ( retValue == null ) { String msg = "Plugin [" + getPluginName ( ) + "] with args [" + StringUtils . join ( argsAry ) + "] returned null" ; retValue = new ReturnValue ( Status . UNKNOWN , msg ) ; } return retValue ; } catch ( OptionException e ) { String msg = e . getMessage ( ) ; LOG . error ( getContext ( ) , "Error parsing plugin '" + getPluginName ( ) + "' command line : " + msg , e ) ; return new ReturnValue ( Status . UNKNOWN , msg ) ; }
public class CcgParser { /** * Adds lexicon entry { @ code category } to { @ code chart } . This * method should be used by instances of { @ code CcgLexicon } * to initialize the lexicon entries of the parser . * @ param chart * @ param trigger * @ param category * @ param lexiconProb * @ param spanStart * @ param spanEnd * @ param triggerSpanStart * @ param triggerSpanEnd * @ param sentence * @ param lexiconNum */ public void addLexiconEntryToChart ( CcgChart chart , Object trigger , CcgCategory category , double lexiconProb , int spanStart , int spanEnd , int triggerSpanStart , int triggerSpanEnd , AnnotatedSentence sentence , int lexiconNum ) { } }
for ( LexiconScorer lexiconScorer : lexiconScorers ) { lexiconProb *= lexiconScorer . getCategoryWeight ( triggerSpanStart , triggerSpanEnd , sentence , category ) ; } // Add all possible chart entries to the ccg chart . ChartEntry chartEntry = ccgCategoryToChartEntry ( trigger , category , spanStart , spanEnd , triggerSpanStart , triggerSpanEnd , lexiconNum ) ; chart . addChartEntryForSpan ( chartEntry , lexiconProb , spanStart , spanEnd , syntaxVarType ) ;
public class Assert { /** * Asserts properties of a token . * @ param message the message to display on failure . * @ param expectedType the expected type of the token . * @ param expectedText the expected text of the token . * @ param token the token to assert . */ public static void assertToken ( String message , int expectedType , String expectedText , Token token ) { } }
// Delegates to the channel-aware overload, defaulting the expected channel to
// BaseRecognizer.DEFAULT_TOKEN_CHANNEL.
assertToken ( message , BaseRecognizer . DEFAULT_TOKEN_CHANNEL , expectedType , expectedText , token ) ;
public class GVRPointLight { /** * Set the ambient light intensity . * This designates the color of the ambient reflection . * It is multiplied by the material ambient color to derive * the hue of the ambient reflection for that material . * The built - in phong shader { @ link GVRPhongShader } uses a { @ code vec4 } uniform named * { @ code ambient _ intensity } to control the intensity of ambient light reflected . * @ param r red component ( 0 to 1) * @ param g green component ( 0 to 1) * @ param b blue component ( 0 to 1) * @ param a alpha component ( 0 to 1) */ public void setAmbientIntensity ( float r , float g , float b , float a ) { } }
// Forwards to the generic vec4 uniform setter; "ambient_intensity" is the
// uniform name consumed by the built-in phong shader.
setVec4 ( "ambient_intensity" , r , g , b , a ) ;
public class CmsResourceWrapperPropertyFile { /** * Reads the resource for the property file . < p > * @ param cms the initialized CmsObject * @ param resourcename the name of the property resource * @ param filter the filter to use * @ return the resource for the property file or null if not found * @ throws CmsException if something goes wrong */ private CmsResource getResource ( CmsObject cms , String resourcename , CmsResourceFilter filter ) throws CmsException { } }
// Resolves the real VFS resource behind a property-file pseudo path.
// A path whose last segment is PROPERTY_DIR maps to its parent resource; a
// "*.properties" name inside a PROPERTY_DIR folder maps back to the original
// resource after stripping the properties extension (a read failure yields null).
// NOTE(review): name.substring(2) assumes FOLDER_PREFIX is exactly two
// characters long - confirm against the constant's definition.
// Returns null when the path has no parent or matches neither pattern.
// the path without trailing slash String path = CmsResource . getParentFolder ( resourcename ) ; if ( path == null ) { return null ; } // the parent path String parent = CmsResource . getParentFolder ( path ) ; // the name of the resource String name = CmsResource . getName ( resourcename ) ; if ( name . endsWith ( "/" ) ) { name = name . substring ( 0 , name . length ( ) - 1 ) ; } // read the resource for the property dir if ( name . equals ( PROPERTY_DIR ) ) { return cms . readResource ( path , filter ) ; } if ( ( path . endsWith ( PROPERTY_DIR + "/" ) ) && ( name . endsWith ( CmsResourceWrapperUtils . EXTENSION_PROPERTIES ) ) ) { CmsResource res = null ; if ( name . startsWith ( FOLDER_PREFIX ) ) { name = name . substring ( 2 ) ; } try { String resPath = CmsResourceWrapperUtils . removeFileExtension ( cms , parent + name , CmsResourceWrapperUtils . EXTENSION_PROPERTIES ) ; res = cms . readResource ( resPath , filter ) ; } catch ( CmsException ex ) { // noop } return res ; } return null ;
public class ThreadPoolTaskScheduler { /** * { @ inheritDoc } * @ see org . audit4j . core . schedule . TaskScheduler # scheduleWithFixedDelay ( java . lang . Runnable , java . util . Date , long ) */ @ Override public ScheduledFuture < ? > scheduleWithFixedDelay ( Runnable task , Date startTime , long delay ) { } }
ScheduledExecutorService executor = getScheduledExecutor ( ) ; long initialDelay = startTime . getTime ( ) - System . currentTimeMillis ( ) ; try { return executor . scheduleWithFixedDelay ( errorHandlingTask ( task , true ) , initialDelay , delay , TimeUnit . MILLISECONDS ) ; } catch ( RejectedExecutionException ex ) { throw new TaskRejectedException ( "Executor [" + executor + "] did not accept task: " + task , ex ) ; }
public class Math { /** * Returns the first floating - point argument with the sign of the * second floating - point argument . Note that unlike the { @ link * StrictMath # copySign ( double , double ) StrictMath . copySign } * method , this method does not require NaN { @ code sign } * arguments to be treated as positive values ; implementations are * permitted to treat some NaN arguments as positive and other NaN * arguments as negative to allow greater performance . * @ param magnitude the parameter providing the magnitude of the result * @ param sign the parameter providing the sign of the result * @ return a value with the magnitude of { @ code magnitude } * and the sign of { @ code sign } . * @ since 1.6 */ public static double copySign ( double magnitude , double sign ) { } }
return Double . longBitsToDouble ( ( Double . doubleToRawLongBits ( sign ) & ( DoubleConsts . SIGN_BIT_MASK ) ) | ( Double . doubleToRawLongBits ( magnitude ) & ( DoubleConsts . EXP_BIT_MASK | DoubleConsts . SIGNIF_BIT_MASK ) ) ) ;
public class HTMLEntities { /** * unescapes html character inside a string * @ param str html code to unescape * @ return unescaped html code */ public static String unescapeHTML ( String str ) { } }
StringBuilder rtn = new StringBuilder ( ) ; int posStart = - 1 ; int posFinish = - 1 ; while ( ( posStart = str . indexOf ( '&' , posStart ) ) != - 1 ) { int last = posFinish + 1 ; posFinish = str . indexOf ( ';' , posStart ) ; if ( posFinish == - 1 ) break ; rtn . append ( str . substring ( last , posStart ) ) ; if ( posStart + 1 < posFinish ) { rtn . append ( unescapeHTMLEntity ( str . substring ( posStart + 1 , posFinish ) ) ) ; } else { rtn . append ( "&;" ) ; } posStart = posFinish + 1 ; } rtn . append ( str . substring ( posFinish + 1 ) ) ; return rtn . toString ( ) ;
public class TypeInfoCache { /** * Returns true if particular sqlType requires quoting . * This method is used internally by the driver , so it might disappear without notice . * @ param sqlType sql type as in java . sql . Types * @ return true if the type requires quoting * @ throws SQLException if something goes wrong */ public boolean requiresQuotingSqlType ( int sqlType ) throws SQLException { } }
switch ( sqlType ) { case Types . BIGINT : case Types . DOUBLE : case Types . FLOAT : case Types . INTEGER : case Types . REAL : case Types . SMALLINT : case Types . TINYINT : case Types . NUMERIC : case Types . DECIMAL : return false ; } return true ;
public class Vectors { /** * Returns true if the supplied vectors ' x and y components are equal to one another within * { @ link MathUtil # EPSILON } . */ public static boolean epsilonEquals ( IVector v1 , IVector v2 ) { } }
// Delegates to the tolerance-taking overload using the library-wide epsilon.
return epsilonEquals ( v1 , v2 , MathUtil . EPSILON ) ;
public class LocalConfig { /** * Get the configuration property value for configProperty . * @ param configProperty * The configuration property value to get * @ return The configuration property value or null if property does not exit . */ public synchronized String getConfigProperty ( Config . ConfigProperty configProperty ) { } }
// Looks the property up in the local base configuration first and falls back
// to the global SeLion Config when the local value is absent or blank.
// Rejects a null property argument; may still return null (or blank) when
// neither source has a value. Synchronized to guard concurrent reads of baseConfig.
SeLionLogger . getLogger ( ) . entering ( configProperty ) ; checkArgument ( configProperty != null , "Config property cannot be null" ) ; // Search locally then query SeLionConfig if not found String propValue = null ; if ( baseConfig . containsKey ( configProperty . getName ( ) ) ) { propValue = baseConfig . getString ( configProperty . getName ( ) ) ; } if ( StringUtils . isBlank ( propValue ) ) { propValue = Config . getConfigProperty ( configProperty ) ; } SeLionLogger . getLogger ( ) . exiting ( propValue ) ; return propValue ;
public class AgentSession { /** * Returns the transcripts of a given user . The answer will contain the complete history of * conversations that a user had . * @ param userID the id of the user to get his conversations . * @ return the transcripts of a given user . * @ throws XMPPException if an error occurs while getting the information . * @ throws SmackException * @ throws InterruptedException */ public Transcripts getTranscripts ( Jid userID ) throws XMPPException , SmackException , InterruptedException { } }
// Delegates to the transcript manager, scoped to this session's workgroup JID.
return transcriptManager . getTranscripts ( workgroupJID , userID ) ;
public class GetCommand { /** * Generates the value of Cache - Control header according to the content type . * @ param contentType content type * @ return Cache - Control value */ private String generateCacheControl ( Map < MediaType , String > cacheControlMap , String contentType ) { } }
// Picks the Cache-Control value whose media-type key best matches the content
// type, consulting the map's keys in MEDIA_TYPE_COMPARATOR order. Falls back
// to "no-cache" for a null/empty content type or when nothing matches.
// NOTE(review): the wildcard test inside the loop checks contentType, not the
// current mediaType, and is loop-invariant - confirm this is intentional.
// NOTE(review): contentType.split("/") assumes a well-formed "type/subtype"
// value; a string without '/' would throw ArrayIndexOutOfBoundsException.
ArrayList < MediaType > mediaTypesList = new ArrayList < MediaType > ( cacheControlMap . keySet ( ) ) ; Collections . sort ( mediaTypesList , MediaTypeHelper . MEDIA_TYPE_COMPARATOR ) ; String cacheControlValue = "no-cache" ; if ( contentType == null || contentType . equals ( "" ) ) { return cacheControlValue ; } for ( MediaType mediaType : mediaTypesList ) { if ( contentType . equals ( MediaType . WILDCARD ) ) { cacheControlValue = cacheControlMap . get ( MediaType . WILDCARD_TYPE ) ; break ; } else if ( mediaType . isCompatible ( new MediaType ( contentType . split ( "/" ) [ 0 ] , contentType . split ( "/" ) [ 1 ] ) ) ) { cacheControlValue = cacheControlMap . get ( mediaType ) ; break ; } } return cacheControlValue ;
public class BaseSerializer { /** * Deserialize a Transform List serialized using { @ link # serializeTransformList ( List ) } , or * an array serialized using { @ link # serialize ( Transform [ ] ) } * @ param str String representation ( YAML / JSON ) of the Transform list * @ return { @ code List < Transform > } */ public List < Transform > deserializeTransformList ( String str ) { } }
// Loads the YAML/JSON into the TransformList wrapper type and unwraps the list.
return load ( str , ListWrappers . TransformList . class ) . getList ( ) ;
public class DockerConfigReader { /** * Parse the contents of the config file and generate all possible * { @ link RegistryAuth } s , which are bundled into a { @ link RegistryConfigs } instance . * @ param configPath Path to config file . * @ return All registry auths that can be generated from the config file * @ throws IOException If the file cannot be read , or its JSON cannot be parsed */ public RegistryConfigs authForAllRegistries ( final Path configPath ) throws IOException { } }
// Builds a RegistryConfigs from a Docker client config file, in precedence order:
// 1) per-registry credential helpers ("credHelpers"); 2) explicit "auths"
// entries, where an empty entry falls back to the global "credsStore" helper
// when present; 3) for legacy files carrying none of those keys, the file
// itself is parsed as a bare RegistryConfigs map (a parse failure there is
// swallowed and the accumulated - empty - result is returned). Each registry
// is added at most once, and non-empty auth entries get serverAddress filled in.
checkNotNull ( configPath ) ; final DockerConfig config = MAPPER . readValue ( configPath . toFile ( ) , DockerConfig . class ) ; if ( config == null ) { return RegistryConfigs . empty ( ) ; } final RegistryConfigs . Builder registryConfigsBuilder = RegistryConfigs . builder ( ) ; final Map < String , String > credHelpers = config . credHelpers ( ) ; final boolean hasCredHelpers = credHelpers != null && ! credHelpers . isEmpty ( ) ; final Map < String , RegistryAuth > auths = config . auths ( ) ; final boolean hasAuths = auths != null && ! auths . isEmpty ( ) ; final String credsStore = config . credsStore ( ) ; final boolean hasCredsStore = credsStore != null ; final Set < String > addedRegistries = new HashSet < > ( ) ; // First use the credHelpers , if there are any if ( hasCredHelpers ) { for ( final Map . Entry < String , String > credHelpersEntry : credHelpers . entrySet ( ) ) { final String registry = credHelpersEntry . getKey ( ) ; final String aCredsStore = credHelpersEntry . getValue ( ) ; if ( ! addedRegistries . contains ( registry ) ) { addedRegistries . add ( registry ) ; registryConfigsBuilder . addConfig ( registry , authWithCredentialHelper ( aCredsStore , registry ) ) ; } } } // If there are any objects in " auths " , they could take two forms . // Older auths will map registry keys to objects with " auth " values , sometimes emails . // Newer auths will map registry keys to empty objects . They expect you // to use the credsStore to authenticate . if ( hasAuths ) { // We will use this empty RegistryAuth to check for empty auth values final RegistryAuth empty = RegistryAuth . builder ( ) . build ( ) ; for ( final Map . Entry < String , RegistryAuth > authEntry : auths . entrySet ( ) ) { final String registry = authEntry . getKey ( ) ; if ( addedRegistries . contains ( registry ) ) { continue ; } addedRegistries . add ( registry ) ; final RegistryAuth registryAuth = authEntry . getValue ( ) ; if ( registryAuth == null || registryAuth . 
equals ( empty ) ) { // We have an empty object . Can we use credsStore ? if ( hasCredsStore ) { registryConfigsBuilder . addConfig ( registry , authWithCredentialHelper ( credsStore , registry ) ) ; } // no else clause . If we can ' t fall back to credsStore , we can ' t auth . } else { // The auth object isn ' t empty . // We need to add the registry to its properties , then // add it to the RegistryConfigs registryConfigsBuilder . addConfig ( registry , registryAuth . toBuilder ( ) . serverAddress ( registry ) . build ( ) ) ; } } } // If there are no credHelpers or auths or credsStore , then the // config may be in a very old format . There aren ' t any keys for different // sections . The file is just a map of registries to auths . // In other words , it looks like a RegistryConfigs . // If we can map it to one , we ' ll return it . if ( ! ( hasAuths || hasCredHelpers || hasCredsStore ) ) { try { return MAPPER . readValue ( configPath . toFile ( ) , RegistryConfigs . class ) ; } catch ( IOException ignored ) { // Looks like that failed to parse . // Eat the exception , fall through , and return empty object . } } return registryConfigsBuilder . build ( ) ;
public class OgnlExpression { /** * < p > onUnresolvedExpression . < / p > * @ param expression a { @ link java . lang . String } object . * @ param targets a { @ link java . lang . Object } object . * @ return a { @ link com . greenpepper . extensions . ognl . OgnlExpression } object . */ public static OgnlExpression onUnresolvedExpression ( String expression , Object ... targets ) { } }
OgnlResolution resolver = new OgnlResolution ( expression ) ; return new OgnlExpression ( resolver . expressionsListToResolve ( ) , targets ) ;
public class AmazonEC2Client { /** * Creates an internet gateway for use with a VPC . After creating the internet gateway , you attach it to a VPC using * < a > AttachInternetGateway < / a > . * For more information about your VPC and internet gateway , see the < a * href = " https : / / docs . aws . amazon . com / AmazonVPC / latest / UserGuide / " > Amazon Virtual Private Cloud User Guide < / a > . * @ param createInternetGatewayRequest * @ return Result of the CreateInternetGateway operation returned by the service . * @ sample AmazonEC2 . CreateInternetGateway * @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / ec2-2016-11-15 / CreateInternetGateway " target = " _ top " > AWS API * Documentation < / a > */ @ Override public CreateInternetGatewayResult createInternetGateway ( CreateInternetGatewayRequest request ) { } }
// Generated-client pattern: run the before-execution hooks on the request,
// then hand off to the operation-specific executor.
request = beforeClientExecution ( request ) ; return executeCreateInternetGateway ( request ) ;
public class Utils4Swing { /** * Makes the glass pane visible and focused and stores the saves the current * state . * @ param source * Component to use when looking for the root pane container . * @ return State of the UI before the glasspane was visible . */ public static GlassPaneState showGlassPane ( final Component source ) { } }
// Captures the current focus owner and the glass pane's cursor, installs an
// empty MouseAdapter so the glass pane swallows mouse events, then shows and
// focuses the pane with a WAIT cursor. The returned GlassPaneState carries
// the pane, listener, previous focus owner and previous cursor so the caller
// can undo these changes later.
final Component focusOwner = KeyboardFocusManager . getCurrentKeyboardFocusManager ( ) . getFocusOwner ( ) ; final RootPaneContainer rootPaneContainer = findRootPaneContainer ( source ) ; final Component glassPane = rootPaneContainer . getGlassPane ( ) ; final MouseListener mouseListener = new MouseAdapter ( ) { } ; final Cursor cursor = glassPane . getCursor ( ) ; glassPane . addMouseListener ( mouseListener ) ; glassPane . setVisible ( true ) ; glassPane . requestFocus ( ) ; glassPane . setCursor ( new Cursor ( Cursor . WAIT_CURSOR ) ) ; return new GlassPaneState ( glassPane , mouseListener , focusOwner , cursor ) ;
public class GenerateEntityIdOnTheClient { /** * Attempts to get the document key from an instance * @ param entity Entity to get id from * @ param idHolder output parameter which holds document id * @ return true if id was read from entity */ public boolean tryGetIdFromInstance ( Object entity , Reference < String > idHolder ) { } }
// Reflectively reads the entity's identity field (located via
// getIdentityProperty, forcing accessibility). Only String-valued ids are
// reported; any other value - or no identity field at all - clears the
// holder and returns false. IllegalAccessException is rethrown as
// IllegalStateException since the field was made accessible first.
if ( entity == null ) { throw new IllegalArgumentException ( "Entity cannot be null" ) ; } try { Field identityProperty = getIdentityProperty ( entity . getClass ( ) ) ; if ( identityProperty != null ) { Object value = FieldUtils . readField ( identityProperty , entity , true ) ; if ( value instanceof String ) { idHolder . value = ( String ) value ; return true ; } } idHolder . value = null ; return false ; } catch ( IllegalAccessException e ) { throw new IllegalStateException ( e ) ; }
public class MappingServiceImpl { /** * Compares the attributes between the target repository and the mapping target . Applied Rules : - * The mapping target can not contain attributes which are not in the target repository - The * attributes of the mapping target with the same name as attributes in the target repository * should have the same type - If there are reference attributes , the name of the reference entity * should be the same in both the target repository as in the mapping target * @ param targetRepositoryEntityType the target repository EntityType to check * @ param mappingTargetEntityType the mapping target EntityType to check * @ throws MolgenisDataException if the types are not compatible */ private void compareTargetMetadatas ( EntityType targetRepositoryEntityType , EntityType mappingTargetEntityType ) { } }
// Indexes the repository's atomic attributes by name, then checks each
// mapping-target attribute: it must exist in the repository, share the same
// AttributeType, and - for reference attributes - point at the same reference
// entity id. Any mismatch raises MolgenisDataException with a detailed message.
Map < String , Attribute > targetRepositoryAttributeMap = newHashMap ( ) ; targetRepositoryEntityType . getAtomicAttributes ( ) . forEach ( attribute -> targetRepositoryAttributeMap . put ( attribute . getName ( ) , attribute ) ) ; for ( Attribute mappingTargetAttribute : mappingTargetEntityType . getAtomicAttributes ( ) ) { String mappingTargetAttributeName = mappingTargetAttribute . getName ( ) ; Attribute targetRepositoryAttribute = targetRepositoryAttributeMap . get ( mappingTargetAttributeName ) ; if ( targetRepositoryAttribute == null ) { throw new MolgenisDataException ( format ( "Target repository does not contain the following attribute: %s" , mappingTargetAttributeName ) ) ; } AttributeType targetRepositoryAttributeType = targetRepositoryAttribute . getDataType ( ) ; AttributeType mappingTargetAttributeType = mappingTargetAttribute . getDataType ( ) ; if ( ! mappingTargetAttributeType . equals ( targetRepositoryAttributeType ) ) { throw new MolgenisDataException ( format ( "attribute %s in the mapping target is type %s while attribute %s in the target repository is type %s. Please make sure the types are the same" , mappingTargetAttributeName , mappingTargetAttributeType , targetRepositoryAttribute . getName ( ) , targetRepositoryAttributeType ) ) ; } if ( isReferenceType ( mappingTargetAttribute ) ) { String mappingTargetRefEntityName = mappingTargetAttribute . getRefEntity ( ) . getId ( ) ; String targetRepositoryRefEntityName = targetRepositoryAttribute . getRefEntity ( ) . getId ( ) ; if ( ! mappingTargetRefEntityName . equals ( targetRepositoryRefEntityName ) ) { throw new MolgenisDataException ( format ( "In the mapping target, attribute %s of type %s has reference entity %s while in the target repository attribute %s of type %s has reference entity %s. 
" + "Please make sure the reference entities of your mapping target are pointing towards the same reference entities as your target repository" , mappingTargetAttributeName , mappingTargetAttributeType , mappingTargetRefEntityName , targetRepositoryAttribute . getName ( ) , targetRepositoryAttributeType , targetRepositoryRefEntityName ) ) ; } } }
public class TypicalFaihyApiFailureHook { @ Override public ApiResponse handleValidationError ( ApiFailureResource resource ) { } }
final FaihyUnifiedFailureType failureType = FaihyUnifiedFailureType . VALIDATION_ERROR ; final FaihyUnifiedFailureResult result = createFailureResult ( failureType , resource , null ) ; return asJson ( result ) . httpStatus ( prepareBusinessFailureStatus ( ) ) ;
public class ReflectionHelper {

    /**
     * Same as Arrays.asList(...), but automatically converts primitive arrays by
     * boxing each element via the reflective {@link Array} accessors.
     *
     * @param array an array object (primitive or reference component type)
     * @return List of objects representing the given array contents
     */
    public static List<Object> array2ObjectList(final Object array) {
        final int size = Array.getLength(array);
        // Pre-size the list so no growth reallocation happens while copying.
        final List<Object> boxed = new ArrayList<Object>(size);
        int index = 0;
        while (index < size) {
            // Array.get boxes primitive components automatically.
            boxed.add(Array.get(array, index));
            index++;
        }
        return boxed;
    }
}
public class SuffixLocatorConnectionFactory { /** * { @ inheritDoc } */ @ Override public Transcoder < Object > getDefaultTranscoder ( ) { } }
final SerializingTranscoder transcoder = new SerializingTranscoder ( ) ; transcoder . setCompressionThreshold ( SerializingTranscoder . DEFAULT_COMPRESSION_THRESHOLD ) ; return new TranscoderWrapperStatisticsSupport ( _statistics , transcoder ) ;
public class Tracer { /** * Gets a string of spaces of the length specified . * @ param sb The string builder to append to . * @ param numSpaces The number of spaces in the string . */ @ VisibleForTesting static void appendSpaces ( StringBuilder sb , int numSpaces ) { } }
if ( numSpaces > 16 ) { logger . warning ( "Tracer.appendSpaces called with large numSpaces" ) ; // Avoid long loop in case some bug in the caller numSpaces = 16 ; } while ( numSpaces >= 5 ) { sb . append ( " " ) ; numSpaces -= 5 ; } // We know it ' s less than 5 now switch ( numSpaces ) { case 1 : sb . append ( " " ) ; break ; case 2 : sb . append ( " " ) ; break ; case 3 : sb . append ( " " ) ; break ; case 4 : sb . append ( " " ) ; break ; }
public class SchemaConfiguration { /** * Add tableGenerator to table info . * @ param appMetadata * @ param persistenceUnit * @ param tableInfos * @ param entityMetadata * @ param isCompositeId */ private void addTableGenerator ( ApplicationMetadata appMetadata , String persistenceUnit , List < TableInfo > tableInfos , EntityMetadata entityMetadata ) { } }
Metamodel metamodel = appMetadata . getMetamodel ( persistenceUnit ) ; IdDiscriptor keyValue = ( ( MetamodelImpl ) metamodel ) . getKeyValue ( entityMetadata . getEntityClazz ( ) . getName ( ) ) ; if ( keyValue != null && keyValue . getTableDiscriptor ( ) != null ) { TableInfo tableGeneratorDiscriptor = new TableInfo ( keyValue . getTableDiscriptor ( ) . getTable ( ) , "CounterColumnType" , String . class , keyValue . getTableDiscriptor ( ) . getPkColumnName ( ) ) ; if ( ! tableInfos . contains ( tableGeneratorDiscriptor ) ) { tableGeneratorDiscriptor . addColumnInfo ( getJoinColumn ( tableGeneratorDiscriptor , keyValue . getTableDiscriptor ( ) . getValueColumnName ( ) , Long . class ) ) ; tableInfos . add ( tableGeneratorDiscriptor ) ; } }
public class ClientFactoryBuilder { /** * Sets the idle timeout of a socket connection . The connection is closed if there is no request in * progress for this amount of time . */ public ClientFactoryBuilder idleTimeout ( Duration idleTimeout ) { } }
requireNonNull ( idleTimeout , "idleTimeout" ) ; checkArgument ( ! idleTimeout . isNegative ( ) , "idleTimeout: %s (expected: >= 0)" , idleTimeout ) ; return idleTimeoutMillis ( idleTimeout . toMillis ( ) ) ;
public class MetaMainTask { /** * Get the task ' s display name consisting of the general task name , * indentation showing the tree structure depending on the subtask level * and optionally a name suffix given from a supertask . * @ return display name */ public String getDisplayName ( ) { } }
StringBuilder name = new StringBuilder ( ) ; // add indentation representing tree structure of tasks
// One indent unit per ancestor level: blank when that ancestor was the last child on its
// level, a vertical connector when it has later siblings still to be drawn below.
for ( int i = 0 ; i < this . getSubtaskLevel ( ) - 1 ; i ++ ) { if ( this . isLastSubtaskOnLevel [ i ] ) { name . append ( " " ) ; } else { name . append ( "│ " ) ; } }
// Branch connector for this task itself: a corner if it is the last sibling, a tee otherwise.
// (Root-level tasks, subtask level 0, get no connector at all.)
if ( this . getSubtaskLevel ( ) > 0 ) { if ( this . isLastSubtaskOnLevel [ this . getSubtaskLevel ( ) - 1 ] ) { name . append ( "└──" ) ; } else { name . append ( "├──" ) ; } } // append class name
name . append ( this . getClass ( ) . getSimpleName ( ) ) ; // append optional suffix
// NOTE(review): nameSuffix is appended unconditionally — presumably it is never null
// (empty string when unset); confirm against how supertasks populate it.
name . append ( " " ) . append ( this . nameSuffix ) ; return name . toString ( ) ;
public class CmsScrollBar { /** * Starts the mouse sliding . < p > * @ param event the mouse event */ private void startMouseSliding ( Event event ) { } }
if ( ! m_slidingMouse ) { m_slidingMouse = true ; DOM . setCapture ( getElement ( ) ) ; m_mouseSlidingStartY = event . getClientY ( ) ; m_mouseSlidingStartValue = m_currentValue ; CmsDebugLog . getInstance ( ) . printLine ( "Mouse sliding started with clientY: " + m_mouseSlidingStartY + " and start value: " + m_mouseSlidingStartValue + " and a max value of " + getMaximumVerticalScrollPosition ( ) ) ; }
public class DTBuilder { /** * produces a normalized date time , using zero for the time fields if none * were provided . * @ return not null */ public DateTimeValue toDateTime ( ) { } }
// NOTE(review): normalize() presumably canonicalizes out-of-range component fields
// (year/month/day/hour/minute/second) before the value is built — confirm its contract.
normalize ( ) ;
// Build the immutable date-time from the builder's component fields; any time fields
// that were never set default to zero, per this method's javadoc.
return new DateTimeValueImpl ( year , month , day , hour , minute , second ) ;
public class AttributesBuilder { /** * Sets custom or unlisted attribute * @ param attributeName * @ param attributeValue * @ return this instance . */ public AttributesBuilder attribute ( String attributeName , Object attributeValue ) { } }
// Delegate straight to the backing attributes container; no validation is done here,
// so the container's own rules (if any) govern accepted names and values.
this . attributes . setAttribute ( attributeName , attributeValue ) ;
// Return this builder to support fluent chaining.
return this ;