signature
stringlengths
43
39.1k
implementation
stringlengths
0
450k
public class DescribeCacheSubnetGroupsResult { /** * A list of cache subnet groups . Each element in the list contains detailed information about one group . * @ return A list of cache subnet groups . Each element in the list contains detailed information about one group . */ public java . util . List < CacheSubnetGroup > getCacheSubnetGroups ( ) { } }
if ( cacheSubnetGroups == null ) { cacheSubnetGroups = new com . amazonaws . internal . SdkInternalList < CacheSubnetGroup > ( ) ; } return cacheSubnetGroups ;
public class RepeatableInputStream { /** * This method can only be used while less data has been read from the input * stream than fits into the buffer . The readLimit parameter is ignored * entirely . */ public void mark ( int readlimit ) { } }
abortIfNeeded ( ) ; if ( log . isDebugEnabled ( ) ) { log . debug ( "Input stream marked at " + bytesReadPastMark + " bytes" ) ; } if ( bytesReadPastMark <= bufferSize && buffer != null ) { /* * Clear buffer of already - read data to make more space . It ' s safe * to cast bytesReadPastMark to an int because it is known to be * less than bufferSize , which is an int . */ byte [ ] newBuffer = new byte [ this . bufferSize ] ; System . arraycopy ( buffer , bufferOffset , newBuffer , 0 , ( int ) ( bytesReadPastMark - bufferOffset ) ) ; this . buffer = newBuffer ; this . bytesReadPastMark -= bufferOffset ; this . bufferOffset = 0 ; } else { // If mark is called after the buffer was already exceeded , create a new buffer . this . bufferOffset = 0 ; this . bytesReadPastMark = 0 ; this . buffer = new byte [ this . bufferSize ] ; }
public class Regex { /** * Checks whether the given string matches the regular expression . * @ param string * the string to be matched against the regular expression * @ return * < code > true < / code > if the string matches the regular expression , * < code > false < / code > otherwise . */ public boolean matches ( String string ) { } }
Matcher matcher = pattern . matcher ( string ) ; boolean result = matcher . matches ( ) ; return result ;
public class PatchHandler { /** * Update a resource with Sparql - Update and build an HTTP response * @ param res the resource * @ return the Response builder */ public ResponseBuilder updateResource ( final Resource res ) { } }
final String baseUrl = getBaseUrl ( ) ; final String identifier = baseUrl + req . getPartition ( ) + req . getPath ( ) + ( ACL . equals ( req . getExt ( ) ) ? "?ext=acl" : "" ) ; if ( isNull ( sparqlUpdate ) ) { throw new WebApplicationException ( "Missing Sparql-Update body" , BAD_REQUEST ) ; } final Session session = ofNullable ( req . getSession ( ) ) . orElseGet ( HttpSession :: new ) ; // Check if this is already deleted checkDeleted ( res , identifier ) ; // Check the cache final EntityTag etag = new EntityTag ( md5Hex ( res . getModified ( ) + identifier ) ) ; checkCache ( req . getRequest ( ) , res . getModified ( ) , etag ) ; LOGGER . debug ( "Updating {} via PATCH" , identifier ) ; final IRI graphName = ACL . equals ( req . getExt ( ) ) ? PreferAccessControl : PreferUserManaged ; final IRI otherGraph = ACL . equals ( req . getExt ( ) ) ? PreferUserManaged : PreferAccessControl ; // Put triples in buffer final List < Triple > triples = updateGraph ( res , graphName ) ; try ( final TrellisDataset dataset = TrellisDataset . createDataset ( ) ) { triples . stream ( ) . map ( skolemizeTriples ( resourceService , baseUrl ) ) . map ( t -> rdf . createQuad ( graphName , t . getSubject ( ) , t . getPredicate ( ) , t . getObject ( ) ) ) . forEachOrdered ( dataset :: add ) ; // Add audit - related triples audit . ifPresent ( svc -> svc . update ( res . getIdentifier ( ) , session ) . stream ( ) . map ( skolemizeQuads ( resourceService , baseUrl ) ) . forEachOrdered ( dataset :: add ) ) ; // Add existing LDP type dataset . add ( rdf . createQuad ( PreferServerManaged , res . getIdentifier ( ) , RDF . type , res . getInteractionModel ( ) ) ) ; // Check any constraints final List < ConstraintViolation > violations = constraintServices . stream ( ) . flatMap ( svc -> dataset . getGraph ( graphName ) . map ( Stream :: of ) . orElseGet ( Stream :: empty ) . flatMap ( g -> svc . constrainedBy ( res . getInteractionModel ( ) , baseUrl , g ) ) ) . 
collect ( toList ( ) ) ; if ( ! violations . isEmpty ( ) ) { final ResponseBuilder err = status ( CONFLICT ) ; violations . forEach ( v -> err . link ( v . getConstraint ( ) . getIRIString ( ) , LDP . constrainedBy . getIRIString ( ) ) ) ; throw new WebApplicationException ( err . build ( ) ) ; } // When updating User or ACL triples , be sure to add the other category to the dataset try ( final Stream < ? extends Triple > remaining = res . stream ( otherGraph ) ) { remaining . map ( t -> rdf . createQuad ( otherGraph , t . getSubject ( ) , t . getPredicate ( ) , t . getObject ( ) ) ) . forEachOrdered ( dataset :: add ) ; } // Save new dataset if ( resourceService . put ( res . getIdentifier ( ) , dataset . asDataset ( ) ) ) { final ResponseBuilder builder = ok ( ) ; ldpResourceTypes ( res . getInteractionModel ( ) ) . map ( IRI :: getIRIString ) . forEach ( type -> builder . link ( type , "type" ) ) ; return ofNullable ( req . getPrefer ( ) ) . flatMap ( Prefer :: getPreference ) . filter ( PREFER_REPRESENTATION :: equals ) . map ( prefer -> { final RDFSyntax syntax = getSyntax ( req . getHeaders ( ) . getAcceptableMediaTypes ( ) , empty ( ) ) . orElseThrow ( NotAcceptableException :: new ) ; final IRI profile = ofNullable ( getProfile ( req . getHeaders ( ) . getAcceptableMediaTypes ( ) , syntax ) ) . orElseGet ( ( ) -> getDefaultProfile ( syntax , identifier ) ) ; final StreamingOutput stream = new StreamingOutput ( ) { @ Override public void write ( final OutputStream out ) throws IOException { ioService . write ( triples . stream ( ) . map ( unskolemizeTriples ( resourceService , baseUrl ) ) , out , syntax , profile ) ; } } ; return builder . header ( PREFERENCE_APPLIED , "return=representation" ) . type ( syntax . mediaType ) . entity ( stream ) ; } ) . orElseGet ( ( ) -> builder . status ( NO_CONTENT ) ) ; } } LOGGER . error ( "Unable to persist data to location at {}" , res . getIdentifier ( ) ) ; return serverError ( ) . type ( TEXT_PLAIN ) . 
entity ( "Unable to persist data. Please consult the logs for more information" ) ;
public class JavacParser { /** * Return type tag of basic type represented by token , * NONE if token is not a basic type identifier . */ static TypeTag typetag ( TokenKind token ) { } }
switch ( token ) { case BYTE : return TypeTag . BYTE ; case CHAR : return TypeTag . CHAR ; case SHORT : return TypeTag . SHORT ; case INT : return TypeTag . INT ; case LONG : return TypeTag . LONG ; case FLOAT : return TypeTag . FLOAT ; case DOUBLE : return TypeTag . DOUBLE ; case BOOLEAN : return TypeTag . BOOLEAN ; default : return TypeTag . NONE ; }
public class ClientBuilder { /** * Returns a newly - created client which implements the specified { @ code clientType } , based on the * properties of this builder . * @ throws IllegalArgumentException if the scheme of the { @ code uri } specified in * { @ link # ClientBuilder ( String ) } or the specified { @ code clientType } is * unsupported for the scheme */ public < T > T build ( Class < T > clientType ) { } }
requireNonNull ( clientType , "clientType" ) ; return factory . newClient ( uri , clientType , buildOptions ( ) ) ;
public class InternalXbaseParser { /** * InternalXbase . g : 1925:1 : ruleQualifiedNameInStaticImport : ( ( ( rule _ _ QualifiedNameInStaticImport _ _ Group _ _ 0 ) ) ( ( rule _ _ QualifiedNameInStaticImport _ _ Group _ _ 0 ) * ) ) ; */ public final void ruleQualifiedNameInStaticImport ( ) throws RecognitionException { } }
int stackSize = keepStackSize ( ) ; try { // InternalXbase . g : 1929:2 : ( ( ( ( rule _ _ QualifiedNameInStaticImport _ _ Group _ _ 0 ) ) ( ( rule _ _ QualifiedNameInStaticImport _ _ Group _ _ 0 ) * ) ) ) // InternalXbase . g : 1930:2 : ( ( ( rule _ _ QualifiedNameInStaticImport _ _ Group _ _ 0 ) ) ( ( rule _ _ QualifiedNameInStaticImport _ _ Group _ _ 0 ) * ) ) { // InternalXbase . g : 1930:2 : ( ( ( rule _ _ QualifiedNameInStaticImport _ _ Group _ _ 0 ) ) ( ( rule _ _ QualifiedNameInStaticImport _ _ Group _ _ 0 ) * ) ) // InternalXbase . g : 1931:3 : ( ( rule _ _ QualifiedNameInStaticImport _ _ Group _ _ 0 ) ) ( ( rule _ _ QualifiedNameInStaticImport _ _ Group _ _ 0 ) * ) { // InternalXbase . g : 1931:3 : ( ( rule _ _ QualifiedNameInStaticImport _ _ Group _ _ 0 ) ) // InternalXbase . g : 1932:4 : ( rule _ _ QualifiedNameInStaticImport _ _ Group _ _ 0 ) { if ( state . backtracking == 0 ) { before ( grammarAccess . getQualifiedNameInStaticImportAccess ( ) . getGroup ( ) ) ; } // InternalXbase . g : 1933:4 : ( rule _ _ QualifiedNameInStaticImport _ _ Group _ _ 0 ) // InternalXbase . g : 1933:5 : rule _ _ QualifiedNameInStaticImport _ _ Group _ _ 0 { pushFollow ( FOLLOW_3 ) ; rule__QualifiedNameInStaticImport__Group__0 ( ) ; state . _fsp -- ; if ( state . failed ) return ; } if ( state . backtracking == 0 ) { after ( grammarAccess . getQualifiedNameInStaticImportAccess ( ) . getGroup ( ) ) ; } } // InternalXbase . g : 1936:3 : ( ( rule _ _ QualifiedNameInStaticImport _ _ Group _ _ 0 ) * ) // InternalXbase . g : 1937:4 : ( rule _ _ QualifiedNameInStaticImport _ _ Group _ _ 0 ) * { if ( state . backtracking == 0 ) { before ( grammarAccess . getQualifiedNameInStaticImportAccess ( ) . getGroup ( ) ) ; } // InternalXbase . g : 1938:4 : ( rule _ _ QualifiedNameInStaticImport _ _ Group _ _ 0 ) * loop1 : do { int alt1 = 2 ; int LA1_0 = input . LA ( 1 ) ; if ( ( LA1_0 == RULE_ID ) ) { int LA1_2 = input . 
LA ( 2 ) ; if ( ( LA1_2 == 43 ) ) { alt1 = 1 ; } } switch ( alt1 ) { case 1 : // InternalXbase . g : 1938:5 : rule _ _ QualifiedNameInStaticImport _ _ Group _ _ 0 { pushFollow ( FOLLOW_3 ) ; rule__QualifiedNameInStaticImport__Group__0 ( ) ; state . _fsp -- ; if ( state . failed ) return ; } break ; default : break loop1 ; } } while ( true ) ; if ( state . backtracking == 0 ) { after ( grammarAccess . getQualifiedNameInStaticImportAccess ( ) . getGroup ( ) ) ; } } } } } catch ( RecognitionException re ) { reportError ( re ) ; recover ( input , re ) ; } finally { restoreStackSize ( stackSize ) ; } return ;
public class CmsToolManager { /** * Sets the base tool path . < p > * @ param wp the workplace object * @ param baseToolPath the base tool path to set */ public void setBaseToolPath ( CmsWorkplace wp , String baseToolPath ) { } }
// use last used base if param empty if ( CmsStringUtil . isEmpty ( baseToolPath ) || baseToolPath . trim ( ) . equals ( "null" ) ) { baseToolPath = getBaseToolPath ( wp ) ; } baseToolPath = repairPath ( wp , baseToolPath ) ; // set it CmsToolUserData userData = getUserData ( wp ) ; userData . setBaseTool ( userData . getRootKey ( ) , baseToolPath ) ;
public class ParamBuilder { /** * Called to generate code that writes the out params for array type */ protected void writeArrayOutParamsToProxy ( VariableElement param , MethodSpec . Builder methodBuilder ) { } }
methodBuilder . beginControlFlow ( "if (" + param . getSimpleName ( ) + " == null)" ) ; methodBuilder . addStatement ( "data.writeInt(-1)" ) ; methodBuilder . endControlFlow ( ) ; methodBuilder . beginControlFlow ( "else" ) ; methodBuilder . addStatement ( "data.writeInt(" + param . getSimpleName ( ) + ".length)" ) ; methodBuilder . endControlFlow ( ) ;
public class GitRepo { /** * Checks out a branch for a project . * @ param project - a Git project * @ param branch - branch to check out */ void checkout ( File project , String branch ) { } }
try { String currentBranch = currentBranch ( project ) ; if ( currentBranch . equals ( branch ) ) { log . info ( "Won't check out the same branch. Skipping" ) ; return ; } log . info ( "Checking out branch [" + branch + "]" ) ; checkoutBranch ( project , branch ) ; log . info ( "Successfully checked out the branch [" + branch + "]" ) ; } catch ( Exception e ) { throw new IllegalStateException ( e ) ; }
public class CreateDocumentationVersionRequestMarshaller { /** * Marshall the given parameter object . */ public void marshall ( CreateDocumentationVersionRequest createDocumentationVersionRequest , ProtocolMarshaller protocolMarshaller ) { } }
if ( createDocumentationVersionRequest == null ) { throw new SdkClientException ( "Invalid argument passed to marshall(...)" ) ; } try { protocolMarshaller . marshall ( createDocumentationVersionRequest . getRestApiId ( ) , RESTAPIID_BINDING ) ; protocolMarshaller . marshall ( createDocumentationVersionRequest . getDocumentationVersion ( ) , DOCUMENTATIONVERSION_BINDING ) ; protocolMarshaller . marshall ( createDocumentationVersionRequest . getStageName ( ) , STAGENAME_BINDING ) ; protocolMarshaller . marshall ( createDocumentationVersionRequest . getDescription ( ) , DESCRIPTION_BINDING ) ; } catch ( Exception e ) { throw new SdkClientException ( "Unable to marshall request to JSON: " + e . getMessage ( ) , e ) ; }
public class ContactsApi { /** * Get alliance contacts ( asynchronously ) Return contacts of an alliance - - - * This route is cached for up to 300 seconds SSO Scope : * esi - alliances . read _ contacts . v1 * @ param allianceId * An EVE alliance ID ( required ) * @ param datasource * The server name you would like data from ( optional , default to * tranquility ) * @ param ifNoneMatch * ETag from a previous request . A 304 will be returned if this * matches the current ETag ( optional ) * @ param page * Which page of results to return ( optional , default to 1) * @ param token * Access token to use if unable to set a header ( optional ) * @ param callback * The callback to be executed when the API call finishes * @ return The request call * @ throws ApiException * If fail to process the API call , e . g . serializing the request * body object */ public com . squareup . okhttp . Call getAlliancesAllianceIdContactsAsync ( Integer allianceId , String datasource , String ifNoneMatch , Integer page , String token , final ApiCallback < List < AllianceContactsResponse > > callback ) throws ApiException { } }
com . squareup . okhttp . Call call = getAlliancesAllianceIdContactsValidateBeforeCall ( allianceId , datasource , ifNoneMatch , page , token , callback ) ; Type localVarReturnType = new TypeToken < List < AllianceContactsResponse > > ( ) { } . getType ( ) ; apiClient . executeAsync ( call , localVarReturnType , callback ) ; return call ;
public class Spies { /** * Monitors calls to a ternary function . * @ param < R > the function result type * @ param < T1 > the function first parameter type * @ param < T2 > the function second parameter type * @ param < T3 > the function third parameter type * @ param function the function that will be monitored * @ param calls a value holder accumulating calls * @ return the proxied function */ public static < T1 , T2 , T3 , R > TriFunction < T1 , T2 , T3 , R > monitor ( TriFunction < T1 , T2 , T3 , R > function , AtomicLong calls ) { } }
return new TernaryMonitoringFunction < T1 , T2 , T3 , R > ( function , calls ) ;
public class Table { /** * Updates the provisioned throughput for this table . Setting the * throughput for a table helps you manage performance and is part of the * provisioned throughput feature of DynamoDB . * The provisioned throughput values can be upgraded or downgraded based * on the maximums and minimums listed in the * < a href = " http : / / docs . aws . amazon . com / amazondynamodb / latest / developerguide / Limits . html " > Limits < / a > * section in the Amazon DynamoDB Developer Guide . * This table must be in the < code > ACTIVE < / code > state for this operation * to succeed . < i > UpdateTable < / i > is an asynchronous operation ; while * executing the operation , the table is in the < code > UPDATING < / code > * state . While the table is in the < code > UPDATING < / code > state , the * table still has the provisioned throughput from before the call . The * new provisioned throughput setting is in effect only when the table * returns to the < code > ACTIVE < / code > state after the < i > UpdateTable < / i > * operation . * You can create , update or delete indexes using < i > UpdateTable < / i > . * @ param spec used to specify all the detailed parameters * @ return the updated table description returned from DynamoDB . */ public TableDescription updateTable ( UpdateTableSpec spec ) { } }
UpdateTableRequest req = spec . getRequest ( ) ; req . setTableName ( getTableName ( ) ) ; UpdateTableResult result = client . updateTable ( req ) ; return this . tableDescription = result . getTableDescription ( ) ;
public class OmsPitfiller { /** * Try to find a drainage direction for undefinite cell . * < p > If the drainage direction is found * then puts it in the dir matrix else keeps its index in is and js . * < p > N . B . in the { @ link # setDirection ( double , int , int , int [ ] [ ] , double [ ] ) } method the drainage * directions is set only if the slope between two pixel is positive . < b > At this step the dir * value is set also if the slope is equal to zero . < / b > < / p > * @ param pitsCount the number of indefinite cell in the dir matrix . * @ return the number of unresolved pixel ( still pits ) after running the method or - 1 if the process has been cancelled . */ private int resolveFlats ( int pitsCount ) { } }
int stillPitsCount ; currentPitsCount = pitsCount ; do { if ( pm . isCanceled ( ) ) { return - 1 ; } pitsCount = currentPitsCount ; currentPitsCount = 0 ; for ( int ip = 1 ; ip <= pitsCount ; ip ++ ) { dn [ ip ] = 0 ; } for ( int k = 1 ; k <= 8 ; k ++ ) { for ( int pitIndex = 1 ; pitIndex <= pitsCount ; pitIndex ++ ) { double elevDelta = pitIter . getSampleDouble ( currentPitCols [ pitIndex ] , currentPitRows [ pitIndex ] , 0 ) - pitIter . getSampleDouble ( currentPitCols [ pitIndex ] + DIR_WITHFLOW_EXITING_INVERTED [ k ] [ 0 ] , currentPitRows [ pitIndex ] + DIR_WITHFLOW_EXITING_INVERTED [ k ] [ 1 ] , 0 ) ; if ( ( elevDelta >= 0. ) && ( ( dir [ currentPitCols [ pitIndex ] + DIR_WITHFLOW_EXITING_INVERTED [ k ] [ 0 ] ] [ currentPitRows [ pitIndex ] + DIR_WITHFLOW_EXITING_INVERTED [ k ] [ 1 ] ] != 0 ) && ( dn [ pitIndex ] == 0 ) ) ) dn [ pitIndex ] = k ; } } stillPitsCount = 1 ; /* location of point on stack with lowest elevation */ for ( int pitIndex = 1 ; pitIndex <= pitsCount ; pitIndex ++ ) { if ( dn [ pitIndex ] > 0 ) { dir [ currentPitCols [ pitIndex ] ] [ currentPitRows [ pitIndex ] ] = dn [ pitIndex ] ; } else { currentPitsCount ++ ; currentPitRows [ currentPitsCount ] = currentPitRows [ pitIndex ] ; currentPitCols [ currentPitsCount ] = currentPitCols [ pitIndex ] ; if ( pitIter . getSampleDouble ( currentPitCols [ currentPitsCount ] , currentPitRows [ currentPitsCount ] , 0 ) < pitIter . getSampleDouble ( currentPitCols [ stillPitsCount ] , currentPitRows [ stillPitsCount ] , 0 ) ) stillPitsCount = currentPitsCount ; } } // out . println ( " vdn n = " + n + " nis = " + nis ) ; } while ( currentPitsCount < pitsCount ) ; return stillPitsCount ;
public class IOUtils { /** * Writes the content provided by the given source input stream into the given destination output * stream . * < p > The input stream is guaranteed to be closed at the end of this method . * < p > Sample use : * < pre > * static void copy ( InputStream inputStream , File file ) throws IOException { * FileOutputStream out = new FileOutputStream ( file ) ; * try { * IOUtils . copy ( inputStream , out ) ; * } finally { * out . close ( ) ; * < / pre > * @ param inputStream source input stream * @ param outputStream destination output stream */ public static void copy ( InputStream inputStream , OutputStream outputStream ) throws IOException { } }
copy ( inputStream , outputStream , true ) ;
public class Distance { /** * Estimates the weighted manhattan distance of two Associative Arrays . * @ param a1 * @ param a2 * @ param columnWeights * @ return */ public static double manhattanWeighted ( AssociativeArray a1 , AssociativeArray a2 , Map < Object , Double > columnWeights ) { } }
Map < Object , Double > columnDistances = columnDistances ( a1 , a2 , columnWeights . keySet ( ) ) ; double distance = 0.0 ; for ( Map . Entry < Object , Double > entry : columnDistances . entrySet ( ) ) { distance += Math . abs ( entry . getValue ( ) ) * columnWeights . get ( entry . getKey ( ) ) ; } return distance ;
public class HttpUtil { /** * Parse given query string of the form < code > name1 = value1 & amp ; name2 = value2 < / code > and return it as { @ link Map } * @ param queryQuery string * @ returnParsed results * @ throws UnsupportedEncodingException */ public static Map < String , String > queryToParams ( String query ) throws UnsupportedEncodingException { } }
Map < String , String > query_pairs = new HashMap < String , String > ( ) ; String [ ] pairs = query . split ( "&" ) ; for ( String pair : pairs ) { int idx = pair . indexOf ( "=" ) ; query_pairs . put ( URLDecoder . decode ( pair . substring ( 0 , idx ) , "UTF-8" ) , URLDecoder . decode ( pair . substring ( idx + 1 ) , "UTF-8" ) ) ; } return query_pairs ;
public class GraphPath { /** * Remove the path ' s elements before the * specified one which is starting * at the specified point . The specified element will * be removed . * < p > This function removes until the < i > last occurence < / i > * of the given object . * @ param obj is the segment to remove * @ param pt is the point on which the segment was connected * as its first point . * @ return < code > true < / code > on success , otherwise < code > false < / code > */ public boolean removeUntilLast ( ST obj , PT pt ) { } }
return removeUntil ( lastIndexOf ( obj , pt ) , true ) ;
public class AnswerFunctionalInterfaces { /** * Construct an answer from a six parameter answer interface * @ param answer answer interface * @ param < T > return type * @ param < A > input parameter 1 type * @ param < B > input parameter 2 type * @ param < C > input parameter 3 type * @ param < D > input parameter 4 type * @ param < E > input parameter 5 type * @ param < F > input parameter 6 type * @ return a new answer object */ public static < T , A , B , C , D , E , F > Answer < T > toAnswer ( final Answer6 < T , A , B , C , D , E , F > answer ) { } }
return new Answer < T > ( ) { @ SuppressWarnings ( "unchecked" ) public T answer ( InvocationOnMock invocation ) throws Throwable { return answer . answer ( ( A ) invocation . getArgument ( 0 ) , ( B ) invocation . getArgument ( 1 ) , ( C ) invocation . getArgument ( 2 ) , ( D ) invocation . getArgument ( 3 ) , ( E ) invocation . getArgument ( 4 ) , ( F ) invocation . getArgument ( 5 ) ) ; } } ;
public class CellModel { /** * Add a { @ link Formatter } which can be used to format an Object for rendering . Many * { @ link Formatter } instances can be registered and will be executed in the order in * which they were added . This method is provided as a service to CellModel subclasses ; * the use of formatters can vary based on the implementation of a { @ link CellDecorator } . * @ param formatter the { @ link Formatter } to add */ public void addFormatter ( Formatter formatter ) { } }
if ( _formatters == null ) _formatters = new ArrayList /* < Formatter > */ ( ) ; _formatters . add ( formatter ) ;
public class PathClassLoader { /** * Helper for executing static methods on a Class * @ param className String fully qualified class * @ param methodName String method name * @ param params List of method parameters * @ return Object result * @ throws ClassLoaderException exception */ public Object execStaticMethod ( String className , String methodName , List params ) throws ClassLoaderException { } }
return execStaticMethod ( className , methodName , params , null ) ;
public class Connection { /** * Getter for the Type of Connection * @ return the connection Type . */ @ AuthType int getType ( ) { } }
switch ( strategy ) { case "auth0" : return AuthType . DATABASE ; case "sms" : case "email" : return AuthType . PASSWORDLESS ; case "ad" : case "adfs" : case "auth0-adldap" : case "custom" : case "google-apps" : case "google-openid" : case "ip" : case "mscrm" : case "office365" : case "pingfederate" : case "samlp" : case "sharepoint" : case "waad" : return AuthType . ENTERPRISE ; default : return AuthType . SOCIAL ; }
public class Example03_PatientResourceProvider { /** * Create / save a new resource */ @ Create public MethodOutcome create ( @ ResourceParam Patient thePatient ) { } }
// Give the resource the next sequential ID long id = myNextId ++ ; thePatient . setId ( new IdDt ( id ) ) ; // Store the resource in memory myPatients . put ( id , thePatient ) ; // Inform the server of the ID for the newly stored resource return new MethodOutcome ( thePatient . getId ( ) ) ;
public class CSTransformer { /** * Transform a Common Content CSNode entity into a Comment object that can be added to a Content Specification . * @ param node The CSNode to be transformed . * @ return The transformed Comment object . */ protected static CommonContent transformCommonContent ( CSNodeWrapper node ) { } }
final CommonContent commonContent ; if ( node . getNodeType ( ) == CommonConstants . CS_NODE_COMMON_CONTENT ) { commonContent = new CommonContent ( node . getTitle ( ) ) ; } else { throw new IllegalArgumentException ( "The passed node is not a Comment" ) ; } commonContent . setUniqueId ( node . getId ( ) == null ? null : node . getId ( ) . toString ( ) ) ; return commonContent ;
public class Dater { /** * Returns the given beginning clock and the given ending clock date array * @ param beginClock * @ param endClock * @ return */ public Date [ ] asRange ( String beginClock , String endClock ) { } }
String thisDay = null ; return new Date [ ] { of ( ( thisDay = ( asDayText ( ) + " " ) ) + beginClock ) . get ( ) , of ( thisDay + endClock ) . get ( ) } ;
public class BoxRequestUpdateSharedItem { /** * Gets the shared link access currently set for the item in the request . * @ return shared link access for the item , or null if not set . */ public BoxSharedLink . Access getAccess ( ) { } }
return mBodyMap . containsKey ( BoxItem . FIELD_SHARED_LINK ) ? ( ( BoxSharedLink ) mBodyMap . get ( BoxItem . FIELD_SHARED_LINK ) ) . getAccess ( ) : null ;
public class FileDownloader { /** * Set the location directory where files will be downloaded to * @ param downloadDirectory The directory that the file will be downloaded to . */ private String localFilePath ( File downloadDirectory ) throws MojoFailureException { } }
if ( downloadDirectory . exists ( ) ) { if ( downloadDirectory . isDirectory ( ) ) { return downloadDirectory . getAbsolutePath ( ) ; } else { throw new MojoFailureException ( "'" + downloadDirectory . getAbsolutePath ( ) + "' is not a directory!" ) ; } } if ( downloadDirectory . mkdirs ( ) ) { return downloadDirectory . getAbsolutePath ( ) ; } else { throw new MojoFailureException ( "Unable to create download directory!" ) ; }
public class CookieUtils { /** * Build cookie generation context cookie . * @ param cookie the cookie * @ return the cookie generation context */ public static CookieGenerationContext buildCookieGenerationContext ( final TicketGrantingCookieProperties cookie ) { } }
val rememberMeMaxAge = ( int ) Beans . newDuration ( cookie . getRememberMeMaxAge ( ) ) . getSeconds ( ) ; val builder = buildCookieGenerationContextBuilder ( cookie ) ; return builder . rememberMeMaxAge ( rememberMeMaxAge ) . build ( ) ;
public class Directory { /** * Unlinks the given name from the file it is linked to . * @ throws IllegalArgumentException if { @ code name } is a reserved name such as " . " or no entry * exists for the name */ public void unlink ( Name name ) { } }
DirectoryEntry entry = remove ( checkNotReserved ( name , "unlink" ) ) ; entry . file ( ) . unlinked ( ) ;
public class LeaveOrganizationRequestMarshaller { /** * Marshall the given parameter object . */ public void marshall ( LeaveOrganizationRequest leaveOrganizationRequest , ProtocolMarshaller protocolMarshaller ) { } }
if ( leaveOrganizationRequest == null ) { throw new SdkClientException ( "Invalid argument passed to marshall(...)" ) ; } try { } catch ( Exception e ) { throw new SdkClientException ( "Unable to marshall request to JSON: " + e . getMessage ( ) , e ) ; }
public class JBBPDslBuilder { /** * Add named string field . * @ return the builder instance , must not be null */ public JBBPDslBuilder String ( final String name ) { } }
final Item item = new Item ( BinType . STRING , name , this . byteOrder ) ; this . addItem ( item ) ; return this ;
public class JShellTool { /** * Handle incoming snippet events - - return true on failure */ private boolean handleEvent ( SnippetEvent ste ) { } }
Snippet sn = ste . snippet ( ) ; if ( sn == null ) { debug ( "Event with null key: %s" , ste ) ; return false ; } List < Diag > diagnostics = state . diagnostics ( sn ) . collect ( toList ( ) ) ; String source = sn . source ( ) ; if ( ste . causeSnippet ( ) == null ) { // main event for ( Diag d : diagnostics ) { hardmsg ( d . isError ( ) ? "jshell.msg.error" : "jshell.msg.warning" ) ; List < String > disp = new ArrayList < > ( ) ; displayDiagnostics ( source , d , disp ) ; disp . stream ( ) . forEach ( l -> hard ( "%s" , l ) ) ; } if ( ste . status ( ) != Status . REJECTED ) { if ( ste . exception ( ) != null ) { if ( ste . exception ( ) instanceof EvalException ) { printEvalException ( ( EvalException ) ste . exception ( ) ) ; return true ; } else if ( ste . exception ( ) instanceof UnresolvedReferenceException ) { printUnresolvedException ( ( UnresolvedReferenceException ) ste . exception ( ) ) ; } else { hard ( "Unexpected execution exception: %s" , ste . exception ( ) ) ; return true ; } } else { new DisplayEvent ( ste , FormatWhen . PRIMARY , ste . value ( ) , diagnostics ) . displayDeclarationAndValue ( ) ; } } else { if ( diagnostics . isEmpty ( ) ) { errormsg ( "jshell.err.failed" ) ; } return true ; } } else { // Update if ( sn instanceof DeclarationSnippet ) { List < Diag > other = errorsOnly ( diagnostics ) ; // display update information new DisplayEvent ( ste , FormatWhen . UPDATE , ste . value ( ) , other ) . displayDeclarationAndValue ( ) ; } } return false ;
public class TcpConnector { /** * Called when the new socket channel has successfully been registered * with the nio dispatcher . * @ param event the event * @ throws InterruptedException the interrupted exception * @ throws IOException Signals that an I / O exception has occurred . */ @ Handler ( channels = Self . class ) public void onRegistered ( NioRegistration . Completed event ) throws InterruptedException , IOException { } }
NioHandler handler = event . event ( ) . handler ( ) ; if ( ! ( handler instanceof TcpChannelImpl ) ) { return ; } if ( event . event ( ) . get ( ) == null ) { fire ( new Error ( event , "Registration failed, no NioDispatcher?" , new Throwable ( ) ) ) ; return ; } TcpChannelImpl channel = ( TcpChannelImpl ) handler ; channel . registrationComplete ( event . event ( ) ) ; channel . downPipeline ( ) . fire ( new Connected ( channel . nioChannel ( ) . getLocalAddress ( ) , channel . nioChannel ( ) . getRemoteAddress ( ) ) , channel ) ;
public class DurationFormatUtils { /** * Parses a classic date format string into Tokens * @ param format the format to parse , not null * @ return array of Token [ ] */ static Token [ ] lexx ( final String format ) { } }
final ArrayList < Token > list = new ArrayList < > ( format . length ( ) ) ; boolean inLiteral = false ; // Although the buffer is stored in a Token , the Tokens are only // used internally , so cannot be accessed by other threads StringBuilder buffer = null ; Token previous = null ; for ( int i = 0 ; i < format . length ( ) ; i ++ ) { final char ch = format . charAt ( i ) ; if ( inLiteral && ch != '\'' ) { buffer . append ( ch ) ; // buffer can ' t be null if inLiteral is true continue ; } Object value = null ; switch ( ch ) { // TODO : Need to handle escaping of ' case '\'' : if ( inLiteral ) { buffer = null ; inLiteral = false ; } else { buffer = new StringBuilder ( ) ; list . add ( new Token ( buffer ) ) ; inLiteral = true ; } break ; case 'y' : value = y ; break ; case 'M' : value = M ; break ; case 'd' : value = d ; break ; case 'H' : value = H ; break ; case 'm' : value = m ; break ; case 's' : value = s ; break ; case 'S' : value = S ; break ; default : if ( buffer == null ) { buffer = new StringBuilder ( ) ; list . add ( new Token ( buffer ) ) ; } buffer . append ( ch ) ; } if ( value != null ) { if ( previous != null && previous . getValue ( ) . equals ( value ) ) { previous . increment ( ) ; } else { final Token token = new Token ( value ) ; list . add ( token ) ; previous = token ; } buffer = null ; } } if ( inLiteral ) { // i . e . we have not found the end of the literal throw new IllegalArgumentException ( "Unmatched quote in format: " + format ) ; } return list . toArray ( new Token [ list . size ( ) ] ) ;
public class BufferUtils { /** * Convert buffer to a Hex Summary String . * @ param buffer the buffer to generate a hex byte summary from * @ return A string showing a summary of the content in hex */ public static String toHexSummary ( ByteBuffer buffer ) { } }
if ( buffer == null ) return "null" ; StringBuilder buf = new StringBuilder ( ) ; buf . append ( "b[" ) . append ( buffer . remaining ( ) ) . append ( "]=" ) ; for ( int i = buffer . position ( ) ; i < buffer . limit ( ) ; i ++ ) { TypeUtils . toHex ( buffer . get ( i ) , buf ) ; if ( i == buffer . position ( ) + 24 && buffer . limit ( ) > buffer . position ( ) + 32 ) { buf . append ( "..." ) ; i = buffer . limit ( ) - 8 ; } } return buf . toString ( ) ;
public class PackageDeclaration { /** * Sets the name of this package declaration . * @ param name * the name to set */ public void setName ( NameExpr name ) { } }
if ( this . name != null ) { updateReferences ( this . name ) ; } this . name = name ; setAsParentNodeOf ( name ) ;
public class PortletFilterUtils { /** * Call doFilter and use the { @ link javax . portlet . PortletRequest # LIFECYCLE _ PHASE } attribute to figure out what * type of request / response are in use and call the appropriate doFilter method on { @ link javax . portlet . filter . FilterChain } * @ param request a { @ link javax . portlet . PortletRequest } object . * @ param response a { @ link javax . portlet . PortletResponse } object . * @ param chain a { @ link javax . portlet . filter . FilterChain } object . * @ throws java . io . IOException if any . * @ throws javax . portlet . PortletException if any . */ public static void doFilter ( PortletRequest request , PortletResponse response , FilterChain chain ) throws IOException , PortletException { } }
final Object phase = request . getAttribute ( PortletRequest . LIFECYCLE_PHASE ) ; if ( PortletRequest . ACTION_PHASE . equals ( phase ) ) { chain . doFilter ( ( ActionRequest ) request , ( ActionResponse ) response ) ; } else if ( PortletRequest . EVENT_PHASE . equals ( phase ) ) { chain . doFilter ( ( EventRequest ) request , ( EventResponse ) response ) ; } else if ( PortletRequest . RENDER_PHASE . equals ( phase ) ) { chain . doFilter ( ( RenderRequest ) request , ( RenderResponse ) response ) ; } else if ( PortletRequest . RESOURCE_PHASE . equals ( phase ) ) { chain . doFilter ( ( ResourceRequest ) request , ( ResourceResponse ) response ) ; } else { throw new IllegalArgumentException ( "Unknown Portlet Lifecycle Phase: " + phase ) ; }
public class MathContext {
    /**
     * Get the smallest of the set of float values.
     *
     * <p>Fix: the original javadoc claimed "The greatest of the values" for a
     * minimum function; corrected here. Behavior is unchanged, including the
     * historical contract that an empty argument list yields
     * {@link Float#MAX_VALUE}.</p>
     *
     * @param values the set of values; may be empty
     * @return the smallest of the values, or {@code Float.MAX_VALUE} if none given
     */
    public float min(float... values) {
        if (values.length == 0) {
            // Historical contract: no arguments -> Float.MAX_VALUE.
            return Float.MAX_VALUE;
        }
        // Seed from the first element (matches the original i == 0 forcing,
        // so NaN / +Infinity first elements behave identically).
        float min = values[0];
        for (int i = 1; i < values.length; i++) {
            if (values[i] < min) {
                min = values[i];
            }
        }
        return min;
    }
}
public class EmbeddedLeaderService {
    /**
     * Callback from leader contenders when they stop their service.
     *
     * <p>Removes the contender from the election set, clears any leader status
     * it held, and triggers a new leader election round. Any failure during
     * cleanup is escalated via {@code fatalError}.</p>
     */
    private void removeContender(EmbeddedLeaderElectionService service) {
        synchronized (lock) {
            // if the service was not even started, simply do nothing
            if (!service.running || shutdown) {
                return;
            }

            try {
                if (!allLeaderContenders.remove(service)) {
                    throw new IllegalStateException("leader election service does not belong to this service");
                }

                // stop the service
                service.contender = null;
                service.running = false;
                service.isLeader = false;

                // if that was the current leader, unset its status
                if (currentLeaderConfirmed == service) {
                    currentLeaderConfirmed = null;
                    currentLeaderSessionId = null;
                    currentLeaderAddress = null;
                }
                if (currentLeaderProposed == service) {
                    currentLeaderProposed = null;
                    currentLeaderSessionId = null;
                }

                // elect a new leader; asynchronous failures are treated as fatal
                updateLeader().whenComplete((aVoid, throwable) -> {
                    if (throwable != null) {
                        fatalError(throwable);
                    }
                });
            } catch (Throwable t) {
                fatalError(t);
            }
        }
    }
}
public class Base64Utils {
    /**
     * Decode a base64 string into a byte array.
     *
     * <p>NOTE(review): input length being a multiple of 4 is checked only via
     * {@code assert}, i.e. not enforced when assertions are disabled. Trailing
     * '=' padding characters shrink the output accordingly.</p>
     *
     * @param data the encoded data; may be null
     * @return the decoded bytes, or null if {@code data} is null
     * @see #fromBase64(String)
     */
    public static byte[] fromBase64(String data) {
        if (data == null) {
            return null;
        }

        int len = data.length();
        assert (len % 4) == 0;

        if (len == 0) {
            return new byte[0];
        }

        char[] chars = new char[len];
        data.getChars(0, len, chars, 0);

        // Each 4-char group decodes to 3 bytes; each '=' padding char removes one byte.
        int olen = 3 * (len / 4);
        if (chars[len - 2] == '=') {
            --olen;
        }
        if (chars[len - 1] == '=') {
            --olen;
        }

        byte[] bytes = new byte[olen];

        int iidx = 0;
        int oidx = 0;
        while (iidx < len) {
            // Map four base64 characters to 6-bit values and pack into 24 bits.
            int c0 = base64Values[chars[iidx++] & 0xff];
            int c1 = base64Values[chars[iidx++] & 0xff];
            int c2 = base64Values[chars[iidx++] & 0xff];
            int c3 = base64Values[chars[iidx++] & 0xff];
            int c24 = (c0 << 18) | (c1 << 12) | (c2 << 6) | c3;

            // Unpack high-to-low, stopping once the (padding-adjusted) length is reached.
            bytes[oidx++] = (byte) (c24 >> 16);
            if (oidx == olen) {
                break;
            }
            bytes[oidx++] = (byte) (c24 >> 8);
            if (oidx == olen) {
                break;
            }
            bytes[oidx++] = (byte) c24;
        }

        return bytes;
    }
}
public class PluginMonitoringFilter { /** * { @ inheritDoc } */ @ Override public void doFilter ( ServletRequest request , ServletResponse response , FilterChain chain ) throws IOException , ServletException { } }
if ( ! ( request instanceof HttpServletRequest ) ) { super . doFilter ( request , response , chain ) ; return ; } final HttpServletRequest httpRequest = ( HttpServletRequest ) request ; registerSessionIfNeeded ( httpRequest ) ; super . doFilter ( request , response , chain ) ; // si logout on prend en compte de suite la destruction de la session unregisterSessionIfNeeded ( httpRequest ) ;
public class AlertWindow { /** * Dismiss the */ public void dismiss ( ) { } }
if ( ! isShowing ) { Log . w ( "AlertWindow" , "AlertWindow is not displayed." ) ; } else if ( mContentView != null ) { isShowing = false ; mWindowManager . removeViewImmediate ( mContentView ) ; }
public class AdminJoblogAction {
    // Renders the job-log list page: registers the paged job-log items for the
    // view and copies the pager's id back into the search form.
    private HtmlResponse asListHtml() {
        return asHtml(path_AdminJoblog_AdminJoblogJsp).renderWith(data -> {
            RenderDataUtil.register(data, "jobLogItems", jobLogService.getJobLogList(jobLogPager)); // page navi
        }).useForm(SearchForm.class, setup -> {
            setup.setup(form -> {
                copyBeanToBean(jobLogPager, form, op -> op.include("id"));
            });
        });
    }
}
public class UIUtils { /** * to get nimbus client , we should reset ZK config */ public static Map resetZKConfig ( Map conf , String clusterName ) { } }
ClusterConfig nimbus = clusterConfig . get ( clusterName ) ; if ( nimbus == null ) return conf ; conf . put ( Config . STORM_ZOOKEEPER_ROOT , nimbus . getZkRoot ( ) ) ; conf . put ( Config . STORM_ZOOKEEPER_SERVERS , nimbus . getZkServers ( ) ) ; conf . put ( Config . STORM_ZOOKEEPER_PORT , nimbus . getZkPort ( ) ) ; return conf ;
public class ConfigValueHelper {
    /**
     * Checks that a configuration value contains only "normal" characters,
     * delegating to {@code checkPattern} with the {@code NORMAL} pattern
     * (letters, digits, '-', '_' and '.'), and throws if it does not.
     *
     * @param configKey   the configuration key (used in the error message)
     * @param configValue the configuration value to validate
     * @throws SofaRpcRuntimeException if the value does not match the pattern
     */
    protected static void checkNormal(String configKey, String configValue) throws SofaRpcRuntimeException {
        checkPattern(configKey, configValue, NORMAL, "only allow a-zA-Z0-9 '-' '_' '.'");
    }
}
public class DfState {
    /**
     * Primary match function; if this function returns true the algorithm
     * has found a match. Calling it again will backtrack and find the next
     * match.
     *
     * @return a mapping was found
     */
    boolean matchNext() {
        if (numAtoms == 0)
            return false;
        // A previous call already produced a match: backtrack first so this
        // call searches for the *next* mapping instead of re-reporting it.
        if (sptr > 1)
            backtrack();
        main:
        while (sptr != 0) {
            final int bidx = currBondIdx();
            if (bidx == numBonds) {
                // done
                if (numMapped == numAtoms)
                    return true;
                // handle disconnected atoms
                for (IAtom qatom : query.atoms()) {
                    if (amap[qatom.getIndex()] == UNMAPPED) {
                        Iterator<IAtom> iter = atoms();
                        while (iter.hasNext()) {
                            IAtom atom = iter.next();
                            if (feasible(bidx, (IQueryAtom) qatom, atom))
                                continue main;
                        }
                        break;
                    }
                }
                backtrack();
                continue;
            }
            IQueryBond qbond = qbonds[bidx];
            IQueryAtom qbeg = (IQueryAtom) qbond.getBegin();
            IQueryAtom qend = (IQueryAtom) qbond.getEnd();
            int begIdx = amap[qbeg.getIndex()];
            int endIdx = amap[qend.getIndex()];
            // both atoms matched, there must be a bond between them
            if (begIdx != UNMAPPED && endIdx != UNMAPPED) {
                IBond bond = mol.getAtom(begIdx).getBond(mol.getAtom(endIdx));
                if (feasible(qbond, bond))
                    continue;
            }
            // 'beg' is mapped, find a feasible 'end' from it's neighbor list
            else if (begIdx != UNMAPPED) {
                IAtom beg = mol.getAtom(begIdx);
                Iterator<IBond> biter = bonds(beg);
                while (biter.hasNext()) {
                    IBond bond = biter.next();
                    IAtom end = bond.getOther(beg);
                    if (qbond.matches(bond) && feasible(bidx + 1, qend, end))
                        continue main;
                }
            }
            // 'end' is mapped, find a feasible 'beg' from it's neighbor list
            else if (endIdx != UNMAPPED) {
                IAtom end = mol.getAtom(endIdx);
                Iterator<IBond> biter = bonds(end);
                while (biter.hasNext()) {
                    IBond bond = biter.next();
                    IAtom beg = bond.getOther(end);
                    if (qbond.matches(bond) && feasible(bidx + 1, qbeg, beg))
                        continue main;
                }
            }
            // 'beg' nor 'end' matched, find a feasible mapping from
            // any atom in the molecule
            else {
                Iterator<IAtom> aiter = atoms();
                while (aiter.hasNext()) {
                    if (feasible(bidx, qbeg, aiter.next()))
                        continue main;
                }
            }
            // No extension of the current partial mapping worked: undo the
            // last assignment and try the next candidate.
            backtrack();
        }
        return false;
    }
}
public class MongoServer { /** * Closes the server socket . No new clients are accepted afterwards . */ public void stopListenting ( ) { } }
if ( channel != null ) { log . info ( "closing server channel" ) ; channel . close ( ) . syncUninterruptibly ( ) ; channel = null ; }
public class JsonServiceDocumentWriter { /** * Writes the name of the entity * It is a MUST element . * @ param jsonGenerator jsonGenerator * @ param entity entity from the container */ private void writeName ( JsonGenerator jsonGenerator , Object entity ) throws IOException { } }
jsonGenerator . writeFieldName ( NAME ) ; if ( entity instanceof EntitySet ) { jsonGenerator . writeObject ( ( ( EntitySet ) entity ) . getName ( ) ) ; } else { jsonGenerator . writeObject ( ( ( Singleton ) entity ) . getName ( ) ) ; }
public class DivOpAxis {
    /**
     * {@inheritDoc}
     *
     * <p>Determines the XPath result type of a division: numeric/numeric
     * promotes to the widest numeric type involved; duration/duration yields a
     * decimal ratio; duration/numeric keeps the duration type. Everything else
     * is a type error (XPTY0004).</p>
     */
    @Override
    protected Type getReturnType(final int mOp1, final int mOp2) throws TTXPathException {
        Type type1;
        Type type2;
        try {
            type1 = Type.getType(mOp1).getPrimitiveBaseType();
            type2 = Type.getType(mOp2).getPrimitiveBaseType();
        } catch (final IllegalStateException e) {
            // Unresolvable operand types -> static type error.
            throw new XPathError(ErrorType.XPTY0004);
        }
        if (type1.isNumericType() && type2.isNumericType()) {
            // if both have the same numeric type, return it
            if (type1 == type2) {
                return type1;
            }
            // Otherwise promote: DOUBLE > FLOAT > DECIMAL.
            if (type1 == Type.DOUBLE || type2 == Type.DOUBLE) {
                return Type.DOUBLE;
            } else if (type1 == Type.FLOAT || type2 == Type.FLOAT) {
                return Type.FLOAT;
            } else {
                assert (type1 == Type.DECIMAL || type2 == Type.DECIMAL);
                return Type.DECIMAL;
            }
        } else {
            switch (type1) {
            case YEAR_MONTH_DURATION:
                // duration / same duration -> decimal ratio; duration / number -> duration
                if (type2 == Type.YEAR_MONTH_DURATION) {
                    return Type.DECIMAL;
                }
                if (type2.isNumericType()) {
                    return type1;
                }
                break;
            case DAY_TIME_DURATION:
                if (type2 == Type.DAY_TIME_DURATION) {
                    return Type.DECIMAL;
                }
                if (type2.isNumericType()) {
                    return type1;
                }
                break;
            default:
                throw new XPathError(ErrorType.XPTY0004);
            }
            // Duration divided by a non-numeric, non-matching type is also an error.
            throw new XPathError(ErrorType.XPTY0004);
        }
    }
}
public class AnalysisJobBuilder {
    /**
     * Creates a component builder (filter/transformer/analyzer) mirroring the
     * given component job's descriptor, name, configured properties and (for
     * column-producing components) output column names. Note that input
     * (columns and requirements) will not be mapped since these depend on the
     * context of the job and may not be matched in this
     * {@link AnalysisJobBuilder}.
     *
     * @param componentJob the job to mirror
     * @return the builder object for the specific component
     * @throws UnsupportedOperationException if the component job type is unknown
     */
    protected Object addComponent(ComponentJob componentJob) {
        final AbstractBeanJobBuilder<?, ?, ?> builder;
        // Dispatch on the concrete job type to create the matching builder kind.
        if (componentJob instanceof FilterJob) {
            builder = addFilter((FilterBeanDescriptor<?, ?>) componentJob.getDescriptor());
        } else if (componentJob instanceof TransformerJob) {
            builder = addTransformer((TransformerBeanDescriptor<?>) componentJob.getDescriptor());
        } else if (componentJob instanceof AnalyzerJob) {
            builder = addAnalyzer((AnalyzerBeanDescriptor<?>) componentJob.getDescriptor());
        } else {
            throw new UnsupportedOperationException("Unknown component job type: " + componentJob);
        }

        builder.setName(componentJob.getName());

        if (componentJob instanceof ConfigurableBeanJob<?>) {
            // Copy over configured properties from the source job.
            ConfigurableBeanJob<?> configurableBeanJob = (ConfigurableBeanJob<?>) componentJob;
            builder.setConfiguredProperties(configurableBeanJob.getConfiguration());
        }

        if (componentJob instanceof InputColumnSourceJob) {
            // Mirror the output column names onto the new builder's output columns.
            // NOTE(review): assumes InputColumnSourceJob implies a transformer builder here.
            InputColumn<?>[] output = ((InputColumnSourceJob) componentJob).getOutput();
            TransformerJobBuilder<?> transformerJobBuilder = (TransformerJobBuilder<?>) builder;
            List<MutableInputColumn<?>> outputColumns = transformerJobBuilder.getOutputColumns();
            assert output.length == outputColumns.size();
            for (int i = 0; i < output.length; i++) {
                MutableInputColumn<?> mutableOutputColumn = outputColumns.get(i);
                mutableOutputColumn.setName(output[i].getName());
            }
        }

        return builder;
    }
}
public class PluginConfigSupport { /** * Returns an array of all of the Plugins . * @ return the array of Plugins . */ public Plugin [ ] getPlugins ( ) { } }
if ( mPluginContext != null ) { Collection c = mPluginContext . getPlugins ( ) . values ( ) ; if ( c != null ) { return ( Plugin [ ] ) c . toArray ( new Plugin [ c . size ( ) ] ) ; } } return new Plugin [ 0 ] ;
public class MapStoredSessionProviderService { /** * ( non - Javadoc ) * @ see * org . exoplatform . services . jcr . ext . app . SessionProviderService # getSessionProvider ( java . lang . Object */ public SessionProvider getSessionProvider ( Object key ) { } }
if ( providers . containsKey ( key ) ) { return providers . get ( key ) ; } else { throw new IllegalArgumentException ( "SessionProvider is not initialized" ) ; }
public class MetricsSystem { /** * Resets all the counters to 0 for testing . */ public static void resetAllCounters ( ) { } }
for ( Map . Entry < String , Counter > entry : METRIC_REGISTRY . getCounters ( ) . entrySet ( ) ) { entry . getValue ( ) . dec ( entry . getValue ( ) . getCount ( ) ) ; }
public class GoogleMapShape {
    /**
     * Set the z index on the wrapped shape, dispatching on the shape type.
     * For collections the value is applied recursively to each contained shape.
     *
     * @param zIndex z index
     * @since 2.0.1
     */
    public void setZIndex(float zIndex) {
        switch (shapeType) {
        // Options objects use the fluent zIndex(...) setter.
        case MARKER_OPTIONS:
            ((MarkerOptions) shape).zIndex(zIndex);
            break;
        case POLYLINE_OPTIONS:
            ((PolylineOptions) shape).zIndex(zIndex);
            break;
        case POLYGON_OPTIONS:
            ((PolygonOptions) shape).zIndex(zIndex);
            break;
        case MULTI_POLYLINE_OPTIONS:
            ((MultiPolylineOptions) shape).zIndex(zIndex);
            break;
        case MULTI_POLYGON_OPTIONS:
            ((MultiPolygonOptions) shape).zIndex(zIndex);
            break;
        // Live map objects and composites use setZIndex(...).
        case MARKER:
            ((Marker) shape).setZIndex(zIndex);
            break;
        case POLYGON:
            ((Polygon) shape).setZIndex(zIndex);
            break;
        case POLYLINE:
            ((Polyline) shape).setZIndex(zIndex);
            break;
        case MULTI_MARKER:
            ((MultiMarker) shape).setZIndex(zIndex);
            break;
        case MULTI_POLYLINE:
            ((MultiPolyline) shape).setZIndex(zIndex);
            break;
        case MULTI_POLYGON:
            ((MultiPolygon) shape).setZIndex(zIndex);
            break;
        case POLYLINE_MARKERS:
            ((PolylineMarkers) shape).setZIndex(zIndex);
            break;
        case POLYGON_MARKERS:
            ((PolygonMarkers) shape).setZIndex(zIndex);
            break;
        case MULTI_POLYLINE_MARKERS:
            ((MultiPolylineMarkers) shape).setZIndex(zIndex);
            break;
        case MULTI_POLYGON_MARKERS:
            ((MultiPolygonMarkers) shape).setZIndex(zIndex);
            break;
        case COLLECTION:
            // Apply recursively to every shape in the collection.
            @SuppressWarnings("unchecked")
            List<GoogleMapShape> shapeList = (List<GoogleMapShape>) shape;
            for (GoogleMapShape shapeListItem : shapeList) {
                shapeListItem.setZIndex(zIndex);
            }
            break;
        default:
            // Unknown shape types are silently ignored.
        }
    }
}
public class CPDefinitionSpecificationOptionValueLocalServiceBaseImpl {
    /**
     * Returns the cp definition specification option value matching the UUID and group.
     *
     * @param uuid the cp definition specification option value's UUID
     * @param groupId the primary key of the group
     * @return the matching cp definition specification option value
     * @throws PortalException if a matching cp definition specification option value could not be found
     */
    @Override
    public CPDefinitionSpecificationOptionValue getCPDefinitionSpecificationOptionValueByUuidAndGroupId(String uuid, long groupId) throws PortalException {
        // Delegates to the persistence layer; findByUUID_G throws when no row matches.
        return cpDefinitionSpecificationOptionValuePersistence.findByUUID_G(uuid, groupId);
    }
}
public class TextBox {
    /**
     * Create a new box from the same DOM node in the same context.
     *
     * @return the new TextBox, with this box's values copied into it
     */
    public TextBox copyTextBox() {
        // Same text node, graphics context and rendering context as this box.
        TextBox ret = new TextBox(textNode, g, ctx);
        ret.copyValues(this);
        return ret;
    }
}
public class MediaApi {
    /**
     * Switch to monitor.
     * Switch to the monitor mode for the specified chat. The supervisor can&#39;t send
     * messages in this mode and only another supervisor can see that the monitoring
     * supervisor joined the chat.
     *
     * <p>Note: the "Swicth" typo in the method and parameter names is part of the
     * published API surface and must be preserved.</p>
     *
     * @param mediatype The media channel. (required)
     * @param id The ID of the chat interaction. (required)
     * @param mediaSwicthToCoachData Request parameters. (optional)
     * @return ApiSuccessResponse
     * @throws ApiException If fail to call the API, e.g. server error or cannot deserialize the response body
     */
    public ApiSuccessResponse mediaSwicthToMonitor(String mediatype, String id, MediaSwicthToCoachData2 mediaSwicthToCoachData) throws ApiException {
        // Delegate to the WithHttpInfo variant and unwrap the payload.
        ApiResponse<ApiSuccessResponse> resp = mediaSwicthToMonitorWithHttpInfo(mediatype, id, mediaSwicthToCoachData);
        return resp.getData();
    }
}
public class SecondaryStatusTransitionMarshaller {
    /**
     * Marshall the given parameter object into the protocol marshaller.
     *
     * @param secondaryStatusTransition the object to marshall; must not be null
     * @param protocolMarshaller the target marshaller
     * @throws SdkClientException if the argument is null or marshalling fails
     */
    public void marshall(SecondaryStatusTransition secondaryStatusTransition, ProtocolMarshaller protocolMarshaller) {
        if (secondaryStatusTransition == null) {
            throw new SdkClientException("Invalid argument passed to marshall(...)");
        }
        try {
            // Emit each field against its static marshalling binding.
            protocolMarshaller.marshall(secondaryStatusTransition.getStatus(), STATUS_BINDING);
            protocolMarshaller.marshall(secondaryStatusTransition.getStartTime(), STARTTIME_BINDING);
            protocolMarshaller.marshall(secondaryStatusTransition.getEndTime(), ENDTIME_BINDING);
            protocolMarshaller.marshall(secondaryStatusTransition.getStatusMessage(), STATUSMESSAGE_BINDING);
        } catch (Exception e) {
            // Wrap any marshalling failure in an SDK exception with context.
            throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
        }
    }
}
public class Reflection {
    /**
     * Utility method kept for backwards compatibility. Annotation checking used
     * to be problematic on GWT.
     *
     * @param field might be annotated. Can be null.
     * @param annotationType class of the annotation that the field is checked against.
     * @return true if the field is non-null and annotated with the specified annotation.
     */
    public static boolean isAnnotationPresent(final Field field, final Class<? extends Annotation> annotationType) {
        if (field == null) {
            // A missing field can never carry an annotation.
            return false;
        }
        return field.isAnnotationPresent(annotationType);
    }
}
public class AbstractBpmnModelElementBuilder { /** * Finishes the building of an embedded sub - process . * @ return the parent sub - process builder * @ throws BpmnModelException if no parent sub - process can be found */ public SubProcessBuilder subProcessDone ( ) { } }
BpmnModelElementInstance lastSubProcess = element . getScope ( ) ; if ( lastSubProcess != null && lastSubProcess instanceof SubProcess ) { return ( ( SubProcess ) lastSubProcess ) . builder ( ) ; } else { throw new BpmnModelException ( "Unable to find a parent subProcess." ) ; }
public class DividedDateTimeField {
    /**
     * Get the amount of scaled units from the specified time instant.
     *
     * @param instant the time instant in millis to query
     * @return the amount of scaled units extracted from the input
     */
    public int get(long instant) {
        int value = getWrappedField().get(instant);
        if (value >= 0) {
            return value / iDivisor;
        } else {
            // Java's integer division truncates toward zero; adjust negative
            // values so the result floors toward negative infinity instead
            // (e.g. with iDivisor 10, -1 maps to -1, not 0).
            return ((value + 1) / iDivisor) - 1;
        }
    }
}
public class CommandSupport {
    /**
     * Scans classpath to find commands and register them.
     * Commands are instantiated using the default constructor, but
     * {@link io.dropwizard.cli.EnvironmentCommand} must have a constructor with an
     * {@link io.dropwizard.Application} argument.
     *
     * @param bootstrap bootstrap object
     * @param scanner configured scanner instance
     * @param context configuration context
     * @return list of installed commands
     */
    public static List<Command> registerCommands(final Bootstrap bootstrap, final ClasspathScanner scanner, final ConfigurationContext context) {
        // Time the whole scan/registration phase for diagnostics.
        final Stopwatch timer = context.stat().timer(CommandTime);
        final CommandClassVisitor visitor = new CommandClassVisitor(bootstrap);
        scanner.scan(visitor);
        // Record the discovered command classes, then hand back the instances.
        context.registerCommands(visitor.getCommands());
        timer.stop();
        return visitor.getCommandList();
    }
}
public class ShowHelpDialogAction {
    /**
     * {@inheritDoc}
     *
     * <p>Centers the help window, applies the configured look-and-feel (failures
     * are logged and the current L&amp;F is kept), and refreshes the window's
     * component tree.</p>
     */
    @Override
    public void actionPerformed(final ActionEvent e) {
        // null argument centers the window on the screen.
        helpWindow.setLocationRelativeTo(null);
        try {
            UIManager.setLookAndFeel(this.lookAndFeels.getLookAndFeelName());
        } catch (final Exception ex) {
            // Look-and-feel failures are non-fatal; log and continue.
            log.log(Level.SEVERE, ex.getLocalizedMessage(), ex);
        }
        SwingUtilities.updateComponentTreeUI(helpWindow);
    }
}
public class AzureAsyncOperation {
    /**
     * Creates AzureAsyncOperation from the given HTTP response.
     *
     * @param serializerAdapter the adapter to use for deserialization
     * @param response the response
     * @return the async operation object
     * @throws CloudException if the deserialization fails or response contains invalid body
     */
    static AzureAsyncOperation fromResponse(SerializerAdapter<?> serializerAdapter, Response<ResponseBody> response) throws CloudException {
        AzureAsyncOperation asyncOperation = null;
        String rawString = null;
        if (response.body() != null) {
            try {
                rawString = response.body().string();
                asyncOperation = serializerAdapter.deserialize(rawString, AzureAsyncOperation.class);
            } catch (IOException exception) {
                // Exception will be handled below (asyncOperation stays null).
            } finally {
                // Always release the response body, even on failure.
                response.body().close();
            }
        }
        if (asyncOperation == null || asyncOperation.status() == null) {
            // Missing, unparseable, or status-less body: treat as invalid.
            throw new CloudException("polling response does not contain a valid body: " + rawString, response);
        } else {
            // Keep the raw payload for diagnostics during later polling steps.
            asyncOperation.rawString = rawString;
        }
        return asyncOperation;
    }
}
public class RedisJobStore {
    /**
     * Store the given {@link org.quartz.Calendar}.
     *
     * @param name the name of the calendar
     * @param calendar the <code>Calendar</code> to be stored
     * @param replaceExisting if <code>true</code>, any <code>Calendar</code> existing in the
     *        <code>JobStore</code> with the same name &amp; group should be over-written
     * @param updateTriggers if <code>true</code>, any <code>Trigger</code>s existing in the
     *        <code>JobStore</code> that reference an existing Calendar with the same name
     *        will have their next fire time re-computed with the new <code>Calendar</code>
     * @throws org.quartz.ObjectAlreadyExistsException if a <code>Calendar</code> with the
     *         same name already exists, and replaceExisting is set to false
     */
    @Override
    public void storeCalendar(final String name, final Calendar calendar, final boolean replaceExisting, final boolean updateTriggers) throws ObjectAlreadyExistsException, JobPersistenceException {
        // Run the store operation under the Redis lock so it executes atomically.
        doWithLock(new LockCallbackWithoutResult() {
            @Override
            public Void doWithLock(JedisCommands jedis) throws JobPersistenceException {
                storage.storeCalendar(name, calendar, replaceExisting, updateTriggers, jedis);
                return null;
            }
        }, "Could not store calendar.");
    }
}
public class HashtableOnDisk {
    /**
     * Preps the reusable input buffer for reading object headers, to reduce
     * the number of physical reads necessary to read an object from disk.
     *
     * @param seek absolute file offset of the header to read
     * @throws IOException if seeking or reading the file fails
     */
    private void initReadBuffer(long seek) throws IOException {
        if (headerinbuf == null) {
            // Lazily allocate the header buffer once; its size mirrors the on-disk header layout.
            int buflen = DWORDSIZE + // room for next
                    SWORDSIZE + // room for hash
                    DWORDSIZE + // room for old format: last update
                                // new format: validator expiration time (VET)
                    DWORDSIZE + // room for old format: last reference
                                // new format: H-WORD - unused; L-WORD - hashcode for cache value
                    DWORDSIZE + // room for first creation
                    DWORDSIZE + // room for expiration
                    DWORDSIZE + // room for old format: grace
                                // new format: magic, version & data size
                    SWORDSIZE; // room for key size
            headerinbuf = new byte[buflen];
            headerinbytestream = new ByteArrayInputStream(headerinbuf);
            headerin = new DataInputStream(headerinbytestream);
        }
        filemgr.seek(seek);
        filemgr.read(headerinbuf);
        // Rewind the stream so the freshly read header is parsed from the start.
        headerinbytestream.reset();
    }
}
public class Main {
    /**
     * Main entry point. Creates a debugger attached to a Rhino
     * {@link org.mozilla.javascript.tools.shell.Main} shell session.
     */
    public static void main(String[] args) {
        Main main = new Main("Rhino JavaScript Debugger");
        main.doBreak();
        main.setExitAction(new IProxy(IProxy.EXIT_ACTION));
        // Route the process's standard streams through the debugger console.
        System.setIn(main.getIn());
        System.setOut(main.getOut());
        System.setErr(main.getErr());
        Global global = org.mozilla.javascript.tools.shell.Main.getGlobal();
        global.setIn(main.getIn());
        global.setOut(main.getOut());
        global.setErr(main.getErr());
        // Attach the debugger to the shell's context factory and show the UI.
        main.attachTo(org.mozilla.javascript.tools.shell.Main.shellContextFactory);
        main.setScope(global);
        main.pack();
        main.setSize(600, 460);
        main.setVisible(true);
        // Run the shell with the original arguments; the debugger stays attached.
        org.mozilla.javascript.tools.shell.Main.exec(args);
    }
}
public class DBCleanerTool {
    /**
     * Executes SQL scripts for finishing clean operations if needed.
     * It can be adding indexes, constraints, removing temporary objects etc.
     * (related to a specific database) or does nothing.
     *
     * <p>This method does not invoke commit or rollback on the {@link Connection},
     * but the needed autocommit mode can be set.</p>
     *
     * @throws DBCleanException if executing the committing scripts fails
     */
    public void commit() throws DBCleanException {
        try {
            execute(committingScripts);
        } catch (SQLException e) {
            // Surface the full chained SQL error message for easier diagnosis.
            throw new DBCleanException(JDBCUtils.getFullMessage(e), e);
        }
    }
}
public class CommandRunner {
    /**
     * Executes <code>command</code> and returns an execution wrapper that
     * provides safe access to and management of the underlying streams of data.
     *
     * @param command the command to execute
     * @return an execution wrapper that allows you to process the streams
     * @throws JSchException if ssh execution fails
     * @throws IOException if unable to read the result data
     */
    public ChannelExecWrapper open(String command) throws JSchException, IOException {
        logger.debug("executing {} on {}", command, sessionManager);
        // No stdin payload and no pre-wired stdout/stderr streams are supplied here.
        return new ChannelExecWrapper(sessionManager.getSession(), command, null, null, null);
    }
}
public class HighLevelEncoder {
    /**
     * Adds to the "result" list every successor state reachable by encoding the
     * character at {@code index} from {@code state}: latching to another mode,
     * shifting for a single character, or entering/extending binary shift.
     */
    private void updateStateForChar(State state, int index, Collection<State> result) {
        char ch = (char) (text[index] & 0xFF);
        boolean charInCurrentTable = CHAR_MAP[state.getMode()][ch] > 0;
        State stateNoBinary = null;
        for (int mode = 0; mode <= MODE_PUNCT; mode++) {
            int charInMode = CHAR_MAP[mode][ch];
            if (charInMode > 0) {
                if (stateNoBinary == null) {
                    // Only create stateNoBinary the first time it's required.
                    stateNoBinary = state.endBinaryShift(index);
                }
                // Try generating the character by latching to its mode
                if (!charInCurrentTable || mode == state.getMode() || mode == MODE_DIGIT) {
                    // If the character is in the current table, we don't want to latch to
                    // any other mode except possibly digit (which uses only 4 bits). Any
                    // other latch would be equally successful *after* this character, and
                    // so wouldn't save any bits.
                    State latchState = stateNoBinary.latchAndAppend(mode, charInMode);
                    result.add(latchState);
                }
                // Try generating the character by switching to its mode.
                if (!charInCurrentTable && SHIFT_TABLE[state.getMode()][mode] >= 0) {
                    // It never makes sense to temporarily shift to another mode if the
                    // character exists in the current mode. That can never save bits.
                    State shiftState = stateNoBinary.shiftAndAppend(mode, charInMode);
                    result.add(shiftState);
                }
            }
        }
        if (state.getBinaryShiftByteCount() > 0 || CHAR_MAP[state.getMode()][ch] == 0) {
            // It's never worthwhile to go into binary shift mode if you're not already
            // in binary shift mode, and the character exists in your current mode.
            // That can never save bits over just outputting the char in the current mode.
            State binaryState = state.addBinaryShiftChar(index);
            result.add(binaryState);
        }
    }
}
public class ThriftServer {
    /**
     * Runs the example with given {@code args}.
     *
     * @param args the argument list (unused)
     */
    public static void main(String[] args) throws TimeoutException, InterruptedException {
        // #thriftserverapi
        Hello.FutureIface impl = new HelloImpl();
        ListeningServer server = Thrift.server().serveIface("localhost:8080", impl);
        // Block until the server terminates.
        Await.ready(server);
        // #thriftserverapi
    }
}
public class AbstractGelfTransport {
    /**
     * {@inheritDoc}
     *
     * <p>This implementation is backed by a {@link java.util.concurrent.BlockingQueue}.
     * When this method returns, the message has been added to the queue but has
     * not been sent to the remote host yet.</p>
     *
     * @param message message to send to the remote host
     * @return true if the message could be dispatched, false otherwise
     */
    @Override
    public boolean trySend(final GelfMessage message) {
        LOG.debug("Trying to send message: {}", message);
        // offer() is non-blocking: it fails fast instead of waiting for capacity.
        return queue.offer(message);
    }
}
public class FileUtilities { /** * Save the data , represented as a byte array to a file * @ param file The location / name of the file to be saved . * @ param fileContents The data that is to be written to the file . * @ throws IOException */ public static void saveFile ( final File file , byte [ ] fileContents ) throws IOException { } }
if ( file . isDirectory ( ) ) { throw new IOException ( "Unable to save file contents as a directory." ) ; } final FileOutputStream fos = new FileOutputStream ( file ) ; fos . write ( fileContents ) ; fos . flush ( ) ; fos . close ( ) ;
public class DescribeNetworkInterfacesAction { /** * Describes one or more of your network interfaces . * @ param endpoint Optional - Endpoint to which request will be sent . * Default : " https : / / ec2 . amazonaws . com " * @ param identity ID of the secret access key associated with your Amazon AWS or * IAM account . * Example : " AKIAIOSFODNN7EXAMPLE " * @ param credential Secret access key associated with your Amazon AWS or IAM account . * Example : " wJalrXUtnFEMI / K7MDENG / bPxRfiCYEXAMPLEKEY " * @ param proxyHost Optional - proxy server used to connect to Amazon API . If empty no * proxy will be used . * @ param proxyPort Optional - proxy server port . You must either specify values for both * proxyHost and proxyPort inputs or leave them both empty . * @ param proxyUsername Optional - proxy server user name . * Default : " " * @ param proxyPassword Optional - proxy server password associated with the proxyUsername * input value . * @ param version Optional - Version of the web service to made the call against it . * Example : " 2016-11-15" * Default : " 2016-11-15" * @ param headers Optional - string containing the headers to use for the request * separated by new line ( CRLF ) . The header name - value pair will be * separated by " : " * Format : Conforming with HTTP standard for headers ( RFC 2616) * Examples : " Accept : text / plain " * Default : " " * @ param queryParams Optional - string containing query parameters that will be appended * to the URL . The names and the values must not be URL encoded because * if they are encoded then a double encoded will occur . The separator * between name - value pairs is " & " symbol . The query name will be * separated from query value by " = " * Examples : " parameterName1 = parameterValue1 & parameterName2 = parameterValue2" * Default : " " * @ param delimiter Optional - Delimiter that will be used . 
* @ param filterAddressesPrivateIpAddress Optional - The private IPv4 addresses associated with the network * interface . * @ param filterAddressesPrimary Optional - Whether the private IPv4 address is the primary IP address * associated with the network interface . * @ param filterAddressesAssociationPublicIp Optional - The association ID returned when the network interface was * associated with the Elastic IP address ( IPv4 ) . * @ param filterAddressesAssociationOwnerId Optional - The owner ID of the addresses associated with the network * interface . * @ param filterAssociationAssociationId Optional - The association ID returned when the network interface * was associated with an IPv4 address . * @ param filterAssociationAllocationId Optional - The allocation ID returned when you allocated the Elastic * IP address ( IPv4 ) for your network interface . * @ param filterAssociationIpOwnerId Optional - The owner of the Elastic IP address ( IPv4 ) associated * with the network interface . * @ param filterAssociationPublicIp Optional - The address of the Elastic IP address ( IPv4 ) bound to the * network interface . * @ param filterAssociationPublicDnsName Optional - The public DNS name for the network interface ( IPv4 ) . * @ param filterAttachmentAttachmentId Optional - The ID of the interface attachment . * @ param filterAttachmentAttachTime Optional - The time that the network interface was attached to an * instance . * @ param filterAttachmentDeleteOnTermination Optional - Indicates whether the attachment is deleted when an * instance is terminated . * @ param filterAttachmentDeviceIndex Optional - The device index to which the network interface is attached . * @ param filterAttachmentInstanceId Optional - The ID of the instance to which the network interface is * attached . * @ param filterAttachmentInstanceOwnerId Optional - The owner ID of the instance to which the network * interface is attached . 
* @ param filterAttachmentNatGatewayId Optional - The ID of the NAT gateway to which the network interface * is attached . * @ param filterAttachmentStatus Optional - The status of the attachment . * Valid values : attaching , attached , detaching , detached . * @ param filterAvailabilityZone Optional - The Availability Zone of the network interface . * @ param filterDescription Optional - The description of the network interface . * @ param filterGroupId Optional - The ID of a security group associated with the network * interface . * @ param filterGroupName Optional - The name of a security group associated with the network * interface . * @ param filterIpv6AddressesIpv6Address Optional - An IPv6 address associated with the network interface . * @ param filterMacAddress Optional - The MAC address of the network interface . * @ param filterNetworkInterfaceId Optional - The ID of the network interface . * @ param filterOwnerId Optional - The AWS account ID of the network interface owner . * @ param filterPrivateIpAddress Optional - The private IPv4 address or addresses of the network * interface . * @ param filterPrivateDnsName Optional - The private DNS name of the network interface ( IPv4 ) . * @ param filterRequesterId Optional - The ID of the entity that launched the instance on your * behalf ( for example , AWS Management Console , Auto Scaling , and so on ) . * @ param filterRequesterManaged Optional - Indicates whether the network interface is being managed * by an AWS service ( for example , AWS Management Console , Auto Scaling , * and so on ) . * @ param filterSourceDestCheck Optional - Indicates whether the network interface performs * source / destination checking . A value of true means checking is * enabled , and false means checking is disabled . The value must be * false for the network interface to perform network address * translation ( NAT ) in your VPC . * @ param filterStatus Optional - The status of the network interface . 
If the network * interface is not attached to an instance , the status is available ; * if a network interface is attached to an instance the status is in - use . * Valid values : in - use , available . * @ param filterSubnetId Optional - The ID of the subnet for the network interface . * @ param filterTag Optional - The key / value combination of a tag assigned to the resource . * Specify the key of the tag in the filter name and the value of the * tag in the filter value . * Example : Purpose1 = X , Purpose2 = B * @ param filterTagKey Optional - The key of a tag assigned to the resource . This filter is * independent of the filterTagValue filter . For example , if you use both * filterTagKey = " Purpose " and filterTagValue = " X " , you get any * resources assigned both the tag key Purpose ( regardless of what * the tag ' s value is ) , and the tag value X ( regardless of what the * tag ' s key is ) . If you want to list only resources where Purpose is X , * see the filterTag . * @ param filterTagValue Optional - The value of a tag assigned to the resource . This filter * is independent of the filterTagKey . * @ param filterVpcId Optional - The ID of the VPC for the network interface . * @ param networkInterfaceId Optional - String that contains one or more network interface IDs . * Example : " eni - 12345678 , eni - 87654321" * Default : " " * @ return A map with strings as keys and strings as values that contains : outcome of the action ( or failure message * and the exception if there is one ) , returnCode of the operation and the ID of the request */ @ Action ( name = "Describe Network Interfaces" , outputs = { } }
@ Output ( RETURN_CODE ) , @ Output ( RETURN_RESULT ) , @ Output ( EXCEPTION ) } , responses = { @ Response ( text = SUCCESS , field = RETURN_CODE , value = ReturnCodes . SUCCESS , matchType = MatchType . COMPARE_EQUAL , responseType = ResponseType . RESOLVED ) , @ Response ( text = FAILURE , field = RETURN_CODE , value = ReturnCodes . FAILURE , matchType = MatchType . COMPARE_EQUAL , responseType = ResponseType . ERROR , isOnFail = true ) } ) public Map < String , String > execute ( @ Param ( value = ENDPOINT ) String endpoint , @ Param ( value = IDENTITY , required = true ) String identity , @ Param ( value = CREDENTIAL , required = true , encrypted = true ) String credential , @ Param ( value = PROXY_HOST ) String proxyHost , @ Param ( value = PROXY_PORT ) String proxyPort , @ Param ( value = PROXY_USERNAME ) String proxyUsername , @ Param ( value = PROXY_PASSWORD , encrypted = true ) String proxyPassword , @ Param ( value = HEADERS ) String headers , @ Param ( value = QUERY_PARAMS ) String queryParams , @ Param ( value = VERSION ) String version , @ Param ( value = DELIMITER ) String delimiter , @ Param ( value = FILTER_ADDRESSES_PRIVATE_IP_ADDRESS ) String filterAddressesPrivateIpAddress , @ Param ( value = FILTER_ADDRESSES_PRIMARY ) String filterAddressesPrimary , @ Param ( value = FILTER_ADDRESSES_ASSOCIATION_PUBLIC_IP ) String filterAddressesAssociationPublicIp , @ Param ( value = FILTER_ADDRESSES_ASSOCIATION_OWNER_ID ) String filterAddressesAssociationOwnerId , @ Param ( value = FILTER_ASSOCIATION_ASSOCIATION_ID ) String filterAssociationAssociationId , @ Param ( value = FILTER_ASSOCIATION_ALLOCATION_ID ) String filterAssociationAllocationId , @ Param ( value = FILTER_ASSOCIATION_IP_OWNER_ID ) String filterAssociationIpOwnerId , @ Param ( value = FILTER_ASSOCIATION_PUBLIC_IP ) String filterAssociationPublicIp , @ Param ( value = FILTER_ASSOCIATION_PUBLIC_DNS_NAME ) String filterAssociationPublicDnsName , @ Param ( value = FILTER_ATTACHMENT_ATTACHMENT_ID ) 
String filterAttachmentAttachmentId , @ Param ( value = FILTER_ATTACHMENT_ATTACH_TIME ) String filterAttachmentAttachTime , @ Param ( value = FILTER_ATTACHMENT_DELETE_ON_TERMINATION ) String filterAttachmentDeleteOnTermination , @ Param ( value = FILTER_ATTACHMENT_DEVICE_INDEX ) String filterAttachmentDeviceIndex , @ Param ( value = FILTER_ATTACHMENT_INSTANCE_ID ) String filterAttachmentInstanceId , @ Param ( value = FILTER_ATTACHMENT_INSTANCE_OWNER_ID ) String filterAttachmentInstanceOwnerId , @ Param ( value = FILTER_ATTACHMENT_NAT_GATEWAY_ID ) String filterAttachmentNatGatewayId , @ Param ( value = FILTER_ATTACHMENT_STATUS ) String filterAttachmentStatus , @ Param ( value = FILTER_AVAILABILITY_ZONE ) String filterAvailabilityZone , @ Param ( value = FILTER_DESCRIPTION ) String filterDescription , @ Param ( value = FILTER_GROUP_ID ) String filterGroupId , @ Param ( value = FILTER_GROUP_NAME ) String filterGroupName , @ Param ( value = FILTER_IPV6_ADDRESSES_IPV6_ADDRESS ) String filterIpv6AddressesIpv6Address , @ Param ( value = FILTER_MAC_ADDRESS ) String filterMacAddress , @ Param ( value = FILTER_NETWORK_INTERFACE_ID ) String filterNetworkInterfaceId , @ Param ( value = FILTER_OWNER_ID ) String filterOwnerId , @ Param ( value = FILTER_PRIVATE_IP_ADDRESS ) String filterPrivateIpAddress , @ Param ( value = FILTER_PRIVATE_DNS_NAME ) String filterPrivateDnsName , @ Param ( value = FILTER_REQUESTER_ID ) String filterRequesterId , @ Param ( value = FILTER_REQUESTER_MANAGED ) String filterRequesterManaged , @ Param ( value = FILTER_SOURCE_DEST_CHECK ) String filterSourceDestCheck , @ Param ( value = FILTER_STATUS ) String filterStatus , @ Param ( value = FILTER_SUBNET_ID ) String filterSubnetId , @ Param ( value = FILTER_TAG ) String filterTag , @ Param ( value = FILTER_TAG_KEY ) String filterTagKey , @ Param ( value = FILTER_TAG_VALUE ) String filterTagValue , @ Param ( value = FILTER_VPC_ID ) String filterVpcId , @ Param ( value = NETWORK_INTERFACE_ID ) String 
networkInterfaceId ) { try { version = getDefaultStringInput ( version , NETWORK_DEFAULT_API_VERSION ) ; final CommonInputs commonInputs = new CommonInputs . Builder ( ) . withEndpoint ( endpoint , EC2_API , EMPTY ) . withIdentity ( identity ) . withCredential ( credential ) . withProxyHost ( proxyHost ) . withProxyPort ( proxyPort ) . withProxyUsername ( proxyUsername ) . withProxyPassword ( proxyPassword ) . withHeaders ( headers ) . withQueryParams ( queryParams ) . withVersion ( version ) . withDelimiter ( delimiter ) . withAction ( DESCRIBE_NETWORK_INTERFACES ) . withApiService ( EC2_API ) . withRequestUri ( EMPTY ) . withRequestPayload ( EMPTY ) . withHttpClientMethod ( HTTP_CLIENT_METHOD_GET ) . build ( ) ; final NetworkInputs networkInputs = new NetworkInputs . Builder ( ) . withNetworkInterfaceId ( networkInterfaceId ) . build ( ) ; final List < ImmutablePair < String , String > > filterPairs = Arrays . asList ( of ( NetworkFilter . ADDRESSES_ASSOCIATION_OWNER_ID , filterAttachmentAttachTime ) , of ( NetworkFilter . ADDRESSES_PRIVATE_IP_ADDRESS , filterAddressesPrivateIpAddress ) , of ( NetworkFilter . ADDRESSES_PRIMARY , filterAddressesPrimary ) , of ( NetworkFilter . ADDRESSES_ASSOCIATION_PUBLIC_IP , filterAddressesAssociationPublicIp ) , of ( NetworkFilter . ADDRESSES_ASSOCIATION_OWNER_ID , filterAddressesAssociationOwnerId ) , of ( NetworkFilter . ASSOCIATION_ASSOCIATION_ID , filterAssociationAssociationId ) , of ( NetworkFilter . ASSOCIATION_ALLOCATION_ID , filterAssociationAllocationId ) , of ( NetworkFilter . ASSOCIATION_IP_OWNER_ID , filterAssociationIpOwnerId ) , of ( NetworkFilter . ASSOCIATION_PUBLIC_IP , filterAssociationPublicIp ) , of ( NetworkFilter . ASSOCIATION_PUBLIC_DNS_NAME , filterAssociationPublicDnsName ) , of ( NetworkFilter . ATTACHMENT_ATTACHMENT_ID , filterAttachmentAttachmentId ) , of ( NetworkFilter . ATTACHMENT_ATTACH_TIME , filterAttachmentAttachTime ) , of ( NetworkFilter . 
ATTACHMENT_DELETE_ON_TERMINATION , filterAttachmentDeleteOnTermination ) , of ( NetworkFilter . ATTACHMENT_DEVICE_INDEX , filterAttachmentDeviceIndex ) , of ( NetworkFilter . ATTACHMENT_INSTANCE_ID , filterAttachmentInstanceId ) , of ( NetworkFilter . ATTACHMENT_INSTANCE_OWNER_ID , filterAttachmentInstanceOwnerId ) , of ( NetworkFilter . ATTACHMENT_NAT_GATEWAY_ID , filterAttachmentNatGatewayId ) , of ( NetworkFilter . ATTACHMENT_STATUS , filterAttachmentStatus ) , of ( NetworkFilter . AVAILABILITY_ZONE , filterAvailabilityZone ) , of ( NetworkFilter . DESCRIPTION , filterDescription ) , of ( NetworkFilter . GROUP_ID , filterGroupId ) , of ( NetworkFilter . GROUP_NAME , filterGroupName ) , of ( NetworkFilter . IPV6_ADDRESSES_IPV6_ADDRESS , filterIpv6AddressesIpv6Address ) , of ( NetworkFilter . MAC_ADDRESS , filterMacAddress ) , of ( NetworkFilter . NETWORK_INTERFACE_ID , filterNetworkInterfaceId ) , of ( NetworkFilter . OWNER_ID , filterOwnerId ) , of ( NetworkFilter . PRIVATE_IP_ADDRESS , filterPrivateIpAddress ) , of ( NetworkFilter . PRIVATE_DNS_NAME , filterPrivateDnsName ) , of ( NetworkFilter . REQUESTER_ID , filterRequesterId ) , of ( NetworkFilter . REQUESTER_MANAGED , filterRequesterManaged ) , of ( NetworkFilter . SOURCE_DEST_CHECK , filterSourceDestCheck ) , of ( NetworkFilter . STATUS , filterStatus ) , of ( NetworkFilter . SUBNET_ID , filterSubnetId ) , of ( NetworkFilter . TAG_KEY , filterTagKey ) , of ( NetworkFilter . TAG_VALUE , filterTagValue ) , of ( NetworkFilter . VPC_ID , filterVpcId ) ) ; final FilterInputs . Builder filterInputsBuilder = new FilterInputs . Builder ( ) . withDelimiter ( commonInputs . getDelimiter ( ) ) ; for ( ImmutablePair < String , String > filterPair : filterPairs ) { if ( isNotEmpty ( filterPair . getRight ( ) ) ) { filterInputsBuilder . withNewFilter ( filterPair . getLeft ( ) , filterPair . getRight ( ) ) ; } } if ( isNotEmpty ( filterTag ) ) { processTagFilter ( filterTag , commonInputs . 
getDelimiter ( ) , filterInputsBuilder ) ; } final FilterInputs filterInputs = filterInputsBuilder . build ( ) ; return new QueryApiExecutor ( ) . execute ( commonInputs , networkInputs , filterInputs ) ; } catch ( Exception exception ) { return ExceptionProcessor . getExceptionResult ( exception ) ; }
public class ESQuery { /** * Gets the aggregation . * @ param expression * the expression * @ param entityMetadata * the entity metadata * @ return the aggregation */ private MetricsAggregationBuilder getMetricsAggregation ( Expression expression , EntityMetadata entityMetadata ) { } }
AggregateFunction function = ( AggregateFunction ) expression ; MetamodelImpl metaModel = ( MetamodelImpl ) kunderaMetadata . getApplicationMetadata ( ) . getMetamodel ( entityMetadata . getPersistenceUnit ( ) ) ; String jPAColumnName = KunderaCoreUtils . getJPAColumnName ( function . toParsedText ( ) , entityMetadata , metaModel ) ; MetricsAggregationBuilder aggregationBuilder = null ; switch ( function . getIdentifier ( ) ) { case Expression . MIN : aggregationBuilder = AggregationBuilders . min ( function . toParsedText ( ) ) . field ( jPAColumnName ) ; break ; case Expression . MAX : aggregationBuilder = AggregationBuilders . max ( function . toParsedText ( ) ) . field ( jPAColumnName ) ; break ; case Expression . SUM : aggregationBuilder = AggregationBuilders . sum ( function . toParsedText ( ) ) . field ( jPAColumnName ) ; break ; case Expression . AVG : aggregationBuilder = AggregationBuilders . avg ( function . toParsedText ( ) ) . field ( jPAColumnName ) ; break ; case Expression . COUNT : aggregationBuilder = AggregationBuilders . count ( function . toParsedText ( ) ) . field ( jPAColumnName ) ; break ; } return aggregationBuilder ;
public class ConfigurationMetadataBuilder { /** * Visit a { @ link io . micronaut . context . annotation . ConfigurationProperties } class . * @ param type The type of the { @ link io . micronaut . context . annotation . ConfigurationProperties } * @ param description A description * @ return This { @ link ConfigurationMetadata } */ public ConfigurationMetadata visitProperties ( T type , @ Nullable String description ) { } }
String path = buildTypePath ( type , type ) ; ConfigurationMetadata configurationMetadata = new ConfigurationMetadata ( ) ; configurationMetadata . name = NameUtils . hyphenate ( path , true ) ; configurationMetadata . type = getTypeString ( type ) ; configurationMetadata . description = description ; this . configurations . add ( configurationMetadata ) ; return configurationMetadata ;
public class HelpFormatter {
    /**
     * Finds the next text wrap position after <code>startPos</code> for the text
     * in <code>text</code> with the column width <code>width</code>. The wrap
     * point is the last position before startPos + width having a whitespace
     * character (space, \n, \r). If there is no whitespace character before
     * startPos + width, it will return startPos + width.
     *
     * @param sText
     *        The text being searched for the wrap position
     * @param nWidth
     *        width of the wrapped text
     * @param nStartPos
     *        position from which to start the lookup whitespace character
     * @return position on which the text must be wrapped or -1 if the wrap
     *         position is at the end of the text
     */
    @CheckForSigned
    protected static int findWrapPos(final String sText, final int nWidth, final int nStartPos) {
        // An explicit '\n' or '\t' within range wins outright and wraps just after it.
        // NOTE(review): these compare against nWidth rather than nStartPos + nWidth,
        // mirroring the historical behaviour — preserved as-is for compatibility.
        final int nlPos = sText.indexOf('\n', nStartPos);
        if (nlPos != -1 && nlPos <= nWidth) {
            return nlPos + 1;
        }
        final int tabPos = sText.indexOf('\t', nStartPos);
        if (tabPos != -1 && tabPos <= nWidth) {
            return tabPos + 1;
        }
        // The remainder of the text fits entirely within the width.
        if (nStartPos + nWidth >= sText.length()) {
            return -1;
        }
        // Scan backwards from the width limit for the last whitespace character.
        int pos = nStartPos + nWidth;
        while (pos >= nStartPos) {
            final char c = sText.charAt(pos);
            if (c == ' ' || c == '\n' || c == '\r') {
                break;
            }
            pos--;
        }
        if (pos > nStartPos) {
            return pos;
        }
        // No whitespace found: hard-chop at the width limit.
        final int chop = nStartPos + nWidth;
        return chop == sText.length() ? -1 : chop;
    }
}
public class StreamConduit {
    /**
     * Create the pump to handle process output.
     *
     * Stores the result of {@code createPump} in {@code outputThread}; the
     * CLOSE_STDOUT_AND_STDERR_INSTREAMS_WHEN_EXHAUSTED flag presumably makes the
     * pump close both input streams once drained — confirm against createPump.
     *
     * @param is the <code>InputStream</code>.
     * @param os the <code>OutputStream</code>.
     */
    private void createProcessOutputPump(InputStream is, OutputStream os) {
        outputThread = createPump(is, os, CLOSE_STDOUT_AND_STDERR_INSTREAMS_WHEN_EXHAUSTED);
    }
}
public class PageableParameterBuilderPlugin { /** * Create a sort parameter . * Override it if needed . Set a default value or further description for example . * @ param context { @ link Pageable } parameter context * @ return The sort parameter */ protected Parameter createSortParameter ( ParameterContext context ) { } }
ModelReference stringModel = createModelRefFactory ( context ) . apply ( resolver . resolve ( List . class , String . class ) ) ; return new ParameterBuilder ( ) . name ( getSortName ( ) ) . parameterType ( SORT_TYPE ) . modelRef ( stringModel ) . allowMultiple ( true ) . description ( SORT_DESCRIPTION ) . build ( ) ;
public class JRDF { /** * Tells whether the given resources are equivalent , with one given as a URI * string . * @ param u1 * first resource . * @ param u2 * second resource , given as a URI string . * @ return true if equivalent , false otherwise . */ public static boolean sameResource ( URIReference u1 , String u2 ) { } }
return u1 . getURI ( ) . toString ( ) . equals ( u2 ) ;
public class XmlUtil { /** * Convert XML { @ link Document } to its string representation . * @ param document for conversion * @ return - string representation of XML { @ link Document } * @ throws Exception - if { @ link DocumentBuilder } is not initialized */ public static String xmlToString ( Document document ) throws Exception { } }
if ( transformer == null ) { throw new Exception ( "Transformer is null" ) ; } Source xmlSource = new DOMSource ( document ) ; StringWriter stringWriter = new StringWriter ( ) ; Result result = new StreamResult ( stringWriter ) ; transformer . transform ( xmlSource , result ) ; return stringWriter . toString ( ) ;
public class MemorySegment {
    /**
     * Bulk copy method. Copies {@code numBytes} bytes from this memory segment, starting at position
     * {@code offset} to the target memory segment. The bytes will be put into the target segment
     * starting at position {@code targetOffset}.
     *
     * @param offset The position where the bytes are started to be read from in this memory segment.
     * @param target The memory segment to copy the bytes to.
     * @param targetOffset The position in the target memory segment to copy the chunk to.
     * @param numBytes The number of bytes to copy.
     * @throws IndexOutOfBoundsException If either of the offsets is invalid, or the source segment does not
     *         contain the given number of bytes (starting from offset), or the target segment does
     *         not have enough space for the bytes (counting from targetOffset).
     */
    public final void copyTo(int offset, MemorySegment target, int targetOffset, int numBytes) {
        // heapMemory may be null for off-heap segments; Unsafe.copyMemory accepts a
        // null base object and treats the pointer as an absolute address in that case.
        final byte[] thisHeapRef = this.heapMemory;
        final byte[] otherHeapRef = target.heapMemory;
        final long thisPointer = this.address + offset;
        final long otherPointer = target.address + targetOffset;
        // (numBytes | offset | targetOffset) >= 0 checks all three for non-negativity in one
        // branch (any negative operand sets the sign bit of the OR). The pointer comparisons
        // are written as "pointer <= limit - numBytes" to avoid overflow of pointer + numBytes.
        if ((numBytes | offset | targetOffset) >= 0
                && thisPointer <= this.addressLimit - numBytes
                && otherPointer <= target.addressLimit - numBytes) {
            UNSAFE.copyMemory(thisHeapRef, thisPointer, otherHeapRef, otherPointer, numBytes);
        }
        // address > addressLimit is the sentinel state of a freed segment — checked only on the
        // slow path so the common case pays for a single branch.
        else if (this.address > this.addressLimit) {
            throw new IllegalStateException("this memory segment has been freed.");
        }
        else if (target.address > target.addressLimit) {
            throw new IllegalStateException("target memory segment has been freed.");
        }
        else {
            throw new IndexOutOfBoundsException(
                    String.format("offset=%d, targetOffset=%d, numBytes=%d, address=%d, targetAddress=%d",
                            offset, targetOffset, numBytes, this.address, target.address));
        }
    }
}
public class HttpFields { /** * / * Write Extra HTTP headers . */ public void write ( Writer writer ) throws IOException { } }
synchronized ( writer ) { for ( int i = 0 ; i < _fields . size ( ) ; i ++ ) { Field field = ( Field ) _fields . get ( i ) ; if ( field != null ) field . write ( writer , _version ) ; } writer . write ( __CRLF ) ; }
public class LSrtIntConsumerBuilder { /** * One of ways of creating builder . This is possibly the least verbose way where compiler should be able to guess the generic parameters . */ @ Nonnull public static LSrtIntConsumer srtIntConsumerFrom ( Consumer < LSrtIntConsumerBuilder > buildingFunction ) { } }
LSrtIntConsumerBuilder builder = new LSrtIntConsumerBuilder ( ) ; buildingFunction . accept ( builder ) ; return builder . build ( ) ;
public class KuduDBDataHandler { /** * Checks for column . * @ param schema * the schema * @ param columnName * the column name * @ return true , if successful */ public static boolean hasColumn ( Schema schema , String columnName ) { } }
try { schema . getColumn ( columnName ) ; return true ; } catch ( IllegalArgumentException e ) { return false ; }
public class MultiVertexCentricQueryBuilder { /** * Query Construction */ @ Override public TitanMultiVertexQuery addVertex ( Vertex vertex ) { } }
assert vertex != null ; assert vertex instanceof InternalVertex ; vertices . add ( ( ( InternalVertex ) vertex ) . it ( ) ) ; return this ;
public class ProjectAnalysisTaskContainerPopulator {
    /**
     * List of all objects to be injected in the picocontainer dedicated to computation stack.
     * Does not contain the steps declared in {@link ReportComputationSteps#orderedStepClasses()}.
     *
     * NOTE: ordering within this list is significant in places — see the inline
     * comments below before reordering entries.
     */
    private static List<Object> componentClasses() {
        return Arrays.asList(
            PostProjectAnalysisTasksExecutor.class,
            ComputationStepExecutor.class,

            // messages/warnings
            CeTaskMessagesImpl.class,
            FileSourceDataWarnings.class,

            // File System
            new ComputationTempFolderProvider(),

            DbMigrationModule.class,
            ReportModulesPath.class,
            MetricModule.class,

            // holders
            AnalysisMetadataHolderImpl.class,
            CrossProjectDuplicationStatusHolderImpl.class,
            BatchReportDirectoryHolderImpl.class,
            TreeRootHolderImpl.class,
            PeriodHolderImpl.class,
            QualityGateHolderImpl.class,
            QualityGateStatusHolderImpl.class,
            RatingSettings.class,
            ActiveRulesHolderImpl.class,
            MeasureComputersHolderImpl.class,
            MutableTaskResultHolderImpl.class,
            BatchReportReaderImpl.class,
            MergeAndTargetBranchComponentUuids.class,
            SiblingComponentsWithOpenIssues.class,

            // repositories
            LanguageRepositoryImpl.class,
            MeasureRepositoryImpl.class,
            EventRepositoryImpl.class,
            ConfigurationRepositoryImpl.class,
            DbIdsRepositoryImpl.class,
            DisabledComponentsHolderImpl.class,
            QualityGateServiceImpl.class,
            EvaluationResultTextConverterImpl.class,
            SourceLinesRepositoryImpl.class,
            SourceHashRepositoryImpl.class,
            SourceLinesDiffImpl.class,
            ScmInfoRepositoryImpl.class,
            ScmInfoDbLoader.class,
            DuplicationRepositoryImpl.class,
            SourceLinesHashRepositoryImpl.class,
            DbLineHashVersion.class,
            SignificantCodeRepository.class,
            SourceLinesHashCache.class,
            NewLinesRepository.class,
            FileSourceDataComputer.class,
            SourceLineReadersFactory.class,
            QProfileStatusRepositoryImpl.class,

            // issues
            RuleRepositoryImpl.class,
            ScmAccountToUserLoader.class,
            ScmAccountToUser.class,
            IssueCache.class,
            DefaultAssignee.class,
            IssueVisitors.class,
            IssueLifecycle.class,
            ComponentsWithUnprocessedIssues.class,
            ComponentIssuesRepositoryImpl.class,
            IssueFilter.class,

            // common rules
            CommonRuleEngineImpl.class,
            BranchCoverageRule.class,
            LineCoverageRule.class,
            CommentDensityRule.class,
            DuplicatedBlockRule.class,
            TestErrorRule.class,
            SkippedTestRule.class,

            // order is important: RuleTypeCopier must be the first one. And DebtAggregator must be before NewDebtAggregator (new debt requires
            // debt)
            RuleTagsCopier.class,
            IssueCreationDateCalculator.class,
            DebtCalculator.class,
            EffortAggregator.class,
            NewEffortAggregator.class,
            IssueAssigner.class,
            IssueCounter.class,
            MovedIssueVisitor.class,
            IssuesRepositoryVisitor.class,
            RemoveProcessedComponentsVisitor.class,

            // visitors: order is important, measure computers must be executed at the end in order to access to every measures / issues
            LoadComponentUuidsHavingOpenIssuesVisitor.class,
            IntegrateIssuesVisitor.class,
            CloseIssuesOnRemovedComponentsVisitor.class,
            MaintainabilityMeasuresVisitor.class,
            NewMaintainabilityMeasuresVisitor.class,
            ReliabilityAndSecurityRatingMeasuresVisitor.class,
            NewReliabilityAndSecurityRatingMeasuresVisitor.class,
            LastCommitVisitor.class,
            MeasureComputersVisitor.class,

            UpdateConflictResolver.class,
            TrackerBaseInputFactory.class,
            TrackerRawInputFactory.class,
            TrackerMergeOrTargetBranchInputFactory.class,
            ClosedIssuesInputFactory.class,
            Tracker.class,
            TrackerExecution.class,
            ShortBranchOrPullRequestTrackerExecution.class,
            MergeBranchTrackerExecution.class,
            ComponentIssuesLoader.class,
            BaseIssuesLoader.class,
            IssueTrackingDelegator.class,
            BranchPersisterImpl.class,
            SiblingsIssuesLoader.class,
            SiblingsIssueMerger.class,

            // filemove
            ScoreMatrixDumperImpl.class,
            SourceSimilarityImpl.class,
            FileSimilarityImpl.class,
            MutableMovedFilesRepositoryImpl.class,
            AddedFileRepositoryImpl.class,

            // duplication
            IntegrateCrossProjectDuplications.class,
            DuplicationMeasures.class,

            // views
            ViewIndex.class,

            BranchLoader.class,
            MeasureToMeasureDto.class,
            SmallChangesetQualityGateSpecialCase.class,

            // webhooks
            WebhookPostTask.class,

            // notifications
            NotificationFactory.class);
    }
}
public class HttpsHealthCheckClient { /** * Returns the specified HttpsHealthCheck resource . Gets a list of available HTTPS health checks * by making a list ( ) request . * < p > Sample code : * < pre > < code > * try ( HttpsHealthCheckClient httpsHealthCheckClient = HttpsHealthCheckClient . create ( ) ) { * ProjectGlobalHttpsHealthCheckName httpsHealthCheck = ProjectGlobalHttpsHealthCheckName . of ( " [ PROJECT ] " , " [ HTTPS _ HEALTH _ CHECK ] " ) ; * HttpsHealthCheck2 response = httpsHealthCheckClient . getHttpsHealthCheck ( httpsHealthCheck . toString ( ) ) ; * < / code > < / pre > * @ param httpsHealthCheck Name of the HttpsHealthCheck resource to return . * @ throws com . google . api . gax . rpc . ApiException if the remote call fails */ @ BetaApi public final HttpsHealthCheck2 getHttpsHealthCheck ( String httpsHealthCheck ) { } }
GetHttpsHealthCheckHttpRequest request = GetHttpsHealthCheckHttpRequest . newBuilder ( ) . setHttpsHealthCheck ( httpsHealthCheck ) . build ( ) ; return getHttpsHealthCheck ( request ) ;
public class GenerateSubsystemsDefinition {
    /**
     * arg[0] - subsystems definition spec (e.g. logging:osgi, osgi:eager, deployment-scanner)
     * arg[1] - subsystem profiles (e.g. default, ha, full, full-ha)
     * arg[2] - subsystem path prefix (e.g. configuration/subsystems)
     * arg[3] - the output file (e.g. domain-subsystems.xml)
     *
     * (NOTE(review): the last argument was previously mis-documented as arg[4];
     * the code reads it from index 3.)
     */
    public static void main(String[] args) throws Exception {
        if (args == null)
            throw new IllegalArgumentException("Null args");
        if (args.length < 4)
            throw new IllegalArgumentException("Invalid args: " + Arrays.asList(args));
        // spec := subsystem:supplement
        // definitions := definitions,spec
        int index = 0;
        if (args[index] == null || args[index].isEmpty()) {
            throw new IllegalArgumentException("No configured subsystems");
        }
        String definitions = args[index++];
        // Profiles default to a single unnamed profile when arg[1] is empty.
        String[] profiles = new String[]{""};
        if (args[index] != null && !args[index].isEmpty()) {
            profiles = args[index].split(",");
        }
        index++;
        if (args[index] == null || args[index].isEmpty()) {
            throw new IllegalArgumentException("No file prefix");
        }
        String filePrefix = args[index++];
        // Normalize the prefix so it can be concatenated with file names directly.
        if (!filePrefix.endsWith("/"))
            filePrefix += "/";
        if (args[index] == null || args[index].isEmpty()) {
            throw new IllegalArgumentException("No output file");
        }
        File outputFile = new File(args[index++]);
        // Parse "subsystem[:supplement]" specs; a missing supplement becomes null.
        List<SubsystemConfig> configs = new ArrayList<SubsystemConfig>();
        for (String spec : definitions.split(",")) {
            String[] split = spec.split(":");
            String subsystem = split[0];
            String supplement = split.length > 1 ? split[1] : null;
            configs.add(new SubsystemConfig(subsystem, supplement));
        }
        new GenerateSubsystemsDefinition(configs, profiles, filePrefix, outputFile).process();
    }
}
public class Connection {
    /**
     * Called when there is a failure reading or writing to this connection. We notify the
     * connection manager and close ourselves down.
     *
     * @param ioe the I/O failure that was observed on this connection
     */
    public void networkFailure(IOException ioe) {
        // if we're already closed, then something is seriously funny; log with a stack
        // trace (the Exception argument) so the unexpected call site can be located
        if (isClosed()) {
            log.warning("Failure reported on closed connection " + this + ".", new Exception());
            return;
        }

        // let the connection manager know we're hosed
        _cmgr.connectionFailed(this, ioe);

        // and close our socket
        closeSocket();
    }
}
public class MessagingTransportFactory { /** * Creates a transport . * @ param hostAddress a host address * @ param port a listening port * @ param clientStage a client stage * @ param serverStage a server stage * @ param numberOfTries a number of tries * @ param retryTimeout a timeout for retry */ @ Override public Transport newInstance ( final String hostAddress , final int port , final EStage < TransportEvent > clientStage , final EStage < TransportEvent > serverStage , final int numberOfTries , final int retryTimeout ) { } }
try { TcpPortProvider tcpPortProvider = Tang . Factory . getTang ( ) . newInjector ( ) . getInstance ( TcpPortProvider . class ) ; return newInstance ( hostAddress , port , clientStage , serverStage , numberOfTries , retryTimeout , tcpPortProvider ) ; } catch ( final InjectionException e ) { throw new RuntimeException ( e ) ; }
public class ZooKeeper {
    /**
     * The asynchronous version of exists. The request doesn't actually complete until
     * the asynchronous callback is called.
     *
     * @param path the znode path to check
     * @param watch whether to leave the default watcher on the node
     * @param cb callback invoked with the result
     * @param ctx opaque context object passed through to the callback
     * @see #exists(String, boolean)
     */
    public void exists(String path, boolean watch, StatCallback cb, Object ctx) {
        // Map the boolean flag onto the session's default watcher (or no watcher at all)
        // and delegate to the Watcher-based overload.
        exists(path, watch ? watchManager.defaultWatcher : null, cb, ctx);
    }
}
public class ScoreBasedEvictionComparatorSupplier { /** * Calculates an eviction score . * Entries with a higher eviction score should be evicted first . */ @ VisibleForTesting float calculateScore ( DiskStorage . Entry entry , long now ) { } }
long ageMs = now - entry . getTimestamp ( ) ; long bytes = entry . getSize ( ) ; return mAgeWeight * ageMs + mSizeWeight * bytes ;
public class SimpleDOReader {
    /**
     * {@inheritDoc}
     *
     * Delegates directly to the underlying digital object's relationship check.
     */
    @Override
    public boolean hasRelationship(SubjectNode subject, PredicateNode predicate, ObjectNode object) {
        return m_obj.hasRelationship(subject, predicate, object);
    }
}
public class DisconfAspectJ {
    /**
     * Fetches configuration-file data; the aspect only intercepts when remote disconf
     * is enabled. If a value is found in the disconf store it is returned directly,
     * otherwise the original method proceeds.
     *
     * @param pjp the intercepted join point
     * @param disconfFileItem the annotation on the intercepted method naming the config item
     * @return the disconf store value if present, otherwise the original method's result
     * @throws Throwable rethrown from the original method invocation
     */
    @Around("anyPublicMethod() && @annotation(disconfFileItem)")
    public Object decideAccess(ProceedingJoinPoint pjp, DisconfFileItem disconfFileItem) throws Throwable {

        if (DisClientConfig.getInstance().ENABLE_DISCONF) {

            MethodSignature ms = (MethodSignature) pjp.getSignature();
            Method method = ms.getMethod();

            // File name: taken from the @DisconfFile annotation on the declaring class
            Class<?> cls = method.getDeclaringClass();
            DisconfFile disconfFile = cls.getAnnotation(DisconfFile.class);

            // Field name: resolve the backing field for this accessor method
            Field field = MethodUtils.getFieldFromMethod(method, cls.getDeclaredFields(), DisConfigTypeEnum.FILE);
            if (field != null) {

                // Request the configuration data from the store
                DisconfStoreProcessor disconfStoreProcessor =
                        DisconfStoreProcessorFactory.getDisconfStoreFileProcessor();
                Object ret = disconfStoreProcessor.getConfig(disconfFile.filename(), disconfFileItem.name());
                if (ret != null) {
                    LOGGER.debug("using disconf store value: " + disconfFile.filename() + " ("
                            + disconfFileItem.name() + " , " + ret + ")");
                    return ret;
                }
            }
        }

        Object rtnOb;

        try {
            // Return the original value when no store value is available
            rtnOb = pjp.proceed();
        } catch (Throwable t) {
            LOGGER.info(t.getMessage());
            throw t;
        }

        return rtnOb;
    }
}
public class WidgetsUtils { /** * This method detach a widget of its parent without doing a physical * detach ( DOM manipulation ) . */ public static void doLogicalDetachFromHtmlPanel ( Widget w ) { } }
Widget parent = w . getParent ( ) ; if ( parent instanceof HTMLPanel ) { complexPanelGetChildren ( ( HTMLPanel ) parent ) . remove ( w ) ; widgetSetParent ( w , null ) ; } else { throw new IllegalStateException ( "You can only use this method to detach a child from an HTMLPanel" ) ; }
public class appfwpolicy_lbvserver_binding { /** * Use this API to fetch appfwpolicy _ lbvserver _ binding resources of given name . */ public static appfwpolicy_lbvserver_binding [ ] get ( nitro_service service , String name ) throws Exception { } }
appfwpolicy_lbvserver_binding obj = new appfwpolicy_lbvserver_binding ( ) ; obj . set_name ( name ) ; appfwpolicy_lbvserver_binding response [ ] = ( appfwpolicy_lbvserver_binding [ ] ) obj . get_resources ( service ) ; return response ;
public class Samples {
    /**
     * Starts the {@code Samples}.
     *
     * @param args command line args: expects the map files as multiple parameters
     *             with possible SRTM hgt folder as 1st argument.
     */
    public static void main(String[] args) {
        // Multithreaded map rendering
        Parameters.NUMBER_OF_THREADS = 2;

        // Square frame buffer (disabled here)
        Parameters.SQUARE_FRAME_BUFFER = false;

        // Optional hillshading: if the first argument is a DEM folder, configure a
        // hillshading render config and drop that argument before map-file parsing.
        HillsRenderConfig hillsCfg = null;
        File demFolder = getDemFolder(args);
        if (demFolder != null) {
            MemoryCachingHgtReaderTileSource tileSource = new MemoryCachingHgtReaderTileSource(
                    demFolder, new DiffuseLightShadingAlgorithm(), AwtGraphicFactory.INSTANCE);
            tileSource.setEnableInterpolationOverlap(true);
            hillsCfg = new HillsRenderConfig(tileSource);
            hillsCfg.indexOnThread();
            args = Arrays.copyOfRange(args, 1, args.length);
        }

        // When showing a raster map, no local map files are used.
        List<File> mapFiles = SHOW_RASTER_MAP ? null : getMapFiles(args);
        final MapView mapView = createMapView();
        final BoundingBox boundingBox = addLayers(mapView, mapFiles, hillsCfg);

        // View position/zoom is persisted between runs via Java preferences.
        final PreferencesFacade preferencesFacade =
                new JavaPreferences(Preferences.userNodeForPackage(Samples.class));

        final JFrame frame = new JFrame();
        frame.setTitle("Mapsforge Samples");
        frame.add(mapView);
        frame.pack();
        frame.setSize(1024, 768);
        frame.setLocationRelativeTo(null);
        // DO_NOTHING so the close can be confirmed (and state saved) in windowClosing.
        frame.setDefaultCloseOperation(WindowConstants.DO_NOTHING_ON_CLOSE);
        frame.addWindowListener(new WindowAdapter() {
            @Override
            public void windowClosing(WindowEvent e) {
                // Confirm exit; on YES, persist the model, release resources and
                // switch to EXIT_ON_CLOSE so the pending close proceeds.
                int result = JOptionPane.showConfirmDialog(frame, MESSAGE, TITLE, JOptionPane.YES_NO_OPTION);
                if (result == JOptionPane.YES_OPTION) {
                    mapView.getModel().save(preferencesFacade);
                    mapView.destroyAll();
                    AwtGraphicFactory.clearResourceMemoryCache();
                    frame.setDefaultCloseOperation(WindowConstants.EXIT_ON_CLOSE);
                }
            }

            @Override
            public void windowOpened(WindowEvent e) {
                // Restore the saved position; if there is none (zoom 0) or the saved
                // center falls outside the loaded maps, recenter on the map bounds.
                final Model model = mapView.getModel();
                model.init(preferencesFacade);
                if (model.mapViewPosition.getZoomLevel() == 0
                        || !boundingBox.contains(model.mapViewPosition.getCenter())) {
                    byte zoomLevel = LatLongUtils.zoomForBounds(
                            model.mapViewDimension.getDimension(), boundingBox,
                            model.displayModel.getTileSize());
                    model.mapViewPosition.setMapPosition(
                            new MapPosition(boundingBox.getCenterPoint(), zoomLevel));
                }
            }
        });
        frame.setVisible(true);
    }
}
public class RasterTilePainter { /** * The actual painting function . Draws the groups . * @ param paintable * A { @ link RasterTile } object . * @ param group * The group where the object resides in ( optional ) . * @ param context * A MapContext object , responsible for actual drawing . */ public void paint ( Paintable paintable , Object group , MapContext context ) { } }
RasterTile tile = ( RasterTile ) paintable ; context . getRasterContext ( ) . drawImage ( tile . getStore ( ) . getLayer ( ) , tile . getCode ( ) . toString ( ) , tile . getUrl ( ) , tile . getBounds ( ) , new PictureStyle ( 1.0 ) ) ;