signature
stringlengths
43
39.1k
implementation
stringlengths
0
450k
public class DependentHostedNumberOrderReader { /** * Make the request to the Twilio API to perform the read . * @ param client TwilioRestClient with which to make the request * @ return DependentHostedNumberOrder ResourceSet */ @ Override public ResourceSet < DependentHostedNumberOrder > read ( final TwilioRestClient client ) { } }
return new ResourceSet < > ( this , client , firstPage ( client ) ) ;
public class Matchers { /** * Matches an AST node that represents a local variable or parameter . */ public static Matcher < ExpressionTree > isVariable ( ) { } }
return new Matcher < ExpressionTree > ( ) { @ Override public boolean matches ( ExpressionTree expressionTree , VisitorState state ) { Symbol symbol = ASTHelpers . getSymbol ( expressionTree ) ; if ( symbol == null ) { return false ; } return symbol . getKind ( ) == ElementKind . LOCAL_VARIABLE || symbol . getKind ( ) == ElementKind . PARAMETER ; } } ;
public class TransportFactory { /** * Primary transport getting method . Tries to connect and test UDP Transport . * If UDP failed , tries TCP transport . If it fails , too , throws IOException * @ param addr * @ return Transport instance * @ throws IOException */ public static Transport getTransport ( SocketAddress addr ) throws IOException { } }
Transport trans ; try { log . debug ( "Connecting TCP" ) ; trans = TCPInstance ( addr ) ; if ( ! trans . test ( ) ) { throw new IOException ( "Agent is unreachable via TCP" ) ; } return trans ; } catch ( IOException e ) { log . info ( "Can't connect TCP transport for host: " + addr . toString ( ) , e ) ; try { log . debug ( "Connecting UDP" ) ; trans = UDPInstance ( addr ) ; if ( ! trans . test ( ) ) { throw new IOException ( "Agent is unreachable via UDP" ) ; } return trans ; } catch ( IOException ex ) { log . info ( "Can't connect UDP transport for host: " + addr . toString ( ) , ex ) ; throw ex ; } }
public class RecyclerView { /** * Focus handling */ @ Override public View focusSearch ( View focused , int direction ) { } }
View result = mLayout . onInterceptFocusSearch ( focused , direction ) ; if ( result != null ) { return result ; } final FocusFinder ff = FocusFinder . getInstance ( ) ; result = ff . findNextFocus ( this , focused , direction ) ; if ( result == null && mAdapter != null ) { eatRequestLayout ( ) ; result = mLayout . onFocusSearchFailed ( focused , direction , mRecycler , mState ) ; resumeRequestLayout ( false ) ; } return result != null ? result : super . focusSearch ( focused , direction ) ;
public class ModelsImpl { /** * Adds an entity extractor to the application . * @ param appId The application ID . * @ param versionId The version ID . * @ param addEntityOptionalParameter the object representing the optional parameters to be set before calling this API * @ throws IllegalArgumentException thrown if parameters fail the validation * @ throws ErrorResponseException thrown if the request is rejected by server * @ throws RuntimeException all other wrapped checked exceptions if the request fails to be sent * @ return the UUID object if successful . */ public UUID addEntity ( UUID appId , String versionId , AddEntityOptionalParameter addEntityOptionalParameter ) { } }
return addEntityWithServiceResponseAsync ( appId , versionId , addEntityOptionalParameter ) . toBlocking ( ) . single ( ) . body ( ) ;
public class SeleniumDriverFixture { /** * < p > < code > * | start browser | < i > firefox < / i > | on url | < i > http : / / localhost < / i > | * < / code > < / p > * @ param browser * @ param browserUrl */ public void startBrowserOnUrl ( final String browser , final String browserUrl ) { } }
setBrowser ( browser ) ; startDriverOnUrl ( defaultWebDriverInstance ( ) , browserUrl ) ;
public class DoCopy {
    /**
     * Helper method of copy(): recursively copies the FOLDER at the source path
     * to the destination path.
     *
     * @param transaction     indicates that the method is within the scope of a WebDAV transaction
     * @param sourcePath      where to read
     * @param destinationPath where to write
     * @param errorList       all errors that occurred, keyed by destination path
     * @param req             HttpServletRequest
     * @param resp            HttpServletResponse
     * @throws WebdavException if an error in the underlying store occurs
     */
    private void copyFolder(ITransaction transaction, String sourcePath,
            String destinationPath, Hashtable<String, Integer> errorList,
            HttpServletRequest req, HttpServletResponse resp) throws WebdavException {
        store.createFolder(transaction, destinationPath);
        // Per RFC 4918, a Depth header of "0" copies only the collection itself;
        // any other value (or no header) is treated as infinite depth.
        boolean infiniteDepth = true;
        String depth = req.getHeader("Depth");
        if (depth != null) {
            if (depth.equals("0")) {
                infiniteDepth = false;
            }
        }
        if (infiniteDepth) {
            String[] children = store.getChildrenNames(transaction, sourcePath);
            children = children == null ? new String[] {} : children;
            StoredObject childSo;
            // Children are processed in reverse order; each name is rewritten
            // in place to carry a leading "/" so it can be appended to paths.
            for (int i = children.length - 1; i >= 0; i--) {
                children[i] = "/" + children[i];
                try {
                    childSo = store.getStoredObject(transaction, (sourcePath + children[i]));
                    if (childSo == null) {
                        errorList.put(destinationPath + children[i], WebdavStatus.SC_NOT_FOUND);
                        continue;
                    }
                    if (childSo.isResource()) {
                        // Plain resource: create it, stream the content across,
                        // and record the resulting length when known.
                        store.createResource(transaction, destinationPath + children[i]);
                        long resourceLength = store.setResourceContent(transaction,
                                destinationPath + children[i],
                                store.getResourceContent(transaction, sourcePath + children[i]),
                                null, null);
                        if (resourceLength != -1) {
                            StoredObject destinationSo = store.getStoredObject(transaction,
                                    destinationPath + children[i]);
                            destinationSo.setResourceLength(resourceLength);
                        }
                    } else {
                        // Sub-folder: recurse.
                        copyFolder(transaction, sourcePath + children[i],
                                destinationPath + children[i], errorList, req, resp);
                    }
                } catch (AccessDeniedException e) {
                    errorList.put(destinationPath + children[i], WebdavStatus.SC_FORBIDDEN);
                } catch (ObjectNotFoundException e) {
                    errorList.put(destinationPath + children[i], WebdavStatus.SC_NOT_FOUND);
                } catch (ObjectAlreadyExistsException e) {
                    errorList.put(destinationPath + children[i], WebdavStatus.SC_CONFLICT);
                } catch (WebdavException e) {
                    errorList.put(destinationPath + children[i], WebdavStatus.SC_INTERNAL_SERVER_ERROR);
                }
            }
        }
    }
}
public class JavaParser {
    /**
     * ANTLR-generated rule method; do not hand-edit.
     *
     * Grammar source: src/main/resources/org/drools/compiler/semantics/java/parser/Java.g:1114:1
     * constantExpression : expression ;
     *
     * @throws RecognitionException if the input cannot be matched by the rule
     */
    public final void constantExpression() throws RecognitionException {
        int constantExpression_StartIndex = input.index();
        try {
            // Memoization short-circuit: skip if this rule already parsed here
            // during backtracking (rule id 107).
            if (state.backtracking > 0 && alreadyParsedRule(input, 107)) {
                return;
            }
            // Java.g:1115:5 : ( expression )
            // Java.g:1115:7 : expression
            {
                pushFollow(FOLLOW_expression_in_constantExpression4858);
                expression();
                state._fsp--;
                if (state.failed) return;
            }
        } catch (RecognitionException re) {
            reportError(re);
            recover(input, re);
        } finally {
            // do for sure before leaving: memoize the result for backtracking
            if (state.backtracking > 0) {
                memoize(input, 107, constantExpression_StartIndex);
            }
        }
    }
}
public class SecondsBasedEntryTaskScheduler { /** * package private for testing */ static int findRelativeSecond ( long delayMillis ) { } }
long now = Clock . currentTimeMillis ( ) ; long d = ( now + delayMillis - INITIAL_TIME_MILLIS ) ; return ceilToSecond ( d ) ;
public class L3ToSBGNPDConverter { /** * Creates a glyph for the complex member . * @ param pe PhysicalEntity to represent as complex member * @ param container Glyph for the complex shell */ private Glyph createComplexMember ( PhysicalEntity pe , Glyph container ) { } }
Glyph g = createGlyphBasics ( pe , false ) ; container . getGlyph ( ) . add ( g ) ; // A PhysicalEntity may appear in many complexes - - we identify the member using its complex g . setId ( g . getId ( ) + "_" + ModelUtils . md5hex ( container . getId ( ) ) ) ; glyphMap . put ( g . getId ( ) , g ) ; Set < String > uris = new HashSet < String > ( ) ; uris . add ( pe . getUri ( ) ) ; sbgn2BPMap . put ( g . getId ( ) , uris ) ; if ( "or" . equalsIgnoreCase ( g . getClazz ( ) ) ) { buildGeneric ( pe , g , container ) ; } return g ;
public class RequestContext { /** * Renders with { " code " : int } . * @ param code the specified code * @ return this context */ public RequestContext renderCode ( final int code ) { } }
if ( renderer instanceof JsonRenderer ) { final JsonRenderer r = ( JsonRenderer ) renderer ; final JSONObject ret = r . getJSONObject ( ) ; ret . put ( Keys . CODE , code ) ; } return this ;
public class JdbcDatabaseMetaDataGenerator { /** * Integer [ 0 ] is the column size and Integer [ 1 ] is the radix */ private Integer [ ] getParamPrecisionAndRadix ( ProcParameter param ) { } }
VoltType type = VoltType . get ( ( byte ) param . getType ( ) ) ; return type . getTypePrecisionAndRadix ( ) ;
public class SQLiteSession { /** * Begins a transaction . * Transactions may nest . If the transaction is not in progress , * then a database connection is obtained and a new transaction is started . * Otherwise , a nested transaction is started . * < / p > < p > * Each call to { @ link # beginTransaction } must be matched exactly by a call * to { @ link # endTransaction } . To mark a transaction as successful , * call { @ link # setTransactionSuccessful } before calling { @ link # endTransaction } . * If the transaction is not successful , or if any of its nested * transactions were not successful , then the entire transaction will * be rolled back when the outermost transaction is ended . * @ param transactionMode The transaction mode . One of : { @ link # TRANSACTION _ MODE _ DEFERRED } , * { @ link # TRANSACTION _ MODE _ IMMEDIATE } , or { @ link # TRANSACTION _ MODE _ EXCLUSIVE } . * Ignored when creating a nested transaction . * @ param transactionListener The transaction listener , or null if none . * @ param connectionFlags The connection flags to use if a connection must be * acquired by this operation . Refer to { @ link SQLiteConnectionPool } . * @ param cancellationSignal A signal to cancel the operation in progress , or null if none . * @ throws IllegalStateException if { @ link # setTransactionSuccessful } has already been * called for the current transaction . * @ throws SQLiteException if an error occurs . * @ throws OperationCanceledException if the operation was canceled . * @ see # setTransactionSuccessful * @ see # yieldTransaction * @ see # endTransaction */ public void beginTransaction ( int transactionMode , SQLiteTransactionListener transactionListener , int connectionFlags , CancellationSignal cancellationSignal ) { } }
throwIfTransactionMarkedSuccessful ( ) ; beginTransactionUnchecked ( transactionMode , transactionListener , connectionFlags , cancellationSignal ) ;
public class BuilderFactory { /** * Return the builder for the class . * @ param typeElement the class being documented . * @ param prevClass the previous class that was documented . * @ param nextClass the next class being documented . * @ param classTree the class tree . * @ return the writer for the class . Return null if this * writer is not supported by the doclet . */ public AbstractBuilder getClassBuilder ( TypeElement typeElement , TypeElement prevClass , TypeElement nextClass , ClassTree classTree ) { } }
return ClassBuilder . getInstance ( context , typeElement , writerFactory . getClassWriter ( typeElement , prevClass , nextClass , classTree ) ) ;
public class XMeans { /** * Split an existing centroid into two initial centers . * @ param parentCluster Existing cluster * @ param relation Data relation * @ return List of new centroids */ protected double [ ] [ ] splitCentroid ( Cluster < ? extends MeanModel > parentCluster , Relation < V > relation ) { } }
double [ ] parentCentroid = parentCluster . getModel ( ) . getMean ( ) ; // Compute size of cluster / region double radius = 0. ; for ( DBIDIter it = parentCluster . getIDs ( ) . iter ( ) ; it . valid ( ) ; it . advance ( ) ) { double d = getDistanceFunction ( ) . distance ( relation . get ( it ) , DoubleVector . wrap ( parentCentroid ) ) ; radius = ( d > radius ) ? d : radius ; } // Choose random vector Random random = rnd . getSingleThreadedRandom ( ) ; final int dim = RelationUtil . dimensionality ( relation ) ; double [ ] randomVector = normalize ( MathUtil . randomDoubleArray ( dim , random ) ) ; timesEquals ( randomVector , ( .4 + random . nextDouble ( ) * .5 ) * radius ) ; // Get the new centroids for ( int d = 0 ; d < dim ; d ++ ) { double a = parentCentroid [ d ] , b = randomVector [ d ] ; parentCentroid [ d ] = a - b ; randomVector [ d ] = a + b ; } return new double [ ] [ ] { parentCentroid , randomVector } ;
public class HttpHealthCheckClient { /** * Deletes the specified HttpHealthCheck resource . * < p > Sample code : * < pre > < code > * try ( HttpHealthCheckClient httpHealthCheckClient = HttpHealthCheckClient . create ( ) ) { * ProjectGlobalHttpHealthCheckName httpHealthCheck = ProjectGlobalHttpHealthCheckName . of ( " [ PROJECT ] " , " [ HTTP _ HEALTH _ CHECK ] " ) ; * Operation response = httpHealthCheckClient . deleteHttpHealthCheck ( httpHealthCheck . toString ( ) ) ; * < / code > < / pre > * @ param httpHealthCheck Name of the HttpHealthCheck resource to delete . * @ throws com . google . api . gax . rpc . ApiException if the remote call fails */ @ BetaApi public final Operation deleteHttpHealthCheck ( String httpHealthCheck ) { } }
DeleteHttpHealthCheckHttpRequest request = DeleteHttpHealthCheckHttpRequest . newBuilder ( ) . setHttpHealthCheck ( httpHealthCheck ) . build ( ) ; return deleteHttpHealthCheck ( request ) ;
public class StringHelper { /** * Cross between { @ link # collapse } and { @ link # partiallyUnqualify } . Functions much like { @ link # collapse } * except that only the qualifierBase is collapsed . For example , with a base of ' org . hibernate ' the name * ' org . hibernate . internal . util . StringHelper ' would become ' o . h . util . StringHelper ' . * @ param name The ( potentially ) qualified name . * @ param qualifierBase The qualifier base . * @ return The name itself if it does not begin with the qualifierBase , or the properly collapsed form otherwise . */ public static String collapseQualifierBase ( String name , String qualifierBase ) { } }
if ( name == null || ! name . startsWith ( qualifierBase ) ) { return collapse ( name ) ; } return collapseQualifier ( qualifierBase , true ) + name . substring ( qualifierBase . length ( ) ) ;
public class ThroughputInfo { /** * 对应size的数据统计平均值 */ public Long getQuantity ( ) { } }
Long quantity = 0L ; if ( items . size ( ) != 0 ) { for ( ThroughputStat item : items ) { if ( item . getEndTime ( ) . equals ( item . getStartTime ( ) ) ) { quantity += item . getSize ( ) ; } else { quantity += item . getSize ( ) * 1000 / ( item . getEndTime ( ) . getTime ( ) - item . getStartTime ( ) . getTime ( ) ) ; } } if ( seconds != 0 ) { quantity = quantity / items . size ( ) ; } } return quantity ;
public class FactoryVisualOdometry {
    /**
     * Creates a stereo visual odometry algorithm that independently tracks
     * features in the left and right cameras.
     *
     * @see VisOdomDualTrackPnP
     *
     * @param thresholdAdd     when the number of inliers is below this number new features are detected
     * @param thresholdRetire  when a feature has not been in the inlier list for this many ticks it is dropped
     * @param inlierPixelTol   tolerance in pixels for defining an inlier during robust model matching; typically 1.5
     * @param epipolarPixelTol tolerance in pixels for enforcing the epipolar constraint
     * @param ransacIterations number of iterations performed by RANSAC; try 300 or more
     * @param refineIterations number of iterations done during non-linear optimization; try 50 or more
     * @param trackerLeft      tracker used for the left camera
     * @param trackerRight     tracker used for the right camera
     * @param descriptor       region descriptor used to associate features across cameras
     * @param imageType        type of image being processed
     * @return stereo visual odometry algorithm
     */
    public static <T extends ImageGray<T>, Desc extends TupleDesc>
    StereoVisualOdometry<T> stereoDualTrackerPnP(int thresholdAdd, int thresholdRetire,
            double inlierPixelTol, double epipolarPixelTol,
            int ransacIterations, int refineIterations,
            PointTracker<T> trackerLeft, PointTracker<T> trackerRight,
            DescribeRegionPoint<T, Desc> descriptor, Class<T> imageType) {
        // Pose estimator: P3P (Finsterwalder) with mono + stereo reprojection distances.
        EstimateNofPnP pnp = FactoryMultiView.pnp_N(EnumPNP.P3P_FINSTERWALDER, -1);
        DistanceFromModelMultiView<Se3_F64, Point2D3D> distanceMono = new PnPDistanceReprojectionSq();
        PnPStereoDistanceReprojectionSq distanceStereo = new PnPStereoDistanceReprojectionSq();
        PnPStereoEstimator pnpStereo = new PnPStereoEstimator(pnp, distanceMono, 0);
        ModelManagerSe3_F64 manager = new ModelManagerSe3_F64();
        EstimatorToGenerator<Se3_F64, Stereo2D3D> generator = new EstimatorToGenerator<>(pnpStereo);
        // Pixel tolerance for RANSAC inliers -- euclidean error squared from left + right images.
        double ransacTOL = 2 * inlierPixelTol * inlierPixelTol;
        // 2323 is a fixed RANSAC seed, presumably for reproducible results -- TODO confirm.
        ModelMatcher<Se3_F64, Stereo2D3D> motion =
                new Ransac<>(2323, manager, generator, distanceStereo, ransacIterations, ransacTOL);
        RefinePnPStereo refinePnP = null;
        Class<Desc> descType = descriptor.getDescriptionType();
        ScoreAssociation<Desc> scorer = FactoryAssociation.defaultScore(descType);
        AssociateStereo2D<Desc> associateStereo =
                new AssociateStereo2D<>(scorer, epipolarPixelTol, descType);
        // Need to make sure associations are unique in both directions.
        AssociateDescription2D<Desc> associateUnique = associateStereo;
        if (!associateStereo.uniqueDestination() || !associateStereo.uniqueSource()) {
            associateUnique = new EnforceUniqueByScore.Describe2D<>(associateStereo, true, true);
        }
        // Optional non-linear refinement step; skipped when refineIterations <= 0.
        if (refineIterations > 0) {
            refinePnP = new PnPStereoRefineRodrigues(1e-12, refineIterations);
        }
        Triangulate2ViewsMetric triangulate = FactoryMultiView.triangulate2ViewMetric(
                new ConfigTriangulation(ConfigTriangulation.Type.GEOMETRIC));
        VisOdomDualTrackPnP<T, Desc> alg = new VisOdomDualTrackPnP<>(thresholdAdd,
                thresholdRetire, epipolarPixelTol, trackerLeft, trackerRight,
                descriptor, associateUnique, triangulate, motion, refinePnP);
        return new WrapVisOdomDualTrackPnP<>(pnpStereo, distanceMono, distanceStereo,
                associateStereo, alg, refinePnP, imageType);
    }
}
public class ThriftClientPool { /** * get a client from pool * @ return * @ throws ThriftException * @ throws NoBackendServiceException if * { @ link PoolConfig # setFailover ( boolean ) } is set and no * service can connect to * @ throws ConnectionFailException if * { @ link PoolConfig # setFailover ( boolean ) } not set and * connection fail */ public ThriftClient < T > getClient ( ) throws ThriftException { } }
try { return pool . borrowObject ( ) ; } catch ( Exception e ) { if ( e instanceof ThriftException ) { throw ( ThriftException ) e ; } throw new ThriftException ( "Get client from pool failed." , e ) ; }
public class CProductUtil { /** * Removes the c product with the primary key from the database . Also notifies the appropriate model listeners . * @ param CProductId the primary key of the c product * @ return the c product that was removed * @ throws NoSuchCProductException if a c product with the primary key could not be found */ public static CProduct remove ( long CProductId ) throws com . liferay . commerce . product . exception . NoSuchCProductException { } }
return getPersistence ( ) . remove ( CProductId ) ;
public class PortableNavigatorContext { /** * Populates the context with multi - positions that have to be processed later on in the navigation process . * The contract is that the cell [ 0 ] path is read in the non - multi - position navigation . * Cells [ 1 , len - 1 ] are stored in the multi - positions and will be followed up on later on . */ void populateAnyNavigationFrames ( int pathTokenIndex , int len ) { } }
// populate " recursive " multi - positions if ( multiPositions == null ) { // lazy - init only if necessary multiPositions = new ArrayDeque < NavigationFrame > ( ) ; } for ( int cellIndex = len - 1 ; cellIndex > 0 ; cellIndex -- ) { multiPositions . addFirst ( new NavigationFrame ( cd , pathTokenIndex , cellIndex , in . position ( ) , offset ) ) ; }
public class EJBModuleMetaDataImpl { /** * Gets the application exception status of the specified exception class * from either the deployment descriptor or from the annotation . * @ param klass is the Throwable class * @ return the settings , or null if no application - exception was provided * for the specified class . */ private ApplicationException getApplicationException ( Class < ? > klass ) // F743-14982 { } }
ApplicationException result = null ; if ( ivApplicationExceptionMap != null ) { result = ivApplicationExceptionMap . get ( klass . getName ( ) ) ; if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) && result != null ) { Tr . debug ( tc , "found application-exception for " + klass . getName ( ) + ", rollback=" + result . rollback ( ) + ", inherited=" + result . inherited ( ) ) ; } } if ( result == null ) { result = klass . getAnnotation ( ApplicationException . class ) ; if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) && result != null ) { Tr . debug ( tc , "found ApplicationException for " + klass . getName ( ) + ", rollback=" + result . rollback ( ) + ", inherited=" + result . inherited ( ) ) ; } } return result ;
public class HlpEntitiesPage {
    /**
     * Makes a SQL WHERE clause for an enum field if required by the request.
     *
     * @param pSbWhere          result clause
     * @param pRequestData      request data
     * @param pEntityClass      entity class
     * @param pFldNm            field name
     * @param pFilterMap        map to store the current filter
     * @param pFilterAppearance set to store the current filter appearance;
     *                          if null, not required
     * @throws Exception on any failure
     */
    public final void tryMakeWhereEnum(final StringBuffer pSbWhere,
            final IRequestData pRequestData, final Class<?> pEntityClass,
            final String pFldNm, final Map<String, Object> pFilterMap,
            final Set<String> pFilterAppearance) throws Exception {
        // The filter parameter prefix depends on which widget issued the request.
        String nmRnd = pRequestData.getParameter("nmRnd");
        String fltOrdPrefix;
        if (nmRnd.contains("pickerDub")) {
            fltOrdPrefix = "fltordPD";
        } else if (nmRnd.contains("picker")) {
            fltOrdPrefix = "fltordP";
        } else {
            fltOrdPrefix = "fltordM";
        }
        // Carry through any "forced" filter marker.
        String fltforcedName = fltOrdPrefix + "forcedFor";
        String fltforced = pRequestData.getParameter(fltforcedName);
        if (fltforced != null) {
            pFilterMap.put(fltforcedName, fltforced);
        }
        String nmFldVal = fltOrdPrefix + pFldNm + "Val";
        String fltVal = pRequestData.getParameter(nmFldVal);
        String nmFldOpr = fltOrdPrefix + pFldNm + "Opr";
        String valFldOpr = pRequestData.getParameter(nmFldOpr);
        // Only build a condition when both a value and an enabled operator are present.
        if (fltVal != null && fltVal.length() > 0 && valFldOpr != null
                && !valFldOpr.equals("disabled") && !valFldOpr.equals("")) {
            String val;
            String valAppear;
            // Reflective lookup of the enum field type (raw Class: Enum.valueOf below is unchecked).
            Field fldEnum = this.fieldsRapiHolder.getFor(pEntityClass, pFldNm);
            Class classEnum = fldEnum.getType();
            if (valFldOpr.equals("in")) {
                // "in" operator: comma-separated enum names become a list of ordinals.
                StringBuffer sbVal = new StringBuffer("(");
                StringBuffer sbValAppear = new StringBuffer("(");
                boolean isFirst = true;
                for (String vl : fltVal.split(",")) {
                    if (isFirst) {
                        isFirst = false;
                    } else {
                        sbVal.append(", ");
                        sbValAppear.append(", ");
                    }
                    Enum enVal = Enum.valueOf(classEnum, vl);
                    sbVal.append(String.valueOf(enVal.ordinal()));
                    sbValAppear.append(getSrvI18n().getMsg(vl));
                }
                val = sbVal.toString() + ")";
                valAppear = sbValAppear.toString() + ")";
            } else {
                // Single-value operator: one enum name, stored as its ordinal.
                Enum enVal = Enum.valueOf(classEnum, fltVal);
                val = String.valueOf(enVal.ordinal());
                valAppear = getSrvI18n().getMsg(fltVal);
            }
            // Remember the filter so it can be re-rendered on the next request.
            pFilterMap.put(fltOrdPrefix + pFldNm + "ValAppearance", valAppear);
            pFilterMap.put(nmFldVal, fltVal);
            pFilterMap.put(nmFldOpr, valFldOpr);
            String cond = pEntityClass.getSimpleName().toUpperCase() + "." + pFldNm.
                toUpperCase() + " " + toSqlOperator(valFldOpr) + " " + val;
            if (pSbWhere.toString().length() == 0) {
                pSbWhere.append(cond);
            } else {
                pSbWhere.append(" and " + cond);
            }
            if (pFilterAppearance != null) {
                pFilterAppearance.add(getSrvI18n().getMsg(pFldNm) + " "
                    + getSrvI18n().getMsg(valFldOpr) + " " + valAppear);
            }
        }
    }
}
public class AWSBudgetsClient { /** * Creates a subscriber . You must create the associated budget and notification before you create the subscriber . * @ param createSubscriberRequest * Request of CreateSubscriber * @ return Result of the CreateSubscriber operation returned by the service . * @ throws InternalErrorException * An error on the server occurred during the processing of your request . Try again later . * @ throws InvalidParameterException * An error on the client occurred . Typically , the cause is an invalid input value . * @ throws CreationLimitExceededException * You ' ve exceeded the notification or subscriber limit . * @ throws DuplicateRecordException * The budget name already exists . Budget names must be unique within an account . * @ throws NotFoundException * We can ’ t locate the resource that you specified . * @ sample AWSBudgets . CreateSubscriber */ @ Override public CreateSubscriberResult createSubscriber ( CreateSubscriberRequest request ) { } }
request = beforeClientExecution ( request ) ; return executeCreateSubscriber ( request ) ;
public class WSKeyStore { /** * Print a warning about a certificate being expired or soon to be expired in * the keystore . * @ param daysBeforeExpireWarning * @ param keyStoreName * @ param alias * @ param cert */ public void printWarning ( int daysBeforeExpireWarning , String keyStoreName , String alias , X509Certificate cert ) { } }
try { long millisDelta = ( ( ( ( daysBeforeExpireWarning * 24L ) * 60L ) * 60L ) * 1000L ) ; long millisBeforeExpiration = cert . getNotAfter ( ) . getTime ( ) - System . currentTimeMillis ( ) ; long daysLeft = ( ( ( ( millisBeforeExpiration / 1000L ) / 60L ) / 60L ) / 24L ) ; // cert is already expired if ( millisBeforeExpiration < 0 ) { Tr . error ( tc , "ssl.expiration.expired.CWPKI0017E" , new Object [ ] { alias , keyStoreName } ) ; } else if ( millisBeforeExpiration < millisDelta ) { Tr . warning ( tc , "ssl.expiration.warning.CWPKI0016W" , new Object [ ] { alias , keyStoreName , Long . valueOf ( daysLeft ) } ) ; } else { if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) ) Tr . debug ( tc , "The certificate with alias " + alias + " from keyStore " + keyStoreName + " has " + daysLeft + " days left before expiring." ) ; } } catch ( Exception e ) { if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) ) Tr . debug ( tc , "Exception reading KeyStore certificates during expiration check; " + e ) ; FFDCFilter . processException ( e , getClass ( ) . getName ( ) , "printWarning" , this ) ; }
public class CmsFlexCache {
    /**
     * Copies the key set of a map while synchronizing on the map.<p>
     *
     * @param map the map whose key set should be copied; may be null
     * @return the copied key set (empty when the map is null)
     */
    private static <K, V> Set<K> synchronizedCopyKeys(Map<K, V> map) {
        if (map == null) {
            return new HashSet<K>();
        }
        // Hold the map's monitor while snapshotting to avoid a
        // ConcurrentModificationException from concurrent writers.
        synchronized (map) {
            return new HashSet<K>(map.keySet());
        }
    }
}
public class AdapterUtil {
    /**
     * Displays the javax.transaction.xa.XAResource Resource Manager vote
     * constant corresponding to the value supplied.
     *
     * @param vote a javax.transaction.xa.XAResource vote constant
     * @return the name of the constant, or a string indicating the constant is unknown
     */
    public static String getXAResourceVoteString(int vote) {
        if (vote == XAResource.XA_OK) {
            return "XA_OK (" + vote + ')';
        }
        if (vote == XAResource.XA_RDONLY) {
            return "XA_RDONLY (" + vote + ')';
        }
        return "UNKNOWN XA RESOURCE VOTE (" + vote + ')';
    }
}
public class SQLTable { /** * Method to initialize the Cache of this CacheObjectInterface . * @ param _ class Clas that started the initialization */ public static void initialize ( final Class < ? > _class ) { } }
if ( InfinispanCache . get ( ) . exists ( SQLTable . UUIDCACHE ) ) { InfinispanCache . get ( ) . < UUID , SQLTable > getCache ( SQLTable . UUIDCACHE ) . clear ( ) ; } else { InfinispanCache . get ( ) . < UUID , SQLTable > getCache ( SQLTable . UUIDCACHE ) . addListener ( new CacheLogListener ( SQLTable . LOG ) ) ; } if ( InfinispanCache . get ( ) . exists ( SQLTable . IDCACHE ) ) { InfinispanCache . get ( ) . < Long , SQLTable > getCache ( SQLTable . IDCACHE ) . clear ( ) ; } else { InfinispanCache . get ( ) . < Long , SQLTable > getCache ( SQLTable . IDCACHE ) . addListener ( new CacheLogListener ( SQLTable . LOG ) ) ; } if ( InfinispanCache . get ( ) . exists ( SQLTable . NAMECACHE ) ) { InfinispanCache . get ( ) . < String , SQLTable > getCache ( SQLTable . NAMECACHE ) . clear ( ) ; } else { InfinispanCache . get ( ) . < String , SQLTable > getCache ( SQLTable . NAMECACHE ) . addListener ( new CacheLogListener ( SQLTable . LOG ) ) ; }
public class ZipFileIndex { /** * Tests if a specific path exists in the zip . This method will return true * for file entries and directories . * @ param path A path within the zip . * @ return True if the path is a file or dir , false otherwise . */ public synchronized boolean contains ( RelativePath path ) { } }
try { checkIndex ( ) ; return getZipIndexEntry ( path ) != null ; } catch ( IOException e ) { return false ; }
public class RString { /** * they can be used again . */ public void clear ( ) { } }
for ( Placeholder p : _placeholders . values ( ) ) { p . start . removeTill ( p . end ) ; }
public class DocEnv { /** * Create a FieldDoc for a var symbol . */ protected void makeFieldDoc ( VarSymbol var , TreePath treePath ) { } }
FieldDocImpl result = fieldMap . get ( var ) ; if ( result != null ) { if ( treePath != null ) result . setTreePath ( treePath ) ; } else { result = new FieldDocImpl ( this , var , treePath ) ; fieldMap . put ( var , result ) ; }
public class AbstractIoBufferEx { /** * { @ inheritDoc } */ @ Override public AbstractIoBufferEx putEnumInt ( int index , Enum < ? > e ) { } }
return putInt ( index , e . ordinal ( ) ) ;
public class AbstractConsoleEditor { /** * Starts the editor . * This methods actually creates the { @ link Reader } . */ public void start ( ) { } }
running = true ; try { init ( ) ; show ( ) ; while ( running ) { EditorOperation operation = readOperation ( ) ; if ( operation != null ) { Command cmd = create ( operation ) ; onCommand ( cmd ) ; } else { break ; } } } catch ( Exception e ) { // noop . }
public class DecodedHttpRequest { /** * Aborts the { @ link HttpResponse } which responds to this request if it exists . * @ see Http2RequestDecoder # onRstStreamRead ( ChannelHandlerContext , int , long ) */ void abortResponse ( Throwable cause ) { } }
isResponseAborted = true ; // Try to close the request first , then abort the response if it is already closed . if ( ! tryClose ( cause ) && response != null && ! response . isComplete ( ) ) { response . abort ( ) ; }
public class KeyValueStoreSessionManager {
    /**
     * Persists a session to the key-value store, bumping its version.
     *
     * <p>Invalidated sessions are deleted instead of saved. Serialization
     * failures surface as IllegalArgumentException; all other failures are
     * logged and swallowed, returning null.</p>
     *
     * @param session           the session to persist
     * @param version           previous version (Long), or null for a new session
     * @param activateAfterSave whether to fire didActivate() after a successful save
     * @return the new version (Long) on success, or null on failure / invalid session
     */
    @Override
    protected Object save(final NoSqlSession session, final Object version, final boolean activateAfterSave) {
        try {
            log.debug("save:" + session);
            session.willPassivate();
            if (!session.isValid()) {
                // Invalidated sessions are removed from the store, not saved.
                log.debug("save: skip saving invalidated session: id=" + session.getId());
                deleteKey(session.getId());
                return null;
            }
            ISerializableSession data;
            // Snapshot the session under its own lock to get a consistent copy.
            synchronized (session) {
                data = getSessionFactory().create(session);
            }
            data.setDomain(_cookieDomain);
            data.setPath(_cookiePath);
            long longVersion = 1; // default version for new sessions
            if (version != null) {
                longVersion = (Long) version + 1L;
            }
            data.setVersion(longVersion);
            try {
                if (!setKey(session.getId(), data)) {
                    throw (new RuntimeException("unable to set key: data=" + data));
                }
            } catch (TranscoderException error) {
                // Serialization failure is a caller error: rethrow as IAE.
                throw (new IllegalArgumentException("unable to serialize session: id="
                        + session.getId() + ", data=" + data, error));
            }
            log.debug("save:db.sessions.update(" + session.getId() + "," + data + ")");
            if (activateAfterSave) {
                session.didActivate();
            }
            return longVersion;
        } catch (Exception e) {
            // Deliberate broad catch: a failed save is logged, not propagated.
            log.warn(e);
        }
        return null;
    }
}
public class GodHandableAction { protected ActionResponse actuallyExecute ( OptionalThing < VirtualForm > optForm , ActionHook hook ) { } }
showAction ( runtime ) ; final Object [ ] requestArgs = toRequestArgs ( optForm ) ; final Object result = invokeExecuteMethod ( execute . getExecuteMethod ( ) , requestArgs ) ; // # to _ action redCardableAssist . assertExecuteReturnNotNull ( requestArgs , result ) ; redCardableAssist . assertExecuteMethodReturnTypeActionResponse ( requestArgs , result ) ; final ActionResponse response = ( ActionResponse ) result ; runtime . manageActionResponse ( response ) ; // always set here because of main return response ;
public class Asm {

    /**
     * Create dword (4 bytes) pointer operand.
     *
     * @param label the base label of the memory operand
     * @param index the index register
     * @param shift the shift/scale value applied to the index register
     * @param disp the displacement added to the computed address
     * @return a dword-sized {@link Mem} operand
     */
    public static final Mem dword_ptr(Label label, Register index, int shift, long disp) {
        // Delegate to the shared builder with the fixed dword operand size.
        return _ptr_build(label, index, shift, disp, SIZE_DWORD);
    }
}
public class ZookeeperConfigGroup {

    /**
     * Loads the configuration node's children and registers a watch so that
     * subsequent changes are observed.
     *
     * @throws RuntimeException wrapping any ZooKeeper error encountered while reading
     */
    void loadNode() {
        final String nodePath = ZKPaths.makePath(configProfile.getVersionedRootNode(), node);
        final GetChildrenBuilder childrenBuilder = client.getChildren();
        try {
            // watched() registers this instance as a watcher for child changes.
            final List<String> children = childrenBuilder.watched().forPath(nodePath);
            if (children != null) {
                final Map<String, String> configs = new HashMap<>();
                for (String child : children) {
                    // Each child node holds one key/value pair; null means the child was unreadable.
                    final Tuple<String, String> keyValue = loadKey(ZKPaths.makePath(nodePath, child));
                    if (keyValue != null) {
                        configs.put(keyValue.getFirst(), keyValue.getSecond());
                    }
                }
                // Replace the previous config snapshot atomically.
                cleanAndPutAll(configs);
            }
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
        // Persist the freshly loaded configs to the local cache, if one is configured.
        if (getConfigLocalCache() != null) {
            getConfigLocalCache().saveLocalCache(this, getNode());
        }
    }
}
public class CmsRole {

    /**
     * Returns a role violation exception configured with a localized, role specific message
     * for this role.<p>
     *
     * @param requestContext the current users OpenCms request context
     * @param orgUnitFqn the organizational unit used for the role check, it may be <code>null</code>
     * @return a role violation exception configured with a localized, role specific message
     *         for this role
     */
    public CmsRoleViolationException createRoleViolationExceptionForOrgUnit(CmsRequestContext requestContext, String orgUnitFqn) {
        // Build the localized message from the current user, this role's localized name,
        // and the organizational unit the check was performed against.
        return new CmsRoleViolationException(
            Messages.get().container(
                Messages.ERR_USER_NOT_IN_ROLE_FOR_ORGUNIT_3,
                requestContext.getCurrentUser().getName(),
                getName(requestContext.getLocale()),
                orgUnitFqn));
    }
}
public class ListConfigurationSetsResult { /** * A list of configuration sets . * < b > NOTE : < / b > This method appends the values to the existing list ( if any ) . Use * { @ link # setConfigurationSets ( java . util . Collection ) } or { @ link # withConfigurationSets ( java . util . Collection ) } if * you want to override the existing values . * @ param configurationSets * A list of configuration sets . * @ return Returns a reference to this object so that method calls can be chained together . */ public ListConfigurationSetsResult withConfigurationSets ( ConfigurationSet ... configurationSets ) { } }
if ( this . configurationSets == null ) { setConfigurationSets ( new com . amazonaws . internal . SdkInternalList < ConfigurationSet > ( configurationSets . length ) ) ; } for ( ConfigurationSet ele : configurationSets ) { this . configurationSets . add ( ele ) ; } return this ;
public class ResolutionReportHelper {

    /**
     * Should be called just prior to the resolve bundle operation from the same thread.
     * Records the resolving thread's id and registers this helper as a
     * {@link ResolverHookFactory} service.
     *
     * @param bContext bundleContext used for the service registration
     */
    public void startHelper(BundleContext bContext) {
        // Remember which thread performs the resolve so hook callbacks can be matched to it.
        _resolvingThread = Thread.currentThread().getId();
        ResolverHookFactoryReg = bContext.registerService(ResolverHookFactory.class, this, null);
    }
}
public class GetMsgSendIntentApi { /** * HuaweiApiClient 连接结果回调 * @ param rst 结果码 * @ param client HuaweiApiClient 实例 */ @ Override public void onConnect ( int rst , HuaweiApiClient client ) { } }
if ( client == null || ! ApiClientMgr . INST . isConnect ( client ) ) { HMSAgentLog . e ( "client not connted" ) ; onSnsGetMsgIntentResult ( rst , null ) ; return ; } PendingResult < IntentResult > sendMsgResult = HuaweiSns . HuaweiSnsApi . getMsgSendIntent ( client , msg , needResult ) ; sendMsgResult . setResultCallback ( new ResultCallback < IntentResult > ( ) { @ Override public void onResult ( IntentResult result ) { if ( result == null ) { HMSAgentLog . e ( "result is null" ) ; onSnsGetMsgIntentResult ( HMSAgent . AgentResultCode . RESULT_IS_NULL , null ) ; return ; } Status status = result . getStatus ( ) ; if ( status == null ) { HMSAgentLog . e ( "status is null" ) ; onSnsGetMsgIntentResult ( HMSAgent . AgentResultCode . STATUS_IS_NULL , null ) ; return ; } int rstCode = status . getStatusCode ( ) ; HMSAgentLog . d ( "status=" + status ) ; // 需要重试的错误码 , 并且可以重试 if ( ( rstCode == CommonCode . ErrorCode . SESSION_INVALID || rstCode == CommonCode . ErrorCode . CLIENT_API_INVALID ) && retryTimes > 0 ) { retryTimes -- ; connect ( ) ; } else if ( rstCode == SNSCode . OK ) { // 回调GetMsgIntent结果 Intent nxtIntent = result . getIntent ( ) ; if ( nxtIntent == null ) { HMSAgentLog . e ( "nxtIntent is null" ) ; onSnsGetMsgIntentResult ( HMSAgent . AgentResultCode . RESULT_IS_NULL , null ) ; return ; } else { onSnsGetMsgIntentResult ( rstCode , nxtIntent ) ; return ; } } else { onSnsGetMsgIntentResult ( rstCode , null ) ; } } } ) ;
public class JJTMithraQLState {

    /**
     * A definite node is constructed from a specified number of children. That
     * number of nodes are popped from the stack and made the children of the
     * definite node. Then the definite node is pushed on to the stack.
     *
     * @param n the node being closed
     * @param num the number of children to pop from the stack
     */
    void closeNodeScope(Node n, int num) {
        // Restore the stack mark saved when this node's scope was opened.
        mk = ((Integer) marks.pop()).intValue();
        // Pop children in reverse order: the last-pushed node becomes the last child.
        while (num-- > 0) {
            Node c = popNode();
            c.jjtSetParent(n);
            n.jjtAddChild(c, num);
        }
        n.jjtClose();
        pushNode(n);
        node_created = true;
    }
}
public class CmsResultFacets { /** * Selects the given field facet . < p > * @ param field the field name * @ param value the value */ void selectFieldFacet ( String field , String value ) { } }
m_selectedFieldFacets . clear ( ) ; m_selectedRangeFacets . clear ( ) ; m_selectedFieldFacets . put ( field , Collections . singletonList ( value ) ) ; m_manager . search ( m_selectedFieldFacets , m_selectedRangeFacets ) ;
public class AddExpiration {

    /**
     * Adds a long constructor assignment.
     * Emits an Unsafe.putLong call writing the "now" timestamp into the given field
     * via its precomputed offset.
     *
     * @param constructor the constructor builder to append the statement to
     * @param field the name of the field to assign
     */
    private void addTimeConstructorAssignment(MethodSpec.Builder constructor, String field) {
        constructor.addStatement("$T.UNSAFE.putLong(this, $N, $N)", UNSAFE_ACCESS, offsetName(field), "now");
    }
}
public class HttpHelper { /** * Attempt to send all the batches totalling numMetrics in the allowed time . * @ return The total number of metrics sent . */ public int sendAll ( Iterable < Observable < Integer > > batches , final int numMetrics , long timeoutMillis ) { } }
final AtomicBoolean err = new AtomicBoolean ( false ) ; final AtomicInteger updated = new AtomicInteger ( 0 ) ; LOGGER . debug ( "Got {} ms to send {} metrics" , timeoutMillis , numMetrics ) ; try { final CountDownLatch completed = new CountDownLatch ( 1 ) ; final Subscription s = Observable . mergeDelayError ( Observable . from ( batches ) ) . timeout ( timeoutMillis , TimeUnit . MILLISECONDS ) . subscribeOn ( Schedulers . immediate ( ) ) . subscribe ( updated :: addAndGet , exc -> { logErr ( "onError caught" , exc , updated . get ( ) , numMetrics ) ; err . set ( true ) ; completed . countDown ( ) ; } , completed :: countDown ) ; try { completed . await ( timeoutMillis , TimeUnit . MILLISECONDS ) ; } catch ( InterruptedException interrupted ) { err . set ( true ) ; s . unsubscribe ( ) ; LOGGER . warn ( "Timed out sending metrics. {}/{} sent" , updated . get ( ) , numMetrics ) ; } } catch ( Exception e ) { err . set ( true ) ; logErr ( "Unexpected " , e , updated . get ( ) , numMetrics ) ; } if ( updated . get ( ) < numMetrics && ! err . get ( ) ) { LOGGER . warn ( "No error caught, but only {}/{} sent." , updated . get ( ) , numMetrics ) ; } return updated . get ( ) ;
public class FileManagerImpl {

    /**
     * Allocate a block of storage. It may or may not be cleared to 0.
     *
     * @param request_size Number of bytes to allocate.
     * @return address in file that is allocated.
     * @throws FileManagerException if the manager is in read-only mode
     * @throws IOException on I/O failure
     */
    public long allocate(int request_size) throws IOException {
        if (readOnly) {
            throw (new FileManagerException("Attempt to allocate in read only mode"));
        }
        allocs++;
        // Reserve room for the per-block header in front of the payload.
        request_size = request_size + HDR_SIZE;
        if (request_size <= last_quick_size_block) {
            // Small requests are served from the quick (fixed-size) free lists.
            return allocate_from_ql(request_size);
        } else {
            // round up to nearest multiple of a grain_size, then search the main list
            request_size = ((request_size + grain_size - 1) / grain_size) * grain_size;
            return search_ml(request_size);
        }
    }
}
public class MirrorUtil {

    /**
     * Normalizes the specified {@code path}. A path which starts and ends with {@code /}
     * would be returned. Also, it would not have consecutive {@code /}.
     *
     * @param path the path to normalize; must not be null
     * @return the normalized path, always starting and ending with a single {@code /}
     */
    public static String normalizePath(String path) {
        requireNonNull(path, "path");
        if (path.isEmpty()) {
            return "/";
        }

        // Wrap the path with leading/trailing slashes where missing, then
        // collapse any runs of consecutive slashes into a single one.
        final StringBuilder buf = new StringBuilder(path.length() + 2);
        if (path.charAt(0) != '/') {
            buf.append('/');
        }
        buf.append(path);
        if (path.charAt(path.length() - 1) != '/') {
            buf.append('/');
        }
        return buf.toString().replaceAll("//+", "/");
    }
}
public class ListBinding {

    /**
     * Updates the selection model with the selected values from the value model.
     * Converts the bound value into an array, maps each element to its list index,
     * and applies the indexes to the JList's selection model. If some values were
     * not found in the list model (or a single-selection list received multiple
     * values), the value model is synced back from the actual selection.
     */
    protected void updateSelectedItemsFromValueModel() {
        Object value = getValue();
        Object[] selectedValues = EMPTY_VALUES;
        if (value != null) {
            selectedValues = (Object[]) convertValue(value, Object[].class);
        }
        // flag is used to avoid a round trip while we are selecting the values
        selectingValues = true;
        try {
            ListSelectionModel selectionModel = getList().getSelectionModel();
            // Batch the selection changes into a single event.
            selectionModel.setValueIsAdjusting(true);
            try {
                int[] valueIndexes = determineValueIndexes(selectedValues);
                int selectionMode = getSelectionMode();
                if (selectionMode == ListSelectionModel.SINGLE_SELECTION && valueIndexes.length > 1) {
                    // Single-selection lists can only honor the first value.
                    getList().setSelectedIndex(valueIndexes[0]);
                } else {
                    getList().setSelectedIndices(valueIndexes);
                }
                // update value model if selectedValues contain elements which where not
                // found in the list model elements
                if (valueIndexes.length != selectedValues.length && !isReadOnly() && isEnabled()
                        || (selectionMode == ListSelectionModel.SINGLE_SELECTION && valueIndexes.length > 1)) {
                    updateSelectedItemsFromSelectionModel();
                }
            } finally {
                selectionModel.setValueIsAdjusting(false);
            }
        } finally {
            selectingValues = false;
        }
    }
}
public class DnsCacheManipulator { /** * Set JVM DNS negative cache policy * @ param negativeCacheSeconds set default dns cache time . Special input case : * < ul > * < li > { @ code - 1 } means never expired . ( In effect , all negative value ) < / li > * < li > { @ code 0 } never cached . < / li > * < / ul > * @ throws DnsCacheManipulatorException Operation fail * @ since 1.3.0 */ public static void setDnsNegativeCachePolicy ( int negativeCacheSeconds ) { } }
try { InetAddressCacheUtil . setDnsNegativeCachePolicy ( negativeCacheSeconds ) ; } catch ( Exception e ) { throw new DnsCacheManipulatorException ( "Fail to setDnsNegativeCachePolicy, cause: " + e . toString ( ) , e ) ; }
public class GenericHibernateDao {

    /**
     * This method returns a {@link Map} that maps {@link PersistentObject}s to
     * PermissionCollections for the passed {@link UserGroup}. I.e. the keySet of the map
     * is the collection of all {@link PersistentObject}s where the user group has at
     * least one permission and the corresponding value contains the
     * {@link PermissionCollection} for the passed user group on the entity.
     *
     * @param userGroup the user group to collect permissions for
     * @return a map from entity to the user group's permission collection on it
     */
    @SuppressWarnings({ "unchecked" })
    public Map<PersistentObject, PermissionCollection> findAllUserGroupPermissionsOfUserGroup(UserGroup userGroup) {
        Criteria criteria = getSession().createCriteria(PersistentObject.class);
        // by only setting the alias, we will only get those entities where
        // there is at least one permission set...
        // it is hard (or even impossible in this scenario) to create a
        // restriction that filters for permissions of the given user group only.
        // using HQL here is no option as the PersistentObject is
        // a MappedSuperclass (without table).
        // another efficient way would be a SQL query, but then the SQL
        // would be written in an explicit SQL dialect...
        criteria.createAlias("groupPermissions", "gp");
        criteria.setResultTransformer(Criteria.DISTINCT_ROOT_ENTITY);

        List<PersistentObject> entitiesWithPermissions = criteria.list();

        Map<PersistentObject, PermissionCollection> userGroupPermissions =
            new HashMap<PersistentObject, PermissionCollection>();

        // TODO find a better way than iterating over all entities of the system
        // that have at least one permission (for any user) (see comment above)
        for (PersistentObject entity : entitiesWithPermissions) {
            Map<UserGroup, PermissionCollection> entityUserGroupPermissions = entity.getGroupPermissions();
            // Keep only the entities where the requested group actually has permissions.
            if (entityUserGroupPermissions.containsKey(userGroup)) {
                userGroupPermissions.put(entity, entityUserGroupPermissions.get(userGroup));
            }
        }
        return userGroupPermissions;
    }
}
public class DomLayersModelRenderer {

    /**
     * Registers (or replaces) the renderer responsible for the given layer.
     *
     * @param layer the layer to render
     * @param layerRenderer the renderer to associate with the layer
     */
    @Override
    public void registerLayerRenderer(Layer layer, LayerRenderer layerRenderer) {
        // NOTE(review): for a plain HashMap the remove() is redundant (put overwrites),
        // but if layerRenderers is an insertion-ordered map, remove-then-put moves the
        // entry to the end — confirm the map type before simplifying this.
        if (layerRenderers.containsKey(layer)) {
            layerRenderers.remove(layer);
        }
        layerRenderers.put(layer, layerRenderer);
    }
}
public class UDPRelayServer {

    /**
     * Relay-thread entry point. Pumps datagrams in one direction, chosen by the
     * thread's name: the thread named "pipe1" forwards remote-to-client, any other
     * forwards client-to-remote. Always aborts the relay pair when the pipe ends.
     */
    public void run() {
        try {
            if (Thread.currentThread().getName().equals("pipe1"))
                pipe(remote_sock, client_sock, false);
            else
                pipe(client_sock, remote_sock, true);
        } catch (IOException ioe) {
            // Best-effort relay: an I/O failure simply terminates this pipe;
            // cleanup happens in the finally block below.
        } finally {
            abort();
            log("UDP Pipe thread " + Thread.currentThread().getName() + " stopped.");
        }
    }
}
public class PushNotificationManager { /** * Stop and restart the current connection to the Apple server using server settings from the previous connection . * @ throws CommunicationException thrown if a communication error occurs * @ throws KeystoreException thrown if there is a problem with your keystore */ private void restartPreviousConnection ( ) throws CommunicationException , KeystoreException { } }
try { logger . debug ( "Closing connection to restart previous one" ) ; this . socket . close ( ) ; } catch ( Exception e ) { /* Do not complain if connection is already closed . . . */ } initializePreviousConnection ( ) ;
public class SearchExpression { /** * A list of search expression objects . * @ param subExpressions * A list of search expression objects . */ public void setSubExpressions ( java . util . Collection < SearchExpression > subExpressions ) { } }
if ( subExpressions == null ) { this . subExpressions = null ; return ; } this . subExpressions = new java . util . ArrayList < SearchExpression > ( subExpressions ) ;
public class KeyManagementServiceClient { /** * Returns metadata for a given [ KeyRing ] [ google . cloud . kms . v1 . KeyRing ] . * < p > Sample code : * < pre > < code > * try ( KeyManagementServiceClient keyManagementServiceClient = KeyManagementServiceClient . create ( ) ) { * KeyRingName name = KeyRingName . of ( " [ PROJECT ] " , " [ LOCATION ] " , " [ KEY _ RING ] " ) ; * KeyRing response = keyManagementServiceClient . getKeyRing ( name ) ; * < / code > < / pre > * @ param name The [ name ] [ google . cloud . kms . v1 . KeyRing . name ] of the * [ KeyRing ] [ google . cloud . kms . v1 . KeyRing ] to get . * @ throws com . google . api . gax . rpc . ApiException if the remote call fails */ public final KeyRing getKeyRing ( KeyRingName name ) { } }
GetKeyRingRequest request = GetKeyRingRequest . newBuilder ( ) . setName ( name == null ? null : name . toString ( ) ) . build ( ) ; return getKeyRing ( request ) ;
public class MetamodelConfiguration { /** * Method to prepare class simple name to list of pu ' s mapping . 1 class can * be mapped to multiple persistence units , in case of RDBMS , in other cases * it will only be 1! * @ param clazz * entity class to be mapped . * @ param pu * current persistence unit name * @ param clazzToPuMap * collection holding mapping . * @ return map holding mapping . */ private Map < String , List < String > > mapClazztoPu ( Class < ? > clazz , String pu , Map < String , List < String > > clazzToPuMap ) { } }
List < String > puCol = new ArrayList < String > ( 1 ) ; if ( clazzToPuMap == null ) { clazzToPuMap = new HashMap < String , List < String > > ( ) ; } else { if ( clazzToPuMap . containsKey ( clazz . getName ( ) ) ) { puCol = clazzToPuMap . get ( clazz . getName ( ) ) ; } } if ( ! puCol . contains ( pu ) ) { puCol . add ( pu ) ; clazzToPuMap . put ( clazz . getName ( ) , puCol ) ; String annotateEntityName = clazz . getAnnotation ( Entity . class ) . name ( ) ; if ( ! StringUtils . isBlank ( annotateEntityName ) ) { clazzToPuMap . put ( annotateEntityName , puCol ) ; } } return clazzToPuMap ;
public class ClusterControllerClient {

    /**
     * Deletes a cluster in a project.
     *
     * <p>Sample code:
     * <pre><code>
     * try (ClusterControllerClient clusterControllerClient = ClusterControllerClient.create()) {
     *   String projectId = "";
     *   String region = "";
     *   String clusterName = "";
     *   clusterControllerClient.deleteClusterAsync(projectId, region, clusterName).get();
     * }
     * </code></pre>
     *
     * @param projectId Required. The ID of the Google Cloud Platform project that the
     *        cluster belongs to.
     * @param region Required. The Cloud Dataproc region in which to handle the request.
     * @param clusterName Required. The cluster name.
     * @throws com.google.api.gax.rpc.ApiException if the remote call fails
     */
    @BetaApi("The surface for long-running operations is not stable yet and may change in the future.")
    public final OperationFuture<Empty, ClusterOperationMetadata> deleteClusterAsync(String projectId, String region, String clusterName) {
        // Wrap the plain parameters in a request proto and delegate to the request overload.
        DeleteClusterRequest request = DeleteClusterRequest.newBuilder()
            .setProjectId(projectId)
            .setRegion(region)
            .setClusterName(clusterName)
            .build();
        return deleteClusterAsync(request);
    }
}
public class ExpressRouteCrossConnectionsInner {

    /**
     * Gets the route table summary associated with the express route cross connection
     * in a resource group.
     *
     * @param resourceGroupName The name of the resource group.
     * @param crossConnectionName The name of the ExpressRouteCrossConnection.
     * @param peeringName The name of the peering.
     * @param devicePath The path of the device.
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @return the observable for the request
     */
    public Observable<ServiceResponse<ExpressRouteCrossConnectionsRoutesTableSummaryListResultInner>> listRoutesTableSummaryWithServiceResponseAsync(String resourceGroupName, String crossConnectionName, String peeringName, String devicePath) {
        // Validate all required parameters up front before issuing the request.
        if (resourceGroupName == null) {
            throw new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null.");
        }
        if (crossConnectionName == null) {
            throw new IllegalArgumentException("Parameter crossConnectionName is required and cannot be null.");
        }
        if (peeringName == null) {
            throw new IllegalArgumentException("Parameter peeringName is required and cannot be null.");
        }
        if (devicePath == null) {
            throw new IllegalArgumentException("Parameter devicePath is required and cannot be null.");
        }
        if (this.client.subscriptionId() == null) {
            throw new IllegalArgumentException("Parameter this.client.subscriptionId() is required and cannot be null.");
        }
        final String apiVersion = "2018-06-01";
        Observable<Response<ResponseBody>> observable = service.listRoutesTableSummary(resourceGroupName, crossConnectionName, peeringName, devicePath, this.client.subscriptionId(), apiVersion, this.client.acceptLanguage(), this.client.userAgent());
        // This is a long-running POST operation; poll via the Azure client until it settles.
        return client.getAzureClient().getPostOrDeleteResultAsync(observable, new TypeToken<ExpressRouteCrossConnectionsRoutesTableSummaryListResultInner>() { }.getType());
    }
}
public class AbstractRegistry { /** * If this registry depends on other registries , this method can be used to tell this registry if all depending registries are consistent . * @ return The method returns should return false if at least one depending registry is not consistent ! */ protected boolean isDependingOnConsistentRegistries ( ) { } }
dependingRegistryMapLock . readLock ( ) . lock ( ) ; try { return dependingRegistryMap . keySet ( ) . stream ( ) . noneMatch ( ( registry ) -> ( ! registry . isConsistent ( ) ) ) ; } finally { dependingRegistryMapLock . readLock ( ) . unlock ( ) ; }
public class CreateVaultRequestMarshaller {

    /**
     * Marshall the given parameter object.
     *
     * @param createVaultRequest the request to marshall; must not be null
     * @param protocolMarshaller the marshaller that receives the bound fields
     * @throws SdkClientException if the request is null or marshalling fails
     */
    public void marshall(CreateVaultRequest createVaultRequest, ProtocolMarshaller protocolMarshaller) {
        if (createVaultRequest == null) {
            throw new SdkClientException("Invalid argument passed to marshall(...)");
        }
        try {
            // Bind each request field to its protocol location.
            protocolMarshaller.marshall(createVaultRequest.getAccountId(), ACCOUNTID_BINDING);
            protocolMarshaller.marshall(createVaultRequest.getVaultName(), VAULTNAME_BINDING);
        } catch (Exception e) {
            // Preserve the cause so callers can diagnose the underlying failure.
            throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
        }
    }
}
public class StunHandler { /** * All STUN messages MUST start with a 20 - byte header followed by zero or more Attributes . * The STUN header contains a STUN message type , magic cookie , transaction ID , and message length . * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * | 0 0 | STUN Message Type | Message Length | * | Magic Cookie | * | Transaction ID ( 96 bits ) | * @ param data * @ param length * @ return * @ see < a href = " http : / / tools . ietf . org / html / rfc5389 # page - 10 " > RFC5389 < / a > */ @ Override public boolean canHandle ( byte [ ] data , int length , int offset ) { } }
/* * All STUN messages MUST start with a 20 - byte header followed by zero * or more Attributes . */ if ( length >= 20 ) { // The most significant 2 bits of every STUN message MUST be zeroes . byte b0 = data [ offset ] ; boolean firstBitsValid = ( ( b0 & 0xC0 ) == 0 ) ; // The magic cookie field MUST contain the fixed value 0x2112A442 in network byte order . boolean hasMagicCookie = data [ offset + 4 ] == StunMessage . MAGIC_COOKIE [ 0 ] && data [ offset + 5 ] == StunMessage . MAGIC_COOKIE [ 1 ] && data [ offset + 6 ] == StunMessage . MAGIC_COOKIE [ 2 ] && data [ offset + 7 ] == StunMessage . MAGIC_COOKIE [ 3 ] ; return firstBitsValid && hasMagicCookie ; } return false ;
public class AbstractMetaCache { /** * Looks up the object from the cache * @ param oid The Identity to look up the object for * @ return The object if found , otherwise null */ public Object lookup ( Identity oid ) { } }
Object ret = null ; if ( oid != null ) { ObjectCache cache = getCache ( oid , null , METHOD_LOOKUP ) ; if ( cache != null ) { ret = cache . lookup ( oid ) ; } } return ret ;
public class DerInputStream { /** * Return a set of encoded entities . ASN . 1 sets are unordered , * though DER may specify an order for some kinds of sets ( such * as the attributes in an X . 500 relative distinguished name ) * to facilitate binary comparisons of encoded values . * @ param startLen guess about how large the set will be * ( used to initialize an auto - growing data structure ) * @ return array of the values in the sequence */ public DerValue [ ] getSet ( int startLen ) throws IOException { } }
tag = ( byte ) buffer . read ( ) ; if ( tag != DerValue . tag_Set ) throw new IOException ( "Set tag error" ) ; return readVector ( startLen ) ;
public class Stream {

    /**
     * Zip together the iterators until one of them runs out of values.
     * Each array of values is combined into a single value using the supplied
     * zipFunction function.
     *
     * @param c the byte streams to zip; an empty/null collection yields an empty stream
     * @param zipFunction combines one byte from each stream into a result element
     * @return a lazily-evaluated stream of combined values
     */
    @SuppressWarnings("resource")
    public static <R> Stream<R> zip(final Collection<? extends ByteStream> c, final ByteNFunction<R> zipFunction) {
        if (N.isNullOrEmpty(c)) {
            return Stream.empty();
        }

        final int len = c.size();
        final ByteIterator[] iters = new ByteIterator[len];
        int i = 0;
        for (ByteStream s : c) {
            iters[i++] = s.iteratorEx();
        }

        return new IteratorStream<>(new ObjIteratorEx<R>() {
            @Override
            public boolean hasNext() {
                // The zipped stream ends as soon as ANY source iterator is exhausted.
                // (The loop variable i here shadows the outer i, which is fine: the
                // outer one is only used during setup above.)
                for (int i = 0; i < len; i++) {
                    if (iters[i].hasNext() == false) {
                        return false;
                    }
                }
                return true;
            }

            @Override
            public R next() {
                // Pull one byte from each source, then combine them.
                final byte[] args = new byte[len];
                for (int i = 0; i < len; i++) {
                    args[i] = iters[i].nextByte();
                }
                return zipFunction.apply(args);
            }
        }).onClose(newCloseHandler(c)); // closing the result closes every source stream
    }
}
public class Device { /** * The instances belonging to this device . * < b > NOTE : < / b > This method appends the values to the existing list ( if any ) . Use * { @ link # setInstances ( java . util . Collection ) } or { @ link # withInstances ( java . util . Collection ) } if you want to * override the existing values . * @ param instances * The instances belonging to this device . * @ return Returns a reference to this object so that method calls can be chained together . */ public Device withInstances ( DeviceInstance ... instances ) { } }
if ( this . instances == null ) { setInstances ( new java . util . ArrayList < DeviceInstance > ( instances . length ) ) ; } for ( DeviceInstance ele : instances ) { this . instances . add ( ele ) ; } return this ;
public class CentralDogma { /** * Returns the primary port of the server . * @ return the primary { @ link ServerPort } if the server is started . { @ link Optional # empty ( ) } otherwise . */ public Optional < ServerPort > activePort ( ) { } }
final Server server = this . server ; return server != null ? server . activePort ( ) : Optional . empty ( ) ;
public class MariaDbDatabaseMetaData {

    /**
     * Retrieves a description of the access rights for each table available in a catalog.
     * Note that a table privilege applies to one or more columns in the table. It would be
     * wrong to assume that this privilege applies to all columns (this may be true for some
     * systems but is not true for all.)
     *
     * <p>Only privileges matching the schema and table name criteria are returned. They are
     * ordered by <code>TABLE_CAT</code>, <code>TABLE_SCHEM</code>, <code>TABLE_NAME</code>,
     * and <code>PRIVILEGE</code>.</p>
     *
     * <p>Each privilege description has the following columns:</p>
     * <OL><LI><B>TABLE_CAT</B> String {@code =>} table catalog (may be <code>null</code>)
     * <LI><B>TABLE_SCHEM</B> String {@code =>} table schema (may be <code>null</code>)
     * <LI><B>TABLE_NAME</B> String {@code =>} table name
     * <LI><B>GRANTOR</B> String {@code =>} grantor of access (may be <code>null</code>)
     * <LI><B>GRANTEE</B> String {@code =>} grantee of access
     * <LI><B>PRIVILEGE</B> String {@code =>} name of access (SELECT, INSERT, UPDATE, REFRENCES, ...)
     * <LI><B>IS_GRANTABLE</B> String {@code =>} "YES" if grantee is permitted to grant to
     * others; "NO" if not; <code>null</code> if unknown</OL>
     *
     * @param catalog a catalog name; must match the catalog name as it is stored in the
     *        database; "" retrieves those without a catalog; <code>null</code> means that
     *        the catalog name should not be used to narrow the search
     * @param schemaPattern a schema name pattern; must match the schema name as it is
     *        stored in the database; "" retrieves those without a schema;
     *        <code>null</code> means that the schema name should not be used to narrow
     *        the search
     * @param tableNamePattern a table name pattern; must match the table name as it is
     *        stored in the database
     * @return <code>ResultSet</code> - each row is a table privilege description
     * @throws SQLException if a database access error occurs
     * @see #getSearchStringEscape
     */
    public ResultSet getTablePrivileges(String catalog, String schemaPattern, String tableNamePattern) throws SQLException {
        // NOTE(review): there is no literal space between the patternCond(...) fragment
        // and "ORDER BY" below — presumably patternCond() returns a trailing space;
        // confirm against its implementation before changing this string.
        String sql = "SELECT TABLE_SCHEMA TABLE_CAT,NULL  TABLE_SCHEM, TABLE_NAME, NULL GRANTOR,"
            + "GRANTEE, PRIVILEGE_TYPE  PRIVILEGE, IS_GRANTABLE  FROM INFORMATION_SCHEMA.TABLE_PRIVILEGES "
            + " WHERE "
            + catalogCond("TABLE_SCHEMA", catalog)
            + " AND "
            + patternCond("TABLE_NAME", tableNamePattern)
            + "ORDER BY TABLE_SCHEMA, TABLE_NAME,  PRIVILEGE_TYPE ";
        return executeQuery(sql);
    }
}
public class Mutation { /** * This is equivalent to calling commit . Applies the changes to * to the keyspace that is obtained by calling Keyspace . open ( ) . */ public void apply ( ) { } }
Keyspace ks = Keyspace . open ( keyspaceName ) ; ks . apply ( this , ks . metadata . durableWrites ) ;
public class BigtableInstanceAdminClient {

    /**
     * Lists all app profiles of the specified instance.
     *
     * <p>Sample code:
     * <pre>{@code
     * List<AppProfile> appProfiles = client.listAppProfiles("my-instance");
     * }</pre>
     *
     * @param instanceId the instance whose app profiles are listed
     * @return the app profiles of the instance
     * @see AppProfile
     */
    @SuppressWarnings("WeakerAccess")
    public List<AppProfile> listAppProfiles(String instanceId) {
        // Block on the async variant, translating execution failures into ApiExceptions.
        return ApiExceptions.callAndTranslateApiException(listAppProfilesAsync(instanceId));
    }
}
public class SLF4JLoggingCallback { /** * Logs Simon stop on a specified log marker . * @ param split stopped split * @ param sample stopwatch sample */ @ Override public void onStopwatchStop ( Split split , StopwatchSample sample ) { } }
logger . debug ( marker , "SIMON STOP: {} ({})" , sample . toString ( ) , split . runningFor ( ) ) ;
public class OLAPService { /** * Perform an aggregate query on the given table using the given request . * @ param tableDef { @ link TableDefinition } of table to query . * @ param request { @ link OlapAggregate } that defines query parameters . * @ return { @ link AggregateResult } containing search results . */ public AggregateResult aggregateQuery ( TableDefinition tableDef , OlapAggregate request ) { } }
checkServiceState ( ) ; AggregationResult result = m_olap . aggregate ( tableDef . getAppDef ( ) , tableDef . getTableName ( ) , request ) ; return AggregateResultConverter . create ( result , request ) ;
public class BaseObject {

    /**
     * Call a function on this object.
     * Builds a query that looks up this object's stored result, invokes the method on it,
     * and stores the outcome under a "LAST_"-prefixed key before executing.
     *
     * @param method the remote method name to call
     * @param args the arguments forwarded to the remote method
     * @return the execution result as a {@link JSONArray}
     * @throws Exception if building or executing the query fails
     */
    protected JSONArray callMethod(String method, Object... args) throws Exception {
        return new QueryBuilder()
            .retrieveResult(storedId)
            .call(method, args)
            .storeResult("LAST_" + getStoredId())
            .execute();
    }
}
public class InetSubnet { /** * Repopulates the transient fields based on the IP and prefix */ private void recache ( ) { } }
// If we ' ve already cached the values then don ' t bother recalculating ; we // assume a mask of 0 means a recompute is needed ( unless prefix is also 0) // We skip the computation completely is prefix is 0 - this is fine , since // the mask and maskedNetwork for prefix 0 result in 0 , the default values . // We need to special - case / 0 because our mask generation code doesn ' t work for // prefix = 0 ( since - 1 < < 32 ! = 0) if ( mask == 0 && prefix != 0 ) { this . mask = - 1 << ( 32 - prefix ) ; this . maskedNetwork = IpHelper . aton ( network ) & mask ; }
public class Prefix { /** * - - prefix methods */ public int first ( ) { } }
long remaining ; long next ; remaining = data ; while ( true ) { next = remaining / BASE ; if ( next == 0 ) { return ( int ) remaining - 1 ; } remaining = next ; }
public class TupleCombiner {
    /**
     * Returns all fully-combined N-tuples of values for the included input
     * variables, with once-only constraints applied to each tuple.
     */
    private Collection<Tuple> getCombinedTuples(List<VarDef> combinedVars, Collection<Tuple> tuples) {
        // Apply any once-only constraints.
        Set<Tuple> onceOnly = getOnceTupleDefs(combinedVars);
        if (onceOnly.isEmpty()) {
            return tuples;
        }
        for (Tuple candidate : tuples) {
            candidate.setOnce(onceOnly.contains(candidate));
        }
        return tuples;
    }
}
public class IsolationRunner {
    /**
     * Run a single map or reduce task in isolation, reconstructing its
     * working directory, classpath and (for maps) input split from a
     * previously written job.xml.
     *
     * @param args the first argument is the path to the task's job.xml
     * @throws ClassNotFoundException if the task classes cannot be loaded
     * @throws IOException            if the job file or split cannot be read
     * @throws InterruptedException   if the task run is interrupted
     */
    public static void main(String[] args) throws ClassNotFoundException, IOException, InterruptedException {
        // Exactly one argument (the job.xml path) is required.
        if (args.length != 1) {
            System.out.println("Usage: IsolationRunner <path>/job.xml");
            System.exit(1);
        }
        File jobFilename = new File(args[0]);
        if (!jobFilename.exists() || !jobFilename.isFile()) {
            System.out.println(jobFilename + " is not a valid job file.");
            System.exit(1);
        }
        // Rebuild the task identity from the serialized job configuration.
        JobConf conf = new JobConf(new Path(jobFilename.toString()));
        TaskAttemptID taskId = TaskAttemptID.forName(conf.get("mapred.task.id"));
        boolean isMap = conf.getBoolean("mapred.task.is.map", true);
        int partition = conf.getInt("mapred.task.partition", 0);
        // setup the local and user working directories
        FileSystem local = FileSystem.getLocal(conf);
        LocalDirAllocator lDirAlloc = new LocalDirAllocator("mapred.local.dir");
        File workDirName = new File(lDirAlloc.getLocalPathToRead(
                TaskTracker.getLocalTaskDir(taskId.getJobID().toString(), taskId.toString())
                + Path.SEPARATOR + "work", conf).toString());
        local.setWorkingDirectory(new Path(workDirName.toString()));
        FileSystem.get(conf).setWorkingDirectory(conf.getWorkingDirectory());
        // set up a classloader with the right classpath, and install it both
        // on the current thread and on the configuration itself
        ClassLoader classLoader = makeClassLoader(conf, workDirName);
        Thread.currentThread().setContextClassLoader(classLoader);
        conf.setClassLoader(classLoader);
        Task task;
        if (isMap) {
            // Map task: deserialize the input split written next to job.xml
            // (class name first, then the raw split bytes).
            Path localSplit = new Path(new Path(jobFilename.toString()).getParent(), "split.dta");
            DataInputStream splitFile = FileSystem.getLocal(conf).open(localSplit);
            String splitClass = Text.readString(splitFile);
            BytesWritable split = new BytesWritable();
            split.readFields(splitFile);
            splitFile.close();
            task = new MapTask(jobFilename.toString(), taskId, partition, splitClass, split, 1, conf.getUser());
        } else {
            // Reduce task: make sure every expected map output file exists
            // (missing ones are filled in) before constructing the task.
            int numMaps = conf.getNumMapTasks();
            fillInMissingMapOutputs(local, taskId, numMaps, conf);
            task = new ReduceTask(jobFilename.toString(), taskId, partition, numMaps, 1, conf.getUser());
        }
        task.setConf(conf);
        // FakeUmbilical stands in for the TaskTracker protocol since the
        // task is being run outside a real tracker.
        task.run(conf, new FakeUmbilical());
    }
}
public class Vacuum { /** * Get the vacuums current fan speed setting . * @ return The fan speed . * @ throws CommandExecutionException When there has been a error during the communication or the response was invalid . */ public int getFanSpeed ( ) throws CommandExecutionException { } }
int resp = sendToArray ( "get_custom_mode" ) . optInt ( 0 , - 1 ) ; if ( ( resp < 0 ) || ( resp > 100 ) ) throw new CommandExecutionException ( CommandExecutionException . Error . INVALID_RESPONSE ) ; return resp ;
public class SpanId { /** * Generates a new random { @ code SpanId } . * @ param random The random number generator . * @ return a valid new { @ code SpanId } . * @ since 0.5 */ public static SpanId generateRandomId ( Random random ) { } }
long id ; do { id = random . nextLong ( ) ; } while ( id == INVALID_ID ) ; return new SpanId ( id ) ;
public class TransformProcess { /** * Execute a TransformProcess that starts with a single ( non - sequence ) record , * and converts it to a sequence record . * < b > NOTE < / b > : This method has the following significant limitation : * if it contains a ConvertToSequence op , * it MUST be using singleStepSequencesMode - see { @ link ConvertToSequence } for details . < br > * This restriction is necessary , as ConvertToSequence . singleStepSequencesMode is false , this requires a group by * operation - i . e . , we need to group multiple independent records together by key ( s ) - this isn ' t possible here , * when providing a single example as input * @ param inputExample Input example * @ return Sequence , after processing ( or null , if it was filtered out ) */ public List < List < List < Writable > > > executeToSequenceBatch ( List < List < Writable > > inputExample ) { } }
List < List < List < Writable > > > ret = new ArrayList < > ( ) ; for ( List < Writable > record : inputExample ) ret . add ( execute ( record , null ) . getRight ( ) ) ; return ret ;
public class CMBlob { /** * Converts a CloudMe Blob to a generic CBlob * @ return CBlob */ public CBlob toCBlob ( ) { } }
CBlob cBlob = new CBlob ( getPath ( ) , length , contentType ) ; cBlob . setModificationDate ( updated ) ; return cBlob ;
public class GeoJsonReaderDriver {
    /**
     * Parses all GeoJSON feature metadata, creates the destination table and
     * prepares the INSERT statement used for the subsequent data load.
     *
     * @return true when the table was created and the PreparedStatement is ready
     * @throws SQLException if the file is not a FeatureCollection, contains no
     *         geometry, or a database error occurs
     * @throws IOException  if closing the input stream fails
     */
    private boolean parseMetadata() throws SQLException, IOException {
        FileInputStream fis = null;
        try {
            fis = new FileInputStream(fileName);
            this.fc = fis.getChannel();
            this.fileSize = fc.size();
            // Given the file size and an average node size, skip this many
            // nodes between progress updates so progression moves in 1% steps.
            readFileSizeEachNode = Math.max(1, (this.fileSize / AVERAGE_NODE_SIZE) / 100);
            nodeCountProgress = 0;
            // LinkedHashMap keeps column order stable for table creation below.
            cachedColumnNames = new LinkedHashMap<String, String>();
            finalGeometryTypes = new HashSet<String>();
            try (JsonParser jp = jsFactory.createParser(fis)) {
                jp.nextToken(); // START_OBJECT
                jp.nextToken(); // field_name (type)
                jp.nextToken(); // value_string (FeatureCollection)
                String geomType = jp.getText();
                if (geomType.equalsIgnoreCase(GeoJsonField.FEATURECOLLECTION)) {
                    parseFeaturesMetadata(jp);
                } else {
                    throw new SQLException("Malformed GeoJSON file. Expected 'FeatureCollection', found '" + geomType + "'");
                }
            } // START_OBJECT
        } catch (FileNotFoundException ex) {
            throw new SQLException(ex);
        } finally {
            try {
                if (fis != null) {
                    fis.close();
                }
            } catch (IOException ex) {
                // NOTE(review): rethrowing here wraps the close failure and can
                // mask an exception already propagating from the try block —
                // consider logging instead; verify intended behavior.
                throw new IOException(ex);
            }
        }
        // Now we create the table if there is at least one geometry field.
        if (hasGeometryField) {
            StringBuilder createTable = new StringBuilder();
            createTable.append("CREATE TABLE ");
            createTable.append(tableLocation);
            createTable.append(" (");
            // Add the geometry column; H2 has no typed GEOMETRY(...) syntax
            // with SRID, so the plain form is used there.
            if (isH2) {
                createTable.append("THE_GEOM GEOMETRY");
            } else {
                createTable.append("THE_GEOM GEOMETRY(geometry,").append(parsedSRID).append(")");
            }
            cachedColumnIndex = new HashMap<String, Integer>();
            StringBuilder insertTable = new StringBuilder("INSERT INTO ");
            insertTable.append(tableLocation).append(" VALUES(?");
            // Parameter index 1 is the geometry; property columns start at 2.
            int i = 1;
            for (Map.Entry<String, String> columns : cachedColumnNames.entrySet()) {
                String columnName = columns.getKey();
                cachedColumnIndex.put(columnName, i++);
                createTable.append(",").append(columns.getKey()).append(" ").append(columns.getValue());
                insertTable.append(",").append("?");
            }
            createTable.append(")");
            insertTable.append(")");
            try (Statement stmt = connection.createStatement()) {
                stmt.execute(createTable.toString());
            }
            preparedStatement = connection.prepareStatement(insertTable.toString());
            return true;
        } else {
            throw new SQLException("The geojson file does not contain any geometry.");
        }
    }
}
public class BlockingDataCollector { /** * Starts a data collecting Thread that will call { @ link # collectData ( ) } * @ return */ @ Override public final ReportDataHolderImpl collect ( ) { } }
holder = new BlockingDataHolder ( dataQueueSize , queueTimeout ) ; Runnable r = new collectThread ( ) ; Thread t = new Thread ( r , DATA_COLLECTOR_THREAD ) ; t . setUncaughtExceptionHandler ( this ) ; t . start ( ) ; return holder ;
public class Image {
    /**
     * Draw this image at a specified location and size.
     *
     * @param x      the x location to draw the image at
     * @param y      the y location to draw the image at
     * @param width  the width to render the image at
     * @param height the height to render the image at
     * @param filter the color to filter with while drawing (may be null)
     */
    @Override
    public void draw(float x, float y, float width, float height, Color filter) {
        // Fold the image's own alpha into the filter color. The filter is
        // copied first so the caller's Color instance is not mutated.
        if (alpha != 1) {
            if (filter == null) {
                filter = Color.white;
            }
            filter = new Color(filter);
            filter.a *= alpha;
        }
        if (filter != null) {
            filter.bind();
        }
        texture.bind();
        // Translate to the draw position, rotate around the image center if
        // needed, render the quad, then undo both transforms in reverse
        // order. The call order is significant: GL is a state machine.
        GL.glTranslatef(x, y, 0);
        if (angle != 0) {
            GL.glTranslatef(centerX, centerY, 0.0f);
            GL.glRotatef(angle, 0.0f, 0.0f, 1.0f);
            GL.glTranslatef(-centerX, -centerY, 0.0f);
        }
        GL.glBegin(SGL.GL_QUADS);
        drawEmbedded(0, 0, width, height);
        GL.glEnd();
        if (angle != 0) {
            GL.glTranslatef(centerX, centerY, 0.0f);
            GL.glRotatef(-angle, 0.0f, 0.0f, 1.0f);
            GL.glTranslatef(-centerX, -centerY, 0.0f);
        }
        GL.glTranslatef(-x, -y, 0);
    }
}
public class ProgressionUtil {
    /**
     * Sets the value of the given task progression, if not <code>null</code>.
     * The new value is applied only when it is strictly greater than the
     * current progression value.
     *
     * <p>NOTE(review): the original javadoc described {@code value} as "the
     * value to add", but the code below SETS it as the new absolute value —
     * doc corrected to match the code.</p>
     *
     * @param model   the progression to change (ignored when null)
     * @param value   the new progression value
     * @param comment the comment associated to the progression
     */
    public static void setValue(Progression model, int value, String comment) {
        if (model != null && value > model.getValue()) {
            model.setValue(value, comment);
        }
    }
}
public class AsmInvokeDistributeFactory {
    /**
     * Emits the byte code for one if-branch of the dispatch method: the branch
     * matches a target method by declaring class, name and descriptor, and on
     * a full match loads the arguments and invokes the method on the delegate.
     *
     * @param mv          the MethodVisitor to emit into
     * @param method      the method this branch dispatches to
     * @param next        label of the next branch (jump target on mismatch)
     * @param start       label marking the start of this branch
     * @param className   internal name of the generated proxy class
     * @param parentClass class of the delegated target object
     */
    private static void createIf(MethodVisitor mv, Method method, Label next, Label start, String className, Class<?> parentClass) {
        // Mark the start of this branch.
        mv.visitLabel(start);
        mv.visitFrame(F_SAME, 0, null, 0, null);
        // Compare the method's declaring class (local slot 1).
        stringEquals(mv, () -> mv.visitVarInsn(ALOAD, 1), convert(method.getDeclaringClass()), next, () -> {
            // Compare the method name (local slot 2).
            stringEquals(mv, () -> mv.visitVarInsn(ALOAD, 2), method.getName(), next, () -> {
                // Names match: compare the method descriptor (local slot 3).
                stringEquals(mv, () -> mv.visitVarInsn(ALOAD, 3), ByteCodeUtils.getMethodDesc(method), next, () -> {
                    // Descriptor also matches: invoke the method.
                    invokeMethod(mv, method, () -> {
                        // Invoke on the delegated target object, not on this proxy.
                        mv.visitVarInsn(ALOAD, 0);
                        mv.visitFieldInsn(GETFIELD, convert(className), TARGET_FIELD_NAME, getByteCodeType(parentClass));
                        // Parameter count, used to load the arguments.
                        int count = method.getParameterCount();
                        Class<?>[] types = method.getParameterTypes();
                        // Load each argument from the Object[] in local slot 4.
                        for (int i = 0; i < count; i++) {
                            mv.visitVarInsn(Opcodes.ALOAD, 4);
                            // Note: array indices 0-5 and >= 6 use different byte code:
                            //   indices 0-5:  aload | iconst_[0-5]  | aaload
                            //   indices >= 6: aload | bipush <i>    | aaload
                            if (i <= 5) {
                                mv.visitInsn(ICONST_0 + i);
                            } else {
                                mv.visitIntInsn(BIPUSH, i);
                            }
                            mv.visitInsn(Opcodes.AALOAD);
                            // Cast the Object element to the declared parameter type.
                            mv.visitTypeInsn(CHECKCAST, convert(types[i]));
                        }
                    });
                });
            });
        });
    }
}
public class ConsoleConsumer { /** * Creates and starts a daemon thread which consumes a { @ linkplain Process processes } * { @ link Process # getInputStream ( ) stdout } stream and pipes the date to the output stream . * Note that when using this method the { @ link ProcessBuilder # redirectErrorStream ( boolean ) } should likely be * { @ code true } . Otherwise another { @ linkplain # start ( InputStream , OutputStream ) thread } should be created to * consume { @ link Process # getErrorStream ( ) stderr } . * @ param process the process * @ param out the output stream where the data should be written * @ return the thread that was started */ public static Thread start ( final Process process , final OutputStream out ) { } }
return start ( process . getInputStream ( ) , out ) ;
public class JPAAuditLogService { /** * / * ( non - Javadoc ) * @ see org . jbpm . process . audit . AuditLogService # clear ( ) */ @ Override public void clear ( ) { } }
EntityManager em = getEntityManager ( ) ; Object newTx = joinTransaction ( em ) ; try { int deletedNodes = em . createQuery ( "delete FROM NodeInstanceLog WHERE processInstanceId in (select spl.processInstanceId FROM ProcessInstanceLog spl WHERE spl.status in (2, 3))" ) . executeUpdate ( ) ; logger . debug ( "CLEAR:: deleted node instances {}" , deletedNodes ) ; int deletedVariables = em . createQuery ( "delete FROM VariableInstanceLog WHERE processInstanceId in (select spl.processInstanceId FROM ProcessInstanceLog spl WHERE spl.status in (2, 3))" ) . executeUpdate ( ) ; logger . debug ( "CLEAR:: deleted variable instances {}" , deletedVariables ) ; int deletedProcesses = em . createQuery ( "delete FROM ProcessInstanceLog WHERE status in (2, 3)" ) . executeUpdate ( ) ; logger . debug ( "CLEAR:: deleted process instances {}" , deletedProcesses ) ; } finally { closeEntityManager ( em , newTx ) ; }
public class JavacProcessingEnvironment { /** * Called retroactively to determine if a class loader was required , * after we have failed to create one . */ private boolean needClassLoader ( String procNames , Iterable < ? extends File > workingpath ) { } }
if ( procNames != null ) return true ; URL [ ] urls = new URL [ 1 ] ; for ( File pathElement : workingpath ) { try { urls [ 0 ] = pathElement . toURI ( ) . toURL ( ) ; if ( ServiceProxy . hasService ( Processor . class , urls ) ) return true ; } catch ( MalformedURLException ex ) { throw new AssertionError ( ex ) ; } catch ( ServiceProxy . ServiceConfigurationError e ) { log . error ( "proc.bad.config.file" , e . getLocalizedMessage ( ) ) ; return true ; } } return false ;
public class CmsJspResourceWrapper { /** * Returns the folder name of this resource from the root site . < p > * In case this resource already is a { @ link CmsFolder } , the folder path is returned without modification . * In case it is a { @ link CmsFile } , the parent folder name of the file is returned . < p > * @ return the folder name of this resource from the root site */ public String getRootPathFolder ( ) { } }
String result ; if ( isFile ( ) ) { result = getRootPathParentFolder ( ) ; } else { result = getRootPath ( ) ; } return result ;
public class TtlTimerTask { /** * Unwrap { @ link TtlTimerTask } to the original / underneath one . * this method is { @ code null } - safe , when input { @ code TimerTask } parameter is { @ code null } , return { @ code null } ; * if input { @ code TimerTask } parameter is not a { @ link TtlTimerTask } just return input { @ code TimerTask } . * @ see # get ( TimerTask ) * @ since 2.10.2 */ @ Nullable public static TimerTask unwrap ( @ Nullable TimerTask timerTask ) { } }
if ( ! ( timerTask instanceof TtlTimerTask ) ) return timerTask ; else return ( ( TtlTimerTask ) timerTask ) . getTimerTask ( ) ;
public class AdSenseSettings {
    /**
     * Sets the fontSize value for this AdSenseSettings.
     *
     * @param fontSize specifies the font size of the {@link AdUnit}. This
     *        attribute is optional and defaults to the ad unit's parent or
     *        ancestor's setting if one has been set. If no ancestor of the ad
     *        unit has set {@code fontSize}, the attribute is defaulted to
     *        {@link FontSize#DEFAULT}.
     */
    public void setFontSize(com.google.api.ads.admanager.axis.v201902.AdSenseSettingsFontSize fontSize) {
        // Plain generated setter; no validation is performed here.
        this.fontSize = fontSize;
    }
}
public class IfcRepresentationImpl {
    /**
     * Returns the representation maps referencing this representation, via the
     * EMF reflective accessor (second argument {@code true} resolves proxies).
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     *
     * @generated
     */
    @SuppressWarnings("unchecked")
    @Override
    public EList<IfcRepresentationMap> getRepresentationMap() {
        return (EList<IfcRepresentationMap>) eGet(Ifc4Package.Literals.IFC_REPRESENTATION__REPRESENTATION_MAP, true);
    }
}
public class HawtioManagementConfiguration { @ Bean public Redirector redirector ( ) { } }
final Redirector redirector = new Redirector ( ) ; redirector . setApplicationContextPath ( hawtioPath ) ; return redirector ;
public class CryptUtils {
    /**
     * DES-encrypts the given text with the given key.
     *
     * @param key  the secret key
     * @param info the plain text to encrypt
     * @return the cipher text as a hexadecimal string
     */
    public static String encryptToDES(SecretKey key, String info) {
        // Encryption algorithm; DES, DESede or Blowfish could be used here.
        String Algorithm = "DES";
        // Random number generator for the cipher (optional).
        SecureRandom sr = new SecureRandom();
        // The cipher text to be produced.
        byte[] cipherByte = null;
        try {
            // Obtain the encryption/decryption engine.
            Cipher c1 = Cipher.getInstance(Algorithm);
            // Initialize the Cipher with the given key and mode
            // (ENCRYPT_MODE, DECRYPT_MODE, WRAP_MODE, UNWRAP_MODE).
            c1.init(Cipher.ENCRYPT_MODE, key, sr);
            // Encrypt the payload.
            // NOTE(review): info.getBytes() uses the platform default charset;
            // an explicit charset (e.g. UTF-8) would be deterministic — verify.
            cipherByte = c1.doFinal(info.getBytes());
        } catch (Exception e) {
            // NOTE(review): the exception is swallowed and cipherByte stays
            // null, so byte2hex(null) below may fail — confirm byte2hex's
            // null handling or propagate the error instead.
            e.printStackTrace();
        }
        // Return the cipher text in hexadecimal form.
        return byte2hex(cipherByte);
    }
}
public class Funnel { /** * Construct the jsonifiable arguments for this request . * @ return A jsonifiable Map to use for the request body . */ @ Override Map < String , Object > constructRequestArgs ( ) { } }
Map < String , Object > args = new HashMap < String , Object > ( ) ; args . put ( KeenQueryConstants . STEPS , this . steps . constructParameterRequestArgs ( ) ) ; if ( null != this . timeframe ) { args . putAll ( timeframe . constructTimeframeArgs ( ) ) ; } return args ;
public class FieldType { /** * Create a shell object and assign its id field . */ private < FT , FID > FT createForeignShell ( ConnectionSource connectionSource , Object val , ObjectCache objectCache ) throws SQLException { } }
@ SuppressWarnings ( "unchecked" ) Dao < FT , FID > castDao = ( Dao < FT , FID > ) foreignDao ; FT foreignObject = castDao . createObjectInstance ( ) ; foreignIdField . assignField ( connectionSource , foreignObject , val , false , objectCache ) ; return foreignObject ;
public class Alert { /** * Creates a new instance of { @ code Alert } with same members . * @ return a new { @ code Alert } instance */ public Alert newInstance ( ) { } }
Alert item = new Alert ( this . pluginId ) ; item . setRiskConfidence ( this . risk , this . confidence ) ; item . setName ( this . name ) ; item . setDetail ( this . description , this . uri , this . param , this . attack , this . otherInfo , this . solution , this . reference , this . historyRef ) ; item . setSource ( this . source ) ; return item ;
public class UnsavedRevision { /** * Sets the attachment with the given name . The Attachment data will be written * to the Database when the Revision is saved . * @ param name The name of the Attachment to set . * @ param contentType The content - type of the Attachment . * @ param contentStreamURL The URL that contains the Attachment content . */ @ InterfaceAudience . Public public void setAttachment ( String name , String contentType , URL contentStreamURL ) { } }
try { InputStream inputStream = contentStreamURL . openStream ( ) ; setAttachment ( name , contentType , inputStream ) ; } catch ( IOException e ) { Log . e ( Database . TAG , "Error opening stream for url: %s" , contentStreamURL ) ; throw new RuntimeException ( e ) ; }