signature
stringlengths
43
39.1k
implementation
stringlengths
0
450k
public class CharacterApi { /** * Get character corporation roles Returns a character & # 39 ; s corporation * roles - - - This route is cached for up to 3600 seconds SSO Scope : * esi - characters . read _ corporation _ roles . v1 * @ param characterId * An EVE character ID ( required ) * @ param datasource * The server name you would like data from ( optional , default to * tranquility ) * @ param ifNoneMatch * ETag from a previous request . A 304 will be returned if this * matches the current ETag ( optional ) * @ param token * Access token to use if unable to set a header ( optional ) * @ return CharacterRolesResponse * @ throws ApiException * If fail to call the API , e . g . server error or cannot * deserialize the response body */ public CharacterRolesResponse getCharactersCharacterIdRoles ( Integer characterId , String datasource , String ifNoneMatch , String token ) throws ApiException { } }
ApiResponse < CharacterRolesResponse > resp = getCharactersCharacterIdRolesWithHttpInfo ( characterId , datasource , ifNoneMatch , token ) ; return resp . getData ( ) ;
public class PathService { /** * Returns a { @ link PathMatcher } for the given syntax and pattern as specified by { @ link * FileSystem # getPathMatcher ( String ) } . */ public PathMatcher createPathMatcher ( String syntaxAndPattern ) { } }
return PathMatchers . getPathMatcher ( syntaxAndPattern , type . getSeparator ( ) + type . getOtherSeparators ( ) , displayNormalizations ) ;
public class Instance { /** * Sets the scheduling options for this instance . * @ return a zone operation if the set request was issued correctly , { @ code null } if the instance * was not found * @ throws ComputeException upon failure */ public Operation setSchedulingOptions ( SchedulingOptions scheduling , OperationOption ... options ) { } }
return compute . setSchedulingOptions ( getInstanceId ( ) , scheduling , options ) ;
public class ComponentFactoryDecorator { /** * { @ inheritDoc } */ @ Override public JScrollPane createScrollPane ( Component view , int vsbPolicy , int hsbPolicy ) { } }
return this . getDecoratedComponentFactory ( ) . createScrollPane ( view , vsbPolicy , hsbPolicy ) ;
public class IPUtil { /** * Decides whether an IP is local or not */ public static boolean isValidLocalIP ( InetAddress ip ) throws SocketException { } }
String ipStr = ip . getHostAddress ( ) ; Iterator ips = getAllIPAddresses ( ) . iterator ( ) ; while ( ips . hasNext ( ) ) { InetAddress ip2 = ( InetAddress ) ips . next ( ) ; if ( ip2 . getHostAddress ( ) . equals ( ipStr ) ) { return true ; } } return false ;
public class CmsLoginHelper { /** * Gets the list of OUs which should be selectable in the login dialog . < p > * @ param cms the CMS context to use * @ param predefOu the predefined OU * @ return the list of organizational units for the OU selector */ public static List < CmsOrganizationalUnit > getOrgUnitsForLoginDialog ( CmsObject cms , String predefOu ) { } }
List < CmsOrganizationalUnit > result = new ArrayList < CmsOrganizationalUnit > ( ) ; try { if ( predefOu == null ) { result . add ( OpenCms . getOrgUnitManager ( ) . readOrganizationalUnit ( cms , "" ) ) ; result . addAll ( OpenCms . getOrgUnitManager ( ) . getOrganizationalUnits ( cms , "" , true ) ) ; Iterator < CmsOrganizationalUnit > itOus = result . iterator ( ) ; while ( itOus . hasNext ( ) ) { CmsOrganizationalUnit ou = itOus . next ( ) ; if ( ou . hasFlagHideLogin ( ) || ou . hasFlagWebuser ( ) ) { itOus . remove ( ) ; } } } else { result . add ( OpenCms . getOrgUnitManager ( ) . readOrganizationalUnit ( cms , predefOu ) ) ; } } catch ( CmsException e ) { LOG . error ( e . getLocalizedMessage ( ) , e ) ; } return result ;
public class QueryBuilder { /** * Increases the offset by the { @ code amount } . * @ param amount the amount to increase the offset * @ return a reference to this object */ @ Override public QueryBuilder < V > increaseOffsetBy ( Integer amount ) { } }
if ( offset == null ) { offset = 0 ; } this . offset += amount ; return this ;
public class Gauge { /** * The factor defines the width of the minor tick mark . * It can be in the range from 0 - 1. * @ param FACTOR */ public void setMinorTickMarkWidthFactor ( final double FACTOR ) { } }
if ( null == minorTickMarkWidthFactor ) { _minorTickMarkWidthFactor = Helper . clamp ( 0.0 , 1.0 , FACTOR ) ; fireUpdateEvent ( REDRAW_EVENT ) ; } else { minorTickMarkWidthFactor . set ( FACTOR ) ; }
public class RemoteQueuePointIterator { /** * Move to the next stream which has active ranges */ private void nextQueue ( ) { } }
if ( super . hasNext ( ) ) { do { if ( _currentSubIterator != null ) _currentSubIterator . finished ( ) ; _currentSubIterator = null ; _currentQueue = ( SIMPQueueControllable ) super . next ( ) ; try { _currentSubIterator = _currentQueue . getRemoteQueuePointIterator ( ) ; } catch ( SIMPException e ) { // No FFDC code needed if ( tc . isDebugEnabled ( ) ) SibTr . exception ( tc , e ) ; } } // Retry if we failed to get an iterator , that iterator does not // have any remote queue points in it and we have more queues available . while ( ( _currentSubIterator == null || ! _currentSubIterator . hasNext ( ) ) && super . hasNext ( ) ) ; } else { _currentQueue = null ; if ( _currentSubIterator != null ) _currentSubIterator . finished ( ) ; _currentSubIterator = null ; }
public class SymmTridiagEVD { /** * Convenience method for computing the full eigenvalue decomposition of the * given matrix * @ param A * Matrix to factorize . Main diagonal and superdiagonal is * copied , and the matrix is not modified * @ return Newly allocated decomposition * @ throws NotConvergedException */ public static SymmTridiagEVD factorize ( Matrix A ) throws NotConvergedException { } }
return new SymmTridiagEVD ( A . numRows ( ) ) . factor ( new SymmTridiagMatrix ( A ) ) ;
public class AttributeQuery { /** * Create the SQL statement . * @ param _ idx the _ idx * @ return StringBuilder containing the statement * @ throws EFapsException on error */ public String getSQLStatement ( final Integer _idx ) throws EFapsException { } }
prepareQuery ( ) ; final SQLSelect select = new SQLSelect ( "S" + _idx + "T" ) . column ( getSqlTable2Index ( ) . get ( this . attribute . getTable ( ) ) , this . attribute . getSqlColNames ( ) . get ( 0 ) ) . from ( getBaseType ( ) . getMainTable ( ) . getSqlTable ( ) , 0 ) ; // add child tables if ( getSqlTable2Index ( ) . size ( ) > 0 ) { for ( final Entry < SQLTable , Integer > entry : getSqlTable2Index ( ) . entrySet ( ) ) { if ( entry . getValue ( ) > 0 ) { select . leftJoin ( entry . getKey ( ) . getSqlTable ( ) , entry . getValue ( ) , "ID" , 0 , "ID" ) ; } } } select . addSection ( getWhere ( ) ) ; select . addSection ( getOrderBy ( ) ) ; select . addSection ( getLimit ( ) ) ; if ( AbstractObjectQuery . LOG . isDebugEnabled ( ) ) { AbstractObjectQuery . LOG . debug ( select . getSQL ( ) ) ; } return select . getSQL ( ) ;
public class Bernoulli { /** * Set a coefficient in the internal table . * @ param n the zero - based index of the coefficient . n = 0 for the constant term . * @ param value the new value of the coefficient . */ protected void set ( final int n , final Rational value ) { } }
final int nindx = n / 2 ; if ( nindx < a . size ( ) ) { a . set ( nindx , value ) ; } else { while ( a . size ( ) < nindx ) { a . add ( Rational . ZERO ) ; } a . add ( value ) ; }
public class InstrumentedExtractorBase { /** * Generates metrics for the instrumentation of this class . */ protected void regenerateMetrics ( ) { } }
if ( isInstrumentationEnabled ( ) ) { this . readRecordsMeter = Optional . of ( this . metricContext . meter ( MetricNames . ExtractorMetrics . RECORDS_READ_METER ) ) ; this . dataRecordExceptionsMeter = Optional . of ( this . metricContext . meter ( MetricNames . ExtractorMetrics . RECORDS_FAILED_METER ) ) ; this . extractorTimer = Optional . < Timer > of ( this . metricContext . timer ( MetricNames . ExtractorMetrics . EXTRACT_TIMER ) ) ; } else { this . readRecordsMeter = Optional . absent ( ) ; this . dataRecordExceptionsMeter = Optional . absent ( ) ; this . extractorTimer = Optional . absent ( ) ; }
public class FileComparator { /** * for testing via main */ private static void print ( String kind , File [ ] f ) { } }
System . out . println ( kind ) ; for ( File element : f ) { System . out . println ( getComparable ( element ) ) ; } System . out . println ( ) ;
public class WebDAVUtils { /** * Recursively copy the resources under the provided identifier . * @ param services the trellis services * @ param session the session * @ param identifier the source identifier * @ param destination the destination identifier * @ param baseUrl the baseURL * @ return the next stage of completion */ public static CompletionStage < Void > recursiveCopy ( final ServiceBundler services , final Session session , final IRI identifier , final IRI destination , final String baseUrl ) { } }
return services . getResourceService ( ) . get ( identifier ) . thenCompose ( res -> recursiveCopy ( services , session , res , destination , baseUrl ) ) ;
public class ESRIBounds { /** * Replies the 2D bounds . * @ return the 2D bounds */ @ Pure public Rectangle2d toRectangle2d ( ) { } }
final Rectangle2d bounds = new Rectangle2d ( ) ; bounds . setFromCorners ( this . minx , this . miny , this . maxx , this . maxy ) ; return bounds ;
public class Base64 { /** * Reads < tt > infile < / tt > and encodes it to < tt > outfile < / tt > . * @ param infile Input file * @ param outfile Output file * @ throws java . io . IOException if there is an error * @ since 2.2 */ public static void encodeFileToFile ( String infile , String outfile ) throws IOException { } }
String encoded = Base64 . encodeFromFile ( infile ) ; java . io . OutputStream out = null ; try { out = new java . io . BufferedOutputStream ( new FileOutputStream ( outfile ) ) ; out . write ( encoded . getBytes ( "US-ASCII" ) ) ; // Strict , 7 - bit output . } catch ( IOException e ) { throw e ; // Catch and release to execute finally { } } finally { try { if ( out != null ) { out . close ( ) ; } } catch ( Exception ex ) { } }
public class DBInstance { /** * The status of a Read Replica . If the instance is not a Read Replica , this is blank . * @ return The status of a Read Replica . If the instance is not a Read Replica , this is blank . */ public java . util . List < DBInstanceStatusInfo > getStatusInfos ( ) { } }
if ( statusInfos == null ) { statusInfos = new com . amazonaws . internal . SdkInternalList < DBInstanceStatusInfo > ( ) ; } return statusInfos ;
public class ReversePurgeItemHashMap { /** * Gets the current value with the given key * @ param key the given key * @ return the positive value the key corresponds to or zero if the key is not found in the * hash map . */ long get ( final T key ) { } }
if ( key == null ) { return 0 ; } final int probe = hashProbe ( key ) ; if ( states [ probe ] > 0 ) { assert ( keys [ probe ] ) . equals ( key ) ; return values [ probe ] ; } return 0 ;
public class DateField { /** * example new DataField ( ) . setDate ( " 19 " , " 05 " , " 2013 " ) * @ param day String ' dd ' * @ param month String ' MMM ' * @ param year String ' yyyy ' * @ return true if is selected date , false when DataField doesn ' t exist */ private boolean setDate ( String day , String month , String year ) { } }
String fullDate = RetryUtils . retry ( 4 , ( ) -> monthYearButton . getText ( ) ) . trim ( ) ; if ( ! fullDate . contains ( month ) || ! fullDate . contains ( year ) ) { monthYearButton . click ( ) ; if ( ! yearAndMonth . ready ( ) ) { monthYearButton . click ( ) ; } goToYear ( year , fullDate ) ; WebLink monthEl = new WebLink ( monthContainer ) . setText ( month , SearchType . EQUALS ) . setInfoMessage ( "month " + month ) ; monthEl . click ( ) ; selectOkButton . click ( ) ; } WebLocator dayEl = new WebLocator ( dayContainer ) . setText ( day , SearchType . EQUALS ) . setVisibility ( true ) . setInfoMessage ( "day " + day ) ; Utils . sleep ( 50 ) ; return dayEl . click ( ) ;
public class GrpcUtil { /** * Verify { @ code authority } is valid for use with gRPC . The syntax must be valid and it must not * include userinfo . * @ return the { @ code authority } provided */ public static String checkAuthority ( String authority ) { } }
URI uri = authorityToUri ( authority ) ; checkArgument ( uri . getHost ( ) != null , "No host in authority '%s'" , authority ) ; checkArgument ( uri . getUserInfo ( ) == null , "Userinfo must not be present on authority: '%s'" , authority ) ; return authority ;
public class AmazonRDSClient { /** * Starts an Amazon Aurora DB cluster that was stopped using the AWS console , the stop - db - cluster AWS CLI command , * or the StopDBCluster action . * For more information , see < a * href = " https : / / docs . aws . amazon . com / AmazonRDS / latest / AuroraUserGuide / aurora - cluster - stop - start . html " > Stopping and * Starting an Aurora Cluster < / a > in the < i > Amazon Aurora User Guide . < / i > * < note > * This action only applies to Aurora DB clusters . * < / note > * @ param startDBClusterRequest * @ return Result of the StartDBCluster operation returned by the service . * @ throws DBClusterNotFoundException * < i > DBClusterIdentifier < / i > doesn ' t refer to an existing DB cluster . * @ throws InvalidDBClusterStateException * The requested operation can ' t be performed while the cluster is in this state . * @ throws InvalidDBInstanceStateException * The DB instance isn ' t in a valid state . * @ sample AmazonRDS . StartDBCluster * @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / rds - 2014-10-31 / StartDBCluster " target = " _ top " > AWS API * Documentation < / a > */ @ Override public DBCluster startDBCluster ( StartDBClusterRequest request ) { } }
request = beforeClientExecution ( request ) ; return executeStartDBCluster ( request ) ;
public class ExecuteArgAnalyzer { protected void checkNonGenericParameter ( Method executeMethod , Parameter parameter ) { } }
if ( isNonGenericCheckTargetType ( parameter . getType ( ) ) ) { // e . g . List final Type paramedType = parameter . getParameterizedType ( ) ; if ( paramedType == null ) { // no way ? no check just in case return ; } if ( paramedType instanceof ParameterizedType ) { final Type [ ] typeArgs = ( ( ParameterizedType ) paramedType ) . getActualTypeArguments ( ) ; if ( typeArgs != null && typeArgs . length > 0 && "?" . equals ( typeArgs [ 0 ] . getTypeName ( ) ) ) { // e . g . List < ? > throwActionFormWildcardOnlyListParameterException ( executeMethod , parameter ) ; } } else { throwActionFormNonGenericListParameterException ( executeMethod , parameter ) ; } }
public class BandLU { /** * Creates an LU decomposition of the given matrix * @ param A * Matrix to decompose . Not modified * @ return A LU decomposition of the matrix */ public static BandLU factorize ( BandMatrix A ) { } }
return new BandLU ( A . numRows ( ) , A . kl , A . ku ) . factor ( A , false ) ;
public class FluentSelect { /** * Select all options that have a value matching the argument . That is , when given " foo " this * would select an option like : * & lt ; option value = " foo " & gt ; Bar & lt ; / option & gt ; * @ param value The value to match against */ public FluentSelect selectByValue ( final String value ) { } }
executeAndWrapReThrowIfNeeded ( new SelectByValue ( value ) , Context . singular ( context , "selectByValue" , null , value ) , true ) ; return new FluentSelect ( super . delegate , currentElement . getFound ( ) , this . context , monitor , booleanInsteadOfNotFoundException ) ;
public class Base64 { /** * Base64 encode a byte array . * @ param data * the data to encode . * @ return a StringBuffer containing the encoded lines . */ public static StringBuffer encode ( byte [ ] data ) { } }
int rem = data . length % 3 ; int num = data . length / 3 ; StringBuffer result = new StringBuffer ( ) ; int p = 0 ; // int c = 0; for ( int i = 0 ; i < num ; i ++ ) { int b1 = ( data [ p ] & 0xfc ) >> 2 ; int b2 = ( data [ p ] & 0x03 ) << 4 | ( data [ p + 1 ] & 0xf0 ) >> 4 ; int b3 = ( data [ p + 1 ] & 0x0f ) << 2 | ( data [ p + 2 ] & 0xc0 ) >> 6 ; int b4 = data [ p + 2 ] & 0x3f ; result . append ( b64encode ( b1 ) ) ; result . append ( b64encode ( b2 ) ) ; result . append ( b64encode ( b3 ) ) ; result . append ( b64encode ( b4 ) ) ; p += 3 ; // c + = 4; } switch ( rem ) { case 0 : break ; case 1 : { int b1 = ( data [ p ] & 0xfc ) >> 2 ; int b2 = ( data [ p ] & 0x03 ) << 4 ; result . append ( b64encode ( b1 ) ) ; result . append ( b64encode ( b2 ) ) ; result . append ( '=' ) ; result . append ( '=' ) ; break ; } case 2 : { int b1 = ( data [ p ] & 0xfc ) >> 2 ; int b2 = ( data [ p ] & 0x03 ) << 4 | ( data [ p + 1 ] & 0xf0 ) >> 4 ; int b3 = ( data [ p + 1 ] & 0x0f ) << 2 ; result . append ( b64encode ( b1 ) ) ; result . append ( b64encode ( b2 ) ) ; result . append ( b64encode ( b3 ) ) ; result . append ( '=' ) ; break ; } } return result ;
public class CertificateUtil { /** * Decode either a sequence of DER - encoded X . 509 certificates or a PKCS # 7 certificate chain . * @ param certificateBytes the byte [ ] to decode from . * @ return a { @ link List } of certificates deocded from { @ code certificateBytes } . * @ throws UaException if decoding fails . */ public static List < X509Certificate > decodeCertificates ( byte [ ] certificateBytes ) throws UaException { } }
Preconditions . checkNotNull ( certificateBytes , "certificateBytes cannot be null" ) ; CertificateFactory factory ; try { factory = CertificateFactory . getInstance ( "X.509" ) ; } catch ( CertificateException e ) { throw new UaException ( StatusCodes . Bad_InternalError , e ) ; } try { Collection < ? extends Certificate > certificates = factory . generateCertificates ( new ByteArrayInputStream ( certificateBytes ) ) ; return certificates . stream ( ) . map ( X509Certificate . class :: cast ) . collect ( Collectors . toList ( ) ) ; } catch ( CertificateException e ) { throw new UaException ( StatusCodes . Bad_CertificateInvalid , e ) ; }
public class SQLiteUpdateTaskHelper { /** * Execute SQL . * @ param database * the database * @ param commands * the commands */ public static void executeSQL ( final SQLiteDatabase database , List < String > commands ) { } }
for ( String command : commands ) { executeSQL ( database , command ) ; } // commands . forEach ( command - > { // executeSQL ( database , command ) ;
public class DistributedCache {
    /**
     * Get the locally cached file or archive; it could either be previously cached (and valid)
     * or copied from the {@link FileSystem} now.
     *
     * @param cache the cache to be localized, specified as
     *     new URI(hdfs://hostname:port/absolute_path_to_file#LINKNAME). If no schema or
     *     hostname:port is provided the file is assumed to be in the filesystem being used in
     *     the Configuration
     * @param conf the Configuration which contains the filesystem
     * @param subDir the sub cache dir where you want to localize the files/archives
     * @param fileStatus the file status on the dfs
     * @param isArchive if the cache is an archive or a file. An archive with a .zip/.jar/.tar/
     *     .tgz/.tar.gz extension is unpacked automatically and the unpack directory is returned;
     *     for a file, the path to the file is returned
     * @param confFileStamp the hdfs file modification timestamp used to verify that the file to
     *     be cached hasn't changed since the job started
     * @param fileLength the length of the cache file
     * @param currentWorkDir the directory in which to create symlinks for the locally cached
     *     files/archives
     * @param honorSymLinkConf if false, symlinks are not created even if conf says so (required
     *     for an optimization in task launches)
     * @param asyncDiskService service used for asynchronous deletions when the cache is pruned
     * @param lDirAllocator LocalDirAllocator of the tracker
     * @return the path to the directory where archives are unpacked, or the local file path for
     *     a plain file
     * @throws IOException on localization failure
     */
    private static Path getLocalCache(URI cache, Configuration conf, Path subDir,
            FileStatus fileStatus, boolean isArchive, long confFileStamp, long fileLength,
            Path currentWorkDir, boolean honorSymLinkConf, MRAsyncDiskService asyncDiskService,
            LocalDirAllocator lDirAllocator) throws IOException {
        String key = getKey(cache, conf, confFileStamp);
        CacheStatus lcacheStatus;
        Path localizedPath;
        // Phase 1: under the global map lock, find or create the CacheStatus entry and
        // take a reference so it cannot be deleted while we localize it.
        synchronized (cachedArchives) {
            lcacheStatus = cachedArchives.get(key);
            if (lcacheStatus == null) {
                // was never localized
                Path uniqueParentDir = new Path(subDir, String.valueOf(random.nextLong()));
                String cachePath = new Path(uniqueParentDir, makeRelative(cache, conf)).toString();
                Path localPath = lDirAllocator.getLocalPathForWrite(cachePath, fileLength, conf);
                lcacheStatus = new CacheStatus(
                        new Path(localPath.toString().replace(cachePath, "")),
                        localPath, uniqueParentDir);
                cachedArchives.put(key, lcacheStatus);
            }
            lcacheStatus.refcount++;
        }
        boolean initSuccessful = false;
        try {
            // Phase 2: under the per-entry lock, localize (first caller) or validate
            // (subsequent callers), then create the symlink if requested.
            synchronized (lcacheStatus) {
                if (!lcacheStatus.isInited()) {
                    localizedPath = localizeCache(conf, cache, confFileStamp, lcacheStatus, isArchive);
                    lcacheStatus.initComplete();
                } else {
                    if (fileStatus != null) {
                        localizedPath = checkCacheStatusValidity(conf, cache, confFileStamp,
                                lcacheStatus, fileStatus, isArchive);
                    } else {
                        // if fileStatus is null, then the md5 must be correct
                        // so there is no need to check for cache validity
                        localizedPath = lcacheStatus.localizedLoadPath;
                    }
                }
                createSymlink(conf, cache, lcacheStatus, isArchive, currentWorkDir, honorSymLinkConf);
            }
            // try deleting stuff if you can
            // Phase 3: read the base-dir usage counters (each under its own lock) and
            // trigger a cache purge when configured limits are exceeded.
            long size = 0;
            int numberSubDir = 0;
            synchronized (lcacheStatus) {
                synchronized (baseDirSize) {
                    Long get = baseDirSize.get(lcacheStatus.getBaseDir());
                    if (get != null) {
                        size = get.longValue();
                    } else {
                        LOG.warn("Cannot find size of baseDir: " + lcacheStatus.getBaseDir());
                    }
                }
                synchronized (baseDirNumberSubDir) {
                    Integer get = baseDirNumberSubDir.get(lcacheStatus.getBaseDir());
                    if (get != null) {
                        numberSubDir = get.intValue();
                    } else {
                        LOG.warn("Cannot find subdirectories limit of baseDir: "
                                + lcacheStatus.getBaseDir());
                    }
                }
            }
            // setting the cache size to a default of 10GB
            long allowedSize = conf.getLong("local.cache.size", DEFAULT_CACHE_SIZE);
            long allowedNumberSubDir =
                    conf.getLong("local.cache.numbersubdir", DEFAULT_CACHE_SUBDIR_LIMIT);
            if (allowedSize < size || allowedNumberSubDir < numberSubDir) {
                // try some cache deletions
                LOG.debug("Start deleting released cache because"
                        + " [size, allowedSize, numberSubDir, allowedNumberSubDir] =" + " ["
                        + size + ", " + allowedSize + ", " + numberSubDir + ", "
                        + allowedNumberSubDir + "]");
                deleteCache(conf, asyncDiskService);
            }
            initSuccessful = true;
            return localizedPath;
        } finally {
            // On any failure, drop the reference taken in phase 1 so the entry can be purged.
            if (!initSuccessful) {
                synchronized (cachedArchives) {
                    lcacheStatus.refcount--;
                }
            }
        }
    }
}
public class StringUtils {
    /**
     * Split string by whitespace.
     *
     * @param value string to split; may be null
     * @return list of non-empty tokens; empty list for null, empty, or all-whitespace input
     */
    public static Collection<String> split(final String value) {
        // Guard against blank input: "".split("\\s+") would otherwise yield a
        // single empty-string token instead of no tokens at all.
        if (value == null || value.trim().isEmpty()) {
            return Collections.emptyList();
        }
        final String[] tokens = value.trim().split("\\s+");
        return Arrays.asList(tokens);
    }
}
public class CmsJspActionElement { /** * Returns all properties of the selected file . < p > * Please see the description of the class { @ link org . opencms . jsp . CmsJspTagProperty } for * valid options of the < code > file < / code > parameter . < p > * @ param file the file ( or folder ) to look at for the properties * @ return Map all properties of the current file * ( and optional of the folders containing the file ) * @ see org . opencms . jsp . CmsJspTagProperty */ public Map < String , String > properties ( String file ) { } }
Map < String , String > props = new HashMap < String , String > ( ) ; if ( isNotInitialized ( ) ) { return props ; } try { props = CmsJspTagProperty . propertiesTagAction ( file , getRequest ( ) ) ; } catch ( Throwable t ) { handleException ( t ) ; } return props ;
public class PropertyData { /** * Resolves the copy generator . */ public void resolveBuilderGen ( ) { } }
if ( getBean ( ) . isMutable ( ) ) { if ( ! getBean ( ) . isBuilderScopeVisible ( ) && ! getBean ( ) . isBeanStyleLightOrMinimal ( ) ) { return ; // no builder } } if ( isDerived ( ) ) { builderGen = BuilderGen . NoBuilderGen . INSTANCE ; } else { BuilderGen builder = config . getBuilderGenerators ( ) . get ( getFieldTypeRaw ( ) ) ; if ( builder != null ) { builderGen = builder ; } else { builderGen = new BuilderGen . SimpleBuilderGen ( ) ; } }
public class JSTalkBackFilter { /** * Writes { @ link HttpServletResponse # SC _ NO _ CONTENT } to response and flushes the stream . * This means that only header and empty body will be returned to the caller script . * @ param response { @ link HttpServletResponse } * @ throws IOException on errors */ private void writeNoContentResponse ( final HttpServletResponse response ) throws IOException { } }
response . setStatus ( HttpServletResponse . SC_NO_CONTENT ) ; response . setContentType ( "image/gif" ) ; final PrintWriter out = response . getWriter ( ) ; out . flush ( ) ;
public class JdonJavaSerializer { /** * Finds any matching setter . */ private Method findSetter ( Method [ ] methods , String getterName , Class < ? > arg ) { } }
String setterName = "set" + getterName . substring ( INT_VALUE ) ; for ( int i = 0 ; i < methods . length ; i ++ ) { Method method = methods [ i ] ; if ( ! method . getName ( ) . equals ( setterName ) ) { continue ; } if ( ! method . getReturnType ( ) . equals ( void . class ) ) { continue ; } Class < ? > [ ] params = method . getParameterTypes ( ) ; if ( params . length == 1 && params [ 0 ] . equals ( arg ) ) { return method ; } } return null ;
public class Operations {
    /**
     * Get an operand as a numeric type.
     *
     * @param op operand as String; may be null
     * @return Double value, or null if op is null or not numeric
     */
    private static Double getOperandAsNumeric(String op) {
        Double parsed = null;
        try {
            parsed = Double.valueOf(op);
        } catch (NumberFormatException | NullPointerException e) {
            // Previously caught Exception broadly; only these two indicate a
            // non-numeric (or null) operand. Null is returned in that case.
        }
        return parsed;
    }
}
public class KubernetesClient { /** * Retrieves POD addresses for all services in the specified { @ code namespace } filtered by { @ code serviceLabel } * and { @ code serviceLabelValue } . * @ param serviceLabel label used to filter responses * @ param serviceLabelValue label value used to filter responses * @ return all POD addresses from the specified { @ code namespace } filtered by the label * @ see < a href = " https : / / kubernetes . io / docs / reference / generated / kubernetes - api / v1.11 / # list - 143 " > Kubernetes Endpoint API < / a > */ List < Endpoint > endpointsByLabel ( String serviceLabel , String serviceLabelValue ) { } }
try { String param = String . format ( "labelSelector=%s=%s" , serviceLabel , serviceLabelValue ) ; String urlString = String . format ( "%s/api/v1/namespaces/%s/endpoints?%s" , kubernetesMaster , namespace , param ) ; return enrichWithPublicAddresses ( parseEndpointsList ( callGet ( urlString ) ) ) ; } catch ( RestClientException e ) { return handleKnownException ( e ) ; }
public class Calendar { /** * < strong > [ icu ] < / strong > Returns true if the given Calendar object is equivalent to this * one . An equivalent Calendar will behave exactly as this one * does , but it may be set to a different time . By contrast , for * the equals ( ) method to return true , the other Calendar must * be set to the same time . * @ param other the Calendar to be compared with this Calendar */ public boolean isEquivalentTo ( Calendar other ) { } }
return this . getClass ( ) == other . getClass ( ) && isLenient ( ) == other . isLenient ( ) && getFirstDayOfWeek ( ) == other . getFirstDayOfWeek ( ) && getMinimalDaysInFirstWeek ( ) == other . getMinimalDaysInFirstWeek ( ) && getTimeZone ( ) . equals ( other . getTimeZone ( ) ) && getRepeatedWallTimeOption ( ) == other . getRepeatedWallTimeOption ( ) && getSkippedWallTimeOption ( ) == other . getSkippedWallTimeOption ( ) ;
public class ResourceGroovyMethods { /** * Creates a buffered reader for this URL using the given encoding . * @ param url a URL * @ param parameters connection parameters * @ param charset opens the stream with a specified charset * @ return a BufferedReader for the URL * @ throws MalformedURLException is thrown if the URL is not well formed * @ throws IOException if an I / O error occurs while creating the input stream * @ since 1.8.1 */ public static BufferedReader newReader ( URL url , Map parameters , String charset ) throws MalformedURLException , IOException { } }
return new BufferedReader ( new InputStreamReader ( configuredInputStream ( parameters , url ) , charset ) ) ;
public class Token { /** * for ( int i = 0 ; i < tokens . length ; i + + ) { * if ( tokens [ i ] . schemaObjectIdentifier instanceof Expression ) { * ColumnSchema column = * ( ( Expression ) tokens [ i ] . schemaObjectIdentifier ) * . getColumn ( ) ; * tokens [ i ] . schemaObjectIdentifier = column . getName ( ) ; */ static String getSQL ( Token [ ] statement ) { } }
boolean wasDelimiter = true ; StringBuffer sb = new StringBuffer ( ) ; for ( int i = 0 ; i < statement . length ; i ++ ) { // when the previous statement is ' ) ' , there should be a blank before the non - delimiter statement . if ( ! statement [ i ] . isDelimiter && ( ! wasDelimiter || ( i > 0 && ")" . equals ( statement [ i - 1 ] . getSQL ( ) ) ) ) ) { sb . append ( ' ' ) ; } sb . append ( statement [ i ] . getSQL ( ) ) ; wasDelimiter = statement [ i ] . isDelimiter ; } return sb . toString ( ) ;
public class AfplibPackageImpl { /** * < ! - - begin - user - doc - - > * < ! - - end - user - doc - - > * @ generated */ public EEnum getBDDCOLOR ( ) { } }
if ( bddcolorEEnum == null ) { bddcolorEEnum = ( EEnum ) EPackage . Registry . INSTANCE . getEPackage ( AfplibPackage . eNS_URI ) . getEClassifiers ( ) . get ( 6 ) ; } return bddcolorEEnum ;
public class LibertyCustomizeBindingOutInterceptor { /** * Customize the client properties . * @ param message */ protected void customizeClientProperties ( Message message ) { } }
if ( null == configPropertiesSet ) { if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) ) { Tr . debug ( tc , "There are no client properties." ) ; } return ; } Bus bus = message . getExchange ( ) . getBus ( ) ; if ( null == bus ) { if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) ) { Tr . debug ( tc , "The bus is null" ) ; } return ; } for ( ConfigProperties configProps : configPropertiesSet ) { if ( JaxWsConstants . HTTP_CONDUITS_SERVICE_FACTORY_PID . equals ( configProps . getFactoryPid ( ) ) ) { customizeHttpConduitProperties ( message , bus , configProps ) ; } }
public class BlockHeader { /** * Reads the header data from a block . * @ param buffer block data * @ param offset current offset into block data * @ param postHeaderSkipBytes bytes to skip after reading the header * @ return current BlockHeader instance */ public BlockHeader read ( byte [ ] buffer , int offset , int postHeaderSkipBytes ) { } }
m_offset = offset ; System . arraycopy ( buffer , m_offset , m_header , 0 , 8 ) ; m_offset += 8 ; int nameLength = FastTrackUtility . getInt ( buffer , m_offset ) ; m_offset += 4 ; if ( nameLength < 1 || nameLength > 255 ) { throw new UnexpectedStructureException ( ) ; } m_name = new String ( buffer , m_offset , nameLength , CharsetHelper . UTF16LE ) ; m_offset += nameLength ; m_columnType = FastTrackUtility . getShort ( buffer , m_offset ) ; m_offset += 2 ; m_flags = FastTrackUtility . getShort ( buffer , m_offset ) ; m_offset += 2 ; m_skip = new byte [ postHeaderSkipBytes ] ; System . arraycopy ( buffer , m_offset , m_skip , 0 , postHeaderSkipBytes ) ; m_offset += postHeaderSkipBytes ; return this ;
public class ItemStreamLink { /** * / * ( non - Javadoc ) * @ see com . ibm . ws . sib . msgstore . impl . AbstractItemLink # xmlWriteChildrenOn ( com . ibm . ws . sib . msgstore . FormatBuffer ) */ protected void xmlWriteChildrenOn ( FormattedWriter writer ) throws IOException { } }
// Emit the superclass state first, then each non-null child collection.
super.xmlWriteChildrenOn(writer);
if (_items != null) {
    _items.xmlWriteChildrenOn(writer, XML_ITEMS);
}
if (_itemStreams != null) {
    _itemStreams.xmlWriteOn(writer, XML_ITEM_STREAMS);
}
if (_referenceStreams != null) {
    _referenceStreams.xmlWriteOn(writer, XML_REFERENCE_STREAMS);
}
public class CoreGraphAlgorithms { /** * fn union _ find < G : EdgeMapper > ( graph : & G , nodes : u32 ) { * let mut root : Vec < u32 > = ( 0 . . nodes ) . collect ( ) ; * let mut rank : Vec < u8 > = ( 0 . . nodes ) . map ( | _ | 0u8 ) . collect ( ) ; * graph . map _ edges ( | mut x , mut y | { * while x ! = root [ x ] { x = root [ x ] ; } * while y ! = root [ y ] { y = root [ y ] ; } * if x ! = y { * match rank [ x ] . cmp ( & rank [ y ] ) { * Less = > root [ x ] = y , * Greater = > root [ y ] = x , * Equal = > { root [ y ] = x ; rank [ x ] + = 1 } , */ public int [ ] unionFind ( ) { } }
// Union-find without path compression, mirroring the Rust sketch in the
// Javadoc: every node starts as its own root; each edge unions the two
// endpoints' trees, using union-by-rank to keep trees shallow.
int[] parent = new int[nodeCount];
byte[] treeRank = new byte[nodeCount];
for (int i = 0; i < nodeCount; i++) {
    parent[i] = i;
}
runProgram((x, y) -> {
    // Walk each endpoint up to its current representative.
    int rootX = x;
    while (rootX != parent[rootX]) {
        rootX = parent[rootX];
    }
    int rootY = y;
    while (rootY != parent[rootY]) {
        rootY = parent[rootY];
    }
    if (rootX == rootY) {
        return; // already in the same component
    }
    // Union by rank: hang the shallower tree under the deeper one;
    // on a tie, rootX wins and its rank grows by one.
    if (treeRank[rootX] >= treeRank[rootY]) {
        parent[rootY] = rootX;
        if (treeRank[rootX] == treeRank[rootY]) {
            treeRank[rootX] += 1;
        }
    } else {
        parent[rootX] = rootY;
    }
});
return parent;
public class JarEntry { /** * Reads the jar ' s manifest . */ private void loadManifest ( ) { } }
// Fast path: manifest already parsed. NOTE(review): this is double-checked
// locking on _isManifestRead; it is only fully safe if that field is
// volatile (a stale false merely costs an extra lock) -- TODO confirm.
if ( _isManifestRead ) return ;
synchronized ( this ) {
    // Re-check under the lock so only one thread parses the manifest.
    if ( _isManifestRead ) return ;
    try {
        _manifest = _jarPath . getManifest ( ) ;
        if ( _manifest == null ) return ;
        // Main attributes describe the unnamed ("") package.
        Attributes attr = _manifest . getMainAttributes ( ) ;
        if ( attr != null ) addManifestPackage ( "" , attr ) ;
        // Per-entry attributes describe individual named packages.
        Map < String , Attributes > entries = _manifest . getEntries ( ) ;
        for ( Map . Entry < String , Attributes > entry : entries . entrySet ( ) ) {
            String pkg = entry . getKey ( ) ;
            attr = entry . getValue ( ) ;
            if ( attr == null ) continue ;
            addManifestPackage ( pkg , attr ) ;
        }
    } catch ( IOException e ) {
        log . log ( Level . WARNING , e . toString ( ) , e ) ;
    } finally {
        // Mark done even on failure so we do not retry the read on every call.
        _isManifestRead = true ;
    }
}
public class Fingerprint { /** * Records that a build of a job has used this file . */ public synchronized void add ( @ Nonnull String jobFullName , int n ) throws IOException { } }
// Record the build usage in memory, then persist the fingerprint to disk.
addWithoutSaving ( jobFullName , n ) ;
save ( ) ;
public class File { /** * Creates an empty temporary file using the given prefix and suffix as part * of the file name . If { @ code suffix } is null , { @ code . tmp } is used . This * method is a convenience method that calls * { @ link # createTempFile ( String , String , File ) } with the third argument * being { @ code null } . * @ param prefix * the prefix to the temp file name . * @ param suffix * the suffix to the temp file name . * @ return the temporary file . * @ throws IOException * if an error occurs when writing the file . */ public static File createTempFile ( String prefix , String suffix ) throws IOException { } }
// Delegate to the three-argument overload with a null directory, which
// places the file in the default temporary-file directory.
return createTempFile ( prefix , suffix , null ) ;
public class BootstrapDrawableFactory { /** * Creates arrow icon that depends on ExpandDirection * @ param context context * @ param width width of icon in pixels * @ param height height of icon in pixels * @ param color arrow color * @ param expandDirection arrow direction * @ return icon drawable */ private static Drawable createArrowIcon ( Context context , int width , int height , int color , ExpandDirection expandDirection ) { } }
// Draw a filled triangle pointing up or down onto a fresh ARGB bitmap and
// wrap it in a BitmapDrawable.
Bitmap bitmap = Bitmap.createBitmap(width, height, Bitmap.Config.ARGB_8888);
Canvas canvas = new Canvas(bitmap);

Paint arrowPaint = new Paint();
arrowPaint.setStyle(Paint.Style.FILL_AND_STROKE);
arrowPaint.setStrokeWidth(1);
arrowPaint.setColor(color);
arrowPaint.setAntiAlias(true);

// The triangle spans the middle third of the icon vertically.
// Integer division first, then doubling, matches the original rounding.
int oneThird = height / 3;
int twoThirds = oneThird * 2;
int midX = width / 2;

Path trianglePath = new Path();
trianglePath.setFillType(Path.FillType.EVEN_ODD);
switch (expandDirection) {
    case UP:
        trianglePath.moveTo(0, twoThirds);
        trianglePath.lineTo(width, twoThirds);
        trianglePath.lineTo(midX, oneThird);
        trianglePath.lineTo(0, twoThirds);
        break;
    case DOWN:
        trianglePath.moveTo(0, oneThird);
        trianglePath.lineTo(width, oneThird);
        trianglePath.lineTo(midX, twoThirds);
        trianglePath.lineTo(0, oneThird);
        break;
}
trianglePath.close();
canvas.drawPath(trianglePath, arrowPaint);
return new BitmapDrawable(context.getResources(), bitmap);
public class PageShellInterceptor { /** * Renders the content before the backing component . * @ param writer the writer to write to . */ protected void beforePaint ( final PrintWriter writer ) { } }
// Obtain a page shell and write the document preamble and page header
// ahead of the backing component's own output.
PageShell pageShell = Factory . newInstance ( PageShell . class ) ;
pageShell . openDoc ( writer ) ;
pageShell . writeHeader ( writer ) ;
public class ParameterUtil { /** * Get parameter value from a string represenation using a pattern * @ param parameterClass parameter class * @ param value string value representation * @ param pattern value pattern * @ return parameter value from string representation using pattern * @ throws Exception if string value cannot be parse */ public static Object getParameterValueFromStringWithPattern ( String parameterClass , String value , String pattern ) throws Exception { } }
if ( pattern == null ) { return getParameterValueFromString ( parameterClass , value ) ; } else { if ( QueryParameter . DATE_VALUE . equals ( parameterClass ) || QueryParameter . TIME_VALUE . equals ( parameterClass ) || QueryParameter . TIMESTAMP_VALUE . equals ( parameterClass ) ) { SimpleDateFormat sdf = new SimpleDateFormat ( pattern ) ; return getParameterValueFromString ( parameterClass , value , sdf ) ; } else { return getParameterValueFromString ( parameterClass , value ) ; } }
public class JNRPEBuilder { /** * Builds the configured JNRPE instance . * @ return the configured JNRPE instance */ public JNRPE build ( ) { } }
// Assemble the server from all configuration collected by this builder.
JNRPE jnrpe = new JNRPE ( pluginRepository , commandRepository , charset , acceptParams , acceptedHosts , maxAcceptedConnections , readTimeout , writeTimeout ) ;
// Subscribe every configured listener to the new instance's event bus.
IJNRPEEventBus eventBus = jnrpe . getExecutionContext ( ) . getEventBus ( ) ;
for ( Object obj : eventListeners ) {
    eventBus . register ( obj ) ;
}
return jnrpe ;
public class BinaryArrayWeakHeap { /** * { @ inheritDoc } */ @ Override @ LogarithmicTime ( amortized = true ) public K deleteMin ( ) { } }
// Empty-heap guard (compiled out in benchmark builds).
if ( Constants . NOT_BENCHMARK && size == 0 ) {
    throw new NoSuchElementException ( ) ;
}
// The minimum of a weak heap is always at index 0.
K result = array [ 0 ] ;
// Move the last element into the root slot and null out its old slot
// so the reference can be garbage collected.
size -- ;
array [ 0 ] = array [ size ] ;
array [ size ] = null ;
// With zero or one element left the heap property holds trivially;
// otherwise restore it by sifting down from the root.
if ( size > 1 ) {
    if ( comparator == null ) {
        fixdown ( 0 ) ;
    } else {
        fixdownWithComparator ( 0 ) ;
    }
}
// Shrink the backing array when it is less than a quarter full, but never
// below the initial minimum capacity (skipped in benchmark builds).
if ( Constants . NOT_BENCHMARK ) {
    if ( 2 * minCapacity <= array . length && 4 * size < array . length ) {
        ensureCapacity ( array . length / 2 ) ;
    }
}
return result ;
public class ContentHandlerImporter { /** * { @ inheritDoc } */ public void endDocument ( ) throws SAXException { } }
// Flush all changes accumulated during the import. Both failure modes are
// reported to the SAX pipeline identically, so the two duplicate catch
// blocks are collapsed into one multi-catch; the original exception is
// preserved as the cause.
try {
    dataKeeper.save(importer.getChanges());
} catch (RepositoryException | IllegalStateException e) {
    throw new SAXException(e);
}
public class MapEventPublisherImpl { /** * Publish the event to the specified listener { @ code registrations } if * the event passes the filters specified by the { @ link FilteringStrategy } . * The method uses the hashcode of the { @ code dataKey } to order the * events in the event subsystem . This means that all events for the same * key will be ordered . Events with different keys need not be ordered . * @ param registrations the listener registrations to which we are publishing * @ param caller the address of the caller that caused the event * @ param mapName the map name * @ param eventType the event type * @ param dataKey the key of the event map entry * @ param oldValue the old value of the map entry * @ param newValue the new value of the map entry * @ param mergingValue the value used when performing a merge * operation in case of a { @ link EntryEventType # MERGED } event . * This value together with the old value produced the new value . */ private void publishEvent ( Collection < EventRegistration > registrations , Address caller , String mapName , EntryEventType eventType , Data dataKey , Object oldValue , Object newValue , Object mergingValue ) { } }
// Cache of already-built event payloads, keyed by (type, include-value),
// so registrations needing the same shape share one object.
EntryEventDataCache eventDataCache = filteringStrategy . getEntryEventDataCache ( ) ;
// Events for the same key share an order key derived from the key's hash,
// so the event subsystem delivers them in order; different keys may be
// delivered out of order relative to each other.
int orderKey = pickOrderKey ( dataKey ) ;
for ( EventRegistration registration : registrations ) {
    EventFilter filter = registration . getFilter ( ) ;
    // a filtering strategy determines whether the event must be published on the specific
    // event registration and may alter the type of event to be published
    int eventTypeForPublishing = filteringStrategy . doFilter ( filter , dataKey , oldValue , newValue , eventType , mapName ) ;
    if ( eventTypeForPublishing == FILTER_DOES_NOT_MATCH ) {
        continue ;
    }
    EntryEventData eventDataToBePublished = eventDataCache . getOrCreateEventData ( mapName , caller , dataKey , newValue , oldValue , mergingValue , eventTypeForPublishing , isIncludeValue ( filter ) ) ;
    eventService . publishEvent ( SERVICE_NAME , registration , eventDataToBePublished , orderKey ) ;
}
// if events were generated, execute the post-publish hook on each one
if ( ! eventDataCache . isEmpty ( ) ) {
    postPublishEvent ( eventDataCache . eventDataIncludingValues ( ) , eventDataCache . eventDataExcludingValues ( ) ) ;
}
public class Stylesheet { /** * Set the " xsl : attribute - set " property . * @ see < a href = " http : / / www . w3 . org / TR / xslt # attribute - sets " > attribute - sets in XSLT Specification < / a > * @ param attrSet ElemAttributeSet to add to the list of attribute sets */ public void setAttributeSet ( ElemAttributeSet attrSet ) { } }
// Lazily create the backing collection on first use, then append the set.
if (m_attributeSets == null) {
    m_attributeSets = new Vector();
}
m_attributeSets.add(attrSet);
public class DisconfCenterFile { /** * 配置文件的路径 */ public String getFilePath ( ) { } }
// 不放到classpath , 则文件路径根据 userDefineDownloadDir 来设置 if ( ! DisClientConfig . getInstance ( ) . enableLocalDownloadDirInClassPath ) { return OsUtil . pathJoin ( DisClientConfig . getInstance ( ) . userDefineDownloadDir , fileName ) ; } if ( targetDirPath != null ) { if ( targetDirPath . startsWith ( "/" ) ) { return OsUtil . pathJoin ( targetDirPath , fileName ) ; } return OsUtil . pathJoin ( ClassLoaderUtil . getClassPath ( ) , targetDirPath , fileName ) ; } return OsUtil . pathJoin ( ClassLoaderUtil . getClassPath ( ) , fileName ) ;
public class PDBFileParser { /** * Handler for TURN lines * < pre > * COLUMNS DATA TYPE FIELD DEFINITION * 1 - 6 Record name " TURN " * 8 - 10 Integer seq Turn number ; starts with 1 and * increments by one . * 12 - 14 LString ( 3 ) turnId Turn identifier * 16 - 18 Residue name initResName Residue name of initial residue in * turn . * 20 Character initChainId Chain identifier for the chain * containing this turn . * 21 - 24 Integer initSeqNum Sequence number of initial residue * in turn . * 25 AChar initICode Insertion code of initial residue * in turn . * 27 - 29 Residue name endResName Residue name of terminal residue * of turn . * 31 Character endChainId Chain identifier for the chain * containing this turn . * 32 - 35 Integer endSeqNum Sequence number of terminal * residue of turn . * 36 AChar endICode Insertion code of terminal residue * of turn . * 41 - 70 String comment Associated comment . * < / pre > * @ param line */ private void pdb_TURN_Handler ( String line ) { } }
// Header-only parsing skips secondary-structure records entirely.
if ( params . isHeaderOnly ( ) ) return ;
// Need at least 36 columns to read through endICode (column 36).
if ( line . length ( ) < 36 ) {
    logger . info ( "TURN line has length under 36. Ignoring it." ) ;
    return ;
}
// Substring indices below are 0-based equivalents of the 1-based PDB
// column ranges documented in the method Javadoc.
String initResName = line . substring ( 15 , 18 ) . trim ( ) ;
String initChainId = line . substring ( 19 , 20 ) ;
String initSeqNum = line . substring ( 20 , 24 ) . trim ( ) ;
String initICode = line . substring ( 24 , 25 ) ;
String endResName = line . substring ( 26 , 29 ) . trim ( ) ;
String endChainId = line . substring ( 30 , 31 ) ;
String endSeqNum = line . substring ( 31 , 35 ) . trim ( ) ;
String endICode = line . substring ( 35 , 36 ) ;
// System.out.println(initResName + " " + initChainId + " " + initSeqNum + " " + initICode + " " +
//      endResName + " " + endChainId + " " + endSeqNum + " " + endICode);
// Collect the parsed fields into a map and queue it for later processing.
Map < String , String > m = new HashMap < String , String > ( ) ;
m . put ( "initResName" , initResName ) ;
m . put ( "initChainId" , initChainId ) ;
m . put ( "initSeqNum" , initSeqNum ) ;
m . put ( "initICode" , initICode ) ;
m . put ( "endResName" , endResName ) ;
m . put ( "endChainId" , endChainId ) ;
m . put ( "endSeqNum" , endSeqNum ) ;
m . put ( "endICode" , endICode ) ;
turnList . add ( m ) ;
public class MDWMessageCreator { /** * ( non - Javadoc ) * @ see * org . springframework . jms . core . MessageCreator # createMessage ( javax . jms * . Session ) */ @ Override public Message createMessage ( Session session ) throws JMSException { } }
// Build the text message, then apply each optional JMS property that was
// configured on this creator.
Message jmsMessage = session.createTextMessage(requestMessage);
if (correlationId != null) {
    jmsMessage.setJMSCorrelationID(correlationId);
}
if (replyQueue != null) {
    jmsMessage.setJMSReplyTo(replyQueue);
}
if (delaySeconds > 0) {
    ApplicationContext.getJmsProvider().setMessageDelay(null, jmsMessage, delaySeconds);
}
return jmsMessage;
public class SSLUtils { /** * The purpose of this method is to take two SSL engines and have them do an * SSL handshake . If an exception is thrown , then the handshake was not successful . * @ param clientEngine * @ param serverEngine * @ throws SSLException if handshake fails */ public static void handleHandshake ( SSLEngine clientEngine , SSLEngine serverEngine ) throws SSLException { } }
final boolean bTrace = TraceComponent . isAnyTracingEnabled ( ) ;
if ( bTrace && tc . isEntryEnabled ( ) ) {
    Tr . entry ( tc , "handleHandshake" ) ;
}
if ( clientEngine == null || serverEngine == null ) {
    throw new SSLException ( "Null engine found: engine1=" + clientEngine + ", engine2=" + serverEngine ) ;
}
// The two engines take turns: "current" produces/consumes handshake data,
// "other" is its peer. Roles are swapped at the bottom of the main loop.
SSLEngine currentEngine = clientEngine ;
SSLEngine otherEngine = serverEngine ;
if ( bTrace && tc . isDebugEnabled ( ) ) {
    Tr . debug ( tc , "Parameters: engine1=" + currentEngine . hashCode ( ) + ", engine2=" + otherEngine . hashCode ( ) ) ;
}
// netBuffer: encrypted data arriving from the peer (unwrap input).
// decryptedNetBuffer: plaintext produced by unwrap (unused during handshake).
// encryptedAppBuffer: encrypted handshake data produced by wrap.
WsByteBuffer netBuffer1 = allocateByteBuffer ( currentEngine . getSession ( ) . getPacketBufferSize ( ) , false ) ;
WsByteBuffer netBuffer = netBuffer1 ;
WsByteBuffer decryptedNetBuffer1 = allocateByteBuffer ( currentEngine . getSession ( ) . getApplicationBufferSize ( ) , false ) ;
WsByteBuffer decryptedNetBuffer = decryptedNetBuffer1 ;
WsByteBuffer encryptedAppBuffer1 = allocateByteBuffer ( currentEngine . getSession ( ) . getPacketBufferSize ( ) , false ) ;
WsByteBuffer encryptedAppBuffer = encryptedAppBuffer1 ;
SSLEngineResult result ;
Runnable task = null ;
SSLEngine tempEngine = null ;
HandshakeStatus tempStatus = null ;
// The client starts by wrapping (sending ClientHello); the server starts
// by unwrapping.
HandshakeStatus currentStatus = HandshakeStatus . NEED_WRAP ;
HandshakeStatus otherStatus = HandshakeStatus . NEED_UNWRAP ;
if ( bTrace && tc . isDebugEnabled ( ) ) {
    Tr . debug ( tc , "current engine= " + currentEngine . hashCode ( ) + ", status=" + currentStatus ) ;
}
// Loop until both engines are finished handshaking.
while ( isHandshaking ( currentEngine ) || isHandshaking ( otherEngine ) || ( currentStatus != HandshakeStatus . FINISHED ) || ( otherStatus != HandshakeStatus . FINISHED ) ) {
    // Check if data must be written to other engine. The limit==capacity
    // test means the output buffer is in its cleared (writable) state.
    if ( ( currentStatus == HandshakeStatus . NEED_WRAP ) && ( encryptedAppBuffer . limit ( ) == encryptedAppBuffer . capacity ( ) ) ) {
        if ( bTrace && tc . isEventEnabled ( ) ) {
            Tr . event ( tc , "before wrap: encBuf: " + getBufferTraceInfo ( encryptedAppBuffer ) ) ;
        }
        result = currentEngine . wrap ( emptyBuffer , encryptedAppBuffer . getWrappedByteBuffer ( ) ) ;
        // Flip only when bytes were produced, so the peer can read them.
        if ( 0 < result . bytesProduced ( ) ) {
            encryptedAppBuffer . flip ( ) ;
        }
        currentStatus = result . getHandshakeStatus ( ) ;
        if ( bTrace && tc . isEventEnabled ( ) ) {
            Tr . event ( tc , "after wrap: encBuf: " + getBufferTraceInfo ( encryptedAppBuffer ) + "\r\n\tstatus=" + result . getStatus ( ) + " HSstatus=" + currentStatus + " consumed=" + result . bytesConsumed ( ) + " produced=" + result . bytesProduced ( ) ) ;
        }
    }
    // Check if data must be read from other engine. The limit!=capacity
    // test means the peer left data in the buffer for us to consume.
    else if ( ( currentStatus == HandshakeStatus . FINISHED || currentStatus == HandshakeStatus . NEED_UNWRAP ) && ( netBuffer . limit ( ) != netBuffer . capacity ( ) ) ) {
        if ( bTrace && tc . isEventEnabled ( ) ) {
            Tr . event ( tc , "before unwrap: \r\n\tnetBuf: " + getBufferTraceInfo ( netBuffer ) + "\r\n\tdecBuf: " + getBufferTraceInfo ( decryptedNetBuffer ) ) ;
        }
        result = currentEngine . unwrap ( netBuffer . getWrappedByteBuffer ( ) , decryptedNetBuffer . getWrappedByteBuffer ( ) ) ;
        // handshakes shouldn't produce output so no need to flip the dec buffer
        currentStatus = result . getHandshakeStatus ( ) ;
        if ( bTrace && tc . isEventEnabled ( ) ) {
            Tr . event ( tc , "after unwrap: \r\n\tnetBuf: " + getBufferTraceInfo ( netBuffer ) + "\r\n\tdecBuf: " + getBufferTraceInfo ( decryptedNetBuffer ) + "\r\n\tstatus=" + result . getStatus ( ) + " HSstatus=" + currentStatus + " consumed=" + result . bytesConsumed ( ) + " produced=" + result . bytesProduced ( ) ) ;
        }
        // Clear netBuffer for reuse if all data is drained.
        if ( netBuffer . remaining ( ) == 0 ) {
            netBuffer . clear ( ) ;
        }
    }
    // Handle anything extra that must be done within the engine.
    if ( currentStatus == HandshakeStatus . NEED_TASK ) {
        while ( currentStatus == HandshakeStatus . NEED_TASK ) {
            task = currentEngine . getDelegatedTask ( ) ;
            if ( task != null ) {
                if ( bTrace && tc . isDebugEnabled ( ) ) {
                    Tr . debug ( tc , "Run task" ) ;
                }
                // have a blocking task, go ahead and block this thread
                task . run ( ) ;
                // then loop around and see if we have more to send to peer
                currentStatus = currentEngine . getHandshakeStatus ( ) ;
                if ( bTrace && tc . isDebugEnabled ( ) ) {
                    Tr . debug ( tc , "After task, handshake status=" + currentStatus ) ;
                }
            } else {
                if ( bTrace && tc . isDebugEnabled ( ) ) {
                    Tr . debug ( tc , "No task, setting status to HS_NEED_WRAP" ) ;
                }
                // we were told there was something to do, but got no task
                currentStatus = HandshakeStatus . NEED_WRAP ; // guess that there's some data to be sent now
            }
        }
        // Check if more data needs to be sent from this engine.
        if ( currentStatus == HandshakeStatus . NEED_WRAP ) {
            // More data to send. Start again at the top of the loop without switching engines.
            continue ;
        }
    } // end of NEED_TASK
    if ( bTrace && tc . isDebugEnabled ( ) ) {
        Tr . debug ( tc , "Switching engines" ) ;
    }
    // Encrypted app output data from the current engine becomes network
    // input data for the other engine.
    netBuffer = encryptedAppBuffer ;
    // Save aside the engine that was used in this loop.
    tempEngine = currentEngine ;
    // Assign the engine to be used in the next loop.
    currentEngine = otherEngine ;
    // Save the engine used in this loop for assignment next time around.
    otherEngine = tempEngine ;
    // Save aside the status that was used in this loop.
    tempStatus = currentStatus ;
    // Assign the status to be used in the next loop.
    currentStatus = otherStatus ;
    // Save the status used in this loop for assignment next time around.
    otherStatus = tempStatus ;
    if ( bTrace && tc . isDebugEnabled ( ) ) {
        Tr . debug ( tc , "current engine= " + currentEngine . hashCode ( ) + ", status=" + currentStatus ) ;
    }
}
if ( bTrace && tc . isEntryEnabled ( ) ) {
    Tr . exit ( tc , "handleHandshake" ) ;
}
public class DelegatedClientFactory { /** * Configure OAuth client . * @ param properties the properties */ protected void configureOAuth20Client ( final Collection < BaseClient > properties ) { } }
// Running index used only to generate unique fallback client names.
val index = new AtomicInteger ( ) ;
// Build one generic OAuth2 client per configured entry that has both an
// id and a secret; entries missing either are silently skipped.
pac4jProperties . getOauth2 ( ) . stream ( ) . filter ( oauth -> StringUtils . isNotBlank ( oauth . getId ( ) ) && StringUtils . isNotBlank ( oauth . getSecret ( ) ) ) . forEach ( oauth -> {
    val client = new GenericOAuth20Client ( ) ;
    client . setKey ( oauth . getId ( ) ) ;
    client . setSecret ( oauth . getSecret ( ) ) ;
    client . setProfileAttrs ( oauth . getProfileAttrs ( ) ) ;
    client . setProfileNodePath ( oauth . getProfilePath ( ) ) ;
    client . setProfileUrl ( oauth . getProfileUrl ( ) ) ;
    client . setProfileVerb ( Verb . valueOf ( oauth . getProfileVerb ( ) . toUpperCase ( ) ) ) ;
    client . setTokenUrl ( oauth . getTokenUrl ( ) ) ;
    client . setAuthUrl ( oauth . getAuthUrl ( ) ) ;
    client . setCustomParams ( oauth . getCustomParams ( ) ) ;
    val count = index . intValue ( ) ;
    // Fall back to a generated name (simple class name + current index)
    // when no client name is configured.
    if ( StringUtils . isBlank ( oauth . getClientName ( ) ) ) {
        client . setName ( client . getClass ( ) . getSimpleName ( ) + count ) ;
    }
    client . setCallbackUrlResolver ( new PathParameterCallbackUrlResolver ( ) ) ;
    configureClient ( client , oauth ) ;
    index . incrementAndGet ( ) ;
    LOGGER . debug ( "Created client [{}]" , client ) ;
    properties . add ( client ) ;
} ) ;
public class LongBitSet { /** * this = this AND NOT other */ void andNot ( LongBitSet other ) { } }
// Clear every bit of this set that is also set in the other set, over the
// words both sets have in common. Words beyond the shorter set are
// untouched: ANDing with NOT of an absent (all-zero) word is a no-op.
final int sharedWords = Math.min(numWords, other.numWords);
for (int w = sharedWords - 1; w >= 0; w--) {
    bits[w] &= ~other.bits[w];
}
public class AmazonElastiCacheClient { /** * Dynamically decreases the number of replics in a Redis ( cluster mode disabled ) replication group or the number of * replica nodes in one or more node groups ( shards ) of a Redis ( cluster mode enabled ) replication group . This * operation is performed with no cluster down time . * @ param decreaseReplicaCountRequest * @ return Result of the DecreaseReplicaCount operation returned by the service . * @ throws ReplicationGroupNotFoundException * The specified replication group does not exist . * @ throws InvalidReplicationGroupStateException * The requested replication group is not in the < code > available < / code > state . * @ throws InvalidCacheClusterStateException * The requested cluster is not in the < code > available < / code > state . * @ throws InvalidVPCNetworkStateException * The VPC network is in an invalid state . * @ throws InsufficientCacheClusterCapacityException * The requested cache node type is not available in the specified Availability Zone . * @ throws ClusterQuotaForCustomerExceededException * The request cannot be processed because it would exceed the allowed number of clusters per customer . * @ throws NodeGroupsPerReplicationGroupQuotaExceededException * The request cannot be processed because it would exceed the maximum allowed number of node groups * ( shards ) in a single replication group . The default maximum is 15 * @ throws NodeQuotaForCustomerExceededException * The request cannot be processed because it would exceed the allowed number of cache nodes per customer . * @ throws ServiceLinkedRoleNotFoundException * The specified service linked role ( SLR ) was not found . * @ throws NoOperationException * The operation was not performed because no changes were required . * @ throws InvalidParameterValueException * The value for a parameter is invalid . * @ throws InvalidParameterCombinationException * Two or more incompatible parameters were specified . * @ sample AmazonElastiCache . 
DecreaseReplicaCount * @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / elasticache - 2015-02-02 / DecreaseReplicaCount " * target = " _ top " > AWS API Documentation < / a > */ @ Override public ReplicationGroup decreaseReplicaCount ( DecreaseReplicaCountRequest request ) { } }
// Run the standard SDK pre-execution hooks (request customization,
// handlers) before dispatching the service call.
request = beforeClientExecution ( request ) ;
return executeDecreaseReplicaCount ( request ) ;
public class YearWeek { /** * Obtains the current year - week from the specified clock . * This will query the specified clock to obtain the current year - week . * Using this method allows the use of an alternate clock for testing . * The alternate clock may be introduced using { @ link Clock dependency injection } . * @ param clock the clock to use , not null * @ return the current year - week , not null */ public static YearWeek now ( Clock clock ) { } }
// Query the clock exactly once so the year and the week number are
// derived from the same instant (avoids a race around a year boundary).
final LocalDate now = LocalDate . now ( clock ) ; // called once
return YearWeek . of ( now . get ( WEEK_BASED_YEAR ) , now . get ( WEEK_OF_WEEK_BASED_YEAR ) ) ;
public class AbstractLoggerInjector { /** * { @ inheritDoc } */ public final void injectMembers ( Object target ) { } }
// Final fields cannot be injected; leave them untouched.
if (isFinal(field.getModifiers())) {
    return;
}
try {
    // Only inject when the field was not already assigned elsewhere.
    if (field.get(target) == null) {
        field.set(target, createLogger(target.getClass()));
    }
} catch (Exception e) {
    // BUGFIX: pass the original exception as the cause instead of
    // flattening it into the message only, so the full stack trace of the
    // underlying failure is preserved for diagnosis.
    throw new ProvisionException(
            format("Impossible to set logger for field '%s', see nested exception: %s", field, e.getMessage()),
            e);
}
public class InternalTimersSnapshotReaderWriters { public static < K , N > InternalTimersSnapshotWriter getWriterForVersion ( int version , InternalTimersSnapshot < K , N > timersSnapshot , TypeSerializer < K > keySerializer , TypeSerializer < N > namespaceSerializer ) { } }
switch ( version ) {
    // Snapshots written before explicit versioning was introduced.
    case NO_VERSION :
        return new InternalTimersSnapshotWriterPreVersioned < > ( timersSnapshot , keySerializer , namespaceSerializer ) ;
    case 1 :
        return new InternalTimersSnapshotWriterV1 < > ( timersSnapshot , keySerializer , namespaceSerializer ) ;
    // Current serialization format version.
    case InternalTimerServiceSerializationProxy . VERSION :
        return new InternalTimersSnapshotWriterV2 < > ( timersSnapshot , keySerializer , namespaceSerializer ) ;
    default :
        // guard for future
        throw new IllegalStateException ( "Unrecognized internal timers snapshot writer version: " + version ) ;
}
public class ClientInitializer { /** * Returns the annotation map for the specified element . */ public static PropertyMap getAnnotationMap ( ControlBeanContext cbc , AnnotatedElement annotElem ) { } }
if ( cbc == null ) return new AnnotatedElementMap ( annotElem ) ; return cbc . getAnnotationMap ( annotElem ) ;
public class RAAnnotationProcessor { /** * Create a RaConnector xml object and all its associated xml objects * that represents the combined ra . xml , wlp - ra . xml , and annotations , if any * that are present in the rar file . * @ return RaConnector that represents the resource adapter instance * @ throws ResourceAdapterInternalException if any JCA spec violations are detected */ public RaConnector getProcessedConnector ( ) throws ResourceAdapterInternalException { } }
final boolean trace = TraceComponent . isAnyTracingEnabled ( ) ;
// Annotation processing only applies to JCA 1.6+ style adapters; for
// older descriptors the ra.xml is authoritative as-is.
String jcaVersion = getAdapterVersion ( deploymentDescriptor ) ;
boolean processAnno = checkProcessAnnotations ( deploymentDescriptor , jcaVersion ) ;
if ( ! processAnno ) {
    if ( trace && tc . isDebugEnabled ( ) )
        Tr . debug ( this , tc , "Skip annotation processing and return the RaConnector that was passed in" ) ;
    return deploymentDescriptor ;
}
// Populate connectorClasses (and friends) by scanning the rar.
findAnnotatedClasses ( ) ;
// JCA 1.6 spec
// The implementation class name of the ResourceAdapter interface is specified in
// the resource adapter deployment descriptor or through the Connector annotation
// described in Section 18.4, "@Connector" on page 18-6.
// It is optional for a resource adapter implementation to bundle a JavaBean class
// implementing the javax.resource.spi.ResourceAdapter interface (see
// Section 5.3.1, "ResourceAdapter JavaBean and Bootstrapping a Resource Adapter
// Instance" on page 5-4). In particular, a resource adapter implementation that only
// performs outbound communication to the EIS might not provide a JavaBean that
// implements the ResourceAdapter interface or a JavaBean annotated with the
// Connector annotation.
// If the descriptor has a resource adapter descriptor that has the name of the resource adapter class
// then
//   If there are one or more @Connector,
//   then need to verify the class is annotated by only one of them or none of them
//   If no classes are annotated with @Connector, then verify the class can be loaded
// If there isn't a resource adapter class specified in the descriptor or there isn't a ra.xml,
// then verify there is only one class annotated with @Connector
// It is not necessary to locate a JavaBean that implements the ResourceAdapter interface.
Class < ? > resourceAdapterClass = null ;
if ( deploymentDescriptor != null ) {
    RaResourceAdapter rxRA = deploymentDescriptor . getResourceAdapter ( ) ;
    if ( rxRA != null ) {
        String rxAdapterClassName = rxRA . getResourceAdapterClass ( ) ;
        if ( trace && tc . isDebugEnabled ( ) )
            Tr . debug ( this , tc , "rxAdapterClassName: " , rxAdapterClassName ) ;
        if ( rxAdapterClassName != null ) {
            // look to see if this class name is in the list of classes annotated with @Connector
            for ( Class < ? > connectorClass : connectorClasses ) {
                if ( trace && tc . isDebugEnabled ( ) )
                    Tr . debug ( this , tc , "connectorClass to examine: " , connectorClass ) ;
                if ( rxAdapterClassName . equals ( connectorClass . getName ( ) ) ) {
                    resourceAdapterClass = connectorClass ;
                    if ( trace && tc . isDebugEnabled ( ) )
                        Tr . debug ( this , tc , "connectorClasses - resourceAdapterClass: " , resourceAdapterClass ) ;
                    break ;
                }
            } // end for ClassInfo : connectorClasses
            // if an annotated class was not found, check the <resourceadapter-class> is present by loading it
            if ( resourceAdapterClass == null ) {
                try {
                    resourceAdapterClass = raClassLoader . loadClass ( rxAdapterClassName ) ;
                    if ( trace && tc . isDebugEnabled ( ) )
                        Tr . debug ( this , tc , "raClassLoader - resourceAdapterClass: " , resourceAdapterClass ) ;
                } catch ( ClassNotFoundException e ) {
                    // The declared <resourceadapter-class> must be loadable; a missing class is a spec violation.
                    throw new ResourceAdapterInternalException ( Tr . formatMessage ( tc , "J2CA9904.required.raclass.missing" , rxAdapterClassName , adapterName ) , e ) ;
                }
            } // end adapterClass == null
        } else {
            // rxAdapterClass == null, check for class annotated with @Connector
            if ( trace && tc . isDebugEnabled ( ) )
                Tr . debug ( this , tc , "ra.xml does not contain a <resourceadapter-class> entry" ) ;
        }
    } else {
        // ra.xml does not have a <resourceadapter> entry
        if ( trace && tc . isDebugEnabled ( ) )
            Tr . debug ( this , tc , "ra.xml does not contain a <resourceadapter> entry" ) ;
    }
} else {
    // rar does not have a ra.xml
    if ( trace && tc . isDebugEnabled ( ) )
        Tr . debug ( this , tc , "rar does not contain a ra.xml" , resourceAdapterClass ) ;
}
// If resource adapter class was not found, do @Connector annotation validation and try to get the
// resource adapter class from there.
if ( resourceAdapterClass == null ) {
    if ( connectorClasses . size ( ) == 0 ) {
        // Legal per the spec for outbound-only adapters: no @Connector class required.
        if ( trace && tc . isDebugEnabled ( ) ) {
            Tr . debug ( this , tc , "rar does not contain a class annotated with @Connector" ) ;
            // throw new ResourceAdapterInternalException(Tr.formatMessage(tc, "J2CA9923.connector.anno.missing", adapterName));
        }
    } else if ( connectorClasses . size ( ) > 1 ) {
        // Ambiguous: more than one @Connector with no descriptor to disambiguate.
        throw new ResourceAdapterInternalException ( Tr . formatMessage ( tc , "J2CA9922.multiple.connector.anno.found" , adapterName ) ) ;
    } else {
        // there is only one annotated connectorClass
        resourceAdapterClass = connectorClasses . get ( 0 ) ;
    }
}
// Merge descriptor and annotation metadata into the final connector model.
RaConnector connector = processConnector ( resourceAdapterClass , deploymentDescriptor ) ;
return connector ;
public class Tile { /** * Defines if the date of the clock will be drawn . * @ param VISIBLE */ public void setDateVisible ( final boolean VISIBLE ) { } }
// Until the JavaFX property is lazily created, the flag lives in the plain
// backing field and listeners are notified directly; once the property
// exists, it handles notification itself.
if (dateVisible != null) {
    dateVisible.set(VISIBLE);
} else {
    _dateVisible = VISIBLE;
    fireTileEvent(VISIBILITY_EVENT);
}
public class LoggingManager { /** * Add an file appender for a device * @ param fileName * @ param deviceName * @ throws DevFailed */ public void addFileAppender ( final String fileName , final String deviceName ) throws DevFailed { } }
// No-op when logging has not been initialised with a root logger.
if (rootLoggerBack != null) {
    logger.debug("add file appender of {} in {}", deviceName, fileName);
    final String deviceNameLower = deviceName.toLowerCase(Locale.ENGLISH);
    // Ensure the target file exists and is writable before wiring anything up.
    final File f = new File(fileName);
    if (!f.exists()) {
        try {
            f.createNewFile();
        } catch (final IOException e) {
            throw DevFailedUtils.newDevFailed(ExceptionMessages.CANNOT_OPEN_FILE, "impossible to open file " + fileName);
        }
    }
    if (!f.canWrite()) {
        throw DevFailedUtils.newDevFailed(ExceptionMessages.CANNOT_OPEN_FILE, "impossible to open file " + fileName);
    }
    final LoggerContext loggerContext = (LoggerContext) LoggerFactory.getILoggerFactory();
    final FileAppender rfAppender = new FileAppender(deviceNameLower);
    fileAppenders.put(deviceNameLower, rfAppender);
    rfAppender.setName("FILE-" + deviceNameLower);
    rfAppender.setLevel(rootLoggingLevel);
    rfAppender.setFile(fileName);
    rfAppender.setAppend(true);
    rfAppender.setContext(loggerContext);
    // Fixed-window rolling: rolled files are named fileName1..fileName3.
    final FixedWindowRollingPolicy rollingPolicy = new FixedWindowRollingPolicy();
    // rolling policies need to know their parent
    // it's one of the rare cases, where a sub-component knows about its parent
    rollingPolicy.setParent(rfAppender);
    rollingPolicy.setContext(loggerContext);
    rollingPolicy.setFileNamePattern(fileName + "%i");
    // BUGFIX: the original called setMaxIndex(1) and then setMaxIndex(3);
    // the first call was clearly intended to set the window's minimum index.
    rollingPolicy.setMinIndex(1);
    rollingPolicy.setMaxIndex(3);
    rollingPolicy.start();
    // Roll over once the active file reaches 5 MB.
    final SizeBasedTriggeringPolicy<ILoggingEvent> triggeringPolicy = new SizeBasedTriggeringPolicy<ILoggingEvent>();
    triggeringPolicy.setMaxFileSize(FileSize.valueOf("5 mb"));
    triggeringPolicy.setContext(loggerContext);
    triggeringPolicy.start();
    final PatternLayoutEncoder encoder = new PatternLayoutEncoder();
    encoder.setContext(loggerContext);
    encoder.setPattern("%-5level %d %X{deviceName} - %thread | %logger{25}.%M:%L - %msg%n");
    encoder.start();
    rfAppender.setEncoder(encoder);
    rfAppender.setRollingPolicy(rollingPolicy);
    rfAppender.setTriggeringPolicy(triggeringPolicy);
    // Start exactly once, after full configuration (the original started the
    // appender twice and also left a stray System.out.println debug line).
    rfAppender.start();
    rootLoggerBack.addAppender(rfAppender);
}
public class DITypeInfo { /** * Retrieves whether values of this type have a fixed precision and * scale . < p > * @ return whether values of this type have a fixed * precision and scale . */ Boolean isFixedPrecisionScale ( ) { } }
switch ( type ) { case Types . SQL_BIGINT : case Types . SQL_DECIMAL : case Types . SQL_DOUBLE : case Types . SQL_FLOAT : case Types . SQL_INTEGER : case Types . SQL_NUMERIC : case Types . SQL_REAL : case Types . SQL_SMALLINT : case Types . TINYINT : return Boolean . FALSE ; default : return null ; }
public class Ec2MachineConfigurator { /** * Checks whether a VM is started or not ( which is stronger than { @ link # checkVmIsKnown ( ) } ) . * @ return true if the VM is started , false otherwise */ private boolean checkVmIsStarted ( ) { } }
DescribeInstancesRequest dis = new DescribeInstancesRequest ( ) ; dis . setInstanceIds ( Collections . singletonList ( this . machineId ) ) ; DescribeInstancesResult disresult = this . ec2Api . describeInstances ( dis ) ; // Obtain availability zone ( for later use , eg . volume attachment ) . // Necessary if no availability zone is specified in configuration // ( because volumes must be attached to instances in the same availability zone ) . this . availabilityZone = disresult . getReservations ( ) . get ( 0 ) . getInstances ( ) . get ( 0 ) . getPlacement ( ) . getAvailabilityZone ( ) ; return "running" . equalsIgnoreCase ( disresult . getReservations ( ) . get ( 0 ) . getInstances ( ) . get ( 0 ) . getState ( ) . getName ( ) ) ;
public class HttpUtilities { /** * Obtain newline - delimited headers from response * @ param response HttpServletResponse to scan * @ return newline - delimited headers */ public static String getHeaders ( HttpServletResponse response ) { } }
String headerString = "" ; Collection < String > headerNames = response . getHeaderNames ( ) ; for ( String headerName : headerNames ) { // there may be multiple headers per header name for ( String headerValue : response . getHeaders ( headerName ) ) { if ( headerString . length ( ) != 0 ) { headerString += "\n" ; } headerString += headerName + ": " + headerValue ; } } return headerString ;
public class Sorting { /** * Hybrid sorting mechanism similar to Introsort by David Musser . Uses quicksort ' s * partitioning mechanism recursively until the resulting array is small or the * recursion is too deep , and then use insertion sort for the rest . * This is the same basic algorithm used by the GNU Standard C + + library . */ public static void hybridSort ( Object [ ] a , Comparator < Object > cmp ) { } }
hybridSort ( a , 0 , a . length - 1 , cmp , log2 ( a . length ) * 2 ) ;
public class RowKey { /** * Checks a row key to determine if it contains the metric UID . If salting is * enabled , we skip the salt bytes . * @ param metric The metric UID to match * @ param row _ key The row key to match on * @ return 0 if the two arrays are identical , otherwise the difference * between the first two different bytes ( treated as unsigned ) , otherwise * the different between their lengths . * @ throws IndexOutOfBoundsException if either array isn ' t large enough . */ public static int rowKeyContainsMetric ( final byte [ ] metric , final byte [ ] row_key ) { } }
int idx = Const . SALT_WIDTH ( ) ; for ( int i = 0 ; i < metric . length ; i ++ , idx ++ ) { if ( metric [ i ] != row_key [ idx ] ) { return ( metric [ i ] & 0xFF ) - ( row_key [ idx ] & 0xFF ) ; // " promote " to unsigned . } } return 0 ;
public class QueryRpc { /** * Parses a query string legacy style query from the URI * @ param tsdb The TSDB we belong to * @ param query The HTTP Query for parsing * @ param expressions A list of parsed expression trees filled from the URI . * If this is null , it means any expressions in the URI will be skipped . * @ return A TSQuery if parsing was successful * @ throws BadRequestException if parsing was unsuccessful * @ since 2.3 */ public static TSQuery parseQuery ( final TSDB tsdb , final HttpQuery query , final List < ExpressionTree > expressions ) { } }
final TSQuery data_query = new TSQuery();
data_query.setStart(query.getRequiredQueryStringParam("start"));
data_query.setEnd(query.getQueryStringParam("end"));
// Copy the boolean flags from the URI onto the query object.
if (query.hasQueryStringParam("padding")) {
    data_query.setPadding(true);
}
if (query.hasQueryStringParam("no_annotations")) {
    data_query.setNoAnnotations(true);
}
if (query.hasQueryStringParam("global_annotations")) {
    data_query.setGlobalAnnotations(true);
}
if (query.hasQueryStringParam("show_tsuids")) {
    data_query.setShowTSUIDs(true);
}
if (query.hasQueryStringParam("ms")) {
    data_query.setMsResolution(true);
}
if (query.hasQueryStringParam("show_query")) {
    data_query.setShowQuery(true);
}
if (query.hasQueryStringParam("show_stats")) {
    data_query.setShowStats(true);
}
if (query.hasQueryStringParam("show_summary")) {
    data_query.setShowSummary(true);
}
// handle tsuid queries first
if (query.hasQueryStringParam("tsuid")) {
    final List<String> tsuids = query.getQueryStringParams("tsuid");
    for (String q : tsuids) {
        parseTsuidTypeSubQuery(q, data_query);
    }
}
if (query.hasQueryStringParam("m")) {
    final List<String> legacy_queries = query.getQueryStringParams("m");
    for (String q : legacy_queries) {
        parseMTypeSubQuery(q, data_query);
    }
}
// TODO - testing out the graphite style expressions here with the "exp"
// param that could stand for experimental or expression ;)
if (expressions != null) {
    if (query.hasQueryStringParam("exp")) {
        final List<String> uri_expressions = query.getQueryStringParams("exp");
        final List<String> metric_queries = new ArrayList<String>(uri_expressions.size());
        // parse the expressions into their trees. If one or more expressions
        // are improper then it will toss an exception up
        expressions.addAll(Expressions.parseExpressions(uri_expressions, data_query, metric_queries));
        // iterate over each of the parsed metric queries and store it in the
        // TSQuery list so that we fetch the data for them.
        for (final String mq : metric_queries) {
            parseMTypeSubQuery(mq, data_query);
        }
    }
} else if (query.hasQueryStringParam("exp")) {
    // BUG FIX: the original logged this message whenever expressions == null,
    // even when the request carried no "exp" parameter at all. Only warn when
    // an expression was actually supplied to the wrong endpoint.
    if (LOG.isDebugEnabled()) {
        LOG.debug("Received a request with an expression but at the " + "wrong endpoint: " + query);
    }
}
if (data_query.getQueries() == null || data_query.getQueries().size() < 1) {
    throw new BadRequestException("Missing sub queries");
}
// Filter out duplicate queries
Set<TSSubQuery> query_set = new LinkedHashSet<TSSubQuery>(data_query.getQueries());
data_query.getQueries().clear();
data_query.getQueries().addAll(query_set);
return data_query;
public class BaseFunction { /** * Make value as DontEnum , DontDelete , ReadOnly * prototype property of this Function object */ public void setImmunePrototypeProperty ( Object value ) { } }
if ( ( prototypePropertyAttributes & READONLY ) != 0 ) { throw new IllegalStateException ( ) ; } prototypeProperty = ( value != null ) ? value : UniqueTag . NULL_VALUE ; prototypePropertyAttributes = DONTENUM | PERMANENT | READONLY ;
public class RuleTerminalNode { @ SuppressWarnings ( "unchecked" ) public void readExternal ( ObjectInput in ) throws IOException , ClassNotFoundException { } }
super . readExternal ( in ) ; rule = ( RuleImpl ) in . readObject ( ) ; subrule = ( GroupElement ) in . readObject ( ) ; subruleIndex = in . readInt ( ) ; previousTupleSinkNode = ( LeftTupleSinkNode ) in . readObject ( ) ; nextTupleSinkNode = ( LeftTupleSinkNode ) in . readObject ( ) ; salienceDeclarations = ( Declaration [ ] ) in . readObject ( ) ; enabledDeclarations = ( Declaration [ ] ) in . readObject ( ) ; consequenceName = ( String ) in . readObject ( ) ; fireDirect = rule . getActivationListener ( ) . equals ( "direct" ) ; initDeclarations ( ) ;
public class MembershipManager { /** * Invoked on the master to send the member list ( see { @ link MembersUpdateOp } ) to non - master nodes . */ private void sendMemberListToOthers ( ) { } }
if ( ! clusterService . isMaster ( ) || ! clusterService . isJoined ( ) || clusterService . getClusterJoinManager ( ) . isMastershipClaimInProgress ( ) ) { if ( logger . isFineEnabled ( ) ) { logger . fine ( "Cannot publish member list to cluster. Is-master: " + clusterService . isMaster ( ) + ", joined: " + clusterService . isJoined ( ) + " , mastership claim in progress: " + clusterService . getClusterJoinManager ( ) . isMastershipClaimInProgress ( ) ) ; } return ; } MemberMap memberMap = getMemberMap ( ) ; MembersView membersView = memberMap . toMembersView ( ) ; if ( logger . isFineEnabled ( ) ) { logger . fine ( "Sending member list to the non-master nodes: " + memberListString ( ) ) ; } for ( MemberImpl member : memberMap . getMembers ( ) ) { if ( member . localMember ( ) ) { continue ; } MembersUpdateOp op = new MembersUpdateOp ( member . getUuid ( ) , membersView , clusterService . getClusterTime ( ) , null , false ) ; op . setCallerUuid ( clusterService . getThisUuid ( ) ) ; nodeEngine . getOperationService ( ) . send ( op , member . getAddress ( ) ) ; }
public class AbstractSpecWithPrimaryKey { /** * Sets the primary key . */ public AbstractSpecWithPrimaryKey < T > withPrimaryKey ( PrimaryKey primaryKey ) { } }
if ( primaryKey == null ) this . keyComponents = null ; else { this . keyComponents = primaryKey . getComponents ( ) ; } return this ;
public class SPX { /** * / * [ deutsch ] * < p > Implementierungsmethode des Interface { @ link Externalizable } . < / p > * < p > Das erste Byte enth & auml ; lt den Typ des zu serialisierenden Objekts . * Danach folgen die Daten - Bits in einer vielleicht bit - komprimierten Darstellung . < / p > * @ serialData data layout see { @ code writeReplace ( ) } - method of object * to be serialized * @ param out output stream * @ throws IOException in any case of IO - failures */ @ Override public void writeExternal ( ObjectOutput out ) throws IOException { } }
out . writeByte ( this . type ) ; switch ( this . type ) { case FRENCH_REV : this . writeFrenchRev ( out ) ; break ; default : throw new InvalidClassException ( "Unsupported calendar type." ) ; }
public class ContextUtils { /** * Get the { @ link android . app . DownloadManager } service for this context . * @ param context the context . * @ return the { @ link android . app . DownloadManager } */ @ TargetApi ( Build . VERSION_CODES . GINGERBREAD ) public static DownloadManager getDownloadManager ( Context context ) { } }
return ( DownloadManager ) context . getSystemService ( Context . DOWNLOAD_SERVICE ) ;
public class NameResolverRegistry { /** * Returns the default registry that loads providers via the Java service loader mechanism . */ public static synchronized NameResolverRegistry getDefaultRegistry ( ) { } }
if ( instance == null ) { List < NameResolverProvider > providerList = ServiceProviders . loadAll ( NameResolverProvider . class , getHardCodedClasses ( ) , NameResolverProvider . class . getClassLoader ( ) , new NameResolverPriorityAccessor ( ) ) ; if ( providerList . isEmpty ( ) ) { logger . warning ( "No NameResolverProviders found via ServiceLoader, including for DNS. This " + "is probably due to a broken build. If using ProGuard, check your configuration" ) ; } instance = new NameResolverRegistry ( ) ; for ( NameResolverProvider provider : providerList ) { logger . fine ( "Service loader found " + provider ) ; if ( provider . isAvailable ( ) ) { instance . addProvider ( provider ) ; } } instance . refreshProviders ( ) ; } return instance ;
public class NameAllocator { /** * Retrieve a name created with { @ link # newName ( String , Object ) } . */ public String get ( Object tag ) { } }
String result = tagToName . get ( tag ) ; if ( result == null ) { throw new IllegalArgumentException ( "unknown tag: " + tag ) ; } return result ;
public class SnackBar { /** * Set the horizontal padding between this SnackBar and it ' s text and button . * @ param padding * @ return This SnackBar for chaining methods . */ public SnackBar horizontalPadding ( int padding ) { } }
mText . setPadding ( padding , mText . getPaddingTop ( ) , padding , mText . getPaddingBottom ( ) ) ; mAction . setPadding ( padding , mAction . getPaddingTop ( ) , padding , mAction . getPaddingBottom ( ) ) ; return this ;
public class VariableNeighbourhoodSearch { /** * Performs a step of VNS . One step consists of : * < ol > * < li > Shaking using the current shaking neighbourhood < / li > * < li > Modification using a new instance of the local search algorithm < / li > * < li > * Acceptance of modified solution if it is a global improvement . In such case , the search continues * with the first shaking neighbourhood ; else , the next shaking neighbourhood will be used ( cyclically ) . * < / li > * < / ol > * @ throws JamesRuntimeException if depending on malfunctioning components ( problem , neighbourhood , . . . ) */ @ Override protected void searchStep ( ) { } }
// One VNS step: shake with the current neighbourhood, improve with a fresh
// local search, and accept the result only on global improvement.
// cyclically reset s to zero if no more shaking neighbourhoods are available
if (s >= getNeighbourhoods().size()) {
    s = 0;
}
// create copy of current solution to shake and modify by applying local search procedure
SolutionType shakedSolution = Solution.checkedCopy(getCurrentSolution());
// 1) SHAKING
// get random move from current shaking neighbourhood
Move<? super SolutionType> shakeMove = getNeighbourhoods().get(s).getRandomMove(shakedSolution, getRandom());
// shake (only if a move was obtained); otherwise the current solution's
// evaluation/validation are carried over unchanged
Evaluation shakedEval = getCurrentSolutionEvaluation();
Validation shakedVal = getCurrentSolutionValidation();
if (shakeMove != null) {
    // evaluate/validate the move BEFORE applying it (delta evaluation)
    shakedEval = evaluate(shakeMove);
    shakedVal = validate(shakeMove);
    shakeMove.apply(shakedSolution);
}
// 2) LOCAL SEARCH
// create instance of local search algorithm
LocalSearch<SolutionType> localSearch = localSearchFactory.create(getProblem());
// set initial solution to be modified
localSearch.setCurrentSolution(shakedSolution, shakedEval, shakedVal);
// interrupt local search algorithm when main VNS search wants to terminate
localSearch.addStopCriterion(_search -> getStatus() == SearchStatus.TERMINATING);
// run local search (blocks until it finishes or is interrupted)
localSearch.start();
// dispose local search when completed
localSearch.dispose();
// 3) ACCEPTANCE
SolutionType lsBestSolution = localSearch.getBestSolution();
Evaluation lsBestSolutionEvaluation = localSearch.getBestSolutionEvaluation();
Validation lsBestSolutionValidation = localSearch.getBestSolutionValidation();
// check improvement: accept only a valid, strictly better solution
if (lsBestSolution != null && lsBestSolutionValidation.passed() // should always be true but it doesn't hurt to check
        && computeDelta(lsBestSolutionEvaluation, getCurrentSolutionEvaluation()) > 0) {
    // improvement: increase number of accepted moves
    incNumAcceptedMoves(1);
    // update current and best solution
    updateCurrentAndBestSolution(lsBestSolution, lsBestSolutionEvaluation, lsBestSolutionValidation);
    // reset shaking neighbourhood
    s = 0;
} else {
    // no improvement: stick with current solution, adopt next shaking neighbourhood
    incNumRejectedMoves(1);
    s++;
}
public class AWSBackupClient { /** * Returns the AWS resource types supported by AWS Backup . * @ param getSupportedResourceTypesRequest * @ return Result of the GetSupportedResourceTypes operation returned by the service . * @ throws ServiceUnavailableException * The request failed due to a temporary failure of the server . * @ sample AWSBackup . GetSupportedResourceTypes * @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / backup - 2018-11-15 / GetSupportedResourceTypes " * target = " _ top " > AWS API Documentation < / a > */ @ Override public GetSupportedResourceTypesResult getSupportedResourceTypes ( GetSupportedResourceTypesRequest request ) { } }
request = beforeClientExecution ( request ) ; return executeGetSupportedResourceTypes ( request ) ;
public class ByteCodeParser { /** * Parses a 64 - bit int . */ long readLong ( ) throws IOException { } }
return ( ( ( long ) _is . read ( ) << 56 ) | ( ( long ) _is . read ( ) << 48 ) | ( ( long ) _is . read ( ) << 40 ) | ( ( long ) _is . read ( ) << 32 ) | ( ( long ) _is . read ( ) << 24 ) | ( ( long ) _is . read ( ) << 16 ) | ( ( long ) _is . read ( ) << 8 ) | ( ( long ) _is . read ( ) ) ) ;
public class PipelineSummaryMarshaller { /** * Marshall the given parameter object . */ public void marshall ( PipelineSummary pipelineSummary , ProtocolMarshaller protocolMarshaller ) { } }
if ( pipelineSummary == null ) { throw new SdkClientException ( "Invalid argument passed to marshall(...)" ) ; } try { protocolMarshaller . marshall ( pipelineSummary . getPipelineName ( ) , PIPELINENAME_BINDING ) ; protocolMarshaller . marshall ( pipelineSummary . getReprocessingSummaries ( ) , REPROCESSINGSUMMARIES_BINDING ) ; protocolMarshaller . marshall ( pipelineSummary . getCreationTime ( ) , CREATIONTIME_BINDING ) ; protocolMarshaller . marshall ( pipelineSummary . getLastUpdateTime ( ) , LASTUPDATETIME_BINDING ) ; } catch ( Exception e ) { throw new SdkClientException ( "Unable to marshall request to JSON: " + e . getMessage ( ) , e ) ; }
public class AbstractCache { /** * Reads a value from the cache by probing the in - memory cache , and if enabled and the in - memory * probe was a miss , the disk cache . * @ param elementKey * the cache key * @ return the cached value , or null if element was not cached */ @ SuppressWarnings ( "unchecked" ) public synchronized ValT get ( Object elementKey ) { } }
KeyT key = ( KeyT ) elementKey ; ValT value = cache . get ( key ) ; if ( value != null ) { // memory hit Log . d ( name , "MEM cache hit for " + key . toString ( ) ) ; return value ; } // memory miss , try reading from disk File file = getFileForKey ( key ) ; if ( file . exists ( ) ) { // if file older than expirationInMinutes , remove it long lastModified = file . lastModified ( ) ; Date now = new Date ( ) ; long ageInMinutes = ( ( now . getTime ( ) - lastModified ) / ( 1000 * 60 ) ) ; if ( ageInMinutes >= expirationInMinutes ) { Log . d ( name , "DISK cache expiration for file " + file . toString ( ) ) ; file . delete ( ) ; return null ; } // disk hit Log . d ( name , "DISK cache hit for " + key . toString ( ) ) ; try { value = readValueFromDisk ( file ) ; } catch ( IOException e ) { // treat decoding errors as a cache miss e . printStackTrace ( ) ; return null ; } if ( value == null ) { return null ; } cache . put ( key , value ) ; return value ; } // cache miss return null ;
public class HierarchicalTransformer { /** * Computes an horizontal margin for a given node . * We used to have a static horizontal margin , but images were * truncated for long names . See roboconf - platform # 315 * @ param input a node ( can be null ) * @ return a positive integer */ private static int computeHMargin ( AbstractType input ) { } }
int basis = MIN_H_MARGIN ; if ( input != null && input . getName ( ) . length ( ) > 17 ) { // Beyond 17 characters , we give 3 pixels for every new characters . basis += 3 * ( input . getName ( ) . length ( ) - 17 ) ; } return basis ;
public class CreatePipelineRequestMarshaller { /** * Marshall the given parameter object . */ public void marshall ( CreatePipelineRequest createPipelineRequest , ProtocolMarshaller protocolMarshaller ) { } }
if ( createPipelineRequest == null ) { throw new SdkClientException ( "Invalid argument passed to marshall(...)" ) ; } try { protocolMarshaller . marshall ( createPipelineRequest . getName ( ) , NAME_BINDING ) ; protocolMarshaller . marshall ( createPipelineRequest . getUniqueId ( ) , UNIQUEID_BINDING ) ; protocolMarshaller . marshall ( createPipelineRequest . getDescription ( ) , DESCRIPTION_BINDING ) ; protocolMarshaller . marshall ( createPipelineRequest . getTags ( ) , TAGS_BINDING ) ; } catch ( Exception e ) { throw new SdkClientException ( "Unable to marshall request to JSON: " + e . getMessage ( ) , e ) ; }
public class FileUtil {

    /**
     * Deletes the given file, or the given directory together with all of its
     * contents, recursively.
     *
     * @param path the file or directory path to delete
     * @throws IOException if the delete failed at some point
     */
    public static void deleteRecursively(Path path) throws IOException {
        if (Files.isDirectory(path)) {
            // Files.list() keeps an open directory handle until the stream is
            // closed; the original never closed it, leaking the handle (and on
            // some platforms preventing the directory itself from being deleted).
            try (Stream<Path> children = Files.list(path)) {
                children.forEach(file -> {
                    try {
                        deleteRecursively(file);
                    } catch (IOException e) {
                        // Lambdas cannot throw checked exceptions; rethrow
                        // unchecked with the cause preserved.
                        throw new UncheckedIOException(e.getMessage(), e);
                    }
                });
            }
        }
        Files.delete(path);
    }
}
public class Configuration { /** * Gets the short value for < code > key < / code > or < code > defaultValue < / code > if not found . * @ param key key to get value for * @ param defaultValue default value if key not found * @ return value or < code > defaultValue < / code > if not found */ public short getShort ( String key , short defaultValue ) { } }
if ( containsKey ( key ) ) { return Short . parseShort ( get ( key ) ) ; } else { return defaultValue ; }
public class CmsJspVfsAccessBean { /** * Returns a map that lazily calculates links to files in the OpenCms VFS , * which have been adjusted according to the web application path and the * OpenCms static export rules . < p > * Please note that the target is always assumed to be in the OpenCms VFS , so you can ' t use * this method for links external to OpenCms . < p > * Relative links are converted to absolute links , using the current element URI as base . < p > * Relative links are converted to absolute links , using the current OpenCms request context URI as base . < p > * Usage example on a JSP with the EL : < pre > * Link to the " / index . html " file : $ { cms : vfs ( pageContext ) . link [ ' / index . html ' ] } * < / pre > * Usage example on a JSP with the < code > & lt ; cms : contentaccess & gt ; < / code > tag : < pre > * & lt ; cms : contentload . . . & gt ; * & lt ; cms : contentaccess var = " content " / & gt ; * Link to the " / index . html " file : $ { content . vfs . link [ ' / index . html ' ] } * & lt ; / cms : contentload & gt ; < / pre > * @ return a map that lazily calculates links to resources in the OpenCms VFS * @ see org . opencms . jsp . CmsJspActionElement # link ( String ) * @ see org . opencms . jsp . CmsJspTagLink # linkTagAction ( String , javax . servlet . ServletRequest ) */ public Map < String , String > getLink ( ) { } }
if ( m_links == null ) { // create lazy map only on demand m_links = CmsCollectionsGenericWrapper . createLazyMap ( new CmsVfsLinkTransformer ( ) ) ; } return m_links ;
public class DatabaseClientSnippets { /** * [ VARIABLE my _ singer _ id ] */ public Timestamp singleUseReadOnlyTransaction ( long singerId ) { } }
// [ START singleUseReadOnlyTransaction ] String column = "FirstName" ; ReadOnlyTransaction txn = dbClient . singleUseReadOnlyTransaction ( ) ; Struct row = txn . readRow ( "Singers" , Key . of ( singerId ) , Collections . singleton ( column ) ) ; row . getString ( column ) ; Timestamp timestamp = txn . getReadTimestamp ( ) ; // [ END singleUseReadOnlyTransaction ] return timestamp ;
public class LearnTool { /** * 公司名称学习 . * @ param graph */ public void learn ( Graph graph , SplitWord splitWord , Forest ... forests ) { } }
this . splitWord = splitWord ; this . forests = forests ; // 亚洲人名识别 if ( isAsianName ) { findAsianPerson ( graph ) ; } // 外国人名识别 if ( isForeignName ) { findForeignPerson ( graph ) ; }
public class CLI { /** * Set up the TCP socket for annotation . */ public final void server ( ) { } }
String port = parsedArguments . getString ( "port" ) ; String model = parsedArguments . getString ( MODEL ) ; String lexer = parsedArguments . getString ( "lexer" ) ; String dictTag = parsedArguments . getString ( "dictTag" ) ; String dictPath = parsedArguments . getString ( "dictPath" ) ; String clearFeatures = parsedArguments . getString ( "clearFeatures" ) ; String outputFormat = parsedArguments . getString ( "outputFormat" ) ; String lang = parsedArguments . getString ( "language" ) ; Properties serverproperties = setNameServerProperties ( port , model , lang , lexer , dictTag , dictPath , clearFeatures , outputFormat ) ; new NERTaggerServer ( serverproperties ) ;
public class PDBDomainProvider { /** * Handles fetching and processing REST requests . The actual XML parsing is handled * by the handler , which is also in charge of storing interesting data . * @ param url REST request * @ param handler SAX XML parser * @ throws SAXException * @ throws IOException * @ throws ParserConfigurationException */ private static void handleRestRequest ( String url , DefaultHandler handler ) throws SAXException , IOException , ParserConfigurationException { } }
// Fetch XML stream URL u = new URL ( url ) ; InputStream response = URLConnectionTools . getInputStream ( u ) ; InputSource xml = new InputSource ( response ) ; // Parse XML SAXParserFactory factory = SAXParserFactory . newInstance ( ) ; SAXParser saxParser = factory . newSAXParser ( ) ; saxParser . parse ( xml , handler ) ;
public class FragmentManagerUtils { /** * Find a fragment that is under { @ link android . app . FragmentManager } ' s control by the id . * @ param manager the fragment manager . * @ param id the fragment id . * @ param < F > the concrete fragment class parameter . * @ return the fragment . */ @ SuppressWarnings ( "unchecked" ) // we know that the returning fragment is child of fragment . @ TargetApi ( Build . VERSION_CODES . HONEYCOMB ) public static < F extends android . app . Fragment > F findFragmentById ( android . app . FragmentManager manager , int id ) { } }
return ( F ) manager . findFragmentById ( id ) ;
public class ModeUtil { /** * translate a string mode ( 777 or drwxrwxrwx to a octal value ) * @ param strMode * @ return */ public static int toOctalMode ( String strMode ) throws IOException { } }
strMode = strMode . trim ( ) . toLowerCase ( ) ; if ( strMode . length ( ) == 9 || strMode . length ( ) == 10 ) return _toOctalMode ( strMode ) ; if ( strMode . length ( ) <= 4 && strMode . length ( ) > 0 ) return Integer . parseInt ( strMode , 8 ) ; throw new IOException ( "can't translate [" + strMode + "] to a mode value" ) ;