signature
stringlengths
43
39.1k
implementation
stringlengths
0
450k
public class RecyclableArrayList { /** * Create a new empty { @ link RecyclableArrayList } instance with the given capacity . */ public static RecyclableArrayList newInstance ( int minCapacity ) { } }
RecyclableArrayList ret = RECYCLER . get ( ) ; ret . ensureCapacity ( minCapacity ) ; return ret ;
public class DFSInputStream {
    /**
     * Get blocks in the specified range. The locations of all blocks
     * overlapping with the given segment of the file are retrieved. Fetch them
     * from the namenode if not cached.
     *
     * @param offset the offset of the segment to read
     * @param length the length of the segment to read
     * @return consequent segment of located blocks covering [offset, offset+length)
     * @throws IOException if block locations cannot be determined, the cached
     *         location list is missing, or the defensive loop guard trips
     */
    private List<LocatedBlock> getBlockRange(final long offset, final long length) throws IOException {
        List<LocatedBlock> blockRange = new ArrayList<LocatedBlock>();
        // Zero length. Not sure this ever happens in practice.
        if (length == 0)
            return blockRange;
        // A defensive measure to ensure that we never loop here eternally.
        // With a 256M block size, 10000 blocks will correspond to 2.5 TB.
        // No one should read this much data at once in practice.
        int maxLoops = 10000;
        // Copy locatedBlocks to a local data structure. This ensures that
        // a concurrent invocation of openInfo() works OK, the reason being
        // that openInfo may completely replace locatedBlocks.
        DFSLocatedBlocks locatedBlocks = this.locatedBlocks;
        if (locatedBlocks == null) {
            // Make this an IO exception because this is input/output code error.
            throw new IOException("locatedBlocks is null");
        }
        locatedBlocks.blockLocationInfoExpiresIfNeeded();
        long remaining = length;
        long curOff = offset;
        while (remaining > 0) {
            // a defensive check to bail out of this loop at all costs
            if (--maxLoops < 0) {
                String msg = "Failed to getBlockRange at offset " + offset + ", length=" + length
                        + ", curOff=" + curOff + ", remaining=" + remaining + ". Aborting...";
                DFSClient.LOG.warn(msg);
                throw new IOException(msg);
            }
            LocatedBlock blk = locatedBlocks.getBlockContainingOffset(curOff);
            if (blk == null) {
                // Cache miss: fetch locations for the rest of the range from the
                // namenode, merge them into the cached list, and retry the lookup.
                LocatedBlocks newBlocks;
                newBlocks = getLocatedBlocks(src, curOff, remaining);
                if (newBlocks == null) {
                    throw new IOException("Could not get block locations for curOff=" + curOff
                            + ", remaining=" + remaining + " (offset=" + offset + ")");
                }
                locatedBlocks.insertRange(newBlocks.getLocatedBlocks());
                continue;
            }
            blockRange.add(blk);
            // Advance past the portion of this block overlapping the requested range.
            long bytesRead = blk.getStartOffset() + blk.getBlockSize() - curOff;
            remaining -= bytesRead;
            curOff += bytesRead;
        }
        // Sanity-check that the collected blocks actually cover the request.
        DFSClient.checkBlockRange(blockRange, offset, length);
        return blockRange;
    }
}
public class SqlInfoBuilder {
    /**
     * Builds the SqlInfo needed for a LIKE fuzzy query according to the given
     * pattern.
     *
     * @param fieldText the text of the database field
     * @param pattern the pattern used by the LIKE match
     * @return sqlInfo
     */
    public SqlInfo buildLikePatternSql(String fieldText, String pattern) {
        // Default the operator to LIKE when no suffix has been configured.
        this.suffix = StringHelper.isBlank(this.suffix) ? ZealotConst.LIKE_KEY : this.suffix;
        // NOTE(review): the pattern is concatenated into the SQL as a quoted
        // literal rather than bound as a parameter; if it can carry user input
        // this is an SQL-injection vector — confirm callers sanitize it.
        join.append(prefix).append(fieldText).append(this.suffix).append("'").append(pattern).append("' ");
        return sqlInfo.setJoin(join);
    }
}
public class HThriftClient { /** * { @ inheritDoc } */ public Cassandra . Client getCassandra ( String keyspaceNameArg ) { } }
getCassandra ( ) ; if ( keyspaceNameArg != null && ! StringUtils . equals ( keyspaceName , keyspaceNameArg ) ) { if ( log . isDebugEnabled ( ) ) log . debug ( "keyspace reseting from {} to {}" , keyspaceName , keyspaceNameArg ) ; try { cassandraClient . set_keyspace ( keyspaceNameArg ) ; } catch ( InvalidRequestException ire ) { throw new HInvalidRequestException ( ire ) ; } catch ( TException e ) { throw exceptionsTranslator . translate ( e ) ; } keyspaceName = keyspaceNameArg ; } return cassandraClient ;
public class JobFilePartitioner { /** * Do the actual work . * @ see org . apache . hadoop . util . Tool # run ( java . lang . String [ ] ) */ @ Override public int run ( String [ ] args ) throws Exception { } }
myConf = getConf ( ) ; // Presume this is all HDFS paths , even when access as file : / / hdfs = FileSystem . get ( myConf ) ; // Grab input args and allow for - Dxyz style arguments String [ ] otherArgs = new GenericOptionsParser ( myConf , args ) . getRemainingArgs ( ) ; // Grab the arguments we ' re looking for . CommandLine commandLine = parseArgs ( otherArgs ) ; // Grab the input path argument input = commandLine . getOptionValue ( "i" ) ; LOG . info ( "input=" + input ) ; // Grab the input path argument String output = commandLine . getOptionValue ( "o" ) ; LOG . info ( "output=" + output ) ; skipExisting = commandLine . hasOption ( "s" ) ; LOG . info ( "skipExisting=" + skipExisting ) ; moveFiles = commandLine . hasOption ( "m" ) ; LOG . info ( "moveFiles=" + moveFiles ) ; if ( skipExisting && moveFiles ) { throw new IllegalArgumentException ( "Cannot use both options skipExisting and move simultaneously." ) ; } if ( commandLine . hasOption ( "x" ) ) { try { maXretention = Integer . parseInt ( commandLine . getOptionValue ( "x" ) ) ; } catch ( NumberFormatException nfe ) { throw new IllegalArgumentException ( "maXretention option -x is is not a valid number: " + commandLine . getOptionValue ( "x" ) , nfe ) ; } // Additional check if ( maXretention < 0 ) { throw new IllegalArgumentException ( "Cannot retain less than 0 files. Specified maXretention option -x is: " + commandLine . getOptionValue ( "x" ) ) ; } LOG . info ( "maXretention=" + maXretention ) ; if ( moveFiles ) { throw new IllegalArgumentException ( "Cannot use both options maXretention and move simultaneously." ) ; } } else { maXretention = Integer . MAX_VALUE ; } outputPath = new Path ( output ) ; FileStatus outputFileStatus = hdfs . getFileStatus ( outputPath ) ; if ( ! outputFileStatus . isDir ( ) ) { throw new IOException ( "Output is not a directory" + outputFileStatus . getPath ( ) . getName ( ) ) ; } Path inputPath = new Path ( input ) ; URI inputURI = inputPath . 
toUri ( ) ; String inputScheme = inputURI . getScheme ( ) ; LOG . info ( "input scheme is: " + inputScheme ) ; // If input directory is HDFS , then process as such . Assume not scheme is // HDFS if ( ( inputScheme == null ) || ( hdfs . getUri ( ) . getScheme ( ) . equals ( inputScheme ) ) ) { processHDFSSources ( inputPath ) ; } else if ( inputScheme . equals ( "file" ) ) { if ( moveFiles ) { throw new IllegalArgumentException ( "Cannot move files that are not already in hdfs. Input is not HDFS: " + input ) ; } processPlainFileSources ( inputURI ) ; } else { throw new IllegalArgumentException ( "Cannot process files from this URI scheme: " + inputScheme ) ; } Statistics statistics = FileSystem . getStatistics ( outputPath . toUri ( ) . getScheme ( ) , hdfs . getClass ( ) ) ; if ( statistics != null ) { LOG . info ( "HDFS bytes read: " + statistics . getBytesRead ( ) ) ; LOG . info ( "HDFS bytes written: " + statistics . getBytesWritten ( ) ) ; LOG . info ( "HDFS read ops: " + statistics . getReadOps ( ) ) ; System . out . println ( "HDFS large read ops: " + statistics . getLargeReadOps ( ) ) ; LOG . info ( "HDFS write ops: " + statistics . getWriteOps ( ) ) ; } return 0 ;
public class ParamConfig { /** * Prepares the parameter ' s datasource , passing it the extra options and if necessary executing the appropriate * code and caching the value . * @ param extra */ public void prepareParameter ( Map < String , Object > extra ) { } }
if ( from != null ) { from . prepareParameter ( extra ) ; }
public class Util {
    /**
     * Generate UUID across the entire app and it is used for correlationId.
     * Encodes the UUID's 128 bits as URL-safe, unpadded Base64 (22 chars).
     * Improvement: uses the JDK's {@code java.util.Base64} URL encoder, which
     * produces the same URL-safe, padding-free output as commons-codec's
     * {@code encodeBase64URLSafeString}, removing the third-party dependency.
     *
     * @return String correlationId
     */
    public static String getUUID() {
        UUID id = UUID.randomUUID();
        // Pack the two 64-bit halves of the UUID into 16 raw bytes.
        ByteBuffer bb = ByteBuffer.wrap(new byte[16]);
        bb.putLong(id.getMostSignificantBits());
        bb.putLong(id.getLeastSignificantBits());
        return java.util.Base64.getUrlEncoder().withoutPadding().encodeToString(bb.array());
    }
}
public class LinearClassifierFactor { /** * Returns a vector ( 1 - dimensional tensor ) containing the feature weights used * to predict { @ code outputClass } . { @ code outputClass } must contain a value * for every output variable of this factor . * @ param outputClass * @ return */ public Tensor getFeatureWeightsForClass ( Assignment outputClass ) { } }
int [ ] classIndexes = getOutputVariables ( ) . assignmentToIntArray ( outputClass ) ; int [ ] dimensionNums = getOutputVariables ( ) . getVariableNumsArray ( ) ; return logWeights . slice ( dimensionNums , classIndexes ) ;
public class DownloadRequestQueue { /** * Perform construction with custom thread pool size . */ private void initialize ( Handler callbackHandler , int threadPoolSize ) { } }
mDownloadDispatchers = new DownloadDispatcher [ threadPoolSize ] ; mDelivery = new CallBackDelivery ( callbackHandler ) ;
public class TaskSchedulerBuilder { /** * Set the prefix to use for the names of newly created threads . * @ param threadNamePrefix the thread name prefix to set * @ return a new builder instance */ public TaskSchedulerBuilder threadNamePrefix ( String threadNamePrefix ) { } }
return new TaskSchedulerBuilder ( this . poolSize , this . awaitTermination , this . awaitTerminationPeriod , threadNamePrefix , this . customizers ) ;
public class ExtensionUtils {
    /**
     * Finds the Require-Bundle name from the specified jar file's manifest.
     * Fixes: the original leaked the {@link JarFile} when {@code getManifest()}
     * threw before {@code close()} was reached, and threw a
     * NullPointerException for jars without a manifest.
     *
     * @param file the jar file to inspect
     * @return the value of the "Require-Bundle" main attribute, or null when
     *         the jar has no manifest or no such attribute
     * @throws IOException if the jar cannot be opened or read
     */
    static private String getRequiredBundles(File file) throws IOException {
        try (JarFile jar = new JarFile(file)) {
            Manifest manifest = jar.getManifest();
            if (manifest == null) {
                // No META-INF/MANIFEST.MF: nothing to report.
                return null;
            }
            return manifest.getMainAttributes().getValue("Require-Bundle");
        }
    }
}
public class FaultFormatTextDecorator { /** * Converts the target fault into a formatted text . * @ see nyla . solutions . core . data . Textable # getText ( ) */ @ Override public String getText ( ) { } }
if ( this . target == null ) return null ; try { // Check if load of template needed if ( ( this . template == null || this . template . length ( ) == 0 ) && this . templateName != null ) { try { this . template = Text . loadTemplate ( templateName ) ; } catch ( Exception e ) { throw new SetupException ( "Cannot load template:" + templateName , e ) ; } } Map < Object , Object > faultMap = JavaBean . toMap ( this . target ) ; if ( this . argumentTextDecorator != null ) { this . argumentTextDecorator . setTarget ( this . target . getArgument ( ) ) ; faultMap . put ( this . argumentKeyName , this . argumentTextDecorator . getText ( ) ) ; } return Text . format ( this . template , faultMap ) ; } catch ( FormatException e ) { throw new FormatFaultException ( this . template , e ) ; }
public class ServiceDirectoryClient { /** * Convert the ProvidedServiceInstance Json String to ProvidedServiceInstance . * @ param jsonString * the ProvidedServiceInstance Json String . * @ return * the ProvidedServiceInstance Object . * @ throws JsonParseException * @ throws JsonMappingException * @ throws IOException */ private ProvidedServiceInstance jsonToProvidedServiceInstance ( String jsonString ) throws JsonParseException , JsonMappingException , IOException { } }
return deserialize ( jsonString . getBytes ( ) , ProvidedServiceInstance . class ) ;
public class Matrix2D { public void print ( ) { } }
int big = ( int ) Math . abs ( Math . max ( max ( Math . abs ( m00 ) , Math . abs ( m01 ) , Math . abs ( m02 ) ) , max ( Math . abs ( m10 ) , Math . abs ( m11 ) , Math . abs ( m12 ) ) ) ) ; int digits = 1 ; if ( Double . isNaN ( big ) || Double . isInfinite ( big ) ) { // avoid infinite loop digits = 5 ; } else { while ( ( big /= 10 ) != 0 ) digits ++ ; // cheap log ( ) } System . out . println ( nfs ( m00 , digits , 4 ) + " " + nfs ( m01 , digits , 4 ) + " " + nfs ( m02 , digits , 4 ) ) ; System . out . println ( nfs ( m10 , digits , 4 ) + " " + nfs ( m11 , digits , 4 ) + " " + nfs ( m12 , digits , 4 ) ) ; System . out . println ( ) ;
public class HighestValueFilter { /** * ~ Methods * * * * * */ @ Override public List < Metric > filter ( Map < Metric , String > extendedSortedMap , String limit ) { } }
SystemAssert . requireArgument ( extendedSortedMap != null && ! extendedSortedMap . isEmpty ( ) , "New map is not constructed successfully!" ) ; SystemAssert . requireArgument ( limit != null && ! limit . equals ( "" ) , "Limit must be provided!" ) ; List < Metric > result = new ArrayList < Metric > ( ) ; for ( Metric metric : extendedSortedMap . keySet ( ) ) { result . add ( 0 , metric ) ; } if ( result . size ( ) <= Integer . parseInt ( limit ) ) { return result ; } return result . subList ( 0 , Integer . parseInt ( limit ) ) ;
public class EvaluationTools { /** * Given a { @ link ROC } chart , export the ROC chart and precision vs . recall charts to a stand - alone HTML file * @ param roc ROC to export * @ param file File to export to */ public static void exportRocChartsToHtmlFile ( ROC roc , File file ) throws IOException { } }
String rocAsHtml = rocChartToHtml ( roc ) ; FileUtils . writeStringToFile ( file , rocAsHtml ) ;
public class CollectionsDeserializationBenchmark { /** * Benchmark to measure deserializing objects by hand */ public void timeCollectionsStreaming ( int reps ) throws IOException { } }
for ( int i = 0 ; i < reps ; ++ i ) { StringReader reader = new StringReader ( json ) ; JsonReader jr = new JsonReader ( reader ) ; jr . beginArray ( ) ; List < BagOfPrimitives > bags = new ArrayList < BagOfPrimitives > ( ) ; while ( jr . hasNext ( ) ) { jr . beginObject ( ) ; long longValue = 0 ; int intValue = 0 ; boolean booleanValue = false ; String stringValue = null ; while ( jr . hasNext ( ) ) { String name = jr . nextName ( ) ; if ( name . equals ( "longValue" ) ) { longValue = jr . nextLong ( ) ; } else if ( name . equals ( "intValue" ) ) { intValue = jr . nextInt ( ) ; } else if ( name . equals ( "booleanValue" ) ) { booleanValue = jr . nextBoolean ( ) ; } else if ( name . equals ( "stringValue" ) ) { stringValue = jr . nextString ( ) ; } else { throw new IOException ( "Unexpected name: " + name ) ; } } jr . endObject ( ) ; bags . add ( new BagOfPrimitives ( longValue , intValue , booleanValue , stringValue ) ) ; } jr . endArray ( ) ; }
public class SnorocketOWLReasoner { /** * Transforms a { @ link ClassNode } into a { @ link Node } of { @ link OWLClass } es . * @ param n may be null , in which case an empty Node is returned * @ return */ private Node < OWLClass > nodeToOwlClassNode ( au . csiro . ontology . Node n ) { } }
if ( n == null ) return new OWLClassNode ( ) ; final Set < OWLClass > classes = new HashSet < > ( ) ; for ( Object eq : n . getEquivalentConcepts ( ) ) { classes . add ( getOWLClass ( eq ) ) ; } return new OWLClassNode ( classes ) ;
public class ForwardingRuleClient { /** * Changes target URL for forwarding rule . The new target should be of the same type as the old * target . * < p > Sample code : * < pre > < code > * try ( ForwardingRuleClient forwardingRuleClient = ForwardingRuleClient . create ( ) ) { * ProjectRegionForwardingRuleName forwardingRule = ProjectRegionForwardingRuleName . of ( " [ PROJECT ] " , " [ REGION ] " , " [ FORWARDING _ RULE ] " ) ; * TargetReference targetReferenceResource = TargetReference . newBuilder ( ) . build ( ) ; * Operation response = forwardingRuleClient . setTargetForwardingRule ( forwardingRule . toString ( ) , targetReferenceResource ) ; * < / code > < / pre > * @ param forwardingRule Name of the ForwardingRule resource in which target is to be set . * @ param targetReferenceResource * @ throws com . google . api . gax . rpc . ApiException if the remote call fails */ @ BetaApi public final Operation setTargetForwardingRule ( String forwardingRule , TargetReference targetReferenceResource ) { } }
SetTargetForwardingRuleHttpRequest request = SetTargetForwardingRuleHttpRequest . newBuilder ( ) . setForwardingRule ( forwardingRule ) . setTargetReferenceResource ( targetReferenceResource ) . build ( ) ; return setTargetForwardingRule ( request ) ;
public class CloudSpannerPooledConnection {
    /**
     * Gets a handle for a client to use. This is a wrapper around the physical
     * connection, so the client can call close and it will just return the
     * connection to the pool without really closing the physical connection.
     * According to the JDBC 2.0 Optional Package spec (6.2.3), only one client
     * may have an active handle to the connection at a time, so if there is a
     * previous handle active when this is called, the previous one is forcibly
     * closed and its work rolled back.
     *
     * @return a proxy handle wrapping the physical connection
     * @throws SQLException if this pooled connection is already closed or the
     *         previous handle cannot be cleaned up
     */
    @Override
    public ICloudSpannerConnection getConnection() throws SQLException {
        if (con == null) {
            // Before throwing the exception, let's notify the registered
            // listeners about the error
            SQLException sqlException = new CloudSpannerSQLException(
                    "This PooledConnection has already been closed.", Code.FAILED_PRECONDITION);
            fireConnectionFatalError(sqlException);
            throw sqlException;
        }
        // If any error occurs while opening a new connection, the listeners
        // have to be notified. This gives a chance to connection pools to
        // eliminate bad pooled connections.
        try {
            // Only one connection can be open at a time from this
            // PooledConnection. See JDBC 2.0 Optional
            // Package spec section 6.2.3
            if (last != null) {
                last.close();
                // Roll back any uncommitted work left by the previous handle.
                if (!con.getAutoCommit()) {
                    rollbackAndIgnoreException();
                }
                con.clearWarnings();
            }
            /*
             * In XA-mode, autocommit is handled in PGXAConnection, because it depends on whether an
             * XA-transaction is open or not
             */
            if (!isXA) {
                con.setAutoCommit(autoCommit);
            }
        } catch (SQLException sqlException) {
            fireConnectionFatalError(sqlException);
            throw (SQLException) sqlException.fillInStackTrace();
        }
        // Hand out a dynamic proxy so close() on the handle returns the
        // connection to the pool rather than closing the physical connection.
        ConnectionHandler handler = new ConnectionHandler(con);
        last = handler;
        ICloudSpannerConnection proxyCon = (ICloudSpannerConnection) Proxy.newProxyInstance(
                getClass().getClassLoader(),
                new Class[] { Connection.class, ICloudSpannerConnection.class }, handler);
        last.setProxy(proxyCon);
        return proxyCon;
    }
}
public class RebalanceTaskInfo { /** * Returns the total count of partitions across all stores . * @ return returns the total count of partitions across all stores . */ public synchronized int getPartitionStoreCount ( ) { } }
int count = 0 ; for ( String store : storeToPartitionIds . keySet ( ) ) { count += storeToPartitionIds . get ( store ) . size ( ) ; } return count ;
public class IdentifierSequences { /** * Returns the last index ( ID count ) returned for a specific * value of the { @ code id } attribute ( without incrementing * the count ) . * @ param id the ID for which the last count will be retrieved * @ return the count */ public Integer getPreviousIDSeq ( final String id ) { } }
Validate . notNull ( id , "ID cannot be null" ) ; final Integer count = this . idCounts . get ( id ) ; if ( count == null ) { throw new TemplateProcessingException ( "Cannot obtain previous ID count for ID \"" + id + "\"" ) ; } return Integer . valueOf ( count . intValue ( ) - 1 ) ;
public class MapsInner { /** * Gets an integration account map . * @ param resourceGroupName The resource group name . * @ param integrationAccountName The integration account name . * @ param mapName The integration account map name . * @ throws IllegalArgumentException thrown if parameters fail the validation * @ throws CloudException thrown if the request is rejected by server * @ throws RuntimeException all other wrapped checked exceptions if the request fails to be sent * @ return the IntegrationAccountMapInner object if successful . */ public IntegrationAccountMapInner get ( String resourceGroupName , String integrationAccountName , String mapName ) { } }
return getWithServiceResponseAsync ( resourceGroupName , integrationAccountName , mapName ) . toBlocking ( ) . single ( ) . body ( ) ;
public class ElementFilter { /** * Returns a list of packages in { @ code elements } . * @ return a list of packages in { @ code elements } * @ param elements the elements to filter */ public static List < PackageElement > packagesIn ( Iterable < ? extends Element > elements ) { } }
return listFilter ( elements , PACKAGE_KIND , PackageElement . class ) ;
public class MongoDBFactory { /** * Create a MongoDB client with params . * @ param hosts list of hosts of the form * " mongodb : / / [ username : password @ ] host1 [ : port1 ] [ , host2 [ : port2 ] , . . . [ , hostN [ : portN ] ] ] [ / [ database ] [ ? options ] ] " * @ return MongoDB client * @ see < a href = " https : / / docs . mongodb . com / manual / reference / connection - string / " > Mongo docs < / a > */ public static MongoClient create ( String hosts ) { } }
MongoClientURI connectionString = new MongoClientURI ( hosts ) ; return new MongoClient ( connectionString ) ;
public class CreateApplicationRequestMarshaller {
    /**
     * Marshall the given parameter object into the protocol representation.
     *
     * @param createApplicationRequest the request to marshall; must not be null
     * @param protocolMarshaller the marshaller that receives each bound field
     * @throws SdkClientException if the request is null or marshalling fails
     */
    public void marshall(CreateApplicationRequest createApplicationRequest,
            ProtocolMarshaller protocolMarshaller) {
        if (createApplicationRequest == null) {
            throw new SdkClientException("Invalid argument passed to marshall(...)");
        }
        try {
            // Emit each field through its static binding descriptor.
            protocolMarshaller.marshall(createApplicationRequest.getApplicationName(), APPLICATIONNAME_BINDING);
            protocolMarshaller.marshall(createApplicationRequest.getComputePlatform(), COMPUTEPLATFORM_BINDING);
        } catch (Exception e) {
            // AWS SDK convention: wrap any marshalling failure, preserving the cause.
            throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
        }
    }
}
public class CPDefinitionLinkPersistenceImpl {
    /**
     * Clears the cache for all cp definition links.
     * The {@link EntityCache} and {@link FinderCache} are both cleared by this method.
     */
    @Override
    public void clearCache() {
        // Drop every cached CPDefinitionLink entity...
        entityCache.clearCache(CPDefinitionLinkImpl.class);
        // ...and every finder-cache region: single-entity lookups plus the
        // paginated and unpaginated list queries.
        finderCache.clearCache(FINDER_CLASS_NAME_ENTITY);
        finderCache.clearCache(FINDER_CLASS_NAME_LIST_WITH_PAGINATION);
        finderCache.clearCache(FINDER_CLASS_NAME_LIST_WITHOUT_PAGINATION);
    }
}
public class SwiftConnectionManager { /** * Creates custom retry handler to be used if HTTP exception happens * @ return retry handler */ private HttpRequestRetryHandler getRetryHandler ( ) { } }
final HttpRequestRetryHandler myRetryHandler = new HttpRequestRetryHandler ( ) { public boolean retryRequest ( IOException exception , int executionCount , HttpContext context ) { if ( executionCount >= connectionConfiguration . getExecutionCount ( ) ) { // Do not retry if over max retry count LOG . debug ( "Execution count {} is bigger than threshold. Stop" , executionCount ) ; return false ; } if ( exception instanceof NoHttpResponseException ) { LOG . debug ( "NoHttpResponseException exception. Retry count {}" , executionCount ) ; return true ; } if ( exception instanceof UnknownHostException ) { LOG . debug ( "UnknownHostException. Retry count {}" , executionCount ) ; return true ; } if ( exception instanceof ConnectTimeoutException ) { LOG . debug ( "ConnectTimeoutException. Retry count {}" , executionCount ) ; return true ; } if ( exception instanceof SocketTimeoutException || exception . getClass ( ) == SocketTimeoutException . class || exception . getClass ( ) . isInstance ( SocketTimeoutException . class ) ) { // Connection refused LOG . debug ( "socketTimeoutException Retry count {}" , executionCount ) ; return true ; } if ( exception instanceof InterruptedIOException ) { // Timeout LOG . debug ( "InterruptedIOException Retry count {}" , executionCount ) ; return true ; } if ( exception instanceof SSLException ) { LOG . debug ( "SSLException Retry count {}" , executionCount ) ; return true ; } final HttpClientContext clientContext = HttpClientContext . adapt ( context ) ; final HttpRequest request = clientContext . getRequest ( ) ; boolean idempotent = ! ( request instanceof HttpEntityEnclosingRequest ) ; if ( idempotent ) { LOG . debug ( "HttpEntityEnclosingRequest. Retry count {}" , executionCount ) ; return true ; } LOG . debug ( "Retry stopped. Retry count {}" , executionCount ) ; return false ; } } ; return myRetryHandler ;
public class HttpOutboundServiceContextImpl { /** * Retrieve the next buffer of the response message ' s body . This will give * the buffer without any modifications , avoiding decompression or chunked * encoding removal . * A null buffer will be returned if there is no more data to get . * The caller is responsible for releasing these buffers when complete as the HTTP Channel does not keep track of them . * @ return WsByteBuffer * @ throws IOException * - - if a socket exceptions happens * @ throws IllegalHttpBodyException * - - if the body was malformed */ @ Override public WsByteBuffer getRawResponseBodyBuffer ( ) throws IOException , IllegalHttpBodyException { } }
if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) { Tr . entry ( tc , "getRawResponseBodyBuffer(sync)" ) ; } setRawBody ( true ) ; WsByteBuffer buffer = getResponseBodyBuffer ( ) ; if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) { Tr . exit ( tc , "getRawResponseBodyBuffer(sync): " + buffer ) ; } return buffer ;
public class XMLRPCClient { /** * Cancel a specific asynchronous call . * @ param id The id of the call as returned by the callAsync method . */ public void cancel ( long id ) { } }
// Lookup the background call for the given id . Caller cancel = backgroundCalls . get ( id ) ; if ( cancel == null ) { return ; } // Cancel the thread cancel . cancel ( ) ; try { // Wait for the thread cancel . join ( ) ; } catch ( InterruptedException ex ) { // Ignore this }
public class CmsADEManager {
    /**
     * Saves an element list to the user additional infos.<p>
     *
     * @param cms the cms context
     * @param elementList the element list
     * @param listKey the list key
     * @throws CmsException if something goes wrong
     */
    private void saveElementList(CmsObject cms, List<CmsContainerElementBean> elementList,
            String listKey) throws CmsException {
        // limit the favorite list size to avoid the additional info size limit
        List<CmsContainerElementBean> limited = elementList.size() > DEFAULT_ELEMENT_LIST_SIZE
                ? elementList.subList(0, DEFAULT_ELEMENT_LIST_SIZE)
                : elementList;
        Set<String> excludedSettings = new HashSet<String>();
        // do not store the template contexts, since dragging an element into the page
        // which might be invisible doesn't make sense
        excludedSettings.add(CmsTemplateContextInfo.SETTING);
        JSONArray data = new JSONArray();
        for (CmsContainerElementBean element : limited) {
            data.put(elementToJson(element, excludedSettings));
        }
        CmsUser user = cms.getRequestContext().getCurrentUser();
        user.setAdditionalInfo(listKey, data.toString());
        cms.writeUser(user);
    }
}
public class MetadataService { /** * Creates a new { @ link Token } with the specified { @ code appId } , { @ code isAdmin } and an auto - generated * secret . */ public CompletableFuture < Revision > createToken ( Author author , String appId , boolean isAdmin ) { } }
return createToken ( author , appId , SECRET_PREFIX + UUID . randomUUID ( ) , isAdmin ) ;
public class SoundLoader {
    /**
     * Read the data from the resource manager.
     * If the requested sound is missing and verbose sound debugging is on,
     * falls back to the configured default clip (first in its bundle, then
     * without a bundle); if nothing can be located, the original
     * FileNotFoundException is rethrown.
     *
     * @param bundle the resource bundle containing the sound
     * @param path the path of the sound within the bundle
     * @return the raw clip bytes
     * @throws IOException if neither the sound nor any fallback can be read
     */
    protected byte[] loadClipData(String bundle, String path) throws IOException {
        InputStream clipin = null;
        try {
            clipin = getSound(bundle, path);
        } catch (FileNotFoundException fnfe) {
            // only play the default sound if we have verbose sound debugging turned on.
            if (JavaSoundPlayer._verbose.getValue()) {
                log.warning("Could not locate sound data", "bundle", bundle, "path", path);
                if (_defaultClipPath != null) {
                    // First try the default clip inside its bundle, then fall
                    // back to resolving the bare path.
                    try {
                        clipin = _rmgr.getResource(_defaultClipBundle, _defaultClipPath);
                    } catch (FileNotFoundException fnfe3) {
                        try {
                            clipin = _rmgr.getResource(_defaultClipPath);
                        } catch (FileNotFoundException fnfe4) {
                            log.warning("Additionally, the default fallback sound could not be located",
                                    "bundle", _defaultClipBundle, "path", _defaultClipPath);
                        }
                    }
                } else {
                    log.warning("No fallback default sound specified!");
                }
            }
            // if we couldn't load the default, rethrow
            if (clipin == null) {
                throw fnfe;
            }
        }
        return StreamUtil.toByteArray(clipin);
    }
}
public class SimpleJob { /** * Job { @ link Summarizer } class setting . * @ param clazz { @ link Summarizer } class * @ param combine If true is set the combiner in the Summarizer * @ param cache In - Mapper Combine output cahce number . Default value is 200. * @ return this */ public SimpleJob setSummarizer ( Class < ? extends Reducer < Key , Value , Key , Value > > clazz , boolean combine , int cache ) { } }
super . setReducerClass ( clazz ) ; reducer = true ; if ( combine ) { setCombiner ( clazz , cache ) ; } return this ;
public class JavacFileManager {
    /**
     * Open a new zip file directory, and cache it.
     * When the symbol file is enabled and the requested jar is the default
     * bootclasspath rt.jar, the hidden ct.sym file is substituted; the archive
     * is always cached under the ORIGINAL file name.
     *
     * @param zipFileName the zip/jar file to open
     * @param useOptimizedZip whether to use the indexed ZipFileIndex implementation
     * @return the opened archive (a MissingArchive on open failure), already cached
     * @throws IOException re-thrown only for ZipFormatException; other IO
     *         problems are logged and yield a MissingArchive
     */
    private Archive openArchive(File zipFileName, boolean useOptimizedZip) throws IOException {
        File origZipFileName = zipFileName;
        if (symbolFileEnabled && locations.isDefaultBootClassPathRtJar(zipFileName)) {
            File file = zipFileName.getParentFile().getParentFile(); // ${java.home}
            if (new File(file.getName()).equals(new File("jre")))
                file = file.getParentFile();
            // file == ${jdk.home}
            for (String name : symbolFileLocation)
                file = new File(file, name);
            // file == ${jdk.home}/lib/ct.sym
            if (file.exists())
                zipFileName = file;
        }
        Archive archive;
        try {
            ZipFile zdir = null;
            boolean usePreindexedCache = false;
            String preindexCacheLocation = null;
            if (!useOptimizedZip) {
                zdir = new ZipFile(zipFileName);
            } else {
                usePreindexedCache = options.isSet("usezipindex");
                preindexCacheLocation = options.get("java.io.tmpdir");
                String optCacheLoc = options.get("cachezipindexdir");
                if (optCacheLoc != null && optCacheLoc.length() != 0) {
                    // Strip surrounding double quotes from the configured dir, if present.
                    if (optCacheLoc.startsWith("\"")) {
                        if (optCacheLoc.endsWith("\"")) {
                            optCacheLoc = optCacheLoc.substring(1, optCacheLoc.length() - 1);
                        } else {
                            optCacheLoc = optCacheLoc.substring(1);
                        }
                    }
                    File cacheDir = new File(optCacheLoc);
                    if (cacheDir.exists() && cacheDir.canWrite()) {
                        preindexCacheLocation = optCacheLoc;
                        // Ensure the location ends with a path separator.
                        if (!preindexCacheLocation.endsWith("/")
                                && !preindexCacheLocation.endsWith(File.separator)) {
                            preindexCacheLocation += File.separator;
                        }
                    }
                }
            }
            // origZipFileName == zipFileName means ct.sym was NOT substituted.
            if (origZipFileName == zipFileName) {
                if (!useOptimizedZip) {
                    archive = new ZipArchive(this, zdir);
                } else {
                    archive = new ZipFileIndexArchive(this,
                            zipFileIndexCache.getZipFileIndex(zipFileName, null, usePreindexedCache,
                                    preindexCacheLocation, options.isSet("writezipindexfiles")));
                }
            } else {
                if (!useOptimizedZip) {
                    archive = new SymbolArchive(this, origZipFileName, zdir, symbolFilePrefix);
                } else {
                    archive = new ZipFileIndexArchive(this,
                            zipFileIndexCache.getZipFileIndex(zipFileName, symbolFilePrefix,
                                    usePreindexedCache, preindexCacheLocation,
                                    options.isSet("writezipindexfiles")));
                }
            }
        } catch (FileNotFoundException ex) {
            archive = new MissingArchive(zipFileName);
        } catch (ZipFileIndex.ZipFormatException zfe) {
            throw zfe;
        } catch (IOException ex) {
            if (zipFileName.exists())
                log.error("error.reading.file", zipFileName, getMessage(ex));
            archive = new MissingArchive(zipFileName);
        }
        // Cache under the original name so future lookups hit even when the
        // symbol file was substituted.
        archives.put(origZipFileName, archive);
        return archive;
    }
}
public class KeePassDatabase { /** * Opens a KeePass database with the given password and keyfile stream and * returns the KeePassFile for further processing . * If the database cannot be decrypted with the provided password and * keyfile stream an exception will be thrown . * @ param password * the password to open the database * @ param keyFileStream * the keyfile to open the database as stream * @ return a KeePassFile * @ see KeePassFile */ public KeePassFile openDatabase ( String password , InputStream keyFileStream ) { } }
if ( password == null ) { throw new IllegalArgumentException ( MSG_EMPTY_MASTER_KEY ) ; } if ( keyFileStream == null ) { throw new IllegalArgumentException ( "You must provide a non-empty KeePass keyfile stream." ) ; } try { byte [ ] passwordBytes = password . getBytes ( UTF_8 ) ; byte [ ] hashedPassword = Sha256 . hash ( passwordBytes ) ; byte [ ] protectedBuffer = new KeyFileReader ( ) . readKeyFile ( keyFileStream ) ; return new KeePassDatabaseReader ( keepassHeader ) . decryptAndParseDatabase ( ByteUtils . concat ( hashedPassword , protectedBuffer ) , keepassFile ) ; } catch ( UnsupportedEncodingException e ) { throw new UnsupportedOperationException ( MSG_UTF8_NOT_SUPPORTED , e ) ; }
public class DocumentIdentifier { /** * The operating system platform . * @ return The operating system platform . * @ see PlatformType */ public java . util . List < String > getPlatformTypes ( ) { } }
if ( platformTypes == null ) { platformTypes = new com . amazonaws . internal . SdkInternalList < String > ( ) ; } return platformTypes ;
public class RedshiftDestinationUpdateMarshaller {
    /**
     * Marshall the given parameter object.
     *
     * @param redshiftDestinationUpdate the update object whose fields are forwarded to the marshaller; must not be null
     * @param protocolMarshaller the protocol-level marshaller that receives each (value, binding) pair
     * @throws SdkClientException if the input is null or any field fails to marshall
     */
    public void marshall(RedshiftDestinationUpdate redshiftDestinationUpdate, ProtocolMarshaller protocolMarshaller) {
        if (redshiftDestinationUpdate == null) {
            throw new SdkClientException("Invalid argument passed to marshall(...)");
        }
        try {
            // Each getter is paired with its static field binding; the protocol
            // marshaller decides how (and whether) each value is serialized.
            protocolMarshaller.marshall(redshiftDestinationUpdate.getRoleARN(), ROLEARN_BINDING);
            protocolMarshaller.marshall(redshiftDestinationUpdate.getClusterJDBCURL(), CLUSTERJDBCURL_BINDING);
            protocolMarshaller.marshall(redshiftDestinationUpdate.getCopyCommand(), COPYCOMMAND_BINDING);
            protocolMarshaller.marshall(redshiftDestinationUpdate.getUsername(), USERNAME_BINDING);
            protocolMarshaller.marshall(redshiftDestinationUpdate.getPassword(), PASSWORD_BINDING);
            protocolMarshaller.marshall(redshiftDestinationUpdate.getRetryOptions(), RETRYOPTIONS_BINDING);
            protocolMarshaller.marshall(redshiftDestinationUpdate.getS3Update(), S3UPDATE_BINDING);
            protocolMarshaller.marshall(redshiftDestinationUpdate.getProcessingConfiguration(), PROCESSINGCONFIGURATION_BINDING);
            protocolMarshaller.marshall(redshiftDestinationUpdate.getS3BackupMode(), S3BACKUPMODE_BINDING);
            protocolMarshaller.marshall(redshiftDestinationUpdate.getS3BackupUpdate(), S3BACKUPUPDATE_BINDING);
            protocolMarshaller.marshall(redshiftDestinationUpdate.getCloudWatchLoggingOptions(), CLOUDWATCHLOGGINGOPTIONS_BINDING);
        } catch (Exception e) {
            // Any marshalling failure is wrapped in the SDK client exception,
            // preserving the original cause.
            throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
        }
    }
}
public class ListeFilme { /** * Get the age of the film list . * @ return Age in seconds . */ public int getAge ( ) { } }
int ret = 0 ; Date now = new Date ( System . currentTimeMillis ( ) ) ; Date filmDate = getAgeAsDate ( ) ; if ( filmDate != null ) { ret = Math . round ( ( now . getTime ( ) - filmDate . getTime ( ) ) / ( 1000 ) ) ; if ( ret < 0 ) { ret = 0 ; } } return ret ;
public class ASegment {
    /**
     * Get the next punctuation pair word from the current position
     * of the input stream. The opening punctuation character and the text
     * enclosed by the pair are handled separately, each subject to
     * stop-word filtering.
     *
     * @param c   the opening punctuation character just read
     * @param pos position of that character in the input
     * @return IWord could be null and that means we reached a stop word
     * @throws IOException
     */
    protected IWord getNextPunctuationPairWord(int c, int pos) throws IOException {
        IWord w = null, w2 = null;
        // read the text enclosed by the punctuation pair (may be null/empty)
        String text = getPairPunctuationText(c);

        // handle the punctuation character itself, unless stop-word
        // clearing is enabled and it matches a stop word
        String str = String.valueOf((char) c);
        if (!(config.CLEAR_STOPWORD && dic.match(ILexicon.STOP_WORD, str))) {
            w = new Word(str, IWord.T_PUNCTUATION);
            w.setPartSpeech(IWord.PUNCTUATION);
            w.setPosition(pos);
        }

        // handle the pair text: if the punctuation was cleared, the pair
        // word becomes the return value; otherwise it is buffered in the
        // word pool to be emitted after the punctuation word
        if (text != null && text.length() > 0
                && !(config.CLEAR_STOPWORD && dic.match(ILexicon.STOP_WORD, text))) {
            w2 = new Word(text, ILexicon.CJK_WORD);
            w2.setPartSpeech(IWord.PPT_POSPEECH);
            w2.setPosition(pos + 1);
            if (w == null) w = w2;
            else wordPool.add(w2);
        }

        /* here:
         * 1. the punctuation is clear.
         * 2. the pair text is null or being cleared.
         * @date 2013-09-06 */
        if (w == null && w2 == null) {
            return null;
        }
        return w;
    }
}
public class DBInitializerHelper { /** * Returns path where SQL scripts for database initialization is stored . */ public static String scriptPath ( String dbDialect , boolean multiDb ) { } }
String suffix = multiDb ? "m" : "s" ; String sqlPath = null ; if ( dbDialect . startsWith ( DBConstants . DB_DIALECT_ORACLE ) ) { sqlPath = "/conf/storage/jcr-" + suffix + "jdbc.ora.sql" ; } else if ( dbDialect . startsWith ( DBConstants . DB_DIALECT_PGSQL ) ) { sqlPath = "/conf/storage/jcr-" + suffix + "jdbc.pgsql.sql" ; } else if ( dbDialect . equals ( DBConstants . DB_DIALECT_MYSQL ) ) { sqlPath = "/conf/storage/jcr-" + suffix + "jdbc.mysql.sql" ; } else if ( dbDialect . equals ( DBConstants . DB_DIALECT_MYSQL_NDB ) ) { sqlPath = "/conf/storage/jcr-" + suffix + "jdbc.mysql-ndb.sql" ; } else if ( dbDialect . equals ( DBConstants . DB_DIALECT_MYSQL_NDB_UTF8 ) ) { sqlPath = "/conf/storage/jcr-" + suffix + "jdbc.mysql-ndb-utf8.sql" ; } else if ( dbDialect . equals ( DBConstants . DB_DIALECT_MYSQL_MYISAM ) ) { sqlPath = "/conf/storage/jcr-" + suffix + "jdbc.mysql-myisam.sql" ; } else if ( dbDialect . equals ( DBConstants . DB_DIALECT_MYSQL_UTF8 ) ) { sqlPath = "/conf/storage/jcr-" + suffix + "jdbc.mysql-utf8.sql" ; } else if ( dbDialect . equals ( DBConstants . DB_DIALECT_MYSQL_MYISAM_UTF8 ) ) { sqlPath = "/conf/storage/jcr-" + suffix + "jdbc.mysql-myisam-utf8.sql" ; } else if ( dbDialect . startsWith ( DBConstants . DB_DIALECT_MSSQL ) ) { sqlPath = "/conf/storage/jcr-" + suffix + "jdbc.mssql.sql" ; } else if ( dbDialect . startsWith ( DBConstants . DB_DIALECT_DERBY ) ) { sqlPath = "/conf/storage/jcr-" + suffix + "jdbc.derby.sql" ; } else if ( dbDialect . equals ( DBConstants . DB_DIALECT_DB2V8 ) ) { sqlPath = "/conf/storage/jcr-" + suffix + "jdbc.db2v8.sql" ; } else if ( dbDialect . startsWith ( DBConstants . DB_DIALECT_DB2 ) ) { sqlPath = "/conf/storage/jcr-" + suffix + "jdbc.db2.sql" ; } else if ( dbDialect . startsWith ( DBConstants . DB_DIALECT_SYBASE ) ) { sqlPath = "/conf/storage/jcr-" + suffix + "jdbc.sybase.sql" ; } else if ( dbDialect . startsWith ( DBConstants . 
DB_DIALECT_INGRES ) ) { sqlPath = "/conf/storage/jcr-" + suffix + "jdbc.ingres.sql" ; } else if ( dbDialect . startsWith ( DBConstants . DB_DIALECT_H2 ) ) { sqlPath = "/conf/storage/jcr-" + suffix + "jdbc.h2.sql" ; } else if ( dbDialect . startsWith ( DBConstants . DB_DIALECT_HSQLDB ) ) { sqlPath = "/conf/storage/jcr-" + suffix + "jdbc.sql" ; } else { sqlPath = "/conf/storage/jcr-" + suffix + "jdbc.sql" ; } return sqlPath ;
public class Environment { /** * to be used with Runtime . exec ( String [ ] cmdarray , String [ ] envp ) */ String [ ] toArray ( ) { } }
String [ ] arr = new String [ super . size ( ) ] ; Enumeration it = super . keys ( ) ; int i = - 1 ; while ( it . hasMoreElements ( ) ) { String key = ( String ) it . nextElement ( ) ; String val = ( String ) get ( key ) ; i ++ ; arr [ i ] = key + "=" + val ; } return arr ;
public class BagArrayFrom {
    /**
     * Builds a BagArray from a classpath resource.
     * Delegates to the three-argument overload, supplying a fallback
     * supplier that yields null (i.e. no explicit mime type is provided;
     * presumably the overload then derives or defaults it — verify against
     * the overload's implementation).
     *
     * @param context class whose classloader is used to locate the resource
     * @param name    resource name
     * @return the parsed BagArray
     */
    static public BagArray resource(Class context, String name) {
        return resource(context, name, () -> null);
    }
}
public class Block { /** * The type of entity . The following can be returned : * < ul > * < li > * < i > KEY < / i > - An identifier for a field on the document . * < / li > * < li > * < i > VALUE < / i > - The field text . * < / li > * < / ul > * < code > EntityTypes < / code > isn ' t returned by < code > DetectDocumentText < / code > and * < code > GetDocumentTextDetection < / code > . * < b > NOTE : < / b > This method appends the values to the existing list ( if any ) . Use * { @ link # setEntityTypes ( java . util . Collection ) } or { @ link # withEntityTypes ( java . util . Collection ) } if you want to * override the existing values . * @ param entityTypes * The type of entity . The following can be returned : < / p > * < ul > * < li > * < i > KEY < / i > - An identifier for a field on the document . * < / li > * < li > * < i > VALUE < / i > - The field text . * < / li > * < / ul > * < code > EntityTypes < / code > isn ' t returned by < code > DetectDocumentText < / code > and * < code > GetDocumentTextDetection < / code > . * @ return Returns a reference to this object so that method calls can be chained together . * @ see EntityType */ public Block withEntityTypes ( String ... entityTypes ) { } }
if ( this . entityTypes == null ) { setEntityTypes ( new java . util . ArrayList < String > ( entityTypes . length ) ) ; } for ( String ele : entityTypes ) { this . entityTypes . add ( ele ) ; } return this ;
public class TaskStatus {
    /**
     * Update the status of the task by copying every mutable field from the
     * given snapshot into this object.
     *
     * @param status updated status; its fields overwrite this instance's state
     */
    synchronized void statusUpdate(TaskStatus status) {
        setProgress(status.getProgress());
        this.runState = status.getRunState();
        this.stateString = status.getStateString();
        this.nextRecordRange = status.getNextRecordRange();
        setDiagnosticInfo(status.getDiagnosticInfo());
        // Timestamps are only adopted when the incoming status actually
        // carries them (> 0); otherwise the existing values are kept.
        if (status.getStartTime() > 0) {
            this.startTime = status.getStartTime();
        }
        if (status.getFinishTime() > 0) {
            setFinishTime(status.getFinishTime());
        }
        this.phase = status.getPhase();
        this.counters = status.getCounters();
        this.outputSize = status.outputSize;
    }
}
public class TransliteratorParser {
    /**
     * Return true if the given rule looks like a pragma, i.e. it begins with
     * the token "use " (matched via Utility.parsePattern, which returns a
     * non-negative offset on success).
     *
     * @param pos   offset to the first non-whitespace character of the rule.
     * @param limit pointer past the last character of the rule.
     */
    static boolean resemblesPragma(String rule, int pos, int limit) {
        // Must start with /use\s/i
        return Utility.parsePattern(rule, pos, limit, "use ", null) >= 0;
    }
}
public class ApplicationScoreService { /** * Process scores for each widget based on widget settings * @ param widgets List of widgets * @ param scoreCriteriaSettings Score Criteria Settings * @ return List of widget scores */ private List < ScoreWeight > processWidgetScores ( List < Widget > widgets , ScoreCriteriaSettings scoreCriteriaSettings ) { } }
List < ScoreWeight > scoreWeights = new ArrayList < > ( ) ; Map < String , ScoreComponentSettings > scoreParamSettingsMap = generateWidgetSettings ( scoreCriteriaSettings ) ; Set < String > widgetTypes = scoreParamSettingsMap . keySet ( ) ; if ( widgetTypes . isEmpty ( ) ) { return null ; } // For each widget calculate score for ( String widgetType : widgetTypes ) { ScoreComponentSettings scoreSettings = scoreParamSettingsMap . get ( widgetType ) ; WidgetScore widgetScore = getWidgetScoreByType ( widgetType ) ; ScoreWeight score = widgetScore . processWidgetScore ( getWidgetByName ( widgets , widgetType ) , scoreSettings ) ; LOGGER . info ( "Widget for type: " + widgetType + " score" + score ) ; if ( null != score ) { setWidgetAlert ( score , scoreCriteriaSettings . getComponentAlert ( ) ) ; scoreWeights . add ( score ) ; } } return scoreWeights ;
public class FacesConfigTypeImpl { /** * Returns all < code > navigation - rule < / code > elements * @ return list of < code > navigation - rule < / code > */ public List < FacesConfigNavigationRuleType < FacesConfigType < T > > > getAllNavigationRule ( ) { } }
List < FacesConfigNavigationRuleType < FacesConfigType < T > > > list = new ArrayList < FacesConfigNavigationRuleType < FacesConfigType < T > > > ( ) ; List < Node > nodeList = childNode . get ( "navigation-rule" ) ; for ( Node node : nodeList ) { FacesConfigNavigationRuleType < FacesConfigType < T > > type = new FacesConfigNavigationRuleTypeImpl < FacesConfigType < T > > ( this , "navigation-rule" , childNode , node ) ; list . add ( type ) ; } return list ;
public class CloudPlatform {
    /**
     * IoT Connector configuration: creates the Camel AMQP component pointing at
     * the broker resolved from the AMQP_SERVICE_HOST/AMQP_SERVICE_PORT
     * environment (defaulting to localhost:5672).
     *
     * @param amqpBrokerUrl  broker host name
     * @param amqpBrokerPort broker port
     * @return configured AMQP 1.0 component
     */
    @Bean
    AMQPComponent amqp(@Value("${AMQP_SERVICE_HOST:localhost}") String amqpBrokerUrl,
                       @Value("${AMQP_SERVICE_PORT:5672}") int amqpBrokerPort) throws MalformedURLException {
        LOG.debug("About to create AMQP component {}:{}", amqpBrokerUrl, amqpBrokerPort);
        return amqp10Component("amqp://" + amqpBrokerUrl + ":" + amqpBrokerPort);
    }
}
public class SSHLauncher {
    /**
     * Renders this launcher's configuration as a single human-readable line
     * for logging. Credential id, java path and host-key strategy are
     * null-guarded so the output is always well-formed.
     */
    public String logConfiguration() {
        final StringBuilder sb = new StringBuilder("SSHLauncher{");
        sb.append("host='").append(getHost()).append('\'');
        sb.append(", port=").append(getPort());
        sb.append(", credentialsId='").append(Util.fixNull(credentialsId)).append('\'');
        sb.append(", jvmOptions='").append(getJvmOptions()).append('\'');
        sb.append(", javaPath='").append(Util.fixNull(javaPath)).append('\'');
        sb.append(", prefixStartSlaveCmd='").append(getPrefixStartSlaveCmd()).append('\'');
        sb.append(", suffixStartSlaveCmd='").append(getSuffixStartSlaveCmd()).append('\'');
        sb.append(", launchTimeoutSeconds=").append(getLaunchTimeoutSeconds());
        sb.append(", maxNumRetries=").append(getMaxNumRetries());
        sb.append(", retryWaitTime=").append(getRetryWaitTime());
        // Only the strategy's class name is logged, never its contents.
        sb.append(", sshHostKeyVerificationStrategy=").append(
            sshHostKeyVerificationStrategy != null ? sshHostKeyVerificationStrategy.getClass().getName() : "None");
        sb.append(", tcpNoDelay=").append(getTcpNoDelay());
        sb.append(", trackCredentials=").append(getTrackCredentials());
        sb.append('}');
        return sb.toString();
    }
}
public class UnorderedCollection { /** * Swaps the contents of this { @ link UnorderedCollection } with another one storing the same elements . This operation * runs in constant time , by only swapping storage references . * @ param other * the { @ link UnorderedCollection } to swap contents with . */ public void swap ( UnorderedCollection < E > other ) { } }
int sizeTmp = size ; size = other . size ; other . size = sizeTmp ; storage . swap ( other . storage ) ;
public class SnowflakeAzureClient { /** * Handles exceptions thrown by Azure Storage * It will retry transient errors as defined by the Azure Client retry policy * It will re - create the client if the SAS token has expired , and re - try * @ param ex the exception to handle * @ param retryCount current number of retries , incremented by the caller before each call * @ param operation string that indicates the function / operation that was taking place , * when the exception was raised , for example " upload " * @ param connection the current SFSession object used by the client * @ param command the command attempted at the time of the exception * @ param azClient the current Snowflake Azure client object * @ throws SnowflakeSQLException exceptions not handled */ private static void handleAzureException ( Exception ex , int retryCount , String operation , SFSession connection , String command , SnowflakeAzureClient azClient ) throws SnowflakeSQLException { } }
// no need to retry if it is invalid key exception if ( ex . getCause ( ) instanceof InvalidKeyException ) { // Most likely cause is that the unlimited strength policy files are not installed // Log the error and throw a message that explains the cause SnowflakeFileTransferAgent . throwJCEMissingError ( operation , ex ) ; } if ( ( ( StorageException ) ex ) . getHttpStatusCode ( ) == 403 ) { // A 403 indicates that the SAS token has expired , // we need to refresh the Azure client with the new token SnowflakeFileTransferAgent . renewExpiredToken ( connection , command , azClient ) ; } if ( ex instanceof StorageException ) { StorageException se = ( StorageException ) ex ; // If we have exceeded the max number of retries , propagate the error if ( retryCount > azClient . getMaxRetries ( ) ) { throw new SnowflakeSQLException ( se , SqlState . SYSTEM_ERROR , ErrorCode . AZURE_SERVICE_ERROR . getMessageCode ( ) , operation , se . getErrorCode ( ) , se . getExtendedErrorInformation ( ) , se . getHttpStatusCode ( ) , se . getMessage ( ) ) ; } else { logger . debug ( "Encountered exception ({}) during {}, retry count: {}" , ex . getMessage ( ) , operation , retryCount ) ; logger . debug ( "Stack trace: " , ex ) ; // exponential backoff up to a limit int backoffInMillis = azClient . getRetryBackoffMin ( ) ; if ( retryCount > 1 ) { backoffInMillis <<= ( Math . min ( retryCount - 1 , azClient . getRetryBackoffMaxExponent ( ) ) ) ; } try { logger . debug ( "Sleep for {} milliseconds before retry" , backoffInMillis ) ; Thread . sleep ( backoffInMillis ) ; } catch ( InterruptedException ex1 ) { // ignore } if ( se . getHttpStatusCode ( ) == 403 ) { // A 403 indicates that the SAS token has expired , // we need to refresh the Azure client with the new token SnowflakeFileTransferAgent . renewExpiredToken ( connection , command , azClient ) ; } } } else { if ( ex instanceof InterruptedException || SnowflakeUtil . 
getRootCause ( ex ) instanceof SocketTimeoutException ) { if ( retryCount > azClient . getMaxRetries ( ) ) { throw new SnowflakeSQLException ( ex , SqlState . SYSTEM_ERROR , ErrorCode . IO_ERROR . getMessageCode ( ) , "Encountered exception during " + operation + ": " + ex . getMessage ( ) ) ; } else { logger . debug ( "Encountered exception ({}) during {}, retry count: {}" , ex . getMessage ( ) , operation , retryCount ) ; } } else { throw new SnowflakeSQLException ( ex , SqlState . SYSTEM_ERROR , ErrorCode . IO_ERROR . getMessageCode ( ) , "Encountered exception during " + operation + ": " + ex . getMessage ( ) ) ; } }
public class ByteArrayDiskQueue { /** * Creates a new disk - based queue of byte arrays using an existing file . * < p > Note that you have to supply the correct number of byte arrays contained in the dump file of * the underlying { @ link ByteDiskQueue } . Failure to do so will cause unpredictable behaviour . * @ param size the number of byte arrays contained in { @ code file } . * @ param file the file that will be used to dump the queue on disk . * @ param bufferSize the number of items in the circular buffer ( will be possibly decreased so to be a power of two ) . * @ param direct whether the { @ link ByteBuffer } used by this queue should be { @ linkplain ByteBuffer # allocateDirect ( int ) allocated directly } . * @ see ByteDiskQueue # createFromFile ( File , int , boolean ) */ public static ByteArrayDiskQueue createFromFile ( final long size , final File file , final int bufferSize , final boolean direct ) throws IOException { } }
final ByteArrayDiskQueue byteArrayDiskQueue = new ByteArrayDiskQueue ( ByteDiskQueue . createFromFile ( file , bufferSize , direct ) ) ; byteArrayDiskQueue . size = size ; return byteArrayDiskQueue ;
public class DeltaT {
    /**
     * Estimate Delta T for the given Calendar. This is based on Espenak and Meeus, "Five Millennium Canon of
     * Solar Eclipses: -1999 to +3000" (NASA/TP-2006-214141), which defines a piecewise
     * polynomial fit over historical eras; each branch below covers one era.
     *
     * @param forDate date and time
     * @return estimated delta T value (seconds)
     */
    public static double estimate(final GregorianCalendar forDate) {
        final double year = decimalYear(forDate);
        final double deltaT;
        if (year < -500) {
            // before -500: long-range parabola centred on 1820
            double u = (year - 1820) / 100;
            deltaT = -20 + 32 * pow(u, 2);
        } else if (year < 500) {
            // -500 .. +500: 6th-degree polynomial in centuries
            double u = year / 100;
            deltaT = 10583.6 - 1014.41 * u + 33.78311 * pow(u, 2) - 5.952053 * pow(u, 3)
                - 0.1798452 * pow(u, 4) + 0.022174192 * pow(u, 5) + 0.0090316521 * pow(u, 6);
        } else if (year < 1600) {
            // 500 .. 1600: centuries since year 1000
            double u = (year - 1000) / 100;
            deltaT = 1574.2 - 556.01 * u + 71.23472 * pow(u, 2) + 0.319781 * pow(u, 3)
                - 0.8503463 * pow(u, 4) - 0.005050998 * pow(u, 5) + 0.0083572073 * pow(u, 6);
        } else if (year < 1700) {
            double t = year - 1600;
            deltaT = 120 - 0.9808 * t - 0.01532 * pow(t, 2) + pow(t, 3) / 7129;
        } else if (year < 1800) {
            double t = year - 1700;
            deltaT = 8.83 + 0.1603 * t - 0.0059285 * pow(t, 2) + 0.00013336 * pow(t, 3) - pow(t, 4) / 1174000;
        } else if (year < 1860) {
            double t = year - 1800;
            deltaT = 13.72 - 0.332447 * t + 0.0068612 * pow(t, 2) + 0.0041116 * pow(t, 3)
                - 0.00037436 * pow(t, 4) + 0.0000121272 * pow(t, 5) - 0.0000001699 * pow(t, 6)
                + 0.000000000875 * pow(t, 7);
        } else if (year < 1900) {
            double t = year - 1860;
            deltaT = 7.62 + 0.5737 * t - 0.251754 * pow(t, 2) + 0.01680668 * pow(t, 3)
                - 0.0004473624 * pow(t, 4) + pow(t, 5) / 233174;
        } else if (year < 1920) {
            double t = year - 1900;
            deltaT = -2.79 + 1.494119 * t - 0.0598939 * pow(t, 2) + 0.0061966 * pow(t, 3) - 0.000197 * pow(t, 4);
        } else if (year < 1941) {
            double t = year - 1920;
            deltaT = 21.20 + 0.84493 * t - 0.076100 * pow(t, 2) + 0.0020936 * pow(t, 3);
        } else if (year < 1961) {
            double t = year - 1950;
            deltaT = 29.07 + 0.407 * t - pow(t, 2) / 233 + pow(t, 3) / 2547;
        } else if (year < 1986) {
            double t = year - 1975;
            deltaT = 45.45 + 1.067 * t - pow(t, 2) / 260 - pow(t, 3) / 718;
        } else if (year < 2005) {
            double t = year - 2000;
            deltaT = 63.86 + 0.3345 * t - 0.060374 * pow(t, 2) + 0.0017275 * pow(t, 3)
                + 0.000651814 * pow(t, 4) + 0.00002373599 * pow(t, 5);
        } else if (year < 2050) {
            // near-future extrapolation in years since 2000
            double t = year - 2000;
            deltaT = 62.92 + 0.32217 * t + 0.005589 * pow(t, 2);
        } else if (year < 2150) {
            // 2050 .. 2150: parabola with a linear correction term
            deltaT = -20 + 32 * pow(((year - 1820) / 100), 2) - 0.5628 * (2150 - year);
        } else {
            // beyond 2150: same long-range parabola as the distant past
            double u = (year - 1820) / 100;
            deltaT = -20 + 32 * pow(u, 2);
        }
        return deltaT;
    }
}
public class MavenModelScannerPlugin {
    /**
     * Adds declared and managed dependencies to the given
     * {@link MavenDependentDescriptor}.
     *
     * @param dependentDescriptor   The {@link MavenDependentDescriptor}.
     * @param declaresDependencyType relationship descriptor type used for declared dependencies
     * @param managesDependencyType  relationship descriptor type used for managed dependencies
     * @param model                 The {@link ModelBase} providing the dependencies.
     * @param scannerContext        The scanner context.
     */
    private void addDependencies(MavenDependentDescriptor dependentDescriptor,
            Class<? extends AbstractDependencyDescriptor> declaresDependencyType,
            Class<? extends AbstractDependencyDescriptor> managesDependencyType,
            ModelBase model, ScannerContext scannerContext) {
        // Declared dependencies come straight from the model's dependency list ...
        dependentDescriptor.getDeclaresDependencies().addAll(
            getDependencies(dependentDescriptor, model.getDependencies(), declaresDependencyType, scannerContext));
        // ... while managed ones come from the <dependencyManagement> section.
        dependentDescriptor.getManagesDependencies().addAll(
            addManagedDependencies(dependentDescriptor, model.getDependencyManagement(), scannerContext, managesDependencyType));
    }
}
public class Tags {
    /**
     * Parses an optional metric and tags out of the given string, any of
     * which may be null. Requires at least one metric, tagk or tagv.
     *
     * @param metric A string of the form "metric" or "metric{tag=value,...}"
     *               or even "{tag=value,...}" where the metric may be missing.
     * @param tags   The list to populate with parsed tag pairs
     * @return The name of the metric if it exists, null otherwise
     * @throws IllegalArgumentException if the metric is malformed.
     * @since 2.1
     */
    public static String parseWithMetric(final String metric, final List<Pair<String, String>> tags) {
        final int curly = metric.indexOf('{');
        if (curly < 0) {
            // no tag section at all: the whole (non-empty) string is the metric
            if (metric.isEmpty()) {
                throw new IllegalArgumentException("Metric string was empty");
            }
            return metric;
        }
        final int len = metric.length();
        if (metric.charAt(len - 1) != '}') { // "foo{"
            throw new IllegalArgumentException("Missing '}' at the end of: " + metric);
        } else if (curly == len - 2) { // "foo{}"
            // empty tag section: valid only if a metric name precedes it
            if (metric.charAt(0) == '{') {
                throw new IllegalArgumentException("Missing metric and tags: " + metric);
            }
            return metric.substring(0, len - 2);
        }
        // substring the tags out of "foo{a=b,...,x=y}" and parse them.
        for (final String tag : splitString(metric.substring(curly + 1, len - 1), ',')) {
            try {
                parse(tags, tag);
            } catch (IllegalArgumentException e) {
                // re-throw with the offending tag included for context
                throw new IllegalArgumentException("When parsing tag '" + tag + "': " + e.getMessage());
            }
        }
        // Return the "foo" part of "foo{a=b,...,x=y}", or null when the
        // string started directly with '{' (metric omitted).
        if (metric.charAt(0) == '{') {
            return null;
        }
        return metric.substring(0, curly);
    }
}
public class RpcWrapper {
    /**
     * Make the call to a specified IP address: serializes the request into an
     * XDR buffer, performs the RPC, and deserializes the reply into the
     * supplied response object.
     *
     * @param request   The request to send.
     * @param response  A response to hold the returned data.
     * @param ipAddress The IP address to use for communication.
     * @throws RpcException
     */
    public void callRpcNaked(S request, T response, String ipAddress) throws RpcException {
        Xdr xdr = new Xdr(_maximumRequestSize);
        request.marshalling(xdr);
        // callRpc returns the reply buffer, which the response unmarshals in place.
        response.unmarshalling(callRpc(ipAddress, xdr, request.isUsePrivilegedPort()));
    }
}
public class RC4 {
    /**
     * Encrypts or decrypts the given bytes with the RC4 stream cipher.
     * The key schedule (sbox) must be initialized before calling this method.
     * RC4 is symmetric: the same call both encrypts and decrypts.
     *
     * @param msg the message to encrypt or decrypt
     * @return the transformed bytes
     */
    public byte[] crypt(final byte[] msg) {
        // Read lock: multiple crypt calls may run concurrently, each on its
        // own clone of the shared sbox, while key re-initialization
        // (presumably under the write lock — verify) is excluded.
        final ReadLock readLock = this.lock.readLock();
        readLock.lock();
        byte[] code;
        try {
            // Work on a private copy so the shared key schedule is never mutated.
            final int[] sbox = this.sbox.clone();
            code = new byte[msg.length];
            int i = 0;
            int j = 0;
            // PRGA: generate one keystream byte per message byte and XOR.
            for (int n = 0; n < msg.length; n++) {
                i = (i + 1) % SBOX_LENGTH;
                j = (j + sbox[i]) % SBOX_LENGTH;
                swap(i, j, sbox);
                int rand = sbox[(sbox[i] + sbox[j]) % SBOX_LENGTH];
                code[n] = (byte) (rand ^ msg[n]);
            }
        } finally {
            readLock.unlock();
        }
        return code;
    }
}
public class VirtualMachineExtensionImagesInner {
    /**
     * Gets a virtual machine extension image.
     *
     * @param location      The name of a supported Azure region.
     * @param publisherName the String value
     * @param type          the String value
     * @param version       the String value
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @return the observable to the VirtualMachineExtensionImageInner object
     */
    public Observable<ServiceResponse<VirtualMachineExtensionImageInner>> getWithServiceResponseAsync(
            String location, String publisherName, String type, String version) {
        // Validate every required parameter (including client-level state) up
        // front so failures surface synchronously, before the request is built.
        if (location == null) {
            throw new IllegalArgumentException("Parameter location is required and cannot be null.");
        }
        if (publisherName == null) {
            throw new IllegalArgumentException("Parameter publisherName is required and cannot be null.");
        }
        if (type == null) {
            throw new IllegalArgumentException("Parameter type is required and cannot be null.");
        }
        if (version == null) {
            throw new IllegalArgumentException("Parameter version is required and cannot be null.");
        }
        if (this.client.subscriptionId() == null) {
            throw new IllegalArgumentException("Parameter this.client.subscriptionId() is required and cannot be null.");
        }
        if (this.client.apiVersion() == null) {
            throw new IllegalArgumentException("Parameter this.client.apiVersion() is required and cannot be null.");
        }
        // Issue the REST call and map the raw HTTP response into the typed
        // ServiceResponse; delegate failures into the Observable error channel.
        return service.get(location, publisherName, type, version, this.client.subscriptionId(),
                this.client.apiVersion(), this.client.acceptLanguage(), this.client.userAgent())
            .flatMap(new Func1<Response<ResponseBody>, Observable<ServiceResponse<VirtualMachineExtensionImageInner>>>() {
                @Override
                public Observable<ServiceResponse<VirtualMachineExtensionImageInner>> call(Response<ResponseBody> response) {
                    try {
                        ServiceResponse<VirtualMachineExtensionImageInner> clientResponse = getDelegate(response);
                        return Observable.just(clientResponse);
                    } catch (Throwable t) {
                        return Observable.error(t);
                    }
                }
            });
    }
}
public class RegExHelper {
    /**
     * Convert an identifier to a programming language identifier by replacing all
     * non-word characters with an underscore.
     *
     * @param s
     *        The string to convert. May be <code>null</code> or empty.
     * @param sReplacement
     *        The replacement string to be used for all non-identifier characters.
     *        May be empty but not be <code>null</code>.
     * @return The converted string or <code>null</code> if the input string is
     *         <code>null</code>. Maybe an invalid identifier, if the replacement
     *         is empty, and the identifier starts with an illegal character, or
     *         if the replacement is empty and the source string only contains
     *         invalid characters!
     */
    @Nullable
    public static String getAsIdentifier(@Nullable final String s, @Nonnull final String sReplacement) {
        ValueEnforcer.notNull(sReplacement, "Replacement");
        // null/empty input passes through unchanged
        if (StringHelper.hasNoText(s))
            return s;

        // replace all non-word characters with the replacement character
        // Important: replacement does not need to be quoted, because it is not
        // treated as a regular expression!
        final String ret = stringReplacePattern("\\W", s, sReplacement);
        // everything was replaced away: fall back to the replacement alone
        if (ret.length() == 0)
            return sReplacement;

        // prefix with the replacement if the first character is not a legal
        // Java identifier start (e.g. a digit)
        if (!Character.isJavaIdentifierStart(ret.charAt(0)))
            return sReplacement + ret;
        return ret;
    }
}
public class AbstractVFState {
    /**
     * {@inheritDoc}
     * Removes the mapping n↔m from the current state: both vertices are marked
     * UNMAPPED, the mapping size shrinks by one, and any terminal-set entries
     * in t1/t2 that were recorded at a depth beyond the new size are reset
     * (presumably undoing the bookkeeping done when the pair was added —
     * verify against the corresponding add method).
     */
    @Override
    final void remove(int n, int m) {
        m1[n] = m2[m] = UNMAPPED;
        size = size - 1;
        // roll back terminal-set marks made after the new mapping depth
        for (int w : g1[n])
            if (t1[w] > size) t1[w] = 0;
        for (int w : g2[m])
            if (t2[w] > size) t2[w] = 0;
    }
}
public class EncodingImpl { /** * { @ inheritDoc } */ @ Override public Encoding addHeader ( String key , Header header ) { } }
if ( header == null ) { return this ; } if ( this . headers == null ) { this . headers = new HashMap < > ( ) ; } this . headers . put ( key , header ) ; return this ;
public class IPv4PacketImpl {
    /**
     * Algorithm adopted from RFC 1071 - Computing the Internet Checksum:
     * sums the header as 16-bit words, folds the carries back into the low
     * 16 bits, and returns the one's complement.
     *
     * @return the 16-bit IPv4 header checksum
     */
    private int calculateChecksum() {
        long sum = 0;
        // Sum 16-bit words; offset 10 is the checksum field itself and is
        // treated as zero by skipping it.
        for (int i = 0; i < this.headers.capacity() - 1; i += 2) {
            if (i != 10) {
                sum += this.headers.getUnsignedShort(i);
            }
        }
        // Fold any carries above bit 15 back into the low 16 bits.
        while (sum >> 16 != 0) {
            sum = (sum & 0xffff) + (sum >> 16);
        }
        // One's complement, masked to 16 bits.
        return (int) ~sum & 0xFFFF;
    }
}
public class AmazonSimpleEmailServiceClient {
    /**
     * Returns the custom MAIL FROM attributes for a list of identities (email addresses : domains).
     * This operation is throttled at one request per second and can only get custom MAIL FROM attributes for up to 100
     * identities at a time.
     *
     * @param request
     *        Represents a request to return the Amazon SES custom MAIL FROM attributes for a list of identities. For
     *        information about using a custom MAIL FROM domain, see the <a
     *        href="http://docs.aws.amazon.com/ses/latest/DeveloperGuide/mail-from.html">Amazon SES Developer Guide</a>.
     * @return Result of the GetIdentityMailFromDomainAttributes operation returned by the service.
     * @sample AmazonSimpleEmailService.GetIdentityMailFromDomainAttributes
     * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/email-2010-12-01/GetIdentityMailFromDomainAttributes"
     *      target="_top">AWS API Documentation</a>
     */
    @Override
    public GetIdentityMailFromDomainAttributesResult getIdentityMailFromDomainAttributes(GetIdentityMailFromDomainAttributesRequest request) {
        // Run the request through the client-side interception hook, then
        // delegate to the generated execution method.
        request = beforeClientExecution(request);
        return executeGetIdentityMailFromDomainAttributes(request);
    }
}
public class StatusPanel {
    /**
     * Creates the default pane: after the base initialization, adds the
     * "default" label and subscribes this panel to status events.
     */
    @Override
    public void afterInitialized(BaseComponent comp) {
        super.afterInitialized(comp);
        createLabel("default");
        getEventManager().subscribe(EventUtil.STATUS_EVENT, this);
    }
}
public class JAXBUtils {
    /**
     * Converts an XML name to a Java identifier according to the mapping
     * algorithm outlined in the JAXB specification.
     *
     * @param name the XML name
     * @param type the kind of identifier wanted (class, method, variable, constant)
     * @return the Java identifier
     */
    public static String nameToIdentifier(String name, IdentifierType type) {
        if (null == name || name.length() == 0) {
            return name;
        }

        // algorithm will not change an XML name that is already a legal and
        // conventional (!) Java class, method, or constant identifier
        boolean legalIdentifier = false;
        StringBuilder buf = new StringBuilder(name);
        boolean hasUnderscore = false;
        legalIdentifier = Character.isJavaIdentifierStart(buf.charAt(0));
        for (int i = 1; i < name.length() && legalIdentifier; i++) {
            legalIdentifier &= Character.isJavaIdentifierPart(buf.charAt(i));
            hasUnderscore |= '_' == buf.charAt(i);
        }
        boolean conventionalIdentifier = isConventionalIdentifier(buf, type);
        if (legalIdentifier && conventionalIdentifier) {
            // Java keywords used as variables are escaped; class identifiers
            // containing underscores still go through the word-splitting below.
            if (JAXBUtils.isJavaKeyword(name) && type == IdentifierType.VARIABLE) {
                name = normalizePackageNamePart(name);
            }
            if (!hasUnderscore || IdentifierType.CLASS != type) {
                return name;
            }
        }

        // split into words at the XML punctuation characters
        List<String> words = new ArrayList<>();
        StringTokenizer st = new StringTokenizer(name, XML_NAME_PUNCTUATION_STRING);
        while (st.hasMoreTokens()) {
            words.add(st.nextToken());
        }
        // further split each token on case/digit boundaries
        for (int i = 0; i < words.size(); i++) {
            splitWord(words, i);
        }
        // re-assemble per the naming convention for the requested identifier type
        return makeConventionalIdentifier(words, type);
    }
}
public class StoredServerChannel { /** * Gets the canonical { @ link PaymentChannelServerState } object for this channel , either by returning an existing one * or by creating a new one . * @ param wallet The wallet which holds the { @ link PaymentChannelServerState } in which this is saved and which will * be used to complete transactions * @ param broadcaster The { @ link TransactionBroadcaster } which will be used to broadcast contract / payment transactions . */ public synchronized PaymentChannelServerState getOrCreateState ( Wallet wallet , TransactionBroadcaster broadcaster ) throws VerificationException { } }
if ( state == null ) { switch ( majorVersion ) { case 1 : state = new PaymentChannelV1ServerState ( this , wallet , broadcaster ) ; break ; case 2 : state = new PaymentChannelV2ServerState ( this , wallet , broadcaster ) ; break ; default : throw new IllegalStateException ( "Invalid version number found" ) ; } } checkArgument ( wallet == state . wallet ) ; return state ;
public class SCMDescriptor { /** * Returns the list of { @ link RepositoryBrowser } { @ link Descriptor } * that can be used with this SCM . * @ return * can be empty but never null . */ public List < Descriptor < RepositoryBrowser < ? > > > getBrowserDescriptors ( ) { } }
if ( repositoryBrowser == null ) return Collections . emptyList ( ) ; return RepositoryBrowsers . filter ( repositoryBrowser ) ;
public class JSONArray {
    /**
     * Get the object value associated with an index.
     *
     * @param index The index must be between 0 and length() - 1.
     * @return An object value.
     * @throws RuntimeException wrapping a {@link JSONException} if there is no value for the index.
     */
    public Object get(int index) {
        Object o = opt(index);
        if (o == null) {
            // NOTE(review): the JSONException is wrapped in an unchecked RuntimeException,
            // presumably because this signature declares no throws clause; the original
            // Javadoc advertised JSONException — confirm which type callers actually catch.
            throw new RuntimeException(new JSONException("JSONArray[" + index + "] not found."));
        }
        return o;
    }
}
public class Logging { /** * Given to TANGO logging level , converts it to lo4j level */ public Level tango_to_log4j_level ( String level ) { } }
level = level . toUpperCase ( ) ; if ( level . equals ( LOGGING_LEVELS [ LOGGING_OFF ] ) ) { return Level . OFF ; } if ( level . equals ( LOGGING_LEVELS [ LOGGING_FATAL ] ) ) { return Level . FATAL ; } if ( level . equals ( LOGGING_LEVELS [ LOGGING_ERROR ] ) ) { return Level . ERROR ; } if ( level . equals ( LOGGING_LEVELS [ LOGGING_INFO ] ) ) { return Level . INFO ; } if ( level . equals ( LOGGING_LEVELS [ LOGGING_DEBUG ] ) ) { return Level . DEBUG ; } return Level . WARN ;
public class AbstractResultSetWrapper {
    /**
     * {@inheritDoc}
     *
     * <p>Pure delegation to the wrapped result set.
     *
     * @see java.sql.ResultSet#updateArray(java.lang.String, java.sql.Array)
     */
    @Override
    public void updateArray(final String columnLabel, final Array x) throws SQLException {
        wrapped.updateArray(columnLabel, x);
    }
}
public class WsMessagePullParser { /** * Returns the next binary message received on this connection . This method * will block till a binar message is received . Any text messages that may * arrive will be ignored . A null is returned when the connection is closed . * An IOException is thrown if the connection has not been established * before invoking this method . * @ return ByteBuffer the payload of the binary message * @ throws IOException if the connection has not been established */ public ByteBuffer nextBinary ( ) throws IOException { } }
WebSocketMessageType msgType = null ; while ( ( msgType = _messageReader . next ( ) ) != WebSocketMessageType . EOS ) { if ( msgType == WebSocketMessageType . BINARY ) { return _messageReader . getBinary ( ) ; } } return null ;
public class IOUtils {
    /**
     * Convert either a file or jar URL into a local canonical file reference, or null if the URL
     * uses a different scheme.
     *
     * @param fileURL the url to resolve to a canonical file.
     * @return null if the given URL is null or uses neither the jar nor the file scheme.
     *         NOTE(review): despite the original doc saying "String path", this returns the
     *         canonical file's URI string (e.g. "file:/..."), not a plain filesystem path —
     *         confirm callers expect URI form.
     * @throws URISyntaxException If the given URL cannot be transformed into a URI
     * @throws IOException If the jar cannot be read or if the canonical file cannot be determined
     */
    public static String toCanonicalFilePath(URL fileURL) throws URISyntaxException, IOException {
        if (fileURL == null) {
            return null;
        }
        // Only handle jar: and file: schemes
        if (!"jar".equals(fileURL.getProtocol()) && !"file".equals(fileURL.getProtocol())) {
            return null;
        }
        // Parse the jar file location from the jar url. Doesn't open any resources.
        if ("jar".equals(fileURL.getProtocol())) {
            JarURLConnection jarURLConnection = (JarURLConnection) fileURL.openConnection();
            fileURL = jarURLConnection.getJarFileURL();
        }
        URI fileURI = fileURL.toURI();
        File file = new File(fileURI);
        // Use filesystem to resolve any sym links or dots in the path to
        // a singular unique file path
        File canonicalFile = file.getCanonicalFile();
        return canonicalFile.toURI().toString();
    }
}
public class Postcard {
    /**
     * Pushes a key/value pair into the logging map attached to this postcard.
     *
     * @param key the logging key (must not be null)
     * @param value the logging value (must not be null)
     */
    public void pushLogging(String key, Object value) {
        assertArgumentNotNull("key", key);
        assertArgumentNotNull("value", value);
        if (pushedLoggingMap == null) {
            // Lazily created; LinkedHashMap preserves insertion order of logged entries.
            pushedLoggingMap = new LinkedHashMap<String, Object>(4);
        }
        pushedLoggingMap.put(key, value);
    }
}
public class CassandraDefs { /** * Create a SlicePredicate that selects a single column . * @ param colName Column name as a byte [ ] . * @ return SlicePredicate that select the given column name only . */ static SlicePredicate slicePredicateColName ( byte [ ] colName ) { } }
SlicePredicate slicePred = new SlicePredicate ( ) ; slicePred . addToColumn_names ( ByteBuffer . wrap ( colName ) ) ; return slicePred ;
public class SubtitleChatOverlay {
    /**
     * Updates the chat glyphs in the specified list to be set to the current dimmed setting.
     *
     * @param glyphs the glyphs whose dim state should be synchronized with {@code _dimmed}
     */
    protected void updateDimmed(List<? extends ChatGlyph> glyphs) {
        // Push the overlay-wide dim flag down to every glyph.
        for (ChatGlyph glyph : glyphs) {
            glyph.setDim(_dimmed);
        }
    }
}
public class TraceEventHelper { /** * Is end state * @ param te The event * @ return The value */ public static boolean isEndState ( TraceEvent te ) { } }
if ( te . getType ( ) == TraceEvent . RETURN_CONNECTION_LISTENER || te . getType ( ) == TraceEvent . RETURN_CONNECTION_LISTENER_WITH_KILL || te . getType ( ) == TraceEvent . RETURN_INTERLEAVING_CONNECTION_LISTENER || te . getType ( ) == TraceEvent . RETURN_INTERLEAVING_CONNECTION_LISTENER_WITH_KILL || te . getType ( ) == TraceEvent . CLEAR_CONNECTION_LISTENER ) return true ; return false ;
public class HealthCheckRegistry {
    /**
     * Runs the registered health checks in parallel and returns a map of the results.
     *
     * @param executor object to launch and track health checks progress
     * @return a map of the health check results
     */
    public SortedMap<String, HealthCheck.Result> runHealthChecks(ExecutorService executor) {
        // Convenience overload: run every registered check (no filtering).
        return runHealthChecks(executor, HealthCheckFilter.ALL);
    }
}
public class MultiFormatParser { /** * / * [ deutsch ] * < p > Erzeugt einen neuen Multiformatinterpretierer . < / p > * @ param < T > generic type of chronological entity * @ param formats array of multiple formats * @ return new immutable instance of MultiFormatParser * @ since 3.14/4.11 */ @ SafeVarargs public static < T extends ChronoEntity < T > > MultiFormatParser < T > of ( ChronoFormatter < T > ... formats ) { } }
ChronoFormatter < T > [ ] parsers = Arrays . copyOf ( formats , formats . length ) ; return new MultiFormatParser < > ( parsers ) ;
public class Sftp {
    /**
     * Downloads a remote file to a local destination.
     *
     * @param src the remote file path
     * @param dest the local destination path
     * @return this, for chaining
     */
    public Sftp get(String src, String dest) {
        try {
            channel.get(src, dest);
        } catch (SftpException e) {
            // Convert the checked JSch exception into this library's runtime exception.
            throw new JschRuntimeException(e);
        }
        return this;
    }
}
public class BloodhoundDatum {
    /**
     * Utility method to get all tokens from a single value.
     *
     * @param sValue The value to use. May not be <code>null</code>.
     * @return An array of tokens to use. Never <code>null</code>.
     */
    @Nonnull
    public static String[] getTokensFromValue(@Nonnull final String sValue) {
        // Trim the value, then split it on runs of non-word characters (\W+).
        return RegExHelper.getSplitToArray(StringHelper.trim(sValue), "\\W+");
    }
}
public class OComparatorFactory { /** * Returns { @ link Comparator } instance if applicable one exist or < code > null < / code > otherwise . * @ param clazz * Class of object that is going to be compared . * @ param < T > * Class of object that is going to be compared . * @ return { @ link Comparator } instance if applicable one exist or < code > null < / code > otherwise . */ @ SuppressWarnings ( "unchecked" ) public < T > Comparator < T > getComparator ( Class < T > clazz ) { } }
if ( clazz . equals ( byte [ ] . class ) ) { if ( unsafeWasDetected ) return ( Comparator < T > ) OUnsafeByteArrayComparator . INSTANCE ; return ( Comparator < T > ) OByteArrayComparator . INSTANCE ; } return null ;
public class TokenFilter { /** * Given an arbitrary map containing String values , replace each non - null * value with the corresponding filtered value . * @ param map * The map whose values should be filtered . */ public void filterValues ( Map < ? , String > map ) { } }
// For each map entry for ( Map . Entry < ? , String > entry : map . entrySet ( ) ) { // If value is non - null , filter value through this TokenFilter String value = entry . getValue ( ) ; if ( value != null ) entry . setValue ( filter ( value ) ) ; }
public class TapeArchiveMarshaller {
    /**
     * Marshall the given parameter object.
     *
     * @param tapeArchive the model object to marshal; must not be null
     * @param protocolMarshaller the protocol-level marshaller to emit fields into
     * @throws SdkClientException if the argument is null or any field fails to marshal
     */
    public void marshall(TapeArchive tapeArchive, ProtocolMarshaller protocolMarshaller) {
        if (tapeArchive == null) {
            throw new SdkClientException("Invalid argument passed to marshall(...)");
        }
        try {
            // Emit each model field against its protocol binding, one per field.
            protocolMarshaller.marshall(tapeArchive.getTapeARN(), TAPEARN_BINDING);
            protocolMarshaller.marshall(tapeArchive.getTapeBarcode(), TAPEBARCODE_BINDING);
            protocolMarshaller.marshall(tapeArchive.getTapeCreatedDate(), TAPECREATEDDATE_BINDING);
            protocolMarshaller.marshall(tapeArchive.getTapeSizeInBytes(), TAPESIZEINBYTES_BINDING);
            protocolMarshaller.marshall(tapeArchive.getCompletionTime(), COMPLETIONTIME_BINDING);
            protocolMarshaller.marshall(tapeArchive.getRetrievedTo(), RETRIEVEDTO_BINDING);
            protocolMarshaller.marshall(tapeArchive.getTapeStatus(), TAPESTATUS_BINDING);
            protocolMarshaller.marshall(tapeArchive.getTapeUsedInBytes(), TAPEUSEDINBYTES_BINDING);
            protocolMarshaller.marshall(tapeArchive.getKMSKey(), KMSKEY_BINDING);
            protocolMarshaller.marshall(tapeArchive.getPoolId(), POOLID_BINDING);
        } catch (Exception e) {
            // Wrap any failure, preserving the cause.
            throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
        }
    }
}
public class Contextualizer {
    /**
     * Contextualizes operations with contexts given by {@link OperationalContext} which is supplied
     * by the provided {@link OperationalContextRetriver}. Returns a dynamic proxy that activates a
     * context around every invocation on {@code interfaze}.
     *
     * @param retriver the context retriever
     * @param instance the instance to wrap (must comply with the given interface)
     * @param interfaze the interface of the returned proxy
     * @param contextPropagatingInterfaces when a return type of any invocation is one of these
     *        interfaces, the returned result will be wrapped contextually as well
     */
    @SuppressWarnings("unchecked")
    public static <T> T contextualize(final OperationalContextRetriver retriver, final T instance,
            Class<?> interfaze, final Class<?>... contextPropagatingInterfaces) {
        return (T) Proxy.newProxyInstance(instance.getClass().getClassLoader(),
                new Class<?>[] { interfaze },
                new InvocationHandler() {
                    public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
                        // Activate a fresh context around every proxied invocation.
                        OperationalContext context = retriver.retrieve();
                        context.activate();
                        try {
                            Object result = method.invoke(instance, args);
                            Class<?> type = method.getReturnType();
                            // If the declared return type is one of the propagating interfaces,
                            // recursively wrap the result so nested calls stay contextual.
                            if (result != null && type != null && type.isInterface()
                                    && Arrays.asList(contextPropagatingInterfaces).contains(type)) {
                                return contextualize(retriver, result, type, contextPropagatingInterfaces);
                            } else {
                                return result;
                            }
                        } catch (InvocationTargetException e) {
                            // Unwrap the reflection wrapper so callers see the real cause.
                            throw e.getTargetException();
                        } finally {
                            // Always deactivate, even when the target threw.
                            context.deactivate();
                        }
                    }
                });
    }
}
public class EvaluatorSetupHelper {
    /**
     * Assembles the configuration for an Evaluator.
     *
     * @param resourceLaunchEvent the launch event carrying the serialized evaluator configuration
     * @return the Tang configuration rebuilt from the event
     * @throws IOException if the configuration cannot be read
     */
    private Configuration makeEvaluatorConfiguration(final ResourceLaunchEvent resourceLaunchEvent)
            throws IOException {
        // Rebuild a Tang Configuration from the serialized form carried by the event.
        return Tang.Factory.getTang()
                .newConfigurationBuilder(resourceLaunchEvent.getEvaluatorConf())
                .build();
    }
}
public class GLINERGImpl {
    /**
     * <!-- begin-user-doc -->
     * Reflective EMF getter: returns the value of the feature identified by {@code featureID},
     * deferring to the superclass for features not declared on this class.
     * <!-- end-user-doc -->
     * @generated
     */
    @Override
    public Object eGet(int featureID, boolean resolve, boolean coreType) {
        switch (featureID) {
            case AfplibPackage.GLINERG__XPOS:
                return getXPOS();
            case AfplibPackage.GLINERG__YPOS:
                return getYPOS();
        }
        // Unknown feature: let the superclass handle it.
        return super.eGet(featureID, resolve, coreType);
    }
}
public class BackupManagerImpl {
    /**
     * {@inheritDoc}
     *
     * <p>Locates the single workspace backup log inside the given backup-set directory and
     * delegates to the log-based restore overload.
     */
    public void restoreExistingWorkspace(File workspaceBackupSetDir, boolean asynchronous)
            throws BackupOperationException, BackupConfigurationException {
        // Find backup log files in the backup-set directory (privileged file access).
        File[] cfs = PrivilegedFileHelper.listFiles(workspaceBackupSetDir, new BackupLogsFilter());
        if (cfs.length == 0) {
            throw new BackupConfigurationException("Can not found workspace backup log in directory : "
                    + workspaceBackupSetDir.getPath());
        }
        // Exactly one log is expected per backup set.
        if (cfs.length > 1) {
            throw new BackupConfigurationException(
                    "Backup set directory should contains only one workspace backup log : "
                    + workspaceBackupSetDir.getPath());
        }
        BackupChainLog backupChainLog = new BackupChainLog(cfs[0]);
        // Restore using the repository and workspace settings recorded in the log itself.
        this.restoreExistingWorkspace(backupChainLog, backupChainLog.getBackupConfig().getRepository(),
                backupChainLog.getOriginalWorkspaceEntry(), asynchronous);
    }
}
public class DeleteProcedureDescriptor { /** * @ see XmlCapable # toXML ( ) */ public String toXML ( ) { } }
RepositoryTags tags = RepositoryTags . getInstance ( ) ; String eol = System . getProperty ( "line.separator" ) ; // The result StringBuffer result = new StringBuffer ( 1024 ) ; result . append ( eol ) ; result . append ( " " ) ; // Opening tag and attributes result . append ( " " ) ; result . append ( tags . getOpeningTagNonClosingById ( DELETE_PROCEDURE ) ) ; result . append ( " " ) ; result . append ( tags . getAttribute ( NAME , this . getName ( ) ) ) ; if ( this . hasReturnValue ( ) ) { result . append ( " " ) ; result . append ( tags . getAttribute ( RETURN_FIELD_REF , this . getReturnValueFieldRefName ( ) ) ) ; } result . append ( " " ) ; result . append ( tags . getAttribute ( INCLUDE_PK_FIELDS_ONLY , String . valueOf ( this . getIncludePkFieldsOnly ( ) ) ) ) ; result . append ( ">" ) ; result . append ( eol ) ; // Write all arguments only if we ' re not including all fields . if ( ! this . getIncludePkFieldsOnly ( ) ) { Iterator args = this . getArguments ( ) . iterator ( ) ; while ( args . hasNext ( ) ) { result . append ( ( ( ArgumentDescriptor ) args . next ( ) ) . toXML ( ) ) ; } } // Closing tag result . append ( " " ) ; result . append ( tags . getClosingTagById ( DELETE_PROCEDURE ) ) ; result . append ( eol ) ; return result . toString ( ) ;
public class JsonObject { /** * Returns the value mapped by { @ code name } if it exists and is a { @ code * JsonObject } , or null otherwise . */ public JsonObject optJsonObject ( String name ) { } }
JsonElement el = null ; try { el = get ( name ) ; } catch ( JsonException e ) { return null ; } if ( ! el . isJsonObject ( ) ) { return null ; } return el . asJsonObject ( ) ;
public class MessageProcessorControl {
    /**
     * Looks up the controllable for a virtual link by name, merging any pre-reconstituted
     * cached control with the live link's control adapter, or creating and caching a fresh
     * control if the link does not exist yet.
     *
     * @see com.ibm.ws.sib.processor.runtime.MessageProcessorControllable#getVirtualLinkByID(java.lang.String)
     */
    public SIMPVirtualLinkControllable getVirtualLinkByName(String name) {
        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
            SibTr.entry(tc, "getVirtualLinkByName", new Object[] { name });
        LinkHandler link = destinationManager.getLink(name);
        // See if the controllable is in the preReconstituted cache
        VirtualLinkControl control = (VirtualLinkControl) links.get(name);
        if (link != null) {
            VirtualLinkControl linkControl = (VirtualLinkControl) link.getControlAdapter();
            if (control != null) {
                // Merge the controllables
                // NOTE(review): links.remove(control) passes the control (not the name)
                // as the key — presumably relying on the map's remove-by-value semantics
                // or a keying convention not visible here; confirm.
                linkControl.merge(control);
                links.remove(control);
            }
            control = linkControl;
        } else if (control == null) {
            // If not in the linkIndex and not in the cache then add a new one to the cache
            control = new VirtualLinkControl(messageProcessor);
            links.put(name, control);
        }
        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
            SibTr.exit(tc, "getVirtualLinkByName", control);
        return control;
    }
}
public class BsonObjectTraversingParser {
    /**
     * Closeable implementation.
     *
     * <p>Idempotent: subsequent calls are no-ops. Releases the traversal cursor and clears
     * the current token so the parser reports end-of-input.
     */
    @Override
    public void close() throws IOException {
        if (!closed) {
            closed = true;
            nodeCursor = null;
            _currToken = null;
        }
    }
}
public class DFSOutputStream {
    /**
     * All data is written out to datanodes. It is not guaranteed that data has been flushed
     * to persistent store on the datanode. Block allocations are persisted on namenode.
     *
     * <p>Flushes any buffered partial chunk, waits for the last queued packet to be acked,
     * and (if new blocks were allocated since the last flush) asks the namenode to persist
     * block locations. Any IOException marks the stream closed and is rethrown.
     */
    public void sync() throws IOException {
        long start = System.currentTimeMillis();
        try {
            long toWaitFor;
            synchronized (this) {
                eventStartSync();
                /* Record current blockOffset. This might be changed inside
                 * flushBuffer() where a partial checksum chunk might be flushed.
                 * After the flush, reset the bytesCurBlock back to its previous value,
                 * any partial checksum chunk will be sent now and in next packet.
                 */
                long saveOffset = bytesCurBlock;
                DFSOutputStreamPacket oldCurrentPacket = currentPacket;
                // flush checksum buffer as an incomplete chunk
                flushBuffer(false, shouldKeepPartialChunkData());
                // bytesCurBlock potentially incremented if there was buffered data
                eventSyncStartWaitAck();
                if (DFSClient.LOG.isDebugEnabled()) {
                    DFSClient.LOG.debug("DFSClient flush() : bytesCurBlock " + bytesCurBlock
                            + " lastFlushOffset " + lastFlushOffset);
                }
                // Flush only if we haven't already flushed till this offset.
                if (lastFlushOffset != bytesCurBlock) {
                    assert bytesCurBlock > lastFlushOffset;
                    // record the valid offset of this flush
                    lastFlushOffset = bytesCurBlock;
                    enqueueCurrentPacket();
                } else {
                    // just discard the current packet since it is already been sent.
                    if (oldCurrentPacket == null && currentPacket != null) {
                        // If we didn't previously have a packet queued, and now we do,
                        // but we don't plan on sending it, then we should not
                        // skip a sequence number for it!
                        currentSeqno--;
                    }
                    currentPacket = null;
                }
                if (shouldKeepPartialChunkData()) {
                    // Restore state of stream. Record the last flush offset
                    // of the last full chunk that was flushed.
                    bytesCurBlock = saveOffset;
                }
                toWaitFor = lastQueuedSeqno;
            }
            // Wait outside the lock so writers are not blocked while acks arrive.
            waitForAckedSeqno(toWaitFor);
            eventSyncPktAcked();
            // If any new blocks were allocated since the last flush,
            // then persist block locations on namenode.
            boolean willPersist;
            synchronized (this) {
                willPersist = persistBlocks;
                persistBlocks = false;
            }
            if (willPersist) {
                dfsClient.namenode.fsync(src, dfsClient.clientName);
            }
            long timeval = System.currentTimeMillis() - start;
            dfsClient.metrics.incSyncTime(timeval);
            eventEndSync();
        } catch (IOException e) {
            // Any sync failure is fatal for this stream: mark closed and tear down threads.
            lastException = new IOException("IOException flush:", e);
            closed = true;
            closeThreads();
            throw e;
        }
    }
}
public class ChannelUpdateHandler {
    /**
     * Handles a server channel update.
     *
     * <p>Applies name, position/category, and permission-overwrite changes from the JSON
     * payload to the cached channel, dispatching the corresponding events when the server
     * is ready. Finally, if this update removed our own visibility of the channel, its
     * cached messages are evicted.
     *
     * @param jsonChannel The channel data.
     */
    private void handleServerChannel(JsonNode jsonChannel) {
        long channelId = jsonChannel.get("id").asLong();
        long guildId = jsonChannel.get("guild_id").asLong();
        ServerImpl server = api.getPossiblyUnreadyServerById(guildId)
                .map(ServerImpl.class::cast).orElse(null);
        if (server == null) {
            return;
        }
        ServerChannelImpl channel = server.getChannelById(channelId)
                .map(ServerChannelImpl.class::cast).orElse(null);
        if (channel == null) {
            return;
        }
        // --- Name change ---
        String oldName = channel.getName();
        String newName = jsonChannel.get("name").asText();
        if (!Objects.deepEquals(oldName, newName)) {
            channel.setName(newName);
            ServerChannelChangeNameEvent event =
                    new ServerChannelChangeNameEventImpl(channel, newName, oldName);
            if (server.isReady()) {
                api.getEventDispatcher().dispatchServerChannelChangeNameEvent(
                        (DispatchQueueSelector) channel.getServer(), channel.getServer(), channel, event);
            }
        }
        // Tracks whether any permission change affects the current user (set from lambdas below).
        final AtomicBoolean areYouAffected = new AtomicBoolean(false);
        // --- Position / category change ---
        ChannelCategory oldCategory = channel.asCategorizable()
                .flatMap(Categorizable::getCategory).orElse(null);
        ChannelCategory newCategory = jsonChannel.hasNonNull("parent_id")
                ? channel.getServer()
                        .getChannelCategoryById(jsonChannel.get("parent_id").asLong(-1)).orElse(null)
                : null;
        int oldRawPosition = channel.getRawPosition();
        int newRawPosition = jsonChannel.get("position").asInt();
        if (oldRawPosition != newRawPosition || !Objects.deepEquals(oldCategory, newCategory)) {
            int oldPosition = channel.getPosition();
            if (channel instanceof ServerTextChannelImpl) {
                ((ServerTextChannelImpl) channel).setParentId(newCategory == null ? -1 : newCategory.getId());
            } else if (channel instanceof ServerVoiceChannelImpl) {
                ((ServerVoiceChannelImpl) channel).setParentId(newCategory == null ? -1 : newCategory.getId());
            }
            channel.setPosition(newRawPosition);
            int newPosition = channel.getPosition();
            ServerChannelChangePositionEvent event = new ServerChannelChangePositionEventImpl(
                    channel, newPosition, oldPosition, newRawPosition, oldRawPosition,
                    newCategory, oldCategory);
            if (server.isReady()) {
                api.getEventDispatcher().dispatchServerChannelChangePositionEvent(
                        (DispatchQueueSelector) channel.getServer(), channel.getServer(), channel, event);
            }
        }
        // --- Added / changed permission overwrites ---
        Collection<Long> rolesWithOverwrittenPermissions = new HashSet<>();
        Collection<Long> usersWithOverwrittenPermissions = new HashSet<>();
        if (jsonChannel.has("permission_overwrites") && !jsonChannel.get("permission_overwrites").isNull()) {
            for (JsonNode permissionOverwriteJson : jsonChannel.get("permission_overwrites")) {
                Permissions oldOverwrittenPermissions;
                DiscordEntity entity;
                ConcurrentHashMap<Long, Permissions> overwrittenPermissions = null;
                switch (permissionOverwriteJson.get("type").asText()) {
                    case "role":
                        entity = server.getRoleById(permissionOverwriteJson.get("id").asText())
                                .orElseThrow(() -> new IllegalStateException(
                                        "Received channel update event with unknown role!"));
                        oldOverwrittenPermissions = channel.getOverwrittenPermissions((Role) entity);
                        overwrittenPermissions = channel.getInternalOverwrittenRolePermissions();
                        rolesWithOverwrittenPermissions.add(entity.getId());
                        break;
                    case "member":
                        entity = api.getCachedUserById(permissionOverwriteJson.get("id").asText())
                                .orElseThrow(() -> new IllegalStateException(
                                        "Received channel update event with unknown user!"));
                        oldOverwrittenPermissions = channel.getOverwrittenPermissions((User) entity);
                        overwrittenPermissions = channel.getInternalOverwrittenUserPermissions();
                        usersWithOverwrittenPermissions.add(entity.getId());
                        break;
                    default:
                        throw new IllegalStateException("Permission overwrite object with unknown type: "
                                + permissionOverwriteJson.toString());
                }
                int allow = permissionOverwriteJson.get("allow").asInt(0);
                int deny = permissionOverwriteJson.get("deny").asInt(0);
                Permissions newOverwrittenPermissions = new PermissionsImpl(allow, deny);
                if (!newOverwrittenPermissions.equals(oldOverwrittenPermissions)) {
                    overwrittenPermissions.put(entity.getId(), newOverwrittenPermissions);
                    if (server.isReady()) {
                        dispatchServerChannelChangeOverwrittenPermissionsEvent(
                                channel, newOverwrittenPermissions, oldOverwrittenPermissions, entity);
                        // Flag the update as affecting us if the overwrite targets us directly
                        // or a role we hold.
                        areYouAffected.compareAndSet(false,
                                entity instanceof User && ((User) entity).isYourself());
                        areYouAffected.compareAndSet(false, entity instanceof Role
                                && ((Role) entity).getUsers().stream().anyMatch(User::isYourself));
                    }
                }
            }
        }
        // --- Removed permission overwrites: anything cached but absent from the payload ---
        ConcurrentHashMap<Long, Permissions> overwrittenRolePermissions = null;
        ConcurrentHashMap<Long, Permissions> overwrittenUserPermissions = null;
        overwrittenRolePermissions = channel.getInternalOverwrittenRolePermissions();
        overwrittenUserPermissions = channel.getInternalOverwrittenUserPermissions();
        Iterator<Map.Entry<Long, Permissions>> userIt = overwrittenUserPermissions.entrySet().iterator();
        while (userIt.hasNext()) {
            Map.Entry<Long, Permissions> entry = userIt.next();
            if (usersWithOverwrittenPermissions.contains(entry.getKey())) {
                continue;
            }
            api.getCachedUserById(entry.getKey()).ifPresent(user -> {
                Permissions oldPermissions = entry.getValue();
                // Removes the entry through the iterator (from inside the lambda).
                userIt.remove();
                if (server.isReady()) {
                    dispatchServerChannelChangeOverwrittenPermissionsEvent(
                            channel, PermissionsImpl.EMPTY_PERMISSIONS, oldPermissions, user);
                    areYouAffected.compareAndSet(false, user.isYourself());
                }
            });
        }
        Iterator<Map.Entry<Long, Permissions>> roleIt = overwrittenRolePermissions.entrySet().iterator();
        while (roleIt.hasNext()) {
            Map.Entry<Long, Permissions> entry = roleIt.next();
            if (rolesWithOverwrittenPermissions.contains(entry.getKey())) {
                continue;
            }
            api.getRoleById(entry.getKey()).ifPresent(role -> {
                Permissions oldPermissions = entry.getValue();
                roleIt.remove();
                if (server.isReady()) {
                    dispatchServerChannelChangeOverwrittenPermissionsEvent(
                            channel, PermissionsImpl.EMPTY_PERMISSIONS, oldPermissions, role);
                    areYouAffected.compareAndSet(false,
                            role.getUsers().stream().anyMatch(User::isYourself));
                }
            });
        }
        // If we lost visibility of the channel, purge its cached messages.
        if (areYouAffected.get() && !channel.canYouSee()) {
            api.forEachCachedMessageWhere(
                    msg -> msg.getChannel().getId() == channelId,
                    msg -> api.removeMessageFromCache(msg.getId()));
        }
    }
}
public class BaseBuffer {
    /**
     * Constructor-style (re)initialization of this buffer.
     *
     * @param data The physical data to initialize this buffer to (optional; null clears the buffer).
     * @param iFieldsTypes The default field types to cache. (NOTE(review): the original Javadoc
     *        named this "iFieldTypes"; the parameter is actually spelled "iFieldsTypes".)
     */
    public void init(Object data, int iFieldsTypes) {
        if (data == null)
            this.clearBuffer();
        else {
            m_iHeaderCount = 0;
            this.setPhysicalData(data);
            this.resetPosition();
        }
        m_iFieldsTypes = iFieldsTypes; // Default - assume all fields (call bufferToFields(xxx, false) if not)
    }
}
public class CompareRetinaApiImpl {
    /**
     * {@inheritDoc}
     *
     * <p>Validates the second model, then posts both models as a two-element JSON array
     * to the compare endpoint for this retina.
     */
    @Override
    public Metric compare(String jsonModel1, Model model2) throws JsonProcessingException, ApiException {
        validateRequiredModels(model2);
        LOG.debug("Compare models: model1: " + jsonModel1 + " model: " + model2.toJson());
        // The API expects a JSON array literal: "[ <model1>, <model2> ]".
        return compareApi.compare("[ " + jsonModel1 + ", " + model2.toJson() + " ]", this.retinaName);
    }
}
public class Engine {
    /**
     * Configures the engine with clones of the given operators.
     *
     * @param conjunction the operator to process the propositions joined by `and` in the
     *        antecedent of the rules
     * @param disjunction the operator to process the propositions joined by `or` in the
     *        antecedent of the rules
     * @param implication the operator to modify the consequents of the rules based on the
     *        activation degree of the antecedents of the rules
     * @param aggregation the operator to aggregate the resulting implications of the rules
     * @param defuzzifier the operator to transform the aggregated implications into a single
     *        scalar value
     * @param activation the activation method to activate and trigger the rule blocks
     */
    public void configure(TNorm conjunction, SNorm disjunction,
            TNorm implication, SNorm aggregation, Defuzzifier defuzzifier, Activation activation) {
        try {
            for (RuleBlock ruleblock : this.ruleBlocks) {
                // Each rule block gets its own clone so blocks never share operator state.
                ruleblock.setConjunction(conjunction == null ? null : conjunction.clone());
                ruleblock.setDisjunction(disjunction == null ? null : disjunction.clone());
                ruleblock.setImplication(implication == null ? null : implication.clone());
                // Unlike the other operators, a null activation falls back to General
                // rather than clearing the setting.
                ruleblock.setActivation(activation == null ? new General() : activation.clone());
            }
            for (OutputVariable outputVariable : this.outputVariables) {
                outputVariable.setDefuzzifier(defuzzifier == null ? null : defuzzifier.clone());
                outputVariable.setAggregation(aggregation == null ? null : aggregation.clone());
            }
        } catch (Exception ex) {
            // clone() may throw a checked exception; surface it as unchecked with its cause.
            throw new RuntimeException(ex);
        }
    }
}
public class EDIImpl {
    /**
     * <!-- begin-user-doc -->
     * Reflective EMF setter: assigns {@code newValue} to the feature identified by
     * {@code featureID}, deferring to the superclass for features not declared here.
     * <!-- end-user-doc -->
     * @generated
     */
    @SuppressWarnings("unchecked")
    @Override
    public void eSet(int featureID, Object newValue) {
        switch (featureID) {
            case AfplibPackage.EDI__INDX_NAME:
                // Index-name feature: plain string assignment.
                setIndxName((String) newValue);
                return;
            case AfplibPackage.EDI__TRIPLETS:
                // Triplets feature: replace the entire list contents.
                getTriplets().clear();
                getTriplets().addAll((Collection<? extends Triplet>) newValue);
                return;
        }
        super.eSet(featureID, newValue);
    }
}
public class EntityInfo { /** * 获取Entity的DELETE SQL * @ param bean Entity对象 * @ return String */ public String getDeletePrepareSQL ( T bean ) { } }
if ( this . tableStrategy == null ) return deletePrepareSQL ; return deletePrepareSQL . replace ( "${newtable}" , getTable ( bean ) ) ;
public class BookKeeperServiceRunner {
    /**
     * Resumes ZooKeeper (if it had previously been suspended).
     *
     * <p>Builds a fresh runner and atomically installs it only when no runner is currently
     * registered; the loser of the CAS race is closed immediately. The registered runner is
     * then started (or restarted).
     *
     * @throws Exception If an exception got thrown.
     */
    public void resumeZooKeeper() throws Exception {
        val zk = new ZooKeeperServiceRunner(this.zkPort, this.secureZK, this.tLSKeyStore,
                this.tLSKeyStorePasswordPath, this.tlsTrustStore);
        if (this.zkServer.compareAndSet(null, zk)) {
            // Initialize ZK runner (since nobody else did it for us).
            zk.initialize();
            log.info("ZooKeeper initialized.");
        } else {
            // A runner is already registered; discard the one we just built.
            zk.close();
        }
        // Start or resume ZK.
        this.zkServer.get().start();
        log.info("ZooKeeper resumed.");
    }
}