signature
stringlengths
43
39.1k
implementation
stringlengths
0
450k
public class ToStream { /** * Process the attributes , which means to write out the currently * collected attributes to the writer . The attributes are not * cleared by this method * @ param writer the writer to write processed attributes to . * @ param nAttrs the number of attributes in m _ attributes * to be processed * @ throws java . io . IOException * @ throws org . xml . sax . SAXException */ public void processAttributes ( java . io . Writer writer , int nAttrs ) throws IOException , SAXException { } }
/* real SAX attributes are not passed in , so process the * attributes that were collected after the startElement call . * _ attribVector is a " cheap " list for Stream serializer output * accumulated over a series of calls to attribute ( name , value ) */ String encoding = getEncoding ( ) ; for ( int i = 0 ; i < nAttrs ; i ++ ) { // elementAt is JDK 1.1.8 final String name = m_attributes . getQName ( i ) ; final String value = m_attributes . getValue ( i ) ; writer . write ( ' ' ) ; writer . write ( name ) ; writer . write ( "=\"" ) ; writeAttrString ( writer , value , encoding ) ; writer . write ( '\"' ) ; }
public class Workarounds { /** * Get generated , fixed cfg - files and push them to app - resources - list . */ public void applyWorkaround205 ( String appName , List < NativeLauncher > secondaryLaunchers , Map < String , Object > params ) { } }
// to workaround , we are gathering the fixed versions of the previous executed " app - bundler " // and assume they all are existing Set < File > filenameFixedConfigFiles = new HashSet < > ( ) ; // get cfg - file of main native launcher Path appPath = nativeOutputDir . toPath ( ) . resolve ( appName ) . resolve ( "app" ) . toAbsolutePath ( ) ; if ( appName . contains ( "." ) ) { String newConfigFileName = appName . substring ( 0 , appName . lastIndexOf ( "." ) ) ; File mainAppNameCfgFile = appPath . resolve ( newConfigFileName + CONFIG_FILE_EXTENSION ) . toFile ( ) ; if ( mainAppNameCfgFile . exists ( ) ) { getLogger ( ) . info ( "Found main native application configuration file (" + mainAppNameCfgFile . toString ( ) + ")." ) ; } filenameFixedConfigFiles . add ( mainAppNameCfgFile ) ; } // when having secondary native launchers , we need their cfg - files too Optional . ofNullable ( secondaryLaunchers ) . ifPresent ( launchers -> { launchers . stream ( ) . map ( launcher -> { return launcher . getAppName ( ) ; } ) . forEach ( secondaryLauncherAppName -> { if ( secondaryLauncherAppName . contains ( "." ) ) { String newSecondaryLauncherConfigFileName = secondaryLauncherAppName . substring ( 0 , secondaryLauncherAppName . lastIndexOf ( "." ) ) ; filenameFixedConfigFiles . add ( appPath . resolve ( newSecondaryLauncherConfigFileName + CONFIG_FILE_EXTENSION ) . toFile ( ) ) ; } } ) ; } ) ; if ( filenameFixedConfigFiles . isEmpty ( ) ) { // it wasn ' t required to apply this workaround getLogger ( ) . info ( "No workaround for native launcher issue 205 needed. Continuing." ) ; return ; } getLogger ( ) . info ( "Applying workaround for native launcher issue 205 by modifying application resources." ) ; // since 1.8.0_60 there exists some APP _ RESOURCES _ LIST , which contains multiple RelativeFileSet - instances // this is the more easy way ; ) List < RelativeFileSet > appResourcesList = new ArrayList < > ( ) ; RelativeFileSet appResources = StandardBundlerParam . 
APP_RESOURCES . fetchFrom ( params ) ; // original application resources appResourcesList . add ( appResources ) ; // additional filename - fixed cfg - files appResourcesList . add ( new RelativeFileSet ( appPath . toFile ( ) , filenameFixedConfigFiles ) ) ; // special workaround when having some jdk before update 60 if ( JavaDetectionTools . IS_JAVA_8 && ! JavaDetectionTools . isAtLeastOracleJavaUpdateVersion ( 60 ) ) { try { // pre - update60 did not contain any list of RelativeFileSets , which requires to rework APP _ RESOURCES : / Path tempResourcesDirectory = Files . createTempDirectory ( "jfxmp-workaround205-" ) . toAbsolutePath ( ) ; File tempResourcesDirAsFile = tempResourcesDirectory . toFile ( ) ; getLogger ( ) . info ( "Modifying application resources for native launcher issue 205 by copying into temporary folder (" + tempResourcesDirAsFile . toString ( ) + ")." ) ; for ( RelativeFileSet sources : appResourcesList ) { File baseDir = sources . getBaseDirectory ( ) ; for ( String fname : appResources . getIncludedFiles ( ) ) { IOUtils . copyFile ( new File ( baseDir , fname ) , new File ( tempResourcesDirAsFile , fname ) ) ; } } // might not work for gradle , but maven does not hold up any JVM ; ) // might rework this later into cleanup - phase tempResourcesDirAsFile . deleteOnExit ( ) ; // generate new RelativeFileSet with fixed cfg - file Set < File > fixedResourceFiles = new HashSet < > ( ) ; try ( Stream < Path > walkstream = Files . walk ( tempResourcesDirectory ) ) { walkstream . map ( p -> p . toFile ( ) ) . filter ( File :: isFile ) . filter ( File :: canRead ) . forEach ( f -> { getLogger ( ) . info ( String . format ( "Add %s file to application resources." , f ) ) ; fixedResourceFiles . add ( f ) ; } ) ; } catch ( IOException ignored ) { // NO - OP } params . put ( StandardBundlerParam . APP_RESOURCES . getID ( ) , new RelativeFileSet ( tempResourcesDirAsFile , fixedResourceFiles ) ) ; } catch ( IOException ex ) { getLogger ( ) . 
warn ( null , ex ) ; } return ; } /* * Backward - compatibility note : * When using JDK 1.8.0u51 on travis - ci it would results into " cannot find symbol : variable APP _ RESOURCES _ LIST " ! * To solve this , we are using some hard - coded map - key : / ( please no hacky workaround via reflections . . urgh ) */ params . put ( StandardBundlerParam . APP_RESOURCES . getID ( ) + "List" , appResourcesList ) ;
public class CassandraUtils { /** * Get tables of a keyspace . * @ param keyspace * @ return { @ code List < String > } */ public List < String > getTables ( String keyspace ) { } }
ArrayList < String > result = new ArrayList < String > ( ) ; this . metadata = this . cluster . getMetadata ( ) ; if ( ( ! existsKeyspace ( keyspace , false ) ) || ( this . metadata . getKeyspace ( keyspace ) . getTables ( ) . isEmpty ( ) ) ) { return result ; } for ( TableMetadata t : this . metadata . getKeyspace ( keyspace ) . getTables ( ) ) { result . add ( t . getName ( ) ) ; } return result ;
public class LangPropsService { /** * Gets a value with the specified key and locale . * @ param key the specified key * @ param locale the specified locale * @ return value */ public String get ( final String key , final Locale locale ) { } }
return get ( Keys . LANGUAGE , key , locale ) ;
public class RandomVariableLazyEvaluation { /** * / * ( non - Javadoc ) * @ see net . finmath . stochastic . RandomVariable # getStandardDeviation ( net . finmath . stochastic . RandomVariable ) */ @ Override public double getStandardDeviation ( RandomVariable probabilities ) { } }
if ( isDeterministic ( ) ) { return 0.0 ; } if ( size ( ) == 0 ) { return Double . NaN ; } return Math . sqrt ( getVariance ( probabilities ) ) ;
public class DeregistrationPanel { /** * { @ inheritDoc } */ @ Override public void onBeforeRender ( ) { } }
addOrReplace ( contentPanel = newContentPanel ( "contentPanel" ) ) ; addOrReplace ( form = newForm ( "form" , getModel ( ) ) ) ; form . addOrReplace ( motivation = newMotivation ( "motivation" , getModel ( ) ) ) ; // Create submit button for the form submitButton = newButton ( "submitButton" ) ; submitButton . addOrReplace ( buttonLabel = newButtonLabel ( "buttonLabel" , "sem.main.global.deregistration.user.label" , "Deregister" ) ) ; form . addOrReplace ( submitButton ) ; super . onBeforeRender ( ) ;
public class AttachedViewRecycler { /** * Sets the comparator , which allows to determine the order , which should be used to add views * to the parent . When setting a comparator , which is different from the current one , the * currently attached views are reordered . * @ param comparator * The comparator , which allows to determine the order , which should be used to add * views to the parent , as an instance of the type { @ link Comparator } or null , if the * views should be added in the order of their inflation */ public final void setComparator ( @ Nullable final Comparator < ItemType > comparator ) { } }
this . comparator = comparator ; if ( comparator != null ) { if ( items . size ( ) > 0 ) { List < ItemType > newItems = new ArrayList < > ( ) ; List < View > views = new ArrayList < > ( ) ; for ( int i = items . size ( ) - 1 ; i >= 0 ; i -- ) { ItemType item = items . get ( i ) ; int index = binarySearch ( newItems , item , comparator ) ; newItems . add ( index , item ) ; View view = parent . getChildAt ( i ) ; parent . removeViewAt ( i ) ; views . add ( index , view ) ; } parent . removeAllViews ( ) ; for ( View view : views ) { parent . addView ( view ) ; } this . items = newItems ; getLogger ( ) . logDebug ( getClass ( ) , "Comparator changed. Views have been reordered" ) ; } else { getLogger ( ) . logDebug ( getClass ( ) , "Comparator changed" ) ; } } else { getLogger ( ) . logDebug ( getClass ( ) , "Comparator set to null" ) ; }
public class CloseableIterators { /** * Creates a { @ link CloseableIterable } from a standard iterable , while closing the provided * closeable . * Intentionally left package private . */ static < T > CloseableIterator < T > wrap ( final Iterator < T > iterator , final AutoCloseable closeable ) { } }
return new CloseableIterator < T > ( ) { @ Override public void close ( ) { try { closeable . close ( ) ; } catch ( RuntimeException re ) { throw re ; } catch ( Exception e ) { throw new IllegalStateException ( e ) ; } } @ Override public boolean hasNext ( ) { return iterator . hasNext ( ) ; } @ Override public T next ( ) { return iterator . next ( ) ; } @ Override public void remove ( ) { iterator . remove ( ) ; } } ;
public class br_replicateconfig { /** * < pre > * Performs generic data validation for the operation to be performed * < / pre > */ protected void validate ( String operationType ) throws Exception { } }
super . validate ( operationType ) ; MPSIPAddress source_ip_address_validator = new MPSIPAddress ( ) ; source_ip_address_validator . validate ( operationType , source_ip_address , "\"source_ip_address\"" ) ; MPSIPAddress target_ip_address_arr_validator = new MPSIPAddress ( ) ; if ( target_ip_address_arr != null ) { for ( int i = 0 ; i < target_ip_address_arr . length ; i ++ ) { target_ip_address_arr_validator . validate ( operationType , target_ip_address_arr [ i ] , "target_ip_address_arr[" + i + "]" ) ; } }
public class PrimitiveUtils { /** * Read long . * @ param value the value * @ param defaultValue the default value * @ return the long */ public static Long readLong ( String value , Long defaultValue ) { } }
if ( ! StringUtils . hasText ( value ) ) return defaultValue ; return Long . valueOf ( value ) ;
public class Layer { /** * Renders this layer to { @ code surf } , including its children . */ public final void paint ( Surface surf ) { } }
if ( ! visible ( ) ) return ; int otint = surf . combineTint ( tint ) ; QuadBatch obatch = surf . pushBatch ( batch ) ; surf . concatenate ( transform ( ) , originX ( ) , originY ( ) ) ; try { paintImpl ( surf ) ; if ( DEBUG_RECTS ) { drawDebugRect ( surf ) ; } } finally { surf . popBatch ( obatch ) ; surf . setTint ( otint ) ; }
public class AbstractStreamMetadataStore { /** * List the streams in scope . * @ param scopeName Name of scope * @ return A map of streams in scope to their configs . */ @ Override public CompletableFuture < Map < String , StreamConfiguration > > listStreamsInScope ( final String scopeName ) { } }
return getScope ( scopeName ) . listStreamsInScope ( ) . thenCompose ( streams -> { HashMap < String , CompletableFuture < Optional < StreamConfiguration > > > result = new HashMap < > ( ) ; for ( String s : streams ) { Stream stream = getStream ( scopeName , s , null ) ; result . put ( stream . getName ( ) , Futures . exceptionallyExpecting ( stream . getConfiguration ( ) , e -> e instanceof StoreException . DataNotFoundException , null ) . thenApply ( Optional :: ofNullable ) ) ; } return Futures . allOfWithResults ( result ) . thenApply ( x -> { return x . entrySet ( ) . stream ( ) . filter ( y -> y . getValue ( ) . isPresent ( ) ) . collect ( Collectors . toMap ( Map . Entry :: getKey , entry -> entry . getValue ( ) . get ( ) ) ) ; } ) ; } ) ;
public class VoltCompiler { /** * Compile from DDL files ( only ) . * @ param ddlFilePaths input ddl files * @ return compiled catalog * @ throws VoltCompilerException */ public Catalog compileCatalogFromDDL ( final String ... ddlFilePaths ) throws VoltCompilerException { } }
InMemoryJarfile jarOutput = new InMemoryJarfile ( ) ; return compileCatalogInternal ( null , null , DDLPathsToReaderList ( ddlFilePaths ) , jarOutput ) ;
public class MapWithProtoValuesSubject { /** * Compares float fields with these explicitly specified top - level field numbers using the * provided absolute tolerance . * @ param tolerance A finite , non - negative tolerance . */ public MapWithProtoValuesFluentAssertion < M > usingFloatToleranceForFieldDescriptorsForValues ( float tolerance , Iterable < FieldDescriptor > fieldDescriptors ) { } }
return usingConfig ( config . usingFloatToleranceForFieldDescriptors ( tolerance , fieldDescriptors ) ) ;
public class NetworkWatchersInner { /** * Lists all available internet service providers for a specified Azure region . * @ param resourceGroupName The name of the network watcher resource group . * @ param networkWatcherName The name of the network watcher resource . * @ param parameters Parameters that scope the list of available providers . * @ throws IllegalArgumentException thrown if parameters fail the validation * @ return the observable for the request */ public Observable < ServiceResponse < AvailableProvidersListInner > > listAvailableProvidersWithServiceResponseAsync ( String resourceGroupName , String networkWatcherName , AvailableProvidersListParameters parameters ) { } }
if ( resourceGroupName == null ) { throw new IllegalArgumentException ( "Parameter resourceGroupName is required and cannot be null." ) ; } if ( networkWatcherName == null ) { throw new IllegalArgumentException ( "Parameter networkWatcherName is required and cannot be null." ) ; } if ( this . client . subscriptionId ( ) == null ) { throw new IllegalArgumentException ( "Parameter this.client.subscriptionId() is required and cannot be null." ) ; } if ( parameters == null ) { throw new IllegalArgumentException ( "Parameter parameters is required and cannot be null." ) ; } Validator . validate ( parameters ) ; final String apiVersion = "2018-06-01" ; Observable < Response < ResponseBody > > observable = service . listAvailableProviders ( resourceGroupName , networkWatcherName , this . client . subscriptionId ( ) , parameters , apiVersion , this . client . acceptLanguage ( ) , this . client . userAgent ( ) ) ; return client . getAzureClient ( ) . getPostOrDeleteResultAsync ( observable , new TypeToken < AvailableProvidersListInner > ( ) { } . getType ( ) ) ;
public class UpdateJobResult { /** * < b > NOTE : < / b > This method appends the values to the existing list ( if any ) . Use * { @ link # setArtifactList ( java . util . Collection ) } or { @ link # withArtifactList ( java . util . Collection ) } if you want to * override the existing values . * @ param artifactList * @ return Returns a reference to this object so that method calls can be chained together . */ public UpdateJobResult withArtifactList ( Artifact ... artifactList ) { } }
if ( this . artifactList == null ) { setArtifactList ( new com . amazonaws . internal . SdkInternalList < Artifact > ( artifactList . length ) ) ; } for ( Artifact ele : artifactList ) { this . artifactList . add ( ele ) ; } return this ;
public class SourceMapResolver { /** * Based on https : / / developer . mozilla . org / en - US / docs / Web / HTTP / Basics _ of _ HTTP / Data _ URIs * @ param url * @ return String or null . */ @ Nullable private static String extractBase64String ( String url ) { } }
if ( url . startsWith ( BASE64_URL_PREFIX ) && url . contains ( BASE64_START ) ) { int base64StartIndex = url . indexOf ( BASE64_START ) ; String mediaType = url . substring ( BASE64_URL_PREFIX . length ( ) , base64StartIndex ) ; if ( ACCEPTED_MEDIA_TYPES . contains ( mediaType ) ) { byte [ ] data = BaseEncoding . base64 ( ) . decode ( url . substring ( base64StartIndex + BASE64_START . length ( ) ) ) ; return new String ( data , UTF_8 ) ; } } return null ;
public class Cifar10DataSetIterator { /** * Get the labels - either in " categories " ( imagenet synsets format , " n01910747 " or similar ) or human - readable format , * such as " jellyfish " * @ param categories If true : return category / synset format ; false : return " human readable " label format * @ return Labels */ public static List < String > getLabels ( boolean categories ) { } }
List < String > rawLabels = new Cifar10DataSetIterator ( 1 ) . getLabels ( ) ; if ( categories ) { return rawLabels ; } // Otherwise , convert to human - readable format , using ' words . txt ' file File baseDir = DL4JResources . getDirectory ( ResourceType . DATASET , Cifar10Fetcher . LOCAL_CACHE_NAME ) ; File labelFile = new File ( baseDir , Cifar10Fetcher . LABELS_FILENAME ) ; List < String > lines ; try { lines = FileUtils . readLines ( labelFile , StandardCharsets . UTF_8 ) ; } catch ( IOException e ) { throw new RuntimeException ( "Error reading label file" , e ) ; } Map < String , String > map = new HashMap < > ( ) ; for ( String line : lines ) { String [ ] split = line . split ( "\t" ) ; map . put ( split [ 0 ] , split [ 1 ] ) ; } List < String > outLabels = new ArrayList < > ( rawLabels . size ( ) ) ; for ( String s : rawLabels ) { String s2 = map . get ( s ) ; Preconditions . checkState ( s2 != null , "Label \"%s\" not found in labels.txt file" ) ; outLabels . add ( s2 ) ; } return outLabels ;
public class AbstractTable { /** * execution helpers */ protected long executeSelectCount ( PreparedStatement preparedStatement ) { } }
try ( ResultSet resultSet = preparedStatement . executeQuery ( ) ) { resultSet . next ( ) ; return resultSet . getLong ( 1 ) ; } catch ( SQLException x ) { throw new RuntimeException ( x . getMessage ( ) ) ; }
public class ScrollableScroller { /** * / * private */ int getSplineFlingDuration ( float velocity ) { } }
final double l = getSplineDeceleration ( velocity ) ; final double decelMinusOne = DECELERATION_RATE - 1.0 ; return ( int ) ( 1000.0 * Math . exp ( l / decelMinusOne ) ) ;
public class BaseXMLReader { /** * / * - - - - - [ Properties ] - - - - - */ protected boolean _setProperty ( String name , Object value ) throws SAXNotSupportedException { } }
if ( LEXICAL_HANDLER . equals ( name ) || LEXICAL_HANDLER_ALT . equals ( name ) ) { if ( value == null || value instanceof LexicalHandler ) { handler . setLexicalHandler ( ( LexicalHandler ) value ) ; return true ; } else throw new SAXNotSupportedException ( "value must implement " + LexicalHandler . class ) ; } else if ( DECL_HANDLER . equals ( name ) || DECL_HANDLER_ALT . equals ( name ) ) { if ( value == null || value instanceof DeclHandler ) { handler . setDeclHandler ( ( DeclHandler ) value ) ; return true ; } else throw new SAXNotSupportedException ( "value must implement " + DeclHandler . class ) ; } else return false ;
public class TempRecordPage { /** * Selection sort . The values of sort directions are defined in * { @ link RecordComparator } . * @ param sortFlds * the list of sorted fields * @ param sortDirs * the list of sorting directions */ public void sortbyselection ( List < String > sortFlds , List < Integer > sortDirs ) { } }
moveToId ( - 1 ) ; int i = 0 ; while ( super . next ( ) ) { int minId = findSmallestFrom ( i , sortFlds , sortDirs ) ; if ( minId != i ) { swapRecords ( i , minId ) ; } moveToId ( i ) ; i ++ ; }
public class AccountingChronology { /** * Obtains a Accounting zoned date - time from another date - time object . * @ param temporal the date - time object to convert , not null * @ return the Accounting zoned date - time , not null * @ throws DateTimeException if unable to create the date - time */ @ Override @ SuppressWarnings ( "unchecked" ) public ChronoZonedDateTime < AccountingDate > zonedDateTime ( TemporalAccessor temporal ) { } }
return ( ChronoZonedDateTime < AccountingDate > ) super . zonedDateTime ( temporal ) ;
public class Times { /** * Gets the week start time with the specified time . * @ param time the specified time * @ return week start time */ public static long getWeekStartTime ( final long time ) { } }
final Calendar start = Calendar . getInstance ( ) ; start . setFirstDayOfWeek ( Calendar . MONDAY ) ; start . setTimeInMillis ( time ) ; start . set ( Calendar . DAY_OF_WEEK , Calendar . MONDAY ) ; start . set ( Calendar . HOUR , 0 ) ; start . set ( Calendar . MINUTE , 0 ) ; start . set ( Calendar . SECOND , 0 ) ; start . set ( Calendar . MILLISECOND , 0 ) ; return start . getTimeInMillis ( ) ;
public class AbstractApplicationPage { /** * Closes this < code > ApplicationPage < / code > . This method calls { @ link # close ( PageComponent ) } for each open * < code > PageComponent < / code > . * @ return < code > true < / code > if the operation was successful , < code > false < / code > otherwise . */ public boolean close ( ) { } }
for ( Iterator < PageComponent > iter = new HashSet < PageComponent > ( pageComponents ) . iterator ( ) ; iter . hasNext ( ) ; ) { PageComponent component = iter . next ( ) ; if ( ! close ( component ) ) return false ; } return true ;
public class ScopInstallation { /** * / * ( non - Javadoc ) * @ see org . biojava . nbio . structure . scop . ScopDatabase # getByCategory ( org . biojava . nbio . structure . scop . ScopCategory ) */ @ Override public List < ScopDescription > getByCategory ( ScopCategory category ) { } }
try { ensureDesInstalled ( ) ; } catch ( IOException e ) { throw new ScopIOException ( e ) ; } List < ScopDescription > matches = new ArrayList < ScopDescription > ( ) ; for ( Integer i : sunidMap . keySet ( ) ) { ScopDescription sc = sunidMap . get ( i ) ; if ( sc . getCategory ( ) . equals ( category ) ) try { matches . add ( ( ScopDescription ) sc . clone ( ) ) ; } catch ( CloneNotSupportedException e ) { throw new RuntimeException ( "Could not clone " + ScopDescription . class + " subclass" , e ) ; } } return matches ;
public class JNStorage { /** * Get name for destination file used for log syncing , after a journal node * crashed . */ File getSyncLogDestFile ( long segmentTxId , long endTxId ) { } }
String name = NNStorage . getFinalizedEditsFileName ( segmentTxId , endTxId ) ; return new File ( sd . getCurrentDir ( ) , name ) ;
public class BatchDescribeSimulationJobResult { /** * A list of simulation jobs . * @ param jobs * A list of simulation jobs . */ public void setJobs ( java . util . Collection < SimulationJob > jobs ) { } }
if ( jobs == null ) { this . jobs = null ; return ; } this . jobs = new java . util . ArrayList < SimulationJob > ( jobs ) ;
public class ManagerUtil { /** * Returns the internal action id contained in the given action id . * @ param actionId the action id prefixed by the internal action id as * received from Asterisk . * @ return the internal action id that has been added before . * @ see # addInternalActionId ( String , String ) */ public static String getInternalActionId ( String actionId ) { } }
final int delimiterIndex ; if ( actionId == null ) { return null ; } delimiterIndex = actionId . indexOf ( INTERNAL_ACTION_ID_DELIMITER ) ; if ( delimiterIndex > 0 ) { return actionId . substring ( 0 , delimiterIndex ) ; } return null ;
public class DoCopy { /** * Parses and normalizes the destination header . * @ param req * Servlet request * @ param resp * Servlet response * @ return destinationPath * @ throws IOException * if an error occurs while sending response */ private String parseDestinationHeader ( final WebdavRequest req , final WebdavResponse resp ) throws IOException { } }
String destinationPath = req . getHeader ( "Destination" ) ; if ( destinationPath == null ) { resp . sendError ( WebdavStatus . SC_BAD_REQUEST ) ; return null ; } // Remove url encoding from destination destinationPath = RequestUtil . URLDecode ( destinationPath , "UTF8" ) ; final int protocolIndex = destinationPath . indexOf ( "://" ) ; if ( protocolIndex >= 0 ) { // if the Destination URL contains the protocol , we can safely // trim everything upto the first " / " character after " : / / " final int firstSeparator = destinationPath . indexOf ( "/" , protocolIndex + 4 ) ; if ( firstSeparator < 0 ) { destinationPath = "/" ; } else { destinationPath = destinationPath . substring ( firstSeparator ) ; } } else { final String hostName = req . getServerName ( ) ; if ( ( hostName != null ) && ( destinationPath . startsWith ( hostName ) ) ) { destinationPath = destinationPath . substring ( hostName . length ( ) ) ; } final int portIndex = destinationPath . indexOf ( ":" ) ; if ( portIndex >= 0 ) { destinationPath = destinationPath . substring ( portIndex ) ; } if ( destinationPath . startsWith ( ":" ) ) { final int firstSeparator = destinationPath . indexOf ( "/" ) ; if ( firstSeparator < 0 ) { destinationPath = "/" ; } else { destinationPath = destinationPath . substring ( firstSeparator ) ; } } } // Normalize destination path ( remove ' . ' and ' . . ' ) destinationPath = normalize ( destinationPath ) ; final String contextPath = req . getContextPath ( ) ; if ( ( contextPath != null ) && ( destinationPath . startsWith ( contextPath ) ) ) { destinationPath = destinationPath . substring ( contextPath . length ( ) ) ; } final String pathInfo = req . getPathInfo ( ) ; if ( pathInfo != null ) { final String servletPath = req . getServicePath ( ) ; if ( ( servletPath != null ) && ( destinationPath . startsWith ( servletPath ) ) ) { destinationPath = destinationPath . substring ( servletPath . length ( ) ) ; } } return destinationPath ;
public class SocketChannelListener { /** * @ see org . browsermob . proxy . jetty . http . HttpListener # setPort ( int ) */ public void setPort ( int port ) { } }
if ( _address == null || _address . getHostName ( ) == null ) _address = new InetSocketAddress ( port ) ; else _address = new InetSocketAddress ( _address . getHostName ( ) , port ) ;
public class UStats { /** * Present the results from this task in a formatted string output . * @ param tUnit the units in which to display the times ( see { @ link UStats # getGoodUnit ( ) } for a suggestion ) . * @ return A string representing the statistics . * @ see UStats # getGoodUnit ( ) */ public String formatResults ( TimeUnit tUnit ) { } }
double avg = getAverage ( tUnit ) ; double fast = getFastest ( tUnit ) ; double slow = getSlowest ( tUnit ) ; double t95p = get95thPercentile ( tUnit ) ; double t99p = get99thPercentile ( tUnit ) ; int width = Math . max ( 8 , DoubleStream . of ( avg , fast , slow , t95p , t99p ) . mapToObj ( d -> String . format ( "%.4f" , d ) ) . mapToInt ( String :: length ) . max ( ) . getAsInt ( ) ) ; return String . format ( "Task %s -> %s: (Unit: %s)\n" + " Count : %" + width + "d Average : %" + width + ".4f\n" + " Fastest : %" + width + ".4f Slowest : %" + width + ".4f\n" + " 95Pctile : %" + width + ".4f 99Pctile : %" + width + ".4f\n" + " TimeBlock : %s\n" + " Histogram : %s\n" , suite , name , unitName [ tUnit . ordinal ( ) ] , results . length , avg , fast , slow , t95p , t99p , formatZoneTime ( getZoneTimes ( 10 , tUnit ) ) , formatHisto ( getDoublingHistogram ( ) ) ) ;
public class MutablePeriod { /** * Sets all the fields in one go from two instants representing an interval . * The chronology of the start instant is used , unless that is null when the * chronology of the end instant is used instead . * @ param start the start instant , null means now * @ param end the end instant , null means now * @ throws ArithmeticException if the set exceeds the capacity of the period */ public void setPeriod ( ReadableInstant start , ReadableInstant end ) { } }
if ( start == end ) { setPeriod ( 0L ) ; } else { long startMillis = DateTimeUtils . getInstantMillis ( start ) ; long endMillis = DateTimeUtils . getInstantMillis ( end ) ; Chronology chrono = DateTimeUtils . getIntervalChronology ( start , end ) ; setPeriod ( startMillis , endMillis , chrono ) ; }
public class LoadBalancerPoolService { /** * Delete array of load balancer pool * @ param loadBalancerPool array of load balancer pool * @ return OperationFuture wrapper for load balancer pool list */ public OperationFuture < List < LoadBalancerPool > > delete ( LoadBalancerPool ... loadBalancerPool ) { } }
return delete ( Arrays . asList ( loadBalancerPool ) ) ;
public class ESResponseWrapper { /** * Filter buckets . * @ param buckets * the buckets * @ param query * the query * @ return the terms */ private Terms filterBuckets ( Terms buckets , KunderaQuery query ) { } }
Expression havingClause = query . getSelectStatement ( ) . getHavingClause ( ) ; if ( ! ( havingClause instanceof NullExpression ) && havingClause != null ) { Expression conditionalExpression = ( ( HavingClause ) havingClause ) . getConditionalExpression ( ) ; for ( Iterator < Bucket > bucketIterator = buckets . getBuckets ( ) . iterator ( ) ; bucketIterator . hasNext ( ) ; ) { InternalAggregations internalAgg = ( InternalAggregations ) bucketIterator . next ( ) . getAggregations ( ) ; if ( ! isValidBucket ( internalAgg , query , conditionalExpression ) ) { bucketIterator . remove ( ) ; } } } return buckets ;
public class Right { /** * construction */ public boolean contains ( Right right ) { } }
if ( isFull ) { return true ; } if ( right . isFull ) { return false ; } if ( ! containsRights ( isFullSelect , selectColumnSet , right . selectColumnSet , right . isFullSelect ) ) { return false ; } if ( ! containsRights ( isFullInsert , insertColumnSet , right . insertColumnSet , right . isFullInsert ) ) { return false ; } if ( ! containsRights ( isFullUpdate , updateColumnSet , right . updateColumnSet , right . isFullUpdate ) ) { return false ; } if ( ! containsRights ( isFullReferences , referencesColumnSet , right . referencesColumnSet , right . isFullReferences ) ) { return false ; } if ( ! containsRights ( isFullTrigger , triggerColumnSet , right . triggerColumnSet , right . isFullTrigger ) ) { return false ; } if ( ! isFullDelete && right . isFullDelete ) { return false ; } return true ;
public class DefaultRetryPolicy { /** * Returns whether a failed request should be retried according to the given request context . In the following * circumstances , the request will fail directly without consulting this method : * < ul > * < li > if it has already reached the max retry limit , * < li > if the request contains non - repeatable content , * < li > if any RuntimeException or Error is thrown when executing the request . * < / ul > * @ param exception the exception from the failed request , represented as a BceClientException object . * @ param retriesAttempted the number of times the current request has been attempted . * @ return true if the failed request should be retried . */ protected boolean shouldRetry ( BceClientException exception , int retriesAttempted ) { } }
// Always retry on client exceptions caused by IOException if ( exception . getCause ( ) instanceof IOException ) { logger . debug ( "Retry for IOException." ) ; return true ; } // Only retry on a subset of service exceptions if ( exception instanceof BceServiceException ) { BceServiceException e = ( BceServiceException ) exception ; /* * For 500 internal server errors and 503 service unavailable errors and 502 service bad gateway , we want to retry , but we need to use * an exponential back - off strategy so that we don ' t overload a server with a flood of retries . */ if ( e . getStatusCode ( ) == HttpStatus . SC_INTERNAL_SERVER_ERROR ) { logger . debug ( "Retry for internal server error." ) ; return true ; } if ( e . getStatusCode ( ) == HttpStatus . SC_BAD_GATEWAY ) { logger . debug ( "Retry for bad gateway." ) ; return true ; } if ( e . getStatusCode ( ) == HttpStatus . SC_SERVICE_UNAVAILABLE ) { logger . debug ( "Retry for service unavailable." ) ; return true ; } String errorCode = e . getErrorCode ( ) ; if ( ErrorCode . REQUEST_EXPIRED . equals ( errorCode ) ) { logger . debug ( "Retry for request expired." ) ; return true ; } } return false ;
public class AbstractOptions {
    /**
     * Takes a map and produces a submap of the entries whose keys start with the
     * given prefix (case-insensitively), with the prefix stripped from each key.
     * For example, with subkey {@code "foo."} all {@code foo.*} entries are copied
     * into the new map under their suffix.
     *
     * @param mapIn  source map; may be null or empty
     * @param subkey key prefix selecting the submap
     * @return the submap; never null (empty map for null/empty input)
     */
    public static Map<String, String> getSubmap(Map<String, String> mapIn, String subkey) {
        if (mapIn == null || mapIn.isEmpty()) {
            return Collections.emptyMap();
        }
        // Hoist the lower-cased prefix out of the stream: the original recomputed
        // subkey.toLowerCase() once per entry.
        final String prefix = subkey.toLowerCase();
        return mapIn.entrySet().stream()
                .filter(entry -> entry.getKey().toLowerCase().startsWith(prefix))
                // Single-arg substring: the former (start, key.length()) form was redundant.
                .collect(Collectors.toMap(
                        entry -> entry.getKey().substring(subkey.length()),
                        Entry::getValue));
    }
}
public class FeatureScopes {
    /**
     * Entry point for content-assist scoping of simple feature calls. Builds a chain
     * of nested scopes, innermost (local variables) last so it shadows the outer ones.
     *
     * @param context the context, e.g. a for-loop expression, a block or a catch clause
     */
    public IScope createSimpleFeatureCallScope(EObject context, IFeatureScopeSession session, IResolvedTypes resolvedTypes) {
        IScope root = IScope.NULLSCOPE;
        if (context instanceof XFeatureCall) {
            XFeatureCall featureCall = (XFeatureCall) context;
            if (!featureCall.isExplicitOperationCallOrBuilderSyntax()) {
                // A plain identifier may denote a type literal; seed the chain with those.
                root = createTypeLiteralScope(context, QualifiedName.EMPTY, root, session, resolvedTypes);
                if (isDefiniteTypeLiteral(featureCall)) {
                    // Definitely a type literal: no feature/variable scopes apply.
                    return root;
                }
            }
        }
        // Layered from outermost to innermost: static imports, static members,
        // static extensions, dynamic extensions, then locals/implicit receivers.
        IScope staticImports = createStaticFeaturesScope(context, root, session);
        IScope staticMembers = createStaticScope(asAbstractFeatureCall(context), null, null, staticImports, session, resolvedTypes);
        IScope staticExtensions = createStaticExtensionsScope(null, null, context, staticMembers, session, resolvedTypes);
        // we don't want to use captured instances of 'IT' as dynamic extension implicit argument
        // thus the dynamic extension scope only works for the *real* local variables
        IScope dynamicExtensions = createDynamicExtensionsScope(null, null, context, staticExtensions, session, resolvedTypes);
        IScope localVariables = createImplicitFeatureCallAndLocalVariableScope(context, dynamicExtensions, session, resolvedTypes);
        return localVariables;
    }
}
public class FileSystemDatasets { /** * Convert a path to a partition directory in a filesystem dataset to a { @ link View } * object representing that partition . * @ param dataset the ( partitioned ) filesystem dataset * @ param path the path to the partition directory * @ return a view of the partition */ public static < E > View < E > viewForPath ( Dataset < E > dataset , Path path ) { } }
if ( dataset instanceof FileSystemDataset ) { return ( ( FileSystemDataset < E > ) dataset ) . viewForUri ( path . toUri ( ) ) ; } throw new IllegalArgumentException ( "Not a file system dataset: " + dataset ) ;
public class WebColor {
    /**
     * {@inheritDoc}
     *
     * Parses a hex color string (optionally prefixed with "0x" or "#") into the hex
     * property, and an optional second parameter as opacity.
     */
    @Override
    public void parse(final String... parameters) {
        if (parameters.length >= 1) {
            String hexValue = parameters[0];
            // Strip an optional "0x" or "#" prefix before looking at the digits.
            if (hexValue.startsWith("0x")) {
                hexValue = hexValue.substring(2);
            }
            // NOTE(review): charAt(0) throws StringIndexOutOfBoundsException when the
            // first parameter is "" or exactly "0x" — confirm callers never pass that.
            if (hexValue.charAt(0) == '#') {
                hexValue = hexValue.substring(1);
            }
            switch (hexValue.length()) {
                case 3: // 0x r g b — short form accepted as-is
                    this.hexProperty.set(hexValue);
                    break;
                case 8: // 0x rr gg bb oo — alpha form deliberately ignored
                    // Not managed yet
                    break;
                case 6: // 0x rr gg bb
                default: // any other length is stored unvalidated
                    this.hexProperty.set(hexValue);
                    break;
            }
        }
        // Opacity: optional second parameter parsed as a double (may throw NumberFormatException).
        if (parameters.length == 2) {
            opacityProperty().set(Double.parseDouble(parameters[1]));
        }
    }
}
public class NumericUtil {
    /**
     * Checks whether the substring str[sIdx, eIdx) consists solely of Chinese
     * numeric characters (keys of the cnNumeric map).
     *
     * @param str  the string to examine
     * @param sIdx inclusive start index
     * @param eIdx exclusive end index
     * @return true if every character in the range is a Chinese numeral
     */
    public static boolean isCNNumericString(String str, int sIdx, int eIdx) {
        // Any character not registered in cnNumeric fails the whole check.
        for (int i = sIdx; i < eIdx; i++) {
            if (!cnNumeric.containsKey(str.charAt(i))) {
                return false;
            }
        }
        // NOTE: an empty range (sIdx >= eIdx) vacuously returns true.
        return true;
    }
}
public class KeyVaultClientBaseImpl {
    /**
     * Recovers the deleted key to its latest version.
     * Applicable only to soft-delete enabled vaults; recovering a non-deleted key
     * returns an error. Requires the keys/recover permission.
     *
     * @param vaultBaseUrl the vault name, for example https://myvault.vault.azure.net
     * @param keyName      the name of the deleted key
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @return the observable to the KeyBundle object
     */
    public Observable<KeyBundle> recoverDeletedKeyAsync(String vaultBaseUrl, String keyName) {
        // Delegate to the ServiceResponse-producing overload and unwrap the body so
        // callers receive the KeyBundle without the HTTP envelope.
        return recoverDeletedKeyWithServiceResponseAsync(vaultBaseUrl, keyName).map(new Func1<ServiceResponse<KeyBundle>, KeyBundle>() {
            @Override
            public KeyBundle call(ServiceResponse<KeyBundle> response) {
                return response.body();
            }
        });
    }
}
public class Try {
    /**
     * {@inheritDoc}
     *
     * Maps both sides of this Try: a failure has lFn applied to its throwable and is
     * re-wrapped as a failure; a success has rFn applied to its value and is
     * re-wrapped as a success.
     */
    @Override
    public <U extends Throwable, D> Try<U, D> biMap(Function<? super T, ? extends U> lFn, Function<? super A, ? extends D> rFn) {
        return match(t -> failure(lFn.apply(t)), a -> success(rFn.apply(a)));
    }
}
public class MOEA { /** * Collector of { @ link Phenotype } objects , who ' s ( multi - objective ) fitness * value is part of the < a href = " https : / / en . wikipedia . org / wiki / Pareto _ efficiency " > * pareto front < / a > . * @ see # toParetoSet ( IntRange ) * @ param size the allowed size range of the returned pareto set . If the * size of the pareto set is bigger than { @ code size . getMax ( ) } , * during the collection , it is reduced to { @ code size . getMin ( ) } . * Pareto set elements which are close to each other are removed firsts . * @ param dominance the pareto dominance measure of the fitness result type * { @ code C } * @ param comparator the comparator of the elements of the vector type * { @ code C } * @ param distance the distance function of two elements of the vector * type { @ code C } * @ param dimension the dimensionality of the result vector { @ code C } . * Usually { @ code Vec : : length } . * @ param < G > the gene type * @ param < C > the multi object result vector . E . g . { @ code Vec < double [ ] > } * @ return the pareto set collector * @ throws NullPointerException if one the arguments is { @ code null } * @ throws IllegalArgumentException if the minimal pareto set { @ code size } * is smaller than one */ public static < G extends Gene < ? , G > , C extends Comparable < ? super C > > Collector < EvolutionResult < G , C > , ? , ISeq < Phenotype < G , C > > > toParetoSet ( final IntRange size , final Comparator < ? super C > dominance , final ElementComparator < ? super C > comparator , final ElementDistance < ? super C > distance , final ToIntFunction < ? super C > dimension ) { } }
requireNonNull ( size ) ; requireNonNull ( dominance ) ; requireNonNull ( distance ) ; if ( size . getMin ( ) < 1 ) { throw new IllegalArgumentException ( format ( "Minimal pareto set size must be greater than zero: %d" , size . getMin ( ) ) ) ; } return Collector . of ( ( ) -> new Front < G , C > ( size , dominance , comparator , distance , dimension ) , Front :: add , Front :: merge , Front :: toISeq ) ;
public class ApiConfigurations { /** * Explicitly configure the API * @ param apiUrl API URL * @ param apiKey API Key * @ param application Configured application name * @ param environment Configured environment name * @ param allowComDotStackify Configured allow com . stackify . * logging * @ return ApiConfiguration */ public static ApiConfiguration fromPropertiesWithOverrides ( final String apiUrl , final String apiKey , final String application , final String environment , final String allowComDotStackify ) { } }
ApiConfiguration props = ApiConfigurations . fromProperties ( ) ; String mergedApiUrl = ( ( apiUrl != null ) && ( 0 < apiUrl . length ( ) ) ) ? apiUrl : props . getApiUrl ( ) ; String mergedApiKey = ( ( apiKey != null ) && ( 0 < apiKey . length ( ) ) ) ? apiKey : props . getApiKey ( ) ; String mergedApplication = ( ( application != null ) && ( 0 < application . length ( ) ) ) ? application : props . getApplication ( ) ; String mergedEnvironment = ( ( environment != null ) && ( 0 < environment . length ( ) ) ) ? environment : props . getEnvironment ( ) ; ApiConfiguration . Builder builder = ApiConfiguration . newBuilder ( ) ; builder . apiUrl ( mergedApiUrl ) ; builder . apiKey ( mergedApiKey ) ; builder . application ( mergedApplication ) ; builder . environment ( mergedEnvironment ) ; builder . envDetail ( EnvironmentDetails . getEnvironmentDetail ( mergedApplication , mergedEnvironment ) ) ; builder . allowComDotStackify ( Boolean . valueOf ( allowComDotStackify ) ) ; return builder . build ( ) ;
public class AlipayLogger { /** * 通讯错误日志 */ public static void logCommError ( Exception e , String url , String appKey , String method , byte [ ] content ) { } }
if ( ! needEnableLogger ) { return ; } String contentString = null ; try { contentString = new String ( content , "UTF-8" ) ; logCommError ( e , url , appKey , method , contentString ) ; } catch ( UnsupportedEncodingException e1 ) { e1 . printStackTrace ( ) ; }
public class InternalXbaseWithAnnotationsParser {
    /**
     * $ANTLR start synpred2_InternalXbaseWithAnnotations
     *
     * Syntactic-predicate fragment: matches {@code ( ( ruleValidID ) '=' )}.
     * NOTE: ANTLR-generated code (InternalXbaseWithAnnotations.g, lines 121-124) —
     * do not hand-edit; regenerate from the grammar instead.
     */
    public final void synpred2_InternalXbaseWithAnnotations_fragment() throws RecognitionException {
        // InternalXbaseWithAnnotations.g:121:7: ( ( ( ( ruleValidID ) ) '=' ) )
        // InternalXbaseWithAnnotations.g:121:8: ( ( ( ruleValidID ) ) '=' )
        {
            // InternalXbaseWithAnnotations.g:122:8: ( ( ruleValidID ) ) '='
            {
                // InternalXbaseWithAnnotations.g:123:9: ( ruleValidID )
                {
                    // InternalXbaseWithAnnotations.g:124:10: ruleValidID
                    {
                        pushFollow(FOLLOW_8);
                        ruleValidID();
                        state._fsp--;
                        if (state.failed) return;
                    }
                }
                // Token 17 is the '=' terminal in this generated parser.
                match(input, 17, FOLLOW_2);
                if (state.failed) return;
            }
        }
    }
}
public class HttpResponse { /** * Sends a redirect response to the client using the specified redirect * location URL . * @ param location the redirect location URL * @ exception IOException If an I / O error has occurred . */ public void sendRedirect ( String location ) throws IOException { } }
if ( isCommitted ( ) ) throw new IllegalStateException ( "Commited" ) ; _header . put ( HttpFields . __Location , location ) ; setStatus ( __302_Moved_Temporarily ) ; commit ( ) ;
public class BackgroundService {
    /**
     * Stops the background service/thread: cancels the next scheduled run, performs
     * the service's own shutdown hook, then shuts the executor down with a bounded
     * wait. All failures are logged rather than propagated, so each step runs.
     */
    public void stop() {
        lock.lock();
        try {
            // false = do not interrupt an in-flight run; only prevent the next one.
            currentFuture.cancel(false);
            try {
                shutDown();
            } catch (Throwable t) {
                // Best-effort: a failing shutDown() must not prevent executor termination.
                LOGGER.info("Exception in service shut down", t);
            }
            try {
                executorService.shutdown();
                executorService.awaitTermination(5, TimeUnit.SECONDS);
            } catch (Throwable t) {
                // NOTE(review): an InterruptedException from awaitTermination lands here
                // and the thread's interrupt flag is not re-set — confirm that is intended.
                LOGGER.info("Exception in service termination", t);
            }
        } finally {
            lock.unlock();
        }
    }
}
public class Preconditions { /** * Ensures that an object reference passed as a parameter to the calling method is not null . * @ param reference an object reference * @ param errorMessageTemplate a template for the exception message should the check fail . The * message is formed by replacing each { @ code % s } placeholder in the template with an * argument . These are matched by position - the first { @ code % s } gets { @ code * errorMessageArgs [ 0 ] } , etc . Unmatched arguments will be appended to the formatted message in * square braces . Unmatched placeholders will be left as - is . * @ param errorMessageArgs the arguments to be substituted into the message template . Arguments * are converted to strings using { @ link String # valueOf ( Object ) } . * @ return the non - null reference that was validated * @ throws NullPointerException if { @ code reference } is null */ @ CanIgnoreReturnValue public static < T > T checkNotNull ( T reference , @ Nullable String errorMessageTemplate , @ Nullable Object ... errorMessageArgs ) { } }
if ( reference == null ) { // If either of these parameters is null , the right thing happens anyway throw new NullPointerException ( format ( errorMessageTemplate , errorMessageArgs ) ) ; } return reference ;
public class XTraceBufferedImpl {
    /**
     * (non-Javadoc)
     * @see java.util.List#toArray()
     *
     * Copies the buffered events into a new array.
     */
    public Object[] toArray() {
        XEvent array[] = new XEvent[events.size()];
        for (int i = 0; i < events.size(); i++) {
            try {
                array[i] = events.get(i);
            } catch (IOException e) {
                // NOTE(review): a failed read leaves array[i] == null and only prints
                // the stack trace — callers may encounter unexpected nulls; confirm
                // this best-effort behavior is acceptable.
                e.printStackTrace();
            }
        }
        return array;
    }
}
public class Column {
    /**
     * Returns the portion of the field name after the last dot, as field names may
     * actually be paths (e.g. {@code "a.b.c"} yields {@code "c"}).
     *
     * @param fieldName the (possibly dotted) field name
     * @return the final path segment, or the whole name if it contains no dot
     */
    private static String toColumnName(String fieldName) {
        // Fix: the javadoc promises the segment after the LAST dot, but the code used
        // indexOf (the FIRST dot), returning "b.c" instead of "c" for "a.b.c".
        int lastDot = fieldName.lastIndexOf('.');
        if (lastDot > -1) {
            return fieldName.substring(lastDot + 1);
        } else {
            return fieldName;
        }
    }
}
public class AbstractDatabase { /** * Adds a new unique key to given table name . * @ param _ con SQL connection * @ param _ tableName name of table for which the unique key must be created * @ param _ uniqueKeyName name of unique key * @ param _ columns comma separated list of column names for which the unique * key is created * @ return this instance * @ throws SQLException if the unique key could not be created */ public T addUniqueKey ( final Connection _con , final String _tableName , final String _uniqueKeyName , final String _columns ) throws SQLException { } }
final StringBuilder cmd = new StringBuilder ( ) ; cmd . append ( "alter table " ) . append ( _tableName ) . append ( " " ) . append ( "add constraint " ) . append ( _uniqueKeyName ) . append ( " " ) . append ( "unique(" ) . append ( _columns ) . append ( ")" ) ; AbstractDatabase . LOG . debug ( " ..SQL> " + cmd . toString ( ) ) ; final Statement stmt = _con . createStatement ( ) ; try { stmt . execute ( cmd . toString ( ) ) ; } finally { stmt . close ( ) ; } @ SuppressWarnings ( "unchecked" ) final T ret = ( T ) this ; return ret ;
public class BaseDataJsonFieldBo { /** * Set the whole " data " field . * @ param data * must be a valid JSON string * @ return */ public BaseDataJsonFieldBo setData ( String data ) { } }
setAttribute ( ATTR_DATA , data != null ? data . trim ( ) : "{}" ) ; return this ;
public class AttributeMapper { /** * Find index of editorAttribute in editorEntityType lookupAttributes or return null */ private Integer getLookupAttributeIndex ( EditorAttribute editorAttribute , EditorEntityType editorEntityType ) { } }
String editorAttributeId = editorAttribute . getId ( ) ; int index = editorEntityType . getLookupAttributes ( ) . stream ( ) . map ( EditorAttributeIdentifier :: getId ) . collect ( toList ( ) ) . indexOf ( editorAttributeId ) ; return index != - 1 ? index : null ;
public class ColumnFamilyMetrics {
    /**
     * Creates a gauge that will be part of a merged version of all column families.
     * The global gauge merges each CF gauge by adding their values.
     */
    protected <T extends Number> Gauge<T> createColumnFamilyGauge(final String name, Gauge<T> gauge) {
        // The anonymous gauge is the "global" aggregate: each time it is read it sums
        // the current long value of every per-CF gauge registered under this name.
        return createColumnFamilyGauge(name, gauge, new Gauge<Long>() {
            public Long value() {
                long total = 0;
                for (Metric cfGauge : allColumnFamilyMetrics.get(name)) {
                    total = total + ((Gauge<? extends Number>) cfGauge).value().longValue();
                }
                return total;
            }
        });
    }
}
public class DuplicatingCheckpointOutputStream { /** * Returns the state handle from the { @ link # secondaryOutputStream } . Also reports suppressed exceptions from earlier * interactions with that stream . */ public StreamStateHandle closeAndGetSecondaryHandle ( ) throws IOException { } }
if ( secondaryStreamException == null ) { flushInternalBuffer ( ) ; return secondaryOutputStream . closeAndGetHandle ( ) ; } else { throw new IOException ( "Secondary stream previously failed exceptionally" , secondaryStreamException ) ; }
public class OBOOntology { /** * Read a . obo file * @ param f * The . obo file to read . * @ throws Exception */ public void read ( File f ) throws IOException { } }
FileInputStream fis = new FileInputStream ( f ) ; try { read ( new BufferedReader ( new InputStreamReader ( fis , "UTF-8" ) ) ) ; } finally { IOUtils . closeQuietly ( fis ) ; }
public class ApiOvhSms {
    /**
     * Alter this object properties.
     * REST: PUT /sms/{serviceName}/users/{login}
     *
     * @param body        [required] New object properties
     * @param serviceName [required] The internal name of your SMS offer
     * @param login       [required] The sms user login
     */
    public void serviceName_users_login_PUT(String serviceName, String login, OvhUser body) throws IOException {
        String qPath = "/sms/{serviceName}/users/{login}";
        // path() substitutes the template placeholders with the supplied arguments
        // (presumably escaping them — see path()'s implementation).
        StringBuilder sb = path(qPath, serviceName, login);
        exec(qPath, "PUT", sb.toString(), body);
    }
}
public class GroovyShell {
    /**
     * Evaluates some script against the current Binding and returns the result.
     *
     * @param in       the stream reading the script
     * @param fileName the logical file name of the script (which is used to create the
     *                 class name of the script)
     */
    public Object evaluate(Reader in, String fileName) throws CompilationFailedException {
        Script script = null;
        try {
            script = parse(in, fileName);
            return script.run();
        } finally {
            // Unregister the generated script class after running so repeated
            // evaluations do not accumulate per-class metadata.
            if (script != null) {
                InvokerHelper.removeClass(script.getClass());
            }
        }
    }
}
public class Utils {
    /**
     * Rounds a double to a neighboring integer probabilistically: the fractional part
     * is the probability of rounding away from zero (e.g. 0.8 rounds up to 1 with 80%
     * probability and down to 0 with 20%). Averaged over many draws the results
     * converge to the original value.
     *
     * @param value the double value
     * @param rand  the random number generator (consumes exactly one draw)
     * @return the resulting integer value
     */
    public static int probRound(double value, Random rand) {
        // Work with the magnitude, then restore the sign at the end; this mirrors
        // the positive/negative branches of the classic formulation exactly.
        double magnitude = Math.abs(value);
        double floor = Math.floor(magnitude);
        double fraction = magnitude - floor;
        int rounded = (rand.nextDouble() < fraction) ? (int) floor + 1 : (int) floor;
        return (value >= 0) ? rounded : -rounded;
    }
}
public class BackupPlanInput { /** * An array of < code > BackupRule < / code > objects , each of which specifies a scheduled task that is used to back up a * selection of resources . * @ param rules * An array of < code > BackupRule < / code > objects , each of which specifies a scheduled task that is used to back * up a selection of resources . */ public void setRules ( java . util . Collection < BackupRuleInput > rules ) { } }
if ( rules == null ) { this . rules = null ; return ; } this . rules = new java . util . ArrayList < BackupRuleInput > ( rules ) ;
public class TimerNpImpl { /** * Called by the TimerTaskHandler after successfully completing a timer * expiration to schedule the next expiration . The timer will be cancelled * if there are no further expirations . < p > * { @ link # calculateNextExpiration ( ) } must be called prior to this method * or the timer will not be scheduled properly . < p > */ synchronized void scheduleNext ( ) { } }
// Synchronized to insure a destroyed or cancelled ( ivTaskHanler = null ) // timer is not re - scheduled . If the timer is in the cancelled state // but not yet destroyed , then nothing should be done with it here // as the canceling thread will either destroy it or rollback and // and call schedule to re - create the task handler . RTC107334 if ( ! ivDestroyed ) { if ( ivExpiration != 0 ) { if ( ivTaskHandler != null ) { ivTaskHandler . scheduleNext ( ivExpiration ) ; } else { if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) ) Tr . debug ( tc , "scheduleNext: not scheduled, timer in cancelled state : " + ivTaskId ) ; } } else { remove ( true ) ; // permanently remove the timer since it will never expire again } }
public class ReflectionHelper { /** * Try to determine the names of the method parameters . Does not work on pre Java 8 jdk and * given method owner has to be compiled with " - parameters " option . NOTICE : The correct function * depends on the JVM implementation . * @ param m * @ return Empty list if no parameters present or names could not be determined . List of * parameter names else . */ public static Map < String , Integer > getMethodParameterIndexes ( final Method m ) { } }
if ( ( GETPARAMETERS == null ) || ( m == null ) ) { return Collections . emptyMap ( ) ; } Map < String , Integer > paramNames = new HashMap < String , Integer > ( ) ; try { Object [ ] params = ( Object [ ] ) GETPARAMETERS . invoke ( m ) ; if ( params . length == 0 ) { return Collections . emptyMap ( ) ; } Method getName = findMethodByName ( params [ 0 ] . getClass ( ) , "getName" ) ; if ( getName == null ) { return Collections . emptyMap ( ) ; } int i = - 1 ; for ( Object o : params ) { ++ i ; String name = ( String ) getName . invoke ( o ) ; if ( name == null ) { continue ; } paramNames . put ( name . toUpperCase ( Locale . ENGLISH ) , i ) ; } return Collections . unmodifiableMap ( paramNames ) ; } catch ( IllegalArgumentException e ) { throw new RuntimeException ( e ) ; } catch ( IllegalAccessException e ) { throw new RuntimeException ( e ) ; } catch ( InvocationTargetException e ) { throw new RuntimeException ( e ) ; }
public class BundleEventAdapter {
    /**
     * Receives notification of a bundle lifecycle change event and adapts it to the
     * format required for the <code>EventAdmin</code> service.
     *
     * @param bundleEvent the bundle lifecycle event to publish as an <code>Event</code>
     */
    public void bundleChanged(BundleEvent bundleEvent) {
        final String topic = getTopic(bundleEvent);
        // Bail quickly if the event is one that should be ignored (no mapped topic).
        if (topic == null) {
            return;
        }
        // Event properties
        Map<String, Object> eventProperties = new HashMap<String, Object>();
        // "event" --> the original event object
        eventProperties.put(EventConstants.EVENT, bundleEvent);
        // Only populated for a non-null result from getBundle():
        // "bundle.id"           --> source bundle id as Long
        // "bundle.symbolicName" --> source bundle's symbolic name (may be absent)
        // "bundle"              --> the source bundle object
        Bundle bundle = bundleEvent.getBundle();
        if (bundle != null) {
            eventProperties.put(EventConstants.BUNDLE_ID, Long.valueOf(bundle.getBundleId()));
            String symbolicName = bundle.getSymbolicName();
            if (symbolicName != null) {
                eventProperties.put(EventConstants.BUNDLE_SYMBOLICNAME, symbolicName);
            }
            eventProperties.put(EventConstants.BUNDLE, bundle);
        }
        // Construct and fire the event asynchronously (postEvent does not block).
        Event event = new Event(topic, eventProperties);
        eventAdmin.postEvent(event);
    }
}
public class LogFileHandle {
    /**
     * Forces the contents of the memory-mapped view of the log file to disk. Having
     * invoked this method the caller can be certain that any data added to the log as
     * part of a prior log write is now stored persistently on disk.
     * <p>
     * Any failure marks this handle with _exceptionInForce and is rethrown as an
     * InternalLogException (preserving the original cause).
     */
    protected void force() throws InternalLogException {
        if (tc.isEntryEnabled())
            Tr.entry(tc, "force", this);
        try {
            if (_isMapped) {
                // Note: on Win2K we can get an IOException from this even though it is not declared
                ((MappedByteBuffer) _fileBuffer).force();
            } else {
                // Write the "pending" WritableLogRecords before forcing the channel.
                writePendingToFile();
                // false = metadata is not required to be forced, only file content.
                _fileChannel.force(false);
            }
        } catch (java.io.IOException ioe) {
            FFDCFilter.processException(ioe, "com.ibm.ws.recoverylog.spi.LogFileHandle.force", "1049", this);
            _exceptionInForce = true;
            if (tc.isEventEnabled())
                Tr.event(tc, "Unable to force file " + _fileName);
            // d453958: moved terminateserver code to MultiScopeRecoveryLog.markFailed method.
            if (tc.isEntryEnabled())
                Tr.exit(tc, "force", "InternalLogException");
            throw new InternalLogException(ioe);
        } catch (InternalLogException exc) {
            FFDCFilter.processException(exc, "com.ibm.ws.recoverylog.spi.LogFileHandle.force", "1056", this);
            _exceptionInForce = true;
            if (tc.isEventEnabled())
                Tr.event(tc, "Unable to force file " + _fileName);
            if (tc.isEntryEnabled())
                Tr.exit(tc, "force", "InternalLogException");
            throw exc;
        } catch (LogIncompatibleException exc) {
            FFDCFilter.processException(exc, "com.ibm.ws.recoverylog.spi.LogFileHandle.force", "1096", this);
            _exceptionInForce = true;
            if (tc.isEventEnabled())
                Tr.event(tc, "Unable to force file " + _fileName);
            if (tc.isEntryEnabled())
                Tr.exit(tc, "force", "InternalLogException");
            throw new InternalLogException(exc);
        }
        if (tc.isEntryEnabled())
            Tr.exit(tc, "force");
    }
}
public class h2odriver {
    /**
     * The run method called by ToolRunner.
     *
     * @param args Arguments after ToolRunner arguments have been removed.
     * @return Exit value of program.
     */
    @Override
    public int run(String[] args) {
        int rv = -1;
        try {
            rv = run2(args);
        } catch (org.apache.hadoop.mapred.FileAlreadyExistsException e) {
            // Output directory already exists: finish the Ctrl-C handler (if any),
            // report cleanly without a stack trace, and terminate the JVM.
            if (ctrlc != null) {
                ctrlc.setComplete();
            }
            System.out.println("ERROR: " + (e.getMessage() != null ? e.getMessage() : "(null)"));
            System.exit(1);
        } catch (Exception e) {
            // NOTE(review): System.exit here prevents ToolRunner from ever seeing a
            // non-zero return value — confirm the hard exit is intentional.
            System.out.println("ERROR: " + (e.getMessage() != null ? e.getMessage() : "(null)"));
            e.printStackTrace();
            System.exit(1);
        }
        return rv;
    }
}
public class ListWorkerBlocksRequestMarshaller {
    /**
     * Marshalls the given request object's fields into the protocol marshaller.
     *
     * @throws SdkClientException if the request is null or marshalling fails
     */
    public void marshall(ListWorkerBlocksRequest listWorkerBlocksRequest, ProtocolMarshaller protocolMarshaller) {
        if (listWorkerBlocksRequest == null) {
            throw new SdkClientException("Invalid argument passed to marshall(...)");
        }
        try {
            protocolMarshaller.marshall(listWorkerBlocksRequest.getNextToken(), NEXTTOKEN_BINDING);
            protocolMarshaller.marshall(listWorkerBlocksRequest.getMaxResults(), MAXRESULTS_BINDING);
        } catch (Exception e) {
            // Wrap any marshalling failure in the SDK's client exception, keeping the cause.
            throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
        }
    }
}
public class RoomInfoImpl {
    /**
     * (non-Javadoc)
     * @see com.tvd12.ezyfox.core.command.UpdateRoomInfo#setPassword(java.lang.String)
     */
    @Override
    public void setPassword(String password) {
        // Keep the wrapped room and the underlying API room in sync.
        room.setPassword(password);
        apiRoom.setPassword(password);
    }
}
public class ConditionalMultibind { /** * Conditionally bind target to the set . If " condition " returns true , add a binding to " target " . * @ param property the property to inspect on * @ param condition the predicate used to verify whether to add a binding to " target " * @ param target the target type to which it adds a binding . * @ return self to support a continuous syntax for adding more conditional bindings . */ @ PublicApi public ConditionalMultibind < T > addConditionBinding ( String property , Predicate < String > condition , TypeLiteral < T > target ) { } }
if ( matchCondition ( property , condition ) ) { multibinder . addBinding ( ) . to ( target ) ; } return this ;
public class WatchTable {
    /**
     * Notification on a table put: dispatches registered watch entries for the key.
     *
     * @param key  the key of the updated row
     * @param type the notification type (local/remote)
     */
    @Override
    public void onPut(byte[] key, TypePut type) {
        WatchKey watchKey = new WatchKey(key);
        switch (type) {
            case LOCAL:
                // Fire the watches registered on this node for the key.
                ArrayList<WatchEntry> listLocal = _entryMapLocal.get(watchKey);
                onPut(listLocal, key);
                break;
            case REMOTE: {
                // Route by the key's pod node: copies handle their own local watches,
                // while only the owner fans out remote watch events.
                int hash = _table.getPodHash(key);
                TablePodNodeAmp node = _table.getTablePod().getNode(hash);
                if (node.isSelfCopy()) {
                    // copies are responsible for their own local watch events
                    onPut(key, TypePut.LOCAL);
                }
                if (node.isSelfOwner()) {
                    // only the owner sends remote watch events
                    ArrayList<WatchEntry> listRemote = _entryMapRemote.get(watchKey);
                    onPut(listRemote, key);
                }
                break;
            }
            default:
                throw new IllegalArgumentException(String.valueOf(type));
        }
    }
}
public class Tuple7 {
    /**
     * Splits this tuple into two tuples of degree 2 and 5: (t1, t2) and (t3..t7).
     */
    public final Tuple2<Tuple2<T1, T2>, Tuple5<T3, T4, T5, T6, T7>> split2() {
        // limit2() keeps the first two elements, skip2() the remaining five.
        return new Tuple2<>(limit2(), skip2());
    }
}
public class MusicController {
    /**
     * Accepts HTTP GET requests.
     * URL:  /musics/{id}
     * View: /WEB-INF/jsp/music/show.jsp
     *
     * Shows the page with information about the given Music. GET is used because
     * showing a Music is a safe operation with no side effects. VRaptor extracts
     * path-template variables automatically: GET /musics/15 sets music.id=15 on the
     * request, so music.getId() equals 15 here.
     */
    @Path("/musics/{music.id}")
    @Get
    public void show(Music music) {
        // The parameter carries only the id from the URI; load the full entity and
        // expose it to the view under the key "music".
        result.include("music", musicDao.load(music));
    }
}
public class TransactionWrapper {
    /**
     * Maps the wrapped transaction's JTA status onto the completion status to report
     * to a recovering participant.
     *
     * @return one of the Status.STATUS_* completion codes
     * @throws NotPrepared if the transaction has not yet reached the prepared state
     */
    public int replay_completion() throws NotPrepared {
        if (tc.isEntryEnabled())
            Tr.entry(tc, "replay_completion");
        int result = Status.STATUS_ROLLEDBACK;
        final int status = _transaction.getStatus();
        final int numberOfResources = _transaction.getResources().numRegistered();
        if (tc.isDebugEnabled())
            Tr.debug(tc, "replay_completion status:" + status);
        switch (status) {
            // If the transaction is still active, raise the NotPrepared exception.
            // The transaction must be marked rollback-only at this point because we
            // cannot allow the transaction to complete if a participant has failed.
            case javax.transaction.Status.STATUS_ACTIVE:
                try {
                    _transaction.setRollbackOnly();
                } catch (Throwable exc) {
                    FFDCFilter.processException(exc, "com.ibm.tx.remote.TransactionWrapper.replay_completion", "171", this);
                    if (tc.isEventEnabled())
                        Tr.event(tc, "replay_completion caught exception setting coordinator rollback_only", exc);
                }
                // Intentional fall-through: an ACTIVE transaction is treated exactly
                // like MARKED_ROLLBACK once it has been marked rollback-only above.
            case javax.transaction.Status.STATUS_MARKED_ROLLBACK:
                final NotPrepared npe = new NotPrepared();
                if (tc.isEntryEnabled())
                    Tr.exit(tc, "replay_completion", npe);
                throw npe;
            // If the transaction is prepared, the caller must wait for the
            // Coordinator to tell it what to do, so return an unknown status, and
            // do nothing. Note that if this Coordinator is sitting waiting for
            // its superior, this could take a long time.
            case javax.transaction.Status.STATUS_PREPARED:
                result = Status.STATUS_UNKNOWN;
                break;
            case javax.transaction.Status.STATUS_PREPARING:
                if (numberOfResources == 1) {
                    // There is only 1 resource registered and it is the caller so
                    // we will be issuing commit_one_phase to it, ie will have delegated
                    // responsibility to it to complete the txn, so we "no longer exist"
                    // as part of the transaction, so raise object_not_exist.
                    final OBJECT_NOT_EXIST one = new OBJECT_NOT_EXIST();
                    if (tc.isEntryEnabled())
                        Tr.exit(tc, "replay_completion", one);
                    throw one;
                }
                result = Status.STATUS_UNKNOWN;
                break;
            // If the transaction has been committed, the caller will receive a commit.
            case javax.transaction.Status.STATUS_COMMITTING:
                result = Status.STATUS_COMMITTING;
                break;
            case javax.transaction.Status.STATUS_COMMITTED:
                if (numberOfResources == 1) {
                    // There is only 1 resource registered and it is the caller so
                    // we have issued commit_one_phase to it, ie will have delegated
                    // responsibility to it to complete the txn, so we "no longer exist"
                    // as part of the transaction, so raise object_not_exist.
                    final OBJECT_NOT_EXIST one = new OBJECT_NOT_EXIST();
                    if (tc.isEntryEnabled())
                        Tr.exit(tc, "replay_completion", one);
                    throw one;
                }
                result = Status.STATUS_COMMITTED;
                break;
            case javax.transaction.Status.STATUS_NO_TRANSACTION:
                result = Status.STATUS_NO_TRANSACTION;
                break;
            case javax.transaction.Status.STATUS_ROLLEDBACK:
                if (numberOfResources == 1) {
                    // There is only 1 resource registered and it is the caller so
                    // we have issued commit_one_phase to it, ie will have delegated
                    // responsibility to it to complete the txn, so we "no longer exist"
                    // as part of the transaction, so raise object_not_exist.
                    final OBJECT_NOT_EXIST one = new OBJECT_NOT_EXIST();
                    if (tc.isEntryEnabled())
                        Tr.exit(tc, "replay_completion", one);
                    throw one;
                }
                // Intentional fall-through into default:
                // In any other situation, assume that the transaction has been rolled
                // back. As there is a Coordinator, it will direct the Resource to roll
                // back.
            default:
                result = Status.STATUS_ROLLEDBACK;
        }
        if (tc.isEntryEnabled())
            Tr.exit(tc, "replay_completion", result);
        return result;
    }
}
public class ZealotKhala { /** * 生成like模糊查询的SQL片段 . * @ param field 数据库字段 * @ param value 值 * @ return ZealotKhala实例 */ public ZealotKhala like ( String field , Object value ) { } }
return this . doLike ( ZealotConst . ONE_SPACE , field , value , true , true ) ;
public class AbstractBlockParser {

    /**
     * Hook invoked for each SAX start-element event.
     *
     * <p>This base implementation intentionally does nothing; subclasses
     * override it to react to the elements they care about.
     *
     * @param uri the namespace URI of the element
     * @param localName the local name of the element
     * @param qName the qualified name of the element
     * @param attributes the element's attributes
     * @throws SAXException if a subclass implementation fails
     */
    protected void startElementInternal(String uri, String localName, String qName, Attributes attributes) throws SAXException {
        // Intentionally empty: subclasses supply element-specific handling.
    }
}
public class DefaultCacheManagerService { /** * { @ inheritDoc } */ @ Override public void updateResourceInCache ( ResourceType type , String location ) { } }
try { if ( ResourceType . NAMESPACES . equals ( type ) ) { rebuildNamespace ( location ) ; } else { downloadResource ( location , type ) ; } reportable . output ( "Updated resource: " + location ) ; } catch ( ResourceDownloadError e ) { reportable . error ( e . getUserFacingMessage ( ) ) ; } catch ( IndexingFailure e ) { reportable . error ( e . getUserFacingMessage ( ) ) ; } catch ( IOException e ) { logExceptionSimple ( location , e ) ; }
public class Future { /** * Construct a Future syncrhonously that contains a single value extracted from the supplied reactive - streams Publisher * < pre > * { @ code * ReactiveSeq < Integer > stream = ReactiveSeq . of ( 1,2,3 ) ; * Future < Integer > future = Future . fromPublisher ( stream ) ; * Future [ 1] * < / pre > * @ param pub Publisher to extract value from * @ return Future populated syncrhonously from Publisher */ public static < T > Future < T > fromPublisher ( final Publisher < ? extends T > pub ) { } }
if ( pub instanceof Future ) { return ( Future < T > ) pub ; } Future < T > result = future ( ) ; pub . subscribe ( new Subscriber < T > ( ) { @ Override public void onSubscribe ( Subscription s ) { s . request ( 1l ) ; } @ Override public void onNext ( T t ) { result . complete ( t ) ; } @ Override public void onError ( Throwable t ) { result . completeExceptionally ( t ) ; } @ Override public void onComplete ( ) { if ( ! result . isDone ( ) ) { result . completeExceptionally ( new NoSuchElementException ( ) ) ; } } } ) ; return result ;
public class IfcDistributionControlElementImpl { /** * < ! - - begin - user - doc - - > * < ! - - end - user - doc - - > * @ generated */ @ SuppressWarnings ( "unchecked" ) public EList < IfcRelFlowControlElements > getAssignedToFlowElement ( ) { } }
return ( EList < IfcRelFlowControlElements > ) eGet ( Ifc2x3tc1Package . Literals . IFC_DISTRIBUTION_CONTROL_ELEMENT__ASSIGNED_TO_FLOW_ELEMENT , true ) ;
public class PrcMoveItemsLineSave { /** * < p > Process entity request . < / p > * @ param pAddParam additional param , e . g . return this line ' s * document in " nextEntity " for farther process * @ param pRequestData Request Data * @ param pEntity Entity to process * @ return Entity processed for farther process or null * @ throws Exception - an exception */ @ Override public final MoveItemsLine process ( final Map < String , Object > pAddParam , final MoveItemsLine pEntity , final IRequestData pRequestData ) throws Exception { } }
if ( pEntity . getIsNew ( ) ) { if ( pEntity . getItsQuantity ( ) . doubleValue ( ) <= 0 ) { throw new ExceptionWithCode ( ExceptionWithCode . WRONG_PARAMETER , "quantity_less_or_equal_zero" ) ; } AccSettings accSettings = getSrvAccSettings ( ) . lazyGetAccSettings ( pAddParam ) ; pEntity . setItsQuantity ( pEntity . getItsQuantity ( ) . setScale ( accSettings . getQuantityPrecision ( ) , accSettings . getRoundingMode ( ) ) ) ; // Beige - Orm refresh : pEntity . setItsOwner ( getSrvOrm ( ) . retrieveEntity ( pAddParam , pEntity . getItsOwner ( ) ) ) ; getSrvOrm ( ) . insertEntity ( pAddParam , pEntity ) ; pEntity . setIsNew ( false ) ; this . srvWarehouseEntry . move ( pAddParam , pEntity , pEntity . getWarehouseSiteFrom ( ) , pEntity . getWarehouseSiteTo ( ) ) ; // optimistic locking ( dirty check ) : Long ownerVersion = Long . valueOf ( pRequestData . getParameter ( MoveItems . class . getSimpleName ( ) + ".ownerVersion" ) ) ; pEntity . getItsOwner ( ) . setItsVersion ( ownerVersion ) ; getSrvOrm ( ) . updateEntity ( pAddParam , pEntity . getItsOwner ( ) ) ; pAddParam . put ( "nextEntity" , pEntity . getItsOwner ( ) ) ; pAddParam . put ( "nameOwnerEntity" , MoveItems . class . getSimpleName ( ) ) ; return null ; } else { throw new ExceptionWithCode ( ExceptionWithCode . FORBIDDEN , "Attempt to update Sales Return line by " + pAddParam . get ( "user" ) ) ; }
public class ValidationMappingDescriptorImpl { /** * If not already created , a new < code > bean < / code > element will be created and returned . * Otherwise , the first existing < code > bean < / code > element will be returned . * @ return the instance defined for the element < code > bean < / code > */ public BeanType < ValidationMappingDescriptor > getOrCreateBean ( ) { } }
List < Node > nodeList = model . get ( "bean" ) ; if ( nodeList != null && nodeList . size ( ) > 0 ) { return new BeanTypeImpl < ValidationMappingDescriptor > ( this , "bean" , model , nodeList . get ( 0 ) ) ; } return createBean ( ) ;
public class EntropyInjector { /** * Removes the entropy marker string from the path , if the given file system is an * entropy - injecting file system ( implements { @ link EntropyInjectingFileSystem } ) and * the entropy marker key is present . Otherwise , this returns the path as is . * @ param path The path to filter . * @ return The path without the marker string . */ public static Path removeEntropyMarkerIfPresent ( FileSystem fs , Path path ) { } }
final EntropyInjectingFileSystem efs = getEntropyFs ( fs ) ; if ( efs == null ) { return path ; } else { try { return resolveEntropy ( path , efs , false ) ; } catch ( IOException e ) { // this should never happen , because the path was valid before and we only remove characters . // rethrow to silence the compiler throw new FlinkRuntimeException ( e . getMessage ( ) , e ) ; } }
public class CompactStartElement { /** * Internal methods */ protected Attribute constructAttr ( String [ ] raw , int rawIndex , boolean isDef ) { } }
return new AttributeEventImpl ( getLocation ( ) , raw [ rawIndex ] , raw [ rawIndex + 1 ] , raw [ rawIndex + 2 ] , raw [ rawIndex + 3 ] , ! isDef ) ;
public class ApiOvhLicensevirtuozzo { /** * Get the orderable Virtuozzo versions * REST : GET / license / virtuozzo / orderableVersions * @ param ip [ required ] Your license Ip */ public ArrayList < OvhVirtuozzoOrderConfiguration > orderableVersions_GET ( String ip ) throws IOException { } }
String qPath = "/license/virtuozzo/orderableVersions" ; StringBuilder sb = path ( qPath ) ; query ( sb , "ip" , ip ) ; String resp = exec ( qPath , "GET" , sb . toString ( ) , null ) ; return convertTo ( resp , t4 ) ;
public class MainApplication {

    /**
     * Release the user's preferences.
     *
     * <p>Frees every cached per-user registration key and the registration
     * record, persists any pending changes to the user-info record before
     * releasing it, then delegates to the superclass cleanup.
     */
    public void free() {
        // Free all cached registration keys, then the registration record itself.
        Record recUserRegistration = (Record) m_systemRecordOwner.getRecord(UserRegistrationModel.USER_REGISTRATION_FILE);
        if (recUserRegistration != null) {
            for (UserProperties regKey : m_htRegistration.values()) {
                regKey.free(); // Remove yourself
            }
            recUserRegistration.free();
            recUserRegistration = null;
        }
        // Persist unsaved changes to the user-info record before releasing it:
        // EDIT_ADD means the record was never written, so insert; otherwise update.
        Record recUserInfo = (Record) this.getUserInfo();
        if (recUserInfo != null) {
            if (recUserInfo.isModified(false)) {
                try {
                    if (recUserInfo.getEditMode() == Constants.EDIT_ADD)
                        recUserInfo.add();
                    else
                        recUserInfo.set();
                } catch (DBException ex) {
                    // Best-effort save during teardown; continue freeing regardless.
                    ex.printStackTrace();
                }
            }
            recUserInfo.free();
            recUserInfo = null;
        }
        super.free();
    }
}
public class RingSet { /** * Returns a vector of all rings that this bond is part of . * @ param bond The bond to be checked * @ return A vector of all rings that this bond is part of */ @ Override public IRingSet getRings ( IBond bond ) { } }
IRingSet rings = bond . getBuilder ( ) . newInstance ( IRingSet . class ) ; Ring ring ; for ( int i = 0 ; i < getAtomContainerCount ( ) ; i ++ ) { ring = ( Ring ) getAtomContainer ( i ) ; if ( ring . contains ( bond ) ) { rings . addAtomContainer ( ring ) ; } } return rings ;
public class DescribeStackEventsResult { /** * A list of < code > StackEvents < / code > structures . * @ return A list of < code > StackEvents < / code > structures . */ public java . util . List < StackEvent > getStackEvents ( ) { } }
if ( stackEvents == null ) { stackEvents = new com . amazonaws . internal . SdkInternalList < StackEvent > ( ) ; } return stackEvents ;
public class AttributeCertificate { /** * Produce an object suitable for an ASN1OutputStream . * < pre > * AttributeCertificate : : = SEQUENCE { * acinfo AttributeCertificateInfo , * signatureAlgorithm AlgorithmIdentifier , * signatureValue BIT STRING * < / pre > */ public DERObject toASN1Object ( ) { } }
ASN1EncodableVector v = new ASN1EncodableVector ( ) ; v . add ( acinfo ) ; v . add ( signatureAlgorithm ) ; v . add ( signatureValue ) ; return new DERSequence ( v ) ;
public class Utility {

    /**
     * Encodes the given instant as a six-digit yyMMdd integer using the
     * default time zone, e.g. 2016-02-02 becomes 160202.
     *
     * @param time the instant, as epoch milliseconds
     * @return the date encoded as {@code yy * 10000 + MM * 100 + dd}
     */
    public static int yyMMdd(long time) {
        final Calendar calendar = Calendar.getInstance();
        calendar.setTimeInMillis(time);
        final int yy = calendar.get(Calendar.YEAR) % 100;
        final int mm = calendar.get(Calendar.MONTH) + 1; // Calendar months are 0-based
        final int dd = calendar.get(Calendar.DAY_OF_MONTH);
        return yy * 10000 + mm * 100 + dd;
    }
}
public class LDAPRule { /** * Method description * @ return * @ throws LDAPException */ private InMemoryDirectoryServer createDirectoryServer ( ) throws LDAPException { } }
try { InMemoryDirectoryServerConfig cfg = new InMemoryDirectoryServerConfig ( baseDN ) ; InetAddress address = InetAddress . getLocalHost ( ) ; host = address . getHostName ( ) ; cfg . addAdditionalBindCredentials ( additionalBindDN , additionalBindPassword ) ; cfg . setListenerConfigs ( new InMemoryListenerConfig ( "listener" , address , port , ServerSocketFactory . getDefault ( ) , SocketFactory . getDefault ( ) , null ) ) ; // disable schema check cfg . setSchema ( null ) ; return new InMemoryDirectoryServer ( cfg ) ; } catch ( UnknownHostException ex ) { throw new RuntimeException ( "could not read localhost address" , ex ) ; }
public class CmsMemoryObjectCache { /** * Returns an object from the cache . < p > * @ param owner the owner class of the cached object ( used to ensure keys don ' t overlap ) * @ param key the key to lookup the object for * @ return an object from the cache , or < code > null < / code > if no object matches the given key */ public Object getCachedObject ( Class < ? > owner , String key ) { } }
key = owner . getName ( ) . concat ( key ) ; return OpenCms . getMemoryMonitor ( ) . getCachedMemObject ( key ) ;
public class JcrTools { /** * Get or create a node at the specified path and node type . * @ param session the JCR session . may not be null * @ param path the path of the desired node to be found or created . may not be null * @ param nodeType the node type . may be null * @ return the existing or newly created node * @ throws RepositoryException * @ throws IllegalArgumentException if either the session or path argument is null */ public Node findOrCreateNode ( Session session , String path , String nodeType ) throws RepositoryException { } }
return findOrCreateNode ( session , path , nodeType , nodeType ) ;
public class JDefaultBase { /** * Finds map of values based on a key * @ param key to lookup * @ return map of key value pairs */ protected static Object fetchObject ( String key ) { } }
String [ ] path = key . split ( "\\." ) ; Object currentValue = fakeValuesMap ; for ( String pathSection : path ) { currentValue = ( ( Map < String , Object > ) currentValue ) . get ( pathSection ) ; } return currentValue ;
public class ServerSupport {

    /**
     * The default implementation does parallel launches and throws an
     * exception only if it is unable to launch any virtual machines.
     *
     * <p>For count &gt; 1, each VM is launched asynchronously under a
     * host name derived from the requested one, and the resulting ids are
     * streamed back through a populator as the launch futures complete.
     */
    @Override
    public @Nonnull Iterable<String> launchMany(final @Nonnull VMLaunchOptions withLaunchOptions, final @Nonnegative int count) throws CloudException, InternalException {
        if (count < 1) {
            throw new InternalException("Invalid attempt to launch less than 1 virtual machine (requested " + count + ").");
        }
        if (count == 1) {
            // Single VM: launch synchronously and return its id.
            return Collections.singleton(launch(withLaunchOptions).getProviderVirtualMachineId());
        }
        final List<Future<String>> results = new ArrayList<Future<String>>();
        // windows on GCE follows same naming constraints as regular instances,
        // 1-62 lower and numbers, must begin with a letter.
        NamingConstraints c = NamingConstraints.getAlphaNumeric(1, 63).withNoSpaces().withRegularExpression("(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)").lowerCaseOnly().constrainedBy('-');
        String baseHost = c.convertToValidName(withLaunchOptions.getHostName(), Locale.US);
        if (baseHost == null) {
            // Could not derive a valid name; fall back to the raw host name.
            baseHost = withLaunchOptions.getHostName();
        }
        // Kick off one async launch per VM, each with an indexed host/friendly name.
        for (int i = 1; i <= count; i++) {
            String hostName = c.incrementName(baseHost, i);
            String friendlyName = withLaunchOptions.getFriendlyName() + "-" + i;
            VMLaunchOptions options = withLaunchOptions.copy(hostName == null ? withLaunchOptions.getHostName() + "-" + i : hostName, friendlyName);
            results.add(launchAsync(options));
        }
        PopulatorThread<String> populator = new PopulatorThread<String>(new JiteratorPopulator<String>() {
            @Override
            public void populate(@Nonnull Jiterator<String> iterator) throws Exception {
                List<Future<String>> original = results;
                List<Future<String>> copy = new ArrayList<Future<String>>();
                Exception exception = null;
                boolean loaded = false;
                // Poll the launch futures, draining completed ones as they finish
                // and carrying the still-pending ones into the next pass.
                while (!original.isEmpty()) {
                    for (Future<String> result : original) {
                        if (result.isDone()) {
                            try {
                                iterator.push(result.get());
                                loaded = true;
                            } catch (Exception e) {
                                // Remember the failure; only fatal if nothing launched.
                                exception = e;
                            }
                        } else {
                            copy.add(result);
                        }
                    }
                    original = copy;
                    // copy has to be a new list else we'll get into concurrently
                    // modified list state
                    copy = new ArrayList<Future<String>>();
                }
                // Fail only if not a single VM could be launched.
                if (exception != null && !loaded) {
                    throw exception;
                }
            }
        });
        populator.populate();
        return populator.getResult();
    }
}
public class WSRdbManagedConnectionImpl {

    /**
     * Creates a new connection handle for the underlying physical connection
     * represented by this ManagedConnection instance. The physical connection
     * here is the JDBC connection.
     *
     * <p>The handle is used by application code to refer to the underlying
     * physical connection. The ManagedConnection uses the Subject and the
     * ConnectionRequestInfo to set the state of the physical connection.
     *
     * <p>Fails fast when the ManagedConnection is stale (purge-pool with
     * immediate option) or when the transaction state does not permit new
     * handles.
     *
     * @param subject the caller's security context (may be null)
     * @param cxRequestInfo the ConnectionRequestInfo instance
     * @return the connection handle (a JDBC or CCI connection handle)
     * @exception ResourceException if the operation fails
     */
    public Object getConnection(Subject subject, ConnectionRequestInfo cxRequestInfo) throws ResourceException {
        final boolean isTraceOn = TraceComponent.isAnyTracingEnabled();
        // cannot print subject - causes security violation
        // At least trace whether the Subject is null or not.
        if (isTraceOn && tc.isEntryEnabled())
            Tr.entry(this, tc, "getConnection", subject == null ? null : "subject", AdapterUtil.toString(cxRequestInfo));
        // if the MC marked Stale, it means the user requested a purge pool with
        // an immediate option so don't allow any connection handles to be
        // created on this MC, instead throw a SCE
        if (_mcStale) {
            if (isTraceOn && tc.isDebugEnabled())
                Tr.debug(this, tc, "MC is stale");
            throw new DataStoreAdapterException("INVALID_CONNECTION", AdapterUtil.staleX(), WSRdbManagedConnectionImpl.class);
        }
        // if you aren't in a valid state when doing getConnection, you can't
        // get a connection from this MC
        int transactionState = stateMgr.transtate;
        if ((transactionState != WSStateManager.LOCAL_TRANSACTION_ACTIVE) && (transactionState != WSStateManager.GLOBAL_TRANSACTION_ACTIVE) && (transactionState != WSStateManager.RRS_GLOBAL_TRANSACTION_ACTIVE) && (transactionState != WSStateManager.NO_TRANSACTION_ACTIVE)) {
            String message = "Operation 'getConnection' is not permitted for transaction state: " + getTransactionStateAsString();
            ResourceException resX = new DataStoreAdapterException("WS_INTERNAL_ERROR", null, getClass(), message);
            // Use FFDC to log the possible components list.
            FFDCFilter.processException(resX, "com.ibm.ws.rsadapter.spi.WSRdbManagedConnectionImpl.cleanupTransactions", "939", this, new Object[] { message, ". Possible components: Connection Manager" });
            if (isTraceOn && tc.isEntryEnabled())
                Tr.exit(this, tc, "getConnection", "bad transaction state " + getTransactionStateAsString());
            throw resX;
        }
        // The Subject must match the existing subject. User and password of the
        // CRI must match the existing values. Other CRI properties may be
        // modified if there aren't any handles on this ManagedConnection yet.
        // Subject matching requires doPrivileged code, which is costly. We can
        // trust the WebSphere ConnectionManager to always send us a matching
        // Subject. Until it becomes necessary for the RRA to work with other
        // ConnectionManagers, we will NOT compare Subjects.
        WSConnectionRequestInfoImpl newCRI = (WSConnectionRequestInfoImpl) cxRequestInfo;
        // Before, isolation level was only allowed to switch between CCI
        // connections. Now, isolation level switching is allowed between JDBC
        // and CCI connections.
        if (!supportIsolvlSwitching) {
            if (!cri.equals(newCRI))
                replaceCRI(newCRI);
        } else // must be CMP
        {
            replaceCRIForCCI(newCRI);
        }
        // Since the CRI of the managed connection doesn't reflect the real
        // connection property values, synchronize the properties — but avoid
        // resetting properties when a handle has already been created.
        if (numHandlesInUse == 0) {
            // Refresh our copy of the connectionSharing setting with each first
            // connection handle
            connectionSharing = dsConfig.get().connectionSharing;
            /*
             * 1) setTransactionIsolation OK as it will only happen if no transaction is happening
             * 2) setReadOnly OK as it will only happen if no transaction is happening
             * 3) setTypeMap OK as it will only happen if no transaction is happening
             * 4) setHoldability OK as it will only happen if no transaction is happening
             */
            synchronizePropertiesWithCRI();
            if (stateMgr.getState() == WSStateManager.NO_TRANSACTION_ACTIVE || (stateMgr.getState() == WSStateManager.RRS_GLOBAL_TRANSACTION_ACTIVE && !rrsGlobalTransactionReallyActive)) {
                // setting the new subject in the managed connection, this may be
                // the same as the existing one, however, in the claimedVictim
                // path it won't. Setting it all the time.
                this.subject = subject;
                // now reset the _claimedVictim status, since at this point, the
                // subject should match the sqlConn
                this._claimedVictim = false;
            }
        } else {
            // We should allow changing the isolation level if switching is
            // supported. This value is used by the connection handle, so the
            // isolation level must be set before creating a handle.
            if (supportIsolvlSwitching && currentTransactionIsolation != cri.ivIsoLevel)
                try {
                    setTransactionIsolation(cri.ivIsoLevel);
                } catch (SQLException sqlX) {
                    FFDCFilter.processException(sqlX, getClass().getName() + ".getConnection", "1867", this);
                    if (isTraceOn && tc.isEntryEnabled())
                        Tr.exit(this, tc, "getConnection", sqlX);
                    throw AdapterUtil.translateSQLException(sqlX, this, true, getClass());
                }
        }
        // Subject and CRI are required for handle reassociation; these values
        // must be forwarded to the CM to reassociate with a new MC. The JDBC
        // handle will not modify the CRI or Subject. The Connection handle will
        // request the CRI and Subject only when dissociated, to take a snapshot
        // of the current Connection properties the handle wishes to be
        // reassociated to. If the ManagedConnection was just taken out of the
        // pool, the thread id may not be recorded yet; in that case, use the
        // current thread id.
        WSJdbcConnection handle = mcf.jdbcRuntime.newConnection(this, sqlConn, key, threadID);
        addHandle(handle);
        // Here is one of two boundaries to enable/disable tracing. If the
        // logwriter was enabled dynamically, then we will check every time
        // since we don't know whether the user enabled or disabled it
        // dynamically (i.e. performance won't be as good).
        if (helper.shouldTraceBeEnabled(this))
            helper.enableJdbcLogging(this);
        else if (helper.shouldTraceBeDisabled(this))
            helper.disableJdbcLogging(this);
        if (isTraceOn && tc.isDebugEnabled())
            Tr.debug(this, tc, "numHandlesInUse", numHandlesInUse);
        // Record the number of fatal connection errors found on connections
        // created by the parent ManagedConnectionFactory at the time the last
        // handle was created for this ManagedConnection. This allows us to
        // determine whether it's safe to return this ManagedConnection to the
        // pool: if the factory's fatal error count is greater than the MC's,
        // fatal errors may have occurred on other connections between last use
        // and close, so the MC must not be pooled. This implements the purge
        // policy of "all open connections" used by the default connection
        // manager.
        fatalErrorCount = mcf.fatalErrorCount.get();
        if (isTraceOn && tc.isEntryEnabled())
            Tr.exit(this, tc, "getConnection", handle);
        return handle;
    }
}
public class Stream { /** * Merges elements of two iterators according to the supplied selector function . * < p > Example 1 — Merge two sorted iterators : * < pre > * iterator1 : [ 1 , 3 , 8 , 10] * iterator2 : [ 2 , 5 , 6 , 12] * selector : ( a , b ) - & gt ; a & lt ; b ? TAKE _ FIRST : TAKE _ SECOND * result : [ 1 , 2 , 3 , 5 , 6 , 8 , 10 , 12] * < / pre > * < p > Example 2 — Concat two iterators : * < pre > * iterator1 : [ 0 , 3 , 1] * iterator2 : [ 2 , 5 , 6 , 1] * selector : ( a , b ) - & gt ; TAKE _ SECOND * result : [ 2 , 5 , 6 , 1 , 0 , 3 , 1] * < / pre > * @ param < T > the type of the elements * @ param iterator1 the first iterator * @ param iterator2 the second iterator * @ param selector the selector function used to choose elements * @ return the new stream * @ throws NullPointerException if { @ code iterator1 } or { @ code iterator2 } is null * @ since 1.1.9 */ public static < T > Stream < T > merge ( @ NotNull Iterator < ? extends T > iterator1 , @ NotNull Iterator < ? extends T > iterator2 , @ NotNull BiFunction < ? super T , ? super T , ObjMerge . MergeResult > selector ) { } }
Objects . requireNonNull ( iterator1 ) ; Objects . requireNonNull ( iterator2 ) ; return new Stream < T > ( new ObjMerge < T > ( iterator1 , iterator2 , selector ) ) ;
public class CPDefinitionOptionRelLocalServiceBaseImpl { /** * Returns all the cp definition option rels matching the UUID and company . * @ param uuid the UUID of the cp definition option rels * @ param companyId the primary key of the company * @ return the matching cp definition option rels , or an empty list if no matches were found */ @ Override public List < CPDefinitionOptionRel > getCPDefinitionOptionRelsByUuidAndCompanyId ( String uuid , long companyId ) { } }
return cpDefinitionOptionRelPersistence . findByUuid_C ( uuid , companyId ) ;
public class DisambiguateProperties { /** * Chooses a name to use for renaming in each equivalence class and maps * the representative type of that class to that name . */ private Map < JSType , String > buildPropNames ( Property prop ) { } }
UnionFind < JSType > pTypes = prop . getTypes ( ) ; String pname = prop . name ; Map < JSType , String > names = new HashMap < > ( ) ; for ( Set < JSType > set : pTypes . allEquivalenceClasses ( ) ) { checkState ( ! set . isEmpty ( ) ) ; JSType representative = pTypes . find ( set . iterator ( ) . next ( ) ) ; String typeName = null ; for ( JSType type : set ) { String typeString = type . toString ( ) ; if ( typeName == null || typeString . compareTo ( typeName ) < 0 ) { typeName = typeString ; } } String newName ; if ( "{...}" . equals ( typeName ) ) { newName = pname ; } else { newName = NONWORD_PATTERN . matcher ( typeName ) . replaceAll ( "_" ) + '$' + pname ; } names . put ( representative , newName ) ; } return names ;
public class ProgressionUtil { /** * Create and replies a task progression model that is for * a subtask of the task associated to the given progression model . * < p > The subtask progression model is not initialized . * When the subtask progression model reaches its end , * the value of the task progression is { @ code s + extent } * where { @ code s } is the value of the task progression when * this function is called . * @ param model is the model to derivate * @ param extent is the size of the subtask progression that is * covered in the task progression . * @ return the subtask progression model ; or < code > null < / code > . */ public static Progression sub ( Progression model , int extent ) { } }
if ( model != null ) { return model . subTask ( extent ) ; } return null ;