signature
stringlengths
43
39.1k
implementation
stringlengths
0
450k
public class JsonApiQueryParamsParser { /** * Returns a list of all of the strings contained in parametersToParse . If * any of the strings contained in parametersToParse is a comma - delimited * list , that string will be split into substrings and each substring will * be added to the returned set ( in place of the delimited list ) . */ private static Set < String > parseDelimitedParameters ( Set < String > parametersToParse ) { } }
Set < String > parsedParameters = new LinkedHashSet < > ( ) ; if ( parametersToParse != null && ! parametersToParse . isEmpty ( ) ) { for ( String parameterToParse : parametersToParse ) { parsedParameters . addAll ( Arrays . asList ( parameterToParse . split ( JSON_API_PARAM_DELIMITER ) ) ) ; } } return parsedParameters ;
public class ApiOvhSms { /** * Create a phonebook contact . Return identifier of the phonebook contact . * REST : POST / sms / { serviceName } / phonebooks / { bookKey } / phonebookContact * @ param homeMobile [ required ] Home mobile phone number of the contact * @ param surname [ required ] Contact surname * @ param homePhone [ required ] Home landline phone number of the contact * @ param name [ required ] Name of the contact * @ param group [ required ] Group name of the phonebook * @ param workMobile [ required ] Mobile phone office number of the contact * @ param workPhone [ required ] Landline phone office number of the contact * @ param serviceName [ required ] The internal name of your SMS offer * @ param bookKey [ required ] Identifier of the phonebook */ public Long serviceName_phonebooks_bookKey_phonebookContact_POST ( String serviceName , String bookKey , String group , String homeMobile , String homePhone , String name , String surname , String workMobile , String workPhone ) throws IOException { } }
String qPath = "/sms/{serviceName}/phonebooks/{bookKey}/phonebookContact" ; StringBuilder sb = path ( qPath , serviceName , bookKey ) ; HashMap < String , Object > o = new HashMap < String , Object > ( ) ; addBody ( o , "group" , group ) ; addBody ( o , "homeMobile" , homeMobile ) ; addBody ( o , "homePhone" , homePhone ) ; addBody ( o , "name" , name ) ; addBody ( o , "surname" , surname ) ; addBody ( o , "workMobile" , workMobile ) ; addBody ( o , "workPhone" , workPhone ) ; String resp = exec ( qPath , "POST" , sb . toString ( ) , o ) ; return convertTo ( resp , Long . class ) ;
public class TerminateSessionAction {

    /**
     * Destroy the application session.
     * Also kills all delegated authn profiles via pac4j.
     *
     * @param request  the request
     * @param response the response
     */
    protected void destroyApplicationSession(final HttpServletRequest request, final HttpServletResponse response) {
        LOGGER.trace("Destroying application session");
        // Build a pac4j web context backed by the HTTP session, then log out to
        // clear all delegated authentication profiles.
        val context = new J2EContext(request, response, new J2ESessionStore());
        val manager = new ProfileManager<>(context, context.getSessionStore());
        manager.logout();
        val session = request.getSession(false);
        if (session != null) {
            // Preserve the originally requested URL across session invalidation so
            // the user can be redirected after re-authentication.
            val requestedUrl = session.getAttribute(Pac4jConstants.REQUESTED_URL);
            session.invalidate();
            if (requestedUrl != null && !requestedUrl.equals(StringUtils.EMPTY)) {
                request.getSession(true).setAttribute(Pac4jConstants.REQUESTED_URL, requestedUrl);
            }
        }
    }
}
public class RegistriesInner {

    /**
     * Gets the quota usages for the specified container registry.
     *
     * @param resourceGroupName The name of the resource group to which the container registry belongs.
     * @param registryName The name of the container registry.
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @return the observable to the RegistryUsageListResultInner object
     */
    public Observable<RegistryUsageListResultInner> listUsagesAsync(String resourceGroupName, String registryName) {
        // Delegate to the ServiceResponse-returning variant and unwrap the body.
        return listUsagesWithServiceResponseAsync(resourceGroupName, registryName).map(new Func1<ServiceResponse<RegistryUsageListResultInner>, RegistryUsageListResultInner>() {
            @Override
            public RegistryUsageListResultInner call(ServiceResponse<RegistryUsageListResultInner> response) {
                return response.body();
            }
        });
    }
}
public class WebDavServiceImpl {

    /**
     * Gives access to the current session.
     *
     * @param repoName   repository name (ignored for the actual lookup; see JCR-2138)
     * @param wsName     workspace name
     * @param lockTokens lock tokens to attach to the session; may be {@code null}
     * @return current session
     * @throws RepositoryException if no SessionProvider has been set for this request
     * @throws Exception {@link Exception}
     */
    protected Session session(String repoName, String wsName, List<String> lockTokens) throws Exception, NoSuchWorkspaceException {
        // To be cloud compliant we need now to ignore the provided repository name (more details in JCR-2138)
        ManageableRepository repo = repositoryService.getCurrentRepository();
        if (PropertyManager.isDevelopping() && log.isWarnEnabled()) {
            String currentRepositoryName = repo.getConfiguration().getName();
            if (!currentRepositoryName.equals(repoName)) {
                log.warn("The expected repository was '" + repoName + "' but we will use the current repository instead which is '" + currentRepositoryName + "'");
            }
        }
        SessionProvider sp = sessionProviderService.getSessionProvider(null);
        if (sp == null)
            throw new RepositoryException("SessionProvider is not properly set. Make the application calls" + "SessionProviderService.setSessionProvider(..) somewhere before (" + "for instance in Servlet Filter for WEB application)");
        Session session = sp.getSession(wsName, repo);
        if (lockTokens != null) {
            // Use a HashSet for O(1) membership checks instead of scanning a list,
            // and add only the tokens the session does not already hold.
            Set<String> presentLockTokens = new HashSet<String>(Arrays.asList(session.getLockTokens()));
            for (String lockToken : lockTokens) {
                if (!presentLockTokens.contains(lockToken)) {
                    session.addLockToken(lockToken);
                }
            }
        }
        return session;
    }
}
public class MainClassFinder { /** * Perform the given callback operation on all main classes from the given jar . * @ param < T > the result type * @ param jarFile the jar file to search * @ param classesLocation the location within the jar containing classes * @ param callback the callback * @ return the first callback result or { @ code null } * @ throws IOException in case of I / O errors */ static < T > T doWithMainClasses ( JarFile jarFile , String classesLocation , ClassNameCallback < T > callback ) throws IOException { } }
List < JarEntry > classEntries = getClassEntries ( jarFile , classesLocation ) ; Collections . sort ( classEntries , new ClassEntryComparator ( ) ) ; for ( JarEntry entry : classEntries ) { InputStream inputStream = new BufferedInputStream ( jarFile . getInputStream ( entry ) ) ; try { if ( isMainClass ( inputStream ) ) { String className = convertToClassName ( entry . getName ( ) , classesLocation ) ; T result = callback . doWith ( className ) ; if ( result != null ) { return result ; } } } finally { inputStream . close ( ) ; } } return null ;
public class Document {

    /**
     * Returns the text for a certain line without the trailing LF. Throws an
     * {@link IndexOutOfBoundsException} if the zero-based {@code lineNumber}
     * argument is negative or exceeds the number of lines in the document.
     */
    public String getLineContent(final int lineNumber) throws IndexOutOfBoundsException {
        if ((lineNumber < 0)) {
            // NOTE(review): this condition looks inverted — when printSourceOnError
            // is true the source text is OMITTED from the message; verify intent.
            String _xifexpression = null;
            if (this.printSourceOnError) {
                _xifexpression = "";
            } else {
                _xifexpression = (" text was : " + this.contents);
            }
            String _plus = (Integer.valueOf(lineNumber) + _xifexpression);
            throw new IndexOutOfBoundsException(_plus);
        }
        final char NL = '\n';
        final int l = this.contents.length();
        final StringBuilder lineContent = new StringBuilder();
        int line = 0;
        // Single pass over the document: collect characters of the requested line,
        // counting newlines to track the current line number.
        for (int i = 0; (i < l); i++) {
            {
                if ((line > lineNumber)) {
                    // We have moved past the requested line; its content is complete.
                    return lineContent.toString();
                }
                final char ch = this.contents.charAt(i);
                if (((line == lineNumber) && (ch != NL))) {
                    lineContent.append(ch);
                }
                if ((ch == NL)) {
                    line++;
                }
            }
        }
        if ((line < lineNumber)) {
            // The requested line lies beyond the end of the document.
            String _xifexpression_1 = null;
            if (this.printSourceOnError) {
                _xifexpression_1 = "";
            } else {
                _xifexpression_1 = (" text was : " + this.contents);
            }
            String _plus_1 = (Integer.valueOf(lineNumber) + _xifexpression_1);
            throw new IndexOutOfBoundsException(_plus_1);
        }
        // Requested line is the last line (possibly empty, with no trailing LF).
        return lineContent.toString();
    }
}
public class BitmapUtils { /** * Lazily create { @ link BitmapFactory . Options } based in given * { @ link Request } , only instantiating them if needed . */ @ Nullable static BitmapFactory . Options createBitmapOptions ( Request data ) { } }
final boolean justBounds = data . hasSize ( ) ; BitmapFactory . Options options = null ; if ( justBounds || data . config != null || data . purgeable ) { options = new BitmapFactory . Options ( ) ; options . inJustDecodeBounds = justBounds ; options . inInputShareable = data . purgeable ; options . inPurgeable = data . purgeable ; if ( data . config != null ) { options . inPreferredConfig = data . config ; } } return options ;
public class OpenSslSessionStats { /** * Returns the number of times a client presented a ticket derived from the primary key . */ public long ticketKeyResume ( ) { } }
Lock readerLock = context . ctxLock . readLock ( ) ; readerLock . lock ( ) ; try { return SSLContext . sessionTicketKeyResume ( context . ctx ) ; } finally { readerLock . unlock ( ) ; }
public class LoggingInterceptorSupport { /** * Logs response message from message context if any . SOAP messages get logged with envelope transformation * other messages with serialization . * @ param logMessage * @ param messageContext * @ param incoming * @ throws TransformerException */ protected void logResponse ( String logMessage , MessageContext messageContext , boolean incoming ) throws TransformerException { } }
if ( messageContext . hasResponse ( ) ) { if ( messageContext . getResponse ( ) instanceof SoapMessage ) { logSoapMessage ( logMessage , ( SoapMessage ) messageContext . getResponse ( ) , incoming ) ; } else { logWebServiceMessage ( logMessage , messageContext . getResponse ( ) , incoming ) ; } }
public class Matrix4d { /** * Set the value of the matrix element at column 2 and row 2. * @ param m22 * the new value * @ return this */ public Matrix4d m22 ( double m22 ) { } }
this . m22 = m22 ; properties &= ~ PROPERTY_ORTHONORMAL ; if ( m22 != 1.0 ) properties &= ~ ( PROPERTY_IDENTITY | PROPERTY_TRANSLATION ) ; return this ;
public class DocFile {

    /**
     * Reads a line of characters from an input stream opened from a given resource.
     * If an IOException occurs, it is wrapped in a ResourceIOException.
     *
     * @param docPath the path of the resource backing the stream
     * @param in the stream
     * @return the line of text, or {@code null} if at end of stream
     * @throws ResourceIOException if an exception occurred while reading the stream
     */
    private static String readResourceLine(DocPath docPath, BufferedReader in) throws ResourceIOException {
        try {
            return in.readLine();
        } catch (IOException e) {
            // Wrap so callers see which resource failed, with the cause preserved.
            throw new ResourceIOException(docPath, e);
        }
    }
}
public class CmsSetupXmlHelper { /** * Replaces a attibute ' s value in the given node addressed by the xPath . < p > * @ param document the document to replace the node attribute * @ param xPath the xPath to the node * @ param attribute the attribute to replace the value of * @ param value the new value to set * @ return < code > true < / code > if successful < code > false < / code > otherwise */ public static boolean setAttribute ( Document document , String xPath , String attribute , String value ) { } }
Node node = document . selectSingleNode ( xPath ) ; Element e = ( Element ) node ; @ SuppressWarnings ( "unchecked" ) List < Attribute > attributes = e . attributes ( ) ; for ( Attribute a : attributes ) { if ( a . getName ( ) . equals ( attribute ) ) { a . setValue ( value ) ; return true ; } } return false ;
public class YearWeek {

    /**
     * Obtains an instance of {@code YearWeek} from a temporal object.
     * <p>
     * This obtains a year-week based on the specified temporal. A
     * {@code TemporalAccessor} represents an arbitrary set of date and time
     * information, which this factory converts to an instance of {@code YearWeek}.
     * The conversion extracts the {@link IsoFields#WEEK_BASED_YEAR WEEK_BASED_YEAR}
     * and {@link IsoFields#WEEK_OF_WEEK_BASED_YEAR WEEK_OF_WEEK_BASED_YEAR} fields.
     * The extraction is only permitted if the temporal object has an ISO
     * chronology, or can be converted to a {@code LocalDate}.
     * <p>
     * This method matches the signature of the functional interface
     * {@link TemporalQuery} allowing it to be used in queries via method
     * reference, {@code YearWeek::from}.
     *
     * @param temporal the temporal object to convert, not null
     * @return the year-week, not null
     * @throws DateTimeException if unable to convert to a {@code YearWeek}
     */
    public static YearWeek from(TemporalAccessor temporal) {
        if (temporal instanceof YearWeek) {
            // Already the target type; no conversion needed.
            return (YearWeek) temporal;
        }
        Objects.requireNonNull(temporal, "temporal");
        try {
            if (!IsoChronology.INSTANCE.equals(Chronology.from(temporal))) {
                // Non-ISO chronologies must be normalized through LocalDate first.
                temporal = LocalDate.from(temporal);
            }
            // need to use getLong() as JDK Parsed class get() doesn't work properly
            int year = Math.toIntExact(temporal.getLong(WEEK_BASED_YEAR));
            int week = Math.toIntExact(temporal.getLong(WEEK_OF_WEEK_BASED_YEAR));
            return of(year, week);
        } catch (DateTimeException ex) {
            throw new DateTimeException("Unable to obtain YearWeek from TemporalAccessor: " + temporal + " of type " + temporal.getClass().getName(), ex);
        }
    }
}
public class CaffeineCacheMetrics { /** * Record metrics on a Caffeine cache . You must call { @ link Caffeine # recordStats ( ) } prior to building the cache * for metrics to be recorded . * @ param registry The registry to bind metrics to . * @ param cache The cache to instrument . * @ param cacheName Will be used to tag metrics with " cache " . * @ param tags Tags to apply to all recorded metrics . * @ param < C > The cache type . * @ return The instrumented cache , unchanged . The original cache is not wrapped or proxied in any way . * @ see CacheStats */ public static < C extends Cache < ? , ? > > C monitor ( MeterRegistry registry , C cache , String cacheName , Iterable < Tag > tags ) { } }
new CaffeineCacheMetrics ( cache , cacheName , tags ) . bindTo ( registry ) ; return cache ;
public class ScreenInScreen {

    /**
     * Set up all the screen fields.
     * <p>
     * Registers a default view for every field of the SCREEN_IN_FILE record,
     * placing each one at the next logical location with the default anchor
     * and default display properties. The fields are registered in the order
     * they should appear on screen.
     */
    public void setupSFields() {
        this.getRecord(ScreenIn.SCREEN_IN_FILE).getField(ScreenIn.SCREEN_ITEM_NUMBER).setupDefaultView(this.getNextLocation(ScreenConstants.NEXT_LOGICAL, ScreenConstants.ANCHOR_DEFAULT), this, ScreenConstants.DEFAULT_DISPLAY);
        this.getRecord(ScreenIn.SCREEN_IN_FILE).getField(ScreenIn.SCREEN_FILE_NAME).setupDefaultView(this.getNextLocation(ScreenConstants.NEXT_LOGICAL, ScreenConstants.ANCHOR_DEFAULT), this, ScreenConstants.DEFAULT_DISPLAY);
        this.getRecord(ScreenIn.SCREEN_IN_FILE).getField(ScreenIn.SCREEN_FIELD_NAME).setupDefaultView(this.getNextLocation(ScreenConstants.NEXT_LOGICAL, ScreenConstants.ANCHOR_DEFAULT), this, ScreenConstants.DEFAULT_DISPLAY);
        this.getRecord(ScreenIn.SCREEN_IN_FILE).getField(ScreenIn.SCREEN_ROW).setupDefaultView(this.getNextLocation(ScreenConstants.NEXT_LOGICAL, ScreenConstants.ANCHOR_DEFAULT), this, ScreenConstants.DEFAULT_DISPLAY);
        this.getRecord(ScreenIn.SCREEN_IN_FILE).getField(ScreenIn.SCREEN_COL).setupDefaultView(this.getNextLocation(ScreenConstants.NEXT_LOGICAL, ScreenConstants.ANCHOR_DEFAULT), this, ScreenConstants.DEFAULT_DISPLAY);
        this.getRecord(ScreenIn.SCREEN_IN_FILE).getField(ScreenIn.SCREEN_GROUP).setupDefaultView(this.getNextLocation(ScreenConstants.NEXT_LOGICAL, ScreenConstants.ANCHOR_DEFAULT), this, ScreenConstants.DEFAULT_DISPLAY);
        this.getRecord(ScreenIn.SCREEN_IN_FILE).getField(ScreenIn.SCREEN_IN_PROG_NAME).setupDefaultView(this.getNextLocation(ScreenConstants.NEXT_LOGICAL, ScreenConstants.ANCHOR_DEFAULT), this, ScreenConstants.DEFAULT_DISPLAY);
        this.getRecord(ScreenIn.SCREEN_IN_FILE).getField(ScreenIn.SCREEN_OUT_NUMBER).setupDefaultView(this.getNextLocation(ScreenConstants.NEXT_LOGICAL, ScreenConstants.ANCHOR_DEFAULT), this, ScreenConstants.DEFAULT_DISPLAY);
        this.getRecord(ScreenIn.SCREEN_IN_FILE).getField(ScreenIn.SCREEN_PHYSICAL_NUM).setupDefaultView(this.getNextLocation(ScreenConstants.NEXT_LOGICAL, ScreenConstants.ANCHOR_DEFAULT), this, ScreenConstants.DEFAULT_DISPLAY);
        this.getRecord(ScreenIn.SCREEN_IN_FILE).getField(ScreenIn.SCREEN_LOCATION).setupDefaultView(this.getNextLocation(ScreenConstants.NEXT_LOGICAL, ScreenConstants.ANCHOR_DEFAULT), this, ScreenConstants.DEFAULT_DISPLAY);
        this.getRecord(ScreenIn.SCREEN_IN_FILE).getField(ScreenIn.SCREEN_ANCHOR).setupDefaultView(this.getNextLocation(ScreenConstants.NEXT_LOGICAL, ScreenConstants.ANCHOR_DEFAULT), this, ScreenConstants.DEFAULT_DISPLAY);
        this.getRecord(ScreenIn.SCREEN_IN_FILE).getField(ScreenIn.SCREEN_FIELD_DESC).setupDefaultView(this.getNextLocation(ScreenConstants.NEXT_LOGICAL, ScreenConstants.ANCHOR_DEFAULT), this, ScreenConstants.DEFAULT_DISPLAY);
        this.getRecord(ScreenIn.SCREEN_IN_FILE).getField(ScreenIn.SCREEN_CONTROL_TYPE).setupDefaultView(this.getNextLocation(ScreenConstants.NEXT_LOGICAL, ScreenConstants.ANCHOR_DEFAULT), this, ScreenConstants.DEFAULT_DISPLAY);
        this.getRecord(ScreenIn.SCREEN_IN_FILE).getField(ScreenIn.SCREEN_TEXT).setupDefaultView(this.getNextLocation(ScreenConstants.NEXT_LOGICAL, ScreenConstants.ANCHOR_DEFAULT), this, ScreenConstants.DEFAULT_DISPLAY);
    }
}
public class TableSlice { /** * Returns a 0 based int iterator for use with , for example , get ( ) . When it returns 0 for the first row , * get will transform that to the 0th row in the selection , which may not be the 0th row in the underlying * table . */ @ Override public IntIterator iterator ( ) { } }
return new IntIterator ( ) { private int i = 0 ; @ Override public int nextInt ( ) { return i ++ ; } @ Override public int skip ( int k ) { return i + k ; } @ Override public boolean hasNext ( ) { return i < rowCount ( ) ; } } ;
public class TypeResolver { /** * Resolves the raw class for the given { @ code genericType } , using the type variable information * from the { @ code targetType } . */ public static Class < ? > resolveClass ( Type genericType , Class < ? > targetType ) { } }
if ( genericType instanceof Class ) { return ( Class < ? > ) genericType ; } else if ( genericType instanceof ParameterizedType ) { return resolveClass ( ( ( ParameterizedType ) genericType ) . getRawType ( ) , targetType ) ; } else if ( genericType instanceof GenericArrayType ) { GenericArrayType arrayType = ( GenericArrayType ) genericType ; Class < ? > compoment = resolveClass ( arrayType . getGenericComponentType ( ) , targetType ) ; return Array . newInstance ( compoment , 0 ) . getClass ( ) ; } else if ( genericType instanceof TypeVariable ) { TypeVariable < ? > variable = ( TypeVariable < ? > ) genericType ; genericType = getTypeVariableMap ( targetType ) . get ( variable ) ; genericType = genericType == null ? resolveBound ( variable ) : resolveClass ( genericType , targetType ) ; } return genericType instanceof Class ? ( Class < ? > ) genericType : Unknown . class ;
public class JDBCCallableStatement {

    /**
     * Sets the designated parameter to the given {@code Reader} as a CLOB,
     * addressed by parameter name rather than index.
     * <p>
     * #ifdef JAVA6
     *
     * @param parameterName the name of the parameter to set
     * @param reader        the character stream providing the CLOB data
     * @throws SQLException if the parameter name cannot be resolved or the
     *                      underlying statement rejects the value
     */
    public synchronized void setClob(String parameterName, Reader reader) throws SQLException {
        // Translate the name to an index and delegate to the index-based overload.
        super.setClob(findParameterIndex(parameterName), reader);
    }
}
public class AbstractResourceAdapterDeployer { /** * Apply validation to pool configuration * @ param pc The pool configuration * @ param v The validation definition */ private void applyPoolConfiguration ( PoolConfiguration pc , org . ironjacamar . common . api . metadata . common . Validation v ) { } }
if ( v != null ) { if ( v . isValidateOnMatch ( ) != null ) pc . setValidateOnMatch ( v . isValidateOnMatch ( ) . booleanValue ( ) ) ; if ( v . isBackgroundValidation ( ) != null ) pc . setBackgroundValidation ( v . isBackgroundValidation ( ) . booleanValue ( ) ) ; if ( v . getBackgroundValidationMillis ( ) != null ) pc . setBackgroundValidationMillis ( v . getBackgroundValidationMillis ( ) . longValue ( ) ) ; if ( v . isUseFastFail ( ) != null ) pc . setUseFastFail ( v . isUseFastFail ( ) . booleanValue ( ) ) ; }
public class PageFlowControlContainerFactory { /** * This method will return the < code > PageFlowControlContainer < / code > that is acting as the * control container for the page flow runtime . * @ param request The current request * @ param servletContext The servlet context * @ return The < code > pageFLowControlContainer < / code > acting as the control container . */ public static synchronized PageFlowControlContainer getControlContainer ( HttpServletRequest request , ServletContext servletContext ) { } }
PageFlowControlContainer pfcc = ( PageFlowControlContainer ) getSessionVar ( request , servletContext , PAGEFLOW_CONTROL_CONTAINER ) ; if ( pfcc != null ) return pfcc ; pfcc = new PageFlowControlContainerImpl ( ) ; setSessionVar ( request , servletContext , PAGEFLOW_CONTROL_CONTAINER , pfcc ) ; return pfcc ;
public class JvmAgent {

    /**
     * Entry point for the agent, using command line attach
     * (that is via -javaagent command line argument).
     *
     * @param agentArgs arguments as given on the command line
     * @param inst      instrumentation handle supplied by the JVM
     */
    public static void premain(String agentArgs, Instrumentation inst) {
        // Parse the agent arguments and start with lazy registration/detection enabled.
        startAgent(new JvmAgentConfig(agentArgs), true /* register and detect lazy */, inst);
    }
}
public class ConsumerSessionProxy {

    /**
     * Internal method called when a stoppable asynchronous session is stopped.
     * The session is put into stopped state and the registered application
     * consumerSessionStopped() method is invoked to inform the application that
     * the session has been stopped.
     */
    public void stoppableConsumerSessionStopped() {
        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
            SibTr.entry(this, tc, "stoppableConsumerSessionStopped");
        try {
            stopInternal(false); // No need to notify our peer as the stop came from our peer
        } catch (Exception e) {
            // Record the failure via FFDC but continue: the callback must still fire.
            FFDCFilter.processException(e, CLASS_NAME + ".processAsyncSessionStoppedCallback", CommsConstants.CONSUMERSESSIONPROXY_SESSION_STOPPED_03, this);
        }
        final AsynchConsumerCallback asynchConsumerCallback = proxyQueue.getAsynchConsumerCallback();
        if (asynchConsumerCallback instanceof StoppableAsynchConsumerCallback) {
            final StoppableAsynchConsumerCallback stoppableAsynchConsumerCallback = (StoppableAsynchConsumerCallback) asynchConsumerCallback;
            // Protect ourselves from the application as far as possible then call the application
            try {
                if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled())
                    SibTr.debug(this, tc, "** Calling application StoppableAsynchConsumerCallback");
                stoppableAsynchConsumerCallback.consumerSessionStopped();
            } catch (Throwable e) {
                // Swallow anything the application throws; record it via FFDC only.
                FFDCFilter.processException(e, CLASS_NAME + ".stoppableConsumerSessionStopped", CommsConstants.CONSUMERSESSIONPROXY_SESSION_STOPPED_01, this);
            }
        } else {
            // The registered callback does not support the stoppable contract:
            // trace, FFDC, log, and surface the error to the caller.
            if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled())
                SibTr.debug(this, tc, "asynchConsumerCallback not an instance of StoppableAsynchConsumerCallback is an instance of " + asynchConsumerCallback.getClass().getName());
            SIErrorException e = new SIErrorException(nls.getFormattedMessage("WRONG_CLASS_CWSICO8022", new Object[] { asynchConsumerCallback.getClass().getName() }, null));
            FFDCFilter.processException(e, CLASS_NAME + ".processAsyncSessionStoppedCallback", CommsConstants.CONSUMERSESSIONPROXY_SESSION_STOPPED_02, this);
            SibTr.error(tc, "WRONG_CLASS_CWSICO8022", e);
            throw e;
        }
        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
            SibTr.exit(this, tc, "stoppableConsumerSessionStopped");
    }
}
public class BookKeeperLog {

    /**
     * Loads the metadata for the current log, as stored in ZooKeeper.
     *
     * @return A new LogMetadata object with the desired information, or null if no such node exists.
     * @throws DataLogInitializationException If an Exception (other than NoNodeException) occurred.
     */
    @VisibleForTesting
    LogMetadata loadMetadata() throws DataLogInitializationException {
        try {
            // Fetch both the node data and its Stat so the metadata can carry
            // the ZNode version for optimistic-concurrency updates.
            Stat storingStatIn = new Stat();
            byte[] serializedMetadata = this.zkClient.getData().storingStatIn(storingStatIn).forPath(this.logNodePath);
            LogMetadata result = LogMetadata.SERIALIZER.deserialize(serializedMetadata);
            result.withUpdateVersion(storingStatIn.getVersion());
            return result;
        } catch (KeeperException.NoNodeException nne) {
            // Node does not exist: this is the first time we are accessing this log.
            log.warn("{}: No ZNode found for path '{}{}'. This is OK if this is the first time accessing this log.", this.traceObjectId, this.zkClient.getNamespace(), this.logNodePath);
            return null;
        } catch (Exception ex) {
            // Any other failure (connectivity, deserialization) is fatal for init.
            throw new DataLogInitializationException(String.format("Unable to load ZNode contents for path '%s%s'.", this.zkClient.getNamespace(), this.logNodePath), ex);
        }
    }
}
public class MiniTemplatorParser {

    /**
     * Processes a short-form template command ("?", ":", "/?").
     * Returns false if the command is not recognized and should be treated as
     * normal template text.
     *
     * @param cmdLine      the text of the command line
     * @param cmdTPosBegin start position of the command within the template
     * @param cmdTPosEnd   end position of the command within the template
     * @return true when a short-form command was recognized and processed
     */
    private boolean processShortFormTemplateCommand(String cmdLine, int cmdTPosBegin, int cmdTPosEnd) throws MiniTemplator.TemplateSyntaxException {
        int p0 = skipBlanks(cmdLine, 0);
        if (p0 >= cmdLine.length()) {
            // Blank line: not a command.
            return false;
        }
        int p = p0;
        char cmd1 = cmdLine.charAt(p++);
        // A leading '/' may be followed by a second command character (e.g. "/?").
        if (cmd1 == '/' && p < cmdLine.length() && !Character.isWhitespace(cmdLine.charAt(p))) {
            p++;
        }
        String cmd = cmdLine.substring(p0, p);
        String parms = cmdLine.substring(p).trim();
        /* select */
        if (cmd.equals("?")) {
            // "?" opens a conditional block.
            processIfCmd(parms, cmdTPosBegin, cmdTPosEnd);
        } else if (cmd.equals(":")) {
            // ":" with parameters is else-if, without parameters a plain else.
            if (parms.length() > 0) {
                processElseIfCmd(parms, cmdTPosBegin, cmdTPosEnd);
            } else {
                processElseCmd(parms, cmdTPosBegin, cmdTPosEnd);
            }
        } else if (cmd.equals("/?")) {
            // "/?" closes the conditional block.
            processEndIfCmd(parms, cmdTPosBegin, cmdTPosEnd);
        } else {
            return false;
        }
        return true;
    }
}
public class ElementMatchers {

    /**
     * Matches any type description that is a subtype of the given type.
     *
     * @param type The type to be checked for being a supertype of the matched type.
     * @param <T>  The type of the matched object.
     * @return A matcher that matches any type description that represents a sub type of the given type.
     */
    public static <T extends TypeDescription> ElementMatcher.Junction<T> isSubTypeOf(TypeDescription type) {
        return new SubTypeMatcher<T>(type);
    }
}
public class EJSContainer {

    /**
     * d112604.5
     * Method called after the first set of 25 CMP11 entities is returned during
     * a lazy enumeration custom finder execution. Called for each set of
     * instances to be hydrated via the RemoteEnumerator code path.
     *
     * @param cfwithupdateaccess whether the custom finder runs with update access
     * @param readonly           whether the finder result is read-only
     * @param methodname         name of the custom finder method being executed
     */
    public void setCustomFinderAccessIntentThreadState(boolean cfwithupdateaccess, boolean readonly, String methodname) {
        final boolean isTraceOn = TraceComponent.isAnyTracingEnabled(); // d532639.2
        // create a thread local context for ContainerManagedBeanO to determine CF Access Information
        if (isTraceOn && tc.isEntryEnabled())
            Tr.entry(tc, "setCustomFinderAccessIntentThreadState");
        // CMP11CustomFinderAIContext = new WSThreadLocal ( ) ; deleted PQ95614
        CMP11CustomFinderAccIntentState _state = new CMP11CustomFinderAccIntentState(methodname, cfwithupdateaccess, readonly);
        if (isTraceOn && tc.isDebugEnabled())
            Tr.debug(tc, "current thread CMP11 Finder state" + _state);
        // Stash the state on the per-thread container data for later lookup.
        svThreadData.get().ivCMP11CustomFinderAccIntentState = _state; // d630940
        if (isTraceOn && tc.isEntryEnabled()) // PQ95614
            Tr.exit(tc, "setCustomFinderAccessIntentThreadState");
    }
}
public class EditText { /** * @ return the maximum width of the TextView , in pixels or - 1 if the maximum width * was set in ems instead ( using { @ link # setMaxEms ( int ) } or { @ link # setEms ( int ) } ) . * @ see # setMaxWidth ( int ) * @ see # setWidth ( int ) * @ attr ref android . R . styleable # TextView _ maxWidth */ @ TargetApi ( Build . VERSION_CODES . JELLY_BEAN ) public int getMaxWidth ( ) { } }
if ( Build . VERSION . SDK_INT >= Build . VERSION_CODES . JELLY_BEAN ) return mInputView . getMaxWidth ( ) ; return - 1 ;
public class DateTimeFormatterBuilder { /** * Instructs the printer to emit a field value as a decimal number , and the * parser to expect a signed decimal number . * @ param fieldType type of field to append * @ param minDigits minimum number of digits to < i > print < / i > * @ param maxDigits maximum number of digits to < i > parse < / i > , or the estimated * maximum number of digits to print * @ return this DateTimeFormatterBuilder , for chaining * @ throws IllegalArgumentException if field type is null */ public DateTimeFormatterBuilder appendSignedDecimal ( DateTimeFieldType fieldType , int minDigits , int maxDigits ) { } }
if ( fieldType == null ) { throw new IllegalArgumentException ( "Field type must not be null" ) ; } if ( maxDigits < minDigits ) { maxDigits = minDigits ; } if ( minDigits < 0 || maxDigits <= 0 ) { throw new IllegalArgumentException ( ) ; } if ( minDigits <= 1 ) { return append0 ( new UnpaddedNumber ( fieldType , maxDigits , true ) ) ; } else { return append0 ( new PaddedNumber ( fieldType , maxDigits , true , minDigits ) ) ; }
public class GatewayMetrics {

    /**
     * Register default Gateway Metrics on the given MetricsCollector.
     * All metrics share the export interval configured in the system config.
     *
     * @param metricsCollector the MetricsCollector to register Metrics on
     */
    public void registerMetrics(MetricsCollector metricsCollector) {
        SystemConfig systemConfig = (SystemConfig) SingletonRegistry.INSTANCE.getSingleton(SystemConfig.HERON_SYSTEM_CONFIG);
        // Export interval (seconds) shared by every gateway metric below.
        int interval = (int) systemConfig.getHeronMetricsExportInterval().getSeconds();
        metricsCollector.registerMetric("__gateway-received-packets-size", receivedPacketsSize, interval);
        metricsCollector.registerMetric("__gateway-sent-packets-size", sentPacketsSize, interval);
        metricsCollector.registerMetric("__gateway-received-packets-count", receivedPacketsCount, interval);
        metricsCollector.registerMetric("__gateway-sent-packets-count", sentPacketsCount, interval);
        metricsCollector.registerMetric("__gateway-sent-metrics-size", sentMetricsSize, interval);
        metricsCollector.registerMetric("__gateway-sent-metrics-packets-count", sentMetricsPacketsCount, interval);
        metricsCollector.registerMetric("__gateway-sent-metrics-count", sentMetricsCount, interval);
        metricsCollector.registerMetric("__gateway-sent-exceptions-count", sentExceptionsCount, interval);
        metricsCollector.registerMetric("__gateway-in-stream-queue-size", inStreamQueueSize, interval);
        metricsCollector.registerMetric("__gateway-out-stream-queue-size", outStreamQueueSize, interval);
        metricsCollector.registerMetric("__gateway-in-stream-queue-expected-capacity", inStreamQueueExpectedCapacity, interval);
        metricsCollector.registerMetric("__gateway-out-stream-queue-expected-capacity", outStreamQueueExpectedCapacity, interval);
        metricsCollector.registerMetric("__gateway-in-queue-full-count", inQueueFullCount, interval);
    }
}
public class ActivityDataMap { /** * Returns the value of the named attribute from the session adapter * without storing it in the cache . * If no attribute of the given name exists , returns null . * @ param name a { @ code String } specifying the name of the attribute * @ return an { @ code Object } containing the value of the attribute , * or { @ code null } if the attribute does not exist * @ see SessionAdapter # getAttribute */ public Object getSessionAttributeWithoutCache ( String name ) { } }
if ( activity . getSessionAdapter ( ) != null ) { return activity . getSessionAdapter ( ) . getAttribute ( name ) ; } else { return null ; }
public class NetworkMarshaller {
    /**
     * Marshall the given parameter object.
     *
     * @param network the Network model object to serialize; must not be null
     * @param protocolMarshaller the marshaller that receives each field/binding pair
     * @throws SdkClientException if {@code network} is null or any field fails to marshall
     */
    public void marshall(Network network, ProtocolMarshaller protocolMarshaller) {
        if (network == null) {
            throw new SdkClientException("Invalid argument passed to marshall(...)");
        }
        try {
            // Emit each model field with its corresponding protocol binding.
            protocolMarshaller.marshall(network.getDirection(), DIRECTION_BINDING);
            protocolMarshaller.marshall(network.getProtocol(), PROTOCOL_BINDING);
            protocolMarshaller.marshall(network.getSourceIpV4(), SOURCEIPV4_BINDING);
            protocolMarshaller.marshall(network.getSourceIpV6(), SOURCEIPV6_BINDING);
            protocolMarshaller.marshall(network.getSourcePort(), SOURCEPORT_BINDING);
            protocolMarshaller.marshall(network.getSourceDomain(), SOURCEDOMAIN_BINDING);
            protocolMarshaller.marshall(network.getSourceMac(), SOURCEMAC_BINDING);
            protocolMarshaller.marshall(network.getDestinationIpV4(), DESTINATIONIPV4_BINDING);
            protocolMarshaller.marshall(network.getDestinationIpV6(), DESTINATIONIPV6_BINDING);
            protocolMarshaller.marshall(network.getDestinationPort(), DESTINATIONPORT_BINDING);
            protocolMarshaller.marshall(network.getDestinationDomain(), DESTINATIONDOMAIN_BINDING);
        } catch (Exception e) {
            // Wrap any failure, preserving the original exception as the cause.
            throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
        }
    }
}
public class DomainAccessFactory { /** * Create a domain accessor which works with a generic domain model . * @ param dbAccess the graph database connection * @ param domainName * @ param domainLabelUse - - < b > Note : < / b > Consistency may be corrupted , if you change domainLabelUse * on different creations of DomainAccess to the same domain . * @ return */ public static IGenericDomainAccess createGenericDomainAccess ( IDBAccess dbAccess , String domainName , DomainLabelUse domainLabelUse ) { } }
return IDomainAccessFactory . INSTANCE . createGenericDomainAccess ( dbAccess , domainName , domainLabelUse ) ;
public class Ifc2x3tc1PackageImpl {
    /**
     * Returns the EClass for IfcPointOrVertexPoint, resolving it lazily from
     * the registered Ifc2x3tc1 package on first access and caching the result.
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @generated
     */
    public EClass getIfcPointOrVertexPoint() {
        if (ifcPointOrVertexPointEClass == null) {
            // Classifier index 966 within the registered package's classifier list.
            ifcPointOrVertexPointEClass = (EClass) EPackage.Registry.INSTANCE
                    .getEPackage(Ifc2x3tc1Package.eNS_URI).getEClassifiers().get(966);
        }
        return ifcPointOrVertexPointEClass;
    }
}
public class MapWithProtoValuesSubject { /** * Specifies that extra repeated field elements for these explicitly specified top - level field * numbers should be ignored . Sub - fields must be specified explicitly ( via { @ link * FieldDescriptor } ) if their extra elements are to be ignored as well . * < p > Use { @ link # ignoringExtraRepeatedFieldElementsForValues ( ) } instead to ignore these for all * fields . * @ see # ignoringExtraRepeatedFieldElementsForValues ( ) for details . */ public MapWithProtoValuesFluentAssertion < M > ignoringExtraRepeatedFieldElementsOfFieldsForValues ( Iterable < Integer > fieldNumbers ) { } }
return usingConfig ( config . ignoringExtraRepeatedFieldElementsOfFields ( fieldNumbers ) ) ;
public class MultiChoiceListPreference { /** * Adds a new value to the preference . By adding a value , the changes will be persisted . * @ param value * The value , which should be added , as a { @ link String } . The value may not be null */ public final void addValue ( @ NonNull final String value ) { } }
Condition . INSTANCE . ensureNotNull ( value , "The value may not be null" ) ; if ( this . values != null ) { if ( this . values . add ( value ) ) { if ( persistSet ( this . values ) ) { notifyChanged ( ) ; } } } else { Set < String > newValues = new HashSet < > ( ) ; newValues . add ( value ) ; setValues ( newValues ) ; }
public class VelocityUtil { /** * 将Request中的数据转换为模板引擎 < br > * 取值包括Session和Request * @ param context 内容 * @ param request 请求对象 * @ return VelocityContext */ public static VelocityContext parseRequest ( VelocityContext context , javax . servlet . http . HttpServletRequest request ) { } }
final Enumeration < String > attrs = request . getAttributeNames ( ) ; if ( attrs != null ) { String attrName = null ; while ( attrs . hasMoreElements ( ) ) { attrName = attrs . nextElement ( ) ; context . put ( attrName , request . getAttribute ( attrName ) ) ; } } return context ;
public class SqlInjectionMatchSetUpdateMarshaller {
    /**
     * Marshall the given parameter object.
     *
     * @param sqlInjectionMatchSetUpdate the model object to serialize; must not be null
     * @param protocolMarshaller the marshaller that receives each field/binding pair
     * @throws SdkClientException if the argument is null or any field fails to marshall
     */
    public void marshall(SqlInjectionMatchSetUpdate sqlInjectionMatchSetUpdate, ProtocolMarshaller protocolMarshaller) {
        if (sqlInjectionMatchSetUpdate == null) {
            throw new SdkClientException("Invalid argument passed to marshall(...)");
        }
        try {
            protocolMarshaller.marshall(sqlInjectionMatchSetUpdate.getAction(), ACTION_BINDING);
            protocolMarshaller.marshall(sqlInjectionMatchSetUpdate.getSqlInjectionMatchTuple(), SQLINJECTIONMATCHTUPLE_BINDING);
        } catch (Exception e) {
            // Wrap any failure, preserving the original exception as the cause.
            throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
        }
    }
}
public class DataManager { /** * Populates basic application / device data if app is running for the first time . */ private void onetimeDeviceSetup ( Context context ) { } }
if ( TextUtils . isEmpty ( deviceDAO . device ( ) . getDeviceId ( ) ) ) { deviceDAO . setDeviceId ( DeviceHelper . generateDeviceId ( context ) ) ; try { deviceDAO . setInstanceId ( FirebaseInstanceId . getInstance ( ) . getId ( ) ) ; } catch ( IllegalStateException e ) { deviceDAO . setInstanceId ( "empty" ) ; } deviceDAO . setAppVer ( DeviceHelper . getAppVersion ( context ) ) ; }
public class PersistInterfaceService { /** * If the context locking mode is activated , this method releases the lock for the given context for writing * operations . */ private void releaseContext ( String contextId ) { } }
if ( mode == ContextLockingMode . DEACTIVATED ) { return ; } synchronized ( activeWritingContexts ) { activeWritingContexts . remove ( contextId ) ; }
public class VerificationConditionGenerator { /** * Translate a break statement . This takes the current context and pushes it * into the enclosing loop scope . It will then be extracted later and used . * @ param stmt * @ param wyalFile */ private Context translateBreak ( WyilFile . Stmt . Break stmt , Context context ) { } }
LoopScope enclosingLoop = context . getEnclosingLoopScope ( ) ; enclosingLoop . addBreakContext ( context ) ; return null ;
public class FastMath {
    /**
     * Get the largest whole number smaller than x.
     *
     * @param x number from which floor is requested
     * @return a double number f such that f is an integer f <= x < f + 1.0
     */
    public static double floor(double x) {
        long y;

        // NaN floors to NaN (x != x is the canonical NaN test).
        if (x != x) { // NaN
            return x;
        }

        // At or beyond +/-2^52 every double is already an integer.
        if (x >= TWO_POWER_52 || x <= -TWO_POWER_52) {
            return x;
        }

        // Truncate toward zero; for negative non-integers step down by one.
        y = (long) x;
        if (x < 0 && y != x) {
            y--;
        }

        // Preserve the sign of zero: x * 0 yields -0.0 when x is negative,
        // which a plain (double) y conversion would lose.
        if (y == 0) {
            return x * y;
        }

        return y;
    }
}
public class WebsocketClientTransport { /** * Creates a new instance connecting to localhost * @ param port the port to connect to * @ return a new instance */ public static WebsocketClientTransport create ( int port ) { } }
TcpClient client = TcpClient . create ( ) . port ( port ) ; return create ( client ) ;
public class IzouSoundSourceDataLine { /** * Writes audio data to the mixer via this source data line . The requested * number of bytes of data are read from the specified array , * starting at the given offset into the array , and written to the data * line ' s buffer . If the caller attempts to write more data than can * currently be written ( see < code > DataLine # available available < / code > ) , * this method blocks until the requested amount of data has been written . * This applies even if the requested amount of data to write is greater * than the data line ' s buffer size . However , if the data line is closed , * stopped , or flushed before the requested amount has been written , * the method no longer blocks , but returns the number of bytes * written thus far . * The number of bytes that can be written without blocking can be ascertained * using the < code > DataLine # available available < / code > method of the * < code > DataLine < / code > interface . ( While it is guaranteed that * this number of bytes can be written without blocking , there is no guarantee * that attempts to write additional data will block . ) * The number of bytes to write must represent an integral number of * sample frames , such that : * < br > * < center > < code > [ bytes written ] % [ frame size in bytes ] = = 0 < / code > < / center > * < br > * The return value will always meet this requirement . A request to write a * number of bytes representing a non - integral number of sample frames cannot * be fulfilled and may result in an < code > IllegalArgumentException < / code > . 
* @ param b a byte array containing data to be written to the data line * @ param off the offset from the beginning of the array , in bytes * @ param len the length , in bytes , of the valid data in the array * ( in other words , the requested amount of data to write , in bytes ) * @ return the number of bytes actually written * @ throws IllegalArgumentException if the requested number of bytes does * not represent an integral number of sample frames , * or if < code > len < / code > is negative * @ throws ArrayIndexOutOfBoundsException if < code > off < / code > is negative , * or < code > off + len < / code > is greater than the length of the array * < code > b < / code > . */ @ Override public int write ( byte [ ] b , int off , int len ) { } }
if ( isMutable ) { return sourceDataLine . write ( b , off , len ) ; } else { if ( isMutedFromSystem ) { byte [ ] newArr = new byte [ b . length ] ; return sourceDataLine . write ( newArr , off , len ) ; } else { return sourceDataLine . write ( b , off , len ) ; } }
public class AmazonElastiCacheClient { /** * Creates a Redis ( cluster mode disabled ) or a Redis ( cluster mode enabled ) replication group . * A Redis ( cluster mode disabled ) replication group is a collection of clusters , where one of the clusters is a * read / write primary and the others are read - only replicas . Writes to the primary are asynchronously propagated to * the replicas . * A Redis ( cluster mode enabled ) replication group is a collection of 1 to 15 node groups ( shards ) . Each node group * ( shard ) has one read / write primary node and up to 5 read - only replica nodes . Writes to the primary are * asynchronously propagated to the replicas . Redis ( cluster mode enabled ) replication groups partition the data * across node groups ( shards ) . * When a Redis ( cluster mode disabled ) replication group has been successfully created , you can add one or more * read replicas to it , up to a total of 5 read replicas . You cannot alter a Redis ( cluster mode enabled ) * replication group after it has been created . However , if you need to increase or decrease the number of node * groups ( console : shards ) , you can avail yourself of ElastiCache for Redis ' enhanced backup and restore . For more * information , see < a * href = " http : / / docs . aws . amazon . com / AmazonElastiCache / latest / red - ug / backups - restoring . html " > Restoring From a Backup * with Cluster Resizing < / a > in the < i > ElastiCache User Guide < / i > . * < note > * This operation is valid for Redis only . * < / note > * @ param createReplicationGroupRequest * Represents the input of a < code > CreateReplicationGroup < / code > operation . * @ return Result of the CreateReplicationGroup operation returned by the service . * @ throws CacheClusterNotFoundException * The requested cluster ID does not refer to an existing cluster . * @ throws InvalidCacheClusterStateException * The requested cluster is not in the < code > available < / code > state . 
* @ throws ReplicationGroupAlreadyExistsException * The specified replication group already exists . * @ throws InsufficientCacheClusterCapacityException * The requested cache node type is not available in the specified Availability Zone . * @ throws CacheSecurityGroupNotFoundException * The requested cache security group name does not refer to an existing cache security group . * @ throws CacheSubnetGroupNotFoundException * The requested cache subnet group name does not refer to an existing cache subnet group . * @ throws ClusterQuotaForCustomerExceededException * The request cannot be processed because it would exceed the allowed number of clusters per customer . * @ throws NodeQuotaForClusterExceededException * The request cannot be processed because it would exceed the allowed number of cache nodes in a single * cluster . * @ throws NodeQuotaForCustomerExceededException * The request cannot be processed because it would exceed the allowed number of cache nodes per customer . * @ throws CacheParameterGroupNotFoundException * The requested cache parameter group name does not refer to an existing cache parameter group . * @ throws InvalidVPCNetworkStateException * The VPC network is in an invalid state . * @ throws TagQuotaPerResourceExceededException * The request cannot be processed because it would cause the resource to have more than the allowed number * of tags . The maximum number of tags permitted on a resource is 50. * @ throws NodeGroupsPerReplicationGroupQuotaExceededException * The request cannot be processed because it would exceed the maximum allowed number of node groups * ( shards ) in a single replication group . The default maximum is 15 * @ throws InvalidParameterValueException * The value for a parameter is invalid . * @ throws InvalidParameterCombinationException * Two or more incompatible parameters were specified . * @ sample AmazonElastiCache . CreateReplicationGroup * @ see < a href = " http : / / docs . aws . amazon . 
com / goto / WebAPI / elasticache - 2015-02-02 / CreateReplicationGroup " * target = " _ top " > AWS API Documentation < / a > */ @ Override public ReplicationGroup createReplicationGroup ( CreateReplicationGroupRequest request ) { } }
request = beforeClientExecution ( request ) ; return executeCreateReplicationGroup ( request ) ;
public class XMLTokener { /** * Get the text in the CDATA block . * @ return The string up to the < code > ] ] & gt ; < / code > . * @ throws JSONException If the < code > ] ] & gt ; < / code > is not found . */ public String nextCDATA ( ) throws JSONException { } }
char c ; int i ; StringBuffer sb = new StringBuffer ( ) ; for ( ; ; ) { c = next ( ) ; if ( c == 0 ) { throw syntaxError ( "Unclosed CDATA" ) ; } sb . append ( c ) ; i = sb . length ( ) - 3 ; if ( i >= 0 && sb . charAt ( i ) == ']' && sb . charAt ( i + 1 ) == ']' && sb . charAt ( i + 2 ) == '>' ) { sb . setLength ( i ) ; return sb . toString ( ) ; } }
public class ChannelImpl { /** * Called by Peer when we have been hungup . This can happen when Peer * receives a HangupEvent or during a periodic sweep done by PeerMonitor to * find the status of all channels . Notify any listeners that this channel * has been hung up . */ @ Override public void notifyHangupListeners ( Integer cause , String causeText ) { } }
this . _isLive = false ; if ( this . hangupListener != null ) { this . hangupListener . channelHangup ( this , cause , causeText ) ; } else { logger . warn ( "Hangup listener is null" ) ; }
public class Date {
    /**
     * Setter for day — sets the day of the month.
     *
     * @generated
     * @param v value to set into the feature
     */
    public void setDay(int v) {
        // Feature-existence check is only active when featOkTst is enabled.
        if (Date_Type.featOkTst && ((Date_Type) jcasType).casFeat_day == null)
            jcasType.jcas.throwFeatMissing("day", "de.julielab.jules.types.Date");
        // Write the int value directly into the CAS feature slot.
        jcasType.ll_cas.ll_setIntValue(addr, ((Date_Type) jcasType).casFeatCode_day, v);
    }
}
public class Utils { /** * Convenience method for creating a new { @ link ImmutableSet } with concatenated iterable . */ @ NonNull public static < E > ImmutableSet < E > concatSet ( @ NonNull ImmutableSet < E > set , @ NonNull Iterable < E > toConcat ) { } }
return ImmutableSet . < E > builder ( ) . addAll ( set ) . addAll ( toConcat ) . build ( ) ;
public class StrictnessSelector { /** * Determines the actual strictness in the following importance order : * 1st - strictness configured when declaring stubbing ; * 2nd - strictness configured at mock level ; * 3rd - strictness configured at test level ( rule , mockito session ) * @ param stubbing stubbing to check for strictness . Null permitted . * @ param mockSettings settings of the mock object , may or may not have strictness configured . Must not be null . * @ param testLevelStrictness strictness configured using the test - level configuration ( rule , mockito session ) . Null permitted . * @ return actual strictness , can be null . */ public static Strictness determineStrictness ( Stubbing stubbing , MockCreationSettings mockSettings , Strictness testLevelStrictness ) { } }
if ( stubbing != null && stubbing . getStrictness ( ) != null ) { return stubbing . getStrictness ( ) ; } if ( mockSettings . isLenient ( ) ) { return Strictness . LENIENT ; } return testLevelStrictness ;
public class DescribeTransitGatewaysRequest {
    /**
     * This method is intended for internal use only. Returns the marshaled
     * request configured with additional parameters to enable operation dry-run.
     */
    @Override
    public Request<DescribeTransitGatewaysRequest> getDryRunRequest() {
        // Marshall the request normally, then flag it as a dry run.
        Request<DescribeTransitGatewaysRequest> request = new DescribeTransitGatewaysRequestMarshaller().marshall(this);
        request.addParameter("DryRun", Boolean.toString(true));
        return request;
    }
}
public class ServerStateMachine {
    /**
     * Applies a configuration entry to the internal state machine.
     *
     * <p>Configuration entries take effect when written to the log, so no
     * significant logic is needed here on commit: we simply release the
     * superseded configuration entry, which has been retained until now so it
     * could be stored on all servers.
     *
     * @param entry the committed configuration entry
     * @return a completed future, since the work is synchronous
     */
    private CompletableFuture<Void> apply(ConfigurationEntry entry) {
        log.release(entry.getIndex());
        return CompletableFuture.completedFuture(null);
    }
}
public class MarcValueTransformers {
    /**
     * Transform value.
     *
     * @param field the MARC field whose values are transformed
     * @return a new MARC field with transformed values, or the original field
     *         unchanged when no transformer is configured for it
     */
    public MarcField transformValue(MarcField field) {
        String key = field.toTagIndicatorKey();
        // Fast path: no transformers configured at all.
        if (marcValueTransformerMap.isEmpty()) {
            return field;
        }
        // Look up a transformer for this tag/indicator key, falling back to the default entry.
        final MarcValueTransformer transformer = marcValueTransformerMap.containsKey(key) ? marcValueTransformerMap.get(key) : marcValueTransformerMap.get(DEFAULT);
        if (transformer != null) {
            // Rebuild the field, transforming the field value (if present) and
            // only those subfields selected below.
            MarcField.Builder builder = MarcField.builder();
            builder.tag(field.getTag()).indicator(field.getIndicator());
            if (field.getValue() != null) {
                builder.value(transformer.transform(field.getValue()));
            }
            // select only subfields configured for this tag; when none are
            // configured, all of the field's subfield ids are eligible
            String subs = subfieldMap.containsKey(key) ? subfieldMap.get(key) : field.getSubfieldIds();
            field.getSubfields().forEach(subfield -> builder.subfield(subfield.getId(), subs.contains(subfield.getId()) ? transformer.transform(subfield.getValue()) : subfield.getValue()));
            return builder.build();
        }
        return field;
    }
}
public class KeyVaultClientCustomImpl { /** * Gets information about a specified certificate . * @ param vaultBaseUrl * The vault name , e . g . https : / / myvault . vault . azure . net * @ param certificateName * The name of the certificate in the given vault * @ param serviceCallback * the async ServiceCallback to handle successful and failed * responses . * @ return the { @ link ServiceFuture } object */ public ServiceFuture < CertificateBundle > getCertificateAsync ( String vaultBaseUrl , String certificateName , final ServiceCallback < CertificateBundle > serviceCallback ) { } }
return getCertificateAsync ( vaultBaseUrl , certificateName , "" , serviceCallback ) ;
public class OperatorTopologyImpl {
    /**
     * Refreshes the effective topology with deletion messages drained from the
     * deletionDeltas queue. The whole update happens under {@code topologyLock}.
     *
     * @throws ParentDeadException propagated from draining the deletion deltas
     */
    private void refreshEffectiveTopology() throws ParentDeadException {
        LOG.entering("OperatorTopologyImpl", "refreshEffectiveTopology", getQualifiedName());
        LOG.finest(getQualifiedName() + "Waiting to acquire topoLock");
        synchronized (topologyLock) {
            LOG.finest(getQualifiedName() + "Acquired topoLock");
            assert effectiveTopology != null;
            // Drain pending deletion messages into a local set before applying them.
            final Set<GroupCommunicationMessage> deletionDeltasSet = new HashSet<>();
            copyDeletionDeltas(deletionDeltasSet);
            LOG.finest(getQualifiedName() + "Updating effective topology struct with deletion msgs");
            effectiveTopology.update(deletionDeltasSet);
            LOG.finest(getQualifiedName() + "Released topoLock");
        }
        LOG.exiting("OperatorTopologyImpl", "refreshEffectiveTopology", getQualifiedName());
    }
}
public class RequestAttributeSourceFilter { /** * Add request headers to the attributes map * @ param httpServletRequest Http Servlet Request * @ param attributes Map of attributes to add additional attributes to from the Http Request */ protected void addRequestHeaders ( final HttpServletRequest httpServletRequest , final Map < String , List < Object > > attributes ) { } }
for ( final Map . Entry < String , Set < String > > headerAttributeEntry : this . headerAttributeMapping . entrySet ( ) ) { final String headerName = headerAttributeEntry . getKey ( ) ; final String value = httpServletRequest . getHeader ( headerName ) ; if ( value != null ) { for ( final String attributeName : headerAttributeEntry . getValue ( ) ) { attributes . put ( attributeName , headersToIgnoreSemicolons . contains ( headerName ) ? list ( value ) : splitOnSemiColonHandlingBackslashEscaping ( value ) ) ; } } }
public class GraphHelper {
    /**
     * For the given type, finds the unique attributes and checks if there is an
     * existing ACTIVE instance with the same unique value.
     *
     * @param classType the class type whose unique attributes are inspected
     * @param instance the instance whose unique attribute values are looked up
     * @return the matching vertex, or null if none was found
     * @throws AtlasException on lookup failure other than "not found"
     */
    public AtlasVertex getVertexForInstanceByUniqueAttribute(ClassType classType, IReferenceableInstance instance) throws AtlasException {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Checking if there is an instance with the same unique attributes for instance {}", instance.toShortString());
        }
        AtlasVertex result = null;
        // NOTE(review): the loop keeps scanning after a match, so when the type
        // has several unique attributes the LAST one that resolves wins — confirm
        // this is intended rather than an early return.
        for (AttributeInfo attributeInfo : classType.fieldMapping().fields.values()) {
            if (attributeInfo.isUnique) {
                String propertyKey = getQualifiedFieldName(classType, attributeInfo.name);
                try {
                    // Only ACTIVE entities of this exact type are considered.
                    result = findVertex(propertyKey, instance.get(attributeInfo.name), Constants.ENTITY_TYPE_PROPERTY_KEY, classType.getName(), Constants.STATE_PROPERTY_KEY, Id.EntityState.ACTIVE.name());
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("Found vertex by unique attribute : {}={}", propertyKey, instance.get(attributeInfo.name));
                    }
                } catch (EntityNotFoundException e) {
                    // Its ok if there is no entity with the same unique value
                }
            }
        }
        return result;
    }
}
public class SQLiteDatabase { /** * Query the given URL , returning a { @ link Cursor } over the result set . * @ param distinct true if you want each row to be unique , false otherwise . * @ param table The table name to compile the query against . * @ param columns A list of which columns to return . Passing null will * return all columns , which is discouraged to prevent reading * data from storage that isn ' t going to be used . * @ param selection A filter declaring which rows to return , formatted as an * SQL WHERE clause ( excluding the WHERE itself ) . Passing null * will return all rows for the given table . * @ param selectionArgs You may include ? s in selection , which will be * replaced by the values from selectionArgs , in order that they * appear in the selection . The values will be bound as Strings . * @ param groupBy A filter declaring how to group rows , formatted as an SQL * GROUP BY clause ( excluding the GROUP BY itself ) . Passing null * will cause the rows to not be grouped . * @ param having A filter declare which row groups to include in the cursor , * if row grouping is being used , formatted as an SQL HAVING * clause ( excluding the HAVING itself ) . Passing null will cause * all row groups to be included , and is required when row * grouping is not being used . * @ param orderBy How to order the rows , formatted as an SQL ORDER BY clause * ( excluding the ORDER BY itself ) . Passing null will use the * default sort order , which may be unordered . * @ param limit Limits the number of rows returned by the query , * formatted as LIMIT clause . Passing null denotes no LIMIT clause . * @ return A { @ link Cursor } object , which is positioned before the first entry . Note that * { @ link Cursor } s are not synchronized , see the documentation for more details . 
* @ see Cursor */ public Cursor query ( boolean distinct , String table , String [ ] columns , String selection , String [ ] selectionArgs , String groupBy , String having , String orderBy , String limit ) { } }
return queryWithFactory ( null , distinct , table , columns , selection , selectionArgs , groupBy , having , orderBy , limit , null ) ;
public class EventBus { /** * Bind a { @ link ActEventListener eventListener } to an event type extended * from { @ link EventObject } synchronously . * @ param eventType * the target event type - should be a sub class of { @ link EventObject } * @ param eventListener * the listener - an instance of { @ link ActEventListener } or it ' s sub class * @ return this event bus instance * @ see # bind ( Class , ActEventListener ) */ public EventBus bindSync ( Class < ? extends EventObject > eventType , ActEventListener eventListener ) { } }
return _bind ( actEventListeners , eventType , eventListener , 0 ) ;
public class DatePartitionHiveVersionFinder { /** * Create a { @ link TimestampedHiveDatasetVersion } from a { @ link Partition } . The hive table is expected * to be date partitioned by { @ link # partitionKeyName } . The partition value format must be { @ link # pattern } * @ throws IllegalArgumentException when { @ link # partitionKeyName } is not found in the < code > < / code > * @ throws IllegalArgumentException when a value can not be found for { @ link # partitionKeyName } in the < code > partition < / code > * @ throws IllegalArgumentException if the partition value can not be parsed with { @ link # pattern } * { @ inheritDoc } */ @ Override protected TimestampedHiveDatasetVersion getDatasetVersion ( Partition partition ) { } }
int index = Iterables . indexOf ( partition . getTable ( ) . getPartitionKeys ( ) , this . partitionKeyNamePredicate ) ; if ( index == - 1 ) { throw new IllegalArgumentException ( String . format ( "Failed to find partition key %s in the table %s" , this . partitionKeyName , partition . getTable ( ) . getCompleteName ( ) ) ) ; } if ( index >= partition . getValues ( ) . size ( ) ) { throw new IllegalArgumentException ( String . format ( "Failed to find partition value for key %s in the partition %s" , this . partitionKeyName , partition . getName ( ) ) ) ; } return new TimestampedHiveDatasetVersion ( this . formatter . parseDateTime ( partition . getValues ( ) . get ( index ) . trim ( ) . substring ( 0 , this . pattern . length ( ) ) ) , partition ) ;
public class GaliosFieldOps {
    /**
     * Multiplies two polynomials modulo a primitive polynomial, using
     * "Russian peasant" (shift-and-add) multiplication. The result is a member
     * of the same field as the inputs, provided {@code primitive} is an
     * appropriate irreducible polynomial for that field.
     *
     * @param x polynomial
     * @param y polynomial
     * @param primitive primitive (irreducible) polynomial used for reduction
     * @param domain one past the largest field value, e.g. 256 for GF(2**8)
     * @return the product polynomial, reduced into the field
     */
    public static int multiply(int x, int y, int primitive, int domain) {
        int product = 0;
        int a = x;
        int b = y;
        while (b > 0) {
            // Add (XOR) the shifted multiplicand for each set bit of b.
            if ((b & 1) != 0) {
                product ^= a;
            }
            b >>= 1;
            a <<= 1;
            // Reduce whenever the multiplicand leaves the field.
            if (a >= domain) {
                a ^= primitive;
            }
        }
        return product;
    }
}
public class SignOutUserRequestMarshaller {
    /**
     * Marshall the given parameter object.
     *
     * @param signOutUserRequest the request model to serialize; must not be null
     * @param protocolMarshaller the marshaller that receives each field/binding pair
     * @throws SdkClientException if the argument is null or any field fails to marshall
     */
    public void marshall(SignOutUserRequest signOutUserRequest, ProtocolMarshaller protocolMarshaller) {
        if (signOutUserRequest == null) {
            throw new SdkClientException("Invalid argument passed to marshall(...)");
        }
        try {
            protocolMarshaller.marshall(signOutUserRequest.getFleetArn(), FLEETARN_BINDING);
            protocolMarshaller.marshall(signOutUserRequest.getUsername(), USERNAME_BINDING);
        } catch (Exception e) {
            // Wrap any failure, preserving the original exception as the cause.
            throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
        }
    }
}
public class TrieIterator {
    /**
     * Internal block value calculations.
     * Performs calculations on a data block to find codepoints in m_nextBlock_
     * after the index m_nextBlockIndex_ that have the same value.
     * Note: on entry the m_*_ variables describe the next codepoint whose value
     * has not been calculated; when returning false they describe the last
     * codepoint whose value has been calculated.
     *
     * @param currentValue the value which other codepoints are tested against
     * @return true if the whole block has the same value as currentValue or if
     *         the whole block has been calculated, false otherwise
     */
    private final boolean checkBlockDetail(int currentValue) {
        // Walk the remainder of the block; stop at the first differing value.
        while (m_nextBlockIndex_ < DATA_BLOCK_LENGTH_) {
            m_nextValue_ = extract(m_trie_.getValue(m_nextBlock_ + m_nextBlockIndex_));
            if (m_nextValue_ != currentValue) {
                return false;
            }
            // Advance both the in-block index and the codepoint in lockstep.
            ++m_nextBlockIndex_;
            ++m_nextCodepoint_;
        }
        return true;
    }
}
public class SQLExpressions { /** * Start a window function expression * @ param expr expression * @ return max ( expr ) */ public static < T extends Comparable > WindowOver < T > max ( Expression < T > expr ) { } }
return new WindowOver < T > ( expr . getType ( ) , Ops . AggOps . MAX_AGG , expr ) ;
public class GDLLoader {
    /**
     * Creates a cache mapping variables to query elements, filled with entries
     * from the user cache and/or the auto cache depending on the flags.
     *
     * @param userCache element user cache
     * @param autoCache element auto cache
     * @param includeUserDefined true iff user cache elements shall be included
     * @param includeAutoGenerated true iff auto cache elements shall be included
     * @param <T> query element type
     * @return immutable cache
     */
    private <T> Map<String, T> getCache(Map<String, T> userCache, Map<String, T> autoCache,
            boolean includeUserDefined, boolean includeAutoGenerated) {
        final Map<String, T> merged = new HashMap<>();
        if (includeUserDefined) {
            merged.putAll(userCache);
        }
        if (includeAutoGenerated) {
            merged.putAll(autoCache);
        }
        // Wrap so callers cannot mutate the returned cache.
        return Collections.unmodifiableMap(merged);
    }
}
public class SibRaCommonEndpointActivation { /** Attempts to connect to a messaging engine using the supplied TRM target data. First tries a targetted (preferred/required) connection under the connections lock; if that yields nothing and the caller only *preferred* the target, falls back to connecting without target preferences. @param targetType the type of target @param targetSignificance target significance (preferred or required) @param target the name of the target @throws ResourceException if a non-transient connection error occurs */ public void connectUsingTrmWithTargetData ( String targetType , String targetSignificance , String target ) throws ResourceException { } }
final String methodName = "connectUsingTrmWithTargetData" ;
if ( TraceComponent . isAnyTracingEnabled ( ) && TRACE . isEntryEnabled ( ) ) {
    SibTr . entry ( this , TRACE , methodName , new Object [ ] { targetType , targetSignificance , target } ) ;
}
synchronized ( _connections ) {
    // At this point in the code path we can be sure that:
    // 1. There are no local preferred ME's (this method would not be called if there were)
    // 2. Target data has been set (we are in the else block of the "if (target == null)" block)
    // Since we have been passed target data it means we are attempting to create a connection
    // to a preferred (or required) ME (no target data is ever passed if we are trying to create
    // a connection to a non preferred ME). With this in mind, if we are currently connected to
    // a non preferred ME (_connectedToPreferred is false) or we do not currently have a connection
    // then we'll try and create a connection that matches our target data. If a connection
    // is created then we should close any non preferred connections (if there are any).
    if ( ( ! _connectedToPreferred ) || ( _connections . size ( ) == 0 ) ) {
        // Set to required (even if user opted for preferred).
        // Pass targetType and target from user data.
        SibRaMessagingEngineConnection newConnection = null ;
        try {
            newConnection = new SibRaMessagingEngineConnection ( this , _endpointConfiguration . getBusName ( ) , targetType , SibTrmConstants . TARGET_SIGNIFICANCE_REQUIRED , target ) ;
            // A targetted connection succeeded: non-preferred connections are now redundant.
            dropNonPreferredConnections ( ) ;
            if ( newConnection . getConnection ( ) != null ) {
                _connections . put ( newConnection . getConnection ( ) . getMeUuid ( ) , newConnection ) ;
                createListener ( newConnection ) ;
            }
            if ( _connections . size ( ) > 0 ) {
                // Record whether the (single) live connection is remote, and that it is preferred.
                _connectedRemotely = checkIfRemote ( _connections . values ( ) . iterator ( ) . next ( ) ) ;
                _connectedToPreferred = true ;
                if ( TraceComponent . isAnyTracingEnabled ( ) && TRACE . isDebugEnabled ( ) ) {
                    SibTr . debug ( TRACE , "We have connect <remote=" + _connectedRemotely + " > <preferred=" + _connectedToPreferred + ">" ) ;
                }
            }
        } catch ( final SIResourceException exception ) {
            // No FFDC code needed
            // We are potentially connecting remotely so this error may be transient.
            // Possibly the remote ME is not available: warn (suppressed) and let the
            // retry machinery try again later rather than failing the activation.
            SibTr . warning ( TRACE , SibTr . Suppressor . ALL_FOR_A_WHILE , "TARGETTED_CONNECTION_FAILED_CWSIV0787" , new Object [ ] { targetType , targetSignificance , target , _endpointConfiguration . getBusName ( ) } ) ;
            if ( TraceComponent . isAnyTracingEnabled ( ) && TRACE . isDebugEnabled ( ) ) {
                SibTr . debug ( TRACE , "Failed to obtain a connection - retry after a set interval" ) ;
            }
        } catch ( final SIException exception ) {
            // Unexpected messaging failure: record FFDC and surface as a ResourceException.
            FFDCFilter . processException ( exception , CLASS_NAME + "." + methodName , "1:711:1.45" , this ) ;
            if ( TraceComponent . isAnyTracingEnabled ( ) && TRACE . isEventEnabled ( ) ) {
                SibTr . exception ( this , TRACE , exception ) ;
            }
            throw new ResourceException ( NLS . getFormattedMessage ( "CONNECT_FAILED_CWSIV0782" , new Object [ ] { _endpointConfiguration . getDestination ( ) . getDestinationName ( ) , _endpointConfiguration . getBusName ( ) , this , exception } , null ) , exception ) ;
        }
    }
} // Sync block
// Failed to get a preferred one, try for any ME next.
// Also if we have connected to a remote non preferred ME we may wish to try again for a local
// non preferred ME (we know there is not a local preferred ME as we would not be in this
// method if there were). Only fall back when the caller merely *preferred* the target.
if ( ( ( _connections . size ( ) == 0 ) || ( _connectedRemotely && ! _connectedToPreferred && _destinationStrategy . isDropRemoteNonPreferredForLocalNonPreferred ( ) ) ) && ( targetSignificance . equals ( SibTrmConstants . TARGET_SIGNIFICANCE_PREFERRED ) ) ) {
    if ( TraceComponent . isAnyTracingEnabled ( ) && TRACE . isDebugEnabled ( ) ) {
        SibTr . debug ( TRACE , "Could not obtain the preferred connection - try again without any target preferences" ) ;
    }
    // For durable pub sub there are no local preferred MEs; for point to point and non durable
    // pub sub they all count as preferred.
    connect ( _destinationStrategy . isDurablePubsSub ( ) ? null : getMEsToCheck ( ) , null , null , null , false ) ;
}
if ( TraceComponent . isAnyTracingEnabled ( ) && TRACE . isEntryEnabled ( ) ) {
    SibTr . exit ( TRACE , methodName ) ;
}
public class QueueContainer { /** Tries to obtain an item by removing the head of the queue or removing an item previously reserved by invoking {@link #txnOfferReserve(String)} with {@code reservedOfferId}. If the queue item does not have data in-memory it will load the data from the queue store if the queue store is configured and enabled. @param reservedOfferId the ID of the reserved item to be returned if the queue is empty @param transactionId the transaction ID for which this poll is invoked @return the head of the queue or a reserved item with the {@code reservedOfferId} if there is any */ public QueueItem txnPollReserve ( long reservedOfferId , String transactionId ) { } }
QueueItem item = getItemQueue ( ) . peek ( ) ;
if ( item == null ) {
    // Queue is empty: fall back to an item this transaction reserved via txnOfferReserve.
    TxQueueItem txItem = txMap . remove ( reservedOfferId ) ;
    if ( txItem == null ) {
        return null ;
    }
    item = new QueueItem ( this , txItem . getItemId ( ) , txItem . getData ( ) ) ;
    return item ;
}
if ( store . isEnabled ( ) && item . getData ( ) == null ) {
    // Data is not in memory: reload it from the configured queue store before handing it out.
    try {
        load ( item ) ;
    } catch ( Exception e ) {
        throw new HazelcastException ( e ) ;
    }
}
// Remove the head (already peeked above) and park it in the transaction map
// until the transaction commits or rolls back.
getItemQueue ( ) . poll ( ) ;
txMap . put ( item . getItemId ( ) , new TxQueueItem ( item ) . setPollOperation ( true ) . setTransactionId ( transactionId ) ) ;
return item ;
public class LookaheadChainingListener { /** {@inheritDoc} Buffers the begin-definition-list event and then forwards the event preceding it in the lookahead buffer. @since 2.0RC1 */ @Override public void beginDefinitionList ( Map < String , String > parameters ) { } }
// Buffer this event first so the listener has one event of lookahead,
// then flush the previously buffered event. Order matters here.
this . previousEvents . beginDefinitionList ( parameters ) ;
firePreviousEvent ( ) ;
public class DebuggableScheduledThreadPoolExecutor { /** We need this as well as the wrapper for the benefit of non-repeating tasks: ensures any throwable from a completed task is logged rather than silently lost. */ @Override public void afterExecute ( Runnable r , Throwable t ) { } }
// Preserve the superclass bookkeeping before our own handling.
super . afterExecute ( r , t ) ;
// Delegate to the shared helper that extracts and logs task exceptions.
DebuggableThreadPoolExecutor . logExceptionsAfterExecute ( r , t ) ;
public class AfplibFactoryImpl { /** * < ! - - begin - user - doc - - > * < ! - - end - user - doc - - > * @ generated */ public String convertBeginSegmentCommandFLAG2ToString ( EDataType eDataType , Object instanceValue ) { } }
return instanceValue == null ? null : instanceValue . toString ( ) ;
public class FieldDefinitionBuilder { /** Registers the configured CronField in the owning CronDefinitionBuilder and returns that builder so the fluent chain can continue. @return CronDefinitionBuilder instance obtained from constructor */ public CronDefinitionBuilder and ( ) { } }
// Materialize the accumulated constraints into a FieldDefinition and hand it
// back to the parent builder.
cronDefinitionBuilder . register ( new FieldDefinition ( fieldName , constraints . createConstraintsInstance ( ) , optional ) ) ;
return cronDefinitionBuilder ;
public class StringTextTemplate { /** * { @ inheritDoc } */ @ Override public TextTemplate interpolate ( final Map < String , ? > variables ) { } }
if ( variables != null ) { final String result = new MapVariableInterpolator ( buffer . toString ( ) , variables ) . toString ( ) ; buffer . delete ( 0 , buffer . length ( ) ) ; buffer . append ( result ) ; } return this ;
public class GZipUtils { /** * 文件压缩 * @ param path * @ param delete 是否删除原始文件 * @ throws Exception */ public static void compress ( String path , boolean delete ) throws Exception { } }
File file = new File ( path ) ; compress ( file , delete ) ;
public class CmsExplorerTypeSettings { /** * Sets if the title property should automatically be added on resource creation . < p > * @ param autoSetTitle true if title should be added , otherwise false */ public void setAutoSetTitle ( String autoSetTitle ) { } }
m_autoSetTitle = Boolean . valueOf ( autoSetTitle ) . booleanValue ( ) ; if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( Messages . get ( ) . getBundle ( ) . key ( Messages . LOG_SET_AUTO_TITLE_1 , autoSetTitle ) ) ; }
public class InternalService { /** * Returns observable to add a participant to . * @ param conversationId ID of a conversation to add a participant to . * @ return Observable to get a list of conversation participants . */ public Observable < ComapiResult < List < Participant > > > getParticipants ( @ NonNull final String conversationId ) { } }
final String token = getToken ( ) ; if ( sessionController . isCreatingSession ( ) ) { return getTaskQueue ( ) . queueGetParticipants ( conversationId ) ; } else if ( TextUtils . isEmpty ( token ) ) { return Observable . error ( getSessionStateErrorDescription ( ) ) ; } else { return doGetParticipants ( token , conversationId ) ; }
public class CancellationTokenSource { /** Cancels the token if it has not already been cancelled, then notifies all registered listeners. Idempotent: a second call is a no-op. */ public void cancel ( ) { } }
List < CancellationTokenRegistration > registrations ;
synchronized ( lock ) {
    throwIfClosed ( ) ;
    if ( cancellationRequested ) {
        // Already cancelled - nothing more to do.
        return ;
    }
    // Any pending timed cancellation is now redundant.
    cancelScheduledCancellation ( ) ;
    cancellationRequested = true ;
    // Snapshot the registrations under the lock so listeners can be notified
    // outside it (avoids holding the lock while running alien code).
    registrations = new ArrayList < > ( this . registrations ) ;
}
notifyListeners ( registrations ) ;
public class MerkleTreeConfig { /** Sets the depth of the merkle tree. The depth must be between {@value MIN_DEPTH} and {@value MAX_DEPTH} (inclusive). @param depth the depth of the merkle tree @return the updated config @throws IllegalArgumentException if the {@code depth} is greater than {@value MAX_DEPTH} or less than {@value MIN_DEPTH} */ public MerkleTreeConfig setDepth ( int depth ) { } }
// Inclusive range check: MIN_DEPTH and MAX_DEPTH themselves are valid depths.
if ( depth < MIN_DEPTH || depth > MAX_DEPTH ) {
    throw new IllegalArgumentException ( "Merkle tree depth " + depth + " is outside of the allowed range " + MIN_DEPTH + "-" + MAX_DEPTH + ". " ) ;
}
this . depth = depth ;
return this ;
public class PreferenceActivity { /** * Sets the width of the navigation , when using the split screen layout . * @ param width * The width , which should be set , in pixels as an { @ link Integer } value . The width must * be greater than 0 */ public final void setNavigationWidth ( @ Px final int width ) { } }
Condition . INSTANCE . ensureGreater ( width , 0 , "The width must be greater than 0" ) ; this . navigationWidth = width ; adaptNavigationWidth ( ) ;
public class NodeIndexer { /** * Returns < code > true < / code > if the content of the property with the given * name should the used to create an excerpt . * @ param propertyName the name of a property . * @ return < code > true < / code > if it should be used to create an excerpt ; * < code > false < / code > otherwise . */ protected boolean useInExcerpt ( InternalQName propertyName ) { } }
if ( indexingConfig == null ) { return true ; } else { return indexingConfig . useInExcerpt ( node , propertyName ) ; }
public class SAMFileMerger { /** Merge part file shards produced by {@link KeyIgnoringAnySAMOutputFormat} into a single file with the given header, then build the merged splitting-bai index and delete the part directory. @param partDirectory the directory containing part files @param outputFile the file to write the merged file to @param samOutputFormat the format (must be BAM or CRAM; SAM is not supported) @param header the header for the merged file @throws IOException on any I/O failure */ public static void mergeParts ( final String partDirectory , final String outputFile , final SAMFormat samOutputFormat , final SAMFileHeader header ) throws IOException { } }
// First, check for the _SUCCESS file - its absence means the producing job did not finish.
final Path partPath = asPath ( partDirectory ) ;
final Path successPath = partPath . resolve ( "_SUCCESS" ) ;
if ( ! Files . exists ( successPath ) ) {
    throw new NoSuchFileException ( successPath . toString ( ) , null , "Unable to find _SUCCESS file" ) ;
}
final Path outputPath = asPath ( outputFile ) ;
if ( partPath . equals ( outputPath ) ) {
    throw new IllegalArgumentException ( "Cannot merge parts into output with same " + "path: " + partPath ) ;
}
List < Path > parts = getFilesMatching ( partPath , NIOFileUtil . PARTS_GLOB , SplittingBAMIndexer . OUTPUT_FILE_EXTENSION ) ;
if ( parts . isEmpty ( ) ) {
    throw new IllegalArgumentException ( "Could not write bam file because no part " + "files were found in " + partPath ) ;
}
Files . deleteIfExists ( outputPath ) ;
long headerLength ;
// Count bytes as we write so the index knows where the header ends.
try ( final CountingOutputStream out = new CountingOutputStream ( Files . newOutputStream ( outputPath ) ) ) {
    if ( header != null ) {
        new SAMOutputPreparer ( ) . prepareForRecords ( out , samOutputFormat , header ) ; // write the header
    }
    headerLength = out . getCount ( ) ;
    mergeInto ( parts , out ) ;
    writeTerminatorBlock ( out , samOutputFormat ) ;
}
long fileLength = Files . size ( outputPath ) ;
// Build the merged splitting-bai index alongside the output file.
final Path outputSplittingBaiPath = outputPath . resolveSibling ( outputPath . getFileName ( ) + SplittingBAMIndexer . OUTPUT_FILE_EXTENSION ) ;
Files . deleteIfExists ( outputSplittingBaiPath ) ;
try ( final OutputStream out = Files . newOutputStream ( outputSplittingBaiPath ) ) {
    mergeSplittingBaiFiles ( out , partPath , headerLength , fileLength ) ;
} catch ( IOException e ) {
    // Do not leave a half-written index behind on failure.
    deleteRecursive ( outputSplittingBaiPath ) ;
    throw e ;
}
// The parts have been fully merged; the part directory is no longer needed.
deleteRecursive ( partPath ) ;
public class Util {
    /**
     * Rejects {@code null}, empty, and blank strings with a good exception type and message.
     * @param s the string value to validate
     * @param name the argument name used in exception messages
     * @return {@code s}, unchanged, when it is non-null and not blank
     * @throws NullPointerException if {@code s} is null (message is {@code name})
     * @throws IllegalArgumentException if {@code s} is empty or whitespace-only
     */
    static String checkStringArgument(String s, String name) {
        // Inlined, stdlib-only equivalents of the external checkNotNull/checkArgument
        // helpers; exception types and messages are preserved exactly.
        if (s == null) {
            throw new NullPointerException(name);
        }
        if (s.trim().isEmpty()) {
            throw new IllegalArgumentException("'" + name + "' must not be blank. Was: '" + s + "'");
        }
        return s;
    }
}
public class PtoPInputHandler { /** Pre-commit hook for a message add: routes the message to a suitable point-to-point output handler, or to the exception destination when no localisation can be found or the destination is being deleted. @param msg the message being added @param transaction the transaction the add is part of @throws SIDiscriminatorSyntaxException @throws SIResourceException */ final protected void eventPrecommitAdd ( MessageItem msg , final TransactionCommon transaction ) throws SIDiscriminatorSyntaxException , SIResourceException { } }
if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) )
    SibTr . entry ( tc , "eventPrecommitAdd" , new Object [ ] { msg , transaction } ) ;
if ( ! ( _destination . isToBeDeleted ( ) ) ) {
    if ( msg . isTransacted ( ) && ( ! ( msg . isToBeStoredAtSendTime ( ) ) ) ) {
        // Lock the destination to prevent reallocation from occurring on the chosen localisation
        LockManager reallocationLock = _destination . getReallocationLockManager ( ) ;
        reallocationLock . lock ( ) ;
        try {
            // If we fixed the ME it'll be in the routing address in the message
            SIBUuid8 fixedME = null ;
            JsDestinationAddress routingAddr = msg . getMessage ( ) . getRoutingDestination ( ) ;
            if ( routingAddr != null )
                fixedME = routingAddr . getME ( ) ;
            // If the sender prefers any local ME over others then we'll give it a go
            SIBUuid8 preferredME = null ;
            if ( msg . preferLocal ( ) ) {
                if ( _destination . hasLocal ( ) )
                    preferredME = _messageProcessor . getMessagingEngineUuid ( ) ;
            }
            // 176658.3.5
            OutputHandler handler = _destination . choosePtoPOutputHandler ( fixedME , preferredME , ! msg . isFromRemoteME ( ) , msg . isForcePut ( ) , null ) ;
            if ( handler == null ) {
                // We can't find a suitable localisation.
                // Although a queue must have at least one localisation this is
                // possible if the sender restricted the potential localisations
                // using a fixed ME or a scoping alias (to an out-of-date set of
                // localisations).
                if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) ) {
                    SibTr . debug ( tc , "No suitable OutputHandler found for " + _destination . getName ( ) + " (" + fixedME + ")" ) ;
                }
                // Put the message to the exception destination.
                handleUndeliverableMessage ( _destination ,
                        null , // null LinkHandler
                        msg ,
                        SIRCConstants . SIRC0026_NO_LOCALISATIONS_FOUND_ERROR ,
                        new String [ ] { _destination . getName ( ) } ,
                        transaction ) ;
            } else {
                // Indicate in the message if this was a guess
                msg . setStreamIsGuess ( handler . isWLMGuess ( ) ) ;
                // put the message to the output handler
                handler . put ( msg , transaction , null , true ) ;
            }
        } finally {
            // unlock the reallocation lock
            reallocationLock . unlock ( ) ;
        }
    }
} else {
    // The destination has been deleted. Put the message to the exception destination.
    ExceptionDestinationHandlerImpl exceptionDestinationHandlerImpl = ( ExceptionDestinationHandlerImpl ) _messageProcessor . createExceptionDestinationHandler ( null ) ;
    // Set indicator to send the message to the exception destination immediately,
    // rather than registering it for pre-prepare of the transaction, as this is
    // pre-prepare of the transaction!
    msg . setStoreAtSendTime ( true ) ;
    String destName = _destination . getName ( ) ;
    if ( _destination . isLink ( ) )
        destName = ( ( LinkHandler ) _destination ) . getBusName ( ) ;
    final UndeliverableReturnCode rc = exceptionDestinationHandlerImpl . handleUndeliverableMessage ( msg , transaction , SIRCConstants . SIRC0032_DESTINATION_DELETED_ERROR , new String [ ] { destName , _messageProcessor . getMessagingEngineName ( ) } ) ;
    if ( rc != UndeliverableReturnCode . OK ) {
        if ( rc == UndeliverableReturnCode . DISCARD ) {
            // The message is to be discarded. Do nothing and it will disappear.
        } else {
            if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) )
                SibTr . exit ( tc , "eventPrecommitAdd" , "WsRuntimeException" ) ;
            // We cannot put the message to the exception destination. All we can
            // do in this case is rollback the user's transaction. This is done by
            // throwing an exception that is caught by the transaction manager.
            throw new WsRuntimeException ( nls . getFormattedMessage ( "DESTINATION_DELETED_ERROR_CWSIP0247" , new Object [ ] { _destination . getName ( ) , rc } , null ) ) ;
        }
    }
}
if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) )
    SibTr . exit ( tc , "eventPrecommitAdd" ) ;
public class DatasourceConnectionPool { /** * do not change interface , used by argus monitor */ public Map < String , Integer > openConnections ( ) { } }
Map < String , Integer > map = new HashMap < String , Integer > ( ) ; Iterator < DCStack > it = dcs . values ( ) . iterator ( ) ; // all connections in pool DCStack dcstack ; while ( it . hasNext ( ) ) { dcstack = it . next ( ) ; Integer val = map . get ( dcstack . getDatasource ( ) . getName ( ) ) ; if ( val == null ) val = dcstack . openConnections ( ) ; else val = val . intValue ( ) + dcstack . openConnections ( ) ; map . put ( dcstack . getDatasource ( ) . getName ( ) , val ) ; } return map ;
public class EditorUtilities { /** Shows a modal message dialog, optionally word-wrapping the text first. Safe to call from any thread: off the EDT the dialog is shown via invokeAndWait. @param strMsg the message text @param iType a JOptionPane message type constant @param bWrapText whether to wrap the text before display */ public static void displayMessageBox ( String strMsg , final int iType , boolean bWrapText ) { } }
final String strWrappedMsg = bWrapText ? wrapText ( strMsg ) : strMsg ;
Runnable logMsgBox = new Runnable ( ) {
    public void run ( ) {
        JOptionPane . showMessageDialog ( getWindow ( ) , strWrappedMsg , "" , iType ) ;
    }
} ;
if ( EventQueue . isDispatchThread ( ) ) {
    // Already on the EDT - show the dialog directly.
    logMsgBox . run ( ) ;
} else {
    // Block the calling thread until the dialog is dismissed on the EDT.
    try {
        EventQueue . invokeAndWait ( logMsgBox ) ;
    } catch ( Throwable t ) {
        t . printStackTrace ( ) ;
    }
}
public class WordChoiceEvaluationRunner { /** * Evaluates the performance of a given { @ code SemanticSpace } on a given * { @ code WordChoiceEvaluation } using the provided similarity metric . * Returns a { @ link WordChoiceReport } detailing the performance . * @ param sspace The { @ link SemanticSpace } to test against * @ param test The { @ link WordChoiceEvaluation } providing a set of multiple * choice options * @ param vectorComparisonType The similarity measture to use * @ return A { @ link WordChoiceReport } detailing the performance */ public static WordChoiceReport evaluate ( SemanticSpace sspace , WordChoiceEvaluation test , Similarity . SimType vectorComparisonType ) { } }
Collection < MultipleChoiceQuestion > questions = test . getQuestions ( ) ; int correct = 0 ; int unanswerable = 0 ; question_loop : // Answer each question by using the vectors from the provided Semantic // Space for ( MultipleChoiceQuestion question : questions ) { String promptWord = question . getPrompt ( ) ; // get the vector for the prompt Vector promptVector = sspace . getVector ( promptWord ) ; // check that the s - space had the prompt word if ( promptVector == null ) { unanswerable ++ ; continue ; } int answerIndex = 0 ; double closestOption = Double . MIN_VALUE ; // find the options whose vector has the highest similarity ( or // equivalent comparison measure ) to the prompt word . The // running assumption hear is that for the value returned by the // comparison method , a high value implies more similar vectors . int optionIndex = 0 ; for ( String optionWord : question . getOptions ( ) ) { // Get the vector for the option Vector optionVector = sspace . getVector ( optionWord ) ; // check that the s - space had the option word if ( optionVector == null ) { unanswerable ++ ; continue question_loop ; } double similarity = Similarity . getSimilarity ( vectorComparisonType , promptVector , optionVector ) ; if ( similarity > closestOption ) { answerIndex = optionIndex ; closestOption = similarity ; } optionIndex ++ ; } // see whether our guess matched with the correct index if ( answerIndex == question . getCorrectAnswer ( ) ) { correct ++ ; } } return new SimpleReport ( questions . size ( ) , correct , unanswerable ) ;
public class MFPPush { /** * Checks whether push notification is supported . * @ return true if push is supported , false otherwise . */ public boolean isPushSupported ( ) { } }
String version = android . os . Build . VERSION . RELEASE . substring ( 0 , 3 ) ; return ( Double . valueOf ( version ) >= MIN_SUPPORTED_ANDRIOD_VERSION ) ;
public class GeometryColumnsSfSqlDao { /** * { @ inheritDoc } */ @ Override public int delete ( GeometryColumnsSfSql data ) throws SQLException { } }
DeleteBuilder < GeometryColumnsSfSql , TableColumnKey > db = deleteBuilder ( ) ; db . where ( ) . eq ( GeometryColumnsSfSql . COLUMN_F_TABLE_NAME , data . getFTableName ( ) ) . and ( ) . eq ( GeometryColumnsSfSql . COLUMN_F_GEOMETRY_COLUMN , data . getFGeometryColumn ( ) ) ; PreparedDelete < GeometryColumnsSfSql > deleteQuery = db . prepare ( ) ; int deleted = delete ( deleteQuery ) ; return deleted ;
public class Zipper { /** Writes a string to the compressed stream: the empty string is a single end marker; a previously seen string is emitted as a back-reference into the string keep; otherwise the characters are emitted and the string is registered for future back-references. @param string The string to write. @throws JSONException on encoding failure */ private void writeString ( String string ) throws JSONException { } }
// Special case for empty strings.
if ( string . length ( ) == 0 ) {
    zero ( ) ;
    write ( end , this . stringhuff ) ;
} else {
    Kim kim = new Kim ( string ) ;
    // Look for the string in the strings keep. If it is found, emit its
    // integer and count that as a use.
    int integer = this . stringkeep . find ( kim ) ;
    if ( integer != none ) {
        one ( ) ;
        write ( integer , this . stringkeep ) ;
    } else {
        // But if it is not found, emit the string's characters. Register the string
        // so that a later lookup can succeed.
        zero ( ) ;
        write ( kim , this . stringhuff , this . stringhuffext ) ;
        write ( end , this . stringhuff ) ;
        this . stringkeep . register ( kim ) ;
    }
}
public class ExecutionEnvironment { /** Creates a DataSet from the given non-empty collection. Note that this operation will result in a non-parallel data source, i.e. a data source with a parallelism of one. <p>The returned DataSet is typed to the given TypeInformation. @param data The collection of elements to create the data set from. @param type The TypeInformation for the produced data set. @return A DataSet representing the given collection. @see #fromCollection(Collection) */ public < X > DataSource < X > fromCollection ( Collection < X > data , TypeInformation < X > type ) { } }
// Delegate, additionally capturing the caller's code location for operator naming
// and error reporting.
return fromCollection ( data , type , Utils . getCallLocationName ( ) ) ;
public class AccessSet { /** Reads the related {@link org.efaps.admin.datamodel.Status} links for this access set from the database and adds the resolved Status objects to this set. @throws CacheReloadException on error */ private void readLinks2Status ( ) throws CacheReloadException { } }
Connection con = null ;
try {
    final List < Long > values = new ArrayList < > ( ) ;
    con = Context . getConnection ( ) ;
    PreparedStatement stmt = null ;
    try {
        // Fetch the status ids linked to this access set.
        stmt = con . prepareStatement ( AccessSet . SQL_SET2STATUS ) ;
        stmt . setObject ( 1 , getId ( ) ) ;
        final ResultSet rs = stmt . executeQuery ( ) ;
        while ( rs . next ( ) ) {
            values . add ( rs . getLong ( 1 ) ) ;
        }
        rs . close ( ) ;
    } finally {
        if ( stmt != null ) {
            stmt . close ( ) ;
        }
    }
    con . commit ( ) ;
    // Resolve each id to a Status; unknown ids are logged but do not abort the reload.
    for ( final Long statusId : values ) {
        final Status status = Status . get ( statusId ) ;
        if ( status == null ) {
            AccessSet . LOG . error ( "could not found status with id " + "'" + statusId + "'" ) ;
        } else {
            AccessSet . LOG . debug ( "read link from AccessSet '{}' (id = {}, uuid = {}) to status '{}' (id = {})" , getName ( ) , getId ( ) , getUUID ( ) , status . getKey ( ) , status . getId ( ) ) ;
            getStatuses ( ) . add ( status ) ;
        }
    }
} catch ( final SQLException e ) {
    // NOTE(review): message says "roles" but this method reads status links -
    // looks copy-pasted; confirm before changing the runtime string.
    throw new CacheReloadException ( "could not read roles" , e ) ;
} catch ( final EFapsException e ) {
    throw new CacheReloadException ( "could not read roles" , e ) ;
} finally {
    try {
        if ( con != null && ! con . isClosed ( ) ) {
            con . close ( ) ;
        }
    } catch ( final SQLException e ) {
        // NOTE(review): this close-failure message also appears copy-pasted from
        // attribute-type reading; confirm before changing the runtime string.
        throw new CacheReloadException ( "Cannot read a type for an attribute." , e ) ;
    }
}
public class MultipleSelect { /** * Returns the selected items list . If no item is selected , this method * returns an empty list . * @ return the selected items list */ public List < Option > getSelectedItems ( ) { } }
final List < Option > items = new ArrayList < > ( 0 ) ; for ( Entry < OptionElement , Option > entry : itemMap . entrySet ( ) ) { Option opt = entry . getValue ( ) ; if ( opt . isSelected ( ) ) items . add ( opt ) ; } return items ;
public class Ansi { /** * Wrapps given < code > message < / message > with special ansi control sequences and returns it */ public String colorize ( String message ) { } }
if ( SUPPORTED ) { StringBuilder buff = new StringBuilder ( start . length ( ) + message . length ( ) + END . length ( ) ) ; buff . append ( start ) . append ( message ) . append ( END ) ; return buff . toString ( ) ; } else return message ;
public class SQLRebuilder { /** * Get the names of all Fedora tables listed in the server ' s dbSpec file . * Names will be returned in ALL CAPS so that case - insensitive comparisons * can be done . */ private List < String > getFedoraTables ( ) { } }
try { InputStream in = getClass ( ) . getClassLoader ( ) . getResourceAsStream ( DBSPEC_LOCATION ) ; List < TableSpec > specs = TableSpec . getTableSpecs ( in ) ; ArrayList < String > names = new ArrayList < String > ( ) ; for ( TableSpec spec : specs ) { names . add ( spec . getName ( ) . toUpperCase ( ) ) ; } return names ; } catch ( Exception e ) { e . printStackTrace ( ) ; throw new RuntimeException ( "Unexpected error reading dbspec file" , e ) ; }
public class PresenceSubscriber { /** * This method is the same as refreshBuddy ( duration , eventId , timeout ) except that instead of * creating the SUBSCRIBE request from parameters passed in , the given request message parameter * is used for sending out the SUBSCRIBE message . * The Request parameter passed into this method should come from calling createSubscribeMessage ( ) * - see that javadoc . The subscription duration is reset to the passed in Request ' s expiry value . * If it is 0 , this is an unsubscribe . Note , the buddy stays in the buddy list even though the * subscription won ' t be active . The event " id " in the given request will be used subsequently * ( for error checking SUBSCRIBE responses and NOTIFYs from the server as well as for sending * subsequent SUBSCRIBEs ) . */ public boolean refreshBuddy ( Request req , long timeout ) { } }
if ( parent . getBuddyList ( ) . get ( targetUri ) == null ) { setReturnCode ( SipSession . INVALID_ARGUMENT ) ; setErrorMessage ( "Buddy refresh for URI " + targetUri + " failed, uri was not found in the buddy list. Use fetchPresenceInfo() for users not in the buddy list" ) ; return false ; } return refreshSubscription ( req , timeout , parent . getProxyHost ( ) != null ) ;
public class TcpServerTransport { /** * Creates a new instance binding to localhost * @ param port the port to bind to * @ return a new instance */ public static TcpServerTransport create ( int port ) { } }
TcpServer server = TcpServer . create ( ) . port ( port ) ; return create ( server ) ;
public class BlockdevRefOrNull { /** * This overrides @ JsonUnwrapped . */ @ JsonValue public Object toJsonValue ( ) { } }
if ( definition != null ) return definition ; if ( reference != null ) return reference ; if ( _null != null ) return _null ; return null ;
public class DimFilterUtils { /** Filter the given iterable of objects by removing any object whose ShardSpec, obtained from the converter function, does not fit in the RangeSet of the dimFilter {@link DimFilter#getDimensionRangeSet(String)}. The returned set contains the filtered objects in the same order as they appear in input. If you plan to call this multiple times with the same dimFilter, consider using {@link #filterShards(DimFilter, Iterable, Function, Map)} instead with a cached map. @param dimFilter The filter to use @param input The iterable of objects to be filtered @param converter The function to convert T to ShardSpec that can be filtered by @param <T> This can be any type, as long as a transform function is provided to convert this to ShardSpec @return The set of filtered objects, in the same order as input */ public static < T > Set < T > filterShards ( DimFilter dimFilter , Iterable < T > input , Function < T , ShardSpec > converter ) { } }
// Delegate with a fresh, throw-away range-set cache; repeated callers should use
// the four-argument overload with a shared cache instead.
return filterShards ( dimFilter , input , converter , new HashMap < String , Optional < RangeSet < String > > > ( ) ) ;
public class CallableProcedureStatement { /** * < p > Registers the designated output parameter . * This version of the method < code > registerOutParameter < / code > should be used for a user - defined * or < code > REF < / code > output parameter . Examples of user - defined types include : * < code > STRUCT < / code > , < code > DISTINCT < / code > , * < code > JAVA _ OBJECT < / code > , and named array types . < / p > * < p > All OUT parameters must be registered * before a stored procedure is executed . < / p > * < p > For a user - defined parameter , the fully - qualified SQL * type name of the parameter should also be given , while a < code > REF < / code > parameter requires * that the fully - qualified type name of the referenced type be given . A JDBC driver that does * not need the type code and type name information may ignore it . To be portable , however , * applications should always provide these values for user - defined and < code > REF < / code > * parameters . < / p > * < p > Although it is intended for user - defined and < code > REF < / code > parameters , * this method may be used to register a parameter of any JDBC type . If the parameter does not * have a user - defined or < code > REF < / code > type , the * < i > typeName < / i > parameter is ignored . < / p > * < p > < B > Note : < / B > When reading the value of an out parameter , you * must use the getter method whose Java type corresponds to the parameter ' s registered SQL * type . < / p > * @ param parameterIndex the first parameter is 1 , the second is 2 , . . . * @ param sqlType a value from { @ link Types } * @ param typeName the fully - qualified name of an SQL structured type * @ throws SQLException if the parameterIndex is not valid ; if a database * access error occurs or this method is called on a * closed < code > CallableStatement < / code > * @ see Types */ public void registerOutParameter ( int parameterIndex , int sqlType , String typeName ) throws SQLException { } }
CallParameter callParameter = getParameter ( parameterIndex ) ; callParameter . setOutputSqlType ( sqlType ) ; callParameter . setTypeName ( typeName ) ; callParameter . setOutput ( true ) ;
public class MimeType { /** * Returns a mime type string by parsing the file extension of a file string . If the extension is not found or * unknown the default value is returned . * @ param file path to a file with extension * @ param defaultMimeType what to return if not found * @ return mime type */ public static String getMime ( @ NotNull String file , String defaultMimeType ) { } }
int sep = file . lastIndexOf ( '.' ) ; if ( sep != - 1 ) { String extension = file . substring ( sep + 1 , file . length ( ) ) ; String mime = mimes . get ( extension ) ; if ( mime != null ) { return mime ; } } return defaultMimeType ;
public class AmazonWebServiceRequest { /** * Sets the optional credentials provider to use for this request , overriding the default credentials * provider at the client level . * @ param credentialsProvider * The optional AWS security credentials provider to use for this request , overriding the * default credentials provider at the client level . * @ return A reference to this updated object so that method calls can be chained together . */ public < T extends AmazonWebServiceRequest > T withRequestCredentialsProvider ( final AWSCredentialsProvider credentialsProvider ) { } }
setRequestCredentialsProvider ( credentialsProvider ) ; @ SuppressWarnings ( "unchecked" ) T t = ( T ) this ; return t ;