signature
stringlengths
43
39.1k
implementation
stringlengths
0
450k
public class Spy { /** * Executes the { @ link Runnable # run ( ) } method on provided argument and verifies the expectations * @ throws SniffyAssertionError if wrong number of queries was executed * @ since 2.0 */ public C run ( Runnable runnable ) throws SniffyAssertionError { } }
checkOpened ( ) ; try { runnable . run ( ) ; } catch ( Throwable e ) { throw verifyAndAddToException ( e ) ; } verify ( ) ; return self ( ) ;
public class CEMILData { /** * / * ( non - Javadoc ) * @ see tuwien . auto . calimero . cemi . CEMI # toByteArray ( ) */ @ Override public byte [ ] toByteArray ( ) { } }
final ByteArrayOutputStream os = new ByteArrayOutputStream ( ) ; os . write ( mc ) ; writeAddInfo ( os ) ; setCtrlPriority ( ) ; os . write ( ctrl1 ) ; os . write ( ctrl2 ) ; byte [ ] buf = source . toByteArray ( ) ; os . write ( buf , 0 , buf . length ) ; buf = dst . toByteArray ( ) ; os . write ( buf , 0 , buf . length ) ; writePayload ( os ) ; return os . toByteArray ( ) ;
public class Currency { /** * Returns the exchange rate between 2 currency . * @ param otherCurrency The other currency to exchange to * @ return The exchange rate or Double . MIN _ VALUE if no exchange information are found . * @ throws com . greatmancode . craftconomy3 . utils . NoExchangeRate If there ' s no exchange rate between the 2 currencies . */ public double getExchangeRate ( Currency otherCurrency ) throws NoExchangeRate { } }
return Common . getInstance ( ) . getStorageHandler ( ) . getStorageEngine ( ) . getExchangeRate ( this , otherCurrency ) ;
public class ProtectedBranchesApi { /** * Gets a Stream of protected branches from a project . * < pre > < code > GitLab Endpoint : GET / projects / : id / protected _ branches < / code > < / pre > * @ param projectIdOrPath the project in the form of an Integer ( ID ) , String ( path ) , or Project instance * @ return the Stream of protected branches for the project * @ throws GitLabApiException if any exception occurs */ public Stream < ProtectedBranch > getProtectedBranchesStream ( Object projectIdOrPath ) throws GitLabApiException { } }
return ( getProtectedBranches ( projectIdOrPath , this . getDefaultPerPage ( ) ) . stream ( ) ) ;
public class AdjustPolygonForThresholdBias {
    /**
     * Processes and adjusts the polygon. If after adjustment a corner needs to be removed because two sides are
     * parallel then the size of the polygon can be changed.
     *
     * @param polygon The polygon that is to be adjusted. Modified.
     * @param clockwise Is the polygon in a clockwise orientation?
     */
    public void process( Polygon2D_F64 polygon , boolean clockwise ) {
        int N = polygon.size();
        segments.resize(N);

        // Apply the adjustment independently to each side
        for (int i = N - 1, j = 0; j < N; i = j, j++) {
            // ii -> jj traverses the side in a fixed winding regardless of the
            // polygon's stored orientation.
            int ii, jj;
            if (clockwise) {
                ii = i; jj = j;
            } else {
                ii = j; jj = i;
            }
            Point2D_F64 a = polygon.get(ii), b = polygon.get(jj);
            double dx = b.x - a.x;
            double dy = b.y - a.y;
            double l = Math.sqrt(dx * dx + dy * dy);
            if (l == 0) {
                throw new RuntimeException("Two identical corners!");
            }
            // only needs to be shifted in two directions
            if (dx < 0) dx = 0;
            if (dy > 0) dy = 0;
            // Offset the side by one unit along its (clamped) normal: (-dy, dx)/l.
            LineSegment2D_F64 s = segments.get(ii);
            s.a.x = a.x - dy / l;
            s.a.y = a.y + dx / l;
            s.b.x = b.x - dy / l;
            s.b.y = b.y + dx / l;
        }

        // Find the intersection between the adjusted lines to convert it back into polygon format
        for (int i = N - 1, j = 0; j < N; i = j, j++) {
            int ii, jj;
            if (clockwise) {
                ii = i; jj = j;
            } else {
                ii = j; jj = i;
            }
            UtilLine2D_F64.convert(segments.get(ii), ga);
            UtilLine2D_F64.convert(segments.get(jj), gb);
            if (null != Intersection2D_F64.intersection(ga, gb, intersection)) {
                // very acute angles can cause a large delta. This is conservative and prevents that
                // (threshold of 20 is squared distance — see distance2)
                if (intersection.distance2(polygon.get(jj)) < 20) {
                    polygon.get(jj).set(intersection);
                }
            }
        }

        // If two corners are very close, some conditions exist where the shift can make
        // two points equal. The shift should still happen, so instead of avoiding it we
        // collapse the duplicates afterwards. There might be a more elegant solution to
        // this problem, but this is probably the simplest.
        UtilPolygons2D_F64.removeAdjacentDuplicates(polygon, 1e-8);
    }
}
public class SipParser { /** * Frame the supplied buffer into a { @ link SipMessage } . No deep analysis of the message will be * performed by this framer so there is no guarantee that this { @ link SipMessage } is actually a * well formed message . * @ param buffer * @ return the framed { @ link SipMessage } */ public static SipMessage frame ( final Buffer buffer ) throws IOException { } }
if ( true ) return frame2 ( buffer ) ; if ( ! couldBeSipMessage ( buffer ) ) { throw new SipParseException ( 0 , "Cannot be a SIP message because is doesnt start with \"SIP\" " + "(for responses) or a method (for requests)" ) ; } // we just assume that the initial line // indeed is a correct sip line final Buffer rawInitialLine = buffer . readLine ( ) ; // which means that the headers are about // to start now . final int startHeaders = buffer . getReaderIndex ( ) ; Buffer currentLine = null ; while ( ( currentLine = buffer . readLine ( ) ) != null && currentLine . hasReadableBytes ( ) ) { // just moving along , we don ' t really care why // we stop , we have found what we want anyway , which // is the boundary between headers and the potential // payload ( or end of message ) } final Buffer headers = buffer . slice ( startHeaders , buffer . getReaderIndex ( ) ) ; Buffer payload = null ; if ( buffer . hasReadableBytes ( ) ) { payload = buffer . slice ( ) ; } if ( SipInitialLine . isResponseLine ( rawInitialLine ) ) { throw new RuntimeException ( "No longer using the old mutable sip messages" ) ; // return new SipResponseImpl ( rawInitialLine , headers , payload ) ; } else { throw new RuntimeException ( "No longer using the old mutable sip messages" ) ; // return new SipRequestImpl ( rawInitialLine , headers , payload ) ; }
public class CodeTypeField { /** * GetCodeType Method . */ public ClassProject . CodeType getCodeType ( ) { } }
String code = this . toString ( ) ; if ( "THICK" . equalsIgnoreCase ( code ) ) return ClassProject . CodeType . THICK ; if ( "THIN" . equalsIgnoreCase ( code ) ) return ClassProject . CodeType . THIN ; if ( "RESOURCE_CODE" . equalsIgnoreCase ( code ) ) return ClassProject . CodeType . RESOURCE_CODE ; if ( "RESOURCE_PROPERTIES" . equalsIgnoreCase ( code ) ) return ClassProject . CodeType . RESOURCE_PROPERTIES ; if ( "INTERFACE" . equalsIgnoreCase ( code ) ) return ClassProject . CodeType . INTERFACE ; return null ;
public class BeanHelper { /** * ( g / s ) etField - > property * Field - > property * @ param etter * @ return */ public static final String property ( String etter ) { } }
if ( etter . startsWith ( "get" ) || etter . startsWith ( "set" ) ) { etter = etter . substring ( 3 ) ; } if ( etter . startsWith ( "is" ) ) { etter = etter . substring ( 2 ) ; } return lower ( etter ) ;
public class StatefulBeanO { /** * LI2281.07 */ @ Override public MessageContext getMessageContext ( ) throws IllegalStateException { } }
IllegalStateException ise ; // Not an allowed method for Stateful beans per EJB Specification . ise = new IllegalStateException ( "StatefulBean: getMessageContext not " + "allowed from Stateful Session Bean" ) ; if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) ) Tr . debug ( tc , "getMessageContext: " + ise ) ; throw ise ;
public class DefaultGroovyMethods { /** * Finds all values matching the closure condition . * < pre class = " groovyTestCase " > assert [ 2,4 ] = = [ 1,2,3,4 ] . findAll { it % 2 = = 0 } < / pre > * @ param self a Collection * @ param closure a closure condition * @ return a Collection of matching values * @ since 1.5.6 */ public static < T > Collection < T > findAll ( Collection < T > self , @ ClosureParams ( FirstParam . FirstGenericType . class ) Closure closure ) { } }
Collection < T > answer = createSimilarCollection ( self ) ; Iterator < T > iter = self . iterator ( ) ; return findAll ( closure , answer , iter ) ;
public class CommerceTierPriceEntryPersistenceImpl { /** * Returns a range of all the commerce tier price entries . * Useful when paginating results . Returns a maximum of < code > end - start < / code > instances . < code > start < / code > and < code > end < / code > are not primary keys , they are indexes in the result set . Thus , < code > 0 < / code > refers to the first result in the set . Setting both < code > start < / code > and < code > end < / code > to { @ link QueryUtil # ALL _ POS } will return the full result set . If < code > orderByComparator < / code > is specified , then the query will include the given ORDER BY logic . If < code > orderByComparator < / code > is absent and pagination is required ( < code > start < / code > and < code > end < / code > are not { @ link QueryUtil # ALL _ POS } ) , then the query will include the default ORDER BY logic from { @ link CommerceTierPriceEntryModelImpl } . If both < code > orderByComparator < / code > and pagination are absent , for performance reasons , the query will not have an ORDER BY clause and the returned result set will be sorted on by the primary key in an ascending order . * @ param start the lower bound of the range of commerce tier price entries * @ param end the upper bound of the range of commerce tier price entries ( not inclusive ) * @ return the range of commerce tier price entries */ @ Override public List < CommerceTierPriceEntry > findAll ( int start , int end ) { } }
return findAll ( start , end , null ) ;
public class ChatHistory { /** * Returns this user ' s chat history , creating one if necessary . If the given name implements * { @ link KeepNoHistory } , null is returned . */ protected List < Entry > getList ( Name username ) { } }
if ( username instanceof KeepNoHistory ) { return null ; } List < Entry > history = _histories . get ( username ) ; if ( history == null ) { _histories . put ( username , history = Lists . newArrayList ( ) ) ; } return history ;
public class SourceStreamManager { /** * Create a new Source Stream and store it in the given StreamSet * @ param streamSet * @ param priority * @ param reliability * @ return a new SourceStream * @ throws SIResourceException */ private SourceStream createStream ( StreamSet streamSet , int priority , Reliability reliability ) throws SIResourceException { } }
if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) SibTr . entry ( tc , "createStream" , new Object [ ] { streamSet , new Integer ( priority ) , reliability } ) ; SourceStream stream = null ; // there is no source stream for express messages if ( reliability . compareTo ( Reliability . BEST_EFFORT_NONPERSISTENT ) > 0 ) { // Warning - this assumes that ASSURED is always the highest Reliability // and that UNKNOWN is always the lowest ( 0 ) . stream = new SourceStream ( priority , // priority reliability , // reliability downControl , new ArrayList ( ) , streamSet , messageProcessor . getAlarmManager ( ) , destinationHandler ) ; } streamSet . setStream ( priority , reliability , stream ) ; if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) SibTr . exit ( tc , "createStream" , stream ) ; return stream ;
public class Multiplexing { /** * Demultiplexes elements from the source iterable into an iterator of channels . * < code > * unchain ( 2 , [ 1,2,3,4,5 ] ) - > [ [ 1,2 ] , [ 3,4 ] , [ 5 ] ] * < / code > * @ param < C > the channel collection type * @ param < E > the element type * @ param channelSize maximum size of the channel * @ param iterable the source iterable * @ param channelProvider the supplier used to create channels * @ return an iterator of channels */ public static < C extends Collection < E > , E > Iterator < C > unchain ( int channelSize , Iterable < E > iterable , Supplier < C > channelProvider ) { } }
dbc . precondition ( iterable != null , "cannot unchain a null iterable" ) ; return new UnchainIterator < C , E > ( channelSize , iterable . iterator ( ) , channelProvider ) ;
public class Expression { /** * checkValidCheckConstraint */ public void checkValidCheckConstraint ( ) { } }
HsqlArrayList set = new HsqlArrayList ( ) ; Expression . collectAllExpressions ( set , this , subqueryExpressionSet , emptyExpressionSet ) ; if ( ! set . isEmpty ( ) ) { throw Error . error ( ErrorCode . X_0A000 , "subquery in check constraint" ) ; }
public class LssClient { /** * get detail of your stream by app name and stream name * @ param request The request object containing all parameters for query app stream . * @ return the response */ public GetAppStreamResponse queryAppStream ( GetAppStreamRequest request ) { } }
checkNotNull ( request , "The parameter request should NOT be null." ) ; checkStringNotEmpty ( request . getApp ( ) , "The parameter app should NOT be null or empty string." ) ; checkStringNotEmpty ( request . getStream ( ) , "The parameter stream should NOT be null or empty string." ) ; InternalRequest internalRequest = createRequest ( HttpMethodName . GET , request , LIVE_APP , request . getApp ( ) , LIVE_SESSION , request . getStream ( ) ) ; return invokeHttpClient ( internalRequest , GetAppStreamResponse . class ) ;
public class ArrayContainer {
    /**
     * Flips all values in the half-open range [firstOfRange, lastOfRange) and
     * returns the result as a new container. Shares lots of code with inot;
     * candidate for refactoring.
     *
     * @param firstOfRange first value to flip (inclusive)
     * @param lastOfRange end of the range to flip (exclusive — lastOfRange - 1 is the last flipped value)
     * @return a new container with the range flipped; a bitmap container when
     *         the flipped result exceeds DEFAULT_MAX_SIZE
     */
    @Override
    public Container not(final int firstOfRange, final int lastOfRange) {
        // TODO: may need to convert to a RunContainer
        if (firstOfRange >= lastOfRange) {
            return clone(); // empty range
        }

        // determine the span of array indices to be affected
        int startIndex = Util.unsignedBinarySearch(content, 0, cardinality, (short) firstOfRange);
        if (startIndex < 0) {
            startIndex = -startIndex - 1;
        }
        int lastIndex = Util.unsignedBinarySearch(content, 0, cardinality, (short) (lastOfRange - 1));
        if (lastIndex < 0) {
            lastIndex = -lastIndex - 2;
        }
        // Flipping removes the values currently present in the range and adds
        // every absent one, so the cardinality delta is newValues - currentValues.
        final int currentValuesInRange = lastIndex - startIndex + 1;
        final int spanToBeFlipped = lastOfRange - firstOfRange;
        final int newValuesInRange = spanToBeFlipped - currentValuesInRange;
        final int cardinalityChange = newValuesInRange - currentValuesInRange;
        final int newCardinality = cardinality + cardinalityChange;

        // Too large for the array representation: delegate to the bitmap form.
        if (newCardinality > DEFAULT_MAX_SIZE) {
            return toBitmapContainer().not(firstOfRange, lastOfRange);
        }

        ArrayContainer answer = new ArrayContainer(newCardinality);
        // copy stuff before the active area
        System.arraycopy(content, 0, answer.content, 0, startIndex);

        int outPos = startIndex;
        int inPos = startIndex; // item at inPos always >= valInRange

        // Walk the range: emit values absent from content, skip values present.
        int valInRange = firstOfRange;
        for (; valInRange < lastOfRange && inPos <= lastIndex; ++valInRange) {
            if ((short) valInRange != content[inPos]) {
                answer.content[outPos++] = (short) valInRange;
            } else {
                ++inPos;
            }
        }
        // Remaining tail of the range has no existing values left to skip.
        for (; valInRange < lastOfRange; ++valInRange) {
            answer.content[outPos++] = (short) valInRange;
        }

        // content after the active range
        for (int i = lastIndex + 1; i < cardinality; ++i) {
            answer.content[outPos++] = content[i];
        }
        answer.cardinality = newCardinality;
        return answer;
    }
}
public class EvolutionDurations { /** * Returns a copy of this duration with the specified duration added . * This instance is immutable and unaffected by this method call . * @ param other the duration to add * @ return a { @ code EvolutionDurations } based on this duration with the * specified duration added * @ throws NullPointerException if the { @ code other } duration is { @ code null } * @ throws ArithmeticException if numeric overflow occurs */ public EvolutionDurations plus ( final EvolutionDurations other ) { } }
requireNonNull ( other ) ; return of ( _offspringSelectionDuration . plus ( other . _offspringSelectionDuration ) , _survivorsSelectionDuration . plus ( other . _survivorsSelectionDuration ) , _offspringAlterDuration . plus ( other . _offspringAlterDuration ) , _offspringFilterDuration . plus ( other . _offspringFilterDuration ) , _survivorFilterDuration . plus ( other . _survivorFilterDuration ) , _evaluationDuration . plus ( other . _evaluationDuration ) , _evolveDuration . plus ( other . _evolveDuration ) ) ;
public class MemoryCache { /** * Clears component state . */ private void cleanup ( ) { } }
CacheEntry oldest = null ; _count = 0 ; // Cleanup obsolete entries and find the oldest for ( Map . Entry < String , CacheEntry > e : _cache . entrySet ( ) ) { String key = e . getKey ( ) ; CacheEntry entry = e . getValue ( ) ; // Remove obsolete entry if ( entry . isExpired ( ) ) { _cache . remove ( key ) ; } // Count the remaining entry else { _count ++ ; if ( oldest == null || oldest . getExpiration ( ) > entry . getExpiration ( ) ) oldest = entry ; } } // Remove the oldest if cache size exceeded maximum if ( _count > _maxSize && oldest != null ) { _cache . remove ( oldest . getKey ( ) ) ; _count -- ; }
public class DeleteAccountAuditConfigurationRequestMarshaller { /** * Marshall the given parameter object . */ public void marshall ( DeleteAccountAuditConfigurationRequest deleteAccountAuditConfigurationRequest , ProtocolMarshaller protocolMarshaller ) { } }
if ( deleteAccountAuditConfigurationRequest == null ) { throw new SdkClientException ( "Invalid argument passed to marshall(...)" ) ; } try { protocolMarshaller . marshall ( deleteAccountAuditConfigurationRequest . getDeleteScheduledAudits ( ) , DELETESCHEDULEDAUDITS_BINDING ) ; } catch ( Exception e ) { throw new SdkClientException ( "Unable to marshall request to JSON: " + e . getMessage ( ) , e ) ; }
public class OutputHandler { /** * The task output a record with a partition number attached . */ public void partitionedOutput ( int reduce , K key , V value ) throws IOException { } }
PipesPartitioner . setNextPartition ( reduce ) ; collector . collect ( key , value ) ;
public class AbstractPlugin { /** * Gets language label with the specified locale and key . * @ param locale the specified locale * @ param key the specified key * @ return language label */ public String getLang ( final Locale locale , final String key ) { } }
return langs . get ( locale . toString ( ) ) . getProperty ( key ) ;
public class HtmlDocletWriter { /** * Adds the annotation types for the given packageElement . * @ param packageElement the package to write annotations for . * @ param htmltree the documentation tree to which the annotation info will be * added */ public void addAnnotationInfo ( PackageElement packageElement , Content htmltree ) { } }
addAnnotationInfo ( packageElement , packageElement . getAnnotationMirrors ( ) , htmltree ) ;
public class JdbcExtractor { /** * Build / Format input query in the required format * @ param schema * @ param entity * @ param inputQuery * @ return formatted extract query */ private String getExtractQuery ( String schema , String entity , String inputQuery ) { } }
String inputColProjection = this . getInputColumnProjection ( ) ; String outputColProjection = this . getOutputColumnProjection ( ) ; String query = inputQuery ; if ( query == null ) { // if input query is null , build the query from metadata query = "SELECT " + outputColProjection + " FROM " + schema + "." + entity ; } else { // replace input column projection with output column projection if ( StringUtils . isNotBlank ( inputColProjection ) ) { query = query . replace ( inputColProjection , outputColProjection ) ; } } query = addOptionalWatermarkPredicate ( query ) ; return query ;
public class PeekView {
    /**
     * Positions the PeekView content around the point of motion, keeping it on
     * screen and clear of the system bars.
     *
     * @param startX x coordinate of the motion point
     * @param startY y coordinate of the motion point
     * @param translation axis along which the content is offset (VERTICAL centers X
     *        around the point; otherwise Y is centered)
     * @param movementAmount distance to shift the content along the translation axis;
     *        its sign is flipped when the content is placed on the opposite side of the point
     */
    private void setContentOffset(int startX, int startY, Translation translation, int movementAmount) {
        if (translation == Translation.VERTICAL) {
            // center the X around the start point
            int originalStartX = startX;
            startX -= contentParams.width / 2;

            // if Y is in the lower half, we want it to go up, otherwise, leave it the same
            boolean moveDown = true;
            if (startY + contentParams.height + FINGER_SIZE > screenHeight) {
                startY -= contentParams.height;
                moveDown = false;
                if (movementAmount > 0) {
                    movementAmount *= -1;
                }
            }

            // when moving the peek view below the finger location, we want to offset it a bit to the right
            // or left as well, just so the hand doesn't cover it up.
            int extraXOffset = 0;
            if (moveDown) {
                extraXOffset = DensityUtils.toPx(getContext(), 200);
                if (originalStartX > screenWidth / 2) {
                    extraXOffset = extraXOffset * -1; // move it a bit to the left
                }
            }

            // make sure they aren't outside of the layout bounds and move them with the movementAmount.
            // I move the x just a bit to the right or left here as well, because it just makes things look better
            startX = ensureWithinBounds(startX + extraXOffset, screenWidth, contentParams.width);
            startY = ensureWithinBounds(startY + movementAmount, screenHeight, contentParams.height);
        } else {
            // center the Y around the start point
            startY -= contentParams.height / 2;

            // if X is in the right half, we want it to go left
            if (startX + contentParams.width + FINGER_SIZE > screenWidth) {
                startX -= contentParams.width;
                if (movementAmount > 0) {
                    movementAmount *= -1;
                }
            }

            // make sure they aren't outside of the layout bounds and move them with the movementAmount
            startX = ensureWithinBounds(startX + movementAmount, screenWidth, contentParams.width);
            startY = ensureWithinBounds(startY, screenHeight, contentParams.height);
        }

        // check to see if the system bars are covering anything
        int statusBar = NavigationUtils.getStatusBarHeight(getContext());
        if (startY < statusBar) {
            // if it is above the status bar and action bar
            startY = statusBar + 10;
        } else if (NavigationUtils.hasNavBar(getContext())
                && startY + contentParams.height > screenHeight - NavigationUtils.getNavBarHeight(getContext())) {
            // if there is a nav bar and the popup is underneath it.
            // NOTE(review): toDp is used for the margin here while toPx is used above — confirm intended.
            startY = screenHeight - contentParams.height
                    - NavigationUtils.getNavBarHeight(getContext())
                    - DensityUtils.toDp(getContext(), 10);
        } else if (!NavigationUtils.hasNavBar(getContext())
                && startY + contentParams.height > screenHeight) {
            startY = screenHeight - contentParams.height - DensityUtils.toDp(getContext(), 10);
        }

        // set the newly computed distances from the start and top sides
        setDistanceFromLeft(startX);
        setDistanceFromTop(startY);
    }
}
public class DrizzleConnection { /** * Sets whether this connection is auto commited . * @ param autoCommit if it should be auto commited . * @ throws SQLException if something goes wrong talking to the server . */ public void setAutoCommit ( final boolean autoCommit ) throws SQLException { } }
Statement stmt = createStatement ( ) ; String clause ; if ( autoCommit ) { clause = "1" ; } else { clause = "0" ; } stmt . executeUpdate ( "set autocommit=" + clause ) ;
public class GitlabAPI { /** * Gets all members of a Group * @ param groupId The id of the GitLab Group * @ return The Group Members */ public List < GitlabGroupMember > getGroupMembers ( Integer groupId ) { } }
String tailUrl = GitlabGroup . URL + "/" + groupId + GitlabGroupMember . URL + PARAM_MAX_ITEMS_PER_PAGE ; return retrieve ( ) . getAll ( tailUrl , GitlabGroupMember [ ] . class ) ;
public class IntAVLTree { /** * Find a node in this tree . */ public int find ( ) { } }
for ( int node = root ; node != NIL ; ) { final int cmp = compare ( node ) ; if ( cmp < 0 ) { node = left ( node ) ; } else if ( cmp > 0 ) { node = right ( node ) ; } else { return node ; } } return NIL ;
public class HttpConnection { /** * Reads up to < tt > " \ n " < / tt > from the ( unchunked ) input stream . * If the stream ends before the line terminator is found , * the last part of the string will still be returned . * @ throws IllegalStateException if the connection is not open * @ throws IOException if an I / O problem occurs * @ return a line from the response * @ deprecated use # readLine ( String ) */ @ Deprecated public String readLine ( ) throws IOException , IllegalStateException { } }
LOG . trace ( "enter HttpConnection.readLine()" ) ; assertOpen ( ) ; return HttpParser . readLine ( inputStream ) ;
public class MediaApi { /** * Log out of a media channel * Log out the current agent on the specified media channels . You can make a [ / media / { mediatype } / ready ] ( / reference / workspace / Media / index . html # readyAgentState ) or [ / media / { mediatype } / not - ready ] ( / reference / workspace / Media / index . html # notReadyAgentState ) request to log in to the media channel again . * @ param mediatype The media channel . ( required ) * @ param logoutMediaData ( required ) * @ return ApiSuccessResponse * @ throws ApiException If fail to call the API , e . g . server error or cannot deserialize the response body */ public ApiSuccessResponse removeMedia ( String mediatype , LogoutMediaData logoutMediaData ) throws ApiException { } }
ApiResponse < ApiSuccessResponse > resp = removeMediaWithHttpInfo ( mediatype , logoutMediaData ) ; return resp . getData ( ) ;
public class CasConfigurationModifiedEvent { /** * Is eligible for context refresh ? * @ return the boolean */ public boolean isEligibleForContextRefresh ( ) { } }
if ( this . override ) { return true ; } if ( getFile ( ) != null ) { return CONFIG_FILE_PATTERN . matcher ( getFile ( ) . toFile ( ) . getName ( ) ) . find ( ) ; } return false ;
public class CompletableFuture {
    /**
     * Pushes the given completion unless it completes while trying. Caller should first check that
     * result is null.
     */
    final void unipush(Completion c) {
        if (c != null) {
            // Retry the push until it succeeds, unless the future completes in
            // the meantime — in that case clear c's next link so it is not left
            // referencing stack nodes, and stop pushing.
            while (!tryPushStack(c)) {
                if (result != null) {
                    lazySetNext(c, null);
                    break;
                }
            }
            // If the future is (now) complete — whether c was pushed or not —
            // fire c synchronously so it is not stranded on the stack.
            if (result != null)
                c.tryFire(SYNC);
        }
    }
}
public class TransitFactory { /** * Creates a reader instance . * @ param type the format to read in * @ param in the input stream to read from * @ param customHandlers a map of custom ReadHandlers to use in addition * or in place of the default ReadHandlers * @ return a reader */ public static Reader reader ( Format type , InputStream in , Map < String , ReadHandler < ? , ? > > customHandlers ) { } }
return reader ( type , in , customHandlers , null ) ;
public class Util {
    /**
     * Ensure string ends with suffix.
     *
     * @param subject Examined string; may be {@code null}.
     * @param suffix Desired suffix; may be {@code null}.
     * @return {@code null} when subject is {@code null}; the original subject when it
     *         already ends with suffix (or when suffix is {@code null}, which imposes
     *         no constraint); {@code subject + suffix} otherwise.
     * @since 1.505
     */
    public static String ensureEndsWith(String subject, String suffix) {
        if (subject == null) {
            return null;
        }
        // The suffix parameter was annotated @CheckForNull but the previous code
        // called subject.endsWith(suffix) unconditionally, throwing NPE for a
        // null suffix. A null suffix now leaves the subject unchanged.
        if (suffix == null || subject.endsWith(suffix)) {
            return subject;
        }
        return subject + suffix;
    }
}
public class Parser {
    /**
     * Parses an array literal starting at the current LB token. May return an
     * {@link ArrayLiteral} or {@link ArrayComprehension}.
     *
     * @return the parsed node
     * @throws IOException if the token stream cannot be read
     */
    private AstNode arrayLiteral() throws IOException {
        if (currentToken != Token.LB) codeBug();
        int pos = ts.tokenBeg, end = ts.tokenEnd;
        List<AstNode> elements = new ArrayList<AstNode>();
        ArrayLiteral pn = new ArrayLiteral(pos);
        // True immediately after "[" or a comma — i.e. when the next comma
        // would denote an elided (empty) element.
        boolean after_lb_or_comma = true;
        int afterComma = -1;   // end position of the most recent trailing comma, or -1
        int skipCount = 0;     // number of elided elements
        for (;;) {
            int tt = peekToken();
            if (tt == Token.COMMA) {
                consumeToken();
                afterComma = ts.tokenEnd;
                if (!after_lb_or_comma) {
                    after_lb_or_comma = true;
                } else {
                    // Elision: a comma with no preceding element yields an
                    // EmptyExpression placeholder.
                    elements.add(new EmptyExpression(ts.tokenBeg, 1));
                    skipCount++;
                }
            } else if (tt == Token.RB) {
                consumeToken();
                // for ([a,] in obj) is legal, but for ([a] in obj) is
                // not since we have both key and value supplied. The
                // trick is that [a,] and [a] are equivalent in other
                // array literal contexts. So we calculate a special
                // length value just for destructuring assignment.
                end = ts.tokenEnd;
                pn.setDestructuringLength(elements.size() + (after_lb_or_comma ? 1 : 0));
                pn.setSkipCount(skipCount);
                if (afterComma != -1) warnTrailingComma(pos, elements, afterComma);
                break;
            } else if (tt == Token.FOR && !after_lb_or_comma && elements.size() == 1) {
                // "[expr for ...]": exactly one element followed by FOR means
                // this is an array comprehension, not a literal.
                return arrayComprehension(elements.get(0), pos);
            } else if (tt == Token.EOF) {
                reportError("msg.no.bracket.arg");
                break;
            } else {
                // Two consecutive expressions without a separating comma.
                if (!after_lb_or_comma) {
                    reportError("msg.no.bracket.arg");
                }
                elements.add(assignExpr());
                after_lb_or_comma = false;
                afterComma = -1;
            }
        }
        for (AstNode e : elements) {
            pn.addElement(e);
        }
        pn.setLength(end - pos);
        return pn;
    }
}
public class RegistriesInner { /** * Updates a container registry with the specified parameters . * @ param resourceGroupName The name of the resource group to which the container registry belongs . * @ param registryName The name of the container registry . * @ param registryUpdateParameters The parameters for updating a container registry . * @ throws IllegalArgumentException thrown if parameters fail the validation * @ throws CloudException thrown if the request is rejected by server * @ throws RuntimeException all other wrapped checked exceptions if the request fails to be sent * @ return the RegistryInner object if successful . */ public RegistryInner update ( String resourceGroupName , String registryName , RegistryUpdateParameters registryUpdateParameters ) { } }
return updateWithServiceResponseAsync ( resourceGroupName , registryName , registryUpdateParameters ) . toBlocking ( ) . last ( ) . body ( ) ;
public class SessionManager { /** * Returns a participant in a session * @ param sessionId identifier of the session * @ param participantPrivateId private identifier of the participant * @ return { @ link Participant } * @ throws OpenViduException in case the session doesn ' t exist or the * participant doesn ' t belong to it */ public Participant getParticipant ( String sessionId , String participantPrivateId ) throws OpenViduException { } }
Session session = sessions . get ( sessionId ) ; if ( session == null ) { throw new OpenViduException ( Code . ROOM_NOT_FOUND_ERROR_CODE , "Session '" + sessionId + "' not found" ) ; } Participant participant = session . getParticipantByPrivateId ( participantPrivateId ) ; if ( participant == null ) { throw new OpenViduException ( Code . USER_NOT_FOUND_ERROR_CODE , "Participant '" + participantPrivateId + "' not found in session '" + sessionId + "'" ) ; } return participant ;
public class AzureBatchEvaluatorShimManager { /** * This method is called when a resource is requested . It will add a task to the existing Azure Batch job which * is equivalent to requesting a container in Azure Batch . When the request is fulfilled and the evaluator shim is * started , it will send a message back to the driver which signals that a resource request was fulfilled . * @ param resourceRequestEvent resource request event . * @ param containerId container id for the resource . It will be used as the task id for Azure Batch task . * @ param jarFileUri Azure Storage SAS URI of the JAR file containing libraries required by the evaluator shim . */ public void onResourceRequested ( final ResourceRequestEvent resourceRequestEvent , final String containerId , final URI jarFileUri ) { } }
try { createAzureBatchTask ( containerId , jarFileUri ) ; this . outstandingResourceRequests . put ( containerId , resourceRequestEvent ) ; this . outstandingResourceRequestCount . incrementAndGet ( ) ; this . updateRuntimeStatus ( ) ; } catch ( IOException e ) { LOG . log ( Level . SEVERE , "Failed to create Azure Batch task with the following exception: {0}" , e ) ; throw new RuntimeException ( e ) ; }
public class MultiRestServiceAdapter { /** * Returns the endpoint URLs for the RESTful service . Supports property specifiers * via the syntax for { @ link # AdapterActivityBase . getAttributeValueSmart ( String ) } . * @ throws ActivityException */ protected List < String > getEndpointUris ( ) throws AdapterException { } }
List < String > urlmap = new ArrayList < String > ( ) ; try { String map = getAttributeValue ( ENDPOINT_URI ) ; List < String [ ] > urlmaparray ; if ( map == null ) urlmaparray = new ArrayList < String [ ] > ( ) ; else urlmaparray = StringHelper . parseTable ( map , ',' , ';' , 1 ) ; for ( String [ ] entry : urlmaparray ) urlmap . add ( getValueSmart ( entry [ 0 ] , "" ) ) ; } catch ( Exception ex ) { throw new AdapterException ( - 1 , ex . getMessage ( ) , ex ) ; } return urlmap ;
public class DescribePipelinesRequest { /** * The IDs of the pipelines to describe . You can pass as many as 25 identifiers in a single call . To obtain pipeline * IDs , call < a > ListPipelines < / a > . * @ param pipelineIds * The IDs of the pipelines to describe . You can pass as many as 25 identifiers in a single call . To obtain * pipeline IDs , call < a > ListPipelines < / a > . */ public void setPipelineIds ( java . util . Collection < String > pipelineIds ) { } }
if ( pipelineIds == null ) { this . pipelineIds = null ; return ; } this . pipelineIds = new com . amazonaws . internal . SdkInternalList < String > ( pipelineIds ) ;
public class CanonicalXML { /** * Create canonical XML silently , throwing exceptions rather than displaying messages * @ param parser * @ param inputSource * @ param stripSpace * @ return * @ throws Exception */ public String toCanonicalXml2 ( XMLReader parser , InputSource inputSource , boolean stripSpace ) throws Exception { } }
mStrip = stripSpace ; mOut = new StringWriter ( ) ; parser . setContentHandler ( this ) ; parser . setErrorHandler ( this ) ; parser . parse ( inputSource ) ; return mOut . toString ( ) ;
public class AmazonIdentityManagementClient { /** * Simulate how a set of IAM policies attached to an IAM entity works with a list of API operations and AWS * resources to determine the policies ' effective permissions . The entity can be an IAM user , group , or role . If you * specify a user , then the simulation also includes all of the policies that are attached to groups that the user * belongs to . * You can optionally include a list of one or more additional policies specified as strings to include in the * simulation . If you want to simulate only policies specified as strings , use < a > SimulateCustomPolicy < / a > instead . * You can also optionally include one resource - based policy to be evaluated with each of the resources included in * the simulation . * The simulation does not perform the API operations ; it only checks the authorization to determine if the * simulated policies allow or deny the operations . * < b > Note : < / b > This API discloses information about the permissions granted to other users . If you do not want * users to see other user ' s permissions , then consider allowing them to use < a > SimulateCustomPolicy < / a > instead . * Context keys are variables maintained by AWS and its services that provide details about the context of an API * query request . You can use the < code > Condition < / code > element of an IAM policy to evaluate context keys . To get * the list of context keys that the policies require for correct simulation , use * < a > GetContextKeysForPrincipalPolicy < / a > . * If the output is long , you can use the < code > MaxItems < / code > and < code > Marker < / code > parameters to paginate the * results . * @ param simulatePrincipalPolicyRequest * @ return Result of the SimulatePrincipalPolicy operation returned by the service . * @ throws NoSuchEntityException * The request was rejected because it referenced a resource entity that does not exist . The error message * describes the resource . 
* @ throws InvalidInputException * The request was rejected because an invalid or out - of - range value was supplied for an input parameter . * @ throws PolicyEvaluationException * The request failed because a provided policy could not be successfully evaluated . An additional detailed * message indicates the source of the failure . * @ sample AmazonIdentityManagement . SimulatePrincipalPolicy * @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / iam - 2010-05-08 / SimulatePrincipalPolicy " target = " _ top " > AWS * API Documentation < / a > */ @ Override public SimulatePrincipalPolicyResult simulatePrincipalPolicy ( SimulatePrincipalPolicyRequest request ) { } }
request = beforeClientExecution ( request ) ; return executeSimulatePrincipalPolicy ( request ) ;
public class LdapHelper { /** * Returns the DN ( Distinguished Name ) for a Node . * @ param node the given Node . * @ return the DN of that Node . */ public String getDNForNode ( final LdapNode node ) { } }
if ( node instanceof LdapGroup ) { return String . format ( LdapKeys . GROUP_CN_FORMAT , groupIdentifyer , node . get ( groupIdentifyer ) , baseGroupDn ) ; } else { return String . format ( LdapKeys . USER_UID_FORMAT , userIdentifyer , node . get ( userIdentifyer ) , basePeopleDn ) ; }
public class ClassInfoRepository { /** * Java { @ code Class . getCanonicalName ( ) } sometimes will throw out * { @ code InternalError } with message : " { code Malformed class name } " * We just ignore it * @ param c the class on which canonical name is returned * @ return the canonical name of the class specified or { @ code null } if no * canonical name found or error returned canonical name on the class */ public static String canonicalName ( Class c ) { } }
try { return c . getCanonicalName ( ) ; } catch ( InternalError e ) { return null ; } catch ( IllegalAccessError e ) { return null ; }
public class SchedulerForType { /** * Update the pool metrics . The update is atomic at the map level . */ private void collectPoolInfoMetrics ( ) { } }
Map < PoolInfo , PoolInfoMetrics > newPoolNameToMetrics = new HashMap < PoolInfo , PoolInfoMetrics > ( ) ; long now = ClusterManager . clock . getTime ( ) ; Map < PoolInfo , Long > poolInfoAverageFirstWaitMs = sessionManager . getTypePoolInfoAveFirstWaitMs ( type ) ; // The gets + puts below are OK because only one thread is doing it . for ( PoolGroupSchedulable poolGroup : poolGroupManager . getPoolGroups ( ) ) { int poolGroupSessions = 0 ; for ( PoolSchedulable pool : poolGroup . getPools ( ) ) { MetricsRecord poolRecord = poolInfoToMetricsRecord . get ( pool . getPoolInfo ( ) ) ; if ( poolRecord == null ) { poolRecord = metrics . getContext ( ) . createRecord ( "pool-" + pool . getName ( ) ) ; poolInfoToMetricsRecord . put ( pool . getPoolInfo ( ) , poolRecord ) ; } PoolInfoMetrics poolMetrics = new PoolInfoMetrics ( pool . getPoolInfo ( ) , type , poolRecord ) ; poolMetrics . setCounter ( MetricName . GRANTED , pool . getGranted ( ) ) ; poolMetrics . setCounter ( MetricName . REQUESTED , pool . getRequested ( ) ) ; poolMetrics . setCounter ( MetricName . SHARE , ( long ) pool . getShare ( ) ) ; poolMetrics . setCounter ( MetricName . MIN , pool . getMinimum ( ) ) ; poolMetrics . setCounter ( MetricName . MAX , pool . getMaximum ( ) ) ; poolMetrics . setCounter ( MetricName . WEIGHT , ( long ) pool . getWeight ( ) ) ; poolMetrics . setCounter ( MetricName . SESSIONS , pool . getScheduleQueue ( ) . size ( ) ) ; poolMetrics . setCounter ( MetricName . STARVING , pool . getStarvingTime ( now ) / 1000 ) ; Long averageFirstTypeMs = poolInfoAverageFirstWaitMs . get ( pool . getPoolInfo ( ) ) ; poolMetrics . setCounter ( MetricName . AVE_FIRST_WAIT_MS , ( averageFirstTypeMs == null ) ? 0 : averageFirstTypeMs . longValue ( ) ) ; newPoolNameToMetrics . put ( pool . getPoolInfo ( ) , poolMetrics ) ; poolGroupSessions += pool . getScheduleQueue ( ) . size ( ) ; } MetricsRecord poolGroupRecord = poolInfoToMetricsRecord . get ( poolGroup . 
getName ( ) ) ; if ( poolGroupRecord == null ) { poolGroupRecord = metrics . getContext ( ) . createRecord ( "poolgroup-" + poolGroup . getName ( ) ) ; poolInfoToMetricsRecord . put ( poolGroup . getPoolInfo ( ) , poolGroupRecord ) ; } PoolInfoMetrics poolGroupMetrics = new PoolInfoMetrics ( poolGroup . getPoolInfo ( ) , type , poolGroupRecord ) ; poolGroupMetrics . setCounter ( MetricName . GRANTED , poolGroup . getGranted ( ) ) ; poolGroupMetrics . setCounter ( MetricName . REQUESTED , poolGroup . getRequested ( ) ) ; poolGroupMetrics . setCounter ( MetricName . SHARE , ( long ) poolGroup . getShare ( ) ) ; poolGroupMetrics . setCounter ( MetricName . MIN , poolGroup . getMinimum ( ) ) ; poolGroupMetrics . setCounter ( MetricName . MAX , poolGroup . getMaximum ( ) ) ; poolGroupMetrics . setCounter ( MetricName . SESSIONS , poolGroupSessions ) ; newPoolNameToMetrics . put ( poolGroup . getPoolInfo ( ) , poolGroupMetrics ) ; } poolInfoToMetrics = newPoolNameToMetrics ;
public class PathOverrideService { /** * Generate a path select string * @ return Select query string */ private String getPathSelectString ( ) { } }
String queryString = "SELECT " + Constants . DB_TABLE_REQUEST_RESPONSE + "." + Constants . GENERIC_CLIENT_UUID + "," + Constants . DB_TABLE_PATH + "." + Constants . GENERIC_ID + "," + Constants . PATH_PROFILE_PATHNAME + "," + Constants . PATH_PROFILE_ACTUAL_PATH + "," + Constants . PATH_PROFILE_BODY_FILTER + "," + Constants . PATH_PROFILE_GROUP_IDS + "," + Constants . DB_TABLE_PATH + "." + Constants . PATH_PROFILE_PROFILE_ID + "," + Constants . PATH_PROFILE_PATH_ORDER + "," + Constants . REQUEST_RESPONSE_REPEAT_NUMBER + "," + Constants . REQUEST_RESPONSE_REQUEST_ENABLED + "," + Constants . REQUEST_RESPONSE_RESPONSE_ENABLED + "," + Constants . PATH_PROFILE_CONTENT_TYPE + "," + Constants . PATH_PROFILE_REQUEST_TYPE + "," + Constants . PATH_PROFILE_GLOBAL + " FROM " + Constants . DB_TABLE_PATH + " JOIN " + Constants . DB_TABLE_REQUEST_RESPONSE + " ON " + Constants . DB_TABLE_PATH + "." + Constants . GENERIC_ID + "=" + Constants . DB_TABLE_REQUEST_RESPONSE + "." + Constants . REQUEST_RESPONSE_PATH_ID + " AND " + Constants . DB_TABLE_REQUEST_RESPONSE + "." + Constants . GENERIC_CLIENT_UUID + " = ?" ; return queryString ;
public class ClasspathWorkspaceReader { /** * Returns if two artifacts are equivalent , that is , have the same groupId , artifactId and Version * @ param artifact * left side artifact to be compared * @ param foundArtifact * right side artifact to be compared * @ return true if the groupId , artifactId and version matches */ private boolean areEquivalent ( final Artifact artifact , final Artifact foundArtifact ) { } }
boolean areEquivalent = ( foundArtifact . getGroupId ( ) . equals ( artifact . getGroupId ( ) ) && foundArtifact . getArtifactId ( ) . equals ( artifact . getArtifactId ( ) ) && foundArtifact . getVersion ( ) . equals ( artifact . getVersion ( ) ) ) ; return areEquivalent ;
public class Calc { /** * Rotate a structure object . The rotation Matrix must be a * pre - multiplication Matrix . * @ param structure * the structure to be rotated * @ param m * rotation matrix to be applied */ public static final void rotate ( Structure structure , Matrix m ) { } }
AtomIterator iter = new AtomIterator ( structure ) ; while ( iter . hasNext ( ) ) { Atom atom = iter . next ( ) ; rotate ( atom , m ) ; }
public class MalisisRegistry {
    /**
     * Registers a {@link IRegisterable}.<br>
     * The object has to be either a {@link Block} or an {@link Item}.
     *
     * @param registerable the registerable
     * @throws IllegalArgumentException if no name is set, or the object is neither a block nor an item
     */
    public static void register(IRegisterable<?> registerable) {
        ResourceLocation name = registerable.getName();
        if (name == null)
            throw new IllegalArgumentException("No name specified for registration for " + registerable.getClass().getName());
        if (!(registerable instanceof Block || registerable instanceof Item))
            throw new IllegalArgumentException("Cannot register " + registerable.getClass().getName() + " (" + name + ") because it's neither a block or an item.");
        if (registerable instanceof Block) {
            Block block = (Block) registerable;
            ForgeRegistries.BLOCKS.register(block);
            // A block may provide a companion item form; register it too when present.
            Item item = registerable.getItem(block);
            if (item != null)
                ForgeRegistries.ITEMS.register(item);
            // register the mapper for the block and the model for the item (client side only)
            if (MalisisCore.isClient()) {
                ModelLoader.setCustomStateMapper(block, b -> ImmutableMap.of());
                if (item != null)
                    EmptyModelLoader.register(item);
            }
            ClientNotificationManager.discover(block);
        } else if (registerable instanceof Item) {
            Item item = (Item) registerable;
            ForgeRegistries.ITEMS.register(item);
            // Plain items only need the (empty) model registered on the client.
            if (MalisisCore.isClient())
                EmptyModelLoader.register(item);
        }
    }
}
public class Splash {
    /**
     * Display splash, and launch app.
     *
     * @param args the command line arguments
     */
    public static void main(String[] args) {
        // Resolve the splash image name: command-line param, falling back to the default.
        String splashImage = Splash.getParam(args, SPLASH);
        if ((splashImage == null) || (splashImage.length() == 0))
            splashImage = DEFAULT_SPLASH;
        URL url = Splash.class.getResource(splashImage);
        // Not found on this class's classpath: try the pluggable class finder, if any.
        if (url == null)
            if (ClassServiceUtility.getClassService().getClassFinder(null) != null)
                url = ClassServiceUtility.getClassService().getClassFinder(null).findResourceURL(splashImage, null);
        Container container = null;
        Splash.splash(container, url);
        // Resolve the main class to launch: explicit param, default "Thin", or a
        // leading '.' meaning "relative to the root package".
        String main = Splash.getParam(args, MAIN);
        if ((main == null) || (main.length() == 0))
            main = ROOT_PACKAGE + "Thin";
        else if (main.charAt(0) == '.')
            main = ROOT_PACKAGE + main.substring(1);
        // Run the application, then tear the splash down once main returns.
        Splash.invokeMain(main, args);
        Splash.disposeSplash();
    }
}
public class AfplibFactoryImpl { /** * < ! - - begin - user - doc - - > * < ! - - end - user - doc - - > * @ generated */ public PresentationControlPRSFlg createPresentationControlPRSFlgFromString ( EDataType eDataType , String initialValue ) { } }
PresentationControlPRSFlg result = PresentationControlPRSFlg . get ( initialValue ) ; if ( result == null ) throw new IllegalArgumentException ( "The value '" + initialValue + "' is not a valid enumerator of '" + eDataType . getName ( ) + "'" ) ; return result ;
public class ResourceService { /** * execute with a resource * @ param copyResourcesMojo * @ param resource * @ param outputDirectory * @ param flatten * @ throws ResourceExecutionException */ public static void execute ( CopyResourcesMojo copyResourcesMojo , Resource resource , File outputDirectory ) throws ResourceExecutionException { } }
copyResourcesMojo . getLog ( ) . debug ( "Execute resource : " + resource ) ; // choose a location to checkout project File workspacePlugin = PathUtils . getWorkspace ( copyResourcesMojo ) ; // security if ( workspacePlugin . exists ( ) ) { copyResourcesMojo . getLog ( ) . debug ( "delete workspacePlugin resource because already exist : '" + workspacePlugin . getAbsolutePath ( ) + "'" ) ; if ( workspacePlugin . delete ( ) ) { copyResourcesMojo . getLog ( ) . debug ( "Unable to delete workspace plugin directory '" + workspacePlugin + "'" ) ; } } // find correct strategy ProtocolStrategy strategy ; try { strategy = ProtocolService . getStrategy ( resource ) ; copyResourcesMojo . getLog ( ) . debug ( "current strategy is " + strategy . getClass ( ) . getSimpleName ( ) ) ; } catch ( ProtocolException e ) { throw new ResourceExecutionException ( "Protocol implementation not found" , e ) ; } // strategy return a source folder String sourceFolder = strategy . getSourceFolder ( resource , copyResourcesMojo , workspacePlugin ) ; // source folder is copied into destination try { boolean flatten = resource . getFlatten ( ) == null ? false : resource . getFlatten ( ) ; FileService . copyFilesIntoOutputDirectory ( copyResourcesMojo , new File ( sourceFolder ) , outputDirectory , resource , flatten ) ; } catch ( FileNotFoundException e ) { throw new ResourceExecutionException ( e ) ; } catch ( InvalidSourceException e ) { throw new ResourceExecutionException ( e ) ; } catch ( IOException e ) { throw new ResourceExecutionException ( e ) ; }
public class Task { /** * The Baseline Duration field shows the original span of time planned * to complete a task . * @ return - duration string */ public Duration getBaselineDuration ( ) { } }
Object result = getCachedValue ( TaskField . BASELINE_DURATION ) ; if ( result == null ) { result = getCachedValue ( TaskField . BASELINE_ESTIMATED_DURATION ) ; } if ( ! ( result instanceof Duration ) ) { result = null ; } return ( Duration ) result ;
public class Node { /** * syck _ seq _ empty */ public void seqEmpty ( ) { } }
Data . Seq s = ( Data . Seq ) data ; s . idx = 0 ; s . capa = YAML . ALLOC_CT ; s . items = new Object [ s . capa ] ;
public class JsonUtil { /** * Writes the given value with the given writer . * @ param writer * @ param values * @ throws IOException * @ author vvakame */ public static void putLongList ( Writer writer , List < Long > values ) throws IOException { } }
if ( values == null ) { writer . write ( "null" ) ; } else { startArray ( writer ) ; for ( int i = 0 ; i < values . size ( ) ; i ++ ) { put ( writer , values . get ( i ) ) ; if ( i != values . size ( ) - 1 ) { addSeparator ( writer ) ; } } endArray ( writer ) ; }
public class ManagerReaderImpl {
    /**
     * Reads line by line from the asterisk server, sets the protocol identifier
     * (using a generated
     * {@link org.asteriskjava.manager.event.ProtocolIdentifierReceivedEvent})
     * as soon as it is received and dispatches the received events and
     * responses via the associated dispatcher.
     *
     * @see org.asteriskjava.manager.internal.Dispatcher#dispatchEvent(ManagerEvent)
     * @see org.asteriskjava.manager.internal.Dispatcher#dispatchResponse(ManagerResponse)
     */
    public void run() {
        // Accumulates the key/value lines of the packet currently being read;
        // flushed (dispatched + cleared) on each empty line.
        final Map<String, Object> buffer = new HashMap<>();
        String line;
        if (socket == null) {
            throw new IllegalStateException("Unable to run: socket is null.");
        }
        this.die = false;
        this.dead = false;
        try {
            // main loop
            while (!this.die && (line = socket.readLine()) != null) {
                // maybe we will find a better way to identify the protocol
                // identifier but for now this works quite well.
                if (line.startsWith("Asterisk Call Manager/")
                        || line.startsWith("Asterisk Call Manager Proxy/")
                        || line.startsWith("Asterisk Manager Proxy/")
                        || line.startsWith("OpenPBX Call Manager/")
                        || line.startsWith("CallWeaver Call Manager/")) {
                    ProtocolIdentifierReceivedEvent protocolIdentifierReceivedEvent;
                    protocolIdentifierReceivedEvent = new ProtocolIdentifierReceivedEvent(source);
                    protocolIdentifierReceivedEvent.setProtocolIdentifier(line);
                    protocolIdentifierReceivedEvent.setDateReceived(DateUtil.getDate());
                    dispatcher.dispatchEvent(protocolIdentifierReceivedEvent);
                    continue;
                }
                /*
                 * Special handling for "Response: Follows" (CommandResponse). As
                 * we are using "\r\n" as the delimiter for line this also
                 * handles multiline results as long as they only contain "\n".
                 */
                if ("Follows".equals(buffer.get("response")) && line.endsWith("--END COMMAND--")) {
                    buffer.put(COMMAND_RESULT_RESPONSE_KEY, line);
                    continue;
                }
                if (line.length() > 0) {
                    // begin of workaround for Astersik bug 13319; see AJ-77.
                    // Use this workaround only when line starts from "From " or "To ".
                    int isFromAtStart = line.indexOf("From ");
                    int isToAtStart = line.indexOf("To ");
                    int delimiterIndex = isFromAtStart == 0 || isToAtStart == 0
                            ? line.indexOf(" ")
                            : line.indexOf(":");
                    // end of workaround for Astersik bug 13319
                    int delimiterLength = 1;
                    // Only treat the line as "name<delim>value" when the delimiter exists
                    // and something follows it.
                    if (delimiterIndex > 0 && line.length() > delimiterIndex + delimiterLength) {
                        String name = line.substring(0, delimiterIndex).toLowerCase(Locale.ENGLISH).trim();
                        String value = line.substring(delimiterIndex + delimiterLength).trim();
                        addToBuffer(buffer, name, value);
                        // TODO tracing
                        // logger.debug("Got name [" + name + "], value: [" + value + "]");
                    }
                }
                // an empty line indicates a normal response's or event's end so
                // we build the corresponding value object and dispatch it through
                // the ManagerConnection.
                if (line.length() == 0) {
                    if (buffer.containsKey("event")) {
                        // TODO tracing
                        // logger.debug("attempting to build event: " + buffer.get("event"));
                        ManagerEvent event = buildEvent(source, buffer);
                        if (event != null) {
                            dispatcher.dispatchEvent(event);
                            // Backwards compatibility for bridge events.
                            // Asterisk 13 uses BridgeCreate, BridgeEnter, BridgeLeave
                            // and BridgeDestroy events. So here we track active bridges
                            // and simulate BridgeEvent's for them allowing legacy code
                            // to still work with BridgeEvent's.
                            ManagerEvent secondaryEvent = compatibility.handleEvent(event);
                            if (secondaryEvent != null) {
                                dispatcher.dispatchEvent(secondaryEvent);
                            }
                        } else {
                            logger.debug("buildEvent returned null");
                        }
                    } else if (buffer.containsKey("response")) {
                        ManagerResponse response = buildResponse(buffer);
                        // TODO tracing
                        // logger.debug("attempting to build response");
                        if (response != null) {
                            dispatcher.dispatchResponse(response);
                        }
                    } else {
                        if (!buffer.isEmpty()) {
                            logger.debug("Buffer contains neither response nor event");
                        }
                    }
                    buffer.clear();
                }
            }
            this.dead = true;
            logger.debug("Reached end of stream, terminating reader.");
        } catch (IOException e) {
            this.terminationException = e;
            this.dead = true;
            logger.info("Terminating reader thread: " + e.getMessage());
        } finally {
            this.dead = true;
            // cleans resources and reconnects if needed
            DisconnectEvent disconnectEvent = new DisconnectEvent(source);
            disconnectEvent.setDateReceived(DateUtil.getDate());
            dispatcher.dispatchEvent(disconnectEvent);
        }
    }
}
public class AbstractFileLookup { /** * Looks up the file , see : { @ link DefaultFileLookup } . * @ param filename might be the name of the file ( too look it up in the class path ) or an url to a file . * @ return an input stream to the file or null if nothing found through all lookup steps . * @ throws FileNotFoundException if file cannot be found */ @ Override public InputStream lookupFileStrict ( String filename , ClassLoader cl ) throws FileNotFoundException { } }
InputStream is = filename == null || filename . length ( ) == 0 ? null : getAsInputStreamFromClassLoader ( filename , cl ) ; if ( is == null ) { if ( log . isDebugEnabled ( ) ) log . debugf ( "Unable to find file %s in classpath; searching for this file on the filesystem instead." , filename ) ; return new FileInputStream ( filename ) ; } return is ;
public class IPDImpl { /** * < ! - - begin - user - doc - - > * < ! - - end - user - doc - - > * @ generated */ @ Override public void eSet ( int featureID , Object newValue ) { } }
switch ( featureID ) { case AfplibPackage . IPD__IOC_ADAT : setIOCAdat ( ( byte [ ] ) newValue ) ; return ; case AfplibPackage . IPD__IMAGE_DATA : setImageData ( ( byte [ ] ) newValue ) ; return ; } super . eSet ( featureID , newValue ) ;
public class JavaParser {
    /**
     * src/main/resources/org/drools/compiler/semantics/java/parser/Java.g:950:1:
     * retractStatement : s= 'retract' '(' expression c= ')' ;
     *
     * Generated ANTLR rule: parses a 'retract(expression)' statement and, when not
     * backtracking, records it as a DELETE block descriptor spanning from the
     * 'retract' token to the closing parenthesis.
     */
    public final void retractStatement() throws RecognitionException {
        int retractStatement_StartIndex = input.index();
        Token s = null;
        Token c = null;
        ParserRuleReturnScope expression9 = null;
        try {
            // Memoization: skip if this rule was already attempted at this position.
            if (state.backtracking > 0 && alreadyParsedRule(input, 91)) {
                return;
            }
            // src/main/resources/org/drools/compiler/semantics/java/parser/Java.g:951:5 : ( s= 'retract' '(' expression c= ')' )
            // src/main/resources/org/drools/compiler/semantics/java/parser/Java.g:951:7 : s= 'retract' '(' expression c= ')'
            {
                s = (Token) match(input, 103, FOLLOW_103_in_retractStatement4097);
                if (state.failed) return;
                match(input, 36, FOLLOW_36_in_retractStatement4099);
                if (state.failed) return;
                pushFollow(FOLLOW_expression_in_retractStatement4105);
                expression9 = expression();
                state._fsp--;
                if (state.failed) return;
                c = (Token) match(input, 37, FOLLOW_37_in_retractStatement4115);
                if (state.failed) return;
                if (state.backtracking == 0) {
                    // Record the parsed expression as a DELETE block descriptor with
                    // exact source offsets (start of 'retract' .. end of ')').
                    JavaStatementBlockDescr d = new JavaStatementBlockDescr(
                        (expression9 != null ? input.toString(expression9.start, expression9.stop) : null),
                        JavaBlockDescr.BlockType.DELETE);
                    d.setStart(((CommonToken) s).getStartIndex());
                    this.addBlockDescr(d);
                    d.setEnd(((CommonToken) c).getStopIndex());
                }
            }
        } catch (RecognitionException re) {
            reportError(re);
            recover(input, re);
        } finally {
            // do for sure before leaving
            if (state.backtracking > 0) {
                memoize(input, 91, retractStatement_StartIndex);
            }
        }
    }
}
public class NettyClient {
    /**
     * Opens a client connection to the given server address.
     *
     * @param serverSocketAddress the remote address to connect to
     * @return the Netty future tracking the connect attempt
     * @throws IllegalStateException if the client has not been initialized yet
     * @throws ChannelException if the connection cannot be created (with a dedicated
     *         message when the OS runs out of file handles)
     */
    ChannelFuture connect(final InetSocketAddress serverSocketAddress) {
        checkState(bootstrap != null, "Client has not been initialized yet.");
        // Child channel pipeline for accepted connections
        bootstrap.handler(new ChannelInitializer<SocketChannel>() {
            @Override
            public void initChannel(SocketChannel channel) throws Exception {
                // SSL handler should be added first in the pipeline
                if (clientSSLFactory != null) {
                    SslHandler sslHandler = clientSSLFactory.createNettySSLHandler(
                        serverSocketAddress.getAddress().getCanonicalHostName(), serverSocketAddress.getPort());
                    channel.pipeline().addLast("ssl", sslHandler);
                }
                channel.pipeline().addLast(protocol.getClientChannelHandlers());
            }
        });
        try {
            return bootstrap.connect(serverSocketAddress);
        } catch (ChannelException e) {
            // "Too many open files" may surface directly as a SocketException cause, or
            // wrapped one level deeper; rewrap either case with an actionable message.
            if ((e.getCause() instanceof java.net.SocketException
                    && e.getCause().getMessage().equals("Too many open files"))
                || (e.getCause() instanceof ChannelException
                    && e.getCause().getCause() instanceof java.net.SocketException
                    && e.getCause().getCause().getMessage().equals("Too many open files"))) {
                throw new ChannelException(
                    "The operating system does not offer enough file handles to open the network connection. "
                        + "Please increase the number of available file handles.", e.getCause());
            } else {
                throw e;
            }
        }
    }
}
public class Utils { /** * Get the MIME type of a file * @ param url * @ return */ public static String getMimeType ( String url ) { } }
String type = null ; String extension = MimeTypeMap . getFileExtensionFromUrl ( url ) ; if ( extension != null ) { MimeTypeMap mime = MimeTypeMap . getSingleton ( ) ; type = mime . getMimeTypeFromExtension ( extension ) ; } return type ;
public class Database {
    /**
     * Reads the given properties of an object from the database.
     *
     * @param name       the object name whose properties are requested
     * @param properties the properties to read
     * @return the property values as returned by the database
     * @throws DevFailed if the database access fails
     */
    public DbDatum[] get_property(String name, DbDatum[] properties) throws DevFailed {
        // Pure delegation to the DAO that implements the actual database access.
        return databaseDAO.get_property(this, name, properties);
    }
}
public class StandardRoadNetwork { /** * Compute the bounds of this element . * This function does not update the internal * attribute replied by { @ link # getBoundingBox ( ) } */ @ Override @ Pure protected Rectangle2d calcBounds ( ) { } }
final Rectangle2d rect = new Rectangle2d ( ) ; boolean first = true ; Rectangle2d rs ; for ( final RoadSegment segment : getRoadSegments ( ) ) { rs = segment . getBoundingBox ( ) ; if ( rs != null && ! rs . isEmpty ( ) ) { if ( first ) { first = false ; rect . set ( rs ) ; } else { rect . setUnion ( rs ) ; } } } return first ? null : rect ;
public class TaskAddCollectionOptions { /** * Set the time the request was issued . Client libraries typically set this to the current system clock time ; set it explicitly if you are calling the REST API directly . * @ param ocpDate the ocpDate value to set * @ return the TaskAddCollectionOptions object itself . */ public TaskAddCollectionOptions withOcpDate ( DateTime ocpDate ) { } }
if ( ocpDate == null ) { this . ocpDate = null ; } else { this . ocpDate = new DateTimeRfc1123 ( ocpDate ) ; } return this ;
public class ZookeeperServerManager {
    /**
     * Starts (or restarts) the embedded ZooKeeper server from the current {@code properties}.
     * A null configuration shuts the server down; a missing 'clientPort' aborts the startup;
     * an already running instance is shut down first so the new configuration takes effect.
     */
    public void startup() {
        // Dictionary<?,?> dict = cmPropertyPlaceholder.getConfigAdmin()
        //     .getConfiguration(cmPropertyPlaceholder.getPersistentId()).getProperties();
        Dictionary<?, ?> dict = properties;
        // System.out.println("### ZOOKEEPER :: dictionary: " + dict);
        LOG.info("Staring up ZooKeeper server");
        if (dict == null) {
            // No configuration at all: make sure nothing keeps running.
            LOG.info("Ignoring configuration update because updated configuration is empty.");
            shutdown();
            return;
        }
        if (main != null) {
            // stop the current instance
            shutdown();
            // then reconfigure and start again.
        }
        if (dict.get("clientPort") == null) {
            LOG.info("Ignoring configuration update because required property 'clientPort' isn't set.");
            return;
        }
        // Copy the dictionary into the Properties object ZooKeeper understands.
        Properties props = new Properties();
        for (Enumeration<?> e = dict.keys(); e.hasMoreElements();) {
            Object key = e.nextElement();
            props.put(key, dict.get(key));
        }
        try {
            main = ZookeeperServerImpl.getZookeeperServer(props);
            // Run the (blocking) server startup on its own thread.
            zkMainThread = new Thread(new Runnable() {
                public void run() {
                    try {
                        main.startup();
                    } catch (IOException e) {
                        LOG.log(Level.SEVERE, "Problem running ZooKeeper server.", e);
                    }
                }
            });
            zkMainThread.start();
            LOG.info("Applied configuration update :" + props);
        } catch (Exception th) {
            LOG.log(Level.SEVERE, "Problem applying configuration update: " + props, th);
        }
    }
}
public class HeartbeatBackground { /** * Schedule the next heartbeat */ private void scheduleHeartbeat ( ) { } }
// elapsed time in seconds since the last heartbeat long elapsedSecsSinceLastHeartBeat = System . currentTimeMillis ( ) / 1000 - lastHeartbeatStartTimeInSecs ; /* * The initial delay for the new scheduling is 0 if the elapsed * time is more than the heartbeat time interval , otherwise it is the * difference between the heartbeat time interval and the elapsed time . */ long initialDelay = Math . max ( heartBeatIntervalInSecs - elapsedSecsSinceLastHeartBeat , 0 ) ; LOGGER . debug ( "schedule heartbeat task with initial delay of {} seconds" , initialDelay ) ; // Creates and executes a periodic action to send heartbeats this . heartbeatFuture = this . scheduler . schedule ( this , initialDelay , TimeUnit . SECONDS ) ;
public class PropertiesManagerCore { /** * Close all GeoPackages in the manager */ public void closeGeoPackages ( ) { } }
for ( PropertiesCoreExtension < T , ? , ? , ? > properties : propertiesMap . values ( ) ) { properties . getGeoPackage ( ) . close ( ) ; } propertiesMap . clear ( ) ;
public class ItemProcessorPipeline { /** * Adds several processors to the pipeline of processors . */ public void addProcessors ( Collection < ItemProcessor > processors ) { } }
if ( processors == null ) { processors = new ArrayList < > ( ) ; } processors . addAll ( processors ) ;
public class TransactionHelper { /** * Starts a new Hibernate transaction . Note that the caller accepts responsibility for closing the transaction * @ return */ public HibernateTransaction start ( ) { } }
final Session session = sessionProvider . get ( ) ; final Transaction tx = session . beginTransaction ( ) ; return new HibernateTransaction ( tx ) ;
public class AWSIotClient { /** * Lists the active violations for a given Device Defender security profile . * @ param listActiveViolationsRequest * @ return Result of the ListActiveViolations operation returned by the service . * @ throws InvalidRequestException * The request is not valid . * @ throws ResourceNotFoundException * The specified resource does not exist . * @ throws ThrottlingException * The rate exceeds the limit . * @ throws InternalFailureException * An unexpected error has occurred . * @ sample AWSIot . ListActiveViolations */ @ Override public ListActiveViolationsResult listActiveViolations ( ListActiveViolationsRequest request ) { } }
request = beforeClientExecution ( request ) ; return executeListActiveViolations ( request ) ;
public class FastAdapter { /** * Gets the adapter for the given position * @ param position the global position * @ return the adapter responsible for this global position */ @ Nullable public IAdapter < Item > getAdapter ( int position ) { } }
// if we are out of range just return null if ( position < 0 || position >= mGlobalSize ) { return null ; } if ( mVerbose ) Log . v ( TAG , "getAdapter" ) ; // now get the adapter which is responsible for the given position return mAdapterSizes . valueAt ( floorIndex ( mAdapterSizes , position ) ) ;
public class tmtrafficaction { /** * Use this API to update tmtrafficaction . */ public static base_response update ( nitro_service client , tmtrafficaction resource ) throws Exception { } }
// Build a payload carrying only the fields NITRO permits on update,
// then push it to the appliance.
final tmtrafficaction payload = new tmtrafficaction();
payload.name = resource.name;
payload.apptimeout = resource.apptimeout;
payload.sso = resource.sso;
payload.formssoaction = resource.formssoaction;
payload.persistentcookie = resource.persistentcookie;
payload.initiatelogout = resource.initiatelogout;
payload.kcdaccount = resource.kcdaccount;
payload.samlssoprofile = resource.samlssoprofile;
return payload.update_resource(client);
public class DescribeRdsDbInstancesResult { /** * An a array of < code > RdsDbInstance < / code > objects that describe the instances . * @ return An a array of < code > RdsDbInstance < / code > objects that describe the instances . */ public java . util . List < RdsDbInstance > getRdsDbInstances ( ) { } }
// Lazily create the backing list so callers never observe null
// (live-list accessor: mutations are visible to this object).
if (rdsDbInstances != null) {
    return rdsDbInstances;
}
rdsDbInstances = new com.amazonaws.internal.SdkInternalList<RdsDbInstance>();
return rdsDbInstances;
public class LikeIgnoreCase { /** * / * ( non - Javadoc ) * @ see net . leadware . persistence . tools . api . utils . restrictions . Predicate # generateJPAPredicate ( javax . persistence . criteria . CriteriaBuilder , javax . persistence . criteria . Root ) */ @ Override public Predicate generateJPAPredicate ( CriteriaBuilder criteriaBuilder , Root < ? > root ) { } }
// On retourne le predicat return criteriaBuilder . like ( criteriaBuilder . lower ( this . < String > buildPropertyPath ( root , property ) ) , value . toLowerCase ( ) ) ;
public class DerivedByRemovalFrom { /** * Gets the value of the others property . * This accessor method returns a reference to the live list , * not a snapshot . Therefore any modification you make to the * returned list will be present inside the JAXB object . * This is why there is not a < CODE > set < / CODE > method for the others property . * For example , to add a new item , do as follows : * < pre > * getOthers ( ) . add ( newItem ) ; * < / pre > * Objects of the following type ( s ) are allowed in the list * { @ link Other } */ public List < Other > getOther ( ) { } }
// Lazily populate the live list from the known-attribute table on first
// access, per the JAXB accessor contract documented above.
if (this.others == null) {
    this.others = AttributeList.populateKnownAttributes(
            this, all, org.openprovenance.prov.model.Other.class);
}
return this.others;
public class DefaultGroovyMethods { /** * internal helper method */ protected static < T > T callClosureForLine ( @ ClosureParams ( value = FromString . class , options = { } }
"String" , "String,Integer" } ) Closure < T > closure , String line , int counter ) {
    // Two-parameter closures receive the line plus its counter;
    // single-parameter closures receive only the line.
    if ( closure . getMaximumNumberOfParameters ( ) == 2 ) {
        return closure . call ( line , counter ) ;
    }
    return closure . call ( line ) ;
public class UpdateMethodResponseResult { /** * A key - value map specifying required or optional response parameters that API Gateway can send back to the caller . * A key defines a method response header and the value specifies whether the associated method response header is * required or not . The expression of the key must match the pattern < code > method . response . header . { name } < / code > , * where < code > name < / code > is a valid and unique header name . API Gateway passes certain integration response data * to the method response headers specified here according to the mapping you prescribe in the API ' s * < a > IntegrationResponse < / a > . The integration response data that can be mapped include an integration response * header expressed in < code > integration . response . header . { name } < / code > , a static value enclosed within a pair of * single quotes ( e . g . , < code > ' application / json ' < / code > ) , or a JSON expression from the back - end response payload in * the form of < code > integration . response . body . { JSON - expression } < / code > , where < code > JSON - expression < / code > is a * valid JSON expression without the < code > $ < / code > prefix . ) * @ param responseParameters * A key - value map specifying required or optional response parameters that API Gateway can send back to the * caller . A key defines a method response header and the value specifies whether the associated method * response header is required or not . The expression of the key must match the pattern * < code > method . response . header . { name } < / code > , where < code > name < / code > is a valid and unique header name . API * Gateway passes certain integration response data to the method response headers specified here according * to the mapping you prescribe in the API ' s < a > IntegrationResponse < / a > . 
The integration response data that * can be mapped include an integration response header expressed in * < code > integration . response . header . { name } < / code > , a static value enclosed within a pair of single quotes * ( e . g . , < code > ' application / json ' < / code > ) , or a JSON expression from the back - end response payload in the * form of < code > integration . response . body . { JSON - expression } < / code > , where < code > JSON - expression < / code > is a * valid JSON expression without the < code > $ < / code > prefix . ) * @ return Returns a reference to this object so that method calls can be chained together . */ public UpdateMethodResponseResult withResponseParameters ( java . util . Map < String , Boolean > responseParameters ) { } }
// Delegate to the plain setter, then return this for fluent chaining.
this.setResponseParameters(responseParameters);
return this;
public class JsonView { /** * Creates a JSON [ JSONObject , JSONArray , JSONNUll ] from the model values . */ protected JSON createJSON ( Map model , HttpServletRequest request , HttpServletResponse response ) { } }
// Delegates straight to the default model-to-JSON conversion.
// NOTE(review): the request/response arguments are unused here; presumably
// subclasses override this method when they need them — confirm before
// simplifying the signature.
return defaultCreateJSON ( model ) ;
public class Fibers { /** * Runs an action in a new fiber and awaits the fiber ' s termination . * @ param scheduler the { @ link FiberScheduler } to use when scheduling the fiber . * @ param target the operation * @ throws ExecutionException * @ throws InterruptedException */ public static void runInFiber ( FiberScheduler scheduler , SuspendableRunnable target ) throws ExecutionException , InterruptedException { } }
// Thin delegation to FiberUtil: runs target in a new fiber on the given
// scheduler and blocks until it terminates, propagating
// ExecutionException/InterruptedException to the caller.
FiberUtil . runInFiber ( scheduler , target ) ;
public class SessionDialog { /** * Creates the UI shared context for the given context . * Should be called when a new context is added to the session and before adding its panels . * @ param context the context * @ since 2.6.0 */ public void createUISharedContext ( Context context ) { } }
// Without a session there is no shared UI state to create.
if (session == null) {
    return;
}
// Store a duplicate of the context, keyed by its index.
uiContexts.put(context.getIndex(), context.duplicate());
public class UInteger { /** * Figure out the size of the precache . * @ return The parsed value of the system property * { @ link # PRECACHE _ PROPERTY } or { @ link # DEFAULT _ PRECACHE _ SIZE } if * the property is not set , not a number or retrieving results in a * { @ link SecurityException } . If the parsed value is zero or * negative no cache will be created . If the value is larger than * { @ link Integer # MAX _ VALUE } then Integer # MAX _ VALUE will be used . */ private static final int getPrecacheSize ( ) { } }
// Read the precache-size system property, falling back to
// DEFAULT_PRECACHE_SIZE when the property is missing, unreadable
// (SecurityException), empty, or not a number. Negative values mean
// "no cache" (0); values above Integer.MAX_VALUE are clamped.
String prop;
try {
    prop = System.getProperty(PRECACHE_PROPERTY);
} catch (SecurityException e) {
    // A security manager denied access; behave as if the property were unset.
    // FIXME : should we log this somewhere ?
    return DEFAULT_PRECACHE_SIZE;
}
if (prop == null || prop.isEmpty()) {
    // unset or empty value
    // FIXME : should we log this somewhere ?
    return DEFAULT_PRECACHE_SIZE;
}
final long parsed;
try {
    parsed = Long.parseLong(prop);
} catch (NumberFormatException e) {
    // not a valid number
    // FIXME : should we log this somewhere ?
    return DEFAULT_PRECACHE_SIZE;
}
// treat negative value as no cache ...
if (parsed < 0) {
    return 0;
}
// FIXME : should we log this somewhere ?
if (parsed > Integer.MAX_VALUE) {
    return Integer.MAX_VALUE;
}
return (int) parsed;
public class StaticWord2Vec { /** * This method returns mean vector , built from words / labels passed in * @ param labels * @ return */ @ Override public INDArray getWordVectorsMean ( Collection < String > labels ) { } }
// Gather the vectors for the requested labels into one matrix, then average.
INDArray matrix = getWordVectors ( labels ) ;
// TODO : check this ( 1)
// NOTE(review): mean(1) averages along dimension 1. If getWordVectors returns
// one ROW per label (the usual convention), the mean vector across labels
// would be mean(0); confirm the intended dimension against getWordVectors'
// layout before relying on this.
return matrix . mean ( 1 ) ;
public class GenericResource { /** * Returns the information about lock . * @ param token lock token * @ param lockOwner lockowner * @ param timeOut lock timeout * @ return lock information */ public static HierarchicalProperty lockDiscovery ( String token , String lockOwner , String timeOut ) { } }
// Build a DAV:lockdiscovery property tree describing a single exclusive
// write lock of infinite depth, with optional owner and lock-token children.
HierarchicalProperty lockDiscovery = new HierarchicalProperty ( new QName ( "DAV:" , "lockdiscovery" ) ) ;
HierarchicalProperty activeLock = lockDiscovery . addChild ( new HierarchicalProperty ( new QName ( "DAV:" , "activelock" ) ) ) ;
// DAV:locktype -> write
HierarchicalProperty lockType = activeLock . addChild ( new HierarchicalProperty ( new QName ( "DAV:" , "locktype" ) ) ) ;
lockType . addChild ( new HierarchicalProperty ( new QName ( "DAV:" , "write" ) ) ) ;
// DAV:lockscope -> exclusive
HierarchicalProperty lockScope = activeLock . addChild ( new HierarchicalProperty ( new QName ( "DAV:" , "lockscope" ) ) ) ;
lockScope . addChild ( new HierarchicalProperty ( new QName ( "DAV:" , "exclusive" ) ) ) ;
HierarchicalProperty depth = activeLock . addChild ( new HierarchicalProperty ( new QName ( "DAV:" , "depth" ) ) ) ;
depth . setValue ( "Infinity" ) ;
// Owner is optional.
if ( lockOwner != null ) {
    HierarchicalProperty owner = activeLock . addChild ( new HierarchicalProperty ( new QName ( "DAV:" , "owner" ) ) ) ;
    owner . setValue ( lockOwner ) ;
}
// Timeout is rendered in the WebDAV "Second-N" form.
HierarchicalProperty timeout = activeLock . addChild ( new HierarchicalProperty ( new QName ( "DAV:" , "timeout" ) ) ) ;
timeout . setValue ( "Second-" + timeOut ) ;
// Token is optional; when present it is wrapped in a DAV:href element.
if ( token != null ) {
    HierarchicalProperty lockToken = activeLock . addChild ( new HierarchicalProperty ( new QName ( "DAV:" , "locktoken" ) ) ) ;
    HierarchicalProperty lockHref = lockToken . addChild ( new HierarchicalProperty ( new QName ( "DAV:" , "href" ) ) ) ;
    lockHref . setValue ( token ) ;
}
return lockDiscovery ;
public class MolaDbClient { /** * Delete a table from moladb * @ param request Container for the necessary parameters to * execute the delete table service method on Moladb . * @ return The responseContent from the Delete table service method , as returned by * Moladb . * @ throws BceClientException * If any internal errors are encountered inside the client while * attempting to make the request or handle the responseContent . For example * if a network connection is not available . * @ throws BceServiceException * If an error responseContent is returned by Moladb indicating * either a problem with the data in the request , or a server side issue . */ public DeleteTableResponse deleteTable ( DeleteTableRequest request ) { } }
checkNotNull ( request , "request should not be null." ) ; InternalRequest httpRequest = createRequestUnderInstance ( HttpMethodName . DELETE , MolaDbConstants . URI_TABLE , request . getTableName ( ) ) ; DeleteTableResponse ret = this . invokeHttpClient ( httpRequest , DeleteTableResponse . class ) ; return ret ;
public class EC2Context { /** * < br > * Needed AWS actions : * < ul > * < li > autoscaling : DescribeAutoScalingGroups < / li > * < / ul > * @ return the tags of the given auto sclaing group */ public Map < String , String > getAutoScalingGroupTags ( String autoScalingGroupName ) { } }
Preconditions.checkArgument(autoScalingGroupName != null && !autoScalingGroupName.isEmpty());
// Resolve the group, then flatten its tag descriptions into a key -> value map.
final AutoScalingGroup group = this.getAutoScalingGroup(autoScalingGroupName);
final Map<String, String> tags = new HashMap<>();
group.getTags().forEach(tag -> tags.put(tag.getKey(), tag.getValue()));
return tags;
public class TemplatedEmail { /** * Creates the mime message for the e - mail . * @ param messageBody * @ return the message */ private Message buildMessage ( String messageBody ) throws MessagingException { } }
// Assemble a multipart MIME message: an HTML or plain-text body part,
// optional inline images (classpath resources or workflow assets, referenced
// by Content-ID), and optional file attachments.
Message message = new MimeMessage ( getSession ( ) ) ;
message . setFrom ( new InternetAddress ( fromAddress ) ) ;
message . setSubject ( substitute ( subject ) ) ;
Multipart multiPart = new MimeMultipart ( ) ;
// html body part
BodyPart bodyPart = new MimeBodyPart ( ) ;
if ( messageBody == null )
    messageBody = getBody ( ) ;
bodyPart . setContent ( messageBody , html ? "text/html" : "text/plain" ) ;
multiPart . addBodyPart ( bodyPart ) ;
// image body parts
if ( ! images . isEmpty ( ) ) {
    for ( String imageId : images . keySet ( ) ) {
        String imageFile = images . get ( imageId ) ;
        BodyPart imageBodyPart = new MimeBodyPart ( ) ;
        DataSource imageDataSource = null ;
        URL url = Thread . currentThread ( ) . getContextClassLoader ( ) . getResource ( imageFile ) ;
        if ( url == null ) {
            // Not on the classpath: fall back to the asset cache, keyed by the
            // upper-cased file extension (e.g. "IMAGE_PNG" for foo.png).
            final Asset imageAsset = AssetCache . getAsset ( imageFile , "IMAGE_" + imageFile . substring ( imageFile . lastIndexOf ( '.' ) + 1 ) . toUpperCase ( ) ) ;
            if ( imageAsset == null )
                throw new MessagingException ( "Image not found: " + imageFile ) ;
            // Adapt the asset to the DataSource interface (read-only; no output stream).
            imageDataSource = new DataSource ( ) {
                public String getContentType ( ) { return imageAsset . getContentType ( ) ; }
                public InputStream getInputStream ( ) throws IOException {
                    byte [ ] bytes = imageAsset . getContent ( ) ;
                    return new ByteArrayInputStream ( bytes ) ;
                }
                public String getName ( ) { return imageAsset . getName ( ) ; }
                public OutputStream getOutputStream ( ) throws IOException { return null ; }
            } ;
        } else {
            // load from file
            imageDataSource = new FileDataSource ( getFilePath ( url ) ) ;
        }
        imageBodyPart . setDataHandler ( new DataHandler ( imageDataSource ) ) ;
        // Content-ID lets the HTML body reference the image via cid:<imageId>.
        imageBodyPart . setHeader ( "Content-ID" , "<" + imageId + ">" ) ;
        multiPart . addBodyPart ( imageBodyPart ) ;
    }
}
// attachment body parts, one per named file
if ( ! attachments . isEmpty ( ) ) {
    for ( String name : attachments . keySet ( ) ) {
        File file = attachments . get ( name ) ;
        BodyPart attachBodyPart = new MimeBodyPart ( ) ;
        DataSource fds = new FileDataSource ( file ) ;
        attachBodyPart . setDataHandler ( new DataHandler ( fds ) ) ;
        attachBodyPart . setFileName ( name ) ;
        multiPart . addBodyPart ( attachBodyPart ) ;
    }
}
message . setContent ( multiPart ) ;
message . setSentDate ( new Date ( ) ) ;
return message ;
public class TangoMonitor { synchronized void rel_monitor ( ) { } }
// Release one level of this re-entrant monitor; when the nesting counter
// reaches zero, mark the monitor free and wake a waiting thread.
Util.out4.println("In rel_monitor(), used = " + used + ", ctr = " + locked_ctr);
if (used) { // idiom: compare booleans directly rather than '== true'
    locked_ctr--;
    if (locked_ctr == 0) {
        // Last nested lock released: free the monitor and signal one waiter.
        Util.out4.println("Signalling !");
        used = false;
        // locking_thread = NULL;
        notify();
    }
}
public class LBFGS_port { /** * Start a L - BFGS optimization . * @ param n The number of variables . * @ param x The array of variables . A client program can set * default values for the optimization and receive the * optimization result through this array . This array * must be allocated by : : lbfgs _ malloc function * for libLBFGS built with SSE / SSE2 optimization routine * enabled . The library built without SSE / SSE2 * optimization does not have such a requirement . * @ param ptr _ fx The pointer to the variable that receives the final * value of the objective function for the variables . * This argument can be set to \ c null if the final * value of the objective function is unnecessary . * @ param proc _ evaluate The callback function to provide function and * gradient evaluations given a current values of * variables . A client program must implement a * callback function compatible with \ ref * lbfgs _ evaluate _ t and pass the pointer to the * callback function . * @ param proc _ progress The callback function to receive the progress * ( the number of iterations , the current value of * the objective function ) of the minimization * process . This argument can be set to \ c null if * a progress report is unnecessary . * @ param instance A user data for the client program . The callback * functions will receive the value of this argument . * @ param param The pointer to a structure representing parameters for * L - BFGS optimization . A client program can set this * parameter to \ c null to use the default parameters . * Call lbfgs _ parameter _ init ( ) function to fill a * structure with the default values . * @ retval int The status code . This function returns zero if the * minimization process terminates without an error . A * non - zero value indicates an error . */ public static StatusCode lbfgs ( double [ ] x , MutableDouble ptr_fx , LBFGSCallback cd , LBFGSPrm _param ) { } }
/*
 * L-BFGS driver (port of libLBFGS lbfgs()): validates the parameter struct,
 * allocates the limited-memory state, then iterates line search plus the
 * two-loop recursion until the gradient-norm convergence test, the
 * past-improvement (delta) test, or max_iterations fires. The final objective
 * value is written to ptr_fx.v; the returned StatusCode reports the outcome.
 * When orthantwise_c != 0 the OW-LQN variant is used (L1-regularized, with a
 * pseudo-gradient and constrained search direction).
 */
final int n = x.length;
StatusCode ret, ls_ret;
MutableInt ls = new MutableInt(0);
int i, j, k, end, bound;
double step;
/* Constant parameters and their default values. */
LBFGSPrm param = (_param != null) ? _param : new LBFGSPrm();
final int m = param.m;
double[] xp;
double[] g, gp, pg = null;
double[] d, w, pf = null;
IterationData[] lm = null;
IterationData it = null;
double ys, yy;
double xnorm, gnorm, beta;
double fx = 0.;
double rate = 0.;
LineSearchAlgInternal linesearch_choice = LineSearchAlgInternal.morethuente;
/* Check the input parameters for errors. */
if (n <= 0) { return LBFGSERR_INVALID_N; }
if (param.epsilon < 0.) { return LBFGSERR_INVALID_EPSILON; }
if (param.past < 0) { return LBFGSERR_INVALID_TESTPERIOD; }
if (param.delta < 0.) { return LBFGSERR_INVALID_DELTA; }
if (param.min_step < 0.) { return LBFGSERR_INVALID_MINSTEP; }
if (param.max_step < param.min_step) { return LBFGSERR_INVALID_MAXSTEP; }
if (param.ftol < 0.) { return LBFGSERR_INVALID_FTOL; }
if (param.linesearch == LBFGS_LINESEARCH_BACKTRACKING_WOLFE || param.linesearch == LBFGS_LINESEARCH_BACKTRACKING_STRONG_WOLFE) {
    if (param.wolfe <= param.ftol || 1. <= param.wolfe) { return LBFGSERR_INVALID_WOLFE; }
}
if (param.gtol < 0.) { return LBFGSERR_INVALID_GTOL; }
if (param.xtol < 0.) { return LBFGSERR_INVALID_XTOL; }
if (param.max_linesearch <= 0) { return LBFGSERR_INVALID_MAXLINESEARCH; }
if (param.orthantwise_c < 0.) { return LBFGSERR_INVALID_ORTHANTWISE; }
if (param.orthantwise_start < 0 || n < param.orthantwise_start) { return LBFGSERR_INVALID_ORTHANTWISE_START; }
if (param.orthantwise_end < 0) { param.orthantwise_end = n; }
if (n < param.orthantwise_end) { return LBFGSERR_INVALID_ORTHANTWISE_END; }
// Pick the line-search implementation compatible with the chosen variant.
if (param.orthantwise_c != 0.) {
    switch (param.linesearch) {
    case LBFGS_LINESEARCH_BACKTRACKING_WOLFE:
        linesearch_choice = LineSearchAlgInternal.backtracking_owlqn;
        break;
    default:
        /* Only the backtracking method is available. */
        return LBFGSERR_INVALID_LINESEARCH;
    }
} else {
    switch (param.linesearch) {
    case LBFGS_LINESEARCH_MORETHUENTE:
        linesearch_choice = LineSearchAlgInternal.morethuente;
        break;
    case LBFGS_LINESEARCH_BACKTRACKING_ARMIJO:
    case LBFGS_LINESEARCH_BACKTRACKING_WOLFE:
    case LBFGS_LINESEARCH_BACKTRACKING_STRONG_WOLFE:
        linesearch_choice = LineSearchAlgInternal.backtracking;
        break;
    default:
        return LBFGSERR_INVALID_LINESEARCH;
    }
}
/* Allocate working space. */
xp = new double[n];
g = new double[n];
gp = new double[n];
d = new double[n];
w = new double[n];
if (param.orthantwise_c != 0.) {
    /* Allocate working space for OW-LQN. */
    pg = new double[n];
}
/* Allocate limited memory storage. */
lm = new IterationData[m];
/* Initialize the limited memory. */
for (i = 0; i < m; ++i) {
    lm[i] = new IterationData();
    it = lm[i];
    it.alpha = 0;
    it.ys = 0;
    it.s = new double[n];
    it.y = new double[n];
}
/* Allocate an array for storing previous values of the objective function. */
if (0 < param.past) { pf = new double[param.past]; }
/* Evaluate the function value and its gradient. */
fx = cd.proc_evaluate(x, g, 0);
if (0. != param.orthantwise_c) {
    /* Compute the L1 norm of the variable and add it to the object value. */
    xnorm = owlqn_x1norm(x, param.orthantwise_start, param.orthantwise_end);
    fx += xnorm * param.orthantwise_c;
    owlqn_pseudo_gradient(pg, x, g, n, param.orthantwise_c, param.orthantwise_start, param.orthantwise_end);
}
/* Store the initial value of the objective function. */
if (pf != null) { pf[0] = fx; }
/* Compute the direction; we assume the initial hessian matrix H_0 as the identity matrix. */
if (param.orthantwise_c == 0.) {
    vecncpy(d, g, n);
} else {
    vecncpy(d, pg, n);
}
/* Make sure that the initial variables are not a minimizer. */
xnorm = vec2norm(x, n);
if (param.orthantwise_c == 0.) {
    gnorm = vec2norm(g, n);
} else {
    gnorm = vec2norm(pg, n);
}
if (xnorm < 1.0) xnorm = 1.0;
if (gnorm / xnorm <= param.epsilon) {
    ptr_fx.v = fx;
    return LBFGS_ALREADY_MINIMIZED;
}
/* Compute the initial step: step = 1.0 / sqrt(vecdot(d, d, n)) */
step = vec2norminv(d, n);
k = 1;
end = 0;
// Main optimization loop: one line search + limited-memory update per pass.
for (;;) {
    /* Store the current position and gradient vectors. */
    veccpy(xp, x, n);
    veccpy(gp, g, n);
    /* Search for an optimal step. */
    MutableDouble fxRef = new MutableDouble(fx);
    MutableDouble stepRef = new MutableDouble(step);
    if (param.orthantwise_c == 0.) {
        ls_ret = linesearch(n, x, fxRef, g, d, stepRef, xp, gp, w, cd, param, ls, linesearch_choice);
    } else {
        ls_ret = linesearch(n, x, fxRef, g, d, stepRef, xp, pg, w, cd, param, ls, linesearch_choice);
        owlqn_pseudo_gradient(pg, x, g, n, param.orthantwise_c, param.orthantwise_start, param.orthantwise_end);
    }
    fx = fxRef.v;
    step = stepRef.v;
    if (ls_ret.ret < 0) {
        /* Revert to the previous point. */
        veccpy(x, xp, n);
        veccpy(g, gp, n);
        ptr_fx.v = fx;
        return ls_ret;
    }
    /* Compute x and g norms. */
    xnorm = vec2norm(x, n);
    if (param.orthantwise_c == 0.) {
        gnorm = vec2norm(g, n);
    } else {
        gnorm = vec2norm(pg, n);
    }
    /* Report the progress. */
    ret = cd.proc_progress(x, g, fx, xnorm, gnorm, step, k, ls.v);
    if (ret.ret != 0) {
        ptr_fx.v = fx;
        return ret;
    }
    /* Convergence test. The criterion is given by the following formula:
       |g(x)| / \max(1, |x|) < \epsilon */
    if (xnorm < 1.0) xnorm = 1.0;
    if (gnorm / xnorm <= param.epsilon) {
        /* Convergence. */
        ret = LBFGS_SUCCESS;
        break;
    }
    /* Test for stopping criterion. The criterion is given by the following formula:
       (f(past_x) - f(x)) / f(x) < \delta */
    if (pf != null) {
        /* We don't test the stopping criterion while k < past. */
        if (param.past <= k) {
            /* Compute the relative improvement from the past. */
            rate = (pf[k % param.past] - fx) / fx;
            /* The stopping criterion. */
            if (rate < param.delta) {
                ret = LBFGS_STOP;
                break;
            }
        }
        /* Store the current value of the objective function. */
        pf[k % param.past] = fx;
    }
    if (param.max_iterations != 0 && param.max_iterations < k + 1) {
        /* Maximum number of iterations. */
        ret = LBFGSERR_MAXIMUMITERATION;
        break;
    }
    /* Update vectors s and y:
       s_{k+1} = x_{k+1} - x_{k} = \step * d_{k}.
       y_{k+1} = g_{k+1} - g_{k}. */
    it = lm[end];
    vecdiff(it.s, x, xp, n);
    vecdiff(it.y, g, gp, n);
    /* Compute scalars ys and yy:
       ys = y^t \cdot s = 1 / \rho.
       yy = y^t \cdot y.
       Notice that yy is used for scaling the hessian matrix H_0 (Cholesky factor). */
    ys = vecdot(it.y, it.s, n);
    yy = vecdot(it.y, it.y, n);
    it.ys = ys;
    /* Recursive formula to compute dir = -(H \cdot g). This is described in page 779 of:
       Jorge Nocedal. Updating Quasi-Newton Matrices with Limited Storage.
       Mathematics of Computation, Vol. 35, No. 151, pp. 773--782, 1980. */
    bound = (m <= k) ? m : k;
    ++k;
    end = (end + 1) % m;
    /* Compute the steepest direction. */
    if (param.orthantwise_c == 0.) {
        /* Compute the negative of gradients. */
        vecncpy(d, g, n);
    } else {
        vecncpy(d, pg, n);
    }
    // Two-loop recursion, first pass (backwards through the history).
    j = end;
    for (i = 0; i < bound; ++i) {
        j = (j + m - 1) % m; /* if (--j == -1) j = m-1; */
        it = lm[j];
        /* \alpha_{j} = \rho_{j} s^{t}_{j} \cdot q_{k+1}. */
        it.alpha = vecdot(it.s, d, n);
        it.alpha /= it.ys;
        /* q_{i} = q_{i+1} - \alpha_{i} y_{i}. */
        vecadd(d, it.y, -it.alpha, n);
    }
    vecscale(d, ys / yy, n);
    // Two-loop recursion, second pass (forwards through the history).
    for (i = 0; i < bound; ++i) {
        it = lm[j];
        /* \beta_{j} = \rho_{j} y^t_{j} \cdot \gamma_{i}. */
        beta = vecdot(it.y, d, n);
        beta /= it.ys;
        /* \gamma_{i+1} = \gamma_{i} + (\alpha_{j} - \beta_{j}) s_{j}. */
        vecadd(d, it.s, it.alpha - beta, n);
        j = (j + 1) % m; /* if (++j == m) j = 0; */
    }
    /* Constrain the search direction for orthant-wise updates. */
    if (param.orthantwise_c != 0.) {
        for (i = param.orthantwise_start; i < param.orthantwise_end; ++i) {
            if (d[i] * pg[i] >= 0) { d[i] = 0; }
        }
    }
    /* Now the search direction d is ready. We try step = 1 first. */
    step = 1.0;
}
/* Return the final value of the objective function. */
ptr_fx.v = fx;
return ret;
public class StreamUtil { /** * Eats an inputstream , discarding its contents * @ param is * InputStream The input stream to read to the end of * @ return long The size of the stream */ public static long eatInputStream ( InputStream is ) { } }
try { long eaten = 0 ; try { Thread . sleep ( STREAM_SLEEP_TIME ) ; } catch ( InterruptedException e ) { // ignore } int avail = Math . min ( is . available ( ) , CHUNKSIZE ) ; byte [ ] eatingArray = new byte [ CHUNKSIZE ] ; while ( avail > 0 ) { is . read ( eatingArray , 0 , avail ) ; eaten += avail ; if ( avail < CHUNKSIZE ) { // If the buffer wasn ' t full , wait a short amount of time to let it fill up if ( STREAM_SLEEP_TIME != 0 ) try { Thread . sleep ( STREAM_SLEEP_TIME ) ; } catch ( InterruptedException e ) { // ignore } } avail = Math . min ( is . available ( ) , CHUNKSIZE ) ; } return eaten ; } catch ( IOException e ) { log . error ( e . getMessage ( ) , e ) ; return - 1 ; }
public class DataIOEditorNameTextBox { /** * Sets the invalid values for the TextBox * @ param invalidValues * @ param isCaseSensitive * @ param invalidValueErrorMessage */ public void setInvalidValues ( final Set < String > invalidValues , final boolean isCaseSensitive , final String invalidValueErrorMessage ) { } }
if ( isCaseSensitive ) { this . invalidValues = invalidValues ; } else { this . invalidValues = new HashSet < String > ( ) ; for ( String value : invalidValues ) { this . invalidValues . add ( value . toLowerCase ( ) ) ; } } this . isCaseSensitive = isCaseSensitive ; this . invalidValueErrorMessage = invalidValueErrorMessage ;
public class DocumentImpl { /** * { @ inheritDoc } * Text documents in MarkLogic cannot be cloned . * UnsupportedOperationException will be thrown if cloneNode is call on text * document . < / > * DocumentType node will not be cloned as it is not part of the Expanded * Tree . < / > */ public Node cloneNode ( boolean deep ) { } }
// Clone this document. Text (non-XML) documents cannot be cloned;
// DocumentType nodes are not part of the Expanded Tree and are not copied.
try {
    // Resolve the document type lazily on first use.
    if ( isXMLDoc == UNKNOWN_TYPE )
        isXMLDoc = getDocumentType ( ) ;
    if ( isXMLDoc == NON_XML ) {
        throw new UnsupportedOperationException ( "Text document cannot be cloned" ) ;
    }
    // initialize a new doc owner node
    initClonedOwnerDoc ( ) ;
} catch ( ParserConfigurationException e ) {
    throw new RuntimeException ( "Internal Error:" + e ) ;
}
if ( deep ) {
    // Deep clone: copy every top-level child into the new owner document.
    for ( NodeImpl n = ( NodeImpl ) getFirstChild ( ) ; n != null ; n = ( NodeImpl ) n . getNextSibling ( ) ) {
        ownerDocCloned . appendChild ( n . cloneNode ( ownerDocCloned , true ) ) ;
    }
}
return ownerDocCloned ;
public class CmsHistoryListUtil { /** * Returns the version number from a version parameter . < p > * @ param version might be negative for the online version * @ param locale if the result is for display purposes , the locale has to be < code > ! = null < / code > * @ return the display name */ public static String getDisplayVersion ( String version , Locale locale ) { } }
// Parse the raw version parameter; negative values encode online versions.
int ver = Integer.parseInt(version);
if (ver == CmsHistoryResourceHandler.PROJECT_OFFLINE_VERSION) {
    return Messages.get().getBundle(locale).key(Messages.GUI_PROJECT_OFFLINE_0);
}
if (ver < 0) {
    ver *= -1;
    if (locale != null) {
        // Integer.valueOf replaces the deprecated 'new Integer(...)' constructor.
        return Messages.get().getBundle(locale).key(Messages.GUI_PROJECT_ONLINE_1, Integer.valueOf(ver));
    }
}
// Fallback: plain numeric display (also used for online versions with no locale).
return String.valueOf(ver);
public class Snackbar { /** * Sets the text to be displayed in this { @ link Snackbar } * @ param text * @ return */ public Snackbar text ( CharSequence text ) { } }
// Remember the text; if the view is not attached yet it will be applied later.
mText = text;
if (snackbarText == null) {
    return this;
}
snackbarText.setText(mText);
return this;
public class SessionImpl { /** * Copy schema concept and all its subs labels to keyspace cache * @ param schemaConcept */ private void copyToCache ( SchemaConcept schemaConcept ) { } }
// Walk the concept together with all of its subs, caching each
// label -> labelId mapping in the keyspace cache.
schemaConcept.subs().forEach(sub -> keyspaceCache.cacheLabel(sub.label(), sub.labelId()));
public class Predicates { /** * Returns a predicate that evaluates to { @ code true } if both of its * components evaluate to { @ code true } . The components are evaluated in * order , and evaluation will be " short - circuited " as soon as a false * predicate is found . * @ param first the first * @ param second the second * @ return a predicate */ public static < T > Predicate < T > and ( Predicate < ? super T > first , Predicate < ? super T > second ) { } }
// Validate eagerly so a null predicate fails here, not at evaluation time.
checkArgNotNull ( first , "first" ) ;
checkArgNotNull ( second , "second" ) ;
// Short-circuiting conjunction over an immutable two-element component list.
return new AndPredicate < T > ( ImmutableList . < Predicate < ? super T > > of ( first , second ) ) ;
public class JobTracker { /** * Run forever */ public void offerService ( ) throws InterruptedException , IOException { } }
// Bring the JobTracker fully online: start the scheduler and the housekeeping
// daemons, open the inter-tracker RPC server, flip state to RUNNING, then
// block until the RPC server terminates.
taskScheduler . start ( ) ;
// refresh the node list as the recovery manager might have added
// disallowed trackers
refreshHosts ( ) ;
// Daemon that expires task trackers which stop heartbeating.
this . expireTrackersThread = new Thread ( this . expireTrackers , "expireTrackers" ) ;
this . expireTrackersThread . setDaemon ( true ) ;
this . expireTrackersThread . start ( ) ;
// Daemon that retires completed jobs.
this . retireJobsThread = new Thread ( this . retireJobs , "retireJobs" ) ;
this . retireJobsThread . setDaemon ( true ) ;
this . retireJobsThread . start ( ) ;
expireLaunchingTaskThread . setDaemon ( true ) ;
expireLaunchingTaskThread . start ( ) ;
jobUpdaterThread . setDaemon ( true ) ;
jobUpdaterThread . start ( ) ;
// Optional persistent store for completed-job status; note this thread is
// not marked daemon.
if ( completedJobStatusStore . isActive ( ) ) {
    completedJobsStoreThread = new Thread ( completedJobStatusStore , "completedjobsStore-housekeeper" ) ;
    completedJobsStoreThread . start ( ) ;
}
// start the inter-tracker server once the jt is ready
this . interTrackerServer . start ( ) ;
synchronized ( this ) {
    state = State . RUNNING ;
}
LOG . info ( "Starting RUNNING" ) ;
// Block the caller until the RPC server shuts down.
this . interTrackerServer . join ( ) ;
LOG . info ( "Stopped interTrackerServer" ) ;
public class AwsSecurityFindingFilters { /** * The ARN of the solution that generated a related finding . * < b > NOTE : < / b > This method appends the values to the existing list ( if any ) . Use * { @ link # setRelatedFindingsProductArn ( java . util . Collection ) } or * { @ link # withRelatedFindingsProductArn ( java . util . Collection ) } if you want to override the existing values . * @ param relatedFindingsProductArn * The ARN of the solution that generated a related finding . * @ return Returns a reference to this object so that method calls can be chained together . */ public AwsSecurityFindingFilters withRelatedFindingsProductArn ( StringFilter ... relatedFindingsProductArn ) { } }
// Lazily create the backing list sized for the incoming varargs, then
// append every filter; existing values (if any) are preserved.
if (this.relatedFindingsProductArn == null) {
    setRelatedFindingsProductArn(new java.util.ArrayList<StringFilter>(relatedFindingsProductArn.length));
}
java.util.Collections.addAll(this.relatedFindingsProductArn, relatedFindingsProductArn);
return this;
public class BinaryField { /** * Pulls out the portion of our full buffer that represents just the binary value held by the field . * Shared by both constructors to initialize the { @ link # value } field . Handles the special case of * a zero - length blob , for which no bytes at all are sent . ( Really ! The protocol requires this ! ) * @ return the portion of our byte stream that follows the tag and size . */ private ByteBuffer extractValue ( ) { } }
buffer.rewind();
if (buffer.capacity() == 0) {
    // Zero-length blob: the protocol sends no tag or size bytes at all,
    // so the whole (empty) buffer is the value.
    return buffer.slice();
}
buffer.get();     // skip the one-byte tag
buffer.getInt();  // skip the four-byte size
return buffer.slice();  // remaining bytes are exactly the binary value
public class DurableLog { /** * region AbstractService Implementation */ @ Override protected void doStart ( ) { } }
log . info ( "{}: Starting." , this . traceObjectId ) ;
// Attempt startup under the delayed-start retry policy; each attempt runs
// tryStartOnce() asynchronously on this.executor.
this . delayedStartRetry . runAsync ( ( ) -> tryStartOnce ( ) . whenComplete ( ( v , ex ) -> { if ( ex == null ) { // We are done .
// Successful start: complete the delayed-start phase normally.
notifyDelayedStartComplete ( null ) ; } else { if ( Exceptions . unwrap ( ex ) instanceof DataLogDisabledException ) { // Place the DurableLog in a Started State , but keep trying to restart .
// NOTE(review): only the DataLogDisabledException case calls
// notifyStartComplete here; the exception is still rethrown below so the
// retry policy keeps attempting the start — confirm this dual behavior is
// intentional before restructuring.
notifyStartComplete ( null ) ; }
// Rethrow so delayedStartRetry sees the failure and can schedule a retry.
throw new CompletionException ( ex ) ; } } ) , this . executor )
// If all retries are exhausted, surface the terminal failure.
. exceptionally ( this :: notifyDelayedStartComplete ) ;
public class ReactiveTypes { /** * Returns { @ literal true } if { @ code type } is a reactive wrapper type that contains no value . * @ param type must not be { @ literal null } . * @ return { @ literal true } if { @ code type } is a reactive wrapper type that contains no value . */ public static boolean isNoValueType ( Class < ? > type ) { } }
// Null-guard first, then answer true only when a descriptor exists for the
// type AND that descriptor reports a no-value reactive wrapper.
LettuceAssert.notNull(type, "Class must not be null!");
return findDescriptor(type).filter(Descriptor::isNoValue).isPresent();