signature
stringlengths
43
39.1k
implementation
stringlengths
0
450k
public class MessageEndpointFactoryImpl { /** * Indicates what version of JCA specification the RA using * this MessageEndpointFactory requires compliance with . * @ see com . ibm . ws . jca . service . WSMessageEndpointFactory # setJCAVersion ( int , int ) */ @ Override public void setJCAVersion ( int majorJCAVer , int minorJCAVer ) { } }
majorJCAVersion = majorJCAVer ; minorJCAVersion = minorJCAVer ; if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) ) { Tr . debug ( tc , "MessageEndpointFactoryImpl.setJCAVersionJCA: Version " + majorJCAVersion + "." + minorJCAVersion + " is set" ) ; }
public class RequestedGlobalProperties {
    /**
     * Parameterizes the ship strategy fields of a channel such that the channel
     * produces the desired global properties.
     *
     * @param channel the channel to parameterize
     * @param globalDopChange whether the global degree of parallelism changes across this channel
     * @param localDopChange whether the local degree of parallelism changes across this channel
     */
    public void parameterizeChannel(Channel channel, boolean globalDopChange, boolean localDopChange) {
        // If we request nothing, then we need no special strategy: forward if the number
        // of instances remains the same, randomly repartition otherwise.
        if (isTrivial()) {
            channel.setShipStrategy(globalDopChange ? ShipStrategyType.PARTITION_RANDOM : ShipStrategyType.FORWARD);
            return;
        }
        final GlobalProperties inGlobals = channel.getSource().getGlobalProperties();
        // If we have no global parallelism change, check if we already have compatible global properties.
        if (!globalDopChange && isMetBy(inGlobals)) {
            if (localDopChange) {
                // If the local degree of parallelism changes, we need to adjust.
                if (inGlobals.getPartitioning() == PartitioningProperty.HASH_PARTITIONED) {
                    // To preserve the hash partitioning, we need to locally hash re-partition.
                    channel.setShipStrategy(ShipStrategyType.PARTITION_LOCAL_HASH, inGlobals.getPartitioningFields());
                    return;
                }
                // else fall through to re-establishing the properties below
            } else {
                // We already meet everything, so go forward.
                channel.setShipStrategy(ShipStrategyType.FORWARD);
                return;
            }
        }
        // If we fall through the conditions until here, we need to re-establish the
        // requested partitioning from scratch.
        switch (this.partitioning) {
            case FULL_REPLICATION:
                channel.setShipStrategy(ShipStrategyType.BROADCAST);
                break;
            case ANY_PARTITIONING:
            case HASH_PARTITIONED:
                channel.setShipStrategy(ShipStrategyType.PARTITION_HASH, Utils.createOrderedFromSet(this.partitioningFields));
                break;
            case RANGE_PARTITIONED:
                channel.setShipStrategy(ShipStrategyType.PARTITION_RANGE, this.ordering.getInvolvedIndexes(), this.ordering.getFieldSortDirections());
                if (this.dataDistribution != null) {
                    channel.setDataDistribution(this.dataDistribution);
                }
                break;
            default:
                // No other partitioning kinds are expected here.
                throw new CompilerException();
        }
    }
}
public class LottieAnimationView { /** * Add a property callback for the specified { @ link KeyPath } . This { @ link KeyPath } can resolve * to multiple contents . In that case , the callback ' s value will apply to all of them . * Internally , this will check if the { @ link KeyPath } has already been resolved with * { @ link # resolveKeyPath ( KeyPath ) } and will resolve it if it hasn ' t . */ public < T > void addValueCallback ( KeyPath keyPath , T property , LottieValueCallback < T > callback ) { } }
lottieDrawable . addValueCallback ( keyPath , property , callback ) ;
public class SparseInstanceData {
    /**
     * Deletes an attribute at the given position (0 to numAttributes() - 1).
     *
     * @param position the attribute's position
     */
    @Override
    public void deleteAttributeAt(int position) {
        // Index of the stored (explicit) entry for this position, if any.
        // Assumes locateIndex returns the index of the entry at-or-before position,
        // or a negative value when no entry precedes it — TODO confirm.
        int index = locateIndex(position);
        this.numberAttributes--;
        if ((index >= 0) && (indexValues[index] == position)) {
            // The attribute is explicitly stored: remove its entry and shift the
            // remaining entries left, decrementing their positions by one.
            int[] tempIndices = new int[indexValues.length - 1];
            double[] tempValues = new double[attributeValues.length - 1];
            System.arraycopy(indexValues, 0, tempIndices, 0, index);
            System.arraycopy(attributeValues, 0, tempValues, 0, index);
            for (int i = index; i < indexValues.length - 1; i++) {
                tempIndices[i] = indexValues[i + 1] - 1;
                tempValues[i] = attributeValues[i + 1];
            }
            indexValues = tempIndices;
            attributeValues = tempValues;
        } else {
            // The attribute is an implicit (sparse default) value: keep all stored
            // entries, but decrement the positions of entries after the deleted one.
            int[] tempIndices = new int[indexValues.length];
            double[] tempValues = new double[attributeValues.length];
            System.arraycopy(indexValues, 0, tempIndices, 0, index + 1);
            System.arraycopy(attributeValues, 0, tempValues, 0, index + 1);
            for (int i = index + 1; i < indexValues.length; i++) {
                tempIndices[i] = indexValues[i] - 1;
                tempValues[i] = attributeValues[i];
            }
            indexValues = tempIndices;
            attributeValues = tempValues;
        }
    }
}
public class AdminTaskResource { /** * Enqueue an admin task for processing . * The request will return immediately and the task will be run in a separate thread . * The execution model depending on the implementation of the queue . * @ param taskName task name * @ return */ @ GET @ Path ( "{taskName}" ) public Response enqueueTask ( @ PathParam ( "taskName" ) String taskName ) { } }
checkNotNull ( taskName ) ; taskQueue . enqueueTask ( taskName , requestParamsProvider . get ( ) ) ; return Response . ok ( ) . build ( ) ;
public class BasicParallelSearch { /** * This algorithm consists of a single search step only , in which ( 1 ) the contained subsearches are executed in * parallel , ( 2 ) the main search waits until they terminate and ( 3 ) the main search stops . A subsearch may terminate * because it has come to its natural end , because it has active stop criteria or because the main search was * requested to stop and propagated this request to the subsearches . */ @ Override protected void searchStep ( ) { } }
// (1 ) execute subsearches in parallel searches . forEach ( s -> futures . add ( pool . submit ( s ) ) ) ; // (2 ) wait for termination of subsearches while ( ! futures . isEmpty ( ) ) { try { futures . poll ( ) . get ( ) ; } catch ( InterruptedException | ExecutionException ex ) { throw new SearchException ( "An error occured during concurrent execution of searches " + "in basic parallel search." , ex ) ; } } // (3 ) stop main search stop ( ) ;
public class DayPeriodRules {
    /**
     * Helper: assigns {@code period} to every hour in the half-open range
     * [startHour, limitHour), wrapping past midnight when startHour > limitHour.
     */
    private void add(int startHour, int limitHour, DayPeriod period) {
        // The loop variable is deliberately reset from 24 back to 0 inside the body so
        // a range such as [22, 6) wraps around midnight. The termination test runs
        // BEFORE the reset, so limitHour == 24 also terminates correctly.
        for (int i = startHour; i != limitHour; ++i) {
            if (i == 24) {
                i = 0;
            }
            dayPeriodForHour[i] = period;
        }
    }
}
public class DescribeLagsResult { /** * The LAGs . * @ param lags * The LAGs . */ public void setLags ( java . util . Collection < Lag > lags ) { } }
if ( lags == null ) { this . lags = null ; return ; } this . lags = new com . amazonaws . internal . SdkInternalList < Lag > ( lags ) ;
public class Tuple6 { /** * Split this tuple into two tuples of degree 0 and 6. */ public final Tuple2 < Tuple0 , Tuple6 < T1 , T2 , T3 , T4 , T5 , T6 > > split0 ( ) { } }
return new Tuple2 < > ( limit0 ( ) , skip0 ( ) ) ;
public class Closure { /** * Creates a caching variant of the closure with automatic cache size adjustment and lower and upper limits * on the cache size . * Whenever the closure is called , the mapping between the parameters and the return value is preserved in cache * making subsequent calls with the same arguments fast . * This variant allows the garbage collector to release entries from the cache and at the same time allows * the user to specify how many entries should be protected from the eventual gc - initiated eviction . * Cached entries exceeding the specified preservation threshold are made available for eviction based on * the LRU ( Last Recently Used ) strategy . * Given the non - deterministic nature of garbage collector , the actual cache size may grow well beyond the protected * size limits set by the user , if memory is plentiful . * Also , this variant will never exceed in size the upper size limit . Once the upper size limit has been reached , * the values in the cache start rotating using the LRU ( Last Recently Used ) strategy . * The returned function can be safely used concurrently from multiple threads , however , the implementation * values high average - scenario performance and so concurrent calls on the memoized function with identical argument values * may not necessarily be able to benefit from each other ' s cached return value . Also the protectedCacheSize parameter * might not be respected accurately in such scenarios for some periods of time . With this having been mentioned , * the performance trade - off still makes concurrent use of memoized functions safe and highly recommended . * The cache gets garbage - collected together with the memoized closure . 
* @ param protectedCacheSize Number of cached return values to protect from garbage collection * @ param maxCacheSize The maximum size the cache can grow to * @ return A new function forwarding to the original one while caching the results */ public Closure < V > memoizeBetween ( final int protectedCacheSize , final int maxCacheSize ) { } }
if ( protectedCacheSize < 0 ) throw new IllegalArgumentException ( "A non-negative number is required as the protectedCacheSize parameter for memoizeBetween." ) ; if ( maxCacheSize < 0 ) throw new IllegalArgumentException ( "A non-negative number is required as the maxCacheSize parameter for memoizeBetween." ) ; if ( protectedCacheSize > maxCacheSize ) throw new IllegalArgumentException ( "The maxCacheSize parameter to memoizeBetween is required to be greater or equal to the protectedCacheSize parameter." ) ; return Memoize . buildSoftReferenceMemoizeFunction ( protectedCacheSize , new ConcurrentSoftCache < Object , Object > ( maxCacheSize ) , this ) ;
public class IndexedSourceMapConsumer { /** * The list of original sources . */ @ Override public List < String > sources ( ) { } }
List < String > sources = new ArrayList < > ( ) ; for ( int i = 0 ; i < this . _sections . size ( ) ; i ++ ) { for ( int j = 0 ; j < this . _sections . get ( i ) . consumer . sources ( ) . size ( ) ; j ++ ) { sources . add ( this . _sections . get ( i ) . consumer . sources ( ) . get ( j ) ) ; } } return sources ;
public class MiscUtil { /** * Uses standard JDK java to read an inputstream to String using the given encoding ( in { @ link ByteArrayOutputStream # toString ( String ) } ) . */ @ Nonnull public static String readInputStreamToString ( @ Nonnull final InputStream inputStream , @ Nonnull final Charset charset ) throws IOException { } }
final BufferedInputStream bufferedInputStream = new BufferedInputStream ( inputStream ) ; final ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream ( ) ; int result = bufferedInputStream . read ( ) ; while ( result != - 1 ) { byteArrayOutputStream . write ( ( byte ) result ) ; result = bufferedInputStream . read ( ) ; } return byteArrayOutputStream . toString ( checkNonEmptyArgument ( charset , "charset" ) . name ( ) ) ;
public class HelloServlet { @ Override protected void doGet ( HttpServletRequest request , HttpServletResponse response ) throws ServletException , IOException { } }
Window window = new Window ( ) ; window . add ( new Text ( "Hello world!" ) ) ; Reflector < Component > reflector = new HtmlComponentReflector ( ) ; response . setContentType ( reflector . getContentType ( ) ) ; try { reflector . reflect ( window , response . getWriter ( ) ) ; } catch ( ReflectorException exception ) { throw new ServletException ( "Error writing component" , exception ) ; }
public class AccessControlList {
    /**
     * {@inheritDoc}
     *
     * Serialization layout: [owner byte length][owner bytes][entry count], then for
     * each entry [identity length][identity bytes][permission length][permission
     * bytes]. A null owner is written as a zero length with no bytes.
     */
    public void writeExternal(ObjectOutput out) throws IOException {
        // Writing owner.
        if (owner != null) {
            // NOTE(review): getBytes() uses the platform default charset, making the
            // wire format platform-dependent — confirm readers use the same charset.
            out.writeInt(owner.getBytes().length);
            out.write(owner.getBytes());
        } else {
            out.writeInt(0);
        }
        // Writing the number of access control entries.
        out.writeInt(accessList.size());
        for (AccessControlEntry entry : accessList) {
            // Writing the entry's identity (length-prefixed bytes).
            out.writeInt(entry.getIdentity().getBytes().length);
            out.write(entry.getIdentity().getBytes());
            // Writing the entry's permission (length-prefixed bytes).
            out.writeInt(entry.getPermission().getBytes().length);
            out.write(entry.getPermission().getBytes());
        }
    }
}
public class OntClassMention {
    /**
     * Getter for semanticTypes - gets names or IDs of associated semantic types.
     *
     * @generated
     * @return value of the feature
     */
    public StringArray getSemanticTypes() {
        // JCas-generated accessor: first verify the feature is registered on the
        // type system, then resolve the low-level reference into a StringArray.
        if (OntClassMention_Type.featOkTst && ((OntClassMention_Type) jcasType).casFeat_semanticTypes == null)
            jcasType.jcas.throwFeatMissing("semanticTypes", "de.julielab.jules.types.OntClassMention");
        return (StringArray) (jcasType.ll_cas.ll_getFSForRef(jcasType.ll_cas.ll_getRefValue(addr, ((OntClassMention_Type) jcasType).casFeatCode_semanticTypes)));
    }
}
public class Replication { /** * Sets the documents to specify as part of the replication . */ @ InterfaceAudience . Public public void setDocIds ( List < String > docIds ) { } }
properties . put ( ReplicationField . DOC_IDS , docIds ) ; replicationInternal . setDocIds ( docIds ) ;
public class UnionType { /** * Returns a more restricted union type than { @ code this } one , in which all * subtypes of { @ code type } have been removed . < p > * Examples : * < ul > * < li > { @ code ( number , string ) } restricted by { @ code number } is * { @ code string } < / li > * < li > { @ code ( null , EvalError , URIError ) } restricted by * { @ code Error } is { @ code null } < / li > * < / ul > * @ param type the supertype of the types to remove from this union type */ public JSType getRestrictedUnion ( JSType type ) { } }
UnionTypeBuilder restricted = UnionTypeBuilder . create ( registry ) ; for ( int i = 0 ; i < alternates . size ( ) ; i ++ ) { JSType t = alternates . get ( i ) ; // Keep all unknown / unresolved types . if ( t . isUnknownType ( ) || t . isNoResolvedType ( ) || ! t . isSubtypeOf ( type ) ) { restricted . addAlternate ( t ) ; } } return restricted . build ( ) ;
public class CompileUtils {
    /**
     * Prefixes each line of the given code with its 1-based line number in a block
     * comment, to make compile-error line references easier to follow.
     */
    private static String addLineNumber(String code) {
        final StringBuilder numbered = new StringBuilder();
        int lineNumber = 1;
        for (final String line : code.split("\n")) {
            numbered.append("/* ").append(lineNumber++).append(" */").append(line).append("\n");
        }
        return numbered.toString();
    }
}
public class BottomSheet { /** * Creates and returns the layout params , which should be used to show the bottom sheet ' s root * view . * @ return The layout params , which have been created , as an instance of the class { @ link * android . widget . FrameLayout . LayoutParams } */ private FrameLayout . LayoutParams createRootViewLayoutParams ( ) { } }
FrameLayout . LayoutParams layoutParams = new FrameLayout . LayoutParams ( FrameLayout . LayoutParams . MATCH_PARENT , FrameLayout . LayoutParams . MATCH_PARENT ) ; layoutParams . gravity = Gravity . BOTTOM | Gravity . CENTER_HORIZONTAL ; return layoutParams ;
public class ImageMiscOps { /** * In - place 90 degree image rotation in the counter - clockwise direction . Only works on * square images . */ public static void rotateCCW ( GrayS64 image ) { } }
if ( image . width != image . height ) throw new IllegalArgumentException ( "Image must be square" ) ; int w = image . height / 2 + image . height % 2 ; int h = image . height / 2 ; for ( int y0 = 0 ; y0 < h ; y0 ++ ) { int y1 = image . height - y0 - 1 ; for ( int x0 = 0 ; x0 < w ; x0 ++ ) { int x1 = image . width - x0 - 1 ; int index0 = image . startIndex + y0 * image . stride + x0 ; int index1 = image . startIndex + x0 * image . stride + y1 ; int index2 = image . startIndex + y1 * image . stride + x1 ; int index3 = image . startIndex + x1 * image . stride + y0 ; long tmp0 = image . data [ index0 ] ; image . data [ index0 ] = image . data [ index1 ] ; image . data [ index1 ] = image . data [ index2 ] ; image . data [ index2 ] = image . data [ index3 ] ; image . data [ index3 ] = ( long ) tmp0 ; } }
public class MPORGImpl { /** * < ! - - begin - user - doc - - > * < ! - - end - user - doc - - > * @ generated */ @ Override public Object eGet ( int featureID , boolean resolve , boolean coreType ) { } }
switch ( featureID ) { case AfplibPackage . MPORG__RG_LENGTH : return getRGLength ( ) ; case AfplibPackage . MPORG__TRIPLETS : return getTriplets ( ) ; } return super . eGet ( featureID , resolve , coreType ) ;
public class DaoTemplate { /** * 增加或者更新实体 * @ param entity 实体 , 当包含主键时更新 , 否则新增 * @ return 新增或更新条数 * @ throws SQLException SQL执行异常 */ public int addOrUpdate ( Entity entity ) throws SQLException { } }
return null == entity . get ( primaryKeyField ) ? add ( entity ) : update ( entity ) ;
public class JmsJcaActivationSpecImpl { /** * ( non - Javadoc ) * @ see com . ibm . ws . sib . api . jmsra . JmsJcaActivationSpec # setClientId ( java . lang . String ) */ @ Override public void setClientId ( final String clientId ) { } }
if ( TraceComponent . isAnyTracingEnabled ( ) && TRACE . isDebugEnabled ( ) ) { SibTr . debug ( this , TRACE , "setClientId" , clientId ) ; } _clientId = clientId ;
public class NotificationDelegater { /** * Initialization . * @ see NotificationDelegater # LOCAL * @ see NotificationDelegater # GLOBAL * @ see NotificationDelegater # REMOTE * @ param context * @ param components */ public static void initialize ( Context context , int components ) { } }
NotificationDelegater delegater = getInstance ( ) ; if ( delegater . center ( ) != null ) throw new IllegalStateException ( "NotificationDelegater already init." ) ; delegater . setContext ( context ) ; delegater . initComponents ( components ) ; Log . i ( TAG , "Notification delegater initialize" ) ;
public class OverviewPlot { /** * Initialize the plot . * @ param ratio Initial ratio */ public void initialize ( double ratio ) { } }
if ( ! ( ratio > 0 && ratio < Double . POSITIVE_INFINITY ) ) { LOG . warning ( "Invalid ratio: " + ratio , new Throwable ( ) ) ; ratio = 1.4 ; } this . ratio = ratio ; if ( plot != null ) { LOG . warning ( "Already initialized." ) ; lazyRefresh ( ) ; return ; } reinitialize ( ) ; // register context listener context . addResultListener ( this ) ; context . addVisualizationListener ( this ) ;
public class JingleSessionStateUnknown {
    /**
     * In the UNKNOWN state we received a &lt;session-initiate&gt; action.
     * This method processes that action: it ACKs the initiate, moves the session to
     * PENDING, and builds a content/media/transport negotiator per &lt;content&gt;.
     *
     * @throws SmackException
     * @throws InterruptedException
     */
    private IQ receiveSessionInitiateAction(JingleSession session, Jingle inJingle) throws SmackException, InterruptedException {
        IQ response;
        boolean shouldAck = true;
        // According to XEP-0166, on session-initiate we need to check for:
        // 1. Initiator unknown
        // 2. Receiver redirection
        // 3. Does not support Jingle
        // 4. Does not support any <description> formats
        // 5. Does not support any <transport> formats
        // If all of the above are OK then we send an IQ type=result to ACK the session-initiate.
        // 1. Initiator unknown
        // TODO
        // 2. Receiver redirection
        // TODO
        // 3. Does not support Jingle
        // Handled by Smack's lower layer.
        // 4. Does not support any <description> formats
        // TODO
        // 5. Does not support any <transport> formats
        // TODO
        if (!shouldAck) {
            // Unreachable today: shouldAck is never cleared until the TODOs above are implemented.
            response = session.createJingleError(inJingle, JingleError.NEGOTIATION_ERROR);
        } else {
            // Create the Ack and move to the PENDING state.
            response = session.createAck(inJingle);
            session.setSessionState(JingleSessionStatePending.getInstance());
            // Now set up all of the initial content negotiators for the session.
            for (JingleContent jingleContent : inJingle.getContentsList()) {
                // First create the content negotiator for this <content> section.
                ContentNegotiator contentNeg = new ContentNegotiator(session, jingleContent.getCreator(), jingleContent.getName());
                // Get the media negotiator that goes with the <description> of this content.
                JingleDescription jingleDescription = jingleContent.getDescription();
                // Loop through each media manager looking for the one that matches the incoming
                // session-initiate <content> choices.
                // (The first media manager is the default, so that if nothing matches we can still negotiate.)
                JingleMediaManager chosenMediaManager = session.getMediaManagers().get(0);
                for (JingleMediaManager mediaManager : session.getMediaManagers()) {
                    boolean matches = true;
                    // NOTE(review): this marks a manager as matching only when every
                    // manager payload id equals every description payload id — looks
                    // like it was meant to test "any pair matches"; confirm intent
                    // before changing, as later managers can overwrite earlier matches.
                    for (PayloadType mediaPayloadType : mediaManager.getPayloads()) {
                        for (PayloadType descPayloadType2 : jingleDescription.getPayloadTypesList()) {
                            if (mediaPayloadType.getId() != descPayloadType2.getId()) {
                                matches = false;
                            }
                        }
                        if (matches) {
                            chosenMediaManager = mediaManager;
                        }
                    }
                }
                // Create the media negotiator for this content description.
                contentNeg.setMediaNegotiator(new MediaNegotiator(session, chosenMediaManager, jingleDescription.getPayloadTypesList(), contentNeg));
                // For each transport type in this content, try to find the corresponding transport
                // manager, then create a transport negotiator for that transport.
                for (JingleTransport jingleTransport : jingleContent.getJingleTransportsList()) {
                    for (JingleMediaManager mediaManager : session.getMediaManagers()) {
                        JingleTransportManager transportManager = mediaManager.getTransportManager();
                        TransportResolver resolver = null;
                        try {
                            resolver = transportManager.getResolver(session);
                        } catch (XMPPException e) {
                            LOGGER.log(Level.WARNING, "exception", e);
                        }
                        // NOTE(review): if getResolver threw, resolver is null here and
                        // the next call will NPE — confirm whether that path can occur.
                        if (resolver.getType().equals(TransportResolver.Type.rawupd)) {
                            contentNeg.setTransportNegotiator(new TransportNegotiator.RawUdp(session, resolver, contentNeg));
                        }
                        if (resolver.getType().equals(TransportResolver.Type.ice)) {
                            contentNeg.setTransportNegotiator(new TransportNegotiator.Ice(session, resolver, contentNeg));
                        }
                    }
                }
                // Add the content negotiator to the session.
                session.addContentNegotiator(contentNeg);
            }
            // Now set up to track the media negotiators, so we know when (if) to send a session-accept.
            session.setupListeners();
        }
        return response;
    }
}
public class SDBaseOps { /** * Return an array with equal shape to the input , but all elements set to ' value ' * @ param name Name of the output variable * @ param in Input variable * @ param value Value to set * @ return Output variable */ public SDVariable assign ( String name , SDVariable in , Number value ) { } }
SDVariable ret = f ( ) . assign ( in , value ) ; return updateVariableNameAndReference ( ret , name ) ;
public class KernelResolverRepository { /** * Return all features and samples in the repository with the given name which don ' t apply to the installed products * @ param resourceName a short or symbolic name * @ return the features and samples which don ' t apply to the installed products */ public Collection < ApplicableToProduct > getNonApplicableResourcesForName ( String resourceName ) { } }
List < ApplicableToProduct > result = nameToNonApplicableResources . get ( resourceName ) ; if ( result == null ) { // We don ' t expect this to happen , if we ' re looking for non - applicable resources , it ' s because we failed to resolve it earlier cacheFeaturesForName ( resourceName ) ; result = nameToNonApplicableResources . get ( resourceName ) ; } if ( result == null ) { // Still null , very odd return Collections . emptySet ( ) ; } return Collections . unmodifiableList ( result ) ;
public class BusItinerary { /** * Insert newHalt after afterHalt in the ordered list of { @ link BusItineraryHalt } . * @ param afterHalt the halt where insert the new halt * @ param name name of the new halt * @ param type the type of bus halt * @ return the added bus halt , otherwise < code > null < / code > */ public BusItineraryHalt insertBusHaltAfter ( BusItineraryHalt afterHalt , String name , BusItineraryHaltType type ) { } }
return insertBusHaltAfter ( afterHalt , null , name , type ) ;
public class OrthogonalPolyLine { /** * Draws the last segment of the line to the tail . * It will take into account the correction and arrow . * Logic is applied to help draw an attractive line . Under certain conditions it will attempt to add an extra mid point . For example if you have directions * going opposite to each other , it will create a mid point so that the line goes back on itseld through this mid point . * @ param points * @ param buffer * @ param lastDirection * @ param tailDirection * @ param correction * @ param pline * @ param p0x * @ param p0y * @ param p1x * @ param p1y * @ param write * @ return */ private static int drawTail ( final Point2DArray points , final NFastDoubleArrayJSO buffer , Direction lastDirection , final Direction tailDirection , final double correction , final OrthogonalPolyLine pline , final double p0x , final double p0y , double p1x , double p1y , final boolean write ) { } }
final double tailOffset = pline . getTailOffset ( ) ; Point2D p1 = points . get ( points . size ( ) - 1 ) ; // correct for tailOffset if ( tailOffset > 0 ) { if ( ! write ) { p1 = p1 . copy ( ) ; } correctEndWithOffset ( tailOffset , tailDirection , p1 ) ; p1x = p1 . getX ( ) ; p1y = p1 . getY ( ) ; } // correct for correction if ( correction > 0 ) { // must do this off a cloned Point2D , as we still need the p1 , for the last part of the line at the end . final Point2D p1Copy = p1 . copy ( ) ; correctEndWithOffset ( correction , tailDirection , p1Copy ) ; p1x = p1Copy . getX ( ) ; p1y = p1Copy . getY ( ) ; } final double dx = ( p1x - p0x ) ; final double dy = ( p1y - p0y ) ; int corners = 0 ; boolean behind = false ; switch ( tailDirection ) { case NORTH : behind = dy < 0 ; break ; case SOUTH : behind = dy > 0 ; break ; case WEST : behind = dx < 0 ; break ; case EAST : behind = dx > 0 ; break ; case NONE : // do nothing as NONE is explicitey handled at the end break ; default : throw new IllegalStateException ( "Invalid Direction " + tailDirection ) ; } double x = p0x ; double y = p0y ; if ( behind ) { // means p0 is behind . switch ( tailDirection ) { case NORTH : case SOUTH : if ( ( ( lastDirection == NORTH ) && ( tailDirection == SOUTH ) ) || ( ( lastDirection == SOUTH ) && ( tailDirection == NORTH ) ) || ( ( dx > 0 ) && ( lastDirection == EAST ) ) || ( ( dx < 0 ) && ( lastDirection == WEST ) ) ) { // A mid point is needed to ensure an attrictive line is drawn . x = p0x + ( dx / 2 ) ; addPoint ( buffer , x , y , write ) ; if ( ( lastDirection == NORTH ) || ( lastDirection == NORTH ) ) { corners ++ ; } } y = p1y ; addPoint ( buffer , x , y , write ) ; if ( lastDirection != tailDirection ) { corners ++ ; } x = p1x ; addPoint ( buffer , x , y , write ) ; corners ++ ; y = p1 . 
getY ( ) ; addPoint ( buffer , x , y , write ) ; corners ++ ; break ; case WEST : case EAST : if ( ( ( lastDirection == WEST ) && ( tailDirection == EAST ) ) || ( ( lastDirection == EAST ) && ( tailDirection == WEST ) ) || ( ( dy > 0 ) && ( lastDirection == SOUTH ) ) || ( ( dy < 0 ) && ( lastDirection == NORTH ) ) ) { // A mid point is needed to ensure an attrictive line is drawn . y = p0y + ( dy / 2 ) ; addPoint ( buffer , x , y , write ) ; if ( ( lastDirection == EAST ) || ( lastDirection == WEST ) ) { corners ++ ; } } x = p1x ; addPoint ( buffer , x , y , write ) ; if ( lastDirection != tailDirection ) { corners ++ ; } y = p1y ; addPoint ( buffer , x , y , write ) ; corners ++ ; x = p1 . getX ( ) ; addPoint ( buffer , x , y , write ) ; corners ++ ; break ; default : throw new IllegalStateException ( "Invalid Direction " + tailDirection ) ; } } else { // means p0 is in front switch ( tailDirection ) { case NORTH : case SOUTH : if ( ( ( lastDirection == NORTH ) && ( tailDirection == SOUTH ) ) || ( ( lastDirection == SOUTH ) && ( tailDirection == NORTH ) ) || ( ( dx > 0 ) && ( lastDirection == WEST ) ) || ( ( dx < 0 ) && ( lastDirection == EAST ) ) ) { // A mid point is needed to ensure an attrictive line is drawn . y = p0y + ( dy / 2 ) ; addPoint ( buffer , x , y , write ) ; if ( ( lastDirection == EAST ) || ( lastDirection == WEST ) ) { lastDirection = ( dy < 0 ) ? NORTH : SOUTH ; corners ++ ; } } x = p1x ; addPoint ( buffer , x , y , write ) ; if ( ( lastDirection == NORTH ) || ( lastDirection == SOUTH ) ) { corners ++ ; } y = p1 . getY ( ) ; addPoint ( buffer , x , y , write ) ; corners ++ ; break ; case WEST : case EAST : if ( ( ( lastDirection == WEST ) && ( tailDirection == EAST ) ) || ( ( lastDirection == EAST ) && ( tailDirection == WEST ) ) || ( ( dy > 0 ) && ( lastDirection == NORTH ) ) || ( ( dy < 0 ) && ( lastDirection == SOUTH ) ) ) { // A mid point is needed to ensure an attrictive line is drawn . 
x = p0x + ( dx / 2 ) ; addPoint ( buffer , x , y , write ) ; if ( ( lastDirection == NORTH ) || ( lastDirection == SOUTH ) ) { lastDirection = ( dx < 0 ) ? WEST : EAST ; corners ++ ; } } y = p1y ; addPoint ( buffer , x , y , write ) ; if ( ( lastDirection == EAST ) || ( lastDirection == WEST ) ) { corners ++ ; } x = p1 . getX ( ) ; addPoint ( buffer , x , y , write ) ; corners ++ ; break ; default : throw new IllegalStateException ( "Invalid Direction " + tailDirection ) ; } } return corners ;
public class KeyVaultClientBaseImpl {
    /**
     * Retrieves a list of individual key versions with the same key name.
     * The full key identifier, attributes, and tags are provided in the response.
     * This operation requires the keys/list permission.
     *
     * @param vaultBaseUrl the vault name, for example https://myvault.vault.azure.net
     * @param keyName the name of the key
     * @param maxresults maximum number of results to return in a page; if not specified the service will return up to 25 results
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @return the PagedList&lt;KeyItem&gt; object wrapped in {@link ServiceResponse} if successful
     */
    public Observable<ServiceResponse<Page<KeyItem>>> getKeyVersionsSinglePageAsync(final String vaultBaseUrl, final String keyName, final Integer maxresults) {
        if (vaultBaseUrl == null) {
            throw new IllegalArgumentException("Parameter vaultBaseUrl is required and cannot be null.");
        }
        if (keyName == null) {
            throw new IllegalArgumentException("Parameter keyName is required and cannot be null.");
        }
        if (this.apiVersion() == null) {
            throw new IllegalArgumentException("Parameter this.apiVersion() is required and cannot be null.");
        }
        // NOTE(review): the ", " joiner producing "{vaultBaseUrl}, <value>" looks odd
        // but matches the AutoRest-generated parameterized-host pattern — confirm
        // before changing, as the service proxy parses this format.
        String parameterizedHost = Joiner.on(", ").join("{vaultBaseUrl}", vaultBaseUrl);
        // Issue the request and unwrap the paged response, converting any failure
        // into an error Observable.
        return service.getKeyVersions(keyName, maxresults, this.apiVersion(), this.acceptLanguage(), parameterizedHost, this.userAgent()).flatMap(new Func1<Response<ResponseBody>, Observable<ServiceResponse<Page<KeyItem>>>>() {
            @Override
            public Observable<ServiceResponse<Page<KeyItem>>> call(Response<ResponseBody> response) {
                try {
                    ServiceResponse<PageImpl<KeyItem>> result = getKeyVersionsDelegate(response);
                    return Observable.just(new ServiceResponse<Page<KeyItem>>(result.body(), result.response()));
                } catch (Throwable t) {
                    return Observable.error(t);
                }
            }
        });
    }
}
public class JVMUtil { /** * Returns the canonized number of the currently available Java language specification version . * The numbering scheme of the specification versions is structured as follows : * < ul > * < li > Java SE 7 : 1.7 < / li > * < li > Java SE 8 : 1.8 < / li > * < li > Java SE 9 : 9 < / li > * < li > Java SE 10 : 10 < / li > * < / ul > * This method returns : * < ul > * < li > Java SE 7 : 7 < / li > * < li > Java SE 8 : 8 < / li > * < li > Java SE 9 : 9 < / li > * < li > Java SE 10 : 10 < / li > * < / ul > * @ return the canonized java specification version * @ see RuntimeMXBean # getSpecVersion ( ) */ public static int getCanonicalSpecVersion ( ) { } }
final String version = ManagementFactory . getRuntimeMXBean ( ) . getSpecVersion ( ) ; final String [ ] split = version . split ( "\\." ) ; return split . length > 1 ? Integer . parseInt ( split [ 1 ] ) : Integer . parseInt ( split [ 0 ] ) ;
public class MangooUtils {
    /**
     * Retrieves the current version of the framework from the version.properties
     * classpath resource.
     *
     * @return the current mangoo I/O version, or the "unknown" placeholder when
     *         the properties resource cannot be read
     */
    public static String getVersion() {
        // Default to the "unknown" marker in case reading the resource fails.
        String version = Default.VERSION_UNKNOW.toString();
        try (InputStream inputStream = Resources.getResource(Default.VERSION_PROPERTIES.toString()).openStream()) {
            final Properties properties = new Properties();
            properties.load(inputStream);
            version = String.valueOf(properties.get("version"));
        } catch (final IOException e) {
            // Best-effort: log and fall back to the placeholder version.
            LOG.error("Failed to get application version", e);
        }
        return version;
    }
}
public class ConfigObject { /** * Writes this config object into a String serialized representation which can later be parsed back using the parse ( ) * method * @ see groovy . lang . Writable # writeTo ( java . io . Writer ) */ public Writer writeTo ( Writer outArg ) throws IOException { } }
BufferedWriter out = new BufferedWriter ( outArg ) ; try { writeConfig ( "" , this , out , 0 , false ) ; } finally { out . flush ( ) ; } return outArg ;
public class Ix { /** * Given a per - iterator ( ) initial value , reduces the elements of this sequence into a single * value via a reducer function . * The result ' s iterator ( ) doesn ' t support remove ( ) . * @ param < C > the reduced value type * @ param initialFactory a function called for each iterator ( ) invocation and returns the first * reduced value * @ param reducer the function called with the previous ( or initial ) reduced value and the current element * and returns a new reduced value * @ return the new Ix instance * @ throws NullPointerException if initialFactory or reducer is null * @ since 1.0 * @ see # reduce ( IxFunction2) */ public final < C > Ix < C > reduce ( IxSupplier < C > initialFactory , IxFunction2 < C , T , C > reducer ) { } }
return new IxReduce < T , C > ( this , initialFactory , reducer ) ;
public class RmiJournalReceiver {
    /**
     * Request to close the currently open journal file. Checks that:
     * <ul>
     * <li>a file is open,</li>
     * <li>we are able to close both the writer and the file.</li>
     * </ul>
     *
     * @throws JournalException if no file is open or an I/O error occurs while closing
     */
    public void closeFile() throws JournalException {
        if (journalFile == null) {
            throw logAndGetException("Attempting to close a file " + "when no file is open.");
        }
        try {
            // Close the writer first so buffered output reaches the file
            // before the file itself is closed.
            writer.close();
            journalFile.close();
        } catch (IOException e) {
            throw logAndGetException("Problem closing the file '" + journalFile.getName() + "'", e);
        }
        // NOTE(review): logged after the close completed, despite the present tense.
        logger.debug("closing file: '" + journalFile.getName() + "'");
        // Mark that no file is open any longer.
        journalFile = null;
    }
}
public class DateTimeParseContext {
    /**
     * Gets the resolved result of the parse.
     *
     * @param resolverStyle the resolver style (strict/smart/lenient) to apply
     * @param resolverFields the fields to restrict resolution to, or null for all
     * @return the result of the parse, not null
     */
    TemporalAccessor toResolved(ResolverStyle resolverStyle, Set<TemporalField> resolverFields) {
        Parsed parsed = currentParsed();
        // Fill in chronology and zone defaults from the formatter before resolving.
        parsed.chrono = getEffectiveChronology();
        parsed.zone = (parsed.zone != null ? parsed.zone : formatter.getZone());
        return parsed.resolve(resolverStyle, resolverFields);
    }
}
public class NDArrayIndex { /** * Given an all index and * the intended indexes , return an * index array containing a combination of all elements * for slicing and overriding particular indexes where necessary * @ param arr the array to resolve indexes for * @ param intendedIndexes the indexes specified by the user * @ return the resolved indexes ( containing all where nothing is specified , and the intended index * for a particular dimension otherwise ) */ public static INDArrayIndex [ ] resolve ( INDArray arr , INDArrayIndex ... intendedIndexes ) { } }
return resolve ( NDArrayIndex . allFor ( arr ) , intendedIndexes ) ;
public class ApiOvhIp {
    /**
     * AntiDDOS option. Get statistics about your traffic in and out during this mitigation.
     *
     * REST: GET /ip/{ip}/mitigation/{ipOnMitigation}/stats
     *
     * @param ip [required] the IP block
     * @param ipOnMitigation [required] the mitigated IP
     * @param from [required] start date
     * @param scale [required] scale of aggregation
     * @param to [required] end date
     * @return the list of mitigation statistics
     * @throws IOException if the HTTP call fails
     */
    public ArrayList<OvhMitigationStats> ip_mitigation_ipOnMitigation_stats_GET(String ip, String ipOnMitigation, Date from, OvhMitigationStatsScaleEnum scale, Date to) throws IOException {
        String qPath = "/ip/{ip}/mitigation/{ipOnMitigation}/stats";
        // Substitute the path parameters, then append the query parameters.
        StringBuilder sb = path(qPath, ip, ipOnMitigation);
        query(sb, "from", from);
        query(sb, "scale", scale);
        query(sb, "to", to);
        String resp = exec(qPath, "GET", sb.toString(), null);
        // t4 is the pre-built type token for ArrayList<OvhMitigationStats>.
        return convertTo(resp, t4);
    }
}
public class sslvserver { /** * Use this API to fetch all the sslvserver resources that are configured on netscaler . */ public static sslvserver [ ] get ( nitro_service service ) throws Exception { } }
sslvserver obj = new sslvserver ( ) ; sslvserver [ ] response = ( sslvserver [ ] ) obj . get_resources ( service ) ; return response ;
public class CPDefinitionOptionValueRelPersistenceImpl { /** * Removes the cp definition option value rel where CPDefinitionOptionRelId = & # 63 ; and key = & # 63 ; from the database . * @ param CPDefinitionOptionRelId the cp definition option rel ID * @ param key the key * @ return the cp definition option value rel that was removed */ @ Override public CPDefinitionOptionValueRel removeByC_K ( long CPDefinitionOptionRelId , String key ) throws NoSuchCPDefinitionOptionValueRelException { } }
CPDefinitionOptionValueRel cpDefinitionOptionValueRel = findByC_K ( CPDefinitionOptionRelId , key ) ; return remove ( cpDefinitionOptionValueRel ) ;
public class RestItemHandler {
    /**
     * Performs a bulk deletion of items, using a single {@link Session}. If any of
     * the items cannot be deleted for whatever reason, the entire operation fails
     * (nothing is persisted until the final session.save()).
     *
     * @param request the servlet request; may not be null or unauthenticated
     * @param repositoryName the URL-encoded repository name
     * @param workspaceName the URL-encoded workspace name
     * @param requestContent the JSON-encoded array of the nodes to remove
     * @return a {@code non-null} {@link Response}
     * @throws JSONException if the body of the request is not a valid JSON array
     * @throws RepositoryException if any of the JCR operations fail
     * @see RestItemHandler#deleteItem(javax.servlet.http.HttpServletRequest, String, String, String)
     */
    public Response deleteItems(HttpServletRequest request, String repositoryName, String workspaceName, String requestContent) throws JSONException, RepositoryException {
        JSONArray requestArray = stringToJSONArray(requestContent);
        if (requestArray.length() == 0) {
            // Nothing to delete — succeed trivially.
            return Response.ok().build();
        }
        Session session = getSession(request, repositoryName, workspaceName);
        // TreeSet both de-duplicates and sorts the absolute paths.
        TreeSet<String> pathsInOrder = new TreeSet<>();
        for (int i = 0; i < requestArray.length(); i++) {
            pathsInOrder.add(absPath(requestArray.get(i).toString()));
        }
        // Reverse to descending lexicographic order so a descendant path is
        // deleted before its ancestor (a parent is always a prefix, hence sorts first).
        List<String> pathsInOrderList = new ArrayList<>(pathsInOrder);
        Collections.reverse(pathsInOrderList);
        for (String path : pathsInOrderList) {
            try {
                doDelete(path, session);
            } catch (NotFoundException e) {
                // Already gone (e.g. removed as a descendant of an earlier path) — not an error.
                logger.info("Node at path {0} already deleted", path);
            }
        }
        // Persist all deletions atomically.
        session.save();
        return Response.ok().build();
    }
}
public class ResultHierarchy { /** * Informs all registered { @ link ResultListener } that a new result was added . * @ param child New child result added * @ param parent Parent result that was added to */ private void fireResultAdded ( Result child , Result parent ) { } }
if ( LOG . isDebugging ( ) ) { LOG . debug ( "Result added: " + child + " <- " + parent ) ; } for ( int i = listenerList . size ( ) ; -- i >= 0 ; ) { listenerList . get ( i ) . resultAdded ( child , parent ) ; }
public class ModbusClient { /** * pull rule */ public CreatePullRuleResponse createPullRule ( CreatePullRuleRequest request ) { } }
InternalRequest internalRequest = createRequest ( request , HttpMethodName . POST , PULL_RULE ) ; return this . invokeHttpClient ( internalRequest , CreatePullRuleResponse . class ) ;
public class CopyDither {
    /**
     * Performs a single-input/single-output dither operation, applying basic
     * Floyd-Steinberg error-diffusion to the image.
     *
     * @param pSource the source image
     * @param pDest the destination image; must use an {@link IndexColorModel} if non-null
     * @return the destination image, or a new image if {@code pDest} was {@code null}
     * @throws ImageFilterException if {@code pDest} does not use an IndexColorModel
     */
    public final BufferedImage filter(BufferedImage pSource, BufferedImage pDest) {
        // Create destination image, if none provided.
        if (pDest == null) {
            pDest = createCompatibleDestImage(pSource, getICM(pSource));
        } else if (!(pDest.getColorModel() instanceof IndexColorModel)) {
            // Dithering targets an indexed palette; anything else is unsupported.
            throw new ImageFilterException("Only IndexColorModel allowed.");
        }
        // Filter rasters in place, mapping onto the destination's palette.
        filter(pSource.getRaster(), pDest.getRaster(), (IndexColorModel) pDest.getColorModel());
        return pDest;
    }
}
public class RuleRegressionNode {
    /**
     * Anomaly detection. AMRules is equipped with anomaly detection; when enough
     * instances have been seen by the rule's perceptron, each attribute's value is
     * scored against a per-attribute normal distribution (mean/SD accumulated by
     * the perceptron) and the instance is flagged anomalous when the fraction of
     * "unlikely" attributes exceeds the multivariate threshold.
     *
     * @param instance the instance to score
     * @param uniVariateAnomalyProbabilityThreshold per-attribute probability below
     *        which an attribute is counted as anomalous (e.g. 0.10)
     * @param multiVariateAnomalyProbabilityThreshold ratio of anomalous log-mass
     *        to total log-mass above which the instance is flagged
     * @param numberOfInstanceesForAnomaly minimum instances seen before anomaly
     *        detection is attempted
     * @return true if the instance is considered an anomaly
     */
    public boolean isAnomaly(Instance instance, double uniVariateAnomalyProbabilityThreshold, double multiVariateAnomalyProbabilityThreshold, int numberOfInstanceesForAnomaly) {
        // AMRules is equipped with anomaly detection. If on, compute the anomaly value.
        long perceptronIntancesSeen = this.perceptron.getInstancesSeen();
        if (perceptronIntancesSeen >= numberOfInstanceesForAnomaly) {
            double attribSum;
            double attribSquaredSum;
            double D = 0.0; // total |log probability| mass over all attributes
            double N = 0.0; // |log probability| mass of attributes below the univariate threshold
            double anomaly;
            for (int x = 0; x < instance.numAttributes() - 1; x++) {
                // Perceptron is initialized each rule.
                // This is a local anomaly.
                int instAttIndex = modelAttIndexToInstanceAttIndex(x, instance);
                attribSum = this.perceptron.perceptronattributeStatistics.getValue(x);
                attribSquaredSum = this.perceptron.squaredperceptronattributeStatistics.getValue(x);
                double mean = attribSum / perceptronIntancesSeen;
                double sd = computeSD(attribSquaredSum, attribSum, perceptronIntancesSeen);
                double probability = computeProbability(mean, sd, instance.value(instAttIndex));
                if (probability > 0.0) {
                    D = D + Math.abs(Math.log(probability));
                    if (probability < uniVariateAnomalyProbabilityThreshold) { // 0.10
                        N = N + Math.abs(Math.log(probability));
                    }
                }
            }
            // Ratio of anomalous log-mass to total log-mass.
            anomaly = 0.0;
            if (D != 0.0) {
                anomaly = N / D;
            }
            if (anomaly >= multiVariateAnomalyProbabilityThreshold) {
                return true;
            }
        }
        return false;
    }
}
public class EntrySerializer { /** * Make sure all fields are mapped in the locale - value way . * @ param src the source to be edited . * @ param type the type to be used . * @ param context the json context to be changed . * @ return a created json element . */ @ Override public JsonElement serialize ( CMAEntry src , Type type , JsonSerializationContext context ) { } }
JsonObject fields = new JsonObject ( ) ; for ( Map . Entry < String , LinkedHashMap < String , Object > > field : src . getFields ( ) . entrySet ( ) ) { LinkedHashMap < String , Object > value = field . getValue ( ) ; if ( value == null ) { continue ; } String fieldId = field . getKey ( ) ; JsonObject jsonField = serializeField ( context , field . getValue ( ) ) ; if ( jsonField != null ) { fields . add ( fieldId , jsonField ) ; } } JsonObject result = new JsonObject ( ) ; result . add ( "fields" , fields ) ; final CMASystem sys = src . getSystem ( ) ; if ( sys != null ) { result . add ( "sys" , context . serialize ( sys ) ) ; } return result ;
public class Ifc4PackageImpl { /** * < ! - - begin - user - doc - - > * < ! - - end - user - doc - - > * @ generated */ @ Override public EClass getIfcReinforcingElement ( ) { } }
if ( ifcReinforcingElementEClass == null ) { ifcReinforcingElementEClass = ( EClass ) EPackage . Registry . INSTANCE . getEPackage ( Ifc4Package . eNS_URI ) . getEClassifiers ( ) . get ( 513 ) ; } return ifcReinforcingElementEClass ;
public class PayloadProvider { /** * Reads - - body , - - json or - - yaml data as specific class instance . */ public < T > T read ( Class < T > clazz ) { } }
try { return readInternal ( clazz ) ; } catch ( Exception e ) { if ( e instanceof RuntimeException ) { throw ( RuntimeException ) e ; } if ( e instanceof IOException ) { throw new UncheckedIOException ( ( IOException ) e ) ; } throw new RuntimeException ( e ) ; }
public class XMLBuilder {
    /**
     * Construct a builder from an existing XML document. The provided XML
     * document will be parsed and an XMLBuilder object referencing the
     * document's root element will be returned.
     *
     * @param inputSource an XML document input source that will be parsed into a DOM
     * @param enableExternalEntities enable external entities; beware of XML
     *        External Entity (XXE) injection when enabling this
     * @param isNamespaceAware enable or disable namespace awareness in the
     *        underlying {@link DocumentBuilderFactory}
     * @return a builder node that can be used to add more nodes to the XML document
     * @throws ParserConfigurationException
     * @throws FactoryConfigurationError
     * @throws IOException
     * @throws SAXException
     */
    public static XMLBuilder parse(InputSource inputSource, boolean enableExternalEntities, boolean isNamespaceAware) throws ParserConfigurationException, SAXException, IOException {
        // Parsing is delegated to the shared DOM-building helper; the builder
        // then wraps the resulting document's root element.
        return new XMLBuilder(parseDocumentImpl(inputSource, enableExternalEntities, isNamespaceAware));
    }
}
public class BaseFixData {
    /**
     * Fixes the capitalization of an all-caps string: each word is lowercased
     * except for its first letter. A string that already contains any lowercase
     * character is returned unchanged. Word boundaries are spaces and any
     * non-letter/digit character; a 'c' following 'M' also starts a new
     * "word" so names like "MCNAME" become "McName". Known abbreviations
     * (via checkAbreviations) keep their capitalization.
     *
     * @param string the string to fix; may be null
     * @return the fixed string, or the input unchanged if null or already mixed-case
     */
    public static String fixCapitalization(String string) {
        if (string != null) {
            for (int i = 0; i < string.length(); i++) {
                if (Character.isLowerCase(string.charAt(i)))
                    return string; // Already lower case
            }
            StringBuffer sb = new StringBuffer();
            // true while the next character should keep its (upper) case,
            // i.e. at the start of the string and after each word boundary.
            boolean bPreviousSpace = true;
            for (int i = 0; i < string.length(); i++) {
                Character character = string.charAt(i);
                if (!bPreviousSpace) {
                    character = Character.toLowerCase(character);
                }
                bPreviousSpace = false;
                if (Character.isSpaceChar(character))
                    bPreviousSpace = true;
                if (!Character.isLetterOrDigit(character))
                    bPreviousSpace = true;
                if (character == 'c')
                    if (i > 0)
                        if (string.charAt(i - 1) == 'M')
                            bPreviousSpace = true; // McName
                // Abbreviations (detected from the original string) keep the
                // following character uppercase as well.
                if (!bPreviousSpace)
                    bPreviousSpace = BaseFixData.checkAbreviations(string, i);
                sb.append(character);
            }
            string = sb.toString();
        }
        return string;
    }
}
public class AutoConfiguredLoadBalancerFactory {
    /**
     * Unlike a normal {@link LoadBalancer.Factory}, this accepts a full service
     * config rather than the LoadBalancingConfig. Tries each listed policy in
     * order and selects the first one with a registered provider.
     *
     * @param serviceConfig the parsed service config map; may be null
     * @return the selected policy wrapped in a ConfigOrError, an error when none
     *         of the listed policies is available or parsing fails, or null if
     *         no selection could be made (no policies listed).
     */
    @Nullable
    ConfigOrError selectLoadBalancerPolicy(Map<String, ?> serviceConfig) {
        try {
            List<LbConfig> loadBalancerConfigs = null;
            if (serviceConfig != null) {
                List<Map<String, ?>> rawLbConfigs = ServiceConfigUtil.getLoadBalancingConfigsFromServiceConfig(serviceConfig);
                loadBalancerConfigs = ServiceConfigUtil.unwrapLoadBalancingConfigList(rawLbConfigs);
            }
            if (loadBalancerConfigs != null && !loadBalancerConfigs.isEmpty()) {
                // Policies are listed in preference order; pick the first one
                // whose provider is registered.
                List<String> policiesTried = new ArrayList<>();
                for (LbConfig lbConfig : loadBalancerConfigs) {
                    String policy = lbConfig.getPolicyName();
                    LoadBalancerProvider provider = registry.getProvider(policy);
                    if (provider == null) {
                        policiesTried.add(policy);
                    } else {
                        return ConfigOrError.fromConfig(new PolicySelection(provider, /* serverList= */ null, lbConfig.getRawConfigValue()));
                    }
                }
                // Every listed policy was unavailable.
                return ConfigOrError.fromError(Status.UNKNOWN.withDescription("None of " + policiesTried + " specified by Service Config are available."));
            }
            return null;
        } catch (RuntimeException e) {
            // Malformed config — surface as an error rather than propagating.
            return ConfigOrError.fromError(Status.UNKNOWN.withDescription("can't parse load balancer configuration").withCause(e));
        }
    }
}
public class ChannelManager {
    /**
     * Returns the list of receivers for transfer envelopes produced by the channel
     * with the given source channel ID. Polls the channel lookup service (every
     * 500 ms) until the receiver is ready; successful lookups are cached.
     *
     * @param jobID the ID of the job the given channel ID belongs to
     * @param sourceChannelID the source channel ID for which the receiver list
     *        shall be retrieved
     * @param reportException when true, failures are thrown; when false, null is
     *        returned instead
     * @return the list of receivers, or <code>null</code> if the receiver could
     *         not be determined and reportException is false
     * @throws IOException if the lookup is interrupted or the receiver cannot be found
     */
    private EnvelopeReceiverList getReceiverList(JobID jobID, ChannelID sourceChannelID, boolean reportException) throws IOException {
        // Fast path: previously resolved receivers are cached.
        EnvelopeReceiverList receiverList = this.receiverCache.get(sourceChannelID);
        if (receiverList != null) {
            return receiverList;
        }
        while (true) {
            ConnectionInfoLookupResponse lookupResponse;
            synchronized (this.channelLookupService) {
                lookupResponse = this.channelLookupService.lookupConnectionInfo(this.connectionInfo, jobID, sourceChannelID);
            }
            if (lookupResponse.receiverReady()) {
                receiverList = new EnvelopeReceiverList(lookupResponse);
                break;
            } else if (lookupResponse.receiverNotReady()) {
                // Receiver not deployed yet — back off briefly and retry.
                try {
                    Thread.sleep(500);
                } catch (InterruptedException e) {
                    if (reportException) {
                        throw new IOException("Lookup was interrupted.");
                    } else {
                        return null;
                    }
                }
            } else if (lookupResponse.isJobAborting()) {
                if (reportException) {
                    throw new CancelTaskException();
                } else {
                    return null;
                }
            } else if (lookupResponse.receiverNotFound()) {
                if (reportException) {
                    throw new IOException("Could not find the receiver for Job " + jobID + ", channel with source id " + sourceChannelID);
                } else {
                    return null;
                }
            } else {
                throw new IllegalStateException("Unrecognized response to channel lookup.");
            }
        }
        // Cache the successful lookup for subsequent envelopes on this channel.
        this.receiverCache.put(sourceChannelID, receiverList);
        if (LOG.isDebugEnabled()) {
            LOG.debug(String.format("Receiver for %s: %s [%s])", sourceChannelID, receiverList.hasLocalReceiver() ? receiverList.getLocalReceiver() : receiverList.getRemoteReceiver(), receiverList.hasLocalReceiver() ? "local" : "remote"));
        }
        return receiverList;
    }
}
public class Related { /** * Overloaded version of { @ link # asTargetBy ( String ) } that uses the * { @ link org . hawkular . inventory . api . Relationships . WellKnown } as the name of the relationship . * @ param relationship the type of the relationship * @ return a new " related " filter instance */ public static Related asTargetBy ( Relationships . WellKnown relationship ) { } }
return new Related ( null , relationship . name ( ) , EntityRole . TARGET ) ;
public class CPDefinitionLocalizationUtil {
    /**
     * Returns a range of all the cp definition localizations where CPDefinitionId = &#63;.
     *
     * <p>
     * Useful when paginating results. Returns a maximum of <code>end - start</code>
     * instances. <code>start</code> and <code>end</code> are not primary keys but
     * indexes in the result set; <code>0</code> refers to the first result.
     * Setting both to {@link QueryUtil#ALL_POS} returns the full result set.
     * Without an order-by comparator the default ORDER BY logic from
     * {@link CPDefinitionLocalizationModelImpl} applies when paginating; otherwise
     * the result is sorted by primary key ascending.
     * </p>
     *
     * @param CPDefinitionId the cp definition ID
     * @param start the lower bound of the range of cp definition localizations
     * @param end the upper bound of the range (not inclusive)
     * @return the range of matching cp definition localizations
     */
    public static List<CPDefinitionLocalization> findByCPDefinitionId(long CPDefinitionId, int start, int end) {
        // Static facade — delegates to the configured persistence implementation.
        return getPersistence().findByCPDefinitionId(CPDefinitionId, start, end);
    }
}
public class Types { /** * Return the baseName of an inner class . * This should live in utilities somewhere . * @ param className the class name to modify * @ return the name before $ of a class */ public static String getBaseName ( String className ) { } }
int i = className . indexOf ( "$" ) ; if ( i == - 1 ) return className ; return className . substring ( i + 1 ) ;
public class YamlFileNetworkTopologySnitch { /** * be careful about just blindly updating ApplicationState . INTERNAL _ IP everytime we read the yaml file , * as that can cause connections to get unnecessarily reset ( via IESCS . onChange ( ) ) . */ private void maybeSetApplicationState ( ) { } }
if ( localNodeData . dcLocalAddress == null ) return ; final EndpointState es = Gossiper . instance . getEndpointStateForEndpoint ( FBUtilities . getBroadcastAddress ( ) ) ; if ( es == null ) return ; final VersionedValue vv = es . getApplicationState ( ApplicationState . INTERNAL_IP ) ; if ( ( vv != null && ! vv . value . equals ( localNodeData . dcLocalAddress . getHostAddress ( ) ) ) || vv == null ) { Gossiper . instance . addLocalApplicationState ( ApplicationState . INTERNAL_IP , StorageService . instance . valueFactory . internalIP ( localNodeData . dcLocalAddress . getHostAddress ( ) ) ) ; }
public class CompilingLoader { /** * source - extension : the path extension for source files ; defaults to * . java */ @ Configurable public void setSourceExtension ( String ext ) throws ConfigException { } }
if ( ! ext . startsWith ( "." ) ) throw new ConfigException ( L . l ( "source-extension '{0}' must begin with '.'" , ext ) ) ; _sourceExt = ext ;
public class RestClient {
    /**
     * Performs a GET request against the given path.
     *
     * @param <T> the expected response type
     * @param restPath the rest path to call
     * @param expectedResponse the class the response body is deserialized into
     * @param queryParams the query params to append, may be null/empty
     * @return the deserialized response
     */
    public <T> T get(String restPath, Class<T> expectedResponse, Map<String, String> queryParams) {
        // GET carries no request body, hence the null payload argument.
        return call(HttpMethod.GET, restPath, expectedResponse, null, queryParams);
    }
}
public class BoltIdentity { /** * - - - - - interface Identity - - - - - */ @ Override public int compareTo ( final Object o ) { } }
final long otherId = ( ( BoltIdentity ) o ) . getId ( ) ; if ( id > otherId ) { return 1 ; } if ( id < otherId ) { return - 1 ; } return 0 ;
public class BundleUtils { /** * Returns the bundle with the given symbolic name and the given version , or null if no such * bundle exists * @ param bc bundle context * @ param symbolicName bundle symbolic name * @ param version bundle version * @ return matching bundle , or null */ public static Bundle getBundle ( BundleContext bc , String symbolicName , String version ) { } }
for ( Bundle bundle : bc . getBundles ( ) ) { if ( bundle . getSymbolicName ( ) . equals ( symbolicName ) ) { if ( version == null || version . equals ( bundle . getVersion ( ) ) ) { return bundle ; } } } return null ;
public class LToFltBiFuncMemento {
    /**
     * <editor-fold desc="object">
     *
     * Null-safe argument equality for mementos: two mementos are equal when they
     * are the exact same class and both their wrapped function and last captured
     * value are equal.
     *
     * @param the one memento, may be null
     * @param that the other object, may be null
     * @return true when both are null or argument-equal
     */
    public static boolean argEquals(LToFltBiFuncMemento the, Object that) {
        return Null.<LToFltBiFuncMemento>equals(the, that, (one, two) -> {
            // Class must match exactly — a subclass is not considered equal.
            if (one.getClass() != two.getClass()) {
                return false;
            }
            LToFltBiFuncMemento other = (LToFltBiFuncMemento) two;
            // Compare the wrapped function and the last captured value as a pair.
            return LObjFltPair.argEquals(one.function, one.lastValue(), other.function, other.lastValue());
        });
    }
}
public class PrimaveraReader {
    /**
     * Parses a record containing hours and adds them to a container. The record
     * value is a '|'-separated list; when the first element is "s" the start time
     * is at index 1 and the end time at index 3, otherwise the positions are
     * swapped. Unparseable times are silently ignored.
     *
     * @param ranges hours container the parsed range is added to
     * @param hoursRecord hours record; its value may be null
     */
    private void addHours(ProjectCalendarDateRanges ranges, Record hoursRecord) {
        if (hoursRecord.getValue() != null) {
            String[] wh = hoursRecord.getValue().split("\\|");
            try {
                String startText;
                String endText;
                if (wh[0].equals("s")) {
                    // "s" marker first: start at index 1, end at index 3.
                    startText = wh[1];
                    endText = wh[3];
                } else {
                    // Otherwise the fields are in the opposite order.
                    startText = wh[3];
                    endText = wh[1];
                }
                // For the end time treat midnight as midnight next day.
                if (endText.equals("00:00")) {
                    endText = "24:00";
                }
                Date start = m_calendarTimeFormat.parse(startText);
                Date end = m_calendarTimeFormat.parse(endText);
                ranges.addRange(new DateRange(start, end));
            } catch (ParseException e) {
                // Silently ignore date parse exceptions.
            }
        }
    }
}
public class SshPublicKeyFileFactory { /** * Take a < a href = " SshPublicKey . html " > SshPublicKey < / a > and write it to a * file * @ param key * @ param comment * @ param format * @ param toFile * @ throws IOException */ public static void createFile ( SshPublicKey key , String comment , int format , File toFile ) throws IOException { } }
SshPublicKeyFile pub = create ( key , comment , format ) ; FileOutputStream out = new FileOutputStream ( toFile ) ; try { out . write ( pub . getFormattedKey ( ) ) ; out . flush ( ) ; } finally { out . close ( ) ; }
public class Transition {
    /**
     * Match start/end values by Adapter item ID. Adds matched values to
     * mStartValuesList and mEndValuesList and removes them from unmatchedStart
     * and unmatchedEnd, using startItemIds and endItemIds as a guide for which
     * Views have unique item IDs.
     *
     * @param unmatchedStart start values not yet matched, keyed by view
     * @param unmatchedEnd end values not yet matched, keyed by view
     * @param startItemIds item-id to start-view mapping
     * @param endItemIds item-id to end-view mapping
     */
    private void matchItemIds(@NonNull ArrayMap<View, TransitionValues> unmatchedStart, @NonNull ArrayMap<View, TransitionValues> unmatchedEnd, @NonNull LongSparseArray<View> startItemIds, @NonNull LongSparseArray<View> endItemIds) {
        int numStartIds = startItemIds.size();
        for (int i = 0; i < numStartIds; i++) {
            View startView = startItemIds.valueAt(i);
            if (startView != null && isValidTarget(startView)) {
                // Look up the end-scene view carrying the same item id.
                View endView = endItemIds.get(startItemIds.keyAt(i));
                if (endView != null && isValidTarget(endView)) {
                    TransitionValues startValues = unmatchedStart.get(startView);
                    TransitionValues endValues = unmatchedEnd.get(endView);
                    if (startValues != null && endValues != null) {
                        // Record the matched pair and remove them from the
                        // unmatched pools so later strategies skip them.
                        mStartValuesList.add(startValues);
                        mEndValuesList.add(endValues);
                        unmatchedStart.remove(startView);
                        unmatchedEnd.remove(endView);
                    }
                }
            }
        }
    }
}
public class AppHelper {
    /**
     * Prints a full list of actual arguments that will be used by the application
     * after interpretation of defaults and actual argument values as passed by
     * the user on the command line. Argument names are right-aligned to the
     * longest name (minimum width 24).
     *
     * @return reference to this application helper, allowing command-chaining
     */
    public AppHelper printActualUsage() {
        System.out.println("-------------------------------------------------------------------------------------");
        // Compute the alignment width from the longest argument name (>= 24).
        int maxLength = 24;
        for (Argument a : Arguments)
            if (maxLength < a.Name.length())
                maxLength = a.Name.length();
        for (Argument a : Arguments) {
            // NOTE(review): the width is maxLength - 1, so the longest name is
            // one column wider than the field — presumably intentional for the
            // trailing " : " layout; verify before changing.
            String template = "%1$" + String.valueOf(maxLength - 1) + "s : ";
            System.out.printf(template, a.Name);
            System.out.println(a.Value);
        }
        System.out.println("-------------------------------------------------------------------------------------");
        return this;
    }
}
public class ProxyConnection {
    /**
     * {@inheritDoc}
     *
     * Delegates to the wrapped connection, then records the schema locally and
     * marks the schema dirty bit so the pool can restore the original schema
     * when the connection is recycled.
     */
    @Override
    public void setSchema(String schema) throws SQLException {
        // Delegate first: if the driver rejects the schema, local state stays clean.
        delegate.setSchema(schema);
        dbschema = schema;
        dirtyBits |= DIRTY_BIT_SCHEMA;
    }
}
public class FreePool {
    /**
     * Return the mcWrapper to the free pool. A wrapper that is flagged for
     * destruction, has seen a fatal error, or has exceeded the aged timeout is
     * cleaned up and destroyed instead of being pooled; all other wrappers are
     * handed to the delegated free-pool return path.
     *
     * @pre mcWrapper != null
     */
    protected void returnToFreePool(MCWrapper mcWrapper) {
        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) {
            Tr.entry(this, tc, "returnToFreePool", gConfigProps.cfName);
        }
        // Decide whether this connection may be pooled at all.
        if (mcWrapper.shouldBeDestroyed()
                || mcWrapper.hasFatalErrorNotificationOccurred(fatalErrorNotificationTime)
                || ((pm.agedTimeout != -1) && (mcWrapper.hasAgedTimedOut(pm.agedTimeoutMillis)))) {
            if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) {
                // Trace which of the (non-exclusive) conditions applied.
                if (mcWrapper.shouldBeDestroyed()) {
                    Tr.debug(this, tc, "Connection destroy flag is set, removing connection " + mcWrapper);
                }
                if (mcWrapper.hasFatalErrorNotificationOccurred(fatalErrorNotificationTime)) {
                    Tr.debug(this, tc, "Fatal error occurred, removing connection " + mcWrapper);
                }
                if (((pm.agedTimeout != -1) && (mcWrapper.hasAgedTimedOut(pm.agedTimeoutMillis)))) {
                    Tr.debug(this, tc, "Aged timeout exceeded, removing connection " + mcWrapper);
                }
                if (mcWrapper.isDestroyState()) {
                    Tr.debug(this, tc, "Mbean method purgePoolContents with option immediate was used." + " Connection cleanup and destroy is being processed.");
                }
            }
            if (mcWrapper.isDestroyState()) {
                // Immediate purge: hand cleanup/destroy off to a background task.
                final FreePool tempFP = this;
                final MCWrapper tempMCWrapper = mcWrapper;
                ThreadSupportedCleanupAndDestroy tscd = new ThreadSupportedCleanupAndDestroy(pm.tscdList, tempFP, tempMCWrapper);
                pm.tscdList.add(tscd);
                pm.connectorSvc.execSvcRef.getServiceWithException().submit(tscd);
            } else {
                cleanupAndDestroyMCWrapper(mcWrapper); // cleanup, remove, then release mcWrapper
                // Do not return this mcWrapper back to the free pool.
                removeMCWrapperFromList(mcWrapper, _mcWrapperDoesNotExistInFreePool, _synchronizeInMethod, _notifyWaiter, _decrementTotalCounter);
            }
        } else {
            // The wrapper is healthy — pool it.
            returnToFreePoolDelegated(mcWrapper);
            // Added to help PMI stuff. Unable to inject code in if/else conditional situations.
        } // end else -- the mcWrapper is not stale
        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) {
            Tr.exit(this, tc, "returnToFreePool");
        }
    }
}
public class ClientTable { /** * Move the position of the record . * @ param iRelPosition - Relative position positive or negative or FIRST _ RECORD / LAST _ RECORD . * @ return NORMAL _ RETURN - The following are NOT mutually exclusive * @ exception DBException File exception . */ public int doMove ( int iRelPosition ) throws DBException { } }
this . checkCacheMode ( Boolean . TRUE ) ; // Make sure the cache is set up correctly for this type of query ( typically needed ) int iErrorCode = DBConstants . NORMAL_RETURN ; try { Object objData = null ; synchronized ( this . getSyncObject ( ) ) { // In case this is called from another task objData = m_tableRemote . doMove ( iRelPosition , 1 ) ; } if ( objData instanceof Vector ) { Vector < Object > data = ( Vector ) objData ; // m _ tableRemote . dataToFields ( ) ; Record recordBase = this . getRecord ( ) ; int iFieldTypes = BaseBuffer . PHYSICAL_FIELDS ; if ( ! recordBase . isAllSelected ( ) ) iFieldTypes = BaseBuffer . DATA_FIELDS ; // SELECTED _ FIELDS ; ( selected and physical ) BaseBuffer buffer = new VectorBuffer ( data , iFieldTypes ) ; if ( DBParams . RECORD . equals ( buffer . getHeader ( ) ) ) { // Warning : The target record was a multitable and This is the record name ! String strTableNames = buffer . getHeader ( ) . toString ( ) ; Utility . getLogger ( ) . warning ( "ClientTable.doMove() - Warning: Multitable needs to be specified: " + strTableNames ) ; } else buffer . resetPosition ( ) ; m_dataSource = buffer ; iErrorCode = DBConstants . NORMAL_RETURN ; } else if ( objData instanceof Number ) iErrorCode = ( ( Number ) objData ) . intValue ( ) ; else iErrorCode = DBConstants . ERROR_RETURN ; // Never return iErrorCode ; } catch ( Exception ex ) { ex . printStackTrace ( ) ; throw DatabaseException . toDatabaseException ( ex ) ; }
public class ServerBuilder { /** * Binds the specified annotated service object under the specified path prefix . * @ param exceptionHandlersAndConverters an iterable object of { @ link ExceptionHandlerFunction } , * { @ link RequestConverterFunction } and / or * { @ link ResponseConverterFunction } */ public ServerBuilder annotatedService ( String pathPrefix , Object service , Function < Service < HttpRequest , HttpResponse > , ? extends Service < HttpRequest , HttpResponse > > decorator , Iterable < ? > exceptionHandlersAndConverters ) { } }
// Validate every argument eagerly so the failing parameter is named in the NPE.
requireNonNull(pathPrefix, "pathPrefix");
requireNonNull(service, "service");
requireNonNull(decorator, "decorator");
requireNonNull(exceptionHandlersAndConverters, "exceptionHandlersAndConverters");

// Flag the default virtual host as explicitly configured, then register the
// annotated service (with its decorator and converters) on it.
defaultVirtualHostBuilderUpdated();
defaultVirtualHostBuilder.annotatedService(
        pathPrefix, service, decorator, exceptionHandlersAndConverters);

// Fluent builder: return this for chaining.
return this;
public class AssetServlet { /** * Parses a given Range header for one or more byte ranges . * @ param rangeHeader Range header to parse * @ param resourceLength Length of the resource in bytes * @ return List of parsed ranges */ private ImmutableList < ByteRange > parseRangeHeader ( final String rangeHeader , final int resourceLength ) { } }
// Parse a "Range: bytes=a-b,c-d" style header value into ByteRange objects.
// Headers without '=' (or with nothing after it) produce an empty list.
final ImmutableList.Builder<ByteRange> parsedRanges = ImmutableList.builder();
if (rangeHeader.contains("=")) {
    final String[] headerParts = rangeHeader.split("=");
    if (headerParts.length > 1) {
        // Individual ranges are comma-separated; surrounding whitespace is ignored.
        for (final String rangeSpec : Splitter.on(",").trimResults().splitToList(headerParts[1])) {
            parsedRanges.add(ByteRange.parse(rangeSpec, resourceLength));
        }
    }
}
return parsedRanges.build();
public class MachineMetricFactory { /** * Returns a subset of the given list of metrics in " defaults " and the * corresponding value of each returned metric in the subset . Note if the * custom set is empty , the full set of default machine metrics and values * will be returned . ( In particular , as in set theory , a set is a subset of * itself . ) * @ param customSet * custom machine metrics specified in the SDK metrics registry * @ param defaults * the given default list of metrics * @ param values * corresponding values of each metric in " defaults " */ private MetricValues metricValues ( Set < MachineMetric > customSet , List < MachineMetric > defaults , List < Long > values ) { } }
// Fast path: an empty custom set means "no filtering" — return the full
// default metric list and its parallel value list unchanged.
if (customSet.isEmpty()) {
    return new MetricValues(defaults, values);
}
// Filter "defaults" (and the value at the same index) down to the metrics
// the user explicitly registered; the two lists stay index-aligned.
List<MachineMetric> actualMetrics = new ArrayList<MachineMetric>();
List<Long> actualValues = new ArrayList<Long>();
for (int i = 0; i < defaults.size(); i++) {
    MachineMetric metric = defaults.get(i);
    if (customSet.contains(metric)) {
        actualMetrics.add(metric);
        actualValues.add(values.get(i));
    }
}
return new MetricValues(actualMetrics, actualValues);
public class ResourceInjectionBinding { /** * Extract the fields from the ResourceRef , and verify they match the values in the current * binding object and / or annotation exactly . If they do indeed match , add all the * InjectionTargets on the ResourceRef parameter to the current binding . The code takes into * account the possibility of duplicates InjectionTargets and will only use one in case * where they duplicated between the two ref definitions . * @ param resourceRef * @ throws InjectionException */ public void merge ( ResourceRef resourceRef ) throws InjectionException { } }
// Merge the XML <resource-ref> definition into this binding (whose current
// state came from the @Resource annotation or a prior ref). For each
// "optional" attribute (mapped-name, res-auth, res-sharing-scope, lookup-name):
// if both sides specify it and they differ, fail with CWNEN0052E; if only the
// new ref specifies it, adopt its value. Finally, fold in the ref's
// injection targets (addInjectionTarget de-duplicates).
final boolean isTraceOn = TraceComponent.isAnyTracingEnabled();
if (isTraceOn && tc.isEntryEnabled())
    Tr.entry(tc, "merge", resourceRef);

ResourceImpl curAnnotation = (ResourceImpl) this.getAnnotation();
String jndiName = resourceRef.getName();
String curJndiName = curAnnotation.name();
String mappedName = resourceRef.getMappedName();
String curMappedName = curAnnotation.mappedName();
String typeName = resourceRef.getType();
boolean curShareable = curAnnotation.shareable();
int resAuthType = resourceRef.getAuthValue();
int resSharingScope = resourceRef.getSharingScopeValue();
boolean shareable = resSharingScope == ResourceRef.SHARING_SCOPE_SHAREABLE;
AuthenticationType authenticationType = ResourceProcessor.convertAuthToEnum(resAuthType); // d543514
AuthenticationType curAuthenticationType = curAnnotation.authenticationType();
String lookup = resourceRef.getLookupName(); // F743-21028.4
if (lookup != null) {
    lookup = lookup.trim();
}

if (isTraceOn && tc.isDebugEnabled())
    Tr.debug(tc, "new=" + jndiName + ":" + mappedName + ":" + authenticationType + ":" + shareable + ":" + typeName + ":" + lookup);
if (isTraceOn && tc.isDebugEnabled())
    Tr.debug(tc, "cur=" + curJndiName + ":" + curMappedName + ":" + curAuthenticationType + ":" + curShareable + ":" + getInjectionClassTypeName() + ":" + ivLookup);

// The mappedName parameter is "optional": conflict check when both sides set
// it, otherwise adopt the XML value.
if (curAnnotation.ivIsSetMappedName && mappedName != null) {
    if (!curMappedName.equals(mappedName)) {
        Tr.error(tc, "CONFLICTING_XML_VALUES_CWNEN0052E", ivComponent, ivModule, ivApplication, "mapped-name", "resource-ref", "res-ref-name", getJndiName(), curMappedName, mappedName); // d479669
        String exMsg = "The " + ivComponent + " bean in the " + ivModule + " module of the " + ivApplication + " application has conflicting configuration data in the XML" + " deployment descriptor. Conflicting " + "mapped-name" + " element values exist for multiple " + "resource-ref" + " elements with the same " + "res-ref-name" + " element value : " + getJndiName() + ". The conflicting " + "mapped-name" + " element values are " + curMappedName + " and " + mappedName + "."; // d479669
        throw new InjectionConfigurationException(exMsg);
    }
} else if (mappedName != null && !curAnnotation.ivIsSetMappedName) {
    curAnnotation.ivMappedName = mappedName;
    curAnnotation.ivIsSetMappedName = true;
}

// res-type merge has its own conflict handling.
setXMLType(typeName, "resource-ref", "res-ref-name", "res-type"); // F743-32443

// The authenticationType parameter is "optional": AUTH_UNSPECIFIED in the ref
// means "not set in XML" and never conflicts.
if (curAnnotation.ivIsSetAuthenticationType && resAuthType != ResourceRef.AUTH_UNSPECIFIED) {
    if (!(curAnnotation.authenticationType() == authenticationType)) {
        Tr.error(tc, "CONFLICTING_XML_VALUES_CWNEN0052E", ivComponent, ivModule, ivApplication, "res-auth", "resource-ref", "res-ref-name", getJndiName(), curAnnotation.authenticationType(), authenticationType); // d479669
        String exMsg = "The " + ivComponent + " bean in the " + ivModule + " module of the " + ivApplication + " application has conflicting configuration data in the XML" + " deployment descriptor. Conflicting " + "res-auth" + " element values exist for multiple " + "resource-ref" + " elements with the same " + "res-ref-name" + " element value : " + getJndiName() + ". The conflicting " + "res-auth" + " element values are " + curAnnotation.authenticationType() + " and " + authenticationType + "."; // d479669
        throw new InjectionConfigurationException(exMsg);
    }
} else if (resAuthType != ResourceRef.AUTH_UNSPECIFIED && !curAnnotation.ivIsSetAuthenticationType) {
    curAnnotation.ivAuthenticationType = authenticationType; // d543514
    curAnnotation.ivIsSetAuthenticationType = true;
}

// The resSharingScope parameter is "optional": same conflict-or-adopt pattern.
if (curAnnotation.ivIsSetShareable && resSharingScope != ResourceRef.SHARING_SCOPE_UNSPECIFIED) {
    if (!(curAnnotation.shareable() == shareable)) {
        Tr.error(tc, "CONFLICTING_XML_VALUES_CWNEN0052E", ivComponent, ivModule, ivApplication, "res-sharing-scope", "resource-ref", "res-ref-name", getJndiName(), curAnnotation.shareable(), shareable); // d479669
        String exMsg = "The " + ivComponent + " bean in the " + ivModule + " module of the " + ivApplication + " application has conflicting configuration data in the XML" + " deployment descriptor. Conflicting " + "res-sharing-scope" + " element values exist for multiple " + "resource-ref" + " elements with the same " + "res-ref-name" + " element value : " + getJndiName() + ". The conflicting " + "res-sharing-scope" + " element values are " + curAnnotation.shareable() + " and " + shareable + "."; // d479669
        throw new InjectionConfigurationException(exMsg);
    }
} else if (resSharingScope != ResourceRef.SHARING_SCOPE_UNSPECIFIED && !curAnnotation.ivIsSetShareable) {
    curAnnotation.ivShareable = shareable;
    curAnnotation.ivIsSetShareable = true;
}

// Merge: lookup - "optional" parameter F743-21028.4
// If present in XML, even if the empty string (""), it will override
// any setting via annotations. An empty string would effectively turn
// off this setting.
// When a message-destination-ref appears multiple times in XML, an empty
// string is considered to be a conflict with a non-empty string, since
// both were explicitly specified.
if (lookup != null) {
    if (ivLookupInXml) {
        if (!lookup.equals(ivLookup)) {
            Tr.error(tc, "CONFLICTING_XML_VALUES_CWNEN0052E", ivComponent, ivModule, ivApplication, "lookup-name", "resource-ref", "res-ref-name", jndiName, ivLookup, lookup);
            String exMsg = "The " + ivComponent + " bean in the " + ivModule + " module of the " + ivApplication + " application has conflicting configuration data in the XML" + " deployment descriptor. Conflicting " + "lookup-name" + " element values exist for multiple " + "resource-ref" + " elements with the same " + "res-ref-name" + " element value : " + jndiName + ". The conflicting " + "lookup-name" + " element values are \"" + ivLookup + "\" and \"" + lookup + "\".";
            throw new InjectionConfigurationException(exMsg);
        }
    } else {
        ivLookup = lookup;
        ivLookupInXml = true;
        curAnnotation.ivLookup = lookup;
        if (isTraceOn && tc.isDebugEnabled())
            Tr.debug(tc, "ivLookup = " + ivLookup);
    }
}

// Loop through the InjectionTargets and call addInjectionTarget.... which
// already accounts for duplicates (in case they duplicated some between the two ref definitions).
List<InjectionTarget> targets = resourceRef.getInjectionTargets();
String targetName = null;
String targetClassName = null;
Class<?> injectionType = loadClass(resourceRef.getType());
if (isTraceOn && tc.isDebugEnabled())
    Tr.debug(tc, "targetType : " + injectionType);
if (!targets.isEmpty()) {
    for (InjectionTarget target : targets) {
        targetClassName = target.getInjectionTargetClassName();
        targetName = target.getInjectionTargetName();
        this.addInjectionTarget(injectionType, targetName, targetClassName);
    } // for loop
} // targets !null

if (isTraceOn && tc.isEntryEnabled())
    Tr.exit(tc, "merge", this);
import java . util . * ; class Main { /** * Combines two dictionaries into one . * Example usage : * > > > combine _ dicts ( { ' R ' : ' Red ' , ' B ' : ' Black ' , ' P ' : ' Pink ' } , { ' G ' : ' Green ' , ' W ' : ' White ' } ) * { ' B ' : ' Black ' , ' R ' : ' Red ' , ' P ' : ' Pink ' , ' G ' : ' Green ' , ' W ' : ' White ' } * > > > combine _ dicts ( { ' R ' : ' Red ' , ' B ' : ' Black ' , ' P ' : ' Pink ' } , { ' O ' : ' Orange ' , ' W ' : ' White ' , ' B ' : ' Black ' } ) * { ' O ' : ' Orange ' , ' P ' : ' Pink ' , ' B ' : ' Black ' , ' W ' : ' White ' , ' R ' : ' Red ' } * > > > combine _ dicts ( { ' G ' : ' Green ' , ' W ' : ' White ' } , { ' O ' : ' Orange ' , ' W ' : ' White ' , ' B ' : ' Black ' } ) * { ' W ' : ' White ' , ' O ' : ' Orange ' , ' G ' : ' Green ' , ' B ' : ' Black ' } */ public static Map < String , String > combineDicts ( Map < String , String > dictA , Map < String , String > dictB ) { } }
// Copy dictB first, then overlay dictA so that, on duplicate keys,
// the entry from dictA wins (matching the original putAll ordering).
Map<String, String> merged = new HashMap<>(dictB);
merged.putAll(dictA);
return merged;
public class smtp_server { /** * < pre > * Performs generic data validation for the operation to be performed * < / pre > */ protected void validate ( String operationType ) throws Exception { } }
// Run the base-class validation first, then apply per-field constraints.
super.validate(operationType);

// id: mandatory for delete and modify operations.
MPSString idCheck = new MPSString();
idCheck.setConstraintIsReq(MPSConstants.DELETE_CONSTRAINT, true);
idCheck.setConstraintIsReq(MPSConstants.MODIFY_CONSTRAINT, true);
idCheck.validate(operationType, id, "\"id\"");

// server_name: mandatory when adding; must be a valid internet host.
MPSInternetHost serverNameCheck = new MPSInternetHost();
serverNameCheck.setConstraintIsReq(MPSConstants.ADD_CONSTRAINT, true);
serverNameCheck.validate(operationType, server_name, "\"server_name\"");

// port: restricted to the 1..65534 range.
MPSInt portCheck = new MPSInt();
portCheck.setConstraintMinValue(MPSConstants.GENERIC_CONSTRAINT, 1);
portCheck.setConstraintMaxValue(MPSConstants.GENERIC_CONSTRAINT, 65534);
portCheck.validate(operationType, port, "\"port\"");

// username: limited character set, at most 128 characters.
MPSString usernameCheck = new MPSString();
usernameCheck.setConstraintCharSetRegEx(MPSConstants.GENERIC_CONSTRAINT, "[ a-zA-Z0-9_#.:@=-]+");
usernameCheck.setConstraintMaxStrLen(MPSConstants.GENERIC_CONSTRAINT, 128);
usernameCheck.validate(operationType, username, "\"username\"");

// password: at most 128 characters.
MPSString passwordCheck = new MPSString();
passwordCheck.setConstraintMaxStrLen(MPSConstants.GENERIC_CONSTRAINT, 128);
passwordCheck.validate(operationType, password, "\"password\"");

// is_auth / is_ssl: must be boolean values.
MPSBoolean isAuthCheck = new MPSBoolean();
isAuthCheck.validate(operationType, is_auth, "\"is_auth\"");
MPSBoolean isSslCheck = new MPSBoolean();
isSslCheck.validate(operationType, is_ssl, "\"is_ssl\"");
public class DefaultMetadataService { /** * Updates an entity , instance of the type based on the guid set . * @ param entityInstanceDefinition json array of entity definitions * @ return guids - json array of guids */ @ Override public CreateUpdateEntitiesResult updateEntities ( String entityInstanceDefinition ) throws AtlasException { } }
// Reject null/blank payloads before deserializing.
entityInstanceDefinition = ParamChecker.notEmpty(entityInstanceDefinition, "Entity instance definition");

// Deserialize the JSON array into typed instances, push them to the
// repository, then fire the added/updated notifications for listeners.
final ITypedReferenceableInstance[] instances = deserializeClassInstances(entityInstanceDefinition);
final CreateUpdateEntitiesResult updateResult = repository.updateEntities(instances);
onEntitiesAddedUpdated(updateResult.getEntityResult());
return updateResult;
public class AdminToolDBBrowserExampleLoader { /** * datasourceName must be set < br > * < br > * Example : < br > * < code > * { < br > * & nbsp ; & nbsp ; " datasourceName " : " datasource " , < br > * & nbsp ; & nbsp ; " cluster " : { < br > * & nbsp ; & nbsp ; & nbsp ; & nbsp ; " Maintainance " : [ { < br > * & nbsp ; & nbsp ; & nbsp ; & nbsp ; & nbsp ; & nbsp ; " statement " : " select 1 " , < br > * & nbsp ; & nbsp ; & nbsp ; & nbsp ; & nbsp ; & nbsp ; " description " : " check select " < br > * & nbsp ; & nbsp ; & nbsp ; & nbsp ; } , { < br > * & nbsp ; & nbsp ; & nbsp ; & nbsp ; & nbsp ; & nbsp ; " statement " : " select . . . " , < br > * & nbsp ; & nbsp ; & nbsp ; & nbsp ; & nbsp ; & nbsp ; " description " : " . . . " < br > * & nbsp ; & nbsp ; & nbsp ; & nbsp ; } ] < br > * & nbsp ; & nbsp ; } < br > * } < br > * < / code > * @ param jsonString * @ throws IOException * @ throws JsonMappingException * @ throws JsonParseException * @ see ExampleStatements */ public void loadExampleStatementsFromJsonString ( String jsonString ) throws JsonParseException , JsonMappingException , IOException { } }
if (LOGGER.isTraceEnabled()) {
    LOGGER.trace("Receiving json string: " + jsonString);
}
// Deserialize the JSON payload into an ExampleStatements bean and register it.
final ExampleStatements statements = new ObjectMapper().readValue(jsonString, ExampleStatements.class);
addExamples(statements);
public class JarWithFile { /** * Returns Manifest */ public Manifest getManifest ( ) throws IOException { } }
// Open the jar, read its manifest (null when no jar is available), and make
// sure the jar is released again even if getManifest() throws.
final JarFile archive = getJarFile();
try {
    return archive == null ? null : archive.getManifest();
} finally {
    closeJarFile(archive);
}
public class AdobePathBuilder { /** * The Correct Order . . . P1 , P2 , P3 , P4 , P5 , P6 ( Closed ) moveTo ( P1) * curveTo ( P1 . cpl , P2 . cpp , P2 . ap ) ; curveTo ( P2 . cpl , P3 . cppy , P3 . ap ) ; * curveTo ( P3 . cpl , P4 . cpp , P4 . ap ) ; curveTo ( P4 . cpl , P5 . cpp , P5 . ap ) ; * curveTo ( P5 . cply , P6 . cpp , P6 . ap ) ; curveTo ( P6 . cpl , P1 . cpp , P1 . ap ) ; * closePath ( ) */ private Path2D pathToShape ( final List < List < AdobePathSegment > > paths ) { } }
// Convert Adobe path segment lists into a single even-odd Path2D.
// Each inner list is one subpath; each segment carries an anchor point (ap),
// a "leaving" control point (cpl) and a "preceding" control point (cpp).
// Segment i's curve uses current.cpl plus next.cpp/next.ap, matching the
// ordering described in the method comment above.
GeneralPath path = new GeneralPath(Path2D.WIND_EVEN_ODD, paths.size());
GeneralPath subpath = null;
for (List<AdobePathSegment> points : paths) {
    int length = points.size();
    for (int i = 0; i < points.size(); i++) {
        AdobePathSegment current = points.get(i);
        // Classify the segment: 0 = first of subpath, 2 = last, 1 = interior.
        int step = i == 0 ? 0 : i == length - 1 ? 2 : 1;
        switch (step) {
            // begin: start a new subpath at the first anchor point.
            case 0: {
                subpath = new GeneralPath(Path2D.WIND_EVEN_ODD, length);
                subpath.moveTo(current.apx, current.apy);
                if (length > 1) {
                    AdobePathSegment next = points.get((i + 1));
                    subpath.curveTo(current.cplx, current.cply, next.cppx, next.cppy, next.apx, next.apy);
                } else {
                    // Degenerate one-point subpath.
                    subpath.lineTo(current.apx, current.apy);
                }
                break;
            }
            // middle: cubic curve from this anchor to the next one.
            case 1: {
                AdobePathSegment next = points.get((i + 1)); // we are always guaranteed one more.
                subpath.curveTo(current.cplx, current.cply, next.cppx, next.cppy, next.apx, next.apy);
                break;
            }
            // end: close back to the first point when the subpath is marked
            // closed, otherwise finish with a line and leave it open.
            case 2: {
                AdobePathSegment first = points.get(0);
                if (first.selector == AdobePathSegment.CLOSED_SUBPATH_BEZIER_LINKED || first.selector == AdobePathSegment.CLOSED_SUBPATH_BEZIER_UNLINKED) {
                    subpath.curveTo(current.cplx, current.cply, first.cppx, first.cppy, first.apx, first.apy);
                    subpath.closePath();
                    path.append(subpath, false);
                } else {
                    subpath.lineTo(current.apx, current.apy);
                    // NOTE(review): connect=true here joins the open subpath
                    // to the previous one — confirm this is the intended
                    // behavior for open (non-closed) Adobe subpaths.
                    path.append(subpath, true);
                }
                break;
            }
        }
    }
}
return path;
public class CPDefinitionInventoryLocalServiceBaseImpl { /** * Returns the cp definition inventory matching the UUID and group . * @ param uuid the cp definition inventory ' s UUID * @ param groupId the primary key of the group * @ return the matching cp definition inventory , or < code > null < / code > if a matching cp definition inventory could not be found */ @ Override public CPDefinitionInventory fetchCPDefinitionInventoryByUuidAndGroupId ( String uuid , long groupId ) { } }
return cpDefinitionInventoryPersistence . fetchByUUID_G ( uuid , groupId ) ;
public class OpenIdServiceResponseBuilder { /** * We sign directly ( final ' true ' ) because we don ' t add extensions * response message can be either a DirectError or an AuthSuccess here . * Note : * The association handle returned in the Response is either the ' public ' * created in a previous association , or is a ' private ' handle created * specifically for the verification step when in non - association mode * @ param service the service * @ param parameters the parameters * @ param successFullAuthentication the success full authentication * @ param id the id * @ param parameterList the parameter list * @ return response response */ protected Response buildAuthenticationResponse ( final OpenIdService service , final Map < String , String > parameters , final boolean successFullAuthentication , final String id , final ParameterList parameterList ) { } }
// Ask the server manager for the OpenID auth response, signing immediately
// (final 'true') since no extensions are added afterwards.
val openIdResponse = serverManager.authResponse(parameterList, id, id, successFullAuthentication, true);

// Fold the response parameters into the outgoing map and redirect.
parameters.putAll(openIdResponse.getParameterMap());
LOGGER.debug("Parameters passed for the OpenID response are [{}]", parameters.keySet());
return buildRedirect(service, parameters);
public class AcraContentProvider { /** * Provides file metadata * @ param uri the file uri * @ param projection any combination of { @ link OpenableColumns # DISPLAY _ NAME } and { @ link OpenableColumns # SIZE } * @ param selection ignored * @ param selectionArgs ignored * @ param sortOrder ignored * @ return file metadata in a cursor with a single row */ @ Nullable @ Override public Cursor query ( @ NonNull Uri uri , @ Nullable String [ ] projection , @ Nullable String selection , @ Nullable String [ ] selectionArgs , @ Nullable String sortOrder ) { } }
// Resolve the file behind the uri and expose its name/size as a one-row
// cursor, honoring whichever OpenableColumns the caller asked for.
if (ACRA.DEV_LOGGING) ACRA.log.d(ACRA.LOG_TAG, "Query: " + uri);
final File file = getFileForUri(uri);
if (file == null) {
    return null;
}
final String[] requestedColumns = (projection != null) ? projection : COLUMNS;
// LinkedHashMap keeps column order aligned with the requested projection.
final Map<String, Object> row = new LinkedHashMap<>();
for (final String column : requestedColumns) {
    if (column.equals(OpenableColumns.DISPLAY_NAME)) {
        row.put(OpenableColumns.DISPLAY_NAME, file.getName());
    } else if (column.equals(OpenableColumns.SIZE)) {
        row.put(OpenableColumns.SIZE, file.length());
    }
}
final MatrixCursor cursor = new MatrixCursor(row.keySet().toArray(new String[row.size()]), 1);
cursor.addRow(row.values());
return cursor;
public class Part { /** * Create a file multi - part field , from file . * This return a part equivalent to & lt ; input type = " file " / & gt ; field in multi part form . */ public static Part < File > file ( String name , File file ) { } }
return file ( name , file . getName ( ) , file ) ;
public class CmsImageLoader { /** * Returns a scaled version of the given OpenCms VFS image resource . < p > * All results are cached in disk . * If the scaled version does not exist in the cache , it is created . * Unscaled versions of the images are also stored in the cache . < p > * @ param cms the current users OpenCms context * @ param resource the base VFS resource for the image * @ param scaler the configured image scaler * @ return a scaled version of the given OpenCms VFS image resource * @ throws IOException in case of errors accessing the disk based cache * @ throws CmsException in case of errors accessing the OpenCms VFS */ protected CmsFile getScaledImage ( CmsObject cms , CmsResource resource , CmsImageScaler scaler ) throws IOException , CmsException { } }
// Build the disk-cache key from the scaler settings (null key part when the
// scaler is invalid, i.e. the unscaled original is requested).
String cacheParam = scaler.isValid() ? scaler.toString() : null;
String cacheName = m_vfsDiskCache.getCacheName(resource, cacheParam);
byte[] content = m_vfsDiskCache.getCacheContent(cacheName);
CmsFile file;
if (content != null) {
    // Cache hit: wrap the resource as a CmsFile and attach the cached bytes.
    if (resource instanceof CmsFile) {
        // the original file content must be modified (required e.g. for static export)
        file = (CmsFile) resource;
    } else {
        // this is no file, but we don't want to use "upgrade" since we don't need to read the content from the VFS
        file = new CmsFile(resource);
    }
    // save the content in the file
    file.setContents(content);
} else {
    // Cache miss: we must read the content from the VFS (if this has not been done yet)
    file = cms.readFile(resource);
    // upgrade the file (load the content)
    if (scaler.isValid()) {
        if (scaler.getType() == 8) {
            // only need the focal point for mode 8
            // NOTE(review): 8 is a magic scaler-type constant — presumably a
            // named constant exists on CmsImageScaler; confirm and reference it.
            scaler.setFocalPoint(CmsPreviewService.readFocalPoint(cms, resource));
        }
        // valid scaling parameters found, scale the content
        content = scaler.scaleImage(file);
        // exchange the content of the file with the scaled version
        file.setContents(content);
    }
    // save the file content in the cache (scaled or original, so the next
    // request with the same cacheName is served from disk)
    m_vfsDiskCache.saveCacheFile(cacheName, file.getContents());
}
return file;
public class XBooleanLiteralImpl { /** * < ! - - begin - user - doc - - > * < ! - - end - user - doc - - > * @ generated */ @ Override public Object eGet ( int featureID , boolean resolve , boolean coreType ) { } }
// EMF-generated reflective getter: answer this class's own feature directly,
// otherwise defer to the superclass implementation. Do not hand-edit logic
// here — the method is marked @generated.
switch (featureID) {
    case XbasePackage.XBOOLEAN_LITERAL__IS_TRUE:
        return isIsTrue();
}
return super.eGet(featureID, resolve, coreType);
public class AccessControlClient { /** * Return the best - matching policy for the requested document . * @ param url * URL of the requested document . * @ param captureDate * Date the document was archived . * @ param retrievalDate * Date of retrieval ( usually now ) . * @ param who * Group name of the user accessing the document . * @ return Access - control policy that should be enforced . eg " robots " , * " block " or " allow " . * @ throws RobotsUnavailableException * @ throws RuleOracleUnavailableException */ public String getPolicy ( String url , Date captureDate , Date retrievalDate , String who ) throws RobotsUnavailableException , RuleOracleUnavailableException { } }
// Look up the best-matching access rule for this capture/retrieval/user
// combination, then map that rule onto a policy string via the overload.
return getPolicy(url, getRule(url, captureDate, retrievalDate, who));
public class TransactionReplaceLogRecord { /** * Called to perform recovery action during a warm start of the ObjectManager . * @ param objectManagerState of the ObjectManager performing recovery . * @ throws ObjectManagerException */ public void performRecovery ( ObjectManagerState objectManagerState ) throws ObjectManagerException { } }
// Replays a logged "replace" operation during warm-start recovery:
// deserialize the replacement object, lock the existing one under the
// original transaction, swap in the replacement, and redo the replace.
final String methodName = "performRecovery";
if (Tracing.isAnyTracingEnabled() && trace.isEntryEnabled())
    trace.entry(this, cclass, methodName,
                new Object[] { objectManagerState, logicalUnitOfWork, new Integer(transactionState), token, new Integer(managedObjectBytes.length) });

// Recover the ManagedObject from its serialized bytes.
ManagedObject replacementManagedObject = ManagedObject.restoreFromSerializedBytes(managedObjectBytes, objectManagerState);

// Replace the object using its original transaction.
Transaction transactionForRecovery = objectManagerState.getTransaction(logicalUnitOfWork);
ManagedObject existingManagedObject = token.getManagedObject();
if (existingManagedObject == null) {
    // The object may have already been deleted from the ObjectStore,
    // so create a dummy object to keep the transaction happy.
    // The Token will have the ObjectStore and storedObjectIdentifier so that the correct delete in the
    // ObjectStore can take place.
    DummyManagedObject dummyManagedObject = new DummyManagedObject("Created by TransactionReplaceLogRecord.performRecovery()");
    dummyManagedObject.state = ManagedObject.stateReady;
    dummyManagedObject.owningToken = token;
    existingManagedObject = token.setManagedObject(dummyManagedObject);
    existingManagedObject.state = ManagedObject.stateReady;
} // if (existingManagedObject == null).

transactionForRecovery.lock(existingManagedObject);
token.setManagedObject(replacementManagedObject); // Revert to the restored managed object.
transactionForRecovery.replace(existingManagedObject); // Redo the replace.
// No need to reset the transaction state because Replace can only be executed before
// the transaction is prepared.
// transactionForRecovery.internalTransaction.resetState(transactionState);

if (Tracing.isAnyTracingEnabled() && trace.isEntryEnabled())
    trace.exit(this, cclass, methodName);
public class ApiClient { /** * < b > RESERVED FOR PARTNERS < / b > Request JWT Application Token * Configures the current instance of ApiClient with a fresh OAuth JWT access token from DocuSign * @ param clientId DocuSign OAuth Client Id ( AKA Integrator Key ) * @ param scopes the list of requested scopes . Values include { @ link OAuth # Scope _ SIGNATURE } , { @ link OAuth # Scope _ EXTENDED } , { @ link OAuth # Scope _ IMPERSONATION } . You can also pass any advanced scope . * @ param rsaPrivateKey the byte contents of the RSA private key * @ param expiresIn number of seconds remaining before the JWT assertion is considered as invalid * @ return OAuth . OAuthToken object . * @ throws IllegalArgumentException if one of the arguments is invalid * @ throws IOException if there is an issue with either the public or private file * @ throws ApiException if there is an error while exchanging the JWT with an access token */ public OAuth . OAuthToken requestJWTApplicationToken ( String clientId , java . util . List < String > scopes , byte [ ] rsaPrivateKey , long expiresIn ) throws IllegalArgumentException , IOException , ApiException { } }
return this . requestJWTUserToken ( clientId , null , scopes , rsaPrivateKey , expiresIn ) ;
public class GrpcServerFactoryAutoConfiguration { /** * Creates a GrpcServerFactory using the shaded netty . This is the recommended default for gRPC . * @ param properties The properties used to configure the server . * @ param serviceDiscoverer The discoverer used to identify the services that should be served . * @ param serverConfigurers The server configurers that contain additional configuration for the server . * @ return The shadedNettyGrpcServerFactory bean . */ @ ConditionalOnClass ( name = { } }
"io.grpc.netty.shaded.io.netty.channel.Channel",
"io.grpc.netty.shaded.io.grpc.netty.NettyServerBuilder" })
// Only created when the shaded-netty classes above are on the classpath.
@Bean
public ShadedNettyGrpcServerFactory shadedNettyGrpcServerFactory(
        final GrpcServerProperties properties,
        final GrpcServiceDiscoverer serviceDiscoverer,
        final List<GrpcServerConfigurer> serverConfigurers) {
    // Build the factory, then register every discovered gRPC service on it.
    final ShadedNettyGrpcServerFactory factory = new ShadedNettyGrpcServerFactory(properties, serverConfigurers);
    for (final GrpcServiceDefinition service : serviceDiscoverer.findGrpcServices()) {
        factory.addService(service);
    }
    return factory;
public class FieldTable { /** * Lock the current record . * This method responds differently depending on what open mode the record is in : * OPEN _ DONT _ LOCK - A physical lock is not done . This is usually where deadlocks are possible * ( such as screens ) and where transactions are in use ( and locks are not needed ) . * OPEN _ LOCK _ ON _ EDIT - Holds a lock until an update or close . ( Update crucial data , or hold records for processing ) * Returns false is someone alreay has a lock on this record . * OPEN _ WAIT _ FOR _ LOCK - Don ' t return from edit until you get a lock . ( ie . , Add to the total ) . * Returns false if someone has a hard lock or time runs out . * @ return true if successful , false is lock failed . * @ exception DBException FILE _ NOT _ OPEN * @ exception DBException INVALID _ RECORD - Record not current . * NOTE : For a remote table it is not necessary to call edit , as edit will * be called automatically by the set ( ) call . */ public int edit ( ) throws DBException { } }
// Only a record positioned on a current row (or already being edited) may be
// locked; anything else is an invalid-record error.
int editMode = this.getRecord().getEditMode();
if (editMode != Constants.EDIT_CURRENT && editMode != Constants.EDIT_IN_PROGRESS) {
    throw new DBException(Constants.INVALID_RECORD);
}
// Ask the underlying table for the lock (only supported by some remote dbs);
// on success mark the record as being edited.
int lockResult = this.doEdit();
if (lockResult == Constants.NORMAL_RETURN) {
    this.getRecord().setEditMode(Constants.EDIT_IN_PROGRESS);
}
return lockResult;
public class GitlabAPI { /** * Creates a new label . * @ param projectId The ID of the project containing the new label . * @ param name The name of the label . * @ param color The color of the label ( eg # ff0000 ) . * @ return The newly created label . * @ throws IOException on gitlab api call error */ public GitlabLabel createLabel ( Serializable projectId , String name , String color ) throws IOException { } }
// POST /projects/:id/labels with the label name and color.
final String labelsUrl = GitlabProject.URL + "/" + sanitizeProjectId(projectId) + GitlabLabel.URL;
return dispatch()
        .with("name", name)
        .with("color", color)
        .to(labelsUrl, GitlabLabel.class);
public class ForwardingRuleClient { /** * Retrieves a list of ForwardingRule resources available to the specified project and region . * < p > Sample code : * < pre > < code > * try ( ForwardingRuleClient forwardingRuleClient = ForwardingRuleClient . create ( ) ) { * ProjectRegionName region = ProjectRegionName . of ( " [ PROJECT ] " , " [ REGION ] " ) ; * for ( ForwardingRule element : forwardingRuleClient . listForwardingRules ( region . toString ( ) ) . iterateAll ( ) ) { * / / doThingsWith ( element ) ; * < / code > < / pre > * @ param region Name of the region scoping this request . * @ throws com . google . api . gax . rpc . ApiException if the remote call fails */ @ BetaApi public final ListForwardingRulesPagedResponse listForwardingRules ( String region ) { } }
ListForwardingRulesHttpRequest request = ListForwardingRulesHttpRequest . newBuilder ( ) . setRegion ( region ) . build ( ) ; return listForwardingRules ( request ) ;
public class HomographyTotalLeastSquares {
    /**
     * Computes P * P_plus * X * P. Takes in account the size of each matrix and does the multiplication in an order
     * to minimize memory requirements. A naive implementation requires a temporary array of NxN.
     *
     * @param P       N x 2 matrix, stored row-major in P.data
     * @param P_plus  pseudo-inverse related matrix; indexed here as 2 rows of length N
     *                (row 0 at data[i], row 1 at data[i + N]) — assumes a 2 x N row-major layout, TODO confirm
     * @param X       A diagonal matrix; only its packed diagonal entries are read from X.data
     * @param offsetX offset added when reading diagonal entries out of X.data
     * @param output  receives the N x 2 result; reshaped internally
     */
    static void computePPXP( DMatrixRMaj P , DMatrixRMaj P_plus , DMatrixRMaj X , int offsetX , DMatrixRMaj output ) {
        final int N = P.numRows;
        output.reshape(N,2);

        // Stage 1: output = diag(X) * P  <-- N x 2
        // NOTE(review): the diagonal value is negated here (x = -X.data[...]),
        // so this actually accumulates -diag(X)*P; the sign presumably cancels
        // or is intended downstream — confirm against the caller.
        for (int i = 0, index = 0; i < N; i++, index += 2) {
            double x = -X.data[index+offsetX];
            output.data[index] = x*P.data[index];
            output.data[index+1] = x*P.data[index+1];
        }

        // Stage 2: A = P_plus * (diag(x)*P)  <-- 2 x 2
        // P_plus row 0 is read at data[i], row 1 at data[i + N].
        double a00 = 0,a01=0,a10=0,a11=0;
        for (int i = 0, index = 0; i < N; i++, index += 2) {
            a00 += P_plus.data[i]*output.data[index];
            a01 += P_plus.data[i]*output.data[index+1];
            a10 += P_plus.data[i+N]*output.data[index];
            a11 += P_plus.data[i+N]*output.data[index+1];
        }

        // Stage 3: output = P * A  <-- N x 2, overwriting the stage-1 values in place.
        // Each row only depends on its own two entries, so in-place overwrite is safe.
        for (int i = 0, index = 0; i < N; i++, index += 2) {
            output.data[index] = P.data[index]*a00 + P.data[index+1]*a10;
            output.data[index+1] = P.data[index]*a01 + P.data[index+1]*a11;
        }
    }
}
public class HeliosClient { /** * Returns a list of all hosts registered in the Helios cluster which match the given list of * host * selectors . * For example , { @ code listHosts ( Arrays . asList ( " site = foo " ) ) } will return all agents in the * cluster whose labels match the expression { @ code site = foo } . < / p > */ public ListenableFuture < List < String > > listHosts ( final Set < String > unparsedHostSelectors ) { } }
final Multimap < String , String > query = HashMultimap . create ( ) ; query . putAll ( "selector" , unparsedHostSelectors ) ; return listHosts ( query ) ;
public class PasswordHashGenerator { /** * generate salt value by using given string . * salt was generated as following format * String format of current time + given string + hostname */ public static byte [ ] generateSalt ( String saltString ) { } }
byte [ ] output = null ; if ( saltString == null || saltString . length ( ) < 1 ) { // use randomly generated value output = new byte [ SEED_LENGTH ] ; SecureRandom rand = new SecureRandom ( ) ; rand . setSeed ( rand . generateSeed ( SEED_LENGTH ) ) ; rand . nextBytes ( output ) ; } else { try { output = saltString . getBytes ( "UTF-8" ) ; } catch ( UnsupportedEncodingException e ) { // fall back to default encoding . since the value won ' t be converted to the string , this is not an issue . output = saltString . getBytes ( ) ; } } return output ;
public class VirtualNetworkGatewaysInner {
    /**
     * The Get VpnclientIpsecParameters operation retrieves information about the vpnclient ipsec
     * policy for P2S client of virtual network gateway in the specified resource group through
     * Network resource provider.
     *
     * @param resourceGroupName The name of the resource group.
     * @param virtualNetworkGatewayName The virtual network gateway name.
     * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @return the {@link ServiceFuture} object
     */
    public ServiceFuture<VpnClientIPsecParametersInner> beginGetVpnclientIpsecParametersAsync(String resourceGroupName, String virtualNetworkGatewayName, final ServiceCallback<VpnClientIPsecParametersInner> serviceCallback) {
        // Thin adapter: delegate to the ServiceResponse-based async variant and bridge
        // its observable into a ServiceFuture that drives the supplied callback.
        return ServiceFuture.fromResponse(beginGetVpnclientIpsecParametersWithServiceResponseAsync(resourceGroupName, virtualNetworkGatewayName), serviceCallback);
    }
}
public class Consumers { /** * Yields the first element of the array . * @ param < E > the element type parameter * @ param array the array to be searched * @ throws IllegalArgumentException if no element matches * @ return the found element */ public static < E > E first ( E [ ] array ) { } }
return new FirstElement < E > ( ) . apply ( new ArrayIterator < > ( array ) ) ;
public class ReadabilityStatistics { /** * Returns total syllable count for text . * @ param strText Text to be measured * @ return */ private static int totalSyllables ( String strText ) { } }
int intSyllableCount = 0 ; List < String > arrWords = ( new WhitespaceTokenizer ( ) ) . tokenize ( strText ) ; int intWordCount = arrWords . size ( ) ; for ( int i = 0 ; i < intWordCount ; ++ i ) { intSyllableCount += syllableCount ( arrWords . get ( i ) ) ; } return intSyllableCount ;
public class Problem {
    /**
     * Custom readObject method that registers this object as a deserialization validator.
     *
     * @param stream {@link ObjectInputStream} to register this validator to.
     * @throws OptionalDataException If any error occurs while reading the object.
     * @throws ClassNotFoundException If the default readObject call cannot find a required class.
     * @throws IOException If any error occurs while reading the object.
     */
    private void readObject(final ObjectInputStream stream) throws OptionalDataException, ClassNotFoundException, IOException {
        // Registration must happen BEFORE defaultReadObject(): the stream only
        // invokes registered validators once the whole graph has been read, and
        // registering first guarantees this object is on the list by then.
        // Priority 0 — no ordering preference relative to other validators.
        stream.registerValidation(this, 0);
        stream.defaultReadObject();
    }
}
public class GeneratingExpression { /** * As the regular expression was distributed in separate node , every node has its own range . * This method returns an array containing all range objects . * @ return the separate range objects for all sub - nodes of this expression */ public Range [ ] getRanges ( ) { } }
Range [ ] ranges = new Range [ nodes . size ( ) ] ; for ( int i = 0 ; i < ranges . length ; i ++ ) { ranges [ i ] = nodes . get ( i ) . getRange ( ) ; } return ranges ;