signature
stringlengths
43
39.1k
implementation
stringlengths
0
450k
public class StatusBenchmark { /** * Javadoc comment . */ @ Benchmark @ BenchmarkMode ( Mode . SampleTime ) @ OutputTimeUnit ( TimeUnit . NANOSECONDS ) public byte [ ] messageEncodePlain ( ) { } }
return Status . MESSAGE_KEY . toBytes ( "Unexpected RST in stream" ) ;
public class VTProd { /** * Forward pass : p ( y ) = \ prod _ a \ psi ( y _ a ) where y _ a is a { @ link VarTensor } and we use variable - tensor product . */ @ Override public VarTensor forward ( ) { } }
MVecArray < VarTensor > xs = modIn . getOutput ( ) ; y = new VarTensor ( s , new VarSet ( ) , s . one ( ) ) ; for ( int a = 0 ; a < xs . dim ( ) ; a ++ ) { VarTensor x = xs . get ( a ) ; y . prod ( x ) ; } return y ;
public class URLEncodedUtils { /** * Returns a list of { @ link NameValuePair NameValuePairs } as deserialized from the given string * using the given character encoding . * @ param s * text to parse . * @ param charset * Encoding to use when decoding the parameters . * @ since 4.2 */ public static List < NameValuePair > parse ( final String s , final Charset charset ) { } }
if ( s == null ) { return Collections . emptyList ( ) ; } BasicHeaderValueParser deserializer = BasicHeaderValueParser . DEFAULT ; CharArrayBuffer buffer = new CharArrayBuffer ( s . length ( ) ) ; buffer . append ( s ) ; ParserCursor cursor = new ParserCursor ( 0 , buffer . length ( ) ) ; List < NameValuePair > list = new ArrayList < NameValuePair > ( ) ; while ( ! cursor . atEnd ( ) ) { NameValuePair nvp = deserializer . parseNameValuePair ( buffer , cursor , DELIM ) ; if ( nvp . getName ( ) . length ( ) > 0 ) { list . add ( new BasicNameValuePair ( decodeFormFields ( nvp . getName ( ) , charset ) , decodeFormFields ( nvp . getValue ( ) , charset ) ) ) ; } } return list ;
public class TypeConversion { /** * Writes a long to the byte array in the Varint format . * @ param v - long value to write to the bytes buffer in the Varint format * @ param bytes - byte buffer to write to - must contain enough space for maximum * length which is 10 bytes . * @ param offset - the offset within the bytes buffer to start writing * @ return - returns the number of bytes used from the bytes buffer */ public static int writeLongAsVarIntBytes ( long v , byte [ ] bytes , int offest ) { } }
int pos = offest ; while ( true ) { if ( ( v & ~ 0x7FL ) == 0 ) { bytes [ pos ++ ] = ( ( byte ) v ) ; return pos ; } else { bytes [ pos ++ ] = ( byte ) ( ( v & 0x7F ) | 0x80 ) ; v >>>= 7 ; } }
public class SrvGraphicClass { /** * Utils : */ public void evalHeightClass ( DRI di , SD ds , CL ge ) { } }
double heightAttrOper = getHeightAttributesBox ( ge ) + getHeightOperationsBox ( ge ) ; double height = getSettingsGraphic ( ) . getHeightHeadClass ( ) + heightAttrOper ; if ( height > ge . getHeight ( ) || ( ge . getIsAdjustMinimumSize ( ) && height < ge . getHeight ( ) ) || heightAttrOper > 0 && height != ge . getHeight ( ) ) { ge . setHeight ( height ) ; }
public class PercentNumberFormatTextValue { /** * Check string . * @ param value * the value * @ return the integer */ private Integer checkString ( final String value ) { } }
Integer val = 50 ; if ( value != null && ! value . isEmpty ( ) ) { if ( value . endsWith ( "%" ) ) { final String sVal = value . substring ( 0 , value . length ( ) - 1 ) ; if ( StringUtils . isNumeric ( sVal ) ) { val = Integer . valueOf ( sVal ) ; } } else { if ( StringUtils . isNumeric ( value ) ) { val = Integer . valueOf ( value ) ; } } } return val ;
public class Interaction { /** * Get transaction * @ return The value */ public String getTransaction ( ) { } }
if ( transaction == null ) { TraceEvent transactionEvent = TraceEventHelper . getType ( events , TraceEvent . ENLIST_CONNECTION_LISTENER , TraceEvent . ENLIST_CONNECTION_LISTENER_FAILED , TraceEvent . ENLIST_INTERLEAVING_CONNECTION_LISTENER , TraceEvent . ENLIST_INTERLEAVING_CONNECTION_LISTENER_FAILED ) ; if ( transactionEvent != null ) { transaction = transactionEvent . getPayload1 ( ) ; } else { transaction = "" ; } } if ( ! "" . equals ( transaction ) ) return transaction ; return null ;
public class MetadataProviderImpl { /** * Get or create a { @ link RelationTypeMetadata } instance for the given * { @ link AnnotatedType } . * @ param annotatedType * The { @ link AnnotatedType } . * @ param superTypes * The metadata collection of the super types . * @ param methodMetadataOfType * The method metadata of the type . * @ return The { @ link RelationTypeMetadata } instance representing the annotated * type . */ private RelationTypeMetadata < RelationMetadata > createRelationTypeMetadata ( AnnotatedType annotatedType , List < TypeMetadata > superTypes , Collection < MethodMetadata < ? , ? > > methodMetadataOfType ) { } }
Class < ? > fromType = null ; Class < ? > toType = null ; Collection < MethodMetadata < ? , ? > > current = methodMetadataOfType ; Queue < TypeMetadata > queue = new LinkedList < > ( superTypes ) ; // Starting from the type to be created search all its properties and // those of its super types for reference properties defining the from // and to entity types do { for ( MethodMetadata < ? , ? > methodMetadata : current ) { if ( methodMetadata instanceof EntityReferencePropertyMethodMetadata ) { EntityReferencePropertyMethodMetadata < ? > propertyMethodMetadata = ( EntityReferencePropertyMethodMetadata < ? > ) methodMetadata ; Class < ? > type = propertyMethodMetadata . getAnnotatedMethod ( ) . getType ( ) ; switch ( propertyMethodMetadata . getDirection ( ) ) { case FROM : fromType = type ; break ; case TO : toType = type ; break ; default : throw propertyMethodMetadata . getDirection ( ) . createNotSupportedException ( ) ; } } } TypeMetadata superType = queue . poll ( ) ; if ( superType != null ) { queue . addAll ( superType . getSuperTypes ( ) ) ; current = superType . getProperties ( ) ; } else { current = null ; } } while ( current != null && ( fromType == null || toType == null ) ) ; if ( fromType == null || toType == null ) { throw new XOException ( "Relation type '" + annotatedType . getAnnotatedElement ( ) . getName ( ) + "' does not define target entity properties for both directions." ) ; } RelationMetadata relationMetadata = metadataFactory . createRelationMetadata ( annotatedType , metadataByType ) ; RelationTypeMetadata < RelationMetadata > relationTypeMetadata = new RelationTypeMetadata < > ( annotatedType , superTypes , methodMetadataOfType , fromType , toType , relationMetadata ) ; metadataByType . put ( annotatedType . getAnnotatedElement ( ) , relationTypeMetadata ) ; return relationTypeMetadata ;
public class CollectionUtils { /** * Converts the specified { @ link Collection collection } to a { @ link JSONArray JSON array } . * @ param < T > the type of elements maintained by the specified collection * @ param collection the specified collection * @ return a { @ link JSONArray JSON array } */ public static < T > JSONArray toJSONArray ( final Collection < T > collection ) { } }
final JSONArray ret = new JSONArray ( ) ; if ( null == collection ) { return ret ; } for ( final T object : collection ) { ret . put ( object ) ; } return ret ;
public class KidnummerValidator { /** * Return true if the provided String is a valid KID - nummmer . * @ param kidnummer A String containing a Kidnummer * @ return true or false */ public static boolean isValid ( String kidnummer ) { } }
try { KidnummerValidator . getKidnummer ( kidnummer ) ; return true ; } catch ( IllegalArgumentException e ) { return false ; }
public class Configuration { /** * Use { @ link # balancingStrategyFactory ( ) } instead . * @ deprecated since 9.3 */ @ Deprecated public org . infinispan . client . hotrod . impl . transport . tcp . FailoverRequestBalancingStrategy balancingStrategy ( ) { } }
FailoverRequestBalancingStrategy strategy = balancingStrategyFactory . get ( ) ; if ( org . infinispan . client . hotrod . impl . transport . tcp . FailoverRequestBalancingStrategy . class . isInstance ( strategy ) ) { return ( org . infinispan . client . hotrod . impl . transport . tcp . FailoverRequestBalancingStrategy ) strategy ; } else { return null ; }
public class NativeDataView { /** * # string _ id _ map # */ @ Override protected int findPrototypeId ( String s ) { } }
int id ; // # generated # Last update : 2014-12-08 17:26:24 PST L0 : { id = 0 ; String X = null ; int c ; L : switch ( s . length ( ) ) { case 7 : c = s . charAt ( 0 ) ; if ( c == 'g' ) { X = "getInt8" ; id = Id_getInt8 ; } else if ( c == 's' ) { X = "setInt8" ; id = Id_setInt8 ; } break L ; case 8 : c = s . charAt ( 6 ) ; if ( c == '1' ) { c = s . charAt ( 0 ) ; if ( c == 'g' ) { X = "getInt16" ; id = Id_getInt16 ; } else if ( c == 's' ) { X = "setInt16" ; id = Id_setInt16 ; } } else if ( c == '3' ) { c = s . charAt ( 0 ) ; if ( c == 'g' ) { X = "getInt32" ; id = Id_getInt32 ; } else if ( c == 's' ) { X = "setInt32" ; id = Id_setInt32 ; } } else if ( c == 't' ) { c = s . charAt ( 0 ) ; if ( c == 'g' ) { X = "getUint8" ; id = Id_getUint8 ; } else if ( c == 's' ) { X = "setUint8" ; id = Id_setUint8 ; } } break L ; case 9 : c = s . charAt ( 0 ) ; if ( c == 'g' ) { c = s . charAt ( 8 ) ; if ( c == '2' ) { X = "getUint32" ; id = Id_getUint32 ; } else if ( c == '6' ) { X = "getUint16" ; id = Id_getUint16 ; } } else if ( c == 's' ) { c = s . charAt ( 8 ) ; if ( c == '2' ) { X = "setUint32" ; id = Id_setUint32 ; } else if ( c == '6' ) { X = "setUint16" ; id = Id_setUint16 ; } } break L ; case 10 : c = s . charAt ( 0 ) ; if ( c == 'g' ) { c = s . charAt ( 9 ) ; if ( c == '2' ) { X = "getFloat32" ; id = Id_getFloat32 ; } else if ( c == '4' ) { X = "getFloat64" ; id = Id_getFloat64 ; } } else if ( c == 's' ) { c = s . charAt ( 9 ) ; if ( c == '2' ) { X = "setFloat32" ; id = Id_setFloat32 ; } else if ( c == '4' ) { X = "setFloat64" ; id = Id_setFloat64 ; } } break L ; case 11 : X = "constructor" ; id = Id_constructor ; break L ; } if ( X != null && X != s && ! X . equals ( s ) ) id = 0 ; break L0 ; } // # / generated # return id ;
public class BloomFilter { /** * Creates a { @ link BloomFilter } with the expected number of insertions and expected false * positive probability . * Note that overflowing a { @ code BloomFilter } with significantly more elements than specified , * will result in its saturation , and a sharp deterioration of its false positive probability . */ public static BloomFilter create ( long expectedNumItems , double fpp ) { } }
if ( fpp <= 0D || fpp >= 1D ) { throw new IllegalArgumentException ( "False positive probability must be within range (0.0, 1.0)" ) ; } return create ( expectedNumItems , optimalNumOfBits ( expectedNumItems , fpp ) ) ;
public class InternalSARLParser { /** * InternalSARL . g : 9319:1 : entryRuleParameterizedTypeReferenceWithTypeArgs returns [ EObject current = null ] : iv _ ruleParameterizedTypeReferenceWithTypeArgs = ruleParameterizedTypeReferenceWithTypeArgs EOF ; */ public final EObject entryRuleParameterizedTypeReferenceWithTypeArgs ( ) throws RecognitionException { } }
EObject current = null ; EObject iv_ruleParameterizedTypeReferenceWithTypeArgs = null ; try { // InternalSARL . g : 9319:79 : ( iv _ ruleParameterizedTypeReferenceWithTypeArgs = ruleParameterizedTypeReferenceWithTypeArgs EOF ) // InternalSARL . g : 9320:2 : iv _ ruleParameterizedTypeReferenceWithTypeArgs = ruleParameterizedTypeReferenceWithTypeArgs EOF { if ( state . backtracking == 0 ) { newCompositeNode ( grammarAccess . getParameterizedTypeReferenceWithTypeArgsRule ( ) ) ; } pushFollow ( FOLLOW_1 ) ; iv_ruleParameterizedTypeReferenceWithTypeArgs = ruleParameterizedTypeReferenceWithTypeArgs ( ) ; state . _fsp -- ; if ( state . failed ) return current ; if ( state . backtracking == 0 ) { current = iv_ruleParameterizedTypeReferenceWithTypeArgs ; } match ( input , EOF , FOLLOW_2 ) ; if ( state . failed ) return current ; } } catch ( RecognitionException re ) { recover ( input , re ) ; appendSkippedTokens ( ) ; } finally { } return current ;
public class Ranges { /** * Return true if the specified range is strictly less than the specified value . * @ param < C > range endpoint type * @ param range range , must not be null * @ param value value , must not be null * @ return true if the specified range is strictly less than the specified value */ public static < C extends Comparable > boolean isLessThan ( final Range < C > range , final C value ) { } }
checkNotNull ( range ) ; checkNotNull ( value ) ; if ( ! range . hasUpperBound ( ) ) { return false ; } if ( range . upperBoundType ( ) == BoundType . OPEN && range . upperEndpoint ( ) . equals ( value ) ) { return true ; } return range . upperEndpoint ( ) . compareTo ( value ) < 0 ;
public class AudioSelectorMarshaller { /** * Marshall the given parameter object . */ public void marshall ( AudioSelector audioSelector , ProtocolMarshaller protocolMarshaller ) { } }
if ( audioSelector == null ) { throw new SdkClientException ( "Invalid argument passed to marshall(...)" ) ; } try { protocolMarshaller . marshall ( audioSelector . getCustomLanguageCode ( ) , CUSTOMLANGUAGECODE_BINDING ) ; protocolMarshaller . marshall ( audioSelector . getDefaultSelection ( ) , DEFAULTSELECTION_BINDING ) ; protocolMarshaller . marshall ( audioSelector . getExternalAudioFileInput ( ) , EXTERNALAUDIOFILEINPUT_BINDING ) ; protocolMarshaller . marshall ( audioSelector . getLanguageCode ( ) , LANGUAGECODE_BINDING ) ; protocolMarshaller . marshall ( audioSelector . getOffset ( ) , OFFSET_BINDING ) ; protocolMarshaller . marshall ( audioSelector . getPids ( ) , PIDS_BINDING ) ; protocolMarshaller . marshall ( audioSelector . getProgramSelection ( ) , PROGRAMSELECTION_BINDING ) ; protocolMarshaller . marshall ( audioSelector . getRemixSettings ( ) , REMIXSETTINGS_BINDING ) ; protocolMarshaller . marshall ( audioSelector . getSelectorType ( ) , SELECTORTYPE_BINDING ) ; protocolMarshaller . marshall ( audioSelector . getTracks ( ) , TRACKS_BINDING ) ; } catch ( Exception e ) { throw new SdkClientException ( "Unable to marshall request to JSON: " + e . getMessage ( ) , e ) ; }
public class ScreenshotUtils { /** * Writes a { @ link Bitmap } object to a file in the current context to the given directory . * @ param bitmap The bitmap object to output . * @ param filename The name of the file to output . * @ param dirFile The directory { @ link File } into which to write the bitmap . * @ return A file where the Bitmap was stored , or { @ code null } if the write * operation failed . */ private static File writeBitmapToDirectory ( Bitmap bitmap , String filename , File dirFile ) { } }
if ( ! dirFile . exists ( ) && ! dirFile . mkdirs ( ) ) { LogUtils . log ( ScreenshotUtils . class , Log . WARN , "Directory %s does not exist and could not be created." , dirFile . getAbsolutePath ( ) ) ; return null ; } final File outFile = new File ( dirFile , filename ) ; if ( outFile . exists ( ) ) { LogUtils . log ( ScreenshotUtils . class , Log . WARN , "Tried to write a bitmap to a file that already exists." ) ; return null ; } FileOutputStream outStream = null ; try { outStream = new FileOutputStream ( outFile ) ; final boolean compressSuccess = bitmap . compress ( CompressFormat . PNG , 0 /* quality , ignored for PNG */ , outStream ) ; if ( compressSuccess ) { LogUtils . log ( ScreenshotUtils . class , Log . VERBOSE , "Wrote bitmap to %s." , outFile . getAbsolutePath ( ) ) ; return outFile ; } else { LogUtils . log ( ScreenshotUtils . class , Log . WARN , "Bitmap failed to compress to file %s." , outFile . getAbsolutePath ( ) ) ; return null ; } } catch ( IOException e ) { LogUtils . log ( ScreenshotUtils . class , Log . WARN , "Could not output bitmap file to %s." , outFile . getAbsolutePath ( ) ) ; return null ; } finally { if ( outStream != null ) { try { outStream . close ( ) ; } catch ( IOException e ) { LogUtils . log ( ScreenshotUtils . class , Log . WARN , "Unable to close resource." ) ; } } }
public class HeaderFooterRecyclerAdapter { /** * Notifies that multiple footer items are removed . * @ param positionStart the position . * @ param itemCount the item count . */ public final void notifyFooterItemRangeRemoved ( int positionStart , int itemCount ) { } }
if ( positionStart < 0 || itemCount < 0 || positionStart + itemCount > footerItemCount ) { throw new IndexOutOfBoundsException ( "The given range [" + positionStart + " - " + ( positionStart + itemCount - 1 ) + "] is not within the position bounds for footer items [0 - " + ( footerItemCount - 1 ) + "]." ) ; } notifyItemRangeRemoved ( positionStart + headerItemCount + contentItemCount , itemCount ) ;
public class MediaApi { /** * Pull an Interaction from a Workbin * @ param mediatype The media channel . ( required ) * @ param id The ID of the interaction . ( required ) * @ param pullInteractionFromWorkbinData ( optional ) * @ return ApiSuccessResponse * @ throws ApiException If fail to call the API , e . g . server error or cannot deserialize the response body */ public ApiSuccessResponse pullInteractionFromWorkbin ( String mediatype , String id , PullInteractionFromWorkbinData pullInteractionFromWorkbinData ) throws ApiException { } }
ApiResponse < ApiSuccessResponse > resp = pullInteractionFromWorkbinWithHttpInfo ( mediatype , id , pullInteractionFromWorkbinData ) ; return resp . getData ( ) ;
public class HLL { /** * Deserializes the HLL ( in { @ link # toBytes ( ISchemaVersion ) } format ) serialized * into < code > bytes < / code > . < p / > * @ param bytes the serialized bytes of new HLL * @ return the deserialized HLL . This will never be < code > null < / code > . * @ see # toBytes ( ISchemaVersion ) */ public static HLL fromBytes ( final byte [ ] bytes ) { } }
final ISchemaVersion schemaVersion = SerializationUtil . getSchemaVersion ( bytes ) ; final IHLLMetadata metadata = schemaVersion . readMetadata ( bytes ) ; final HLLType type = metadata . HLLType ( ) ; final int regwidth = metadata . registerWidth ( ) ; final int log2m = metadata . registerCountLog2 ( ) ; final boolean sparseon = metadata . sparseEnabled ( ) ; final int expthresh ; if ( metadata . explicitAuto ( ) ) { expthresh = - 1 ; } else if ( metadata . explicitOff ( ) ) { expthresh = 0 ; } else { // NOTE : take into account that the postgres - compatible constructor // subtracts one before taking a power of two . expthresh = metadata . log2ExplicitCutoff ( ) + 1 ; } final HLL hll = new HLL ( log2m , regwidth , expthresh , sparseon , type ) ; // Short - circuit on empty , which needs no other deserialization . if ( HLLType . EMPTY . equals ( type ) ) { return hll ; } final int wordLength ; switch ( type ) { case EXPLICIT : wordLength = Long . SIZE ; break ; case SPARSE : wordLength = hll . shortWordLength ; break ; case FULL : wordLength = hll . regwidth ; break ; default : throw new RuntimeException ( "Unsupported HLL type " + type ) ; } final IWordDeserializer deserializer = schemaVersion . getDeserializer ( type , wordLength , bytes ) ; switch ( type ) { case EXPLICIT : // NOTE : This should not exceed expthresh and this will always // be exactly the number of words that were encoded , // because the word length is at least a byte wide . // SEE : IWordDeserializer # totalWordCount ( ) for ( int i = 0 ; i < deserializer . totalWordCount ( ) ; i ++ ) { hll . explicitStorage . add ( deserializer . readWord ( ) ) ; } break ; case SPARSE : // NOTE : If the shortWordLength were smaller than 8 bits // (1 byte ) there would be a possibility ( because of // padding arithmetic ) of having one or more extra // registers read . However , this is not relevant as the // extra registers will be all zeroes , which are ignored // in the sparse representation . 
for ( int i = 0 ; i < deserializer . totalWordCount ( ) ; i ++ ) { final long shortWord = deserializer . readWord ( ) ; final byte registerValue = ( byte ) ( shortWord & hll . valueMask ) ; // Only set non - zero registers . if ( registerValue != 0 ) { hll . sparseProbabilisticStorage . put ( ( int ) ( shortWord >>> hll . regwidth ) , registerValue ) ; } } break ; case FULL : // NOTE : Iteration is done using m ( register count ) and NOT // deserializer # totalWordCount ( ) because regwidth may be // less than 8 and as such the padding on the ' last ' byte // may be larger than regwidth , causing an extra register // to be read . // SEE : IWordDeserializer # totalWordCount ( ) for ( long i = 0 ; i < hll . m ; i ++ ) { hll . probabilisticStorage . setRegister ( i , deserializer . readWord ( ) ) ; } break ; default : throw new RuntimeException ( "Unsupported HLL type " + type ) ; } return hll ;
public class CatalinaSipListenersHolder { /** * / * ( non - Javadoc ) * @ see org . mobicents . servlet . sip . core . session . SipListenersHolder # loadListeners ( java . lang . String [ ] , java . lang . ClassLoader ) */ @ Override public boolean loadListeners ( String [ ] listeners , ClassLoader classLoader ) { } }
// Instantiate all the listeners for ( String className : listeners ) { try { Class listenerClass = Class . forName ( className , false , classLoader ) ; EventListener listener = ( EventListener ) listenerClass . newInstance ( ) ; // FIXME ! ! ! SipInstanceManager sipInstanceManager = ( ( CatalinaSipContext ) sipContext ) . getSipInstanceManager ( ) ; // FIXME ! ! sipInstanceManager . processAnnotations ( listener , sipInstanceManager . getInjectionMap ( listenerClass . getName ( ) ) ) ; MobicentsSipServlet sipServletImpl = ( MobicentsSipServlet ) sipContext . findSipServletByClassName ( className ) ; if ( sipServletImpl != null ) { listener = ( EventListener ) sipServletImpl . allocate ( ) ; listenerServlets . put ( listener , sipServletImpl ) ; } else { SipServlet servlet = ( SipServlet ) listenerClass . getAnnotation ( SipServlet . class ) ; if ( servlet != null ) { sipServletImpl = ( MobicentsSipServlet ) sipContext . findSipServletByName ( servlet . name ( ) ) ; if ( sipServletImpl != null ) { listener = ( EventListener ) sipServletImpl . allocate ( ) ; listenerServlets . put ( listener , sipServletImpl ) ; } } } addListenerToBunch ( listener ) ; } catch ( Exception e ) { logger . fatal ( "Cannot instantiate listener class " + className , e ) ; return false ; } } return true ;
public class BackgroundElementRastered { /** * Get raster surface from its id . * @ param id The raster id . * @ return The raster surface . */ public Sprite getRaster ( int id ) { } }
return rasters . get ( UtilMath . clamp ( id , 0 , rasters . size ( ) - 1 ) ) ;
public class DbUserQueryImpl { /** * results / / / / / */ public long executeCount ( CommandContext commandContext ) { } }
checkQueryOk ( ) ; final DbReadOnlyIdentityServiceProvider identityProvider = getIdentityProvider ( commandContext ) ; return identityProvider . findUserCountByQueryCriteria ( this ) ;
public class ServicesInner { /** * Delete DMS Service Instance . * The services resource is the top - level resource that represents the Data Migration Service . The DELETE method deletes a service . Any running tasks will be canceled . * @ param groupName Name of the resource group * @ param serviceName Name of the service * @ param serviceCallback the async ServiceCallback to handle successful and failed responses . * @ throws IllegalArgumentException thrown if parameters fail the validation * @ return the { @ link ServiceFuture } object */ public ServiceFuture < Void > beginDeleteAsync ( String groupName , String serviceName , final ServiceCallback < Void > serviceCallback ) { } }
return ServiceFuture . fromResponse ( beginDeleteWithServiceResponseAsync ( groupName , serviceName ) , serviceCallback ) ;
public class ExpressionUtil { /** * Evaluate / reduce / simplify an input expression at the compilation time * @ param expr Original Expression * @ return AbstractExpression */ public static AbstractExpression evaluateExpression ( AbstractExpression expr ) { } }
if ( expr == null ) { return null ; } // Evaluate children first expr . setLeft ( evaluateExpression ( expr . getLeft ( ) ) ) ; expr . setRight ( evaluateExpression ( expr . getRight ( ) ) ) ; // Evaluate self if ( ExpressionType . CONJUNCTION_AND == expr . getExpressionType ( ) ) { if ( ExpressionType . VALUE_CONSTANT == expr . getLeft ( ) . getExpressionType ( ) ) { if ( ConstantValueExpression . isBooleanTrue ( expr . getLeft ( ) ) ) { return expr . getRight ( ) ; } else { return expr . getLeft ( ) ; } } if ( ExpressionType . VALUE_CONSTANT == expr . getRight ( ) . getExpressionType ( ) ) { if ( ConstantValueExpression . isBooleanTrue ( expr . getRight ( ) ) ) { return expr . getLeft ( ) ; } else { return expr . getRight ( ) ; } } } else if ( ExpressionType . CONJUNCTION_OR == expr . getExpressionType ( ) ) { if ( ExpressionType . VALUE_CONSTANT == expr . getLeft ( ) . getExpressionType ( ) ) { if ( ConstantValueExpression . isBooleanTrue ( expr . getLeft ( ) ) ) { return expr . getLeft ( ) ; } else { return expr . getRight ( ) ; } } if ( ExpressionType . VALUE_CONSTANT == expr . getRight ( ) . getExpressionType ( ) ) { if ( ConstantValueExpression . isBooleanTrue ( expr . getRight ( ) ) ) { return expr . getRight ( ) ; } else { return expr . getLeft ( ) ; } } } else if ( ExpressionType . OPERATOR_NOT == expr . getExpressionType ( ) ) { AbstractExpression leftExpr = expr . getLeft ( ) ; // function expressions can also return boolean . So the left child expression // can be expression which are not constant value expressions , so don ' t // evaluate every left child expr as constant value expression if ( ( VoltType . BOOLEAN == leftExpr . getValueType ( ) ) && ( leftExpr instanceof ConstantValueExpression ) ) { if ( ConstantValueExpression . isBooleanTrue ( leftExpr ) ) { return ConstantValueExpression . getFalse ( ) ; } else { return ConstantValueExpression . getTrue ( ) ; } } else if ( ExpressionType . OPERATOR_NOT == leftExpr . 
getExpressionType ( ) ) { return leftExpr . getLeft ( ) ; } else if ( ExpressionType . CONJUNCTION_OR == leftExpr . getExpressionType ( ) ) { // NOT ( . . OR . . OR . . ) = > NOT ( . . ) AND NOT ( . . ) AND NOT ( . . ) AbstractExpression l = new OperatorExpression ( ExpressionType . OPERATOR_NOT , leftExpr . getLeft ( ) , null ) ; AbstractExpression r = new OperatorExpression ( ExpressionType . OPERATOR_NOT , leftExpr . getRight ( ) , null ) ; leftExpr = new OperatorExpression ( ExpressionType . CONJUNCTION_AND , l , r ) ; return evaluateExpression ( leftExpr ) ; } // NOT ( expr1 AND expr2 ) = > ( NOT expr1 ) | | ( NOT expr2) // The above case is probably not interesting to do for short circuit purpose } return expr ;
public class FastAdapter { /** * register a new type into the TypeInstances to be able to efficiently create thew ViewHolders * @ param item an IItem which will be shown in the list */ @ SuppressWarnings ( "unchecked" ) public void registerTypeInstance ( Item item ) { } }
if ( getTypeInstanceCache ( ) . register ( item ) ) { // check if the item implements hookable when its added for the first time if ( item instanceof IHookable ) { withEventHooks ( ( ( IHookable < Item > ) item ) . getEventHooks ( ) ) ; } }
public class Query { /** * @ see org . eclipse . datatools . connectivity . oda . IQuery # setProperty ( java . lang . String , java . lang . String ) */ public void setProperty ( String name , String value ) throws OdaException { } }
// do nothing ; assumes no data set query property properties . put ( name , value ) ;
public class Ifc4PackageImpl { /** * < ! - - begin - user - doc - - > * < ! - - end - user - doc - - > * @ generated */ @ Override public EEnum getIfcProjectOrderTypeEnum ( ) { } }
if ( ifcProjectOrderTypeEnumEEnum == null ) { ifcProjectOrderTypeEnumEEnum = ( EEnum ) EPackage . Registry . INSTANCE . getEPackage ( Ifc4Package . eNS_URI ) . getEClassifiers ( ) . get ( 1038 ) ; } return ifcProjectOrderTypeEnumEEnum ;
public class Jenkins { /** * Called to shut down the system . */ @ edu . umd . cs . findbugs . annotations . SuppressFBWarnings ( "ST_WRITE_TO_STATIC_FROM_INSTANCE_METHOD" ) public void cleanUp ( ) { } }
if ( theInstance != this && theInstance != null ) { LOGGER . log ( Level . WARNING , "This instance is no longer the singleton, ignoring cleanUp()" ) ; return ; } synchronized ( Jenkins . class ) { if ( cleanUpStarted ) { LOGGER . log ( Level . WARNING , "Jenkins.cleanUp() already started, ignoring repeated cleanUp()" ) ; return ; } cleanUpStarted = true ; } try { LOGGER . log ( Level . INFO , "Stopping Jenkins" ) ; final List < Throwable > errors = new ArrayList < > ( ) ; fireBeforeShutdown ( errors ) ; _cleanUpRunTerminators ( errors ) ; terminating = true ; final Set < Future < ? > > pending = _cleanUpDisconnectComputers ( errors ) ; _cleanUpShutdownUDPBroadcast ( errors ) ; _cleanUpCloseDNSMulticast ( errors ) ; _cleanUpInterruptReloadThread ( errors ) ; _cleanUpShutdownTriggers ( errors ) ; _cleanUpShutdownTimer ( errors ) ; _cleanUpShutdownTcpSlaveAgent ( errors ) ; _cleanUpShutdownPluginManager ( errors ) ; _cleanUpPersistQueue ( errors ) ; _cleanUpShutdownThreadPoolForLoad ( errors ) ; _cleanUpAwaitDisconnects ( errors , pending ) ; _cleanUpPluginServletFilters ( errors ) ; _cleanUpReleaseAllLoggers ( errors ) ; LOGGER . log ( Level . INFO , "Jenkins stopped" ) ; if ( ! errors . isEmpty ( ) ) { StringBuilder message = new StringBuilder ( "Unexpected issues encountered during cleanUp: " ) ; Iterator < Throwable > iterator = errors . iterator ( ) ; message . append ( iterator . next ( ) . getMessage ( ) ) ; while ( iterator . hasNext ( ) ) { message . append ( "; " ) ; message . append ( iterator . next ( ) . getMessage ( ) ) ; } iterator = errors . iterator ( ) ; RuntimeException exception = new RuntimeException ( message . toString ( ) , iterator . next ( ) ) ; while ( iterator . hasNext ( ) ) { exception . addSuppressed ( iterator . next ( ) ) ; } throw exception ; } } finally { theInstance = null ; if ( JenkinsJVM . isJenkinsJVM ( ) ) { JenkinsJVMAccess . _setJenkinsJVM ( oldJenkinsJVM ) ; } ClassFilterImpl . unregister ( ) ; }
public class AroundInterceptor { /** * Handle exception caused by : * < ul > * < li > Target method code itself . < / li > * < li > Invocation of the original method . These exceptions won ' t be wrapped into { @ link java . lang . reflect . InvocationTargetException } . < / li > * < / ul > * The implementation is not allowed to throw a checked exception . Exceptional behavior should be expressed by * returning a result . * @ param proxy The proxied instance . * @ param method Intercepted method . * @ param args Array of arguments , primitive types are boxed . * @ param cause The original exception ( throwable ) . * @ param context * @ return The resulting exception to be thrown . */ public Throwable handleException ( T proxy , Method method , Object [ ] args , Throwable cause , Map < String , Object > context ) { } }
// Just return the cause return cause ;
public class DsCompositeModelDoc { /** * Create a DOM document from the original XML . */ private Document parseBytesToDocument ( String pid , byte [ ] bytes ) throws InvalidContentModelException { } }
try { DocumentBuilderFactory factory = DocumentBuilderFactory . newInstance ( ) ; factory . setNamespaceAware ( true ) ; DocumentBuilder builder = factory . newDocumentBuilder ( ) ; return builder . parse ( new ByteArrayInputStream ( bytes ) ) ; } catch ( ParserConfigurationException e ) { throw new InvalidContentModelException ( pid , "Failed to parse " + DS_COMPOSITE_MODEL , e ) ; } catch ( SAXException e ) { throw new InvalidContentModelException ( pid , "Failed to parse " + DS_COMPOSITE_MODEL , e ) ; } catch ( IOException e ) { throw new InvalidContentModelException ( pid , "Failed to parse " + DS_COMPOSITE_MODEL , e ) ; }
public class ActionDefinition { /** * syntactic sugar */ public ActionDefinition addAction ( ActionDefinition t ) { } }
if ( t == null ) return this ; if ( this . action == null ) this . action = new ArrayList < ActionDefinition > ( ) ; this . action . add ( t ) ; return this ;
public class BaseTransport { /** * { @ link ServerHttpExchange } is available . */ @ Override public < T > T unwrap ( Class < T > clazz ) { } }
return ServerHttpExchange . class . isAssignableFrom ( clazz ) ? clazz . cast ( http ) : null ;
public class OGNL { /** * 是否包含 forUpdate * @ param parameter * @ return */ public static boolean hasForUpdate ( Object parameter ) { } }
if ( parameter != null && parameter instanceof Example ) { Example example = ( Example ) parameter ; return example . isForUpdate ( ) ; } return false ;
public class WebAppTypeImpl { /** * Returns all < code > filter - mapping < / code > elements * @ return list of < code > filter - mapping < / code > */ public List < FilterMappingType < WebAppType < T > > > getAllFilterMapping ( ) { } }
List < FilterMappingType < WebAppType < T > > > list = new ArrayList < FilterMappingType < WebAppType < T > > > ( ) ; List < Node > nodeList = childNode . get ( "filter-mapping" ) ; for ( Node node : nodeList ) { FilterMappingType < WebAppType < T > > type = new FilterMappingTypeImpl < WebAppType < T > > ( this , "filter-mapping" , childNode , node ) ; list . add ( type ) ; } return list ;
public class ListTagsForResourceRequestMarshaller {
    /**
     * Marshall the given parameter object into the protocol marshaller.
     *
     * @param listTagsForResourceRequest the request to marshall; must not be null
     * @param protocolMarshaller target marshaller receiving the field bindings
     * @throws SdkClientException if the request is null or marshalling fails
     */
    public void marshall(ListTagsForResourceRequest listTagsForResourceRequest, ProtocolMarshaller protocolMarshaller) {
        if (listTagsForResourceRequest == null) {
            throw new SdkClientException("Invalid argument passed to marshall(...)");
        }
        try {
            // Bind each request field to its wire-protocol binding descriptor.
            protocolMarshaller.marshall(listTagsForResourceRequest.getResourceType(), RESOURCETYPE_BINDING);
            protocolMarshaller.marshall(listTagsForResourceRequest.getResourceId(), RESOURCEID_BINDING);
        } catch (Exception e) {
            // Wrap any marshalling failure in the SDK's client exception type.
            throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
        }
    }
}
public class DeleteUserPoolRequestMarshaller {
    /**
     * Marshall the given parameter object into the protocol marshaller.
     *
     * @param deleteUserPoolRequest the request to marshall; must not be null
     * @param protocolMarshaller target marshaller receiving the field bindings
     * @throws SdkClientException if the request is null or marshalling fails
     */
    public void marshall(DeleteUserPoolRequest deleteUserPoolRequest, ProtocolMarshaller protocolMarshaller) {
        if (deleteUserPoolRequest == null) {
            throw new SdkClientException("Invalid argument passed to marshall(...)");
        }
        try {
            // Only one field on this request: the user pool id.
            protocolMarshaller.marshall(deleteUserPoolRequest.getUserPoolId(), USERPOOLID_BINDING);
        } catch (Exception e) {
            throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
        }
    }
}
public class IntHashSet { /** * Alias for { @ link # removeAll ( Collection ) } for the specialized case when removing another IntHashSet , * avoids boxing and allocations * @ param coll containing the values to be removed . * @ return { @ code true } if this set changed as a result of the call */ public boolean removeAll ( final IntHashSet coll ) { } }
boolean acc = false ; for ( final int value : coll . values ) { if ( value != MISSING_VALUE ) { acc |= remove ( value ) ; } } if ( coll . containsMissingValue ) { acc |= remove ( MISSING_VALUE ) ; } return acc ;
public class LoggingMetricsConfigurator { /** * Add an appender to Logback logging framework that will track the types of log messages made . */ @ PostConstruct public final void addMetricsAppenderToLogback ( ) { } }
final LoggerContext factory = ( LoggerContext ) LoggerFactory . getILoggerFactory ( ) ; final Logger root = factory . getLogger ( Logger . ROOT_LOGGER_NAME ) ; final InstrumentedAppender metrics = new InstrumentedAppender ( this . metricRegistry ) ; metrics . setContext ( root . getLoggerContext ( ) ) ; metrics . start ( ) ; root . addAppender ( metrics ) ;
public class UrlUtil { /** * Gets the URL to a given { @ code File } . * @ param file * file to be converted to a URL * @ return an URL to the passed file * @ throws IllegalStateException * if no URL can be resolved to the given file */ public static URL toUrl ( @ Nonnull final File file ) { } }
Check . notNull ( file , "file" ) ; URL url = null ; try { url = file . toURI ( ) . toURL ( ) ; } catch ( final MalformedURLException e ) { throw new IllegalStateException ( "Can not construct an URL for passed file." , e ) ; } return url ;
public class MultiClassPrecisionRecallStats { /** * Returns a String summarizing precision that will print nicely . */ public String getRecallDescription ( int numDigits ) { } }
NumberFormat nf = NumberFormat . getNumberInstance ( ) ; nf . setMaximumFractionDigits ( numDigits ) ; Triple < Double , Integer , Integer > recall = getRecallInfo ( ) ; return nf . format ( recall . first ( ) ) + " (" + recall . second ( ) + "/" + ( recall . second ( ) + recall . third ( ) ) + ")" ;
public class MisoScenePanel { /** * documentation inherited */ public boolean canTraverse ( Object traverser , int tx , int ty ) { } }
SceneBlock block = getBlock ( tx , ty ) ; return ( block == null ) ? canTraverseUnresolved ( traverser , tx , ty ) : block . canTraverse ( traverser , tx , ty ) ;
public class PersistHdfs { /** * Is there a bucket name without a trailing " / " ? */ private boolean isBareS3NBucketWithoutTrailingSlash ( String s ) { } }
String s2 = s . toLowerCase ( ) ; Matcher m = Pattern . compile ( "s3n://[^/]*" ) . matcher ( s2 ) ; return m . matches ( ) ;
public class JsonParser { /** * 根据路径获取对象 * @ param path < a href = " https : / / github . com / alibaba / fastjson / wiki / JSONPath " > 路径语法 < / a > * @ return 对象 */ public Object eval ( String path ) { } }
String key = pathToKey ( path ) ; return jsonStore . containsKey ( key ) ? jsonStore . get ( key ) : JSONPath . eval ( jsonObject , checkPath ( path ) ) ;
public class CEMILDataEx {
    /**
     * Writes all additional information to <code>os</code>.
     * Layout: one overall length byte, then for each populated slot a TLV record of
     * type id, data length, and the raw data bytes.
     *
     * @param os the output stream
     */
    synchronized void writeAddInfo(ByteArrayOutputStream os) {
        os.write(getAddInfoLength());
        for (int i = 0; i < addInfo.length; i++)
            if (addInfo[i] != null) {
                // The array index doubles as the additional-info type id.
                os.write(i);
                os.write(addInfo[i].length);
                os.write(addInfo[i], 0, addInfo[i].length);
            }
    }
}
public class CommentProcessor {
    /**
     * Align a list of comments on the left margin. EOL comments are left alone. Block and
     * JavaDoc comments are aligned by making every line that starts with a * be one space in
     * from the comment opener. Lines that don't start with * are left alone.
     *
     * @param originals the comments to realign; not modified
     * @return a new list with realigned copies (EOL comments are returned as-is)
     */
    public List<JavaComment> leftAlign(List<JavaComment> originals) {
        final List<JavaComment> results = new ArrayList<JavaComment>(originals.size());
        for (JavaComment original : originals) {
            // Dispatch on the concrete comment kind via the visitor-style match().
            results.add(original.match(new JavaComment.MatchBlock<JavaComment>() {
                @Override
                public JavaComment _case(JavaDocComment x) {
                    // Mutable state threaded through the section-alignment helpers:
                    // the pending leading whitespace and a one-element state "cell".
                    final List<JDToken> leadingWhiteSpace = new ArrayList<JDToken>(1);
                    final LeftAlignState state[] = new LeftAlignState[] { LeftAlignState.IN_LINE };
                    // The general section must know whether tag sections follow it.
                    return _JavaDocComment(x.start, leftAlignSection(x.generalSection, x.tagSections.isEmpty(), leadingWhiteSpace, state), leftAlignSections(x.tagSections, leadingWhiteSpace, state), x.end);
                }

                @Override
                public JavaComment _case(JavaBlockComment x) {
                    return _JavaBlockComment(leftAlignBlock(x.lines));
                }

                @Override
                public JavaComment _case(JavaEOLComment x) {
                    // End-of-line comments are returned untouched.
                    return x;
                }
            }));
        }
        return results;
    }
}
public class SofaNettyJaxrsServer { /** * Add additional { @ link io . netty . channel . ChannelHandler } s to the { @ link io . netty . bootstrap . ServerBootstrap } . * < p > The additional channel handlers are being added < em > after < / em > the HTTP handling . < / p > * @ param httpChannelHandlers the additional { @ link io . netty . channel . ChannelHandler } s . */ public void setHttpChannelHandlers ( final List < ChannelHandler > httpChannelHandlers ) { } }
this . httpChannelHandlers = httpChannelHandlers == null ? Collections . < ChannelHandler > emptyList ( ) : httpChannelHandlers ;
public class ImageGridViewAdapter { /** * Re - select image from selected image set * @ param images */ public void setData ( List < ImageItem > images ) { } }
selectedImages . clear ( ) ; if ( images != null && images . size ( ) > 0 ) { this . images = images ; } else { this . images . clear ( ) ; } notifyDataSetChanged ( ) ;
public class RollingFileAppender { /** * { @ inheritDoc } */ public boolean parseUnrecognizedElement ( final Element element , final Properties props ) throws Exception { } }
final String nodeName = element . getNodeName ( ) ; if ( "rollingPolicy" . equals ( nodeName ) ) { OptionHandler rollingPolicy = org . apache . log4j . extras . DOMConfigurator . parseElement ( element , props , RollingPolicy . class ) ; if ( rollingPolicy != null ) { rollingPolicy . activateOptions ( ) ; this . setRollingPolicy ( ( RollingPolicy ) rollingPolicy ) ; } return true ; } if ( "triggeringPolicy" . equals ( nodeName ) ) { OptionHandler triggerPolicy = org . apache . log4j . extras . DOMConfigurator . parseElement ( element , props , TriggeringPolicy . class ) ; if ( triggerPolicy != null ) { triggerPolicy . activateOptions ( ) ; this . setTriggeringPolicy ( ( TriggeringPolicy ) triggerPolicy ) ; } return true ; } return false ;
public class User {
    /**
     * This function is mainly made for plug-in use; it sends data to the global user handler,
     * for example to perform tasks that aren't related directly to widgets. The global handler
     * is a javascript function in an array named 'global', under the key passed here as the
     * handler parameter. The javascript function receives the data object passed as JSON here.
     *
     * @param handler name of the client-side handler; generally the syntax should be
     *        [PluginName]-[HandlerName], like JWWF-UserData
     * @param data data to send, JSON-formatted
     */
    public final void sendGlobal(String handler, String data) {
        // NOTE(review): 'data' is spliced into the message verbatim and is assumed to
        // already be valid JSON -- malformed input would corrupt the whole envelope.
        try {
            connection.sendMessage("{\"id\":-1,\"type\":\"global\",\"handler\":" + Json.escapeString(handler) + ",\"data\":" + data + "}");
        } catch (IOException e) {
            // Best-effort delivery: failures are only reported to stderr.
            e.printStackTrace();
        }
    }
}
public class FutureReadResultEntryCollection { /** * Finds the Result Entries that have a starting offset before the given offset , removes them from the collection , * and returns them . * @ param maxOffset The offset to query against . */ Collection < FutureReadResultEntry > poll ( long maxOffset ) { } }
List < FutureReadResultEntry > result = new ArrayList < > ( ) ; synchronized ( this . reads ) { Exceptions . checkNotClosed ( this . closed , this ) ; // ' reads ' is sorted by Starting Offset , in ascending order . As long as it is not empty and the // first entry overlaps the given offset by at least one byte , extract and return it . while ( this . reads . size ( ) > 0 && this . reads . peek ( ) . getStreamSegmentOffset ( ) <= maxOffset ) { result . add ( this . reads . poll ( ) ) ; } } return result ;
public class dnsnsrec { /** * Use this API to fetch filtered set of dnsnsrec resources . * filter string should be in JSON format . eg : " port : 80 , servicetype : HTTP " . */ public static dnsnsrec [ ] get_filtered ( nitro_service service , String filter ) throws Exception { } }
dnsnsrec obj = new dnsnsrec ( ) ; options option = new options ( ) ; option . set_filter ( filter ) ; dnsnsrec [ ] response = ( dnsnsrec [ ] ) obj . getfiltered ( service , option ) ; return response ;
public class BitMarketAdapters { /** * Adapts BitMarketBalance to Wallet * @ param balance * @ return */ public static Wallet adaptWallet ( BitMarketBalance balance ) { } }
List < Balance > balances = new ArrayList < > ( balance . getAvailable ( ) . size ( ) ) ; for ( Map . Entry < String , BigDecimal > entry : balance . getAvailable ( ) . entrySet ( ) ) { Currency currency = Currency . getInstance ( entry . getKey ( ) ) ; BigDecimal frozen = balance . getBlocked ( ) . containsKey ( entry . getKey ( ) ) ? balance . getBlocked ( ) . get ( entry . getKey ( ) ) : new BigDecimal ( "0" ) ; BigDecimal available = entry . getValue ( ) ; balances . add ( new Balance ( currency , available . add ( frozen ) , available , frozen ) ) ; } return new Wallet ( balances ) ;
public class RepositoryFinderMongo {
    /**
     * {@inheritDoc}
     * Converts the finder object to a Mongo document and saves it via the visitor.
     * Persistence failures are logged and swallowed (best-effort insert).
     */
    @Override
    public void insert(final FinderObject owner, final FinderObject fob) {
        // Only 'fob' is persisted; 'owner' is unused by this implementation.
        final GedObject gob = (GedObject) fob;
        try {
            logger.debug("Starting insert: " + gob.getString());
            final GedDocumentMongo<?> gedDoc = toDocConverter.createGedDocument(gob);
            final TopLevelGedDocumentMongoVisitor visitor = new SaveVisitor(repositoryManager);
            gedDoc.accept(visitor);
        } catch (DataAccessException e) {
            logger.error("Error saving: " + gob.getString(), e);
        }
        // NOTE: logged even when the save above failed.
        logger.debug("Ending insert: " + gob.getString());
    }
}
public class MailService { /** * Sends to a mailbox */ public void send ( Message message ) { } }
Transport smtp = null ; try { smtp = _session . getTransport ( "smtp" ) ; smtp . connect ( ) ; smtp . send ( message , _to ) ; log . fine ( this + " sent mail to " + _to [ 0 ] ) ; } catch ( RuntimeException e ) { throw e ; } catch ( Exception e ) { throw new RuntimeException ( e ) ; } finally { try { if ( smtp != null ) smtp . close ( ) ; } catch ( Exception e ) { log . log ( Level . FINER , e . toString ( ) , e ) ; } }
public class SpanningTree { /** * Access the computed spanning tree of the input molecule . * @ return acyclic tree of the input molecule */ public IAtomContainer getSpanningTree ( ) { } }
IAtomContainer container = molecule . getBuilder ( ) . newInstance ( IAtomContainer . class ) ; for ( int a = 0 ; a < totalVertexCount ; a ++ ) container . addAtom ( molecule . getAtom ( a ) ) ; for ( int b = 0 ; b < totalEdgeCount ; b ++ ) if ( bondsInTree [ b ] ) container . addBond ( molecule . getBond ( b ) ) ; return container ;
public class Scope { /** * Declare a variable for use in this scope . If no variable of this name * and type has been defined , it is added to the shared set of pooled * variables . Returns the actual Variable object that should be used * instead . * @ param isPrivate when true , variable declaration doesn ' t leave this * scope during an intersection or promotion */ public Variable declareVariable ( Variable var , boolean isPrivate ) { } }
if ( mVariables . containsKey ( var ) ) { var = mVariables . get ( var ) ; } else { mVariables . put ( var , var ) ; } mDeclared . put ( var . getName ( ) , var ) ; if ( isPrivate ) { if ( mPrivateVars == null ) { mPrivateVars = new HashSet < Variable > ( 7 ) ; } mPrivateVars . add ( var ) ; } else { if ( mPrivateVars != null ) { mPrivateVars . remove ( var ) ; } } return var ;
public class GwtEventUtil { /** * Get the target DOM element of the mouse event . * @ param event * The mouse event itself . * @ return Returns the DOM element or null if something went wrong . */ public static Element getTarget ( DomEvent < ? > event ) { } }
EventTarget target = event . getNativeEvent ( ) . getEventTarget ( ) ; if ( Element . is ( target ) ) { return Element . as ( target ) ; } return null ;
public class ArchiveBase {
    /**
     * {@inheritDoc}
     *
     * @see org.jboss.shrinkwrap.api.Archive#add(org.jboss.shrinkwrap.api.Archive, org.jboss.shrinkwrap.api.ArchivePath, java.lang.Class)
     */
    @Override
    public T add(final Archive<?> archive, final ArchivePath path, Class<? extends StreamExporter> exporter) {
        // Precondition checks
        Validate.notNull(path, "No path was specified");
        Validate.notNull(archive, "No archive was specified");
        Validate.notNull(exporter, "No exporter was specified");
        // Nest the archive under the requested path and delegate to the asset-based add.
        final ArchivePath nestedPath = new BasicPath(path, archive.getName());
        final ArchiveAsset nestedAsset = new ArchiveAsset(archive, exporter);
        return add(nestedAsset, nestedPath);
    }
}
public class VisibleAssertions {
    /**
     * Assert that an actual value is equal to an expected value.
     * Equality is tested with the standard Object equals() method, unless both values are null.
     * If the assertion passes, a green tick will be shown. If the assertion fails, a red cross
     * will be shown.
     *
     * @param message message to display alongside the assertion outcome
     * @param expected the expected value
     * @param actual the actual value
     */
    public static void assertEquals(String message, Object expected, Object actual) {
        String expectedInQuotes = inQuotesIfNotNull(expected);
        String actualInQuotes = inQuotesIfNotNull(actual);
        // Branch order matters: two nulls are equal, then plain equals(), then the
        // special case of equal string forms but different runtime types.
        if (areBothNull(expected, actual)) {
            pass(message);
        } else if (isObjectEquals(expected, actual)) {
            pass(message);
        } else if (isObjectStringEqualsButDifferentType(expected, actual)) {
            // Include class names so e.g. Integer 1 vs String "1" is distinguishable.
            String actualClass = actual.getClass().getCanonicalName();
            String expectedClass = expected.getClass().getCanonicalName();
            fail(message, actualInQuotes + " [" + actualClass + "] does not equal expected " + expectedInQuotes + " [" + expectedClass + "]");
        } else {
            fail(message, actualInQuotes + " does not equal expected " + expectedInQuotes);
        }
    }
}
public class DocumentManagerImpl {
    /**
     * Bulk-reads metadata and/or content for multiple uris at a fixed server timestamp.
     * (The javadoc of the original carried a commented-out convenience overload that
     * delegated here with withContent = true.)
     *
     * @param serverTimestamp point-in-time timestamp for the read
     * @param transform server transform to apply; falls back to the manager's read transform when null
     * @param transaction optional transaction context
     * @param withContent whether document content (not just metadata) is fetched
     * @param temporalCollection optional temporal collection name
     * @param uris the uris to read; must be non-null and non-empty
     */
    public DocumentPage read(long serverTimestamp, ServerTransform transform, Transaction transaction, boolean withContent, String temporalCollection, String[] uris) {
        if (uris == null || uris.length == 0)
            throw new IllegalArgumentException("Attempt to call read with no uris");
        if (logger.isInfoEnabled())
            logger.info("Reading metadata and content for multiple uris beginning with {}", uris[0]);
        RequestParameters extraParams = addTemporalParams(new RequestParameters(), temporalCollection, null, null);
        return services.getBulkDocuments(requestLogger, serverTimestamp, transaction,
                // the default for bulk is no metadata, which differs from the normal
                // default of ALL
                (isProcessedMetadataModified || !withContent) ? processedMetadata : null,
                nonDocumentFormat,
                // Prefer the explicit transform; otherwise use the manager-level read transform.
                mergeTransformParameters((transform != null) ? transform : getReadTransform(), extraParams),
                withContent, uris);
    }
}
public class WsLogger {
    /**
     * @see java.util.logging.Logger#finer(java.lang.String)
     */
    @Override
    public void finer(String msg) {
        // Guard avoids building the log record when FINER is disabled.
        if (isLoggable(Level.FINER)) {
            log(Level.FINER, msg);
        }
    }
}
public class LToIntFunctionBuilder {
    /**
     * Adds a full new case for arguments of specific classes (matched by instanceOf;
     * a null {@code argC} acts as a wildcard that matches anything).
     *
     * @param argC the class the argument must be an instance of, or null for any
     * @param function the function to evaluate when the case matches
     * @return this builder, for chaining
     */
    @Nonnull
    public <V extends T> LToIntFunctionBuilder<T> aCase(Class<V> argC, LToIntFunction<V> function) {
        // Predicate: accept when no class filter is given, or the argument is an instance of argC.
        PartialCaseWithIntProduct.The pc = partialCaseFactoryMethod(a -> (argC == null || argC.isInstance(a)));
        pc.evaluate(function);
        return self();
    }
}
public class AlipayLogger { /** * 开启DEBUG级别日志 ( 仅针对JDK14LOGGER , LOG4J请自行修改配置文件 ) * @ param isEnabled */ public static void setJDKDebugEnabled ( Boolean isEnabled ) { } }
// 如果使用JDK14LOGGER , 将业务日志级别设为DEBUG ( FINE ) if ( blog instanceof Jdk14Logger ) { Jdk14Logger logger = ( Jdk14Logger ) blog ; if ( isEnabled ) { logger . getLogger ( ) . setLevel ( Level . FINE ) ; Handler consoleHandler = new ConsoleHandler ( ) ; consoleHandler . setLevel ( Level . FINE ) ; logger . getLogger ( ) . addHandler ( consoleHandler ) ; } else { logger . getLogger ( ) . setLevel ( Level . INFO ) ; } }
public class KiteConfigurationService { /** * where Configuration . addResource ( configuration ) is not available */ private static Configuration merge ( Configuration one , Configuration two ) { } }
if ( one == null ) { if ( two == null ) { return new Configuration ( ) ; } return new Configuration ( two ) ; } Configuration c = new Configuration ( one ) ; if ( two == null ) { return c ; } for ( Map . Entry < String , String > entry : two ) { c . set ( entry . getKey ( ) , entry . getValue ( ) ) ; } return c ;
public class StateFilter {
    /**
     * Applies state based filter rules on frame.
     * Criteria for accept:
     * <ul>
     * <li>the KNX message destination address is a group address</li>
     * <li>there is <b>no</b> datapoint model available in the configuration, or</li>
     * <li>there is a datapoint model available with a datapoint identified by the
     * destination address <b>and</b> the datapoint is state based</li>
     * <li>the message is an application layer group write or group response</li>
     * </ul>
     * On acceptance, the frame is stored into the configuration cache using a
     * {@link LDataObject}. All frames are converted to L-data indications with the
     * group response service code before being stored. If update and invalidation
     * information is available, dependent datapoint state values are updated or
     * invalidated appropriately.
     *
     * @param frame {@inheritDoc}
     * @param c {@inheritDoc}
     */
    @Override
    public void accept(final CEMI frame, final Configuration c) {
        final Cache cache = c.getCache();
        if (cache == null || !(frame instanceof CEMILData))
            return;
        final CEMILData f = (CEMILData) frame;
        // Only group-addressed frames are buffered.
        if (!(f.getDestination() instanceof GroupAddress))
            return;
        final GroupAddress dst = (GroupAddress) f.getDestination();
        final DatapointModel<?> m = c.getDatapointModel();
        Datapoint dp = null;
        // With a model present, reject unknown or non-state-based datapoints.
        if (m != null && ((dp = m.get(dst)) == null || !dp.isStateBased()))
            return;
        final byte[] d = f.getPayload();
        // filter for A-Group write (0x80) and read.res (0x40) services
        final int svc = d[0] & 0x03 | d[1] & 0xC0;
        CEMILData copy;
        if (svc == 0x40) {
            // read.res could be in a L-Data.con, too
            if (f.getMessageCode() == CEMILData.MC_LDATA_CON)
                try {
                    copy = (CEMILData) CEMIFactory.create(CEMILData.MC_LDATA_IND, d, f);
                } catch (final KNXFormatException e) {
                    LogService.getLogger("calimero").error("create L_Data.ind for network buffer: {}", f, e);
                    return;
                }
            else
                copy = f;
        } else if (svc == 0x80) {
            // adjust to read response frame
            d[1] = (byte) (d[1] & 0x3f | 0x40);
            try {
                copy = (CEMILData) CEMIFactory.create(CEMILData.MC_LDATA_IND, d, f);
            } catch (final KNXFormatException e) {
                LogService.getLogger("calimero").error("create L_Data.ind for network buffer: {}", f, e);
                return;
            }
        } else
            return;
        // adjust some fields of the frame to buffer: hop count, repetition
        // make sure the frame hop count is 6
        final int hops = 6;
        if (copy instanceof CEMILDataEx) {
            if (copy.getHopCount() != hops)
                ((CEMILDataEx) copy).setHopCount(hops);
            if (copy.isRepetition())
                copy = CEMIFactory.create(null, null, copy, false, false);
        } else {
            // Plain CEMILData is immutable here: rebuild when either field needs changing.
            if (copy.getHopCount() != hops || copy.isRepetition())
                copy = new CEMILData(copy.getMessageCode(), copy.getSource(), copy.getDestination(), copy.getPayload(), copy.getPriority(), false, hops);
        }
        // put into cache object
        CacheObject co = cache.get(dst);
        if (co != null)
            ((LDataObject) co).setFrame(copy);
        else
            co = new LDataObject(copy);
        cache.put(co);
        // do invalidation / update of other datapoints
        // a write updates and invalidates, read.res only updates
        update(copy, cache);
        if (svc == 0x80)
            invalidate(copy, cache);
    }
}
public class IOUtils { /** * Deserializes the given input stream into to a newly allocated object , and close the input * stream . * @ param inputStream input stream to deserialize * @ since 1.16 */ @ SuppressWarnings ( "unchecked" ) public static < S extends Serializable > S deserialize ( InputStream inputStream ) throws IOException { } }
try { return ( S ) new ObjectInputStream ( inputStream ) . readObject ( ) ; } catch ( ClassNotFoundException exception ) { IOException ioe = new IOException ( "Failed to deserialize object" ) ; ioe . initCause ( exception ) ; throw ioe ; } finally { inputStream . close ( ) ; }
public class BsFailureUrl {
    // Serializes this failure-URL document into a field map; only fields that are
    // actually set are emitted, keeping the source sparse.
    @Override
    public Map<String, Object> toSource() {
        Map<String, Object> sourceMap = new HashMap<>();
        if (configId != null) {
            addFieldToSource(sourceMap, "configId", configId);
        }
        if (errorCount != null) {
            addFieldToSource(sourceMap, "errorCount", errorCount);
        }
        if (errorLog != null) {
            addFieldToSource(sourceMap, "errorLog", errorLog);
        }
        if (errorName != null) {
            addFieldToSource(sourceMap, "errorName", errorName);
        }
        if (lastAccessTime != null) {
            addFieldToSource(sourceMap, "lastAccessTime", lastAccessTime);
        }
        if (threadName != null) {
            addFieldToSource(sourceMap, "threadName", threadName);
        }
        if (url != null) {
            addFieldToSource(sourceMap, "url", url);
        }
        return sourceMap;
    }
}
public class WaveformDetailComponent { /** * Set the current playback state for a player . * Will cause part of the component to be redrawn if the player state has * changed ( and we have the { @ link TrackMetadata } we need to translate the time into a position in the * component ) . This will be quickly overruled if a player is being monitored , but * can be used in other contexts . * @ param player the player number whose playback state is being recorded * @ param position the current playback position of that player in milliseconds * @ param playing whether the player is actively playing the track * @ throws IllegalStateException if the component is configured to monitor a player , and this is called * with state for a different player * @ throws IllegalArgumentException if player is less than one * @ since 0.5.0 */ public synchronized void setPlaybackState ( int player , long position , boolean playing ) { } }
if ( getMonitoredPlayer ( ) != 0 && player != getMonitoredPlayer ( ) ) { throw new IllegalStateException ( "Cannot setPlaybackState for another player when monitoring player " + getMonitoredPlayer ( ) ) ; } if ( player < 1 ) { throw new IllegalArgumentException ( "player must be positive" ) ; } PlaybackState oldFurthestState = getFurthestPlaybackState ( ) ; PlaybackState newState = new PlaybackState ( player , position , playing ) ; PlaybackState oldState = playbackStateMap . put ( player , newState ) ; if ( oldState == null || oldState . position != newState . position ) { repaintDueToPlaybackStateChange ( oldState , newState , oldFurthestState ) ; }
public class ServiceLocatorImpl { /** * { @ inheritDoc } */ @ Override public synchronized List < String > getEndpointNames ( QName serviceName ) throws ServiceLocatorException , InterruptedException { } }
if ( LOG . isLoggable ( Level . FINE ) ) { LOG . fine ( "Get all endpoint names of service " + serviceName + "..." ) ; } List < String > children ; RootNode rootNode = getBackend ( ) . connect ( ) ; ServiceNode serviceNode = rootNode . getServiceNode ( serviceName ) ; if ( serviceNode . exists ( ) ) { children = serviceNode . getEndpointNames ( ) ; } else { if ( LOG . isLoggable ( Level . FINE ) ) { LOG . fine ( "Lookup of service " + serviceName + " failed, service is not known." ) ; } children = Collections . emptyList ( ) ; } return children ;
public class OmemoManager {
    /**
     * Remove active stanza listeners needed for OMEMO: the PEP device-list listener,
     * the async OMEMO message stanza listener, and the carbon-copy listener.
     */
    public void stopStanzaAndPEPListeners() {
        PepManager.getInstanceFor(connection()).removePepListener(deviceListUpdateListener);
        connection().removeAsyncStanzaListener(internalOmemoMessageStanzaListener);
        CarbonManager.getInstanceFor(connection()).removeCarbonCopyReceivedListener(internalOmemoCarbonCopyListener);
    }
}
public class ProxyArtifactStore { /** * { @ inheritDoc } */ public synchronized Set < Artifact > getArtifacts ( String groupId , String artifactId , String version ) { } }
String path = groupId . replace ( '.' , '/' ) + '/' + artifactId + "/" + version ; Map < String , Artifact > artifactMapper = this . children . get ( path ) ; if ( artifactMapper == null ) { return Collections . emptySet ( ) ; } Set < Artifact > result = new HashSet < Artifact > ( ) ; for ( Artifact a : artifactMapper . values ( ) ) { if ( a != null ) { result . add ( a ) ; } } return result ;
public class Gauge { /** * Defines the position of the knob in radial gauges . This * position also defines where the needle will be placed . * Dependent on the SkinType you can use the following values * GaugeSkin : CENTER * HSkin : TOP _ CENTER , BOTTOM _ CENTER * VSkin : CENTER _ LEFT , CENTER _ RIGHT * QuarterSkin : TOP _ RIGHT , BOTTOM _ RIGHT , BOTTOM _ LEFT , TOP _ LEFT * @ param POSITION */ public void setKnobPosition ( final Pos POSITION ) { } }
if ( null == knobPosition ) { _knobPosition = POSITION ; fireUpdateEvent ( RESIZE_EVENT ) ; } else { knobPosition . set ( POSITION ) ; }
public class DateTimeType {
    /**
     * Creates a new instance by parsing an HL7 v3 format date time string.
     *
     * @param theV3String the raw HL7 v3 date/time text
     * @return a new DateTimeType holding the parsed value
     */
    public static DateTimeType parseV3(String theV3String) {
        DateTimeType retVal = new DateTimeType();
        retVal.setValueAsV3String(theV3String);
        return retVal;
    }
}
public class RuleWrapper { /** * Remove the { @ link TextSymbolizerWrapper } from the ruleWrapper . */ public void removeTextSymbolizersWrapper ( ) { } }
List < SymbolizerWrapper > removeSW = new ArrayList < SymbolizerWrapper > ( ) ; List < Symbolizer > removeS = new ArrayList < Symbolizer > ( ) ; List < Symbolizer > symbolizers = rule . symbolizers ( ) ; for ( SymbolizerWrapper symbolizerWrapper : symbolizersWrapperList ) { if ( symbolizerWrapper . isTextSymbolizer ( ) ) { Symbolizer symbolizer = symbolizerWrapper . getSymbolizer ( ) ; removeSW . add ( symbolizerWrapper ) ; removeS . add ( symbolizer ) ; } } symbolizersWrapperList . removeAll ( removeSW ) ; symbolizers . removeAll ( removeS ) ;
public class CPDefinitionLinkPersistenceImpl { /** * Returns the first cp definition link in the ordered set where CProductId = & # 63 ; and type = & # 63 ; . * @ param CProductId the c product ID * @ param type the type * @ param orderByComparator the comparator to order the set by ( optionally < code > null < / code > ) * @ return the first matching cp definition link , or < code > null < / code > if a matching cp definition link could not be found */ @ Override public CPDefinitionLink fetchByCP_T_First ( long CProductId , String type , OrderByComparator < CPDefinitionLink > orderByComparator ) { } }
List < CPDefinitionLink > list = findByCP_T ( CProductId , type , 0 , 1 , orderByComparator ) ; if ( ! list . isEmpty ( ) ) { return list . get ( 0 ) ; } return null ;
public class SynchronizeFXTomcatChannel {
    /**
     * Disconnects all clients and makes the servlet refuse new connections.
     */
    @Override
    public void shutdown() {
        synchronized (connections) {
            // Deregister from the parent first so no new connections are routed here.
            parent.channelCloses(this);
            for (final MessageInbound connection : connections) {
                try {
                    connection.getWsOutbound().close(0, null);
                } catch (final IOException e) {
                    LOG.error("Connection [" + connection.toString() + "] can't be closed.", e);
                } finally {
                    // Stop and forget the per-connection executor even when close() failed.
                    final ExecutorService executorService = connectionThreads.get(connection);
                    if (executorService != null) {
                        executorService.shutdown();
                    }
                    connectionThreads.remove(connection);
                }
            }
            connections.clear();
        }
        // Drop the callback outside the lock.
        callback = null;
    }
}
public class HttpUtils {
    /**
     * Encodes a partially encoded string. Encodes all values except spans already matching
     * the pattern "percent char followed by two hexadecimal digits", which are copied through.
     *
     * @param encoded fully or partially encoded string
     * @param query whether to apply query encoding (otherwise path encoding)
     * @return fully encoded string
     */
    public static String encodePartiallyEncoded(String encoded, boolean query) {
        if (encoded.length() == 0) {
            return encoded;
        }
        Matcher m = ENCODE_PATTERN.matcher(encoded);
        if (!m.find()) {
            // No pre-encoded spans at all: encode the whole string in one pass.
            return query ? HttpUtils.queryEncode(encoded) : HttpUtils.pathEncode(encoded);
        }
        int length = encoded.length();
        StringBuilder sb = new StringBuilder(length + 8);
        int i = 0;
        do {
            // Encode the raw text before the %XX match, then append the match untouched.
            String before = encoded.substring(i, m.start());
            sb.append(query ? HttpUtils.queryEncode(before) : HttpUtils.pathEncode(before));
            sb.append(m.group());
            i = m.end();
        } while (m.find());
        // Encode whatever trails the last pre-encoded span.
        String tail = encoded.substring(i, length);
        sb.append(query ? HttpUtils.queryEncode(tail) : HttpUtils.pathEncode(tail));
        return sb.toString();
    }
}
public class Collections { /** * Returns an unmodifiable view of the specified map . This method * allows modules to provide users with " read - only " access to internal * maps . Query operations on the returned map " read through " * to the specified map , and attempts to modify the returned * map , whether direct or via its collection views , result in an * < tt > UnsupportedOperationException < / tt > . < p > * The returned map will be serializable if the specified map * is serializable . * @ param < K > the class of the map keys * @ param < V > the class of the map values * @ param m the map for which an unmodifiable view is to be returned . * @ return an unmodifiable view of the specified map . */ public static < K , V > Map < K , V > unmodifiableMap ( Map < ? extends K , ? extends V > m ) { } }
return new UnmodifiableMap < > ( m ) ;
public class LibertyTracePreprocessInstrumentation { /** * Process the class and look for hard - coded entry / exit trace points . * Methods with hard - coded trace points will not be instrumented and * a warning will be issued . * @ param info the collected class information */ @ SuppressWarnings ( "unchecked" ) private void processManuallyTracedMethods ( ClassTraceInfo info ) { } }
for ( MethodNode mn : ( List < MethodNode > ) info . classNode . methods ) { // Don ' t re - process methods that have already had trace injected if ( isMethodAlreadyInjectedAnnotationPresent ( mn ) ) { continue ; } // Look through the method ' s instruction stream for well known entry / exit methods Iterator < ? extends AbstractInsnNode > instructionIterator = mn . instructions . iterator ( ) ; while ( instructionIterator . hasNext ( ) ) { AbstractInsnNode insnNode = instructionIterator . next ( ) ; // Look for calls to Tr . entry , Tr . exit , Logger . entering , Logger . exiting boolean manuallyTraced = false ; if ( insnNode . getType ( ) == AbstractInsnNode . METHOD_INSN ) { MethodInsnNode methodInsn = ( MethodInsnNode ) insnNode ; String methodName = methodInsn . name ; if ( methodInsn . owner . equals ( LOGGER_TYPE . getInternalName ( ) ) ) { manuallyTraced = ( methodName . equals ( "entering" ) || methodName . equals ( "exiting" ) ) ; } else if ( methodInsn . owner . equals ( LIBERTY_TR_TYPE . getInternalName ( ) ) ) { manuallyTraced = ( methodName . equals ( "entry" ) || methodName . equals ( "exit" ) ) ; } else if ( methodInsn . owner . equals ( WEBSPHERE_TR_TYPE . getInternalName ( ) ) ) { manuallyTraced = ( methodName . equals ( "entry" ) || methodName . equals ( "exit" ) ) ; } } // Mark the manually traced method , and create a warning if ( manuallyTraced ) { mn . visitAnnotation ( MANUAL_TRACE_TYPE . getDescriptor ( ) , true ) . visitEnd ( ) ; StringBuilder sb = new StringBuilder ( ) ; sb . append ( "WARNING: Hard coded entry/exit trace point found in " ) ; sb . append ( info . classNode . name . replaceAll ( "/" , "\\." ) ) . append ( "." ) . append ( mn . name ) . append ( mn . desc ) ; sb . append ( ". Skipping method." ) ; info . warnings . add ( sb . toString ( ) ) ; break ; } } }
public class ServletAuthenticationCallHandler {
    /**
     * Only allow the request through if successfully authenticated or if authentication is not required.
     *
     * @see io.undertow.server.HttpHandler#handleRequest(io.undertow.server.HttpServerExchange)
     */
    @Override
    public void handleRequest(final HttpServerExchange exchange) throws Exception {
        // Authentication may block, so move off the IO thread before attempting it.
        if (exchange.isInIoThread()) {
            exchange.dispatch(this);
            return;
        }
        SecurityContext context = exchange.getSecurityContext();
        if (context.authenticate()) {
            // Authenticated (or auth not required): continue down the chain
            // unless the exchange was already completed during authentication.
            if (!exchange.isComplete()) {
                next.handleRequest(exchange);
            }
        } else {
            // Authentication failed. If an error status (>= 400) was set and the
            // response is still open, render the servlet error page for it;
            // otherwise just end the exchange.
            if (exchange.getStatusCode() >= StatusCodes.BAD_REQUEST && !exchange.isComplete()) {
                ServletRequestContext src = exchange.getAttachment(ServletRequestContext.ATTACHMENT_KEY);
                src.getOriginalResponse().sendError(exchange.getStatusCode());
            } else {
                exchange.endExchange();
            }
        }
    }
}
public class ManagedInstanceKeysInner {
    /**
     * Gets a single page of managed instance keys.
     *
     * @param resourceGroupName The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
     * @param managedInstanceName The name of the managed instance.
     * @param filter An OData filter expression that filters elements in the collection.
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @return the PagedList&lt;ManagedInstanceKeyInner&gt; object wrapped in {@link ServiceResponse} if successful.
     */
    public Observable<ServiceResponse<Page<ManagedInstanceKeyInner>>> listByInstanceSinglePageAsync(final String resourceGroupName, final String managedInstanceName, final String filter) {
        // Fail fast on missing required parameters before issuing the request.
        if (resourceGroupName == null) {
            throw new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null.");
        }
        if (managedInstanceName == null) {
            throw new IllegalArgumentException("Parameter managedInstanceName is required and cannot be null.");
        }
        if (this.client.subscriptionId() == null) {
            throw new IllegalArgumentException("Parameter this.client.subscriptionId() is required and cannot be null.");
        }
        if (this.client.apiVersion() == null) {
            throw new IllegalArgumentException("Parameter this.client.apiVersion() is required and cannot be null.");
        }
        // Issue the REST call and adapt the raw HTTP response into a typed page.
        return service.listByInstance(resourceGroupName, managedInstanceName, this.client.subscriptionId(), filter, this.client.apiVersion(), this.client.acceptLanguage(), this.client.userAgent()).flatMap(new Func1<Response<ResponseBody>, Observable<ServiceResponse<Page<ManagedInstanceKeyInner>>>>() {
            @Override
            public Observable<ServiceResponse<Page<ManagedInstanceKeyInner>>> call(Response<ResponseBody> response) {
                try {
                    // Delegate response parsing; any parse/service error is
                    // surfaced as an Observable error rather than thrown.
                    ServiceResponse<PageImpl<ManagedInstanceKeyInner>> result = listByInstanceDelegate(response);
                    return Observable.just(new ServiceResponse<Page<ManagedInstanceKeyInner>>(result.body(), result.response()));
                } catch (Throwable t) {
                    return Observable.error(t);
                }
            }
        });
    }
}
public class PmiRegistry { /** * get instrumenation level based on the path during runtime */ public static int getInstrumentationLevel ( String [ ] path ) { } }
if ( disabled ) return LEVEL_UNDEFINED ; DataDescriptor dd = new DataDescriptor ( path ) ; ModuleItem item = findModuleItem ( dd ) ; if ( item == null ) { // wrong moduleName return LEVEL_UNDEFINED ; } else { return item . getInstance ( ) . getInstrumentationLevel ( ) ; }
public class XMemcachedClient { /** * ( non - Javadoc ) * @ see net . rubyeye . xmemcached . MemcachedClient # get ( java . util . Collection , long , * net . rubyeye . xmemcached . transcoders . Transcoder ) */ public final < T > Map < String , T > get ( final Collection < String > keyCollections , final long timeout , final Transcoder < T > transcoder ) throws TimeoutException , InterruptedException , MemcachedException { } }
return this . getMulti0 ( keyCollections , timeout , CommandType . GET_MANY , transcoder ) ;
public class CommPortIdentifier { /** * by implementing its own linked list functionallity */ private static void AddIdentifierToList ( CommPortIdentifier cpi ) { } }
LOGGER . fine ( "CommPortIdentifier:AddIdentifierToList()" ) ; synchronized ( Sync ) { if ( CommPortIndex == null ) { CommPortIndex = cpi ; LOGGER . fine ( "CommPortIdentifier:AddIdentifierToList() null" ) ; } else { CommPortIdentifier index = CommPortIndex ; while ( index . next != null ) { index = index . next ; LOGGER . fine ( "CommPortIdentifier:AddIdentifierToList() index.next" ) ; } index . next = cpi ; } }
public class LifecycleHooks { /** * Set the value of the specified field of the supplied object . * @ param target target object * @ param name field name * @ param value value to set in the specified field of the supplied object * @ throws IllegalAccessException if the { @ code Field } object is enforcing access control for an inaccessible field * @ throws NoSuchFieldException if a field with the specified name is not found * @ throws SecurityException if the request is denied */ static void setFieldValue ( Object target , String name , Object value ) throws IllegalAccessException , NoSuchFieldException , SecurityException { } }
Field field = getDeclaredField ( target , name ) ; field . setAccessible ( true ) ; field . set ( target , value ) ;
public class Drawer { /** * Sets the toolbar which should be used in combination with the drawer * This will handle the ActionBarDrawerToggle for you . * Do not set this if you are in a sub activity and want to handle the back arrow on your own * @ param activity * @ param toolbar the toolbar which is used in combination with the drawer * @ param recreateActionBarDrawerToggle defines if the ActionBarDrawerToggle needs to be recreated with the new set Toolbar */ public void setToolbar ( @ NonNull Activity activity , @ NonNull Toolbar toolbar , boolean recreateActionBarDrawerToggle ) { } }
this . mDrawerBuilder . mToolbar = toolbar ; this . mDrawerBuilder . handleDrawerNavigation ( activity , recreateActionBarDrawerToggle ) ;
public class G1Function { /** * { @ inheritDoc } */ protected double getOldCentroidScore ( DoubleVector vector , int oldCentroidIndex , int altClusterSize ) { } }
double newScore = simToComplete [ oldCentroidIndex ] ; newScore -= VectorMath . dotProduct ( completeCentroid , vector ) ; newScore /= subtractedMagnitudeSqrd ( centroids [ oldCentroidIndex ] , vector ) ; return newScore ;
public class CmsAutoSetupProperties { /** * Converts and returns this object as map . < p > * @ return this object as map */ public Map < String , String [ ] > toParameterMap ( ) { } }
Map < String , String [ ] > result = new HashMap < String , String [ ] > ( ) ; result . put ( "dbCreateConStr" , new String [ ] { getConnectionUrl ( ) } ) ; result . put ( "dbName" , new String [ ] { getDbName ( ) } ) ; result . put ( "dbProduct" , new String [ ] { getDbProduct ( ) } ) ; result . put ( "dbProvider" , new String [ ] { getDbProvider ( ) } ) ; result . put ( "dbName" , new String [ ] { getDbName ( ) } ) ; result . put ( "db" , new String [ ] { getDbName ( ) } ) ; result . put ( "createDb" , new String [ ] { Boolean . toString ( isCreateDb ( ) ) } ) ; result . put ( "createTables" , new String [ ] { Boolean . toString ( isCreateTables ( ) ) } ) ; result . put ( "jdbcDriver" , new String [ ] { getJdbcDriver ( ) } ) ; result . put ( "templateDb" , new String [ ] { getTemplateDb ( ) } ) ; result . put ( "dbCreateUser" , new String [ ] { getCreateUser ( ) } ) ; result . put ( "dbCreatePwd" , new String [ ] { getCreatePwd ( ) == null ? "" : getCreatePwd ( ) } ) ; result . put ( "dbWorkUser" , new String [ ] { getWorkerUser ( ) } ) ; result . put ( "dbWorkPwd" , new String [ ] { getWorkerPwd ( ) == null ? "" : getWorkerPwd ( ) } ) ; result . put ( "dbDefaultTablespace" , new String [ ] { getDefaultTablespace ( ) } ) ; result . put ( "dbTemporaryTablespace" , new String [ ] { getTemporaryTablespace ( ) } ) ; result . put ( "dbIndexTablespace" , new String [ ] { getIndexTablespace ( ) } ) ; result . put ( "dropDb" , new String [ ] { Boolean . toString ( isDropDb ( ) ) } ) ; result . put ( "servletMapping" , new String [ ] { getServeltMapping ( ) } ) ; result . put ( "submit" , new String [ ] { Boolean . TRUE . toString ( ) } ) ; return result ;
public class PortalHttpServletRequestWrapper { /** * / * ( non - Javadoc ) * @ see org . apereo . portal . url . AbstractHttpServletRequestWrapper # getLocale ( ) */ @ Override public Locale getLocale ( ) { } }
if ( super . getSession ( false ) == null ) { return super . getLocale ( ) ; } final IUserInstance userInstance = this . userInstanceManager . getUserInstance ( this . getWrappedRequest ( ) ) ; final LocaleManager localeManager = userInstance . getLocaleManager ( ) ; final List < Locale > locales = localeManager . getLocales ( ) ; return locales . get ( 0 ) ;
public class vpnvserver_authenticationtacacspolicy_binding { /** * Use this API to fetch vpnvserver _ authenticationtacacspolicy _ binding resources of given name . */ public static vpnvserver_authenticationtacacspolicy_binding [ ] get ( nitro_service service , String name ) throws Exception { } }
vpnvserver_authenticationtacacspolicy_binding obj = new vpnvserver_authenticationtacacspolicy_binding ( ) ; obj . set_name ( name ) ; vpnvserver_authenticationtacacspolicy_binding response [ ] = ( vpnvserver_authenticationtacacspolicy_binding [ ] ) obj . get_resources ( service ) ; return response ;
public class Project { /** * Add a working directory to the project . * @ param dirName * the directory to add * @ return true if the working directory was added , or false if the working * directory was already present */ public boolean addWorkingDir ( String dirName ) { } }
if ( dirName == null ) { throw new NullPointerException ( ) ; } return addToListInternal ( currentWorkingDirectoryList , new File ( dirName ) ) ;
public class JsonDataProviderImpl {
    /**
     * Gets JSON data from a resource for the specified indexes.
     *
     * @param indexes The set of indexes to be fetched from the JSON file;
     *                must not be empty. Values appear to be 1-based (each is
     *                decremented before array access) — confirm against callers.
     */
    @Override
    public Object[][] getDataByIndex(int[] indexes) {
        validateResourceParams(resource);
        Preconditions.checkArgument((indexes.length != 0), "Indexes cannot be empty");
        logger.entering(indexes);
        Object[][] requestedData = null;
        Class<?> arrayType;
        JsonReader reader = null;
        try {
            // One row per requested index, each row holding a single mapped object.
            requestedData = new Object[indexes.length][1];
            reader = new JsonReader(getReader(resource));
            // The JSON document is deserialized as an array of the resource's declared class.
            arrayType = Array.newInstance(resource.getCls(), 0).getClass();
            logger.log(Level.FINE, "The Json Data is mapped as", arrayType);
            Object[][] mappedData = mapJsonData(reader, arrayType);
            int i = 0;
            for (int indexVal : indexes) {
                // Convert the caller-supplied index to a 0-based array position.
                indexVal--;
                requestedData[i] = mappedData[indexVal];
                i++;
            }
        } catch (IOException e) {
            throw new DataProviderException("Error while getting the data by index from Json file", e);
        } finally {
            // Always release the reader, even when mapping fails.
            IOUtils.closeQuietly(reader);
        }
        logger.exiting((Object[]) requestedData);
        return requestedData;
    }
}
public class WorkflowTriggersInner {
    /**
     * Get the trigger schema as JSON.
     *
     * @param resourceGroupName The resource group name.
     * @param workflowName The workflow name.
     * @param triggerName The workflow trigger name.
     * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @return the {@link ServiceFuture} object
     */
    public ServiceFuture<JsonSchemaInner> getSchemaJsonAsync(String resourceGroupName, String workflowName, String triggerName, final ServiceCallback<JsonSchemaInner> serviceCallback) {
        // Adapt the observable-based overload to the callback-based ServiceFuture API.
        return ServiceFuture.fromResponse(getSchemaJsonWithServiceResponseAsync(resourceGroupName, workflowName, triggerName), serviceCallback);
    }
}
public class TagHandlerPool { /** * free a tag for reusing * @ param tag * @ throws ExpressionException */ public void reuse ( Tag tag ) { } }
tag . release ( ) ; Queue < Tag > queue = getQueue ( tag . getClass ( ) . getName ( ) ) ; queue . add ( tag ) ;
public class InstanceClient { /** * Starts an instance that was stopped using the instances ( ) . stop method . For more information , * see Restart an instance . * < p > Sample code : * < pre > < code > * try ( InstanceClient instanceClient = InstanceClient . create ( ) ) { * ProjectZoneInstanceName instance = ProjectZoneInstanceName . of ( " [ PROJECT ] " , " [ ZONE ] " , " [ INSTANCE ] " ) ; * Operation response = instanceClient . startInstance ( instance ) ; * < / code > < / pre > * @ param instance Name of the instance resource to start . * @ throws com . google . api . gax . rpc . ApiException if the remote call fails */ @ BetaApi public final Operation startInstance ( ProjectZoneInstanceName instance ) { } }
StartInstanceHttpRequest request = StartInstanceHttpRequest . newBuilder ( ) . setInstance ( instance == null ? null : instance . toString ( ) ) . build ( ) ; return startInstance ( request ) ;
public class AWSStorageGatewayClient { /** * Returns the bandwidth rate limits of a gateway . By default , these limits are not set , which means no bandwidth * rate limiting is in effect . * This operation only returns a value for a bandwidth rate limit only if the limit is set . If no limits are set for * the gateway , then this operation returns only the gateway ARN in the response body . To specify which gateway to * describe , use the Amazon Resource Name ( ARN ) of the gateway in your request . * @ param describeBandwidthRateLimitRequest * A JSON object containing the of the gateway . * @ return Result of the DescribeBandwidthRateLimit operation returned by the service . * @ throws InvalidGatewayRequestException * An exception occurred because an invalid gateway request was issued to the service . For more information , * see the error and message fields . * @ throws InternalServerErrorException * An internal server error has occurred during the request . For more information , see the error and message * fields . * @ sample AWSStorageGateway . DescribeBandwidthRateLimit * @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / storagegateway - 2013-06-30 / DescribeBandwidthRateLimit " * target = " _ top " > AWS API Documentation < / a > */ @ Override public DescribeBandwidthRateLimitResult describeBandwidthRateLimit ( DescribeBandwidthRateLimitRequest request ) { } }
request = beforeClientExecution ( request ) ; return executeDescribeBandwidthRateLimit ( request ) ;
public class VdmDebugConsole { /** * Notify listeners when name changes . * @ see org . eclipse . debug . core . IDebugEventSetListener # handleDebugEvents ( org . eclipse . debug . core . DebugEvent [ ] ) */ public void handleDebugEvents ( DebugEvent [ ] events ) { } }
for ( int i = 0 ; i < events . length ; i ++ ) { DebugEvent event = events [ i ] ; if ( event . getSource ( ) . equals ( getProcess ( ) ) ) { if ( event . getKind ( ) == DebugEvent . TERMINATE ) { closeStreams ( ) ; DebugPlugin . getDefault ( ) . removeDebugEventListener ( this ) ; } resetName ( ) ; } }
public class CachingCertificateVerifier { /** * This implementation will forward the given certificate to the delegate * provided in the constructor , and cache the delegate ' s response . * On every subsequent invocation with the same certificate , the initial * response from the delegate will be returned . * @ param cert * the certificate to verify . * @ return the result of calling < tt > verify < / tt > on the delegate * < tt > CertificateVerifier < / tt > . */ @ Override public synchronized boolean verify ( final X509Certificate cert ) { } }
if ( verificationAnswers . containsKey ( cert ) ) { return verificationAnswers . get ( cert ) ; } else { boolean answer = delegate . verify ( cert ) ; verificationAnswers . put ( cert , answer ) ; return answer ; }
public class QueueFile { /** * Stores an { @ code long } in the { @ code byte [ ] } . The behavior is equivalent to calling * { @ link RandomAccessFile # writeLong } . */ private static void writeLong ( byte [ ] buffer , int offset , long value ) { } }
buffer [ offset ] = ( byte ) ( value >> 56 ) ; buffer [ offset + 1 ] = ( byte ) ( value >> 48 ) ; buffer [ offset + 2 ] = ( byte ) ( value >> 40 ) ; buffer [ offset + 3 ] = ( byte ) ( value >> 32 ) ; buffer [ offset + 4 ] = ( byte ) ( value >> 24 ) ; buffer [ offset + 5 ] = ( byte ) ( value >> 16 ) ; buffer [ offset + 6 ] = ( byte ) ( value >> 8 ) ; buffer [ offset + 7 ] = ( byte ) value ;