signature
stringlengths
43
39.1k
implementation
stringlengths
0
450k
public class MultiLineLabel { /** * Sets the text displayed by this label . */ public void setText ( String text ) { } }
if ( _label . setText ( text ) ) { _dirty = true ; // clear out our constrained size where appropriate if ( _constrain == HORIZONTAL || _constrain == VERTICAL ) { _constrainedSize = 0 ; _label . clearTargetDimens ( ) ; } revalidate ( ) ; repaint ( ) ; }
public class ProtobufUtil { /** * Returns configured protobuf message class for the given Kafka topic * @ param topic * Kafka topic * @ return protobuf message class used by this utility instance , or * < code > null < / code > in case valid class couldn ' t be found in the * configuration . */ public Class < ? extends Message > getMessageClass ( String topic ) { } }
return allTopics ? messageClassForAll : messageClassByTopic . get ( topic ) ;
public class MpTerm { /** * Start a new Term . This starts watching followers via ZK . Block on an * appropriate repair algorithm to watch final promotion to leader . */ @ Override public void start ( ) { } }
try { m_leaderCache = new LeaderCache ( m_zk , "MpTerm-iv2masters" , VoltZK . iv2masters , m_leadersChangeHandler ) ; m_leaderCache . start ( true ) ; } catch ( ExecutionException ee ) { VoltDB . crashLocalVoltDB ( "Unable to create babysitter starting term." , true , ee ) ; } catch ( InterruptedException e ) { VoltDB . crashLocalVoltDB ( "Unable to create babysitter starting term." , true , e ) ; }
public class DateCaster { /** * converts a Object to a Time Object , returns null if invalid string * @ param o Object to Convert * @ return coverted Date Time Object * @ throws PageException */ public static Time toTime ( TimeZone timeZone , Object o ) throws PageException { } }
if ( o instanceof Time ) return ( Time ) o ; else if ( o instanceof Date ) return new TimeImpl ( ( Date ) o ) ; else if ( o instanceof Castable ) return new TimeImpl ( ( ( Castable ) o ) . castToDateTime ( ) ) ; else if ( o instanceof String ) { Time dt = toTime ( timeZone , o . toString ( ) , null ) ; if ( dt == null ) throw new ExpressionException ( "can't cast [" + o + "] to time value" ) ; return dt ; } else if ( o instanceof ObjectWrap ) return toTime ( timeZone , ( ( ObjectWrap ) o ) . getEmbededObject ( ) ) ; else if ( o instanceof Calendar ) { // TODO check timezone offset return new TimeImpl ( ( ( Calendar ) o ) . getTimeInMillis ( ) , false ) ; } throw new ExpressionException ( "can't cast [" + Caster . toClassName ( o ) + "] to time value" ) ;
public class GeneratedDi18nDaoImpl { /** * query - by method for field baseBundle * @ param baseBundle the specified attribute * @ return an Iterable of Di18ns for the specified baseBundle */ public Iterable < Di18n > queryByBaseBundle ( java . lang . String baseBundle ) { } }
return queryByField ( null , Di18nMapper . Field . BASEBUNDLE . getFieldName ( ) , baseBundle ) ;
public class FileUtil { /** * Copy the source file system structure into the supplied target location . If the source is a file , the destination will be * created as a file ; if the source is a directory , the destination will be created as a directory . * @ param sourceFileOrDirectory the file or directory whose contents are to be copied into the target location * @ param destinationFileOrDirectory the location where the copy is to be placed ; does not need to exist , but if it does its * type must match that of < code > src < / code > * @ param exclusionFilter a filter that matches files or folders that should _ not _ be copied ; null indicates that all files * and folders should be copied * @ return the number of files ( not directories ) that were copied * @ throws IllegalArgumentException if the < code > src < / code > or < code > dest < / code > references are null * @ throws IOException */ public static int copy ( File sourceFileOrDirectory , File destinationFileOrDirectory , FilenameFilter exclusionFilter ) throws IOException { } }
if ( exclusionFilter == null ) exclusionFilter = ACCEPT_ALL ; int numberOfFilesCopied = 0 ; if ( sourceFileOrDirectory . isDirectory ( ) ) { destinationFileOrDirectory . mkdirs ( ) ; String list [ ] = sourceFileOrDirectory . list ( exclusionFilter ) ; for ( int i = 0 ; i < list . length ; i ++ ) { String dest1 = destinationFileOrDirectory . getPath ( ) + File . separator + list [ i ] ; String src1 = sourceFileOrDirectory . getPath ( ) + File . separator + list [ i ] ; numberOfFilesCopied += copy ( new File ( src1 ) , new File ( dest1 ) , exclusionFilter ) ; } } else { try ( FileInputStream fis = new FileInputStream ( sourceFileOrDirectory ) ; BufferedInputStream bis = new BufferedInputStream ( fis ) ; FileOutputStream fos = new FileOutputStream ( destinationFileOrDirectory ) ; BufferedOutputStream bos = new BufferedOutputStream ( fos ) ) { int c ; while ( ( c = bis . read ( ) ) >= 0 ) { bos . write ( c ) ; } } numberOfFilesCopied ++ ; } return numberOfFilesCopied ;
public class XObject { /** * Tell if one object is less than the other . * @ param obj2 Object to compare this to * @ return True if this object is less than the given object * @ throws javax . xml . transform . TransformerException */ public boolean lessThan ( XObject obj2 ) throws javax . xml . transform . TransformerException { } }
// In order to handle the ' all ' semantics of // nodeset comparisons , we always call the // nodeset function . Because the arguments // are backwards , we call the opposite comparison // function . if ( obj2 . getType ( ) == XObject . CLASS_NODESET ) return obj2 . greaterThan ( this ) ; return this . num ( ) < obj2 . num ( ) ;
public class ModifyCacheClusterRequest { /** * A list of cache node IDs to be removed . A node ID is a numeric identifier ( 0001 , 0002 , etc . ) . This parameter is * only valid when < code > NumCacheNodes < / code > is less than the existing number of cache nodes . The number of cache * node IDs supplied in this parameter must match the difference between the existing number of cache nodes in the * cluster or pending cache nodes , whichever is greater , and the value of < code > NumCacheNodes < / code > in the request . * For example : If you have 3 active cache nodes , 7 pending cache nodes , and the number of cache nodes in this * < code > ModifyCacheCluster < / code > call is 5 , you must list 2 ( 7 - 5 ) cache node IDs to remove . * @ return A list of cache node IDs to be removed . A node ID is a numeric identifier ( 0001 , 0002 , etc . ) . This * parameter is only valid when < code > NumCacheNodes < / code > is less than the existing number of cache nodes . * The number of cache node IDs supplied in this parameter must match the difference between the existing * number of cache nodes in the cluster or pending cache nodes , whichever is greater , and the value of * < code > NumCacheNodes < / code > in the request . < / p > * For example : If you have 3 active cache nodes , 7 pending cache nodes , and the number of cache nodes in * this < code > ModifyCacheCluster < / code > call is 5 , you must list 2 ( 7 - 5 ) cache node IDs to remove . */ public java . util . List < String > getCacheNodeIdsToRemove ( ) { } }
if ( cacheNodeIdsToRemove == null ) { cacheNodeIdsToRemove = new com . amazonaws . internal . SdkInternalList < String > ( ) ; } return cacheNodeIdsToRemove ;
public class BitArray { /** * Sets a range of bits . * @ param start start of range , inclusive . * @ param end end of range , exclusive */ public void setRange ( int start , int end ) { } }
if ( end < start || start < 0 || end > size ) { throw new IllegalArgumentException ( ) ; } if ( end == start ) { return ; } end -- ; // will be easier to treat this as the last actually set bit - - inclusive int firstInt = start / 32 ; int lastInt = end / 32 ; for ( int i = firstInt ; i <= lastInt ; i ++ ) { int firstBit = i > firstInt ? 0 : start & 0x1F ; int lastBit = i < lastInt ? 31 : end & 0x1F ; // Ones from firstBit to lastBit , inclusive int mask = ( 2 << lastBit ) - ( 1 << firstBit ) ; bits [ i ] |= mask ; }
public class ComboButtonBox { /** * Notifies our listeners when the selection changed . */ protected void fireActionPerformed ( ) { } }
// guaranteed to return a non - null array Object [ ] listeners = listenerList . getListenerList ( ) ; // process the listeners last to first , notifying those that are // interested in this event for ( int i = listeners . length - 2 ; i >= 0 ; i -= 2 ) { if ( listeners [ i ] == ActionListener . class ) { // lazily create the event : if ( _actionEvent == null ) { _actionEvent = new ActionEvent ( this , ActionEvent . ACTION_PERFORMED , _actionCommand ) ; } ( ( ActionListener ) listeners [ i + 1 ] ) . actionPerformed ( _actionEvent ) ; } }
public class TraceActivityMonitor { /** Opens a trace span for the activity: resumes the tracer's current span when one exists, otherwise joins/forks from incoming B3 "x-b3-traceid" request headers, otherwise starts a fresh root span for an async start; the resulting SpanInScope is stashed in the runtime attributes for later closing. */ private void startSpan ( ActivityRuntimeContext context ) { } }
// NOTE(review): resolution order is (1) tracer.currentSpan(), (2) extract/join from request headers (header keys matched case-insensitively via toLowerCase), (3) new root span named "a" + activityInstanceId. The header path creates a child span named "p" + processInstanceId under a server span named "m" + masterRequestId.
Tracing tracing = TraceHelper . getTracing ( "mdw-activity" ) ; Tracer tracer = tracing . tracer ( ) ; Span span = tracer . currentSpan ( ) ; if ( span == null ) { // async brave server if b3 requestHeaders populated ( subspan ) Object requestHeaders = context . getValue ( "requestHeaders" ) ; if ( requestHeaders instanceof Map ) { Map < ? , ? > headers = ( Map < ? , ? > ) requestHeaders ; if ( headers . containsKey ( "x-b3-traceid" ) ) { TraceContext . Extractor < Map < ? , ? > > extractor = tracing . propagation ( ) . extractor ( ( map , key ) -> { Object val = map . get ( key . toLowerCase ( ) ) ; return val == null ? null : val . toString ( ) ; } ) ; TraceContextOrSamplingFlags extracted = extractor . extract ( headers ) ; span = extracted . context ( ) != null ? tracer . joinSpan ( extracted . context ( ) ) : tracer . nextSpan ( extracted ) ; span . name ( "m" + context . getMasterRequestId ( ) ) . kind ( Span . Kind . SERVER ) ; span = tracer . newChild ( span . context ( ) ) . name ( "p" + context . getProcessInstanceId ( ) ) ; } } } if ( span == null ) { // create a new root span for async start span = tracer . nextSpan ( ) . name ( "a" + context . getActivityInstanceId ( ) ) ; } span . start ( ) . flush ( ) ; // async Tracer . SpanInScope spanInScope = tracer . withSpanInScope ( span ) ; context . getRuntimeAttributes ( ) . put ( Tracer . SpanInScope . class , spanInScope ) ;
public class ConvertCode { /** * Move the files in this directory to the new directory . */ public void moveThisFile ( File file ) { } }
String strDestName = file . getName ( ) ; boolean bMoveFile = true ; String extension = this . getProperty ( EXTENSION ) ; String filter = this . getProperty ( FILTER ) ; if ( extension != null ) { bMoveFile = false ; int iJava = strDestName . lastIndexOf ( extension ) ; if ( iJava != - 1 ) if ( iJava == strDestName . length ( ) - extension . length ( ) ) bMoveFile = true ; } if ( filter != null ) { bMoveFile = strDestName . matches ( filter ) ; } if ( ( bMoveFile ) && ( this . getScanListener ( ) != null ) ) bMoveFile = this . getScanListener ( ) . filterFile ( file ) ; if ( bMoveFile ) { String strParentSource = file . getParent ( ) ; this . moveThisFile ( file , strDestName , strParentSource ) ; }
public class ValidationProcessor { /** Generates the Java source snippet that performs one constraint check on a field. Special-cases @ValidateWithMethod (direct method invocation), @EqualToField and @NotEqualToField (direct field comparison; getter mode falls through); every other check type falls back to the generic isSatisfied(...) invocation. The returned string is emitted verbatim into generated validator code. */ static String generateValidation ( final Annotation annotation , final String checkType , final String checkName , final String fieldName ) { } }
// NOTE(review): the string literals below are code templates — whitespace and "\n" escapes inside them are significant; do not reflow.
if ( VALIDATE_WITH_METHOD_CHECK . equals ( checkType ) ) { // Special Case : Unwrap the check method and code generate its invocation final ValidateWithMethod validateWithMethod = ( ValidateWithMethod ) annotation ; return "if (!(" + fieldName + " == null && " + checkName + ".isIgnoreIfNull()) && !" + validateWithMethod . methodName ( ) + "(" + fieldName + ")) {\n" + "violations.add(new net.sf.oval.ConstraintViolation(" + checkName + ", " + checkName + ".getMessage(), this, " + fieldName + ", " + checkName + "_CONTEXT));\n" + "}\n" ; } else if ( FIELDS_EQUAL_CHECK . equals ( checkType ) ) { // Special Case : Unwrap the other field reference and code generate its comparison // NOTE : This does not support getter usage . final EqualToField equalTo = ( EqualToField ) annotation ; if ( ! equalTo . useGetter ( ) ) { return "if (" + fieldName + " != null && (" + equalTo . value ( ) + " == null " + "|| !" + fieldName + ".equals(" + equalTo . value ( ) + "))) {\n" + "violations.add(new net.sf.oval.ConstraintViolation(" + checkName + ", " + checkName + ".getMessage(), this, " + fieldName + ", " + checkName + "_CONTEXT));\n" + "}\n" ; } } else if ( FIELDS_NOT_EQUAL_CHECK . equals ( checkType ) ) { // Special Case : Unwrap the other field reference and code generate its comparison // NOTE : This does not support getter usage . final NotEqualToField notEqualTo = ( NotEqualToField ) annotation ; if ( ! notEqualTo . useGetter ( ) ) { return "if (" + fieldName + " != null && " + notEqualTo . value ( ) + " != null " + "&& " + fieldName + ".equals(" + notEqualTo . value ( ) + ")) {\n" + "violations.add(new net.sf.oval.ConstraintViolation(" + checkName + ", " + checkName + ".getMessage(), this, " + fieldName + ", " + checkName + "_CONTEXT));\n" + "}\n" ; } } return "if (!" 
+ checkName + ".isSatisfied(this, " + fieldName + ", null, null)) {\n" + "violations.add(new net.sf.oval.ConstraintViolation(" + checkName + ", " + checkName + ".getMessage(), this, " + fieldName + ", " + checkName + "_CONTEXT));\n" + "}\n" ;
public class RouteBuilder { /** * Specifies that this route is mapped to HTTP DELETE method . * @ return instance of { @ link RouteBuilder } . */ public RouteBuilder delete ( ) { } }
if ( ! methods . contains ( HttpMethod . DELETE ) ) { methods . add ( HttpMethod . DELETE ) ; } return this ;
public class GenUtil { /** * " Boxes " the supplied argument , ie . turning an < code > int < / code > into an < code > Integer < / code > * object . */ public static String boxASArgument ( Class < ? > clazz , String name ) { } }
return boxASArgumentAndGatherImports ( clazz , name , new ImportSet ( ) ) ;
public class DBPIDGenerator { /** * Gets the highest id ever used for the given namespace . */ private int getHighestID ( String namespace ) { } }
Integer i = ( Integer ) m_highestID . get ( namespace ) ; if ( i == null ) { return 0 ; } return i . intValue ( ) ;
public class CompressPacketOutputStream { /** * Flush the internal buffer . * < p > Compression add a 7 header : < / p > * < ol > * < li > 3 byte compression length < / li > * < li > 1 byte compress sequence number < / li > * < li > 3 bytes uncompress length < / li > * < / ol > * < p > in case packet isn ' t compressed ( last 3 bytes = = 0 ) : < / p > * < ol > * < li > 3 byte uncompress length < / li > * < li > 1 byte compress sequence number < / li > * < li > 3 bytes with 0 value < / li > * < / ol > * < p > Content correspond to standard content . < / p > * < ol > * < li > 3 byte length < / li > * < li > 1 byte sequence number ( ! = than compress sequence number ) < / li > * < li > sub - content < / li > * < / ol > * < p > Problem is when standard content is bigger than 16mb : content will not send 4byte standard * header + 16mb content , since packet are limited to 16mb then 4 bytes standard header + 16mb - 4 * bytes content . the ending 4 bytes are waiting to be send . next packet will then send the * waiting data before next packet , putting more waiting data is needed . if ending data is exactly * MAX _ PACKET _ LENGTH length , then an empty packet must be send . < / p > * @ param commandEnd command end * @ throws IOException id connection error occur . */ protected void flushBuffer ( boolean commandEnd ) throws IOException { } }
// Overview: part 1 (pos > 0) sends the buffered payload, compressed when it exceeds MIN_COMPRESSION_SIZE and the deflated size beats MIN_COMPRESSION_RATIO, else uncompressed; part 2 drains any leftover remainingData the same way.
Tracing of intent only; see review notes interleaved below.
if ( pos > 0 ) { if ( pos + remainingData . length > MIN_COMPRESSION_SIZE ) { byte [ ] compressedBytes ; int uncompressSize = Math . min ( MAX_PACKET_LENGTH , remainingData . length + 4 + pos ) ; checkMaxAllowedLength ( uncompressSize ) ; try ( ByteArrayOutputStream baos = new ByteArrayOutputStream ( ) ) { try ( DeflaterOutputStream deflater = new DeflaterOutputStream ( baos ) ) { if ( remainingData . length != 0 ) { deflater . write ( remainingData ) ; } subHeader [ 0 ] = ( byte ) pos ; subHeader [ 1 ] = ( byte ) ( pos >>> 8 ) ; subHeader [ 2 ] = ( byte ) ( pos >>> 16 ) ; subHeader [ 3 ] = ( byte ) this . seqNo ++ ; deflater . write ( subHeader , 0 , 4 ) ; deflater . write ( buf , 0 , uncompressSize - ( remainingData . length + 4 ) ) ; deflater . finish ( ) ; } compressedBytes = baos . toByteArray ( ) ; if ( compressedBytes . length < ( int ) ( MIN_COMPRESSION_RATIO * pos ) ) { int compressedLength = compressedBytes . length ; header [ 0 ] = ( byte ) compressedLength ; header [ 1 ] = ( byte ) ( compressedLength >>> 8 ) ; header [ 2 ] = ( byte ) ( compressedLength >>> 16 ) ; header [ 3 ] = ( byte ) this . compressSeqNo ++ ; header [ 4 ] = ( byte ) uncompressSize ; header [ 5 ] = ( byte ) ( uncompressSize >>> 8 ) ; header [ 6 ] = ( byte ) ( uncompressSize >>> 16 ) ; out . write ( header , 0 , 7 ) ; out . write ( compressedBytes , 0 , compressedLength ) ; cmdLength += uncompressSize ; if ( traceCache != null ) { // trace last packets if ( remainingData . length != 0 ) { traceCache . put ( new TraceObject ( true , COMPRESSED_PROTOCOL_COMPRESSED_PACKET , Arrays . copyOfRange ( header , 0 , 7 ) , Arrays . copyOfRange ( remainingData , 0 , remainingData . length ) , Arrays . copyOfRange ( subHeader , 0 , 4 ) , Arrays . copyOfRange ( buf , 0 , ( uncompressSize > 1000 ? 1000 : uncompressSize ) - ( remainingData . length + 4 ) ) ) ) ; } else { traceCache . put ( new TraceObject ( true , COMPRESSED_PROTOCOL_COMPRESSED_PACKET , Arrays . 
copyOfRange ( header , 0 , 7 ) , Arrays . copyOfRange ( subHeader , 0 , 4 ) , Arrays . copyOfRange ( buf , 0 , ( uncompressSize > 1000 ? 1000 : uncompressSize ) - ( remainingData . length + 4 ) ) ) ) ; } } if ( logger . isTraceEnabled ( ) ) { if ( remainingData . length != 0 ) { logger . trace ( "send compress: {}{}" , serverThreadLog , Utils . hexdump ( maxQuerySizeToLog - ( remainingData . length + 11 ) , 0 , compressedLength , header , remainingData , subHeader , buf ) ) ; } else { logger . trace ( "send compress: {}{}" , serverThreadLog , Utils . hexdump ( maxQuerySizeToLog - 11 , 0 , compressedLength , header , subHeader , buf ) ) ; } } if ( pos + remainingData . length + 4 - uncompressSize > 0 ) { remainingData = Arrays . copyOfRange ( buf , uncompressSize - ( remainingData . length + 4 ) , pos ) ; } else { remainingData = EMPTY_ARRAY ; } // if last packet fill the max size , must send an empty packet to indicate command end . lastPacketExactMaxPacketLength = pos == MAX_PACKET_LENGTH ; if ( commandEnd && lastPacketExactMaxPacketLength ) { writeEmptyPacket ( ) ; } pos = 0 ; return ; } } } int uncompressSize = Math . min ( MAX_PACKET_LENGTH , remainingData . length + 4 + pos ) ; checkMaxAllowedLength ( uncompressSize ) ; cmdLength += uncompressSize ; // send packet without compression header [ 0 ] = ( byte ) uncompressSize ; header [ 1 ] = ( byte ) ( uncompressSize >>> 8 ) ; header [ 2 ] = ( byte ) ( uncompressSize >>> 16 ) ; header [ 3 ] = ( byte ) this . compressSeqNo ++ ; header [ 4 ] = ( byte ) 0x00 ; header [ 5 ] = ( byte ) 0x00 ; header [ 6 ] = ( byte ) 0x00 ; out . write ( header , 0 , 7 ) ; cmdLength += uncompressSize ; if ( remainingData . length != 0 ) { out . write ( remainingData ) ; } subHeader [ 0 ] = ( byte ) pos ; subHeader [ 1 ] = ( byte ) ( pos >>> 8 ) ; subHeader [ 2 ] = ( byte ) ( pos >>> 16 ) ; subHeader [ 3 ] = ( byte ) this . seqNo ++ ; out . write ( subHeader , 0 , 4 ) ; out . write ( buf , 0 , uncompressSize - ( remainingData . 
length + 4 ) ) ; cmdLength += remainingData . length ; if ( traceCache != null ) { // trace last packets if ( remainingData . length != 0 ) { traceCache . put ( new TraceObject ( true , COMPRESSED_PROTOCOL_NOT_COMPRESSED_PACKET , Arrays . copyOfRange ( header , 0 , 7 ) , Arrays . copyOfRange ( remainingData , 0 , remainingData . length ) , Arrays . copyOfRange ( subHeader , 0 , 4 ) , Arrays . copyOfRange ( buf , 0 , ( uncompressSize > 1000 ? 1000 : uncompressSize ) - ( remainingData . length + 4 ) ) ) ) ; } else { traceCache . put ( new TraceObject ( true , COMPRESSED_PROTOCOL_NOT_COMPRESSED_PACKET , Arrays . copyOfRange ( header , 0 , 7 ) , Arrays . copyOfRange ( subHeader , 0 , 4 ) , Arrays . copyOfRange ( buf , 0 , ( uncompressSize > 1000 ? 1000 : uncompressSize ) - ( remainingData . length + 4 ) ) ) ) ; } } if ( logger . isTraceEnabled ( ) ) { if ( remainingData . length != 0 ) { logger . trace ( "send uncompress: {}{}" , serverThreadLog , Utils . hexdump ( maxQuerySizeToLog - ( remainingData . length + 11 ) , 0 , pos , header , remainingData , subHeader , buf ) ) ; } else { logger . trace ( "send uncompress: {}{}" , serverThreadLog , Utils . hexdump ( maxQuerySizeToLog - 11 , 0 , pos , header , subHeader , buf ) ) ; } } if ( pos + remainingData . length + 4 - uncompressSize > 0 ) { remainingData = Arrays . copyOfRange ( buf , uncompressSize - ( remainingData . length + 4 ) , pos ) ; } else { remainingData = EMPTY_ARRAY ; } // if last packet fill the max size , must send an empty packet to indicate command end . lastPacketExactMaxPacketLength = pos == MAX_PACKET_LENGTH ; pos = 0 ; } if ( remainingData . length > 0 ) { if ( remainingData . length > MIN_COMPRESSION_SIZE ) { byte [ ] compressedBytes ; int uncompressSize = Math . min ( MAX_PACKET_LENGTH , remainingData . 
// NOTE(review): two likely ordering bugs below — (1) the ratio check `compressedBytes.length < (int)(MIN_COMPRESSION_RATIO * pos)` runs after `pos` was reset to 0 above, so it appears it can never pass (should presumably use uncompressSize) — confirm; (2) `remainingData = EMPTY_ARRAY` is assigned inside the try before the trace/log calls read remainingData, so the traced payload is always empty — confirm intended.
length ) ; checkMaxAllowedLength ( uncompressSize ) ; cmdLength += uncompressSize ; try ( ByteArrayOutputStream baos = new ByteArrayOutputStream ( ) ) { try ( DeflaterOutputStream deflater = new DeflaterOutputStream ( baos ) ) { deflater . write ( remainingData ) ; deflater . finish ( ) ; } compressedBytes = baos . toByteArray ( ) ; remainingData = EMPTY_ARRAY ; } if ( compressedBytes . length < ( int ) ( MIN_COMPRESSION_RATIO * pos ) ) { int compressedLength = compressedBytes . length ; header [ 0 ] = ( byte ) compressedLength ; header [ 1 ] = ( byte ) ( compressedLength >>> 8 ) ; header [ 2 ] = ( byte ) ( compressedLength >>> 16 ) ; header [ 3 ] = ( byte ) this . compressSeqNo ++ ; header [ 4 ] = ( byte ) uncompressSize ; header [ 5 ] = ( byte ) ( uncompressSize >>> 8 ) ; header [ 6 ] = ( byte ) ( uncompressSize >>> 16 ) ; out . write ( header , 0 , 7 ) ; out . write ( compressedBytes , 0 , compressedLength ) ; if ( traceCache != null ) { traceCache . put ( new TraceObject ( true , COMPRESSED_PROTOCOL_COMPRESSED_PACKET , Arrays . copyOfRange ( header , 0 , 7 ) , Arrays . copyOfRange ( remainingData , 0 , ( uncompressSize > 1000 ? 1000 : uncompressSize ) ) ) ) ; } if ( logger . isTraceEnabled ( ) ) { logger . trace ( "send compress: {}{}" , serverThreadLog , Utils . hexdump ( maxQuerySizeToLog - 7 , 0 , uncompressSize , header , remainingData ) ) ; } // if last packet fill the max size , must send an empty packet to indicate command end . if ( commandEnd && lastPacketExactMaxPacketLength ) { writeEmptyPacket ( ) ; } return ; } } int uncompressSize = Math . min ( MAX_PACKET_LENGTH , remainingData . length ) ; checkMaxAllowedLength ( uncompressSize ) ; cmdLength += uncompressSize ; // send packet without compression header [ 0 ] = ( byte ) uncompressSize ; header [ 1 ] = ( byte ) ( uncompressSize >>> 8 ) ; header [ 2 ] = ( byte ) ( uncompressSize >>> 16 ) ; header [ 3 ] = ( byte ) this . 
// NOTE(review): same pattern below — `out.write(remainingData)` then `remainingData = EMPTY_ARRAY` BEFORE traceCache/logger read remainingData, so the uncompressed trace always records an empty payload — verify ordering.
compressSeqNo ++ ; header [ 4 ] = ( byte ) 0x00 ; header [ 5 ] = ( byte ) 0x00 ; header [ 6 ] = ( byte ) 0x00 ; out . write ( header , 0 , 7 ) ; out . write ( remainingData ) ; remainingData = EMPTY_ARRAY ; if ( traceCache != null ) { traceCache . put ( new TraceObject ( true , COMPRESSED_PROTOCOL_NOT_COMPRESSED_PACKET , Arrays . copyOfRange ( header , 0 , 7 ) , Arrays . copyOfRange ( remainingData , 0 , ( remainingData . length > 1000 ? 1000 : remainingData . length ) ) ) ) ; } if ( logger . isTraceEnabled ( ) ) { logger . trace ( "send uncompress: {}{}" , serverThreadLog , Utils . hexdump ( maxQuerySizeToLog - 7 , 0 , remainingData . length , header , remainingData ) ) ; } if ( commandEnd && lastPacketExactMaxPacketLength ) { writeEmptyPacket ( ) ; } }
public class xen_brvpx_image { /** * < pre > * Use this operation to delete Repeater XVA file . * < / pre > */ public static xen_brvpx_image delete ( nitro_service client , xen_brvpx_image resource ) throws Exception { } }
resource . validate ( "delete" ) ; return ( ( xen_brvpx_image [ ] ) resource . delete_resource ( client ) ) [ 0 ] ;
public class TangoDataReady { private void fireTangoDataReadyEvent ( TangoDataReady tangoDataReady , EventData eventData ) { } }
TangoDataReadyEvent data_ready_event = new TangoDataReadyEvent ( tangoDataReady , eventData ) ; // Notifying those that are interested in this event ArrayList < EventListener > listeners = event_listeners . getListeners ( ITangoDataReadyListener . class ) ; for ( EventListener eventListener : listeners ) { ( ( ITangoDataReadyListener ) eventListener ) . data_ready ( data_ready_event ) ; }
public class MainForm { /** Exports the currently loaded module to a WAV file: stops playback, prompts for a target file (defaulting to the module name under the last export path), confirms overwrite, optionally keeps audible playback during export, then starts a fresh Mixer on a PlayThread in export mode. @since 01.07.2006 */ private void doExportToWave ( ) { } }
// NOTE(review): the do/while(true) loop repeats only when the user answers "No" to the overwrite prompt (file re-selection); all other paths return. exportPath is updated to the chosen file's directory as a side effect, and inExportMode is set before returning.
doStopPlaying ( ) ; if ( currentContainer == null ) { JOptionPane . showMessageDialog ( this , "You need to load a file first!" , "Ups!" , JOptionPane . ERROR_MESSAGE ) ; } else { do { String fileName = Helpers . createLocalFileStringFromURL ( currentContainer . getFileURL ( ) , true ) ; fileName = fileName . substring ( fileName . lastIndexOf ( File . separatorChar ) + 1 ) ; String exportToWav = exportPath + File . separatorChar + fileName + ".WAV" ; FileChooserResult selectedFile = Helpers . selectFileNameFor ( this , exportToWav , "Export to wave" , fileFilterExport , 1 , false , false ) ; if ( selectedFile != null ) { File f = selectedFile . getSelectedFile ( ) ; if ( f != null ) { if ( f . exists ( ) ) { int result = JOptionPane . showConfirmDialog ( this , "File already exists! Overwrite?" , "Overwrite confirmation" , JOptionPane . YES_NO_CANCEL_OPTION , JOptionPane . QUESTION_MESSAGE ) ; if ( result == JOptionPane . CANCEL_OPTION ) return ; if ( result == JOptionPane . NO_OPTION ) continue ; // Reselect boolean ok = f . delete ( ) ; if ( ! ok ) { JOptionPane . showMessageDialog ( MainForm . this , "Overwrite failed. Is file write protected or in use?" , "Failed" , JOptionPane . ERROR_MESSAGE ) ; return ; } } // get Export Type from selected filechooser index ( find the index ; ) ) String modFileName = f . getAbsolutePath ( ) ; int i = modFileName . lastIndexOf ( File . separatorChar ) ; exportPath = modFileName . substring ( 0 , i ) ; int result = JOptionPane . showConfirmDialog ( this , "Continue playback while exporting?" , "Playback?" , JOptionPane . YES_NO_OPTION , JOptionPane . QUESTION_MESSAGE ) ; Mixer mixer = createNewMixer ( ) ; mixer . setPlayDuringExport ( result == JOptionPane . YES_OPTION ) ; mixer . setExportFile ( f ) ; playerThread = new PlayThread ( mixer , this ) ; playerThread . start ( ) ; inExportMode = true ; // Signal , that we are exporting right now . . . } } return ; } while ( true ) ; }
public class DescribeVpcPeeringConnectionsResult { /** * Information about the VPC peering connections . * @ return Information about the VPC peering connections . */ public java . util . List < VpcPeeringConnection > getVpcPeeringConnections ( ) { } }
if ( vpcPeeringConnections == null ) { vpcPeeringConnections = new com . amazonaws . internal . SdkInternalList < VpcPeeringConnection > ( ) ; } return vpcPeeringConnections ;
public class Model { /** * Deletes a single table record represented by this instance . This method assumes that a corresponding table * has only one record whose PK is the ID of this instance . * After deletion , this instance becomes { @ link # frozen ( ) } and cannot be used anymore until { @ link # thaw ( ) } is called . * @ return true if a record was deleted , false if not . */ public boolean delete ( ) { } }
// NOTE(review): flow — fireBeforeDelete, then DELETE either by composite keys or by id (plus partition-id predicates when present); on exactly one affected row the instance is frozen, caches/edges are purged, and fireAfterDelete runs; fireAfterDelete also runs on the zero-rows path. Whether firing after-delete on a no-op delete is intended — confirm.
fireBeforeDelete ( ) ; int result ; if ( getCompositeKeys ( ) != null ) { String [ ] compositeKeys = getCompositeKeys ( ) ; StringBuilder query = new StringBuilder ( ) ; Object [ ] values = new Object [ compositeKeys . length ] ; for ( int i = 0 ; i < compositeKeys . length ; i ++ ) { query . append ( i == 0 ? "DELETE FROM " + metaModelLocal . getTableName ( ) + " WHERE " : " AND " ) . append ( compositeKeys [ i ] ) . append ( " = ?" ) ; values [ i ] = get ( compositeKeys [ i ] ) ; } result = new DB ( metaModelLocal . getDbName ( ) ) . exec ( query . toString ( ) , values ) ; } else { StringBuilder query = new StringBuilder ( "DELETE FROM " ) . append ( metaModelLocal . getTableName ( ) ) . append ( " WHERE " ) . append ( getIdName ( ) ) . append ( " = ?" ) ; List < Object > values = new ArrayList < > ( ) ; values . add ( getId ( ) ) ; if ( metaModelLocal . hasPartitionIDs ( ) ) { for ( String partitionId : metaModelLocal . getPartitionIDs ( ) ) { query . append ( " AND " ) . append ( partitionId ) . append ( " = ?" ) ; values . add ( get ( partitionId ) ) ; } } result = new DB ( metaModelLocal . getDbName ( ) ) . exec ( query . toString ( ) , values . toArray ( ) ) ; } if ( 1 == result ) { frozen = true ; if ( metaModelOf ( getClass ( ) ) . cached ( ) ) { Registry . cacheManager ( ) . purgeTableCache ( metaModelLocal ) ; } ModelDelegate . purgeEdges ( metaModelLocal ) ; fireAfterDelete ( ) ; return true ; } fireAfterDelete ( ) ; return false ;
public class ReflectiveInterceptor { /** * Retrieve modifiers for a Java class , which might or might not be reloadable or reloaded . * @ param clazz the class for which to discover modifiers * @ return the modifiers */ public static int jlClassGetModifiers ( Class < ? > clazz ) { } }
// ReloadableType rtype = getReloadableTypeIfHasBeenReloaded ( clazz ) ; ReloadableType rtype = getRType ( clazz ) ; if ( rtype == null ) { return clazz . getModifiers ( ) ; } else { // Note : the " super bit " may be set in class modifiers but we should block it out , it // shouldn ' t be shown to users of the reflection API . return rtype . getLatestTypeDescriptor ( ) . getModifiers ( ) & ~ Opcodes . ACC_SUPER ; }
public class OggMetaData { /** * A short description like winamp does in its default * @ since 26.12.2008 * @ return */ public String getShortDescription ( ) { } }
String artist = getArtist ( ) ; String album = getAlbum ( ) ; String title = getTitle ( ) ; StringBuilder str = new StringBuilder ( ) ; if ( artist != null && artist . length ( ) != 0 ) { str . append ( artist ) . append ( " - " ) ; } if ( album != null && album . length ( ) != 0 ) { str . append ( album ) . append ( " - " ) ; } if ( title == null || title . length ( ) == 0 ) title = MultimediaContainerManager . getSongNameFromURL ( urlName ) ; return str . append ( title ) . toString ( ) ;
public class Log4JLogger { /** * Wrapper around log4j . * @ param level net . spy . compat . log . Level level . * @ param message object message * @ param e optional throwable */ @ Override public void log ( Level level , Object message , Throwable e ) { } }
org . apache . log4j . Level pLevel = org . apache . log4j . Level . DEBUG ; switch ( level == null ? Level . FATAL : level ) { case TRACE : pLevel = org . apache . log4j . Level . TRACE ; break ; case DEBUG : pLevel = org . apache . log4j . Level . DEBUG ; break ; case INFO : pLevel = org . apache . log4j . Level . INFO ; break ; case WARN : pLevel = org . apache . log4j . Level . WARN ; break ; case ERROR : pLevel = org . apache . log4j . Level . ERROR ; break ; case FATAL : pLevel = org . apache . log4j . Level . FATAL ; break ; default : // I don ' t know what this is , so consider it fatal pLevel = org . apache . log4j . Level . FATAL ; l4jLogger . log ( "net.spy.compat.log.AbstractLogger" , pLevel , "Unhandled " + "log level: " + level + " for the following message" , null ) ; } l4jLogger . log ( "net.spy.compat.log.AbstractLogger" , pLevel , message , e ) ;
public class TagLibFactory { /** * Laedt eine einzelne TagLib . * @ param file TLD die geladen werden soll . * @ param saxParser Definition des Sax Parser mit dem die TagLib eingelsesen werden soll . * @ return TagLib * @ throws TagLibException */ public static TagLib loadFromStream ( InputStream is , Identification id ) throws TagLibException { } }
return new TagLibFactory ( null , is , id ) . getLib ( ) ;
public class SharedUtils { /** * Determine whether String is a mixed value binding expression or not . */ static boolean isMixedExpression ( String expression ) { } }
if ( null == expression ) { return false ; } // if it doesn ' t start and end with delimiters return ( ! ( expression . startsWith ( "#{" ) && expression . endsWith ( "}" ) ) ) && isExpression ( expression ) ;
public class Mmff { /** * Helper method to find all existing aromatic chem objects . * @ param mol molecule * @ return chem objects */ private Set < IChemObject > getAromatics ( IAtomContainer mol ) { } }
Set < IChemObject > oldArom = new HashSet < > ( ) ; for ( IAtom atom : mol . atoms ( ) ) if ( atom . getFlag ( CDKConstants . ISAROMATIC ) ) oldArom . add ( atom ) ; for ( IBond bond : mol . bonds ( ) ) if ( bond . getFlag ( CDKConstants . ISAROMATIC ) ) oldArom . add ( bond ) ; return oldArom ;
public class QueryQuestionCommentController { /** * Lists not archived comments by given parameters . * @ param panel panel . Required * @ param stamp filter by panel stamp . Defaults to panel ' s current stamp * @ param queryPage filter by comment ' s query page . Ignored if null * @ param query filter by query . Ignored if null * @ param parentComment filter by parent comment . Ignored if null * @ param user filter by user . Ignored if null . * @ param onlyRootComments return only root comments . * @ return a list of comments */ public List < QueryQuestionComment > listQueryQuestionComments ( Panel panel , PanelStamp stamp , QueryPage queryPage , Query query , QueryQuestionComment parentComment , User user , boolean onlyRootComments ) { } }
if ( stamp == null ) { stamp = panel . getCurrentStamp ( ) ; } return queryQuestionCommentDAO . list ( queryPage , stamp , query , panel . getRootFolder ( ) , parentComment , onlyRootComments , user , Boolean . FALSE ) ;
public class Calc {
    /**
     * Calculate the TM-Score for the superposition.
     * Atom sets must be pre-rotated.
     * <p>
     * Citation: <i>Zhang Y and Skolnick J (2004). "Scoring function for automated
     * assessment of protein structure template quality". Proteins 57:702-710.</i>
     *
     * @param atomSet1 atom array 1
     * @param atomSet2 atom array 2
     * @param len1 the full length of the protein supplying atomSet1
     * @param len2 the full length of the protein supplying atomSet2
     * @param normalizeMin whether to normalize by the <strong>minimum</strong>-length
     *        structure, i.e. {@code min(len1, len2)}; if false, normalize by
     *        {@code max(len1, len2)}
     * @return the TM-Score
     * @throws StructureException if the atom sets differ in length or exceed the stated full lengths
     */
    public static double getTMScore(Atom[] atomSet1, Atom[] atomSet2, int len1, int len2, boolean normalizeMin) throws StructureException {
        if (atomSet1.length != atomSet2.length) {
            throw new StructureException("The two atom sets are not of same length!");
        }
        if (atomSet1.length > len1) {
            throw new StructureException("len1 must be greater or equal to the alignment length!");
        }
        if (atomSet2.length > len2) {
            throw new StructureException("len2 must be greater or equal to the alignment length!");
        }
        // Normalization length: shorter or longer chain, per the caller's choice.
        int Lnorm;
        if (normalizeMin) {
            Lnorm = Math.min(len1, len2);
        } else {
            Lnorm = Math.max(len1, len2);
        }
        int Laln = atomSet1.length;
        // d0: distance scale parameter from Zhang & Skolnick (2004).
        double d0 = 1.24 * Math.cbrt(Lnorm - 15.) - 1.8;
        double d0sq = d0 * d0;
        // Sum of per-residue score terms 1 / (1 + (d/d0)^2).
        double sum = 0;
        for (int i = 0; i < Laln; i++) {
            double d = Calc.getDistance(atomSet1[i], atomSet2[i]);
            sum += 1. / (1 + d * d / d0sq);
        }
        return sum / Lnorm;
    }
}
public class SQLiteExecutor {
    /**
     * Insert one record into the database.
     * To exclude some properties or default values, invoke
     * {@code com.landawn.abacus.util.N#entity2Map(Object, boolean, Collection, NamingPolicy)}.
     *
     * @param table table name (normalized through {@code formatName})
     * @param record can be <code>ContentValues</code>, <code>Map</code> or an
     *        <code>entity</code> with getter/setter methods
     * @param conflictAlgorithm conflict resolution algorithm passed to SQLite
     * @return the value returned by {@code insertWithOnConflict} (row id of the
     *         inserted row — TODO confirm error semantics against sqliteDB)
     * @see com.landawn.abacus.util.Maps#entity2Map(Object, boolean, Collection, NamingPolicy)
     */
    public long insert(String table, Object record, int conflictAlgorithm) {
        table = formatName(table);
        // ContentValues are used as-is; other record types are converted, skipping
        // the read-only properties registered for the record's class.
        final ContentValues contentValues = record instanceof ContentValues ? (ContentValues) record
                : toContentValues(record, readOnlyPropNamesMap.get(record.getClass()), columnNamingPolicy, false);
        // Drop the id column when it still carries its default value so the
        // database can generate one.
        removeIdDefaultValue(contentValues);
        return sqliteDB.insertWithOnConflict(table, null, contentValues, conflictAlgorithm);
    }
}
public class Ifc4PackageImpl {
    /**
     * Lazily resolves the {@code IfcLightDistributionCurveEnum} EEnum from the
     * registered IFC4 package (EMF-generated accessor; classifier index 1011).
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @generated
     */
    @Override
    public EEnum getIfcLightDistributionCurveEnum() {
        if (ifcLightDistributionCurveEnumEEnum == null) {
            // Fetch the classifier by its fixed position in the generated package.
            ifcLightDistributionCurveEnumEEnum = (EEnum) EPackage.Registry.INSTANCE
                    .getEPackage(Ifc4Package.eNS_URI).getEClassifiers().get(1011);
        }
        return ifcLightDistributionCurveEnumEEnum;
    }
}
public class SimpleVersionedSerialization { /** * Deserializes the version and datum from a byte array . The first four bytes will be read as * the version , in < i > big - endian < / i > encoding . The remaining bytes will be passed to the serializer * for deserialization , via { @ link SimpleVersionedSerializer # deserialize ( int , byte [ ] ) } . * @ param serializer The serializer to deserialize the datum with . * @ param bytes The bytes to deserialize from . * @ return The deserialized datum . * @ throws IOException Exceptions from the { @ link SimpleVersionedSerializer # deserialize ( int , byte [ ] ) } * method are forwarded . */ public static < T > T readVersionAndDeSerialize ( SimpleVersionedSerializer < T > serializer , byte [ ] bytes ) throws IOException { } }
checkNotNull ( serializer , "serializer" ) ; checkNotNull ( bytes , "bytes" ) ; checkArgument ( bytes . length >= 4 , "byte array below minimum length (4 bytes)" ) ; final byte [ ] dataOnly = Arrays . copyOfRange ( bytes , 8 , bytes . length ) ; final int version = ( ( bytes [ 0 ] & 0xff ) << 24 ) | ( ( bytes [ 1 ] & 0xff ) << 16 ) | ( ( bytes [ 2 ] & 0xff ) << 8 ) | ( bytes [ 3 ] & 0xff ) ; final int length = ( ( bytes [ 4 ] & 0xff ) << 24 ) | ( ( bytes [ 5 ] & 0xff ) << 16 ) | ( ( bytes [ 6 ] & 0xff ) << 8 ) | ( bytes [ 7 ] & 0xff ) ; if ( length == dataOnly . length ) { return serializer . deserialize ( version , dataOnly ) ; } else { throw new IOException ( "Corrupt data, conflicting lengths. Length fields: " + length + ", data: " + dataOnly . length ) ; }
public class MessageSelector { /** * Test the selector against a given message */ public boolean matches ( Message message ) throws JMSException { } }
Boolean result = selectorTree != null ? selectorTree . evaluateBoolean ( message ) : null ; return result != null && result . booleanValue ( ) ;
public class Table { /** * Applies the function in { @ code pairs } to each consecutive pairs of rows in the table */ public void doWithRows ( Pairs pairs ) { } }
if ( isEmpty ( ) ) { return ; } Row row1 = new Row ( this ) ; Row row2 = new Row ( this ) ; int max = rowCount ( ) ; for ( int i = 1 ; i < max ; i ++ ) { row1 . at ( i - 1 ) ; row2 . at ( i ) ; pairs . doWithPair ( row1 , row2 ) ; }
public class Follower {
    /**
     * Wait for a commit message from the leader and verify that its zxid matches
     * the last zxid in this follower's log.
     *
     * @throws TimeoutException in case of timeout.
     * @throws InterruptedException in case of interruption.
     * @throws IOException in case of IO failures.
     */
    void waitForCommitMessage() throws TimeoutException, InterruptedException, IOException {
        LOG.debug("Waiting for commit message from {}", this.electedLeader);
        // Block until a COMMIT message from the elected leader arrives, or time out.
        MessageTuple tuple = filter.getExpectedMessage(MessageType.COMMIT, this.electedLeader, config.getTimeoutMs());
        Zxid zxid = MessageBuilder.fromProtoZxid(tuple.getMessage().getCommit().getZxid());
        Zxid lastZxid = persistence.getLatestZxid();
        // If the followers are appropriately synchronized, the Zxid of ACK should
        // match the last Zxid in followers' log.
        if (zxid.compareTo(lastZxid) != 0) {
            LOG.error("The ACK zxid {} doesn't match last zxid {} in log!", zxid, lastZxid);
            throw new RuntimeException("The ACK zxid doesn't match last zxid");
        }
    }
}
public class SelectBuilder { /** * Adds the columns of the current entity to the select clause */ public SelectBuilder columns ( String ... columns ) { } }
for ( String column : columns ) { column ( column , null ) ; } return this ;
public class DatamodelConverter {
    /**
     * Copies a {@link Claim} by deep-copying its subject, main snak and
     * qualifiers through this converter.
     *
     * @param object object to copy
     * @return copied object
     */
    public Claim copy(Claim object) {
        return dataObjectFactory.getClaim(
                (EntityIdValue) visit(object.getSubject()),
                copy(object.getMainSnak()),
                copy(object.getQualifiers()));
    }
}
public class AnnotationTypeFieldBuilder {
    /**
     * Construct a new AnnotationTypeFieldBuilder fixed to the
     * {@code ANNOTATION_TYPE_FIELDS} member kind.
     *
     * @param context the build context.
     * @param classDoc the class whose members are being documented.
     * @param writer the doclet specific writer.
     * @return the new builder instance
     */
    public static AnnotationTypeFieldBuilder getInstance(Context context, ClassDoc classDoc, AnnotationTypeFieldWriter writer) {
        return new AnnotationTypeFieldBuilder(context, classDoc, writer, VisibleMemberMap.ANNOTATION_TYPE_FIELDS);
    }
}
public class AntClassLoader {
    /**
     * Returns a stream to read the requested resource name.
     *
     * @param name The name of the resource for which a stream is required.
     *             Must not be <code>null</code>.
     * @return a stream to the required resource or <code>null</code> if the
     *         resource cannot be found on the loader's classpath.
     */
    public InputStream getResourceAsStream(String name) {
        InputStream resourceStream = null;
        // Parent-first delegation: consult the base (parent) loader before this one.
        if (isParentFirst(name)) {
            resourceStream = loadBaseResource(name);
        }
        if (resourceStream != null) {
            log("ResourceStream for " + name + " loaded from parent loader", Project.MSG_DEBUG);
        } else {
            // Not found via the parent (or loader-first mode): try this loader's own path.
            resourceStream = loadResource(name);
            if (resourceStream != null) {
                log("ResourceStream for " + name + " loaded from ant loader", Project.MSG_DEBUG);
            }
        }
        // Loader-first mode only: fall back to the parent (or root) loader last.
        if (resourceStream == null && !isParentFirst(name)) {
            if (ignoreBase) {
                resourceStream = getRootLoader() == null ? null : getRootLoader().getResourceAsStream(name);
            } else {
                resourceStream = loadBaseResource(name);
            }
            if (resourceStream != null) {
                log("ResourceStream for " + name + " loaded from parent loader", Project.MSG_DEBUG);
            }
        }
        if (resourceStream == null) {
            log("Couldn't load ResourceStream for " + name, Project.MSG_DEBUG);
        }
        return resourceStream;
    }
}
public class JiraService { /** * Make sure jiraIssueTransitionId is an integer and not an empty string . * @ param properties the Map holding the properties */ private void fixJiraIssueTransitionId ( Map < String , Object > properties ) { } }
if ( properties != null ) { Object jiraIssueTransitionId = properties . get ( JIRA_ISSUE_TRANSITION_ID_PROP ) ; if ( jiraIssueTransitionId instanceof String ) { if ( ( ( String ) jiraIssueTransitionId ) . trim ( ) . isEmpty ( ) ) { properties . put ( JIRA_ISSUE_TRANSITION_ID_PROP , null ) ; } else { properties . put ( JIRA_ISSUE_TRANSITION_ID_PROP , Integer . valueOf ( ( String ) jiraIssueTransitionId ) ) ; } } }
public class XML { /** * verifies that this class exist in Xml Configuration File . * @ param aClass Class to verify * @ return this instance */ private boolean classExists ( Class < ? > aClass ) { } }
if ( xmlJmapper . classes == null ) return false ; return findXmlClass ( aClass ) != null ? true : false ;
public class AnalyticFormulas {
    /**
     * Calculates the approximation to the lognormal Black volatility using the
     * standard SABR model and the standard Hagan approximation.
     *
     * @param alpha initial value of the stochastic volatility process of the SABR model.
     * @param beta CEV parameter of the SABR model.
     * @param rho Correlation (leverage) of the stochastic volatility.
     * @param nu Volatility of the stochastic volatility (vol-of-vol).
     * @param displacement The displacement parameter d.
     * @param underlying Underlying (spot) value.
     * @param strike Strike.
     * @param maturity Maturity.
     * @return Implied lognormal Black volatility.
     * @throws IllegalArgumentException if a parameter is outside its admissible range.
     */
    public static double sabrHaganLognormalBlackVolatilityApproximation(double alpha, double beta, double rho, double nu, double displacement, double underlying, double strike, double maturity) {
        if (alpha <= 0) {
            throw new IllegalArgumentException("&alpha; must be greater than 0.");
        }
        if (rho > 1 || rho < -1) {
            throw new IllegalArgumentException("&rho; must be between -1 and 1.");
        }
        if (nu <= 0) {
            throw new IllegalArgumentException("&nu; must be greater than 0.");
        }
        if (underlying <= 0) {
            throw new IllegalArgumentException("Approximation not definied for non-positive underlyings.");
        }

        // Apply displacement. Displaced model is just a shift on underlying and strike.
        underlying += displacement;
        strike += displacement;

        // Near-equal forward and strike are treated as the ATM case (relative tolerance).
        if (Math.abs(underlying - strike) < 0.0001 * (1 + Math.abs(underlying))) {
            /*
             * ATM case - we assume underlying = strike
             */
            double term1 = alpha / (Math.pow(underlying, 1 - beta));
            double term2 = Math.pow(1 - beta, 2) / 24 * Math.pow(alpha, 2) / Math.pow(underlying, 2 * (1 - beta))
                    + rho * beta * alpha * nu / (4 * Math.pow(underlying, 1 - beta))
                    + (2 - 3 * rho * rho) * nu * nu / 24;
            return term1 * (1 + term2 * maturity);
        } else {
            /*
             * General non-ATM case, no problem with log(F/K)
             */
            double forwardTimesStrike = underlying * strike;
            double z = nu / alpha * Math.pow(forwardTimesStrike, (1 - beta) / 2) * Math.log(underlying / strike);
            double x = Math.log((Math.sqrt(1 - 2 * rho * z + z * z) + z - rho) / (1 - rho));
            double term1 = alpha / Math.pow(forwardTimesStrike, (1 - beta) / 2)
                    / (1 + Math.pow(1 - beta, 2) / 24 * Math.pow(Math.log(underlying / strike), 2)
                            + Math.pow(1 - beta, 4) / 1920 * Math.pow(Math.log(underlying / strike), 4));
            // Guard against 0/0: z/x -> 1 as strike approaches the forward.
            double term2 = (Math.abs(x - z) < 1E-10) ? 1 : z / x;
            double term3 = 1 + (Math.pow(1 - beta, 2) / 24 * Math.pow(alpha, 2) / Math.pow(forwardTimesStrike, 1 - beta)
                    + rho * beta * nu * alpha / 4 / Math.pow(forwardTimesStrike, (1 - beta) / 2)
                    + (2 - 3 * rho * rho) / 24 * nu * nu) * maturity;
            return term1 * term2 * term3;
        }
    }
}
public class Block {
    /**
     * Serializes this block to the given output. The fields are written in the
     * fixed order blockId, numBytes, generationStamp, each as a long; the
     * corresponding reader must consume them in the same order.
     *
     * @param out destination stream
     * @throws IOException if writing to the stream fails
     */
    public void write(DataOutput out) throws IOException {
        out.writeLong(blockId);
        out.writeLong(numBytes);
        out.writeLong(generationStamp);
    }
}
public class StructureAlignmentOptimizer {
    /**
     * Superimposes the two structures according to the currently equivalent
     * residues: computes the optimal transformation over the equivalent subset,
     * records the subset RMSD, and applies the transformation to all of
     * structure 2's coordinates.
     */
    private void superimposeBySet() throws StructureException {
        // extract the coordinates of the equivalent residues
        Atom[] tmp1 = new Atom[equLen];
        Atom[] tmp2 = new Atom[equLen];
        int i, r1, r2;
        for (i = 0; i < equLen; i++) {
            r1 = equSet[0][i];
            r2 = equSet[1][i];
            tmp1[i] = cod1[r1];
            // Structure 2 atoms have to be cloned: they are transformed below for the
            // RMSD, while the originals in cod2 are transformed separately afterwards.
            tmp2[i] = (Atom) cod2[r2].clone();
        }
        // superimpose the equivalent residues
        Matrix4d trans = SuperPositions.superpose(Calc.atomsToPoints(tmp1), Calc.atomsToPoints(tmp2));
        Calc.transform(tmp2, trans);
        // The rmsd is only for the subset contained in the tmp arrays (computed on
        // the transformed clones, i.e. after the rotation).
        rmsd = Calc.rmsd(tmp1, tmp2);
        // transform structure 2 according to the superimposition of the equivalent residues
        Calc.transform(cod2, trans);
    }
}
public class CloudStorageOptions {
    /**
     * Sets an unmodifiable piece of user metadata on a Cloud Storage object.
     *
     * @param key metadata key
     * @param value metadata value
     * @return the open/copy option carrying this metadata entry
     * @see "https://developers.google.com/storage/docs/reference-headers#xgoogmeta"
     */
    public static CloudStorageOption.OpenCopy withUserMetadata(String key, String value) {
        return OptionUserMetadata.create(key, value);
    }
}
public class AbstractBaseProcessLauncher { /** * Get required system properties to launch the sub process * @ return an array of { @ link String } which represents the System properties to pass * @ throws IOException */ String [ ] getJavaSystemPropertiesArguments ( ) throws IOException { } }
LOGGER . entering ( ) ; List < String > args = new LinkedList < > ( ) ; // Next , FWD all JVM - D args to the child process args . addAll ( Arrays . asList ( getPresentJavaSystemPropertiesArguments ( ) ) ) ; // Setup logging for child process args . addAll ( Arrays . asList ( getLoggingSystemPropertiesArguments ( ) ) ) ; LOGGER . exiting ( args . toString ( ) ) ; return args . toArray ( new String [ args . size ( ) ] ) ;
public class Orderer {
    /**
     * Orders the descriptions and, when the ordering requires validation,
     * verifies the result is a permutation of the input.
     *
     * @return descriptions in order
     * @throws InvalidOrderingException if validation is enabled and the ordering
     *         added, duplicated or removed items
     */
    public List<Description> order(Collection<Description> descriptions) throws InvalidOrderingException {
        List<Description> inOrder = ordering.orderItems(Collections.unmodifiableCollection(descriptions));
        // Orderings that do not require validation return the result as-is.
        if (!ordering.validateOrderingIsCorrect()) {
            return inOrder;
        }
        Set<Description> uniqueDescriptions = new HashSet<Description>(descriptions);
        if (!uniqueDescriptions.containsAll(inOrder)) {
            throw new InvalidOrderingException("Ordering added items");
        }
        Set<Description> resultAsSet = new HashSet<Description>(inOrder);
        if (resultAsSet.size() != inOrder.size()) {
            throw new InvalidOrderingException("Ordering duplicated items");
        } else if (!resultAsSet.containsAll(uniqueDescriptions)) {
            throw new InvalidOrderingException("Ordering removed items");
        }
        return inOrder;
    }
}
public class BinaryJedis {
    /**
     * ZINCRBY. If member already exists in the sorted set, adds the increment to its
     * score and updates the position of the element accordingly. If member does not
     * exist, it is added with increment as score (as if the previous score was zero).
     * If key does not exist, a new sorted set with the specified member as sole member
     * is created. If the key exists but does not hold a sorted set, an error is returned.
     * A negative increment performs a decrement.
     * <p>
     * Time complexity O(log(N)) with N being the number of elements in the sorted set.
     *
     * @param key sorted set key
     * @param increment amount to add to the member's score
     * @param member member whose score is incremented
     * @return The new score
     */
    @Override
    public Double zincrby(final byte[] key, final double increment, final byte[] member) {
        // This blocking command is not available inside MULTI/pipeline mode.
        checkIsInMultiOrPipeline();
        client.zincrby(key, increment, member);
        // Read the single reply and convert it to a Double.
        return BuilderFactory.DOUBLE.build(client.getOne());
    }
}
public class Utf8Benchmark {
    /**
     * Benchmarks {@link String#getBytes} on valid strings containing
     * pseudo-randomly-generated codePoints less than {@code maxCodePoint}.
     * A constant seed is used, so separate runs perform identical computations.
     */
    @Benchmark
    void getBytes(int reps) {
        final String[] strings = this.strings;
        // Mask-based cycling through the array; assumes STRING_COUNT is a
        // power of two -- TODO confirm where STRING_COUNT is declared.
        final int mask = STRING_COUNT - 1;
        for (int i = 0; i < reps; i++) {
            String string = strings[i & mask];
            byte[] bytes = string.getBytes(UTF_8);
            // Consume the result with an (effectively impossible) check so the
            // JIT cannot dead-code-eliminate the conversion under test.
            if (bytes[0] == 86 && bytes[bytes.length - 1] == 99) {
                throw new Error("Unlikely! We're just defeating the optimizer!");
            }
        }
    }
}
public class World {
    /**
     * Registers the {@code CompletesEventuallyProvider} plugin by {@code name}.
     * The provider is initialized with this world's stage before it is kept, so
     * it is usable as soon as it is registered.
     *
     * @param name the {@code String} name of the {@code CompletesEventuallyProvider} to register
     * @param completesEventuallyProvider the {@code CompletesEventuallyProvider} to register
     */
    @Override
    public void register(final String name, final CompletesEventuallyProvider completesEventuallyProvider) {
        completesEventuallyProvider.initializeUsing(stage());
        this.completesProviderKeeper.keep(name, completesEventuallyProvider);
    }
}
public class MatchState { /** * Method for getting the formatted match as a single string . In case of * multiple matches , it joins them using a regular expression operator " | " . * @ return Formatted string of the matched token . */ final String toTokenString ( ) throws IOException { } }
String [ ] stringToFormat = toFinalString ( null ) ; return String . join ( "|" , Arrays . asList ( stringToFormat ) ) ;
public class ReflectedHeap {
    /**
     * Decrease the key of an element.
     *
     * @param n the element
     * @param newKey the new key
     * @throws IllegalArgumentException if the handle is stale or the key would increase
     */
    @SuppressWarnings("unchecked")
    private void decreaseKey(ReflectedHandle<K, V> n, K newKey) {
        // A handle without an inner entry is invalid -- unless it is the current
        // "free" (unpaired) element, which legitimately has no inner entry.
        if (n.inner == null && free != n) {
            throw new IllegalArgumentException("Invalid handle!");
        }
        int c;
        if (comparator == null) {
            c = ((Comparable<? super K>) newKey).compareTo(n.key);
        } else {
            c = comparator.compare(newKey, n.key);
        }
        if (c > 0) {
            throw new IllegalArgumentException("Keys can only be decreased!");
        }
        n.key = newKey;
        // Equal key, or the free element (stored in neither heap): nothing to relocate.
        if (c == 0 || free == n) {
            return;
        }
        // actual decrease
        AddressableHeap.Handle<K, HandleMap<K, V>> nInner = n.inner;
        if (n.minNotMax) {
            // we are in the min heap, easy case
            n.inner.decreaseKey(newKey);
        } else {
            // we are in the max heap: remove this entry ...
            nInner.delete();
            ReflectedHandle<K, V> nOuter = nInner.getValue().outer;
            nOuter.inner = null;
            nOuter.minNotMax = false;
            // ... remove its paired min-heap entry ...
            AddressableHeap.Handle<K, HandleMap<K, V>> minInner = nInner.getValue().otherInner;
            ReflectedHandle<K, V> minOuter = minInner.getValue().outer;
            minInner.delete();
            minOuter.inner = null;
            minOuter.minNotMax = false;
            // ... update the key ...
            nOuter.key = newKey;
            // ... and reinsert both so the pair is re-partitioned correctly.
            insertPair(nOuter, minOuter);
        }
    }
}
public class ServerImpl {
    /**
     * Add a client: allocate a free id, send the CONNECTING handshake and
     * register the client socket. Errors are routed to
     * {@code errorNewClientConnected}.
     *
     * @param socket The socket to add.
     */
    void notifyNewClientConnected(Socket socket) {
        try {
            // Scan for an unused client id; "secure" bounds the scan so a full
            // table cannot loop forever (ids are bytes, at most 127 steps).
            int secure = 0;
            while (clients.containsKey(Byte.valueOf(lastId))) {
                lastId++;
                secure++;
                final int max = 127;
                if (secure > max) {
                    break;
                }
            }
            // Prepare first data: send the CONNECTING message with the assigned id.
            final ClientSocket client = new ClientSocket(lastId, socket);
            client.setState(StateConnection.CONNECTING);
            client.getOut().writeByte(NetworkMessageSystemId.CONNECTING);
            client.getOut().writeByte(client.getId());
            client.getOut().flush();
            // Update list
            clients.put(Byte.valueOf(client.getId()), client);
            clientsNumber++;
        } catch (final IOException exception) {
            errorNewClientConnected(exception);
        } catch (final LionEngineException exception) {
            errorNewClientConnected(exception);
        }
    }
}
public class ProjectControl { /** * Add this field in the Record ' s field sequence . */ public BaseField setupField ( int iFieldSeq ) { } }
BaseField field = null ; // if ( iFieldSeq = = 0) // field = new CounterField ( this , ID , Constants . DEFAULT _ FIELD _ LENGTH , null , null ) ; // field . setHidden ( true ) ; // if ( iFieldSeq = = 1) // field = new RecordChangedField ( this , LAST _ CHANGED , Constants . DEFAULT _ FIELD _ LENGTH , null , null ) ; // field . setHidden ( true ) ; // if ( iFieldSeq = = 2) // field = new BooleanField ( this , DELETED , Constants . DEFAULT _ FIELD _ LENGTH , null , new Boolean ( false ) ) ; // field . setHidden ( true ) ; if ( iFieldSeq == 3 ) field = new ImageField ( this , START_ICON , Constants . DEFAULT_FIELD_LENGTH , null , null ) ; if ( iFieldSeq == 4 ) field = new ImageField ( this , END_ICON , Constants . DEFAULT_FIELD_LENGTH , null , null ) ; if ( iFieldSeq == 5 ) field = new ImageField ( this , START_PARENT_ICON , Constants . DEFAULT_FIELD_LENGTH , null , null ) ; if ( iFieldSeq == 6 ) field = new ImageField ( this , END_PARENT_ICON , Constants . DEFAULT_FIELD_LENGTH , null , null ) ; if ( iFieldSeq == 7 ) field = new ColorField ( this , TASK_COLOR , Constants . DEFAULT_FIELD_LENGTH , null , null ) ; if ( iFieldSeq == 8 ) field = new ColorField ( this , TASK_SELECT_COLOR , Constants . DEFAULT_FIELD_LENGTH , null , null ) ; if ( iFieldSeq == 9 ) field = new ColorField ( this , PARENT_TASK_COLOR , Constants . DEFAULT_FIELD_LENGTH , null , null ) ; if ( iFieldSeq == 10 ) field = new ColorField ( this , PARENT_TASK_SELECT_COLOR , Constants . DEFAULT_FIELD_LENGTH , null , null ) ; if ( field == null ) field = super . setupField ( iFieldSeq ) ; return field ;
public class ScriptBuilder { /** * Adds a copy of the given byte array as a data element ( i . e . PUSHDATA ) at the end of the program . */ public ScriptBuilder data ( byte [ ] data ) { } }
if ( data . length == 0 ) return smallNum ( 0 ) ; else return data ( chunks . size ( ) , data ) ;
public class ConstructorInstrumenter {
    /**
     * {@inheritDoc}
     * <p>
     * Only instruments classes being redefined that have a registered sampler;
     * returning {@code null} tells the JVM to keep the class file unchanged.
     */
    @Override
    public byte[] transform(ClassLoader loader, String className, Class<?> classBeingRedefined, ProtectionDomain protectionDomain, byte[] classfileBuffer) {
        if ((classBeingRedefined == null) || (!samplerMap.containsKey(classBeingRedefined))) {
            return null;
        }
        if (!AllocationInstrumenter.canRewriteClass(className, loader)) {
            // Wrapped as unchecked: the transform() contract does not permit
            // throwing the checked UnmodifiableClassException directly.
            throw new RuntimeException(new UnmodifiableClassException("cannot instrument " + className));
        }
        return instrument(classfileBuffer, classBeingRedefined);
    }
}
public class DefaultBeanDescriptor {
    /**
     * Get the property annotation. The setter is tried first; the getter is
     * consulted only when the setter carries no such annotation.
     *
     * @param <T> the Class object corresponding to the annotation type.
     * @param writeMethod the method that should be used to write the property value.
     * @param readMethod the method that should be used to read the property value; may be null.
     * @param annotationClass the Class object corresponding to the annotation type.
     * @return this element's annotation for the specified annotation type if present, else null.
     */
    protected <T extends Annotation> T extractPropertyAnnotation(Method writeMethod, Method readMethod, Class<T> annotationClass) {
        final T fromSetter = writeMethod.getAnnotation(annotationClass);
        if (fromSetter != null) {
            return fromSetter;
        }
        return (readMethod != null) ? readMethod.getAnnotation(annotationClass) : null;
    }
}
public class Record { /** * Get value { @ link DoubleWritable } value * @ param label target label * @ return { @ link DoubleWritable } value of the label . If it is not null . */ public DoubleWritable getValueDoubleWritable ( String label ) { } }
HadoopObject o = getHadoopObject ( VALUE , label , ObjectUtil . DOUBLE , "Double" ) ; if ( o == null ) { return null ; } return ( DoubleWritable ) o . getObject ( ) ;
public class ELContext { /** * Inquires if the name is a LambdaArgument * @ param arg A possible Lambda formal parameter name * @ return true if arg is a LambdaArgument , false otherwise . */ public boolean isLambdaArgument ( String arg ) { } }
if ( lambdaArgs == null ) { return false ; } for ( int i = lambdaArgs . size ( ) - 1 ; i >= 0 ; i -- ) { Map < String , Object > lmap = lambdaArgs . elementAt ( i ) ; if ( lmap . containsKey ( arg ) ) { return true ; } } return false ;
public class ComponentsInner {
    /**
     * Gets a list of all Application Insights components within a subscription.
     *
     * @param nextPageLink The NextLink from the previous successful call to List operation.
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @return the observable to the PagedList&lt;ApplicationInsightsComponentInner&gt; object
     */
    public Observable<Page<ApplicationInsightsComponentInner>> listNextAsync(final String nextPageLink) {
        // Unwrap the ServiceResponse envelope and surface only the page body.
        return listNextWithServiceResponseAsync(nextPageLink)
                .map(new Func1<ServiceResponse<Page<ApplicationInsightsComponentInner>>, Page<ApplicationInsightsComponentInner>>() {
                    @Override
                    public Page<ApplicationInsightsComponentInner> call(ServiceResponse<Page<ApplicationInsightsComponentInner>> response) {
                        return response.body();
                    }
                });
    }
}
public class BinaryImageOps {
    /**
     * Converts a labeled image into a binary image by setting any non-zero value to one.
     *
     * @param labelImage Input image. Not modified.
     * @param binaryImage Output image. Modified. Passed through checkDeclare, which may
     *        substitute a newly declared image -- presumably when null or mis-sized;
     *        confirm against InputSanityCheck.
     * @return The binary image.
     */
    public static GrayU8 labelToBinary(GrayS32 labelImage, GrayU8 binaryImage) {
        binaryImage = InputSanityCheck.checkDeclare(labelImage, binaryImage, GrayU8.class);
        // Dispatch to the multi-threaded implementation when concurrency is enabled.
        if (BoofConcurrency.USE_CONCURRENT) {
            ImplBinaryImageOps_MT.labelToBinary(labelImage, binaryImage);
        } else {
            ImplBinaryImageOps.labelToBinary(labelImage, binaryImage);
        }
        return binaryImage;
    }
}
public class StringRecord {
    /**
     * Deserializes this record from the given input: reads a 4-byte length
     * followed by exactly that many bytes of content.
     *
     * @param in source to read from
     * @throws IOException if the stream fails or ends prematurely
     */
    public void read(final DataInput in) throws IOException {
        final int newLength = in.readInt();
        // Ensure the backing array can hold the new content; the old content
        // is not preserved (second argument false).
        setCapacity(newLength, false);
        in.readFully(this.bytes, 0, newLength);
        this.length = newLength;
        // Reset the cached hash so it reflects the new content.
        this.hash = 0;
    }
}
public class MDBRuntimeImpl { /** * Declarative service method for removing an AdminObjectService . */ protected synchronized void removeAdminObjectService ( ServiceReference < AdminObjectService > reference ) { } }
String id = ( String ) reference . getProperty ( ADMIN_OBJECT_CFG_ID ) ; if ( id != null ) { removeAdminObjectService ( reference , id , false ) ; String jndiName = ( String ) reference . getProperty ( ADMIN_OBJECT_CFG_JNDI_NAME ) ; if ( jndiName != null && ! jndiName . equals ( id ) ) { removeAdminObjectService ( reference , jndiName , true ) ; } }
public class WaitForExitService {
    /**
     * Thread loop that blocks until Resin should be stopped, triggering an
     * immediate shutdown when memory or file descriptors are exhausted.
     */
    void waitForExit() {
        Runtime runtime = Runtime.getRuntime();
        ShutdownSystem shutdown = _resinSystem.getSystem(ShutdownSystem.class);
        if (shutdown == null) {
            throw new IllegalStateException(L.l("'{0}' requires an active {1}", this, ShutdownSystem.class.getSimpleName()));
        }
        /*
         * If the server has a parent process watching over us, close
         * gracefully when the parent dies.
         */
        while (!_server.isClosing()) {
            try {
                Thread.sleep(10);
                if (!checkMemory(runtime)) {
                    shutdown.shutdown(ShutdownModeAmp.IMMEDIATE, ExitCode.MEMORY, "Server shutdown from out of memory");
                    // dumpHeapOnExit();
                    return;
                }
                if (!checkFileDescriptor()) {
                    shutdown.shutdown(ShutdownModeAmp.IMMEDIATE, ExitCode.MEMORY, "Server shutdown from out of file descriptors");
                    // dumpHeapOnExit();
                    return;
                }
                // Idle between health checks; wait() returns early if notified.
                synchronized (this) {
                    wait(10000);
                }
            } catch (OutOfMemoryError e) {
                String msg = "Server shutdown from out of memory";
                ShutdownSystem.shutdownOutOfMemory(msg);
            } catch (Throwable e) {
                // Any other failure ends the watchdog loop after logging.
                log.log(Level.WARNING, e.toString(), e);
                return;
            }
        }
    }
}
public class SyncBulkheadStateImpl { /** * { @ inheritDoc } */ @ Override public < R > MethodResult < R > run ( Callable < R > callable ) { } }
if ( ! semaphore . tryAcquire ( ) ) { metrics . incrementBulkheadRejectedCount ( ) ; return MethodResult . failure ( new BulkheadException ( ) ) ; } long startTime = System . nanoTime ( ) ; metrics . incrementBulkeadAcceptedCount ( ) ; try { return super . run ( callable ) ; } finally { semaphore . release ( ) ; long endTime = System . nanoTime ( ) ; metrics . recordBulkheadExecutionTime ( endTime - startTime ) ; }
public class JdbcUtil { /** * Returns { @ code true } if succeed to create table , otherwise { @ code false } is returned . * @ param conn * @ param tableName * @ param schema * @ return */ public static boolean createTableIfNotExists ( final Connection conn , final String tableName , final String schema ) { } }
if ( doesTableExist ( conn , tableName ) ) { return false ; } try { execute ( conn , schema ) ; return true ; } catch ( SQLException e ) { return false ; }
public class ACModelDialog { private JButton getButtonEditRoleMembership ( ) { } }
if ( btnEditRoleMembership == null ) { btnEditRoleMembership = new JButton ( "Edit role membership" ) ; btnEditRoleMembership . addActionListener ( new ActionListener ( ) { public void actionPerformed ( ActionEvent e ) { try { RoleMembershipDialog . showDialog ( ACModelDialog . this , ( RBACModel ) getDialogObject ( ) ) ; updateTextArea ( ) ; } catch ( Exception e1 ) { internalException ( "Cannot launch role membership dialog." , e1 ) ; } } } ) ; } return btnEditRoleMembership ;
public class SideBarItemDescriptor { /** * Attempts to find and return an annotation of the specified type declared on this side bar item . * @ param annotationType the type of the annotation to look for . * @ return the annotation , or { @ code null } if not found . */ public < A extends Annotation > A findAnnotationOnBean ( Class < A > annotationType ) { } }
return applicationContext . findAnnotationOnBean ( beanName , annotationType ) ;
public class JobClient { /** * Display the list of active trackers */ private void listActiveTrackers ( ) throws IOException { } }
ClusterStatus c = jobSubmitClient . getClusterStatus ( true ) ; Collection < String > trackers = c . getActiveTrackerNames ( ) ; for ( String trackerName : trackers ) { System . out . println ( trackerName ) ; }
public class ShibHttpClient { /** * Extracts the SOAP message from the HttpResponse * @ param entity the HttpEntity to retrieve the SOAP message from * @ return soapEnvelope the SOAP message * @ throws IOException * @ throws IllegalStateException * @ throws ClientProtocolException */ protected org . opensaml . ws . soap . soap11 . Envelope getSoapMessage ( HttpEntity entity ) throws ClientProtocolException , IllegalStateException , IOException { } }
Envelope soapEnvelope = ( Envelope ) unmarshallMessage ( parserPool , entity . getContent ( ) ) ; EntityUtils . consumeQuietly ( entity ) ; return soapEnvelope ;
public class IniFile {
    /**
     * loads the ini file
     *
     * Parses "[section]" headers and "key=value" pairs. Lines starting with
     * ';' or '#' are comments. Section and key names are lower-cased; keys
     * before the first section header are ignored.
     *
     * @param in inputstream to read
     * @throws IOException if reading from the stream fails
     */
    public void load(InputStream in) throws IOException {
        // NOTE(review): InputStreamReader without an explicit charset uses the
        // platform default encoding — confirm this is intended.
        BufferedReader input = IOUtil.toBufferedReader(new InputStreamReader(in));
        String read;
        Map section = null;
        String sectionName;
        while ((read = input.readLine()) != null) {
            if (read.startsWith(";") || read.startsWith("#")) {
                // comment line
                continue;
            } else if (read.startsWith("[")) {
                // new section; reuse an existing section map if one exists
                sectionName = read.substring(1, read.indexOf("]")).trim().toLowerCase();
                section = getSectionEL(sectionName);
                if (section == null) {
                    section = newMap();
                    sections.put(sectionName, section);
                }
            } else if (read.indexOf("=") != -1 && section != null) {
                // new key; value keeps its original case, key is lower-cased
                String key = read.substring(0, read.indexOf("=")).trim().toLowerCase();
                String value = read.substring(read.indexOf("=") + 1).trim();
                section.put(key, value);
            }
        }
    }
}
public class CommerceAccountUserRelPersistenceImpl {
    /**
     * Returns the commerce account user rels before and after the current commerce account user rel in the ordered set where commerceAccountUserId = &#63;.
     *
     * @param commerceAccountUserRelPK the primary key of the current commerce account user rel
     * @param commerceAccountUserId the commerce account user ID
     * @param orderByComparator the comparator to order the set by (optionally <code>null</code>)
     * @return the previous, current, and next commerce account user rel
     * @throws NoSuchAccountUserRelException if a commerce account user rel with the primary key could not be found
     */
    @Override
    public CommerceAccountUserRel[] findByCommerceAccountUserId_PrevAndNext(CommerceAccountUserRelPK commerceAccountUserRelPK, long commerceAccountUserId, OrderByComparator<CommerceAccountUserRel> orderByComparator) throws NoSuchAccountUserRelException {
        // Throws NoSuchAccountUserRelException if the current entity is missing.
        CommerceAccountUserRel commerceAccountUserRel = findByPrimaryKey(commerceAccountUserRelPK);
        Session session = null;
        try {
            session = openSession();
            // array layout: [0] = previous, [1] = current, [2] = next
            CommerceAccountUserRel[] array = new CommerceAccountUserRelImpl[3];
            array[0] = getByCommerceAccountUserId_PrevAndNext(session, commerceAccountUserRel, commerceAccountUserId, orderByComparator, true);
            array[1] = commerceAccountUserRel;
            array[2] = getByCommerceAccountUserId_PrevAndNext(session, commerceAccountUserRel, commerceAccountUserId, orderByComparator, false);
            return array;
        } catch (Exception e) {
            throw processException(e);
        } finally {
            // Always release the ORM session, even on failure.
            closeSession(session);
        }
    }
}
public class SerializableObjectStore { /** * Puts an serializable object into the store . * @ param key * the object key . * @ param value * the serializable object . * @ return true if the put operation is succeeds . * @ throws NullPointerException if < code > key < / code > is null . * @ throws Exception if this operation cannot be completed successfully . */ @ Override public boolean put ( K key , V value ) throws Exception { } }
if ( key == null ) { throw new NullPointerException ( "key" ) ; } if ( value == null ) { return _store . delete ( _keySerializer . serialize ( key ) ) ; } else { return _store . put ( _keySerializer . serialize ( key ) , _valSerializer . serialize ( value ) ) ; }
public class ServerService { /** * Updates the given server . * @ param server The server to update * @ return The server that was updated */ public Optional < Server > update ( Server server ) { } }
return HTTP . PUT ( String . format ( "/v2/servers/%d.json" , server . getId ( ) ) , server , SERVER ) ;
public class ImgUtil { /** * 将Base64编码的图像信息转为 { @ link BufferedImage } * @ param base64 图像的Base64表示 * @ return { @ link BufferedImage } * @ throws IORuntimeException IO异常 */ public static BufferedImage toImage ( String base64 ) throws IORuntimeException { } }
byte [ ] decode = Base64 . decode ( base64 , CharsetUtil . CHARSET_UTF_8 ) ; return toImage ( decode ) ;
public class Index { /** * Add or Replace a list of synonyms * @ param objects List of synonyms * @ param forwardToReplicas Forward the operation to the replica indices * @ param requestOptions Options to pass to this request */ public JSONObject batchSynonyms ( List < JSONObject > objects , boolean forwardToReplicas , RequestOptions requestOptions ) throws AlgoliaException { } }
return batchSynonyms ( objects , forwardToReplicas , false , requestOptions ) ;
public class StrictFormatStringValidation {
    /**
     * Returns whether an input {@link Symbol} is a format string in a {@link FormatMethod}. This is
     * true if the {@link Symbol} is a {@link String} parameter in a {@link FormatMethod} and is
     * either:
     * <ol>
     * <li>Annotated with {@link FormatString}
     * <li>The first {@link String} parameter in the method with no other parameters annotated
     * {@link FormatString}.
     * </ol>
     */
    private static boolean isFormatStringParameter(Symbol formatString, VisitorState state) {
        Type stringType = state.getSymtab().stringType;
        // The input symbol must be a String and a parameter of a @FormatMethod to be a @FormatString.
        if (!ASTHelpers.isSameType(formatString.type, stringType, state)
                || !(formatString.owner instanceof MethodSymbol)
                || !ASTHelpers.hasAnnotation(formatString.owner, FormatMethod.class, state)) {
            return false;
        }
        // If the format string is annotated @FormatString in a @FormatMethod, it is a format string.
        if (ASTHelpers.hasAnnotation(formatString, FormatString.class, state)) {
            return true;
        }
        // Check if format string is the first string with no @FormatString params in the @FormatMethod.
        MethodSymbol owner = (MethodSymbol) formatString.owner;
        // Tracks whether the loop has passed the input symbol yet; parameters are
        // visited in declaration order.
        boolean formatStringFound = false;
        for (Symbol param : owner.getParameters()) {
            if (param == formatString) {
                formatStringFound = true;
            }
            if (ASTHelpers.isSameType(param.type, stringType, state)) {
                // If this is a String parameter before the input Symbol, then the input symbol can't be the
                // format string since it wasn't annotated @FormatString.
                if (!formatStringFound) {
                    return false;
                } else if (ASTHelpers.hasAnnotation(param, FormatString.class, state)) {
                    // A later String parameter carries @FormatString, so the input
                    // symbol cannot be the implicit format string.
                    return false;
                }
            }
        }
        return true;
    }
}
public class Location { /** * Return distance between this location and the other location . * Distance is defined only if both locations are on same strand . * @ param other The location to compare . * @ return The integer distance . Returns - 1 if they overlap ; 0 if directly adjacent . * @ throws IllegalArgumentException Locations are on opposite strands . */ public int distance ( Location other ) { } }
if ( isSameStrand ( other ) ) { if ( overlaps ( other ) ) { return - 1 ; } else { return ( mEnd <= other . mStart ) ? ( other . mStart - mEnd ) : ( mStart - other . mEnd ) ; } } else { throw new IllegalArgumentException ( "Locations are on opposite strands." ) ; }
public class TaskResult { /** * Inserts a List of Parcelable values into the mapping of this Bundle , replacing any existing * value for the given key . Either key or value may be null . * @ param key a String , or null * @ param value an ArrayList of Parcelable objects , or null */ public TaskResult addParcelableArrayList ( String key , ArrayList < ? extends Parcelable > value ) { } }
mBundle . putParcelableArrayList ( key , value ) ; return this ;
public class MarkLogicClient { /** * add triples from InputStream * @ param in * @ param baseURI * @ param dataFormat * @ param contexts */ public void sendAdd ( InputStream in , String baseURI , RDFFormat dataFormat , Resource ... contexts ) throws RDFParseException , MarkLogicSesameException { } }
getClient ( ) . performAdd ( in , baseURI , dataFormat , this . tx , contexts ) ;
public class MinioClient {
    /**
     * Initializes new multipart upload for given bucket name, object name and content type.
     *
     * Sends a POST with the "uploads" query parameter and returns the upload id
     * parsed from the XML response body.
     */
    private String initMultipartUpload(String bucketName, String objectName, Map<String, String> headerMap) throws InvalidBucketNameException, NoSuchAlgorithmException, InsufficientDataException, IOException, InvalidKeyException, NoResponseException, XmlPullParserException, ErrorResponseException, InternalException {
        // set content type if not set already
        if (headerMap.get("Content-Type") == null) {
            headerMap.put("Content-Type", "application/octet-stream");
        }
        // "uploads" with an empty value selects the initiate-multipart-upload API.
        Map<String, String> queryParamMap = new HashMap<>();
        queryParamMap.put("uploads", "");
        HttpResponse response = executePost(bucketName, objectName, headerMap, queryParamMap, "");
        // Parse the InitiateMultipartUploadResult XML to extract the upload id.
        InitiateMultipartUploadResult result = new InitiateMultipartUploadResult();
        result.parseXml(response.body().charStream());
        // Release the HTTP response body before returning.
        response.body().close();
        return result.uploadId();
    }
}
public class ClustersInner {
    /**
     * Lists eligible SKUs for Kusto resource provider.
     *
     * Validates the required client parameters, issues the service call, and
     * unwraps the paged response body into a plain list.
     *
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @return the observable to the List&lt;AzureSkuInner&gt; object
     */
    public Observable<ServiceResponse<List<AzureSkuInner>>> listSkusWithServiceResponseAsync() {
        if (this.client.subscriptionId() == null) {
            throw new IllegalArgumentException("Parameter this.client.subscriptionId() is required and cannot be null.");
        }
        if (this.client.apiVersion() == null) {
            throw new IllegalArgumentException("Parameter this.client.apiVersion() is required and cannot be null.");
        }
        return service.listSkus(this.client.subscriptionId(), this.client.apiVersion(), this.client.acceptLanguage(), this.client.userAgent())
            .flatMap(new Func1<Response<ResponseBody>, Observable<ServiceResponse<List<AzureSkuInner>>>>() {
                @Override
                public Observable<ServiceResponse<List<AzureSkuInner>>> call(Response<ResponseBody> response) {
                    try {
                        // Deserialize the raw response into the paged wrapper type.
                        ServiceResponse<PageImpl<AzureSkuInner>> result = listSkusDelegate(response);
                        List<AzureSkuInner> items = null;
                        if (result.body() != null) {
                            items = result.body().items();
                        }
                        // Re-wrap the flattened item list with the original HTTP response.
                        ServiceResponse<List<AzureSkuInner>> clientResponse = new ServiceResponse<List<AzureSkuInner>>(items, result.response());
                        return Observable.just(clientResponse);
                    } catch (Throwable t) {
                        // Propagate parse/delegate failures through the observable.
                        return Observable.error(t);
                    }
                }
            });
    }
}
public class AxesChartSeriesCategory { /** * Finds the min and max of a dataset * @ param data * @ return */ double [ ] findMinMax ( Collection < ? > data , DataType dataType ) { } }
double min = Double . MAX_VALUE ; double max = - Double . MAX_VALUE ; for ( Object dataPoint : data ) { if ( dataPoint == null ) { continue ; } double value = 0.0 ; if ( dataType == DataType . Number ) { value = ( ( Number ) dataPoint ) . doubleValue ( ) ; } else if ( dataType == DataType . Date ) { Date date = ( Date ) dataPoint ; value = date . getTime ( ) ; } else if ( dataType == DataType . String ) { return new double [ ] { Double . NaN , Double . NaN } ; } if ( value < min ) { min = value ; } if ( value > max ) { max = value ; } } return new double [ ] { min , max } ;
public class IVFPQ {
    /**
     * Returns the pq code of the image with the given id.
     *
     * Looks up the internal id, then reads the stored tuple from the
     * Berkeley DB database: an int list id followed by one byte per sub-vector.
     *
     * @param id the external image id
     * @return the product-quantization code, one byte per sub-vector
     * @throws Exception if the id is unknown, or if the index uses more than
     *         256 product centroids (codes then need shorts, not bytes)
     */
    public byte[] getPQCodeByte(String id) throws Exception {
        int iid = getInternalId(id);
        if (iid == -1) {
            throw new Exception("Id does not exist!");
        }
        // With more than 256 centroids a byte cannot hold a centroid index.
        if (numProductCentroids > 256) {
            throw new Exception("Call the short variant of the method!");
        }
        DatabaseEntry key = new DatabaseEntry();
        IntegerBinding.intToEntry(iid, key);
        DatabaseEntry data = new DatabaseEntry();
        if ((iidToIvfpqDB.get(null, key, data, null) == OperationStatus.SUCCESS)) {
            TupleInput input = TupleBinding.entryToInput(data);
            input.readInt(); // skip the list id
            byte[] code = new byte[numSubVectors];
            for (int i = 0; i < numSubVectors; i++) {
                code[i] = input.readByte();
            }
            return code;
        } else {
            // Internal id resolved but no record stored under it.
            throw new Exception("Id does not exist!");
        }
    }
}
public class AcroFields { /** * Regenerates the field appearance . * This is useful when you change a field property , but not its value , * for instance form . setFieldProperty ( " f " , " bgcolor " , Color . BLUE , null ) ; * This won ' t have any effect , unless you use regenerateField ( " f " ) after changing * the property . * @ param name the fully qualified field name or the partial name in the case of XFA forms * @ throws IOException on error * @ throws DocumentException on error * @ return < CODE > true < / CODE > if the field was found and changed , * < CODE > false < / CODE > otherwise */ public boolean regenerateField ( String name ) throws IOException , DocumentException { } }
String value = getField ( name ) ; return setField ( name , value , value ) ;
public class TreeMatcher { /** * Return all matching < em > sub < / em > - trees . * @ return all matching sub - trees * @ throws NullPointerException if the given predicate is { @ code null } */ public Stream < TreeMatchResult < V > > results ( ) { } }
return _tree . stream ( ) . flatMap ( ( Tree < V , ? > tree ) -> _pattern . match ( tree , _equals ) . map ( Stream :: of ) . orElseGet ( Stream :: empty ) ) ;
public class WktToGeoJsonConverter { /** * input : ( ( 10 10 , 20 20 , 10 40 ) , ( 40 40 , 30 30 , 40 20 , 30 10 ) ) */ private static String multiLineStringCoordinatesFromWkt ( String wkt ) { } }
wkt = removeBrackets ( wkt , 1 ) ; String lineStringsWithPipeSeparator = wkt . replaceAll ( "\\s*\\)\\s*,\\s*\\(" , ")|(" ) ; String [ ] lineStrings = lineStringsWithPipeSeparator . split ( "\\|" ) ; String [ ] coordinates = new String [ lineStrings . length ] ; for ( int i = 0 ; i < lineStrings . length ; i ++ ) { coordinates [ i ] = lineStringCoordinatesFromWkt ( lineStrings [ i ] ) ; } String multiLineStringCoordinates = Joiner . on ( "," ) . join ( coordinates ) ; return String . format ( "[%s]" , multiLineStringCoordinates ) ;
public class CompactStartElement { /** * StartElement implementation */ @ Override public Attribute getAttributeByName ( QName name ) { } }
if ( mAttrs == null ) { return null ; } int ix = mAttrs . findIndex ( name ) ; if ( ix < 0 ) { return null ; } return constructAttr ( mRawAttrs , ix , mAttrs . isDefault ( ix ) ) ;
public class SqlTableStructure {
    /**
     * /* (non-Javadoc)
     *
     * @see org.parosproxy.paros.db.paros.TableParam#read(long)
     *
     * Reads the structure record matching the given session and URL ids,
     * returning {@code null} when no such row exists.
     */
    @Override
    public synchronized RecordStructure read(long sessionId, long urlId) throws DatabaseException {
        SqlPreparedStatementWrapper psRead = null;
        try {
            psRead = DbSQL.getSingleton().getPreparedStatement("structure.ps.read");
            psRead.getPs().setLong(1, sessionId);
            psRead.getPs().setLong(2, urlId);
            // try-with-resources closes the ResultSet even when build() throws.
            try (ResultSet rs = psRead.getPs().executeQuery()) {
                RecordStructure result = null;
                if (rs.next()) {
                    result = build(rs);
                }
                return result;
            }
        } catch (SQLException e) {
            // Wrap driver-level failures in the persistence layer's exception type.
            throw new DatabaseException(e);
        } finally {
            // Return the pooled prepared statement regardless of outcome.
            DbSQL.getSingleton().releasePreparedStatement(psRead);
        }
    }
}
public class ConfigQueryBuilder { /** * Query which asserts that a property is greater than ( but not equal to ) a value . * @ param property field to query * @ param value value to query for * @ return restriction to be added to { @ link ConfigQuery } . */ public static < A extends Comparable < A > > Restriction greaterThan ( String property , A value ) { } }
return new GreaterThan < > ( property , value ) ;
public class JpaDistributionSetManagement { /** * executes findAll with the given { @ link DistributionSet } * { @ link Specification } s . * @ param pageable * paging parameter * @ param specList * list of @ link { @ link Specification } * @ return the page with the found { @ link DistributionSet } */ private Page < JpaDistributionSet > findByCriteriaAPI ( final Pageable pageable , final List < Specification < JpaDistributionSet > > specList ) { } }
if ( CollectionUtils . isEmpty ( specList ) ) { return distributionSetRepository . findAll ( pageable ) ; } return distributionSetRepository . findAll ( SpecificationsBuilder . combineWithAnd ( specList ) , pageable ) ;
public class AbstractListPreference { /** * Obtains the the values , which correspond to the entries of the list preference from a * specific typed array . * @ param typedArray * The typed array , the entry values should be obtained from , as an instance of the * class { @ link TypedArray } . The typed array may not be null */ private void obtainEntryValues ( @ NonNull final TypedArray typedArray ) { } }
CharSequence [ ] obtainedEntryValues = typedArray . getTextArray ( R . styleable . AbstractListPreference_android_entryValues ) ; if ( obtainedEntryValues != null ) { setEntryValues ( obtainedEntryValues ) ; }
public class MariaDbClob { /** * Returns a Reader object that contains a partial Clob value , starting with the character * specified by pos , which is length characters in length . * @ param pos the offset to the first character of the partial value to be retrieved . The first * character in the Clob is at position 1. * @ param length the length in characters of the partial value to be retrieved . * @ return Reader through which the partial Clob value can be read . * @ throws SQLException if pos is less than 1 or if pos is greater than the number of characters * in the Clob or if pos + length is greater than the number of characters in * the Clob */ public Reader getCharacterStream ( long pos , long length ) throws SQLException { } }
String val = toString ( ) ; if ( val . length ( ) < ( int ) pos - 1 + length ) { throw ExceptionMapper . getSqlException ( "pos + length is greater than the number of characters in the Clob" ) ; } String sub = val . substring ( ( int ) pos - 1 , ( int ) pos - 1 + ( int ) length ) ; return new StringReader ( sub ) ;
public class ImageLoader { /** * { @ linkplain # stop ( ) Stops ImageLoader } and clears current configuration . < br / > * You can { @ linkplain # init ( ImageLoaderConfiguration ) init } ImageLoader with new configuration after calling this * method . */ public void destroy ( ) { } }
if ( configuration != null ) L . d ( LOG_DESTROY ) ; stop ( ) ; configuration . diskCache . close ( ) ; engine = null ; configuration = null ;
public class PeasyRecyclerView {
    /**
     * Present as Grid View
     * Be noted, columns must not less than {@value DefaultGridColumnSize}
     * Default divider is {@link PeasyGridDividerItemDecoration}
     * Execute {@link #resetItemDecorations()}
     * Execute {@link #resetItemAnimator()}
     *
     * @param columns provided to {@link PeasyConfigurations#issueColumnSize(int)}
     * @return GridLayoutManager
     */
    public GridLayoutManager asGridView(int columns) {
        this.presentation = PeasyPresentation.BasicGrid;
        // Clear any decorations/animators left over from a previous presentation.
        resetItemDecorations();
        resetItemAnimator();
        final GridLayoutManager layoutManager = PeasyRecyclerView.BasicGrid.newLayoutManager(getContext(), columns);
        // Remember the column count in the extra data bundle for later use.
        PeasyConfigurations.bundleColumnSize(getExtraData(), columns);
        getRecyclerView().setLayoutManager(layoutManager);
        getRecyclerView().addItemDecoration(new PeasyGridDividerItemDecoration(getContext(), columns));
        getRecyclerView().setItemAnimator(new DefaultItemAnimator());
        return layoutManager;
    }
}
public class LdapConnection {
    /**
     * Check the search cache for previously performed searches. If the result is not cached,
     * query the LDAP server.
     *
     * @param name The name of the context or object to search
     * @param filterExpr the filter expression used in the search.
     * @param filterArgs the filter arguments used in the search.
     * @param cons The search controls used in the search.
     * @return The {@link CachedNamingEnumeration} if the search is still in the cache, null otherwise.
     * @throws WIMException If the search failed with an error.
     */
    private NamingEnumeration<SearchResult> checkSearchCache(String name, String filterExpr, Object[] filterArgs, SearchControls cons) throws WIMException {
        final String METHODNAME = "checkSearchCache";
        NamingEnumeration<SearchResult> neu = null;
        if (getSearchResultsCache() != null) {
            // Cache key includes the filter arguments only when they are present.
            String key = null;
            if (filterArgs == null) {
                key = toKey(name, filterExpr, cons);
            } else {
                key = toKey(name, filterExpr, filterArgs, cons);
            }
            CachedNamingEnumeration cached = (CachedNamingEnumeration) getSearchResultsCache().get(key);
            if (cached == null) {
                if (tc.isDebugEnabled()) {
                    Tr.debug(tc, METHODNAME + " Miss cache: " + key);
                }
                // Cache miss: query the server and populate the cache.
                neu = search(name, filterExpr, filterArgs, cons, null);
                String[] reqAttrIds = cons.getReturningAttributes();
                neu = updateSearchCache(name, key, neu, reqAttrIds);
            } else {
                if (tc.isDebugEnabled()) {
                    Tr.debug(tc, METHODNAME + " Hit cache: " + key);
                }
                // Clone the cached enumeration so the cached copy stays reusable.
                neu = (CachedNamingEnumeration) cached.clone();
            }
        } else {
            // Caching disabled: always go to the LDAP server.
            neu = search(name, filterExpr, filterArgs, cons, null);
        }
        return neu;
    }
}
public class nd6 { /** * Use this API to clear nd6. */ public static base_response clear ( nitro_service client ) throws Exception { } }
nd6 clearresource = new nd6 ( ) ; return clearresource . perform_operation ( client , "clear" ) ;
public class ParseUtils { /** * A variation of nextElement that verifies the nextElement is not in a different namespace . * @ param reader the XmlExtendedReader to read from . * @ param expectedNamespace the namespace expected . * @ return the element or null if the end is reached * @ throws XMLStreamException if the namespace is wrong or there is a problem accessing the reader */ public static Element nextElement ( XMLExtendedStreamReader reader , Namespace expectedNamespace ) throws XMLStreamException { } }
Element element = nextElement ( reader ) ; if ( element == null ) { return null ; } else if ( element != Element . UNKNOWN && expectedNamespace . equals ( Namespace . forUri ( reader . getNamespaceURI ( ) ) ) ) { return element ; } throw unexpectedElement ( reader ) ;
public class AWSKMSClient { /** * Connects or reconnects a < a * href = " http : / / docs . aws . amazon . com / kms / latest / developerguide / key - store - overview . html " > custom key store < / a > to its * associated AWS CloudHSM cluster . * The custom key store must be connected before you can create customer master keys ( CMKs ) in the key store or use * the CMKs it contains . You can disconnect and reconnect a custom key store at any time . * To connect a custom key store , its associated AWS CloudHSM cluster must have at least one active HSM . To get the * number of active HSMs in a cluster , use the < a * href = " http : / / docs . aws . amazon . com / cloudhsm / latest / APIReference / API _ DescribeClusters " > DescribeClusters < / a > * operation . To add HSMs to the cluster , use the < a * href = " http : / / docs . aws . amazon . com / cloudhsm / latest / APIReference / API _ CreateHsm " > CreateHsm < / a > operation . * The connection process can take an extended amount of time to complete ; up to 20 minutes . This operation starts * the connection process , but it does not wait for it to complete . When it succeeds , this operation quickly returns * an HTTP 200 response and a JSON object with no properties . However , this response does not indicate that the * custom key store is connected . To get the connection state of the custom key store , use the * < a > DescribeCustomKeyStores < / a > operation . * During the connection process , AWS KMS finds the AWS CloudHSM cluster that is associated with the custom key * store , creates the connection infrastructure , connects to the cluster , logs into the AWS CloudHSM client as the * < a href = " http : / / docs . aws . amazon . com / kms / latest / developerguide / key - store - concepts . html # concept - kmsuser " > * < code > kmsuser < / code > crypto user < / a > ( CU ) , and rotates its password . * The < code > ConnectCustomKeyStore < / code > operation might fail for various reasons . 
To find the reason , use the * < a > DescribeCustomKeyStores < / a > operation and see the < code > ConnectionErrorCode < / code > in the response . For help * interpreting the < code > ConnectionErrorCode < / code > , see < a > CustomKeyStoresListEntry < / a > . * To fix the failure , use the < a > DisconnectCustomKeyStore < / a > operation to disconnect the custom key store , correct * the error , use the < a > UpdateCustomKeyStore < / a > operation if necessary , and then use * < code > ConnectCustomKeyStore < / code > again . * If you are having trouble connecting or disconnecting a custom key store , see < a * href = " http : / / docs . aws . amazon . com / kms / latest / developerguide / fix - keystore . html " > Troubleshooting a Custom Key * Store < / a > in the < i > AWS Key Management Service Developer Guide < / i > . * @ param connectCustomKeyStoreRequest * @ return Result of the ConnectCustomKeyStore operation returned by the service . * @ throws CloudHsmClusterNotActiveException * The request was rejected because the AWS CloudHSM cluster that is associated with the custom key store is * not active . Initialize and activate the cluster and try the command again . For detailed instructions , see * < a href = " http : / / docs . aws . amazon . com / cloudhsm / latest / userguide / getting - started . html " > Getting Started < / a > * in the < i > AWS CloudHSM User Guide < / i > . * @ throws CustomKeyStoreInvalidStateException * The request was rejected because of the < code > ConnectionState < / code > of the custom key store . To get the * < code > ConnectionState < / code > of a custom key store , use the < a > DescribeCustomKeyStores < / a > operation . < / p > * This exception is thrown under the following conditions : * < ul > * < li > * You requested the < a > CreateKey < / a > or < a > GenerateRandom < / a > operation in a custom key store that is not * connected . 
These operations are valid only when the custom key store < code > ConnectionState < / code > is * < code > CONNECTED < / code > . * < / li > * < li > * You requested the < a > UpdateCustomKeyStore < / a > or < a > DeleteCustomKeyStore < / a > operation on a custom key * store that is not disconnected . This operation is valid only when the custom key store * < code > ConnectionState < / code > is < code > DISCONNECTED < / code > . * < / li > * < li > * You requested the < a > ConnectCustomKeyStore < / a > operation on a custom key store with a * < code > ConnectionState < / code > of < code > DISCONNECTING < / code > or < code > FAILED < / code > . This operation is * valid for all other < code > ConnectionState < / code > values . * < / li > * @ throws CustomKeyStoreNotFoundException * The request was rejected because AWS KMS cannot find a custom key store with the specified key store name * or ID . * @ throws KMSInternalException * The request was rejected because an internal exception occurred . The request can be retried . * @ throws CloudHsmClusterInvalidConfigurationException * The request was rejected because the associated AWS CloudHSM cluster did not meet the configuration * requirements for a custom key store . The cluster must be configured with private subnets in at least two * different Availability Zones in the Region . Also , it must contain at least as many HSMs as the operation * requires . < / p > * For the < a > CreateCustomKeyStore < / a > , < a > UpdateCustomKeyStore < / a > , and < a > CreateKey < / a > operations , the * AWS CloudHSM cluster must have at least two active HSMs , each in a different Availability Zone . For the * < a > ConnectCustomKeyStore < / a > operation , the AWS CloudHSM must contain at least one active HSM . * For information about creating a private subnet for a AWS CloudHSM cluster , see < a * href = " http : / / docs . aws . amazon . com / cloudhsm / latest / userguide / create - subnets . 
html " > Create a Private * Subnet < / a > in the < i > AWS CloudHSM User Guide < / i > . To add HSMs , use the AWS CloudHSM < a * href = " http : / / docs . aws . amazon . com / cloudhsm / latest / APIReference / API _ CreateHsm . html " > CreateHsm < / a > * operation . * @ sample AWSKMS . ConnectCustomKeyStore * @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / kms - 2014-11-01 / ConnectCustomKeyStore " target = " _ top " > AWS API * Documentation < / a > */ @ Override public ConnectCustomKeyStoreResult connectCustomKeyStore ( ConnectCustomKeyStoreRequest request ) { } }
request = beforeClientExecution ( request ) ; return executeConnectCustomKeyStore ( request ) ;