signature
stringlengths
43
39.1k
implementation
stringlengths
0
450k
public class Charset { /** * Returns a new { @ code ByteBuffer } containing the bytes encoding the characters from * { @ code buffer } . * This method uses { @ code CodingErrorAction . REPLACE } . * < p > Applications should generally create a { @ link CharsetEncoder } using { @ link # newEncoder } * for performance . * @ param buffer * the character buffer containing the content to be encoded . * @ return the result of the encoding . */ public final ByteBuffer encode ( CharBuffer buffer ) { } }
try { return newEncoder ( ) . onMalformedInput ( CodingErrorAction . REPLACE ) . onUnmappableCharacter ( CodingErrorAction . REPLACE ) . encode ( buffer ) ; } catch ( CharacterCodingException ex ) { throw new Error ( ex . getMessage ( ) , ex ) ; }
public class GlobalParameter { /** * Create a new GP entry in the database . No commit performed . */ public static GlobalParameter create ( DbConn cnx , String key , String value ) { } }
QueryResult r = cnx . runUpdate ( "globalprm_insert" , key , value ) ; GlobalParameter res = new GlobalParameter ( ) ; res . id = r . getGeneratedId ( ) ; res . key = key ; res . value = value ; return res ;
public class ReverseBinaryEncoder { /** * Write contents of a local symbol table as a struct . * The contents are the IST : : annotation , declared symbols , and import * declarations ( that refer to shared symtabs ) if they exist . * @ param symTab the local symbol table , not shared , not system */ private void writeLocalSymbolTable ( SymbolTable symTab ) { } }
assert symTab . isLocalTable ( ) ; final int originalOffset = myBuffer . length - myOffset ; // Write declared local symbol strings if any exists writeSymbolsField ( symTab ) ; // Write import declarations if any exists writeImportsField ( symTab ) ; // Write the struct prefix writePrefix ( TYPE_STRUCT , myBuffer . length - myOffset - originalOffset ) ; // Write the $ ion _ symbol _ table annotation byte [ ] ionSymbolTableByteArray = { ( byte ) ( 0x80 | 1 ) , /* annot - length */ ( byte ) ( 0x80 | ION_SYMBOL_TABLE_SID ) /* annot */ } ; writeBytes ( ionSymbolTableByteArray ) ; writePrefix ( TYPE_ANNOTATIONS , myBuffer . length - myOffset - originalOffset ) ;
public class HashIndex { /** * Returns an unmodifiable view of the Index . It is just * a locked index that cannot be unlocked , so if you * try to add something , nothing will happen ( it won ' t throw * an exception ) . Trying to unlock it will throw an * UnsupportedOperationException . If the * underlying Index is modified , the change will * " write - through " to the view . * @ return An unmodifiable view of the Index */ public HashIndex < E > unmodifiableView ( ) { } }
HashIndex < E > newIndex = new HashIndex < E > ( ) { @ Override public void unlock ( ) { throw new UnsupportedOperationException ( "This is an unmodifiable view!" ) ; } private static final long serialVersionUID = 3415903369787491736L ; } ; newIndex . objects = objects ; newIndex . indexes = indexes ; newIndex . lock ( ) ; return newIndex ;
public class CmsStringUtil { /** * Extracts the xml encoding setting from an xml file that is contained in a String by parsing * the xml head . < p > * This is useful if you have a byte array that contains a xml String , * but you do not know the xml encoding setting . Since the encoding setting * in the xml head is usually encoded with standard US - ASCII , you usually * just create a String of the byte array without encoding setting , * and use this method to find the ' true ' encoding . Then create a String * of the byte array again , this time using the found encoding . < p > * This method will return < code > null < / code > in case no xml head * or encoding information is contained in the input . < p > * @ param content the xml content to extract the encoding from * @ return the extracted encoding , or null if no xml encoding setting was found in the input */ public static String extractXmlEncoding ( String content ) { } }
String result = null ; Matcher xmlHeadMatcher = XML_HEAD_REGEX . matcher ( content ) ; if ( xmlHeadMatcher . find ( ) ) { String xmlHead = xmlHeadMatcher . group ( ) ; Matcher encodingMatcher = XML_ENCODING_REGEX . matcher ( xmlHead ) ; if ( encodingMatcher . find ( ) ) { String encoding = encodingMatcher . group ( ) ; int pos1 = encoding . indexOf ( '=' ) + 2 ; String charset = encoding . substring ( pos1 , encoding . length ( ) - 1 ) ; if ( Charset . isSupported ( charset ) ) { result = charset ; } } } return result ;
public class WSController { /** * A message is a call service request or subscribe / unsubscribe topic * @ param client * @ param json */ @ Override public void receiveCommandMessage ( Session client , String json ) { } }
MessageFromClient message = MessageFromClient . createFromJson ( json ) ; logger . debug ( "Receive call message in websocket '{}' for session '{}'" , message . getId ( ) , client . getId ( ) ) ; callServiceManager . sendMessageToClient ( message , client ) ;
public class TargetLoginStage { /** * Returns < code > true < / code > , if and only if the specified PDU is a Login Request PDU and the CSN and * InitiatorTaskTag fields check out . * @ param pdu the PDU to check * @ return < code > true < / code > if the PDU checks out */ protected boolean checkPdu ( ProtocolDataUnit pdu ) { } }
final BasicHeaderSegment bhs = pdu . getBasicHeaderSegment ( ) ; final LoginRequestParser parser = ( LoginRequestParser ) bhs . getParser ( ) ; if ( bhs . getOpCode ( ) == OperationCode . LOGIN_REQUEST && parser . getCurrentStageNumber ( ) == stageNumber && bhs . getInitiatorTaskTag ( ) == initiatorTaskTag ) return true ; return false ;
public class BucketFlusher { /** * Initiates a flush request against the server . * The result indicates if polling needs to be done or the flush is already complete . It can also fail in case * flush is disabled or something else went wrong in the server response . * @ param core the core reference . * @ param bucket the bucket to flush . * @ param username the user authorized for bucket access * @ param password the password of the user . * @ return an observable indicating if done ( true ) or polling needs to happen ( false ) . */ private static Observable < Boolean > initiateFlush ( final ClusterFacade core , final String bucket , final String username , final String password ) { } }
return deferAndWatch ( new Func1 < Subscriber , Observable < FlushResponse > > ( ) { @ Override public Observable < FlushResponse > call ( Subscriber subscriber ) { FlushRequest request = new FlushRequest ( bucket , username , password ) ; request . subscriber ( subscriber ) ; return core . send ( request ) ; } } ) . retryWhen ( any ( ) . delay ( Delay . fixed ( 100 , TimeUnit . MILLISECONDS ) ) . max ( Integer . MAX_VALUE ) . build ( ) ) . map ( new Func1 < FlushResponse , Boolean > ( ) { @ Override public Boolean call ( FlushResponse flushResponse ) { if ( ! flushResponse . status ( ) . isSuccess ( ) ) { if ( flushResponse . content ( ) . contains ( "disabled" ) ) { throw new FlushDisabledException ( "Flush is disabled for this bucket." ) ; } else { throw new CouchbaseException ( "Flush failed because of: " + flushResponse . content ( ) ) ; } } return flushResponse . isDone ( ) ; } } ) ;
public class MtasDataCollector { /** * Adds the . * @ param increaseSourceNumber the increase source number * @ return the mtas data collector * @ throws IOException Signals that an I / O exception has occurred . */ protected final MtasDataCollector add ( boolean increaseSourceNumber ) throws IOException { } }
if ( ! closed ) { if ( ! collectorType . equals ( DataCollector . COLLECTOR_TYPE_DATA ) ) { throw new IOException ( "collector should be " + DataCollector . COLLECTOR_TYPE_DATA ) ; } else { if ( newPosition > 0 ) { newCurrentExisting = true ; } else if ( position < getSize ( ) ) { // copy newKeyList [ 0 ] = keyList [ 0 ] ; newSourceNumberList [ 0 ] = sourceNumberList [ 0 ] ; if ( increaseSourceNumber ) { newSourceNumberList [ 0 ] ++ ; } newErrorNumber [ 0 ] = errorNumber [ 0 ] ; newErrorList [ 0 ] = errorList [ 0 ] ; if ( hasSub ) { newSubCollectorNextLevel = subCollectorNextLevel ; } copyToNew ( 0 , 0 ) ; newPosition = 1 ; position = 1 ; newCurrentExisting = true ; } else { // add key newKeyList [ 0 ] = DataCollector . COLLECTOR_TYPE_DATA ; newSourceNumberList [ 0 ] = 1 ; newErrorNumber [ 0 ] = 0 ; newErrorList [ 0 ] = new HashMap < > ( ) ; newPosition = 1 ; newCurrentPosition = newPosition - 1 ; newCurrentExisting = false ; // ready , only handle sub if ( hasSub ) { newSubCollectorNextLevel = DataCollector . getCollector ( subCollectorTypes [ 0 ] , subDataTypes [ 0 ] , subStatsTypes [ 0 ] , subStatsItems [ 0 ] , subSortTypes [ 0 ] , subSortDirections [ 0 ] , subStart [ 0 ] , subNumber [ 0 ] , newSubCollectorTypes , newSubDataTypes , newSubStatsTypes , newSubStatsItems , newSubSortTypes , newSubSortDirections , newSubStart , newSubNumber , segmentRegistration , null ) ; } else { newSubCollectorNextLevel = null ; } } return newSubCollectorNextLevel ; } } else { throw new IOException ( "already closed" ) ; }
public class Parser { /** * Parse the raw data . * @ return parsed media information */ MediaInfo parse ( ) { } }
try { BufferedReader reader = new BufferedReader ( new StringReader ( data ) ) ; MediaInfo mediaInfo = new MediaInfo ( ) ; String sectionName ; String line ; Sections sections ; Section section = null ; while ( parseState != ParseState . FINISHED ) { switch ( parseState ) { case DEFAULT : parseState = ParseState . NEXT_SECTION ; break ; case NEXT_SECTION : sectionName = reader . readLine ( ) ; if ( sectionName == null ) { parseState = ParseState . FINISHED ; } else if ( sectionName . length ( ) > 0 ) { parseState = ParseState . SECTION ; sections = mediaInfo . sections ( sectionName ) ; section = sections . newSection ( ) ; } break ; case SECTION : line = reader . readLine ( ) ; if ( line == null ) { parseState = ParseState . FINISHED ; } else if ( line . length ( ) == 0 ) { parseState = ParseState . NEXT_SECTION ; } else { String [ ] values = line . split ( ":" , 2 ) ; section . put ( values [ 0 ] . trim ( ) , values [ 1 ] . trim ( ) ) ; } break ; default : throw new IllegalStateException ( ) ; } } return mediaInfo ; } catch ( IOException e ) { throw new MediaInfoParseException ( "Failed to parse media info" , e ) ; }
public class FileUtilities { /** * Returns true if all deletions were successful . If a deletion fails , the method stops attempting to delete and * returns false . * @ param filehandle the file or folder to remove . * @ return true if all deletions were successful * @ throws IOException */ public static boolean deleteFileOrDir ( File filehandle ) throws IOException { } }
if ( filehandle . isDirectory ( ) ) { Files . walkFileTree ( Paths . get ( filehandle . getAbsolutePath ( ) ) , new SimpleFileVisitor < Path > ( ) { @ Override public FileVisitResult visitFile ( Path file , BasicFileAttributes attrs ) throws IOException { Files . delete ( file ) ; return FileVisitResult . CONTINUE ; } @ Override public FileVisitResult postVisitDirectory ( Path dir , IOException exc ) throws IOException { if ( exc != null ) { throw exc ; } Files . delete ( dir ) ; return FileVisitResult . CONTINUE ; } } ) ; } boolean isdel = filehandle . delete ( ) ; if ( ! isdel ) { // if it didn ' t work , which somtimes happens on windows systems , // remove on exit filehandle . deleteOnExit ( ) ; } return isdel ;
public class MssPackageMarshaller { /** * Marshall the given parameter object . */ public void marshall ( MssPackage mssPackage , ProtocolMarshaller protocolMarshaller ) { } }
if ( mssPackage == null ) { throw new SdkClientException ( "Invalid argument passed to marshall(...)" ) ; } try { protocolMarshaller . marshall ( mssPackage . getEncryption ( ) , ENCRYPTION_BINDING ) ; protocolMarshaller . marshall ( mssPackage . getManifestWindowSeconds ( ) , MANIFESTWINDOWSECONDS_BINDING ) ; protocolMarshaller . marshall ( mssPackage . getSegmentDurationSeconds ( ) , SEGMENTDURATIONSECONDS_BINDING ) ; protocolMarshaller . marshall ( mssPackage . getStreamSelection ( ) , STREAMSELECTION_BINDING ) ; } catch ( Exception e ) { throw new SdkClientException ( "Unable to marshall request to JSON: " + e . getMessage ( ) , e ) ; }
public class HelloDory { /** * Create a Dory client connection and execute the example commands . */ private void run ( String [ ] args ) { } }
if ( args . length != 2 ) { usage ( ) ; } System . out . println ( "Opening Doradus server: " + args [ 0 ] + ":" + args [ 1 ] ) ; try ( DoradusClient client = new DoradusClient ( args [ 0 ] , Integer . parseInt ( args [ 1 ] ) ) ) { deleteApplication ( client ) ; createApplication ( client ) ; addData ( client ) ; queryData ( client ) ; deleteData ( client ) ; }
public class BootstrapConfig { /** * Get value from initial configuration properties . If property is not * present in initial / framework properties , try finding it in system * properties . * @ param key * Property key * @ return Object value , or null if not found . */ public String get ( final String key ) { } }
if ( key == null || initProps == null ) return null ; String value = initProps . get ( key ) ; if ( value == null ) { try { value = AccessController . doPrivileged ( new java . security . PrivilegedExceptionAction < String > ( ) { @ Override public String run ( ) throws Exception { return System . getProperty ( key ) ; } } ) ; } catch ( Exception ex ) { } } return value ;
public class VFSStoreResource { /** * Tells the resource manager to forget about a heuristically completed * transaction branch . * @ param _ xid global transaction identifier ( not used , because each file * with the file id gets a new VFS store resource instance ) */ @ Override public void forget ( final Xid _xid ) { } }
if ( VFSStoreResource . LOG . isDebugEnabled ( ) ) { VFSStoreResource . LOG . debug ( "forget (xid = " + _xid + ")" ) ; }
public class GalaxyProperties { /** * Gets path to a config file for Galaxy relative to the Galaxy root directory . * This will check for the existence of the file and attempt to copy it from a * . sample file * if it does not exist . * @ param galaxyRoot The Galaxy root directory . * @ param configFileName The name of the config file to get . * @ return The path to the config file relative to the Galaxy root . * @ throws IOException If the copy failed . */ private String getConfigPathFromRoot ( File galaxyRoot , String configFileName ) throws IOException { } }
if ( isPre20141006Release ( galaxyRoot ) ) { return configFileName ; } else { File configDirectory = new File ( galaxyRoot , CONFIG_DIR_NAME ) ; File toolConf = new File ( configDirectory , configFileName ) ; // if config file does not exist , copy it from the . sample version if ( ! toolConf . exists ( ) ) { File toolConfSample = new File ( configDirectory , configFileName + ".sample" ) ; Files . copy ( toolConfSample , toolConf ) ; } return CONFIG_DIR_NAME + "/" + configFileName ; }
public class DJBar3DChartBuilder { /** * Adds the specified serie column to the dataset with custom label . * @ param column the serie column * @ param label column the custom label */ public DJBar3DChartBuilder addSerie ( AbstractColumn column , String label ) { } }
getDataset ( ) . addSerie ( column , label ) ; return this ;
public class ClasspathResource { /** * @ see # ClasspathResource ( Class , String , boolean ) * @ param someClass is the class identifying the path where the resource is located and the prefix of its * filename . * @ param nameOrSuffix is the filename of the resource or a suffix ( e . g . " . properties " or " - test . xml " ) for * it depending on { @ code append } . * @ param append - if { @ code true } the { @ code nameOrSuffix } is appended to the { @ link Class # getSimpleName ( ) * simple classname } of { @ code someClass } or { @ code false } if the simple name is replaced by * { @ code nameOrSuffix } . * @ return the absolute path . */ private static String getAbsolutePath ( Class < ? > someClass , String nameOrSuffix , boolean append ) { } }
if ( append ) { return someClass . getName ( ) . replace ( '.' , '/' ) + nameOrSuffix ; } else { return someClass . getPackage ( ) . getName ( ) . replace ( '.' , '/' ) + '/' + nameOrSuffix ; }
public class ApiOvhMe { /** * Get this object properties * REST : GET / me / debtAccount / debt / { debtId } * @ param debtId [ required ] */ public OvhDebt debtAccount_debt_debtId_GET ( Long debtId ) throws IOException { } }
String qPath = "/me/debtAccount/debt/{debtId}" ; StringBuilder sb = path ( qPath , debtId ) ; String resp = exec ( qPath , "GET" , sb . toString ( ) , null ) ; return convertTo ( resp , OvhDebt . class ) ;
public class ApproximateHistogram { /** * Returns a byte - array representation of this ApproximateHistogram object * @ return byte array representation */ @ JsonValue public byte [ ] toBytes ( ) { } }
ByteBuffer buf = ByteBuffer . allocate ( getMinStorageSize ( ) ) ; toBytes ( buf ) ; return buf . array ( ) ;
public class EpsReader { /** * Main method that parses all comments and then distributes data extraction among other methods that parse the * rest of file and store encountered data in metadata ( if there exists an entry in EpsDirectory * for the found data ) . Reads until a begin data / binary comment is found or _ reader ' s estimated * available data has run out ( or AI09 End Private Data ) . Will extract data from normal EPS comments , Photoshop , ICC , and XMP . * @ param metadata Metadata to add directory to and extracted data */ private void extract ( @ NotNull final EpsDirectory directory , @ NotNull Metadata metadata , @ NotNull SequentialReader reader ) throws IOException { } }
StringBuilder line = new StringBuilder ( ) ; while ( true ) { line . setLength ( 0 ) ; // Read the next line , excluding any trailing newline character // Note that for Windows - style line endings ( " \ r \ n " ) the outer loop will be run a second time with an empty // string , which is fine . while ( true ) { char c = ( char ) reader . getByte ( ) ; if ( c == '\r' || c == '\n' ) break ; line . append ( c ) ; } // Stop when we hit a line that is not a comment if ( line . length ( ) != 0 && line . charAt ( 0 ) != '%' ) break ; String name ; // ' : ' signifies there is an associated keyword ( should be put in directory ) // otherwise , the name could be a marker int colonIndex = line . indexOf ( ":" ) ; if ( colonIndex != - 1 ) { name = line . substring ( 0 , colonIndex ) . trim ( ) ; String value = line . substring ( colonIndex + 1 ) . trim ( ) ; addToDirectory ( directory , name , value ) ; } else { name = line . toString ( ) . trim ( ) ; } // Some comments will both have a value and signify a new block to follow if ( name . equals ( "%BeginPhotoshop" ) ) { extractPhotoshopData ( metadata , reader ) ; } else if ( name . equals ( "%%BeginICCProfile" ) ) { extractIccData ( metadata , reader ) ; } else if ( name . equals ( "%begin_xml_packet" ) ) { extractXmpData ( metadata , reader ) ; } }
public class ZipFileContainerUtils { /** * Allocate an offsets list . * If no discarded list is available , allocate a new list . * Otherwise , use one of the discarded lists . * @ param offsetsStorage Storage of discarded offset lists . * @ return An allocated offsets list . */ private static List < Integer > allocateOffsets ( List < List < Integer > > offsetsStorage ) { } }
if ( offsetsStorage . isEmpty ( ) ) { return new ArrayList < Integer > ( ) ; } else { return ( offsetsStorage . remove ( 0 ) ) ; }
public class CmsJspTagLink { /** * Returns a link to a file in the OpenCms VFS * that has been adjusted according to the web application path and the * OpenCms static export rules . < p > * < p > If the < code > baseUri < / code > parameter is provided , this will be treated as the source of the link , * if this is < code > null < / code > then the current OpenCms user context URI will be used as source . < / p > * < p > If the < code > locale < / code > parameter is provided , the locale in the request context will be switched * to the provided locale . This influences only the behavior of the * { @ link org . opencms . staticexport . CmsLocalePrefixLinkSubstitutionHandler } . < / p > * Relative links are converted to absolute links , using the current element URI as base . < p > * @ param target the link that should be calculated , can be relative or absolute * @ param req the current request * @ param baseUri the base URI for the link source * @ param locale the locale for which the link should be created ( see { @ link org . opencms . staticexport . CmsLocalePrefixLinkSubstitutionHandler } * @ return the target link adjusted according to the web application path and the OpenCms static export rules * @ see # linkTagAction ( String , ServletRequest ) * @ since 8.0.3 */ public static String linkTagAction ( String target , ServletRequest req , String baseUri , Locale locale ) { } }
return linkTagAction ( target , req , baseUri , null , locale ) ;
public class Americanize { /** * Americanize and print the command line arguments . * This main method is just for debugging . * @ param args Command line arguments : a list of words */ public static void main ( String [ ] args ) throws IOException { } }
System . err . println ( new Americanize ( ) ) ; System . err . println ( ) ; if ( args . length == 0 ) { // stdin - > stdout : BufferedReader buf = new BufferedReader ( new InputStreamReader ( System . in ) ) ; String line ; while ( ( line = buf . readLine ( ) ) != null ) { for ( String w : line . split ( "\\s+" ) ) { System . out . print ( Americanize . americanize ( w ) + " " ) ; } System . out . println ( ) ; } buf . close ( ) ; } for ( String arg : args ) { System . out . print ( arg ) ; System . out . print ( " --> " ) ; System . out . println ( americanize ( arg ) ) ; }
public class OffsetCharSequence { /** * Compares this char sequence to another char sequence < code > o < / code > . * @ param o the other char sequence . * @ return as defined in { @ link String # compareTo ( Object ) } but also takes * { @ link # transform } into account . */ public int compareTo ( OffsetCharSequence other ) { } }
int len1 = length ( ) ; int len2 = other . length ( ) ; int lim = Math . min ( len1 , len2 ) ; for ( int i = 0 ; i < lim ; i ++ ) { char c1 = charAt ( i ) ; char c2 = other . charAt ( i ) ; if ( c1 != c2 ) { return c1 - c2 ; } } return len1 - len2 ;
public class RelationImpl { /** * Reads some data from a RelationReified . If the Relation has not been reified then an empty * Stream is returned . */ private < X > Stream < X > readFromReified ( Function < RelationReified , Stream < X > > producer ) { } }
return reified ( ) . map ( producer ) . orElseGet ( Stream :: empty ) ;
public class Rectangle2dfx { /** * Replies the property that is the width of the box . * @ return the width property . */ @ Pure public DoubleProperty widthProperty ( ) { } }
if ( this . width == null ) { this . width = new ReadOnlyDoubleWrapper ( this , MathFXAttributeNames . WIDTH ) ; this . width . bind ( Bindings . subtract ( maxXProperty ( ) , minXProperty ( ) ) ) ; } return this . width ;
public class ScheduledTask { /** * Callable . call is invoked by the executor to run this task some time ( hopefully soon ) * after the scheduled execution time has been reached . */ @ FFDCIgnore ( Throwable . class ) @ Override public T call ( ) throws Exception { } }
final boolean trace = TraceComponent . isAnyTracingEnabled ( ) ; if ( future . isCancelled ( ) ) { if ( trace && tc . isDebugEnabled ( ) ) Tr . debug ( this , tc , "canceled - not running the task" ) ; return null ; } Result result = resultRef . get ( ) , resultForThisExecution = result ; Status < T > skipped = null ; Status < T > status ; boolean done = false ; T taskResult = null ; ArrayList < ThreadContext > contextAppliedToThread = null ; resultForThisExecution . executionThread = Thread . currentThread ( ) ; try { // EE Concurrency 3.2.6.1 : All tasks submitted to an executor must not run if task ' s component is not started . // ThreadContextDescriptor . taskStarting covers this requirement for us . contextAppliedToThread = threadContextDescriptor . taskStarting ( ) ; // Determine if task should be skipped if ( trigger != null ) try { if ( trigger . skipRun ( lastExecution , new Date ( nextExecutionTime ) ) ) skipped = new Status < T > ( Status . Type . SKIPPED , null , null , false ) ; } catch ( Throwable x ) { // spec requires skip when skipRun fails Tr . error ( tc , "CWWKC1103.skip.run.failed" , getName ( ) , managedExecSvc . name , x ) ; skipped = new Status < T > ( Status . Type . SKIPPED , null , x , false ) ; } Date nextExecutionDate = null ; // Run task if it wasn ' t skipped if ( skipped == null ) { // notify listener : taskStarting if ( listener != null ) try { if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEventEnabled ( ) ) Tr . event ( this , tc , "taskStarting" , managedExecSvc , task ) ; listener . taskStarting ( future , managedExecSvc , task ) ; } finally { done = result . getStatus ( ) . type == Status . Type . CANCELED ; } // run the task if the listener didn ' t cancel it status = result . getStatus ( ) ; if ( status . type == Status . Type . SUBMITTED && result . compareAndSet ( status , new Status < T > ( Status . Type . 
STARTED , null , null , false ) ) ) { try { if ( trigger == null ) if ( isCallable ) taskResult = ( ( Callable < T > ) task ) . call ( ) ; else ( ( Runnable ) task ) . run ( ) ; else { long startTime = System . currentTimeMillis ( ) ; if ( isCallable ) taskResult = ( ( Callable < T > ) task ) . call ( ) ; else ( ( Runnable ) task ) . run ( ) ; long endTime = System . currentTimeMillis ( ) ; String identityName = threadContextDescriptor . getExecutionProperties ( ) . get ( ManagedTask . IDENTITY_NAME ) ; lastExecution = new LastExecutionImpl ( identityName , nextExecutionTime , startTime , endTime , taskResult ) ; } } catch ( Throwable x ) { Tr . error ( tc , "CWWKC1101.task.failed" , getName ( ) , managedExecSvc . name , x ) ; status = result . getStatus ( ) ; if ( status . type == Status . Type . CANCELED ) // include the failure in the result so it will be available to taskDone result . compareAndSet ( status , new Status < T > ( Status . Type . CANCELED , null , x , true ) ) ; else if ( status . type == Status . Type . STARTED ) result . compareAndSet ( status , new Status < T > ( Status . Type . DONE , null , x , true ) ) ; result . latch . countDown ( ) ; } status = result . getStatus ( ) ; if ( status . type == Status . Type . STARTED ) { // calculate next execution if ( trigger == null ) result . compareAndSet ( status , new Status < T > ( Status . Type . DONE , taskResult , null , fixedDelay == null && fixedRate == null ) ) ; else { nextExecutionDate = trigger . getNextRunTime ( lastExecution , taskScheduledTime ) ; if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) ) Tr . debug ( this , tc , "getNextRunTime" , trigger , lastExecution , "taskScheduled " + Utils . toString ( taskScheduledTime ) , "nextRunTime = " + Utils . toString ( nextExecutionDate ) ) ; result . compareAndSet ( status , new Status < T > ( Status . Type . DONE , taskResult , null , nextExecutionDate == null ) ) ; } result . latch . 
countDown ( ) ; } done = true ; if ( listener != null ) try { if ( status . type == Status . Type . CANCELED ) try { CancellationException cancelX = new CancellationException ( Tr . formatMessage ( tc , "CWWKC1110.task.canceled" , getName ( ) , managedExecSvc . name ) ) ; if ( trace && tc . isEventEnabled ( ) ) Tr . event ( this , tc , "taskCanceled" , managedExecSvc , task , cancelX ) ; listener . taskAborted ( future , managedExecSvc , task , cancelX ) ; } catch ( Throwable x ) { Tr . error ( tc , "CWWKC1102.listener.failed" , getName ( ) , managedExecSvc . name , x ) ; } Throwable failure = result . getStatus ( ) . failure ; if ( trace && tc . isEventEnabled ( ) ) Tr . event ( this , tc , "taskDone" , managedExecSvc , task , failure ) ; listener . taskDone ( future , managedExecSvc , task , failure ) ; } catch ( Throwable x ) { Tr . error ( tc , "CWWKC1102.listener.failed" , getName ( ) , managedExecSvc . name , x ) ; } } } else { // Skip this execution ( only possible if using a trigger ) try { status = result . getStatus ( ) ; if ( status . type == Status . Type . SUBMITTED ) result . compareAndSet ( status , skipped ) ; // calculate next execution nextExecutionDate = trigger . getNextRunTime ( lastExecution , taskScheduledTime ) ; if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) ) Tr . debug ( this , tc , "getNextRunTime" , trigger , lastExecution , "taskScheduled " + Utils . toString ( taskScheduledTime ) , "nextRunTime = " + Utils . toString ( nextExecutionDate ) ) ; // No next execution if ( nextExecutionDate == null ) result . compareAndSet ( skipped , new Status < T > ( Status . Type . SKIPPED , null , skipped . failure , true ) ) ; } finally { result . latch . countDown ( ) ; // notify listener : taskAborted if ( listener != null ) { if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEventEnabled ( ) ) Tr . event ( this , tc , "taskAborted(skipped)" , managedExecSvc , task ) ; listener . 
taskAborted ( future , managedExecSvc , task , new SkippedException ( skipped . failure ) ) ; } } // notify listener : taskDone if ( listener != null ) { if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEventEnabled ( ) ) Tr . event ( this , tc , "taskDone(skipped)" , managedExecSvc , task ) ; listener . taskDone ( future , managedExecSvc , task , null ) ; } } // Resubmit this task to run at the next scheduled time status = result . getStatus ( ) ; if ( ! status . finalExecutionIsComplete && ( status . type == Status . Type . DONE || status . type == Status . Type . SKIPPED ) ) { if ( trace && tc . isEventEnabled ( ) ) Tr . event ( this , tc , "DONE-->NONE (reset for next result)" ) ; resultRef . set ( result = new Result ( ) ) ; done = false ; // compute the delay and estimate the next execution time long delay ; if ( fixedDelay != null ) { delay = fixedDelay ; nextExecutionTime = System . nanoTime ( ) + unit . toNanos ( delay ) ; } else if ( fixedRate != null ) { // Time elapsed from when the task should have started for the first time long nanoTime = System . nanoTime ( ) ; long elapsed = unit . convert ( nanoTime - taskScheduledNanos , TimeUnit . NANOSECONDS ) - initialDelay ; delay = ( ( elapsed / fixedRate ) + 1 ) * fixedRate - elapsed ; nextExecutionTime = nanoTime + unit . toNanos ( delay ) ; } else { nextExecutionTime = nextExecutionDate . getTime ( ) ; delay = nextExecutionTime - System . currentTimeMillis ( ) ; } if ( delay < 0 ) delay = 0 ; // notify listener : taskSubmitted if ( listener != null ) try { result . executionThread = Thread . currentThread ( ) ; if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEventEnabled ( ) ) Tr . event ( this , tc , "taskSubmitted" , managedExecSvc , task ) ; listener . taskSubmitted ( future , managedExecSvc , task ) ; } finally { result . executionThread = null ; } // reschedule the task if the listener didn ' t cancel it status = result . getStatus ( ) ; if ( status . type == Status . Type . 
NONE && result . compareAndSet ( status , new Status < T > ( Status . Type . SUBMITTED , null , null , false ) ) ) { if ( trace && tc . isDebugEnabled ( ) ) Tr . debug ( this , tc , "reschedule " + delay + ' ' + unit + " from now" ) ; ScheduledFuture < ? > scheduledFuture = managedExecSvc . scheduledExecSvc . schedule ( this , delay , unit ) ; future . scheduledFutureRef . set ( scheduledFuture ) ; } } } catch ( Throwable x ) { // Some cases where this can happen : // component that scheduled the task is no longer available // listener . taskAborted ( for skipped task ) fails // trigger . getNextRunTime ( for reschedule after skip ) fails // listener . taskStarting fails // trigger . getNextRunTime ( for reschedule after success ) fails // taskAborted or taskDone fails // Liberty scheduled executor unavailable , or it fails to schedule if ( contextAppliedToThread == null && x instanceof IllegalStateException && FrameworkState . isStopping ( ) ) { if ( trace && tc . isDebugEnabled ( ) ) Tr . debug ( this , tc , "Task not started due to server shutdown" , getName ( ) , x ) ; } else Tr . error ( tc , "CWWKC1101.task.failed" , getName ( ) , managedExecSvc . name , x ) ; status = result . getStatus ( ) ; if ( ! status . finalExecutionIsComplete ) { Status < T > newStatus = status . type == Status . Type . STARTED ? new Status < T > ( Status . Type . DONE , null , x , true ) : new Status < T > ( Status . Type . ABORTED , null , x , true ) ; result . compareAndSet ( status , newStatus ) ; } result . latch . countDown ( ) ; if ( listener != null && ! done ) try { try { if ( skipped == null && status . type != Status . Type . STARTED ) { if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEventEnabled ( ) ) Tr . event ( this , tc , "taskAborted" , managedExecSvc ) ; listener . taskAborted ( future , managedExecSvc , task , new AbortedException ( x ) ) ; } } finally { if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEventEnabled ( ) ) Tr . 
event ( this , tc , "taskDone" , managedExecSvc , task , x ) ; listener . taskDone ( future , managedExecSvc , task , x ) ; } } catch ( Throwable t ) { // Log message , but otherwise ignore because we want to the original failure to be raised Tr . error ( tc , "CWWKC1102.listener.failed" , getName ( ) , managedExecSvc . name , x ) ; } if ( x instanceof Exception ) throw ( Exception ) x ; else throw ( Error ) x ; } finally { resultForThisExecution . executionThread = null ; if ( contextAppliedToThread != null ) threadContextDescriptor . taskStopping ( contextAppliedToThread ) ; } return taskResult ;
public class ApitraryDaoSupport { /** * findAll . * @ param entity * a { @ link java . lang . Class } object . * @ param < T > * a T object . * @ return a { @ link java . util . List } object . */ @ SuppressWarnings ( "unchecked" ) public < T > List < T > findAll ( Class < T > entity ) { } }
if ( entity == null ) { throw new ApitraryOrmException ( "Cannot access null entity" ) ; } log . debug ( "Loading all " + entity . getName ( ) ) ; QueriedGetRequest request = new QueriedGetRequest ( ) ; request . setEntity ( resolveApitraryEntity ( entity ) ) ; QueriedGetResponse response = resolveApitraryClient ( ) . send ( request ) ; if ( HttpStatus . OK . ordinal ( ) == response . getStatusCode ( ) ) { return ( List < T > ) new QueriedGetResponseUnmarshaller ( this ) . unMarshall ( response , entity ) ; } else { /* * happens more often than expected . . . */ // if ( HttpStatus . Internal _ Server _ Error . ordinal ( ) = = // response . getStatusCode ( ) ) { // throw new // CommunicationErrorException ( HttpStatus . Internal _ Server _ Error ) ; throw new CommunicationErrorException ( HttpStatus . getStatus ( response . getStatusCode ( ) ) , response . getResult ( ) ) ; }
public class IPv4PacketImpl { /** * Very naive initial implementation . Should be changed to do a better job * and its performance probably can go up a lot as well . * @ param startIndex * @ param address */ private void setIP ( final int startIndex , final String address ) { } }
final String [ ] parts = address . split ( "\\." ) ; this . headers . setByte ( startIndex + 0 , ( byte ) Integer . parseInt ( parts [ 0 ] ) ) ; this . headers . setByte ( startIndex + 1 , ( byte ) Integer . parseInt ( parts [ 1 ] ) ) ; this . headers . setByte ( startIndex + 2 , ( byte ) Integer . parseInt ( parts [ 2 ] ) ) ; this . headers . setByte ( startIndex + 3 , ( byte ) Integer . parseInt ( parts [ 3 ] ) ) ; reCalculateChecksum ( ) ;
public class AdRule { /** * Gets the midroll value for this AdRule . * @ return midroll * This { @ link AdRule } object ' s mid - roll slot . This attribute * is required . */ public com . google . api . ads . admanager . axis . v201902 . BaseAdRuleSlot getMidroll ( ) { } }
return midroll ;
public class lbwlm { /** * Use this API to fetch filtered set of lbwlm resources . * filter string should be in JSON format . eg : " port : 80 , servicetype : HTTP " . */ public static lbwlm [ ] get_filtered ( nitro_service service , String filter ) throws Exception { } }
lbwlm obj = new lbwlm ( ) ; options option = new options ( ) ; option . set_filter ( filter ) ; lbwlm [ ] response = ( lbwlm [ ] ) obj . getfiltered ( service , option ) ; return response ;
public class ControlBeanContextServicesSupport { /** * Reports whether or not a given service is * currently available from this context . * @ param serviceClass the service in question * @ return true if the service is available */ public synchronized boolean hasService ( Class serviceClass ) { } }
// todo : for multithreaded usage this block needs to be synchronized ServiceProvider sp = _serviceProviders . get ( serviceClass ) ; if ( sp != null && ! sp . isRevoked ( ) ) { return true ; } // if service not found locally check in nested context BeanContext bc = getBeanContext ( ) ; if ( bc instanceof BeanContextServices ) { return ( ( BeanContextServices ) bc ) . hasService ( serviceClass ) ; } return false ; // end synchronized
public class Excel03SaxReader {
    /**
     * Performs end-of-row processing; {@link LastCellOfRowDummyRecord} is the
     * record that marks the end of a row.
     *
     * @param lastCell the record marking the end of the row
     */
    private void processLastCell(LastCellOfRowDummyRecord lastCell) {
        // Invoke the row handler once per completed row.
        this.rowHandler.handle(curSheetIndex, lastCell.getRow(), this.rowCellList);
        // Reset the per-row cell cache for the next row.
        this.rowCellList.clear();
    }
}
public class EVCacheClientPool { /** * back to the read map . */ private void updateMemcachedReadInstancesByZone ( ) { } }
for ( ServerGroup serverGroup : memcachedInstancesByServerGroup . keySet ( ) ) { final BooleanProperty isZoneInWriteOnlyMode = writeOnlyFastPropertyMap . get ( serverGroup ) ; if ( isZoneInWriteOnlyMode . get ( ) . booleanValue ( ) ) { if ( memcachedReadInstancesByServerGroup . containsKey ( serverGroup ) ) { EVCacheMetricsFactory . increment ( _appName , null , serverGroup . getName ( ) , _appName + "-" + serverGroup . getName ( ) + "-WRITE_ONLY" ) ; memcachedReadInstancesByServerGroup . remove ( serverGroup ) ; } } else { if ( ! memcachedReadInstancesByServerGroup . containsKey ( serverGroup ) ) { EVCacheMetricsFactory . increment ( _appName , null , serverGroup . getName ( ) , _appName + "-" + serverGroup . getName ( ) + "-READ_ENABLED" ) ; memcachedReadInstancesByServerGroup . put ( serverGroup , memcachedInstancesByServerGroup . get ( serverGroup ) ) ; } } // if we lose over 50 % of instances put that zone in writeonly mode . final List < EVCacheClient > clients = memcachedReadInstancesByServerGroup . get ( serverGroup ) ; if ( clients != null && ! clients . isEmpty ( ) ) { final EVCacheClient client = clients . get ( 0 ) ; if ( client != null ) { final EVCacheConnectionObserver connectionObserver = client . getConnectionObserver ( ) ; if ( connectionObserver != null ) { final int activeServerCount = connectionObserver . getActiveServerCount ( ) ; final int inActiveServerCount = connectionObserver . getInActiveServerCount ( ) ; if ( inActiveServerCount > activeServerCount ) { memcachedReadInstancesByServerGroup . remove ( serverGroup ) ; } } } } } if ( memcachedReadInstancesByServerGroup . size ( ) != memcachedFallbackReadInstances . getSize ( ) ) { memcachedFallbackReadInstances = new ServerGroupCircularIterator ( memcachedReadInstancesByServerGroup . keySet ( ) ) ; Map < String , Set < ServerGroup > > readServerGroupByZoneMap = new ConcurrentHashMap < String , Set < ServerGroup > > ( ) ; for ( ServerGroup serverGroup : memcachedReadInstancesByServerGroup . 
keySet ( ) ) { Set < ServerGroup > serverGroupList = readServerGroupByZoneMap . get ( serverGroup . getZone ( ) ) ; if ( serverGroupList == null ) { serverGroupList = new HashSet < ServerGroup > ( ) ; readServerGroupByZoneMap . put ( serverGroup . getZone ( ) , serverGroupList ) ; } serverGroupList . add ( serverGroup ) ; } Map < String , ServerGroupCircularIterator > _readServerGroupByZone = new ConcurrentHashMap < String , ServerGroupCircularIterator > ( ) ; for ( Entry < String , Set < ServerGroup > > readServerGroupByZoneEntry : readServerGroupByZoneMap . entrySet ( ) ) { _readServerGroupByZone . put ( readServerGroupByZoneEntry . getKey ( ) , new ServerGroupCircularIterator ( readServerGroupByZoneEntry . getValue ( ) ) ) ; } this . readServerGroupByZone = _readServerGroupByZone ; localServerGroupIterator = readServerGroupByZone . get ( _zone ) ; }
public class TargetSenderWorker { /** * Receives a < code > ProtocolDataUnit < / code > from the socket and appends it to the end of the receiving queue of this * connection . * @ return Queue with the resulting units * @ throws IOException if an I / O error occurs . * @ throws InternetSCSIException if any violation of the iSCSI - Standard emerge . * @ throws DigestException if a mismatch of the digest exists . * @ throws SettingsException */ ProtocolDataUnit receiveFromWire ( ) throws DigestException , InternetSCSIException , IOException , SettingsException { } }
ProtocolDataUnit pdu ; if ( initialPdu ) { /* * The connection ' s ConnectionSettingsNegotiator has not been initialized , hence getSettings ( ) would throw a * NullPointerException . Initialize PDU with default values , i . e . no digests . */ pdu = protocolDataUnitFactory . create ( TextKeyword . NONE , // header // digest TextKeyword . NONE ) ; // data digest } else { // use negotiated or ( now available ) default settings final Settings settings = connection . getSettings ( ) ; pdu = protocolDataUnitFactory . create ( settings . getHeaderDigest ( ) , settings . getDataDigest ( ) ) ; } try { pdu . read ( socketChannel ) ; } catch ( ClosedChannelException e ) { throw new InternetSCSIException ( e ) ; } if ( LOGGER . isDebugEnabled ( ) ) LOGGER . debug ( "Receiving this PDU:\n" + pdu ) ; // parse sequence counters final BasicHeaderSegment bhs = pdu . getBasicHeaderSegment ( ) ; final InitiatorMessageParser parser = ( InitiatorMessageParser ) bhs . getParser ( ) ; // final int commandSequenceNumber = parser . getCommandSequenceNumber ( ) ; // final int expectedStatusSequenceNumber = parser . getExpectedStatusSequenceNumber ( ) ; if ( LOGGER . isDebugEnabled ( ) ) { // sharrajesh // Needed to debug , out of order receiving of StatusSN and ExpStatSN if ( bhs . getOpCode ( ) == OperationCode . SCSI_COMMAND ) { final SCSICommandParser scsiParser = ( SCSICommandParser ) bhs . getParser ( ) ; ScsiOperationCode scsiOpCode = ScsiOperationCode . valueOf ( scsiParser . getCDB ( ) . get ( 0 ) ) ; LOGGER . debug ( "scsiOpCode = " + scsiOpCode ) ; LOGGER . debug ( "CDB bytes: \n" + Debug . byteBufferToString ( scsiParser . getCDB ( ) ) ) ; } // LOGGER . debug ( " parser . expectedStatusSequenceNumber : " + expectedStatusSequenceNumber ) ; if ( connection == null ) LOGGER . debug ( "connection: null" ) ; else if ( connection . getStatusSequenceNumber ( ) == null ) LOGGER . debug ( "connection.getStatusSequenceNumber: null" ) ; else LOGGER . 
debug ( "connection.getStatusSequenceNumber: " + connection . getStatusSequenceNumber ( ) . getValue ( ) ) ; } // if this is the first PDU in the leading connection , then // initialize the session ' s ExpectedCommandSequenceNumber if ( initialPdu ) { initialPdu = false ; // PDU is immediate Login PDU , checked in Target . main ( ) , // ExpCmdSN of this PDU will be used to initialize the // respective session and connection parameters ( sequence numbers ) // see TargetSession and TargetConnection initialization in // Target . main ( ) } else { // check sequence counters // if ( session . getMaximumCommandSequenceNumber ( ) . lessThan ( commandSequenceNumber ) ) // throw new InternetSCSIException ( " received CmdSN > local MaxCmdSN " ) ; // verified , is working with Windows 8 initiator // if ( ! connection . getStatusSequenceNumber ( ) . equals ( expectedStatusSequenceNumber ) // & & expectedStatusSequenceNumber ! = 0 ) / / required by MS iSCSI // / / initiator DATA - OUT // / / PDU sequence // throw new InternetSCSIException ( " received ExpStatusSN ! = local StatusSN + 1 " ) ; } // increment CmdSN if not immediate PDU ( or Data - Out PDU ) try { if ( parser . incrementSequenceNumber ( ) ) session . getExpectedCommandSequenceNumber ( ) . increment ( ) ; } catch ( NullPointerException exc ) { } return pdu ;
public class PoolableObject { /** * Releases the current owning thread from this Object * @ param < K > the Key wrapped Type * @ param < E > the Entry type * @ return PoolableObject for method chaining */ @ SuppressWarnings ( "unchecked" ) < K , E extends PoolableObject < V > > E releaseOwner ( ) { } }
this . owner = null ; return ( E ) this ;
public class TIFFField { /** * Returns data in any numerical format as a float . Data in * TIFF _ SRATIONAL or TIFF _ RATIONAL format are evaluated by * dividing the numerator into the denominator using * double - precision arithmetic and then truncating to single * precision . Data in TIFF _ SLONG , TIFF _ LONG , or TIFF _ DOUBLE * format may suffer from truncation . * < p > A ClassCastException will be thrown if the field is * of type TIFF _ UNDEFINED or TIFF _ ASCII . */ public float getAsFloat ( int index ) { } }
switch ( type ) { case TIFF_BYTE : return ( ( byte [ ] ) data ) [ index ] & 0xff ; case TIFF_SBYTE : return ( ( byte [ ] ) data ) [ index ] ; case TIFF_SHORT : return ( ( char [ ] ) data ) [ index ] & 0xffff ; case TIFF_SSHORT : return ( ( short [ ] ) data ) [ index ] ; case TIFF_SLONG : return ( ( int [ ] ) data ) [ index ] ; case TIFF_LONG : return ( ( long [ ] ) data ) [ index ] ; case TIFF_FLOAT : return ( ( float [ ] ) data ) [ index ] ; case TIFF_DOUBLE : return ( float ) ( ( double [ ] ) data ) [ index ] ; case TIFF_SRATIONAL : int [ ] ivalue = getAsSRational ( index ) ; return ( float ) ( ( double ) ivalue [ 0 ] / ivalue [ 1 ] ) ; case TIFF_RATIONAL : long [ ] lvalue = getAsRational ( index ) ; return ( float ) ( ( double ) lvalue [ 0 ] / lvalue [ 1 ] ) ; default : throw new ClassCastException ( ) ; }
public class StatementGroup { /** * Returns a list of all parameters contained by this statement group ' s * statements , and nested statement groups . * @ return List of parameters */ public List < Parameter > getAllParameters ( ) { } }
List < Parameter > ret = new ArrayList < Parameter > ( ) ; // Parameters within statements if ( statements != null ) { for ( final Statement stmt : statements ) ret . addAll ( stmt . getAllParameters ( ) ) ; } // Parameters within nested statement groups if ( statementGroups != null ) { for ( final StatementGroup sg : statementGroups ) ret . addAll ( sg . getAllParameters ( ) ) ; } return ret ;
public class JoinedQueryExecutor { /** * Builds and returns a complex joined excutor against a chained property , * supporting multi - way joins . Filtering and ordering may also be supplied , * in order to better distribute work throughout the join . * @ param repoAccess used to create query executors for outer and inner loops * @ param targetToSourceProperty join property of < i > target < / i > type which maps * to instances of < i > source < / i > type * @ param targetFilter optional filter for fetching < i > target < / i > instances * @ param targetOrdering optional ordering to apply to < i > target < / i > executor * & @ param hints optional hints * @ throws IllegalArgumentException if any parameter is null or if join * property is not a Storable type * @ throws RepositoryException from RepositoryAccess */ public static < T extends Storable > QueryExecutor < T > build ( RepositoryAccess repoAccess , ChainedProperty < T > targetToSourceProperty , Filter < T > targetFilter , OrderingList < T > targetOrdering , QueryHints hints ) throws RepositoryException { } }
if ( targetOrdering == null ) { targetOrdering = OrderingList . emptyList ( ) ; } QueryExecutor < T > executor = buildJoin ( repoAccess , targetToSourceProperty , targetFilter , targetOrdering , hints ) ; OrderingList < T > handledOrdering = executor . getOrdering ( ) ; // Apply sort if any remaining ordering properties . int handledCount = commonOrderingCount ( handledOrdering , targetOrdering ) ; OrderingList < T > remainderOrdering = targetOrdering . subList ( handledCount , targetOrdering . size ( ) ) ; if ( remainderOrdering . size ( ) > 0 ) { SortedQueryExecutor . Support < T > support = repoAccess . storageAccessFor ( targetToSourceProperty . getPrimeProperty ( ) . getEnclosingType ( ) ) ; executor = new SortedQueryExecutor < T > ( support , executor , handledOrdering , remainderOrdering ) ; } return executor ;
public class Global { /** * Load a Java class that defines a JavaScript object using the * conventions outlined in ScriptableObject . defineClass . * This method is defined as a JavaScript function . * @ exception IllegalAccessException if access is not available * to a reflected class member * @ exception InstantiationException if unable to instantiate * the named class * @ exception InvocationTargetException if an exception is thrown * during execution of methods of the named class * @ see org . mozilla . javascript . ScriptableObject # defineClass ( Scriptable , Class ) */ @ SuppressWarnings ( { } }
"unchecked" } ) public static void defineClass ( Context cx , Scriptable thisObj , Object [ ] args , Function funObj ) throws IllegalAccessException , InstantiationException , InvocationTargetException { Class < ? > clazz = getClass ( args ) ; if ( ! Scriptable . class . isAssignableFrom ( clazz ) ) { throw reportRuntimeError ( "msg.must.implement.Scriptable" ) ; } ScriptableObject . defineClass ( thisObj , ( Class < ? extends Scriptable > ) clazz ) ;
public class CLDRBase { /** * Add a language alias entry . */ protected static void addLanguageAlias ( String rawType , String rawReplacement ) { } }
LanguageAlias type = LanguageAlias . parse ( rawType ) ; LanguageAlias replacement = LanguageAlias . parse ( rawReplacement ) ; String language = type . language ( ) ; List < Pair < LanguageAlias , LanguageAlias > > aliases = LANGUAGE_ALIAS_MAP . get ( language ) ; if ( aliases == null ) { aliases = new ArrayList < > ( ) ; LANGUAGE_ALIAS_MAP . put ( language , aliases ) ; } aliases . add ( Pair . pair ( type , replacement ) ) ;
public class AbstractTypeBuilder { /** * Visits an annotation and adds a corresponding node to the * specified Element . * Despite the name , this method is not inherited through any * visitor interface . It is not intended for external calls . * @ param parent the target of the annotation * @ param annotation the annotation * @ param primary whether this is a primary contract annotation * @ param owner the owner of this annotation * @ param p the element to add the created annotation to * @ see ContractAnnotationModel */ @ Requires ( { } }
"parent != null" , "annotation != null" , "owner != null" , "p != null" } ) protected void visitAnnotation ( Element parent , AnnotationMirror annotation , boolean primary , ClassName owner , ElementModel p ) { if ( utils . isContractAnnotation ( annotation ) ) { ContractAnnotationModel model = createContractModel ( parent , annotation , primary , owner ) ; if ( model != null ) { p . addEnclosedElement ( model ) ; } }
public class MoreCollectors { /** * Returns a { @ code Collector } which aggregates the results of two supplied * collectors using the supplied finisher function . * This method returns a * < a href = " package - summary . html # ShortCircuitReduction " > short - circuiting * collector < / a > if both downstream collectors are short - circuiting . The * collection might stop when both downstream collectors report that the * collection is complete . * This collector is similar to the { @ code teeing } collector available since * JDK 12 . The only difference is that this collector correctly combines * short - circuiting collectors . * @ param < T > the type of the input elements * @ param < A1 > the intermediate accumulation type of the first collector * @ param < A2 > the intermediate accumulation type of the second collector * @ param < R1 > the result type of the first collector * @ param < R2 > the result type of the second collector * @ param < R > the final result type * @ param c1 the first collector * @ param c2 the second collector * @ param finisher the function which merges two results into the single * one . * @ return a { @ code Collector } which aggregates the results of two supplied * collectors . */ public static < T , A1 , A2 , R1 , R2 , R > Collector < T , ? , R > pairing ( Collector < ? super T , A1 , R1 > c1 , Collector < ? super T , A2 , R2 > c2 , BiFunction < ? super R1 , ? super R2 , ? extends R > finisher ) { } }
EnumSet < Characteristics > c = EnumSet . noneOf ( Characteristics . class ) ; c . addAll ( c1 . characteristics ( ) ) ; c . retainAll ( c2 . characteristics ( ) ) ; c . remove ( Characteristics . IDENTITY_FINISH ) ; Supplier < A1 > c1Supplier = c1 . supplier ( ) ; Supplier < A2 > c2Supplier = c2 . supplier ( ) ; BiConsumer < A1 , ? super T > c1Accumulator = c1 . accumulator ( ) ; BiConsumer < A2 , ? super T > c2Accumulator = c2 . accumulator ( ) ; BinaryOperator < A1 > c1Combiner = c1 . combiner ( ) ; BinaryOperator < A2 > c2combiner = c2 . combiner ( ) ; Supplier < PairBox < A1 , A2 > > supplier = ( ) -> new PairBox < > ( c1Supplier . get ( ) , c2Supplier . get ( ) ) ; BiConsumer < PairBox < A1 , A2 > , T > accumulator = ( acc , v ) -> { c1Accumulator . accept ( acc . a , v ) ; c2Accumulator . accept ( acc . b , v ) ; } ; BinaryOperator < PairBox < A1 , A2 > > combiner = ( acc1 , acc2 ) -> { acc1 . a = c1Combiner . apply ( acc1 . a , acc2 . a ) ; acc1 . b = c2combiner . apply ( acc1 . b , acc2 . b ) ; return acc1 ; } ; Function < PairBox < A1 , A2 > , R > resFinisher = acc -> { R1 r1 = c1 . finisher ( ) . apply ( acc . a ) ; R2 r2 = c2 . finisher ( ) . apply ( acc . b ) ; return finisher . apply ( r1 , r2 ) ; } ; Predicate < A1 > c1Finished = finished ( c1 ) ; Predicate < A2 > c2Finished = finished ( c2 ) ; if ( c1Finished != null && c2Finished != null ) { Predicate < PairBox < A1 , A2 > > finished = acc -> c1Finished . test ( acc . a ) && c2Finished . test ( acc . b ) ; return new CancellableCollectorImpl < > ( supplier , accumulator , combiner , resFinisher , finished , c ) ; } return Collector . of ( supplier , accumulator , combiner , resFinisher , c . toArray ( new Characteristics [ 0 ] ) ) ;
public class BaseWindowedBolt { /** * define max lag in ms , only for event time windows * @ param maxLag max lag time */ public BaseWindowedBolt < T > withMaxLagMs ( Time maxLag ) { } }
this . maxLagMs = maxLag . toMilliseconds ( ) ; ensureNonNegativeTime ( maxLagMs ) ; return this ;
public class PrimeExceptionHandler { /** * Builds the view if not already available . This is mostly required for ViewExpiredException ' s . * @ param context The { @ link FacesContext } . * @ param throwable The occurred { @ link Throwable } . * @ param rootCause The root cause . * @ return The unwrapped { @ link Throwable } . * @ throws java . io . IOException If building the view fails . */ protected Throwable buildView ( FacesContext context , Throwable throwable , Throwable rootCause ) throws IOException { } }
if ( context . getViewRoot ( ) == null ) { ViewHandler viewHandler = context . getApplication ( ) . getViewHandler ( ) ; String viewId = viewHandler . deriveViewId ( context , ComponentUtils . calculateViewId ( context ) ) ; ViewDeclarationLanguage vdl = viewHandler . getViewDeclarationLanguage ( context , viewId ) ; UIViewRoot viewRoot = vdl . createView ( context , viewId ) ; context . setViewRoot ( viewRoot ) ; vdl . buildView ( context , viewRoot ) ; // Workaround for Mojarra // if UIViewRoot = = null - > ' IllegalArgumentException ' is throwed instead of ' ViewExpiredException ' if ( rootCause == null && throwable instanceof IllegalArgumentException ) { rootCause = new javax . faces . application . ViewExpiredException ( viewId ) ; } } return rootCause ;
public class CmsJlanSearch { /** * Returns the next file object in the search result . < p > * @ return the next file object */ protected CmsJlanNetworkFile nextFile ( ) { } }
if ( ! hasMoreFiles ( ) ) { return null ; } CmsJlanNetworkFile file = m_files . get ( m_position ) ; m_position += 1 ; return file ;
public class FileUtils { /** * Reads a file and returns the result in a String * @ param file File * @ return String * @ throws IOException */ public static String read ( final File file ) throws IOException { } }
final StringBuilder sb = new StringBuilder ( ) ; try ( final FileReader fr = new FileReader ( file ) ; final BufferedReader br = new BufferedReader ( fr ) ; ) { String sCurrentLine ; while ( ( sCurrentLine = br . readLine ( ) ) != null ) { sb . append ( sCurrentLine ) ; } } return sb . toString ( ) ;
public class JQLChecker { /** * Replace place holder with element passed by listener . * @ param context * the context * @ param jql * the jql * @ param listener * the listener * @ return string obtained by replacements */ public String replaceFromVariableStatement ( JQLContext context , String jql , final JQLReplacerListener listener ) { } }
JQLRewriterListener rewriterListener = new JQLRewriterListener ( ) ; rewriterListener . init ( listener ) ; return replaceFromVariableStatementInternal ( context , jql , replace , rewriterListener ) ;
public class IPSettings { /** * returns a single , best matching node for the given address * @ param addr * @ return */ public IPRangeNode get ( InetAddress addr ) { } }
if ( version == 0 ) // no data was added return null ; IPRangeNode node = isV4 ( addr ) ? ipv4 : ipv6 ; if ( ! this . isSorted ) this . optimize ( ) ; return node . findFast ( addr ) ;
public class ClassUtil { /** * It ' s designed for field / method / class / column / table names . and source and target Strings will be cached . * @ param str * @ return */ public static String toUpperCaseWithUnderscore ( final String str ) { } }
if ( N . isNullOrEmpty ( str ) ) { return str ; } String result = upperCaseWithUnderscorePropNamePool . get ( str ) ; if ( result == null ) { result = StringUtil . toUpperCaseWithUnderscore ( str ) ; upperCaseWithUnderscorePropNamePool . put ( str , result ) ; } return result ;
public class ServiceManagerAmpWrapper { /** * @ Override * public < T > T createPinProxy ( ServiceRefAmp actorRef , * Class < T > api , * Class < ? > . . . apis ) * return getDelegate ( ) . createPinProxy ( actorRef , api , apis ) ; */ @ Override public ServiceRefAmp bind ( ServiceRefAmp service , String address ) { } }
return delegate ( ) . bind ( service , address ) ;
public class Results { /** * A redirect that uses 303 see other . * The redirect does NOT need a template and does NOT * render a text in the Http body by default . * If you wish to do so please * remove the { @ link NoHttpBody } that is set as renderable of * the Result . * @ param url The url used as redirect target . * @ return A nicely configured result with status code 303 and the url set * as Location header . Renders no Http body by default . */ public static Result redirect ( String url ) { } }
return status ( Result . SEE_OTHER ) . with ( HeaderNames . LOCATION , url ) . render ( NoHttpBody . INSTANCE ) ;
public class StandardSocketServer { /** * Contains the loop that waits for and handles incoming connections . */ public void run ( ) { } }
try { while ( true ) { // server thread blocks until a client connects Socket socket = server . accept ( ) ; System . out . println ( new LogEntry ( "client (" + socket . getInetAddress ( ) . getHostAddress ( ) + ") attempts to connect..." ) ) ; Connection c = establishConnection ( socket ) ; updateConnectedClients ( c ) ; } } catch ( IOException ioe ) { System . out . println ( new LogEntry ( "server forced to stop with message " + ioe . getClass ( ) . getName ( ) + ' ' + ioe . getMessage ( ) ) ) ; } catch ( Throwable t ) { System . out . println ( new LogEntry ( Level . CRITICAL , "server forced to stop with message " + t . getClass ( ) . getName ( ) + ' ' + t . getMessage ( ) , t ) ) ; }
public class FilesystemIterator { /** * Gets the rule that best suits the provided filename . The rule is the longer * rule between the regular rules and the prefix rules . * The regular rules are scanned first by looking through the filename and then * all parents up to the root for the first match . These use Map lookups in the * set of rules so this should still perform well when there are many rules . * For example , when searching for the rule for / home / u / username / tmp / , this * will search : * < ol > * < li > / home / u / username / tmp / < / li > * < li > / home / u / username / tmp < / li > * < li > / home / u / username / < / li > * < li > / home / u / username < / li > * < li > / home / u / < / li > * < li > / home / u < / li > * < li > / home / < / li > * < li > / home < / li > * < li > / < / li > * < li > < / li > * < / ol > * Next , the entire list of prefix rules is searched , with the longest rule * being used if it is a longer match than that found in the regular rules . */ private FilesystemIteratorRule getBestRule ( final String filename ) { } }
String longestPrefix = null ; FilesystemIteratorRule rule = null ; // First search the path and all of its parents for the first regular rule String path = filename ; while ( true ) { // Check the current path for an exact match // System . out . println ( " DEBUG : Checking " + path ) ; rule = rules . get ( path ) ; if ( rule != null ) { longestPrefix = path ; break ; } // If done , break the loop int pathLen = path . length ( ) ; if ( pathLen == 0 ) break ; int lastSlashPos = path . lastIndexOf ( File . separatorChar ) ; if ( lastSlashPos == - 1 ) { path = "" ; } else if ( lastSlashPos == ( pathLen - 1 ) ) { // If ends with a slash , remove that slash path = path . substring ( 0 , lastSlashPos ) ; } else { // Otherwise , remove and leave the last slash path = path . substring ( 0 , lastSlashPos + 1 ) ; } } if ( prefixRules != null ) { // TODO : If there are many more prefix rules than the length of this filename , it will at some threshold // be faster to do a map lookup for each possible length of the string . // Would also only need to look down to longestPrefix for ( Map . Entry < String , FilesystemIteratorRule > entry : prefixRules . entrySet ( ) ) { String prefix = entry . getKey ( ) ; if ( ( longestPrefix == null || prefix . length ( ) > longestPrefix . length ( ) ) && filename . startsWith ( prefix ) ) { // System . err . println ( " DEBUG : FilesystemIterator : getBestRule : filename = " + filename + " , prefix = " + prefix + " , longestPrefix = " + longestPrefix ) ; longestPrefix = prefix ; rule = entry . getValue ( ) ; } } } return rule ;
public class UTF16 { /** * Check if the string buffer contains more Unicode code points than a certain number . This is * more efficient than counting all code points in the entire string buffer and comparing that * number with a threshold . This function may not need to scan the string buffer at all if the * length is within a certain range , and never needs to count more than ' number + 1 ' code * points . Logically equivalent to ( countCodePoint ( s ) & gt ; number ) . A Unicode code point may * occupy either one or two code units . * @ param source The input string buffer . * @ param number The number of code points in the string buffer is compared against the ' number ' * parameter . * @ return boolean value for whether the string buffer contains more Unicode code points than * ' number ' . */ public static boolean hasMoreCodePointsThan ( StringBuffer source , int number ) { } }
if ( number < 0 ) { return true ; } if ( source == null ) { return false ; } int length = source . length ( ) ; // length > = 0 known // source contains at least ( length + 1 ) / 2 code points : < = 2 // chars per cp if ( ( ( length + 1 ) >> 1 ) > number ) { return true ; } // check if source does not even contain enough chars int maxsupplementary = length - number ; if ( maxsupplementary <= 0 ) { return false ; } // there are maxsupplementary = length - number more chars than // asked - for code points // count code points until they exceed and also check that there are // no more than maxsupplementary supplementary code points ( char pairs ) int start = 0 ; while ( true ) { if ( length == 0 ) { return false ; } if ( number == 0 ) { return true ; } if ( isLeadSurrogate ( source . charAt ( start ++ ) ) && start != length && isTrailSurrogate ( source . charAt ( start ) ) ) { start ++ ; if ( -- maxsupplementary <= 0 ) { // too many pairs - too few code points return false ; } } -- number ; }
public class WorkspaceDataContainerBase { /** * { @ inheritDoc } */ public Calendar getCurrentTime ( ) { } }
Calendar cal = Calendar . getInstance ( ) ; cal . setTime ( new Date ( ) ) ; return cal ;
public class JDBC4CallableStatement { /** * Sets the designated parameter to the given java . sql . Time value . */ @ Override public void setTime ( String parameterName , Time x ) throws SQLException { } }
checkClosed ( ) ; throw SQLError . noSupport ( ) ;
public class DoubleIntegerDBIDKNNHeap { /** * Ensure the ties array has capacity for at least one more element . * @ param id * Id to add */ private void addToTies ( int id ) { } }
if ( ties . length == numties ) { ties = Arrays . copyOf ( ties , ( ties . length << 1 ) + 1 ) ; // grow . } ties [ numties ] = id ; ++ numties ;
public class AuthToken { /** * Create a copy of this AuthToken * @ return a new AuthToken object */ public AuthToken copy ( ) { } }
final AuthToken authToken = new AuthToken ( key ) ; authToken . tokenName = tokenName ; authToken . startTime = startTime ; authToken . expiration = expiration ; authToken . ip = ip ; authToken . acl = acl ; authToken . duration = duration ; return authToken ;
public class Occurrence { /** * 统计词频 * @ param key 增加一个词 */ public void addTerm ( String key ) { } }
TermFrequency value = trieSingle . get ( key ) ; if ( value == null ) { value = new TermFrequency ( key ) ; trieSingle . put ( key , value ) ; } else { value . increase ( ) ; } ++ totalTerm ;
public class ClassLoaderReflectionToolkit { /** * Calls { @ link ClassLoader # findLoadedClass } while holding { @ link ClassLoader # getClassLoadingLock } . * @ since 1.553 */ public static @ CheckForNull Class < ? > _findLoadedClass ( ClassLoader cl , String name ) { } }
synchronized ( getClassLoadingLock ( cl , name ) ) { return ( Class ) invoke ( FIND_LOADED_CLASS , RuntimeException . class , cl , name ) ; }
public class NonSyncHashtable { /** * Returns the value to which the specified key is mapped in this hashtable . * @ param key a key in the hashtable . * @ return The value to which the key is mapped in this hashtable ; * < code > null < / code > if the key is not mapped to any value in * this hashtable . */ public Object get ( Object key ) { } }
NonSyncHashtableEntry tab [ ] = table ; int hash = key . hashCode ( ) ; int index = ( hash & 0x7FFFFFFF ) % tab . length ; for ( NonSyncHashtableEntry e = tab [ index ] ; e != null ; e = e . next ) { if ( ( e . hash == hash ) && e . key . equals ( key ) ) { return e . value ; } } return null ;
public class KPSwitchConflictUtil { /** * Hide the panel and the keyboard . * @ param panelLayout the layout of panel . */ public static void hidePanelAndKeyboard ( final View panelLayout ) { } }
final Activity activity = ( Activity ) panelLayout . getContext ( ) ; final View focusView = activity . getCurrentFocus ( ) ; if ( focusView != null ) { KeyboardUtil . hideKeyboard ( activity . getCurrentFocus ( ) ) ; focusView . clearFocus ( ) ; } panelLayout . setVisibility ( View . GONE ) ;
public class S3Dispatcher {
    /**
     * Handles POST /bucket/id?uploads — initiates a multipart upload.
     *
     * @param ctx    the context describing the current request
     * @param bucket the bucket containing the object to upload
     * @param id     name of the object to upload
     */
    private void startMultipartUpload(WebContext ctx, Bucket bucket, String id) {
        Response response = ctx.respondWith();
        // Collect the metadata/ACL/content headers S3 preserves for multipart
        // uploads, and echo them back on the response.
        // NOTE(review): the properties map is populated but not read again in
        // this method — presumably consumed elsewhere or vestigial; confirm.
        Map<String, String> properties = Maps.newTreeMap();
        for (String name : ctx.getRequest().headers().names()) {
            String nameLower = name.toLowerCase();
            if (nameLower.startsWith("x-amz-meta-") || "content-md5".equals(nameLower)
                    || "content-type".equals(nameLower) || "x-amz-acl".equals(nameLower)) {
                properties.put(name, ctx.getHeader(name));
                response.addHeader(name, ctx.getHeader(name));
            }
        }
        response.setHeader(HTTP_HEADER_NAME_CONTENT_TYPE, CONTENT_TYPE_XML);
        // Allocate a fresh upload id and create its working directory.
        String uploadId = String.valueOf(uploadIdCounter.inc());
        multipartUploads.add(uploadId);
        getUploadDir(uploadId).mkdirs();
        // Answer with the standard InitiateMultipartUploadResult document.
        XMLStructuredOutput out = response.xml();
        out.beginOutput("InitiateMultipartUploadResult");
        out.property(RESPONSE_BUCKET, bucket.getName());
        out.property("Key", id);
        out.property("UploadId", uploadId);
        out.endOutput();
    }
}
public class FileUtils { /** * Devuelve el nombre de un path , por ejmeplo : / dir / toto . txt > toto . txt o en windows \ toto \ toto . txt > toto . txt . * El separador se escoje en funcion de si la el fileNameAndPath ya contien \ o / * @ param fileNameAndPath contiene un path y un nombre del fichero , ej : / dir / toto . txt * @ return */ public static String getFileName ( String fileNameAndPath ) { } }
if ( fileNameAndPath == null ) { return null ; } String fileSeparator ; if ( fileNameAndPath . contains ( "/" ) ) { fileSeparator = "/" ; } else { fileSeparator = "\\" ; } int lastIndexOf = fileNameAndPath . lastIndexOf ( fileSeparator ) ; if ( lastIndexOf < 0 ) { return fileNameAndPath ; } else { return fileNameAndPath . substring ( lastIndexOf + 1 , fileNameAndPath . length ( ) ) ; }
public class ConnectionDAODefaultImpl {
    /**
     * Initializes the given connection for the TANGO database at host:port
     * and connects to it.
     *
     * @param connection connection object to initialize
     * @param host       database host name (canonicalized before use)
     * @param port       database port
     * @throws DevFailed when the database connection cannot be established
     */
    public void init(final Connection connection, final String host, final String port) throws DevFailed {
        connection.url = new TangoUrl(buildUrlName(TangoUrl.getCanonicalName(host), port));
        connection.setDevice_is_dbase(true);
        connection.transparent_reconnection = true; // Always true for Database
        // Ensure the ORB is initialized before connecting.
        ApiUtil.get_orb();
        connect_to_dbase(connection);
        connection.devname = connection.device.name();
        connection.setAlready_connected(true);
    }
}
public class PhoneNumberValueRestValidator {
    /**
     * {@inheritDoc} Initializes the validator by copying every configuration
     * attribute from the constraint annotation.
     *
     * @see javax.validation.ConstraintValidator#initialize(java.lang.annotation.Annotation)
     */
    @Override
    public final void initialize(final PhoneNumberValueRest pconstraintAnnotation) {
        // Validation message and the bean fields holding number and country code.
        message = pconstraintAnnotation.message();
        fieldPhoneNumber = pconstraintAnnotation.fieldPhoneNumber();
        fieldCountryCode = pconstraintAnnotation.fieldCountryCode();
        // Accepted phone-number formats.
        allowDin5008 = pconstraintAnnotation.allowDin5008();
        allowE123 = pconstraintAnnotation.allowE123();
        allowUri = pconstraintAnnotation.allowUri();
        allowMs = pconstraintAnnotation.allowMs();
        allowCommon = pconstraintAnnotation.allowCommon();
    }
}
public class BytesWritable {
    /**
     * Set the value to a copy of the given byte range.
     *
     * @param newData the new values to copy in
     * @param offset  the offset in newData to start at
     * @param length  the number of bytes to copy
     */
    public void set(byte[] newData, int offset, int length) {
        // Shrink to 0 first so that growing to the new length does not need to
        // preserve the previous contents — presumably setSize copies old data
        // on grow; confirm against the setSize implementation.
        setSize(0);
        setSize(length);
        System.arraycopy(newData, offset, bytes, 0, size);
    }
}
public class Do { /** * < div color = ' red ' style = " font - size : 24px ; color : red " > < b > < i > < u > JCYPHER < / u > < / i > < / b > < / div > * < div color = ' red ' style = " font - size : 18px ; color : red " > < i > set a label of a node in the DO part of a FOREACH expression < / i > < / div > * < div color = ' red ' style = " font - size : 18px ; color : red " > < i > e . g . . . . < b > SET ( n . label ( " Person " ) ) < / b > < / i > < / div > * < br / > */ public DoConcat SET ( JcLabel label ) { } }
ModifyTerminal mt = ModifyFactory . setLabel ( label ) ; ASTNode clause = APIObjectAccess . getAstNode ( mt ) ; clause . setClauseType ( ClauseType . SET ) ; return createConcat ( clause ) ;
public class CleanupThread { /** * { @ inheritDoc } */ @ Override public void run ( ) { } }
try { boolean retry = true ; while ( retry && active ) { // Get all threads , wait for ' foreign ' ( = = not our own threads ) // and when all finished , finish as well . This is in order to avoid // hanging endless because the HTTP Server thread cant be set into // daemon mode Thread threads [ ] = enumerateThreads ( ) ; retry = joinThreads ( threads ) ; } } finally { // All non - daemon threads stopped = = > server can be stopped , too server . stop ( 0 ) ; }
public class CommerceDiscountUsageEntryUtil {
    /**
     * Returns the last commerce discount usage entry in the ordered set where
     * groupId = ?.
     *
     * @param groupId           the group ID
     * @param orderByComparator the comparator to order the set by (optionally null)
     * @return the last matching commerce discount usage entry
     * @throws NoSuchDiscountUsageEntryException if no matching entry is found
     */
    public static CommerceDiscountUsageEntry findByGroupId_Last(long groupId,
            OrderByComparator<CommerceDiscountUsageEntry> orderByComparator)
        throws com.liferay.commerce.discount.exception.NoSuchDiscountUsageEntryException {
        // Thin delegation to the persistence layer.
        return getPersistence().findByGroupId_Last(groupId, orderByComparator);
    }
}
public class StrBuilder { /** * Appends one of both separators to the StrBuilder . * If the builder is currently empty it will append the defaultIfEmpty - separator * Otherwise it will append the standard - separator * Appending a null separator will have no effect . * The separator is appended using { @ link # append ( String ) } . * This method is for example useful for constructing queries * < pre > * StrBuilder whereClause = new StrBuilder ( ) ; * if ( searchCommand . getPriority ( ) ! = null ) { * whereClause . appendSeparator ( " and " , " where " ) ; * whereClause . append ( " priority = ? " ) * if ( searchCommand . getComponent ( ) ! = null ) { * whereClause . appendSeparator ( " and " , " where " ) ; * whereClause . append ( " component = ? " ) * selectClause . append ( whereClause ) * < / pre > * @ param standard the separator if builder is not empty , null means no separator * @ param defaultIfEmpty the separator if builder is empty , null means no separator * @ return this , to enable chaining * @ since 2.5 */ public StrBuilder appendSeparator ( final String standard , final String defaultIfEmpty ) { } }
final String str = isEmpty ( ) ? defaultIfEmpty : standard ; if ( str != null ) { append ( str ) ; } return this ;
public class Op {
    /**
     * Creates an operation expression on the specified String[] target object.
     *
     * @param target the target array on which the expression will execute
     * @return an operator, ready for chaining
     */
    public static <T> Level0ArrayOperator<String[], String> on(final String[] target) {
        // Delegate to the typed array factory with the String element type.
        return onArrayOf(Types.STRING, target);
    }
}
public class LabsInner {
    /**
     * Add users to a lab.
     *
     * @param resourceGroupName the name of the resource group
     * @param labAccountName    the name of the lab account
     * @param labName           the name of the lab
     * @param emailAddresses    list of user email addresses to add to the lab
     * @param serviceCallback   async ServiceCallback handling success and failure
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @return the {@link ServiceFuture} object
     */
    public ServiceFuture<Void> addUsersAsync(String resourceGroupName, String labAccountName, String labName, List<String> emailAddresses, final ServiceCallback<Void> serviceCallback) {
        // Wrap the service-response observable in a ServiceFuture bound to the callback.
        return ServiceFuture.fromResponse(addUsersWithServiceResponseAsync(resourceGroupName, labAccountName, labName, emailAddresses), serviceCallback);
    }
}
public class FleetReader {
    /**
     * Make the request to the Twilio API to perform the read.
     *
     * @param client TwilioRestClient with which to make the request
     * @return Fleet ResourceSet
     */
    @Override
    public ResourceSet<Fleet> read(final TwilioRestClient client) {
        // Lazily-paging resource set seeded with the first result page.
        return new ResourceSet<>(this, client, firstPage(client));
    }
}
public class MQMessageUtils { /** * match return List , not match return null */ public static HashMode getPartitionHashColumns ( String name , String pkHashConfigs ) { } }
if ( StringUtils . isEmpty ( pkHashConfigs ) ) { return null ; } List < PartitionData > datas = partitionDatas . get ( pkHashConfigs ) ; for ( PartitionData data : datas ) { if ( data . simpleName != null ) { if ( data . simpleName . equalsIgnoreCase ( name ) ) { return data . hashMode ; } } else { if ( data . regexFilter . filter ( name ) ) { return data . hashMode ; } } } return null ;
public class StorageAccountsInner {
    /**
     * Updates the specified Data Lake Analytics account to add an Azure
     * Storage account.
     *
     * @param resourceGroupName  the name of the Azure resource group
     * @param accountName        the name of the Data Lake Analytics account
     * @param storageAccountName the name of the Azure Storage account to add
     * @param parameters         access key and optional suffix for the storage account
     * @param serviceCallback    async ServiceCallback handling success and failure
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @return the {@link ServiceFuture} object
     */
    public ServiceFuture<Void> addAsync(String resourceGroupName, String accountName, String storageAccountName, AddStorageAccountParameters parameters, final ServiceCallback<Void> serviceCallback) {
        // Wrap the service-response observable in a ServiceFuture bound to the callback.
        return ServiceFuture.fromResponse(addWithServiceResponseAsync(resourceGroupName, accountName, storageAccountName, parameters), serviceCallback);
    }
}
public class ECDSASignature { /** * Will automatically adjust the S component to be less than or equal to half the curve * order , if necessary . This is required because for every signature ( r , s ) the signature * ( r , - s ( mod N ) ) is a valid signature of the same message . However , we dislike the * ability to modify the bits of a Bitcoin transaction after it ' s been signed , as that * violates various assumed invariants . Thus in future only one of those forms will be * considered legal and the other will be banned . * @ return the signature in a canonicalised form . */ public ECDSASignature toCanonicalised ( ) { } }
if ( ! isCanonical ( ) ) { // The order of the curve is the number of valid points that exist on that curve . // If S is in the upper half of the number of valid points , then bring it back to // the lower half . Otherwise , imagine that // N = 10 // s = 8 , so ( - 8 % 10 = = 2 ) thus both ( r , 8 ) and ( r , 2 ) are valid solutions . // 10 - 8 = = 2 , giving us always the latter solution , which is canonical . return new ECDSASignature ( r , Sign . CURVE . getN ( ) . subtract ( s ) ) ; } else { return this ; }
public class JsonFilesScanner {
    /**
     * Builds a DataObject (map -> object) from the given slice of JSON files.
     *
     * @param files source files
     * @param start index of the first file to read
     * @param count number of files to read
     * @return the combined data object
     */
    public DataObject object(File[] files, int start, int count) {
        // Convert the merged maps/collections representation into a DataObject.
        return STRUCT.fromMapsAndCollections(map(files, start, count));
    }
}
public class CPInstancePersistenceImpl { /** * Returns the first cp instance in the ordered set where CPDefinitionId = & # 63 ; and status & ne ; & # 63 ; . * @ param CPDefinitionId the cp definition ID * @ param status the status * @ param orderByComparator the comparator to order the set by ( optionally < code > null < / code > ) * @ return the first matching cp instance , or < code > null < / code > if a matching cp instance could not be found */ @ Override public CPInstance fetchByC_NotST_First ( long CPDefinitionId , int status , OrderByComparator < CPInstance > orderByComparator ) { } }
List < CPInstance > list = findByC_NotST ( CPDefinitionId , status , 0 , 1 , orderByComparator ) ; if ( ! list . isEmpty ( ) ) { return list . get ( 0 ) ; } return null ;
public class UnresolvedBindingValidator { /** * Prune all of the invalid optional keys from the graph . After this method , all of the keys * remaining in the graph are resolvable . */ public void pruneInvalidOptional ( DependencyExplorerOutput output , InvalidKeys invalidKeys ) { } }
DependencyGraph . GraphPruner prunedGraph = new DependencyGraph . GraphPruner ( output . getGraph ( ) ) ; for ( Key < ? > key : invalidKeys . getInvalidOptionalKeys ( ) ) { prunedGraph . remove ( key ) ; output . removeBinding ( key ) ; } output . setGraph ( prunedGraph . update ( ) ) ;
public class MessageFormat {
    /**
     * Finds the ChoiceFormat sub-message for the given number.
     *
     * @param pattern   a MessagePattern
     * @param partIndex the index of the first ChoiceFormat argument style part
     * @param number    a number to be mapped to one of the ChoiceFormat argument's intervals
     * @return the sub-message start part index
     */
    private static int findChoiceSubMessage(MessagePattern pattern, int partIndex, double number) {
        int count = pattern.countParts();
        int msgStart;
        // Iterate over (ARG_INT | DOUBLE, ARG_SELECTOR, message) tuples
        // until ARG_LIMIT or end of choice-only pattern.
        // Ignore the first number and selector and start the loop on the first message.
        partIndex += 2;
        for (;;) {
            // Skip but remember the current sub-message.
            msgStart = partIndex;
            partIndex = pattern.getLimitPartIndex(partIndex);
            if (++partIndex >= count) {
                // Reached the end of the choice-only pattern.
                // Return with the last sub-message.
                break;
            }
            Part part = pattern.getPart(partIndex++);
            Part.Type type = part.getType();
            if (type == Part.Type.ARG_LIMIT) {
                // Reached the end of the ChoiceFormat style.
                // Return with the last sub-message.
                break;
            }
            // part is an ARG_INT or ARG_DOUBLE holding the interval boundary.
            assert type.hasNumericValue();
            double boundary = pattern.getNumericValue(part);
            // Fetch the ARG_SELECTOR character ('<' means strict comparison).
            int selectorIndex = pattern.getPatternIndex(partIndex++);
            char boundaryChar = pattern.getPatternString().charAt(selectorIndex);
            if (boundaryChar == '<' ? !(number > boundary) : !(number >= boundary)) {
                // The number is in the interval between the previous boundary
                // and the current one; return the sub-message between them.
                // The !(a > b) and !(a >= b) comparisons are equivalent to
                // (a <= b) and (a < b) except they "catch" NaN.
                break;
            }
        }
        return msgStart;
    }
}
public class LocaleTemplateEnumerator {
    /**
     * Generate the next combination of template name and return it.
     * Combinations are produced from most to least locale-specific:
     * name/lang/country, name/lang_country, name/lang, name_lang_country,
     * name_lang, and finally the bare name.
     *
     * @return the next combination of template name
     * @throws IllegalStateException when no combination remains
     */
    @Override
    public String next() {
        if (hasNext()) {
            // Without locale information only the bare template name is enumerated.
            if (enumerationSize == NULL_LOCALE_ENUMERATION_SIZE) {
                cursor++;
                return templateName;
            }
            // Locale parts captured earlier by the matcher: group(1) = language,
            // group(2) = country.
            final String language = matcher.group(1);
            final String country = matcher.group(2);
            switch (cursor) {
                case 0:
                    cursor++;
                    return templateName + FILE_SEPARATOR + language + FILE_SEPARATOR + country;
                case 1:
                    cursor++;
                    return templateName + FILE_SEPARATOR + language + UNDERSCORE + country;
                case 2:
                    cursor++;
                    return templateName + FILE_SEPARATOR + language;
                case 3:
                    cursor++;
                    return templateName + UNDERSCORE + language + UNDERSCORE + country;
                case 4:
                    cursor++;
                    return templateName + UNDERSCORE + language;
                case 5:
                    cursor++;
                    return templateName;
            }
        }
        // hasNext() was false (or the cursor ran past the last case).
        String message = "No next available template name combination.";
        LOGGER.error(message);
        throw new IllegalStateException(message);
    }
}
public class DefaultFXMLControllerFactory {
    /**
     * {@inheritDoc}
     * Instantiates the given controller class reflectively and wires the
     * related model into it. Returns null when instantiation fails.
     *
     * @param controllerClass the controller class to instantiate
     * @return the initialized controller, or null on instantiation failure
     */
    @SuppressWarnings("unchecked")
    @Override
    public Object call(final Class<?> controllerClass) {
        FXMLController<Model, View<Model, ?, ?>> controller = null;
        try {
            controller = (FXMLController<Model, View<Model, ?, ?>>) controllerClass.newInstance();
            controller.model(relatedModel());
        } catch (InstantiationException | IllegalAccessException e) {
            // Log and fall through: a null controller is returned on failure.
            LOGGER.log(FXMLMessages.DEFAULT_CTRLR_CREATION_ERROR, e, e.getMessage());
        }
        return controller;
    }
}
public class ExtendedMessageFormat { /** * Get a custom format from a format description . * @ param desc * String * @ return Format */ private Format getFormat ( String desc ) { } }
if ( registry != null ) { String name = desc ; String args = "" ; int i = desc . indexOf ( START_FMT ) ; if ( i > 0 ) { name = desc . substring ( 0 , i ) . trim ( ) ; args = desc . substring ( i + 1 ) . trim ( ) ; } FormatFactory factory = registry . get ( name ) ; if ( factory != null ) { return factory . getFormat ( name , args , getLocale ( ) ) ; } } return null ;
public class KafkaQueue { /** * Setter for { @ link # kafkaClient } . * @ param kafkaClient * @ param setMyOwnKafkaClient * @ return */ protected KafkaQueue < ID , DATA > setKafkaClient ( KafkaClient kafkaClient , boolean setMyOwnKafkaClient ) { } }
if ( this . kafkaClient != null && myOwnKafkaClient ) { this . kafkaClient . destroy ( ) ; } this . kafkaClient = kafkaClient ; myOwnKafkaClient = setMyOwnKafkaClient ; return this ;
public class JSDocInfo { /** * Documents the block - level comment / description . * @ param description the description */ boolean documentBlock ( String description ) { } }
if ( ! lazyInitDocumentation ( ) ) { return true ; } if ( documentation . blockDescription != null ) { return false ; } documentation . blockDescription = description ; return true ;
public class LatencyLimiter {
    /**
     * Throttle the execution process and re-adjust the rate requirement on the
     * fly. The limiter automatically re-adjusts the rate internally by using a
     * basic {@link RateLimiter} after analysis of the latency data gathered
     * from the performance tracking.
     *
     * @param verbose the flag indicating whether rate adjustment messages should be displayed
     */
    public void throttle(boolean verbose) {
        // Observe latency to adjust the rate to reach target latency.
        // Re-evaluate at most once every 5 seconds.
        if (System.currentTimeMillis() - this.LastCheck > 5000l) {
            if (this.End.getExecutionCount() - this.Start.getExecutionCount() > 0) {
                // Average latency over the executions since the last check.
                double observedLatency = (double) (this.End.getTotalExecutionDuration() - this.Start.getTotalExecutionDuration()) / (double) (this.End.getExecutionCount() - this.Start.getExecutionCount());
                double tuningLatency = observedLatency;
                long[] el = this.End.getLatencyBuckets();
                long[] sl = this.Start.getLatencyBuckets();
                long ec = this.End.getExecutionCount() - this.Start.getExecutionCount();
                // Sum executions that fell into the first 25 latency buckets.
                long elsum = 0;
                for (int i = 0; i < 25; i++) {
                    elsum += el[i];
                }
                long slsum = 0;
                for (int i = 0; i < 25; i++) {
                    slsum += sl[i];
                }
                // If most (97%) requests are below 25ms, fudge out observed latency
                // to remove accidental outliers that would cause too much
                // oscillation in latency targetting.
                if (((double) (elsum - slsum) / (double) ec) > 0.97) {
                    long outlierExecutionDuration = 0;
                    long outlierExecutionCount = 0;
                    for (int i = 25; i < 109; i++) {
                        outlierExecutionCount += (el[i] - sl[i]);
                        // buckets over 99 cover 50ms each
                        if (i >= 100)
                            outlierExecutionDuration += (el[i] - sl[i]) * 50l;
                        else
                            outlierExecutionDuration += (el[i] - sl[i]);
                    }
                    tuningLatency = (double) (this.End.getTotalExecutionDuration() - this.Start.getTotalExecutionDuration() - outlierExecutionDuration) / (double) (this.End.getExecutionCount() - this.Start.getExecutionCount() - outlierExecutionCount);
                }
                // Controller: back off hard when far above target latency, creep
                // back up slowly when well below it.
                long oldRate = this.Rate;
                if (tuningLatency > this.TargetLatency * 2.0)
                    this.Rate = (long) (this.Rate * 0.8);
                else if (tuningLatency > this.TargetLatency * 1.25)
                    this.Rate = (long) (this.Rate * 0.95);
                else if (tuningLatency > this.TargetLatency * 1.1)
                    this.Rate = (long) (this.Rate * 0.999);
                else if (tuningLatency < this.TargetLatency * 0.5)
                    this.Rate = (long) (this.Rate * 1.1);
                else if (tuningLatency < this.TargetLatency * 0.75)
                    this.Rate = (long) (this.Rate * 1.01);
                else if (tuningLatency < this.TargetLatency * 0.9)
                    this.Rate = (long) (this.Rate * 1.001);
                if (verbose && oldRate != this.Rate)
                    System.out.printf("%8s | Adjusting %s to: %,11.1f TPS | Recent Latency : %7.2f\n", this.DateFormat.format(new Date(Math.round((System.currentTimeMillis() - this.StartTime) / 1000d) * 1000l)), (oldRate < this.Rate ? " UP " : "DOWN"), (double) this.Rate, tuningLatency);
            }
            // Roll the observation window forward.
            this.Start = (PerfCounter) this.End.clone();
            this.End = ClientConnectionPool.getStatistics(this.Connection).get(this.Procedure);
            this.LastCheck = System.currentTimeMillis();
        }
        // Apply the (possibly adjusted) rate.
        this.Limiter.throttle(this.Rate);
    }
}
public class SegmentsStylingPolicy { /** * Select a segment * @ param segment Segment to select */ protected void selectSegment ( Segment segment ) { } }
if ( segment . isUnpaired ( ) ) { // remember selected unpaired segment for ( Segment other : segments . getPairedSegments ( segment ) ) { indirectSelections . put ( other , segment ) ; selectSegment ( other ) ; } } else { if ( ! selectedSegments . contains ( segment ) ) { selectedSegments . add ( segment ) ; if ( segment . getDBIDs ( ) != null ) { unselectedObjects . removeDBIDs ( segment . getDBIDs ( ) ) ; } } }
public class AbstractExecution {
    /**
     * {@inheritDoc}
     * Validates that files, algorithms and targets are present whenever the
     * corresponding fail-if-missing flag is set.
     *
     * @throws ExecutionException when a required input collection is null or empty
     */
    public void checkParameters() throws ExecutionException {
        if (isFailIfNoFiles() && (files == null || files.isEmpty())) {
            throw new ExecutionException("No file to process.");
        }
        if (isFailIfNoAlgorithms() && (algorithms == null || algorithms.isEmpty())) {
            throw new ExecutionException("No checksum algorithm defined.");
        }
        if (isFailIfNoTargets() && (targets == null || targets.isEmpty())) {
            throw new ExecutionException("No output target defined.");
        }
    }
}
public class SpeakUtil {
    /**
     * Sends a speak notification to the specified place object originating
     * with the specified speaker (which may be a server entity faking a
     * "speak" message) and with the supplied message content.
     *
     * @param speakObj the object on which to generate the speak message
     * @param speaker  the username of the user that generated the message (or
     *                 some special speaker name for server messages)
     * @param bundle   null when the message originates from a real human; the
     *                 bundle identifier used by the client to translate the
     *                 message text when a server entity "fakes" a chat message
     * @param message  the text of the speak message
     */
    public static void sendSpeak(DObject speakObj, Name speaker, String bundle, String message) {
        // Delegate with the default chat mode.
        sendSpeak(speakObj, speaker, bundle, message, ChatCodes.DEFAULT_MODE);
    }
}
public class authenticationvserver_authenticationldappolicy_binding { /** * Use this API to fetch authenticationvserver _ authenticationldappolicy _ binding resources of given name . */ public static authenticationvserver_authenticationldappolicy_binding [ ] get ( nitro_service service , String name ) throws Exception { } }
authenticationvserver_authenticationldappolicy_binding obj = new authenticationvserver_authenticationldappolicy_binding ( ) ; obj . set_name ( name ) ; authenticationvserver_authenticationldappolicy_binding response [ ] = ( authenticationvserver_authenticationldappolicy_binding [ ] ) obj . get_resources ( service ) ; return response ;
public class DefaultNamespaceContext { /** * Declared the specified { @ code uri } in this namespaceContext and returns * the prefix to which it is bound . * the prefix is guessed from the suggested namespaces specified at construction * or derived from the specified { @ code uri } * @ param uri uri to be declared * @ return the prefix to which { @ code uri } is bound * @ see jlibs . core . net . URLUtil # suggestPrefix ( java . util . Properties , String ) */ public String declarePrefix ( String uri ) { } }
String prefix = getPrefix ( uri ) ; if ( prefix == null ) { prefix = URLUtil . suggestPrefix ( suggested , uri ) ; if ( getNamespaceURI ( prefix ) != null ) { int i = 1 ; String _prefix ; while ( true ) { _prefix = prefix + i ; if ( getNamespaceURI ( _prefix ) == null ) { prefix = _prefix ; break ; } i ++ ; } } declarePrefix ( prefix , uri ) ; } return prefix ;
public class SQLExpressions {
    /**
     * REGR_COUNT returns an integer that is the number of non-null number
     * pairs used to fit the regression line.
     *
     * @param arg1 first arg
     * @param arg2 second arg
     * @return regr_count(arg1, arg2)
     */
    public static WindowOver<Double> regrCount(Expression<? extends Number> arg1, Expression<? extends Number> arg2) {
        // Window-function expression wrapping the REGR_COUNT SQL operator.
        return new WindowOver<Double>(Double.class, SQLOps.REGR_COUNT, arg1, arg2);
    }
}
public class GeoJsonRead { /** * Read the GeoJSON file . * @ param connection * @ param fileName * @ param tableReference * @ throws IOException * @ throws SQLException */ public static void readGeoJson ( Connection connection , String fileName , String tableReference ) throws IOException , SQLException { } }
GeoJsonDriverFunction gjdf = new GeoJsonDriverFunction ( ) ; gjdf . importFile ( connection , tableReference , URIUtilities . fileFromString ( fileName ) , new EmptyProgressVisitor ( ) ) ;
public class Matrix4x3d {
    /**
     * Apply a symmetric orthographic projection transformation for a
     * left-handed coordinate system using the given NDC z range to this
     * matrix. Equivalent to calling orthoLH() with left = -width/2,
     * right = +width/2, bottom = -height/2 and top = +height/2.
     * If M is this matrix and O the orthographic projection matrix, the new
     * matrix will be M * O, so when transforming a vector v with M * O * v
     * the orthographic projection is applied first.
     * To set the matrix without post-multiplying, use
     * {@link #setOrthoSymmetricLH(double, double, double, double, boolean)}.
     *
     * @param width      the distance between the right and left frustum edges
     * @param height     the distance between the top and bottom frustum edges
     * @param zNear      near clipping plane distance
     * @param zFar       far clipping plane distance
     * @param zZeroToOne true to use Vulkan's/Direct3D's NDC z range of [0..+1],
     *                   false for OpenGL's [-1..+1]
     * @return this
     * @see #setOrthoSymmetricLH(double, double, double, double, boolean)
     */
    public Matrix4x3d orthoSymmetricLH(double width, double height, double zNear, double zFar, boolean zZeroToOne) {
        // Delegate to the overload that takes a destination matrix, using this
        // matrix itself as the destination (in-place post-multiplication).
        return orthoSymmetricLH(width, height, zNear, zFar, zZeroToOne, this);
    }
}
public class AvailabilityTable { /** * Retrieve the table entry valid for the supplied date . * @ param date required date * @ return cost rate table entry */ public Availability getEntryByDate ( Date date ) { } }
Availability result = null ; for ( Availability entry : this ) { DateRange range = entry . getRange ( ) ; int comparisonResult = range . compareTo ( date ) ; if ( comparisonResult >= 0 ) { if ( comparisonResult == 0 ) { result = entry ; break ; } } else { break ; } } return result ;
public class AbstractSqlFluent {
    /**
     * {@inheritDoc}
     * Registers a lazily-evaluated parameter supplier on the SQL context.
     *
     * @param paramName the parameter name
     * @param supplier  supplier producing the parameter value on demand
     * @return this, for chaining
     * @see jp.co.future.uroborosql.fluent.SqlFluent#param(String, Supplier)
     */
    @SuppressWarnings("unchecked")
    @Override
    public T param(final String paramName, final Supplier<Object> supplier) {
        context().param(paramName, supplier);
        return (T) this;
    }
}
public class ParameterTool {
    /**
     * Returns the String value for the given key, or {@code null} when the
     * key does not exist.
     *
     * @param key parameter key to look up
     * @return the associated value, or null when absent
     */
    public String get(String key) {
        // Record a null default so the key appears in the defaults bookkeeping,
        // and mark the parameter as requested.
        addToDefaults(key, null);
        unrequestedParameters.remove(key);
        return data.get(key);
    }
}