signature
stringlengths
43
39.1k
implementation
stringlengths
0
450k
public class ApiOvhTelephony { /** * Previous billed consumption files * REST : GET / telephony / { billingAccount } / historyConsumption / { date } / file * @ param extension [ required ] Document suffix * @ param billingAccount [ required ] The name of your billingAccount * @ param date [ required ] */ public OvhPcsFile billingAccount_historyConsumption_date_file_GET ( String billingAccount , java . util . Date date , OvhBillDocument extension ) throws IOException { } }
String qPath = "/telephony/{billingAccount}/historyConsumption/{date}/file" ; StringBuilder sb = path ( qPath , billingAccount , date ) ; query ( sb , "extension" , extension ) ; String resp = exec ( qPath , "GET" , sb . toString ( ) , null ) ; return convertTo ( resp , OvhPcsFile . class ) ;
public class MethodDescriptorBenchmark { /** * Foo bar . */ @ Benchmark @ BenchmarkMode ( Mode . SampleTime ) @ OutputTimeUnit ( TimeUnit . NANOSECONDS ) public AsciiString old ( ) { } }
return new AsciiString ( "/" + method . getFullMethodName ( ) ) ;
public class CommonConfigUtils { /** * Returns the value for the configuration attribute matching the key provided . If the value does not exist or is empty , the * resulting value will be { @ code null } . */ public String [ ] getStringArrayConfigAttribute ( Map < String , Object > props , String key ) { } }
return trim ( ( String [ ] ) props . get ( key ) ) ;
public class BaseBigtableInstanceAdminClient { /** * Updates a cluster within an instance . * < p > Sample code : * < pre > < code > * try ( BaseBigtableInstanceAdminClient baseBigtableInstanceAdminClient = BaseBigtableInstanceAdminClient . create ( ) ) { * ClusterName name = ClusterName . of ( " [ PROJECT ] " , " [ INSTANCE ] " , " [ CLUSTER ] " ) ; * int serveNodes = 0; * Cluster request = Cluster . newBuilder ( ) * . setName ( name . toString ( ) ) * . setServeNodes ( serveNodes ) * . build ( ) ; * Cluster response = baseBigtableInstanceAdminClient . updateClusterAsync ( request ) . get ( ) ; * < / code > < / pre > * @ param request The request object containing all of the parameters for the API call . * @ throws com . google . api . gax . rpc . ApiException if the remote call fails */ @ BetaApi ( "The surface for long-running operations is not stable yet and may change in the future." ) public final OperationFuture < Cluster , UpdateClusterMetadata > updateClusterAsync ( Cluster request ) { } }
return updateClusterOperationCallable ( ) . futureCall ( request ) ;
public class Utils { /** * NULL and range safe get ( ) */ public static < T > T get ( T [ ] array , int position ) { } }
return position < 0 || position >= Utils . sizeOf ( array ) ? null : array [ position ] ;
public class MediaSpec {
    /**
     * Checks if this media specification matches a given media query.
     *
     * @param q The media query
     * @return {@code true} when this media specification matches the given media query.
     */
    public boolean matches(MediaQuery q) {
        // match the media type
        if (q.getType() != null) {
            if (q.getType().equals("all")) {
                if (q.isNegative())
                    return false; // "NOT all" doesn't match to anything
            } else if (q.getType().equals(this.getType()) == q.isNegative()) // other than all
                // The == compares a boolean to a boolean: fail when the types match AND the
                // query is negated, or when they differ AND the query is not negated (XOR).
                return false;
        }
        // match the eventual expressions
        for (MediaExpression e : q) {
            if (!this.matches(e))
                return false;
        }
        // everything matched
        return true;
    }
}
public class PreprocessedRowsFlusher { /** * Create an new rows flusher * @ param xmlUtil an util * @ param tableRows the rows * @ return the flusher * @ throws IOException if an I / O error occurs */ public static PreprocessedRowsFlusher create ( final XMLUtil xmlUtil , final List < TableRow > tableRows ) throws IOException { } }
return new PreprocessedRowsFlusher ( xmlUtil , tableRows , new StringBuilder ( STRING_BUILDER_SIZE ) ) ;
public class BigtableUtil {
    /**
     * Updates all clusters within the specified Bigtable instance to a specified number of nodes.
     * Useful for increasing the number of nodes at the beginning of a job and decreasing it at
     * the end to lower costs yet still get high throughput during bulk ingests/dumps.
     *
     * @param bigtableOptions Bigtable Options
     * @param numberOfNodes New number of nodes in the cluster
     * @param sleepDuration How long to sleep after updating the number of nodes. Google recommends
     *                      at least 20 minutes before the new nodes are fully functional
     * @throws IOException If setting up channel pool fails
     * @throws InterruptedException If sleep fails
     */
    public static void updateNumberOfBigtableNodes(final BigtableOptions bigtableOptions, final int numberOfNodes, final Duration sleepDuration) throws IOException, InterruptedException {
        // The channel pool is closed in the finally block so the admin connection
        // never leaks, even if any of the update calls below throw.
        final ChannelPool channelPool = ChannelPoolCreator.createPool(bigtableOptions.getAdminHost());
        try {
            final BigtableInstanceClient bigtableInstanceClient = new BigtableInstanceGrpcClient(channelPool);
            final String instanceName = bigtableOptions.getInstanceName().toString();
            // Fetch clusters in Bigtable instance
            final ListClustersRequest clustersRequest = ListClustersRequest.newBuilder().setParent(instanceName).build();
            final ListClustersResponse clustersResponse = bigtableInstanceClient.listCluster(clustersRequest);
            // For each cluster update the number of nodes
            for (Cluster cluster : clustersResponse.getClustersList()) {
                final Cluster updatedCluster = Cluster.newBuilder().setName(cluster.getName()).setServeNodes(numberOfNodes).build();
                LOG.info("Updating number of nodes to {} for cluster {}", numberOfNodes, cluster.getName());
                bigtableInstanceClient.updateCluster(updatedCluster);
            }
            // Wait for the new nodes to be provisioned
            // (skipped entirely when the caller passes a non-positive duration).
            if (sleepDuration.getMillis() > 0) {
                LOG.info("Sleeping for {} after update", formatter.print(sleepDuration.toPeriod()));
                Thread.sleep(sleepDuration.getMillis());
            }
        } finally {
            channelPool.shutdownNow();
        }
    }
}
public class AuthorizationEndpoint {
    /**
     * We can grant a token and return it with implicit approval.
     *
     * Builds an implicit-grant token request, obtains the access token and appends it
     * to the redirect; any OAuth2 failure is converted into an error redirect instead
     * of propagating to the caller.
     */
    private String getImplicitGrantResponse(AuthorizationRequest authorizationRequest) {
        try {
            TokenRequest tokenRequest = requestFactory.createTokenRequest(authorizationRequest, "implicit");
            OAuth2Request storedOAuth2Request = requestFactory.createOAuth2Request(authorizationRequest);
            OAuth2AccessToken accessToken = getAccessTokenForImplicitGrant(tokenRequest, storedOAuth2Request);
            // A null token means the implicit flow is not supported for this request.
            if (isNull(accessToken)) {
                throw new UnsupportedResponseTypeException("Unsupported response type: token");
            }
            return appendAccessToken(authorizationRequest, accessToken);
        } catch (OAuth2Exception e) {
            // Redirect back to the client with the error encoded in the fragment (true).
            return getUnsuccessfulRedirect(authorizationRequest, e, true);
        }
    }
}
public class PayPreResponse { /** * 第三方的支付流水号 */ @ Override public PayPreResponse retcode ( int retcode ) { } }
this . retcode = retcode ; this . retinfo = PayRetCodes . retInfo ( retcode ) ; return this ;
public class RateLimiterBucket { /** * Returns the number of millis until the period resets . * @ param period the period * @ return millis until period resets */ public long getResetMillis ( RateBucketPeriod period ) { } }
long now = System . currentTimeMillis ( ) ; long periodBoundary = getPeriodBoundary ( now , period ) ; return periodBoundary - now ;
public class CPOptionPersistenceImpl { /** * Returns the last cp option in the ordered set where groupId = & # 63 ; . * @ param groupId the group ID * @ param orderByComparator the comparator to order the set by ( optionally < code > null < / code > ) * @ return the last matching cp option * @ throws NoSuchCPOptionException if a matching cp option could not be found */ @ Override public CPOption findByGroupId_Last ( long groupId , OrderByComparator < CPOption > orderByComparator ) throws NoSuchCPOptionException { } }
CPOption cpOption = fetchByGroupId_Last ( groupId , orderByComparator ) ; if ( cpOption != null ) { return cpOption ; } StringBundler msg = new StringBundler ( 4 ) ; msg . append ( _NO_SUCH_ENTITY_WITH_KEY ) ; msg . append ( "groupId=" ) ; msg . append ( groupId ) ; msg . append ( "}" ) ; throw new NoSuchCPOptionException ( msg . toString ( ) ) ;
public class AtomMetadataWriter { /** * Write a { @ code < link > } element for a given entity . * @ param entity The given entity . * @ throws XMLStreamException If unable to write to stream * @ throws ODataEdmException if unable to check entity types */ public void writeEntryEntityLink ( Object entity ) throws XMLStreamException , ODataEdmException { } }
EntityType entityType = getAndCheckEntityType ( entityDataModel , entity . getClass ( ) ) ; xmlWriter . writeStartElement ( ATOM_LINK ) ; if ( entityType . isReadOnly ( ) ) { xmlWriter . writeAttribute ( REL , SELF ) ; } else { xmlWriter . writeAttribute ( REL , EDIT ) ; } xmlWriter . writeAttribute ( TITLE , entityType . getName ( ) ) ; xmlWriter . writeAttribute ( HREF , getEntityWithKey ( entity ) ) ; xmlWriter . writeEndElement ( ) ;
public class Searches { /** * Extracts the last stream id from the filter string passed as part of the elasticsearch query . This is used later * to pass to possibly existing message decorators for stream - specific configurations . * The assumption is that usually ( when listing / searching messages for a stream ) only a single stream filter is passed . * When this is not the case , only the last stream id will be taked into account . * This is currently a workaround . A better solution would be to pass the stream id which is supposed to be the scope * for a search query as a separate parameter . * @ param filter the filter string like " streams : xxxyyyzzz " * @ return the optional stream id */ public static Optional < String > extractStreamId ( String filter ) { } }
if ( isNullOrEmpty ( filter ) ) { return Optional . empty ( ) ; } final Matcher streamIdMatcher = filterStreamIdPattern . matcher ( filter ) ; if ( streamIdMatcher . find ( ) ) { return Optional . of ( streamIdMatcher . group ( 2 ) ) ; } return Optional . empty ( ) ;
public class Batcher {
    /**
     * Sends _all_ the queued objects at once to the processor block.
     * After this method returns, all inbox objects will be processed.
     *
     * @param waitForAllToFinish wait until all objects are processed. If set to True,
     *                           need to make sure not to call flushAll in the same
     *                           WorkExecutor used by the batcher as it will result to
     *                           deadlock.
     */
    public void flushAll(boolean waitForAllToFinish) {
        Log.v(Log.TAG_BATCHER, "%s: flushing all objects (wait=%b)", this, waitForAllToFinish);
        // Mark the flush in progress and cancel any pending scheduled batch.
        synchronized (mutex) {
            isFlushing = true;
            unschedule();
        }
        // Drain the inbox in batches until it is observed empty under the lock.
        while (true) {
            ScheduledFuture future = null;
            synchronized (mutex) {
                if (inbox.size() == 0)
                    break; // Nothing to do
                // Snapshot and clear under the lock so producers can keep adding.
                final List<T> toProcess = new ArrayList<T>(inbox);
                inbox.clear();
                mutex.notifyAll();
                // Note: workExecutor is locked while still holding mutex — this lock
                // ordering must be kept consistent with the rest of the class.
                synchronized (workExecutor) {
                    if (!workExecutor.isShutdown()) {
                        future = workExecutor.schedule(new Runnable() {
                            @Override
                            public void run() {
                                processor.process(toProcess);
                                synchronized (mutex) {
                                    lastProcessedTime = System.currentTimeMillis();
                                }
                            }
                        }, 0, TimeUnit.MILLISECONDS);
                    }
                }
            }
            // Optionally block until the batch just scheduled has completed.
            if (waitForAllToFinish) {
                if (future != null && !future.isDone() && !future.isCancelled()) {
                    try {
                        future.get();
                    } catch (Exception e) {
                        Log.e(Log.TAG_BATCHER, "%s: Error while waiting for pending future " + "when flushing all items", e, this);
                    }
                }
            }
        }
        synchronized (mutex) {
            isFlushing = false;
        }
    }
}
public class CmsPublishProject {
    /**
     * Override to display additional options in the lock dialog.<p>
     *
     * Builds the HTML for the "publish siblings", "publish subresources" and
     * "publish related resources" options; options that are not shown as
     * checkboxes are still emitted as hidden inputs so the form state round-trips.
     *
     * @return html code to display additional options
     */
    public String buildPublishOptions() {
        // show only for direct publish actions
        StringBuffer result = new StringBuffer(128);
        boolean showOptionSiblings = (isMultiOperation() || isOperationOnFolder() || (isDirectPublish() && hasSiblings() && hasCorrectLockstate()));
        boolean showOptionSubresources = (isMultiOperation() || isOperationOnFolder());
        result.append("<p>");
        if (showOptionSiblings) {
            // show only for multi resource operation or if resource has siblings and correct lock state
            if (!isMultiOperation() && !isOperationOnFolder()) {
                result.append(key(Messages.GUI_DELETE_WARNING_SIBLINGS_0));
                result.append("<br>");
            }
            result.append("<input type='checkbox' name='");
            result.append(PARAM_PUBLISHSIBLINGS);
            result.append("' value='true' onclick=\"reloadReport();\"");
            if (Boolean.valueOf(getParamPublishsiblings()).booleanValue()) {
                result.append(" checked='checked'");
            }
            result.append(">&nbsp;");
            result.append(key(Messages.GUI_PUBLISH_ALLSIBLINGS_0));
            result.append("<br>\n");
        } else {
            // Not shown: carry the current value through a hidden input instead.
            result.append("<input type='hidden' name='");
            result.append(PARAM_PUBLISHSIBLINGS);
            result.append("' value='");
            result.append(Boolean.valueOf(getParamPublishsiblings()));
            result.append("'");
            if (Boolean.valueOf(getParamPublishsiblings()).booleanValue()) {
                result.append(" checked='checked'");
            }
            result.append(">\n");
        }
        if (showOptionSubresources) {
            // at least one folder is selected, show "publish subresources" checkbox
            result.append("<input type='checkbox' name='");
            result.append(PARAM_SUBRESOURCES);
            result.append("' value='true' onclick=\"reloadReport();\"");
            if (Boolean.valueOf(getParamSubresources()).booleanValue()) {
                result.append(" checked='checked'");
            }
            result.append(">&nbsp;");
            if (isMultiOperation()) {
                result.append(key(Messages.GUI_PUBLISH_MULTI_SUBRESOURCES_0));
            } else {
                result.append(key(Messages.GUI_PUBLISH_SUBRESOURCES_0));
            }
            result.append("<br>\n");
        } else {
            // Not shown: hidden input keeps the subresources flag round-tripping.
            result.append("<input type='hidden' name='");
            result.append(PARAM_SUBRESOURCES);
            result.append("' value='");
            result.append(Boolean.valueOf(getParamSubresources()));
            result.append("'");
            if (Boolean.valueOf(getParamSubresources()).booleanValue()) {
                result.append(" checked='checked'");
            }
            result.append(">\n");
        }
        // code for the 'publish related resources' button
        // Forced mode disables the checkbox for everyone except VFS managers.
        boolean disabled = false;
        if ((OpenCms.getWorkplaceManager().getDefaultUserSettings().getPublishRelatedResources() == CmsDefaultUserSettings.PUBLISH_RELATED_RESOURCES_MODE_FORCE) && !OpenCms.getRoleManager().hasRole(getCms(), CmsRole.VFS_MANAGER)) {
            disabled = true;
        }
        result.append("<input type='checkbox' name='");
        result.append(PARAM_RELATEDRESOURCES);
        result.append("' value='true' onclick=\"reloadReport();\"");
        if (Boolean.valueOf(getParamRelatedresources()).booleanValue()) {
            result.append(" checked='checked'");
        }
        if (disabled) {
            result.append(" disabled='disabled'");
        }
        result.append(">&nbsp;");
        result.append(key(Messages.GUI_PUBLISH_RELATED_RESOURCES_0));
        result.append("<br>\n");
        result.append("</p>\n");
        return result.toString();
    }
}
public class TileRow { /** * Set the tile data from an image * @ param image * image * @ param imageFormat * image format * @ throws IOException * upon failure */ public void setTileData ( BufferedImage image , String imageFormat ) throws IOException { } }
byte [ ] bytes = ImageUtils . writeImageToBytes ( image , imageFormat ) ; setTileData ( bytes ) ;
public class Auth { /** * Process a given url using the current authentication mode . * @ param url * url to access behind authentication * @ return * the given url processed using the right authentication mode */ public static String usingAuthentication ( String url ) { } }
if ( authenticationTypes . BASIC . toString ( ) . equals ( getInstance ( ) . authenticationType ) ) { return url . replace ( "://" , "://" + getLogin ( ) + ":" + getPassword ( ) + "@" ) ; } return url ;
public class DFA2ETFWriter {
    /**
     * Writes the type of the edge. A DFA edge contains one label, named 'letter',
     * of type 'letter'.
     *
     * @param pw the Writer.
     */
    @Override
    protected void writeEdge(PrintWriter pw) {
        // Emit the fixed ETF edge-type section: one 'letter' attribute of type 'letter'.
        pw.println("begin edge");
        pw.println("letter:letter");
        pw.println("end edge");
    }
}
public class DescribeInternetGatewaysResult { /** * Information about one or more internet gateways . * @ param internetGateways * Information about one or more internet gateways . */ public void setInternetGateways ( java . util . Collection < InternetGateway > internetGateways ) { } }
if ( internetGateways == null ) { this . internetGateways = null ; return ; } this . internetGateways = new com . amazonaws . internal . SdkInternalList < InternetGateway > ( internetGateways ) ;
public class CollectionManager {
    /**
     * Gets iterator for character range from highest unicode block set starting position.
     * Range from highest starting position which is less than supplied value code point.
     *
     * @param obj the top of the character range
     * @return the bsh iterator
     */
    public Iterator<String> getBshIterator(final Character obj) {
        Integer value = Integer.valueOf(obj.charValue());
        // 33 ('!') is the first printable non-space ASCII code point; it seeds the scan
        // so any value below it falls back to a range starting at 0.
        int check = 33, start = 0;
        // Walk unicodeBlockStarts until the candidate exceeds the value; 'start' keeps
        // the last block start that was still <= value.
        // NOTE(review): assumes unicodeBlockStarts is sorted ascending — confirm.
        for (int i : unicodeBlockStarts)
            if (check <= value) {
                start = check;
                check = i;
            } else break;
        // Materialize every code point in [start, value] as a single-char String.
        return IntStream.rangeClosed(start, value).boxed().map(Character::toChars).map(String::valueOf).iterator();
    }
}
public class IsDefined {
    /**
     * used for older compiled code in ra files
     *
     * Walks the variable path (scope, then each segment) and reports whether every
     * segment resolves; any resolution failure — including a thrown error — yields false.
     */
    public static boolean invoke(PageContext pc, String[] varNames, boolean allowNull) {
        int scope = VariableInterpreter.scopeString2Int(pc.ignoreScopes(), varNames[0]);
        // Sentinel for "undefined": Null.NULL when null values count as defined,
        // plain null otherwise.
        Object defVal = allowNull ? Null.NULL : null;
        try {
            Object coll = VariableInterpreter.scope(pc, scope, false);
            // Object coll = pc.scope((int) scope);
            // When the first segment named a real scope, skip it (start at 1).
            for (int i = scope == Scope.SCOPE_UNDEFINED ? 0 : 1; i < varNames.length; i++) {
                coll = pc.getVariableUtil().getCollection(pc, coll, varNames[i], defVal);
                if (coll == defVal) return false;
            }
        } catch (Throwable t) {
            // Deliberate broad catch: an unresolvable path means "not defined",
            // but fatal throwables are still rethrown.
            ExceptionUtil.rethrowIfNecessary(t);
            return false;
        }
        return true;
    }
}
public class BaseBigtableInstanceAdminClient { /** * Partially updates an instance within a project . * < p > Sample code : * < pre > < code > * try ( BaseBigtableInstanceAdminClient baseBigtableInstanceAdminClient = BaseBigtableInstanceAdminClient . create ( ) ) { * Instance instance = Instance . newBuilder ( ) . build ( ) ; * FieldMask updateMask = FieldMask . newBuilder ( ) . build ( ) ; * PartialUpdateInstanceRequest request = PartialUpdateInstanceRequest . newBuilder ( ) * . setInstance ( instance ) * . setUpdateMask ( updateMask ) * . build ( ) ; * Instance response = baseBigtableInstanceAdminClient . partialUpdateInstanceAsync ( request ) . get ( ) ; * < / code > < / pre > * @ param request The request object containing all of the parameters for the API call . * @ throws com . google . api . gax . rpc . ApiException if the remote call fails */ @ BetaApi ( "The surface for long-running operations is not stable yet and may change in the future." ) public final OperationFuture < Instance , UpdateInstanceMetadata > partialUpdateInstanceAsync ( PartialUpdateInstanceRequest request ) { } }
return partialUpdateInstanceOperationCallable ( ) . futureCall ( request ) ;
public class XNodeSetForDOM { /** * Cast result object to a nodelist . Always issues an error . * @ return null * @ throws javax . xml . transform . TransformerException */ public NodeIterator nodeset ( ) throws javax . xml . transform . TransformerException { } }
return ( m_origObj instanceof NodeIterator ) ? ( NodeIterator ) m_origObj : super . nodeset ( ) ;
public class FraggleManager {
    /**
     * Decides what to do with the backstack.
     *
     * The default behavior is as follows: FraggleManager will determine if the Fragment has a
     * {@link com.sefford.fraggle.interfaces.FraggleFragment#customizedOnBackPressed() customized action(s) for backpressing}.
     * If so, the Fraggle Manager will execute its
     * {@link com.sefford.fraggle.interfaces.FraggleFragment#onBackPressed() onBackPressed()} method.
     * If the Fragment does not have any kind of custom action, then the FraggleManager will try
     * to determine if there is a {@link com.sefford.fraggle.interfaces.FraggleFragment#onBackPressedTarget()}.
     * If positive, the FraggleManager will pop until it finds the Fragment. Otherwise will pop the
     * inmediate Fragment and execute its
     * {@link com.sefford.fraggle.interfaces.FraggleFragment#onFragmentVisible()}.
     *
     * @param containerId Target container ID
     */
    public void popBackStack(int containerId) {
        // NOTE(review): findFragmentById may return null for an empty container;
        // the cast below would then NPE — confirm callers guarantee a fragment exists.
        FraggleFragment currentFragment = (FraggleFragment) fm.findFragmentById(containerId);
        if (!currentFragment.customizedOnBackPressed()) {
            if (currentFragment.onBackPressedTarget().isEmpty()) {
                // No explicit target: pop one entry and notify the newly visible fragment.
                fm.popBackStackImmediate();
                if (fm.getBackStackEntryCount() >= 1) {
                    peek().onFragmentVisible();
                }
            } else {
                // Clean all until containerId
                popBackStack(currentFragment.onBackPressedTarget(), 0);
            }
        } else {
            // The fragment handles back-press itself.
            currentFragment.onBackPressed();
        }
    }
}
public class JobGraph { /** * Auxiliary method to collect all vertices which are reachable from the input vertices . * @ param jv * the currently considered job vertex * @ param collector * a temporary list to store the vertices that have already been visisted */ private void collectVertices ( final AbstractJobVertex jv , final List < AbstractJobVertex > collector ) { } }
if ( jv == null ) { final Iterator < AbstractJobInputVertex > iter = getInputVertices ( ) ; while ( iter . hasNext ( ) ) { collectVertices ( iter . next ( ) , collector ) ; } } else { if ( ! collector . contains ( jv ) ) { collector . add ( jv ) ; } else { return ; } for ( int i = 0 ; i < jv . getNumberOfForwardConnections ( ) ; i ++ ) { collectVertices ( jv . getForwardConnection ( i ) . getConnectedVertex ( ) , collector ) ; } }
public class ModbusSerialTransport {
    /**
     * Writes the request/response message to the port.
     *
     * After writing, this method waits long enough for the bytes to physically leave
     * the wire (computed from baud rate and framing bits) before notifying listeners,
     * busy-waiting for sub-millisecond delays.
     *
     * @param msg Message to write
     * @throws ModbusIOException If the port throws an error
     */
    private void writeMessage(ModbusMessage msg) throws ModbusIOException {
        open();
        notifyListenersBeforeWrite(msg);
        writeMessageOut(msg);
        long startTime = System.nanoTime();
        // Wait here for the message to have been sent
        // Bytes/sec = baud / (data bits + stop bits + optional parity bit);
        // zero values from the port are replaced by the 8N1 defaults.
        double bytesPerSec = commPort.getBaudRate() / (((commPort.getNumDataBits() == 0) ? 8 : commPort.getNumDataBits()) + ((commPort.getNumStopBits() == 0) ? 1 : commPort.getNumStopBits()) + ((commPort.getParity() == SerialPort.NO_PARITY) ? 0 : 1));
        double delay = 1000000000.0 * msg.getOutputLength() / bytesPerSec;
        double delayMilliSeconds = Math.floor(delay / 1000000);
        double delayNanoSeconds = delay % 1000000;
        try {
            // For delays less than a millisecond, we need to chew CPU cycles unfortunately
            // There are some fiddle factors here to allow for some oddities in the hardware
            if (delayMilliSeconds == 0.0) {
                int priority = Thread.currentThread().getPriority();
                Thread.currentThread().setPriority(Thread.MIN_PRIORITY);
                long end = startTime + ((int) (delayNanoSeconds * 1.3));
                while (System.nanoTime() < end) {
                    // noop
                }
                Thread.currentThread().setPriority(priority);
            } else {
                Thread.sleep((int) (delayMilliSeconds * 1.4), (int) delayNanoSeconds);
            }
        } catch (Exception e) {
            // Best-effort timing only: an interrupted wait is not an I/O failure.
            logger.debug("nothing to do");
        }
        notifyListenersAfterWrite(msg);
    }
}
public class Actors { /** * changed to non recursive due to stackoverflows . . */ private static < T > void awaitSettle ( final List < IPromise < T > > futures , final IPromise result ) { } }
PromiseLatch latch = new PromiseLatch ( futures . size ( ) ) ; latch . getPromise ( ) . then ( ( ) -> result . complete ( futures , null ) ) ; futures . forEach ( fut -> fut . then ( ( ) -> latch . countDown ( ) ) ) ;
public class PriorityHAC {
    /**
     * Goes through the <tt>merges</tt> array in order from last merge to first, and sets the
     * cluster assignment for each data point based on the merge list.
     *
     * NOTE(review): the original javadoc claims a null <tt>designations</tt> array is created
     * automatically, but the code dereferences it immediately — confirm callers never pass null.
     *
     * @param designations the array to store the designations in
     * @param clusters the number of clusters to assume
     * @param merges the array of merge pairs
     * @return the array storing the designations
     */
    protected static int[] assignClusterDesignations(int[] designations, int clusters, int[] merges) {
        Arrays.fill(designations, -1);
        int nextCluster = 0;
        for (int i = 0; i < merges.length; i++) {
            final int point = merges[i];
            if (designations[point] != -1) {
                continue; // already assigned earlier in the merge list
            }
            if (nextCluster < clusters) {
                // Still room at the top level: start a new cluster id.
                designations[point] = nextCluster++;
            } else {
                // Absorbed into the cluster of the previously processed merge entry.
                // (The new cluster is always in an odd index, so its parent is the
                // even index to the left.)
                designations[point] = designations[merges[i - 1]];
            }
        }
        return designations;
    }
}
public class SubversionManager {
    /**
     * Revert all the working copy changes.
     *
     * Executes the revert on the build's workspace node via a remote callable,
     * using the build's SVN authentication provider.
     */
    public void revertWorkingCopy() throws IOException, InterruptedException {
        build.getWorkspace().act(new RevertWorkingCopyCallable(getLocation(), getSvnAuthenticationProvider(build), buildListener));
    }
}
public class Vector4d {
    /**
     * Fused multiply-add: dest = this + a * b, applied component-wise.
     *
     * @see org.joml.Vector4dc#fma(org.joml.Vector4dc, org.joml.Vector4dc, org.joml.Vector4d)
     */
    public Vector4d fma(Vector4dc a, Vector4dc b, Vector4d dest) {
        dest.x = x + a.x() * b.x();
        dest.y = y + a.y() * b.y();
        dest.z = z + a.z() * b.z();
        dest.w = w + a.w() * b.w();
        return dest;
    }
}
public class InputLambdaProcessorDescriptionMarshaller {
    /**
     * Marshall the given parameter object.
     *
     * Writes the resource ARN and role ARN through the protocol marshaller;
     * any failure is wrapped in an SdkClientException.
     */
    public void marshall(InputLambdaProcessorDescription inputLambdaProcessorDescription, ProtocolMarshaller protocolMarshaller) {
        if (inputLambdaProcessorDescription == null) {
            throw new SdkClientException("Invalid argument passed to marshall(...)");
        }
        try {
            protocolMarshaller.marshall(inputLambdaProcessorDescription.getResourceARN(), RESOURCEARN_BINDING);
            protocolMarshaller.marshall(inputLambdaProcessorDescription.getRoleARN(), ROLEARN_BINDING);
        } catch (Exception e) {
            // Cause is preserved for diagnosis.
            throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
        }
    }
}
public class FindBugs2 {
    /**
     * Build the classpath from project codebases and system codebases.
     *
     * @throws InterruptedException if the analysis thread is interrupted
     * @throws IOException if an I/O error occurs
     * @throws CheckedAnalysisException
     */
    private void buildClassPath() throws InterruptedException, IOException, CheckedAnalysisException {
        IClassPathBuilder builder = classFactory.createClassPathBuilder(bugReporter);
        {
            // 'seen' de-duplicates paths that appear both as application (true)
            // and auxiliary (false) entries — the first occurrence wins.
            HashSet<String> seen = new HashSet<>();
            for (String path : project.getFileArray()) {
                if (seen.add(path)) {
                    builder.addCodeBase(classFactory.createFilesystemCodeBaseLocator(path), true);
                }
            }
            for (String path : project.getAuxClasspathEntryList()) {
                if (seen.add(path)) {
                    builder.addCodeBase(classFactory.createFilesystemCodeBaseLocator(path), false);
                }
            }
        }
        builder.scanNestedArchives(analysisOptions.scanNestedArchives);
        builder.build(classPath, progressReporter);
        appClassList = builder.getAppClassList();
        if (PROGRESS) {
            System.out.println(appClassList.size() + " classes scanned");
        }
        // If any of the application codebases contain source code,
        // add them to the source path.
        // Also, use the last modified time of application codebases
        // to set the project timestamp.
        List<String> pathNames = new ArrayList<>();
        for (Iterator<? extends ICodeBase> i = classPath.appCodeBaseIterator(); i.hasNext();) {
            ICodeBase appCodeBase = i.next();
            if (appCodeBase.containsSourceFiles()) {
                String pathName = appCodeBase.getPathName();
                if (pathName != null) {
                    pathNames.add(pathName);
                }
            }
            project.addTimestamp(appCodeBase.getLastModifiedTime());
        }
        project.addSourceDirs(pathNames);
    }
}
public class NlsTemplateImpl { /** * Called from { @ link # translate ( Locale ) } if localization failed . * @ param e is the { @ link Exception } . * @ return the fallback message . */ protected String translateFallback ( Exception e ) { } }
String messageId = this . name + ":" + this . key ; LOG . warn ( "Failed to resolve message (" + messageId + "): " + e . getMessage ( ) ) ; return translateFallback ( messageId ) ;
public class ApiOvhDedicatedserver { /** * Add a new SPLA license * REST : POST / dedicated / server / { serviceName } / spla * @ param type [ required ] License type * @ param serialNumber [ required ] License serial number * @ param serviceName [ required ] The internal name of your dedicated server */ public Long serviceName_spla_POST ( String serviceName , String serialNumber , OvhSplaTypeEnum type ) throws IOException { } }
String qPath = "/dedicated/server/{serviceName}/spla" ; StringBuilder sb = path ( qPath , serviceName ) ; HashMap < String , Object > o = new HashMap < String , Object > ( ) ; addBody ( o , "serialNumber" , serialNumber ) ; addBody ( o , "type" , type ) ; String resp = exec ( qPath , "POST" , sb . toString ( ) , o ) ; return convertTo ( resp , Long . class ) ;
public class Vector3f {
    /**
     * Component-wise addition: dest = this + (x, y, z).
     *
     * @see org.joml.Vector3fc#add(float, float, float, org.joml.Vector3f)
     */
    public Vector3f add(float x, float y, float z, Vector3f dest) {
        dest.x = this.x + x;
        dest.y = this.y + y;
        dest.z = this.z + z;
        return dest;
    }
}
public class QueryError {
    /**
     * Gets the reason value for this QueryError.
     *
     * @return reason
     */
    public com.google.api.ads.adwords.axis.v201809.cm.QueryErrorReason getReason() {
        // Plain accessor.
        return reason;
    }
}
public class ManagedObject {
    /**
     * Driven just before the object changes to prepared state within a transaction,
     * if requested. This is only redriven at recovery time if the the ManagedObject
     * explicitly re-requests this at recovery time. Any ManagedObject requesting this
     * callback should override this method.
     *
     * @param transaction causing the prepare.
     * @throws ObjectManagerException
     */
    public void prePrepare(Transaction transaction) throws ObjectManagerException {
        // Entry/exit tracing only — the base implementation intentionally does no work.
        if (Tracing.isAnyTracingEnabled() && trace.isEntryEnabled())
            trace.entry(this, cclass, "prePrepare", new Object[] { transaction });
        // By default does nothing, unless overridden.
        if (Tracing.isAnyTracingEnabled() && trace.isEntryEnabled())
            trace.exit(this, cclass, "prePrepare");
    }
}
public class FischerRecognition {
    /**
     * Create a new tetrahedral stereocenter of the given focus and neighboring bonds.
     * This is an internal method and is presumed the atom can support tetrahedral
     * stereochemistry and it has three or four explicit neighbors. The stereo element
     * is only created if the local arrangement looks like a Fischer projection.
     *
     * @param focus central atom
     * @param bonds adjacent bonds
     * @return a stereo element, or null if one could not be created
     */
    static ITetrahedralChirality newTetrahedralCenter(IAtom focus, IBond[] bonds) {
        // obtain the bonds of a centre arranged by cardinal direction
        IBond[] cardinalBonds = cardinalBonds(focus, bonds);
        if (cardinalBonds == null)
            return null;
        // vertical bonds must be present and be sigma and planar (no wedge/hatch)
        if (!isPlanarSigmaBond(cardinalBonds[NORTH]) || !isPlanarSigmaBond(cardinalBonds[SOUTH]))
            return null;
        // one of the horizontal bonds can be missing but not both
        if (cardinalBonds[EAST] == null && cardinalBonds[WEST] == null)
            return null;
        // the neighbors of our tetrahedral centre, the EAST or WEST may
        // be missing so we initialise these with the implicit (focus)
        IAtom[] neighbors = new IAtom[] { cardinalBonds[NORTH].getOther(focus), focus, cardinalBonds[SOUTH].getOther(focus), focus };
        // fill in the EAST/WEST bonds, if they are defined, single and planar we add the
        // connected atom. else if bond is defined (but not single or planar) or we
        // have 4 neighbours something is wrong and we skip this atom
        if (isPlanarSigmaBond(cardinalBonds[EAST])) {
            neighbors[EAST] = cardinalBonds[EAST].getOther(focus);
        } else if (cardinalBonds[EAST] != null || bonds.length == 4) {
            return null;
        }
        if (isPlanarSigmaBond(cardinalBonds[WEST])) {
            neighbors[WEST] = cardinalBonds[WEST].getOther(focus);
        } else if (cardinalBonds[WEST] != null || bonds.length == 4) {
            return null;
        }
        // N,E,S,W ordering of the neighbors corresponds to anti-clockwise winding.
        return new TetrahedralChirality(focus, neighbors, ANTI_CLOCKWISE);
    }
}
public class IDDImpl {

    /**
     * Sets the YSIZE attribute and notifies registered adapters of the change.
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @generated
     */
    public void setYSIZE(Integer newYSIZE) {
        // Keep the previous value so listeners can see old -> new in the event.
        Integer oldYSIZE = ysize;
        ysize = newYSIZE;
        // Standard EMF idiom: only build and fire the notification when a
        // listener is actually attached.
        if (eNotificationRequired())
            eNotify(new ENotificationImpl(this, Notification.SET, AfplibPackage.IDD__YSIZE, oldYSIZE, ysize));
    }
}
public class AffineTransformation {

    /**
     * Add a translation operation to the matrix.
     *
     * @param v translation vector; its length must equal the transformation
     *          dimensionality {@code dim} (only checked when assertions are
     *          enabled with {@code -ea})
     */
    public void addTranslation(double[] v) {
        assert (v.length == dim);
        // reset inverse transformation - needs recomputation.
        inv = null;
        // Build a (dim+1)x(dim+1) homogeneous identity matrix with the
        // translation vector placed in the last column...
        double[][] homTrans = unitMatrix(dim + 1);
        for (int i = 0; i < dim; i++) {
            homTrans[i][dim] = v[i];
        }
        // ...and left-multiply it onto the accumulated transformation, so the
        // translation is applied after the existing operations.
        trans = times(homTrans, trans);
    }
}
public class BaseNCodec { /** * Tests a given byte array to see if it contains any characters within the alphabet or PAD . * Intended for use in checking line - ending arrays * @ param arrayOctet * byte array to test * @ return { @ code true } if any byte is a valid character in the alphabet or PAD ; { @ code false } otherwise */ protected boolean containsAlphabetOrPad ( final byte [ ] arrayOctet ) { } }
if ( arrayOctet == null ) { return false ; } for ( final byte element : arrayOctet ) { if ( PAD == element || isInAlphabet ( element ) ) { return true ; } } return false ;
public class OpenAPIValidator {

    /** {@inheritDoc} */
    @Override
    public void validate(ValidationHelper helper, Context context, String key, OpenAPI t) {
        if (t != null) {
            String openapiVersion = t.getOpenapi();
            // 'openapi', 'info' and 'paths' are all required top-level fields;
            // each missing one is reported as a validation event.
            ValidatorUtils.validateRequiredField(openapiVersion, context, "openapi").ifPresent(helper::addValidationEvent);
            ValidatorUtils.validateRequiredField(t.getInfo(), context, "info").ifPresent(helper::addValidationEvent);
            ValidatorUtils.validateRequiredField(t.getPaths(), context, "paths").ifPresent(helper::addValidationEvent);
            // Only the 3.x version family is accepted by this validator.
            if (openapiVersion != null && !openapiVersion.startsWith("3.")) {
                final String message = Tr.formatMessage(tc, "openAPIVersionInvalid", openapiVersion);
                helper.addValidationEvent(new ValidationEvent(ValidationEvent.Severity.ERROR, context.getLocation(), message));
            }
            List<Tag> tags = t.getTags();
            if (tags != null) {
                // Tag names must be unique: Set.add returning false signals a duplicate.
                Set<String> tagNames = new HashSet<String>();
                for (Tag tag : tags) {
                    if (!tagNames.add(tag.getName())) {
                        final String message = Tr.formatMessage(tc, "openAPITagIsNotUnique", tag.getName());
                        helper.addValidationEvent(new ValidationEvent(ValidationEvent.Severity.ERROR, context.getLocation(), message));
                    }
                }
            }
        }
    }
}
public class ChannelFrameworkImpl { /** * @ see com . ibm . wsspi . channelfw . ChannelFramework # getRunningChannels ( ) */ @ Override public synchronized ChannelData [ ] getRunningChannels ( ) { } }
if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) { Tr . entry ( tc , "getRunningChannels" ) ; } // Note , runtime has child data objects so duplicates may be found . List < ChannelData > list = new ArrayList < ChannelData > ( ) ; for ( ChannelContainer channel : this . channelRunningMap . values ( ) ) { list . add ( channel . getChannelData ( ) . getParent ( ) ) ; } if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) { Tr . exit ( tc , "getRunningChannels" ) ; } return list . toArray ( new ChannelData [ list . size ( ) ] ) ;
public class PathBuilder { /** * Create a new Enum path * @ param < A > * @ param property property name * @ param type property type * @ return property path */ public < A extends Enum < A > > EnumPath < A > getEnum ( String property , Class < A > type ) { } }
validate ( property , type ) ; return super . createEnum ( property , type ) ;
public class MpxjQuery {

    /**
     * This method lists all tasks defined in the file in a hierarchical
     * format, reflecting the parent-child relationships between them.
     *
     * @param file MPX file
     */
    private static void listHierarchy(ProjectFile file) {
        // Print each top-level task, then recurse into its children via the
        // (Task, String) overload, which handles per-level indentation.
        for (Task task : file.getChildTasks()) {
            System.out.println("Task: " + task.getName() + "\t" + task.getStart() + "\t" + task.getFinish());
            listHierarchy(task, " ");
        }
        // Blank line terminates the listing.
        System.out.println();
    }
}
public class SecureASTCustomizer {

    /**
     * Ensures that every star import ends with {@code .*}, as this is the
     * expected syntax in import checks.
     *
     * @param starImports the raw star-import entries
     * @return an unmodifiable list in which every entry ends with {@code .*}
     */
    private static List<String> normalizeStarImports(List<String> starImports) {
        List<String> normalized = new ArrayList<String>(starImports.size());
        for (String entry : starImports) {
            // Decide which suffix (if any) is needed to reach the ".*" form.
            final String suffix;
            if (entry.endsWith(".*")) {
                suffix = "";
            } else if (entry.endsWith(".")) {
                suffix = "*";
            } else {
                suffix = ".*";
            }
            normalized.add(entry + suffix);
        }
        return Collections.unmodifiableList(normalized);
    }
}
public class Immutables { /** * Creates an immutable copy of the specified set . * @ param set the set to copy from * @ return an immutable set copy */ public static < T > Set < T > immutableSetCopy ( Set < T > set ) { } }
if ( set == null ) return null ; if ( set . isEmpty ( ) ) return Collections . emptySet ( ) ; if ( set . size ( ) == 1 ) return Collections . singleton ( set . iterator ( ) . next ( ) ) ; Set < ? extends T > copy = ObjectDuplicator . duplicateSet ( set ) ; if ( copy == null ) // Set uses Collection copy - ctor copy = attemptCopyConstructor ( set , Collection . class ) ; if ( copy == null ) copy = new HashSet < > ( set ) ; return new ImmutableSetWrapper < > ( copy ) ;
public class MessageStoreImpl {

    /**
     * Removes and returns the first item stream accepted by the given filter.
     * (non-Javadoc)
     * @see com.ibm.ws.sib.store.MessageStore#destructiveGet(com.ibm.ws.sib.store.Filter, com.ibm.ws.sib.msgstore.Transaction)
     */
    @Override
    public ItemStream removeFirstMatching(Filter filter, Transaction transaction) throws MessageStoreException {
        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
            SibTr.entry(this, tc, "removeFirstMatching", new Object[]{filter, transaction});
        if (_rootMembership != null) {
            // Normal path: delegate the destructive get to the root membership.
            ItemStream item = _rootMembership.removeFirstMatchingItemStream(filter, (PersistentTransaction) transaction);
            if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
                SibTr.exit(this, tc, "removeFirstMatching", "return=" + item);
            return item;
        } else {
            // The store never initialised: report unavailability, chaining the
            // first recorded startup failure when one exists.
            MessageStoreUnavailableException msue;
            if (!_startupExceptions.isEmpty()) {
                if (TraceComponent.isAnyTracingEnabled() && tc.isEventEnabled())
                    SibTr.event(this, tc, "Operation not possible as MessageStore failed to start!");
                msue = new MessageStoreUnavailableException("Operation not possible as MessageStore failed to start!", _startupExceptions.get(0));
            } else {
                if (TraceComponent.isAnyTracingEnabled() && tc.isEventEnabled())
                    SibTr.event(this, tc, "Operation not possible as MessageStore is unavailable!");
                msue = new MessageStoreUnavailableException("Operation not possible as MessageStore is unavailable!");
            }
            if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
                SibTr.exit(this, tc, "removeFirstMatching");
            throw msue;
        }
    }
}
public class AbstractScheduler { /** * Collects all execution vertices with the state ASSIGNED starting from the given collection of start vertices and * deploys them on the assigned { @ link AllocatedResource } objects . * @ param startVertices * the collection of execution vertices to start the deployment from */ public void deployAssignedVertices ( final Collection < ExecutionVertex > startVertices ) { } }
JobID jobID = null ; final Map < AbstractInstance , List < ExecutionVertex > > verticesToBeDeployed = new HashMap < AbstractInstance , List < ExecutionVertex > > ( ) ; final Set < ExecutionVertex > alreadyVisited = new HashSet < ExecutionVertex > ( ) ; for ( final ExecutionVertex startVertex : startVertices ) { if ( jobID == null ) { jobID = startVertex . getExecutionGraph ( ) . getJobID ( ) ; } findVerticesToBeDeployed ( startVertex , verticesToBeDeployed , alreadyVisited ) ; } if ( ! verticesToBeDeployed . isEmpty ( ) ) { final Iterator < Map . Entry < AbstractInstance , List < ExecutionVertex > > > it2 = verticesToBeDeployed . entrySet ( ) . iterator ( ) ; while ( it2 . hasNext ( ) ) { final Map . Entry < AbstractInstance , List < ExecutionVertex > > entry = it2 . next ( ) ; this . deploymentManager . deploy ( jobID , entry . getKey ( ) , entry . getValue ( ) ) ; } }
public class DistanceComputation {

    /**
     * Normalized early-abandoned Euclidean distance.
     *
     * @param ts1 the first series.
     * @param ts2 the second series; assumed to be at least as long as ts1 — TODO confirm with callers.
     * @param bsfDist the best-so-far normalized distance (used for early abandon).
     * @return the normalized distance sqrt(sum((ts1[i]-ts2[i])^2)) / n, or
     *         {@code Double.NaN} once the partial sum proves the result cannot
     *         beat {@code bsfDist}.
     */
    protected double eculideanDistNormEAbandon(double[] ts1, double[] ts2, double bsfDist) {
        double dist = 0;
        double tsLen = ts1.length;
        // Abandon threshold on the raw squared sum:
        //   sqrt(dist)/tsLen > bsfDist  <=>  dist > (tsLen * bsfDist)^2
        // Direct multiplication replaces Math.pow(x, 2): exact and cheaper.
        double scaled = tsLen * bsfDist;
        double bsf = scaled * scaled;
        for (int i = 0; i < ts1.length; i++) {
            double diff = ts1[i] - ts2[i];
            dist += diff * diff;
            if (dist > bsf) {
                return Double.NaN; // early abandon: cannot beat best-so-far
            }
        }
        return Math.sqrt(dist) / tsLen;
    }
}
public class MultistepUtil {

    /**
     * Adds a step to the given {@link OperationContext} for each operation included in the given map, either
     * using for each step a response node provided in the {@code responses} map, or if the {@code responses} map is empty,
     * creating them and storing them in the {@code responses} map. The response objects are not tied into the overall response
     * to the operation associated with {@code context}. It is the responsibility of the caller to do that.
     * <strong>NOTE:</strong> The given {@code operations} map must provide an iterator over its entry set that provides the entries in the
     * order in which the operations should execute. A {@link LinkedHashMap} is the typical choice.
     *
     * @param context the {@code OperationContext}. Cannot be {@code null}
     * @param operations the operations, each value of which must be a proper OBJECT type model node with a structure describing an operation.
     * @param responses a map of the response nodes, the keys for which match the keys in the {@code operations} param.
     *                  Cannot be {@code null} but may be empty in which case this method will
     *                  create the response nodes and store them in this map.
     * @param <T> the type of the keys in the maps
     * @throws OperationFailedException if there is a problem registering a step for any of the operations
     */
    @SuppressWarnings("unused")
    public static <T> void recordOperationSteps(final OperationContext context, final Map<T, ModelNode> operations, final Map<T, ModelNode> responses) throws OperationFailedException {
        // Delegates to the full variant with the default handler resolver.
        // NOTE(review): the meaning of the trailing 'false, true' flags is not
        // visible here — confirm against the overload's parameter names.
        recordOperationSteps(context, operations, responses, OperationHandlerResolver.DEFAULT, false, true);
    }
}
public class Network {

    /**
     * Generate a dot file (diagram) of the current network infrastructure,
     * including all connected elements and links.
     *
     * @param mo the model to use; it may contain naming services for switches or
     *           nodes that will replace the generic names mainly based on the id number
     * @param out the output dot file to create
     * @param fromLeftToRight if true: force diagram's shapes to be placed side by side (creates larger diagrams)
     * @throws IOException if an error occurred while writing
     */
    public void generateDot(Model mo, String out, boolean fromLeftToRight) throws IOException {
        // try-with-resources guarantees the writer is flushed and closed,
        // even when one of the draw helpers throws.
        try (BufferedWriter dot = new BufferedWriter(new FileWriter(out))) {
            dot.append("digraph G {\n");
            if (fromLeftToRight) {
                // Graphviz left-to-right ranking.
                dot.append("rankdir=LR;\n");
            }
            drawNodes(dot, NamingService.getNodeNames(mo));
            drawSwitches(dot);
            drawLinks(dot);
            dot.append("}\n");
        }
    }
}
public class WebAppConfigurator {

    /**
     * Main configuration API:
     * Do an initialization step, then process web.xml, then process fragments,
     * then do a defaulting step, then, if available, process the web binding and
     * the web extension, then do a finishing step.
     *
     * @throws UnableToAdaptException if any configurator helper recorded errors,
     *         or the module container cannot be adapted
     */
    public void configure() throws UnableToAdaptException {
        boolean webIsMetadataComplete = isMetadataComplete();
        // locate annotations
        // Collapse all of the prior scanner calls into a single step:
        // The resulting merge has obtained target information for all of the
        // target locations, with precedence per the location ordering.
        // What particular targets are obtained is a curious business, as
        // the class selection uses the names of classes in the included
        // location, but obtains the information for each of those classes
        // using the entire class search space.
        // Maybe, move this inside the 'isMetadataComplete' test?
        // The web annotations don't seem to be used except when
        // metadata-complete is false.
        this.webAnnotations = this.moduleContainer.adapt(WebAnnotations.class);
        if (!webIsMetadataComplete) {
            this.webAnnotations.openInfoStore();
        }
        try {
            // Initialization step for every helper.
            for (ServletConfiguratorHelper configHelper : configHelpers) {
                configHelper.configureInit();
            }
            // process web.xml
            if (webApp != null) {
                this.currentLibraryURI = WebApp.DD_NAME;
                this.currentSource = ConfigSource.WEB_XML;
                this.currentMetadataComplete = isMetadataComplete();
                for (ServletConfiguratorHelper configHelper : configHelpers) {
                    configHelper.configureFromWebApp(webApp);
                }
            }
            // process web-fragment.xml (skipped entirely when the web app is
            // metadata-complete)
            if (!webIsMetadataComplete) {
                for (WebFragmentInfo webFragmentItem : this.webAnnotations.getOrderedItems()) {
                    WebFragment webFragment = webFragmentItem.getWebFragment();
                    this.currentLibraryURI = webFragmentItem.getLibraryURI();
                    this.currentSource = ConfigSource.WEB_FRAGMENT;
                    this.currentMetadataComplete = webFragment != null && webFragment.isMetadataComplete();
                    if (webFragment != null) {
                        for (ServletConfiguratorHelper configHelper : configHelpers) {
                            configHelper.configureFromWebFragment(webFragmentItem);
                        }
                    }
                    // Annotation processing per fragment, unless this fragment
                    // declared itself metadata-complete.
                    if (!currentMetadataComplete) {
                        for (ServletConfiguratorHelper configHelper : configHelpers) {
                            configHelper.configureFromAnnotations(webFragmentItem);
                        }
                    }
                    ((WebAppConfiguratorHelper) webAppHelper).processIgnoredMappings(webApp);
                }
            }
            // Defaulting step.
            for (ServletConfiguratorHelper configHelper : configHelpers) {
                configHelper.configureDefaults();
            }
        } finally {
            // Always release the annotation info store if we opened it.
            if (!webIsMetadataComplete) {
                this.webAnnotations.closeInfoStore();
            }
        }
        clearContext();
        // Web binding and extension, when present.
        if (webBnd != null) {
            for (ServletConfiguratorHelper configHelper : configHelpers) {
                configHelper.configureWebBnd(webBnd);
            }
        }
        if (webExt != null) {
            for (ServletConfiguratorHelper configHelper : configHelpers) {
                configHelper.configureWebExt(webExt);
            }
        }
        // Finishing step.
        for (ServletConfiguratorHelper configHelper : configHelpers) {
            configHelper.finish();
        }
        // Accumulated configuration errors abort the adaptation.
        if (this.haveAnyErrorMessages()) {
            throw new UnableToAdaptException(this.getErrorMessageText());
        }
    }
}
public class Schema {

    /**
     * Lookup keyspace/ColumnFamily identifier.
     *
     * @param ksName The keyspace name
     * @param cfName The ColumnFamily name
     * @return The id for the given (ksname, cfname) pair, or null if it has been dropped.
     */
    public UUID getId(String ksName, String cfName) {
        // The map is keyed by the (keyspace, columnfamily) pair.
        return cfIdMap.get(Pair.create(ksName, cfName));
    }
}
public class AlluxioFuseFileSystem { /** * Changes the size of a file . This operation would not succeed because of Alluxio ' s write - once * model . */ @ Override public int truncate ( String path , long size ) { } }
LOG . error ( "Truncate is not supported {}" , path ) ; return - ErrorCodes . EOPNOTSUPP ( ) ;
public class TaintMethodConfig { /** * Checks if the summary needs to be saved or has no information value * @ return true if summary should be saved , false otherwise */ public boolean isInformative ( ) { } }
if ( this == SAFE_CONFIG ) { // these are loaded automatically , do not need to store them return false ; } if ( outputTaint == null ) { return false ; } if ( ! outputTaint . isUnknown ( ) ) { return true ; } if ( outputTaint . hasParameters ( ) ) { return true ; } if ( outputTaint . getRealInstanceClass ( ) != null ) { return true ; } if ( outputTaint . hasTags ( ) || outputTaint . isRemovingTags ( ) ) { return true ; } return false ;
public class LightMetaBean { /** * determine the field names by reflection */ private static String [ ] fieldNames ( Class < ? > beanType ) { } }
Field [ ] fields = Stream . of ( beanType . getDeclaredFields ( ) ) . filter ( f -> ! Modifier . isStatic ( f . getModifiers ( ) ) && f . getAnnotation ( PropertyDefinition . class ) != null ) . toArray ( Field [ ] :: new ) ; List < String > fieldNames = new ArrayList < > ( ) ; for ( int i = 0 ; i < fields . length ; i ++ ) { fieldNames . add ( fields [ i ] . getName ( ) ) ; } return fieldNames . toArray ( new String [ fieldNames . size ( ) ] ) ;
public class JsonService { /** * Get a short value from a { @ link JSONObject } . * @ param jsonObject The object to get the key value from . * @ param key The name of the key to search the value for . * @ return Returns the value for the key in the object . * @ throws JSONException Thrown in case the key could not be found in the JSON object . */ public static Short getShortValue ( JSONObject jsonObject , String key ) throws JSONException { } }
checkArguments ( jsonObject , key ) ; JSONValue value = jsonObject . get ( key ) ; if ( value != null && value . isNumber ( ) != null ) { double number = ( ( JSONNumber ) value ) . doubleValue ( ) ; return new Short ( ( short ) number ) ; } return null ;
public class Expr { /** * Set the recursive value of this atom expression . * @ param type the type of expression * @ param mol the recursive pattern */ private void setRecursive ( Type type , IAtomContainer mol ) { } }
switch ( type ) { case RECURSIVE : this . type = type ; this . value = 0 ; this . left = null ; this . right = null ; this . query = mol ; this . ptrn = null ; break ; default : throw new IllegalArgumentException ( ) ; }
public class AmazonEC2Client {

    /**
     * Describes the specified IPv4 address pools.
     *
     * @param request the DescribePublicIpv4Pools request
     * @return Result of the DescribePublicIpv4Pools operation returned by the service.
     * @sample AmazonEC2.DescribePublicIpv4Pools
     * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribePublicIpv4Pools" target="_top">AWS API Documentation</a>
     */
    @Override
    public DescribePublicIpv4PoolsResult describePublicIpv4Pools(DescribePublicIpv4PoolsRequest request) {
        // Run client-side request handlers/preprocessing, then dispatch.
        request = beforeClientExecution(request);
        return executeDescribePublicIpv4Pools(request);
    }
}
public class GeometryService {

    /**
     * Validates a geometry, focusing on changes at a specific sub-level of the geometry. The sublevel is indicated by
     * passing an array of indexes. The array should uniquely determine a coordinate or subgeometry (linear ring) by
     * recursing through the geometry tree. The only checks are on intersection (for coordinates) and containment (for
     * subgeometries); we don't check on too few coordinates as we want to support incremental creation of polygons.
     *
     * @param geometry The geometry to check.
     * @param index an array of indexes; points to vertex, ring, polygon, etc...
     * @return validation state.
     * @since 1.2.0
     * @deprecated use {@link #validate(Geometry, GeometryIndex)} instead
     */
    @Deprecated
    public static GeometryValidationState validate(Geometry geometry, int[] index) {
        // Convert the legacy int[] path into a GeometryIndex and delegate to
        // the non-deprecated overload.
        return validate(geometry, toIndex(geometry.getGeometryType(), index));
    }
}
public class SAXParser { /** * Parse the content given { @ link org . xml . sax . InputSource } * as XML using the specified * { @ link org . xml . sax . helpers . DefaultHandler } . * @ param is The InputSource containing the content to be parsed . * @ param dh The SAX DefaultHandler to use . * @ throws IllegalArgumentException If the < code > InputSource < / code > object * is < code > null < / code > . * @ throws IOException If any IO errors occur . * @ throws SAXException If any SAX errors occur during processing . * @ see org . xml . sax . DocumentHandler */ public void parse ( InputSource is , DefaultHandler dh ) throws SAXException , IOException { } }
if ( is == null ) { throw new IllegalArgumentException ( "InputSource cannot be null" ) ; } XMLReader reader = this . getXMLReader ( ) ; if ( dh != null ) { reader . setContentHandler ( dh ) ; reader . setEntityResolver ( dh ) ; reader . setErrorHandler ( dh ) ; reader . setDTDHandler ( dh ) ; } reader . parse ( is ) ;
public class AsyncExecutionHandler {

    /**
     * <p>If an {@link AsyncHandler} is defined, any exception which resulted in an error will be
     * available via the <i>onError</i> callback.</p>
     * <p>See {@link ExecutionHandler#onError(InvocationContext, Exception)}</p>
     *
     * @param context the {@link InvocationContext} with information on the proxy invocation
     * @param error the exception which resulted in a request execution failure
     * @since 1.3.0
     */
    @Override
    public void onError(InvocationContext context, Exception error) {
        AsyncHandler<Object> asyncHandler = getAsyncHandler(context);
        if (asyncHandler != null) {
            try {
                // Pass the failure through as an InvocationException, wrapping
                // it first unless it already is one.
                asyncHandler.onError(error instanceof InvocationException ?
                        (InvocationException) error : InvocationException.newInstance(context, error));
            } catch (Exception e) {
                // A user callback must never break the handler: log and continue.
                Log.e(getClass().getSimpleName(), "Callback \"onError\" aborted with an exception.", e);
            }
        }
    }
}
public class AmazonNeptuneClient {

    /**
     * Returns information about DB cluster snapshots. This API action supports pagination.
     *
     * @param request the DescribeDBClusterSnapshots request
     * @return Result of the DescribeDBClusterSnapshots operation returned by the service.
     * @throws DBClusterSnapshotNotFoundException
     *         <i>DBClusterSnapshotIdentifier</i> does not refer to an existing DB cluster snapshot.
     * @sample AmazonNeptune.DescribeDBClusterSnapshots
     * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/neptune-2014-10-31/DescribeDBClusterSnapshots" target="_top">AWS API Documentation</a>
     */
    @Override
    public DescribeDBClusterSnapshotsResult describeDBClusterSnapshots(DescribeDBClusterSnapshotsRequest request) {
        // Run client-side request handlers/preprocessing, then dispatch.
        request = beforeClientExecution(request);
        return executeDescribeDBClusterSnapshots(request);
    }
}
public class CLIService {

    /**
     * Closes the given operation through its parent session.
     * (non-Javadoc)
     * @see org.apache.hive.service.cli.ICLIService#closeOperation(org.apache.hive.service.cli.OperationHandle)
     */
    @Override
    public void closeOperation(OperationHandle opHandle) throws HiveSQLException {
        // Route the close through the operation's parent session so the
        // session-level bookkeeping stays consistent.
        sessionManager.getOperationManager().getOperation(opHandle).getParentSession().closeOperation(opHandle);
        LOG.debug(opHandle + ": closeOperation");
    }
}
public class AtomicNavigableMapType {

    /**
     * Returns a new consistent tree map type.
     *
     * @param <K> the key type
     * @param <V> the value type
     * @return a new consistent tree map type
     */
    @SuppressWarnings("unchecked")
    public static <K extends Comparable<K>, V> AtomicNavigableMapType<K, V> instance() {
        // The singleton is shared across all (K, V) instantiations; the
        // unchecked cast is safe because type parameters are erased and the
        // instance itself holds no K/V-typed state — TODO confirm INSTANCE is stateless.
        return INSTANCE;
    }
}
public class ByteArray { /** * Converts this < code > ByteArray < / code > to an array of bytes . * @ return An array of bytes containing the same elements as this array . */ public byte [ ] toByteArray ( ) { } }
byte [ ] copy = new byte [ size ] ; for ( int i = 0 ; i < size ; i ++ ) { copy [ i ] = elements [ i ] ; } return copy ;
public class CommonOps_ZDRM {

    /**
     * Returns the value of the real element in the matrix that has the maximum value.<br>
     * <br>
     * Max{ a<sub>ij</sub> } for all i and j<br>
     *
     * @param a A matrix. Not modified. Assumed non-empty — data[0] is read
     *          unconditionally; TODO confirm callers never pass an empty matrix.
     * @return The maximum value out of all the real values.
     */
    public static double elementMaxReal(ZMatrixD1 a) {
        final int size = a.getDataLength();
        // Interleaved complex storage: real parts sit at even indices.
        // data[0] seeds the running maximum, so the scan starts at index 2.
        double max = a.data[0];
        for (int i = 2; i < size; i += 2) {
            double val = a.data[i];
            if (val > max) {
                max = val;
            }
        }
        return max;
    }
}
public class SQLDatabaseFactory {

    /**
     * Internal method for creating a SQLDatabase that allows a null filename to create an in-memory
     * database which can be useful for performing checks, but creating in-memory databases is not
     * permitted from outside of this class hence the private visibility.
     *
     * @param dbFile full file path of the db file or {@code null} for an in-memory database
     * @param provider Key provider or {@link NullKeyProvider}. Must be {@link NullKeyProvider}
     *                 if dbFilename is {@code null} i.e. for internal in-memory databases.
     * @return {@code SQLDatabase} for the given filename
     * @throws SQLException if the database cannot be opened or the platform-specific
     *                      database module cannot be loaded
     */
    private static SQLDatabase internalOpenSQLDatabase(File dbFile, KeyProvider provider) throws SQLException {
        boolean runningOnAndroid = Misc.isRunningOnAndroid();
        // A non-null encryption key selects the SQLCipher-backed implementation.
        boolean useSqlCipher = (provider.getEncryptionKey() != null);
        try {
            // The concrete implementations are loaded reflectively so that the
            // Android/SQLCipher classes are only required on the platforms
            // where they are actually used.
            if (runningOnAndroid) {
                if (useSqlCipher) {
                    return (SQLDatabase) Class.forName("com.cloudant.sync.internal.sqlite.android" + ".AndroidSQLCipherSQLite")
                            .getMethod("open", File.class, KeyProvider.class)
                            .invoke(null, new Object[]{dbFile, provider});
                } else {
                    return (SQLDatabase) Class.forName("com.cloudant.sync.internal.sqlite.android" + ".AndroidSQLite")
                            .getMethod("open", File.class)
                            .invoke(null, dbFile);
                }
            } else {
                if (useSqlCipher) {
                    // Encryption is only implemented on Android.
                    throw new UnsupportedOperationException("No SQLCipher-based database " + "implementation for Java SE");
                } else {
                    return (SQLDatabase) Class.forName("com.cloudant.sync.internal.sqlite" + ".sqlite4java.SQLiteWrapper")
                            .getMethod("open", File.class)
                            .invoke(null, dbFile);
                }
            }
        } catch (RuntimeException e) {
            // Let unchecked failures (including the UnsupportedOperationException
            // above) propagate unchanged.
            throw e;
        } catch (Exception e) {
            // Reflection failures (missing class/method, invocation errors) are
            // surfaced as a checked SQLException with the cause chained.
            logger.log(Level.SEVERE, "Failed to load database module", e);
            throw new SQLException("Failed to load database module", e);
        }
    }
}
public class MessageDetailDefTransHandler {

    /**
     * Called when a change in the record status is about to happen / has happened.
     * Keeps the DEFAULT_MESSAGE_TRANSPORT_ID consistent across all sibling
     * MessageDetail records after an add, update or delete.
     *
     * @param field If this file change is due to a field, this is the field.
     * @param iChangeType The type of change that occurred.
     * @param bDisplayOption If true, display any changes.
     * @return an error code.
     * ADD_TYPE - Before a write.
     * UPDATE_TYPE - Before an update.
     * DELETE_TYPE - Before a delete.
     * AFTER_UPDATE_TYPE - After a write or update.
     * LOCK_TYPE - Before a lock.
     * SELECT_TYPE - After a select.
     * DESELECT_TYPE - After a deselect.
     * MOVE_NEXT_TYPE - After a move.
     * AFTER_REQUERY_TYPE - Record opened.
     * SELECT_EOF_TYPE - EOF Hit.
     */
    public int doRecordChange(FieldInfo field, int iChangeType, boolean bDisplayOption) {
        if ((iChangeType == DBConstants.AFTER_ADD_TYPE) || (iChangeType == DBConstants.AFTER_UPDATE_TYPE) || (iChangeType == DBConstants.AFTER_DELETE_TYPE)) {
            BaseField fldMessageDetail = this.getOwner().getField(MessageDetail.MESSAGE_TRANSPORT_ID);
            BaseField fldDefaultMessageDetail = this.getOwner().getField(MessageDetail.DEFAULT_MESSAGE_TRANSPORT_ID);
            // Sentinel values: -1 = leave siblings alone, -2 = recompute the
            // default from the siblings, null = clear the default everywhere.
            Integer newDefaultMessageTransport = -1; // None (to start)
            boolean bDefaultTransportFlag = fldMessageDetail.equals(fldDefaultMessageDetail);
            if (iChangeType == DBConstants.AFTER_ADD_TYPE) {
                if (bDefaultTransportFlag)
                    newDefaultMessageTransport = (Integer) fldDefaultMessageDetail.getData(); // Change all the others
                else {
                    newDefaultMessageTransport = -2;
                }
            }
            // NOTE(review): if m_iOriginalMessageTransportID /
            // m_iOriginalDefaultMessageTransportID are boxed Integers, '=='
            // compares references — confirm they are primitive ints.
            if (iChangeType == DBConstants.AFTER_DELETE_TYPE) {
                if (m_iOriginalMessageTransportID == m_iOriginalDefaultMessageTransportID)
                    newDefaultMessageTransport = null; // It was default before delete, so clear all!
            } else if (iChangeType == DBConstants.AFTER_UPDATE_TYPE) {
                // NOTE(review): non-short-circuit '|' — both isModified() calls
                // always run; '||' was probably intended, confirm no side effects.
                if (fldMessageDetail.isModified() | fldDefaultMessageDetail.isModified()) {
                    if (bDefaultTransportFlag)
                        newDefaultMessageTransport = (Integer) fldDefaultMessageDetail.getData(); // Change all the others
                    else if (m_iOriginalMessageTransportID == m_iOriginalDefaultMessageTransportID)
                        newDefaultMessageTransport = null; // If it was default before update, clear all!
                }
            }
            // Anything other than the -1 "no change" sentinel triggers an
            // update pass over the sibling records.
            if ((newDefaultMessageTransport == null) || (newDefaultMessageTransport != -1)) {
                MessageDetail recMessageDetail = new MessageDetail(this.getOwner().findRecordOwner());
                try {
                    recMessageDetail.setKeyArea(MessageDetail.CONTACT_TYPE_ID_KEY);
                    // Restrict the scan to siblings with the same contact type,
                    // person and message process info.
                    recMessageDetail.addListener(new StringSubFileFilter(
                            this.getOwner().getField(MessageDetail.CONTACT_TYPE_ID).toString(), recMessageDetail.getField(MessageDetail.CONTACT_TYPE_ID),
                            this.getOwner().getField(MessageDetail.PERSON_ID).toString(), recMessageDetail.getField(MessageDetail.PERSON_ID),
                            this.getOwner().getField(MessageDetail.MESSAGE_PROCESS_INFO_ID).toString(), recMessageDetail.getField(MessageDetail.MESSAGE_PROCESS_INFO_ID)));
                    if (newDefaultMessageTransport != null)
                        if (newDefaultMessageTransport == -2) {
                            // Recompute: pick up the first non-null default among the siblings.
                            newDefaultMessageTransport = null;
                            recMessageDetail.close();
                            while (recMessageDetail.hasNext()) {
                                recMessageDetail.next();
                                if (!recMessageDetail.getField(MessageDetail.DEFAULT_MESSAGE_TRANSPORT_ID).isNull()) {
                                    newDefaultMessageTransport = (int) recMessageDetail.getField(MessageDetail.DEFAULT_MESSAGE_TRANSPORT_ID).getValue();
                                    break; // This is the current default (to set in the new record)
                                }
                            }
                        }
                    // Write the resolved default into every sibling record.
                    recMessageDetail.close();
                    while (recMessageDetail.hasNext()) {
                        recMessageDetail.next();
                        recMessageDetail.edit();
                        recMessageDetail.getField(MessageDetail.DEFAULT_MESSAGE_TRANSPORT_ID).setData(newDefaultMessageTransport, bDisplayOption, DBConstants.INIT_MOVE);
                        recMessageDetail.set();
                    }
                } catch (DBException ex) {
                    // NOTE(review): failure is swallowed after freeing the record —
                    // presumably best-effort by design; confirm this is intended.
                    recMessageDetail.free();
                }
            }
        }
        return super.doRecordChange(field, iChangeType, bDisplayOption);
    }
}
public class CmsWorkplaceConfiguration {

    /**
     * Adds the digester rules for the default-preferences node.<p>
     *
     * Registers, on the supplied Digester, the creation of a {@code CmsDefaultUserSettings}
     * bean for the {@code default-preferences} element and one call-method rule per child
     * element so that each XML value is pushed into the matching setter. The rules are
     * grouped by preference tab: workplace general/startup/search options, explorer
     * general/display options, dialog defaults, editor options, start galleries, and the
     * generic {@code preference-tab}/{@code preference} mechanism.
     *
     * @param digester the digester object to register the rules on
     */
    protected void addDefaultPreferencesRules(Digester digester) {

        // creation of the default user settings: instantiate the bean and hand it
        // to the parent configuration via setDefaultUserSettings once parsed
        digester.addObjectCreate("*/" + N_WORKPLACE + "/" + N_DEFAULTPREFERENCES, CmsDefaultUserSettings.class);
        digester.addSetNext("*/" + N_WORKPLACE + "/" + N_DEFAULTPREFERENCES, "setDefaultUserSettings");

        // TODO: most of these settings are not user dependent, so they should not be
        // stored in the CmsDefaultUserSettings class

        // add workplace preferences general options rules
        String xPathPrefix = "*/"
            + N_WORKPLACE
            + "/"
            + N_DEFAULTPREFERENCES
            + "/"
            + N_WORKPLACEPREFERENCES
            + "/"
            + N_WORKPLACEGENERALOPTIONS;
        digester.addCallMethod(xPathPrefix + "/" + N_BUTTONSTYLE, "setWorkplaceButtonStyle", 0);
        digester.addCallMethod(xPathPrefix + "/" + N_REPORTTYPE, "setWorkplaceReportType", 0);
        digester.addCallMethod(xPathPrefix + "/" + N_UPLOADAPPLET, "setUploadVariant", 0);
        digester.addCallMethod(xPathPrefix + "/" + N_LISTALLPROJECTS, "setListAllProjects", 0);
        digester.addCallMethod(xPathPrefix + "/" + N_PUBLISHNOTIFICATION, "setShowPublishNotification", 0);
        digester.addCallMethod(xPathPrefix + "/" + N_PUBLISHBUTTONAPPEARANCE, "setPublishButtonAppearance", 0);
        digester.addCallMethod(xPathPrefix + "/" + N_SHOWFILEUPLOADBUTTON, "setShowFileUploadButton", 0);

        // add allow broken relations rule
        digester.addCallMethod(xPathPrefix + "/" + N_ALLOWBROKENRELATIONS, "setAllowBrokenRelations", 0);

        // add publish related resources rule
        digester.addCallMethod(xPathPrefix + "/" + N_PUBLISHRELATEDRESOURCES, "setPublishRelatedResourcesMode", 0);

        // add rules for the new folder dialog settings
        digester.addCallMethod(xPathPrefix + "/" + N_NEWFOLDEREDITPROPERTIES, "setNewFolderEditProperties", 0);
        digester.addCallMethod(xPathPrefix + "/" + N_NEWFOLDERCREATEINDEXPAGE, "setNewFolderCreateIndexPage", 0);
        digester.addCallMethod(xPathPrefix + "/" + N_SHOWUPLOADTYPEDIALOG, "setShowUploadTypeDialog", 0);
        digester.addCallMethod(xPathPrefix + "/" + N_SUBSITEMAP_CREATION_MODE, "setSubsitemapCreationMode", 0);

        // add workplace preferences startup settings rules
        xPathPrefix = "*/"
            + N_WORKPLACE
            + "/"
            + N_DEFAULTPREFERENCES
            + "/"
            + N_WORKPLACEPREFERENCES
            + "/"
            + N_WORKPLACESTARTUPSETTINGS;
        digester.addCallMethod(xPathPrefix + "/" + N_LOCALE, "setLocale", 0);
        digester.addCallMethod(xPathPrefix + "/" + N_PROJECT, "setStartProject", 0);
        digester.addCallMethod(xPathPrefix + "/" + N_WORKPLACEVIEW, "setStartView", 0);
        digester.addCallMethod(xPathPrefix + "/" + N_FOLDER, "setStartFolder", 0);
        digester.addCallMethod(xPathPrefix + "/" + N_SITE, "setStartSite", 0);
        digester.addCallMethod(xPathPrefix + "/" + N_RESTRICTEXPLORERVIEW, "setRestrictExplorerView", 0);

        // add workplace search rules
        xPathPrefix = "*/"
            + N_WORKPLACE
            + "/"
            + N_DEFAULTPREFERENCES
            + "/"
            + N_WORKPLACEPREFERENCES
            + "/"
            + N_WORKPLACESEARCH;
        digester.addCallMethod(xPathPrefix + "/" + N_SEARCHINDEXNAME, "setWorkplaceSearchIndexName", 0);
        digester.addCallMethod(xPathPrefix + "/" + N_SEARCHVIEWSTYLE, "setWorkplaceSearchViewStyle", 0);

        // add explorer preferences generaloptions rules
        xPathPrefix = "*/"
            + N_WORKPLACE
            + "/"
            + N_DEFAULTPREFERENCES
            + "/"
            + N_EXPLORERPREFERENCES
            + "/"
            + N_EXPLORERGENERALOPTIONS;
        digester.addCallMethod(xPathPrefix + "/" + N_BUTTONSTYLE, "setExplorerButtonStyle", 0);
        digester.addCallMethod(xPathPrefix + "/" + N_ENTRIES, "setExplorerFileEntries", 0);
        digester.addCallMethod(xPathPrefix + "/" + N_ENTRYOPTIONS, "setExplorerFileEntryOptions", 0);

        // add explorer display options rules (one boolean "show column X" flag each)
        xPathPrefix = "*/"
            + N_WORKPLACE
            + "/"
            + N_DEFAULTPREFERENCES
            + "/"
            + N_EXPLORERPREFERENCES
            + "/"
            + N_EXPLORERDISPLAYOPTIONS;
        digester.addCallMethod(xPathPrefix + "/" + N_TITLE, "setShowExplorerFileTitle", 0);
        digester.addCallMethod(xPathPrefix + "/" + N_NAVTEXT, "setShowExplorerFileNavText", 0);
        digester.addCallMethod(xPathPrefix + "/" + N_TYPE, "setShowExplorerFileType", 0);
        digester.addCallMethod(xPathPrefix + "/" + N_DATELASTMODIFIED, "setShowExplorerFileDateLastModified", 0);
        digester.addCallMethod(xPathPrefix + "/" + N_DATECREATED, "setShowExplorerFileDateCreated", 0);
        digester.addCallMethod(xPathPrefix + "/" + N_LOCKEDBY, "setShowExplorerFileLockedBy", 0);
        digester.addCallMethod(xPathPrefix + "/" + N_PERMISSIONS, "setShowExplorerFilePermissions", 0);
        digester.addCallMethod(xPathPrefix + "/" + N_SIZE, "setShowExplorerFileSize", 0);
        digester.addCallMethod(xPathPrefix + "/" + N_STATE, "setShowExplorerFileState", 0);
        digester.addCallMethod(xPathPrefix + "/" + N_USERLASTMODIFIED, "setShowExplorerFileUserLastModified", 0);
        digester.addCallMethod(xPathPrefix + "/" + N_USERCREATED, "setShowExplorerFileUserCreated", 0);
        digester.addCallMethod(xPathPrefix + "/" + N_DATERELEASED, "setShowExplorerFileDateReleased", 0);
        digester.addCallMethod(xPathPrefix + "/" + N_DATEEXPIRED, "setShowExplorerFileDateExpired", 0);

        // add dialog preferences rules
        xPathPrefix = "*/"
            + N_WORKPLACE
            + "/"
            + N_DEFAULTPREFERENCES
            + "/"
            + N_DIALOGSPREFERENCES
            + "/"
            + N_DIALOGSDEFAULTSETTINGS;
        digester.addCallMethod(xPathPrefix + "/" + N_FILECOPY, "setDialogCopyFileMode", 0);
        digester.addCallMethod(xPathPrefix + "/" + N_FOLDERCOPY, "setDialogCopyFolderMode", 0);
        digester.addCallMethod(xPathPrefix + "/" + N_FILEDELETION, "setDialogDeleteFileMode", 0);
        digester.addCallMethod(xPathPrefix + "/" + N_DIRECTPUBLISH, "setDialogPublishSiblings", 0);
        digester.addCallMethod(xPathPrefix + "/" + N_SHOWLOCK, "setShowLockDialog", 0);
        digester.addCallMethod(xPathPrefix + "/" + N_SHOWEXPORTSETTINGS, "setShowExportSettingsDialog", 0);
        digester.addCallMethod(xPathPrefix + "/" + N_PERMISSIONSINHERITONFOLDER, "setDialogPermissionsInheritOnFolder", 0);
        digester.addCallMethod(xPathPrefix + "/" + N_EXPANDPERMISSIONSINHERITED, "setDialogExpandInheritedPermissions", 0);
        digester.addCallMethod(xPathPrefix + "/" + N_EXPANDPERMISSIONSUSER, "setDialogExpandUserPermissions", 0);

        // add editor generaloptions rules
        xPathPrefix = "*/"
            + N_WORKPLACE
            + "/"
            + N_DEFAULTPREFERENCES
            + "/"
            + N_EDITORPREFERENCES
            + "/"
            + N_EDITORGENERALOPTIONS;
        digester.addCallMethod(xPathPrefix + "/" + N_BUTTONSTYLE, "setEditorButtonStyle", 0);
        digester.addCallMethod(xPathPrefix + "/" + N_DIRECTEDITSTYLE, "setDirectEditButtonStyle", 0);

        // add editor preferrededitor rules: two attribute parameters (resource type, editor value)
        xPathPrefix = "*/"
            + N_WORKPLACE
            + "/"
            + N_DEFAULTPREFERENCES
            + "/"
            + N_EDITORPREFERENCES
            + "/"
            + N_EDITORPREFERREDEDITORS;
        digester.addCallMethod(xPathPrefix + "/" + N_EDITOR, "setPreferredEditor", 2);
        digester.addCallParam(xPathPrefix + "/" + N_EDITOR, 0, A_TYPE);
        digester.addCallParam(xPathPrefix + "/" + N_EDITOR, 1, A_VALUE);

        // add startgallery rules: two attribute parameters (gallery type, gallery path)
        xPathPrefix = "*/"
            + N_WORKPLACE
            + "/"
            + N_DEFAULTPREFERENCES
            + "/"
            + N_GALLERIESPREFERENCES
            + "/"
            + N_STARTGALLERIES;
        digester.addCallMethod(xPathPrefix + "/" + N_STARTGALLERY, "setStartGallery", 2);
        digester.addCallParam(xPathPrefix + "/" + N_STARTGALLERY, 0, A_TYPE);
        digester.addCallParam(xPathPrefix + "/" + N_STARTGALLERY, 1, A_PATH);

        // Track the current preference tab: push its "name" attribute on the digester
        // stack while inside a <preference-tab> element, pop it when the element ends.
        digester.addRule("*/" + N_WORKPLACE + "/" + N_DEFAULTPREFERENCES + "/preference-tab", new Rule() {

            @Override
            public void begin(String namespace, String name, Attributes attributes) throws Exception {

                getDigester().push(attributes.getValue("name"));
            }

            @Override
            public void end(String namespace, String name) throws Exception {

                getDigester().pop();
            }
        });

        // Each <preference> element invokes addPreference with 9 parameters: 8 taken from
        // the element attributes below, the last (index 8) taken from the top of the
        // digester stack (the tab name pushed by the rule above).
        String prefPath = "*/" + N_WORKPLACE + "/" + N_DEFAULTPREFERENCES + "/preference-tab/" + N_PREFERENCE;
        digester.addRule(prefPath, new CallMethodRule(1, "addPreference", 9));
        digester.addCallParam(prefPath, 0, A_NAME);
        digester.addCallParam(prefPath, 1, A_VALUE);
        digester.addCallParam(prefPath, 2, A_WIDGET);
        digester.addCallParam(prefPath, 3, A_WIDGET_CONFIG);
        digester.addCallParam(prefPath, 4, A_NICE_NAME);
        digester.addCallParam(prefPath, 5, A_DESCRIPTION);
        digester.addCallParam(prefPath, 6, A_RULE_REGEX);
        digester.addCallParam(prefPath, 7, A_ERROR);
        digester.addCallParam(prefPath, 8, 0);
    }
}
public class HsqlTimer { /** * Sets the periodicity of the designated task to a new value . < p > * If the designated task is cancelled or the new period is identical to the * task ' s current period , then this invocation has essentially no effect * and the submitted object is returned . < p > * Otherwise , if the new period is greater than the designated task ' s * current period , then a simple assignment occurs and the submittted * object is returned . < p > * If neither case holds , then the designated task is cancelled and a new , * equivalent task with the new period is scheduled for immediate first * execution and returned to the caller . < p > * @ return a task reference , as per the rules stated above . * @ param task the task whose periodicity is to be set * @ param period the new period */ public static Object setPeriod ( final Object task , final long period ) { } }
return ( task instanceof Task ) ? ( ( Task ) task ) . setPeriod ( period ) : task ;
public class RestHelper { /** * Return the JSON - compatible string representation of the given property value . If the value is a * { @ link javax . jcr . PropertyType # BINARY binary } value , then this method returns the Base - 64 encoding of that value . Otherwise , * it just returns the string representation of the value . * @ param value the property value ; may not be null * @ return the string representation of the value * @ deprecated since 3.0 binary values are handled via URLs */ @ Deprecated public static String jsonEncodedStringFor ( Value value ) { } }
try { if ( value . getType ( ) != PropertyType . BINARY ) { return value . getString ( ) ; } // Encode the binary value in Base64 . . . InputStream stream = value . getBinary ( ) . getStream ( ) ; try { return Base64 . encode ( stream ) ; } finally { if ( stream != null ) { try { stream . close ( ) ; } catch ( IOException e ) { // Error accessing the value , so throw this . . . LOGGER . error ( e . getMessage ( ) , e ) ; } } } } catch ( RepositoryException e ) { LOGGER . error ( e . getMessage ( ) , e ) ; return null ; }
public class Matrix4x3f { /** * / * ( non - Javadoc ) * @ see org . joml . Matrix4x3fc # mulComponentWise ( org . joml . Matrix4x3fc , org . joml . Matrix4x3f ) */ public Matrix4x3f mulComponentWise ( Matrix4x3fc other , Matrix4x3f dest ) { } }
dest . m00 = m00 * other . m00 ( ) ; dest . m01 = m01 * other . m01 ( ) ; dest . m02 = m02 * other . m02 ( ) ; dest . m10 = m10 * other . m10 ( ) ; dest . m11 = m11 * other . m11 ( ) ; dest . m12 = m12 * other . m12 ( ) ; dest . m20 = m20 * other . m20 ( ) ; dest . m21 = m21 * other . m21 ( ) ; dest . m22 = m22 * other . m22 ( ) ; dest . m30 = m30 * other . m30 ( ) ; dest . m31 = m31 * other . m31 ( ) ; dest . m32 = m32 * other . m32 ( ) ; dest . properties = 0 ; return dest ;
public class UserCoreDao { /** * Build " columns as " values for the table columns with the specified * columns as the specified values * @ param columns * columns to include as value * @ param values * " columns as " values for specified columns * @ return " columns as " values * @ since 2.0.0 */ public String [ ] buildColumnsAs ( List < TColumn > columns , String [ ] values ) { } }
String [ ] columnsArray = buildColumnsArray ( columns ) ; return buildColumnsAs ( columnsArray , values ) ;
public class SqlApplicationConfigurationUpdate { /** * The array of < a > InputUpdate < / a > objects describing the new input streams used by the application . * @ param inputUpdates * The array of < a > InputUpdate < / a > objects describing the new input streams used by the application . */ public void setInputUpdates ( java . util . Collection < InputUpdate > inputUpdates ) { } }
if ( inputUpdates == null ) { this . inputUpdates = null ; return ; } this . inputUpdates = new java . util . ArrayList < InputUpdate > ( inputUpdates ) ;
public class RpczZPageHandler { /** * Gets stats snapshot for each method . */ private Map < String , StatsSnapshot > getStatsSnapshots ( boolean isReceived ) { } }
SortedMap < String , StatsSnapshot > map = Maps . newTreeMap ( ) ; // Sorted by method name . if ( isReceived ) { getStatsSnapshots ( map , SERVER_RPC_CUMULATIVE_VIEWS ) ; getStatsSnapshots ( map , SERVER_RPC_MINUTE_VIEWS ) ; getStatsSnapshots ( map , SERVER_RPC_HOUR_VIEWS ) ; } else { getStatsSnapshots ( map , CLIENT_RPC_CUMULATIVE_VIEWS ) ; getStatsSnapshots ( map , CLIENT_RPC_MINUTE_VIEWS ) ; getStatsSnapshots ( map , CLIENT_RPC_HOUR_VIEWS ) ; } return map ;
public class ConfigParams { /** * Creates a new ConfigParams object filled with key - value pairs from specified * object . * @ param value an object with key - value pairs used to initialize a new * ConfigParams . * @ return a new ConfigParams object . * @ see RecursiveObjectReader # getProperties ( Object ) */ public static ConfigParams fromValue ( Object value ) { } }
Map < String , Object > map = RecursiveObjectReader . getProperties ( value ) ; return new ConfigParams ( map ) ;
public class MicrochipPotentiometerBase { /** * The visibility of this method is protected because not all * devices support non - volatile wipers . Any derived class may * publish this method . * @ param nonVolatileMode The way non - volatile reads or writes are done */ protected void setNonVolatileMode ( final MicrochipPotentiometerNonVolatileMode nonVolatileMode ) { } }
if ( nonVolatileMode == null ) { throw new RuntimeException ( "Setting a null-NonVolatileMode is not valid!" ) ; } if ( ! isCapableOfNonVolatileWiper ( ) && ( nonVolatileMode != MicrochipPotentiometerNonVolatileMode . VOLATILE_ONLY ) ) { throw new RuntimeException ( "This device is not capable of non-volatile wipers." + " Using another NonVolatileMode than '" + MicrochipPotentiometerNonVolatileMode . VOLATILE_ONLY + "' is not valid!" ) ; } this . nonVolatileMode = nonVolatileMode ;
public class ZScreenField {

    /**
     * Get the Html parameter for this field.
     * (Only for XML/HTML fields).
     *
     * Builds the base parameter name from the screen field, and — when the field
     * belongs to a grid screen — appends '@' plus the current record's handle so
     * the server can identify the row the parameter belongs to. The final name is
     * URL-encoded before being returned.
     *
     * @return The parameter name.
     * @exception DBException File exception.
     */
    public String getHtmlFieldParam() {
        String strFieldName = this.getScreenField().getSFieldParam(null, false);
        if (this.getScreenField().getParentScreen() instanceof GridScreen) {
            // These are command buttons such as "Form" or "Detail"
            GridScreen gridScreen = (GridScreen) this.getScreenField().getParentScreen();
            Record record = gridScreen.getMainRecord();
            try {
                // Prefer the object-ID handle; fall back to the bookmark handle.
                // NOTE(review): presumably these identify the grid's current row —
                // confirm against the Record.getHandle contract.
                Object objBookmark = record.getHandle(DBConstants.OBJECT_ID_HANDLE);
                if (objBookmark == null)
                    objBookmark = record.getHandle(DBConstants.BOOKMARK_HANDLE);
                if (objBookmark != null)
                    // Append the row handle: "<param>@<handle>"
                    strFieldName = strFieldName + '@' + objBookmark.toString();
            } catch (DBException ex) {
                ex.printStackTrace();
            }
        }
        try {
            // Make the parameter safe for use in a URL query string.
            strFieldName = URLEncoder.encode(strFieldName, DBConstants.URL_ENCODING);
        } catch (java.io.UnsupportedEncodingException ex) {
            // Should not happen for the configured encoding; logged and the
            // unencoded name is returned as a best effort.
            ex.printStackTrace();
        }
        return strFieldName;
    }
}
public class HttpRequest {

    /**
     * Decode HTTP request line.
     *
     * Parses "METHOD URI VERSION" out of the buffer in place: the method and
     * version substrings are located by two small state machines (one scanning
     * forward, one scanning backward), the method/version are upper-cased and
     * looked up in caches, and the URI span between them is (optionally)
     * %-escaped before being stored in {@code _uri}.
     *
     * @param buf Character buffer
     * @param len Length of line in buffer.
     * @exception IOException on a malformed request line
     */
    void decodeRequestLine(char[] buf, int len) throws IOException {
        // Search for first space separated chunk (the method).
        // s1 = method start, s2 = method end, s3 = start of the URI.
        int s1 = -1, s2 = -1, s3 = -1;
        int state = 0;
        startloop: for (int i = 0; i < len; i++) {
            char c = buf[i];
            switch (state) {
                case 0: // leading white
                    if (c == ' ')
                        continue;
                    state = 1;
                    s1 = i;
                    // intentional fall-through: the first non-space char also
                    // starts the method token handled by case 1
                case 1: // reading method
                    if (c == ' ')
                        state = 2;
                    else {
                        s2 = i;
                        // upper-case ASCII letters of the method in place
                        if (c >= 'a' && c <= 'z')
                            buf[i] = (char) (c - 'a' + 'A');
                    }
                    continue;
                case 2: // skip whitespace after method
                    s3 = i;
                    if (c != ' ')
                        break startloop;
            }
        }
        // Search for last space separated chunk (the version), scanning backward.
        // e1 = version end, e2 = version start, e3 = end of the URI.
        int e1 = -1, e2 = -1, e3 = -1;
        state = 0;
        endloop: for (int i = len; i-- > 0;) {
            char c = buf[i];
            switch (state) {
                case 0: // trailing white
                    if (c == ' ')
                        continue;
                    state = 1;
                    e1 = i;
                    // intentional fall-through into the version token
                case 1: // reading Version
                    if (c == ' ')
                        state = 2;
                    else
                        e2 = i;
                    continue;
                case 2: // skip whitespace before version
                    e3 = i;
                    if (c != ' ')
                        break endloop;
            }
        }
        // Check sufficient params: must have a method, a trailing token and a
        // URI span that does not overlap the method.
        if (s3 < 0 || e1 < 0 || e3 < s2)
            throw new IOException("Bad Request: " + new String(buf, 0, len));
        // get method: try the shared cache first to avoid allocating a String
        Map.Entry method = __methodCache.getEntry(buf, s1, s2 - s1 + 1);
        if (method != null)
            _method = (String) method.getKey();
        else
            _method = new String(buf, s1, s2 - s1 + 1).toUpperCase();
        // get version as uppercase; if the forward and backward scans found the
        // same token, there were only two tokens and the version is missing
        if (s2 != e3 || s3 != e2) {
            Map.Entry version = __versionCache.getEntry(buf, e2, e1 - e2 + 1);
            if (version != null)
                _version = (String) version.getKey();
            else {
                for (int i = e2; i <= e1; i++)
                    if (buf[i] >= 'a' && buf[i] <= 'z')
                        buf[i] = (char) (buf[i] - 'a' + 'A');
                _version = new String(buf, e2, e1 - e2 + 1);
            }
        } else {
            // missing version: treat as HTTP/0.9 and let the URI run to the end
            _version = __HTTP_0_9;
            e3 = e1;
        }
        // handle URI
        try {
            String raw_uri = null;
            if (URI.__CHARSET_IS_DEFAULT)
                raw_uri = new String(buf, s3, e3 - s3 + 1);
            else {
                // Non-default charset: %-encode every char >= 0x80 into the
                // reusable _uriExpanded buffer (worst case 3 chars per input char).
                int l = e3 - s3 + 1;
                for (int i = 0; i < l; i++) {
                    char c = buf[s3 + i];
                    if (c >= 0 && c < 0x80)
                        continue;
                    // first non-ASCII char found: copy the ASCII prefix, then
                    // expand the rest of the URI in one inner pass
                    if (_uriExpanded == null || _uriExpanded.length < 3 * l)
                        _uriExpanded = new char[3 * l];
                    if (i > 0)
                        System.arraycopy(buf, s3, _uriExpanded, 0, i);
                    int j = i;
                    for (; i < l; i++) {
                        c = buf[s3 + i];
                        if (c >= 0 && c < 0x80)
                            _uriExpanded[j++] = c;
                        else {
                            _uriExpanded[j++] = '%';
                            _uriExpanded[j++] = TypeUtil.toHexChar(0xf & (c >> 4));
                            _uriExpanded[j++] = TypeUtil.toHexChar(0xf & c);
                        }
                    }
                    raw_uri = new String(_uriExpanded, 0, j);
                }
                // all-ASCII URI: no expansion was needed
                if (raw_uri == null)
                    raw_uri = new String(buf, s3, e3 - s3 + 1);
            }
            // reuse the existing URI instance when possible
            if (_uri == null)
                _uri = new URI(raw_uri);
            else
                _uri.setURI(raw_uri);
        } catch (IllegalArgumentException e) {
            LogSupport.ignore(log, e);
            throw new HttpException(HttpResponse.__400_Bad_Request, new String(buf, s3, e3 - s3 + 1));
        }
    }
}
public class ProcessBuilderImpl { /** * Converts a drools version 4 . rf or . rfm ruleflow to a version 5 . rf . * Version 5 . rf ruleflows are allowed , but are not migrated . * @ param reader containing any drools 4 . rf or . rfm ruleflow , or a * version 5 . rf * @ return reader containing the input reader in the latest ( 5 ) . rf format * @ throws Exception */ private Reader portToCurrentVersion ( final Reader reader ) throws Exception { } }
// Migrate v4 ruleflows to v5 String xml = RuleFlowMigrator . convertReaderToString ( reader ) ; if ( RuleFlowMigrator . needToMigrateRFM ( xml ) ) { // Not a current version RFM convert it . xml = RuleFlowMigrator . portRFMToCurrentVersion ( xml ) ; } else if ( RuleFlowMigrator . needToMigrateRF ( xml ) ) { // Not a current version RF convert it . xml = RuleFlowMigrator . portRFMToCurrentVersion ( xml ) ; } // Note that we have also return any input v5 ruleflow as // a StringReader since the act of checking it using // convertReaderToString will have read the reader making it // appear empty if read later . As reset is not guaranteed on // all Reader implementation , it is safest to convert the v5 // ruleflow string representation to a StringReader as well . return new StringReader ( xml ) ;
public class BaseLayer { /** * The number of parameters for the model * @ return the number of parameters for the model */ @ Override public long numParams ( ) { } }
int ret = 0 ; for ( INDArray val : params . values ( ) ) ret += val . length ( ) ; return ret ;
public class AbstractAliasDestinationHandler { /** * This destination handler is being deleted and should perform any * processing required . */ public void delete ( ) { } }
if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) SibTr . entry ( tc , "delete" ) ; // Tell the target of the alias to remove the backwards reference to it _targetDestinationHandler . removeTargettingAlias ( this ) ; if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) SibTr . exit ( tc , "delete" ) ;
public class FullDTDReader {

    /**
     * Method similar to {@link #skipComment}, but that has to collect
     * contents, to be reported for a SAX handler.
     *
     * Reads characters until the comment terminator "--&gt;" is seen,
     * normalizing line ends to '\n' and rejecting invalid control characters,
     * then fires a DTD comment event with the collected text. Per XML rules,
     * "--" inside a comment is only legal as part of the terminator.
     */
    protected void readComment(DTDEventListener l) throws XMLStreamException {
        TextBuffer tb = getTextBuffer();
        char[] outBuf = tb.getCurrentSegment();
        int outPtr = 0;

        while (true) {
            // Fast path reads straight from the input buffer; slow path refills.
            char c = (mInputPtr < mInputEnd) ? mInputBuffer[mInputPtr++] : dtdNextFromCurr();
            if (c < CHAR_SPACE) {
                if (c == '\n' || c == '\r') {
                    // Normalize CR / CRLF / LF to a single '\n'
                    skipCRLF(c);
                    c = '\n';
                } else if (c != '\t') {
                    // Only tab is allowed among the remaining control chars
                    throwInvalidSpace(c);
                }
            } else if (c == '-') {
                c = dtdNextFromCurr();
                if (c == '-') { // Ok, has to be end marker then:
                    // Either get '>' or error:
                    c = dtdNextFromCurr();
                    if (c != '>') {
                        throwParseError(ErrorConsts.ERR_HYPHENS_IN_COMMENT);
                    }
                    break;
                }
                c = '-';
                --mInputPtr; // need to push back the second char read
            }

            // Need more room?
            if (outPtr >= outBuf.length) {
                outBuf = tb.finishCurrentSegment();
                outPtr = 0;
            }
            // Ok, let's add char to output:
            outBuf[outPtr++] = c;
        }
        tb.setCurrentLength(outPtr);
        tb.fireDtdCommentEvent(l);
    }
}
public class StrKit {

    /**
     * 首字母变小写 (lower-cases the first character).
     *
     * Converts the first character of the string to lower case if it is an
     * upper-case ASCII letter ('A'-'Z'); any other string is returned unchanged.
     *
     * @param str the input string; may be null or empty
     * @return the string with its first ASCII letter lower-cased, or the input
     *         itself (including null) when no conversion applies
     */
    public static String firstCharToLowerCase(String str) {
        // BUG FIX: guard against null / empty input — str.charAt(0) previously
        // threw NullPointerException / StringIndexOutOfBoundsException.
        if (str == null || str.isEmpty()) {
            return str;
        }
        char firstChar = str.charAt(0);
        if (firstChar >= 'A' && firstChar <= 'Z') {
            char[] arr = str.toCharArray();
            // 'a' - 'A' == 32: shift into the lower-case ASCII range
            arr[0] += ('a' - 'A');
            return new String(arr);
        }
        return str;
    }
}
public class VirtualMachineScaleSetsInner { /** * Power off ( stop ) one or more virtual machines in a VM scale set . Note that resources are still attached and you are getting charged for the resources . Instead , use deallocate to release resources and avoid charges . * @ param resourceGroupName The name of the resource group . * @ param vmScaleSetName The name of the VM scale set . * @ throws IllegalArgumentException thrown if parameters fail the validation * @ return the observable to the OperationStatusResponseInner object */ public Observable < OperationStatusResponseInner > beginPowerOffAsync ( String resourceGroupName , String vmScaleSetName ) { } }
return beginPowerOffWithServiceResponseAsync ( resourceGroupName , vmScaleSetName ) . map ( new Func1 < ServiceResponse < OperationStatusResponseInner > , OperationStatusResponseInner > ( ) { @ Override public OperationStatusResponseInner call ( ServiceResponse < OperationStatusResponseInner > response ) { return response . body ( ) ; } } ) ;
public class Rollbar {

    /**
     * Log an error at the level specified.
     *
     * @param error the error.
     * @param level the level of the error.
     */
    public void log(Throwable error, Level level) {
        // Delegate to the most general overload; the two null arguments are the
        // optional extras that this convenience variant omits (presumably the
        // description and the custom payload data — confirm against the 4-arg log).
        log(error, null, null, level);
    }
}
public class BaseMessageFilter {

    /**
     * Are these filters functionally the same?
     * Override this to compare filters.
     *
     * NOTE(review): this is an unfinished stub — it ALWAYS returns false. The
     * 'if' below ends in an empty statement (';') and the 'return true' is
     * commented out, so the isFilterMatch result is computed and discarded.
     * Left as-is per the embedded todo; confirm before relying on this method.
     *
     * @return true if they are (currently never).
     */
    public boolean isSameFilter(BaseMessageFilter filter) {
        // Only filters of the exact same class are even candidates.
        if (filter.getClass().equals(this.getClass())) {
            if (filter.isFilterMatch(this))
                ; // return true; ? todo(don) You need to figure out how to compare filters
        }
        return false;
    }
}
public class ContentMatcher { /** * Direct method for a complete ContentMatcher instance creation . * Use the ClassLoader for the resource detection and loading , be careful regarding the * relative file name use ( this class is in another package ) . * @ param xmlFileName the name of the XML file that need to be used for initialization * @ return a ContentMatcher instance */ public static ContentMatcher getInstance ( String xmlFileName ) { } }
ContentMatcher cm = new ContentMatcher ( ) ; // Load the pattern definitions from an XML file try { cm . loadXMLPatternDefinitions ( cm . getClass ( ) . getResourceAsStream ( xmlFileName ) ) ; } catch ( JDOMException | IOException ex ) { throw new IllegalArgumentException ( "Failed to initialize the ContentMatcher object using: " + xmlFileName , ex ) ; } return cm ;
public class GetCrawlerMetricsRequestMarshaller { /** * Marshall the given parameter object . */ public void marshall ( GetCrawlerMetricsRequest getCrawlerMetricsRequest , ProtocolMarshaller protocolMarshaller ) { } }
if ( getCrawlerMetricsRequest == null ) { throw new SdkClientException ( "Invalid argument passed to marshall(...)" ) ; } try { protocolMarshaller . marshall ( getCrawlerMetricsRequest . getCrawlerNameList ( ) , CRAWLERNAMELIST_BINDING ) ; protocolMarshaller . marshall ( getCrawlerMetricsRequest . getMaxResults ( ) , MAXRESULTS_BINDING ) ; protocolMarshaller . marshall ( getCrawlerMetricsRequest . getNextToken ( ) , NEXTTOKEN_BINDING ) ; } catch ( Exception e ) { throw new SdkClientException ( "Unable to marshall request to JSON: " + e . getMessage ( ) , e ) ; }
public class ProductPartitionNode { /** * Returns the child node with the specified ProductDimension . * @ throws IllegalArgumentException if no such direct child node exists . */ public ProductPartitionNode getChild ( ProductDimension dimension ) { } }
Preconditions . checkArgument ( hasChild ( dimension ) , "No child exists with dimension: %s" , toString ( dimension ) ) ; return children . get ( dimension ) ;
public class LinearSparseVector { /** * 去掉第idx维特征 * @ param idx * @ return */ public float remove ( int idx ) { } }
float ret = - 1f ; int p = Arrays . binarySearch ( index , 0 , length , idx ) ; if ( p >= 0 ) { System . arraycopy ( data , p + 1 , data , p , length - p - 1 ) ; System . arraycopy ( index , p + 1 , index , p , length - p - 1 ) ; length -- ; } else { System . err . println ( "error" ) ; } return ret ;
public class PublishedEventRepository { /** * region > findByTargetAndFromAndTo */ @ Programmatic public List < PublishedEvent > findByTargetAndFromAndTo ( final Object publishedObject , final LocalDate from , final LocalDate to ) { } }
final Bookmark bookmark = bookmarkService . bookmarkFor ( publishedObject ) ; return findByTargetAndFromAndTo ( bookmark , from , to ) ;
public class SARLJvmModelInferrer {

    /**
     * {@inheritDoc}.
     *
     * <p>The function is overridden in order to interleave the instructions from Xtend and the ones needed
     * for SARL: the Xtend-inherited steps build the JVM type skeleton and transform the members, while the
     * SARL-specific steps manage the generation context and append the synthetic default-valued-parameter
     * methods.
     */
    @Override
    public void inferLocalClass(AnonymousClass anonymousClass, String localClassName, JvmFeature container) {
        // Issue #356: do not generate if the class has no name.
        assert anonymousClass != null;
        assert container != null;
        if (Strings.isNullOrEmpty(localClassName)) {
            return;
        }
        // Issue #363: do not generate the class if the SARL library is incompatible.
        if (!Utils.isCompatibleSARLLibraryOnClasspath(this.typeReferences, anonymousClass)) {
            return;
        }
        // Create the inner type
        // --- Begin Xtend Part
        try {
            final JvmGenericType inferredJvmType = this.typesFactory.createJvmGenericType();
            inferredJvmType.setSimpleName(localClassName);
            // A class with additional members is no longer "anonymous" in the JVM model
            inferredJvmType.setAnonymous(!hasAdditionalMembers(anonymousClass));
            inferredJvmType.setFinal(true);
            setVisibility(inferredJvmType, anonymousClass);
            inferredJvmType.getSuperTypes().add(this.typeBuilder.inferredType(anonymousClass));
            container.getLocalClasses().add(inferredJvmType);
            // Link the SARL source element to the inferred JVM type
            this.associator.associatePrimary(anonymousClass, inferredJvmType);
            // --- End Xtend Part

            // Create the generation context that is used by the other transformation functions.
            final GenerationContext parentContext = getContext(
                EcoreUtil2.getContainerOfType(container, JvmType.class));
            final GenerationContext context = openContext(anonymousClass, inferredJvmType,
                Arrays.asList(SarlField.class, SarlConstructor.class, SarlAction.class));
            context.setParentContext(parentContext);
            try {
                // --- Begin Xtend Part
                // Transform each supported member of the anonymous class
                for (final XtendMember member : anonymousClass.getMembers()) {
                    if (context.isSupportedMember(member)) {
                        transform(member, inferredJvmType, true);
                    }
                }
                appendSyntheticDispatchMethods(anonymousClass, inferredJvmType);
                this.nameClashResolver.resolveNameClashes(inferredJvmType);
                // --- End Xtend Part

                // Add SARL synthetic functions
                appendSyntheticDefaultValuedParameterMethods(anonymousClass, inferredJvmType, context);
            } finally {
                // Always close the context, even when a member transformation fails
                closeContext(context);
            }
        } catch (AssertionError | InternalError internalError) {
            // Assertion/VM errors must not be swallowed
            throw internalError;
        } catch (Exception exception) {
            // Any other failure is logged as an internal error of the inferrer
            logInternalError(exception);
        }
    }
}
public class GameSessionPlacement { /** * Set of values , expressed in milliseconds , indicating the amount of latency that a player experiences when * connected to AWS regions . * @ param playerLatencies * Set of values , expressed in milliseconds , indicating the amount of latency that a player experiences when * connected to AWS regions . */ public void setPlayerLatencies ( java . util . Collection < PlayerLatency > playerLatencies ) { } }
if ( playerLatencies == null ) { this . playerLatencies = null ; return ; } this . playerLatencies = new java . util . ArrayList < PlayerLatency > ( playerLatencies ) ;
public class TimestampUtils {

    /**
     * Converts the given java seconds to postgresql seconds. See {@link #toJavaSecs} for the reverse
     * operation. The conversion is valid for any year 100 BC onwards.
     *
     * Note: despite the javadoc parameter wording, the input is seconds since the
     * Java epoch (1970-01-01) and the result is seconds since the PostgreSQL
     * epoch (2000-01-01), with pre-Gregorian dates adjusted to the Julian calendar.
     *
     * @param secs Postgresql seconds.
     * @return Java seconds.
     */
    private static long toPgSecs(long secs) {
        // java epoc to postgres epoc: 946684800 s between 1970-01-01 and 2000-01-01
        secs -= 946684800L;

        // Julian/Greagorian calendar cutoff point
        if (secs < -13165977600L) { // October 15, 1582 -> October 4, 1582
            // The Gregorian reform dropped 10 days
            secs -= 86400 * 10;
            if (secs < -15773356800L) { // 1500-03-01 -> 1500-02-28
                // Before 1500-03-01, add back one day per Julian-only leap year
                // (century years divisible by 4 but not leap in the Gregorian rule);
                // -3155823050L is ~100 Julian years in seconds
                int years = (int) ((secs + 15773356800L) / -3155823050L);
                years++;
                years -= years / 4;
                secs += years * 86400;
            }
        }

        return secs;
    }
}
public class Utils {

    /**
     * Return true if the given string contains only digits (characters '0'-'9').
     *
     * @param string String to be tested.
     * @return True if the string is not null, not empty, and contains only
     *         digit characters '0' through '9'.
     */
    public static boolean allDigits(String string) {
        // Null or empty strings do not qualify.
        if (string == null || string.isEmpty()) {
            return false;
        }
        // Intentionally ASCII-only: Character.isDigit would also accept
        // non-ASCII Unicode digits, which this check must reject.
        for (char ch : string.toCharArray()) {
            if (ch < '0' || ch > '9') {
                return false;
            }
        }
        return true;
    }
}
public class LocalPDBDirectory { /** * Get the last modified time of the file in given url by retrieveing the " Last - Modified " header . * Note that this only works for http URLs * @ param url * @ return the last modified date or null if it couldn ' t be retrieved ( in that case a warning will be logged ) */ private Date getLastModifiedTime ( URL url ) { } }
// see http : / / stackoverflow . com / questions / 2416872 / how - do - you - obtain - modified - date - from - a - remote - file - java Date date = null ; try { String lastModified = url . openConnection ( ) . getHeaderField ( "Last-Modified" ) ; logger . debug ( "Last modified date of server file ({}) is {}" , url . toString ( ) , lastModified ) ; if ( lastModified != null ) { try { date = new SimpleDateFormat ( "E, d MMM yyyy HH:mm:ss Z" , Locale . ENGLISH ) . parse ( lastModified ) ; } catch ( ParseException e ) { logger . warn ( "Could not parse last modified time from string '{}', no last modified time available for file {}" , lastModified , url . toString ( ) ) ; // this will return null } } } catch ( IOException e ) { logger . warn ( "Problems while retrieving last modified time for file {}" , url . toString ( ) ) ; } return date ;