signature stringlengths 43 39.1k | implementation stringlengths 0 450k |
|---|---|
public class SimpleDataArray { /** * Submit current segment index buffer for asynchronous handling . */
protected void submitSegmentIndexBuffer ( ) { } } | if ( _sibEnabled && ! _sib . isDirty ( ) ) { _sib . setSegmentId ( _segment . getSegmentId ( ) ) ; _sib . setSegmentLastForcedTime ( _segment . getLastForcedTime ( ) ) ; _segmentManager . submit ( _sib ) ; } else { _segmentManager . remove ( _sib ) ; } |
public class BOGImpl { /** * < ! - - begin - user - doc - - >
* < ! - - end - user - doc - - >
* @ generated */
@ Override public boolean eIsSet ( int featureID ) { } } | switch ( featureID ) { case AfplibPackage . BOG__OEG_NAME : return OEG_NAME_EDEFAULT == null ? oegName != null : ! OEG_NAME_EDEFAULT . equals ( oegName ) ; case AfplibPackage . BOG__TRIPLETS : return triplets != null && ! triplets . isEmpty ( ) ; } return super . eIsSet ( featureID ) ; |
public class TypeUtils { /** * create a type representing " base < args . . . > " */
private static ParameterizedType newParameterizedType ( final Class < ? > base , Type ... args ) throws Exception { } } | return Types . newParameterizedType ( base , args ) ; |
public class IoServiceListenerSupport { /** * Calls { @ link IoServiceListener # serviceDeactivated ( IoService ) }
* for all registered listeners . */
public void fireServiceDeactivated ( ) { } } | if ( ! activated . compareAndSet ( true , false ) ) { return ; } try { for ( IoServiceListener l : listeners ) { try { l . serviceDeactivated ( service ) ; } catch ( Throwable e ) { ExceptionMonitor . getInstance ( ) . exceptionCaught ( e ) ; } } } finally { disconnectSessions ( ) ; } |
public class FedoraTombstones { /** * Delete a tombstone resource ( freeing the original resource to be reused )
* @ return the free resource */
@ DELETE public Response delete ( ) { } } | LOGGER . info ( "Delete tombstone: {}" , resource ( ) ) ; resource ( ) . delete ( ) ; session . commit ( ) ; return noContent ( ) . build ( ) ; |
public class DocumentationHelper { /** * Create the pretty print HTML document of log report . Verify the content
* of log root directory and create the report . html file if it not exists .
* @ param logDir
* existing logs directory */
public void prettyPrintsReport ( File logDir ) throws Exception { } } | if ( ( ! logDir . exists ( ) ) || ( ! logDir . isDirectory ( ) ) ) { throw new Exception ( "Error: LOGDIR " + logDir . getAbsolutePath ( ) + " seems not a valid directory. " ) ; } File html_logs_report_file = new File ( logDir . getAbsolutePath ( ) + File . separator + "report.html" ) ; prettyPrintsReport ( logDir , html_logs_report_file ) ; |
public class CxDxServerSessionImpl { /** * / * ( non - Javadoc )
* @ see org . jdiameter . api . cxdx . ServerCxDxSession # sendServerAssignmentAnswer ( org . jdiameter . api . cxdx . events . JServerAssignmentAnswer ) */
@ Override public void sendServerAssignmentAnswer ( JServerAssignmentAnswer answer ) throws InternalException , IllegalDiameterStateException , RouteException , OverloadException { } } | send ( Event . Type . SEND_MESSAGE , null , answer ) ; |
public class KafkaProducer { /** * Creates default KafkaTemplate instance from endpoint configuration . */
private org . apache . kafka . clients . producer . KafkaProducer < Object , Object > createKafkaProducer ( ) { } } | Map < String , Object > producerProps = new HashMap < > ( ) ; producerProps . put ( ProducerConfig . BOOTSTRAP_SERVERS_CONFIG , endpointConfiguration . getServer ( ) ) ; producerProps . put ( ProducerConfig . REQUEST_TIMEOUT_MS_CONFIG , new Long ( endpointConfiguration . getTimeout ( ) ) . intValue ( ) ) ; producerProps . put ( ProducerConfig . KEY_SERIALIZER_CLASS_CONFIG , endpointConfiguration . getKeySerializer ( ) ) ; producerProps . put ( ProducerConfig . VALUE_SERIALIZER_CLASS_CONFIG , endpointConfiguration . getValueSerializer ( ) ) ; producerProps . put ( ProducerConfig . CLIENT_ID_CONFIG , Optional . ofNullable ( endpointConfiguration . getClientId ( ) ) . orElse ( KafkaMessageHeaders . KAFKA_PREFIX + "producer_" + UUID . randomUUID ( ) . toString ( ) ) ) ; producerProps . putAll ( endpointConfiguration . getProducerProperties ( ) ) ; return new org . apache . kafka . clients . producer . KafkaProducer < > ( producerProps ) ; |
public class NetworkInterfacesInner { /** * Gets all network interfaces in a resource group .
* @ param nextPageLink The NextLink from the previous successful call to List operation .
* @ throws IllegalArgumentException thrown if parameters fail the validation
* @ return the observable to the PagedList & lt ; NetworkInterfaceInner & gt ; object */
public Observable < Page < NetworkInterfaceInner > > listByResourceGroupNextAsync ( final String nextPageLink ) { } } | return listByResourceGroupNextWithServiceResponseAsync ( nextPageLink ) . map ( new Func1 < ServiceResponse < Page < NetworkInterfaceInner > > , Page < NetworkInterfaceInner > > ( ) { @ Override public Page < NetworkInterfaceInner > call ( ServiceResponse < Page < NetworkInterfaceInner > > response ) { return response . body ( ) ; } } ) ; |
public class StaticHiveCluster { /** * Create a metastore client connected to the Hive metastore .
* As per Hive HA metastore behavior , return the first metastore in the list
* list of available metastores ( i . e . the default metastore ) if a connection
* can be made , else try another of the metastores at random , until either a
* connection succeeds or there are no more fallback metastores . */
@ Override public HiveMetastoreClient createMetastoreClient ( ) throws TException { } } | List < HostAndPort > metastores = new ArrayList < > ( addresses ) ; Collections . shuffle ( metastores . subList ( 1 , metastores . size ( ) ) ) ; TException lastException = null ; for ( HostAndPort metastore : metastores ) { try { HiveMetastoreClient client = clientFactory . create ( metastore ) ; if ( ! isNullOrEmpty ( metastoreUsername ) ) { client . setUGI ( metastoreUsername ) ; } return client ; } catch ( TException e ) { lastException = e ; } } throw new TException ( "Failed connecting to Hive metastore: " + addresses , lastException ) ; |
public class AnnotationClassReader { /** * Reads an attribute in { @ link # b b } .
* @ param attrs prototypes of the attributes that must be parsed during the
* visit of the class . Any attribute whose type is not equal to
* the type of one the prototypes is ignored ( i . e . an empty
* { @ link Attribute } instance is returned ) .
* @ param type the type of the attribute .
* @ param off index of the first byte of the attribute ' s content in
* { @ link # b b } . The 6 attribute header bytes , containing the
* type and the length of the attribute , are not taken into
* account here ( they have already been read ) .
* @ param len the length of the attribute ' s content .
* @ param buf buffer to be used to call { @ link # readUTF8 readUTF8 } ,
* { @ link # readClass ( int , char [ ] ) readClass } or { @ link # readConst
* readConst } .
* @ param codeOff index of the first byte of code ' s attribute content in
* { @ link # b b } , or - 1 if the attribute to be read is not a code
* attribute . The 6 attribute header bytes , containing the type
* and the length of the attribute , are not taken into account
* here .
* @ param labels the labels of the method ' s code , or < tt > null < / tt > if the
* attribute to be read is not a code attribute .
* @ return the attribute that has been read , or < tt > null < / tt > to skip this
* attribute . */
private Attribute readAttribute ( final Attribute [ ] attrs , final String type , final int off , final int len , final char [ ] buf , final int codeOff , final Label [ ] labels ) { } } | for ( int i = 0 ; i < attrs . length ; ++ i ) { if ( attrs [ i ] . type . equals ( type ) ) { return attrs [ i ] . read ( this , off , len , buf , codeOff , labels ) ; } } return new Attribute ( type ) . read ( this , off , len , null , - 1 , null ) ; |
public class PrimaveraXERFileReader { /** * Process resource rates . */
private void processResourceRates ( ) { } } | List < Row > rows = getRows ( "rsrcrate" , null , null ) ; m_reader . processResourceRates ( rows ) ; |
public class SegmentAttributeIterator { /** * region AttributeIterator Implementation */
@ Override public CompletableFuture < List < Map . Entry < UUID , Long > > > getNext ( ) { } } | return this . indexIterator . getNext ( ) . thenApply ( this :: mix ) ; |
public class ExecutableFlowPriorityComparator { /** * / * Helper method to fetch flow priority from flow props */
private int getPriority ( final ExecutableFlow exflow ) { } } | final ExecutionOptions options = exflow . getExecutionOptions ( ) ; int priority = ExecutionOptions . DEFAULT_FLOW_PRIORITY ; if ( options != null && options . getFlowParameters ( ) != null && options . getFlowParameters ( ) . containsKey ( ExecutionOptions . FLOW_PRIORITY ) ) { try { priority = Integer . valueOf ( options . getFlowParameters ( ) . get ( ExecutionOptions . FLOW_PRIORITY ) ) ; } catch ( final NumberFormatException ex ) { priority = ExecutionOptions . DEFAULT_FLOW_PRIORITY ; logger . error ( "Failed to parse flow priority for exec_id = " + exflow . getExecutionId ( ) , ex ) ; } } return priority ; |
public class ClusTree { /** * Calculates the best merge possible between two nodes in a node . This
* means that the pair with the smallest distance is found .
* @ param node The node in which these two entries have to be found .
* @ return An object which encodes the two position of the entries with the
* smallest distance in the node and the distance between them .
* @ see BestMergeInNode
* @ see Entry # calcDistance ( tree . Entry ) */
private BestMergeInNode calculateBestMergeInNode ( Node node ) { } } | assert ( node . numFreeEntries ( ) == 0 ) ; Entry [ ] entries = node . getEntries ( ) ; int toMerge1 = - 1 ; int toMerge2 = - 1 ; double distanceBetweenMergeEntries = Double . NaN ; double minDistance = Double . MAX_VALUE ; for ( int i = 0 ; i < entries . length ; i ++ ) { Entry e1 = entries [ i ] ; for ( int j = i + 1 ; j < entries . length ; j ++ ) { Entry e2 = entries [ j ] ; double distance = e1 . calcDistance ( e2 ) ; if ( distance < minDistance ) { toMerge1 = i ; toMerge2 = j ; distanceBetweenMergeEntries = distance ; } } } assert ( toMerge1 != - 1 && toMerge2 != - 1 ) ; if ( Double . isNaN ( distanceBetweenMergeEntries ) ) { throw new RuntimeException ( "The minimal distance between two " + "Entrys in a Node was Double.MAX_VAUE. That can hardly " + "be right." ) ; } return new BestMergeInNode ( toMerge1 , toMerge2 , distanceBetweenMergeEntries ) ; |
public class ZipFile { /** * Returns the zip file entry for the specified name , or null
* if not found .
* @ param name the name of the entry
* @ return the zip file entry , or null if not found
* @ throws IllegalStateException if the zip file has been closed */
public ZipEntry getEntry ( String name ) { } } | if ( name == null ) { throw new NullPointerException ( "name" ) ; } long jzentry = 0 ; synchronized ( this ) { ensureOpen ( ) ; jzentry = getEntry ( jzfile , zc . getBytes ( name ) , true ) ; if ( jzentry != 0 ) { ZipEntry ze = getZipEntry ( name , jzentry ) ; freeEntry ( jzfile , jzentry ) ; return ze ; } } return null ; |
public class CatalogUtil { /** * Given the deployment object generate the XML
* @ param deployment
* @ param indent
* @ return XML of deployment object .
* @ throws IOException */
public static String getDeployment ( DeploymentType deployment , boolean indent ) throws IOException { } } | try { if ( m_jc == null || m_schema == null ) { throw new RuntimeException ( "Error schema validation." ) ; } Marshaller marshaller = m_jc . createMarshaller ( ) ; marshaller . setSchema ( m_schema ) ; marshaller . setProperty ( Marshaller . JAXB_FORMATTED_OUTPUT , Boolean . valueOf ( indent ) ) ; StringWriter sw = new StringWriter ( ) ; marshaller . marshal ( new JAXBElement < > ( new QName ( "" , "deployment" ) , DeploymentType . class , deployment ) , sw ) ; return sw . toString ( ) ; } catch ( JAXBException e ) { // Convert some linked exceptions to more friendly errors .
if ( e . getLinkedException ( ) instanceof java . io . FileNotFoundException ) { hostLog . error ( e . getLinkedException ( ) . getMessage ( ) ) ; return null ; } else if ( e . getLinkedException ( ) instanceof org . xml . sax . SAXParseException ) { hostLog . error ( "Error schema validating deployment.xml file. " + e . getLinkedException ( ) . getMessage ( ) ) ; return null ; } else { throw new RuntimeException ( e ) ; } } |
public class CmsOUEditDialog { /** * Checks if all fields are valid . < p >
* @ return true , if all data are ok . */
@ SuppressWarnings ( "unchecked" ) protected boolean isValid ( ) { } } | boolean res = true ; res = res & m_description . isValid ( ) ; res = res & m_name . isValid ( ) ; if ( ! res ) { return res ; } for ( I_CmsEditableGroupRow row : m_ouResources . getRows ( ) ) { if ( ! ( ( AbstractField < String > ) row . getComponent ( ) ) . isValid ( ) ) { return false ; } } return true ; |
public class MongoDS { /** * 创建ServerAddress对象 , 会读取配置文件中的相关信息
* @ param group 分组 , 如果为null默认为无分组
* @ return ServerAddress */
private ServerAddress createServerAddress ( String group ) { } } | final Setting setting = checkSetting ( ) ; if ( group == null ) { group = StrUtil . EMPTY ; } final String tmpHost = setting . getByGroup ( "host" , group ) ; if ( StrUtil . isBlank ( tmpHost ) ) { throw new NotInitedException ( "Host name is empy of group: {}" , group ) ; } final int defaultPort = setting . getInt ( "port" , group , 27017 ) ; return new ServerAddress ( NetUtil . buildInetSocketAddress ( tmpHost , defaultPort ) ) ; |
public class Email { /** * Sets the sender address .
* @ param name The sender ' s name .
* @ param fromAddress The sender ' s email address . */
public void setFromAddress ( final String name , final String fromAddress ) { } } | fromRecipient = new Recipient ( name , fromAddress , null ) ; |
public class FnFloat { /** * A { @ link String } representing a percentage is created from the target number .
* @ return the string representation of the input number as a percentage */
public static final Function < Float , String > toPercentStr ( ) { } } | return ( Function < Float , String > ) ( ( Function ) FnNumber . toPercentStr ( ) ) ; |
public class ZooJdoProperties { /** * Whether the transactions should be optimistic , that means whether objects should become
* non - transactional during commit . This is for example useful when objects should be
* accessible outside transactions . This is optimistic , because less consistency guarantees
* are given .
* @ param flag The flag
* @ return this
* @ see Constants # PROPERTY _ OPTIMISTIC */
public ZooJdoProperties setOptimistic ( boolean flag ) { } } | DBTracer . logCall ( this , flag ) ; put ( Constants . PROPERTY_OPTIMISTIC , Boolean . toString ( flag ) ) ; if ( flag ) { throw new UnsupportedOperationException ( ) ; } return this ; |
public class TapJUnitUtil { /** * Get the line of the error in the exception info
* @ param testMethod
* @ return line of the error in the exception info */
public static String getLine ( JUnitTestData testMethod ) { } } | String line = "~" ; Throwable testException = testMethod . getFailException ( ) ; if ( testException != null ) { StringBuilder lookFor = new StringBuilder ( ) ; lookFor . append ( extractClassName ( testMethod . getDescription ( ) ) ) ; lookFor . append ( '.' ) ; lookFor . append ( extractMethodName ( testMethod . getDescription ( ) ) ) ; lookFor . append ( '(' ) ; lookFor . append ( extractSimpleClassName ( testMethod . getDescription ( ) ) ) ; lookFor . append ( ".java:" ) ; StackTraceElement [ ] els = testException . getStackTrace ( ) ; for ( int i = 0 ; i < els . length ; i ++ ) { StackTraceElement el = els [ i ] ; line = getLineNumberFromExceptionTraceLine ( el . toString ( ) , lookFor . toString ( ) ) ; if ( line . equals ( "" ) == Boolean . FALSE ) { break ; } } } return line ; |
public class AmazonRDSClient { /** * Copies the specified DB snapshot . The source DB snapshot must be in the " available " state .
* You can copy a snapshot from one AWS Region to another . In that case , the AWS Region where you call the
* < code > CopyDBSnapshot < / code > action is the destination AWS Region for the DB snapshot copy .
* For more information about copying snapshots , see < a
* href = " https : / / docs . aws . amazon . com / AmazonRDS / latest / UserGuide / USER _ CopyDBSnapshot . html " > Copying a DB Snapshot < / a >
* in the < i > Amazon RDS User Guide . < / i >
* @ param copyDBSnapshotRequest
* @ return Result of the CopyDBSnapshot operation returned by the service .
* @ throws DBSnapshotAlreadyExistsException
* < i > DBSnapshotIdentifier < / i > is already used by an existing snapshot .
* @ throws DBSnapshotNotFoundException
* < i > DBSnapshotIdentifier < / i > doesn ' t refer to an existing DB snapshot .
* @ throws InvalidDBSnapshotStateException
* The state of the DB snapshot doesn ' t allow deletion .
* @ throws SnapshotQuotaExceededException
* The request would result in the user exceeding the allowed number of DB snapshots .
* @ throws KMSKeyNotAccessibleException
* An error occurred accessing an AWS KMS key .
* @ sample AmazonRDS . CopyDBSnapshot
* @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / rds - 2014-10-31 / CopyDBSnapshot " target = " _ top " > AWS API
* Documentation < / a > */
@ Override public DBSnapshot copyDBSnapshot ( CopyDBSnapshotRequest request ) { } } | request = beforeClientExecution ( request ) ; return executeCopyDBSnapshot ( request ) ; |
public class ValidationManager { /** * Implements interface .
* @ param o
* the o
* @ return true , if successful */
private static boolean implementsInterface ( final Object o ) { } } | final boolean retVal ; retVal = ( o instanceof Serializable ) || ( o instanceof Externalizable ) ; return retVal ; |
public class Strings { /** * removeWord .
* @ param host
* a { @ link java . lang . String } object .
* @ param word
* a { @ link java . lang . String } object .
* @ param delimiter
* a { @ link java . lang . String } object .
* @ return a { @ link java . lang . String } object . */
public static String removeWord ( final String host , final String word , final String delimiter ) { } } | if ( host . indexOf ( word ) == - 1 ) { return host ; } else { int beginIndex = host . indexOf ( word ) ; int endIndex = beginIndex + word . length ( ) ; if ( beginIndex == 0 ) { return host . substring ( endIndex + 1 ) ; } if ( endIndex == host . length ( ) ) { return host . substring ( 0 , beginIndex - delimiter . length ( ) ) ; } else { String before = host . substring ( 0 , beginIndex ) ; String after = host . substring ( endIndex + 1 ) ; return before + after ; } } |
public class Http2ServerInitializer { /** * Configure the pipeline for TLS NPN negotiation to HTTP / 2. */
private void configureSsl ( SocketChannel ch ) { } } | ch . pipeline ( ) . addLast ( sslCtx . newHandler ( ch . alloc ( ) ) , new Http2OrHttpHandler ( ) ) ; |
public class ServerState { /** * Removes completed and cancelled jobs from the stat list . */
@ CommandArgument public void clean ( ) { } } | for ( int i = 0 ; i < jobProgressStates . size ( ) ; ) { ProgressState state = jobProgressStates . get ( i ) ; if ( state . isCancelled ( ) || state . isComplete ( ) ) { jobProgressStates . remove ( i ) ; } else { i ++ ; } } |
public class AclXmlFactory { /** * Returns an XML fragment representing the specified email address grantee .
* @ param grantee
* The email address grantee to convert to an XML representation
* that can be sent to Amazon S3 as part of request .
* @ param xml
* The XmlWriter to which to concatenate this node to .
* @ return The given XmlWriter containing the specified email address grantee */
protected XmlWriter convertToXml ( EmailAddressGrantee grantee , XmlWriter xml ) { } } | xml . start ( "Grantee" , new String [ ] { "xmlns:xsi" , "xsi:type" } , new String [ ] { "http://www.w3.org/2001/XMLSchema-instance" , "AmazonCustomerByEmail" } ) ; xml . start ( "EmailAddress" ) . value ( grantee . getIdentifier ( ) ) . end ( ) ; xml . end ( ) ; return xml ; |
public class AddressClient { /** * Retrieves a list of addresses contained within the specified region .
* < p > Sample code :
* < pre > < code >
* try ( AddressClient addressClient = AddressClient . create ( ) ) {
* ProjectRegionName region = ProjectRegionName . of ( " [ PROJECT ] " , " [ REGION ] " ) ;
* for ( Address element : addressClient . listAddresses ( region ) . iterateAll ( ) ) {
* / / doThingsWith ( element ) ;
* < / code > < / pre >
* @ param region Name of the region for this request .
* @ throws com . google . api . gax . rpc . ApiException if the remote call fails */
@ BetaApi public final ListAddressesPagedResponse listAddresses ( ProjectRegionName region ) { } } | ListAddressesHttpRequest request = ListAddressesHttpRequest . newBuilder ( ) . setRegion ( region == null ? null : region . toString ( ) ) . build ( ) ; return listAddresses ( request ) ; |
public class FullReadLines { /** * Start to read lines in a separate task . */
public final AsyncWork < T , Exception > start ( ) { } } | AsyncWork < T , Exception > result = new AsyncWork < > ( ) ; resume ( result ) ; return result ; |
public class BlockDataHandler { /** * On data save .
* @ param event the event */
@ SubscribeEvent public void onDataSave ( ChunkDataEvent . Save event ) { } } | NBTTagCompound nbt = event . getData ( ) ; for ( HandlerInfo < ? > handlerInfo : handlerInfos . values ( ) ) { ChunkData < ? > chunkData = chunkData ( handlerInfo . identifier , event . getWorld ( ) , event . getChunk ( ) ) ; if ( chunkData != null && chunkData . hasData ( ) ) { // MalisisCore . message ( " onDataSave ( " + event . getChunk ( ) . xPosition + " / " + event . getChunk ( ) . zPosition + " ) for "
// + handlerInfo . identifier ) ;
ByteBuf buf = Unpooled . buffer ( ) ; chunkData . toBytes ( buf ) ; nbt . setByteArray ( handlerInfo . identifier , buf . capacity ( buf . writerIndex ( ) ) . array ( ) ) ; } // unload data on save because saving is called after unload
if ( event . getChunk ( ) . unloadQueued ) datas . get ( ) . remove ( handlerInfo . identifier , event . getChunk ( ) ) ; } |
public class Connector { /** * Send a payload to ROX
* @ param payload The payload to send
* @ return True if the payload successfully sent to ROX
* @ throws MalformedURLException */
public boolean send ( RoxPayload payload ) throws MalformedURLException { } } | final URL payloadResourceUrl = getPayloadResourceUrl ( ) ; if ( payloadResourceUrl == null ) { return false ; } LOGGER . info ( "Connected to ROX Center API at {}" , configuration . getServerConfiguration ( ) . getApiUrl ( ) ) ; // Print the payload to the outout stream
if ( configuration . isPayloadPrint ( ) ) { OutputStreamWriter payloadOsw = null ; try { payloadOsw = new OutputStreamWriter ( System . out ) ; serializer . serializePayload ( payloadOsw , payload , true ) ; } catch ( IOException ioe ) { } finally { if ( payloadOsw != null ) { try { payloadOsw . close ( ) ; } catch ( IOException closeIoe ) { } } } } // Try to send the payload optimized
optimizeStart ( ) ; boolean result = sendPayload ( payloadResourceUrl , optimize ( payload ) , true ) ; optimizeStop ( result ) ; // If the payload was not sent optimized , try to send it non - optimized
if ( ! result ) { result = sendPayload ( payloadResourceUrl , payload , false ) ; } return result ; |
public class ApiOvhXdsl { /** * List the notifications for this access
* REST : GET / xdsl / { serviceName } / monitoringNotifications
* @ param serviceName [ required ] The internal name of your XDSL offer */
public ArrayList < Long > serviceName_monitoringNotifications_GET ( String serviceName ) throws IOException { } } | String qPath = "/xdsl/{serviceName}/monitoringNotifications" ; StringBuilder sb = path ( qPath , serviceName ) ; String resp = exec ( qPath , "GET" , sb . toString ( ) , null ) ; return convertTo ( resp , t15 ) ; |
public class ClassPathBuilder { /** * Probe a codebase to see if a given source exists in that code base .
* @ param resourceName
* name of a resource
* @ return true if the resource exists in the codebase , false if not */
private boolean probeCodeBaseForResource ( DiscoveredCodeBase discoveredCodeBase , String resourceName ) { } } | ICodeBaseEntry resource = discoveredCodeBase . getCodeBase ( ) . lookupResource ( resourceName ) ; return resource != null ; |
public class SQLFunctions { /** * split ( Column str , java . lang . String pattern ) */
public static Tuple < String , String > split ( String strColumn , String pattern , int index , String valueName ) { } } | String name = "split_" + random ( ) ; String script = "" ; if ( valueName == null ) { script = "def " + name + " = doc['" + strColumn + "'].value.split('" + pattern + "')[" + index + "]" ; } else { script = "; def " + name + " = " + valueName + ".split('" + pattern + "')[" + index + "]" ; } return new Tuple < > ( name , script ) ; |
public class Distributions { /** * Creates a { @ code Distribution } with { @ code LinearBuckets } .
* @ param numFiniteBuckets initializes the number of finite buckets
* @ param width initializes the width of each bucket
* @ param offset initializes the offset of the start bucket
* @ return a { @ code Distribution } with { @ code LinearBuckets }
* @ throws IllegalArgumentException if a bad input prevents creation . */
public static Distribution createLinear ( int numFiniteBuckets , double width , double offset ) { } } | if ( numFiniteBuckets <= 0 ) { throw new IllegalArgumentException ( MSG_BAD_NUM_FINITE_BUCKETS ) ; } if ( width <= 0.0 ) { throw new IllegalArgumentException ( String . format ( MSG_DOUBLE_TOO_LOW , "width" , 0.0 ) ) ; } LinearBuckets buckets = LinearBuckets . newBuilder ( ) . setOffset ( offset ) . setWidth ( width ) . setNumFiniteBuckets ( numFiniteBuckets ) . build ( ) ; Builder builder = Distribution . newBuilder ( ) . setLinearBuckets ( buckets ) ; for ( int i = 0 ; i < numFiniteBuckets + 2 ; i ++ ) { builder . addBucketCounts ( 0L ) ; } return builder . build ( ) ; |
public class JSONTokener { /** * Reads a sequence of values and the trailing closing brace ' ] ' of an array . The
* opening brace ' [ ' should have already been read . Note that " [ ] " yields an empty
* array , but " [ , ] " returns a two - element array equivalent to " [ null , null ] " .
* @ return an array
* @ throws JSONException if processing of json failed */
private JSONArray readArray ( ) throws JSONException { } } | JSONArray result = new JSONArray ( ) ; /* to cover input that ends with " , ] " . */
boolean hasTrailingSeparator = false ; while ( true ) { switch ( nextCleanInternal ( ) ) { case - 1 : throw syntaxError ( "Unterminated array" ) ; case ']' : if ( hasTrailingSeparator ) { result . put ( null ) ; } return result ; case ',' : case ';' : /* A separator without a value first means " null " . */
result . put ( null ) ; hasTrailingSeparator = true ; continue ; default : this . pos -- ; } result . put ( nextValue ( ) ) ; switch ( nextCleanInternal ( ) ) { case ']' : return result ; case ',' : case ';' : hasTrailingSeparator = true ; continue ; default : throw syntaxError ( "Unterminated array" ) ; } } |
public class ClientMapProxy { /** * Returns an iterator for iterating entries in the { @ code partitionId } . If { @ code prefetchValues } is
* { @ code true } , all values will be sent along with the keys and no additional data will be fetched when
* iterating . If { @ code false } , the values will be fetched when iterating the entries .
* The values are not fetched one - by - one but rather in batches .
* You may control the size of the batch by changing the { @ code fetchSize } parameter .
* A too small { @ code fetchSize } can affect performance since more data will have to be sent to and from the partition owner .
* A too high { @ code fetchSize } means that more data will be sent which can block other operations from being sent ,
* including internal operations .
* The underlying implementation may send more values in one batch than { @ code fetchSize } if it needs to get to
* a " safepoint " to later resume iteration .
* < b > NOTE < / b >
* Iterating the map should be done only when the { @ link IMap } is not being
* mutated and the cluster is stable ( there are no migrations or membership changes ) .
* In other cases , the iterator may not return some entries or may return an entry twice .
* @ param fetchSize the size of the batches which will be sent when iterating the data
* @ param partitionId the partition ID which is being iterated
* @ return the iterator for the projected entries */
public Iterator < Entry < K , V > > iterator ( int fetchSize , int partitionId , boolean prefetchValues ) { } } | return new ClientMapPartitionIterator < K , V > ( this , getContext ( ) , fetchSize , partitionId , prefetchValues ) ; |
public class SimpleGroovyClassDoc { /** * Replaces angle brackets inside a tag .
* @ param text GroovyDoc text to process
* @ param regex has to capture tag name in group 1 and tag body in group 2 */
public static String encodeAngleBracketsInTagBody ( String text , Pattern regex ) { } } | Matcher matcher = regex . matcher ( text ) ; if ( matcher . find ( ) ) { matcher . reset ( ) ; StringBuffer sb = new StringBuffer ( ) ; while ( matcher . find ( ) ) { String tagName = matcher . group ( 1 ) ; String tagBody = matcher . group ( 2 ) ; String encodedBody = Matcher . quoteReplacement ( encodeAngleBrackets ( tagBody ) ) ; String replacement = "{@" + tagName + " " + encodedBody + "}" ; matcher . appendReplacement ( sb , replacement ) ; } matcher . appendTail ( sb ) ; return sb . toString ( ) ; } else { return text ; } |
public class Assert { /** * Asserts that the { @ link Object array } is not empty .
* The assertion holds if and only if the { @ link Object array } is not { @ literal null } and contains at least 1 element .
* @ param array { @ link Object array } to evaluate .
* @ param message { @ link Supplier } containing the message using in the { @ link IllegalArgumentException } thrown
* if the assertion fails .
* @ throws java . lang . IllegalArgumentException if the { @ link Object array } is { @ literal null } or empty .
* @ see java . lang . Object [ ] */
public static void notEmpty ( Object [ ] array , Supplier < String > message ) { } } | if ( isEmpty ( array ) ) { throw new IllegalArgumentException ( message . get ( ) ) ; } |
public class VarExporter { /** * Load an exporter with a specified class .
* @ param clazz Class type from which variables will be exported .
* @ param declaredFieldsOnly if true , will not export any variable belonging to superclasses of { @ code clazz } .
* @ return exporter for the given class will be created if never before accessed . */
public static synchronized VarExporter forNamespace ( @ Nonnull final Class < ? > clazz , final boolean declaredFieldsOnly ) { } } | return getInstance ( clazz . getSimpleName ( ) , clazz , declaredFieldsOnly ) ; |
public class FactoryWaveletHaar { /** * Create a description for the inverse transform . Note that this will NOT produce
* an exact copy of the original due to rounding error .
* @ return Wavelet inverse coefficient description . */
private static WlCoef_I32 generateInv_I32 ( ) { } } | WlCoef_I32 ret = new WlCoef_I32 ( ) ; ret . scaling = new int [ ] { 1 , 1 } ; ret . wavelet = new int [ ] { ret . scaling [ 0 ] , - ret . scaling [ 0 ] } ; ret . denominatorScaling = 2 ; ret . denominatorWavelet = 2 ; return ret ; |
public class ApiOvhDomain { /** * Retrieve emails obfuscation rule
* REST : GET / domain / { serviceName } / rules / emailsObfuscation
* @ param serviceName [ required ] The internal name of your domain */
public ArrayList < OvhContactAllTypesEnum > serviceName_rules_emailsObfuscation_GET ( String serviceName ) throws IOException { } } | String qPath = "/domain/{serviceName}/rules/emailsObfuscation" ; StringBuilder sb = path ( qPath , serviceName ) ; String resp = exec ( qPath , "GET" , sb . toString ( ) , null ) ; return convertTo ( resp , t6 ) ; |
public class RubyObject { /** * Executes a method of any Object by Java reflection .
* @ param o
* an Object
* @ param methodName
* name of the method
* @ param arg
* a byte
* @ return the result of the method called */
@ SuppressWarnings ( "unchecked" ) public static < E > E send ( Object o , String methodName , byte arg ) { } } | try { Method method = o . getClass ( ) . getMethod ( methodName , byte . class ) ; return ( E ) method . invoke ( o , arg ) ; } catch ( Exception e ) { return send ( o , methodName , ( Object ) arg ) ; } |
public class ServicesInner { /** * Check nested resource name validity and availability .
* This method checks whether a proposed nested resource name is valid and available .
* @ param groupName Name of the resource group
* @ param serviceName Name of the service
* @ param parameters Requested name to validate
* @ throws IllegalArgumentException thrown if parameters fail the validation
* @ return the observable to the NameAvailabilityResponseInner object */
public Observable < NameAvailabilityResponseInner > checkChildrenNameAvailabilityAsync ( String groupName , String serviceName , NameAvailabilityRequest parameters ) { } } | return checkChildrenNameAvailabilityWithServiceResponseAsync ( groupName , serviceName , parameters ) . map ( new Func1 < ServiceResponse < NameAvailabilityResponseInner > , NameAvailabilityResponseInner > ( ) { @ Override public NameAvailabilityResponseInner call ( ServiceResponse < NameAvailabilityResponseInner > response ) { return response . body ( ) ; } } ) ; |
public class Signature { /** * Produce a method handle permuting the arguments in this signature using
* the given permute arguments and targeting the given java . lang . invoke . MethodHandle .
* Example :
* < pre >
* Signature sig = Signature . returning ( String . class ) . appendArg ( " a " , int . class ) . appendArg ( " b " , int . class ) ;
* MethodHandle handle = handleThatTakesOneInt ( ) ;
* MethodHandle newHandle = sig . permuteTo ( handle , " b " ) ;
* < / pre >
* @ param target the method handle to target
* @ param permuteArgs the arguments to permute
* @ return a new handle that permutes appropriate positions based on the
* given permute args */
public MethodHandle permuteWith ( MethodHandle target , String ... permuteArgs ) { } } | return MethodHandles . permuteArguments ( target , methodType , to ( permute ( permuteArgs ) ) ) ; |
public class GetRuleRequestMarshaller { /** * Marshall the given parameter object . */
public void marshall ( GetRuleRequest getRuleRequest , ProtocolMarshaller protocolMarshaller ) { } } | if ( getRuleRequest == null ) { throw new SdkClientException ( "Invalid argument passed to marshall(...)" ) ; } try { protocolMarshaller . marshall ( getRuleRequest . getRuleId ( ) , RULEID_BINDING ) ; } catch ( Exception e ) { throw new SdkClientException ( "Unable to marshall request to JSON: " + e . getMessage ( ) , e ) ; } |
public class EditText { /** * Convenience method : Append the specified text slice to the TextView ' s display buffer ,
* upgrading it to BufferType . EDITABLE if it was not already editable . */
public final void append ( final CharSequence text , final int start , final int end ) { } } | getView ( ) . append ( text , start , end ) ; |
public class AddOn { /** * Gets the add - on with the given { @ code id } from the given collection of { @ code addOns } .
* @ param addOns the collection of add - ons where the search will be made
* @ param id the id of the add - on to search for
* @ return the { @ code AddOn } with the given id , or { @ code null } if not found */
private static AddOn getAddOn ( Collection < AddOn > addOns , String id ) { } } | for ( AddOn addOn : addOns ) { if ( addOn . getId ( ) . equals ( id ) ) { return addOn ; } } return null ; |
public class BaseStatisticsReportController { /** * use a Set to filter down to unique values . */
private AggregatedGroupMapping [ ] extractGroupsArray ( Set < D > columnGroups ) { } } | Set < AggregatedGroupMapping > groupMappings = new HashSet < AggregatedGroupMapping > ( ) ; for ( D discriminator : columnGroups ) { groupMappings . add ( discriminator . getAggregatedGroup ( ) ) ; } return groupMappings . toArray ( new AggregatedGroupMapping [ 0 ] ) ; |
public class SubscriptionMessageHandler { /** * Resets the complete message state */
private void reset ( ) { } } | if ( tc . isEntryEnabled ( ) ) SibTr . entry ( tc , "reset" ) ; if ( iInitialised ) { // Reset the ArrayLists associated with this message
iTopics . clear ( ) ; iTopicSpaces . clear ( ) ; iTopicSpaceMappings . clear ( ) ; // Create a new message to send to this Neighbour .
iSubscriptionMessage = createSubscriptionMessage ( ) ; } else { iInitialised = true ; } if ( tc . isEntryEnabled ( ) ) SibTr . exit ( tc , "reset" ) ; |
public class ParserPropertyHelper { /** * Returns the names of all those columns which represent a collection to be stored within the owning entity
* structure ( element collections and / or * - to - many associations , depending on the dialect ' s capabilities ) . */
protected String getColumn ( OgmEntityPersister persister , List < String > propertyPath ) { } } | Iterator < String > pathIterator = propertyPath . iterator ( ) ; String propertyName = pathIterator . next ( ) ; Type propertyType = persister . getPropertyType ( propertyName ) ; if ( ! pathIterator . hasNext ( ) ) { if ( isElementCollection ( propertyType ) ) { Joinable associatedJoinable = ( ( AssociationType ) propertyType ) . getAssociatedJoinable ( persister . getFactory ( ) ) ; OgmCollectionPersister collectionPersister = ( OgmCollectionPersister ) associatedJoinable ; // Collection of elements
return collectionPersister . getElementColumnNames ( ) [ 0 ] ; } return persister . getPropertyColumnNames ( propertyName ) [ 0 ] ; } else if ( propertyType . isComponentType ( ) ) { // Embedded property
String componentPropertyName = StringHelper . join ( propertyPath , "." ) ; return persister . getPropertyColumnNames ( componentPropertyName ) [ 0 ] ; } else if ( propertyType . isAssociationType ( ) ) { Joinable associatedJoinable = ( ( AssociationType ) propertyType ) . getAssociatedJoinable ( persister . getFactory ( ) ) ; if ( associatedJoinable . isCollection ( ) ) { OgmCollectionPersister collectionPersister = ( OgmCollectionPersister ) associatedJoinable ; if ( collectionPersister . getType ( ) . isComponentType ( ) ) { StringBuilder columnNameBuilder = new StringBuilder ( propertyName ) ; columnNameBuilder . append ( "." ) ; // Collection of embeddables
appendComponentCollectionPath ( columnNameBuilder , collectionPersister , pathIterator ) ; return columnNameBuilder . toString ( ) ; } } } throw new UnsupportedOperationException ( "Unrecognized property type: " + propertyType ) ; |
public class JaxbHelper { /** * Creates an instance by reading the XML from a file .
* @ param file
* File to read .
* @ param jaxbContext
* Context to use .
* @ return New instance .
* @ throws UnmarshalObjectException
* Error deserializing the object .
* @ param < TYPE >
* Type of the created object . */
@ SuppressWarnings ( "unchecked" ) @ NotNull public < TYPE > TYPE create ( @ NotNull @ FileExists @ IsFile final File file , @ NotNull final JAXBContext jaxbContext ) throws UnmarshalObjectException { } } | Contract . requireArgNotNull ( "file" , file ) ; FileExistsValidator . requireArgValid ( "file" , file ) ; IsFileValidator . requireArgValid ( "file" , file ) ; Contract . requireArgNotNull ( "jaxbContext" , jaxbContext ) ; try { final FileReader fr = new FileReader ( file ) ; try { return ( TYPE ) create ( fr , jaxbContext ) ; } finally { fr . close ( ) ; } } catch ( final IOException ex ) { throw new UnmarshalObjectException ( "Unable to parse XML from file: " + file , ex ) ; } |
public class IPv6AddressSection { /** * Gets the IPv4 section corresponding to the lowest ( least - significant ) 4 bytes in the original address ,
* which will correspond to between 0 and 4 bytes in this address . Many IPv4 to IPv6 mapping schemes ( but not all ) use these 4 bytes for a mapped IPv4 address .
* @ see # getEmbeddedIPv4AddressSection ( int , int )
* @ see # getMixedAddressSection ( )
* @ return the embedded IPv4 section or null */
public IPv4AddressSection getEmbeddedIPv4AddressSection ( ) { } } | if ( embeddedIPv4Section == null ) { synchronized ( this ) { if ( embeddedIPv4Section == null ) { int mixedCount = getSegmentCount ( ) - Math . max ( IPv6Address . MIXED_ORIGINAL_SEGMENT_COUNT - addressSegmentIndex , 0 ) ; int lastIndex = getSegmentCount ( ) - 1 ; IPv4AddressCreator creator = getIPv4Network ( ) . getAddressCreator ( ) ; IPv4AddressSegment [ ] mixed ; if ( mixedCount == 0 ) { mixed = creator . createSegmentArray ( 0 ) ; } else { if ( mixedCount == 1 ) { mixed = creator . createSegmentArray ( IPv6Address . BYTES_PER_SEGMENT ) ; IPv6AddressSegment last = getSegment ( lastIndex ) ; last . getSplitSegments ( mixed , 0 , creator ) ; } else { mixed = creator . createSegmentArray ( IPv6Address . BYTES_PER_SEGMENT << 1 ) ; IPv6AddressSegment low = getSegment ( lastIndex ) ; IPv6AddressSegment high = getSegment ( lastIndex - 1 ) ; high . getSplitSegments ( mixed , 0 , creator ) ; low . getSplitSegments ( mixed , IPv6Address . BYTES_PER_SEGMENT , creator ) ; } } embeddedIPv4Section = createEmbeddedSection ( creator , mixed , this ) ; } } } return embeddedIPv4Section ; |
public class Base64 { /** * Decodes data from Base64 notation , automatically
* detecting gzip - compressed data and decompressing it .
* @ param s the string to decode
* @ param options encode options such as URL _ SAFE
* @ return the decoded data
* @ throws java . io . IOException if there is an error
* @ throws NullPointerException if < tt > s < / tt > is null
* @ since 1.4 */
public static byte [ ] decode ( String s , int options ) throws java . io . IOException { } } | if ( s == null ) { throw new NullPointerException ( "Input string was null." ) ; } // end if
byte [ ] bytes ; try { bytes = s . getBytes ( PREFERRED_ENCODING ) ; } // end try
catch ( java . io . UnsupportedEncodingException uee ) { bytes = s . getBytes ( ) ; } // end catch
// < / change >
// Decode
bytes = decode ( bytes , 0 , bytes . length , options ) ; return bytes ; |
public class ReflectionHelper { /** * This function creates a new instance of the requested type .
* @ param type
* The class type
* @ return The instance */
public static Object createInstance ( Class < ? > type ) { } } | Object instance = null ; try { // create instance
instance = type . newInstance ( ) ; } catch ( Exception exception ) { throw new FaxException ( "Unable to create new instance of type: " + type , exception ) ; } return instance ; |
public class ApiOvhVps { /** * Get this object properties
* REST : GET / vps / { serviceName } / secondaryDnsDomains / { domain }
* @ param serviceName [ required ] The internal name of your VPS offer
* @ param domain [ required ] domain on slave server */
public OvhSecondaryDNS serviceName_secondaryDnsDomains_domain_GET ( String serviceName , String domain ) throws IOException { } } | String qPath = "/vps/{serviceName}/secondaryDnsDomains/{domain}" ; StringBuilder sb = path ( qPath , serviceName , domain ) ; String resp = exec ( qPath , "GET" , sb . toString ( ) , null ) ; return convertTo ( resp , OvhSecondaryDNS . class ) ; |
public class SimpleGapFunction { /** * Sets the the gap function ' s maximum gap value . A value of
* < code > null < / code > means any gap .
* @ param maximumGap the maximum gap { @ link Integer } . Must be non - negative .
* If set to zero , the gap function always will return < code > false < / code > . */
public void setMaximumGap ( Integer maximumGap ) { } } | if ( maximumGap != null && maximumGap . compareTo ( ZERO ) < 0 ) { throw new IllegalArgumentException ( "maximumGap must be null or >= 0" ) ; } this . maximumGap = maximumGap ; initRelation ( ) ; |
public class StringUtils { /** * Creates a random string with a length within the given interval . The string contains only characters that
* can be represented as a single code point .
* @ param rnd The random used to create the strings .
* @ param minLength The minimum string length .
* @ param maxLength The maximum string length ( inclusive ) .
* @ return A random String . */
public static String getRandomString ( Random rnd , int minLength , int maxLength ) { } } | int len = rnd . nextInt ( maxLength - minLength + 1 ) + minLength ; char [ ] data = new char [ len ] ; for ( int i = 0 ; i < data . length ; i ++ ) { data [ i ] = ( char ) ( rnd . nextInt ( 0x7fff ) + 1 ) ; } return new String ( data ) ; |
public class ObjectFactory { /** * Create an instance of { @ link JAXBElement } { @ code < } { @ link UnitOfMeasureType } { @ code > }
* @ param value
* Java instance representing xml element ' s value .
* @ return
* the new instance of { @ link JAXBElement } { @ code < } { @ link UnitOfMeasureType } { @ code > } */
@ XmlElementDecl ( namespace = "http://www.opengis.net/gml" , name = "unitOfMeasure" ) public JAXBElement < UnitOfMeasureType > createUnitOfMeasure ( UnitOfMeasureType value ) { } } | return new JAXBElement < UnitOfMeasureType > ( _UnitOfMeasure_QNAME , UnitOfMeasureType . class , null , value ) ; |
public class RaftNetworkClient { /** * Stop the { @ code RaftNetworkClient } .
* When a call to { @ code stop ( ) } is made
* all existing incoming and outgoing connections
* are disconnected and closed . The server component
* will stop accepting incoming connections and the
* client component will not initiate any connections
* to other Raft servers . Pending reconnect timeouts
* will fail silently . Since these timeouts may yet
* run in the future callers < strong > cannot < / strong >
* assume exclusive access to system resources immediately
* following { @ code stop ( ) } .
* The network thread pools < strong > are not < / strong >
* terminated . It is the caller ' s responsibility to call
* { @ link ChannelFactory # releaseExternalResources ( ) } on
* { @ code serverChannelFactory } and { @ code clientChannelFactory }
* to terminate them .
* Following a successful call to { @ code stop ( ) } subsequent calls are noops . */
public synchronized void stop ( ) { } } | // set running to ' false ' early , so that new
// I / O operations and timeouts are rejected
// not checking the value of ' running '
// allows ' stop ' to be called multiple times ,
// AFAIK , this is not an issue and are noops
running = false ; LOGGER . info ( "{}: stopping network client" , self . getId ( ) ) ; if ( serverChannel != null ) { serverChannel . disconnect ( ) . awaitUninterruptibly ( ) ; } if ( server != null ) { server . shutdown ( ) ; } clusterChannelGroup . disconnect ( ) . awaitUninterruptibly ( ) ; if ( client != null ) { client . shutdown ( ) ; } |
public class Firmata { /** * Add a messageListener to the Firmta object which will fire whenever a matching message is received
* over the SerialPort that corresponds to the given channel .
* @ param channel Integer indicating the specific channel or pin to listen on
* @ param messageListener MessageListener object to handle a received Message event over the SerialPort . */
public void addMessageListener ( Integer channel , MessageListener < ? extends Message > messageListener ) { } } | addListener ( channel , messageListener . getMessageType ( ) , messageListener ) ; |
public class MailUtil { /** * Creates a mime - message , multipart if attachements are supplied otherwise as plain text . Assumes UTF - 8 encoding for both subject and content .
* @ param session , must be specified
* @ param from , can be null
* @ param to , can be null or empty
* @ param subject , can be null
* @ param content , must be specified
* @ param attachments , can be null or empty
* @ return the mime - message
* @ throws MessagingException
* @ throws AddressException
* @ throws NoSuchProviderException */
public static MimeMessage createMimeMessage ( Session session , String from , String [ ] to , String subject , String content , DataSource [ ] attachments ) { } } | logger . debug ( "Creates a mime message with {} attachments" , ( attachments == null ) ? 0 : attachments . length ) ; try { MimeMessage message = new MimeMessage ( session ) ; if ( from != null ) { message . setSender ( new InternetAddress ( from ) ) ; } if ( subject != null ) { message . setSubject ( subject , "UTF-8" ) ; } if ( to != null ) { for ( String toAdr : to ) { message . addRecipient ( Message . RecipientType . TO , new InternetAddress ( toAdr ) ) ; } } if ( attachments == null || attachments . length == 0 ) { // Setup a plain text message
message . setContent ( content , "text/plain; charset=UTF-8" ) ; } else { // Setup a multipart message
Multipart multipart = new MimeMultipart ( ) ; message . setContent ( multipart ) ; // Create the message part
BodyPart messageBodyPart = new MimeBodyPart ( ) ; messageBodyPart . setContent ( content , "text/plain; charset=UTF-8" ) ; multipart . addBodyPart ( messageBodyPart ) ; // Add attachments , if any
if ( attachments != null ) { for ( DataSource attachment : attachments ) { BodyPart attatchmentBodyPart = new MimeBodyPart ( ) ; attatchmentBodyPart . setDataHandler ( new DataHandler ( attachment ) ) ; attatchmentBodyPart . setFileName ( attachment . getName ( ) ) ; multipart . addBodyPart ( attatchmentBodyPart ) ; } } } return message ; } catch ( AddressException e ) { throw new RuntimeException ( e ) ; } catch ( MessagingException e ) { throw new RuntimeException ( e ) ; } |
public class ServerControllerImpl { /** * Configuration using < code > org . ops4j . pax . web < / code > PID - only listeners and NCSA logging
* @ param configuration
* @ param builder
* @ param rootHandler current root handler
* @ return */
private HttpHandler configureUndertow ( Configuration configuration , Undertow . Builder builder , HttpHandler rootHandler ) { } } | if ( configuration . isLogNCSAFormatEnabled ( ) ) { String logNCSADirectory = configuration . getLogNCSADirectory ( ) ; String logNCSAFormat = configuration . getLogNCSAFormat ( ) ; Bundle bundle = FrameworkUtil . getBundle ( ServerControllerImpl . class ) ; ClassLoader loader = bundle . adapt ( BundleWiring . class ) . getClassLoader ( ) ; xnioWorker = UndertowUtil . createWorker ( loader ) ; // String logNameSuffix = logNCSAFormat . substring ( logNCSAFormat . lastIndexOf ( " . " ) ) ;
// String logBaseName = logNCSAFormat . substring ( 0 , logNCSAFormat . lastIndexOf ( " . " ) ) ;
AccessLogReceiver logReceiver = DefaultAccessLogReceiver . builder ( ) . setLogWriteExecutor ( xnioWorker ) . setOutputDirectory ( new File ( logNCSADirectory ) . toPath ( ) ) . setLogBaseName ( "request." ) . setLogNameSuffix ( "log" ) . setRotate ( true ) . build ( ) ; String format ; if ( configuration . isLogNCSAExtended ( ) ) { format = "combined" ; } else { format = "common" ; } // String format = " % a - - [ % t ] \ " % m % U % H \ " % s " ;
// TODO : still need to find out how to add cookie etc .
rootHandler = new AccessLogHandler ( rootHandler , logReceiver , format , AccessLogHandler . class . getClassLoader ( ) ) ; } for ( String address : configuration . getListeningAddresses ( ) ) { if ( configuration . isHttpEnabled ( ) ) { LOG . info ( "Starting undertow http listener on " + address + ":" + configuration . getHttpPort ( ) ) ; builder . addHttpListener ( configuration . getHttpPort ( ) , address ) ; } if ( configuration . isHttpSecureEnabled ( ) ) { LOG . info ( "Starting undertow https listener on " + address + ":" + configuration . getHttpSecurePort ( ) ) ; // TODO : could this be shared across interface : port bindings ?
SSLContext context = buildSSLContext ( ) ; builder . addHttpsListener ( configuration . getHttpSecurePort ( ) , address , context ) ; } } return rootHandler ; |
public class LocationManager { /** * Moves the specified body from whatever location they currently occupy to the location
* identified by the supplied place oid .
* @ return the config object for the new location .
* @ exception InvocationException thrown if the move was not successful for some reason
* ( which will be communicated as an error code in the exception ' s message data ) . */
public PlaceConfig moveTo ( BodyObject source , int placeOid ) throws InvocationException { } } | // make sure the place in question actually exists
PlaceManager pmgr = _plreg . getPlaceManager ( placeOid ) ; if ( pmgr == null ) { log . info ( "Requested to move to non-existent place" , "who" , source . who ( ) , "placeOid" , placeOid ) ; throw new InvocationException ( NO_SUCH_PLACE ) ; } // if they ' re already in the location they ' re asking to move to , just give them the config
// because we don ' t need to update anything in distributed object world
Place place = pmgr . getLocation ( ) ; if ( place . equals ( source . location ) ) { log . debug ( "Going along with client request to move to where they already are" , "source" , source . who ( ) , "place" , place ) ; return pmgr . getConfig ( ) ; } // make sure they have access to the specified place
String errmsg ; if ( ( errmsg = pmgr . ratifyBodyEntry ( source ) ) != null ) { throw new InvocationException ( errmsg ) ; } // acquire a lock on the body object to avoid breakage by rapid fire moveTo requests
if ( ! source . acquireLock ( "moveToLock" ) ) { // if we ' re still locked , a previous moveTo request hasn ' t been fully processed
throw new InvocationException ( MOVE_IN_PROGRESS ) ; } // configure the client accordingly if the place uses a custom class loader
PresentsSession client = _clmgr . getClient ( source . username ) ; if ( client != null ) { client . setClassLoader ( pmgr . getClass ( ) . getClassLoader ( ) ) ; } try { source . startTransaction ( ) ; try { // remove them from any previous location
leaveOccupiedPlace ( source ) ; // let the place manager know that we ' re coming in
pmgr . bodyWillEnter ( source ) ; // let the body object know that it ' s going in
source . willEnterPlace ( place , pmgr . getPlaceObject ( ) ) ; } finally { source . commitTransaction ( ) ; } } finally { // and finally queue up an event to release the lock once these events are processed
source . releaseLock ( "moveToLock" ) ; } return pmgr . getConfig ( ) ; |
public class ForkJoinDynamicTaskMapper { /** * This method creates a JOIN task that is used in the { @ link this # getMappedTasks ( TaskMapperContext ) }
* at the end to add a join task to be scheduled after all the fork tasks
* @ param workflowInstance : A instance of the { @ link Workflow } which represents the workflow being executed .
* @ param joinWorkflowTask : A instance of { @ link WorkflowTask } which is of type { @ link TaskType # JOIN }
* @ param joinInput : The input which is set in the { @ link Task # setInputData ( Map ) }
* @ return a new instance of { @ link Task } representing a { @ link SystemTaskType # JOIN } */
@ VisibleForTesting Task createJoinTask ( Workflow workflowInstance , WorkflowTask joinWorkflowTask , HashMap < String , Object > joinInput ) { } } | Task joinTask = new Task ( ) ; joinTask . setTaskType ( SystemTaskType . JOIN . name ( ) ) ; joinTask . setTaskDefName ( SystemTaskType . JOIN . name ( ) ) ; joinTask . setReferenceTaskName ( joinWorkflowTask . getTaskReferenceName ( ) ) ; joinTask . setWorkflowInstanceId ( workflowInstance . getWorkflowId ( ) ) ; joinTask . setWorkflowType ( workflowInstance . getWorkflowName ( ) ) ; joinTask . setCorrelationId ( workflowInstance . getCorrelationId ( ) ) ; joinTask . setScheduledTime ( System . currentTimeMillis ( ) ) ; joinTask . setInputData ( joinInput ) ; joinTask . setTaskId ( IDGenerator . generate ( ) ) ; joinTask . setStatus ( Task . Status . IN_PROGRESS ) ; joinTask . setWorkflowTask ( joinWorkflowTask ) ; return joinTask ; |
public class EJBMDOrchestrator { /** * F743-25855 */
private void validateSessionSynchronizationMethod ( BeanMetaData bmd , Method method , Class < ? > [ ] expectedParams , String methodType ) throws EJBConfigurationException { } } | if ( method == null ) { return ; } String specificCause = "" ; int modifiers = method . getModifiers ( ) ; if ( Modifier . isStatic ( modifiers ) ) { specificCause += " The 'static' modifier is not allowed." ; } if ( Modifier . isFinal ( modifiers ) ) { specificCause += " The 'final' modifier is not allowed." ; } if ( method . getReturnType ( ) != Void . TYPE ) { specificCause += " The return type must be 'void'." ; } Class < ? > [ ] paramTypes = method . getParameterTypes ( ) ; if ( expectedParams == null || expectedParams . length == 0 ) { if ( paramTypes != null && paramTypes . length > 0 ) { specificCause += " The method must take 0 arguments." ; } } else { if ( paramTypes == null || paramTypes . length != expectedParams . length ) { specificCause += " The method must take " + expectedParams . length + " arguments." ; } else { for ( int i = 0 ; i < expectedParams . length ; i ++ ) { if ( ! paramTypes [ i ] . equals ( expectedParams [ i ] ) ) { specificCause += " Parameter number " + i + " of type " + paramTypes [ i ] + " must be of type " + expectedParams [ i ] + "." ; } } } } // The spec does not specifically prohibit a throws clause , but also does
// not indicate one is allowed . To be consistent with what the spec does
// say about interceptors , a throws clause is being prohibited . If we are
// required to relax this , consider using the same method that the
// lifecycle interceptors use : isLifecycleApplicationException .
Class < ? > [ ] exceptions = method . getExceptionTypes ( ) ; if ( exceptions . length > 0 ) { specificCause += " The method must not throw any exceptions." ; } if ( ! "" . equals ( specificCause ) ) { Tr . error ( tc , "INVALID_SESSION_SYNCH_SIGNATURE_CNTR0327E" , new Object [ ] { method , methodType } ) ; throw new EJBConfigurationException ( "CNTR0327E: The " + method + " method does not have the required" + " method signature for a " + methodType + " session synchronization" + " method." + specificCause ) ; } if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isDebugEnabled ( ) ) Tr . debug ( tc , "validateSessionSynchronizationMethod : valid : " + method ) ; |
public class TiffTags { /** * Gets tag information .
* @ param identifier Tag id
* @ return the tag or null if the identifier does not exist */
public static Tag getTag ( int identifier ) { } } | Tag t = null ; try { if ( instance == null ) getTiffTags ( ) ; } catch ( ReadTagsIOException e ) { /* Nothing to be shown */
} if ( tagMap . containsKey ( identifier ) ) t = tagMap . get ( identifier ) ; return t ; |
public class Pollers { /** * Parse the content of the system property that describes the polling intervals ,
* and in case of errors
* use the default of one poller running every minute . */
static long [ ] parse ( String pollers ) { } } | String [ ] periods = pollers . split ( ",\\s*" ) ; long [ ] result = new long [ periods . length ] ; boolean errors = false ; Logger logger = LoggerFactory . getLogger ( Pollers . class ) ; for ( int i = 0 ; i < periods . length ; ++ i ) { String period = periods [ i ] ; try { result [ i ] = Long . parseLong ( period ) ; if ( result [ i ] <= 0 ) { logger . error ( "Invalid polling interval: {} must be positive." , period ) ; errors = true ; } } catch ( NumberFormatException e ) { logger . error ( "Cannot parse '{}' as a long: {}" , period , e . getMessage ( ) ) ; errors = true ; } } if ( errors || periods . length == 0 ) { logger . info ( "Using a default configuration for poller intervals: {}" , join ( DEFAULT_PERIODS ) ) ; return DEFAULT_PERIODS ; } else { return result ; } |
public class Surface { /** * Clears the entire surface to the specified color .
* The channels are values in the range { @ code [ 0,1 ] } . */
public Surface clear ( float red , float green , float blue , float alpha ) { } } | batch . gl . glClearColor ( red , green , blue , alpha ) ; batch . gl . glClear ( GL20 . GL_COLOR_BUFFER_BIT ) ; return this ; |
public class FnJodaTimeUtils { /** * It converts the given { @ link Integer } elements into an { @ link Interval } . with the given { @ link Chronology }
* The target { @ link Integer } elements represent the start and end of the { @ link Interval } .
* < p > The accepted input Collection & lt ; Integer & gt ; are : < / p >
* < ul >
* < li > year , month , day , year , month , day < / li >
* < li > year , month , day , hour , minute , year , month , day , hour , minute < / li >
* < li > year , month , day , hour , minute , second , year , month , day , hour , minute , second < / li >
* < li > year , month , day , hour , minute , second , millisecond , year , month , day , hour , minute , second , millisecond < / li >
* < / ul >
* @ param chronology { @ link Chronology } to be used
* @ return the { @ link Interval } created from the input and arguments */
public static final Function < Collection < Integer > , Interval > integerFieldCollectionToInterval ( Chronology chronology ) { } } | return FnInterval . integerFieldCollectionToInterval ( chronology ) ; |
public class ConfigurationBuilder { /** * Obtains the default { @ link ExtensionLoader } to be used if none is specified
* @ return */
ExtensionLoader createDefaultExtensionLoader ( ) { } } | // First find the right Class / ClassLoader
final Class < ? > extensionLoaderImplClass ; try { extensionLoaderImplClass = ClassLoaderSearchUtil . findClassFromClassLoaders ( EXTENSION_LOADER_IMPL , getClassLoaders ( ) ) ; } catch ( final ClassNotFoundException cnfe ) { throw new IllegalStateException ( "Could not find extension loader impl class in any of the configured ClassLoaders" , cnfe ) ; } // Return
return SecurityActions . newInstance ( extensionLoaderImplClass , new Class < ? > [ ] { Iterable . class } , new Object [ ] { this . getClassLoaders ( ) } , ExtensionLoader . class ) ; |
public class KeycloakAuthenticationStrategy { /** * Refreshes the access token
* @ param requestContext request context
* @ param token token
* @ return refreshed token */
public OAuthAccessToken refreshToken ( RequestContext requestContext , OAuthAccessToken token ) { } } | OAuthAccessToken result = token ; String [ ] scopes = token . getScopes ( ) ; String tokenUrl = String . format ( "%s/realms/%s/protocol/openid-connect/token" , getServerUrl ( ) , getRealm ( ) ) ; HttpPost httpPost = new HttpPost ( tokenUrl ) ; try ( CloseableHttpClient httpclient = HttpClients . createDefault ( ) ) { List < NameValuePair > params = new ArrayList < NameValuePair > ( ) ; params . add ( new BasicNameValuePair ( "client_id" , getApiKey ( ) ) ) ; params . add ( new BasicNameValuePair ( "client_secret" , getApiSecret ( ) ) ) ; params . add ( new BasicNameValuePair ( "grant_type" , "refresh_token" ) ) ; params . add ( new BasicNameValuePair ( "refresh_token" , token . getRefreshToken ( ) ) ) ; httpPost . setEntity ( new UrlEncodedFormEntity ( params ) ) ; try ( CloseableHttpResponse response = httpclient . execute ( httpPost ) ) { StatusLine statusLine = response . getStatusLine ( ) ; if ( statusLine . getStatusCode ( ) == 200 ) { HttpEntity entity = response . getEntity ( ) ; ObjectMapper objectMapper = new ObjectMapper ( ) ; try ( InputStream stream = entity . getContent ( ) ) { RefreshTokenResponse refreshTokenResponse = objectMapper . readValue ( stream , RefreshTokenResponse . class ) ; Date expiresAt = getExpiresAt ( refreshTokenResponse . getExpiresIn ( ) ) ; result = new OAuthAccessToken ( token . getExternalId ( ) , refreshTokenResponse . getAccessToken ( ) , refreshTokenResponse . getRefreshToken ( ) , expiresAt , scopes ) ; AuthUtils . purgeOAuthAccessTokens ( requestContext , getName ( ) ) ; AuthUtils . storeOAuthAccessToken ( requestContext , getName ( ) , result ) ; } EntityUtils . consume ( entity ) ; } else { logger . log ( Level . WARNING , String . format ( "Failed to refresh access token with message [%d]: %s" , statusLine . getStatusCode ( ) , statusLine . getReasonPhrase ( ) ) ) ; } } } catch ( IOException e ) { logger . 
log ( Level . WARNING , "Failed to refresh access token" , e ) ; } return result ; |
public class ContentSpecUtilities { /** * Replaces the checksum of a Content Spec with a new checksum value
* @ param contentSpecString The content spec to replace the checksum for .
* @ param checksum The new checksum to be set in the Content Spec .
* @ return The fixed content spec string . */
public static String replaceChecksum ( final String contentSpecString , final String checksum ) { } } | Matcher matcher = CS_CHECKSUM_PATTERN . matcher ( contentSpecString ) ; if ( matcher . find ( ) ) { return matcher . replaceFirst ( "CHECKSUM=" + checksum + "\n" ) ; } return contentSpecString ; |
public class Util { /** * Encodes the URL by RFC 2396.
* I thought there ' s another spec that refers to UTF - 8 as the encoding ,
* but don ' t remember it right now .
* @ since 1.204
* @ deprecated since 2008-05-13 . This method is broken ( see ISSUE # 1666 ) . It should probably
* be removed but I ' m not sure if it is considered part of the public API
* that needs to be maintained for backwards compatibility .
* Use { @ link # encode ( String ) } instead . */
@ Deprecated public static String encodeRFC2396 ( String url ) { } } | try { return new URI ( null , url , null ) . toASCIIString ( ) ; } catch ( URISyntaxException e ) { LOGGER . log ( Level . WARNING , "Failed to encode {0}" , url ) ; // could this ever happen ?
return url ; } |
public class GSMTImpl {
    /**
     * Sets the MCPT value and, when notification is required, emits an EMF
     * SET notification carrying the old and new values.
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @generated
     */
    public void setMCPT(Integer newMCPT) {
        Integer oldMCPT = mcpt;
        mcpt = newMCPT;
        // eNotificationRequired() is false when no adapters listen, so the
        // notification object is only allocated when needed.
        if (eNotificationRequired())
            eNotify(new ENotificationImpl(this, Notification.SET, AfplibPackage.GSMT__MCPT, oldMCPT, mcpt));
    }
}
public class Closeables { /** * Close potential { @ linkplain Closeable } .
* An { @ linkplain IOException } caused by { @ linkplain Closeable # close ( ) } is ignored .
* @ param object the object to check and close . */
public static void safeClose ( @ Nullable Object object ) { } } | if ( object instanceof Closeable ) { try { ( ( Closeable ) object ) . close ( ) ; } catch ( IOException e ) { Exceptions . ignore ( e ) ; } } |
public class SpeechToTextWebSocketListener {
    /**
     * Called when the WebSocket connection is established: notifies the
     * callback, sends the start message, and launches a background thread that
     * streams the audio and finally sends the stop message.
     *
     * @see okhttp3.WebSocketListener#onOpen(okhttp3.WebSocket, okhttp3.Response)
     */
    @Override
    public void onOpen(final WebSocket socket, Response response) {
        callback.onConnected();
        this.socket = socket;
        // socket.send() returns false when the message cannot be enqueued.
        if (!socket.send(buildStartMessage(options))) {
            callback.onError(new IOException("WebSocket unavailable"));
        } else {
            // Send the InputStream on a different Thread. Elsewise, interim results cannot be
            // received, because the Thread that called SpeechToText.recognizeUsingWebSocket
            // is blocked.
            audioThread = new Thread(AUDIO_TO_WEB_SOCKET) {
                @Override
                public void run() {
                    sendInputStream(stream);
                    // Do not send the stop message if the socket has been closed already,
                    // for example because of the inactivity timeout.
                    // If the socket is still open after the sending finishes, for example
                    // because the user closed the microphone AudioInputStream, send a stop message.
                    if (socketOpen && !socket.send(buildStopMessage())) {
                        LOG.log(Level.SEVERE, "Stop message discarded because WebSocket is unavailable");
                    }
                }
            };
            audioThread.start();
        }
    }
}
public class AbstractPersistent23Tree {
    /**
     * Checks the subtree rooted at the given node for correctness: for tests only.
     *
     * @param node root of the subtree to verify
     * @throws RuntimeException if the tree structure is incorrect
     */
    static <K extends Comparable<K>> void checkNode(Node<K> node) {
        // null/null bounds: the root's keys are unconstrained on both sides.
        node.checkNode(null, null);
    }
}
public class Discovery { /** * Delete the expansion list .
* Remove the expansion information for this collection . The expansion list must be deleted to disable query expansion
* for a collection .
* @ param deleteExpansionsOptions the { @ link DeleteExpansionsOptions } containing the options for the call
* @ return a { @ link ServiceCall } with a response type of Void */
public ServiceCall < Void > deleteExpansions ( DeleteExpansionsOptions deleteExpansionsOptions ) { } } | Validator . notNull ( deleteExpansionsOptions , "deleteExpansionsOptions cannot be null" ) ; String [ ] pathSegments = { "v1/environments" , "collections" , "expansions" } ; String [ ] pathParameters = { deleteExpansionsOptions . environmentId ( ) , deleteExpansionsOptions . collectionId ( ) } ; RequestBuilder builder = RequestBuilder . delete ( RequestBuilder . constructHttpUrl ( getEndPoint ( ) , pathSegments , pathParameters ) ) ; builder . query ( "version" , versionDate ) ; Map < String , String > sdkHeaders = SdkCommon . getSdkHeaders ( "discovery" , "v1" , "deleteExpansions" ) ; for ( Entry < String , String > header : sdkHeaders . entrySet ( ) ) { builder . header ( header . getKey ( ) , header . getValue ( ) ) ; } return createServiceCall ( builder . build ( ) , ResponseConverterUtils . getVoid ( ) ) ; |
public class Mapping {
    /**
     * Return the value for the ID/Key column from the given instance.
     *
     * @param instance
     *            the instance
     * @param valueClass
     *            type of the value (must match the actual native type in the
     *            instance's class)
     * @return value
     */
    public <V> V getIdValue(T instance, Class<V> valueClass) {
        // Delegates to the generic column accessor using the configured id field name.
        return getColumnValue(instance, idFieldName, valueClass);
    }
}
public class SPSMMappingFilter {
    /**
     * Looks for the related index for the source list at the position sourceIndex
     * in the target list beginning at the targetIndex position for the defined relation.
     *
     * @param source source list of siblings.
     * @param target target list of siblings.
     * @param relation relation
     * @return the index of the related element in target, or -1 if there is no related element.
     */
    private int getRelatedIndex(List<INode> source, List<INode> target, char relation) {
        // Current positions are tracked per tree level in sourceIndex/targetIndex;
        // the level comes from the first sibling (levels appear 1-based, the
        // index lists 0-based — hence the "- 1").
        int srcIndex = sourceIndex.get(source.get(0).getLevel() - 1);
        int tgtIndex = targetIndex.get(target.get(0).getLevel() - 1);
        int returnIndex = -1;
        INode sourceNode = source.get(srcIndex);
        // find the first one who is related in the same level
        for (int i = tgtIndex + 1; i < target.size(); i++) {
            INode targetNode = target.get(i);
            if (isRelated(sourceNode, targetNode, relation)) {
                setStrongestMapping(sourceNode, targetNode);
                return i;
            }
        }
        // there was no correspondence between siblings in source and target lists
        // try to clean the mapping elements
        computeStrongestMappingForSource(source.get(srcIndex));
        return returnIndex;
    }
}
public class SchemaFactory { /** * Look up the value of a property .
* < p > The property name is any fully - qualified URI . It is
* possible for a { @ link SchemaFactory } to recognize a property name but
* temporarily be unable to return its value . < / p >
* < p > { @ link SchemaFactory } s are not required to recognize any specific
* property names . < / p >
* < p > Implementers are free ( and encouraged ) to invent their own properties ,
* using names built on their own URIs . < / p >
* @ param name The property name , which is a non - null fully - qualified URI .
* @ return The current value of the property .
* @ exception org . xml . sax . SAXNotRecognizedException If the property
* value can ' t be assigned or retrieved .
* @ exception org . xml . sax . SAXNotSupportedException When the
* XMLReader recognizes the property name but
* cannot determine its value at this time .
* @ exception NullPointerException
* if the name parameter is null .
* @ see # setProperty ( String , Object ) */
public Object getProperty ( String name ) throws SAXNotRecognizedException , SAXNotSupportedException { } } | if ( name == null ) { throw new NullPointerException ( "name == null" ) ; } throw new SAXNotRecognizedException ( name ) ; |
public class ConverterRegistry { /** * Do Conversion .
* @ param targetType
* @ param value
* @ return */
public Object convert ( Class < ? > targetType , Object value ) { } } | if ( value == null ) { return null ; } Converter converter = getConverter ( value . getClass ( ) , targetType ) ; if ( converter == null ) { throw new IllegalArgumentException ( "No converter from " + value . getClass ( ) + " to " + targetType . getName ( ) ) ; } else { return converter . convert ( targetType , value ) ; } |
public class DescribeTargetHealthRequest {
    /**
     * The targets.
     * <b>NOTE:</b> This method appends the values to the existing list (if any). Use
     * {@link #setTargets(java.util.Collection)} or {@link #withTargets(java.util.Collection)} if you want to override
     * the existing values.
     *
     * @param targets
     *        The targets.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public DescribeTargetHealthRequest withTargets(TargetDescription... targets) {
        // Lazily create the backing list, pre-sized for the varargs to append.
        if (this.targets == null) {
            setTargets(new java.util.ArrayList<TargetDescription>(targets.length));
        }
        for (TargetDescription ele : targets) {
            this.targets.add(ele);
        }
        return this;
    }
}
public class EnterpriseBeansTypeImpl { /** * If not already created , a new < code > message - driven < / code > element will be created and returned .
* Otherwise , the first existing < code > message - driven < / code > element will be returned .
* @ return the instance defined for the element < code > message - driven < / code > */
public MessageDrivenBeanType < EnterpriseBeansType < T > > getOrCreateMessageDriven ( ) { } } | List < Node > nodeList = childNode . get ( "message-driven" ) ; if ( nodeList != null && nodeList . size ( ) > 0 ) { return new MessageDrivenBeanTypeImpl < EnterpriseBeansType < T > > ( this , "message-driven" , childNode , nodeList . get ( 0 ) ) ; } return createMessageDriven ( ) ; |
public class AnnotationsClassLoader { /** * Finds the resource with the given name if it has previously been
* loaded and cached by this class loader , and return an input stream
* to the resource data . If this resource has not been cached , return
* < code > null < / code > .
* @ param name Name of the resource to return */
protected InputStream findLoadedResource ( String name ) { } } | ResourceEntry entry = ( ResourceEntry ) resourceEntries . get ( name ) ; if ( entry != null ) { if ( entry . binaryContent != null ) return new ByteArrayInputStream ( entry . binaryContent ) ; } return ( null ) ; |
public class ProcedureDescriptor { /** * Does this procedure return any values to the ' caller ' ?
* @ return < code > true < / code > if the procedure returns at least 1
* value that is returned to the caller . */
public final boolean hasReturnValues ( ) { } } | if ( this . hasReturnValue ( ) ) { return true ; } else { // TODO : We may be able to ' pre - calculate ' the results
// of this loop by just checking arguments as they are added
// The only problem is that the ' isReturnedbyProcedure ' property
// can be modified once the argument is added to this procedure .
// If that occurs , then ' pre - calculated ' results will be inacccurate .
Iterator iter = this . getArguments ( ) . iterator ( ) ; while ( iter . hasNext ( ) ) { ArgumentDescriptor arg = ( ArgumentDescriptor ) iter . next ( ) ; if ( arg . getIsReturnedByProcedure ( ) ) { return true ; } } } return false ; |
public class DatasourceDependencyBuilder { /** * Creates a ServiceDependencyBuilder with type = " db " and subtype = " Cassandra " .
* @ param datasources the datasources of the accessed database .
* @ return builder used to configure Cassandra datasource dependencies */
public static DatasourceDependencyBuilder cassandraDependency ( final List < Datasource > datasources ) { } } | return new DatasourceDependencyBuilder ( ) . withDatasources ( datasources ) . withType ( DatasourceDependency . TYPE_DB ) . withSubtype ( DatasourceDependency . SUBTYPE_CASSANDRA ) ; |
public class Notification { /** * Detect notification type based on the xml root name .
* @ param payload
* @ return notification type or null if root name is not found or if there
* is no type corresponding to the root name */
public static Type detect ( final String payload ) { } } | final Matcher m = ROOT_NAME . matcher ( payload ) ; if ( m . find ( ) && m . groupCount ( ) >= 1 ) { final String root = m . group ( 1 ) ; try { return Type . valueOf ( CaseFormat . LOWER_UNDERSCORE . to ( CaseFormat . UPPER_CAMEL , root ) ) ; } catch ( IllegalArgumentException e ) { log . warn ( "Enable to detect notification type, no type for {}" , root ) ; return null ; } } log . warn ( "Enable to detect notification type" ) ; return null ; |
public class CommerceTaxFixedRateAddressRelUtil {
    /**
     * Returns the first commerce tax fixed rate address rel in the ordered set where CPTaxCategoryId = &#63;.
     *
     * @param CPTaxCategoryId the cp tax category ID
     * @param orderByComparator the comparator to order the set by (optionally <code>null</code>)
     * @return the first matching commerce tax fixed rate address rel
     * @throws NoSuchTaxFixedRateAddressRelException if a matching commerce tax fixed rate address rel could not be found
     */
    public static CommerceTaxFixedRateAddressRel findByCPTaxCategoryId_First(long CPTaxCategoryId,
            OrderByComparator<CommerceTaxFixedRateAddressRel> orderByComparator)
            throws com.liferay.commerce.tax.engine.fixed.exception.NoSuchTaxFixedRateAddressRelException {
        // Static facade: delegates directly to the persistence layer.
        return getPersistence().findByCPTaxCategoryId_First(CPTaxCategoryId, orderByComparator);
    }
}
public class AbstractDynamicSkinService {
    /**
     * Return true if the skin file already exists. Check memory first in a concurrent manner to
     * allow multiple threads to check simultaneously.
     */
    @Override
    public boolean skinCssFileExists(DynamicSkinInstanceData data) {
        final String cssInstanceKey = getCssInstanceKey(data);
        // Check the existing map first since it is faster than accessing the actual file.
        if (instanceKeysForExistingCss.contains(cssInstanceKey)) {
            return true;
        }
        boolean exists = innerSkinCssFileExists(data);
        if (exists) {
            // When non-current CSS is not retained, only one key is kept cached,
            // so the set is cleared before adding the new key.
            // NOTE(review): the clear+add pair is not atomic; presumably a transient
            // stale/missing cache entry is acceptable here — confirm against the
            // concurrency expectations of instanceKeysForExistingCss.
            if (!supportsRetainmentOfNonCurrentCss()) {
                instanceKeysForExistingCss.clear();
            }
            instanceKeysForExistingCss.add(cssInstanceKey);
        }
        return exists;
    }
}
public class SSLUtils { /** * Flip the input list of buffers , walking through the list until the flipped
* amount equals the input total size , mark the rest of the buffers as empty .
* @ param buffers
* @ param totalSize */
public static void flipBuffers ( WsByteBuffer [ ] buffers , int totalSize ) { } } | int size = 0 ; boolean overLimit = false ; for ( int i = 0 ; i < buffers . length && null != buffers [ i ] ; i ++ ) { if ( overLimit ) { buffers [ i ] . limit ( buffers [ i ] . position ( ) ) ; } else { buffers [ i ] . flip ( ) ; size += buffers [ i ] . remaining ( ) ; overLimit = ( size >= totalSize ) ; } } |
public class Linter { /** * Keep applying fixes to the given file until no more fixes can be found , or until fixes have
* been applied { @ code MAX _ FIXES } times . */
void fixRepeatedly ( String filename , ImmutableSet < DiagnosticType > unfixableErrors ) throws IOException { } } | for ( int i = 0 ; i < MAX_FIXES ; i ++ ) { if ( ! fix ( filename , unfixableErrors ) ) { break ; } } |
public class VoiceApi {
    /**
     * Delete the specified DN from the conference call. This operation can only be performed by the owner of the conference call.
     *
     * @param connId The connection ID of the conference.
     * @param dnToDrop The DN of the party to drop from the conference.
     */
    public void deleteFromConference(String connId, String dnToDrop) throws WorkspaceApiException {
        // Convenience overload: delegates to the four-argument variant with the
        // two trailing optional arguments left null.
        this.deleteFromConference(connId, dnToDrop, null, null);
    }
}
public class Change { /** * Undos a change .
* Does this by setting the corresponding value of the { @ link Setting } to the old value of this
* change . */
public void undo ( ) { } } | if ( isListChange ( ) ) { LOGGER . trace ( "Undoing list change: " + oldList . get ( ) . toString ( ) ) ; setting . valueProperty ( ) . setValue ( oldList . get ( ) ) ; } else { setting . valueProperty ( ) . setValue ( oldValue . get ( ) ) ; } |
public class RunResultsGenerateHookSetter { /** * This may return false since multiple statement can be found in a line . */
private boolean isLineLastStament ( TestMethod method , int codeLineIndex ) { } } | CodeLine codeLine = method . getCodeBody ( ) . get ( codeLineIndex ) ; if ( codeLineIndex == method . getCodeBody ( ) . size ( ) - 1 ) { return true ; } CodeLine nextCodeLine = method . getCodeBody ( ) . get ( codeLineIndex + 1 ) ; assert codeLine . getEndLine ( ) <= nextCodeLine . getStartLine ( ) ; if ( codeLine . getEndLine ( ) == nextCodeLine . getStartLine ( ) ) { // if next statement exists in the same line ,
// this statement is not the last statement for the line
return false ; } return true ; |
public class XMLUtils { /** * Returns the path expression for a given node .
* Path expressions look like : Foo . Bar . Poo where elements are
* separated with a dot character .
* @ param node in DOM tree .
* @ return the path expression representing the node in DOM tree . */
public static String getNodesPathName ( Node node ) { } } | final StringBuffer buffer = new StringBuffer ( ) ; if ( node . getNodeType ( ) == Node . ATTRIBUTE_NODE ) { buildNodeName ( ( ( Attr ) node ) . getOwnerElement ( ) , buffer ) ; buffer . append ( "." ) ; buffer . append ( node . getLocalName ( ) ) ; } else { buildNodeName ( node , buffer ) ; } return buffer . toString ( ) ; |
public class Resources { /** * The the dictionary for the { @ code MorfologikMorphoTagger } .
* @ param lang
* the language
* @ param resourcesDirectory
* the directory where the dictionary can be found .
* If { @ code null } , load from package resources .
* @ return the URL of the dictionary */
public final URL getBinaryTaggerDict ( final String lang , final String resourcesDirectory ) { } } | return resourcesDirectory == null ? getBinaryTaggerDictFromResources ( lang ) : getBinaryTaggerDictFromDirectory ( lang , resourcesDirectory ) ; |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.