signature
stringlengths
43
39.1k
implementation
stringlengths
0
450k
public class OpenIntObjectHashMap { /** * Returns the value associated with the specified key . * It is often a good idea to first check with { @ link # containsKey ( int ) } whether the given key has a value associated or not , i . e . whether there exists an association for the given key or not . * @ param key the key to be searched for . * @ return the value associated with the specified key ; < tt > null < / tt > if no such key is present . */ public Object get ( int key ) { } }
int i = indexOfKey ( key ) ; if ( i < 0 ) return null ; // not contained return values [ i ] ;
public class Utils {

    /**
     * Substitute the variables in the given expression with the values from the
     * resolver.
     *
     * Scans the expression for references wrapped in the given open/close token
     * sequences and replaces each complete reference with the resolver's value.
     * A reference the resolver cannot resolve (returns null for) is emitted
     * verbatim, delimiters included. Partial delimiter matches are treated as
     * plain text.
     *
     * @param pResolver source of variable values
     * @param pExpression the text to process
     * @param pOpen the opening delimiter sequence
     * @param pClose the closing delimiter sequence
     * @return the expression with all resolvable references substituted
     */
    public static String replaceVariables(final VariableResolver pResolver, final String pExpression, final String pOpen, final String pClose) {
        final char[] open = pOpen.toCharArray();
        final char[] close = pClose.toCharArray();
        final StringBuilder out = new StringBuilder();  // finished output
        StringBuilder sb = new StringBuilder();         // text gathered since the last complete delimiter
        char[] last = null;                             // which delimiter was completed most recently
        int wo = 0;                                     // chars of `open` currently matched
        int wc = 0;                                     // chars of `close` currently matched
        int level = 0;                                  // open-delimiter nesting depth
        for (char c : pExpression.toCharArray()) {
            if (c == open[wo]) {
                if (wc > 0) {
                    // abandon a partial close match: it was plain text
                    sb.append(close, 0, wc);
                }
                wc = 0;
                wo++;
                if (open.length == wo) {
                    // found open
                    if (last == open) {
                        // two opens in a row: the earlier one was literal
                        out.append(open);
                    }
                    level++;
                    out.append(sb);
                    sb = new StringBuilder();
                    wo = 0;
                    last = open;
                }
            } else if (c == close[wc]) {
                if (wo > 0) {
                    // abandon a partial open match: it was plain text
                    sb.append(open, 0, wo);
                }
                wo = 0;
                wc++;
                if (close.length == wc) {
                    // found close
                    if (last == open) {
                        // sb holds a complete variable name: try to resolve it
                        final String variable = pResolver.get(sb.toString());
                        if (variable != null) {
                            out.append(variable);
                        } else {
                            // unresolved: emit the reference unchanged
                            out.append(open);
                            out.append(sb);
                            out.append(close);
                        }
                    } else {
                        // close without a pending open: literal text plus close
                        out.append(sb);
                        out.append(close);
                    }
                    sb = new StringBuilder();
                    level--;
                    wc = 0;
                    last = close;
                }
            } else {
                // ordinary character: flush any partial delimiter matches first
                if (wo > 0) {
                    sb.append(open, 0, wo);
                }
                if (wc > 0) {
                    sb.append(close, 0, wc);
                }
                sb.append(c);
                wo = wc = 0;
            }
        }
        // flush trailing partial delimiter matches
        if (wo > 0) {
            sb.append(open, 0, wo);
        }
        if (wc > 0) {
            sb.append(close, 0, wc);
        }
        if (level > 0) {
            // unbalanced open at end of input: restore its delimiter
            out.append(open);
        }
        out.append(sb);
        return out.toString();
    }
}
public class HessianInput {

    /**
     * Reads a byte array.
     *
     * <pre>
     * B b16 b8 data value
     * </pre>
     *
     * @return the bytes read, or null when the stream holds a serialized null
     * @throws IOException if the next tag is not a byte-array tag or reading fails
     */
    public byte[] readBytes() throws IOException {
        int tag = read();
        switch (tag) {
        case 'N':
            // serialized null
            return null;
        case 'B':
        case 'b':
            // 'B' marks the final chunk; 'b' means more chunks follow
            _isLastChunk = tag == 'B';
            // 16-bit big-endian chunk length (b16 b8)
            _chunkLength = (read() << 8) + read();
            ByteArrayOutputStream bos = new ByteArrayOutputStream();
            int data;
            // parseByte() consumes chunk boundaries and returns -1 at the end
            while ((data = parseByte()) >= 0)
                bos.write(data);
            return bos.toByteArray();
        default:
            throw expect("bytes", tag);
        }
    }
}
public class Expression { /** * Cast result object to a string . * @ param xctxt The XPath runtime context . * @ return The string this wraps or the empty string if null * @ throws javax . xml . transform . TransformerException */ public XMLString xstr ( XPathContext xctxt ) throws javax . xml . transform . TransformerException { } }
return execute ( xctxt ) . xstr ( ) ;
public class AmazonRedshiftClient { /** * Returns one or more cluster subnet group objects , which contain metadata about your cluster subnet groups . By * default , this operation returns information about all cluster subnet groups that are defined in you AWS account . * If you specify both tag keys and tag values in the same request , Amazon Redshift returns all subnet groups that * match any combination of the specified keys and values . For example , if you have < code > owner < / code > and * < code > environment < / code > for tag keys , and < code > admin < / code > and < code > test < / code > for tag values , all subnet * groups that have any combination of those values are returned . * If both tag keys and values are omitted from the request , subnet groups are returned regardless of whether they * have tag keys or values associated with them . * @ param describeClusterSubnetGroupsRequest * @ return Result of the DescribeClusterSubnetGroups operation returned by the service . * @ throws ClusterSubnetGroupNotFoundException * The cluster subnet group name does not refer to an existing cluster subnet group . * @ throws InvalidTagException * The tag is invalid . * @ sample AmazonRedshift . DescribeClusterSubnetGroups * @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / redshift - 2012-12-01 / DescribeClusterSubnetGroups " * target = " _ top " > AWS API Documentation < / a > */ @ Override public DescribeClusterSubnetGroupsResult describeClusterSubnetGroups ( DescribeClusterSubnetGroupsRequest request ) { } }
request = beforeClientExecution ( request ) ; return executeDescribeClusterSubnetGroups ( request ) ;
public class Templates { /** * 获取模板 , 容易被微信的文档误导 , 这里其实是获取模板到服务号的管理端 , 并不单纯是获取ID那么简单 * 对于相同的code , 每调用一次微信后台会生成一条新的记录 * @ param code 模板库中模板的编号 , 有 “ TM * * ” 和 “ OPENTMTM * * ” 等形式 * @ return */ public String fetch ( String code ) { } }
String url = WxEndpoint . get ( "url.template.get" ) ; String json = String . format ( "{\"template_id_short\":\"%s\"}" , code ) ; logger . debug ( "template message, get template id: {}" , json ) ; String response = wxClient . post ( url , json ) ; Map < String , Object > result = JsonMapper . defaultMapper ( ) . json2Map ( response ) ; if ( result . containsKey ( "template_id" ) ) { return result . get ( "template_id" ) . toString ( ) ; } else { throw new WxRuntimeException ( 999 , "fetch template id failed." ) ; }
public class MarkLogicRepositoryConnection { /** * overload for prepareBooleanQuery * @ param queryString * @ return MarkLogicBooleanQuery * @ throws RepositoryException * @ throws MalformedQueryException */ @ Override public MarkLogicBooleanQuery prepareBooleanQuery ( String queryString ) throws RepositoryException , MalformedQueryException { } }
return prepareBooleanQuery ( QueryLanguage . SPARQL , queryString , null ) ;
public class CLIQUE { /** * Updates the minima and maxima array according to the specified feature * vector . * @ param featureVector the feature vector * @ param minima the array of minima * @ param maxima the array of maxima */ private void updateMinMax ( NumberVector featureVector , double [ ] minima , double [ ] maxima ) { } }
assert ( minima . length == featureVector . getDimensionality ( ) ) ; for ( int d = 0 ; d < featureVector . getDimensionality ( ) ; d ++ ) { double v = featureVector . doubleValue ( d ) ; if ( v == v ) { // Avoid NaN . maxima [ d ] = MathUtil . max ( v , maxima [ d ] ) ; minima [ d ] = MathUtil . min ( v , minima [ d ] ) ; } }
public class AsynchronousRequest { /** * For more info on event detail API go < a href = " https : / / wiki . guildwars2 . com / wiki / API : 1 / event _ details " > here < / a > < br / > * @ param callback callback that is going to be used for { @ link Call # enqueue ( Callback ) } * @ throws NullPointerException if given { @ link Callback } is null * @ see EventDetail event detail */ public void getAllEventDetailedInfo ( Callback < EventDetail > callback ) throws NullPointerException { } }
gw2API . getAllEventDetailedInfo ( GuildWars2 . lang . getValue ( ) ) . enqueue ( callback ) ;
public class AmazonPinpointEmailClient { /** * Create a new pool of dedicated IP addresses . A pool can include one or more dedicated IP addresses that are * associated with your Amazon Pinpoint account . You can associate a pool with a configuration set . When you send an * email that uses that configuration set , Amazon Pinpoint sends it using only the IP addresses in the associated * pool . * @ param createDedicatedIpPoolRequest * A request to create a new dedicated IP pool . * @ return Result of the CreateDedicatedIpPool operation returned by the service . * @ throws AlreadyExistsException * The resource specified in your request already exists . * @ throws LimitExceededException * There are too many instances of the specified resource type . * @ throws TooManyRequestsException * Too many requests have been made to the operation . * @ throws BadRequestException * The input you provided is invalid . * @ throws ConcurrentModificationException * The resource is being modified by another operation or thread . * @ sample AmazonPinpointEmail . CreateDedicatedIpPool * @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / pinpoint - email - 2018-07-26 / CreateDedicatedIpPool " * target = " _ top " > AWS API Documentation < / a > */ @ Override public CreateDedicatedIpPoolResult createDedicatedIpPool ( CreateDedicatedIpPoolRequest request ) { } }
request = beforeClientExecution ( request ) ; return executeCreateDedicatedIpPool ( request ) ;
public class CmsGalleryService {

    /**
     * Generates the pre-loaded contents for the VFS tab of the gallery dialog.<p>
     *
     * Builds a tree loader that converts resources into entry beans (resolving
     * the title property and the user's write permission), gathers the resources
     * whose tree nodes were open in the saved state plus the saved search
     * folders, and preloads the tree from those. Individual unreadable
     * resources are skipped with a warning rather than failing the whole tree.
     *
     * @param cms the current CMS context
     * @param vfsState the saved VFS tree state (may be null)
     * @param folders the saved search folders (may be null)
     * @return the root tree entry for the VFS tab, or null if preloading failed
     */
    public static CmsVfsEntryBean generateVfsPreloadData(final CmsObject cms, final CmsTreeOpenState vfsState, final Set<String> folders) {
        CmsVfsEntryBean vfsPreloadData = null;
        // Loader converting each resource into a VFS entry bean.
        A_CmsTreeTabDataPreloader<CmsVfsEntryBean> vfsloader = new A_CmsTreeTabDataPreloader<CmsVfsEntryBean>() {

            @SuppressWarnings("synthetic-access")
            @Override
            protected CmsVfsEntryBean createEntry(CmsObject innerCms, CmsResource resource) throws CmsException {
                String title = innerCms.readPropertyObject(resource, CmsPropertyDefinition.PROPERTY_TITLE, false).getValue();
                boolean isEditable = false;
                try {
                    isEditable = innerCms.hasPermissions(resource, CmsPermissionSet.ACCESS_WRITE, false, CmsResourceFilter.ALL);
                } catch (CmsException e) {
                    // a failed permission check just means "not editable"
                    LOG.info(e.getLocalizedMessage(), e);
                }
                return internalCreateVfsEntryBean(innerCms, resource, title, true, isEditable, null, false);
            }
        };
        // Resources whose tree nodes were open in the saved state.
        Set<CmsResource> treeOpenResources = Sets.newHashSet();
        if (vfsState != null) {
            for (CmsUUID structureId : vfsState.getOpenItems()) {
                try {
                    treeOpenResources.add(cms.readResource(structureId, CmsResourceFilter.ONLY_VISIBLE_NO_DELETED));
                } catch (CmsException e) {
                    // stale or unreadable id: skip it, keep building the tree
                    LOG.warn(e.getLocalizedMessage(), e);
                }
            }
        }
        // Read the saved search folders from the root site ("" site root).
        CmsObject rootCms = null;
        Set<CmsResource> folderSetResources = Sets.newHashSet();
        try {
            rootCms = OpenCms.initCmsObject(cms);
            rootCms.getRequestContext().setSiteRoot("");
            if (!((folders == null) || folders.isEmpty())) {
                for (String folder : folders) {
                    try {
                        folderSetResources.add(rootCms.readResource(folder, CmsResourceFilter.ONLY_VISIBLE_NO_DELETED));
                    } catch (CmsException e) {
                        // skip folders that can no longer be read
                        LOG.warn(e.getLocalizedMessage(), e);
                    }
                }
            }
        } catch (CmsException e1) {
            LOG.error(e1.getLocalizedMessage(), e1);
        }
        try {
            vfsPreloadData = vfsloader.preloadData(cms, treeOpenResources, folderSetResources);
        } catch (CmsException e) {
            // preload failure leaves vfsPreloadData null for the caller
            LOG.error(e.getLocalizedMessage(), e);
        }
        return vfsPreloadData;
    }
}
public class LinguisticServices { /** * Returns true if the spell check is positive */ @ Override public boolean isCorrectSpell ( String word , Language lang ) { } }
return isCorrectSpell ( word , getLocale ( lang ) ) ;
public class QueryParsers { /** * Add one or more languages to this engine by supplying the corresponding parsers . * @ param firstLanguage the query parser for the first language * @ param additionalLanguages the query parsers for the additional languages * @ throws IllegalArgumentException if the language parser is null */ public void addLanguages ( QueryParser firstLanguage , QueryParser ... additionalLanguages ) { } }
addLanguage ( firstLanguage ) ; for ( QueryParser language : additionalLanguages ) { addLanguage ( language ) ; }
public class LargeBlockManager {

    /**
     * Store the given block with the given ID to disk.
     *
     * The whole operation runs under the access lock so the containment check,
     * the file write and the map update are atomic with respect to other
     * accessors. The buffer is written from position 0 and the caller's
     * position is restored afterwards, so the buffer appears untouched.
     *
     * @param blockId the ID of the block
     * @param block the bytes for the block
     * @throws IOException if the file write fails
     * @throws IllegalArgumentException if a block with this ID is already stored
     */
    void storeBlock(BlockId blockId, ByteBuffer block) throws IOException {
        synchronized (m_accessLock) {
            if (m_blockPathMap.containsKey(blockId)) {
                throw new IllegalArgumentException("Request to store block that is already stored: " + blockId.toString());
            }
            // write from the start of the buffer, then restore the caller's position
            int origPosition = block.position();
            block.position(0);
            Path blockPath = makeBlockPath(blockId);
            try (SeekableByteChannel channel = Files.newByteChannel(blockPath, OPEN_OPTIONS, PERMISSIONS)) {
                channel.write(block);
            } finally {
                block.position(origPosition);
            }
            // record the mapping only after the write has succeeded
            m_blockPathMap.put(blockId, blockPath);
        }
    }
}
public class FileStorage { /** * Handle input by writing it to the file , if a channel exists . * @ param event the event * @ param channel the channel */ @ Handler public void onInput ( Input < ByteBuffer > event , Channel channel ) { } }
Writer writer = inputWriters . get ( channel ) ; if ( writer != null ) { writer . write ( event . buffer ( ) ) ; }
public class TrellisWebDAV {

    /**
     * Update properties on a resource.
     *
     * Responds asynchronously: the resource is fetched, checked for existence,
     * the property update is applied producing a multi-status document, and the
     * suspended response is resumed with it (or with the mapped exception).
     *
     * @param response the async response
     * @param request the request
     * @param uriInfo the URI info
     * @param headers the headers
     * @param security the security context
     * @param propertyUpdate the property update request
     * @throws ParserConfigurationException if the XML parser is not properly configured
     */
    @PROPPATCH
    @Consumes({APPLICATION_XML})
    @Produces({APPLICATION_XML})
    @Timed
    public void updateProperties(@Suspended final AsyncResponse response, @Context final Request request, @Context final UriInfo uriInfo, @Context final HttpHeaders headers, @Context final SecurityContext security, final DavPropertyUpdate propertyUpdate) throws ParserConfigurationException {
        final Document doc = getDocument();
        final TrellisRequest req = new TrellisRequest(request, uriInfo, headers, security);
        final IRI identifier = rdf.createIRI(TRELLIS_DATA_PREFIX + req.getPath());
        final String baseUrl = getBaseUrl(req);
        final String location = fromUri(baseUrl).path(req.getPath()).build().toString();
        final Session session = getSession(req.getPrincipalName());
        // async pipeline: fetch -> existence check -> apply update -> 207 response;
        // handleException maps any failure to an error response before resuming
        services.getResourceService().get(identifier).thenApply(this::checkResource).thenCompose(resourceToMultiStatus(doc, identifier, location, baseUrl, session, propertyUpdate)).thenApply(multistatus -> status(MULTI_STATUS).entity(multistatus).build()).exceptionally(this::handleException).thenApply(response::resume);
    }
}
public class TimelineCallback { /** * On simon creation a timeline attribute is added ( for Stopwatches only ) . * @ param simon created simon */ @ Override public void onSimonCreated ( Simon simon ) { } }
if ( simon instanceof Stopwatch ) { simon . setAttribute ( timelineAttributeName , new StopwatchTimeline ( timelineCapacity , timeRangeWidth ) ) ; }
public class ServerStatusTool { /** * Show a human - readable form of the latest message in the server status * file . If the status file doesn ' t yet exist , this will print a special * status message indicating the server is new . The response will have the * following form . * < pre > * STATE : Some State * AS OF : 2006-03-29 06:44:23AM EST * DETAIL : Detail line 1 , if it exists * Detail line 2 , if it exists * Detail line 3 , etc . . * < / pre > */ public void showStatus ( ) throws Exception { } }
ServerStatusMessage message ; if ( _statusFile . exists ( ) ) { ServerStatusMessage [ ] messages = getAllMessages ( ) ; message = messages [ messages . length - 1 ] ; } else { message = ServerStatusMessage . NEW_SERVER_MESSAGE ; } System . out . println ( message . toString ( ) ) ;
public class OsmConnection {

    /**
     * Make a request to the Http Osm Api.
     *
     * @param call HTTP path and URL parameters (if any)
     * @param method HTTP method. Defaults to "GET"
     * @param authenticate whether to make this request as a logged in user
     * @param writer the writer to send the request payload to the server. If null, no payload is sent
     * @param reader the reader to parse the server's response and return an instance of T.
     *               If null, the server's response is ignored
     * @return an instance of T, or null when no reader was supplied
     */
    public <T> T makeRequest(String call, String method, boolean authenticate, ApiRequestWriter writer, ApiResponseReader<T> reader) {
        HttpURLConnection connection = null;
        try {
            connection = sendRequest(call, method, authenticate, writer);
            // raises on non-success HTTP status codes
            handleResponseCode(connection);
            if (reader != null)
                return handleResponse(connection, reader);
            else
                return null;
        } catch (IOException e) {
            // transport-level failures become connection exceptions
            throw new OsmConnectionException(e);
        } catch (OAuthException e) {
            // because it was the user's fault that he did not supply an oauth consumer and the
            // error is kinda related with the call he made
            throw new OsmAuthorizationException(e);
        } finally {
            // always release the connection, even on failure
            if (connection != null) connection.disconnect();
        }
    }
}
public class OMVRBTreeMapEntryProvider {

    /**
     * Serialize only the new values or the changed.
     *
     * Entries whose recorded serialized position is <= 0 are new or modified
     * and must be marshalled; otherwise the previously stored bytes are reused.
     *
     * @param iIndex index of the value to serialize
     * @return the serialized bytes for the value at iIndex
     * @throws IOException if marshalling fails
     */
    protected byte[] serializeStreamValue(final int iIndex) throws IOException {
        if (serializedValues[iIndex] <= 0) {
            // NEW OR MODIFIED: MARSHALL CONTENT
            OProfiler.getInstance().updateCounter("OMVRBTreeMapEntry.serializeValue", 1);
            return ((OMVRBTreeMapProvider<K, V>) treeDataProvider).valueSerializer.toStream(values[iIndex]);
        }
        // RETURN ORIGINAL CONTENT
        return stream.getAsByteArray(serializedValues[iIndex]);
    }
}
public class NotifierUtils { /** * return all the ancestors of the given path , include itself . * @ param eventPath * @ return */ public static List < String > getAllAncestors ( String eventPath ) { } }
// check if the path is valid . if ( eventPath == null || ! eventPath . startsWith ( Path . SEPARATOR ) ) { return null ; } if ( eventPath . equals ( Path . SEPARATOR ) ) { return Arrays . asList ( Path . SEPARATOR ) ; } List < String > ancestors = new ArrayList < String > ( ) ; while ( eventPath . length ( ) > 0 ) { ancestors . add ( eventPath ) ; eventPath = eventPath . substring ( 0 , eventPath . lastIndexOf ( Path . SEPARATOR ) ) ; } // add the root directory ancestors . add ( Path . SEPARATOR ) ; return ancestors ;
public class StreamingJsonBuilder { /** * Delegates to { @ link # call ( Iterable , Closure ) } */ public Object call ( Collection coll , @ DelegatesTo ( StreamingJsonDelegate . class ) Closure c ) throws IOException { } }
return call ( ( Iterable ) coll , c ) ;
public class TimedMessage { /** * Add a timed message . * @ param message The message string . * @ param x The horizontal location . * @ param y The vertical location . * @ param time The remaining time . */ public void addMessage ( String message , int x , int y , long time ) { } }
messages . add ( new MessageData ( message , x , y , time ) ) ; hasMessage = true ;
public class DateUtil { /** * 偏移周 * @ param date 日期 * @ param offset 偏移周数 , 正数向未来偏移 , 负数向历史偏移 * @ return 偏移后的日期 */ public static DateTime offsetWeek ( Date date , int offset ) { } }
return offset ( date , DateField . WEEK_OF_YEAR , offset ) ;
public class ReferNotifySender { /** * Same as the other processRefer ( ) except allows for a couple of overrides for testing error * handling by the far end outbound REFER side : ( a ) takes a duration for adding ExpiresHeader to * the REFER response ( the response shouldn ' t have an expires header ) , and ( b ) this method takes * an EventHeader for overriding what would normally / correctly be sent back in the response * ( normally same as what was received in the request ) . */ public void processRefer ( long timeout , int statusCode , String reasonPhrase , int duration , EventHeader overrideEvent ) { } }
setErrorMessage ( "" ) ; PhoneB b = new PhoneB ( timeout + 500 , statusCode , reasonPhrase , duration , overrideEvent ) ; b . start ( ) ;
public class Convert { /** * 转换为Date < br > * 如果给定的值为空 , 或者转换失败 , 返回默认值 < br > * 转换失败不会报错 * @ param value 被转换的值 * @ param defaultValue 转换错误时的默认值 * @ return 结果 * @ since 4.1.6 */ public static Date toDate ( Object value , Date defaultValue ) { } }
return convert ( Date . class , value , defaultValue ) ;
public class ChocoMapper {

    /**
     * Register a mapping between an api-side view and its choco implementation.
     * It is expected from the implementation to exhibit a constructor that
     * takes the api-side constraint as argument.
     *
     * NOTE(review): despite the documented IllegalArgumentException, no
     * constructor check is performed here -- presumably validation happens when
     * the view is later instantiated. Confirm against callers before relying
     * on the documented exception.
     *
     * @param c the api-side view
     * @param cc the choco implementation
     * @throws IllegalArgumentException if there is no suitable constructor for the choco implementation
     */
    public void mapView(Class<? extends ModelView> c, Class<? extends ChocoView> cc) {
        views.put(c, cc);
    }
}
public class CliUtils { /** * Returns IndexOperator from string representation * @ param operator - string representing IndexOperator ( = , > = , > , < , < = ) * @ return IndexOperator - enum value of IndexOperator or null if not found */ public static IndexOperator getIndexOperator ( String operator ) { } }
if ( operator . equals ( "=" ) ) { return IndexOperator . EQ ; } else if ( operator . equals ( ">=" ) ) { return IndexOperator . GTE ; } else if ( operator . equals ( ">" ) ) { return IndexOperator . GT ; } else if ( operator . equals ( "<" ) ) { return IndexOperator . LT ; } else if ( operator . equals ( "<=" ) ) { return IndexOperator . LTE ; } return null ;
public class ConsumerDispatcherState { /** * Sets the noLocal flag . * @ param noLocal The noLocal to set */ public void setNoLocal ( boolean noLocal ) { } }
if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) SibTr . entry ( this , tc , "setNoLocal" , Boolean . valueOf ( noLocal ) ) ; this . noLocal = noLocal ; if ( TraceComponent . isAnyTracingEnabled ( ) && tc . isEntryEnabled ( ) ) SibTr . exit ( this , tc , "setNoLocal" ) ;
public class RenderAPI { /** * TODO 数据模型为对象而不仅仅是Map */ @ Deprecated public static void debug ( XWPFTemplate template , Map < String , Object > datas ) { } }
List < ElementTemplate > all = template . getElementTemplates ( ) ; LOGGER . debug ( "Template tag number is:{}" , ( null == all ? 0 : all . size ( ) ) ) ; if ( ( all == null || all . isEmpty ( ) ) && ( null == datas || datas . isEmpty ( ) ) ) { LOGGER . debug ( "No template gramer find and no render data find" ) ; return ; } Set < String > tagtKeys = new HashSet < String > ( ) ; for ( ElementTemplate ele : all ) { LOGGER . debug ( "Parse the tag:{}" , ele . getTagName ( ) ) ; tagtKeys . add ( ele . getTagName ( ) ) ; } Set < String > keySet = datas . keySet ( ) ; HashSet < String > copySet = new HashSet < String > ( keySet ) ; copySet . removeAll ( tagtKeys ) ; Iterator < String > iterator = copySet . iterator ( ) ; while ( iterator . hasNext ( ) ) { String key = iterator . next ( ) ; LOGGER . warn ( "Cannot find the gramer tag in template:" + key ) ; } tagtKeys . removeAll ( keySet ) ; iterator = tagtKeys . iterator ( ) ; while ( iterator . hasNext ( ) ) { String key = iterator . next ( ) ; LOGGER . warn ( "Cannot find the feild in java Map or Object:" + key ) ; }
public class ObservableReplay { /** * Creates a replaying ConnectableObservable with an unbounded buffer . * @ param < T > the value type * @ param source the source observable * @ return the new ConnectableObservable instance */ @ SuppressWarnings ( "unchecked" ) public static < T > ConnectableObservable < T > createFrom ( ObservableSource < ? extends T > source ) { } }
ObservableSource < T > sourceCast = ( ObservableSource < T > ) source ; return create ( sourceCast , DEFAULT_UNBOUNDED_FACTORY ) ;
public class scparameter { /** * Use this API to unset the properties of scparameter resource . * Properties that need to be unset are specified in args array . */ public static base_response unset ( nitro_service client , scparameter resource , String [ ] args ) throws Exception { } }
scparameter unsetresource = new scparameter ( ) ; return unsetresource . unset_resource ( client , args ) ;
public class SuperHero { /** * Creates a new instance of { @ link SuperHero } . */ public static SuperHero create ( String name , List < String > powers , Optional < Civilian > alterEgo ) { } }
return new AutoValue_SuperHero ( name , powers , alterEgo ) ;
public class Ifc2x3tc1FactoryImpl { /** * < ! - - begin - user - doc - - > * < ! - - end - user - doc - - > * @ generated */ public String convertIfcProcedureTypeEnumToString ( EDataType eDataType , Object instanceValue ) { } }
return instanceValue == null ? null : instanceValue . toString ( ) ;
public class KeystoreManager { /** * Loads a keystore . * @ param server the server the keystore is intended for * @ param keystore a keystore containing your private key and the certificate signed by Apple ( File , InputStream , byte [ ] , KeyStore or String for a file path ) * @ return a loaded keystore * @ throws KeystoreException */ static KeyStore loadKeystore ( AppleServer server , Object keystore ) throws KeystoreException { } }
return loadKeystore ( server , keystore , false ) ;
public class ArrayIntSet { /** * Creates an array of shorts from the contents of this set . Any values outside the range of a * short will be truncated by way of a cast . */ public short [ ] toShortArray ( ) { } }
short [ ] values = new short [ _size ] ; for ( int ii = 0 ; ii < _size ; ii ++ ) { values [ ii ] = ( short ) _values [ ii ] ; } return values ;
public class BatchUtil { /** * Given an ( ordered ) { @ link Collection } of non - < code > null < / code > , iterate * over the collection and return a list of ranges of the given batchSize . The * last range may be smaller and contains the remainder if the collection is * not equally divisible in batchSize ranges . * @ param < E > * The class of Elements in the collection out of which to create * ranges . * @ param collection * of non - < code > null < / code > elements to chop up in ranges . * @ param batchSize * the size to chop the collection into . Must be larger than * < code > 1 < / code > and can be larger than the collection . * @ return a non - null list of ranges . * For example , < code > getRanges ( [ 1,2,3 ] , 1 ) < / code > is * < code > [ 1-1,2-2,3-3 ] < / code > and * < code > getRanges ( [ 1,2,3,4,5,6,7,8,9,10 ] , 3 ) < / code > is * < code > [ 1-3,4-6,7-9,10-10 ] < / code > and * < code > getRanges ( [ 1,2,3,4,5,6,7,8,9,10 ] , 17 ) < / code > is * < code > [ 1-10 ] < / code > */ public static < E extends Comparable < E > > List < Range < E > > getRanges ( Collection < E > collection , int batchSize ) { } }
List < Range < E > > rangeList = new LinkedList < Range < E > > ( ) ; E currentMin = null ; // Check for edge cases if ( ( collection != null ) && ( collection . size ( ) > 0 ) && ( batchSize > 0 ) ) { int index = 1 ; for ( E element : collection ) { // Bootstrap first element in the next range if ( currentMin == null ) { currentMin = element ; } int mod = index % batchSize ; // On each batchSize items ( and the last one ) create a range if ( ( mod == 0 ) || ( index == collection . size ( ) ) ) { Range < E > range = new Range < E > ( currentMin , element ) ; rangeList . add ( range ) ; currentMin = null ; } index ++ ; } } return rangeList ;
public class TranslateAndSynthesizeExample { /** * Write the input stream to a file . */ private static void writeToFile ( InputStream in , File file ) { } }
try { OutputStream out = new FileOutputStream ( file ) ; byte [ ] buf = new byte [ 1024 ] ; int len ; while ( ( len = in . read ( buf ) ) > 0 ) { out . write ( buf , 0 , len ) ; } out . close ( ) ; in . close ( ) ; } catch ( Exception e ) { e . printStackTrace ( ) ; }
public class PersistentNode { /** * You must call start ( ) to initiate the persistent node . An attempt to create the node * in the background will be started */ public void start ( ) { } }
Preconditions . checkState ( state . compareAndSet ( State . LATENT , State . STARTED ) , "Already started" ) ; client . getConnectionStateListenable ( ) . addListener ( connectionStateListener ) ; createNode ( ) ;
public class HtmlTree { /** * Adds an attribute for the HTML tag . * @ param attrName name of the attribute * @ param attrValue value of the attribute */ public void addAttr ( HtmlAttr attrName , String attrValue ) { } }
if ( attrs . isEmpty ( ) ) attrs = new LinkedHashMap < HtmlAttr , String > ( 3 ) ; attrs . put ( nullCheck ( attrName ) , escapeHtmlChars ( attrValue ) ) ;
public class DocumentStore {

    /**
     * Initializes this instance. Idempotent: repeat calls return immediately.
     * On failure the store is closed and the unwrapped cause is rethrown.
     *
     * @return this store, for chaining
     */
    @Override
    public IDocumentStore initialize() {
        if (initialized) {
            // already initialized -- no-op
            return this;
        }
        assertValidConfiguration();
        try {
            if (getConventions().getDocumentIdGenerator() == null) { // don't overwrite what the user is doing
                MultiDatabaseHiLoIdGenerator generator = new MultiDatabaseHiLoIdGenerator(this, getConventions());
                _multiDbHiLo = generator;
                getConventions().setDocumentIdGenerator(generator::generateDocumentId);
            }
            // conventions become immutable once the store is live
            getConventions().freeze();
            initialized = true;
        } catch (Exception e) {
            // initialization failed: release any partially acquired resources
            close();
            throw ExceptionsUtils.unwrapException(e);
        }
        return this;
    }
}
public class ObjectFactory { /** * Create an instance of { @ link JAXBElement } { @ code < } { @ link String } { @ code > } } */ @ XmlElementDecl ( namespace = "" , name = "skipped" ) public JAXBElement < String > createSkipped ( String value ) { } }
return new JAXBElement < String > ( _Skipped_QNAME , String . class , null , value ) ;
public class TrainingsImpl { /** * Update a specific project . * @ param projectId The id of the project to update * @ param updatedProject The updated project model * @ throws IllegalArgumentException thrown if parameters fail the validation * @ throws CloudException thrown if the request is rejected by server * @ throws RuntimeException all other wrapped checked exceptions if the request fails to be sent * @ return the Project object if successful . */ public Project updateProject ( UUID projectId , Project updatedProject ) { } }
return updateProjectWithServiceResponseAsync ( projectId , updatedProject ) . toBlocking ( ) . single ( ) . body ( ) ;
public class ValidationResult { /** * Create an OperationOutcome resource which contains all of the messages found as a result of this validation */ public IBaseOperationOutcome toOperationOutcome ( ) { } }
IBaseOperationOutcome oo = ( IBaseOperationOutcome ) myCtx . getResourceDefinition ( "OperationOutcome" ) . newInstance ( ) ; populateOperationOutcome ( oo ) ; return oo ;
public class CmsWorkplace { /** * Returns the html for an invisible spacer between button bar contents like buttons , labels , etc . < p > * @ param width the width of the invisible spacer * @ return the html for the invisible spacer */ public String buttonBarSpacer ( int width ) { } }
StringBuffer result = new StringBuffer ( 128 ) ; result . append ( "<td><span class=\"norm\"><span unselectable=\"on\" class=\"txtbutton\" style=\"width: " ) ; result . append ( width ) ; result . append ( "px;\"></span></span></td>\n" ) ; return result . toString ( ) ;
public class CommerceNotificationQueueEntryPersistenceImpl { /** * Returns the first commerce notification queue entry in the ordered set where sentDate & lt ; & # 63 ; . * @ param sentDate the sent date * @ param orderByComparator the comparator to order the set by ( optionally < code > null < / code > ) * @ return the first matching commerce notification queue entry , or < code > null < / code > if a matching commerce notification queue entry could not be found */ @ Override public CommerceNotificationQueueEntry fetchByLtS_First ( Date sentDate , OrderByComparator < CommerceNotificationQueueEntry > orderByComparator ) { } }
List < CommerceNotificationQueueEntry > list = findByLtS ( sentDate , 0 , 1 , orderByComparator ) ; if ( ! list . isEmpty ( ) ) { return list . get ( 0 ) ; } return null ;
public class CombinedWriter {
    /**
     * {@inheritDoc}
     *
     * Writes the uber bucket with both writers: the secondary write is scheduled
     * asynchronously on the executor while the primary write runs synchronously
     * on the calling thread.
     */
    @Override
    public void writeUberBucket(final UberBucket pBucket) throws TTException {
        // The Future returned by submit() is discarded, so any exception thrown
        // by mSecondWriter stays inside the executor and is never observed here.
        // NOTE(review): consider retaining the Future to surface failures.
        mService.submit(new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                mSecondWriter.writeUberBucket(pBucket);
                return null;
            }
        });
        // Primary write is synchronous; its exceptions propagate to the caller.
        mFirstWriter.writeUberBucket(pBucket);
    }
}
public class NetworkModule { /** * http : / / stackoverflow . com / questions / 11555366 / enable - disable - data - connection - in - android - programmatically * Try to enabled / disable mobile network state using reflection . * Returns true if succeeded * @ param enabled */ private boolean setMobileNetworkEnabled ( ConnectivityManager connectivityManager , boolean enabled ) { } }
try { final Class conmanClass = Class . forName ( connectivityManager . getClass ( ) . getName ( ) ) ; final Field iConnectivityManagerField = conmanClass . getDeclaredField ( "mService" ) ; iConnectivityManagerField . setAccessible ( true ) ; final Object iConnectivityManager = iConnectivityManagerField . get ( connectivityManager ) ; final Class iConnectivityManagerClass = Class . forName ( iConnectivityManager . getClass ( ) . getName ( ) ) ; final Method setMobileDataEnabledMethod = iConnectivityManagerClass . getDeclaredMethod ( "setMobileDataEnabled" , Boolean . TYPE ) ; setMobileDataEnabledMethod . setAccessible ( true ) ; setMobileDataEnabledMethod . invoke ( iConnectivityManager , enabled ) ; return true ; } catch ( ClassNotFoundException e ) { } catch ( InvocationTargetException e ) { } catch ( NoSuchMethodException e ) { } catch ( IllegalAccessException e ) { } catch ( NoSuchFieldException e ) { } return false ;
public class SimpleDOReader {
    /**
     * {@inheritDoc}
     *
     * Pure delegation: forwards the triple-pattern query to the underlying
     * digital object's relationship store.
     */
    @Override
    public Set<RelationshipTuple> getRelationships(SubjectNode subject, PredicateNode predicate, ObjectNode object) {
        return m_obj.getRelationships(subject, predicate, object);
    }
}
public class AbstractFrame { /** * Saves the location of this frame , but only , if window state is ' normal ' . * If window state is iconfied or maximized , the location is not saved ! * @ param point */ private void saveWindowLocation ( Point point ) { } }
if ( point != null ) { if ( getExtendedState ( ) == Frame . NORMAL ) { if ( logger . isDebugEnabled ( ) ) logger . debug ( "Saving preference " + PREF_WINDOW_POSITION + "=" + point . x + "," + point . y ) ; this . preferences . put ( prefnzPrefix + PREF_WINDOW_POSITION , point . x + "," + point . y ) ; } else { if ( logger . isDebugEnabled ( ) ) logger . debug ( "Preference " + PREF_WINDOW_POSITION + " not saved, cause window state is not 'normal'." ) ; } }
public class EntryStream { /** * Returns a new { @ code EntryStream } which is a concatenation of two * supplied key - value pairs and this stream . * This is a < a href = " package - summary . html # StreamOps " > quasi - intermediate * operation < / a > with < a href = " package - summary . html # TSO " > tail - stream * optimization < / a > . * @ param k1 the key of the first { @ code Entry } to prepend to this stream * @ param v1 the value of the first { @ code Entry } to prepend to this stream * @ param k2 the key of the second { @ code Entry } to prepend to this stream * @ param v2 the value of the second { @ code Entry } to prepend to this stream * @ return the new stream * @ since 0.2.3 */ public EntryStream < K , V > prepend ( K k1 , V v1 , K k2 , V v2 ) { } }
@ SuppressWarnings ( "unchecked" ) SimpleImmutableEntry < K , V > [ ] array = new SimpleImmutableEntry [ ] { new SimpleImmutableEntry < > ( k1 , v1 ) , new SimpleImmutableEntry < > ( k2 , v2 ) } ; return prependSpliterator ( null , Spliterators . spliterator ( array , Spliterator . ORDERED ) ) ;
public class XMLUtil {
    /**
     * Read an enumeration value.
     *
     * @param <T> is the type of the enumeration.
     * @param document is the XML document to explore.
     * @param type is the type of the enumeration.
     * @param caseSensitive indicates if the {@code path}'s components are case sensitive.
     * @param path is the list of element names ended by the attribute's name.
     * @return the value of the enumeration or <code>null</code> if none.
     */
    @Pure
    public static <T extends Enum<T>> T getAttributeEnum(Node document, Class<T> type, boolean caseSensitive, String... path) {
        assert document != null : AssertMessages.notNullParameter(0);
        // Delegate with a null default value so a missing attribute yields null.
        return getAttributeEnumWithDefault(document, type, caseSensitive, null, path);
    }
}
public class FileSystem {
    /**
     * Initializes output directories on distributed file systems according to the given write mode.
     *
     * WriteMode.NO_OVERWRITE: an existing file or directory raises an exception.
     * WriteMode.OVERWRITE: an existing file or directory is deleted and (if requested)
     * replaced by a new directory.
     *
     * @param outPath Output path that should be prepared.
     * @param writeMode Write mode to consider.
     * @param createDirectory True, to initialize a directory at the given path, false otherwise.
     * @return True, if the path was successfully prepared, false otherwise.
     * @throws IOException if the path exists and the write mode is NO_OVERWRITE
     */
    public boolean initOutPathDistFS(Path outPath, WriteMode writeMode, boolean createDirectory) throws IOException {
        // Only meaningful on distributed file systems; local FS is handled elsewhere.
        if (!this.isDistributedFS()) {
            return false;
        }
        // check if path exists
        if (this.exists(outPath)) {
            // path exists, check write mode
            switch (writeMode) {
            case NO_OVERWRITE:
                // file or directory may not be overwritten
                throw new IOException("File or directory already exists. Existing files and directories are not overwritten in "
                        + WriteMode.NO_OVERWRITE.name() + " mode. Use " + WriteMode.OVERWRITE.name()
                        + " mode to overwrite existing files and directories.");
            case OVERWRITE:
                // output path exists. We delete it and all contained files in case of a directory.
                try {
                    this.delete(outPath, true);
                } catch (IOException ioe) {
                    // Some other thread might already have deleted the path.
                    // If - for some other reason - the path could not be deleted,
                    // this will be handled later (the final exists() checks decide).
                }
                break;
            default:
                throw new IllegalArgumentException("Invalid write mode: " + writeMode);
            }
        }
        if (createDirectory) {
            // Output directory needs to be created
            try {
                if (!this.exists(outPath)) {
                    this.mkdirs(outPath);
                }
            } catch (IOException ioe) {
                // Some other thread might already have created the directory.
                // If - for some other reason - the directory could not be created
                // and the path does not exist, this will be handled later.
            }
            // double check that the output directory exists
            return this.exists(outPath) && this.getFileStatus(outPath).isDir();
        } else {
            // check that the output path does not exist and an output file can be created by the output format.
            return !this.exists(outPath);
        }
    }
}
public class Hidden {
    /**
     * Generate the hidden input tag.
     *
     * Resolves the tag's value (explicit data input overrides the evaluated data
     * source), HTML-escapes it, and renders an {@code <input type="hidden">}
     * element to the page.
     *
     * @return {@code SKIP_BODY} in all cases
     * @throws JspException if a JSP exception has occurred
     */
    public int doEndTag() throws JspException {
        Object val = evaluateDataSource();
        ServletRequest req = pageContext.getRequest();
        // An explicit dataInput takes precedence over the evaluated data source.
        if (_dataInput != null) {
            val = _dataInput.toString();
        }
        // if there were expression errors report them
        if (hasErrors())
            return reportAndExit(SKIP_BODY);
        if (val != null) {
            _value = val.toString();
        }
        // Create an appropriate "input" element based on our parameters
        ByRef ref = new ByRef();
        nameHtmlControl(_state, ref);
        if (_value != null) {
            // HTML-escape the value before it is written into the attribute.
            InternalStringBuilder sb = new InternalStringBuilder(_value.length() + 16);
            StringBuilderRenderAppender sbAppend = new StringBuilderRenderAppender(sb);
            HtmlUtils.filter(_value, sbAppend);
            _state.value = sb.toString();
        }
        // correct for null text here
        if (_state.value == null)
            _state.value = "";
        WriteRenderAppender writer = new WriteRenderAppender(pageContext);
        TagRenderingBase hiddenTag = TagRenderingBase.Factory.getRendering(TagRenderingBase.INPUT_HIDDEN_TAG, req);
        hiddenTag.doStartTag(writer, _state);
        hiddenTag.doEndTag(writer);
        // ref may have been filled in by nameHtmlControl with extra markup to emit.
        if (!ref.isNull())
            write((String) ref.getRef());
        // Continue processing this page
        localRelease();
        return SKIP_BODY;
    }
}
public class SelfAssignment {
    /**
     * If the given expression is a call to a method checking the nullity of its
     * first parameter (e.g. {@code checkNotNull(x)}), returns that first argument;
     * otherwise returns the expression unchanged.
     */
    private static ExpressionTree stripNullCheck(ExpressionTree expression, VisitorState state) {
        if (expression != null && expression.getKind() == METHOD_INVOCATION) {
            MethodInvocationTree methodInvocation = (MethodInvocationTree) expression;
            // NON_NULL_MATCHER recognizes null-check style methods; unwrap their argument.
            if (NON_NULL_MATCHER.matches(methodInvocation, state)) {
                return methodInvocation.getArguments().get(0);
            }
        }
        return expression;
    }
}
public class DirectConnectGatewayMarshaller { /** * Marshall the given parameter object . */ public void marshall ( DirectConnectGateway directConnectGateway , ProtocolMarshaller protocolMarshaller ) { } }
if ( directConnectGateway == null ) { throw new SdkClientException ( "Invalid argument passed to marshall(...)" ) ; } try { protocolMarshaller . marshall ( directConnectGateway . getDirectConnectGatewayId ( ) , DIRECTCONNECTGATEWAYID_BINDING ) ; protocolMarshaller . marshall ( directConnectGateway . getDirectConnectGatewayName ( ) , DIRECTCONNECTGATEWAYNAME_BINDING ) ; protocolMarshaller . marshall ( directConnectGateway . getAmazonSideAsn ( ) , AMAZONSIDEASN_BINDING ) ; protocolMarshaller . marshall ( directConnectGateway . getOwnerAccount ( ) , OWNERACCOUNT_BINDING ) ; protocolMarshaller . marshall ( directConnectGateway . getDirectConnectGatewayState ( ) , DIRECTCONNECTGATEWAYSTATE_BINDING ) ; protocolMarshaller . marshall ( directConnectGateway . getStateChangeError ( ) , STATECHANGEERROR_BINDING ) ; } catch ( Exception e ) { throw new SdkClientException ( "Unable to marshall request to JSON: " + e . getMessage ( ) , e ) ; }
public class AmazonCloudDirectoryClient { /** * Returns a paginated list of < code > TypedLink < / code > facet names for a particular schema . For more information , see * < a href = * " https : / / docs . aws . amazon . com / clouddirectory / latest / developerguide / directory _ objects _ links . html # directory _ objects _ links _ typedlink " * > Typed Links < / a > . * @ param listTypedLinkFacetNamesRequest * @ return Result of the ListTypedLinkFacetNames operation returned by the service . * @ throws InternalServiceException * Indicates a problem that must be resolved by Amazon Web Services . This might be a transient error in * which case you can retry your request until it succeeds . Otherwise , go to the < a * href = " http : / / status . aws . amazon . com / " > AWS Service Health Dashboard < / a > site to see if there are any * operational issues with the service . * @ throws InvalidArnException * Indicates that the provided ARN value is not valid . * @ throws RetryableConflictException * Occurs when a conflict with a previous successful write is detected . For example , if a write operation * occurs on an object and then an attempt is made to read the object using “ SERIALIZABLE ” consistency , this * exception may result . This generally occurs when the previous write did not have time to propagate to the * host serving the current request . A retry ( with appropriate backoff logic ) is the recommended response to * this exception . * @ throws ValidationException * Indicates that your request is malformed in some manner . See the exception message . * @ throws LimitExceededException * Indicates that limits are exceeded . See < a * href = " https : / / docs . aws . amazon . com / clouddirectory / latest / developerguide / limits . html " > Limits < / a > for more * information . * @ throws AccessDeniedException * Access denied . Check your permissions . * @ throws ResourceNotFoundException * The specified resource could not be found . 
* @ throws InvalidNextTokenException * Indicates that the < code > NextToken < / code > value is not valid . * @ sample AmazonCloudDirectory . ListTypedLinkFacetNames * @ see < a href = " http : / / docs . aws . amazon . com / goto / WebAPI / clouddirectory - 2017-01-11 / ListTypedLinkFacetNames " * target = " _ top " > AWS API Documentation < / a > */ @ Override public ListTypedLinkFacetNamesResult listTypedLinkFacetNames ( ListTypedLinkFacetNamesRequest request ) { } }
request = beforeClientExecution ( request ) ; return executeListTypedLinkFacetNames ( request ) ;
public class SerializationUtils { /** * Deserialize an object . * @ param < T > the type parameter * @ param inBytes The bytes to be deserialized * @ param clazz the clazz * @ return the object * @ since 5.0.0 */ public static < T > T deserialize ( final byte [ ] inBytes , final Class < T > clazz ) { } }
val inputStream = new ByteArrayInputStream ( inBytes ) ; return deserialize ( inputStream , clazz ) ;
public class JobManager {
    /**
     * Starts the Jetty info server for the job manager.
     *
     * Reads the web frontend port from the global configuration (falling back to
     * the default), creates the server, and starts it. Failures are logged and
     * swallowed — the job manager keeps running without a web frontend.
     */
    public void startInfoServer() {
        final Configuration config = GlobalConfiguration.getConfiguration();
        // Start InfoServer
        try {
            int port = config.getInteger(ConfigConstants.JOB_MANAGER_WEB_PORT_KEY, ConfigConstants.DEFAULT_JOB_MANAGER_WEB_FRONTEND_PORT);
            server = new WebInfoServer(config, port, this);
            server.start();
        } catch (FileNotFoundException e) {
            // Missing web resources: log the raw cause.
            LOG.error(e.getMessage(), e);
        } catch (Exception e) {
            LOG.error("Cannot instantiate info server: " + e.getMessage(), e);
        }
    }
}
public class PurandareFirstOrder {
    /**
     * Calculates the first order co-occurrence statistics to determine the feature
     * set for each term, then clusters the feature vectors for each term's
     * contexts and finally induces the sense-specific vectors for each term.
     *
     * @throws IOException if reprocessing the corpus fails
     */
    @SuppressWarnings("unchecked")
    private void processSpace() throws IOException {
        compressedDocumentsWriter.close();
        // Generate the reverse index-to-term mapping.  We will need this for
        // assigning specific senses to each term.
        String[] indexToTerm = new String[termToIndex.size()];
        for (Map.Entry<String, Integer> e : termToIndex.entrySet())
            indexToTerm[e.getValue()] = e.getKey();
        // Compute how many terms were in the corpus.  We will need this for
        // determining the log-likelihood for all co-occurrences.
        int corpusSize = 0;
        for (AtomicInteger i : termCounts)
            corpusSize += i.get();
        final int uniqueTerms = cooccurrenceMatrix.rows();
        LOGGER.info("calculating term features");
        // Create a set for each term that contains the term indices that are
        // determined to be features for the term, i.e. not all co-occurrences
        // will count as the features.  The feature set for each term is
        // determined by computing the log-likelihood for each co-occurrence and
        // only keeping those terms whose l-l value is above a certain threshold.
        final BitSet[] termFeatures = new BitSet[wordIndexCounter];
        for (int termIndex = 0; termIndex < uniqueTerms; ++termIndex) {
            String term = indexToTerm[termIndex];
            termFeatures[termIndex] = calculateTermFeatures(term, corpusSize);
        }
        LOGGER.info("reprocessing corpus to generate feature vectors");
        // Set up the concurrent data structures so we can process the documents
        // concurrently: one worker thread per available processor pulls tasks
        // from a shared queue.
        final BlockingQueue<Runnable> workQueue = new LinkedBlockingQueue<Runnable>();
        for (int i = 0; i < Runtime.getRuntime().availableProcessors(); ++i) {
            Thread t = new WorkerThread(workQueue);
            t.start();
        }
        final Semaphore termsProcessed = new Semaphore(0);
        for (int termIndex = 0; termIndex < uniqueTerms; ++termIndex) {
            final String term = indexToTerm[termIndex];
            final int i = termIndex;
            workQueue.offer(new Runnable() {
                public void run() {
                    try {
                        LOGGER.fine(String.format("processing term %6d/%d: %s", i, uniqueTerms, term));
                        Matrix contexts = getTermContexts(i, termFeatures[i]);
                        senseInduce(term, contexts);
                    } catch (IOException ioe) {
                        ioe.printStackTrace();
                    } finally {
                        // Always release so the semaphore reaches uniqueTerms
                        // even when a term fails with an IOException.
                        termsProcessed.release();
                    }
                }
            });
        }
        // Wait until all the documents have been processed
        try {
            termsProcessed.acquire(uniqueTerms);
        } catch (InterruptedException ie) {
            throw new Error("interrupted while waiting for terms to " + "finish reprocessing", ie);
        }
        LOGGER.info("finished reprocessing all terms");
    }
}
public class JDABuilder { /** * Flags used to enable selective parts of the JDA cache to reduce the runtime memory footprint . * < br > < b > It is highly recommended to use { @ link # setDisabledCacheFlags ( EnumSet ) } instead * for backwards compatibility < / b > . We might add more flags in the future which you then effectively disable * when updating and not changing your setting here . * @ param flags * EnumSet containing the flags for cache services that should be < b > enabled < / b > * @ return The JDABuilder instance . Useful for chaining . * @ see # setDisabledCacheFlags ( EnumSet ) */ public JDABuilder setEnabledCacheFlags ( EnumSet < CacheFlag > flags ) { } }
this . cacheFlags = flags == null ? EnumSet . noneOf ( CacheFlag . class ) : EnumSet . copyOf ( flags ) ; return this ;
public class ReferenceCache {
    /**
     * Asynchronously initialize a new cache value to associate with key.
     * If key is null or key already exists, does nothing.
     * Wraps the create method in a future task and starts a new process.
     *
     * @param key associated with cache value
     */
    public void init(K key) {
        if (null == key) return;
        CacheReference<K> refKey = keyFactory.createKey(key, queue);
        if (cache.containsKey(refKey)) return;
        // NOTE(review): containsKey-then-put is not atomic — concurrent init()
        // calls for the same key could each create a task; confirm single-threaded
        // use or consider putIfAbsent.
        FutureTask<CacheReference<V>> task = new FutureTask<>(() -> {
            // create() must not return null; fail fast otherwise.
            V created = requireNonNull(create(key));
            return valueFactory.createValue(created, queue);
        });
        cache.put(refKey, task);
        // The task is run on the calling thread here; readers elsewhere
        // presumably block on the FutureTask for the result — TODO confirm.
        task.run();
    }
}
public class MessageProcessor { /** * First sets the { @ link MessageProcessor # HEADER _ BASIC _ MESSAGE _ CLASS } string property of { @ code destination } to * { @ code basicMessage . getClass ( ) . getName ( ) } , then copies all headers from { @ code basicMessage . getHeaders ( ) } to * { @ code destination } using { @ link Message # setStringProperty ( String , String ) } and then does the same thing with the * supplied { @ code headers } . * @ param basicMessage the { @ link BasicMessage } to copy headers from * @ param headers the headers to copy to { @ code destination } * @ param destination the { @ link Message } to copy the headers to * @ throws JMSException */ protected void setHeaders ( BasicMessage basicMessage , Map < String , String > headers , Message destination ) throws JMSException { } }
log . infof ( "Setting [%s] = [%s] on a message of type [%s]" , MessageProcessor . HEADER_BASIC_MESSAGE_CLASS , basicMessage . getClass ( ) . getName ( ) , destination . getClass ( ) . getName ( ) ) ; destination . setStringProperty ( MessageProcessor . HEADER_BASIC_MESSAGE_CLASS , basicMessage . getClass ( ) . getName ( ) ) ; // if the basicMessage has headers , use those first Map < String , String > basicMessageHeaders = basicMessage . getHeaders ( ) ; if ( basicMessageHeaders != null ) { for ( Map . Entry < String , String > entry : basicMessageHeaders . entrySet ( ) ) { destination . setStringProperty ( entry . getKey ( ) , entry . getValue ( ) ) ; } } // If we were given headers separately , add those now . // Notice these will override same - named headers that were found in the basic message itself . if ( headers != null ) { for ( Map . Entry < String , String > entry : headers . entrySet ( ) ) { destination . setStringProperty ( entry . getKey ( ) , entry . getValue ( ) ) ; } }
public class EvaluatorImpl {
    /**
     * Get the String value of a node.
     *
     * @param inNode the node whose string value is requested
     * @return currently always {@code null} — no conversion is implemented
     */
    protected String getStringFromNode(Object inNode) {
        if (tc.isAnyTracingEnabled() && tc.isEntryEnabled())
            tc.entry(cclass, "getStringFromNode", inNode);
        // NOTE(review): strValue is never assigned a non-null value, so this
        // method is effectively a stub; confirm whether extraction logic is missing.
        String strValue = null;
        if (tc.isAnyTracingEnabled() && tc.isEntryEnabled())
            tc.exit(cclass, "getStringFromNode", strValue);
        return strValue;
    }
}
public class AbstractIncrementalGenerator { /** * This method generates the source code for a public method declaration including the opening brace and indentation . * @ param sourceWriter is the { @ link SourceWriter } . * @ param method is the { @ link JMethod } to implement . */ protected final void generateSourcePublicMethodDeclaration ( SourceWriter sourceWriter , JMethod method ) { } }
StringBuilder arguments = new StringBuilder ( ) ; for ( JParameter parameter : method . getParameters ( ) ) { if ( arguments . length ( ) > 0 ) { arguments . append ( ", " ) ; } arguments . append ( parameter . getType ( ) . getQualifiedSourceName ( ) ) ; arguments . append ( " " ) ; arguments . append ( parameter . getName ( ) ) ; } generateSourcePublicMethodDeclaration ( sourceWriter , method . getReturnType ( ) . getQualifiedSourceName ( ) , method . getName ( ) , arguments . toString ( ) , false ) ;
public class MemoryHandler { /** * Store a < tt > LogRecord < / tt > in an internal buffer . * If there is a < tt > Filter < / tt > , its < tt > isLoggable < / tt > * method is called to check if the given log record is loggable . * If not we return . Otherwise the given record is copied into * an internal circular buffer . Then the record ' s level property is * compared with the < tt > pushLevel < / tt > . If the given level is * greater than or equal to the < tt > pushLevel < / tt > then < tt > push < / tt > * is called to write all buffered records to the target output * < tt > Handler < / tt > . * @ param record description of the log event . A null record is * silently ignored and is not published */ public synchronized void publish ( LogRecord record ) { } }
if ( ! isLoggable ( record ) ) { return ; } int ix = ( start + count ) % buffer . length ; buffer [ ix ] = record ; if ( count < buffer . length ) { count ++ ; } else { start ++ ; start %= buffer . length ; } if ( record . getLevel ( ) . intValue ( ) >= pushLevel . intValue ( ) ) { push ( ) ; }
public class StringUtils { /** * Case insensitive version of { @ link String # indexOf ( java . lang . String , int ) } . Equivalent to * { @ code text . indexOf ( str , startIndex ) } , except the matching is case insensitive . */ public static int indexOfIgnoreCase ( String text , String str , int startIndex ) { } }
Matcher m = Pattern . compile ( Pattern . quote ( str ) , Pattern . CASE_INSENSITIVE ) . matcher ( text ) ; return m . find ( startIndex ) ? m . start ( ) : - 1 ;
public class PersistentEntityStoreImpl {
    /**
     * Deletes specified entity clearing all its properties and deleting all its outgoing links.
     *
     * @param txn the transaction in which the deletion happens
     * @param entity the entity to delete
     * @return true if the entity row existed and was deleted, false otherwise
     */
    boolean deleteEntity(@NotNull final PersistentStoreTransaction txn, @NotNull final PersistentEntity entity) {
        // Remove all dependent data before removing the entity row itself.
        clearProperties(txn, entity);
        clearBlobs(txn, entity);
        deleteLinks(txn, entity);
        final PersistentEntityId id = entity.getId();
        final int entityTypeId = id.getTypeId();
        final long entityLocalId = id.getLocalId();
        final ByteIterable key = LongBinding.longToCompressedEntry(entityLocalId);
        if (config.isDebugSearchForIncomingLinksOnDelete()) {
            // search for incoming links: a debug-only safety net that fails fast
            // if any other entity still references the one being deleted.
            final List<String> allLinkNames = getAllLinkNames(txn);
            for (final String entityType : txn.getEntityTypes()) {
                for (final String linkName : allLinkNames) {
                    // noinspection LoopStatementThatDoesntLoop
                    for (final Entity referrer : txn.findLinks(entityType, entity, linkName)) {
                        // Throws on the first referrer found (hence the one-iteration loop).
                        throw new EntityStoreException(entity +
                                " is about to be deleted, but it is referenced by " + referrer +
                                ", link name: " + linkName);
                    }
                }
            }
        }
        if (getEntitiesTable(txn, entityTypeId).delete(txn.getEnvironmentTransaction(), key)) {
            txn.entityDeleted(id);
            return true;
        }
        return false;
    }
}
public class ResourceUtils { /** * Convenient method to get resources from adaptables * @ param element * an IAdaptable object which may provide an adapter for * IResource * @ return resource object or null */ @ javax . annotation . CheckForNull public static IResource getResource ( Object element ) { } }
if ( element instanceof IJavaElement ) { return ( ( IJavaElement ) element ) . getResource ( ) ; } return Util . getAdapter ( IResource . class , element ) ;
public class LongUnaryOperatorBuilder { /** * One of ways of creating builder . This is possibly the least verbose way where compiler should be able to guess the generic parameters . */ @ Nonnull public static LongUnaryOperator longUnaryOperatorFrom ( Consumer < LongUnaryOperatorBuilder > buildingFunction ) { } }
LongUnaryOperatorBuilder builder = new LongUnaryOperatorBuilder ( ) ; buildingFunction . accept ( builder ) ; return builder . build ( ) ;
public class AOProtocolItemStream {
    /**
     * (non-Javadoc)
     *
     * @see com.ibm.ws.sib.processor.impl.store.itemstreams.SIMPItemStream#getPersistentData(java.io.ObjectOutputStream)
     *
     * Serializes the stream's identifying state: remote ME id, stream id, and
     * the (possibly null) gathering target destination uuid.
     */
    public void getPersistentData(ObjectOutputStream dout) {
        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
            SibTr.entry(tc, "getPersistentData", dout);
        try {
            dout.writeUTF(remoteMEId.toString());
            dout.writeUTF(streamId.toString());
            // A null gathering target is encoded as the NULL sentinel string so
            // readers can distinguish "absent" from a real uuid.
            String id = NULL;
            if (gatheringTargetDestUuid != null)
                id = gatheringTargetDestUuid.toString();
            dout.writeUTF(id);
        } catch (IOException e) {
            // No FFDC code needed
            // Wrap the checked IOException because this method cannot declare it.
            SIErrorException e2 = new SIErrorException(e);
            if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
                SibTr.exit(tc, "getPersistentData", e2);
            throw e2;
        }
        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
            SibTr.exit(tc, "getPersistentData");
    }
}
public class CacheingSearchResults { /** * Caches a matcher */ void setMatcher ( ContentMatcher m ) { } }
if ( tc . isEntryEnabled ( ) ) tc . entry ( this , cclass , "setMatcher" , "matcher: " + m + ", hasContent: " + new Boolean ( hasContent ) ) ; wildMatchers . add ( m ) ; this . hasContent |= m . hasTests ( ) ; if ( tc . isEntryEnabled ( ) ) tc . exit ( this , cclass , "setMatcher" ) ;
public class ShardCache { /** * Clear all cached shard information for the given application . * @ param appDef { @ link ApplicationDefinition } of an application . */ public synchronized void clear ( ApplicationDefinition appDef ) { } }
String appName = appDef . getAppName ( ) ; for ( TableDefinition tableDef : appDef . getTableDefinitions ( ) . values ( ) ) { m_cacheMap . remove ( appName + "/" + tableDef . getTableName ( ) ) ; // might have no entry } m_appShardMap . remove ( appName ) ;
public class EventImpl { /** * { @ inheritDoc } */ public void waitForCompletion ( ) { } }
if ( futures == null ) { return ; } for ( Future < ? > future : futures ) { try { future . wait ( ) ; } catch ( InterruptedException e ) { // TODO Should we catch the exception or should we throw it ? // Instrumentation will log to ffdc } }
public class BodyFilters { /** * Creates a { @ link BodyFilter } that replaces the properties in the form url encoded body with given replacement . * @ param properties query names properties to replace * @ param replacement String to replace the properties values * @ return BodyFilter generated */ @ API ( status = EXPERIMENTAL ) public static BodyFilter replaceFormUrlEncodedProperty ( final Set < String > properties , final String replacement ) { } }
final Predicate < String > formUrlEncoded = MediaTypeQuery . compile ( "application/x-www-form-urlencoded" ) ; final QueryFilter delegate = properties . stream ( ) . map ( name -> QueryFilters . replaceQuery ( name , replacement ) ) . reduce ( QueryFilter :: merge ) . orElseGet ( QueryFilter :: none ) ; return ( contentType , body ) -> formUrlEncoded . test ( contentType ) ? delegate . filter ( body ) : body ;
public class ListClosedWorkflowExecutionsRequestMarshaller { /** * Marshall the given parameter object . */ public void marshall ( ListClosedWorkflowExecutionsRequest listClosedWorkflowExecutionsRequest , ProtocolMarshaller protocolMarshaller ) { } }
if ( listClosedWorkflowExecutionsRequest == null ) { throw new SdkClientException ( "Invalid argument passed to marshall(...)" ) ; } try { protocolMarshaller . marshall ( listClosedWorkflowExecutionsRequest . getDomain ( ) , DOMAIN_BINDING ) ; protocolMarshaller . marshall ( listClosedWorkflowExecutionsRequest . getStartTimeFilter ( ) , STARTTIMEFILTER_BINDING ) ; protocolMarshaller . marshall ( listClosedWorkflowExecutionsRequest . getCloseTimeFilter ( ) , CLOSETIMEFILTER_BINDING ) ; protocolMarshaller . marshall ( listClosedWorkflowExecutionsRequest . getExecutionFilter ( ) , EXECUTIONFILTER_BINDING ) ; protocolMarshaller . marshall ( listClosedWorkflowExecutionsRequest . getCloseStatusFilter ( ) , CLOSESTATUSFILTER_BINDING ) ; protocolMarshaller . marshall ( listClosedWorkflowExecutionsRequest . getTypeFilter ( ) , TYPEFILTER_BINDING ) ; protocolMarshaller . marshall ( listClosedWorkflowExecutionsRequest . getTagFilter ( ) , TAGFILTER_BINDING ) ; protocolMarshaller . marshall ( listClosedWorkflowExecutionsRequest . getNextPageToken ( ) , NEXTPAGETOKEN_BINDING ) ; protocolMarshaller . marshall ( listClosedWorkflowExecutionsRequest . getMaximumPageSize ( ) , MAXIMUMPAGESIZE_BINDING ) ; protocolMarshaller . marshall ( listClosedWorkflowExecutionsRequest . getReverseOrder ( ) , REVERSEORDER_BINDING ) ; } catch ( Exception e ) { throw new SdkClientException ( "Unable to marshall request to JSON: " + e . getMessage ( ) , e ) ; }
public class ReflectUtils {
    /**
     * Invokes a public method on the given object by reflection.
     *
     * @param object the target object
     * @param methodName the name of the method to invoke
     * @param parameterTypes the parameter types of the method
     * @param parameters the arguments to pass
     * @return the method's return value
     * @throws NoSuchMethodException if no matching public method exists
     * @throws InvocationTargetException if the invoked method itself throws
     * @throws IllegalAccessException if the method is inaccessible
     */
    public static Object invokeMethod(Object object, String methodName, Class<?>[] parameterTypes, Object[] parameters) throws NoSuchMethodException, InvocationTargetException, IllegalAccessException {
        // NOTE(review): an empty/null parameters array selects the zero-arg
        // overload and ignores parameterTypes entirely — confirm callers never
        // pass explicit types together with an empty argument array.
        if (Checker.isEmpty(parameters)) {
            return object.getClass().getMethod(methodName).invoke(object);
        } else {
            return object.getClass().getMethod(methodName, parameterTypes).invoke(object, parameters);
        }
    }
}
public class Parser { /** * 12.5 If Statement */ private IfStatementTree parseIfStatement ( ) { } }
SourcePosition start = getTreeStartLocation ( ) ; eat ( TokenType . IF ) ; eat ( TokenType . OPEN_PAREN ) ; ParseTree condition = parseExpression ( ) ; eat ( TokenType . CLOSE_PAREN ) ; ParseTree ifClause = parseStatement ( ) ; ParseTree elseClause = null ; if ( peek ( TokenType . ELSE ) ) { eat ( TokenType . ELSE ) ; elseClause = parseStatement ( ) ; } return new IfStatementTree ( getTreeLocation ( start ) , condition , ifClause , elseClause ) ;
public class XsdGeneratorHelper {
    /**
     * Parses the provided Reader to create a DOM Document.
     *
     * @param xmlStream a Reader connected to an XML document.
     * @return A DOM Document created from the contents of the provided stream.
     * @throws IllegalArgumentException wrapping any parser/IO failure
     */
    public static Document parseXmlStream(final Reader xmlStream) {
        // Build a DOM model of the provided xmlFileStream.
        final DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
        factory.setNamespaceAware(true);
        // NOTE(review): external entity resolution is not disabled here; if this
        // ever parses untrusted XML, consider XXE hardening (disallow DOCTYPEs /
        // external general+parameter entities) on the factory.
        try {
            return factory.newDocumentBuilder().parse(new InputSource(xmlStream));
        } catch (Exception e) {
            // Wrap checked parser/IO exceptions into an unchecked exception.
            throw new IllegalArgumentException("Could not acquire DOM Document", e);
        }
    }
}
public class RetryingReadRowsOperation {

    /**
     * Special retry handling for watchdog timeouts, which uses its own fail counter
     * ({@code timeoutRetryCount}) separate from the general failure counter.
     *
     * Schedules a retry with a freshly reset backoff while the timeout-retry budget
     * is not exhausted; otherwise marks the RPC as failed, finalizes stats, and
     * completes this operation exceptionally.
     *
     * @param status the failed status; its cause must be a StreamWaitTimeoutException
     */
    private void handleTimeoutError(Status status) {
        Preconditions.checkArgument(status.getCause() instanceof StreamWaitTimeoutException,
            "status is not caused by a StreamWaitTimeoutException");
        StreamWaitTimeoutException e = ((StreamWaitTimeoutException) status.getCause());

        // Cancel the existing rpc.
        rpcTimerContext.close();
        failedCount++;

        // Can this request be retried
        int maxRetries = retryOptions.getMaxScanTimeoutRetries();
        if (retryOptions.enableRetries() && ++timeoutRetryCount <= maxRetries) {
            LOG.warn("The client could not get a response in %d ms. Retrying the scan.", e.getWaitTimeMs());
            // A watchdog timeout is not treated as server pushback, so the
            // status-based backoff is restarted from scratch before retrying.
            resetStatusBasedBackoff();
            performRetry(0);
        } else {
            LOG.warn("The client could not get a response after %d tries, giving up.", timeoutRetryCount);
            rpc.getRpcMetrics().markFailure();
            finalizeStats(status);
            setException(getExhaustedRetriesException(status));
        }
    }
}
public class IonizationPotentialTool { /** * Looking if the IAtom belongs to the halogen family . * The IAtoms are F , Cl , Br , I . * @ param atom The IAtom * @ return True , if it belongs */ private static boolean familyHalogen ( IAtom atom ) { } }
String symbol = atom . getSymbol ( ) ; if ( symbol . equals ( "F" ) || symbol . equals ( "Cl" ) || symbol . equals ( "Br" ) || symbol . equals ( "I" ) ) return true ; else return false ;
public class BatchWriteItemResult { /** * The response object as a result of < code > BatchWriteItem < / code > call . * This is essentially a map of table name to * < code > ConsumedCapacityUnits < / code > . * Returns a reference to this object so that method calls can be chained together . * @ param responses The response object as a result of < code > BatchWriteItem < / code > call . * This is essentially a map of table name to * < code > ConsumedCapacityUnits < / code > . * @ return A reference to this updated object so that method calls can be chained * together . */ public BatchWriteItemResult withResponses ( java . util . Map < String , BatchWriteResponse > responses ) { } }
setResponses ( responses ) ; return this ;
public class CloudStorageFileSystemProvider { /** * Lists the project ' s buckets . But use the one in CloudStorageFileSystem . * < p > Example of listing buckets , specifying the page size and a name prefix . * < pre > { @ code * String prefix = " bucket _ " ; * Page < Bucket > buckets = provider . listBuckets ( BucketListOption . prefix ( prefix ) ) ; * Iterator < Bucket > bucketIterator = buckets . iterateAll ( ) ; * while ( bucketIterator . hasNext ( ) ) { * Bucket bucket = bucketIterator . next ( ) ; * / / do something with the bucket * } < / pre > * @ throws StorageException upon failure */ Page < Bucket > listBuckets ( Storage . BucketListOption ... options ) { } }
initStorage ( ) ; return storage . list ( options ) ;
public class DistributedFileSystem {

    /**
     * Fetches files that have been open longer than {@code millis}, restricted
     * to paths under {@code prefix}. Delegates directly to the DFS client.
     *
     * @param prefix path prefix specifying the subset of files to examine
     * @param millis select files that have been open longer than this
     * @param start resume cursor for paged results when there are many files,
     *        or null to start from the beginning
     * @return array of OpenFileInfo objects
     * @throws IOException on failure
     */
    @Override
    public OpenFileInfo[] iterativeGetOpenFiles(Path prefix, int millis, String start) throws IOException {
        return dfs.iterativeGetOpenFiles(prefix, millis, start);
    }
}
public class FunctionCall { /** * Returns the parameter at the expected index . * @ param index the number of the parameter to access * @ return the expression representing at the given index * @ throws IllegalArgumentException if the index is out of bounds */ public Expression getExpectedParam ( int index ) { } }
if ( parameters . size ( ) <= index ) { throw new IllegalArgumentException ( "Parameter index out of bounds: " + index + ". Function call: " + this ) ; } return parameters . get ( index ) ;
public class Utils { /** * Closes a result set . * @ param st * @ param logger */ public static void closeResultSet ( ResultSet resultSet , Logger logger ) { } }
try { if ( resultSet != null ) resultSet . close ( ) ; } catch ( SQLException e ) { // Not important . Utils . logException ( logger , e ) ; }
public class ClustersInner {

    /**
     * Creates a new HDInsight cluster with the specified parameters.
     *
     * @param resourceGroupName The name of the resource group.
     * @param clusterName The name of the cluster.
     * @param parameters The cluster create request.
     * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @return the {@link ServiceFuture} object
     */
    public ServiceFuture<ClusterInner> createAsync(String resourceGroupName, String clusterName, ClusterCreateParametersExtended parameters, final ServiceCallback<ClusterInner> serviceCallback) {
        // Delegate to the ServiceResponse variant and adapt it to a ServiceFuture.
        return ServiceFuture.fromResponse(createWithServiceResponseAsync(resourceGroupName, clusterName, parameters), serviceCallback);
    }
}
public class AppServiceEnvironmentsInner {

    /**
     * Gets properties of a multi-role pool.
     *
     * @param resourceGroupName Name of the resource group to which the resource belongs.
     * @param name Name of the App Service Environment.
     * @param serviceCallback the async ServiceCallback to handle successful and failed responses.
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @return the {@link ServiceFuture} object
     */
    public ServiceFuture<WorkerPoolResourceInner> getMultiRolePoolAsync(String resourceGroupName, String name, final ServiceCallback<WorkerPoolResourceInner> serviceCallback) {
        // Delegate to the ServiceResponse variant and adapt it to a ServiceFuture.
        return ServiceFuture.fromResponse(getMultiRolePoolWithServiceResponseAsync(resourceGroupName, name), serviceCallback);
    }
}
public class SamlProfileSamlAssertionBuilder { /** * Sign assertion . * @ param assertion the assertion * @ param request the request * @ param response the response * @ param service the service * @ param adaptor the adaptor * @ param binding the binding * @ param authnRequest the authn request * @ throws SamlException the saml exception */ protected void signAssertion ( final Assertion assertion , final HttpServletRequest request , final HttpServletResponse response , final SamlRegisteredService service , final SamlRegisteredServiceServiceProviderMetadataFacade adaptor , final String binding , final RequestAbstractType authnRequest ) throws SamlException { } }
if ( service . isSignAssertions ( ) ) { LOGGER . debug ( "SAML registered service [{}] requires assertions to be signed" , adaptor . getEntityId ( ) ) ; samlObjectSigner . encode ( assertion , service , adaptor , response , request , binding , authnRequest ) ; } else { LOGGER . debug ( "SAML registered service [{}] does not require assertions to be signed" , adaptor . getEntityId ( ) ) ; }
public class AsyncAccessEventAppenderFactory { /** * Creates an { @ link AsyncAppenderFactory } of type { @ link IAccessEvent } that prepares events * for deferred processing * @ return the { @ link AsyncAppenderFactory } */ @ Override public AsyncAppenderBase < IAccessEvent > build ( ) { } }
return new AsyncAppenderBase < IAccessEvent > ( ) { @ Override protected void preprocess ( IAccessEvent event ) { event . prepareForDeferredProcessing ( ) ; } } ;
public class EnvironmentSettingsInner {

    /**
     * Starts a template by starting all resources inside the template. This
     * operation can take a while to complete; this overload blocks the calling
     * thread until the long-running operation finishes.
     *
     * @param resourceGroupName The name of the resource group.
     * @param labAccountName The name of the lab Account.
     * @param labName The name of the lab.
     * @param environmentSettingName The name of the environment Setting.
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @throws CloudException thrown if the request is rejected by server
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent
     */
    public void start(String resourceGroupName, String labAccountName, String labName, String environmentSettingName) {
        // Block until the final response of the long-running operation arrives;
        // the body is discarded since this overload returns void.
        startWithServiceResponseAsync(resourceGroupName, labAccountName, labName, environmentSettingName).toBlocking().last().body();
    }
}
public class MaxAgeWhereClauseMatchCriteria { /** * { @ inheritDoc } */ public List < ? > getParameterValues ( ) { } }
final Calendar cal = Calendar . getInstance ( ) ; cal . add ( Calendar . DAY_OF_MONTH , - this . maxAge ) ; return Collections . singletonList ( cal . getTime ( ) ) ;
public class LambdaUtil { /** * Runs the given runnable with the given ClassLoader as the thread ' s * { @ link Thread # setContextClassLoader ( ClassLoader ) context class loader } . * < p > The method will make sure to set the context class loader of the calling thread * back to what it was before after the runnable completed . */ public static < R , E extends Throwable > R withContextClassLoader ( final ClassLoader cl , final SupplierWithException < R , E > s ) throws E { } }
try ( TemporaryClassLoaderContext tmpCl = new TemporaryClassLoaderContext ( cl ) ) { return s . get ( ) ; }
public class ValidationContext { /** * Asserts that the string represents a valid reference path expression . * @ param path Path expression to validate . * @ param propertyName Name of property . */ public void assertIsValidReferencePath ( String path , String propertyName ) { } }
if ( path == null ) { return ; } if ( path . isEmpty ( ) ) { problemReporter . report ( new Problem ( this , String . format ( "%s cannot be empty" , propertyName ) ) ) ; return ; }
public class ExecuteMethodChecker { protected void checkJsonBeanValidator ( ) { } }
final Class < ? > returnType = executeMethod . getReturnType ( ) ; if ( ! JsonResponse . class . isAssignableFrom ( returnType ) ) { return ; } final Type genericReturnType = executeMethod . getGenericReturnType ( ) ; if ( genericReturnType == null || ! ( genericReturnType instanceof ParameterizedType ) ) { // just in case return ; } final Class < ? > jsonBeanType = DfReflectionUtil . getGenericFirstClass ( genericReturnType ) ; if ( jsonBeanType == null ) { // just in case return ; } if ( Collection . class . isAssignableFrom ( jsonBeanType ) ) { // e . g . JsonResponse < List < SeaBean > > final Type [ ] resopnseArgTypes = DfReflectionUtil . getGenericParameterTypes ( genericReturnType ) ; if ( resopnseArgTypes . length > 0 ) { // just in case final Class < ? > elementBeanType = DfReflectionUtil . getGenericFirstClass ( resopnseArgTypes [ 0 ] ) ; if ( elementBeanType != null && mayBeJsonBeanType ( elementBeanType ) ) { // just in case doCheckJsonBeanValidator ( elementBeanType , Collections . emptyMap ( ) ) ; // can check JsonResponse < List < SeaBean > > } } } else if ( mayBeJsonBeanType ( jsonBeanType ) ) { // e . g . JsonResponse < SeaBean > , JsonResponse < WholeBean < SeaBean > > doCheckJsonBeanValidator ( jsonBeanType , prepareJsonBeanGenericMap ( genericReturnType , jsonBeanType ) ) ; }
public class WaveformFinder { /** * We have received notification that a device is no longer on the network , so clear out all its waveforms . * @ param announcement the packet which reported the device ’ s disappearance */ private void clearWaveforms ( DeviceAnnouncement announcement ) { } }
final int player = announcement . getNumber ( ) ; // Iterate over a copy to avoid concurrent modification issues for ( DeckReference deck : new HashSet < DeckReference > ( previewHotCache . keySet ( ) ) ) { if ( deck . player == player ) { previewHotCache . remove ( deck ) ; if ( deck . hotCue == 0 ) { deliverWaveformPreviewUpdate ( player , null ) ; // Inform listeners that preview is gone . } } } // Again iterate over a copy to avoid concurrent modification issues for ( DeckReference deck : new HashSet < DeckReference > ( detailHotCache . keySet ( ) ) ) { if ( deck . player == player ) { detailHotCache . remove ( deck ) ; if ( deck . hotCue == 0 ) { deliverWaveformDetailUpdate ( player , null ) ; // Inform listeners that detail is gone . } } }
public class Task {

    /**
     * Attempts to abort this task before its request has been taken for delivery.
     *
     * @return true if the task was aborted and cleaned up; false if the request
     *         had already been taken
     */
    final boolean abort() {
        // NOTE(review): taken is only read here, not compare-and-set —
        // presumably a take cannot begin concurrently once abort starts; confirm.
        if (!taken.get()) {
            // Aborting always runs before the connection happens. If we reach
            // this point the request can be active or suspended, but not yet
            // delivered or committed.
            result = BaasResult.cancel();
            // We can simply forcefully set the state to ABORTED to let the
            // resource be cleaned up.
            suspendableHandler.set(Signal.ABORTED);
            finish();
            return true;
        }
        return false;
    }
}
public class ScriptsActiveScanner { /** * Tells whether or not the scanner should be skipped . The scanner should be skipped when the { @ code ExtensionScript } is not * enabled , when there are no scripts , or if there are none is enabled . * @ return { @ code true } if the scanner should be skipped , { @ code false } otherwise */ private boolean shouldSkipScan ( ) { } }
if ( this . getExtension ( ) == null ) { return true ; } List < ScriptWrapper > scripts = getActiveScripts ( ) ; if ( scripts . isEmpty ( ) ) { return true ; } for ( ScriptWrapper script : scripts ) { if ( script . isEnabled ( ) ) { return false ; } } return true ;
public class VirtualHost { /** * Method findContext . * @ param path * @ return ServletContext */ public ServletContext findContext ( String path ) { } }
WebGroup g = ( WebGroup ) requestMapper . map ( path ) ; if ( g != null ) return g . getContext ( ) ; else return null ;
public class PropertiesUtils { /** * Extract all the keys that start with a < code > prefix < / code > in { @ link Properties } to a new { @ link Properties } * instance . * @ param properties the given { @ link Properties } instance * @ param prefix of keys to be extracted * @ return a { @ link Properties } instance */ public static Properties extractPropertiesWithPrefix ( Properties properties , Optional < String > prefix ) { } }
Preconditions . checkNotNull ( properties ) ; Preconditions . checkNotNull ( prefix ) ; Properties extractedProperties = new Properties ( ) ; for ( Map . Entry < Object , Object > entry : properties . entrySet ( ) ) { if ( StringUtils . startsWith ( entry . getKey ( ) . toString ( ) , prefix . or ( StringUtils . EMPTY ) ) ) { extractedProperties . put ( entry . getKey ( ) . toString ( ) , entry . getValue ( ) ) ; } } return extractedProperties ;