signature
stringlengths 43
39.1k
| implementation
stringlengths 0
450k
|
|---|---|
public class ZonedDateTime { /** * Returns a copy of this date - time with the specified amount subtracted .
* This returns a { @ code ZonedDateTime } , based on this one , with the specified amount subtracted .
* The amount is typically { @ link Period } or { @ link Duration } but may be
* any other type implementing the { @ link TemporalAmount } interface .
* The calculation is delegated to the amount object by calling
* { @ link TemporalAmount # subtractFrom ( Temporal ) } . The amount implementation is free
* to implement the subtraction in any way it wishes , however it typically
* calls back to { @ link # minus ( long , TemporalUnit ) } . Consult the documentation
* of the amount implementation to determine if it can be successfully subtracted .
* This instance is immutable and unaffected by this method call .
* @ param amountToSubtract the amount to subtract , not null
* @ return a { @ code ZonedDateTime } based on this date - time with the subtraction made , not null
* @ throws DateTimeException if the subtraction cannot be made
* @ throws ArithmeticException if numeric overflow occurs */
@ Override public ZonedDateTime minus ( TemporalAmount amountToSubtract ) { } }
|
if ( amountToSubtract instanceof Period ) { Period periodToSubtract = ( Period ) amountToSubtract ; return resolveLocal ( dateTime . minus ( periodToSubtract ) ) ; } Objects . requireNonNull ( amountToSubtract , "amountToSubtract" ) ; return ( ZonedDateTime ) amountToSubtract . subtractFrom ( this ) ;
|
public class Security { /** * Returns true if the given provider satisfies
* the selection criterion key : value . */
private static boolean isCriterionSatisfied ( Provider prov , String serviceName , String algName , String attrName , String filterValue ) { } }
|
String key = serviceName + '.' + algName ; if ( attrName != null ) { key += ' ' + attrName ; } // Check whether the provider has a property
// whose key is the same as the given key .
String propValue = getProviderProperty ( key , prov ) ; if ( propValue == null ) { // Check whether we have an alias instead
// of a standard name in the key .
String standardName = getProviderProperty ( "Alg.Alias." + serviceName + "." + algName , prov ) ; if ( standardName != null ) { key = serviceName + "." + standardName ; if ( attrName != null ) { key += ' ' + attrName ; } propValue = getProviderProperty ( key , prov ) ; } if ( propValue == null ) { // The provider doesn ' t have the given
// key in its property list .
return false ; } } // If the key is in the format of :
// < crypto _ service > . < algorithm _ or _ type > ,
// there is no need to check the value .
if ( attrName == null ) { return true ; } // If we get here , the key must be in the
// format of < crypto _ service > . < algorithm _ or _ provider > < attribute _ name > .
if ( isStandardAttr ( attrName ) ) { return isConstraintSatisfied ( attrName , filterValue , propValue ) ; } else { return filterValue . equalsIgnoreCase ( propValue ) ; }
|
public class BigtableAdmin { /** * { @ inheritDoc } */
@ Override public void snapshot ( SnapshotDescription snapshot ) throws IOException , SnapshotCreationException , IllegalArgumentException { } }
|
Objects . requireNonNull ( snapshot ) ; snapshot ( snapshot . getName ( ) , snapshot . getTableName ( ) ) ;
|
public class XIfExpressionImpl { /** * < ! - - begin - user - doc - - >
* < ! - - end - user - doc - - >
* @ generated */
public NotificationChain basicSetElse ( XExpression newElse , NotificationChain msgs ) { } }
|
XExpression oldElse = else_ ; else_ = newElse ; if ( eNotificationRequired ( ) ) { ENotificationImpl notification = new ENotificationImpl ( this , Notification . SET , XbasePackage . XIF_EXPRESSION__ELSE , oldElse , newElse ) ; if ( msgs == null ) msgs = notification ; else msgs . add ( notification ) ; } return msgs ;
|
public class VortexAggregateFuture { /** * Gets the inputs on Tasklet aggregation completion . */
private List < TInput > getInputs ( final List < Integer > taskletIds ) { } }
|
final List < TInput > inputList = new ArrayList < > ( taskletIds . size ( ) ) ; for ( final int taskletId : taskletIds ) { inputList . add ( taskletIdInputMap . get ( taskletId ) ) ; } return inputList ;
|
public class SessionBuilder { /** * Adds a custom filter to include / exclude nodes for a particular execution profile . This assumes
* that you ' re also using a dedicated load balancing policy for that profile .
* < p > The predicate ' s { @ link Predicate # test ( Object ) test ( ) } method will be invoked each time the
* { @ link LoadBalancingPolicy } processes a topology or state change : if it returns false , the
* policy will suggest distance IGNORED ( meaning the driver won ' t ever connect to it if all
* policies agree ) , and never included in any query plan .
* < p > Note that this behavior is implemented in the default load balancing policy . If you use a
* custom policy implementation , you ' ll need to explicitly invoke the filter .
* < p > If the filter is specified programmatically with this method , it overrides the configuration
* ( that is , the { @ code load - balancing - policy . filter . class } option will be ignored ) .
* @ see # withNodeFilter ( Predicate ) */
@ NonNull public SelfT withNodeFilter ( @ NonNull String profileName , @ NonNull Predicate < Node > nodeFilter ) { } }
|
this . nodeFilters . put ( profileName , nodeFilter ) ; return self ;
|
public class aaagroup_auditnslogpolicy_binding { /** * Use this API to fetch filtered set of aaagroup _ auditnslogpolicy _ binding resources .
* filter string should be in JSON format . eg : " port : 80 , servicetype : HTTP " . */
public static aaagroup_auditnslogpolicy_binding [ ] get_filtered ( nitro_service service , String groupname , String filter ) throws Exception { } }
|
aaagroup_auditnslogpolicy_binding obj = new aaagroup_auditnslogpolicy_binding ( ) ; obj . set_groupname ( groupname ) ; options option = new options ( ) ; option . set_filter ( filter ) ; aaagroup_auditnslogpolicy_binding [ ] response = ( aaagroup_auditnslogpolicy_binding [ ] ) obj . getfiltered ( service , option ) ; return response ;
|
public class Response { /** * Get response data as stream .
* @ return the response data as a stream */
public InputStream getStream ( ) { } }
|
if ( stream != null ) { return stream ; } try { return new ByteArrayInputStream ( content . getBytes ( "utf-8" ) ) ; } catch ( final UnsupportedEncodingException e ) { throw new ApiException ( "UTF-8 encoding not supported" , e ) ; }
|
public class DocWorkUnit { /** * Get the CommandLineProgramGroup object from the CommandLineProgramProperties of this work unit .
* @ return CommandLineProgramGroup if the feature has one , otherwise null . */
public CommandLineProgramGroup getCommandLineProgramGroup ( ) { } }
|
if ( commandLineProperties != null ) { try { return commandLineProperties . programGroup ( ) . newInstance ( ) ; } catch ( IllegalAccessException | InstantiationException e ) { logger . warn ( String . format ( "Can't instantiate program group class to retrieve summary for group %s for class %s" , commandLineProperties . programGroup ( ) . getName ( ) , clazz . getName ( ) ) ) ; } } return null ;
|
public class JUnit38ClassRunner { /** * Get the annotations associated with given TestCase .
* @ param test the TestCase . */
private static Annotation [ ] getAnnotations ( TestCase test ) { } }
|
try { Method m = test . getClass ( ) . getMethod ( test . getName ( ) ) ; return m . getDeclaredAnnotations ( ) ; } catch ( SecurityException e ) { } catch ( NoSuchMethodException e ) { } return new Annotation [ 0 ] ;
|
public class Utils { /** * Appends to a given StringBuilder the given indents depending on the indent level .
* @ param sb StringBuilder to write in .
* @ param indent The number ( level ) of indents . */
private static final void indent ( final StringBuilder sb , final int indent ) { } }
|
for ( int i = 0 ; i < indent ; i ++ ) { sb . append ( LOG_OUT_INDENT ) ; }
|
public class Insert { /** * Add all tables of the type to the expressions , because for the type an
* insert must be made for all tables ! ! ! */
private void addTables ( ) { } }
|
for ( final SQLTable table : getType ( ) . getTables ( ) ) { if ( ! getTable2values ( ) . containsKey ( table ) ) { getTable2values ( ) . put ( table , new ArrayList < Value > ( ) ) ; } }
|
public class XStreamFactory { /** * Create XStream for query and report load / save .
* @ return XStream */
public static XStream createXStream ( ) { } }
|
XStream xstream = new XStream ( new DomDriver ( "UTF-8" ) ) ; xstream . setMode ( XStream . NO_REFERENCES ) ; xstream . registerConverter ( new FontConverter ( ) ) ; xstream . alias ( "report" , Report . class ) ; xstream . alias ( "chart" , Chart . class ) ; xstream . useAttributeFor ( Report . class , "version" ) ; xstream . useAttributeFor ( Chart . class , "version" ) ; xstream = createQueryXStream ( xstream ) ; xstream = createReportXStream ( xstream ) ; xstream = createChartXStream ( xstream ) ; return xstream ;
|
public class ThriftConnectionPool { /** * 检查连接分区的连接数量 < br >
* 如果连接数量不足则向事件队列中添加新的创建连接信号
* @ param connectionPartition
* 需要检测的连接分区 */
protected void maybeSignalForMoreConnections ( ThriftConnectionPartition < T > connectionPartition ) { } }
|
if ( ! connectionPartition . isUnableToCreateMoreTransactions ( ) && ! this . poolShuttingDown && connectionPartition . getAvailableConnections ( ) * 100 / connectionPartition . getMaxConnections ( ) <= this . poolAvailabilityThreshold ) { connectionPartition . getPoolWatchThreadSignalQueue ( ) . offer ( new Object ( ) ) ; }
|
public class DevicesInner {
    /**
     * Gets the network settings of the specified data box edge/gateway device.
     *
     * @param deviceName        the device name
     * @param resourceGroupName the resource group name
     * @throws IllegalArgumentException thrown if parameters fail the validation
     * @throws CloudException thrown if the request is rejected by server
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent
     * @return the NetworkSettingsInner object if successful
     */
    public NetworkSettingsInner getNetworkSettings(String deviceName, String resourceGroupName) {
        // Synchronous wrapper: blocks on the async service call and unwraps
        // the response body. single() fails if the observable does not emit
        // exactly one item.
        return getNetworkSettingsWithServiceResponseAsync(deviceName, resourceGroupName)
                .toBlocking().single().body();
    }
}
|
public class AttributeLocalizedContentUrl { /** * Get Resource Url for UpdateLocalizedContents
* @ param attributeFQN Fully qualified name for an attribute .
* @ return String Resource Url */
public static MozuUrl updateLocalizedContentsUrl ( String attributeFQN ) { } }
|
UrlFormatter formatter = new UrlFormatter ( "/api/commerce/catalog/admin/attributedefinition/attributes/{attributeFQN}/LocalizedContent" ) ; formatter . formatUrl ( "attributeFQN" , attributeFQN ) ; return new MozuUrl ( formatter . getResourceUrl ( ) , MozuUrl . UrlLocation . TENANT_POD ) ;
|
public class MonitoringBaseAspect {
    /**
     * Profiles one intercepted call: records request/error/duration stats on
     * the producer, maintains journey trace steps, and (when tracing is
     * enabled for the producer) captures a full Trace of the call.
     * <p>
     * NOTE(review): the statement order here is load-bearing (lastProducerId
     * bookkeeping, trace-id reservation before journey creation, stats updates
     * in the finally block) — do not reorder.
     *
     * @param aProducerId producer id for the stats producer
     * @param aSubsystem  subsystem name for the producer
     * @param aCategory   category name for the producer
     * @return whatever the intercepted join point returns
     * @throws Throwable whatever the intercepted join point throws
     */
    protected Object doProfiling(ProceedingJoinPoint pjp, String aProducerId,
            String aSubsystem, String aCategory) throws Throwable {
        // check if kill switch is active, return if so.
        if (moskitoConfiguration.getKillSwitch().disableMetricCollection())
            return pjp.proceed();
        // check if this a synthetic method like a switch or lambda method.
        if (((MethodSignature) pjp.getSignature()).getMethod().isSynthetic())
            return pjp.proceed();
        OnDemandStatsProducer<ServiceStats> producer =
                getProducer(pjp, aProducerId, aCategory, aSubsystem, false, FACTORY, true);
        String producerId = producer.getProducerId();
        // Remember the previous producer so nested calls to the same producer
        // don't double-count execution time (see the finally block).
        String prevProducerId = lastProducerId.get();
        lastProducerId.set(producerId);
        String methodName = getMethodStatName(pjp.getSignature());
        ServiceStats defaultStats = producer.getDefaultStats();
        ServiceStats methodStats = producer.getStats(methodName);
        final Object[] args = pjp.getArgs();
        defaultStats.addRequest();
        if (methodStats != null) {
            methodStats.addRequest();
        }
        TracedCall aRunningTrace = RunningTraceContainer.getCurrentlyTracedCall();
        TraceStep currentStep = null;
        CurrentlyTracedCall currentTrace =
                aRunningTrace.callTraced() ? (CurrentlyTracedCall) aRunningTrace : null;
        MoSKitoContext context = MoSKitoContext.get();
        TracerRepository tracerRepository = TracerRepository.getInstance();
        // only trace this producer if no tracers have been fired yet.
        boolean tracePassingOfThisProducer = context.hasTracerFired()
                ? false
                : tracerRepository.isTracingEnabledForProducer(producerId);
        Trace trace = null;
        boolean journeyStartedByMe = false;
        // we create trace here already, because we want to reserve a new trace id.
        if (tracePassingOfThisProducer) {
            trace = new Trace();
            context.setTracerFired();
        }
        if (currentTrace == null && tracePassingOfThisProducer) {
            // ok, we will create a new journey on the fly.
            String journeyCallName = Tracers.getCallName(trace);
            RunningTraceContainer.startTracedCall(journeyCallName);
            journeyStartedByMe = true;
            currentTrace = (CurrentlyTracedCall) RunningTraceContainer.getCurrentlyTracedCall();
        }
        // Build the call description only when someone will consume it.
        StringBuilder call = null;
        if (currentTrace != null || tracePassingOfThisProducer) {
            call = TracingUtil.buildCall(producerId, methodName, args,
                    tracePassingOfThisProducer ? Tracers.getCallName(trace) : null);
        }
        if (currentTrace != null) {
            currentStep = currentTrace.startStep(call.toString(), producer, methodName);
        }
        long startTime = System.nanoTime();
        Object ret = null;
        try {
            ret = pjp.proceed();
            return ret;
        } catch (InvocationTargetException e) {
            // Unwrap reflective invocation failures; stats record the target
            // exception, the caller sees the cause.
            defaultStats.notifyError(e.getTargetException());
            if (methodStats != null) {
                methodStats.notifyError();
            }
            if (currentStep != null) {
                currentStep.setAborted();
            }
            throw e.getCause();
        } catch (Throwable t) {
            defaultStats.notifyError(t);
            if (methodStats != null) {
                methodStats.notifyError();
            }
            if (currentStep != null) {
                currentStep.setAborted();
            }
            if (tracePassingOfThisProducer) {
                call.append(" ERR ").append(t.getMessage());
            }
            throw t;
        } finally {
            long exTime = System.nanoTime() - startTime;
            // Only count time once for recursive calls into the same producer.
            if (!producerId.equals(prevProducerId)) {
                defaultStats.addExecutionTime(exTime);
            }
            if (methodStats != null) {
                methodStats.addExecutionTime(exTime);
            }
            lastProducerId.set(prevProducerId);
            defaultStats.notifyRequestFinished();
            if (methodStats != null) {
                methodStats.notifyRequestFinished();
            }
            if (currentStep != null) {
                currentStep.setDuration(exTime);
                try {
                    currentStep.appendToCall(" = " + TracingUtil.parameter2string(ret));
                } catch (Throwable t) {
                    currentStep.appendToCall(" = ERR: " + t.getMessage() + " (" + t.getClass() + ')');
                }
            }
            if (currentTrace != null) {
                currentTrace.endStep();
            }
            if (tracePassingOfThisProducer) {
                call.append(" = ").append(TracingUtil.parameter2string(ret));
                trace.setCall(call.toString());
                trace.setDuration(exTime);
                trace.setElements(Thread.currentThread().getStackTrace());
                if (journeyStartedByMe) {
                    // now finish the journey.
                    Journey myJourney = JourneyManagerFactory.getJourneyManager()
                            .getOrCreateJourney(Tracers.getJourneyNameForTracers(producerId));
                    myJourney.addUseCase((CurrentlyTracedCall) RunningTraceContainer.endTrace());
                    RunningTraceContainer.cleanup();
                }
                tracerRepository.addTracedExecution(producerId, trace);
            }
        }
    }
}
|
public class SimpleSipServlet { /** * { @ inheritDoc } */
protected void doResponse ( SipServletResponse response ) throws ServletException , IOException { } }
|
if ( logger . isInfoEnabled ( ) ) { logger . info ( "SimpleProxyServlet: Got response:\n" + response ) ; } if ( SipServletResponse . SC_OK == response . getStatus ( ) && "BYE" . equalsIgnoreCase ( response . getMethod ( ) ) ) { SipApplicationSession sipApplicationSession = response . getApplicationSession ( false ) ; if ( sipApplicationSession != null && sipApplicationSession . isValid ( ) ) { sipApplicationSession . invalidate ( ) ; } }
|
public class SOM { /** * Returns the cluster labels for each neuron . If the neurons have
* not been clustered , throws an Illegal State Exception . */
public int [ ] [ ] getClusterLabel ( ) { } }
|
if ( y == null ) { throw new IllegalStateException ( "Neuron cluster labels are not available. Call partition() first." ) ; } int [ ] [ ] clusterLabels = new int [ height ] [ width ] ; for ( int i = 0 , l = 0 ; i < height ; i ++ ) { for ( int j = 0 ; j < width ; j ++ ) { clusterLabels [ i ] [ j ] = y [ i * width + j ] ; } } return clusterLabels ;
|
public class LogServiceImpl { /** * ( non - Javadoc )
* @ see org . osgi . service . log . LogService # log ( org . osgi . framework . ServiceReference , int , java . lang . String ,
* java . lang . Throwable ) */
public void log ( ServiceReference sr , int level , String message , Throwable exception ) { } }
|
switch ( level ) { case LOG_DEBUG : if ( delegate . isDebugEnabled ( ) ) { delegate . debug ( createMessage ( sr , message ) , exception ) ; } break ; case LOG_ERROR : if ( delegate . isErrorEnabled ( ) ) { delegate . error ( createMessage ( sr , message ) , exception ) ; } break ; case LOG_INFO : if ( delegate . isInfoEnabled ( ) ) { delegate . info ( createMessage ( sr , message ) , exception ) ; } break ; case LOG_WARNING : if ( delegate . isWarnEnabled ( ) ) { delegate . warn ( createMessage ( sr , message ) , exception ) ; } break ; default : break ; }
|
public class FctBnProcessors { /** * < p > Lazy get PrcAbout . < / p >
* @ param pAddParam additional param
* @ return requested PrcAbout
* @ throws Exception - an exception */
protected final PrcAbout < RS > lazyGetPrcAbout ( final Map < String , Object > pAddParam ) throws Exception { } }
|
@ SuppressWarnings ( "unchecked" ) PrcAbout < RS > proc = ( PrcAbout < RS > ) this . processorsMap . get ( PrcAbout . class . getSimpleName ( ) ) ; if ( proc == null ) { proc = new PrcAbout < RS > ( ) ; proc . setSrvOrm ( getSrvOrm ( ) ) ; // assigning fully initialized object :
this . processorsMap . put ( PrcAbout . class . getSimpleName ( ) , proc ) ; } return proc ;
|
public class CommerceDiscountPersistenceImpl {
    /**
     * Returns an ordered range of all the commerce discounts where
     * expirationDate &lt; &#63; and status = &#63;.
     *
     * <p>Useful when paginating results. Returns a maximum of
     * <code>end - start</code> instances; <code>start</code> and
     * <code>end</code> are result-set indexes, not primary keys. Pass
     * {@link QueryUtil#ALL_POS} for both to get the full result set. When
     * <code>orderByComparator</code> is absent and pagination is requested,
     * the default ORDER BY from {@link CommerceDiscountModelImpl} applies;
     * with neither, results come back in ascending primary-key order.
     *
     * @param expirationDate the expiration date
     * @param status the status
     * @param start the lower bound of the range of commerce discounts
     * @param end the upper bound of the range of commerce discounts (not inclusive)
     * @param orderByComparator the comparator to order the results by (optionally <code>null</code>)
     * @return the ordered range of matching commerce discounts
     */
    @Override
    public List<CommerceDiscount> findByLtE_S(Date expirationDate, int status, int start, int end,
            OrderByComparator<CommerceDiscount> orderByComparator) {
        // Delegates to the full overload; the trailing flag presumably enables
        // the finder cache (Liferay convention) — TODO confirm against the overload.
        return findByLtE_S(expirationDate, status, start, end, orderByComparator, true);
    }
}
|
public class TelURLImpl { /** * { @ inheritDoc } */
public void setPhoneNumber ( String number , String phoneContext ) { } }
|
setPhoneNumber ( number ) ; try { telUrl . setPhoneContext ( phoneContext ) ; } catch ( ParseException ex ) { logger . error ( "Error setting phone number " + number ) ; throw new java . lang . IllegalArgumentException ( "phone number " + number + " is invalid" , ex ) ; }
|
public class PollCachingESRegistry { /** * Checks the ES store to see if the ' dataVersion ' entry has been updated with a newer
* version # . If it has , then we need to invalidate our cache . */
protected void checkCacheVersion ( ) { } }
|
// Be very aggressive in invalidating the cache .
boolean invalidate = true ; try { Get get = new Get . Builder ( getDefaultIndexName ( ) , "instance" ) . type ( "dataVersion" ) . build ( ) ; // $ NON - NLS - 1 $ / / $ NON - NLS - 2 $
JestResult result = getClient ( ) . execute ( get ) ; if ( result . isSucceeded ( ) ) { String latestDV = result . getJsonObject ( ) . get ( "_version" ) . getAsString ( ) ; // $ NON - NLS - 1 $
if ( latestDV != null && dataVersion != null && latestDV . equals ( dataVersion ) ) { invalidate = false ; } else { dataVersion = latestDV ; } } } catch ( IOException e ) { logger . warn ( "Elasticsearch is not available, using cache" ) ; invalidate = false ; } if ( invalidate ) { invalidateCache ( ) ; }
|
public class MethodServices {
    /**
     * Get the method on the origin class without proxies.
     * <p>
     * NOTE(review): this simply delegates to {@link Class#getMethod}; the
     * "without proxies" guarantee depends on the caller passing the origin
     * class rather than a proxy class — confirm at call sites.
     *
     * @param cls the class to look the method up on
     * @param methodName the method name
     * @param parameterTypes the method's parameter types
     * @throws NoSuchMethodException if no matching public method exists
     * @return the resolved method
     */
    public Method getNonProxiedMethod(Class cls, String methodName,
            Class<?>... parameterTypes) throws NoSuchMethodException {
        return cls.getMethod(methodName, parameterTypes);
    }
}
|
public class Strings {
    /**
     * Split string using the given pairs separator, and every pair using the
     * pair components separator. Returns the list of pair instances, possibly
     * empty if the string value is empty; returns null if the string value is
     * null. Pair components are trimmed, so spaces around both separators are
     * eliminated; e.g. "john:doe;jane:doe;" yields Pair("john","doe"),
     * Pair("jane","doe"). The trailing pairs separator is optional.
     *
     * @param string string value,
     * @param pairsSeparator pairs separator,
     * @param componentsSeparator pair components separator.
     * @return list of pairs, possible empty.
     * @throws BugError if a pair is not valid, that is, pair components separator is missing.
     */
    public static List<Pair> splitPairs(String string, char pairsSeparator, char componentsSeparator) {
        if (string == null) {
            return null;
        }
        string = string.trim();
        final int length = string.length();
        final List<Pair> list = new ArrayList<Pair>();
        int beginIndex = 0;
        int endIndex = 0;
        while (endIndex < length) {
            if (string.charAt(endIndex) == pairsSeparator) {
                // Non-empty segment ends here; emit it as a pair.
                if (endIndex > beginIndex) {
                    list.add(pair(string.substring(beginIndex, endIndex), componentsSeparator));
                }
                beginIndex = ++endIndex;
            }
            // NOTE(review): combined with the increment above, the character
            // immediately after a separator is never tested as a separator —
            // consecutive separators ("a;;b") leave the second one inside the
            // next segment. Verify whether that input is expected here.
            ++endIndex;
        }
        // Trailing segment without a closing separator.
        if (beginIndex < length) {
            list.add(pair(string.substring(beginIndex), componentsSeparator));
        }
        return list;
    }
}
|
public class ConfigTreeBuilder { /** * Analyze configuration class structure to extract all classes in hierarchy with all custom
* interfaces ( ignoring , for example Serializable or something like this ) .
* @ param roots all collected types so far
* @ param type type to analyze
* @ return all collected types */
@ SuppressWarnings ( "unchecked" ) private static List < Class > resolveRootTypes ( final List < Class > roots , final Class type ) { } }
|
roots . add ( type ) ; if ( type == Configuration . class ) { return roots ; } for ( Class iface : type . getInterfaces ( ) ) { if ( isInStopPackage ( iface ) ) { continue ; } roots . add ( iface ) ; } return resolveRootTypes ( roots , type . getSuperclass ( ) ) ;
|
public class FileGetFromComputeNodeHeaders { /** * Set the time at which the resource was last modified .
* @ param lastModified the lastModified value to set
* @ return the FileGetFromComputeNodeHeaders object itself . */
public FileGetFromComputeNodeHeaders withLastModified ( DateTime lastModified ) { } }
|
if ( lastModified == null ) { this . lastModified = null ; } else { this . lastModified = new DateTimeRfc1123 ( lastModified ) ; } return this ;
|
public class KeyGenerator { /** * Generates a secret key .
* @ return the new key */
public final SecretKey generateKey ( ) { } }
|
if ( serviceIterator == null ) { return spi . engineGenerateKey ( ) ; } RuntimeException failure = null ; KeyGeneratorSpi mySpi = spi ; do { try { return mySpi . engineGenerateKey ( ) ; } catch ( RuntimeException e ) { if ( failure == null ) { failure = e ; } mySpi = nextSpi ( mySpi , true ) ; } } while ( mySpi != null ) ; throw failure ;
|
public class ParseLocalTime {
    /**
     * {@inheritDoc}
     * <p>
     * Parses the string with the supplied formatter via
     * {@link LocalTime#parse(CharSequence, DateTimeFormatter)}.
     */
    @Override
    protected LocalTime parse(final String string, final DateTimeFormatter formatter) {
        return LocalTime.parse(string, formatter);
    }
}
|
public class MethodHandle { /** * Creates a method handle representation of the given method for an explicit special method invocation of an otherwise virtual method .
* @ param method The method ro represent .
* @ param type The type on which the method is to be invoked on as a special method invocation .
* @ return A method handle representing the given method as special method invocation . */
public static MethodHandle ofSpecial ( Method method , Class < ? > type ) { } }
|
return ofSpecial ( new MethodDescription . ForLoadedMethod ( method ) , TypeDescription . ForLoadedType . of ( type ) ) ;
|
public class MetricMonitorService {
    /**
     * Gets the monitor id for this metric by posting a GetMetricInfo request
     * to the Stackify API.
     *
     * @param identity    the metric identity
     * @param appIdentity the application identity
     * @return the monitor id, or 0 if the service returned no response body
     * @throws IOException   if the request cannot be serialized or sent
     * @throws HttpException if the HTTP call fails
     */
    private int getMetricInfo(final MetricIdentity identity, final AppIdentity appIdentity)
            throws IOException, HttpException {
        Preconditions.checkNotNull(identity);
        Preconditions.checkNotNull(appIdentity);
        // build the json objects
        JsonGetMetricInfoRequest.Builder requestBuilder = JsonGetMetricInfoRequest.newBuilder();
        requestBuilder.category(identity.getCategory());
        requestBuilder.metricName(identity.getName());
        requestBuilder.deviceId(appIdentity.getDeviceId());
        requestBuilder.deviceAppId(appIdentity.getDeviceAppId());
        requestBuilder.appNameId(appIdentity.getAppNameId());
        requestBuilder.metricTypeId(identity.getType().getId());
        JsonGetMetricInfoRequest request = requestBuilder.build();
        // convert to json bytes
        byte[] jsonBytes = objectMapper.writer().writeValueAsBytes(request);
        if (LOGGER.isDebugEnabled()) {
            LOGGER.debug("GetMetricInfo Request: {}", new String(jsonBytes, "UTF-8"));
        }
        // post to stackify
        HttpClient httpClient = new HttpClient(apiConfig);
        String responseString = httpClient.post("/Metrics/GetMetricInfo", jsonBytes);
        LOGGER.debug("GetMetricInfo Response: {}", responseString);
        // deserialize the response and return the monitor id
        ObjectReader jsonReader = objectMapper.reader(new TypeReference<JsonGetMetricInfoResponse>() { });
        JsonGetMetricInfoResponse response = jsonReader.readValue(responseString);
        if (response != null) {
            return response.getMonitorId();
        }
        // No parsable response: 0 signals "no monitor id".
        return 0;
    }
}
|
public class MapConfig { /** * Enable de - serialized value caching when evaluating predicates . It has no effect when { @ link InMemoryFormat }
* is { @ link InMemoryFormat # OBJECT } or when { @ link com . hazelcast . nio . serialization . Portable } serialization is used .
* @ param optimizeQueries { @ code true } if queries should be optimized , { @ code false } otherwise
* @ return this { @ code MapConfig } instance
* @ see CacheDeserializedValues
* @ deprecated use { @ link # setCacheDeserializedValues ( CacheDeserializedValues ) } instead */
public MapConfig setOptimizeQueries ( boolean optimizeQueries ) { } }
|
validateSetOptimizeQueriesOption ( optimizeQueries ) ; if ( optimizeQueries ) { this . cacheDeserializedValues = CacheDeserializedValues . ALWAYS ; } // this is used to remember the method has been called explicitly
this . optimizeQueryExplicitlyInvoked = true ; return this ;
|
public class CommerceNotificationTemplatePersistenceImpl { /** * Returns the commerce notification templates before and after the current commerce notification template in the ordered set where groupId = & # 63 ; and enabled = & # 63 ; .
* @ param commerceNotificationTemplateId the primary key of the current commerce notification template
* @ param groupId the group ID
* @ param enabled the enabled
* @ param orderByComparator the comparator to order the set by ( optionally < code > null < / code > )
* @ return the previous , current , and next commerce notification template
* @ throws NoSuchNotificationTemplateException if a commerce notification template with the primary key could not be found */
@ Override public CommerceNotificationTemplate [ ] findByG_E_PrevAndNext ( long commerceNotificationTemplateId , long groupId , boolean enabled , OrderByComparator < CommerceNotificationTemplate > orderByComparator ) throws NoSuchNotificationTemplateException { } }
|
CommerceNotificationTemplate commerceNotificationTemplate = findByPrimaryKey ( commerceNotificationTemplateId ) ; Session session = null ; try { session = openSession ( ) ; CommerceNotificationTemplate [ ] array = new CommerceNotificationTemplateImpl [ 3 ] ; array [ 0 ] = getByG_E_PrevAndNext ( session , commerceNotificationTemplate , groupId , enabled , orderByComparator , true ) ; array [ 1 ] = commerceNotificationTemplate ; array [ 2 ] = getByG_E_PrevAndNext ( session , commerceNotificationTemplate , groupId , enabled , orderByComparator , false ) ; return array ; } catch ( Exception e ) { throw processException ( e ) ; } finally { closeSession ( session ) ; }
|
public class CheckSignatureAdapter { /** * method signatures */
@ Override public SignatureVisitor visitParameterType ( ) { } }
|
if ( type != METHOD_SIGNATURE || ( state & ( EMPTY | FORMAL | BOUND | PARAM ) ) == 0 ) { throw new IllegalArgumentException ( ) ; } state = PARAM ; SignatureVisitor v = sv == null ? null : sv . visitParameterType ( ) ; return new CheckSignatureAdapter ( TYPE_SIGNATURE , v ) ;
|
public class A_CmsSerialDateValue { /** * Set dates where the event should not take place , even if they are part of the series .
* @ param dates dates to set . */
public final void setExceptions ( SortedSet < Date > dates ) { } }
|
m_exceptions . clear ( ) ; if ( null != dates ) { m_exceptions . addAll ( dates ) ; }
|
public class ImageImpl { /** * Notifies this image that its implementation bitmap failed to load .
* This can be called from any thread . */
public synchronized void fail ( Throwable error ) { } }
|
if ( pixelWidth == 0 ) pixelWidth = 50 ; if ( pixelHeight == 0 ) pixelHeight = 50 ; setBitmap ( createErrorBitmap ( pixelWidth , pixelHeight ) ) ; ( ( RPromise < Image > ) state ) . fail ( error ) ; // state is a deferred promise
|
public class CmsPreferences {
    /**
     * Gets the site select options.<p>
     *
     * Delegates to the static helper using the current CMS context, the
     * user's configured start site root, and the user's locale.
     *
     * @return the site select options
     */
    public SelectOptions getSiteSelectOptions() {
        return getSiteSelectOptionsStatic(
                getCms(),
                CmsWorkplace.getStartSiteRoot(getCms(), m_userSettings),
                getSettings().getUserSettings().getLocale());
    }
}
|
public class DynamicConfigManager {

    /**
     * <p>Method to dynamically refresh the configuration of all destinations
     * currently known about on this messaging engine. This includes localised
     * destinations, as if its only the destination definition thats changed,
     * the admin code does not notify message processor of an alter to the
     * destination. (It was envisaged that admin would do this, but they
     * dont and we are in stop ship defect fixing mode, so we must do the
     * best we can now</p>
     *
     * Processes two independent passes: first all regular destinations from
     * the destination index, then all foreign buses from the foreign bus
     * index. A reload failure for one entry is FFDC'd and logged, and the
     * iteration continues with the next entry.
     */
    public void refreshDestinations() {
        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
            SibTr.entry(tc, "refreshDestinations");

        // This filter should only let through Active, CleanupPending and
        // CleanupDefered destinations.
        DestinationTypeFilter filter = new DestinationTypeFilter();
        filter.VISIBLE = Boolean.TRUE;
        filter.CORRUPT = Boolean.FALSE;
        filter.RESET_ON_RESTART = Boolean.FALSE;
        SIMPIterator itr = _destinationIndex.iterator(filter);
        while (itr.hasNext()) {
            DestinationHandler destinationHandler = (DestinationHandler) itr.next();
            try {
                // Don't attempt to reload system destinations or deleted or
                // toBeDeleted destinations (the latter should be invisible anyway).
                if (!(destinationHandler.isSystem()
                        || destinationHandler.isTemporary()
                        || destinationHandler.isToBeDeleted()  // these should be invisible anyway
                        || destinationHandler.isDeleted()))    // these should be invisible anyway
                {
                    reloadDestinationFromAdmin(destinationHandler);
                }
            } catch (Exception e) {
                // If the reload of a destination fails, an FFDC is taken, then we continue.
                FFDCFilter.processException(e,
                    "com.ibm.ws.sib.processor.impl.DynamicConfigManager.refreshDestinations",
                    "1:149:1.32", this);
                SibTr.exception(tc, e);
            }
        }
        itr.finished(); // 535718

        // Second pass: foreign buses. This filter should only let through
        // Active, CleanupPending and CleanupDefered entries.
        ForeignBusTypeFilter foreignFilter = new ForeignBusTypeFilter();
        foreignFilter.VISIBLE = Boolean.TRUE;
        foreignFilter.CORRUPT = Boolean.FALSE;
        foreignFilter.RESET_ON_RESTART = Boolean.FALSE;
        SIMPIterator foreignItr = _foreignBusIndex.iterator(foreignFilter);
        while (foreignItr.hasNext()) {
            BusHandler busHandler = (BusHandler) foreignItr.next();
            try {
                // Don't attempt to reload deleted or toBeDeleted destinations.
                if (!(busHandler.isToBeDeleted()  // these should be invisible anyway
                        || busHandler.isDeleted()))  // these should be invisible anyway
                {
                    reloadForeignBusFromAdmin(busHandler);
                }
            } catch (Exception e) {
                // If the reload of a destination fails, an FFDC is taken, then we continue.
                FFDCFilter.processException(e,
                    "com.ibm.ws.sib.processor.impl.DynamicConfigManager.refreshDestinations",
                    "1:184:1.32", this);
                SibTr.exception(tc, e);
            }
        }
        foreignItr.finished(); // 535718

        if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
            SibTr.exit(tc, "refreshDestinations");
    }
}
|
public class EncryptedPrivateKeyReader { /** * Reads from the given { @ link File } that contains the password protected { @ link KeyPair } and
* returns it
* @ param encryptedPrivateKeyFile
* the file that contains the password protected { @ link KeyPair }
* @ param password
* the password
* @ return the key pair
* @ throws FileNotFoundException
* is thrown if the file did not found
* @ throws IOException
* Signals that an I / O exception has occurred .
* @ throws PEMException
* is thrown if an error occurs on read the pem file */
public static KeyPair getKeyPair ( final File encryptedPrivateKeyFile , final String password ) throws FileNotFoundException , IOException , PEMException { } }
|
PEMParser pemParser = new PEMParser ( new FileReader ( encryptedPrivateKeyFile ) ) ; Object pemObject = pemParser . readObject ( ) ; pemParser . close ( ) ; JcaPEMKeyConverter keyConverter = new JcaPEMKeyConverter ( ) . setProvider ( SecurityProvider . BC . name ( ) ) ; KeyPair keyPair ; if ( pemObject instanceof PEMEncryptedKeyPair ) { PEMDecryptorProvider decryptorProvider = new JcePEMDecryptorProviderBuilder ( ) . setProvider ( SecurityProvider . BC . name ( ) ) . build ( password . toCharArray ( ) ) ; keyPair = keyConverter . getKeyPair ( ( ( PEMEncryptedKeyPair ) pemObject ) . decryptKeyPair ( decryptorProvider ) ) ; } else { keyPair = keyConverter . getKeyPair ( ( PEMKeyPair ) pemObject ) ; } return keyPair ;
|
public class ValidDBInstanceModificationsMessage {

    /**
     * Valid storage options for your DB instance.
     *
     * Lazily initializes the backing list on first access so callers never
     * see {@code null}; the returned list is the live internal list.
     *
     * @return Valid storage options for your DB instance.
     */
    public java.util.List<ValidStorageOptions> getStorage() {
        if (storage == null) {
            storage = new com.amazonaws.internal.SdkInternalList<ValidStorageOptions>();
        }
        return storage;
    }
}
|
public class SearchArtistsRequest {

    /**
     * Search for artists.
     *
     * Fetches the raw JSON for this request and unwraps the paging object
     * stored under the "artists" key.
     *
     * @return An {@link Artist} paging.
     * @throws IOException In case of networking issues.
     * @throws SpotifyWebApiException The Web API returned an error further
     *         specified in this exception's root cause.
     */
    @SuppressWarnings("unchecked")
    public Paging<Artist> execute() throws IOException, SpotifyWebApiException {
        return new Artist.JsonUtil().createModelObjectPaging(getJson(), "artists");
    }
}
|
public class Collectors {

    /**
     * Use occurrences to save the count of largest objects if
     * {@code areAllLargestSame = true} (e.g. {@code Number/String/...}) and
     * return a list by repeating the largest object {@code n} times.
     *
     * Convenience overload that delegates with an unbounded result size.
     *
     * @param areAllLargestSame whether all largest elements are interchangeable
     * @return a collector producing the list of maxima
     * @see Collectors#maxAll(Comparator, int, boolean)
     */
    @SuppressWarnings("rawtypes")
    public static <T extends Comparable> Collector<T, ?, List<T>> maxAll(final boolean areAllLargestSame) {
        return maxAll(Integer.MAX_VALUE, areAllLargestSame);
    }
}
|
public class MessageDigest {

    /**
     * Returns a MessageDigest object that implements the specified digest
     * algorithm.
     *
     * <p>This method traverses the list of registered security Providers,
     * starting with the most preferred Provider.
     * A new MessageDigest object encapsulating the
     * MessageDigestSpi implementation from the first
     * Provider that supports the specified algorithm is returned.
     *
     * <p>Note that the list of registered providers may be retrieved via
     * the {@link Security#getProviders() Security.getProviders()} method.
     *
     * @param algorithm the name of the algorithm requested.
     *        See the MessageDigest section in the <a href=
     *        "{@docRoot}openjdk-redirect.html?v=8&path=/technotes/guides/security/StandardNames.html#MessageDigest">
     *        Java Cryptography Architecture Standard Algorithm Name Documentation</a>
     *        for information about standard algorithm names.
     * @return a Message Digest object that implements the specified algorithm.
     * @exception NoSuchAlgorithmException if no Provider supports a
     *            MessageDigestSpi implementation for the specified algorithm.
     * @see Provider
     */
    public static MessageDigest getInstance(String algorithm) throws NoSuchAlgorithmException {
        try {
            MessageDigest md;
            // objs[0] is the implementation instance, objs[1] its Provider.
            Object[] objs = Security.getImpl(algorithm, "MessageDigest", (String) null);
            if (objs[0] instanceof MessageDigest) {
                // Provider supplied a full MessageDigest subclass directly.
                md = (MessageDigest) objs[0];
            } else {
                // Provider supplied only an SPI — wrap it in the Delegate adapter.
                md = new Delegate((MessageDigestSpi) objs[0], algorithm);
            }
            md.provider = (Provider) objs[1];
            /* Android-removed: this debugging mechanism is not available in Android.
            if (!skipDebug && pdebug != null) {
                pdebug.println("MessageDigest." + algorithm +
                    " algorithm from: " + md.provider.getName());
            */
            return md;
        } catch (NoSuchProviderException e) {
            // No provider at all implements the algorithm — report it as missing.
            throw new NoSuchAlgorithmException(algorithm + " not found");
        }
    }
}
|
public class DataBinder { /** * Puts a new string value into this binder , but using an alternative value if the given one is null . */
public String put ( String name , String value , String alternative ) { } }
|
return put ( name , value != null ? value : alternative ) ;
|
public class VulnerableSoftwareBuilder {

    /**
     * Adds a base CPE object to build a vulnerable software object from.
     *
     * Copies every well-formed component (vendor, product, version, update,
     * edition, language, software edition, target software/hardware, other)
     * plus the part from the given CPE into this builder.
     *
     * @param cpe the base CPE
     * @return a reference to the builder
     */
    public VulnerableSoftwareBuilder cpe(Cpe cpe) {
        this.part(cpe.getPart())
            .wfVendor(cpe.getWellFormedVendor())
            .wfProduct(cpe.getWellFormedProduct())
            .wfVersion(cpe.getWellFormedVersion())
            .wfUpdate(cpe.getWellFormedUpdate())
            .wfEdition(cpe.getWellFormedEdition())
            .wfLanguage(cpe.getWellFormedLanguage())
            .wfSwEdition(cpe.getWellFormedSwEdition())
            .wfTargetSw(cpe.getWellFormedTargetSw())
            .wfTargetHw(cpe.getWellFormedTargetHw())
            .wfOther(cpe.getWellFormedOther());
        return this;
    }
}
|
public class Model {

    /**
     * <p>withUpdater.</p>
     *
     * Convenience overload that targets the default data source.
     *
     * @param sql a {@link java.lang.String} object.
     * @return a {@link ameba.db.model.Updater} object.
     * @since 0.1.6e
     * @param <M> a M object.
     */
    public static <M extends Model> Updater<M> withUpdater(String sql) {
        return withUpdater(DataSourceManager.getDefaultDataSourceName(), sql);
    }
}
|
public class Stopwatch { /** * Stops the stopwatch . Future reads will return the fixed duration that had
* elapsed up to this point .
* @ return this { @ code Stopwatch } instance
* @ throws IllegalStateException if the stopwatch is already stopped . */
public Stopwatch stop ( ) { } }
|
long tick = System . nanoTime ( ) ; isRunning = false ; elapsedNanos += tick - startTick ; return this ;
|
public class NumberValidateRequestMarshaller {

    /**
     * Marshall the given parameter object.
     *
     * Writes the ISO country code and phone number fields through the protocol
     * marshaller using their pre-declared bindings.
     *
     * @param numberValidateRequest the request to marshall, must not be null
     * @param protocolMarshaller the marshaller that receives each field
     * @throws SdkClientException if the request is null or marshalling fails
     */
    public void marshall(NumberValidateRequest numberValidateRequest, ProtocolMarshaller protocolMarshaller) {
        if (numberValidateRequest == null) {
            throw new SdkClientException("Invalid argument passed to marshall(...)");
        }
        try {
            protocolMarshaller.marshall(numberValidateRequest.getIsoCountryCode(), ISOCOUNTRYCODE_BINDING);
            protocolMarshaller.marshall(numberValidateRequest.getPhoneNumber(), PHONENUMBER_BINDING);
        } catch (Exception e) {
            // Wrap any marshalling failure in the SDK's client exception type.
            throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
        }
    }
}
|
public class KeyVaultClientBaseImpl { /** * Updates the policy for a certificate .
* Set specified members in the certificate policy . Leave others as null . This operation requires the certificates / update permission .
* @ param vaultBaseUrl The vault name , for example https : / / myvault . vault . azure . net .
* @ param certificateName The name of the certificate in the given vault .
* @ param certificatePolicy The policy for the certificate .
* @ throws IllegalArgumentException thrown if parameters fail the validation
* @ return the observable to the CertificatePolicy object */
public Observable < CertificatePolicy > updateCertificatePolicyAsync ( String vaultBaseUrl , String certificateName , CertificatePolicy certificatePolicy ) { } }
|
return updateCertificatePolicyWithServiceResponseAsync ( vaultBaseUrl , certificateName , certificatePolicy ) . map ( new Func1 < ServiceResponse < CertificatePolicy > , CertificatePolicy > ( ) { @ Override public CertificatePolicy call ( ServiceResponse < CertificatePolicy > response ) { return response . body ( ) ; } } ) ;
|
public class Pattern {

    /**
     * Appends a new group pattern to the existing one. The new pattern enforces
     * strict temporal contiguity. This means that the whole pattern sequence
     * matches only if an event which matches this pattern directly follows the
     * preceding matching event. Thus, there cannot be any events in between two
     * matching events.
     *
     * @param group the pattern to append
     * @return A new pattern which is appended to this one
     */
    public GroupPattern<T, F> next(Pattern<T, F> group) {
        // STRICT consuming strategy encodes the "directly follows" contiguity;
        // the current after-match skip strategy is carried over unchanged.
        return new GroupPattern<>(this, group, ConsumingStrategy.STRICT, afterMatchSkipStrategy);
    }
}
|
public class OQueryDataProvider {

    /**
     * Set value for named parameter.
     *
     * Forwards to the underlying query model and returns this provider for
     * fluent chaining.
     *
     * @param paramName name of the parameter to set
     * @param value {@link IModel} for the parameter value
     * @return this {@link OQueryDataProvider}
     */
    public OQueryDataProvider<K> setParameter(String paramName, IModel<?> value) {
        model.setParameter(paramName, value);
        return this;
    }
}
|
public class JobChange { /** * Processes the specified JobChange data file obtained from Webphone . Each line is
* read and processed and any fallout is written to the specified fallout file .
* If fallout file already exists it is deleted and a new one is created . A
* comparison of the supervisor id in the job data file is done against the one returned
* by the authz service and if the supervisor Id has changed then the record is updated
* using the authz service . An email is sent to the new supervisor to approve the roles
* assigned to the user .
* @ param fileName - name of the file to process including its path
* @ param falloutFileName - the file where the fallout entries have to be written
* @ param validDate - the valid effective date when the user had moved to the new supervisor
* @ throws Exception */
public void processJobChangeDataFile ( String fileName , String falloutFileName , Date validDate ) throws Exception { } }
|
BufferedWriter writer = null ; try { env . info ( ) . log ( "Reading file: " + fileName ) ; FileInputStream fstream = new FileInputStream ( fileName ) ; BufferedReader br = new BufferedReader ( new InputStreamReader ( fstream ) ) ; String strLine ; while ( ( strLine = br . readLine ( ) ) != null ) { processLine ( strLine , writer ) ; } br . close ( ) ; } catch ( IOException e ) { env . error ( ) . log ( "Error while reading from the input data file: " + e ) ; throw e ; }
|
public class ClassSelector { /** * Exclude properties from the result map . */
public ClassSelector except ( String ... propertyNames ) { } }
|
if ( ArrayUtils . isEmpty ( propertyNames ) ) { return this ; } boolean updated = false ; for ( String propertyToRemove : propertyNames ) { if ( removePropertySelector ( propertyToRemove ) ) { updated = true ; } } if ( updated ) { compositeSelector = new CompositeSelector ( this . propertySelectors ) ; } return this ;
|
public class EMRepository { /** * Retrieves EM
* @ param sessionToken
* @ return */
public EntityManager getEM ( String sessionToken ) { } }
|
if ( emMap == null ) { return null ; } else { return emMap . get ( sessionToken ) ; }
|
public class SearchBuilder { /** * Figures out the tolerance for a search . For example , if the user is searching for < code > 4.00 < / code > , this method
* returns < code > 0.005 < / code > because we shold actually match values which are
* < code > 4 ( + / - ) 0.005 < / code > according to the FHIR specs . */
static BigDecimal calculateFuzzAmount ( ParamPrefixEnum cmpValue , BigDecimal theValue ) { } }
|
if ( cmpValue == ParamPrefixEnum . APPROXIMATE ) { return theValue . multiply ( new BigDecimal ( 0.1 ) ) ; } else { String plainString = theValue . toPlainString ( ) ; int dotIdx = plainString . indexOf ( '.' ) ; if ( dotIdx == - 1 ) { return new BigDecimal ( 0.5 ) ; } int precision = plainString . length ( ) - ( dotIdx ) ; double mul = Math . pow ( 10 , - precision ) ; double val = mul * 5.0d ; return new BigDecimal ( val ) ; }
|
public class N {

    /**
     * Distinct by the value mapped from <code>keyMapper</code>.
     *
     * Mostly it's designed for one-step operation to complete the operation in
     * one step. <code>java.util.stream.Stream</code> is preferred for multiple
     * phases operation.
     *
     * Delegates to the range overload over the full array; a null or empty
     * input yields a new empty list.
     *
     * @param a the source array, may be null or empty
     * @param keyMapper don't change value of the input parameter.
     * @return a new list of distinct-by-key elements
     * @throws E if the key mapper throws
     */
    public static <T, E extends Exception> List<T> distinctBy(final T[] a, final Try.Function<? super T, ?, E> keyMapper) throws E {
        if (N.isNullOrEmpty(a)) {
            return new ArrayList<>();
        }
        return distinctBy(a, 0, a.length, keyMapper);
    }
}
|
public class Contracts {

    /**
     * Checks that a string is neither {@code null} nor empty.
     *
     * @param pstring string to check
     * @param pmessage error message to throw if the test fails
     * @throws IllegalArgumentException when {@code pstring} is null or empty
     */
    public static void assertNotEmpty(final String pstring, final String pmessage) {
        final boolean missing = (pstring == null) || pstring.isEmpty();
        if (missing) {
            throw new IllegalArgumentException(pmessage);
        }
    }
}
|
public class ContextItems { /** * Saves a date item object as a context item .
* @ param itemName Item name
* @ param date Date value */
public void setDate ( String itemName , Date date ) { } }
|
if ( date == null ) { setItem ( itemName , null ) ; } else { setItem ( itemName , DateUtil . toHL7 ( date ) ) ; }
|
public class ClassDescriptor { /** * return the FieldDescriptor for the Attribute referenced in the path < br >
* the path may contain simple attribut names , functions and path expressions
* using relationships < br >
* ie : name , avg ( price ) , adress . street
* @ param aPath the path to the attribute
* @ param pathHints a Map containing the class to be used for a segment or < em > null < / em >
* if no segment was used .
* @ return the FieldDescriptor or null ( ie : for m : n queries ) */
public FieldDescriptor getFieldDescriptorForPath ( String aPath , Map pathHints ) { } }
|
ArrayList desc = getAttributeDescriptorsForPath ( aPath , pathHints ) ; FieldDescriptor fld = null ; Object temp ; if ( ! desc . isEmpty ( ) ) { temp = desc . get ( desc . size ( ) - 1 ) ; if ( temp instanceof FieldDescriptor ) { fld = ( FieldDescriptor ) temp ; } } return fld ;
|
public class Postcard { /** * Set object value , the value will be convert to string by ' Fastjson '
* @ param key a String , or null
* @ param value a Object , or null
* @ return current */
public Postcard withObject ( @ Nullable String key , @ Nullable Object value ) { } }
|
serializationService = ARouter . getInstance ( ) . navigation ( SerializationService . class ) ; mBundle . putString ( key , serializationService . object2Json ( value ) ) ; return this ;
|
public class RingbufferContainer { /** * Convert the supplied argument into serialized format .
* @ throws HazelcastSerializationException when serialization fails . */
private Data [ ] convertToData ( T [ ] items ) { } }
|
if ( items == null || items . length == 0 ) { return new Data [ 0 ] ; } if ( items [ 0 ] instanceof Data ) { return ( Data [ ] ) items ; } final Data [ ] ret = new Data [ items . length ] ; for ( int i = 0 ; i < items . length ; i ++ ) { ret [ i ] = convertToData ( items [ i ] ) ; } return ret ;
|
public class ValidationSession {

    /**
     * Add a setter listener to a field.
     *
     * Registers the listener with the validation engine, binding it to this
     * session.
     *
     * @param object the validation object owning the field
     * @param name the field name to listen on
     * @param listener the callback invoked when the field is set
     */
    public void addListener(ValidationObject object, String name, SetterListener listener) {
        m_validationEngine.addListener(object, name, this, listener);
    }
}
|
public class AbstractProgramController {

    /**
     * Force this controller into error state.
     *
     * @param t the failure cause, propagated to the future (if any) and the caller
     * @param future optional future to fail with {@code t}; may be null
     */
    protected final <V> void error(Throwable t, SettableFuture<V> future) {
        // Record the error state before completing the future or notifying the caller.
        state.set(State.ERROR);
        if (future != null) {
            future.setException(t);
        }
        caller.error(t);
    }
}
|
public class JdbcQueue {

    /**
     * Queue a message, retry if deadlock.
     *
     * Note: http://dev.mysql.com/doc/refman/5.0/en/innodb-deadlocks.html
     * InnoDB uses automatic row-level locking. You can get deadlocks even in
     * the case of transactions that just insert or delete a single row. That
     * is because these operations are not really "atomic"; they automatically
     * set locks on the (possibly several) index records of the row inserted
     * or deleted.
     *
     * Note: the supplied queue message is mutable — this method resets its
     * requeue count and stamps both timestamps before storing it.
     *
     * @param conn the JDBC connection to store through
     * @param msg the message to queue (mutated: requeues reset, timestamps set)
     * @param numRetries retries attempted so far
     * @param maxRetries retry budget for concurrency failures
     * @return true when the message was stored (or already present)
     */
    protected boolean _queueWithRetries(Connection conn, IQueueMessage<ID, DATA> msg, int numRetries, int maxRetries) {
        try {
            Date now = new Date();
            msg.setNumRequeues(0).setQueueTimestamp(now).setTimestamp(now);
            return putToQueueStorage(conn, msg);
        } catch (DuplicatedValueException dve) {
            // Message already stored — treat as success.
            LOGGER.warn(dve.getMessage(), dve);
            return true;
        } catch (DaoException de) {
            if (de.getCause() instanceof DuplicateKeyException) {
                // Same duplicate condition surfaced through the DAO layer.
                LOGGER.warn(de.getMessage(), de);
                return true;
            }
            if (de.getCause() instanceof ConcurrencyFailureException) {
                // Deadlock/lock-timeout: retry recursively until the budget is spent.
                if (numRetries > maxRetries) {
                    throw new QueueException(de);
                } else {
                    incRetryCounter("_queueWithRetries");
                    return _queueWithRetries(conn, msg, numRetries + 1, maxRetries);
                }
            }
            throw de;
        } catch (Exception e) {
            // Normalize everything else to QueueException without double-wrapping.
            throw e instanceof QueueException ? (QueueException) e : new QueueException(e);
        }
    }
}
|
public class ExtendedTetrahedral { /** * Helper method to locate two terminal atoms in a container for a given
* focus .
* @ param container structure representation
* @ param focus cumulated atom
* @ return the terminal atoms ( unordered ) */
public static IAtom [ ] findTerminalAtoms ( IAtomContainer container , IAtom focus ) { } }
|
List < IBond > focusBonds = container . getConnectedBondsList ( focus ) ; if ( focusBonds . size ( ) != 2 ) throw new IllegalArgumentException ( "focus must have exactly 2 neighbors" ) ; IAtom leftPrev = focus ; IAtom rightPrev = focus ; IAtom left = focusBonds . get ( 0 ) . getOther ( focus ) ; IAtom right = focusBonds . get ( 1 ) . getOther ( focus ) ; IAtom tmp ; while ( left != null && right != null ) { tmp = getOtherNbr ( container , left , leftPrev ) ; leftPrev = left ; left = tmp ; tmp = getOtherNbr ( container , right , rightPrev ) ; rightPrev = right ; right = tmp ; } return new IAtom [ ] { leftPrev , rightPrev } ;
|
public class AbstractConnection {

    /**
     * Send an exit signal to a remote process.
     *
     * Delegates to the shared exit sender with the exit2 protocol tag.
     *
     * @param from the Erlang PID of the sending process.
     * @param dest the Erlang PID of the remote process.
     * @param reason an Erlang term describing the exit reason.
     * @exception java.io.IOException if the connection is not active or a
     *            communication error occurs.
     */
    protected void sendExit2(final OtpErlangPid from, final OtpErlangPid dest, final OtpErlangObject reason) throws IOException {
        sendExit(exit2Tag, from, dest, reason);
    }
}
|
public class SchemaAnalyzer { /** * - - - - - private static methods - - - - - */
private static String getCombinedType ( final String startNodeType , final String relationshipType , final String endNodeType ) { } }
|
return startNodeType . concat ( relationshipType ) . concat ( endNodeType ) ;
|
public class InstallUtil {

    /**
     * Save a specific version as the last execute version.
     *
     * Failures are logged at SEVERE and swallowed — this is best-effort
     * bookkeeping and must not break the caller.
     *
     * @param version The version to save.
     */
    static void saveLastExecVersion(@Nonnull String version) {
        File lastExecVersionFile = getLastExecVersionFile();
        try {
            FileUtils.write(lastExecVersionFile, version);
        } catch (IOException e) {
            LOGGER.log(SEVERE, "Failed to save " + lastExecVersionFile.getAbsolutePath(), e);
        }
    }
}
|
public class ListStringCreativeTemplateVariable {

    /**
     * Sets the choices value for this ListStringCreativeTemplateVariable.
     *
     * Stores the array reference directly (no defensive copy).
     *
     * @param choices The values within the list users need to select from.
     */
    public void setChoices(com.google.api.ads.admanager.axis.v201808.ListStringCreativeTemplateVariableVariableChoice[] choices) {
        this.choices = choices;
    }
}
|
public class DiSH {

    /**
     * Computes the hierarchical clusters according to the cluster order.
     *
     * Runs four steps: extract clusters from the cluster order, validate them
     * against minpts, sort them, and build the subspace hierarchy; finally
     * every cluster without a parent becomes a top-level cluster of the result.
     *
     * @param database the database holding the objects
     * @param clusterOrder the cluster order
     * @return the clustering with its subspace hierarchy
     */
    private Clustering<SubspaceModel> computeClusters(Relation<V> database, DiSHClusterOrder clusterOrder) {
        final int dimensionality = RelationUtil.dimensionality(database);

        // Step 1: extract clusters
        Object2ObjectOpenCustomHashMap<long[], List<ArrayModifiableDBIDs>> clustersMap = extractClusters(database, clusterOrder);
        logClusterSizes("Step 1: extract clusters", dimensionality, clustersMap);

        // Step 2: check if there are clusters < minpts
        checkClusters(database, clustersMap);
        logClusterSizes("Step 2: check clusters", dimensionality, clustersMap);

        // Step 3: sort the clusters
        List<Cluster<SubspaceModel>> clusters = sortClusters(database, clustersMap);
        if (LOG.isVerbose()) {
            StringBuilder msg = new StringBuilder("Step 3: sort clusters");
            for (Cluster<SubspaceModel> c : clusters) {
                msg.append('\n')
                   .append(BitsUtil.toStringLow(c.getModel().getSubspace().getDimensions(), dimensionality))
                   .append(" ids ").append(c.size());
            }
            LOG.verbose(msg.toString());
        }

        // Step 4: build the hierarchy
        Clustering<SubspaceModel> clustering = new Clustering<>("DiSH clustering", "dish-clustering");
        buildHierarchy(database, clustering, clusters, dimensionality);
        if (LOG.isVerbose()) {
            StringBuilder msg = new StringBuilder("Step 4: build hierarchy");
            for (Cluster<SubspaceModel> c : clusters) {
                msg.append('\n')
                   .append(BitsUtil.toStringLow(c.getModel().getSubspace().getDimensions(), dimensionality))
                   .append(" ids ").append(c.size());
                for (It<Cluster<SubspaceModel>> iter = clustering.getClusterHierarchy().iterParents(c); iter.valid(); iter.advance()) {
                    msg.append("\n parent ").append(iter.get());
                }
                for (It<Cluster<SubspaceModel>> iter = clustering.getClusterHierarchy().iterChildren(c); iter.valid(); iter.advance()) {
                    msg.append("\n child ").append(iter.get());
                }
            }
            LOG.verbose(msg.toString());
        }

        // Build result: parentless clusters become top-level clusters.
        for (Cluster<SubspaceModel> c : clusters) {
            if (clustering.getClusterHierarchy().numParents(c) == 0) {
                clustering.addToplevelCluster(c);
            }
        }
        return clustering;
    }
}
|
public class VodClient {

    /**
     * Get media statistic info.
     *
     * The caller <i>must</i> authenticate with a valid BCE Access Key / Private
     * Key pair. Builds a request object from the arguments and delegates to the
     * request-based overload.
     *
     * @param mediaId the media to query
     * @param startTime query media start time, default: 2016-04-30T16:00:00Z
     * @param endTime query media end time, default: now
     * @param aggregate if need aggregate, default: true
     * @return The media statistic info
     */
    public GetMediaStatisticResponse getMediaStatistic(String mediaId, Date startTime, Date endTime, boolean aggregate) {
        GetMediaStatisticRequest request = new GetMediaStatisticRequest()
            .withMediaId(mediaId)
            .withStartTime(startTime)
            .withEndTime(endTime)
            .withAggregate(aggregate);
        return getMediaStatistic(request);
    }
}
|
public class Util {

    /**
     * Unite two sorted lists and write the result to the provided output array.
     *
     * Classic two-pointer merge over arrays of shorts compared as unsigned
     * values; equal elements are emitted once. Every exit path happens when one
     * input is exhausted and the remainder of the other is bulk-copied.
     *
     * @param set1 first array
     * @param offset1 offset of first array
     * @param length1 length of first array
     * @param set2 second array
     * @param offset2 offset of second array
     * @param length2 length of second array
     * @param buffer output array
     * @return cardinality of the union
     */
    public static int unsignedUnion2by2(final short[] set1, final int offset1, final int length1,
            final short[] set2, final int offset2, final int length2, final short[] buffer) {
        // Trivial cases: one side empty — copy the other side verbatim.
        if (0 == length2) {
            System.arraycopy(set1, offset1, buffer, 0, length1);
            return length1;
        }
        if (0 == length1) {
            System.arraycopy(set2, offset2, buffer, 0, length2);
            return length2;
        }
        int pos = 0;
        int k1 = offset1, k2 = offset2;
        short s1 = set1[k1];
        short s2 = set2[k2];
        while (true) {
            // Compare as unsigned 16-bit values.
            int v1 = toIntUnsigned(s1);
            int v2 = toIntUnsigned(s2);
            if (v1 < v2) {
                buffer[pos++] = s1;
                ++k1;
                if (k1 >= length1 + offset1) {
                    // set1 exhausted — flush the rest of set2.
                    System.arraycopy(set2, k2, buffer, pos, length2 - k2 + offset2);
                    return pos + length2 - k2 + offset2;
                }
                s1 = set1[k1];
            } else if (v1 == v2) {
                // Duplicate across both sets: emit once, advance both.
                buffer[pos++] = s1;
                ++k1;
                ++k2;
                if (k1 >= length1 + offset1) {
                    System.arraycopy(set2, k2, buffer, pos, length2 - k2 + offset2);
                    return pos + length2 - k2 + offset2;
                }
                if (k2 >= length2 + offset2) {
                    System.arraycopy(set1, k1, buffer, pos, length1 - k1 + offset1);
                    return pos + length1 - k1 + offset1;
                }
                s1 = set1[k1];
                s2 = set2[k2];
            } else { // if (set1[k1] > set2[k2])
                buffer[pos++] = s2;
                ++k2;
                if (k2 >= length2 + offset2) {
                    // set2 exhausted — flush the rest of set1.
                    System.arraycopy(set1, k1, buffer, pos, length1 - k1 + offset1);
                    return pos + length1 - k1 + offset1;
                }
                s2 = set2[k2];
            }
        }
        // return pos; (unreachable — every exit goes through the arraycopy returns above)
    }
}
|
public class PipelineTimeline {

    /**
     * Notifies timeline listeners about new entries.
     *
     * For each pipeline, only the oldest (smallest by compareTo) of the new
     * entries is reported. A listener failure is logged and does not prevent
     * notification of the remaining listeners/entries.
     */
    private void notifyListeners(List<PipelineTimelineEntry> newEntries) {
        // Reduce the new entries to one "oldest" entry per pipeline name.
        Map<CaseInsensitiveString, PipelineTimelineEntry> pipelineToOldestEntry = new HashMap<>();
        for (PipelineTimelineEntry challenger : newEntries) {
            CaseInsensitiveString pipelineName = new CaseInsensitiveString(challenger.getPipelineName());
            PipelineTimelineEntry champion = pipelineToOldestEntry.get(pipelineName);
            if (champion == null || challenger.compareTo(champion) < 0) {
                pipelineToOldestEntry.put(pipelineName, challenger);
            }
        }
        for (TimelineUpdateListener listener : listeners) {
            for (Map.Entry<CaseInsensitiveString, PipelineTimelineEntry> entry : pipelineToOldestEntry.entrySet()) {
                try {
                    listener.added(entry.getValue(), naturalOrderPmm.get(entry.getKey()));
                } catch (Exception e) {
                    LOGGER.warn("Ignoring exception when notifying listener: {}", listener, e);
                }
            }
        }
    }
}
|
public class URLEncodedUtils {

    /**
     * Adds all parameters within the Scanner to the list of
     * <code>parameters</code>, as encoded by <code>encoding</code>. For
     * example, a scanner containing the string <code>a=1&b=2&c=3</code> would
     * add the {@link NameValuePair NameValuePairs} a=1, b=2, and c=3 to the
     * list of parameters.
     *
     * @param parameters
     *            List to add parameters to.
     * @param scanner
     *            Input that contains the parameters to parse.
     * @param charset
     *            Encoding to use when decoding the parameters.
     */
    public static void parse(final List<NameValuePair> parameters, final Scanner scanner, final String charset) {
        scanner.useDelimiter(PARAMETER_SEPARATOR);
        while (scanner.hasNext()) {
            String name = null;
            String value = null;
            String token = scanner.next();
            int i = token.indexOf(NAME_VALUE_SEPARATOR);
            if (i != -1) {
                // "name=value" form: decode both sides.
                name = decodeFormFields(token.substring(0, i).trim(), charset);
                value = decodeFormFields(token.substring(i + 1).trim(), charset);
            } else {
                // Bare "name" token: value stays null.
                name = decodeFormFields(token.trim(), charset);
            }
            parameters.add(new BasicNameValuePair(name, value));
        }
    }
}
|
public class MolecularFormulaManipulator { /** * Returns a set of nodes excluding all the hydrogens .
* @ param formula The IMolecularFormula
* @ return The heavyElements value into a List
* @ cdk . keyword hydrogen , removal */
public static List < IElement > getHeavyElements ( IMolecularFormula formula ) { } }
|
List < IElement > newEle = new ArrayList < IElement > ( ) ; for ( IElement element : elements ( formula ) ) { if ( ! element . getSymbol ( ) . equals ( "H" ) ) { newEle . add ( element ) ; } } return newEle ;
|
public class DefaultGroovyMethods {

    /**
     * Provides an easy way to append multiple Map.Entry values to a Map.
     *
     * @param self a Map
     * @param entries a Collection of Map.Entry items to be added to the Map.
     * @return the same map, after the items have been added to it.
     * @since 1.6.1
     */
    public static <K, V> Map<K, V> putAll(Map<K, V> self, Collection<? extends Map.Entry<? extends K, ? extends V>> entries) {
        Iterator<? extends Map.Entry<? extends K, ? extends V>> it = entries.iterator();
        while (it.hasNext()) {
            Map.Entry<? extends K, ? extends V> entry = it.next();
            self.put(entry.getKey(), entry.getValue());
        }
        return self;
    }
}
|
public class AmazonRekognitionClient { /** * Starts asynchronous detection of labels in a stored video .
* Amazon Rekognition Video can detect labels in a video . Labels are instances of real - world entities . This includes
* objects like flower , tree , and table ; events like wedding , graduation , and birthday party ; concepts like
* landscape , evening , and nature ; and activities like a person getting out of a car or a person skiing .
* The video must be stored in an Amazon S3 bucket . Use < a > Video < / a > to specify the bucket name and the filename of
* the video . < code > StartLabelDetection < / code > returns a job identifier ( < code > JobId < / code > ) which you use to get
* the results of the operation . When label detection is finished , Amazon Rekognition Video publishes a completion
* status to the Amazon Simple Notification Service topic that you specify in < code > NotificationChannel < / code > .
* To get the results of the label detection operation , first check that the status value published to the Amazon
* SNS topic is < code > SUCCEEDED < / code > . If so , call < a > GetLabelDetection < / a > and pass the job identifier (
* < code > JobId < / code > ) from the initial call to < code > StartLabelDetection < / code > .
* @ param startLabelDetectionRequest
* @ return Result of the StartLabelDetection operation returned by the service .
* @ throws AccessDeniedException
* You are not authorized to perform the action .
* @ throws IdempotentParameterMismatchException
* A < code > ClientRequestToken < / code > input parameter was reused with an operation , but at least one of the
* other input parameters is different from the previous call to the operation .
* @ throws InvalidParameterException
* Input parameter violated a constraint . Validate your parameter before calling the API operation again .
* @ throws InvalidS3ObjectException
* Amazon Rekognition is unable to access the S3 object specified in the request .
* @ throws InternalServerErrorException
* Amazon Rekognition experienced a service issue . Try your call again .
* @ throws VideoTooLargeException
* The file size or duration of the supplied media is too large . The maximum file size is 8GB . The maximum
* duration is 2 hours .
* @ throws ProvisionedThroughputExceededException
* The number of requests exceeded your throughput limit . If you want to increase this limit , contact Amazon
* Rekognition .
* @ throws LimitExceededException
* An Amazon Rekognition service limit was exceeded . For example , if you start too many Amazon Rekognition
* Video jobs concurrently , calls to start operations ( < code > StartLabelDetection < / code > , for example ) will
* raise a < code > LimitExceededException < / code > exception ( HTTP status code : 400 ) until the number of
* concurrently running jobs is below the Amazon Rekognition service limit .
* @ throws ThrottlingException
* Amazon Rekognition is temporarily unable to process the request . Try your call again .
* @ sample AmazonRekognition . StartLabelDetection */
@ Override public StartLabelDetectionResult startLabelDetection ( StartLabelDetectionRequest request ) { } }
|
request = beforeClientExecution ( request ) ; return executeStartLabelDetection ( request ) ;
|
public class SharedFileHandler {
    /**
     * DoNewRecord Method.
     * After the superclass sets up the new record, presets the "type" field to the
     * target value without firing its listeners or marking the record modified.
     */
    public void doNewRecord(boolean bDisplayOption) {
        super.doNewRecord(bDisplayOption);
        // Resolve the type field either by name (preferred) or by index.
        BaseField fldTarget = null;
        if (typeFieldName != null)
            fldTarget = this.getOwner().getField(typeFieldName);
        else
            fldTarget = this.getOwner().getField(m_iTypeField);
        // Temporarily disable listeners so the preset value does not trigger them;
        // the previous enabled flags are saved for restoration below.
        boolean[] rgbEnabled = fldTarget.setEnableListeners(false);
        InitOnceFieldHandler listener = (InitOnceFieldHandler) fldTarget.getListener(InitOnceFieldHandler.class.getName());
        if (listener != null)
            listener.setFirstTime(true); // Special case - you shouldn't have put this listener here, but since you did...
        fldTarget.setValue(m_iTargetValue, DBConstants.DISPLAY, DBConstants.INIT_MOVE);
        fldTarget.setModified(false); // a preset default must not make the record look dirty
        fldTarget.setEnableListeners(rgbEnabled); // restore the saved listener-enabled flags
    }
}
|
public class ExternalMergeSort {
    /**
     * Performs an external merge sort on the values in the iterator.
     * Small inputs (a single chunk) are sorted entirely in memory; larger inputs
     * are spilled to sorted chunk files on disk and merged back.
     *
     * @param values Iterator containing the data to sort.
     * @return an iterator that iterates over the sorted result.
     * @throws IOException if something fails when doing I/O.
     */
    public CloseableIterator<T> mergeSort(Iterator<T> values) throws IOException {
        ChunkSizeIterator<T> csi = new ChunkSizeIterator<T>(values, config.chunkSize);
        if (csi.isMultipleChunks()) {
            // Too much data for one in-memory chunk: write sorted runs to disk, then merge them.
            List<File> sortedChunks = writeSortedChunks(csi);
            return mergeSortedChunks(sortedChunks);
        } else {
            if (config.distinct) {
                // Single chunk with de-duplication: a TreeSet both sorts and drops duplicates.
                SortedSet<T> list = new TreeSet<T>(comparator);
                while (csi.hasNext()) {
                    list.add(csi.next());
                }
                return new DelegatingMergeIterator<T>(list.iterator());
            } else {
                // Single chunk: sort in memory; presize with the buffered head size to avoid regrowth.
                List<T> list = new ArrayList<T>(csi.getHeadSize());
                while (csi.hasNext()) {
                    list.add(csi.next());
                }
                Collections.sort(list, comparator);
                return new DelegatingMergeIterator<T>(list.iterator());
            }
        }
    }
}
|
public class AES {
    /**
     * Decrypts data with AES.
     *
     * @param data the ciphertext to decrypt
     * @param key  the raw AES key bytes (16/24/32 bytes)
     * @return the decrypted plaintext
     * @throws Exception if the key is invalid or decryption fails
     */
    public static byte[] decrypt(byte[] data, byte[] key) throws Exception {
        // Restore the secret key from the raw key bytes.
        SecretKey secretKey = new SecretKeySpec(key, "AES");
        // Spell out the transformation: the bare "AES" default resolves to exactly
        // this, so behavior is unchanged but no longer provider-implicit.
        // SECURITY NOTE(review): ECB mode leaks plaintext patterns and has no
        // integrity protection; prefer AES/GCM/NoPadding with an IV for new data.
        Cipher cipher = Cipher.getInstance("AES/ECB/PKCS5Padding");
        // Initialize the cipher for decryption with the restored key.
        cipher.init(Cipher.DECRYPT_MODE, secretKey);
        return cipher.doFinal(data);
    }
}
|
public class H2DbConfig { /** * H2 console .
* @ return / console / * */
@ Bean public ServletRegistrationBean h2servletRegistration ( ) { } }
|
ServletRegistrationBean servletRegistrationBean = new ServletRegistrationBean ( new WebServlet ( ) ) ; servletRegistrationBean . addUrlMappings ( "/h2/*" ) ; return servletRegistrationBean ;
|
public class DefaultDiskResourceService {
    /**
     * {@inheritDoc}
     * <p>
     * Returns the file-backed state repository named {@code name} inside the
     * persistence space for {@code identifier}, creating it (and its sub-directory)
     * on first use.
     */
    @Override
    public StateRepository getStateRepositoryWithin(PersistenceSpaceIdentifier<?> identifier, String name) throws CachePersistenceException {
        PersistenceSpace persistenceSpace = getPersistenceSpace(identifier);
        if (persistenceSpace == null) {
            // Unknown identifier: the space was never created or has been destroyed.
            throw newCachePersistenceException(identifier);
        }
        FileBasedStateRepository stateRepository = new FileBasedStateRepository(FileUtils.createSubDirectory(persistenceSpace.identifier.persistentSpaceId.getRoot(), name));
        // putIfAbsent lets concurrent callers agree on a single repository per name;
        // the loser's freshly built instance is simply discarded.
        FileBasedStateRepository previous = persistenceSpace.stateRepositories.putIfAbsent(name, stateRepository);
        if (previous != null) {
            return previous;
        }
        return stateRepository;
    }
}
|
public class SQLiteExecutor { /** * Insert one record into database .
* To exclude the some properties or default value , invoke { @ code com . landawn . abacus . util . N # entity2Map ( Object , boolean , Collection , NamingPolicy ) }
* < p > The target table is identified by the simple class name of the specified entity . < / p >
* @ param entity with getter / setter methods
* @ return
* @ see com . landawn . abacus . util . Maps # entity2Map ( Object , boolean , Collection , NamingPolicy ) */
public long insert ( Object entity ) { } }
|
if ( ! N . isEntity ( entity . getClass ( ) ) ) { throw new IllegalArgumentException ( "The specified parameter must be an entity with getter/setter methods" ) ; } return insert ( getTableNameByEntity ( entity ) , entity ) ;
|
public class Producer {
    /**
     * Shuts down the producer: closes the underlying Kafka producer (if any), then
     * performs an orderly shutdown of the executor service, waiting up to 10 seconds
     * before forcing termination.
     */
    public void shutdown() {
        if (_producer != null) {
            _producer.close();
        }
        _executorService.shutdown(); // stop accepting new tasks; let queued work drain
        try {
            if (!_executorService.awaitTermination(10, TimeUnit.SECONDS)) {
                _logger.warn("Shutdown of Kafka executor service timed out after 10 seconds.");
                _executorService.shutdownNow(); // escalate: cancel anything still running
            }
        } catch (InterruptedException ex) {
            _logger.warn("Shutdown of executor service was interrupted.");
            Thread.currentThread().interrupt(); // restore the interrupt flag for callers
        }
    }
}
|
public class MethodToMarshaller { /** * / * @ Override */
public T marshal ( S object ) { } }
|
try { final T result = ( T ) marshalHandle . invoke ( object ) ; return result ; } catch ( Throwable ex ) { if ( ex . getCause ( ) instanceof RuntimeException ) { throw ( RuntimeException ) ex . getCause ( ) ; } throw new BindingException ( ex . getMessage ( ) , ex . getCause ( ) ) ; }
|
public class AdalDeviceCodeAuthorizationGrant { /** * Converts the device code grant to a map of HTTP paramters .
* @ return The map with HTTP parameters . */
@ Override public Map < String , List < String > > toParameters ( ) { } }
|
final Map < String , List < String > > outParams = new LinkedHashMap < > ( ) ; outParams . put ( "resource" , Collections . singletonList ( resource ) ) ; outParams . put ( "grant_type" , Collections . singletonList ( GRANT_TYPE ) ) ; outParams . put ( "code" , Collections . singletonList ( deviceCode . getDeviceCode ( ) ) ) ; return outParams ;
|
public class JavaType { /** * Note a gosu class can be BOTH parameterzied AND generic . For example ,
* class Bar < T > {
* function blah ( ) : T { . . . }
* class Foo < T extends CharSequence > extends Bar < T > { }
* The class Bar < T > here is parameterized by the type var from Foo , yet it is
* still a generic class . The blah ( ) method in Foo ' s typeinfo must have a
* return type consistent with Foo ' s type var upper bound , CharSequence .
* / / # # todo : maybe we don ' t need this concept any longer ? i . e . , parameterization should work correctly regardless .
* @ param typeParams type parameters
* @ return generic type variables */
private GenericTypeVariable [ ] assignTypeVarsFromTypeParams ( IType [ ] typeParams ) { } }
|
List < GenericTypeVariable > genTypeVars = new ArrayList < GenericTypeVariable > ( ) ; for ( IType typeParam : typeParams ) { if ( typeParam instanceof TypeVariableType ) { genTypeVars . add ( ( GenericTypeVariable ) ( ( TypeVariableType ) typeParam ) . getTypeVarDef ( ) . getTypeVar ( ) ) ; } } return genTypeVars . toArray ( new GenericTypeVariable [ genTypeVars . size ( ) ] ) ;
|
public class AuditUtils { /** * Writes the data object as a JSON string .
* @ param data */
private static String toJSON ( Object data ) { } }
|
try { return mapper . writeValueAsString ( data ) ; } catch ( Exception e ) { throw new RuntimeException ( e ) ; }
|
public class AbstractExpressionGenerator {
    /**
     * Compute the simple name for the called feature.
     *
     * @param featureCall the feature call.
     * @param logicalContainerProvider the provider of logical container.
     * @param featureNameProvider the provider of feature name.
     * @param nullKeyword the null-equivalent keyword.
     * @param thisKeyword the this-equivalent keyword.
     * @param superKeyword the super-equivalent keyword.
     * @param referenceNameLambda replies the reference name or {@code null} if none.
     * @return the simple name.
     */
    public static String getCallSimpleName(XAbstractFeatureCall featureCall, ILogicalContainerProvider logicalContainerProvider, IdentifiableSimpleNameProvider featureNameProvider, Function0<? extends String> nullKeyword, Function0<? extends String> thisKeyword, Function0<? extends String> superKeyword, Function1<? super JvmIdentifiableElement, ? extends String> referenceNameLambda) {
        String name = null;
        final JvmIdentifiableElement calledFeature = featureCall.getFeature();
        if (calledFeature instanceof JvmConstructor) {
            // Constructor call: render as "this" when invoked from inside the same
            // declaring type, otherwise as "super".
            final JvmDeclaredType constructorContainer = ((JvmConstructor) calledFeature).getDeclaringType();
            final JvmIdentifiableElement logicalContainer = logicalContainerProvider.getNearestLogicalContainer(featureCall);
            // NOTE(review): assumes the nearest logical container is a non-null JvmMember;
            // a null or non-member container would fail here — confirm against callers.
            final JvmDeclaredType contextType = ((JvmMember) logicalContainer).getDeclaringType();
            if (contextType == constructorContainer) {
                name = thisKeyword.apply();
            } else {
                name = superKeyword.apply();
            }
        } else if (calledFeature != null) {
            // An explicit reference name, when supplied, wins over all other naming.
            final String referenceName = referenceNameLambda.apply(calledFeature);
            if (referenceName != null) {
                name = referenceName;
            } else if (calledFeature instanceof JvmOperation) {
                name = featureNameProvider.getSimpleName(calledFeature);
            } else {
                // Fall back to the feature name exactly as written in the source.
                name = featureCall.getConcreteSyntaxFeatureName();
            }
        }
        if (name == null) {
            // Unresolved feature (or no name produced): render the null-equivalent keyword.
            return nullKeyword.apply();
        }
        return name;
    }
}
|
public class AnimaQuery { /** * Build a update statement .
* @ param model model instance
* @ param updateColumns update columns
* @ param < S >
* @ return update sql */
private < S extends Model > String buildUpdateSQL ( S model , Map < String , Object > updateColumns ) { } }
|
SQLParams sqlParams = SQLParams . builder ( ) . model ( model ) . modelClass ( this . modelClass ) . tableName ( this . tableName ) . pkName ( this . primaryKeyColumn ) . updateColumns ( updateColumns ) . conditionSQL ( this . conditionSQL ) . build ( ) ; return Anima . of ( ) . dialect ( ) . update ( sqlParams ) ;
|
public class ModelResourceStructure {
    /**
     * Find the beans for this beanType.
     * This can use URL query parameters such as order and maxrows to configure
     * the query.
     *
     * @param includeDeleted whether soft-deleted rows are included in the result.
     * @return a {@link javax.ws.rs.core.Response} object; 204 No Content when nothing matched.
     * @throws java.lang.Exception if any.
     * @see javax.ws.rs.GET
     * @see AbstractModelResource#find
     */
    public Response find(@QueryParam("include_deleted") final boolean includeDeleted) throws Exception {
        matchedFind(includeDeleted); // pre-find hook — presumably validation/authorization; confirm in subclass
        final Query<MODEL> query = server.find(modelType);
        if (includeDeleted) {
            query.setIncludeSoftDeletes();
        }
        defaultFindOrderBy(query);
        // The row count is produced inside the transaction but needed afterwards
        // for the response header, hence the mutable Ref holder.
        final Ref<FutureRowCount> rowCount = Refs.emptyRef();
        Object entity = executeTx(t -> {
            configDefaultQuery(query);
            configFindQuery(query, includeDeleted);
            rowCount.set(applyUriQuery(query)); // applies URI params such as order/maxrows
            List<MODEL> list = query.findList();
            return processFoundModelList(list, includeDeleted);
        });
        if (isEmptyEntity(entity)) {
            // Nothing found (or processed away): respond with 204 instead of an empty body.
            return Response.noContent().build();
        }
        Response response = Response.ok(entity).build();
        applyRowCountHeader(response.getHeaders(), query, rowCount.get());
        return response;
    }
}
|
public class Cached {
    /**
     * Returns the list for {@code key}, serving it from the segment cache when the
     * cached copy is still newer than the segment's DB timestamp, otherwise
     * fetching it via {@code getter} and (on success) refreshing the cache.
     * TODO utilize Segmented Caches, and fold "get" into "reads"
     */
    @SuppressWarnings("unchecked")
    public Result<List<DATA>> get(TRANS trans, String key, Getter<DATA> getter) {
        List<DATA> ld = null;
        Result<List<DATA>> rld = null;
        int cacheIdx = cacheIdx(key); // select the cache segment for this key
        Map<String, Dated> map = ((Map<String, Dated>) cache[cacheIdx]);
        // Check for saved element in cache
        Dated cached = map.get(key);
        // Note: These Segment Timestamps are kept up to date with DB
        Date dbStamp = info.get(trans, name, cacheIdx);
        // Check for cache Entry and whether it is still good (a good Cache Entry is same or after DBEntry, so we use "before" syntax)
        if (cached != null && dbStamp.before(cached.timestamp)) {
            ld = (List<DATA>) cached.data;
            rld = Result.ok(ld);
        } else {
            rld = getter.get(); // cache miss or stale entry: fetch from the backing source
            if (rld.isOK()) { // only store valid lists
                map.put(key, new Dated(rld.value)); // successful item found gets put in cache
                // } else if (rld.status == Result.ERR_Backend) {
                // map.remove(key);
            }
        }
        return rld;
    }
}
|
public class TimeUnitUtility { /** * This method is used to parse a string representation of a time
* unit , and return the appropriate constant value .
* @ param units string representation of a time unit
* @ param locale target locale
* @ return numeric constant
* @ throws MPXJException normally thrown when parsing fails */
@ SuppressWarnings ( "unchecked" ) public static TimeUnit getInstance ( String units , Locale locale ) throws MPXJException { } }
|
Map < String , Integer > map = LocaleData . getMap ( locale , LocaleData . TIME_UNITS_MAP ) ; Integer result = map . get ( units . toLowerCase ( ) ) ; if ( result == null ) { throw new MPXJException ( MPXJException . INVALID_TIME_UNIT + " " + units ) ; } return ( TimeUnit . getInstance ( result . intValue ( ) ) ) ;
|
public class PowerMock {
    /**
     * Suppress a specific method call. Use this for overloaded methods: the
     * parameter types disambiguate which overload of {@code methodName} to suppress.
     *
     * @deprecated Use {@link #suppress(Method)} instead.
     */
    @Deprecated
    public static synchronized void suppressMethod(Class<?> clazz, String methodName, Class<?>[] parameterTypes) {
        // Thin delegate retained for backwards compatibility with the deprecated API.
        SuppressCode.suppressMethod(clazz, methodName, parameterTypes);
    }
}
|
public class AbstractAppender { /** * Builds an empty AppendEntries request .
* Empty append requests are used as heartbeats to followers . */
protected AppendRequest buildAppendEmptyRequest ( RaftMemberContext member ) { } }
|
final RaftLogReader reader = member . getLogReader ( ) ; // Read the previous entry from the reader .
// The reader can be null for RESERVE members .
Indexed < RaftLogEntry > prevEntry = reader != null ? reader . getCurrentEntry ( ) : null ; DefaultRaftMember leader = raft . getLeader ( ) ; return AppendRequest . builder ( ) . withTerm ( raft . getTerm ( ) ) . withLeader ( leader != null ? leader . memberId ( ) : null ) . withPrevLogIndex ( prevEntry != null ? prevEntry . index ( ) : reader != null ? reader . getFirstIndex ( ) - 1 : 0 ) . withPrevLogTerm ( prevEntry != null ? prevEntry . entry ( ) . term ( ) : 0 ) . withEntries ( Collections . emptyList ( ) ) . withCommitIndex ( raft . getCommitIndex ( ) ) . build ( ) ;
|
public class ObjectFactory {
    /**
     * Create an instance of {@link JAXBElement}{@code <}{@link TopoPrimitiveArrayAssociationType}{@code >}
     * for the GML {@code topoPrimitiveMembers} element.
     *
     * @param value
     *     Java instance representing xml element's value.
     * @return
     *     the new instance of {@link JAXBElement}{@code <}{@link TopoPrimitiveArrayAssociationType}{@code >}
     */
    @XmlElementDecl(namespace = "http://www.opengis.net/gml", name = "topoPrimitiveMembers")
    public JAXBElement<TopoPrimitiveArrayAssociationType> createTopoPrimitiveMembers(TopoPrimitiveArrayAssociationType value) {
        // The null scope argument marks this as a global (top-level) element declaration.
        return new JAXBElement<TopoPrimitiveArrayAssociationType>(_TopoPrimitiveMembers_QNAME, TopoPrimitiveArrayAssociationType.class, null, value);
    }
}
|
public class Query {
    /**
     * Create a Query for the given Service and Domain, using the default
     * {@code BROWSING_TIMEOUT}.
     *
     * @param service service to search for
     * @param domain domain to search on
     * @return a new Query object
     */
    @SuppressWarnings("unused")
    public static Query createFor(Service service, Domain domain) {
        return new Query(service, domain, BROWSING_TIMEOUT);
    }
}
|
public class JcrSession { /** * Determine if the supplied string represents just the { @ link Node # getIdentifier ( ) node ' s identifier } or whether it is a
* string representation of a NodeKey . If it is just the node ' s identifier , then the NodeKey is created by using the same
* { @ link NodeKey # getSourceKey ( ) source key } and { @ link NodeKey # getWorkspaceKey ( ) workspace key } from the supplied root node .
* @ param identifier the identifier string ; may not be null
* @ param rootKey the node of the root in the workspace ; may not be null
* @ return the node key re - created from the supplied identifier ; never null */
public static NodeKey createNodeKeyFromIdentifier ( String identifier , NodeKey rootKey ) { } }
|
// If this node is a random identifier , then we need to use it as a node key identifier . . .
if ( NodeKey . isValidRandomIdentifier ( identifier ) ) { return rootKey . withId ( identifier ) ; } return new NodeKey ( identifier ) ;
|
public class CollectionUtils {
    /**
     * Moves every matching element (except one already at index 0) up by {@code n}
     * positions in the list, clamping at the front.
     *
     * @param list the list to reorder in place; may be null
     * @param key the key identifying elements to move
     * @param keyMapper extracts the comparison key from each element
     * @param n how many positions to move up
     * @return true if the list was modified, false otherwise
     */
    public static <T, K> boolean moveUp(List<T> list, K key, Function<T, K> keyMapper, int n) {
        if (list == null)
            return false;
        final ArrayList<T> reordered = new ArrayList<T>();
        boolean changed = false;
        int index = 0;
        for (T item : list) {
            if (index > 0 && key.equals(keyMapper.apply(item))) {
                // Insert n slots earlier, clamped to the head of the list.
                reordered.add(Math.max(index - n, 0), item);
                changed = true;
            } else {
                reordered.add(item);
            }
            index++;
        }
        if (!changed)
            return false;
        list.clear();
        list.addAll(reordered);
        return true;
    }
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.